text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
'''
Created on Nov 23, 2011
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
! This is from file COMPOUND.DAT for HENKE. Entries are:
! compound formula density (g/cc)
water H2O 1.0
protein H48.6C32.9N8.9O8.9S0.6 1.35
lipid H62.5C31.5O6.3 1.0
nucleosome H42.1C31.9N10.3O13.9P1.6S0.3 1.5
dna H35.5C30.8N11.7O18.9P3.1 1.7
helium He 0.1663e-3
chromatin H49.95C24.64N8.66O15.57P1.07S0.03 1.527
air N78.08O20.95Ar0.93 1.2047e-3
pmma C5H8O2 1.18
nitride Si3N4 3.44
graphite C 2.26
nickel Ni 8.876
beryl Be 1.845
copper Cu 8.96
quartz SiO2 2.20
aluminum Al 2.70
gold Au 19.3
ice H2O 0.92
carbon C 1.0
polystyrene C8H8 1.06
silicon Si 2.33
germanium Ge 5.323
'''
from __future__ import division
import numpy as np
from xdrlib import *
import string
#-----------------------------------------------------------------------------
class henke:
def __init__(self, logger):
    """Hold the HENKE COMPOUND.DAT lookup tables: compound name, chemical
    formula and density (g/cc), kept as three parallel lists."""
    self.logger = logger
    # One row per known compound: (name, formula, density in g/cc).
    compounds = [
        ('water', 'H2O', 1.0),
        ('protein', 'H48.6C32.9N8.9O8.9S0.6', 1.35),
        ('lipid', 'H62.5C31.5O6.3', 1.0),
        ('nucleosome', 'H42.1C31.9N10.3O13.9P1.6S0.3', 1.5),
        ('dna', 'H35.5C30.8N11.7O18.9P3.1', 1.7),
        ('helium', 'He', 1.66e-04),
        ('chromatin', 'H49.95C24.64N8.66O15.57P1.07S0.03', 1.527),
        ('air', 'N78.08O20.95Ar0.93', 1.20e-03),
        ('pmma', 'C5H8O2', 1.18),
        ('nitride', 'Si3N4', 3.44),
        ('graphite', 'C', 2.26),
        ('nickel', 'Ni', 8.876),
        ('beryl', 'Be', 1.845),
        ('copper', 'Cu', 8.96),
        ('quartz', 'SiO2', 2.2),
        ('aluminum', 'Al', 2.7),
        ('gold', 'Au', 19.3),
        ('ice', 'H2O', 0.92),
        ('carbon', 'C', 1),
        ('polystyrene', 'C8H8', 1.06),
        ('silicon', 'Si', 2.33),
        ('germanium', 'Ge', 5.323),
    ]
    self.compound_name = [name for name, formula, dens in compounds]
    # NOTE: 'forumula' is a long-standing typo kept because other methods
    # (compound()) reference this attribute name.
    self.compound_forumula = [formula for name, formula, dens in compounds]
    self.compound_density = [dens for name, formula, dens in compounds]
#-----------------------------------------------------------------------------
def compound(self, compound_string, density):
    """Resolve a compound name or chemical formula into per-element atom
    counts and the molecular weight.

    A known compound name (e.g. 'water') is first mapped to its formula;
    any string matching a known formula is then parsed.  Returns
    (z_array, atwt); ([], 0) when the string is not recognized.  The
    density argument is accepted for interface compatibility but unused.
    """
    z_array = []
    atwt = 0
    # Map a friendly name to its chemical formula, if we know it.
    try:
        idx = self.compound_name.index(compound_string)
        compound_string = self.compound_forumula[idx]
    except ValueError:
        pass
    # Only parse strings that correspond to a tabulated formula.
    if compound_string in self.compound_forumula:
        z_array = self.zcompound(compound_string, z_array)
        atwt = self.zatwt(z_array)
    return z_array, atwt
#-----------------------------------------------------------------------------
def zcompound(self, compound_string, z_array, paren_multiplier=False):
    """Parse a chemical formula like 'H2O' or 'Si3N4' into atom counts.

    Returns a 92-element numpy array where entry Z-1 holds the number of
    atoms of atomic number Z, or an empty array when the formula cannot
    be parsed.  paren_multiplier is an internal recursion flag: on the
    top-level call (False) a fresh zero array is created and the passed
    z_array argument is ignored.
    Fixes vs. original: the number scanner had an off-by-one
    (len > num_start_index + 1) that silently dropped a trailing
    two-character count such as the '2' in 'SiO2'.
    """
    # Symbol at index k has atomic number Z = k + 1 (H .. U).
    symbols = ('H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
               'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca',
               'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
               'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr',
               'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
               'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',
               'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb',
               'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg',
               'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
               'Pa', 'U')
    if paren_multiplier == False:
        z_array = np.zeros(92)
    if len(compound_string) == 0:
        return z_array
    if compound_string[0] == '(':
        # Parenthesized groups were never implemented in the original
        # routine; the string is returned unprocessed.
        return z_array
    # Element symbol: one capital letter plus an optional lowercase letter.
    sym = compound_string[0]
    num_start = 1
    if len(compound_string) > 1 and compound_string[1].islower():
        sym += compound_string[1]
        num_start = 2
    try:
        this_z = symbols.index(sym) + 1
    except ValueError:
        self.logger.info('zcompound is confused: %s', compound_string)
        return np.zeros(0)
    # Digits and decimal points following the symbol form the multiplier.
    end = num_start
    while end < len(compound_string) and \
            (compound_string[end].isdigit() or compound_string[end] == '.'):
        end += 1
    multiplier = float(compound_string[num_start:end]) if end > num_start else 1.0
    z_array[this_z - 1] += multiplier
    # Recurse on whatever follows this element term.
    remaining = compound_string[end:]
    if remaining:
        z_array = self.zcompound(remaining, z_array, paren_multiplier=True)
    return z_array
#-----------------------------------------------------------------------------
def zatwt(self, z_array):
    """Return the molecular weight (g/mole) of one formula unit.

    z_array holds atom counts indexed by Z-1 (as produced by zcompound).
    Entries beyond Z=92 contribute zero, matching the original's
    fall-through behaviour.
    """
    # Standard atomic weights for Z = 1 .. 92 (same values as the
    # original elif chain).
    weights = (1.00794, 4.0026, 6.941, 9.01218, 10.81, 12.011, 14.0067,
               15.9994, 18.9984, 21.179, 22.98977, 24.305, 26.98154,
               28.0855, 30.97376, 32.06, 35.453, 39.948, 39.0983, 40.08,
               44.9559, 47.88, 50.9415, 51.996, 54.9380, 55.847, 58.9332,
               58.69, 63.546, 65.38, 69.72, 72.59, 74.9216, 78.96, 79.904,
               83.80, 85.4678, 87.62, 88.9059, 91.22, 92.9064, 95.94, 98.,
               101.07, 102.9055, 106.42, 107.8682, 112.41, 114.82, 118.69,
               121.75, 127.60, 126.9054, 131.29, 132.9054, 137.33,
               138.9055, 140.12, 140.9077, 144.24, 145., 150.36, 151.96,
               157.25, 158.9254, 162.5, 164.9304, 167.26, 168.9342,
               173.04, 174.967, 178.49, 180.9479, 183.85, 186.207, 190.2,
               192.22, 195.08, 196.9665, 200.59, 204.383, 207.2, 208.9804,
               209., 210., 222., 223., 226.0254, 227.0278, 232.0381,
               231.0359, 238.0289)
    atwt = 0.
    for i, count in enumerate(z_array):
        if count != 0.:
            atwt += count * (weights[i] if i < len(weights) else 0.)
    return atwt
#-----------------------------------------------------------------------------
def extra(self, ielement = -1):
    """Read tabulated f1/f2 data for one element, plus its extra
    (absorption-edge) energy points, via read(..., all=False).

    Returns (energies, f1, f2, energies_extra, f1_extra, f2_extra).
    """
    energies, f1, f2, n_extra, energies_extra, f1_extra, f2_extra = self.read(ielement, all = False)
    # Merge the regular and extra energy grids into sorted *_all arrays.
    if not n_extra == None and n_extra != 0:
        energies_all=np.concatenate((energies,energies_extra), axis=0)
        f1_all=np.concatenate((f1,f1_extra), axis=0)
        f2_all=np.concatenate((f2,f2_extra), axis=0)
        sort_order=energies_all.argsort()
        energies_all=energies_all[sort_order]
        f1_all=f1_all[sort_order]
        f2_all=f2_all[sort_order]
    else:
        energies_all=energies
        f1_all=f1
        f2_all=f2
    # NOTE(review): the merged energies_all/f1_all/f2_all computed above are
    # discarded — the unmerged arrays are returned instead.  Callers
    # (get_henke_single) name the results *_all as if they were merged, so
    # this looks like it should return energies_all, f1_all, f2_all — confirm
    # before changing.
    return energies, f1, f2, energies_extra, f1_extra, f2_extra
#-----------------------------------------------------------------------------
def read(self, ielement=-1, all=True):
    """Read the Henke f1/f2 scattering-factor table from henke.xdr.

    With all=True (forced when ielement == -1) the full table is
    returned: energies (1-D), f1/f2 as (n_elements, n_energies) arrays,
    n_extra per element, and (n_elements, n_extra_energies) extra-point
    arrays.  With all=False only the requested element's rows are
    returned (as 1-D arrays).  On failure to open the data file all
    seven results are None.

    Fixes vs. original: Python-2 long literals (4l) replaced, the
    removed np.int alias replaced by int, bare except narrowed, and the
    file is always closed.
    """
    # Without a specific element we must return the whole table.
    if ielement == -1:
        all = True
    verbose = False
    filename = 'reference/henke.xdr'
    try:
        file = open(filename, 'rb')
    except (IOError, OSError):
        try:
            filename = '../reference/henke.xdr'
            file = open(filename, 'rb')
        except (IOError, OSError):
            self.logger.error('Could not open file %s', filename)
            return None, None, None, None, None, None, None
    if verbose:
        self.logger.debug('File: %s', filename)
    try:
        buf = file.read()
    finally:
        file.close()
    u = Unpacker(buf)
    # Header: element count, energy-grid length, then the energy grid.
    n_elements = u.unpack_int()
    n_energies = u.unpack_int()
    energies = np.array(u.unpack_farray(n_energies, u.unpack_float))
    if verbose:
        self.logger.debug('n_energies: %s', n_energies)
        self.logger.debug('n_elements: %s', n_elements)
        self.logger.debug('energies: %s', energies)
    if all:
        # f1 and f2 rows for every element, in file order.
        f1 = np.zeros((n_elements, n_energies))
        f2 = np.zeros((n_elements, n_energies))
        for i_element in range(n_elements):
            f1[i_element, :] = u.unpack_farray(n_energies, u.unpack_float)
            f2[i_element, :] = u.unpack_farray(n_energies, u.unpack_float)
        # Extra (edge) points: a count plus fixed-size padded arrays.
        n_extra_energies = u.unpack_int()
        if verbose:
            self.logger.debug('n_extra_energies: %s', n_extra_energies)
        n_extra = np.zeros((n_elements), dtype=int)
        extra_energies = np.zeros((n_elements, n_extra_energies))
        extra_f1 = np.zeros((n_elements, n_extra_energies))
        extra_f2 = np.zeros((n_elements, n_extra_energies))
        for i_element in range(n_elements):
            n_extra[i_element] = u.unpack_int()
            extra_energies[i_element, :] = u.unpack_farray(n_extra_energies, u.unpack_float)
            extra_f1[i_element, :] = u.unpack_farray(n_extra_energies, u.unpack_float)
            extra_f2[i_element, :] = u.unpack_farray(n_extra_energies, u.unpack_float)
    else:
        # Seek directly to this element's f1/f2 rows:
        # header (2 ints) + energy grid + 8 bytes per (f1, f2) pair.
        byte_offset = 4 + 4 + 4 * n_energies + 8 * ielement * n_energies
        u.set_position(byte_offset)
        f1 = np.array(u.unpack_farray(n_energies, u.unpack_float))
        f2 = np.array(u.unpack_farray(n_energies, u.unpack_float))
        # Skip past all elements' main rows to the extra-point section.
        byte_offset = 4 + 4 + 4 * n_energies + 8 * n_elements * n_energies
        u.set_position(byte_offset)
        n_extra_energies = u.unpack_int()
        if verbose:
            self.logger.debug('n_extra_energies %s', n_extra_energies)
        # Each element's extra block is 4 bytes (count) plus three float
        # arrays of n_extra_energies entries (12 bytes per energy).
        byte_offset = byte_offset + 4 + ielement * (4 + 12 * n_extra_energies)
        u.set_position(byte_offset)
        n_extra = u.unpack_int()
        # Only the first n_extra entries of the padded arrays are valid.
        extra_energies = np.array(u.unpack_farray(n_extra_energies, u.unpack_float))[0:n_extra]
        extra_f1 = np.array(u.unpack_farray(n_extra_energies, u.unpack_float))[0:n_extra]
        extra_f2 = np.array(u.unpack_farray(n_extra_energies, u.unpack_float))[0:n_extra]
    return energies, f1, f2, n_extra, extra_energies, extra_f1, extra_f2
# -----------------------------------------------------------------------------
def array(self, compound_name, density, graze_mrad=0):
    """Compute Henke optical properties over the full tabulated grid.

    compound_name may be a known name or a raw chemical formula;
    density is in g/cc; graze_mrad is an optional grazing angle (mrad)
    for the reflectivity calculation (0 means no reflectivity).
    Returns (energies, f1, f2, delta, beta, graze_mrad, reflect,
    inverse_mu, atwt, alpha).

    Fixes vs. original: read() is called with all=False (the default
    all=True returned the whole 2-D table instead of one element's
    row), 'energies == None' replaced by 'is None' (elementwise on
    ndarrays), np.math.pi replaced by np.pi, and the always-true
    len(np.where(...)) check corrected.
    """
    AVOGADRO = 6.02204531e23
    HC_ANGSTROMS = 12398.52
    RE = 2.817938070e-13  # classical electron radius in cm
    z_array, atwt = self.compound(compound_name, density)
    if len(z_array) == 0:
        # Not in the known-compound tables: parse as a raw formula.
        z_array = self.zcompound(compound_name, z_array)
        atwt = self.zatwt(z_array)
    first_time = True
    for i in range(92):
        if z_array[i] != 0.0:
            energies, this_f1, this_f2, n_extra, extra_energies, extra_f1, extra_f2 = \
                self.read(ielement=i, all=False)
            if energies is None:
                continue
            this_f1 = np.asarray(this_f1)
            this_f2 = np.asarray(this_f2)
            self.logger.debug('this_f1.shape: %s', this_f1.shape)
            # Accumulate per-element scattering factors weighted by counts.
            if first_time:
                f1 = z_array[i] * this_f1
                f2 = z_array[i] * this_f2
                first_time = False
            else:
                f1 = f1 + z_array[i] * this_f1
                f2 = f2 + z_array[i] * this_f2
    # NOTE(review): if every read() failed, f1/f2 are undefined here — the
    # original had the same behaviour; callers must pass a parsable compound.
    num_energies = len(energies)
    if atwt != 0.0:
        molecules_per_cc = density * AVOGADRO / atwt
    else:
        molecules_per_cc = 0.0
    wavelength_angstroms = HC_ANGSTROMS / energies
    # wavelength^2 converted from angstrom^2 to cm^2 via the 1e-16 factor.
    constant = RE * (1.0e-16 * wavelength_angstroms * wavelength_angstroms) * \
        molecules_per_cc / (2.0 * np.pi)
    delta = constant * f1
    beta = constant * f2
    # Alpha is in inverse meters squared (guarded against atwt == 0,
    # which previously raised ZeroDivisionError).
    alpha = 1.e4 * density * AVOGADRO * RE / (2. * np.pi * atwt) if atwt != 0.0 else 0.0
    if graze_mrad == 0.0:
        reflect = np.ones((num_energies))
    else:
        theta = 1.0e-3 * graze_mrad
        sinth = np.sin(theta)
        sinth2 = sinth * sinth
        coscot = np.cos(theta)
        coscot = coscot * coscot / sinth
        alpha = 2.0 * delta - delta * delta + beta * beta
        gamma = 2.0 * (1.0 - delta) * beta
        rhosq = 0.5 * (sinth2 - alpha +
                       np.sqrt((sinth2 - alpha) * (sinth2 - alpha) + gamma * gamma))
        rho = np.sqrt(rhosq)
        i_sigma = (4.0 * rhosq * (sinth - rho) * (sinth - rho) + gamma * gamma) / \
                  (4.0 * rhosq * (sinth + rho) * (sinth + rho) + gamma * gamma)
        piosig = (4.0 * rhosq * (rho - coscot) * (rho - coscot) + gamma * gamma) / \
                 (4.0 * rhosq * (rho + coscot) * (rho + coscot) + gamma * gamma)
        reflect = 50.0 * i_sigma * (1 + piosig)
    # 1/e absorption length in microns; zero denominators map to infinity.
    denom = energies * 4. * np.pi * beta
    zeroes = np.where(denom == 0.)[0]
    denom[zeroes] = 1e-8  # placeholder to avoid divide-by-zero warnings
    inverse_mu = 1.239852 / denom
    inverse_mu[zeroes] = np.inf
    return energies, f1, f2, delta, beta, graze_mrad, reflect, inverse_mu, atwt, alpha
# -----------------------------------------------------------------------------
def get_henke(self, compound_name, density, energy):
    """Log-log interpolate Henke optical constants at a single energy.

    Returns (f1, f2, delta, beta, graze_mrad, reflect, inverse_mu, atwt),
    or eight Nones when compound_name is empty.
    Fixes vs. original: f2arr/betaarr were *called* (f2arr(low_index))
    instead of indexed, np.Inf replaced by np.inf, np.math.* by np.*.
    """
    if len(compound_name) == 0:
        self.logger.warning('henke, compound_name, density, energy, f1, f2, delta, beta, graze_mrad, reflect, inverse_mu=inverse_mu inverse_mu is 1/e absorption length in microns. atwt is the atom-averaged atomic weight for the compound')
        return None, None, None, None, None, None, None, None
    enarr, f1arr, f2arr, deltaarr, betaarr, graze_mrad, reflect_arr, inverse_mu, atwt, alpha = \
        self.array(compound_name, density)
    num_energies = len(enarr)
    # Find the first grid point above the requested energy.
    high_index = 0
    while (energy > enarr[high_index]) and (high_index < (num_energies - 1)):
        high_index = high_index + 1
    if high_index == 0:
        high_index = 1
    low_index = high_index - 1
    # Interpolation fraction on a logarithmic energy axis.
    ln_lower_energy = np.log(enarr[low_index])
    ln_higher_energy = np.log(enarr[high_index])
    fraction = (np.log(energy) - ln_lower_energy) / (ln_higher_energy - ln_lower_energy)
    # f1, delta and reflect interpolate linearly; f2 and beta in log space.
    f1_lower = f1arr[low_index]
    f1_higher = f1arr[high_index]
    f1 = f1_lower + fraction * (f1_higher - f1_lower)
    ln_f2_lower = np.log(np.abs(f2arr[low_index]))
    ln_f2_higher = np.log(np.abs(f2arr[high_index]))
    f2 = np.exp(ln_f2_lower + fraction * (ln_f2_higher - ln_f2_lower))
    delta_lower = deltaarr[low_index]
    delta_higher = deltaarr[high_index]
    delta = delta_lower + fraction * (delta_higher - delta_lower)
    ln_beta_lower = np.log(np.abs(betaarr[low_index]))
    ln_beta_higher = np.log(np.abs(betaarr[high_index]))
    beta = np.exp(ln_beta_lower + fraction * (ln_beta_higher - ln_beta_lower))
    reflect_lower = reflect_arr[low_index]
    reflect_higher = reflect_arr[high_index]
    reflect = reflect_lower + fraction * (reflect_higher - reflect_lower)
    # 1/e absorption length in microns (infinite for non-absorbing media).
    if beta != 0.0:
        inverse_mu = 1.239852 / (energy * 4. * np.pi * beta)
    else:
        inverse_mu = np.inf
    return f1, f2, delta, beta, graze_mrad, reflect, inverse_mu, atwt
# -----------------------------------------------------------------------------
def get_henke_single(self, name, density, energy_array):
    """Interpolated f1, f2, delta, beta at one or more energies.

    name is a compound name or formula, density in g/cc; energy_array is
    either a float or a sequence of energies.  Returns
    (f1_array, f2_array, delta_array, beta_array) as length-n arrays, or
    (0, 0, 0, 0) when the compound cannot be parsed.

    Fixes vs. original: the loop used the whole energy_array instead of
    energy_array[i] (breaking any multi-energy call), and the removed
    np.math.* aliases are replaced by np.* equivalents.
    """
    AVOGADRO = 6.02204531e23
    HC_ANGSTROMS = 12398.52
    RE = 2.817938070e-13  # classical electron radius in cm
    z_array, atwt = self.compound(name.strip(), density)
    if len(z_array) == 0:
        z_array = self.zcompound(name, z_array)
        atwt = self.zatwt(z_array)
    wo = np.where(z_array > 0)[0]
    if len(wo) == 0:
        self.logger.warning('Warning: get_henke_single() name=%s encountered error, will return', name)
        return 0, 0, 0, 0
    z = wo + 1
    if (atwt != 0.0):
        molecules_per_cc = density * AVOGADRO / atwt
    else:
        molecules_per_cc = 0.0
    # NOTE(review): only the first element's tables are used, and the
    # ielement offset differs between the branches (z[0] vs z[0] - 1) —
    # this asymmetry looks suspect but is preserved; confirm upstream.
    if len(wo) > 1:
        energies_all, f1_all, f2_all, energies_extra, f1_extra, f2_extra = self.extra(ielement=z[0])
    else:
        energies_all, f1_all, f2_all, energies_extra, f1_extra, f2_extra = self.extra(ielement=z[0] - 1)
    scalar_input = isinstance(energy_array, float)
    n_array = 1 if scalar_input else len(energy_array)
    f1_array = np.zeros((n_array))
    f2_array = np.zeros((n_array))
    delta_array = np.zeros((n_array))
    beta_array = np.zeros((n_array))
    for i in range(n_array):
        # Fixed: pick the i-th energy (the original used the whole array).
        energy = energy_array if scalar_input else energy_array[i]
        wavelength_angstroms = HC_ANGSTROMS / energy
        # Wavelength^2 converted from angstrom^2 to cm^2 via 1e-16.
        constant = RE * (1.0e-16 * wavelength_angstroms * wavelength_angstroms) * \
            molecules_per_cc / (2.0 * np.pi)
        # Bracket the energy: first grid point above, last point below.
        wo_hi = np.where(energies_all > energy)[0]
        hi_e_ind = wo_hi[0] if len(wo_hi) > 0 else 0
        wo_lo = np.where(energies_all < energy)[0]
        lo_e_ind = wo_lo[-1] if len(wo_lo) > 0 else len(energies_all) - 1
        # Log-log interpolation: f1 linear, f2 in log space.
        ln_lower_energy = np.log(energies_all[lo_e_ind])
        ln_higher_energy = np.log(energies_all[hi_e_ind])
        fraction = (np.log(energy) - ln_lower_energy) / (ln_higher_energy - ln_lower_energy)
        f1_lower = f1_all[lo_e_ind]
        f1_higher = f1_all[hi_e_ind]
        f1_array[i] = f1_lower + fraction * (f1_higher - f1_lower)
        ln_f2_lower = np.log(np.abs(f2_all[lo_e_ind]))
        ln_f2_higher = np.log(np.abs(f2_all[hi_e_ind]))
        f2_array[i] = np.exp(ln_f2_lower + fraction * (ln_f2_higher - ln_f2_lower))
        delta_array[i] = constant * f1_array[i]
        beta_array[i] = constant * f2_array[i]
    return f1_array, f2_array, delta_array, beta_array
|
MapsPy/MapsPy
|
henke.py
|
Python
|
bsd-2-clause
| 27,886
|
[
"Avogadro"
] |
8dcd5a5353bfc4e1503b90afdc46d0b124ab7e0a517ffcdad3eddee0cf471e7a
|
from math import sqrt, pi
import numpy as np
from gpaw.xc.functional import XCFunctional
from gpaw.sphere.lebedev import Y_nL, weight_n
class LDA(XCFunctional):
    """Local-density-approximation XC functional driven by a pluggable kernel.

    The kernel must expose ``calculate(e_g, n_sg, dedn_sg)`` plus ``name``
    and ``type`` attributes.
    """
    def __init__(self, kernel):
        # The kernel evaluates the XC energy density and potential on a grid.
        self.kernel = kernel
        XCFunctional.__init__(self, kernel.name)
        self.type = kernel.type
    def calculate(self, gd, n_sg, v_sg=None, e_g=None):
        # Evaluate the XC energy on grid descriptor gd for spin densities
        # n_sg; fills v_sg (potential) and e_g (energy density) if supplied
        # and returns the integrated energy.
        if gd is not self.gd:
            self.set_grid_descriptor(gd)
        if e_g is None:
            e_g = gd.empty()
        if v_sg is None:
            v_sg = np.zeros_like(n_sg)
        self.calculate_lda(e_g, n_sg, v_sg)
        return gd.integrate(e_g)
    def calculate_lda(self, e_g, n_sg, v_sg):
        # Thin delegation point so subclasses can override grid evaluation.
        self.kernel.calculate(e_g, n_sg, v_sg)
    def calculate_paw_correction(self, setup, D_sp, dEdD_sp=None,
                                 addcoredensity=True, a=None):
        # PAW correction: all-electron minus pseudo radial XC energy for one
        # atomic setup; optionally accumulates the derivative into dEdD_sp.
        c = setup.xc_correction
        if c is None:
            return 0.0
        rgd = c.rgd
        nspins = len(D_sp)
        if addcoredensity:
            # Spherical (L=0) core densities, split evenly over spins.
            nc0_sg = rgd.empty(nspins)
            nct0_sg = rgd.empty(nspins)
            nc0_sg[:] = sqrt(4 * pi) / nspins * c.nc_g
            nct0_sg[:] = sqrt(4 * pi) / nspins * c.nct_g
            if c.nc_corehole_g is not None and nspins == 2:
                # Shift the core-hole density between the two spin channels.
                nc0_sg[0] -= 0.5 * sqrt(4 * pi) * c.nc_corehole_g
                nc0_sg[1] += 0.5 * sqrt(4 * pi) * c.nc_corehole_g
        else:
            nc0_sg = 0
            nct0_sg = 0
        D_sLq = np.inner(D_sp, c.B_pqL.T)
        e, dEdD_sqL = self.calculate_radial_expansion(rgd, D_sLq, c.n_qg,
                                                      nc0_sg)
        et, dEtdD_sqL = self.calculate_radial_expansion(rgd, D_sLq, c.nt_qg,
                                                       nct0_sg)
        if dEdD_sp is not None:
            dEdD_sp += np.inner((dEdD_sqL - dEtdD_sqL).reshape((nspins, -1)),
                                c.B_pqL.reshape((len(c.B_pqL), -1)))
        if addcoredensity:
            return e - et - c.Exc0
        else:
            return e - et
    def calculate_radial_expansion(self, rgd, D_sLq, n_qg, nc0_sg):
        # Angular integration over Lebedev points: accumulate energy E and
        # its derivative with respect to the density-matrix expansion.
        n_sLg = np.dot(D_sLq, n_qg)
        n_sLg[:, 0] += nc0_sg
        dEdD_sqL = np.zeros_like(np.transpose(D_sLq, (0, 2, 1)))
        Lmax = n_sLg.shape[1]
        E = 0.0
        for n, Y_L in enumerate(Y_nL[:, :Lmax]):
            w = weight_n[n]
            e_g, dedn_sg = self.calculate_radial(rgd, n_sLg, Y_L)
            dEdD_sqL += np.dot(rgd.dv_g * dedn_sg,
                               n_qg.T)[:, :, np.newaxis] * (w * Y_L)
            E += w * rgd.integrate(e_g)
        return E, dEdD_sqL
    def calculate_radial(self, rgd, n_sLg, Y_L):
        # Evaluate the kernel along one angular direction Y_L.
        nspins = len(n_sLg)
        n_sg = np.dot(Y_L, n_sLg)
        e_g = rgd.empty()
        dedn_sg = rgd.zeros(nspins)
        self.kernel.calculate(e_g, n_sg, dedn_sg)
        return e_g, dedn_sg
    def calculate_spherical(self, rgd, n_sg, v_sg, e_g=None):
        # Spherically symmetric special case: single angular weight of 1.
        if e_g is None:
            e_g = rgd.empty()
        e_g[:], dedn_sg = self.calculate_radial(rgd, n_sg[:, np.newaxis],
                                                [1.0])
        v_sg[:] = dedn_sg
        return rgd.integrate(e_g)
    def calculate_fxc(self, gd, n_sg, f_sg):
        # XC kernel f_xc for a spin-paired density; the C implementation
        # requires contiguous float arrays of matching shape.
        if gd is not self.gd:
            self.set_grid_descriptor(gd)
        assert len(n_sg) == 1
        assert n_sg.shape == f_sg.shape
        assert n_sg.flags.contiguous and n_sg.dtype == float
        assert f_sg.flags.contiguous and f_sg.dtype == float
        self.kernel.xc.calculate_fxc_spinpaired(n_sg.ravel(), f_sg)
class PurePythonLDAKernel:
    """Reference pure-Python LDA kernel (spin-paired densities only)."""
    def __init__(self):
        self.name = 'LDA'
        self.type = 'LDA'
    def calculate(self, e_g, n_sg, dedn_sg,
                  sigma_xg=None, dedsigma_xg=None,
                  tau_sg=None, dedtau_sg=None):
        # The gradient (sigma) and kinetic (tau) arguments are accepted for
        # interface compatibility with GGA/MGGA kernels but unused for LDA.
        assert len(n_sg) == 1
        lda(e_g, n_sg[0], dedn_sg[0])
def lda(e, n, v):
    """Spin-paired LDA: fill e[:] with the XC energy density and add the
    XC potential to v, both in place, for the density array n.

    Exchange is the Dirac/Slater form ex = C1/rs; correlation is the
    PW92 parametrization evaluated by G().
    """
    C0I = 0.238732414637843   # 3/(4*pi), so rs = (C0I / n)**(1/3)
    C1 = -0.45816529328314287  # exchange prefactor: ex = C1 / rs
    # Clamp tiny densities to avoid division blow-ups.
    # NOTE: this mutates the caller's density array in place.
    n[n < 1e-20] = 1e-40
    rs = (C0I / n)**(1 / 3.0)
    ex = C1 / rs
    dexdrs = -ex / rs;
    ec, decdrs = G(rs**0.5)  # PW92 correlation energy and d(ec)/d(rs)
    e[:] = n * (ex + ec)
    # Chain rule from d/dn to d/drs: v += exc - (rs/3) d(exc)/d(rs).
    v += ex + ec - rs * (dexdrs + decdrs) / 3.0
def G(rtrs):
    """PW92 correlation-energy interpolation.

    Given rtrs = sqrt(rs), return the correlation energy per electron
    and its derivative with respect to rs.
    """
    A = 0.031091
    alpha1 = 0.21370
    beta1, beta2, beta3, beta4 = 7.5957, 3.5876, 1.6382, 0.49294
    # Numerator and (Horner-form) denominator polynomials in sqrt(rs).
    Q0 = -2.0 * A * (1.0 + alpha1 * rtrs * rtrs)
    Q1 = 2.0 * A * rtrs * (beta1 + rtrs * (beta2 + rtrs * (beta3 + rtrs * beta4)))
    dQ1drs = A * (beta1 / rtrs + 2.0 * beta2 + rtrs * (3.0 * beta3 + 4.0 * beta4 * rtrs))
    ec = Q0 * np.log(1.0 + 1.0 / Q1)
    decdrs = -2.0 * A * alpha1 * ec / Q0 - Q0 * dQ1drs / (Q1 * (Q1 + 1.0))
    return ec, decdrs
|
ajylee/gpaw-rtxs
|
gpaw/xc/lda.py
|
Python
|
gpl-3.0
| 4,822
|
[
"GPAW"
] |
cf170391792044baa0abda8c079069591a1b64b9587bf6dc86e408aca1fc5da0
|
# $HeadURL$
"""
DISETSubRequest Class encapsulates a request definition to accomplish a DISET
RPC call
:deprecated:
"""
__RCSID__ = "$Id$"
import commands
from DIRAC.Core.Utilities import DEncode, Time
from DIRAC.Core.Utilities.File import makeGuid
class DISETSubRequest:
    """Encapsulates a DISET RPC call as a request sub-request (deprecated)."""

    #############################################################################
    def __init__( self, rpcStub = None, executionOrder = 0 ):
        """Initialize the sub-request attributes with defaults and, when an
        RPC stub is supplied, record its operation and encoded arguments.
        """
        self.subAttributeNames = ['Status', 'SubRequestID', 'Operation', 'ExecutionOrder', 'CreationTime', 'LastUpdate', 'Arguments']
        # Every attribute starts as "Unknown", then known ones are filled in.
        self.subAttributes = dict.fromkeys(self.subAttributeNames, "Unknown")
        self.subAttributes['Status'] = "Waiting"
        self.subAttributes['SubRequestID'] = makeGuid()
        self.subAttributes['CreationTime'] = Time.toString()
        self.subAttributes['ExecutionOrder'] = executionOrder
        if rpcStub:
            self.setRPCStub( rpcStub )

    def setRPCStub( self, rpcStub ):
        """Record the RPC call details: operation name and encoded stub."""
        self.subAttributes['Operation'] = rpcStub[1]
        self.subAttributes['Arguments'] = DEncode.encode( rpcStub )

    def getDictionary( self ):
        """Return the request representation as {'Attributes': dict}."""
        return { 'Attributes' : self.subAttributes }
|
avedaee/DIRAC
|
RequestManagementSystem/Client/DISETSubRequest.py
|
Python
|
gpl-3.0
| 1,499
|
[
"DIRAC"
] |
a9520081889a5ee2d811abb1f65f3d925a157f79193c0587ddd3ad8c11cf855a
|
__author__ = 'Eden Thiago Ferreira'
class __CaminhosMinimos:
    """Base class for point-to-point shortest-path searches.

    Subclasses customize ``up_dist`` to define the frontier priority:
    the base (A*-style) priority adds a heuristic estimate; Dijkstra
    overrides it with the plain accumulated distance.
    """
    def __init__(self, grafo):
        # grafo must expose: pontos (iterable of nodes), arestas[pt]
        # (neighbors), pesos[(a, b)] (edge weights) and
        # calc_prev_peso(a, b) (heuristic estimate between two nodes).
        self.grafo = grafo
        self.nao_visit = set(self.grafo.pontos)  # not-yet-settled nodes
        self.visit = set()                       # settled nodes
        self.dist = {}                           # tentative distances (frontier)
        self.dist_visit = {}                     # final distances of settled nodes
        self.anterior = {}                       # predecessor map for path recovery
        self.num_passos = 0                      # expansion steps taken
        self.dist_prev = {}                      # heuristic estimates to destination
        self.dist_prev_front = {}                # frontier priority (dist + heuristic)
        self.caminho = list()                    # reconstructed path (origin..dest)
        self.dist_total = 0                      # total path length after start()
        self.pt_ori = None                       # origin (assigned by caller)
        self.pt_dest = None                      # destination (assigned by caller)
        self.executou = False                    # True once start() has finished
    def get_origem(self):
        # Seed the search at the origin: zero distance, heuristic priority.
        self.dist[self.pt_ori] = 0
        self.dist_prev_front[self.pt_ori] = self.grafo.calc_prev_peso(self.pt_ori,
                                                                      self.pt_dest)
        return self.pt_ori
    def up_dist(self, ponto_alvo, ponto_atual):
        # A*-style priority: distance so far + edge weight + heuristic.
        self.dist_prev_front[ponto_alvo] = self.dist[ponto_atual] + self.grafo.pesos[ponto_atual, ponto_alvo] \
            + self.dist_prev[ponto_alvo]
    def proc_arestas(self, pt_atual):
        # Relax every edge leaving pt_atual.
        for pt_alvo in self.grafo.arestas[pt_atual]:
            self.dist_prev[pt_alvo] = self.grafo.calc_prev_peso(pt_alvo, self.pt_dest)
            if not pt_alvo in self.visit:
                if pt_alvo not in self.dist or self.dist[pt_alvo] > self.dist[pt_atual] \
                        + self.grafo.pesos[pt_atual, pt_alvo]:
                    self.dist[pt_alvo] = self.dist[pt_atual] + self.grafo.pesos[pt_atual, pt_alvo]
                    self.up_dist(pt_alvo, pt_atual)
                    self.anterior[pt_alvo] = pt_atual
    def get_prox(self):
        # Pick the frontier node with the smallest priority (linear scan).
        ponto_min = float("inf")
        val_min = float("inf")
        for key, valor in self.dist_prev_front.items():
            if valor < val_min:
                val_min = valor
                ponto_min = key
        return ponto_min
        #min(self.distancia_prev_fronteira, key=self.distancia_prev_fronteira.get)
    def start(self):
        # Main loop: settle nodes until the destination is reached.
        # pt_ori and pt_dest must be assigned by the caller beforehand.
        pt_atual = self.get_origem()
        while self.nao_visit:
            self.dist_prev[pt_atual] = self.grafo.calc_prev_peso(pt_atual, self.pt_dest)
            self.proc_arestas(pt_atual)
            # Move the current node from the frontier to the settled set.
            self.dist_visit[pt_atual] = self.dist[pt_atual]
            del self.dist[pt_atual]
            del self.dist_prev_front[pt_atual]
            self.visit.add(pt_atual)
            self.nao_visit.remove(pt_atual)
            if pt_atual == self.pt_dest:
                break
            pt_atual = self.get_prox()
            self.num_passos += 1
        self.get_caminho()
        self.dist_total = self.dist_visit[self.pt_dest]
        self.executou = True
    def get_caminho(self):
        # Walk the predecessor chain from destination back to origin,
        # prepending each node so caminho ends up origin-first.
        pt_atual = self.pt_dest
        while True:
            self.caminho.insert(0, pt_atual)
            if pt_atual == self.pt_ori:
                break
            pt_atual = self.anterior[pt_atual]
class AStar(__CaminhosMinimos):
    # A* search: the base class already folds the calc_prev_peso estimate
    # into the frontier priorities, so no overrides are required here.
    pass
class Dijkstra(__CaminhosMinimos):
    """Dijkstra's algorithm: the frontier is ordered by plain distance only."""
    def up_dist(self, pt_alvo, pt_atual):
        # Unlike the base class, no destination estimate is added here.
        tentative = self.dist[pt_atual] + self.grafo.pesos[pt_atual, pt_alvo]
        self.dist_prev_front[pt_alvo] = tentative
|
edenferreira/Grafo-e-Caminhos-Minimos
|
caminhos_minimos.py
|
Python
|
bsd-2-clause
| 3,127
|
[
"VisIt"
] |
5a2d91964e093f006bab903608fb576a2b3aa48f8c77fef87e3c0ce5fa46458d
|
import re
import requests
from difflib import SequenceMatcher
from coalib.results.Diff import Diff
from coalib.bears.LocalBear import LocalBear
from coalib.bears.requirements.PipRequirement import PipRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
class InvalidLinkBear(LocalBear):
    """
    Checks text files for links that are broken (unreachable or error
    status) and offers fixes for links that redirect to a similar URL.
    """
    DEFAULT_TIMEOUT = 2
    LANGUAGES = {"All"}
    REQUIREMENTS = {PipRequirement('requests', '2.*')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Documentation'}
    # IP Address of www.google.com
    check_connection_url = "http://216.58.218.174"
    @classmethod
    def check_prerequisites(cls):
        """
        Return True when an internet connection is available, otherwise an
        explanatory message (which makes coala skip this bear).
        """
        code = cls.get_status_code(
            cls.check_connection_url, cls.DEFAULT_TIMEOUT)
        return ("You are not connected to the internet."
                if code is None else True)
    @staticmethod
    def get_status_code(url, timeout):
        """
        HEAD the given url and return its HTTP status code, or None when
        the request fails or times out.

        :param url:     The URL to probe.
        :param timeout: Request timeout in seconds.
        """
        try:
            return requests.head(url, allow_redirects=False,
                                 timeout=timeout).status_code
        except requests.exceptions.RequestException:
            # Unreachable/invalid URLs are signalled by returning None.
            pass
    @staticmethod
    def find_links_in_file(file, timeout, ignore_regex):
        """
        Yield ``(line_number, link, status_code)`` for links found in the
        file, skipping links that match ``ignore_regex``.

        :param file:         Sequence of lines to scan.
        :param timeout:      Request timeout in seconds.
        :param ignore_regex: Regex of links to skip.
        """
        ignore_regex = re.compile(ignore_regex)
        regex = re.compile(
            r'((ftp|http)s?://[^.:\s_/?#[\]@\\]+\.(?:[^\s()\'"<>|\\]+|'
            r'\([^\s()\'"<>|\\]*\))*)(?<!\.)(?<!,)')
        for line_number, line in enumerate(file):
            # NOTE: only the first link on each line is checked.
            match = regex.search(line)
            if match:
                link = match.group()
                if not ignore_regex.search(link):
                    code = InvalidLinkBear.get_status_code(link, timeout)
                    yield line_number + 1, link, code
    def run(self, filename, file,
            timeout: int=DEFAULT_TIMEOUT,
            ignore_regex: str=r"([.\/]example\.com|\{|\$)"):
        """
        Find links in any text file and check if they are valid.
        A link is considered valid if the server responds with a 2xx code.
        This bear can automatically fix redirects, but ignores redirect
        URLs that have a huge difference with the original URL.
        Warning: This bear will make HEAD requests to all URLs mentioned in
        your codebase, which can potentially be destructive. As an example,
        this bear would naively just visit the URL from a line that goes like
        `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
        all your data.
        :param timeout: Request timeout period.
        :param ignore_regex: A regex for urls to ignore.
        """
        for line_number, link, code in InvalidLinkBear.find_links_in_file(
                file, timeout, ignore_regex):
            if code is None:
                # Request failed entirely - no HTTP response at all.
                yield Result.from_values(
                    origin=self,
                    message=('Broken link - unable to connect to '
                             '{url}').format(url=link),
                    file=filename,
                    line=line_number,
                    severity=RESULT_SEVERITY.MAJOR)
            elif not 200 <= code < 300:
                # HTTP status 404, 410 or 50x
                if code in (404, 410) or 500 <= code < 600:
                    yield Result.from_values(
                        origin=self,
                        message=('Broken link - unable to connect to {url} '
                                 '(HTTP Error: {code})'
                                 ).format(url=link, code=code),
                        file=filename,
                        line=line_number,
                        severity=RESULT_SEVERITY.NORMAL)
                if 300 <= code < 400:  # HTTP status 30x
                    # NOTE(review): this follow-up request has no timeout
                    # and could hang -- confirm before adding one.
                    redirect_url = requests.head(link,
                                                 allow_redirects=True).url
                    matcher = SequenceMatcher(
                        None, redirect_url, link)
                    # Offer a fix only when the redirect target closely
                    # resembles the original URL.  (The original code had
                    # the parentheses misplaced here; the comparison now
                    # reads as intended.)
                    if (matcher.real_quick_ratio() > 0.7 and
                            matcher.ratio() > 0.7):
                        diff = Diff(file)
                        current_line = file[line_number - 1]
                        start = current_line.find(link)
                        end = start + len(link)
                        replacement = current_line[:start] + \
                            redirect_url + current_line[end:]
                        diff.change_line(line_number,
                                         current_line,
                                         replacement)
                        yield Result.from_values(
                            self,
                            'This link redirects to ' + redirect_url,
                            diffs={filename: diff},
                            file=filename,
                            line=line_number,
                            severity=RESULT_SEVERITY.NORMAL)
|
chriscoyfish/coala-bears
|
bears/general/InvalidLinkBear.py
|
Python
|
agpl-3.0
| 5,000
|
[
"VisIt"
] |
7f2a2ca555c232c4e830c4aac60c6af06caa03435287f2b4987f7b66e0b538fd
|
"""Livestreamer extracts streams from various services.
The main compontent of Livestreamer is a command-line utility that
launches the streams in a video player.
An API is also provided that allows direct access to stream data.
Full documentation is available at http://livestreamer.tanuki.se/.
"""
__title__ = "livestreamer"
__version__ = "1.10.2"
__license__ = "Simplified BSD"
__author__ = "Christopher Rosell"
__copyright__ = "Copyright 2011-2014 Christopher Rosell"
__credits__ = [
"Christopher Rosell", "Athanasios Oikonomou", "Gaspard Jankowiak",
"Dominik Dabrowski", "Toad King", "Niall McAndrew", "Daniel Wallace",
"Sam Edwards", "John Peterson", "Kacper", "Andrew Bashore",
"Martin Panter", "t0mm0", "Agustin Carrasco", "Andy Mikhailenko",
"unintended", "Moritz Blanke", "Jon Bergli Heier", "Stefan Breunig",
"papplampe", "Brian Callahan", "Eric J", "Jan Tore Morken", "JOKer",
"Max Nordlund", "yeeeargh", "Vitaly Evtushenko", "Che", "nixxquality",
"medina", "Michael Cheah", "Jaime Marquinez Ferrandiz"
]
#__all__ = ['plugin', 'plugins', 'requests', 'stream']
#import requests
from .api import streams
from .exceptions import (LivestreamerError, PluginError, NoStreamsError,
NoPluginError, StreamError)
from .session import Livestreamer
|
noam09/kodi
|
script.module.israeliveresolver/lib/livestreamer/__init__.py
|
Python
|
gpl-3.0
| 1,313
|
[
"Brian"
] |
a068a52f49c4f600b4e6361e8590aa27bb97a0203f1348eb0a171f8bc4740e90
|
"""
Provides general packaging handling infrastructure and specific implementations of known packaging formats
All packaging format handlers should extend the PackageHandler class defined in this module.
Packages should then be configured through the PACKAGE_HANDLERS configuration option
"""
from octopus.core import app
from octopus.lib import plugin
import zipfile, os, shutil
from lxml import etree
from octopus.modules.epmc.models import JATS, EPMCMetadataXML
from octopus.modules.identifiers import postcode
from service import models
from octopus.modules.store import store
from StringIO import StringIO
class PackageException(Exception):
    """Raised whenever a package cannot be read, parsed or converted."""
class PackageFactory(object):
    """
    Factory giving access to the configured PackageHandler implementations
    """
    @classmethod
    def incoming(cls, format, zip_path=None, metadata_files=None):
        """
        Obtain a PackageHandler instance for the given format, to be used for
        processing an incoming binary object.
        If zip_path is given, the handler is built around that file; otherwise,
        if metadata_files is given, the handler is built around those handles.
        Metadata file handles should be of the form
        ::
            [("filename", <file handle>)]
        Since the metadata files are likely to be highly implementation specific,
        rely on the handler itself for the names of the files, which you may
        then use to retrieve the streams from store.
        :param format: format identifier for the package handler. As seen in the configuration.
        :param zip_path: file path to an accessible on-disk location where the zip file is stored
        :param metadata_files: list of tuples of filename/filehandle pairs for metadata files extracted from a package
        :return: a PackageHandler constructed with the zip_path and/or metadata_files
        """
        handlers = app.config.get("PACKAGE_HANDLERS", {})
        class_name = handlers.get(format)
        if class_name is None:
            msg = "No handler for package format {x}".format(x=format)
            app.logger.debug("Package Factory Incoming - {x}".format(x=msg))
            raise PackageException(msg)
        handler_class = plugin.load_class(class_name)
        return handler_class(zip_path=zip_path, metadata_files=metadata_files)
    @classmethod
    def converter(cls, format):
        """
        Obtain a PackageHandler which can read/write the supplied format
        during package conversion
        :param format: format identifier for the package handler. As seen in the configuration.
        """
        handlers = app.config.get("PACKAGE_HANDLERS", {})
        class_name = handlers.get(format)
        if class_name is None:
            msg = "No handler for package format {x}".format(x=format)
            app.logger.debug("Package Factory Converter - {x}".format(x=msg))
            raise PackageException(msg)
        handler_class = plugin.load_class(class_name)
        return handler_class()
class PackageManager(object):
    """
    Class which provides an API onto the package management system
    If you need to work with packages, the operation you want to do should be covered by one of the
    methods on this class.
    """
    @classmethod
    def ingest(cls, store_id, zip_path, format, storage_manager=None):
        """
        Ingest into the storage system the supplied package, of the specified format, with the specified store_id.
        This will attempt to load a PackageHandler for the format around the zip_file. Then the original
        zip file and the metadata files extracted from the package by the PackageHandler will be written
        to the storage system with the specified id.
        If a storage_manager is provided, that will be used as the interface to the storage system,
        otherwise a storage manager will be constructed from the StoreFactory.
        Once this method completes, the file held at zip_file will be deleted, and the definitive copy
        will be available in the store.
        :param store_id: the id to use when storing the package
        :param zip_path: locally accessible path to the source package on disk
        :param format: format identifier for the package handler. As seen in the configuration.
        :param storage_manager: an instance of Store to use as the storage API
        """
        app.logger.debug("Package Ingest - StoreID:{a}; Format:{b}".format(a=store_id, b=format))
        # load the package manager and the storage manager
        pm = PackageFactory.incoming(format, zip_path)
        if storage_manager is None:
            storage_manager = store.StoreFactory.get()
        # store the zip file as-is (with the name specified by the packager)
        storage_manager.store(store_id, pm.zip_name(), source_path=zip_path)
        # now extract the metadata streams from the package
        for name, stream in pm.metadata_streams():
            storage_manager.store(store_id, name, source_stream=stream)
        # finally remove the local copy of the zip file
        os.remove(zip_path)
    @classmethod
    def extract(cls, store_id, format, storage_manager=None):
        """
        Extract notification metadata and match data from the package in the store which has the specified format
        This will look in the store for the store_id, and look for files which match the known metadata file
        names from the PackageHandler which is referenced by the format. Once those files are found, they are loaded
        into the PackageHandler and the metadata and match data extracted and returned.
        If a storage_manager is provided, that will be used as the interface to the storage system,
        otherwise a storage manager will be constructed from the StoreFactory.
        :param store_id: the storage id where this object can be found
        :param format: format identifier for the package handler. As seen in the configuration.
        :param storage_manager: an instance of Store to use as the storage API
        :return: a tuple of (NotificationMetadata, RoutingMetadata) representing the metadata stored in the package
        """
        app.logger.debug("Package Extract - StoreID:{a}; Format:{b}".format(a=store_id, b=format))
        # load the storage manager
        if storage_manager is None:
            storage_manager = store.StoreFactory.get()
        # check the object exists in the store - if not do nothing
        if not storage_manager.exists(store_id):
            return None, None
        # get an instance of the package manager that can answer naming convention questions
        pm = PackageFactory.incoming(format)
        # list the stored files and keep only the recognised metadata files
        remotes = storage_manager.list(store_id)
        mdfs = pm.metadata_names()
        mds = [r for r in remotes if r in mdfs]
        # create a list of tuples of filenames and contents
        handles = []
        for r in mds:
            fh = storage_manager.get(store_id, r)
            handles.append((r, fh))
        # create the specific package manager around the new metadata (replacing the old instance)
        pm = PackageFactory.incoming(format, metadata_files=handles)
        # now do the metadata and the match analysis extraction
        md = pm.notification_metadata()
        ma = pm.match_data()
        # return the extracted data
        return md, ma
    @classmethod
    def convert(cls, store_id, source_format, target_formats, storage_manager=None):
        """
        For the package held in the store at the specified store_id, convert the package from
        the source_format to each of the target_formats.
        This will make a local copy of the source package from the storage system, make all
        the relevant conversions (also locally), and then synchronise back to the store.
        If a storage_manager is provided, that will be used as the interface to the storage system,
        otherwise a storage manager will be constructed from the StoreFactory.
        :param store_id: the storage id where this object can be found
        :param source_format: format identifier for the input package handler. As seen in the configuration.
        :param target_formats: format identifiers for the output package handlers. As seen in the configuration.
        :param storage_manager: an instance of Store to use as the storage API
        :return: a list of tuples of the conversions carried out of the form [(format, filename, url name)]
        """
        app.logger.debug("Package Convert - StoreID:{a}; SourceFormat:{b}; TargetFormats:{c}".format(a=store_id, b=source_format, c=",".join(target_formats)))
        # load the storage manager
        if storage_manager is None:
            storage_manager = store.StoreFactory.get()
        # get an instance of the local temp store
        tmp = store.StoreFactory.tmp()
        # get the packager that will do the conversions
        pm = PackageFactory.converter(source_format)
        # check that there is a source package to convert
        if not storage_manager.exists(store_id):
            return []
        try:
            # first check the file we want exists
            if pm.zip_name() not in storage_manager.list(store_id):
                return []
            # make a copy of the storage manager's version of the package manager's primary file into the local
            # temp directory
            stream = storage_manager.get(store_id, pm.zip_name())
            tmp.store(store_id, pm.zip_name(), source_stream=stream)
            # get the in path for the converter to use
            in_path = tmp.path(store_id, pm.zip_name())
            # a record of all the conversions which took place, with all the relevant additional info
            conversions = []
            # for each target format, load its equivalent packager to get the storage name,
            # then run the conversion
            for tf in target_formats:
                tpm = PackageFactory.converter(tf)
                out_path = tmp.path(store_id, tpm.zip_name(), must_exist=False)
                converted = pm.convert(in_path, tf, out_path)
                if converted:
                    conversions.append((tf, tpm.zip_name(), tpm.zip_name()))
            # with the conversions completed, synchronise back to the storage system
            for tf, zn, un in conversions:
                stream = tmp.get(store_id, zn)
                storage_manager.store(store_id, zn, source_stream=stream)
        finally:
            try:
                # finally, burn the local copy
                tmp.delete(store_id)
            # narrowed from a bare except: so that SystemExit/KeyboardInterrupt
            # are not swallowed and re-labelled as a storage error
            except Exception:
                raise store.StoreException("Unable to delete from tmp storage {x}".format(x=store_id))
        # return the conversions record to the caller
        return conversions
class PackageHandler(object):
    """
    Interface/parent class which all package handlers must extend.
    """
    def __init__(self, zip_path=None, metadata_files=None):
        """
        Build a handler around a zip file and/or a set of metadata file handles.
        Metadata file handles should be of the form
        ::
            [("filename", <file handle>)]
        :param zip_path:
        :param metadata_files:
        :return:
        """
        self.zip_path = zip_path
        self.metadata_files = metadata_files
        self.zip = None
    ################################################
    ## Naming information
    def zip_name(self):
        """
        Name of the package zip file to be used in the storage layer
        :return: the name of the zip file
        """
        raise NotImplementedError()
    def metadata_names(self):
        """
        Names of metadata files extracted and stored by this packager
        :return: the names of the metadata files
        """
        raise NotImplementedError()
    def url_name(self):
        """
        Name of the package as it should appear in any content urls
        :return: the url name
        """
        raise NotImplementedError()
    ################################################
    ## Data retrieval from the actual package
    def metadata_streams(self):
        """
        Generator yielding tuples of metadata file names and data streams.
        The base implementation yields nothing.
        :return: generator for file names/data streams
        """
        return
        yield  # unreachable - keeps this a generator function
    def notification_metadata(self):
        """
        Notification metadata extracted from the package (empty by default)
        :return: NotificationMetadata populated
        """
        return models.NotificationMetadata()
    def match_data(self):
        """
        Match data extracted from the package (empty by default)
        :return: RoutingMetadata populated
        """
        return models.RoutingMetadata()
    def convertible(self, target_format):
        """
        Whether this handler can convert to the specified format (no, by default)
        :param target_format: format we may want to convert to
        :return: True/False if this handler supports that output format
        """
        return False
    def convert(self, in_path, target_format, out_path):
        """
        Convert the file at in_path to a package of target_format at out_path.
        You should check first that target_format is supported via convertible();
        the base implementation always fails.
        :param in_path: locally accessible file path to the source package
        :param target_format: the format identifier for the format we want to convert to
        :param out_path: locally accessible file path for the output to be written
        :return: True/False on success/fail
        """
        return False
class SimpleZip(PackageHandler):
    """
    Minimal package handler for the SimpleZip package format.
    SimpleZip is identified by the format identifier
    http://purl.org/net/sword/package/SimpleZip
    """
    ################################################
    ## Naming information
    def zip_name(self):
        """
        Name under which the package zip is stored in the storage layer.
        :return: always "SimpleZip.zip"
        """
        return "SimpleZip.zip"
    def metadata_names(self):
        """
        Names of metadata files extracted by this packager.
        :return: an empty list - SimpleZip carries no metadata files
        """
        return []
    def url_name(self):
        """
        Name of the package as it appears in content urls.
        :return: always "SimpleZip"
        """
        return "SimpleZip"
class FilesAndJATS(PackageHandler):
    """
    Class for representing the FilesAndJATS format
    You should use the format identifier: https://pubrouter.jisc.ac.uk/FilesAndJATS
    This is the default format that we currently prefer to get from
    providers. It consists of a zip of a single XML file which is the JATS fulltext,
    a single PDF which is the fulltext, and an arbitrary number of other
    files which are supporting information. It may also contain the NLM/EPMC
    formatted metadata as XML
    To be valid, the zip must just consist of the JATS file OR the EPMC metadata file.
    All other files are optional
    """
    def __init__(self, zip_path=None, metadata_files=None):
        """
        Construct a new PackageHandler around the zip file and/or the metadata files.
        Metadata file handles should be of the form
        ::
            [("filename", <file handle>)]
        :param zip_path: locally accessible path to zip file
        :param metadata_files: metadata file handles tuple
        :return:
        """
        super(FilesAndJATS, self).__init__(zip_path=zip_path, metadata_files=metadata_files)
        # Parsed wrappers, populated by _set_jats/_set_epmc during loading.
        self.jats = None
        self.epmc = None
        # Prefer the zip file when both sources are supplied.
        if self.zip_path is not None:
            self._load_from_zip()
        elif self.metadata_files is not None:
            self._load_from_metadata()
    ################################################
    ## Overrides of methods for exposing naming information
    def zip_name(self):
        """
        Get the name of the package zip file to be used in the storage layer
        In this case FilesAndJATS.zip
        :return: filename
        """
        return "FilesAndJATS.zip"
    def metadata_names(self):
        """
        Get a list of the names of metadata files extracted and stored by this packager
        In this case ["filesandjats_jats.xml", "filesandjats_epmc.xml"]
        :return: list of metadata files
        """
        return ["filesandjats_jats.xml", "filesandjats_epmc.xml"]
    def url_name(self):
        """
        Get the name of the package as it should appear in any content urls
        In this case FilesAndJATS
        :return: url name
        """
        return "FilesAndJATS"
    ################################################
    ## Overrides of methods for retrieving data from the actual package
    def metadata_streams(self):
        """
        A generator which yields tuples of metadata file names and data streams
        In this handler, this will yield up to 2 metadata streams; for "filesandjats_jats.xml" and "filesandjats_epmc.xml",
        in that order, where there is a stream present for that file.
        :return: generator for file names/data streams
        """
        sources = [("filesandjats_jats.xml", self.jats), ("filesandjats_epmc.xml", self.epmc)]
        for n, x in sources:
            if x is not None:
                # x is a JATS/EPMCMetadataXML wrapper; tostring() is assumed
                # to serialise the underlying XML -- TODO confirm.
                yield n, StringIO(x.tostring())
    def notification_metadata(self):
        """
        Get the notification metadata as extracted from the package
        This will extract metadata from both of the JATS XML and the EPMC XML, whichever is present
        and merge them before responding.
        :return: NotificationMetadata populated
        """
        emd = None
        jmd = None
        # extract all the relevant data from epmc
        if self.epmc is not None:
            emd = self._epmc_metadata()
        # extract all the relevant data from jats
        if self.jats is not None:
            jmd = self._jats_metadata()
        return self._merge_metadata(emd, jmd)
    def match_data(self):
        """
        Get the match data as extracted from the package
        This will extract match data from both of the JATS XML and the EPMC XML, whichever is present
        and merge them before responding.
        :return: RoutingMetadata populated
        """
        match = models.RoutingMetadata()
        # extract all the relevant match data from epmc
        if self.epmc is not None:
            self._epmc_match_data(match)
        # extract all the relevant match data from jats
        if self.jats is not None:
            self._jats_match_data(match)
        return match
    def convertible(self, target_format):
        """
        Checks whether this handler can do the conversion to the target format.
        This handler currently supports the following conversion formats:
        * http://purl.org/net/sword/package/SimpleZip
        :param target_format: target format
        :return: True if in the above list, else False
        """
        return target_format in ["http://purl.org/net/sword/package/SimpleZip"]
    def convert(self, in_path, target_format, out_path):
        """
        Convert the file at the specified in_path to a package file of the
        specified target_format at the out_path.
        You should check first that this target_format is supported via convertible()
        This handler currently supports the following conversion formats:
        * http://purl.org/net/sword/package/SimpleZip
        :param in_path: locally accessible file path to the source package
        :param target_format: the format identifier for the format we want to convert to
        :param out_path: locally accessible file path for the output to be written
        :return: True/False on success/fail
        """
        if target_format == "http://purl.org/net/sword/package/SimpleZip":
            self._simple_zip(in_path, out_path)
            return True
        return False
    ################################################
    ## Internal methods
    def _simple_zip(self, in_path, out_path):
        """
        convert to simple zip
        :param in_path:
        :param out_path:
        :return:
        """
        # files and jats are already basically a simple zip, so a straight copy
        shutil.copyfile(in_path, out_path)
    def _merge_metadata(self, emd, jmd):
        """
        Merge the supplied EPMC and JATS metadata records into one.
        JATS values win for title/publisher/dates/license/authors; EPMC values
        win for type/language/projects; identifiers and subjects are unioned.
        :param emd: NotificationMetadata from EPMC (or None)
        :param jmd: NotificationMetadata from JATS (or None)
        :return: merged NotificationMetadata
        """
        # Substitute empty records so the merge logic below is uniform.
        if emd is None:
            emd = models.NotificationMetadata()
        if jmd is None:
            jmd = models.NotificationMetadata()
        md = models.NotificationMetadata()
        md.title = jmd.title if jmd.title is not None else emd.title
        md.publisher = jmd.publisher
        md.type = emd.type
        md.language = emd.language
        md.publication_date = emd.publication_date if emd.publication_date is not None else jmd.publication_date
        md.date_accepted = jmd.date_accepted
        md.date_submitted = jmd.date_submitted
        md.license = jmd.license
        # identifiers from both sources (add_identifier is assumed to
        # de-duplicate -- TODO confirm)
        for id in emd.identifiers:
            md.add_identifier(id.get("id"), id.get("type"))
        for id in jmd.identifiers:
            md.add_identifier(id.get("id"), id.get("type"))
        md.authors = jmd.authors if len(jmd.authors) > 0 else emd.authors
        md.projects = emd.projects
        for s in emd.subjects:
            md.add_subject(s)
        for s in jmd.subjects:
            md.add_subject(s)
        return md
    def _jats_metadata(self):
        """
        Extract metadata from the JATS file
        :return: NotificationMetadata populated from the JATS wrapper
        """
        md = models.NotificationMetadata()
        md.title = self.jats.title
        md.publisher = self.jats.publisher
        md.publication_date = self.jats.publication_date
        md.date_accepted = self.jats.date_accepted
        md.date_submitted = self.jats.date_submitted
        type, url, _ = self.jats.get_licence_details()
        md.set_license(type, url)
        for issn in self.jats.issn:
            md.add_identifier(issn, "issn")
        md.add_identifier(self.jats.pmcid, "pmcid")
        md.add_identifier(self.jats.doi, "doi")
        for author in self.jats.authors:
            # Build "given-names surname"; skip authors with no name at all.
            name = author.get("given-names", "") + " " + author.get("surname", "")
            if name.strip() == "":
                continue
            affs = "; ".join(author.get("affiliations", []))
            obj = {"name" : name}
            if affs is not None and affs != "":
                obj["affiliation"] = affs
            md.add_author(obj)
        for kw in self.jats.categories:
            md.add_subject(kw)
        for kw in self.jats.keywords:
            md.add_subject(kw)
        return md
    def _epmc_metadata(self):
        """
        Extract metadata from the EPMC XML
        :return: NotificationMetadata populated from the EPMC wrapper
        """
        md = models.NotificationMetadata()
        md.title = self.epmc.title
        md.type = self.epmc.publication_type
        md.language = self.epmc.language
        md.publication_date = self.epmc.publication_date
        md.add_identifier(self.epmc.pmid, "pmid")
        md.add_identifier(self.epmc.pmcid, "pmcid")
        md.add_identifier(self.epmc.doi, "doi")
        for issn in self.epmc.issns:
            md.add_identifier(issn, "issn")
        for author in self.epmc.authors:
            # EPMC provides a pre-formatted full name; skip if missing.
            fn = author.get("fullName")
            if fn is None:
                continue
            aff = author.get("affiliation")
            obj = {"name" : fn}
            if aff is not None:
                obj["affiliation"] = aff
            md.add_author(obj)
        for grant in self.epmc.grants:
            # Record a project only when at least one of grant id / agency exists.
            obj = {}
            gid = grant.get("grantId")
            if gid is not None:
                obj["grant_number"] = gid
            ag = grant.get("agency")
            if ag is not None:
                obj["name"] = ag
            if len(obj.keys()) > 0:
                md.add_project(obj)
        for kw in self.epmc.mesh_descriptors:
            md.add_subject(kw)
        for kw in self.epmc.keywords:
            md.add_subject(kw)
        return md
    def _jats_match_data(self, match):
        """
        Extract match data from the JATS XML
        :param match: RoutingMetadata to populate (mutated in place)
        :return:
        """
        # subject keywords
        for c in self.jats.categories:
            match.add_keyword(c)
        # individual authors, emails, affiliations
        for a in self.jats.contribs:
            # name
            name = a.get("given-names", "") + " " + a.get("surname", "")
            if name.strip() != "":
                match.add_author_id(name, "name")
            # email
            email = a.get("email")
            if email is not None:
                match.add_email(email)
            # affiliations (and postcodes)
            # NOTE: the loop variable a is reused here, shadowing the
            # contributor for the rest of this iteration.
            affs = a.get("affiliations", [])
            for a in affs:
                match.add_affiliation(a)
                codes = postcode.extract_all(a)
                for code in codes:
                    match.add_postcode(code)
        # other keywords
        for k in self.jats.keywords:
            match.add_keyword(k)
        # other email addresses
        for e in self.jats.emails:
            match.add_email(e)
    def _epmc_match_data(self, match):
        """
        Extract match data from the EPMC XML
        :param match: RoutingMetadata to populate (mutated in place)
        :return:
        """
        # author string
        author_string = self.epmc.author_string
        if author_string is not None:
            match.add_author_id(author_string, "author-list")
        # individual authors and their affiliations
        authors = self.epmc.authors
        for a in authors:
            # name
            fn = a.get("fullName")
            if fn is not None:
                match.add_author_id(fn, "name")
            # affiliation (and postcode)
            aff = a.get("affiliation")
            if aff is not None:
                match.add_affiliation(aff)
                codes = postcode.extract_all(aff)
                for code in codes:
                    match.add_postcode(code)
        # grant ids
        gs = self.epmc.grants
        for g in gs:
            gid = g.get("grantId")
            if gid is not None:
                match.add_grant_id(gid)
        # keywords
        keys = self.epmc.mesh_descriptors
        for k in keys:
            match.add_keyword(k)
    def _load_from_metadata(self):
        """
        Load the properties for this handler from the file metadata
        :raises PackageException: if a file cannot be parsed or neither
            JATS nor EPMC metadata is present
        :return:
        """
        # Files are recognised by the fixed names from metadata_names().
        for name, stream in self.metadata_files:
            if name == "filesandjats_jats.xml":
                try:
                    xml = etree.fromstring(stream.read())
                    self._set_jats(xml)
                except Exception:
                    raise PackageException("Unable to parse filesandjats_jats.xml file from store")
            elif name == "filesandjats_epmc.xml":
                try:
                    xml = etree.fromstring(stream.read())
                    self._set_epmc(xml)
                except Exception:
                    raise PackageException("Unable to parse filesandjats_epmc.xml file from store")
        if not self._is_valid():
            raise PackageException("No JATS fulltext or EPMC metadata found in metadata files")
    def _load_from_zip(self):
        """
        Load the properties for this handler from a zip file
        :raises PackageException: if the zip is corrupt, an XML member cannot
            be parsed, or neither JATS nor EPMC metadata is present
        :return:
        """
        try:
            self.zip = zipfile.ZipFile(self.zip_path, "r", allowZip64=True)
        except zipfile.BadZipfile as e:
            raise PackageException("Zip file is corrupt - cannot read.")
        # XML members are classified by their root tag, not their file name.
        for x in self._xml_files():
            try:
                doc = etree.fromstring(self.zip.open(x).read())
            except Exception:
                raise PackageException("Unable to parse XML file in package {x}".format(x=x))
            if doc.tag in ["resultList", "result"]:
                self._set_epmc(doc)
            elif doc.tag == "article":
                self._set_jats(doc)
        if not self._is_valid():
            raise PackageException("No JATS fulltext or EPMC metadata found in package")
    def _xml_files(self):
        """
        List the XML files in the zip file
        :return: list of member names ending in ".xml" (empty if no zip loaded)
        """
        if self.zip is None:
            return []
        xmls = []
        for name in self.zip.namelist():
            if name.endswith(".xml"):
                xmls.append(name)
        return xmls
    def _set_epmc(self, xml):
        """
        set the local EPMC property on this object based on the xml document passed in
        Accepts either a "resultList" wrapper (first "result" is used) or a
        bare "result" element; any other root tag is silently ignored.
        :param xml: parsed etree element
        :return:
        """
        if xml.tag == "resultList":
            res = xml.find("result")
            if res is not None:
                self.epmc = EPMCMetadataXML(xml=res)
            else:
                raise PackageException("Unable to find result element in EPMC resultList")
        elif xml.tag == "result":
            self.epmc = EPMCMetadataXML(xml=xml)
    def _set_jats(self, xml):
        """
        Set the local JATS property on this object based on the xml document passed in
        :param xml: parsed etree element
        :return:
        """
        self.jats = JATS(xml=xml)
    def _is_valid(self):
        """
        Is this package valid as FilesAndJATS?
        :return: True if at least one of jats/epmc was loaded
        """
        # is valid if either one or both of jats/epmc is not none
        return self.jats is not None or self.epmc is not None
|
JiscPER/jper
|
service/packages.py
|
Python
|
apache-2.0
| 30,281
|
[
"Octopus"
] |
e0820f5da1b402af9c65b62d1bc717d8f2bdc9174e3a67e8ed5956b019d3a005
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin, urlsplit, parse_qs, urlunsplit
from django.views.generic import TemplateView
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
import edx_oauth2_provider
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.template.response import TemplateResponse
from provider.oauth2.models import Client
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED,
LogoutViewConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.tasks import send_activation_email
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from util.password_policy_validators import validate_password_strength
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs import utils as programs_utils
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
# Module-level loggers: general student-app events, plus a separate audit trail
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
# Lightweight record describing one course's reverification state for display
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display')  # pylint: disable=invalid-name
# Tracking event name emitted when a user initiates an account-setting change
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
# Signal used to announce a registration; receivers get the new user and profile
REGISTER_USER = Signal(providing_args=["user", "profile"])
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
    """A csrf token that can be included in a form."""
    token = context.get('csrf_token', '')
    # Django uses the literal 'NOTPROVIDED' when no token is available
    return '' if token == 'NOTPROVIDED' else (
        u'<div style="display:none"><input type="hidden"'
        ' name="csrfmiddlewaretoken" value="%s" /></div>' % (token)
    )
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=None):
    """
    Render the edX main page.

    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.

    Arguments:
        request: the incoming HTTP request.
        extra_context (dict): optional extra template context, merged in last.
        user (User): the user whose courses should be listed; defaults to an
            anonymous user when not supplied.
    """
    # NOTE: the original signature used `user=AnonymousUser()`, which evaluates
    # the default once at import time and shares the instance across all calls.
    # Using a None sentinel is backward compatible and avoids that pitfall.
    if user is None:
        user = AnonymousUser()
    if extra_context is None:
        extra_context = {}
    courses = get_courses(user)
    if configuration_helpers.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    ):
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)
    context = {'courses': courses}
    context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
    # This appears to be an unused context parameter, at least for the master templates...
    context['show_partners'] = configuration_helpers.get_value('show_partners', True)
    # TO DISPLAY A YOUTUBE WELCOME VIDEO
    # 1) Change False to True
    context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
    # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
    # Note: This value should be moved into a configuration setting and plumbed-through to the
    # context via the site configuration workflow, versus living here
    youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
    context['homepage_promo_video_youtube_id'] = youtube_video_id
    # allow for theme override of the courses list
    context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
    # Insert additional context for use in the template
    context.update(extra_context)
    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
    Currently, this is sha1(user.username). Otherwise, return survey_link.
    """
    unique_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=unique_id)
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the given
    student and course.
    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)
    Returns:
        dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
            'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
            'show_download_url': bool
            'download_url': url, only present if show_download_url is True
            'show_disabled_download_button': bool -- true if state is 'generating'
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
            'can_unenroll': if status allows for unenrollment
    """
    # Courses that may not certify get no certificate section at all.
    if not course_overview.may_certify():
        return {}
    cert_status = certificate_status_for_student(user, course_overview.id)
    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Collect reverification-related information for *all* of the user's enrollments
    whose reverification status appears in ``statuses``.
    Args:
        statuses (list): reverification statuses we want information for,
            e.g. ["must_reverify", "denied"]
    Returns:
        defaultdict(list): one key per requested status, e.g.
            dict["must_reverify"] = []
            dict["must_reverify"] = [some information]
    """
    reverifications = defaultdict(list)
    for status in statuses:
        # NB: indexing the defaultdict creates the key even when empty,
        # so every requested status ends up present in the result.
        entries = reverifications[status]
        if entries:
            # Sort the data by the reverification_end_date
            entries.sort(key=lambda item: item.date)
    return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Given a user, yield a filtered sequence of his or her course enrollments.
    Arguments:
        user (User): the user in question.
        org_to_include (str): If not None, ONLY courses of this org will be returned.
        orgs_to_exclude (list[str]): If org_to_include is not None, this
            argument is ignored. Else, courses of this org will be excluded.
    Returns:
        generator[CourseEnrollment]: a sequence of enrollments to be displayed
        on the user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        overview = enrollment.course_overview
        # If the course is missing or broken, log an error and skip it.
        if not overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue
        # Filter out anything that is not attributed to the current ORG.
        if org_to_include and overview.location.org != org_to_include:
            continue
        # Filter out any enrollments with courses attributed to excluded ORGs.
        # NOTE(review): this check still runs even when org_to_include matched
        # above (same as the original elif chain) — callers keep the two disjoint.
        if overview.location.org in orgs_to_exclude:
            continue
        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode):  # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.
    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict): status dict from certificate_status_for_student,
            or None when no certificate record exists.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)
    Returns:
        dict: template-ready status info, or {} when the course hides early
        certificate info and the status is a hidden one.
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
        CertificateStatuses.auditing: 'auditing',
        CertificateStatuses.audit_passing: 'auditing',
        CertificateStatuses.audit_notpassing: 'auditing',
        CertificateStatuses.unverified: 'unverified',
    }
    # Any status not in the table above collapses to 'processing'
    default_status = 'processing'
    default_info = {
        'status': default_status,
        'show_disabled_download_button': False,
        'show_download_url': False,
        'show_survey_button': False,
        'can_unenroll': True,
    }
    if cert_status is None:
        return default_info
    # Hidden statuses produce no cert section when the course opts out of
    # showing certificate info before the end of the course.
    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return {}
    status = template_state.get(cert_status['status'], default_status)
    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }
    # Offer the end-of-course survey for terminal-ish statuses, if configured.
    if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and
            course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False
    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # Downloadable status without a URL is an inconsistent record;
            # log it and fall back to the 'processing' defaults.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']
        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()
        # posting certificates to LinkedIn is not currently
        # supported in White Labels
        if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )
    if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'):
        if 'grade' not in cert_status:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info
        else:
            status_dict['grade'] = cert_status['grade']
    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # External auth (e.g. Shibboleth) may take over the login flow entirely.
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    # Surface the first queued "social-auth" message (third-party auth error),
    # if any, so the login template can display it.
    third_party_auth_error = None
    for msg in messages.get_messages(request):
        if msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break
    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }
    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the standalone registration page, optionally prepopulated with
    data from a running third-party auth pipeline or external auth.
    """
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    # External auth (e.g. Shibboleth) may take over the registration flow.
    external_auth_response = external_auth_register(request)
    if external_auth_response is not None:
        return external_auth_response
    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'selected_provider': '',
        'username': '',
    }
    if extra_context is not None:
        context.update(extra_context)
    # Shibboleth-backed registrations use a dedicated template.
    if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)
    # If third-party auth is enabled, prepopulate the form with data from the
    # selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        current_provider = provider.Registry.get_from_pipeline(running_pipeline)
        if current_provider is not None:
            overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
            overrides['running_pipeline'] = running_pipeline
            overrides['selected_provider'] = current_provider.name
            context.update(overrides)
    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Compute some more information from the given course modes
    and the user's current enrollment.
    Returns the given information:
        - whether to show the course upsell information
        - numbers of days until they can't upsell anymore
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)
    mode_info = {'show_upsell': False, 'days_for_upsell': None}
    # We only upsell when a verified mode exists and the user's current mode
    # is one that can be upgraded to verified.
    if CourseMode.VERIFIED not in modes or enrollment.mode not in CourseMode.UPSELL_TO_VERIFIED_MODES:
        return mode_info
    verified_mode = modes['verified']
    mode_info['show_upsell'] = True
    mode_info['verified_sku'] = verified_mode.sku
    mode_info['verified_bulk_sku'] = verified_mode.bulk_sku
    # if there is an expiration date, find out how long from now it is
    if verified_mode.expiration_datetime:
        today = datetime.datetime.now(UTC).date()
        mode_info['days_for_upsell'] = (verified_mode.expiration_datetime.date() - today).days
    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """Checking either registration is blocked or not .

    A course is considered blocked for this user when any redeemed
    registration code is tied to an invoice that is no longer valid.
    Side effect: when blocked, the user is opted out of course emails
    (an Optout row is created) and a tracking event is emitted.
    """
    blocked = False
    for redeemed_registration in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        if redeemed_registration.invoice_item:
            if not redeemed_registration.invoice_item.invoice.is_valid:
                blocked = True
                # disabling email notifications for unpaid registration courses
                Optout.objects.get_or_create(user=request.user, course_id=course_key)
                log.info(
                    u"User %s (%s) opted out of receiving emails from course %s",
                    request.user.username,
                    request.user.email,
                    course_key,
                )
                track.views.server_track(
                    request,
                    "change-email1-settings",
                    {"receive_emails": "no", "course": course_key.to_deprecated_string()},
                    page='dashboard',
                )
                # One invalid invoice is enough; stop scanning.
                break
    return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Render the student dashboard: the list of the user's course enrollments
    together with certificate, verification, credit, email-settings, refund
    and program information for each course.
    """
    user = request.user
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    # we want to filter and only show enrollments for courses within
    # the 'ORG' defined in configuration.
    course_org_filter = configuration_helpers.get_value('course_org_filter')
    # Let's filter out any courses in an "org" that has been declared to be
    # in a configuration
    org_filter_out_set = configuration_helpers.get_all_orgs()
    # remove our current org from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)
    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)
    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }
    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )
    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
    # Inactive (unactivated) accounts get an activation notice.
    message = ""
    if not user.is_active:
        message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email, 'platform_name': platform_name}
        )
    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()
    # Courseware links are only shown when the user can both load the course
    # and has satisfied any prerequisites.
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )
    # Find programs associated with courses being displayed. This information
    # is passed in the template context to allow rendering of program-related
    # information on the dashboard.
    meter = programs_utils.ProgramProgressMeter(user, enrollments=course_enrollments)
    programs_by_run = meter.engaged_programs(by_run=True)
    # Construct a dictionary of course mode information
    # used to render the course list. We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }
    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }
    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            BulkEmailFlag.feature_enabled(enrollment.course_id)
        )
    )
    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)
    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )
    # Courses whose redeemed registration codes have invalid invoices are blocked.
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )
    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )
    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])
    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
    # Optional redirect explanation passed along as a query parameter.
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    elif 'course_closed' in request.GET:
        redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
            date=request.GET['course_closed']
        )
    else:
        redirect_message = ''
    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse('logout'),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'programs_by_run': programs_by_run,
        'show_program_listing': ProgramsApiConfig.current().show_program_listing,
        'disable_courseware_js': True,
    }
    # When the E-Commerce service handles payment, route purchases through it.
    ecommerce_service = EcommerceService()
    if ecommerce_service.is_enabled(request.user):
        context.update({
            'use_ecommerce_payment_flow': True,
            'ecommerce_payment_page': ecommerce_service.payment_page_url(),
        })
    return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Builds a recent course enrollment message.
    Constructs a new message template based on any recent course enrollments
    for the student.
    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
    Returns:
        A string representing the HTML message output from the message template.
        None if there are no recently enrolled courses.
    """
    recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
    if not recently_enrolled_courses:
        return None
    enroll_messages = []
    for enrollment in recently_enrolled_courses:
        overview = enrollment.course_overview
        enroll_messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, enrollment)
        })
    platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Given a list of enrollments, filter out all but recent enrollments.
    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.
    Returns:
        list[CourseEnrollment]: A list of recent course enrollments.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        # If the enrollment has no created date, we are explicitly excluding the course
        # from the list of recent enrollments.
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """Determines if the dashboard will request donations for the given course.
    Check if donations are configured for the platform, and if the current course is accepting donations.
    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
    Returns:
        True if the course is allowing donations.
    """
    if course_id not in course_modes:
        flat_unexpired_modes = {
            unicode(course_id): [mode for mode in modes]
            for course_id, modes in course_modes.iteritems()
        }
        flat_all_modes = {
            unicode(course_id): [mode.slug for mode in modes]
            for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
        }
        log.error(
            u'Can not find `%s` in course modes.`%s`. All modes: `%s`',
            course_id,
            flat_unexpired_modes,
            flat_all_modes
        )
        # BUGFIX: previously execution fell through to the expression below,
        # which indexes course_modes[course_id] and raised KeyError whenever
        # donations were enabled. A course with no known modes cannot accept
        # donations, so answer False after logging the inconsistency.
        return False
    donations_enabled = DonationConfiguration.current().enabled
    return (
        donations_enabled and
        enrollment.mode in course_modes[course_id] and
        course_modes[course_id][enrollment.mode].min_price == 0
    )
def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    email_opt_in = request.POST.get('email_opt_in')
    if email_opt_in is None:
        # Nothing submitted; leave the preference untouched.
        return
    preferences_api.update_email_opt_in(request.user, org, email_opt_in == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.
    A credit course is a course for which a user can purchased
    college credit.  The current flow is:
    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
    The dashboard is responsible for communicating the user's state in this flow.
    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.
    Returns: dict
    The returned dictionary has keys that are `CourseKey`s and values that
    are dictionaries with:
        * eligible (bool): True if the user is eligible for credit in this course.
        * deadline (datetime): The deadline for purchasing and requesting credit for this course.
        * purchased (bool): Whether the user has purchased credit for this course.
        * provider_name (string): The display name of the credit provider.
        * provider_status_url (string): A URL the user can visit to check on their credit request status.
        * request_status (string): Either "pending", "approved", or "rejected"
        * error (bool): If true, an unexpected error occurred when retrieving the credit status,
            so the user should contact the support team.
    Example:
    >>> _credit_statuses(user, course_enrollments)
    {
        CourseKey.from_string("edX/DemoX/Demo_Course"): {
            "course_key": "edX/DemoX/Demo_Course",
            "eligible": True,
            "deadline": 2015-11-23 00:00:00 UTC,
            "purchased": True,
            "provider_name": "Hogwarts",
            "provider_status_url": "http://example.com/status",
            "request_status": "pending",
            "error": False
        }
    }
    """
    # Imported inside the function rather than at module level — presumably to
    # avoid a circular import between this module and the credit app; confirm.
    from openedx.core.djangoapps.credit import api as credit_api

    # Feature flag off: no credit statuses are shown on the dashboard at all.
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}

    # Map of the user's credit requests, keyed by course.
    # NOTE(review): this assumes the credit API returns "course_key" values that
    # compare equal to the CourseKey objects used for lookup below — verify,
    # since a plain string key would silently make every .get() miss.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }

    # Enrollments in "credit" mode indicate the user has purchased credit.
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }

    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider.  We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }

    # Provider metadata (display name, status URL) indexed by provider ID.
    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }

    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        providers_names = get_credit_provider_display_names(course_key)
        # Default status: eligible, but not yet attributed to any provider.
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": make_providers_strings(providers_names),
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }

        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                # Data inconsistency: a credit enrollment exists but the provider
                # attribute was never set.  Surface an error to the user and log.
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s.  The user will not be able to see his or her "
                    u"credit request status on the student dashboard.  This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                # Overwrite the defaults with the purchased provider's details.
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id

        statuses[course_key] = status
    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.
    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.
    Args:
        request (`Request`): The Django request object
    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is set to False to avoid breaking legacy code or
            code with non-standard flows (ex. beta tester invitations), but
            for any standard enrollment flow you probably want this to be True.
    Returns:
        Response
    """
    # Get the user
    user = request.user

    # Ensure the user is authenticated
    # (403 specifically — the front end relies on this status to redirect to login)
    if not user.is_authenticated():
        return HttpResponseForbidden()

    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))

    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))

    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))

        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)

        available_modes = CourseMode.modes_for_course_dict(course_id)

        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            # The body of a 200 response carries the redirect target;
            # the AJAX caller performs the actual redirect.
            return HttpResponse(redirect_url)

        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug.  If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
            try:
                enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
                if enroll_mode:
                    enrollment = CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
                    enrollment.send_signal(EnrollStatusChange.enroll)
            except Exception:  # pylint: disable=broad-except
                return HttpResponseBadRequest(_("Could not enroll"))

        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )

        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))

        # Users with certain certificate statuses (e.g. already earned) may not unenroll.
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))

        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# TODO: The login-failure cases below should log at distinct, appropriate levels
# (e.g. info for expected failures, warning/error for anomalies).
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """AJAX request to log in the user.

    Handles both first-party (email/password) and third-party (social auth
    pipeline) login.  Returns a JsonResponse with a "success" flag; on success
    the response also carries login cookies for the external marketing site.
    Note: several error responses return 200 with success=False (see the TODO
    comments below about intended status codes).
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    # Third-party auth applies only when the pipeline is actually running.
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Supplying an email or password explicitly overrides a running
    # third-party pipeline ("trumps" it) and forces first-party auth.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)

    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)

        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            # The social account is valid but is not linked to any platform
            # account: explain to the user how to link or register.
            AUDIT_LOG.warning(
                u"Login failed - user with username {username} has no social auth "
                "with backend_name {backend_name}".format(
                    username=username, backend_name=backend_name)
            )
            message = _(
                "You've successfully logged into your {provider_name} account, "
                "but this account isn't linked with an {platform_name} account yet."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "Use your {platform_name} username and password to log into {platform_name} below, "
                "and then link your {platform_name} account with {provider_name} from your dashboard."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "If you don't have an {platform_name} account yet, "
                "click <strong>Register</strong> at the top of the page."
            ).format(
                platform_name=platform_name
            )

            return HttpResponse(message, content_type="text/plain", status=403)

    else:
        # First-party auth: both credentials must be present.
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                # TODO: User error message
                "value": _('There was an error receiving your login information. Please email us.'),
            })  # TODO: this should be status code 400

        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # Leave `user` as None; authentication below is then guaranteed
            # to fail (see the invalid-username comment further down).
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))

    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth.  Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301  # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)

    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            lockout_message = _('This account has been temporarily locked due '
                                'to excessive login failures. Try again later.')
            return JsonResponse({
                "success": False,
                "value": lockout_message,
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403  # pylint: disable=fixme

    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""

    if not third_party_auth_successful:
        try:
            # `authenticate` also enforces rate limiting via the ratelimited backend.
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)

        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)

    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(
            user.id,
            {
                'email': email,
                'username': username
            },
            {
                # Disable MailChimp because we don't want to update the user's email
                # and username in MailChimp on every page load. We only need to capture
                # this data on registration/activation.
                'MailChimp': False
            }
        )

        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # "Remember me": keep the session alive for one week (in seconds).
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                # Session expires when the browser closes.
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise

        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)

        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })

        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)

    # Reaching here means the account exists but is not yet activated:
    # resend the activation email and report the failure.
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))

    reactivation_email_for_user(user)
    not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an "
                          "email message with instructions for activating your account.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400  # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend

    # Only OAuth1/OAuth2 backends are supported by this endpoint.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404

    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)

    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    user = None
    try:
        user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        pass
    # do_auth can return a non-User object if it fails
    if user and isinstance(user, User):
        login(request, user)
        return JsonResponse(status=204)

    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.
    """
    # Staff only; everyone else sees a 404 rather than a permission error.
    if not request.user.is_staff:
        raise Http404

    disabled_standings = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )
    # One table row per disabled account: who it is and who disabled it.
    rows = [
        [standing.user.username, standing.user.standing.changed_by]
        for standing in disabled_standings
    ]
    context = {
        'headers': ['username', 'account_changed_by'],
        'rows': rows,
    }
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html
    """
    if not request.user.is_staff:
        raise Http404

    context = {}

    # Validate the username field.
    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)

    # Validate the requested action.
    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)

    username = username.strip()
    try:
        target_user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)

    # Fetch (or lazily create) the standing record for this user.
    standing, _created = UserStanding.objects.get_or_create(
        user=target_user, defaults={'changed_by': request.user},
    )

    if account_action == 'disable':
        standing.account_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        standing.account_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)

    # Record who made the change and when, then persist.
    standing.changed_by = request.user
    standing.standing_last_changed_at = datetime.datetime.now(UTC)
    standing.save()

    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    if 'location' in request.POST:
        profile.location = request.POST['location']
    profile.save()

    payload = {
        "success": True,
        "location": profile.location,
    }
    return JsonResponse(payload)
class AccountValidationError(Exception):
    """
    An account-creation validation failure tied to a specific form field.

    Carries the offending field name alongside the message so callers can
    report exactly which input was invalid.
    """
    def __init__(self, message, field):
        # Name of the form field that failed validation (e.g. "username", "email").
        self.field = field
        super(AccountValidationError, self).__init__(message)
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    handler that saves the user Signup Source
    when the user is created
    """
    # Only act on initial creation, never on subsequent saves.
    if not kwargs.get('created'):
        return
    site = configuration_helpers.get_value('SITE_NAME')
    if not site:
        return
    UserSignupSource(user=kwargs['instance'], site=site).save()
    log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Arguments:
        form (AccountCreationForm): Validated form carrying username, email,
            password, and profile fields.
        custom_form (Form): Optional site-specific registration extension form.

    Returns a tuple (User, UserProfile, Registration).

    Raises:
        ValidationError: If either form carries validation errors.
        AccountValidationError: If the username or email is already taken.

    Note: this function is also used for creating test users.
    """
    errors = {}
    errors.update(form.errors)
    if custom_form:
        errors.update(custom_form.errors)
    if errors:
        raise ValidationError(errors)

    # New accounts start inactive; activation happens via the emailed link.
    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()

    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
            if custom_form:
                custom_model = custom_form.save(commit=False)
                custom_model.user = user
                custom_model.save()
    except IntegrityError:
        # Figure out the cause of the integrity error.
        # Use .exists() instead of len(queryset) so we issue a cheap
        # EXISTS query rather than fetching entire rows just to count them.
        if User.objects.filter(username=user.username).exists():
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif User.objects.filter(email=user.email).exists():
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise

    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)

    registration.register(user)

    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        # Extra, site-configured profile fields are serialized into the meta blob.
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise

    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.
    Does not return anything.
    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.
    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disprate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").

    NOTE(review): despite "Does not return anything" above, the function body
    ends with `return new_user` — the docstring and behavior disagree; confirm
    which is intended before relying on the return value.
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    # allow to define custom set of required/optional/hidden fields via configuration
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )

    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params

    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        # Third-party-auth accounts never use the submitted password;
        # generate a random one instead.
        params["password"] = pipeline.make_random_password()

    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            # External email is invalid: keep the form-submitted email instead.
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])

    extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    tos_required = (
        registration_fields.get('terms_of_service') != 'hidden' or
        registration_fields.get('honor_code') != 'hidden'
    ) and (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(
            external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
        )
    )

    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)

    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form, custom_form)

        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                raise ValidationError({'access_token': [error_message]})

    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            # Best-effort: a notification failure must not abort account creation.
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))

    dog_stats_api.increment("common.student.account_created")

    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)

    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                # Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey.
                'age': profile.age or -1,
                'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]

        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })

        analytics.identify(*identity_args)

        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    # Announce registration
    REGISTER_USER.send(sender=None, user=user, profile=profile)

    create_comments_service_user(user)

    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = configuration_helpers.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        send_activation_email.delay(user, subject, message, from_address)
    else:
        # No activation email: activate immediately and honor any pending enrollments.
        registration.activate()
        _enroll_user_in_pending_courses(user)  # Enroll student in any pending courses

    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)

    # Attribute the registration to a referring affiliate, if any.
    _record_registration_attribution(request, new_user)

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))

    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)

        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))

    return new_user
def _enroll_user_in_pending_courses(student):
    """
    Enroll ``student`` in every pending course enrollment recorded for
    their email address, provided the allowance is flagged auto-enroll.
    """
    for allowance in CourseEnrollmentAllowed.objects.filter(email=student.email):
        if not allowance.auto_enroll:
            continue
        enrollment = CourseEnrollment.enroll(student, allowance.course_id)
        manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
        if manual_enrollment_audit is not None:
            # Carry the original enroller and reason into a fresh audit row
            # recording the allowed-to-enroll -> enrolled transition for the
            # same email address.
            ManualEnrollmentAudit.create_manual_enrollment_audit(
                manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
                manual_enrollment_audit.reason, enrollment
            )
def _record_registration_attribution(request, user):
    """
    Attribute this user's registration to the referring affiliate, if
    an affiliate cookie is present on the request.
    """
    affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
    if user is None or affiliate_id is None:
        # Nothing to attribute: no user created or no affiliate referral.
        return
    UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html

    Arguments:
        request: Django request; POST data supplies the registration fields.
        post_override: optional dict used in place of request.POST
            (convenience for internal callers/tests).

    Returns:
        JsonResponse with ``success`` flag; HTTP 400 on validation failure.
    """
    # Deprecated entry point; RegistrationView supersedes it.
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        # Single-field account failure: report the offending field and message.
        # NOTE(review): `exc.message` is a Python 2 idiom.
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Form-level failure: surface only the first failing field.
        # NOTE(review): `iteritems()` is Python 2 only.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )
    redirect_url = None  # The AJAX method calling should know the default destination upon success
    # Resume the third-party-auth pipeline if necessary.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    # Mark the user as logged in on the response cookies.
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.
    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" will redirect to the `redirect_to` value if set, or
        course home page if course_id is defined, otherwise it will redirect to dashboard
    * `redirect_to`: will redirect to to this url
    If username, email, or password are not provided, use
    randomly generated credentials.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]
    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)
    redirect_to = request.GET.get('redirect_to', None)
    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')
    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    # Roles are comma-separated; blank entries are dropped.
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    # An explicit redirect_to also implies a redirect at the end.
    redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
    login_when_done = 'no_login' not in request.GET
    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )
    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except (AccountValidationError, ValidationError):
        # Attempt to retrieve the existing user: the account already exists,
        # so reset its credentials to the requested ones instead.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)
    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()
    if is_superuser is not None:
        user.is_superuser = (is_superuser == "true")
        user.save()
    # Activate the user (skips the activation-email round trip).
    reg.activate()
    reg.save()
    # ensure parental consent threshold is met
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()
    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)
    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)
    create_comments_service_user(user)
    # Provide the user with a valid CSRF token
    # then return a 200 response unless redirect is true
    if redirect_when_done:
        # Redirect to specific page if specified
        if redirect_to:
            redirect_url = redirect_to
        # Redirect to course info page if course_id is known
        elif course_id:
            try:
                # redirect to course info page in LMS
                redirect_url = reverse(
                    'info',
                    kwargs={'course_id': course_id}
                )
            except NoReverseMatch:
                # redirect to course outline page in Studio
                redirect_url = reverse(
                    'course_handler',
                    kwargs={'course_key_string': course_id}
                )
        else:
            try:
                # redirect to dashboard for LMS
                redirect_url = reverse('dashboard')
            except NoReverseMatch:
                # redirect to home for Studio
                redirect_url = reverse('home')
        return redirect(redirect_url)
    elif request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked"""
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        registration = regs[0]
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not registration.user.is_active:
            registration.activate()
            already_active = False
            # Enroll student in any pending courses he/she may have if auto_enroll flag is set
            _enroll_user_in_pending_courses(registration.user)
        return render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
    if len(regs) == 0:
        # Unknown key: the link is stale or was never issued.
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    # More than one registration matched the key -- should never happen.
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail. """
    # Rate-limit via the BadRequestRateLimiter helper before doing any work.
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()
    form = PasswordResetFormNoActive(request.POST)
    if not form.is_valid():
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)
    else:
        from_email = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
        form.save(
            use_https=request.is_secure(),
            from_email=from_email,
            request=request,
            domain_override=request.get_host()
        )
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
    # Always report success so the endpoint cannot be used to probe for
    # valid accounts.
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def uidb36_to_uidb64(uidb36):
    """
    Convert a legacy base36-encoded user ID into the base64 form used by
    newer Django password-reset URLs.
    https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
    Args:
        uidb36: base36-encoded user ID
    Returns: base64-encoded user ID. Otherwise returns a dummy, invalid ID
    """
    try:
        user_id = base36_to_int(uidb36)
    except ValueError:
        # dummy invalid ID (incorrect padding for base64)
        return '1'
    return force_text(urlsafe_base64_encode(force_bytes(user_id)))
def validate_password(user, password):
    """
    Tie in password policy enforcement as an optional level of
    security protection
    Args:
        user: the user object whose password we're checking.
        password: the user's proposed new password.
    Returns:
        is_valid_password: a boolean indicating if the new password
            passes the validation.
        err_msg: an error message if there's a violation of one of the password
            checks. Otherwise, `None`.

    NOTE(review): each check below overwrites ``err_msg``, so when several
    policies are violated only the message from the *last* failing check
    is reported to the caller.
    """
    err_msg = None
    # Strength policy is feature-flagged; the reuse/frequency checks below
    # are governed by ADVANCED_SECURITY_CONFIG instead.
    if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
        try:
            validate_password_strength(password)
        except ValidationError as err:
            err_msg = _('Password: ') + '; '.join(err.messages)
    # also, check the password reuse policy
    if not PasswordHistory.is_allowable_password_reuse(user, password):
        if user.is_staff:
            num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
        else:
            num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
        # Because of how ngettext is, splitting the following into shorter lines would be ugly.
        # pylint: disable=line-too-long
        err_msg = ungettext(
            "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
            "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
            num_distinct
        ).format(num=num_distinct)
    # also, check to see if passwords are getting reset too frequent
    if PasswordHistory.is_password_reset_too_soon(user):
        num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
        # Because of how ngettext is, splitting the following into shorter lines would be ugly.
        # pylint: disable=line-too-long
        err_msg = ungettext(
            "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
            "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
            num_days
        ).format(num=num_days)
    is_password_valid = err_msg is None
    return is_password_valid, err_msg
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
    """
    A wrapper around django.contrib.auth.views.password_reset_confirm.
    Needed because we want to set the user as active at this step.
    We also optionally do some additional password policy checks.
    """
    # convert old-style base36-encoded user id to base64
    uidb64 = uidb36_to_uidb64(uidb36)
    platform_name = {
        "platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    }
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
    except (ValueError, User.DoesNotExist):
        # if there's any error getting a user, just let django's
        # password_reset_confirm function handle it.
        return password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
    if request.method == 'POST':
        password = request.POST['new_password1']
        # Enforce our optional password policy before delegating to Django.
        is_password_valid, password_err_msg = validate_password(user, password)
        if not is_password_valid:
            # We have a password reset attempt which violates some security
            # policy. Use the existing Django template to communicate that
            # back to the user.
            context = {
                'validlink': False,
                'form': None,
                'title': _('Password reset unsuccessful'),
                'err_msg': password_err_msg,
            }
            context.update(platform_name)
            return TemplateResponse(
                request, 'registration/password_reset_confirm.html', context
            )
        # remember what the old password hash is before we call down
        old_password_hash = user.password
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
        # If password reset was unsuccessful a template response is returned (status_code 200).
        # Check if form is invalid then show an error to the user.
        # Note if password reset was successful we get response redirect (status_code 302).
        if response.status_code == 200 and not response.context_data['form'].is_valid():
            response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
            return response
        # get the updated user
        updated_user = User.objects.get(id=uid_int)
        # did the password hash change, if so record it in the PasswordHistory
        if updated_user.password != old_password_hash:
            entry = PasswordHistory()
            entry.create(updated_user)
    else:
        # GET: render the reset form; additionally activate the account if
        # the reset link is valid (clicking the link proves email ownership).
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
        response_was_successful = response.context_data.get('validlink')
        if response_was_successful and not user.is_active:
            user.is_active = True
            user.save()
    return response
def reactivation_email_for_user(user):
    """Send a fresh account-activation email to ``user``; returns JsonResponse."""
    try:
        reg = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
    context = {
        'name': user.profile.name,
        'key': reg.activation_key,
    }
    # Email subject must not contain newlines, so collapse rendered lines.
    rendered_subject = render_to_string('emails/activation_email_subject.txt', context)
    subject = ''.join(rendered_subject.splitlines())
    message = render_to_string('emails/activation_email.txt', context)
    from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
    try:
        user.email_user(subject, message, from_address)
    except Exception:  # pylint: disable=broad-except
        log.error(
            u'Unable to send reactivation email from "%s" to "%s"',
            from_address,
            user.email,
            exc_info=True
        )
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme
    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address.

    Raises:
        ValueError: if the address is malformed, identical to the user's
            current address, or already used by another account.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))
    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))
    # .exists() lets the database short-circuit on the first match instead
    # of counting every matching row as .count() != 0 did.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Record a pending email change for ``user`` and send an activation
    message to ``new_email``. Raises ValueError if the activation email
    cannot be sent.
    """
    existing_changes = PendingEmailChange.objects.filter(user=user)
    if existing_changes:
        pec = existing_changes[0]
    else:
        pec = PendingEmailChange()
        pec.user = user
    # Generate a random key when the caller did not supply one.
    pec.new_email = new_email
    pec.activation_key = activation_key or uuid.uuid4().hex
    pec.save()
    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }
    # Email subjects must be a single line; collapse the rendered template.
    rendered_subject = render_to_string('emails/email_change_subject.txt', context)
    subject = ''.join(rendered_subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)
    from_address = configuration_helpers.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update

    Everything runs inside one transaction: any failure path sets rollback
    so partial profile/email updates are never persisted.
    """
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            # Unknown/stale key: report and roll back.
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response
        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }
        # Another account may have claimed the address since the request.
        # NOTE(review): `.exists()` would be cheaper than len(filter(...)).
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response
        # Email subject must not contain newlines.
        subject = render_to_string('emails/email_change_subject.txt', address_context)
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)
        # Archive the old address (with timestamp) in the profile meta.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()
        # Send it to the old email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response
        # Commit the new address only after the old-address mail succeeded.
        user.email = pec.new_email
        user.save()
        pec.delete()
        # And send it to the new email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.set_rollback(True)
            return response
        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course."""
    user = request.user
    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    if request.POST.get("receive_emails"):
        # Opting in: remove any existing opt-out record for this course.
        existing_optout = Optout.objects.filter(user=user, course_id=course_key)
        if existing_optout:
            existing_optout.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "yes", "course": course_id},
            page='dashboard',
        )
    else:
        # Opting out: record (or keep) an opt-out row for this course.
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "no", "course": course_id},
            page='dashboard',
        )
    return JsonResponse({"success": True})
class LogoutView(TemplateView):
    """
    Logs out user and redirects.
    The template should load iframes to log the user out of OpenID Connect services.
    See http://openid.net/specs/openid-connect-logout-1_0.html.
    """
    # OAuth2 client ids the user authorized this session; filled in dispatch().
    oauth_client_ids = []
    template_name = 'logout.html'
    # Keep track of the page to which the user should ultimately be redirected.
    target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
    def dispatch(self, request, *args, **kwargs):  # pylint: disable=missing-docstring
        # We do not log here, because we have a handler registered to perform logging on successful logouts.
        request.is_from_logout = True
        # Get the list of authorized clients before we clear the session.
        self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])
        logout(request)
        # If we don't need to deal with OIDC logouts, just redirect the user.
        if LogoutViewConfiguration.current().enabled and self.oauth_client_ids:
            response = super(LogoutView, self).dispatch(request, *args, **kwargs)
        else:
            response = redirect(self.target)
        # Clear the cookie used by the edx.org marketing site
        delete_logged_in_cookies(response)
        return response
    def _build_logout_url(self, url):
        """
        Builds a logout URL with the `no_redirect` query string parameter.
        Args:
            url (str): IDA logout URL
        Returns:
            str
        """
        scheme, netloc, path, query_string, fragment = urlsplit(url)
        query_params = parse_qs(query_string)
        query_params['no_redirect'] = 1
        new_query_string = urlencode(query_params, doseq=True)
        return urlunsplit((scheme, netloc, path, new_query_string, fragment))
    def get_context_data(self, **kwargs):
        # Supplies the logout template with the list of IDA logout URLs to
        # load in iframes, plus the final redirect target.
        context = super(LogoutView, self).get_context_data(**kwargs)
        # Create a list of URIs that must be called to log the user out of all of the IDAs.
        uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
                                     logout_uri__isnull=False).values_list('logout_uri', flat=True)
        referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
        logout_uris = []
        for uri in uris:
            # Skip the IDA the user is currently on -- logging out of it is
            # already handled by this request.
            if not referrer or (referrer and not uri.startswith(referrer)):
                logout_uris.append(self._build_logout_url(uri))
        context.update({
            'target': self.target,
            'logout_uris': logout_uris,
        })
        return context
|
deepsrijit1105/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 104,572
|
[
"VisIt"
] |
c3dc7a3f6a41a028be930227a9c7ad3ef94655796911a4dcb71e4089d3b12452
|
# test_moose_thread.py ---
#
# Filename: test_moose_thread.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Thu Mar 8 09:38:02 2012 (+0530)
# Version:
# Last-Updated: Thu Mar 8 15:16:03 2012 (+0530)
# By: Subhasis Ray
# Update #: 162
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
# Change log:
#
# 2012-03-08 12:31:46 (+0530) Initial version by Subha
#
# Code:
"""
Example of using multithreading to run a MOOSE simulation in
parallel with querying MOOSE objects involved. See the documentation
of the classes to get an idea of this demo's function.
"""
import sys
import threading
# Python3 and python2.
try:
import queue
except ImportError as e:
import Queue as queue
import time
from datetime import datetime
import moose
# Cross-thread channels: the worker thread announces completion on
# worker_queue; the status thread relays that to the main thread
# through status_queue.
worker_queue = queue.Queue()
status_queue = queue.Queue()
class WorkerThread(threading.Thread):
    """This thread initializes the simulation (reinit) and then runs
    the simulation in its run method. It keeps querying moose for
    running status every second and returns when the simulation is
    over. It puts its name in the global worker_queue at the end to
    signal successful completion."""
    def __init__(self, runtime):
        # runtime: simulated time (passed straight to moose.start).
        threading.Thread.__init__(self)
        self.runtime = runtime
        print(('Created WorkerThread of name', self.name))
    def run(self):
        """Reinitialize MOOSE, start the run, and poll until it finishes."""
        print((self.name, 'Starting run for', self.runtime, ' seconds'))
        moose.reinit()
        moose.start(self.runtime)
        # Poll once per second until the MOOSE scheduler reports done.
        while moose.isRunning():
            time.sleep(1.0)
        print((self.name, 'Table length', len(moose.Table('/tab').vector)))
        print((self.name, 'Finishing simulation'))
        # Signal completion to the StatusThread.
        worker_queue.put(self.name)
class StatusThread(threading.Thread):
    """This thread checks the status of the moose worker thread by
    checking the worker_queue for available entry. If there is
    nothing, it goes to sleep for a second and then prints current
    length of the table. If there is an entry, it puts its name in the
    status queue, which is used by the main thread to recognize
    successful completion."""
    def __init__(self, tab):
        # tab: the moose.Table being filled by the running simulation.
        threading.Thread.__init__(self)
        self.table = tab
        print(('Created StatusThread of name', self.name))
    def run(self):
        """Poll worker_queue; report progress until the worker finishes."""
        while True:
            try:
                # Non-blocking get: raises queue.Empty when nothing is there.
                value = worker_queue.get(False)
                print((self.name, 'Received queue entry: ', value
                       , '. Final table length:', len(self.table.vector)
                       , ' ... now Finishing'
                       ))
                # Tell the main thread we are done, then exit the thread.
                status_queue.put(self.name)
                return
            except queue.Empty:
                time.sleep(1.0)
                print((self.name, 'Queue is empty. Current table length:'
                       , len(self.table.vector)
                       ))
if __name__ == '__main__':
    # Model: a pulse generator whose output is recorded into a table.
    pg = moose.PulseGen('pg')
    pg.firstDelay = 10.0
    pg.firstLevel = 10.0
    pg.firstWidth = 5.0
    tab = moose.Table('tab')
    moose.connect(tab, 'requestOut', pg, 'getOutputValue')
    # Tick 0 at dt=1.0 drives both elements' 'process' calls.
    moose.setClock(0, 1.0)
    moose.useClock(0, 'pg,tab', 'process')
    t1 = WorkerThread(10000)
    t2 = StatusThread(tab)
    # Start the status watcher first so it observes the whole run.
    t2.start()
    t1.start()
    # Block until StatusThread signals that the worker completed.
    status_queue.get(True)
    tab.xplot('threading_demo.dat', 'pulsegen_output')
    print(('Ending threading_demo: final length of table', len(tab.vector)))
#
# threading_demo.py ends here
|
BhallaLab/moose
|
moose-examples/snippets/threading_demo.py
|
Python
|
gpl-3.0
| 3,452
|
[
"MOOSE"
] |
933e44cb3f6f946965335753fd7647c027fd7ecc23604b1900c02d5a13997ba1
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_applicationprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ApplicationProfile Avi RESTful Object
description:
- This module is used to configure ApplicationProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_service_profile:
description:
- Specifies various dns service related controls for virtual service.
dos_rl_profile:
description:
- Specifies various security related controls for virtual service.
http_profile:
description:
- Specifies the http application proxy profile parameters.
name:
description:
- The name of the application profile.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved for backend connection.
- Not compatible with connection multiplexing.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tcp_app_profile:
description:
- Specifies the tcp application proxy profile parameters.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specifies which application layer proxy is enabled for the virtual service.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the application profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create an Application Profile for HTTP application enabled for SSL traffic
avi_applicationprofile:
controller: ''
username: ''
password: ''
http_profile:
cache_config:
age_header: true
aggressive: false
date_header: true
default_expire: 600
enabled: false
heuristic_expire: false
max_cache_size: 0
max_object_size: 4194304
mime_types_group_refs:
- admin:System-Cacheable-Resource-Types
min_object_size: 100
query_cacheable: false
xcache_header: true
client_body_timeout: 0
client_header_timeout: 10000
client_max_body_size: 0
client_max_header_size: 12
client_max_request_size: 48
compression_profile:
compressible_content_ref: admin:System-Compressible-Content-Types
compression: false
remove_accept_encoding_header: true
type: AUTO_COMPRESSION
connection_multiplexing_enabled: true
hsts_enabled: false
hsts_max_age: 365
http_to_https: false
httponly_enabled: false
keepalive_header: false
keepalive_timeout: 30000
max_bad_rps_cip: 0
max_bad_rps_cip_uri: 0
max_bad_rps_uri: 0
max_rps_cip: 0
max_rps_cip_uri: 0
max_rps_unknown_cip: 0
max_rps_unknown_uri: 0
max_rps_uri: 0
post_accept_timeout: 30000
secure_cookie_enabled: false
server_side_redirect_to_https: false
spdy_enabled: false
spdy_fwd_proxy_mode: false
ssl_client_certificate_mode: SSL_CLIENT_CERTIFICATE_NONE
ssl_everywhere_enabled: false
websockets_enabled: true
x_forwarded_proto_enabled: false
xff_alternate_name: X-Forwarded-For
xff_enabled: true
name: System-HTTP
tenant_ref: admin
type: APPLICATION_PROFILE_TYPE_HTTP
'''
RETURN = '''
obj:
description: ApplicationProfile (api/applicationprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Build the module argument spec and drive the common Avi ansible API."""
    module_specific_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        description=dict(type='str',),
        dns_service_profile=dict(type='dict',),
        dos_rl_profile=dict(type='dict',),
        http_profile=dict(type='dict',),
        name=dict(type='str', required=True),
        preserve_client_ip=dict(type='bool',),
        tcp_app_profile=dict(type='dict',),
        tenant_ref=dict(type='str',),
        type=dict(type='str', required=True),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Merge in the shared Avi connection arguments (controller, auth, ...).
    module_specific_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=module_specific_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'applicationprofile',
                           set([]))
if __name__ == '__main__':
    main()
|
adityacs/ansible
|
lib/ansible/modules/network/avi/avi_applicationprofile.py
|
Python
|
gpl-3.0
| 6,304
|
[
"VisIt"
] |
8c257b0a99750df025e76afcaf43e5baede30b7b0e17b4d83dfd6511759c5ea5
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 12:33:23 2015
@author: nouamanelaanait
"""
import warnings, os
import numpy as np
import h5py
import multiprocess as mp
from skimage.morphology import dilation, disk
from scipy.ndimage import gaussian_filter
import cv2
class Corrector(object):
    ''' Container for an image data set with methods for pixel-wise image
    correction: background subtraction, normalization, flat-field
    correction, etc.
    '''

    def __init__(self):
        # Raw input data (typically an h5py.Dataset) and the most recently
        # processed result.  (A stray no-op `self.__init__` expression that
        # had no effect was removed from here.)
        self.data = []
        self.proc_data = []

    def loadData(self, dataset):
        ''' Load the h5 Dataset to be corrected.

        Input:
            dataset: h5py.Dataset
        '''
        if not isinstance(dataset, h5py.Dataset):
            warnings.warn( 'Error: Data must be an h5 Dataset object' )
        else:
            self.data = dataset

    def loadprocData(self, dataset):
        ''' Load already-processed data for further correction.

        Input:
            dataset: h5 dataset or numpy ndarray
        '''
        self.proc_data = dataset

    def clearData(self):
        ''' Remove the raw data from the object and reset it to an empty
        list.
        '''
        del self.data
        self.data = []

    def getData(self):
        ''' Return the loaded h5 Dataset.

        output: h5 dataset
        '''
        return self.data

    def bkgSubtract(self, impRead, impDark, impFlat = None):
        ''' Correct each image for READ and DARK noise of a ccd camera and,
        when a flat-field image is given, normalize by it.

        Input:
            impRead: np.ndarray, read-noise image.
            impDark: np.ndarray, dark-current image (scaled per-frame by the
                exposure time).
            impFlat = None, flatfield Image.
            The size of the above images must match the raw image.
        Output:
            stack of corrected images, or None when the correction failed
            (previously an unbound `corrstack` raised NameError here).
        '''
        dset = self.data
        att = dset.attrs
        # Per-frame exposure times; assumes the dataset carries a 'seconds'
        # attribute with one entry per frame -- TODO confirm against writer.
        exposure = att['seconds']
        impList = []
        corrstack = None  # stays None if the shapes do not broadcast
        try:
            for t, raw in zip(exposure, dset):
                imp = raw - impRead - impDark * t
                if impFlat is not None:
                    imp = imp / impFlat
                # Clamp non-physical negative intensities to 1 so later
                # log/division steps stay well-defined.
                imp[imp < 0] = 1
                impList.append(imp)
            corrstack = np.dstack([impList])
            self.proc_data = corrstack
        except ValueError:
            warnings.warn('Error: Correction Files might not have the same size as the Image.')
        return corrstack

    def normalize(self, monitor = None, trans = 'Trans', time = 'seconds', calibration = 1.0, use_processed = False):
        ''' Normalize the counts in each image by transmission filters and
        either exposure time or a monitor signal (e.g. ion chamber), and
        convert counts to photons if a calibration factor is provided.

        Input:
            monitor = string, attribute name of the monitor signal, or None
                to normalize by exposure time instead.
            trans = string, attribute name of the transmission values.
            time = string, attribute name of the exposure times.
            calibration = float, counts-to-photons conversion factor.
            use_processed = bool, if True uses the latest corrected data.
        Output:
            stack of normalized images, or None when the normalization
            failed (previously an unbound `normstack` raised NameError).
        '''
        if use_processed:
            data = self.proc_data
        else:
            data = self.data
        # Metadata always comes from the raw dataset: proc_data may be a
        # bare ndarray without HDF5 attributes.
        att = self.data.attrs
        exposure = att[time]
        transmission = att[trans]
        impList = []
        normstack = None  # stays None if the arrays cannot be broadcast
        if monitor is None:
            try:
                for raw, t, tr in zip(data, exposure, transmission):
                    impList.append(calibration * raw / tr / t)
                normstack = np.dstack([impList])
                self.proc_data = normstack
            except ValueError:
                warnings.warn('Error: Couldnt broadcast seconds, trans and dset together')
        else:
            mon = att[monitor]
            try:
                for raw, tr, m in zip(data, transmission, mon):
                    impList.append(calibration * raw / tr / m)
                normstack = np.dstack([impList])
                self.proc_data = normstack
            except ValueError:
                warnings.warn('Error: Couldnt broadcast trans, monitor, and dset arrays together.')
        return normstack

    def flatField(self, processes , sigmaMorph = 100, radiusMorph = 25, winLP = 7, sigmaBlur = 100,
                  use_processed = False, method = 'morphology'):
        ''' Estimate the illumination function of each image and divide it
        out for flat-field correction.

        Input:
            processes = int, number of worker processes to use.
            sigmaMorph = float, sigma of gaussian filter ('morphology').
            radiusMorph = int, radius of structuring element.
            winLP = int, diameter of low-spatial frequency to cut out.
            sigmaBlur = float, currently unused; kept for interface
                compatibility.
            use_processed = bool, if True uses the latest corrected data.
            method = 'morphology': A combination of morphological Filter (Dilation) and Gaussian (Blur) filter.
            method = 'lowpass filter': gaussian lowpass filter applied directly to image.
        Output:
            stack of flat-field corrected images.
        Raises:
            ValueError if `method` is not one of the supported names
            (previously an unbound `jobs` raised NameError later).
        '''
        if use_processed:
            data = self.proc_data
        else:
            data = self.data
        # Validate before spawning workers so a typo fails fast.
        if method not in ('morphology', 'lowpass filter'):
            raise ValueError("method must be 'morphology' or 'lowpass filter'")

        def __flattenbyMorpho(imp):
            # Dilation removes small dark features; the blur smooths what is
            # left, which approximates the illumination profile.
            dilated = cv2.dilate(imp, disk(radiusMorph))
            illum = gaussian_filter(dilated, sigmaMorph)
            return imp * 1.0 / illum

        def __flattenbyLPF(imp):
            # Fourier transform.
            FT_shift = np.fft.fftshift(np.fft.fft2(imp))
            # Build a smooth mask that suppresses the low spatial frequencies
            # around the spectrum center.  Integer division (//) keeps the
            # slice indices valid on Python 3 (plain / yields floats).
            row, col = imp.shape
            cen_row, cen_col = row // 2, col // 2
            half = winLP // 2
            arr = np.ones(imp.shape)
            arr[cen_row - half:cen_row + half, cen_col - half:cen_col + half] = 0
            sigma = winLP
            LPfilt = gaussian_filter(arr, sigma / 2, truncate = 5.)
            LPfilt[LPfilt < 0.9999] = 0.
            LPfilt = gaussian_filter(LPfilt, sigma, truncate = 5.)
            # Keep the DC component so the overall intensity is preserved.
            LPfilt[cen_row, cen_col] = 1
            # Apply LP Filter and Inverse Fourier transform.
            iFT = np.fft.ifft2(np.fft.ifftshift(FT_shift * LPfilt))
            return np.abs(iFT)

        # Start the pool of workers.
        print('launching %i kernels...'%(processes))
        pool = mp.Pool(processes)
        tasks = [imp for imp in data]
        # imap requires chunksize >= 1; guard against fewer frames than
        # processes.
        chunk = max(1, int(data.shape[0] / processes))
        if method == 'morphology':
            jobs = pool.imap(__flattenbyMorpho, tasks, chunksize = chunk)
        else:
            jobs = pool.imap(__flattenbyLPF, tasks, chunksize = chunk)
        # Collect images from the worker processes.
        results = []
        print('Extracting Flattened Images...')
        try:
            for j in jobs:
                results.append(j)
        except ValueError:
            warnings.warn('Error: There appears to be a problem with the processing')
        # Pack all images into a 3d array.
        flatstack = np.array([imp for imp in results])
        # Close the pool.
        print('Closing down the kernels... \n')
        pool.close()
        return flatstack
|
nlaanait/pyxrim
|
pyxrim/process/corrections.py
|
Python
|
mit
| 7,949
|
[
"Gaussian"
] |
ed0cf79f0a0344f08138ccc76b355fe74fa69b9985501ec539573074d9d50ce3
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.securitycenter_v1beta1.services.security_center import (
SecurityCenterAsyncClient,
)
from google.cloud.securitycenter_v1beta1.services.security_center import (
SecurityCenterClient,
)
from google.cloud.securitycenter_v1beta1.services.security_center import pagers
from google.cloud.securitycenter_v1beta1.services.security_center import transports
from google.cloud.securitycenter_v1beta1.types import finding
from google.cloud.securitycenter_v1beta1.types import finding as gcs_finding
from google.cloud.securitycenter_v1beta1.types import organization_settings
from google.cloud.securitycenter_v1beta1.types import (
organization_settings as gcs_organization_settings,
)
from google.cloud.securitycenter_v1beta1.types import security_marks
from google.cloud.securitycenter_v1beta1.types import (
security_marks as gcs_security_marks,
)
from google.cloud.securitycenter_v1beta1.types import securitycenter_service
from google.cloud.securitycenter_v1beta1.types import source
from google.cloud.securitycenter_v1beta1.types import source as gcs_source
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS callback returning a fake (cert, key) byte pair."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a substitute default endpoint when *client* targets localhost."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    *.mtls.* form, passes mtls hosts through, and leaves None and
    non-Google hosts untouched."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    expectations = [
        (None, None),
        (api_endpoint, api_mtls_endpoint),
        (api_mtls_endpoint, api_mtls_endpoint),
        (sandbox_endpoint, sandbox_mtls_endpoint),
        (sandbox_mtls_endpoint, sandbox_mtls_endpoint),
        (non_googleapi, non_googleapi),
    ]
    for given, expected in expectations:
        assert SecurityCenterClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize(
    "client_class", [SecurityCenterClient, SecurityCenterAsyncClient,]
)
def test_security_center_client_from_service_account_info(client_class):
    """from_service_account_info() builds a client whose transport carries
    the credentials produced by the service-account factory and the default
    API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "securitycenter.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.SecurityCenterGrpcTransport, "grpc"),
        (transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_security_center_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports request self-signed JWT access exactly when
    always_use_jwt_access=True, and never when it is False."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [SecurityCenterClient, SecurityCenterAsyncClient,]
)
def test_security_center_client_from_service_account_file(client_class):
    """Both from_service_account_file() and its from_service_account_json()
    alias build clients carrying the factory's credentials and the default
    API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "securitycenter.googleapis.com:443"
def test_security_center_client_get_transport_class():
    """The default transport is one of the known classes and the name
    'grpc' resolves to the synchronous gRPC transport."""
    default_transport = SecurityCenterClient.get_transport_class()
    known_transports = [
        transports.SecurityCenterGrpcTransport,
    ]
    assert default_transport in known_transports

    grpc_transport = SecurityCenterClient.get_transport_class("grpc")
    assert grpc_transport == transports.SecurityCenterGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"),
        (
            SecurityCenterAsyncClient,
            transports.SecurityCenterGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    SecurityCenterClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SecurityCenterClient),
)
@mock.patch.object(
    SecurityCenterAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SecurityCenterAsyncClient),
)
def test_security_center_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honors ClientOptions and the
    GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables when choosing endpoint and transport."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(SecurityCenterClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(SecurityCenterClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc", "true"),
        (
            SecurityCenterAsyncClient,
            transports.SecurityCenterGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc", "false"),
        (
            SecurityCenterAsyncClient,
            transports.SecurityCenterGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    SecurityCenterClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SecurityCenterClient),
)
@mock.patch.object(
    SecurityCenterAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SecurityCenterAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_security_center_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint exactly when a client certificate is available AND
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        # NOTE(review): `client` here is left over from the
                        # previous section; DEFAULT_ENDPOINT is a class-level
                        # attribute so the lookup still resolves.
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [SecurityCenterClient, SecurityCenterAsyncClient]
)
@mock.patch.object(
    SecurityCenterClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SecurityCenterClient),
)
@mock.patch.object(
    SecurityCenterAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SecurityCenterAsyncClient),
)
def test_security_center_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source() resolves endpoint and cert source
    for every combination of GOOGLE_API_USE_CLIENT_CERTIFICATE and
    GOOGLE_API_USE_MTLS_ENDPOINT."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"),
        (
            SecurityCenterAsyncClient,
            transports.SecurityCenterGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_security_center_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given via ClientOptions are forwarded verbatim to the
    transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            SecurityCenterClient,
            transports.SecurityCenterGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            SecurityCenterAsyncClient,
            transports.SecurityCenterGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_security_center_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in ClientOptions is forwarded to the transport
    constructor instead of in-memory credentials."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_security_center_client_client_options_from_dict():
    """Client options may be passed as a plain dict and are applied the same
    as a ClientOptions instance."""
    with mock.patch(
        "google.cloud.securitycenter_v1beta1.services.security_center.transports.SecurityCenterGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = SecurityCenterClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            SecurityCenterClient,
            transports.SecurityCenterGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            SecurityCenterAsyncClient,
            transports.SecurityCenterGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_security_center_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file end up on the gRPC channel: the
    transport receives the file path, and create_channel() is called with
    the credentials that load_credentials_from_file returned."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "securitycenter.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="securitycenter.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.CreateSourceRequest, dict,]
)
def test_create_source(request_type, transport: str = "grpc"):
    """create_source() issues exactly one CreateSourceRequest over the stub
    and returns the unmarshalled Source."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_source.Source(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        response = client.create_source(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == securitycenter_service.CreateSourceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_source.Source)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_create_source_empty_call():
    """Calling create_source() with no arguments still sends a default
    CreateSourceRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_source), "__call__") as call:
        client.create_source()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.CreateSourceRequest()
@pytest.mark.asyncio
async def test_create_source_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.CreateSourceRequest,
):
    """Async create_source() issues the request over the async stub and
    returns the unmarshalled Source."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_source.Source(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.create_source(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == securitycenter_service.CreateSourceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_source.Source)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_source_async_from_dict():
    """Async create_source() also accepts a plain dict request."""
    await test_create_source_async(request_type=dict)
def test_create_source_field_headers():
    """Routing fields in the request are mirrored into the
    x-goog-request-params metadata header."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.CreateSourceRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_source), "__call__") as call:
        call.return_value = gcs_source.Source()
        client.create_source(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_source_field_headers_async():
    """Async variant: routing fields are mirrored into the
    x-goog-request-params metadata header."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.CreateSourceRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_source), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
        await client.create_source(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_source_flattened():
    """Check that flattened keyword args populate the create_source request."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.create_source), "__call__") as rpc:
        # Give the stub something to hand back.
        rpc.return_value = gcs_source.Source()

        # Invoke the method using only flattened (keyword) fields.
        sc_client.create_source(
            parent="parent_value", source=gcs_source.Source(name="name_value"),
        )

        # The request object the stub received must carry each flattened value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
        assert sent_args[0].source == gcs_source.Source(name="name_value")
def test_create_source_flattened_error():
    """Check that mixing a request object with flattened fields raises."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        sc_client.create_source(
            securitycenter_service.CreateSourceRequest(),
            parent="parent_value",
            source=gcs_source.Source(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_source_flattened_async():
    """Check that flattened keyword args populate the async create_source request.

    Fix: drop the dead ``call.return_value = gcs_source.Source()`` assignment
    that was immediately overwritten by the FakeUnaryUnaryCall wrapper.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_source), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_source(
            parent="parent_value", source=gcs_source.Source(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].source
        mock_val = gcs_source.Source(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_source_flattened_error_async():
    """Check that the async client rejects request-object + flattened fields."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        await sc_async.create_source(
            securitycenter_service.CreateSourceRequest(),
            parent="parent_value",
            source=gcs_source.Source(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.CreateFindingRequest, dict,]
)
def test_create_finding(request_type, transport: str = "grpc"):
    """Check the create_finding round trip against a mocked gRPC stub."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.create_finding), "__call__") as rpc:
        # Canned response the stub hands back.
        rpc.return_value = gcs_finding.Finding(
            name="name_value",
            parent="parent_value",
            resource_name="resource_name_value",
            state=gcs_finding.Finding.State.ACTIVE,
            category="category_value",
            external_uri="external_uri_value",
        )
        result = sc_client.create_finding(req)

        # The stub must have been invoked once with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.CreateFindingRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, gcs_finding.Finding)
    assert result.name == "name_value"
    assert result.parent == "parent_value"
    assert result.resource_name == "resource_name_value"
    assert result.state == gcs_finding.Finding.State.ACTIVE
    assert result.category == "category_value"
    assert result.external_uri == "external_uri_value"
def test_create_finding_empty_call():
    """Coverage failsafe: create_finding() with no request and no fields works."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.create_finding), "__call__") as rpc:
        sc_client.create_finding()
        rpc.assert_called()
        # An empty invocation must still produce a default request object.
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.CreateFindingRequest()
@pytest.mark.asyncio
async def test_create_finding_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.CreateFindingRequest,
):
    """Check the async create_finding round trip against a mocked gRPC stub."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_async.transport.create_finding), "__call__") as rpc:
        # Canned awaitable response the stub hands back.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_finding.Finding(
                name="name_value",
                parent="parent_value",
                resource_name="resource_name_value",
                state=gcs_finding.Finding.State.ACTIVE,
                category="category_value",
                external_uri="external_uri_value",
            )
        )
        result = await sc_async.create_finding(req)

        # The stub must have been invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.CreateFindingRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, gcs_finding.Finding)
    assert result.name == "name_value"
    assert result.parent == "parent_value"
    assert result.resource_name == "resource_name_value"
    assert result.state == gcs_finding.Finding.State.ACTIVE
    assert result.category == "category_value"
    assert result.external_uri == "external_uri_value"
@pytest.mark.asyncio
async def test_create_finding_async_from_dict():
    """Re-run the async create_finding test with a dict-typed request."""
    await test_create_finding_async(request_type=dict)
def test_create_finding_field_headers():
    """Check that create_finding sends routing metadata for the parent field."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = securitycenter_service.CreateFindingRequest()
    req.parent = "parent/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.create_finding), "__call__") as rpc:
        rpc.return_value = gcs_finding.Finding()
        sc_client.create_finding(req)

        # The stub must have been invoked exactly once with our request.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_finding_field_headers_async():
    """Check that the async create_finding sends routing metadata for parent."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = securitycenter_service.CreateFindingRequest()
    req.parent = "parent/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_async.transport.create_finding), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
        await sc_async.create_finding(req)

        # The stub must have been invoked with our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in sent_kwargs["metadata"]
def test_create_finding_flattened():
    """Check that flattened keyword args populate the create_finding request."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.create_finding), "__call__") as rpc:
        # Give the stub something to hand back.
        rpc.return_value = gcs_finding.Finding()

        # Invoke the method using only flattened (keyword) fields.
        sc_client.create_finding(
            parent="parent_value",
            finding_id="finding_id_value",
            finding=gcs_finding.Finding(name="name_value"),
        )

        # The request object the stub received must carry each flattened value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
        assert sent_args[0].finding_id == "finding_id_value"
        assert sent_args[0].finding == gcs_finding.Finding(name="name_value")
def test_create_finding_flattened_error():
    """Check that mixing a request object with flattened fields raises."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        sc_client.create_finding(
            securitycenter_service.CreateFindingRequest(),
            parent="parent_value",
            finding_id="finding_id_value",
            finding=gcs_finding.Finding(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_finding_flattened_async():
    """Check that flattened keyword args populate the async create_finding request.

    Fix: drop the dead ``call.return_value = gcs_finding.Finding()`` assignment
    that was immediately overwritten by the FakeUnaryUnaryCall wrapper.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_finding(
            parent="parent_value",
            finding_id="finding_id_value",
            finding=gcs_finding.Finding(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].finding_id
        mock_val = "finding_id_value"
        assert arg == mock_val
        arg = args[0].finding
        mock_val = gcs_finding.Finding(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_finding_flattened_error_async():
    """Check that the async client rejects request-object + flattened fields."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        await sc_async.create_finding(
            securitycenter_service.CreateFindingRequest(),
            parent="parent_value",
            finding_id="finding_id_value",
            finding=gcs_finding.Finding(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
def test_get_iam_policy(request_type, transport: str = "grpc"):
    """Check the get_iam_policy round trip against a mocked gRPC stub."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_iam_policy), "__call__") as rpc:
        # Canned response the stub hands back.
        rpc.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        result = sc_client.get_iam_policy(req)

        # The stub must have been invoked once with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == iam_policy_pb2.GetIamPolicyRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, policy_pb2.Policy)
    assert result.version == 774
    assert result.etag == b"etag_blob"
def test_get_iam_policy_empty_call():
    """Coverage failsafe: get_iam_policy() with no request and no fields works."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_iam_policy), "__call__") as rpc:
        sc_client.get_iam_policy()
        rpc.assert_called()
        # An empty invocation must still produce a default request object.
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """Check the async get_iam_policy round trip against a mocked gRPC stub."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_async.transport.get_iam_policy), "__call__") as rpc:
        # Canned awaitable response the stub hands back.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        result = await sc_async.get_iam_policy(req)

        # The stub must have been invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == iam_policy_pb2.GetIamPolicyRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, policy_pb2.Policy)
    assert result.version == 774
    assert result.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
    """Re-run the async get_iam_policy test with a dict-typed request."""
    await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
    """Check that get_iam_policy sends routing metadata for the resource field."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = iam_policy_pb2.GetIamPolicyRequest()
    req.resource = "resource/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        sc_client.get_iam_policy(req)

        # The stub must have been invoked exactly once with our request.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """Check that the async get_iam_policy sends routing metadata for resource."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = iam_policy_pb2.GetIamPolicyRequest()
    req.resource = "resource/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_async.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await sc_async.get_iam_policy(req)

        # The stub must have been invoked with our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in sent_kwargs["metadata"]
def test_get_iam_policy_from_dict_foreign():
    """Check get_iam_policy accepts a plain dict for the foreign request type.

    Fix: removed the unused ``response`` local — the test only verifies the
    stub was invoked, never the return value.
    """
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()
def test_get_iam_policy_flattened():
    """Check that flattened keyword args populate the get_iam_policy request."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_iam_policy), "__call__") as rpc:
        # Give the stub something to hand back.
        rpc.return_value = policy_pb2.Policy()

        # Invoke the method using only flattened (keyword) fields.
        sc_client.get_iam_policy(resource="resource_value",)

        # The request object the stub received must carry the flattened value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].resource == "resource_value"
def test_get_iam_policy_flattened_error():
    """Check that mixing a request object with flattened fields raises."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        sc_client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
    """Check that flattened keyword args populate the async get_iam_policy request.

    Fix: drop the dead ``call.return_value = policy_pb2.Policy()`` assignment
    that was immediately overwritten by the FakeUnaryUnaryCall wrapper.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
    """Check that the async client rejects request-object + flattened fields."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        await sc_async.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.GetOrganizationSettingsRequest, dict,]
)
def test_get_organization_settings(request_type, transport: str = "grpc"):
    """Check the get_organization_settings round trip against a mocked stub."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(sc_client.transport.get_organization_settings), "__call__"
    ) as rpc:
        # Canned response the stub hands back.
        rpc.return_value = organization_settings.OrganizationSettings(
            name="name_value", enable_asset_discovery=True,
        )
        result = sc_client.get_organization_settings(req)

        # The stub must have been invoked once with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.GetOrganizationSettingsRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, organization_settings.OrganizationSettings)
    assert result.name == "name_value"
    assert result.enable_asset_discovery is True
def test_get_organization_settings_empty_call():
    """Coverage failsafe: calling with no request and no fields still works."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(sc_client.transport.get_organization_settings), "__call__"
    ) as rpc:
        sc_client.get_organization_settings()
        rpc.assert_called()
        # An empty invocation must still produce a default request object.
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.GetOrganizationSettingsRequest()
@pytest.mark.asyncio
async def test_get_organization_settings_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.GetOrganizationSettingsRequest,
):
    """Check the async get_organization_settings round trip against a mocked stub."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(sc_async.transport.get_organization_settings), "__call__"
    ) as rpc:
        # Canned awaitable response the stub hands back.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            organization_settings.OrganizationSettings(
                name="name_value", enable_asset_discovery=True,
            )
        )
        result = await sc_async.get_organization_settings(req)

        # The stub must have been invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.GetOrganizationSettingsRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, organization_settings.OrganizationSettings)
    assert result.name == "name_value"
    assert result.enable_asset_discovery is True
@pytest.mark.asyncio
async def test_get_organization_settings_async_from_dict():
    """Re-run the async get_organization_settings test with a dict request."""
    await test_get_organization_settings_async(request_type=dict)
def test_get_organization_settings_field_headers():
    """Check that get_organization_settings sends routing metadata for name."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = securitycenter_service.GetOrganizationSettingsRequest()
    req.name = "name/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(sc_client.transport.get_organization_settings), "__call__"
    ) as rpc:
        rpc.return_value = organization_settings.OrganizationSettings()
        sc_client.get_organization_settings(req)

        # The stub must have been invoked exactly once with our request.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_organization_settings_field_headers_async():
    """Check the async get_organization_settings sends routing metadata for name."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = securitycenter_service.GetOrganizationSettingsRequest()
    req.name = "name/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(sc_async.transport.get_organization_settings), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            organization_settings.OrganizationSettings()
        )
        await sc_async.get_organization_settings(req)

        # The stub must have been invoked with our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in sent_kwargs["metadata"]
def test_get_organization_settings_flattened():
    """Check flattened keyword args populate the get_organization_settings request."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(sc_client.transport.get_organization_settings), "__call__"
    ) as rpc:
        # Give the stub something to hand back.
        rpc.return_value = organization_settings.OrganizationSettings()

        # Invoke the method using only flattened (keyword) fields.
        sc_client.get_organization_settings(name="name_value",)

        # The request object the stub received must carry the flattened value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].name == "name_value"
def test_get_organization_settings_flattened_error():
    """Check that mixing a request object with flattened fields raises."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        sc_client.get_organization_settings(
            securitycenter_service.GetOrganizationSettingsRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_organization_settings_flattened_async():
    """Check flattened kwargs populate the async get_organization_settings request.

    Fix: drop the dead ``call.return_value = OrganizationSettings()`` assignment
    that was immediately overwritten by the FakeUnaryUnaryCall wrapper.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_organization_settings), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            organization_settings.OrganizationSettings()
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_organization_settings(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_organization_settings_flattened_error_async():
    """Check that the async client rejects request-object + flattened fields."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a request object and flattened fields is invalid.
    with pytest.raises(ValueError):
        await sc_async.get_organization_settings(
            securitycenter_service.GetOrganizationSettingsRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.GetSourceRequest, dict,]
)
def test_get_source(request_type, transport: str = "grpc"):
    """Check the get_source round trip against a mocked gRPC stub."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_source), "__call__") as rpc:
        # Canned response the stub hands back.
        rpc.return_value = source.Source(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        result = sc_client.get_source(req)

        # The stub must have been invoked once with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.GetSourceRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, source.Source)
    assert result.name == "name_value"
    assert result.display_name == "display_name_value"
    assert result.description == "description_value"
def test_get_source_empty_call():
    """Coverage failsafe: get_source() with no request and no fields works."""
    sc_client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_source), "__call__") as rpc:
        sc_client.get_source()
        rpc.assert_called()
        # An empty invocation must still produce a default request object.
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.GetSourceRequest()
@pytest.mark.asyncio
async def test_get_source_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.GetSourceRequest,
):
    """Check the async get_source round trip against a mocked gRPC stub."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    req = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_async.transport.get_source), "__call__") as rpc:
        # Canned awaitable response the stub hands back.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            source.Source(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        result = await sc_async.get_source(req)

        # The stub must have been invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == securitycenter_service.GetSourceRequest()

    # The response must round-trip with the expected type and field values.
    assert isinstance(result, source.Source)
    assert result.name == "name_value"
    assert result.display_name == "display_name_value"
    assert result.description == "description_value"
@pytest.mark.asyncio
async def test_get_source_async_from_dict():
    """Re-run the async get_source test with a dict-typed request."""
    await test_get_source_async(request_type=dict)
def test_get_source_field_headers():
    """Check that get_source sends routing metadata for the name field."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = securitycenter_service.GetSourceRequest()
    req.name = "name/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_source), "__call__") as rpc:
        rpc.return_value = source.Source()
        sc_client.get_source(req)

        # The stub must have been invoked exactly once with our request.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_source_field_headers_async():
    """Check that the async get_source sends routing metadata for name."""
    sc_async = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be forwarded as field headers,
    # so populate one with a non-empty value.
    req = securitycenter_service.GetSourceRequest()
    req.name = "name/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_async.transport.get_source), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(source.Source())
        await sc_async.get_source(req)

        # The stub must have been invoked with our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == req

    # The routing header must appear in the call metadata.
    _, _, sent_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in sent_kwargs["metadata"]
def test_get_source_flattened():
    """Check that flattened keyword args populate the get_source request."""
    sc_client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(sc_client.transport.get_source), "__call__") as rpc:
        # Give the stub something to hand back.
        rpc.return_value = source.Source()

        # Invoke the method using only flattened (keyword) fields.
        sc_client.get_source(name="name_value",)

        # The request object the stub received must carry the flattened value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].name == "name_value"
def test_get_source_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.get_source(
            securitycenter_service.GetSourceRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_source_flattened_async():
    """The flattened ``name`` keyword is copied into the request (async path)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A dead synchronous `call.return_value = source.Source()` that was
        # immediately overwritten by the awaitable fake has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(source.Source())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_source(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_source_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.get_source(
            securitycenter_service.GetSourceRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.GroupAssetsRequest, dict]
)
def test_group_assets(request_type, transport: str = "grpc"):
    """group_assets wraps the stub response in a GroupAssetsPager."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional, so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.group_assets), "__call__") as stub:
        stub.return_value = securitycenter_service.GroupAssetsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.group_assets(request)
    # The stub was invoked once with the default request message.
    assert len(stub.mock_calls) == 1
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == securitycenter_service.GroupAssetsRequest()
    # The response is the expected pager type with the token exposed.
    assert isinstance(response, pagers.GroupAssetsPager)
    assert response.next_page_token == "next_page_token_value"
def test_group_assets_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.group_assets), "__call__") as stub:
        client.group_assets()
        stub.assert_called()
        # A default request message is synthesized for the call.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == securitycenter_service.GroupAssetsRequest()
@pytest.mark.asyncio
async def test_group_assets_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.GroupAssetsRequest,
):
    """group_assets (async) wraps the stub response in a GroupAssetsAsyncPager."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional, so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.group_assets), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.GroupAssetsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.group_assets(request)
    # The stub was invoked with the default request message.
    assert len(stub.mock_calls)
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == securitycenter_service.GroupAssetsRequest()
    # The response is the async pager type with the token exposed.
    assert isinstance(response, pagers.GroupAssetsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_group_assets_async_from_dict():
    # Re-run the async test with a plain dict request payload.
    await test_group_assets_async(request_type=dict)
def test_group_assets_field_headers():
    """Verify ``parent`` is sent as an x-goog-request-params header."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A URI-bound field with a non-empty value forces a routing header.
    request = securitycenter_service.GroupAssetsRequest()
    request.parent = "parent/value"
    with mock.patch.object(type(client.transport.group_assets), "__call__") as stub:
        stub.return_value = securitycenter_service.GroupAssetsResponse()
        client.group_assets(request)
        # The stub received exactly our request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_group_assets_field_headers_async():
    """Verify ``parent`` is sent as an x-goog-request-params header (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A URI-bound field with a non-empty value forces a routing header.
    request = securitycenter_service.GroupAssetsRequest()
    request.parent = "parent/value"
    with mock.patch.object(type(client.transport.group_assets), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.GroupAssetsResponse()
        )
        await client.group_assets(request)
        # The stub received exactly our request object.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
def test_group_assets_pager(transport_name: str = "grpc"):
    # Iterating a GroupAssetsPager should walk every page the stub returns and
    # carry the routing-header metadata on the pager.
    client = SecurityCenterClient(
        # NOTE(review): the credentials class (not an instance) is passed here,
        # no parentheses — generator quirk; presumably accepted. Confirm before changing.
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 results, with a
        # trailing RuntimeError so any over-read fails loudly.
        call.side_effect = (
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        # Expected routing metadata: an empty "parent" field header.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.group_assets(request={})
        # _metadata is private pager state; this pins the routing header.
        assert pager._metadata == metadata
        # Iterating the pager transparently fetches all four pages (6 results).
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, securitycenter_service.GroupResult) for i in results)
def test_group_assets_pages(transport_name: str = "grpc"):
    # Page-level iteration: each page's raw next_page_token matches the fixture.
    client = SecurityCenterClient(
        # NOTE(review): credentials class passed un-instantiated — generator quirk.
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
        # Set the response to a series of pages (tokens "abc", "def", "ghi", "").
        call.side_effect = (
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.group_assets(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_group_assets_async_pager():
    # Async pager: iterating lazily fetches all pages returned by the stub.
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.group_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 results).
        call.side_effect = (
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.group_assets(request={},)
        # The first page is fetched eagerly, exposing its token.
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All six results across the four pages were yielded.
        assert len(responses) == 6
        assert all(isinstance(i, securitycenter_service.GroupResult) for i in responses)
@pytest.mark.asyncio
async def test_group_assets_async_pages():
    # Async page-level iteration: each page's raw token matches the fixture.
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.group_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (tokens "abc", "def", "ghi", "").
        call.side_effect = (
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupAssetsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.group_assets(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.GroupFindingsRequest, dict]
)
def test_group_findings(request_type, transport: str = "grpc"):
    """group_findings wraps the stub response in a GroupFindingsPager."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional, so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.group_findings), "__call__") as stub:
        stub.return_value = securitycenter_service.GroupFindingsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.group_findings(request)
    # The stub was invoked once with the default request message.
    assert len(stub.mock_calls) == 1
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == securitycenter_service.GroupFindingsRequest()
    # The response is the expected pager type with the token exposed.
    assert isinstance(response, pagers.GroupFindingsPager)
    assert response.next_page_token == "next_page_token_value"
def test_group_findings_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.group_findings), "__call__") as stub:
        client.group_findings()
        stub.assert_called()
        # A default request message is synthesized for the call.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == securitycenter_service.GroupFindingsRequest()
@pytest.mark.asyncio
async def test_group_findings_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.GroupFindingsRequest,
):
    """group_findings (async) wraps the stub response in a GroupFindingsAsyncPager."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional, so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.group_findings), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.GroupFindingsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.group_findings(request)
    # The stub was invoked with the default request message.
    assert len(stub.mock_calls)
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == securitycenter_service.GroupFindingsRequest()
    # The response is the async pager type with the token exposed.
    assert isinstance(response, pagers.GroupFindingsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_group_findings_async_from_dict():
    # Re-run the async test with a plain dict request payload.
    await test_group_findings_async(request_type=dict)
def test_group_findings_field_headers():
    """Verify ``parent`` is sent as an x-goog-request-params header."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A URI-bound field with a non-empty value forces a routing header.
    request = securitycenter_service.GroupFindingsRequest()
    request.parent = "parent/value"
    with mock.patch.object(type(client.transport.group_findings), "__call__") as stub:
        stub.return_value = securitycenter_service.GroupFindingsResponse()
        client.group_findings(request)
        # The stub received exactly our request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_group_findings_field_headers_async():
    """Verify ``parent`` is sent as an x-goog-request-params header (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A URI-bound field with a non-empty value forces a routing header.
    request = securitycenter_service.GroupFindingsRequest()
    request.parent = "parent/value"
    with mock.patch.object(type(client.transport.group_findings), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.GroupFindingsResponse()
        )
        await client.group_findings(request)
        # The stub received exactly our request object.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
def test_group_findings_flattened():
    """Flattened ``parent``/``group_by`` keywords populate the request."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the transport method and fake its response.
    with mock.patch.object(type(client.transport.group_findings), "__call__") as stub:
        stub.return_value = securitycenter_service.GroupFindingsResponse()
        # Invoke via flattened keywords rather than a request object.
        client.group_findings(parent="parent_value", group_by="group_by_value")
    # Exactly one call, with each keyword copied into the request.
    assert len(stub.mock_calls) == 1
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0].parent == "parent_value"
    assert call_args[0].group_by == "group_by_value"
def test_group_findings_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.group_findings(
            securitycenter_service.GroupFindingsRequest(),
            parent="parent_value",
            group_by="group_by_value",
        )
@pytest.mark.asyncio
async def test_group_findings_flattened_async():
    """Flattened ``parent``/``group_by`` keywords populate the request (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A dead synchronous `call.return_value = GroupFindingsResponse()` that
        # was immediately overwritten by the awaitable fake has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.GroupFindingsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.group_findings(
            parent="parent_value", group_by="group_by_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].group_by
        mock_val = "group_by_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_group_findings_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.group_findings(
            securitycenter_service.GroupFindingsRequest(),
            parent="parent_value",
            group_by="group_by_value",
        )
def test_group_findings_pager(transport_name: str = "grpc"):
    # Iterating a GroupFindingsPager should walk every page the stub returns
    # and carry the routing-header metadata on the pager.
    client = SecurityCenterClient(
        # NOTE(review): credentials class passed un-instantiated — generator quirk.
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 results, with a
        # trailing RuntimeError so any over-read fails loudly.
        call.side_effect = (
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        # Expected routing metadata: an empty "parent" field header.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.group_findings(request={})
        # _metadata is private pager state; this pins the routing header.
        assert pager._metadata == metadata
        # Iterating the pager transparently fetches all four pages (6 results).
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, securitycenter_service.GroupResult) for i in results)
def test_group_findings_pages(transport_name: str = "grpc"):
    # Page-level iteration: each page's raw next_page_token matches the fixture.
    client = SecurityCenterClient(
        # NOTE(review): credentials class passed un-instantiated — generator quirk.
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
        # Set the response to a series of pages (tokens "abc", "def", "ghi", "").
        call.side_effect = (
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.group_findings(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_group_findings_async_pager():
    # Async pager: iterating lazily fetches all pages returned by the stub.
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.group_findings), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 results).
        call.side_effect = (
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.group_findings(request={},)
        # The first page is fetched eagerly, exposing its token.
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All six results across the four pages were yielded.
        assert len(responses) == 6
        assert all(isinstance(i, securitycenter_service.GroupResult) for i in responses)
@pytest.mark.asyncio
async def test_group_findings_async_pages():
    # Async page-level iteration: each page's raw token matches the fixture.
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.group_findings), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (tokens "abc", "def", "ghi", "").
        call.side_effect = (
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[], next_page_token="def",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[securitycenter_service.GroupResult(),],
                next_page_token="ghi",
            ),
            securitycenter_service.GroupFindingsResponse(
                group_by_results=[
                    securitycenter_service.GroupResult(),
                    securitycenter_service.GroupResult(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.group_findings(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.ListAssetsRequest, dict]
)
def test_list_assets(request_type, transport: str = "grpc"):
    """list_assets wraps the stub response in a ListAssetsPager."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional, so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.list_assets), "__call__") as stub:
        stub.return_value = securitycenter_service.ListAssetsResponse(
            next_page_token="next_page_token_value", total_size=1086,
        )
        response = client.list_assets(request)
    # The stub was invoked once with the default request message.
    assert len(stub.mock_calls) == 1
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == securitycenter_service.ListAssetsRequest()
    # The response is the expected pager type with token and size exposed.
    assert isinstance(response, pagers.ListAssetsPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.total_size == 1086
def test_list_assets_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_assets), "__call__") as stub:
        client.list_assets()
        stub.assert_called()
        # A default request message is synthesized for the call.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == securitycenter_service.ListAssetsRequest()
@pytest.mark.asyncio
async def test_list_assets_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.ListAssetsRequest,
):
    """list_assets (async) wraps the stub response in a ListAssetsAsyncPager."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional, so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.list_assets), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListAssetsResponse(
                next_page_token="next_page_token_value", total_size=1086,
            )
        )
        response = await client.list_assets(request)
    # The stub was invoked with the default request message.
    assert len(stub.mock_calls)
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == securitycenter_service.ListAssetsRequest()
    # The response is the async pager type with token and size exposed.
    assert isinstance(response, pagers.ListAssetsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.total_size == 1086
@pytest.mark.asyncio
async def test_list_assets_async_from_dict():
    # Re-run the async test with a plain dict request payload.
    await test_list_assets_async(request_type=dict)
def test_list_assets_field_headers():
    """Verify ``parent`` is sent as an x-goog-request-params header."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A URI-bound field with a non-empty value forces a routing header.
    request = securitycenter_service.ListAssetsRequest()
    request.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_assets), "__call__") as stub:
        stub.return_value = securitycenter_service.ListAssetsResponse()
        client.list_assets(request)
        # The stub received exactly our request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_assets_field_headers_async():
    """Verify ``parent`` is sent as an x-goog-request-params header (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A URI-bound field with a non-empty value forces a routing header.
    request = securitycenter_service.ListAssetsRequest()
    request.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_assets), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListAssetsResponse()
        )
        await client.list_assets(request)
        # The stub received exactly our request object.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header must appear in the outgoing metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
def test_list_assets_pager(transport_name: str = "grpc"):
    # Iterating a ListAssetsPager should walk every page the stub returns and
    # carry the routing-header metadata on the pager.
    client = SecurityCenterClient(
        # NOTE(review): credentials class passed un-instantiated — generator quirk.
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 results, with a
        # trailing RuntimeError so any over-read fails loudly.
        call.side_effect = (
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[], next_page_token="def",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="ghi",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
            ),
            RuntimeError,
        )
        # Expected routing metadata: an empty "parent" field header.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_assets(request={})
        # _metadata is private pager state; this pins the routing header.
        assert pager._metadata == metadata
        # Iterating the pager transparently fetches all four pages (6 results).
        results = [i for i in pager]
        assert len(results) == 6
        assert all(
            isinstance(i, securitycenter_service.ListAssetsResponse.ListAssetsResult)
            for i in results
        )
def test_list_assets_pages(transport_name: str = "grpc"):
    # Page-level iteration: each page's raw next_page_token matches the fixture.
    client = SecurityCenterClient(
        # NOTE(review): credentials class passed un-instantiated — generator quirk.
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Set the response to a series of pages (tokens "abc", "def", "ghi", "").
        call.side_effect = (
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[], next_page_token="def",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="ghi",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_assets(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_assets_async_pager():
    # Async pager: iterating lazily fetches all pages returned by the stub.
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 results).
        call.side_effect = (
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[], next_page_token="def",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="ghi",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_assets(request={},)
        # The first page is fetched eagerly, exposing its token.
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All six results across the four pages were yielded.
        assert len(responses) == 6
        assert all(
            isinstance(i, securitycenter_service.ListAssetsResponse.ListAssetsResult)
            for i in responses
        )
@pytest.mark.asyncio
async def test_list_assets_async_pages():
    """Verify the async list_assets pager exposes raw pages with their tokens."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="abc",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[], next_page_token="def",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
                next_page_token="ghi",
            ),
            securitycenter_service.ListAssetsResponse(
                list_assets_results=[
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                    securitycenter_service.ListAssetsResponse.ListAssetsResult(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_assets(request={})).pages:
            pages.append(page_)
        # The last page carries an empty next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.ListFindingsRequest, dict,]
)
def test_list_findings(request_type, transport: str = "grpc"):
    """Verify list_findings issues the expected request and returns a pager."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = securitycenter_service.ListFindingsResponse(
            next_page_token="next_page_token_value", total_size=1086,
        )
        response = client.list_findings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.ListFindingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListFindingsPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.total_size == 1086
def test_list_findings_empty_call():
    """Verify list_findings() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        client.list_findings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.ListFindingsRequest()
@pytest.mark.asyncio
async def test_list_findings_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.ListFindingsRequest,
):
    """Verify the async list_findings issues the expected request and returns an async pager."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListFindingsResponse(
                next_page_token="next_page_token_value", total_size=1086,
            )
        )
        response = await client.list_findings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.ListFindingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListFindingsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.total_size == 1086
@pytest.mark.asyncio
async def test_list_findings_async_from_dict():
    """Re-run the async list_findings test with a dict-typed request."""
    await test_list_findings_async(request_type=dict)
def test_list_findings_field_headers():
    """Verify list_findings attaches the routing header built from request.parent."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.ListFindingsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        call.return_value = securitycenter_service.ListFindingsResponse()
        client.list_findings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_findings_field_headers_async():
    """Verify the async list_findings attaches the routing header built from request.parent."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.ListFindingsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListFindingsResponse()
        )
        await client.list_findings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_findings_pager(transport_name: str = "grpc"):
    """Verify the sync list_findings pager iterates all findings and carries routing metadata."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(), finding.Finding(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[], next_page_token="def",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(),], next_page_token="ghi",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(),],
            ),
            RuntimeError,
        )
        # Expected routing metadata for an empty request (empty parent).
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_findings(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 findings spread over the four fake pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, finding.Finding) for i in results)
def test_list_findings_pages(transport_name: str = "grpc"):
    """Verify the sync list_findings pager exposes raw pages with their tokens."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(), finding.Finding(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[], next_page_token="def",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(),], next_page_token="ghi",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_findings(request={}).pages)
        # The last page carries an empty next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_findings_async_pager():
    """Verify the async list_findings pager yields every finding across all pages."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_findings), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(), finding.Finding(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[], next_page_token="def",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(),], next_page_token="ghi",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_findings(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 findings spread over the four fake pages.
        assert len(responses) == 6
        assert all(isinstance(i, finding.Finding) for i in responses)
@pytest.mark.asyncio
async def test_list_findings_async_pages():
    """Verify the async list_findings pager exposes raw pages with their tokens."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_findings), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(), finding.Finding(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[], next_page_token="def",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(),], next_page_token="ghi",
            ),
            securitycenter_service.ListFindingsResponse(
                findings=[finding.Finding(), finding.Finding(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_findings(request={})).pages:
            pages.append(page_)
        # The last page carries an empty next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.ListSourcesRequest, dict,]
)
def test_list_sources(request_type, transport: str = "grpc"):
    """Verify list_sources issues the expected request and returns a pager."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = securitycenter_service.ListSourcesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_sources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.ListSourcesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListSourcesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_sources_empty_call():
    """Verify list_sources() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        client.list_sources()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.ListSourcesRequest()
@pytest.mark.asyncio
async def test_list_sources_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.ListSourcesRequest,
):
    """Verify the async list_sources issues the expected request and returns an async pager."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListSourcesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_sources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.ListSourcesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListSourcesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_sources_async_from_dict():
    """Re-run the async list_sources test with a dict-typed request."""
    await test_list_sources_async(request_type=dict)
def test_list_sources_field_headers():
    """Verify list_sources attaches the routing header built from request.parent."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.ListSourcesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        call.return_value = securitycenter_service.ListSourcesResponse()
        client.list_sources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_sources_field_headers_async():
    """Verify the async list_sources attaches the routing header built from request.parent."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.ListSourcesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListSourcesResponse()
        )
        await client.list_sources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_sources_flattened():
    """Verify the flattened `parent` kwarg populates the ListSourcesRequest."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = securitycenter_service.ListSourcesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_sources(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_sources_flattened_error():
    """Verify mixing a request object with flattened kwargs raises ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_sources(
            securitycenter_service.ListSourcesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_sources_flattened_async():
    """Verify the flattened `parent` kwarg populates the ListSourcesRequest (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        # Designate an appropriate return value for the call.  (Removed a dead
        # duplicate assignment of a plain response: only the awaitable wrapper
        # below is observed by the async client.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            securitycenter_service.ListSourcesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_sources(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_sources_flattened_error_async():
    """Verify mixing a request object with flattened kwargs raises ValueError (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_sources(
            securitycenter_service.ListSourcesRequest(), parent="parent_value",
        )
def test_list_sources_pager(transport_name: str = "grpc"):
    """Verify the sync list_sources pager iterates all sources and carries routing metadata."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(), source.Source(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[], next_page_token="def",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(),], next_page_token="ghi",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(),],
            ),
            RuntimeError,
        )
        # Expected routing metadata for an empty request (empty parent).
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_sources(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 sources spread over the four fake pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, source.Source) for i in results)
def test_list_sources_pages(transport_name: str = "grpc"):
    """Verify the sync list_sources pager exposes raw pages with their tokens."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(), source.Source(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[], next_page_token="def",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(),], next_page_token="ghi",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_sources(request={}).pages)
        # The last page carries an empty next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_sources_async_pager():
    """Verify the async list_sources pager yields every source across all pages."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_sources), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(), source.Source(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[], next_page_token="def",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(),], next_page_token="ghi",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_sources(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 sources spread over the four fake pages.
        assert len(responses) == 6
        assert all(isinstance(i, source.Source) for i in responses)
@pytest.mark.asyncio
async def test_list_sources_async_pages():
    """Verify the async list_sources pager exposes raw pages with their tokens."""
    # Fix: instantiate the anonymous credentials — sibling tests pass an
    # instance, and passing the class object only works by accident.
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_sources), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(), source.Source(),],
                next_page_token="abc",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[], next_page_token="def",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(),], next_page_token="ghi",
            ),
            securitycenter_service.ListSourcesResponse(
                sources=[source.Source(), source.Source(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_sources(request={})).pages:
            pages.append(page_)
        # The last page carries an empty next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.RunAssetDiscoveryRequest, dict,]
)
def test_run_asset_discovery(request_type, transport: str = "grpc"):
    """Verify run_asset_discovery issues the expected request and returns a long-running operation future."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.run_asset_discovery(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.RunAssetDiscoveryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_run_asset_discovery_empty_call():
    """Verify run_asset_discovery() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        client.run_asset_discovery()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.RunAssetDiscoveryRequest()
@pytest.mark.asyncio
async def test_run_asset_discovery_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.RunAssetDiscoveryRequest,
):
    """Verify the async run_asset_discovery issues the expected request and returns an operation future."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.run_asset_discovery(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.RunAssetDiscoveryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_run_asset_discovery_async_from_dict():
    """Re-run the async run_asset_discovery test with a dict-typed request."""
    await test_run_asset_discovery_async(request_type=dict)
def test_run_asset_discovery_field_headers():
    """Verify run_asset_discovery attaches the routing header built from request.parent."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.RunAssetDiscoveryRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.run_asset_discovery(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_run_asset_discovery_field_headers_async():
    """Verify the async run_asset_discovery attaches the routing header built from request.parent."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.RunAssetDiscoveryRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.run_asset_discovery(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_run_asset_discovery_flattened():
    """Verify the flattened `parent` kwarg populates the RunAssetDiscoveryRequest."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.run_asset_discovery(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_run_asset_discovery_flattened_error():
    """Verify mixing a request object with flattened kwargs raises ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.run_asset_discovery(
            securitycenter_service.RunAssetDiscoveryRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_run_asset_discovery_flattened_async():
    """Verify the flattened `parent` kwarg populates the RunAssetDiscoveryRequest (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.run_asset_discovery), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (Removed a dead
        # duplicate assignment of a plain Operation: only the awaitable wrapper
        # below is observed by the async client.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.run_asset_discovery(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_run_asset_discovery_flattened_error_async():
    """Verify mixing a request object with flattened kwargs raises ValueError (async)."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.run_asset_discovery(
            securitycenter_service.RunAssetDiscoveryRequest(), parent="parent_value",
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.SetFindingStateRequest, dict,]
)
def test_set_finding_state(request_type, transport: str = "grpc"):
    """Verify set_finding_state issues the expected request and returns the Finding."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = finding.Finding(
            name="name_value",
            parent="parent_value",
            resource_name="resource_name_value",
            state=finding.Finding.State.ACTIVE,
            category="category_value",
            external_uri="external_uri_value",
        )
        response = client.set_finding_state(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.SetFindingStateRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, finding.Finding)
    assert response.name == "name_value"
    assert response.parent == "parent_value"
    assert response.resource_name == "resource_name_value"
    assert response.state == finding.Finding.State.ACTIVE
    assert response.category == "category_value"
    assert response.external_uri == "external_uri_value"
def test_set_finding_state_empty_call():
    """Verify set_finding_state() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        client.set_finding_state()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.SetFindingStateRequest()
@pytest.mark.asyncio
async def test_set_finding_state_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.SetFindingStateRequest,
):
    """Verify the async set_finding_state issues the expected request and returns the Finding."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            finding.Finding(
                name="name_value",
                parent="parent_value",
                resource_name="resource_name_value",
                state=finding.Finding.State.ACTIVE,
                category="category_value",
                external_uri="external_uri_value",
            )
        )
        response = await client.set_finding_state(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.SetFindingStateRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, finding.Finding)
    assert response.name == "name_value"
    assert response.parent == "parent_value"
    assert response.resource_name == "resource_name_value"
    assert response.state == finding.Finding.State.ACTIVE
    assert response.category == "category_value"
    assert response.external_uri == "external_uri_value"
@pytest.mark.asyncio
async def test_set_finding_state_async_from_dict():
    """Re-run the async test with a dict request to cover dict-to-proto coercion."""
    await test_set_finding_state_async(request_type=dict)
def test_set_finding_state_field_headers():
    """Verify the request's `name` field is sent as x-goog-request-params metadata."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.SetFindingStateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        call.return_value = finding.Finding()
        client.set_finding_state(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_finding_state_field_headers_async():
    """Async variant: verify `name` is sent as x-goog-request-params metadata."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.SetFindingStateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding())
        await client.set_finding_state(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_finding_state_flattened():
    """Verify flattened keyword args are packed into the SetFindingStateRequest."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = finding.Finding()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_finding_state(
            name="name_value",
            state=finding.Finding.State.ACTIVE,
            start_time=timestamp_pb2.Timestamp(seconds=751),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].state
        mock_val = finding.Finding.State.ACTIVE
        assert arg == mock_val
        assert TimestampRule().to_proto(args[0].start_time) == timestamp_pb2.Timestamp(
            seconds=751
        )
def test_set_finding_state_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_finding_state(
            securitycenter_service.SetFindingStateRequest(),
            name="name_value",
            state=finding.Finding.State.ACTIVE,
            start_time=timestamp_pb2.Timestamp(seconds=751),
        )
@pytest.mark.asyncio
async def test_set_finding_state_flattened_async():
    """Async variant: flattened keyword args are packed into the request.

    Fix: the original assigned ``call.return_value`` twice in a row; the
    first (a bare ``finding.Finding()``) was dead code, immediately
    overwritten by the awaitable fake. Only the effective assignment is kept.
    The unused ``response =`` binding is also dropped.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.set_finding_state), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.set_finding_state(
            name="name_value",
            state=finding.Finding.State.ACTIVE,
            start_time=timestamp_pb2.Timestamp(seconds=751),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].state
        mock_val = finding.Finding.State.ACTIVE
        assert arg == mock_val
        assert TimestampRule().to_proto(args[0].start_time) == timestamp_pb2.Timestamp(
            seconds=751
        )
@pytest.mark.asyncio
async def test_set_finding_state_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.set_finding_state(
            securitycenter_service.SetFindingStateRequest(),
            name="name_value",
            state=finding.Finding.State.ACTIVE,
            start_time=timestamp_pb2.Timestamp(seconds=751),
        )
@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
def test_set_iam_policy(request_type, transport: str = "grpc"):
    """Exercise set_iam_policy with a mocked gRPC stub and check the response fields."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
    """Calling set_iam_policy with no request sends the default request message."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        client.set_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """Exercise set_iam_policy over grpc_asyncio with a mocked stub and check the response fields."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
    """Re-run the async test with a dict request to cover dict-to-proto coercion."""
    await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
    """Verify the request's `resource` field is sent as x-goog-request-params metadata."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """Async variant: verify `resource` is sent as x-goog-request-params metadata."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict_foreign():
    """set_iam_policy accepts a plain dict request containing foreign (IAM) message fields.

    Fix: dropped the unused ``response =`` binding — only the fact that the
    stub was invoked is asserted.
    """
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()
def test_set_iam_policy_flattened():
    """Verify the flattened `resource` kwarg is packed into the SetIamPolicyRequest."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
def test_set_iam_policy_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
    """Async variant: the flattened `resource` kwarg is packed into the request.

    Fix: the original assigned ``call.return_value`` twice in a row; the
    first (a bare ``policy_pb2.Policy()``) was dead code, immediately
    overwritten by the awaitable fake. Only the effective assignment is kept.
    The unused ``response =`` binding is also dropped.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.set_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.parametrize(
    "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
    """Exercise test_iam_permissions with a mocked gRPC stub and check the response fields."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_empty_call():
    """Calling test_iam_permissions with no request sends the default request message."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        client.test_iam_permissions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
    transport: str = "grpc_asyncio",
    request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
    """Exercise test_iam_permissions over grpc_asyncio with a mocked stub and check the response fields."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
    """Re-run the async test with a dict request to cover dict-to-proto coercion."""
    await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
    """Verify the request's `resource` field is sent as x-goog-request-params metadata."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Async variant: verify `resource` is sent as x-goog-request-params metadata."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
    """test_iam_permissions accepts a plain dict request containing foreign (IAM) fields.

    Fix: dropped the unused ``response =`` binding — only the fact that the
    stub was invoked is asserted.
    """
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()
def test_test_iam_permissions_flattened():
    """Verify flattened kwargs are packed into the TestIamPermissionsRequest."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
        arg = args[0].permissions
        mock_val = ["permissions_value"]
        assert arg == mock_val
def test_test_iam_permissions_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.test_iam_permissions(
            iam_policy_pb2.TestIamPermissionsRequest(),
            resource="resource_value",
            permissions=["permissions_value"],
        )
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
    """Async variant: flattened kwargs are packed into the request.

    Fix: the original assigned ``call.return_value`` twice in a row; the
    first (a bare ``TestIamPermissionsResponse()``) was dead code,
    immediately overwritten by the awaitable fake. Only the effective
    assignment is kept. The unused ``response =`` binding is also dropped.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
        arg = args[0].permissions
        mock_val = ["permissions_value"]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.test_iam_permissions(
            iam_policy_pb2.TestIamPermissionsRequest(),
            resource="resource_value",
            permissions=["permissions_value"],
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.UpdateFindingRequest, dict,]
)
def test_update_finding(request_type, transport: str = "grpc"):
    """Exercise update_finding with a mocked gRPC stub and check the response fields."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_finding.Finding(
            name="name_value",
            parent="parent_value",
            resource_name="resource_name_value",
            state=gcs_finding.Finding.State.ACTIVE,
            category="category_value",
            external_uri="external_uri_value",
        )
        response = client.update_finding(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateFindingRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_finding.Finding)
    assert response.name == "name_value"
    assert response.parent == "parent_value"
    assert response.resource_name == "resource_name_value"
    assert response.state == gcs_finding.Finding.State.ACTIVE
    assert response.category == "category_value"
    assert response.external_uri == "external_uri_value"
def test_update_finding_empty_call():
    """Calling update_finding with no request sends the default request message."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        client.update_finding()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateFindingRequest()
@pytest.mark.asyncio
async def test_update_finding_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.UpdateFindingRequest,
):
    """Exercise update_finding over grpc_asyncio with a mocked stub and check the response fields."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_finding.Finding(
                name="name_value",
                parent="parent_value",
                resource_name="resource_name_value",
                state=gcs_finding.Finding.State.ACTIVE,
                category="category_value",
                external_uri="external_uri_value",
            )
        )
        response = await client.update_finding(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateFindingRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_finding.Finding)
    assert response.name == "name_value"
    assert response.parent == "parent_value"
    assert response.resource_name == "resource_name_value"
    assert response.state == gcs_finding.Finding.State.ACTIVE
    assert response.category == "category_value"
    assert response.external_uri == "external_uri_value"
@pytest.mark.asyncio
async def test_update_finding_async_from_dict():
    """Re-run the async test with a dict request to cover dict-to-proto coercion."""
    await test_update_finding_async(request_type=dict)
def test_update_finding_field_headers():
    """Verify the nested `finding.name` field is sent as x-goog-request-params metadata."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateFindingRequest()
    request.finding.name = "finding.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        call.return_value = gcs_finding.Finding()
        client.update_finding(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "finding.name=finding.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_finding_field_headers_async():
    """Async variant: verify `finding.name` is sent as x-goog-request-params metadata."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateFindingRequest()
    request.finding.name = "finding.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
        await client.update_finding(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "finding.name=finding.name/value",) in kw[
        "metadata"
    ]
def test_update_finding_flattened():
    """Verify the flattened `finding` kwarg is packed into the UpdateFindingRequest."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_finding.Finding()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_finding(finding=gcs_finding.Finding(name="name_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].finding
        mock_val = gcs_finding.Finding(name="name_value")
        assert arg == mock_val
def test_update_finding_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_finding(
            securitycenter_service.UpdateFindingRequest(),
            finding=gcs_finding.Finding(name="name_value"),
        )
@pytest.mark.asyncio
async def test_update_finding_flattened_async():
    """Async variant: the flattened `finding` kwarg is packed into the request.

    Fix: the original assigned ``call.return_value`` twice in a row; the
    first (a bare ``gcs_finding.Finding()``) was dead code, immediately
    overwritten by the awaitable fake. Only the effective assignment is kept.
    The unused ``response =`` binding is also dropped.
    """
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.update_finding(
            finding=gcs_finding.Finding(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].finding
        mock_val = gcs_finding.Finding(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_finding_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_finding(
            securitycenter_service.UpdateFindingRequest(),
            finding=gcs_finding.Finding(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.UpdateOrganizationSettingsRequest, dict,]
)
def test_update_organization_settings(request_type, transport: str = "grpc"):
    """Exercise update_organization_settings with a mocked gRPC stub and check the response fields."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_organization_settings.OrganizationSettings(
            name="name_value", enable_asset_discovery=True,
        )
        response = client.update_organization_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_organization_settings.OrganizationSettings)
    assert response.name == "name_value"
    assert response.enable_asset_discovery is True
def test_update_organization_settings_empty_call():
    """Verify calling with no request and no flattened fields still works."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        client.update_organization_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still produce a default request object.
        assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest()
@pytest.mark.asyncio
async def test_update_organization_settings_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.UpdateOrganizationSettingsRequest,
):
    """Async variant: verify the stub is awaited and the response is unpacked."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_organization_settings.OrganizationSettings(
                name="name_value", enable_asset_discovery=True,
            )
        )
        response = await client.update_organization_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_organization_settings.OrganizationSettings)
    assert response.name == "name_value"
    assert response.enable_asset_discovery is True
@pytest.mark.asyncio
async def test_update_organization_settings_async_from_dict():
    """Re-run the async test with a dict request to exercise dict coercion."""
    await test_update_organization_settings_async(request_type=dict)
def test_update_organization_settings_field_headers():
    """Verify routing metadata (x-goog-request-params) is derived from the request."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateOrganizationSettingsRequest()
    request.organization_settings.name = "organization_settings.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        call.return_value = gcs_organization_settings.OrganizationSettings()
        client.update_organization_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "organization_settings.name=organization_settings.name/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_organization_settings_field_headers_async():
    """Async variant: verify routing metadata is derived from the request."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateOrganizationSettingsRequest()
    request.organization_settings.name = "organization_settings.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_organization_settings.OrganizationSettings()
        )
        await client.update_organization_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "organization_settings.name=organization_settings.name/value",
    ) in kw["metadata"]
def test_update_organization_settings_flattened():
    """Verify flattened kwargs are packed into the request object."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_organization_settings.OrganizationSettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_organization_settings(
            organization_settings=gcs_organization_settings.OrganizationSettings(
                name="name_value"
            ),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].organization_settings
        mock_val = gcs_organization_settings.OrganizationSettings(name="name_value")
        assert arg == mock_val
def test_update_organization_settings_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_organization_settings(
            securitycenter_service.UpdateOrganizationSettingsRequest(),
            organization_settings=gcs_organization_settings.OrganizationSettings(
                name="name_value"
            ),
        )
@pytest.mark.asyncio
async def test_update_organization_settings_flattened_async():
    """Async variant: verify flattened kwargs are packed into the request object."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_organization_settings), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A single assignment suffices; the awaitable wrapper is what the
        # async client expects from the stub.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_organization_settings.OrganizationSettings()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_organization_settings(
            organization_settings=gcs_organization_settings.OrganizationSettings(
                name="name_value"
            ),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].organization_settings
        mock_val = gcs_organization_settings.OrganizationSettings(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_organization_settings_flattened_error_async():
    """Async variant: mixing a request object with flattened args raises ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_organization_settings(
            securitycenter_service.UpdateOrganizationSettingsRequest(),
            organization_settings=gcs_organization_settings.OrganizationSettings(
                name="name_value"
            ),
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.UpdateSourceRequest, dict,]
)
def test_update_source(request_type, transport: str = "grpc"):
    """Verify update_source forwards the request and unpacks the response."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_source.Source(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        response = client.update_source(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateSourceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_source.Source)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_update_source_empty_call():
    """Verify calling with no request and no flattened fields still works."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        client.update_source()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still produce a default request object.
        assert args[0] == securitycenter_service.UpdateSourceRequest()
@pytest.mark.asyncio
async def test_update_source_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.UpdateSourceRequest,
):
    """Async variant: verify the stub is awaited and the response is unpacked."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_source.Source(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.update_source(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateSourceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_source.Source)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_source_async_from_dict():
    """Re-run the async test with a dict request to exercise dict coercion."""
    await test_update_source_async(request_type=dict)
def test_update_source_field_headers():
    """Verify routing metadata (x-goog-request-params) is derived from the request."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateSourceRequest()
    request.source.name = "source.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        call.return_value = gcs_source.Source()
        client.update_source(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "source.name=source.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_source_field_headers_async():
    """Async variant: verify routing metadata is derived from the request."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateSourceRequest()
    request.source.name = "source.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
        await client.update_source(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "source.name=source.name/value",) in kw["metadata"]
def test_update_source_flattened():
    """Verify flattened kwargs are packed into the request object."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_source.Source()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_source(source=gcs_source.Source(name="name_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].source
        mock_val = gcs_source.Source(name="name_value")
        assert arg == mock_val
def test_update_source_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_source(
            securitycenter_service.UpdateSourceRequest(),
            source=gcs_source.Source(name="name_value"),
        )
@pytest.mark.asyncio
async def test_update_source_flattened_async():
    """Async variant: verify flattened kwargs are packed into the request object."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_source), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single assignment suffices; the awaitable wrapper is what the
        # async client expects from the stub.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_source(
            source=gcs_source.Source(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].source
        mock_val = gcs_source.Source(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_source_flattened_error_async():
    """Async variant: mixing a request object with flattened args raises ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_source(
            securitycenter_service.UpdateSourceRequest(),
            source=gcs_source.Source(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type", [securitycenter_service.UpdateSecurityMarksRequest, dict,]
)
def test_update_security_marks(request_type, transport: str = "grpc"):
    """Verify update_security_marks forwards the request and unpacks the response."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_security_marks.SecurityMarks(name="name_value",)
        response = client.update_security_marks(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateSecurityMarksRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_security_marks.SecurityMarks)
    assert response.name == "name_value"
def test_update_security_marks_empty_call():
    """Verify calling with no request and no flattened fields still works."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        client.update_security_marks()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still produce a default request object.
        assert args[0] == securitycenter_service.UpdateSecurityMarksRequest()
@pytest.mark.asyncio
async def test_update_security_marks_async(
    transport: str = "grpc_asyncio",
    request_type=securitycenter_service.UpdateSecurityMarksRequest,
):
    """Async variant: verify the stub is awaited and the response is unpacked."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_security_marks.SecurityMarks(name="name_value",)
        )
        response = await client.update_security_marks(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == securitycenter_service.UpdateSecurityMarksRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcs_security_marks.SecurityMarks)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_update_security_marks_async_from_dict():
    """Re-run the async test with a dict request to exercise dict coercion."""
    await test_update_security_marks_async(request_type=dict)
def test_update_security_marks_field_headers():
    """Verify routing metadata (x-goog-request-params) is derived from the request."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateSecurityMarksRequest()
    request.security_marks.name = "security_marks.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        call.return_value = gcs_security_marks.SecurityMarks()
        client.update_security_marks(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "security_marks.name=security_marks.name/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_security_marks_field_headers_async():
    """Async variant: verify routing metadata is derived from the request."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = securitycenter_service.UpdateSecurityMarksRequest()
    request.security_marks.name = "security_marks.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_security_marks.SecurityMarks()
        )
        await client.update_security_marks(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "security_marks.name=security_marks.name/value",
    ) in kw["metadata"]
def test_update_security_marks_flattened():
    """Verify flattened kwargs are packed into the request object."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcs_security_marks.SecurityMarks()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_security_marks(
            security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].security_marks
        mock_val = gcs_security_marks.SecurityMarks(name="name_value")
        assert arg == mock_val
def test_update_security_marks_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_security_marks(
            securitycenter_service.UpdateSecurityMarksRequest(),
            security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
        )
@pytest.mark.asyncio
async def test_update_security_marks_flattened_async():
    """Async variant: verify flattened kwargs are packed into the request object."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_security_marks), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A single assignment suffices; the awaitable wrapper is what the
        # async client expects from the stub.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_security_marks.SecurityMarks()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_security_marks(
            security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].security_marks
        mock_val = gcs_security_marks.SecurityMarks(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_security_marks_flattened_error_async():
    """Async variant: mixing a request object with flattened args raises ValueError."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_security_marks(
            securitycenter_service.UpdateSecurityMarksRequest(),
            security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
        )
def test_credentials_transport_error():
    """Verify mutually exclusive client options each raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.SecurityCenterGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecurityCenterClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.SecurityCenterGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecurityCenterClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.SecurityCenterGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = SecurityCenterClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = SecurityCenterClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.SecurityCenterGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecurityCenterClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """Verify a client adopts a supplied transport instance verbatim."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.SecurityCenterGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = SecurityCenterClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Verify both sync and async transports expose a gRPC channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.SecurityCenterGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel

    transport = transports.SecurityCenterGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecurityCenterGrpcTransport,
        transports.SecurityCenterGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Verify transports fall back to Application Default Credentials."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """Verify gRPC is the default transport for the sync client."""
    # A client should use the gRPC transport by default.
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.SecurityCenterGrpcTransport,)
def test_security_center_base_transport_error():
    """Verify supplying both credentials and credentials_file raises."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.SecurityCenterTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_security_center_base_transport():
    """Verify every abstract transport method raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.securitycenter_v1beta1.services.security_center.transports.SecurityCenterTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.SecurityCenterTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_source",
        "create_finding",
        "get_iam_policy",
        "get_organization_settings",
        "get_source",
        "group_assets",
        "group_findings",
        "list_assets",
        "list_findings",
        "list_sources",
        "run_asset_discovery",
        "set_finding_state",
        "set_iam_policy",
        "test_iam_permissions",
        "update_finding",
        "update_organization_settings",
        "update_source",
        "update_security_marks",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()

    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_security_center_base_transport_with_credentials_file():
    """Verify a credentials file is loaded with the expected scopes and project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.securitycenter_v1beta1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecurityCenterTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_security_center_base_transport_with_adc():
    """Verify the base transport falls back to ADC when no credentials are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.securitycenter_v1beta1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecurityCenterTransport()
        adc.assert_called_once()
def test_security_center_auth_adc():
    """Verify the client requests ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SecurityCenterClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecurityCenterGrpcTransport,
        transports.SecurityCenterGrpcAsyncIOTransport,
    ],
)
def test_security_center_transport_auth_adc(transport_class):
    """Verify transports forward scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SecurityCenterGrpcTransport, grpc_helpers),
        (transports.SecurityCenterGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_security_center_transport_create_channel(transport_class, grpc_helpers):
    """Verify the channel is created with the expected host, creds, and options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])

        create_channel.assert_called_with(
            "securitycenter.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="securitycenter.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecurityCenterGrpcTransport,
        transports.SecurityCenterGrpcAsyncIOTransport,
    ],
)
def test_security_center_grpc_transport_client_cert_source_for_mtls(transport_class):
    """Verify mTLS channel credential precedence: explicit creds, then cert source."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_security_center_host_no_port():
    """Verify a portless api_endpoint gains the default :443 port."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="securitycenter.googleapis.com"
        ),
    )
    assert client.transport._host == "securitycenter.googleapis.com:443"
def test_security_center_host_with_port():
    """Verify an explicit port in api_endpoint is preserved."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="securitycenter.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "securitycenter.googleapis.com:8000"
def test_security_center_grpc_transport_channel():
    """A channel passed to the transport constructor is used as-is and no
    SSL credentials are recorded."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SecurityCenterGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to the None singleton with 'is' (PEP 8); '== None' can be
    # defeated by a custom __eq__ and triggers linter E711.
    assert transport._ssl_channel_credentials is None
def test_security_center_grpc_asyncio_transport_channel():
    """A channel passed to the asyncio transport constructor is used as-is and
    no SSL credentials are recorded."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SecurityCenterGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: identity comparison with None per PEP 8 ('== None' is fragile).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecurityCenterGrpcTransport,
        transports.SecurityCenterGrpcAsyncIOTransport,
    ],
)
def test_security_center_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint/client_cert_source still produce a working
    mTLS channel (with a DeprecationWarning)."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    # ADC is consulted because no credentials were passed.
                    adc.assert_called_once()
            # The callback's cert/key pair must feed grpc.ssl_channel_credentials...
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # ...and the channel must target the mTLS endpoint with those creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecurityCenterGrpcTransport,
        transports.SecurityCenterGrpcAsyncIOTransport,
    ],
)
def test_security_center_transport_channel_mtls_with_adc(transport_class):
    """With api_mtls_endpoint but no client_cert_source, SSL credentials come
    from ADC (google.auth.transport.grpc.SslCredentials)."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated api_mtls_endpoint argument must warn.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The channel must use the ADC-derived SSL credentials.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_security_center_grpc_lro_client():
    """The gRPC transport lazily builds and caches an LRO operations client."""
    client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    transport = client.transport
    # The helper must be an api-core OperationsClient...
    assert isinstance(transport.operations_client, operations_v1.OperationsClient)
    # ...and the same instance must be returned on every access.
    assert transport.operations_client is transport.operations_client
def test_security_center_grpc_lro_async_client():
    """The asyncio transport lazily builds and caches an async LRO client."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    transport = client.transport
    # The helper must be an api-core OperationsAsyncClient...
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient)
    # ...and the same instance must be returned on every access.
    assert transport.operations_client is transport.operations_client
def test_asset_path():
    """asset_path() builds the fully-qualified asset resource name."""
    org, asset = "squid", "clam"
    actual = SecurityCenterClient.asset_path(org, asset)
    assert actual == "organizations/{}/assets/{}".format(org, asset)
def test_parse_asset_path():
    """parse_asset_path() inverts asset_path()."""
    expected = {"organization": "whelk", "asset": "octopus"}
    path = SecurityCenterClient.asset_path(**expected)
    assert SecurityCenterClient.parse_asset_path(path) == expected
def test_finding_path():
    """finding_path() builds the fully-qualified finding resource name."""
    org, source, finding = "oyster", "nudibranch", "cuttlefish"
    actual = SecurityCenterClient.finding_path(org, source, finding)
    assert actual == "organizations/{}/sources/{}/findings/{}".format(
        org, source, finding
    )
def test_parse_finding_path():
    """parse_finding_path() inverts finding_path()."""
    expected = {"organization": "mussel", "source": "winkle", "finding": "nautilus"}
    path = SecurityCenterClient.finding_path(**expected)
    assert SecurityCenterClient.parse_finding_path(path) == expected
def test_organization_settings_path():
    """organization_settings_path() builds the settings resource name."""
    org = "scallop"
    actual = SecurityCenterClient.organization_settings_path(org)
    assert actual == "organizations/{}/organizationSettings".format(org)
def test_parse_organization_settings_path():
    """parse_organization_settings_path() inverts organization_settings_path()."""
    expected = {"organization": "abalone"}
    path = SecurityCenterClient.organization_settings_path(**expected)
    assert SecurityCenterClient.parse_organization_settings_path(path) == expected
def test_security_marks_path():
    """security_marks_path() builds the securityMarks resource name."""
    org, asset = "squid", "clam"
    actual = SecurityCenterClient.security_marks_path(org, asset)
    assert actual == "organizations/{}/assets/{}/securityMarks".format(org, asset)
def test_parse_security_marks_path():
    """parse_security_marks_path() inverts security_marks_path()."""
    expected = {"organization": "whelk", "asset": "octopus"}
    path = SecurityCenterClient.security_marks_path(**expected)
    assert SecurityCenterClient.parse_security_marks_path(path) == expected
def test_source_path():
    """source_path() builds the fully-qualified source resource name."""
    org, source = "oyster", "nudibranch"
    actual = SecurityCenterClient.source_path(org, source)
    assert actual == "organizations/{}/sources/{}".format(org, source)
def test_parse_source_path():
    """parse_source_path() inverts source_path()."""
    expected = {"organization": "cuttlefish", "source": "mussel"}
    path = SecurityCenterClient.source_path(**expected)
    assert SecurityCenterClient.parse_source_path(path) == expected
def test_common_billing_account_path():
    """common_billing_account_path() builds the billingAccounts resource name."""
    billing_account = "winkle"
    actual = SecurityCenterClient.common_billing_account_path(billing_account)
    assert actual == "billingAccounts/{}".format(billing_account)
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts common_billing_account_path()."""
    expected = {"billing_account": "nautilus"}
    path = SecurityCenterClient.common_billing_account_path(**expected)
    assert SecurityCenterClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path() builds the folders resource name."""
    folder = "scallop"
    assert SecurityCenterClient.common_folder_path(folder) == "folders/{}".format(folder)
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts common_folder_path()."""
    expected = {"folder": "abalone"}
    path = SecurityCenterClient.common_folder_path(**expected)
    assert SecurityCenterClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path() builds the organizations resource name."""
    org = "squid"
    assert SecurityCenterClient.common_organization_path(org) == "organizations/{}".format(org)
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts common_organization_path()."""
    expected = {"organization": "clam"}
    path = SecurityCenterClient.common_organization_path(**expected)
    assert SecurityCenterClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path() builds the projects resource name."""
    project = "whelk"
    assert SecurityCenterClient.common_project_path(project) == "projects/{}".format(project)
def test_parse_common_project_path():
    """parse_common_project_path() inverts common_project_path()."""
    expected = {"project": "octopus"}
    path = SecurityCenterClient.common_project_path(**expected)
    assert SecurityCenterClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path() builds the locations resource name."""
    project, location = "oyster", "nudibranch"
    actual = SecurityCenterClient.common_location_path(project, location)
    assert actual == "projects/{}/locations/{}".format(project, location)
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    expected = {"project": "cuttlefish", "location": "mussel"}
    path = SecurityCenterClient.common_location_path(**expected)
    assert SecurityCenterClient.parse_common_location_path(path) == expected
def test_client_with_default_client_info():
    """A custom client_info is forwarded to _prep_wrapped_messages, whether the
    transport is built by the client or constructed directly."""
    client_info = gapic_v1.client_info.ClientInfo()
    base_transport = transports.SecurityCenterTransport
    # Built implicitly by the client constructor.
    with mock.patch.object(base_transport, "_prep_wrapped_messages") as prep:
        SecurityCenterClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info
        )
        prep.assert_called_once_with(client_info)
    # Built explicitly from the resolved transport class.
    with mock.patch.object(base_transport, "_prep_wrapped_messages") as prep:
        SecurityCenterClient.get_transport_class()(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context closes the underlying gRPC channel."""
    client = SecurityCenterAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as close:
        async with client:
            # Not closed while still inside the context.
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context closes the transport's channel."""
    # Maps transport name -> private channel attribute on the transport.
    for transport_name, channel_attr in {"grpc": "_grpc_channel"}.items():
        client = SecurityCenterClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        channel_type = type(getattr(client.transport, channel_attr))
        with mock.patch.object(channel_type, "close") as close:
            with client:
                # Not closed while still inside the context.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    for transport_name in ("grpc",):
        client = SecurityCenterClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (SecurityCenterClient, transports.SecurityCenterGrpcTransport),
        (SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in client options is exchanged for API-key credentials and
    handed to the transport constructor."""
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    mock_cred = mock.Mock()
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials, mock.patch.object(
        transport_class, "__init__"
    ) as patched:
        get_api_key_credentials.return_value = mock_cred
        patched.return_value = None
        client = client_class(client_options=options)
        # The transport must receive the API-key-derived credentials.
        patched.assert_called_once_with(
            credentials=mock_cred,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
|
googleapis/python-securitycenter
|
tests/unit/gapic/securitycenter_v1beta1/test_security_center.py
|
Python
|
apache-2.0
| 234,437
|
[
"Octopus"
] |
6e4d4fb81f9c985cf420e1278b8ae8651497fb07850fa7f18e8329f1ae1d2dae
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**********************************
**espresso.interaction.Tabulated**
**********************************
"""
# -*- coding: iso-8859-1 -*-
from espresso import pmi, infinity
from espresso.esutil import *
from espresso.interaction.Potential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_Tabulated, \
interaction_VerletListTabulated, \
interaction_VerletListAdressTabulated, \
interaction_VerletListHadressTabulated, \
interaction_CellListTabulated, \
interaction_FixedPairListTabulated
#interaction_FixedTripleListTabulated
class TabulatedLocal(PotentialLocal, interaction_Tabulated):
    """Worker-side wrapper around the C++ tabulated pair potential."""

    def __init__(self, itype, filename, cutoff=infinity):
        """Construct the local Tabulated potential from a table file."""
        # Only ranks in the active CPU group talk to the C++ layer.
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_Tabulated, itype, filename, cutoff)
class VerletListAdressTabulatedLocal(InteractionLocal, interaction_VerletListAdressTabulated):
    """Worker-side tabulated AdResS interaction based on Verlet lists."""

    def __init__(self, vl, fixedtupleList):
        """Bind the Verlet list and AdResS tuple list to the C++ interaction."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListAdressTabulated, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        """Assign the atomistic-level potential for the given type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        """Assign the coarse-grained potential for the given type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressTabulatedLocal(InteractionLocal, interaction_VerletListHadressTabulated):
    """Worker-side tabulated H-AdResS interaction based on Verlet lists."""

    def __init__(self, vl, fixedtupleList):
        """Bind the Verlet list and H-AdResS tuple list to the C++ interaction."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListHadressTabulated, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        """Assign the atomistic-level potential for the given type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        """Assign the coarse-grained potential for the given type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListTabulatedLocal(InteractionLocal, interaction_VerletListTabulated):
    """Worker-side tabulated pair interaction based on Verlet lists."""

    def __init__(self, vl):
        """Bind the Verlet list to the C++ interaction."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListTabulated, vl)

    def setPotential(self, type1, type2, potential):
        """Assign the potential for the given particle type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the potential registered for the given type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self, type1, type2)
class CellListTabulatedLocal(InteractionLocal, interaction_CellListTabulated):
    """Worker-side tabulated pair interaction based on cell lists."""

    def __init__(self, stor):
        """Bind the storage (cell list provider) to the C++ interaction."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_CellListTabulated, stor)

    def setPotential(self, type1, type2, potential):
        """Assign the potential for the given particle type pair."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListTabulatedLocal(InteractionLocal, interaction_FixedPairListTabulated):
    """Worker-side tabulated bonded interaction over a fixed pair list."""

    def __init__(self, system, vl, potential):
        """Bind system, fixed pair list, and potential to the C++ interaction."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListTabulated, system, vl, potential)

    def setPotential(self, potential):
        """Replace the potential used for every pair in the list."""
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)
if pmi.isController:
    # Controller-side proxy classes: attribute access and method calls are
    # forwarded to the matching *Local classes on the worker ranks via PMI.
    class Tabulated(Potential):
        'The Tabulated potential.'
        # NOTE(review): unlike the Interaction proxies below, no explicit
        # __metaclass__ = pmi.Proxy here - presumably Potential already
        # supplies the proxy metaclass; confirm against Potential.py.
        pmiproxydefs = dict(
            cls = 'espresso.interaction.TabulatedLocal',
            pmiproperty = ['itype', 'filename', 'cutoff']
            )

    class VerletListAdressTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.VerletListAdressTabulatedLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class VerletListHadressTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.VerletListHadressTabulatedLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class VerletListTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.VerletListTabulatedLocal',
            pmicall = ['setPotential','getPotential']
            )

    class CellListTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.CellListTabulatedLocal',
            pmicall = ['setPotential']
            )

    class FixedPairListTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.FixedPairListTabulatedLocal',
            pmicall = ['setPotential', 'setFixedPairList', 'getFixedPairList']
            )
|
BackupTheBerlios/espressopp
|
src/interaction/Tabulated.py
|
Python
|
gpl-3.0
| 7,354
|
[
"ESPResSo"
] |
a4f08f6b3a27742a24b12123d0e4f833f3093321f8d859fe7504f025c012b487
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 Wintermute0110 <wintermute0110@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# Advanced MAME Launcher MAME specific stuff.
# --- AEL packages ---
from .constants import *
from .utils import *
from .misc import *
from .db import *
from .filters import *
from .mame_misc import *
# --- Kodi modules ---
import xbmcgui
# --- Python standard library ---
import binascii
import struct
import xml.etree.ElementTree as ET
import zipfile as z
# -------------------------------------------------------------------------------------------------
# Data structures
# -------------------------------------------------------------------------------------------------
# Substitute notable drivers with a proper name
# Drivers are located in https://github.com/mamedev/mame/blob/master/src/mame/drivers/<driver_name>.cpp
# Keys are MAME driver source file names; values are human-readable hardware/system names.
mame_driver_better_name_dic = {
    # --- Atari ---
    'atari_s1.cpp' : 'Atari Generation/System 1',
    'atari_s2.cpp' : 'Atari Generation/System 2 and 3',
    'atarifb.cpp' : 'Atari Football hardware',
    'atarittl.cpp' : 'Atari / Kee Games Driver',
    'asteroid.cpp' : 'Atari Asteroids hardware',
    'atetris.cpp' : 'Atari Tetris hardware',
    'avalnche.cpp' : 'Atari Avalanche hardware',
    'bzone.cpp' : 'Atari Battlezone hardware',
    'bwidow.cpp' : 'Atari Black Widow hardware',
    'boxer.cpp' : 'Atari Boxer (prototype) driver',
    'canyon.cpp' : 'Atari Canyon Bomber hardware',
    'cball.cpp' : 'Atari Cannonball (prototype) driver',
    'ccastles.cpp' : 'Atari Crystal Castles hardware',
    'centiped.cpp' : 'Atari Centipede hardware',
    'cloak.cpp' : 'Atari Cloak & Dagger hardware',
    'destroyr.cpp' : 'Atari Destroyer driver',
    'mhavoc.cpp' : 'Atari Major Havoc hardware',
    'mgolf.cpp' : 'Atari Mini Golf (prototype) driver',
    'pong.cpp' : 'Atari Pong hardware',
    # --- Capcom ---
    '1942.cpp' : 'Capcom 1942',
    '1943.cpp' : 'Capcom 1943: The Battle of Midway',
    'capcom.cpp' : 'Capcom A0015405',
    'gng.cpp' : "Capcom Ghosts'n Goblins",
    'cps1.cpp' : 'Capcom Play System 1',
    'cps2.cpp' : 'Capcom Play System 2',
    'cps3.cpp' : 'Capcom Play System 3',
    # --- Konami ---
    '88games.cpp' : 'Konami 88 Games',
    'ajax.cpp' : 'Konami GX770',
    'aliens.cpp' : 'Konami Aliens',
    'asterix.cpp' : 'Konami Asterix',
    'konamigv.cpp' : 'Konami GV System (PSX Hardware)',
    'konblands.cpp' : 'Konami GX455 - Konami Badlands',
    'konamigx.cpp' : 'Konami System GX',
    'konamim2.cpp' : 'Konami M2 Hardware',
    # --- Midway ---
    'midtunit.cpp' : 'Midway T-unit system',
    'midvunit.cpp' : 'Midway V-Unit games',
    'midwunit.cpp' : 'Midway Wolf-unit system',
    'midxunit.cpp' : 'Midway X-unit system',
    'midyunit.cpp' : 'Williams/Midway Y/Z-unit system',
    'midzeus.cpp' : 'Midway Zeus games',
    # --- Namco ---
    'galaxian.cpp' : 'Namco Galaxian-derived hardware',
    'namcops2.cpp' : 'Namco System 246 / System 256 (Sony PS2 based)',
    # --- SNK ---
    'neodriv.hxx' : 'SNK NeoGeo AES',
    'neogeo.cpp' : 'SNK NeoGeo MVS',
    # --- Misc important drivers (important enough to have a fancy name!) ---
    'seta.cpp' : 'Seta Hardware',
    # --- SEGA ---
    # Lesser known boards
    'segajw.cpp' : 'SEGA GOLDEN POKER SERIES',
    'segam1.cpp' : 'SEGA M1 hardware',
    'segaufo.cpp' : 'SEGA UFO Catcher, Z80 type hardware',
    # Boards listed in wikipedia
    # Sega Z80 board is included in galaxian.cpp
    'vicdual.cpp' : 'SEGA VIC Dual Game board',
    'segag80r.cpp' : 'SEGA G-80 raster hardware',
    'segag80v.cpp' : 'SEGA G-80 vector hardware',
    'zaxxon.cpp' : 'SEGA Zaxxon hardware',
    'segald.cpp' : 'SEGA LaserDisc Hardware',
    'system1.cpp' : 'SEGA System1 / System 2',
    'segac2.cpp' : 'SEGA System C (System 14)',
    'segae.cpp' : 'SEGA System E',
    'segas16a.cpp' : 'SEGA System 16A',
    'segas16b.cpp' : 'SEGA System 16B',
    'system16.cpp' : 'SEGA System 16 / 18 bootlegs',
    'segas24.cpp' : 'SEGA System 24',
    'segas18.cpp' : 'SEGA System 18',
    'kyugo.cpp' : 'SEGA Kyugo Hardware',
    'segahang.cpp' : 'SEGA Hang On hardware', # AKA Sega Space Harrier
    'segaorun.cpp' : 'SEGA Out Run hardware',
    'segaxbd.cpp' : 'SEGA X-board',
    'segaybd.cpp' : 'SEGA Y-board',
    'segas32.cpp' : 'SEGA System 32',
    'model1.cpp' : 'SEGA Model 1',
    'model2.cpp' : 'SEGA Model 2',
    'model3.cpp' : 'SEGA Model 3',
    'stv.cpp' : 'SEGA ST-V hardware',
    'naomi.cpp' : 'SEGA Naomi / Naomi 2 / Atomiswave',
    'segasp.cpp' : 'SEGA System SP (Spider)', # Naomi derived
    'chihiro.cpp' : 'SEGA Chihiro (Xbox-based)',
    'triforce.cpp' : 'SEGA Triforce Hardware',
    'lindbergh.cpp' : 'SEGA Lindbergh',
    # --- Taito ---
    # Ordered alphabetically
    'taito_b.cpp' : 'Taito B System',
    'taito_f2.cpp' : 'Taito F2 System',
    'taito_f3.cpp' : 'Taito F3 System',
    'taito_h.cpp' : 'Taito H system',
    'taito_l.cpp' : 'Taito L System',
    'taito_o.cpp' : 'Taito O system (Gambling)',
    'taito_x.cpp' : 'Taito X system',
    'taito_z.cpp' : 'Taito Z System (twin 68K with optional Z80)',
    'taitoair.cpp' : 'Taito Air System',
    'taitogn.cpp' : 'Taito GNET Motherboard',
    'taitojc.cpp' : 'Taito JC System',
    'taitopjc.cpp' : 'Taito Power-JC System',
    'taitosj.cpp' : 'Taito SJ system',
    'taitottl.cpp' : 'Taito Discrete Hardware Games',
    'taitotz.cpp' : 'Taito Type-Zero hardware',
    'taitowlf.cpp' : 'Taito Wolf System',
    # --- SONY ---
    'zn.cpp' : 'Sony ZN1/ZN2 (Arcade PSX)',
}
# Some Software Lists don't follow the convention of adding the company name at the beginning.
# I will try to create pull requests to fix these and if the PRs are not accepted then
# SL names will be changed using the data here.
# Develop a test script to check whether these substitutions are used or not.
# Maps the original Software List description -> description with the manufacturer prefixed.
SL_better_name_dic = {
    'Amiga AGA disk images' : 'Commodore Amiga AGA disk images',
    'Amiga CD-32 CD-ROMs' : 'Commodore Amiga CD-32 CD-ROMs',
    'Amiga CDTV CD-ROMs' : 'Commodore Amiga CDTV CD-ROMs',
    'Amiga ECS disk images' : 'Commodore Amiga ECS disk images',
    'Amiga OCS disk images' : 'Commodore Amiga OCS disk images',
    'CC-40 cartridges' : 'Texas Instruments CC-40 cartridges',
    'CD-i CD-ROMs' : 'Philips/Sony CD-i CD-ROMs',
    'COMX-35 diskettes' : 'COMX COMX-35 diskettes',
    'EPSON PX-4 ROM capsules' : 'Epson PX-4 ROM capsules',
    'EPSON PX-8 ROM capsules' : 'Epson PX-8 ROM capsules',
    'IQ-151 cartridges' : 'ZPA Nový Bor IQ-151 cartridges',
    'IQ-151 disk images' : 'ZPA Nový Bor IQ-151 disk images',
    'Mac Harddisks' : 'Apple Mac Harddisks',
    'Macintosh 400K/800K Disk images' : 'Apple Macintosh 400K/800K Disk images',
    'Macintosh High Density Disk images' : 'Apple Macintosh High Density Disk images',
    'MC-1502 disk images' : 'Elektronika MC-1502 disk images',
    'MD-2 disk images' : 'Morrow Micro Decision MD-2 disk images',
    'Mega CD (Euro) CD-ROMs' : 'Sega Mega CD (Euro) CD-ROMs',
    'Mega CD (Jpn) CD-ROMs' : 'Sega Mega CD (Jpn) CD-ROMs',
    'MZ-2000 cassettes' : 'Sharp MZ-2000 cassettes',
    'MZ-2000 disk images' : 'Sharp MZ-2000 disk images',
    'MZ-2500 disk images' : 'Sharp MZ-2500 disk images',
    'Pippin CD-ROMs' : 'Apple/Bandai Pippin CD-ROMs',
    'Pippin disk images' : 'Apple/Bandai Pippin disk images',
    'SEGA Computer 3000 cartridges' : 'Sega Computer 3000 cartridges',
    'SEGA Computer 3000 cassettes' : 'Sega Computer 3000 cassettes',
    'Z88 ROM cartridges' : 'Cambridge Computer Z88 ROM cartridges',
    'ZX80 cassettes' : 'Sinclair ZX80 cassettes',
    'ZX81 cassettes' : 'Sinclair ZX81 cassettes',
    'ZX Spectrum +3 disk images' : 'Sinclair ZX Spectrum +3 disk images',
    'ZX Spectrum Beta Disc / TR-DOS disk images' : 'Sinclair ZX Spectrum Beta Disc / TR-DOS disk images',
}
#
# Numerical MAME version. Allows for comparisons like ver_mame >= MAME_VERSION_0190
# Support MAME versions higher than 0.53 August 12th 2001.
# See header of MAMEINFO.dat for a list of all MAME versions.
#
# M.mmm.Xbb
# | | | |-> Beta flag 0, 1, ..., 99
# | | |---> Release kind flag
# | | 5 for non-beta, non-alpha, non RC versions.
# | | 2 for RC versions
# | | 1 for beta versions
# | | 0 for alpha versions
# | |-----> Minor version 0, 1, ..., 999
# |---------> Major version 0, ..., infinity
#
# See https://retropie.org.uk/docs/MAME/
# See https://www.mamedev.org/oldrel.html
#
# Examples:
# '0.37b5' -> 37105 (mame4all-pi, lr-mame2000 released 27 Jul 2000)
# '0.37b16'  -> 37116  (Last inconsistent MAME version, released 02 Jul 2001)
# '0.53' -> 53500 (MAME versioning is consistent from this release, released 12 Aug 2001)
# '0.78' -> 78500 (lr-mame2003, lr-mame2003-plus)
# '0.139' -> 139500 (lr-mame2010)
# '0.160' -> 160500 (lr-mame2015)
# '0.174' -> 174500 (lr-mame2016)
# '0.206' -> 206500
#
# mame_version_raw examples:
# a) '0.194 (mame0194)' from '<mame build="0.194 (mame0194)" debug="no" mameconfig="10">'
#
# re.search() returns a MatchObject https://docs.python.org/2/library/re.html#re.MatchObject
def mame_get_numerical_version(mame_version_str):
    """Convert a MAME version string into a comparable integer.

    Encoding: major * 1,000,000 + minor * 1,000 + release_flag * 100 + beta,
    where release_flag is 5 for stable releases and 1 for betas (see the
    comment block above). Supports both the old '0.37b5' scheme and the
    modern '0.206' scheme.

    Raises TypeError if the string cannot be parsed.
    """
    log_debug('mame_get_numerical_version() mame_version_str = "{}"'.format(mame_version_str))
    mame_version_int = 0
    # Fix: use raw string literals for the regex patterns. '\d' in a plain
    # string is an invalid escape sequence (DeprecationWarning since Python
    # 3.6, SyntaxWarning in 3.12).
    # Search for old version scheme x.yyybzz
    m_obj_old = re.search(r'^(\d+)\.(\d+)b(\d+)', mame_version_str)
    # Search for modern, consistent versioning system x.yyy
    m_obj_modern = re.search(r'^(\d+)\.(\d+)', mame_version_str)
    # Check the old scheme first: the modern pattern also matches its prefix.
    if m_obj_old:
        major = int(m_obj_old.group(1))
        minor = int(m_obj_old.group(2))
        beta = int(m_obj_old.group(3))
        release_flag = 1  # beta release
        mame_version_int = major * 1000000 + minor * 1000 + release_flag * 100 + beta
    elif m_obj_modern:
        major = int(m_obj_modern.group(1))
        minor = int(m_obj_modern.group(2))
        release_flag = 5  # stable (non-alpha, non-beta, non-RC) release
        mame_version_int = major * 1000000 + minor * 1000 + release_flag * 100
    else:
        t = 'MAME version "{}" cannot be parsed.'.format(mame_version_str)
        log_error(t)
        raise TypeError(t)
    log_debug('mame_get_numerical_version() mame_version_int = {}'.format(mame_version_int))
    return mame_version_int
# Returns a string like '0.224 (mame0224)'.
def mame_get_MAME_exe_version(cfg, mame_prog_FN):
    """Run 'mame -version' and return the first line of its stdout."""
    mame_dir, mame_exec = os.path.split(mame_prog_FN.getPath())
    log_info('mame_get_MAME_exe_version() mame_prog_FN "{}"'.format(mame_prog_FN.getPath()))
    # Capture stdout/stderr into the configured scratch files.
    stdout_f = cfg.MAME_STDOUT_VER_PATH.getPath()
    err_f = cfg.MAME_STDERR_VER_PATH.getPath()
    with io.open(stdout_f, 'wb') as out, io.open(err_f, 'wb') as err:
        p = subprocess.Popen([mame_prog_FN.getPath(), '-version'], stdout = out, stderr = err, cwd = mame_dir)
        p.wait()
    # The version string is the first line MAME prints.
    lines = utils_load_file_to_slist(cfg.MAME_STDOUT_VER_PATH.getPath())
    return lines[0]
# Counts MAME machines in MAME XML file.
def mame_count_MAME_machines(XML_path_FN):
    """Count machine entries in a MAME -listxml output file.

    Modern MAME emits <machine name=...> tags; legacy MAME emitted
    <game name=...>. A file must contain only one kind.

    Raises TypeError if both tag kinds are found in the same file.
    """
    log_debug('mame_count_MAME_machines() BEGIN...')
    log_debug('XML "{}"'.format(XML_path_FN.getPath()))
    num_machines_modern = 0
    num_machines_legacy = 0
    with io.open(XML_path_FN.getPath(), 'rt', encoding = 'utf-8') as f:
        for line in f:
            # Fix: substring membership instead of find() > 0. find() returns
            # the match index, so a tag starting at column 0 (index 0) was
            # silently not counted by the old code.
            if '<machine name=' in line:
                num_machines_modern += 1
            elif '<game name=' in line:
                num_machines_legacy += 1
    if num_machines_modern and num_machines_legacy:
        log_error('num_machines_modern = {}'.format(num_machines_modern))
        log_error('num_machines_legacy = {}'.format(num_machines_legacy))
        log_error('Both cannot be > 0!')
        raise TypeError('XML file mixes modern <machine> and legacy <game> tags')
    return num_machines_modern if num_machines_modern > num_machines_legacy else num_machines_legacy
# 1) Extracts MAME XML.
# 2) Counts number of MAME machines.
# 3) Gets MAME version from the XML file.
# 4) Creates MAME XML control file.
#
# cfg is the addon configuration object; st_dic is the caller's status
# dictionary (unused in this function body, kept for interface symmetry
# with the other init/preprocess functions).
def mame_extract_MAME_XML(cfg, st_dic):
    pDialog = KodiProgressDialog()
    # Extract XML from MAME executable.
    mame_prog_FN = FileName(cfg.settings['mame_prog'])
    (mame_dir, mame_exec) = os.path.split(mame_prog_FN.getPath())
    log_info('mame_extract_MAME_XML() mame_prog_FN "{}"'.format(mame_prog_FN.getPath()))
    log_info('mame_extract_MAME_XML() Saving XML "{}"'.format(cfg.MAME_XML_PATH.getPath()))
    log_info('mame_extract_MAME_XML() mame_dir "{}"'.format(mame_dir))
    log_info('mame_extract_MAME_XML() mame_exec "{}"'.format(mame_exec))
    pDialog.startProgress('Extracting MAME XML database. Progress bar is not accurate.')
    XML_path_FN = cfg.MAME_XML_PATH
    # Run "mame -listxml" with stdout redirected straight into the XML file.
    with io.open(XML_path_FN.getPath(), 'wb') as out, io.open(cfg.MAME_STDERR_PATH.getPath(), 'wb') as err:
        p = subprocess.Popen([mame_prog_FN.getPath(), '-listxml'], stdout = out, stderr = err, cwd = mame_dir)
        # Poll the child once per second. The progress value is the number of
        # elapsed seconds, which is why the dialog warns it is not accurate.
        # NOTE(review): count may exceed 100 on slow machines -- presumably
        # the dialog clamps the value; confirm in KodiProgressDialog.
        count = 0
        while p.poll() is None:
            time.sleep(1)
            count += 1
            pDialog.updateProgress(count)
    pDialog.endProgress()
    time_extracting = time.time()
    # Count number of machines. Useful for later progress dialogs and statistics.
    log_info('mame_extract_MAME_XML() Counting number of machines ...')
    pDialog.startProgress('Counting number of MAME machines...')
    total_machines = mame_count_MAME_machines(cfg.MAME_XML_PATH)
    pDialog.endProgress()
    log_info('mame_extract_MAME_XML() Found {} machines.'.format(total_machines))
    # Get XML file stat info.
    # See https://docs.python.org/3/library/os.html#os.stat_result
    statinfo = os.stat(XML_path_FN.getPath())
    # Get MAME version from the XML.
    # Only the first iterparse event is needed: it delivers the opening tag of
    # the root element, whose 'build' attribute is the MAME version string.
    # This avoids parsing the whole (very large) XML file.
    xml_f = io.open(XML_path_FN.getPath(), 'rt', encoding = 'utf-8')
    xml_iter = ET.iterparse(xml_f, events = ("start", "end"))
    event, root = next(xml_iter)
    xml_f.close()
    ver_mame_str = root.attrib['build']
    ver_mame_int = mame_get_numerical_version(ver_mame_str)
    # Create the MAME XML control file. Only change used fields.
    XML_control_dic = db_new_MAME_XML_control_dic()
    db_safe_edit(XML_control_dic, 't_XML_extraction', time_extracting)
    db_safe_edit(XML_control_dic, 't_XML_preprocessing', time.time())
    db_safe_edit(XML_control_dic, 'total_machines', total_machines)
    db_safe_edit(XML_control_dic, 'st_size', statinfo.st_size)
    db_safe_edit(XML_control_dic, 'st_mtime', statinfo.st_mtime)
    db_safe_edit(XML_control_dic, 'ver_mame_int', ver_mame_int)
    db_safe_edit(XML_control_dic, 'ver_mame_str', ver_mame_str)
    utils_write_JSON_file(cfg.MAME_XML_CONTROL_PATH.getPath(), XML_control_dic, verbose = True)
# 1) Counts number of MAME machines
# 2) Creates MAME XML control file.
#
# In MAME 2003 Plus operation mode the XML ships already extracted, so only
# the preprocessing steps (machine counting and control file creation) run.
# cfg is the addon configuration object; st_dic is the caller's status
# dictionary (unused in this function body, kept for interface symmetry).
def mame_preprocess_RETRO_MAME2003PLUS(cfg, st_dic):
    pDialog = KodiProgressDialog()
    # In MAME 2003 Plus MAME XML is already extracted.
    XML_path_FN = FileName(cfg.settings['xml_2003_path'])
    # Count number of machines. Useful for later progress dialogs and statistics.
    # BUG FIX: log prefixes corrected to match the actual function name
    # (they said 'mame_process_RETRO_MAME2003PLUS()').
    log_info('mame_preprocess_RETRO_MAME2003PLUS() Counting number of machines ...')
    pDialog.startProgress('Counting number of MAME machines...')
    total_machines = mame_count_MAME_machines(XML_path_FN)
    pDialog.endProgress()
    log_info('mame_preprocess_RETRO_MAME2003PLUS() Found {} machines.'.format(total_machines))
    # Get XML file stat info.
    # See https://docs.python.org/3/library/os.html#os.stat_result
    statinfo = os.stat(XML_path_FN.getPath())
    # Get MAME version from the XML (although we know is MAME 2003 Plus).
    # In MAME 2003 Plus the MAME version is not in the XML file.
    ver_mame_str = MAME2003PLUS_VERSION_RAW
    ver_mame_int = mame_get_numerical_version(ver_mame_str)
    # Create the MAME XML control file. Only change used fields.
    XML_control_dic = db_new_MAME_XML_control_dic()
    db_safe_edit(XML_control_dic, 't_XML_preprocessing', time.time())
    db_safe_edit(XML_control_dic, 'total_machines', total_machines)
    db_safe_edit(XML_control_dic, 'st_size', statinfo.st_size)
    db_safe_edit(XML_control_dic, 'st_mtime', statinfo.st_mtime)
    db_safe_edit(XML_control_dic, 'ver_mame_int', ver_mame_int)
    db_safe_edit(XML_control_dic, 'ver_mame_str', ver_mame_str)
    utils_write_JSON_file(cfg.MAME_2003_PLUS_XML_CONTROL_PATH.getPath(), XML_control_dic, verbose = True)
# After this function returns we have:
# 1) A valid and verified-for-existence MAME_XML_path.
# 2) A valid XML_control_dic and the XML control file is created and/or current.
#
# Returns tuple (MAME_XML_path [FileName object], XML_control_FN [FileName object]),
# or None when an error was flagged in st_dic (callers must check st_dic).
def mame_init_MAME_XML(cfg, st_dic, force_rebuild = False):
    log_info('mame_init_MAME_XML() Beginning extract/process of MAME.xml...')
    if cfg.settings['op_mode'] == OP_MODE_VANILLA and force_rebuild:
        log_info('Forcing rebuilding of Vanilla MAME XML.')
        MAME_XML_path = cfg.MAME_XML_PATH
        XML_control_FN = cfg.MAME_XML_CONTROL_PATH
        # Extract, count number of machines and create XML control file.
        mame_extract_MAME_XML(cfg, st_dic)
        if st_dic['abort']: return
    elif cfg.settings['op_mode'] == OP_MODE_VANILLA and not force_rebuild:
        process_XML_flag = False
        MAME_exe_path = FileName(cfg.settings['mame_prog'])
        MAME_XML_path = cfg.MAME_XML_PATH
        XML_control_FN = cfg.MAME_XML_CONTROL_PATH
        # Check that MAME executable exists.
        if not cfg.settings['mame_prog']:
            log_info('Vanilla MAME executable path is not set. Aborting.')
            kodi_set_error_status(st_dic, 'Vanilla MAME executable path is not set.')
            return
        if not MAME_exe_path.exists():
            log_info('Vanilla MAME executable file not found. Aborting.')
            kodi_set_error_status(st_dic, 'Vanilla MAME executable file not found.')
            return
        log_info('Vanilla MAME executable found.')
        # Check that extracted MAME XML exists.
        # In Vanilla MAME the XML file is extracted from the executable.
        if MAME_XML_path.exists():
            log_info('Vanilla MAME XML file found.')
            # Check that the XML control file exists.
            if XML_control_FN.exists():
                # Open the XML control file and check if the current version of the MAME executable
                # is the same as in the XML control file.
                # If not reprocess everything, if so use the cached information in the XML control file.
                log_info('Vanilla MAME XML control file found.')
                XML_control_dic = utils_load_JSON_file(XML_control_FN.getPath())
                mame_exe_version_str = mame_get_MAME_exe_version(cfg, MAME_exe_path)
                log_debug('XML_control_dic["ver_mame_str"] "{}"'.format(XML_control_dic['ver_mame_str']))
                log_debug('mame_exe_version_str "{}"'.format(mame_exe_version_str))
                if mame_exe_version_str != XML_control_dic['ver_mame_str']:
                    log_info('Vanilla MAME version is different from the version in the XML control file. '
                        'Forcing new preprocessing.')
                    process_XML_flag = True
                else:
                    log_info('XML control file up to date.')
                    process_XML_flag = False
            else:
                log_info('XML control file NOT found. Forcing XML preprocessing.')
                process_XML_flag = True
        else:
            log_info('Vanilla MAME XML file NOT found. Forcing XML preprocessing.')
            process_XML_flag = True
        # Only process MAME XML if needed.
        if process_XML_flag:
            # Extract, count number of machines and create XML control file.
            mame_extract_MAME_XML(cfg, st_dic)
            if st_dic['abort']: return
        else:
            # BUG FIX: log message typo 'previosly' corrected.
            log_info('Reusing previously preprocessed Vanilla MAME XML.')
    elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS and force_rebuild:
        log_info('Forcing rebuilding of MAME 2003 Plus XML.')
        MAME_XML_path = FileName(cfg.settings['xml_2003_path'])
        XML_control_FN = cfg.MAME_2003_PLUS_XML_CONTROL_PATH
        # Count number of machines and create XML control file.
        mame_preprocess_RETRO_MAME2003PLUS(cfg, st_dic)
        if st_dic['abort']: return
    elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS and not force_rebuild:
        process_XML_flag = False
        MAME_XML_path = FileName(cfg.settings['xml_2003_path'])
        XML_control_FN = cfg.MAME_2003_PLUS_XML_CONTROL_PATH
        # Check that MAME 2003 Plus XML exists.
        if not cfg.settings['xml_2003_path']:
            log_info('MAME 2003 Plus XML path is not set. Aborting.')
            kodi_set_error_status(st_dic, 'MAME 2003 Plus XML path is not set.')
            return
        if not MAME_XML_path.exists():
            log_info('MAME 2003 Plus XML file not found. Aborting.')
            kodi_set_error_status(st_dic, 'MAME 2003 Plus XML file not found.')
            return
        log_info('MAME 2003 Plus XML found.')
        # Check that the XML control file exists.
        if XML_control_FN.exists():
            # Open the XML control file and check if mtime of current file is older than
            # the one stored in the XML control file.
            # If the file is newer reprocess it, otherwise use the cached information.
            log_info('MAME 2003 XML control file found.')
            XML_control_dic = utils_load_JSON_file(XML_control_FN.getPath())
            statinfo = os.stat(MAME_XML_path.getPath())
            log_debug('XML_control_dic["st_mtime"] "{}"'.format(XML_control_dic['st_mtime']))
            log_debug('statinfo.st_mtime "{}"'.format(statinfo.st_mtime))
            if statinfo.st_mtime > XML_control_dic['st_mtime']:
                log_info('XML file is more recent than last preprocessing. Forcing new preprocessing.')
                process_XML_flag = True
            else:
                log_info('XML control up to date.')
                process_XML_flag = False
        else:
            log_info('XML control file not found. Forcing XML preprocessing.')
            process_XML_flag = True
        # Only process MAME XML if needed.
        if process_XML_flag:
            # Count number of machines and create XML control file.
            mame_preprocess_RETRO_MAME2003PLUS(cfg, st_dic)
            if st_dic['abort']: return
        else:
            # BUG FIX: log message typo 'previosly' corrected.
            log_info('Reusing previously preprocessed MAME 2003 XML.')
    else:
        # BUG FIX: the error log named the wrong function (it said
        # 'mame_build_MAME_main_database()').
        log_error('mame_init_MAME_XML() Unknown op_mode "{}"'.format(cfg.settings['op_mode']))
        kodi_set_error_status(st_dic, 'Unknown operation mode {}'.format(cfg.settings['op_mode']))
        return
    return MAME_XML_path, XML_control_FN
# -------------------------------------------------------------------------------------------------
# Loading of data files
# -------------------------------------------------------------------------------------------------
# Catver.ini is very special so it has a custom loader.
# It provides data for two catalogs: categories and version added. In other words, it
# has 2 folders defined in the INI file.
#
# --- Example -----------------------------------
# ;; Comment
# [special_folder_name or no name]
# machine_name_1 = category_name_1
# machine_name_2 = category_name_2
# -----------------------------------------------
#
# Returns two dictionaries with a structure similar to mame_load_INI_datfile_simple():
# catver_dic, veradded_dic
#
def mame_load_Catver_ini(filename):
    # Loads catver.ini, which provides two catalogs in one file: machine
    # categories (the [Category] section) and the MAME version each machine
    # was added in (the [VerAdded] section).
    #
    # filename -- path of the catver.ini file (string).
    #
    # Returns (catver_dic, veradded_dic), both with the same structure as
    # the dictionary returned by mame_load_INI_datfile_simple(). On IOError
    # both dictionaries are returned with default (empty) contents.
    __debug_do_list_categories = False
    log_info('mame_load_Catver_ini() Parsing "{}"'.format(filename))
    catver_dic = {
        'version' : 'unknown',
        'unique_categories' : True,
        'single_category' : False,
        'isValid' : False,
        'data' : {},
        'categories' : set(),
    }
    veradded_dic = {
        'version' : 'unknown',
        'unique_categories' : True,
        'single_category' : False,
        'isValid' : False,
        'data' : {},
        'categories' : set(),
    }
    # --- read_status FSM values ---
    # 0 -> Looking for '[Category]' tag
    # 1 -> Reading categories
    # 2 -> Looking for '[VerAdded]' tag.
    # 3 -> Reading version added
    # 4 -> END
    read_status = 0
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8')
    except IOError:
        log_error('mame_load_Catver_ini() Exception IOError')
        log_error('mame_load_Catver_ini() File "{}"'.format(filename))
        return (catver_dic, veradded_dic)
    for cat_line in f:
        stripped_line = cat_line.strip()
        if __debug_do_list_categories: log_debug('Line "' + stripped_line + '"')
        if read_status == 0:
            # >> Look for Catver version
            m = re.search(r'^;; CatVer ([0-9\.]+) / ', stripped_line)
            if m:
                catver_dic['version'] = m.group(1)
                veradded_dic['version'] = m.group(1)
            m = re.search(r'^;; CATVER.ini ([0-9\.]+) / ', stripped_line)
            if m:
                catver_dic['version'] = m.group(1)
                veradded_dic['version'] = m.group(1)
            if stripped_line == '[Category]':
                if __debug_do_list_categories: log_debug('Found [Category]')
                read_status = 1
        elif read_status == 1:
            line_list = stripped_line.split("=")
            if len(line_list) == 1:
                # A line without '=' (the next section header) ends the categories.
                read_status = 2
            else:
                if __debug_do_list_categories: log_debug(line_list)
                machine_name = line_list[0]
                current_category = line_list[1]
                catver_dic['categories'].add(current_category)
                if machine_name in catver_dic['data']:
                    catver_dic['data'][machine_name].append(current_category)
                else:
                    catver_dic['data'][machine_name] = [current_category]
        elif read_status == 2:
            if stripped_line == '[VerAdded]':
                if __debug_do_list_categories: log_debug('Found [VerAdded]')
                read_status = 3
        elif read_status == 3:
            line_list = stripped_line.split("=")
            if len(line_list) == 1:
                # A line without '=' ends the version-added section.
                read_status = 4
            else:
                if __debug_do_list_categories: log_debug(line_list)
                machine_name = line_list[0]
                current_category = line_list[1]
                veradded_dic['categories'].add(current_category)
                if machine_name in veradded_dic['data']:
                    veradded_dic['data'][machine_name].append(current_category)
                else:
                    veradded_dic['data'][machine_name] = [current_category]
        elif read_status == 4:
            log_debug('End parsing')
            break
        else:
            raise CriticalError('Unknown read_status FSM value')
    f.close()
    # single_category is True when the INI defines exactly one category.
    # BUG FIX: the loops below must only clear unique_categories. The old code
    # also assigned single_category = True inside each loop iteration, which
    # overwrote the value computed here whenever the data dict was non-empty.
    catver_dic['single_category'] = True if len(catver_dic['categories']) == 1 else False
    for m_name in sorted(catver_dic['data']):
        if len(catver_dic['data'][m_name]) > 1:
            catver_dic['unique_categories'] = False
            break
    veradded_dic['single_category'] = True if len(veradded_dic['categories']) == 1 else False
    for m_name in sorted(veradded_dic['data']):
        if len(veradded_dic['data'][m_name]) > 1:
            veradded_dic['unique_categories'] = False
            break
    # If categories are unique for each machine transform lists into strings
    if catver_dic['unique_categories']:
        for m_name in catver_dic['data']:
            catver_dic['data'][m_name] = catver_dic['data'][m_name][0]
    if veradded_dic['unique_categories']:
        for m_name in veradded_dic['data']:
            veradded_dic['data'][m_name] = veradded_dic['data'][m_name][0]
    log_info('mame_load_Catver_ini() Catver Machines {:6d}'.format(len(catver_dic['data'])))
    log_info('mame_load_Catver_ini() Catver Categories {:6d}'.format(len(catver_dic['categories'])))
    log_info('mame_load_Catver_ini() Catver Version "{}"'.format(catver_dic['version']))
    log_info('mame_load_Catver_ini() Catver unique_categories {}'.format(catver_dic['unique_categories']))
    log_info('mame_load_Catver_ini() Catver single_category {}'.format(catver_dic['single_category']))
    log_info('mame_load_Catver_ini() Veradded Machines {:6d}'.format(len(veradded_dic['data'])))
    log_info('mame_load_Catver_ini() Veradded Categories {:6d}'.format(len(veradded_dic['categories'])))
    log_info('mame_load_Catver_ini() Veradded Version "{}"'.format(veradded_dic['version']))
    log_info('mame_load_Catver_ini() Veradded unique_categories {}'.format(veradded_dic['unique_categories']))
    log_info('mame_load_Catver_ini() Veradded single_category {}'.format(veradded_dic['single_category']))
    return (catver_dic, veradded_dic)
#
# nplayers.ini does not have [ROOT_FOLDER], only [NPlayers].
# nplayers.ini has a structure very similar to catver.ini, and it is also supported here.
# Returns an ini_dic with the same structure as mame_load_INI_datfile_simple()
#
# NOTE nplayers.ini has defects like having repeated entries for some machines.
# Do not crash because of this! For example (in version 0.194 04-feb-18)
# 1943=2P sim
# 1943=2P sim
#
def mame_load_nplayers_ini(filename):
    # Loads nplayers.ini and returns an ini_dic with the same structure as
    # mame_load_INI_datfile_simple(). For machines that appear more than once
    # (a known nplayers.ini defect) only the first category seen is kept.
    __debug_do_list_categories = False
    log_info('mame_load_nplayers_ini() Parsing "{}"'.format(filename))
    ini_dic = {
        'version' : 'unknown',
        'unique_categories' : True,
        'single_category' : False,
        'isValid' : False,
        'data' : {},
        'categories' : set(),
    }
    # FSM states: searching for the [NPlayers] tag, reading "machine=category"
    # lines, and done (first non-assignment line after the section).
    FSM_SEARCHING, FSM_READING, FSM_DONE = 0, 1, 2
    state = FSM_SEARCHING
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8')
    except IOError:
        log_info('mame_load_nplayers_ini() (IOError) opening "{}"'.format(filename))
        return ini_dic
    for raw_line in f:
        line = raw_line.strip()
        if __debug_do_list_categories: log_debug('Line "' + line + '"')
        if state == FSM_SEARCHING:
            # Pick up the nplayers.ini version from the header comments.
            m = re.search(r'NPlayers ([0-9\.]+) / ', line)
            if m: ini_dic['version'] = m.group(1)
            if line == '[NPlayers]':
                if __debug_do_list_categories: log_debug('Found [NPlayers]')
                state = FSM_READING
        elif state == FSM_READING:
            fields = line.split("=")
            if len(fields) == 1:
                # No '=' in the line: the section is over.
                state = FSM_DONE
                continue
            machine_name, current_category = text_type(fields[0]), text_type(fields[1])
            if __debug_do_list_categories: log_debug('"{}" / "{}"'.format(machine_name, current_category))
            ini_dic['categories'].add(current_category)
            # Repeated machines keep their first category only, which avoids
            # crashing on the duplicated entries found in nplayers.ini.
            if machine_name not in ini_dic['data']:
                ini_dic['data'][machine_name] = [current_category]
        elif state == FSM_DONE:
            log_info('mame_load_nplayers_ini() Reached end of nplayers parsing.')
            break
        else:
            raise ValueError('Unknown read_status FSM value')
    f.close()
    ini_dic['single_category'] = len(ini_dic['categories']) == 1
    # Every machine holds a one-element list by construction, but keep the
    # defensive check for unique categories anyway.
    if any(len(cat_list) > 1 for cat_list in ini_dic['data'].values()):
        ini_dic['unique_categories'] = False
    # With unique categories, store a plain string instead of a list.
    if ini_dic['unique_categories']:
        for m_name in ini_dic['data']:
            ini_dic['data'][m_name] = ini_dic['data'][m_name][0]
    log_info('mame_load_nplayers_ini() Machines {0:6d}'.format(len(ini_dic['data'])))
    log_info('mame_load_nplayers_ini() Categories {0:6d}'.format(len(ini_dic['categories'])))
    log_info('mame_load_nplayers_ini() Version "{}"'.format(ini_dic['version']))
    log_info('mame_load_nplayers_ini() unique_categories {}'.format(ini_dic['unique_categories']))
    log_info('mame_load_nplayers_ini() single_category {}'.format(ini_dic['single_category']))
    return ini_dic
#
# Load mature.ini file.
# Returns an ini_dic similar to mame_load_INI_datfile_simple()
#
def mame_load_Mature_ini(filename):
    # Loads mature.ini, which only defines one implicit category ('default')
    # under the [ROOT_FOLDER] tag. Returns an ini_dic with the same structure
    # as mame_load_INI_datfile_simple().
    FSM_HEADER = 0      # Initial state: locate and process the [ROOT_FOLDER] tag.
    FSM_FOLDER_NAME = 1 # Adding machine names to the current category.
    log_info('mame_load_Mature_ini() Parsing "{}"'.format(filename))
    ini_dic = {
        'version' : 'unknown',
        'unique_categories' : True,
        'single_category' : False,
        'isValid' : False,
        'data' : {},
        'categories' : set(),
    }
    # Read the whole file into a list of stripped, non-blank lines.
    try:
        with io.open(filename, 'rt', encoding = 'utf-8') as f:
            slist = [t for t in (file_line.strip() for file_line in f) if t]
    except IOError:
        log_info('mame_load_Mature_ini() (IOError) opening "{}"'.format(filename))
        return ini_dic
    fsm_status = FSM_HEADER
    for line in slist:
        if fsm_status == FSM_HEADER:
            # Comment lines start with ';;'. Grab the INI version if present.
            if re.search(r'^;;', line):
                m = re.search(r';; (\w+)\.ini ([0-9\.]+) / ', line)
                if m:
                    ini_dic['version'] = m.group(2)
                continue
            if line == '[ROOT_FOLDER]':
                fsm_status = FSM_FOLDER_NAME
                # All machines belong to the single default category.
                current_category = 'default'
                ini_dic['categories'].add(current_category)
        elif fsm_status == FSM_FOLDER_NAME:
            ini_dic['data'].setdefault(line, []).append(current_category)
        else:
            raise ValueError('Unknown FSM fsm_status {}'.format(fsm_status))
    ini_dic['single_category'] = len(ini_dic['categories']) == 1
    for cat_list in ini_dic['data'].values():
        if len(cat_list) > 1:
            ini_dic['unique_categories'] = False
            break
    # With unique categories, store a plain string instead of a list.
    if ini_dic['unique_categories']:
        for m_name in ini_dic['data']:
            ini_dic['data'][m_name] = ini_dic['data'][m_name][0]
    log_info('mame_load_Mature_ini() Machines {0:6d}'.format(len(ini_dic['data'])))
    log_info('mame_load_Mature_ini() Categories {0:6d}'.format(len(ini_dic['categories'])))
    log_info('mame_load_Mature_ini() Version "{}"'.format(ini_dic['version']))
    log_info('mame_load_Mature_ini() unique_categories {}'.format(ini_dic['unique_categories']))
    log_info('mame_load_Mature_ini() single_category {}'.format(ini_dic['single_category']))
    return ini_dic
#
# Generic MAME INI file loader.
# Supports Alltime.ini, Artwork.ini, bestgames.ini, Category.ini, catlist.ini,
# genre.ini and series.ini.
#
# --- Example -----------------------------------
# ;; Comment
# [FOLDER_SETTINGS]
# RootFolderIcon mame
# SubFolderIcon folder
#
# [ROOT_FOLDER]
#
# [category_name_1]
# machine_name_1
# machine_name_2
#
# [category_name_2]
# machine_name_1
# -----------------------------------------------
#
# Note that some INIs, for example Artwork.ini, may have the same machine on different
# categories. This must be supported in this function.
#
# ini_dic = {
# 'version' : string,
# 'unique_categories' : bool,
# 'single_category' : bool,
# 'data' : {
# 'machine_name' : { 'category_1', 'category_2', ... }
# }
# 'categories' : {
# 'category_1', 'category_2', ...
# }
# }
#
# categories is a set of (unique) categories. By definition of set, each category appears
# only once.
# unique_categories is True if each machine has a unique category, False otherwise.
# single_category is True if only one category is defined, for example in mature.ini.
#
def mame_load_INI_datfile_simple(filename):
    # Generic MAME INI file loader (Alltime.ini, Artwork.ini, bestgames.ini,
    # Category.ini, catlist.ini, genre.ini, series.ini). See the comment block
    # above for the structure of the returned ini_dic.
    #
    # filename -- path of the INI file (string).
    #
    # Returns ini_dic. On IOError the default (empty) ini_dic is returned.
    # Raises ValueError on repeated categories or malformed files.
    # FSM statuses
    FSM_HEADER = 0 # Looking for and process '[ROOT_FOLDER]' directory tag.
    # Initial status.
    FSM_FOLDER_NAME = 1 # Searching for [category_name] and/or adding machines.
    # Read file and put it in a list of strings.
    # Strings in this list are stripped.
    log_info('mame_load_INI_datfile_simple() Parsing "{}"'.format(filename))
    ini_dic = {
        'version' : 'unknown',
        'unique_categories' : True,
        'single_category' : False,
        'isValid' : False,
        'data' : {},
        'categories' : set(),
    }
    slist = []
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8', errors = 'replace')
        for file_line in f:
            stripped_line = file_line.strip()
            if stripped_line == '': continue # Skip blanks
            slist.append(stripped_line)
        f.close()
    except IOError:
        log_info('mame_load_INI_datfile_simple() (IOError) opening "{}"'.format(filename))
        return ini_dic
    # Compile regexes to increase performance => It is no necessary. According to the docs: The
    # compiled versions of the most recent patterns passed to re.match(), re.search() or
    # re.compile() are cached, so programs that use only a few regular expressions at a
    # time needn't worry about compiling regular expressions.
    fsm_status = FSM_HEADER
    # BUG FIX: initialise current_category so a machine line appearing before
    # the first [category] header raises a clear ValueError instead of an
    # obscure NameError.
    current_category = None
    for stripped_line in slist:
        if fsm_status == FSM_HEADER:
            # Skip comments: lines starting with ';;'
            # Look for version string in comments
            if re.search(r'^;;', stripped_line):
                m = re.search(r';; (\w+)\.ini ([0-9\.]+) / ', stripped_line)
                if m: ini_dic['version'] = m.group(2)
                continue
            if stripped_line.find('[ROOT_FOLDER]') >= 0:
                fsm_status = FSM_FOLDER_NAME
        elif fsm_status == FSM_FOLDER_NAME:
            m = re.search(r'^\[(.*)\]', stripped_line)
            if m:
                current_category = text_type(m.group(1))
                if current_category in ini_dic['categories']:
                    raise ValueError('Repeated category {}'.format(current_category))
                ini_dic['categories'].add(current_category)
            else:
                machine_name = stripped_line
                if current_category is None:
                    raise ValueError('Machine "{}" before any category'.format(machine_name))
                if machine_name in ini_dic['data']:
                    ini_dic['unique_categories'] = False
                    ini_dic['data'][machine_name].append(current_category)
                else:
                    ini_dic['data'][machine_name] = [current_category]
        else:
            raise ValueError('Unknown FSM fsm_status {}'.format(fsm_status))
    ini_dic['single_category'] = True if len(ini_dic['categories']) == 1 else False
    for m_name in sorted(ini_dic['data']):
        if len(ini_dic['data'][m_name]) > 1:
            ini_dic['unique_categories'] = False
            break
    # If categories are unique for each machine transform lists into strings
    if ini_dic['unique_categories']:
        for m_name in ini_dic['data']:
            ini_dic['data'][m_name] = ini_dic['data'][m_name][0]
    log_info('mame_load_INI_datfile_simple() Machines {0:6d}'.format(len(ini_dic['data'])))
    log_info('mame_load_INI_datfile_simple() Categories {0:6d}'.format(len(ini_dic['categories'])))
    log_info('mame_load_INI_datfile_simple() Version "{}"'.format(ini_dic['version']))
    log_info('mame_load_INI_datfile_simple() unique_categories {}'.format(ini_dic['unique_categories']))
    log_info('mame_load_INI_datfile_simple() single_category {}'.format(ini_dic['single_category']))
    return ini_dic
# --- BEGIN code in dev-parsers/test_parser_history_dat.py ----------------------------------------
# Loads History.dat. This function is deprecated in favour of the XML format.
#
# One description can be for several MAME machines:
# $info=99lstwar,99lstwara,99lstwarb,
# $bio
#
# One description can be for several SL items and several SL lists:
# $amigaocs_flop=alloallo,alloallo1,
# $amigaaga_flop=alloallo,alloallo1,
# $amiga_flop=alloallo,alloallo1,
# $bio
#
# key_in_history_dic is the first machine on the list on the first line.
#
# history_idx = {
# 'nes' : {
# 'name': string,
# 'machines' : {
# 'machine_name' : "beautiful_name|db_list_name|db_machine_name",
# '100mandk' : "beautiful_name|nes|100mandk",
# '89denku' : "beautiful_name|nes|89denku",
# },
# }
# 'mame' : {
# 'name' : string,
# 'machines': {
# '88games' : "beautiful_name|db_list_name|db_machine_name",
# 'flagrall' : "beautiful_name|db_list_name|db_machine_name",
# },
# }
# }
#
# history_dic = {
# 'nes' : {
# '100mandk' : string,
# '89denku' : string,
# },
# 'mame' : {
# '88games' : string,
# 'flagrall' : string,
# },
# }
def mame_load_History_DAT(filename):
    # Loads History.dat (legacy DAT format, deprecated in favour of the XML
    # format). The structure of the returned dictionary is documented in the
    # comment block above this function.
    #
    # filename -- path of History.dat (string).
    #
    # Returns history_dic with keys 'version', 'date', 'index' and 'data'.
    # On IOError the default (empty) history_dic is returned.
    log_info('mame_load_History_DAT() Parsing "{}"'.format(filename))
    history_dic = {
        'version' : 'Unknown',
        'date' : 'Unknown',
        'index' : {},
        'data' : {},
    }
    __debug_function = False
    line_number = 0
    num_header_line = 0
    # Due to syntax errors in History.dat m_data may have invalid data, for example
    # empty strings as list_name and/or machine names.
    # m_data = [
    #     (line_number, list_name, [machine1, machine2, ...]),
    #     ...
    # ]
    m_data = []
    # Convenience variables.
    history_idx = history_dic['index']
    history_data = history_dic['data']
    # --- read_status FSM values ---
    # History.dat has some syntax errors, like empty machine names. To fix this, do
    # the parsing on two stages: first read the raw data and the bio and then
    # check if the data is OK before adding it to the index and the DB.
    # 0 -> Looking for '$info=machine_name_1,machine_name_2,' or '$SL_name=item_1,item_2,'
    #      If '$bio' found go to 1.
    # 1 -> Reading information. If '$end' found go to 2.
    # 2 -> Add information to database if no errors. Then go to 0.
    read_status = 0
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8')
    except IOError:
        log_info('mame_load_History_DAT() (IOError) opening "{}"'.format(filename))
        return history_dic
    for file_line in f:
        line_number += 1
        line_uni = file_line.strip()
        if __debug_function: log_debug('Line "{}"'.format(line_uni))
        if read_status == 0:
            # Skip comments: lines starting with '##'
            # Look for version string in comments
            if re.search(r'^##', line_uni):
                m = re.search(r'## REVISION\: ([0-9\.]+)$', line_uni)
                if m: history_dic['version'] = m.group(1) + ' DAT'
                continue
            if line_uni == '': continue
            # Machine list line
            # Parses lines like "$info=99lstwar,99lstwara,99lstwarb,"
            # Parses lines like "$info=99lstwar,99lstwara,99lstwarb"
            # History.dat has syntactic errors like "$dc=,".
            # History.dat has syntactic errors like "$megadriv=".
            m = re.search(r'^\$(.+?)=(.*?),?$', line_uni)
            if m:
                num_header_line += 1
                list_name = m.group(1)
                machine_name_raw = m.group(2)
                # Remove trailing ',' to fix history.dat syntactic errors like
                # "$snes_bspack=bsfami,,"
                if len(machine_name_raw) > 1 and machine_name_raw[-1] == ',':
                    machine_name_raw = machine_name_raw[:-1]
                # Transform some special list names
                if list_name in {'info', 'info,megatech', 'info,stv'}: list_name = 'mame'
                mname_list = machine_name_raw.split(',')
                m_data.append([num_header_line, list_name, mname_list])
                continue
            if line_uni == '$bio':
                read_status = 1
                info_str_list = []
                continue
            # If we reach this point it's an error.
            raise TypeError('Wrong header "{}" (line {:,})'.format(line_uni, line_number))
        elif read_status == 1:
            if line_uni == '$end':
                # Generate biography text.
                # BUG FIX: guard the trimming against an empty biography
                # ($bio immediately followed by $end). Indexing bio_str[0]
                # and bio_str[-1] on an empty string raised IndexError.
                bio_str = '\n'.join(info_str_list)
                if bio_str.startswith('\n'): bio_str = bio_str[1:]
                if bio_str.endswith('\n'): bio_str = bio_str[:-1]
                bio_str = bio_str.replace('\n\t\t', '')
                # Clean m_data of bad data due to History.dat syntax errors, for example
                # empty machine names.
                # clean_m_data = [
                #     (list_name, [machine_name_1, machine_name_2, ...] ),
                #     ...,
                # ]
                clean_m_data = []
                for dtuple in m_data:
                    line_num, list_name, mname_list = dtuple
                    # If list_name is empty drop the full line
                    if not list_name: continue
                    # Clean empty machine names.
                    clean_mname_list = []
                    for machine_name in mname_list:
                        # Skip bad/wrong machine names.
                        if not machine_name: continue
                        if machine_name == ',': continue
                        clean_mname_list.append(machine_name)
                    clean_m_data.append((list_name, clean_mname_list))
                # Reset FSM status
                read_status = 2
                num_header_line = 0
                m_data = []
                info_str_list = []
            else:
                info_str_list.append(line_uni)
        elif read_status == 2:
            # Go to state 0 of the FSM.
            read_status = 0
            # Ignore machine if no valid data at all.
            if len(clean_m_data) == 0:
                log_warning('On History.dat line {:,}'.format(line_number))
                log_warning('clean_m_data is empty.')
                log_warning('Ignoring entry in History.dat database')
                continue
            # Ignore if empty list name.
            if not clean_m_data[0][0]:
                log_warning('On History.dat line {:,}'.format(line_number))
                log_warning('clean_m_data empty list name.')
                log_warning('Ignoring entry in History.dat database')
                continue
            # Ignore if empty machine list.
            if not clean_m_data[0][1]:
                log_warning('On History.dat line {:,}'.format(line_number))
                log_warning('Empty machine name list.')
                log_warning('db_list_name "{}"'.format(clean_m_data[0][0]))
                log_warning('Ignoring entry in History.dat database')
                continue
            if not clean_m_data[0][1][0]:
                log_warning('On History.dat line {:,}'.format(line_number))
                log_warning('Empty machine name first element.')
                log_warning('db_list_name "{}"'.format(clean_m_data[0][0]))
                log_warning('Ignoring entry in History.dat database')
                continue
            # The database key is the first machine of the first list.
            db_list_name = clean_m_data[0][0]
            db_machine_name = clean_m_data[0][1][0]
            # Add list and machine names to index database.
            for dtuple in clean_m_data:
                list_name, machine_name_list = dtuple
                if list_name not in history_idx:
                    history_idx[list_name] = {'name' : list_name, 'machines' : {}}
                for machine_name in machine_name_list:
                    m_str = misc_build_db_str_3(machine_name, db_list_name, db_machine_name)
                    history_idx[list_name]['machines'][machine_name] = m_str
            # Add biography string to main database.
            if db_list_name not in history_data: history_data[db_list_name] = {}
            history_data[db_list_name][db_machine_name] = bio_str
        else:
            raise TypeError('Wrong read_status = {} (line {:,})'.format(read_status, line_number))
    f.close()
    log_info('mame_load_History_DAT() Version "{}"'.format(history_dic['version']))
    log_info('mame_load_History_DAT() Rows in index {}'.format(len(history_dic['index'])))
    log_info('mame_load_History_DAT() Rows in data {}'.format(len(history_dic['data'])))
    return history_dic
# --- END code in dev-parsers/test_parser_history_dat.py ------------------------------------------
# --- BEGIN code in dev-parsers/test_parser_history_xml.py ----------------------------------------
# Loads History.xml, a new XML version of History.dat
#
# MAME machine:
# <entry>
# <systems>
# <system name="dino" />
# <system name="dinou" />
# </systems>
# <text />
# </entry>
#
# One description can be for several SL items and several SL lists:
# <entry>
# <software>
# <item list="snes" name="smw2jb" />
# <item list="snes" name="smw2ja" />
# <item list="snes" name="smw2j" />
# </software>
# <text />
# </entry>
#
# Example of a problematic entry:
# <entry>
# <systems>
# <system name="10yardj" />
# </systems>
# <software>
# <item list="vgmplay" name="10yard" />
# </software>
# <text />
# </entry>
#
# The key in the data dictionary is the first machine found on history.xml
#
# history_dic = {
# 'version' : '2.32', # string
# 'date' : '2021-05-28', # string
# 'index' : {
# 'nes' : {
# 'name': 'nes', # string, later changed with beautiful name
# 'machines' : {
# 'machine_name' : "beautiful_name|db_list_name|db_machine_name",
# '100mandk' : "beautiful_name|nes|100mandk",
# '89denku' : "beautiful_name|nes|89denku",
# },
# },
# 'mame' : {
# 'name' : string,
# 'machines': {
# '88games' : "beautiful_name|db_list_name|db_machine_name",
# 'flagrall' : "beautiful_name|db_list_name|db_machine_name",
# },
# },
# },
# 'data' = {
# 'nes' : {
# '100mandk' : string,
# '89denku' : string,
# },
# 'mame' : {
# '88games' : string,
# 'flagrall' : string,
# },
# }
# }
def mame_load_History_xml(filename):
    # Loads History.xml, the XML version of History.dat.
    #
    # One <entry> may apply to several MAME machines (<systems> tag) and/or to
    # several Software List items (<software> tag). The biography text is stored
    # once in the data dictionary, under the first (list, machine) pair found in
    # the entry; the index maps every pair in the entry to that location.
    #
    # filename -- path of History.xml.
    # Returns the history_dic dictionary with keys 'version', 'date', 'index'
    # and 'data' (layout documented in the comment above this function).
    log_info('mame_load_History_xml() Parsing "{}"'.format(filename))
    history_dic = {
        'version' : 'Unknown',
        'date' : 'Unknown',
        'index' : {},
        'data' : {},
    }
    __debug_xml_parser = False
    entry_counter = 0
    # Convenience variables.
    history_idx = history_dic['index']
    history_data = history_dic['data']
    xml_tree = utils_load_XML_to_ET(filename)
    if not xml_tree: return history_dic
    xml_root = xml_tree.getroot()
    history_dic['version'] = xml_root.attrib['version'] + ' XML ' + xml_root.attrib['date']
    history_dic['date'] = xml_root.attrib['date']
    for root_el in xml_root:
        if __debug_xml_parser: log_debug('Root child tag "{}"'.format(root_el.tag))
        if root_el.tag != 'entry':
            log_error('Unknown tag <{}>'.format(root_el.tag))
            raise TypeError
        entry_counter += 1
        # item_list collects (list_name, machine_name) pairs of this entry.
        item_list = []
        for entry_el in root_el:
            if __debug_xml_parser: log_debug('Entry child tag "{}"'.format(entry_el.tag))
            if entry_el.tag == 'software':
                for software_el in entry_el:
                    if software_el.tag != 'item':
                        log_error('Unknown <software> child tag <{}>'.format(software_el.tag))
                        raise TypeError
                    item_list.append((software_el.attrib['list'], software_el.attrib['name']))
            elif entry_el.tag == 'systems':
                for system_el in entry_el:
                    if system_el.tag != 'system':
                        # BUG FIX: previously referenced software_el here, which is
                        # undefined when <systems> precedes <software> in the entry.
                        log_error('Unknown <systems> child tag <{}>'.format(system_el.tag))
                        raise TypeError
                    # BUG FIX: previously read software_el.attrib['name'] (stale or
                    # undefined variable) instead of the current <system> name.
                    item_list.append(('mame', system_el.attrib['name']))
            elif entry_el.tag == 'text':
                # Generate biography text. <text /> may be empty (None text).
                bio_str = entry_el.text if entry_el.text else ''
                bio_str = bio_str[1:] if bio_str.startswith('\n') else bio_str
                bio_str = bio_str[:-1] if bio_str.endswith('\n') else bio_str
                bio_str = bio_str.replace('\n\t\t', '')
                # Add list and machine names to index database.
                if len(item_list) < 1:
                    log_warning('Empty item_list in entry_counter = {}'.format(entry_counter))
                    continue
                db_list_name, db_machine_name = item_list[0]
                for list_name, machine_name in item_list:
                    m_str = misc_build_db_str_3(machine_name, db_list_name, db_machine_name)
                    if list_name not in history_idx:
                        history_idx[list_name] = {'name' : list_name, 'machines' : {}}
                    history_idx[list_name]['machines'][machine_name] = m_str
                # Add biography string to main database.
                if db_list_name not in history_data: history_data[db_list_name] = {}
                history_data[db_list_name][db_machine_name] = bio_str
            else:
                # BUG FIX: previously logged root_el.tag (always 'entry') instead
                # of the actual offending child tag.
                log_error('Unknown tag <{}>'.format(entry_el.tag))
                raise TypeError
        if __debug_xml_parser and entry_counter > 100: break
    log_info('mame_load_History_xml() Version "{}"'.format(history_dic['version']))
    log_info('mame_load_History_xml() Date "{}"'.format(history_dic['date']))
    log_info('mame_load_History_xml() Rows in index {}'.format(len(history_dic['index'])))
    log_info('mame_load_History_xml() Rows in data {}'.format(len(history_dic['data'])))
    return history_dic
# --- END code in dev-parsers/test_parser_history_xml.py ------------------------------------------
# --- BEGIN code in dev-parsers/test_parser_mameinfo_dat.py ---------------------------------------
# mameinfo.dat has information for both MAME machines and MAME drivers.
#
# idx_dic = {
# 'mame' : {
# '88games' : 'beautiful_name',
# 'flagrall' : 'beautiful_name',
# },
# 'drv' : {
#     '88games.cpp' : 'beautiful_name',
#     'flagrall.cpp' : 'beautiful_name',
# }
# }
# data_dic = {
# 'mame' : {
# '88games' : string,
# 'flagrall' : string,
# },
# 'drv' : {
# '1942.cpp' : string,
# '1943.cpp' : string,
# }
# }
def mame_load_MameInfo_DAT(filename):
    # Parses mameinfo.dat, which documents both MAME machines ($mame sections)
    # and MAME source drivers ($drv sections).
    #
    # filename -- path of mameinfo.dat.
    # Returns a dictionary with keys 'version', 'index' and 'data' (layout
    # documented in the comment above this function).
    log_info('mame_load_MameInfo_DAT() Parsing "{}"'.format(filename))
    ret_dic = {
        'version' : 'Unknown',
        'index' : {
            'mame' : {},
            'drv' : {},
        },
        'data' : {},
    }
    __debug_function = False
    # Simple FSM. States:
    #   0  scanning for a '$info=(machine_name)' line
    #   1  expecting a '$mame' or '$drv' line (empty lines tolerated)
    #   2  accumulating biography lines until '$end'
    state, line_num = 0, 0
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8')
    except IOError:
        log_info('mame_load_MameInfo_DAT() (IOError) opening "{}"'.format(filename))
        return ret_dic
    for raw_line in f:
        line_num += 1
        line = raw_line.strip()
        # if __debug_function: log_debug('Line "{}"'.format(line))
        if state == 0:
            # Comment lines start with '#' and may carry the DAT version.
            if line.startswith('#'):
                v_match = re.search(r'# MAMEINFO.DAT v([0-9\.]+)', line)
                if v_match: ret_dic['version'] = v_match.group(1)
            elif line:
                # New machine or driver information.
                info_match = re.search(r'^\$info=(.+?)$', line)
                if info_match:
                    machine_name = info_match.group(1)
                    if __debug_function: log_debug('Machine "{}"'.format(machine_name))
                    state = 1
        elif state == 1:
            if __debug_function: log_debug('Second line "{}"'.format(line))
            if line in ('$mame', '$drv'):
                state, text_lines = 2, []
                section = line[1:]
                ret_dic['index'][section][machine_name] = machine_name
            elif line:
                raise TypeError('Wrong second line = "{}" (line {:,})'.format(line, line_num))
        elif state == 2:
            if line == '$end':
                if section not in ret_dic['data']: ret_dic['data'][section] = {}
                ret_dic['data'][section][machine_name] = '\n'.join(text_lines).strip()
                state = 0
            else:
                text_lines.append(line)
        else:
            raise TypeError('Wrong read_status = {} (line {:,})'.format(state, line_num))
    f.close()
    log_info('mame_load_MameInfo_DAT() Version "{}"'.format(ret_dic['version']))
    log_info('mame_load_MameInfo_DAT() Rows in index {}'.format(len(ret_dic['index'])))
    log_info('mame_load_MameInfo_DAT() Rows in data {}'.format(len(ret_dic['data'])))
    return ret_dic
# --- END code in dev-parsers/test_parser_mameinfo_dat.py -----------------------------------------
# NOTE set objects are not JSON-serializable. Use lists and transform lists to sets if
# necessary after loading the JSON file.
#
# idx_dic = {
#     '88games' : 'beautiful_name',
#     'flagrall' : 'beautiful_name',
# }
# data_dic = {
# '88games' : 'string',
# 'flagrall' : 'string',
# }
def mame_load_GameInit_DAT(filename):
    # Parses gameinit.dat, which stores per-machine setup instructions.
    #
    # filename -- path of gameinit.dat.
    # Returns a dictionary with keys 'version', 'index' and 'data' (layout
    # documented in the comment above this function).
    log_info('mame_load_GameInit_DAT() Parsing "{}"'.format(filename))
    ret_dic = {
        'version' : 'Unknown',
        'index' : {},
        'data' : {},
    }
    __debug_function = False
    # Simple FSM. States:
    #   0  scanning for a '$info=(machine_name)' line
    #   1  expecting a '$mame' line
    #   2  accumulating text lines until '$end'
    state = 0
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8')
    except IOError:
        log_info('mame_load_GameInit_DAT() (IOError) opening "{}"'.format(filename))
        return ret_dic
    for raw_line in f:
        line = raw_line.strip()
        if __debug_function: log_debug('read_status {} | Line "{}"'.format(state, line))
        # Gameinit.dat may begin with a UTF-8 BOM (0xEF,0xBB,0xBF), which the
        # utf-8 codec decodes to U+FEFF. Drop it if present.
        # See https://en.wikipedia.org/wiki/Byte_order_mark
        if line.startswith('\ufeff'): line = line[1:]
        if state == 0:
            # Comment lines start with '#' and may carry the DAT version.
            if line.startswith('#'):
                if __debug_function: log_debug('Comment | "{}"'.format(line))
                v_match = re.search(r'# MAME GAMEINIT\.DAT v([0-9\.]+) ', line)
                if v_match: ret_dic['version'] = v_match.group(1)
            elif line:
                # New machine information.
                info_match = re.search(r'^\$info=(.+?)$', line)
                if info_match:
                    machine_name = info_match.group(1)
                    if __debug_function: log_debug('Machine "{}"'.format(machine_name))
                    ret_dic['index'][machine_name] = machine_name
                    state = 1
        elif state == 1:
            if __debug_function: log_debug('Second line "{}"'.format(line))
            if line != '$mame':
                raise TypeError('Wrong second line = "{}"'.format(line))
            state, text_lines = 2, []
        elif state == 2:
            if line == '$end':
                ret_dic['data'][machine_name] = '\n'.join(text_lines)
                text_lines = []
                state = 0
            else:
                text_lines.append(line)
        else:
            raise TypeError('Wrong read_status = {}'.format(state))
    f.close()
    log_info('mame_load_GameInit_DAT() Version "{}"'.format(ret_dic['version']))
    log_info('mame_load_GameInit_DAT() Rows in index {}'.format(len(ret_dic['index'])))
    log_info('mame_load_GameInit_DAT() Rows in data {}'.format(len(ret_dic['data'])))
    return ret_dic
# NOTE set objects are not JSON-serializable. Use lists and transform lists to sets if
# necessary after loading the JSON file.
#
# idx_dic = {
#     '88games' : 'beautiful_name',
#     'flagrall' : 'beautiful_name',
# }
# data_dic = {
# '88games' : 'string',
# 'flagrall' : 'string',
# }
def mame_load_Command_DAT(filename):
    # Parses command.dat, which stores the special-move/command lists.
    # A single '$info=' entry may name several machines separated by commas;
    # after parsing, each machine name gets its own entry in the returned
    # index/data dictionaries.
    #
    # filename -- path of command.dat.
    # Returns a dictionary with keys 'version', 'index' and 'data' (layout
    # documented in the comment above this function).
    log_info('mame_load_Command_DAT() Parsing "{}"'.format(filename))
    ret_dic = {
        'version' : 'Unknown',
        'index' : {},
        'data' : {},
    }
    # Temporal storage, keyed by the raw (possibly comma-joined) machine name.
    idx_dic = {}
    data_dic = {}
    __debug_function = False
    # Simple FSM. States:
    #   0  scanning for a '$info=(machine_name)' line
    #   1  expecting a '$cmd' line
    #   2  accumulating text lines until '$end'
    state = 0
    try:
        f = io.open(filename, 'rt', encoding = 'utf-8')
    except IOError:
        log_info('mame_load_Command_DAT() (IOError) opening "{}"'.format(filename))
        return ret_dic
    for raw_line in f:
        line = raw_line.strip()
        # if __debug_function: log_debug('Line "{}"'.format(line))
        if state == 0:
            # Comment lines start with '#' and may carry the DAT version.
            if line.startswith('#'):
                v_match = re.search(r'# Command List-[\w]+[\s]+([0-9\.]+) #', line)
                if v_match: ret_dic['version'] = v_match.group(1)
            elif line:
                # New machine information.
                info_match = re.search(r'^\$info=(.+?)$', line)
                if info_match:
                    machine_name = info_match.group(1)
                    if __debug_function: log_debug('Machine "{}"'.format(machine_name))
                    idx_dic[machine_name] = machine_name
                    state = 1
        elif state == 1:
            if __debug_function: log_debug('Second line "{}"'.format(line))
            if line != '$cmd':
                raise TypeError('Wrong second line = "{}"'.format(line))
            state, text_lines = 2, []
        elif state == 2:
            if line == '$end':
                data_dic[machine_name] = '\n'.join(text_lines)
                text_lines = []
                state = 0
            else:
                text_lines.append(line)
        else:
            raise TypeError('Wrong read_status = {}'.format(state))
    f.close()
    log_info('mame_load_Command_DAT() Version "{}"'.format(ret_dic['version']))
    log_info('mame_load_Command_DAT() Rows in idx_dic {}'.format(len(idx_dic)))
    log_info('mame_load_Command_DAT() Rows in data_dic {}'.format(len(data_dic)))
    # Many machines share the same entry. Expand the database.
    for joined_name in idx_dic:
        for part_name in joined_name.split(','):
            # Skip empty strings
            if not part_name: continue
            part_name = part_name.strip()
            ret_dic['index'][part_name] = part_name
            ret_dic['data'][part_name] = data_dic[joined_name]
    log_info('mame_load_Command_DAT() Entries in proper index {}'.format(len(ret_dic['index'])))
    log_info('mame_load_Command_DAT() Entries in proper data {}'.format(len(ret_dic['data'])))
    return ret_dic
# -------------------------------------------------------------------------------------------------
# DAT export
# -------------------------------------------------------------------------------------------------
#
# Writes a XML text tag line, indented 2 spaces by default.
# Both tag_name and tag_text must be Unicode strings.
# Returns an Unicode string.
#
def XML_t(tag_name, tag_text = '', num_spaces = 4):
    # Renders one XML text tag as a single line, indented num_spaces spaces.
    # tag_name and tag_text must be Unicode strings. An empty (falsy) tag_text
    # produces a self-closing tag; otherwise tag_text is XML-escaped.
    # Returns a Unicode string.
    indent = ' ' * num_spaces
    if not tag_text:
        # Empty tag
        return '{}<{} />'.format(indent, tag_name)
    return '{}<{}>{}</{}>'.format(indent, tag_name, text_escape_XML(tag_text), tag_name)
# Export a MAME information file in Billyc999 XML format to use with RCB.
# https://forum.kodi.tv/showthread.php?tid=70115&pid=2949624#pid2949624
# https://github.com/billyc999/Game-database-info
def mame_write_MAME_ROM_Billyc999_XML(cfg, out_dir_FN, db_dic):
    # Exports the MAME machine database in Billyc999 XML format (for use with RCB).
    #
    # cfg -- addon configuration object (unused here beyond the signature;
    #        kept for consistency with the other exporters -- TODO confirm).
    # out_dir_FN -- output directory as a FileName-like object (has pjoin()).
    # db_dic -- dictionary with at least 'control_dic', 'renderdb' and 'assetdb'.
    #
    # Writes one XML file into out_dir_FN and returns nothing.
    log_debug('mame_write_MAME_ROM_Billyc999_XML() BEGIN...')
    control_dic = db_dic['control_dic']
    # Get output filename
    # DAT filename: AML 0.xxx ROMs (merged|split|non-merged|fully non-merged).xml
    mame_version_str = control_dic['ver_mame_str']
    log_info('MAME version "{}"'.format(mame_version_str))
    DAT_basename_str = 'AML MAME {} Billyc999.xml'.format(mame_version_str)
    DAT_FN = out_dir_FN.pjoin(DAT_basename_str)
    log_info('XML "{}"'.format(DAT_FN.getPath()))
    # XML file header.
    sl = []
    sl.append('<?xml version="1.0" encoding="UTF-8"?>')
    sl.append('<menu>')
    sl.append(' <header>')
    sl.append(XML_t('listname', 'Exported by Advanced MAME Launcher'))
    sl.append(XML_t('lastlistupdate', misc_time_to_str(time.time())))
    sl.append(XML_t('listversion', '{}'.format(mame_version_str)))
    sl.append(XML_t('exporterversion', 'MAME {}'.format(mame_version_str)))
    sl.append(' </header>')
    # Traverse ROMs and write DAT. One <game> element per machine, filled from
    # the render and asset databases.
    machine_counter = 0
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Creating MAME Billyc999 XML...', len(db_dic['renderdb']))
    for m_name in sorted(db_dic['renderdb']):
        render = db_dic['renderdb'][m_name]
        assets = db_dic['assetdb'][m_name]
        sl.append(' <game name="{}">'.format(m_name))
        sl.append(XML_t('description', render['description']))
        sl.append(XML_t('year', render['year']))
        # Rating/dev/score/crc fields have no MAME equivalent; rating is a
        # fixed placeholder and the others are left empty.
        sl.append(XML_t('rating', 'ESRB - E (Everyone)'))
        sl.append(XML_t('manufacturer', render['manufacturer']))
        sl.append(XML_t('dev'))
        sl.append(XML_t('genre', render['genre']))
        sl.append(XML_t('score'))
        sl.append(XML_t('player', render['nplayers']))
        sl.append(XML_t('story', assets['plot']))
        sl.append(XML_t('enabled', 'Yes'))
        sl.append(XML_t('crc'))
        sl.append(XML_t('cloneof', render['cloneof']))
        sl.append(' </game>')
        machine_counter += 1
        pDialog.updateProgress(machine_counter)
    sl.append('</menu>')
    pDialog.endProgress()
    # Open output file name.
    pDialog.startProgress('Writing MAME Billyc999 XML...')
    utils_write_slist_to_file(DAT_FN.getPath(), sl)
    pDialog.endProgress()
#
# Only valid ROMs in DAT file.
#
def mame_write_MAME_ROM_XML_DAT(cfg, out_dir_FN, db_dic):
    # Exports a Logiqx XML DAT with the valid ROMs of the current MAME set.
    # Only valid ROMs end up in the DAT file: CHDs, samples, ROMs stored in
    # another machine ZIP and ROMs without a CRC are all skipped.
    #
    # cfg -- addon configuration object; cfg.settings['mame_rom_set'] selects
    #        the ROM set flavour used in the DAT description/filename.
    # out_dir_FN -- output directory as a FileName-like object (has pjoin()).
    # db_dic -- dictionary with at least 'control_dic', 'renderdb',
    #           'audit_roms' and 'roms_sha1_dic'.
    #
    # Writes one XML file into out_dir_FN and returns nothing.
    log_debug('mame_write_MAME_ROM_XML_DAT() BEGIN...')
    control_dic = db_dic['control_dic']
    render = db_dic['renderdb']
    audit_roms = db_dic['audit_roms']
    roms_sha1_dic = db_dic['roms_sha1_dic']
    # Get output filename
    # DAT filename: AML 0.xxx ROMs (merged|split|non-merged|fully non-merged).xml
    # NOTE removed unused locals 'machines' and 'rom_set' present in a previous
    # version of this function.
    mame_version_str = control_dic['ver_mame_str']
    rom_set_str = ['Merged', 'Split', 'Non-merged', 'Fully Non-merged'][cfg.settings['mame_rom_set']]
    log_info('MAME version "{}"'.format(mame_version_str))
    log_info('ROM set is "{}"'.format(rom_set_str))
    DAT_basename_str = 'AML MAME {} ROMs ({}).xml'.format(mame_version_str, rom_set_str)
    DAT_FN = out_dir_FN.pjoin(DAT_basename_str)
    log_info('XML "{}"'.format(DAT_FN.getPath()))
    # XML file header.
    slist = []
    slist.append('<?xml version="1.0" encoding="UTF-8"?>')
    slist.append('<!DOCTYPE datafile PUBLIC "{}" "{}">'.format(
        '-//Logiqx//DTD ROM Management Datafile//EN', 'http://www.logiqx.com/Dats/datafile.dtd'))
    slist.append('<datafile>')
    desc_str = 'AML MAME {} ROMs {} set'.format(mame_version_str, rom_set_str)
    slist.append('<header>')
    slist.append(XML_t('name', desc_str))
    slist.append(XML_t('description', desc_str))
    slist.append(XML_t('version', '{}'.format(mame_version_str)))
    slist.append(XML_t('date', misc_time_to_str(time.time())))
    slist.append(XML_t('author', 'Exported by Advanced MAME Launcher'))
    slist.append('</header>')
    # Traverse ROMs and write DAT.
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Creating MAME ROMs XML DAT...', len(audit_roms))
    for m_name in sorted(audit_roms):
        pDialog.updateProgressInc()
        # If machine has no ROMs then skip it
        rom_list, actual_rom_list, num_ROMs = audit_roms[m_name], [], 0
        for rom in rom_list:
            # Skip CHDs and samples
            if rom['type'] == ROM_TYPE_ERROR: raise ValueError
            if rom['type'] in [ROM_TYPE_DISK, ROM_TYPE_SAMPLE]: continue
            # Skip machine ROMs not in this machine ZIP file.
            zip_name, rom_name = rom['location'].split('/')
            if zip_name != m_name: continue
            # Skip invalid ROMs
            if not rom['crc']: continue
            # Add SHA1 field
            rom['sha1'] = roms_sha1_dic[rom['location']]
            actual_rom_list.append(rom)
            num_ROMs += 1
        # Machine has no ROMs, skip it
        if num_ROMs == 0: continue
        # Print ROMs in the XML.
        slist.append('<machine name="{}">'.format(m_name))
        slist.append(XML_t('description', render[m_name]['description']))
        slist.append(XML_t('year', render[m_name]['year']))
        slist.append(XML_t('manufacturer', render[m_name]['manufacturer']))
        if render[m_name]['cloneof']:
            slist.append(XML_t('cloneof', render[m_name]['cloneof']))
        for rom in actual_rom_list:
            t = ' <rom name="{}" size="{}" crc="{}" sha1="{}"/>'.format(
                rom['name'], rom['size'], rom['crc'], rom['sha1'])
            slist.append(t)
        slist.append('</machine>')
    slist.append('</datafile>')
    pDialog.endProgress()
    # Open output file name.
    pDialog.startProgress('Writing MAME ROMs XML DAT...')
    utils_write_slist_to_file(DAT_FN.getPath(), slist)
    pDialog.endProgress()
#
# Only valid CHDs in DAT file.
#
def mame_write_MAME_CHD_XML_DAT(cfg, out_dir_FN, db_dic):
    # Exports a Logiqx XML DAT with the valid CHDs of the current MAME set.
    # Only valid CHDs end up in the DAT file: non-disk entries, CHDs stored in
    # another machine directory and CHDs without a SHA1 are all skipped.
    #
    # cfg -- addon configuration object; cfg.settings['mame_chd_set'] selects
    #        the CHD set flavour used in the DAT description/filename.
    # out_dir_FN -- output directory as a FileName-like object (has pjoin()).
    # db_dic -- dictionary with at least 'control_dic', 'renderdb' and
    #           'audit_roms'.
    #
    # Writes one XML file into out_dir_FN and returns nothing.
    log_debug('mame_write_MAME_CHD_XML_DAT() BEGIN ...')
    control_dic = db_dic['control_dic']
    render = db_dic['renderdb']
    audit_roms = db_dic['audit_roms']
    # Get output filename
    # DAT filename: AML 0.xxx ROMs (merged|split|non-merged|fully non-merged).xml
    # NOTE removed unused locals 'machines' and 'chd_set' present in a previous
    # version of this function.
    mame_version_str = control_dic['ver_mame_str']
    chd_set_str = ['Merged', 'Split', 'Non-merged'][cfg.settings['mame_chd_set']]
    log_info('MAME version "{}"'.format(mame_version_str))
    log_info('CHD set is "{}"'.format(chd_set_str))
    DAT_basename_str = 'AML MAME {} CHDs ({}).xml'.format(mame_version_str, chd_set_str)
    DAT_FN = out_dir_FN.pjoin(DAT_basename_str)
    log_info('XML "{}"'.format(DAT_FN.getPath()))
    # XML file header.
    slist = []
    slist.append('<?xml version="1.0" encoding="UTF-8"?>')
    str_a = '-//Logiqx//DTD ROM Management Datafile//EN'
    str_b = 'http://www.logiqx.com/Dats/datafile.dtd'
    slist.append('<!DOCTYPE datafile PUBLIC "{}" "{}">'.format(str_a, str_b))
    slist.append('<datafile>')
    desc_str = 'AML MAME {} CHDs {} set'.format(mame_version_str, chd_set_str)
    slist.append('<header>')
    slist.append(XML_t('name', desc_str))
    slist.append(XML_t('description', desc_str))
    slist.append(XML_t('version', '{}'.format(mame_version_str)))
    slist.append(XML_t('date', misc_time_to_str(time.time())))
    slist.append(XML_t('author', 'Exported by Advanced MAME Launcher'))
    slist.append('</header>')
    # Traverse ROMs and write DAT.
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Creating MAME CHDs XML DAT...', len(audit_roms))
    for m_name in sorted(audit_roms):
        pDialog.updateProgressInc()
        # If machine has no ROMs then skip it
        chd_list, actual_chd_list, num_CHDs = audit_roms[m_name], [], 0
        for chd in chd_list:
            # Only include CHDs
            if chd['type'] != ROM_TYPE_DISK: continue
            # Skip machine ROMs not in this machine ZIP file.
            zip_name, chd_name = chd['location'].split('/')
            if zip_name != m_name: continue
            # Skip invalid CHDs
            if not chd['sha1']: continue
            actual_chd_list.append(chd)
            num_CHDs += 1
        if num_CHDs == 0: continue
        # Print CHDs in the XML.
        slist.append('<machine name="{}">'.format(m_name))
        slist.append(XML_t('description', render[m_name]['description']))
        slist.append(XML_t('year', render[m_name]['year']))
        slist.append(XML_t('manufacturer', render[m_name]['manufacturer']))
        if render[m_name]['cloneof']:
            slist.append(XML_t('cloneof', render[m_name]['cloneof']))
        for chd in actual_chd_list:
            t = ' <rom name="{}" sha1="{}"/>'.format(chd['name'], chd['sha1'])
            slist.append(t)
        slist.append('</machine>')
    slist.append('</datafile>')
    pDialog.endProgress()
    # Open output file name.
    # BUG FIX: progress message said 'Creating MAME ROMs XML DAT...' (copy-paste
    # from the ROM exporter) during the CHD write phase.
    pDialog.startProgress('Writing MAME CHDs XML DAT...')
    utils_write_slist_to_file(DAT_FN.getPath(), slist)
    pDialog.endProgress()
#
# -------------------------------------------------------------------------------------------------
# CHD manipulation functions
# -------------------------------------------------------------------------------------------------
# Reference in https://github.com/rtissera/libchdr/blob/master/src/chd.h
# Reference in https://github.com/mamedev/mame/blob/master/src/lib/util/chd.h
#
# Open CHD and return stat information.
#
# chd_info = {
# 'status' : CHD_OK or CHD_BAD,
# 'version' : int,
# 'sha1' : string,
# }
#
CHD_OK = 0
CHD_BAD_CHD = 1
CHD_BAD_VERSION = 2
def _mame_stat_chd(chd_path):
    # Opens a CHD file and extracts the header version and SHA1 hash.
    # All values in the CHD header are stored in big endian. V4 and V5 headers
    # are supported; V1-V3 headers predate the SHA1 fields (they used MD5) and
    # are reported as CHD_BAD_VERSION.
    #
    # chd_path -- path of the CHD file.
    # Returns chd_info = {
    #     'status' : CHD_OK, CHD_BAD_CHD or CHD_BAD_VERSION,
    #     'version' : int,
    #     'sha1' : string (hex digest, empty string on error),
    # }
    __debug_this_function = False
    chd_info = {
        'status' : CHD_OK,
        'version' : 0,
        'sha1' : '',
    }
    # --- Open CHD file and read first 124 bytes (the size of a V5 header) ---
    if __debug_this_function: log_debug('_mame_stat_chd() Opening "{}"'.format(chd_path))
    try:
        f = io.open(chd_path, 'rb')
        chd_data_str = f.read(124)
        f.close()
    except IOError as E:
        chd_info['status'] = CHD_BAD_CHD
        return chd_info
    # A file shorter than the largest supported header cannot be a valid CHD.
    # BUG FIX: previously a truncated file raised an uncaught struct.error.
    if len(chd_data_str) < 124:
        chd_info['status'] = CHD_BAD_CHD
        return chd_info
    # --- Check CHD magic string to skip fake files ---
    # BUG FIX: the file is opened in binary mode, so the magic must be compared
    # against bytes. The old str comparison never matched under Python 3 and
    # every CHD was reported as CHD_BAD_CHD.
    if chd_data_str[0:8] != b'MComprHD':
        if __debug_this_function: log_debug('_mame_stat_chd() Magic string not found!')
        chd_info['status'] = CHD_BAD_CHD
        return chd_info
    # --- Parse CHD header ---
    # All values in the CHD header are stored in big endian!
    h_tuple = struct.unpack('>8sII', chd_data_str[0:16])
    tag, length, version = h_tuple
    if __debug_this_function:
        log_debug('_mame_stat_chd() Tag "{}"'.format(tag))
        log_debug('_mame_stat_chd() Length {}'.format(length))
        log_debug('_mame_stat_chd() Version {}'.format(version))
    # Discard very old CHD that don't have SHA1 hash. Older version used MD5.
    if version in (1, 2, 3):
        chd_info['status'] = CHD_BAD_VERSION
        chd_info['version'] = version
        return chd_info
    # Read the whole header (must consider V3, V4 and V5)
    # NOTE In MAME 0.196 some CHDs have version 4, most have version 5, version 3 is obsolete
    if version == 4:
        if __debug_this_function: log_debug('Reading V4 CHD header')
        chd_header_v4_str = '>8sIIIIIQQI20s20s20s'
        header_size = struct.calcsize(chd_header_v4_str)
        t = struct.unpack(chd_header_v4_str, chd_data_str[0:108])
        tag = t[0]
        length = t[1]
        version = t[2]
        flags = t[3]
        compression = t[4]
        totalhunks = t[5]
        logicalbytes = t[6]
        metaoffset = t[7]
        hunkbytes = t[8]
        # BUG FIX: b2a_hex() returns bytes; decode so the documented 'sha1'
        # string can be compared against MAME -listxml SHA1 strings.
        rawsha1 = binascii.b2a_hex(t[9]).decode('ascii')
        sha1 = binascii.b2a_hex(t[10]).decode('ascii')
        parentsha1 = binascii.b2a_hex(t[11]).decode('ascii')
        if __debug_this_function:
            log_debug('V4 header size = {}'.format(header_size))
            log_debug('tag "{}"'.format(tag))
            log_debug('length {}'.format(length))
            log_debug('version {}'.format(version))
            log_debug('flags {}'.format(flags))
            log_debug('compression {}'.format(compression))
            log_debug('totalhunks {}'.format(totalhunks))
            log_debug('logicalbytes {}'.format(logicalbytes))
            log_debug('metaoffset {}'.format(metaoffset))
            log_debug('hunkbytes {}'.format(hunkbytes))
            log_debug('rawsha1 "{}"'.format(rawsha1))
            log_debug('sha1 "{}"'.format(sha1))
            log_debug('parentsha1 "{}"'.format(parentsha1))
        # The CHD SHA1 string stored in MAME -listxml is the rawsha1 field in V4 CHDs.
        chd_info['status'] = CHD_OK
        chd_info['version'] = version
        chd_info['sha1'] = rawsha1
    elif version == 5:
        if __debug_this_function: log_debug('Reading V5 CHD header')
        chd_header_v5_str = '>8sII16sQQQII20s20s20s'
        header_size = struct.calcsize(chd_header_v5_str)
        t = struct.unpack(chd_header_v5_str, chd_data_str)
        tag = t[0]
        length = t[1]
        version = t[2]
        compressors = t[3]
        logicalbytes = t[4]
        mapoffset = t[5]
        metaoffset = t[6]
        hunkbytes = t[7]
        unitbytes = t[8]
        rawsha1 = binascii.b2a_hex(t[9]).decode('ascii')
        sha1 = binascii.b2a_hex(t[10]).decode('ascii')
        parentsha1 = binascii.b2a_hex(t[11]).decode('ascii')
        if __debug_this_function:
            log_debug('V5 header size = {}'.format(header_size))
            log_debug('tag "{}"'.format(tag))
            log_debug('length {}'.format(length))
            log_debug('version {}'.format(version))
            log_debug('compressors "{}"'.format(compressors))
            log_debug('logicalbytes {}'.format(logicalbytes))
            log_debug('mapoffset {}'.format(mapoffset))
            log_debug('metaoffset {}'.format(metaoffset))
            log_debug('hunkbytes {}'.format(hunkbytes))
            log_debug('unitbytes {}'.format(unitbytes))
            log_debug('rawsha1 "{}"'.format(rawsha1))
            log_debug('sha1 "{}"'.format(sha1))
            log_debug('parentsha1 "{}"'.format(parentsha1))
        # The CHD SHA1 string stored in MAME -listxml is the sha1 field (combined raw+meta SHA1).
        chd_info['status'] = CHD_OK
        chd_info['version'] = version
        chd_info['sha1'] = sha1
    else:
        raise TypeError('Unsupported version = {}'.format(version))
    return chd_info
# -------------------------------------------------------------------------------------------------
# Statistic printing
# -------------------------------------------------------------------------------------------------
def mame_info_MAME_print(slist, location, machine_name, machine, assets):
    # Appends a Kodi-colored, human-readable report of one MAME machine to slist.
    #
    # slist -- list of strings, modified in place (one line per field).
    # location -- unused in this function body; presumably identifies which
    #             database the machine came from -- TODO confirm against callers.
    # machine_name -- MAME machine short name (e.g. '88games').
    # machine -- merged Render/Main database dictionary of the machine. Optional
    #            keys ('ver_mame', 'launch_count', ...) only exist for machines
    #            coming from the Favourites/Most Played databases.
    # assets -- machine assets/artwork dictionary.
    #
    # Returns nothing.
    slist.append('[COLOR orange]Machine {} / Render data[/COLOR]'.format(machine_name))
    # Print MAME Favourites special fields
    if 'ver_mame' in machine:
        slist.append("[COLOR slateblue]name[/COLOR]: {}".format(machine['name']))
    if 'ver_mame' in machine:
        slist.append("[COLOR slateblue]ver_mame[/COLOR]: {}".format(machine['ver_mame']))
    if 'ver_mame_str' in machine:
        slist.append("[COLOR slateblue]ver_mame_str[/COLOR]: {}".format(machine['ver_mame_str']))
    # Most Played Favourites special fields
    if 'launch_count' in machine:
        slist.append("[COLOR slateblue]launch_count[/COLOR]: {}".format(text_type(machine['launch_count'])))
    # Standard fields in Render database
    slist.append("[COLOR violet]cloneof[/COLOR]: '{}'".format(machine['cloneof']))
    slist.append("[COLOR violet]description[/COLOR]: '{}'".format(machine['description']))
    slist.append("[COLOR violet]driver_status[/COLOR]: '{}'".format(machine['driver_status']))
    slist.append("[COLOR violet]genre[/COLOR]: '{}'".format(machine['genre']))
    slist.append("[COLOR skyblue]isBIOS[/COLOR]: {}".format(machine['isBIOS']))
    slist.append("[COLOR skyblue]isDevice[/COLOR]: {}".format(machine['isDevice']))
    slist.append("[COLOR skyblue]isMature[/COLOR]: {}".format(machine['isMature']))
    slist.append("[COLOR violet]manufacturer[/COLOR]: '{}'".format(machine['manufacturer']))
    slist.append("[COLOR violet]nplayers[/COLOR]: '{}'".format(machine['nplayers']))
    slist.append("[COLOR violet]year[/COLOR]: '{}'".format(machine['year']))
    # Standard fields in Main database
    slist.append('\n[COLOR orange]Machine Main data[/COLOR]')
    slist.append("[COLOR skyblue]alltime[/COLOR]: {}".format(text_type(machine['alltime'])))
    slist.append("[COLOR skyblue]artwork[/COLOR]: {}".format(text_type(machine['artwork'])))
    slist.append("[COLOR violet]bestgames[/COLOR]: '{}'".format(machine['bestgames']))
    slist.append("[COLOR skyblue]category[/COLOR]: {}".format(text_type(machine['category'])))
    slist.append("[COLOR violet]catlist[/COLOR]: '{}'".format(machine['catlist']))
    slist.append("[COLOR violet]catver[/COLOR]: '{}'".format(machine['catver']))
    slist.append("[COLOR skyblue]chip_cpu_name[/COLOR]: {}".format(text_type(machine['chip_cpu_name'])))
    # --- Devices list is a special case ---
    # Each device is a dictionary; print one indented sub-block per device.
    if machine['devices']:
        for i, device in enumerate(machine['devices']):
            slist.append("[COLOR lime]devices[/COLOR][{}]:".format(i))
            slist.append("  [COLOR violet]att_type[/COLOR]: {}".format(device['att_type']))
            slist.append("  [COLOR violet]att_tag[/COLOR]: {}".format(device['att_tag']))
            slist.append("  [COLOR skyblue]att_mandatory[/COLOR]: {}".format(text_type(device['att_mandatory'])))
            slist.append("  [COLOR violet]att_interface[/COLOR]: {}".format(device['att_interface']))
            slist.append("  [COLOR skyblue]instance[/COLOR]: {}".format(text_type(device['instance'])))
            slist.append("  [COLOR skyblue]ext_names[/COLOR]: {}".format(text_type(device['ext_names'])))
    else:
        slist.append("[COLOR lime]devices[/COLOR]: []")
    slist.append("[COLOR skyblue]display_height[/COLOR]: {}".format(text_type(machine['display_height'])))
    slist.append("[COLOR skyblue]display_refresh[/COLOR]: {}".format(text_type(machine['display_refresh'])))
    slist.append("[COLOR skyblue]display_rotate[/COLOR]: {}".format(text_type(machine['display_rotate'])))
    slist.append("[COLOR skyblue]display_type[/COLOR]: {}".format(text_type(machine['display_type'])))
    slist.append("[COLOR skyblue]display_width[/COLOR]: {}".format(text_type(machine['display_width'])))
    slist.append("[COLOR violet]genre[/COLOR]: '{}'".format(machine['genre']))
    # --- input is a special case ---
    # Attributes first, then one indented sub-block per control tag.
    if machine['input']:
        # Print attributes
        slist.append("[COLOR lime]input[/COLOR]:")
        slist.append("  [COLOR skyblue]att_coins[/COLOR]: {}".format(text_type(machine['input']['att_coins'])))
        slist.append("  [COLOR skyblue]att_players[/COLOR]: {}".format(text_type(machine['input']['att_players'])))
        slist.append("  [COLOR skyblue]att_service[/COLOR]: {}".format(text_type(machine['input']['att_service'])))
        slist.append("  [COLOR skyblue]att_tilt[/COLOR]: {}".format(text_type(machine['input']['att_tilt'])))
        # Print control tag list
        for i, control in enumerate(machine['input']['control_list']):
            slist.append("[COLOR lime]control[/COLOR][{}]:".format(i))
            slist.append("  [COLOR violet]type[/COLOR]: {}".format(control['type']))
            slist.append("  [COLOR skyblue]player[/COLOR]: {}".format(text_type(control['player'])))
            slist.append("  [COLOR skyblue]buttons[/COLOR]: {}".format(text_type(control['buttons'])))
            slist.append("  [COLOR skyblue]ways[/COLOR]: {}".format(text_type(control['ways'])))
    else:
        slist.append("[COLOR lime]input[/COLOR]: []")
    slist.append("[COLOR skyblue]isDead[/COLOR]: {}".format(text_type(machine['isDead'])))
    slist.append("[COLOR skyblue]isMechanical[/COLOR]: {}".format(text_type(machine['isMechanical'])))
    slist.append("[COLOR violet]romof[/COLOR]: '{}'".format(machine['romof']))
    slist.append("[COLOR violet]sampleof[/COLOR]: '{}'".format(machine['sampleof']))
    slist.append("[COLOR skyblue]series[/COLOR]: '{}'".format(machine['series']))
    slist.append("[COLOR skyblue]softwarelists[/COLOR]: {}".format(text_type(machine['softwarelists'])))
    slist.append("[COLOR violet]sourcefile[/COLOR]: '{}'".format(machine['sourcefile']))
    slist.append("[COLOR violet]veradded[/COLOR]: '{}'".format(machine['veradded']))
    slist.append('\n[COLOR orange]Machine assets/artwork[/COLOR]')
    slist.append("[COLOR violet]3dbox[/COLOR]: '{}'".format(assets['3dbox']))
    slist.append("[COLOR violet]artpreview[/COLOR]: '{}'".format(assets['artpreview']))
    slist.append("[COLOR violet]artwork[/COLOR]: '{}'".format(assets['artwork']))
    slist.append("[COLOR violet]cabinet[/COLOR]: '{}'".format(assets['cabinet']))
    slist.append("[COLOR violet]clearlogo[/COLOR]: '{}'".format(assets['clearlogo']))
    slist.append("[COLOR violet]cpanel[/COLOR]: '{}'".format(assets['cpanel']))
    slist.append("[COLOR violet]fanart[/COLOR]: '{}'".format(assets['fanart']))
    slist.append("[COLOR violet]flags[/COLOR]: '{}'".format(assets['flags']))
    slist.append("[COLOR violet]flyer[/COLOR]: '{}'".format(assets['flyer']))
    slist.append("[COLOR violet]history[/COLOR]: '{}'".format(assets['history']))
    slist.append("[COLOR violet]manual[/COLOR]: '{}'".format(assets['manual']))
    slist.append("[COLOR violet]marquee[/COLOR]: '{}'".format(assets['marquee']))
    slist.append("[COLOR violet]PCB[/COLOR]: '{}'".format(assets['PCB']))
    slist.append("[COLOR violet]plot[/COLOR]: '{}'".format(assets['plot']))
    slist.append("[COLOR violet]snap[/COLOR]: '{}'".format(assets['snap']))
    slist.append("[COLOR violet]title[/COLOR]: '{}'".format(assets['title']))
    slist.append("[COLOR violet]trailer[/COLOR]: '{}'".format(assets['trailer']))
def mame_info_SL_print(slist, location, SL_name, SL_ROM, rom, assets, SL_dic, SL_machine_list):
    """Fill slist with a textual report about one Software List item.

    The report covers the SL ROM database entry, its assets, the parent
    Software List and the machines able to run the item. slist is modified
    in place (it is later joined with '\n'.join(slist)).
    """
    # Reusable line templates for the various field colours.
    V_QUOTED = "[COLOR violet]{}[/COLOR]: '{}'"
    B_QUOTED = "[COLOR slateblue]{}[/COLOR]: '{}'"
    B_PLAIN  = "[COLOR slateblue]{}[/COLOR]: {}"
    S_PLAIN  = "[COLOR skyblue]{}[/COLOR]: {}"

    # --- SL ROM database fields ---
    slist.append('[COLOR orange]Software List {} Item {}[/COLOR]'.format(SL_name, SL_ROM))
    # Optional fields only present in Favourite/Most Played objects.
    for key in ('SL_DB_key', 'SL_ROM_name', 'SL_name'):
        if key in rom:
            slist.append(B_QUOTED.format(key, rom[key]))
    slist.append(V_QUOTED.format('cloneof', rom['cloneof']))
    slist.append(V_QUOTED.format('description', rom['description']))
    slist.append(S_PLAIN.format('hasCHDs', text_type(rom['hasCHDs'])))
    slist.append(S_PLAIN.format('hasROMs', text_type(rom['hasROMs'])))
    if 'launch_count' in rom:
        slist.append(B_QUOTED.format('launch_count', text_type(rom['launch_count'])))
    if 'launch_machine' in rom:
        slist.append(B_QUOTED.format('launch_machine', rom['launch_machine']))
    if rom['parts']:
        for part_idx, part_dic in enumerate(rom['parts']):
            slist.append("[COLOR lime]parts[/COLOR][{}]:".format(part_idx))
            slist.append("  [COLOR violet]interface[/COLOR]: '{}'".format(part_dic['interface']))
            slist.append("  [COLOR violet]name[/COLOR]: '{}'".format(part_dic['name']))
    else:
        slist.append('[COLOR lime]parts[/COLOR]: []')
    for key in ('plot', 'publisher', 'status_CHD', 'status_ROM'):
        slist.append(V_QUOTED.format(key, rom[key]))
    for key in ('ver_mame', 'ver_mame_str'):
        if key in rom:
            slist.append(B_PLAIN.format(key, rom[key]))
    slist.append(V_QUOTED.format('year', rom['year']))

    # --- SL item assets ---
    slist.append('\n[COLOR orange]Software List assets[/COLOR]')
    for key in ('3dbox', 'title', 'snap', 'boxfront', 'fanart', 'trailer', 'manual'):
        slist.append(V_QUOTED.format(key, assets[key]))

    # --- Parent Software List information ---
    slist.append('\n[COLOR orange]Software List {}[/COLOR]'.format(SL_name))
    slist.append(V_QUOTED.format('display_name', SL_dic['display_name']))
    slist.append(S_PLAIN.format('num_with_CHDs', text_type(SL_dic['num_with_CHDs'])))
    slist.append(S_PLAIN.format('num_with_ROMs', text_type(SL_dic['num_with_ROMs'])))
    slist.append(V_QUOTED.format('rom_DB_noext', SL_dic['rom_DB_noext']))

    # --- Machines that can run this item, sorted by description ---
    slist.append('\n[COLOR orange]Runnable by[/COLOR]')
    runner_line = "[COLOR violet]machine[/COLOR]: '{}' [COLOR slateblue]{}[/COLOR]"
    for m_dic in sorted(SL_machine_list, key = lambda m: m['description'].lower()):
        slist.append(runner_line.format(m_dic['description'], m_dic['machine']))
# slist is a list of strings that will be joined with '\n'.join(slist).
# slist is a list, so it is mutable and is modified in place (passed by reference).
def mame_stats_main_print_slist(cfg, slist, control_dic, XML_ctrl_dic):
    """Append the main statistics report to slist (modified in place).

    Reports AML/database/MAME versions, MAME XML control file information,
    MAME machine counts by category, machine driver status statistics and,
    when Software Lists are enabled, SL item counts.
    """
    settings = cfg.settings
    ctrl = control_dic
    SL_str = 'enabled' if settings['global_enable_SL'] else 'disabled'
    slist.append('[COLOR orange]Main information[/COLOR]')
    slist.append('AML version {:,} [COLOR violet]{}[/COLOR]'.format(
        cfg.addon_version_int, cfg.addon.info_version))
    slist.append('Database version {:,} [COLOR violet]{}[/COLOR]'.format(
        ctrl['ver_AML_int'], ctrl['ver_AML_str']))
    slist.append('MAME version {:,} [COLOR violet]{}[/COLOR]'.format(
        ctrl['ver_mame_int'], ctrl['ver_mame_str']))
    slist.append('Operation mode [COLOR violet]{:s}[/COLOR]'.format(settings['op_mode']))
    slist.append('Software Lists [COLOR violet]{:s}[/COLOR]'.format(SL_str))
    # Information in the MAME XML control file.
    if XML_ctrl_dic['t_XML_extraction']:
        slist.append('XML extraction time {}'.format(misc_time_to_str(XML_ctrl_dic['t_XML_extraction'])))
    else:
        # BUGFIX: message previously read 'no extracted'.
        slist.append('XML extraction time {}'.format('not extracted'))
    if XML_ctrl_dic['st_mtime']:
        slist.append('XML modification time {}'.format(misc_time_to_str(XML_ctrl_dic['st_mtime'])))
    else:
        # BUGFIX: label previously said 'XML extraction time' (copy-paste error).
        slist.append('XML modification time {}'.format('undefined'))
    if XML_ctrl_dic['t_XML_preprocessing']:
        slist.append('XML preprocess time {}'.format(misc_time_to_str(XML_ctrl_dic['t_XML_preprocessing'])))
    else:
        # BUGFIX: label previously said 'XML extraction time' (copy-paste error).
        slist.append('XML preprocess time {}'.format('undefined'))
    slist.append('XML size {:,} bytes'.format(XML_ctrl_dic['st_size']))
    slist.append('XML machine count {:,} machines'.format(XML_ctrl_dic['total_machines']))
    slist.append('')

    # --- MAME machine count table (Total/Parent/Clones per category) ---
    slist.append('[COLOR orange]MAME machine count[/COLOR]')
    table_str = [
        ['left', 'right', 'right', 'right'],
        ['Type', 'Total', 'Parent', 'Clones'],
    ]
    # (row label, total key, parents key, clones key). The 'Machines' row uses
    # irregular key names so keys are listed explicitly instead of derived.
    count_rows = [
        ('Machines', 'stats_processed_machines', 'stats_parents', 'stats_clones'),
        ('Runnable', 'stats_runnable', 'stats_runnable_parents', 'stats_runnable_clones'),
        ('Coin', 'stats_coin', 'stats_coin_parents', 'stats_coin_clones'),
        ('Nocoin', 'stats_nocoin', 'stats_nocoin_parents', 'stats_nocoin_clones'),
        ('Mechanical', 'stats_mechanical', 'stats_mechanical_parents', 'stats_mechanical_clones'),
        ('Dead', 'stats_dead', 'stats_dead_parents', 'stats_dead_clones'),
        ('Devices', 'stats_devices', 'stats_devices_parents', 'stats_devices_clones'),
        # Binary filters.
        ('BIOS', 'stats_BIOS', 'stats_BIOS_parents', 'stats_BIOS_clones'),
        ('Samples', 'stats_samples', 'stats_samples_parents', 'stats_samples_clones'),
    ]
    for label, t_key, p_key, c_key in count_rows:
        table_str.append([
            label,
            '{:6,d}'.format(control_dic[t_key]),
            '{:6,d}'.format(control_dic[p_key]),
            '{:6,d}'.format(control_dic[c_key]),
        ])
    slist.extend(text_render_table(table_str))
    slist.append('')

    # --- MAME driver status statistics (parents/total pairs per column) ---
    slist.append('[COLOR orange]MAME machine statistics[/COLOR]')
    table_str = [
        ['left', 'right', 'right', 'right', 'right', 'right', 'right', 'right', 'right'],
        ['Type (parents/total)', 'Total', '', 'Good', '', 'Imperfect', '', 'Nonworking', ''],
    ]
    # Rows follow the key pattern stats_MF_{filter}_{status}[_parents].
    mf_rows = [
        ('Coin slot (Normal)', 'Normal'),
        ('Coin slot (Unusual)', 'Unusual'),
        ('No coin slot', 'Nocoin'),
        ('Mechanical machines', 'Mechanical'),
        ('Dead machines', 'Dead'),
    ]
    for label, mf_name in mf_rows:
        row = [label]
        for status in ('Total', 'Good', 'Imperfect', 'Nonworking'):
            row.append('{:,}'.format(control_dic['stats_MF_{}_{}_parents'.format(mf_name, status)]))
            row.append('{:,}'.format(control_dic['stats_MF_{}_{}'.format(mf_name, status)]))
        table_str.append(row)
    # Device machines have no driver status.
    table_str.append(['Device machines',
        '{:,}'.format(control_dic['stats_devices_parents']),
        '{:,}'.format(control_dic['stats_devices']),
        'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A'])
    slist.extend(text_render_table(table_str))

    # --- Software List item counts (only if SLs are enabled) ---
    if settings['global_enable_SL']:
        slist.append('\n[COLOR orange]Software Lists item count[/COLOR]')
        slist.append("SL XML files {:7,d}".format(control_dic['stats_SL_XML_files']))
        slist.append("SL software items {:7,d}".format(control_dic['stats_SL_software_items']))
        slist.append("SL items with ROMs {:7,d}".format(control_dic['stats_SL_items_with_ROMs']))
        slist.append("SL items with CHDs {:7,d}".format(control_dic['stats_SL_items_with_CHDs']))
def mame_stats_scanner_print_slist(cfg, slist, control_dic):
    """Append the MAME and Software List scanner reports to slist (in place).

    Covers ROM/Sample/CHD archive scanning, machine runnability, and the
    MAME and SL asset scanners. SL sections appear only when Software Lists
    are enabled in the addon settings.
    """
    settings = cfg.settings

    # --- MAME archive scanner (file counts) ---
    slist.append('[COLOR orange]MAME scanner information[/COLOR]')
    t_str = [
        ['left', 'right', 'right', 'right'],
        ['Stat', 'Total', 'Have', 'Missing'],
    ]
    # Rows follow the key pattern {prefix}_total / _have / _missing.
    for row_name, prefix in [
        ('ROM ZIP files', 'scan_ROM_ZIP_files'),
        ('Sample ZIP files', 'scan_Samples_ZIP'),
        ('CHD files', 'scan_CHD_files'),
    ]:
        t_str.append([row_name,
            '{:,}'.format(control_dic[prefix + '_total']),
            '{:,}'.format(control_dic[prefix + '_have']),
            '{:,}'.format(control_dic[prefix + '_missing'])])
    slist.extend(text_render_table(t_str))
    slist.append('')

    # --- MAME machine runnability ---
    t_str = [
        ['left', 'right', 'right', 'right'],
        ['Stat', 'Can run', 'Out of', 'Unrunnable'],
    ]
    for row_name, prefix in [
        ('ROM machines', 'scan_machine_archives_ROM'),
        ('Sample machines', 'scan_machine_archives_Samples'),
        ('CHD machines', 'scan_machine_archives_CHD'),
    ]:
        t_str.append([row_name,
            '{:,}'.format(control_dic[prefix + '_have']),
            '{:,}'.format(control_dic[prefix + '_total']),
            '{:,}'.format(control_dic[prefix + '_missing']),
        ])
    slist.extend(text_render_table(t_str))

    # --- SL archive scanner ---
    if settings['global_enable_SL']:
        slist.append('')
        slist.append('[COLOR orange]Software List scanner information[/COLOR]')
        t_str = [
            ['left', 'right', 'right', 'right'],
            ['Stat', 'Total', 'Have', 'Missing'],
        ]
        for row_name, prefix in [
            ('SL ROMs', 'scan_SL_archives_ROM'),
            ('SL CHDs', 'scan_SL_archives_CHD'),
        ]:
            t_str.append([row_name,
                '{:,}'.format(control_dic[prefix + '_total']),
                '{:,}'.format(control_dic[prefix + '_have']),
                '{:,}'.format(control_dic[prefix + '_missing']),
            ])
        slist.extend(text_render_table(t_str))

    # --- MAME asset scanner ---
    slist.append('')
    slist.append('[COLOR orange]MAME asset scanner information[/COLOR]')
    t_str = [
        ['left', 'right', 'right', 'right'],
        ['Stat', 'Have', 'Missing', 'Alternate'],
    ]
    # Rows follow the key pattern assets_{name}_have / _missing / _alternate.
    for row_name, asset_name in [
        ('3D Boxes', '3dbox'),
        ('Artpreviews', 'artpreview'),
        ('Artwork', 'artwork'),
        ('Cabinets', 'cabinets'),
        ('Clearlogos', 'clearlogos'),
        ('CPanels', 'cpanels'),
        ('Fanart', 'fanarts'),
        ('Flyers', 'flyers'),
        ('Manuals', 'manuals'),
        ('Marquees', 'marquees'),
        ('PCBs', 'PCBs'),
        ('Snaps', 'snaps'),
        ('Titles', 'titles'),
        ('Trailers', 'trailers'),
    ]:
        t_str.append([row_name,
            '{:,}'.format(control_dic['assets_{}_have'.format(asset_name)]),
            '{:,}'.format(control_dic['assets_{}_missing'.format(asset_name)]),
            '{:,}'.format(control_dic['assets_{}_alternate'.format(asset_name)]),
        ])
    slist.extend(text_render_table(t_str))

    # --- SL asset scanner ---
    if settings['global_enable_SL']:
        slist.append('')
        slist.append('[COLOR orange]Software List asset scanner information[/COLOR]')
        t_str = [
            ['left', 'right', 'right', 'right'],
            ['Stat', 'Have', 'Missing', 'Alternate'],
        ]
        # Rows follow the key pattern assets_SL_{name}_have / _missing / _alternate.
        for row_name, asset_name in [
            ('3D Boxes', '3dbox'),
            ('Titles', 'titles'),
            ('Snaps', 'snaps'),
            ('Boxfronts', 'boxfronts'),
            ('Fanarts', 'fanarts'),
            ('Trailers', 'trailers'),
            ('Manuals', 'manuals'),
        ]:
            t_str.append([row_name,
                '{:,}'.format(control_dic['assets_SL_{}_have'.format(asset_name)]),
                '{:,}'.format(control_dic['assets_SL_{}_missing'.format(asset_name)]),
                '{:,}'.format(control_dic['assets_SL_{}_alternate'.format(asset_name)]),
            ])
        slist.extend(text_render_table(t_str))
def mame_stats_audit_print_slist(cfg, slist, control_dic):
    """Append the MAME and SL audit database statistics report to slist (in place).

    Covers audit database counts (machines requiring ROMs/CHDs/Samples) and
    the audit results summary tables. SL sections appear only when Software
    Lists are enabled in the addon settings.
    """
    settings = cfg.settings
    set_names = ['Merged', 'Split', 'Non-merged']
    rom_set = set_names[settings['mame_rom_set']]
    chd_set = set_names[settings['mame_chd_set']]

    # --- MAME audit database statistics ---
    slist.append('[COLOR orange]MAME ROM audit database statistics[/COLOR]')
    slist.append("{:7,d} runnable MAME machines".format(
        control_dic['stats_audit_MAME_machines_runnable']))
    slist.append("{:7,d} machines require ROM ZIPs, {:7,d} parents and {:7,d} clones".format(
        control_dic['stats_audit_machine_archives_ROM'],
        control_dic['stats_audit_machine_archives_ROM_parents'],
        control_dic['stats_audit_machine_archives_ROM_clones']))
    slist.append("{:7,d} machines require CHDs, {:7,d} parents and {:7,d} clones".format(
        control_dic['stats_audit_machine_archives_CHD'],
        control_dic['stats_audit_machine_archives_CHD_parents'],
        control_dic['stats_audit_machine_archives_CHD_clones']))
    slist.append("{:7,d} machines require Samples, {:7,d} parents and {:7,d} clones".format(
        control_dic['stats_audit_machine_archives_Samples'],
        control_dic['stats_audit_machine_archives_Samples_parents'],
        control_dic['stats_audit_machine_archives_Samples_clones']))
    slist.append("{:7,d} machines require nothing, {:7,d} parents and {:7,d} clones".format(
        control_dic['stats_audit_archive_less'],
        control_dic['stats_audit_archive_less_parents'],
        control_dic['stats_audit_archive_less_clones']))
    slist.append("{:7,d} ROM ZIPs in the [COLOR darkorange]{}[/COLOR] set".format(
        control_dic['stats_audit_MAME_ROM_ZIP_files'], rom_set))
    slist.append("{:7,d} CHDs in the [COLOR darkorange]{}[/COLOR] set".format(
        control_dic['stats_audit_MAME_CHD_files'], chd_set))
    slist.append("{:7,d} Sample ZIPs in the [COLOR darkorange]{}[/COLOR] set".format(
        control_dic['stats_audit_MAME_Sample_ZIP_files'], rom_set))
    slist.append("{:7,d} total ROMs, {:7,d} valid and {:7,d} invalid".format(
        control_dic['stats_audit_ROMs_total'],
        control_dic['stats_audit_ROMs_valid'],
        control_dic['stats_audit_ROMs_invalid'],
    ))
    slist.append("{:7,d} total CHDs, {:7,d} valid and {:7,d} invalid".format(
        control_dic['stats_audit_CHDs_total'],
        control_dic['stats_audit_CHDs_valid'],
        control_dic['stats_audit_CHDs_invalid'],
    ))

    # --- SL item audit database statistics ---
    if settings['global_enable_SL']:
        slist.append('\n[COLOR orange]SL audit database statistics[/COLOR]')
        slist.append("{:7,d} runnable Software List items".format(
            control_dic['stats_audit_SL_items_runnable']))
        slist.append("{:7,d} SL items require ROM ZIPs and/or CHDs".format(
            control_dic['stats_audit_SL_items_with_arch']))
        slist.append("{:7,d} SL items require ROM ZIPs".format(
            control_dic['stats_audit_SL_items_with_arch_ROM']))
        slist.append("{:7,d} SL items require CHDs".format(
            control_dic['stats_audit_SL_items_with_CHD']))

    # --- MAME audit summary table ---
    slist.append('\n[COLOR orange]MAME ROM audit information[/COLOR]')
    table_str = [
        ['left', 'right', 'right', 'right'],
        ['Type', 'Total', 'Good', 'Bad'],
    ]
    # Rows follow the key pattern {base} / {base}_OK / {base}_BAD.
    for row_name, base_key in [
        ('Machines with ROMs and/or CHDs', 'audit_MAME_machines_with_arch'),
        ('Machines with ROMs', 'audit_MAME_machines_with_ROMs'),
        ('Machines with CHDs', 'audit_MAME_machines_with_CHDs'),
        ('Machines with Samples', 'audit_MAME_machines_with_SAMPLES'),
    ]:
        table_str.append([
            row_name,
            '{:,d}'.format(control_dic[base_key]),
            '{:,d}'.format(control_dic[base_key + '_OK']),
            '{:,d}'.format(control_dic[base_key + '_BAD']),
        ])
    slist.extend(text_render_table(table_str))

    # --- SL audit summary table ---
    if settings['global_enable_SL']:
        slist.append('\n[COLOR orange]SL audit information[/COLOR]')
        table_str = [
            ['left', 'right', 'right', 'right'],
            ['Type', 'Total', 'Good', 'Bad'],
        ]
        for row_name, base_key in [
            ('SL items with ROMs and/or CHDs', 'audit_SL_items_with_arch'),
            ('SL items with ROMs', 'audit_SL_items_with_arch_ROM'),
            ('SL items with CHDs', 'audit_SL_items_with_CHD'),
        ]:
            table_str.append([
                row_name,
                '{:,d}'.format(control_dic[base_key]),
                '{:,d}'.format(control_dic[base_key + '_OK']),
                '{:,d}'.format(control_dic[base_key + '_BAD']),
            ])
        slist.extend(text_render_table(table_str))
def mame_stats_timestamps_slist(cfg, slist, control_dic):
    """Append DAT/INI file versions and database build/scan timestamps to slist.

    slist is modified in place. Note that in some DAT/INI files the version
    string is not available. Fixes the user-visible "scaned" typo (now
    "scanned"); removes the unused local 'settings'.
    """
    def _stamp(key, done_tmpl, never_msg):
        # Print the event timestamp if it ever happened, otherwise a "never" line.
        if control_dic[key]:
            slist.append(done_tmpl.format(misc_time_to_str(control_dic[key])))
        else:
            slist.append(never_msg)

    # DAT/INI file versions. Note that in some DAT/INIs the version is not available.
    slist.append('[COLOR orange]DAT/INI versions[/COLOR]')
    slist.append("Alltime.ini version {}".format(control_dic['ver_alltime']))
    slist.append("Artwork.ini version {}".format(control_dic['ver_artwork']))
    slist.append("bestgames.ini version {}".format(control_dic['ver_bestgames']))
    slist.append("Category.ini version {}".format(control_dic['ver_category']))
    slist.append("catlist.ini version {}".format(control_dic['ver_catlist']))
    slist.append("catver.ini version {}".format(control_dic['ver_catver']))
    slist.append("command.dat version {}".format(control_dic['ver_command']))
    slist.append("gameinit.dat version {}".format(control_dic['ver_gameinit']))
    slist.append("genre.ini version {}".format(control_dic['ver_genre']))
    slist.append("history.dat version {}".format(control_dic['ver_history']))
    slist.append("mameinfo.dat version {}".format(control_dic['ver_mameinfo']))
    slist.append("mature.ini version {}".format(control_dic['ver_mature']))
    slist.append("nplayers.ini version {}".format(control_dic['ver_nplayers']))
    slist.append("series.ini version {}".format(control_dic['ver_series']))
    # Timestamps ordered if user selects "All in one step"
    slist.append('')
    slist.append('[COLOR orange]Timestamps[/COLOR]')
    # MAME and SL databases.
    _stamp('t_MAME_DB_build', "MAME DB built on {}", "MAME DB never built")
    _stamp('t_MAME_Audit_DB_build', "MAME Audit DB built on {}", "MAME Audit DB never built")
    _stamp('t_MAME_Catalog_build', "MAME Catalog built on {}", "MAME Catalog never built")
    _stamp('t_SL_DB_build', "SL DB built on {}", "SL DB never built")
    # MAME and SL scanner. BUGFIX: messages previously read "scaned".
    _stamp('t_MAME_ROMs_scan', "MAME ROMs scanned on {}", "MAME ROMs never scanned")
    _stamp('t_MAME_assets_scan', "MAME assets scanned on {}", "MAME assets never scanned")
    _stamp('t_SL_ROMs_scan', "SL ROMs scanned on {}", "SL ROMs never scanned")
    _stamp('t_SL_assets_scan', "SL assets scanned on {}", "SL assets never scanned")
    # Plots, Fanarts and 3D Boxes.
    _stamp('t_MAME_plots_build', "MAME Plots built on {}", "MAME Plots never built")
    _stamp('t_SL_plots_build', "SL Plots built on {}", "SL Plots never built")
    _stamp('t_MAME_fanart_build', "MAME Fanarts built on {}", "MAME Fanarts never built")
    _stamp('t_SL_fanart_build', "SL Fanarts built on {}", "SL Fanarts never built")
    _stamp('t_MAME_3dbox_build', "MAME 3D Boxes built on {}", "MAME 3D Boxes never built")
    _stamp('t_SL_3dbox_build', "SL 3D Boxes built on {}", "SL 3D Boxes never built")
    # MAME machine hash, asset hash, render cache and asset cache.
    _stamp('t_MAME_machine_hash', "MAME machine hash built on {}", "MAME machine hash never built")
    _stamp('t_MAME_asset_hash', "MAME asset hash built on {}", "MAME asset hash never built")
    _stamp('t_MAME_render_cache_build', "MAME render cache built on {}", "MAME render cache never built")
    _stamp('t_MAME_asset_cache_build', "MAME asset cache built on {}", "MAME asset cache never built")
    # Custom filters.
    _stamp('t_Custom_Filter_build', "Custom filters built on {}", "Custom filters never built")
    # Audit stuff.
    _stamp('t_MAME_audit', "MAME ROMs audited on {}", "MAME ROMs never audited")
    _stamp('t_SL_audit', "SL ROMs audited on {}", "SL ROMs never audited")
# -------------------------------------------------------------------------------------------------
# Check/Update/Repair Favourite ROM objects
# -------------------------------------------------------------------------------------------------
def mame_update_MAME_Fav_objects(cfg, db_dic):
    """Check and update every MAME Favourite machine object against the
    current MAME database, then save the Favourites JSON file.

    Favourites pointing to machines no longer in the database get placeholder
    objects with a warning plot so the user can delete them later.
    """
    control_dic = db_dic['control_dic']
    machines = db_dic['machines']
    renderdb_dic = db_dic['renderdb']
    assets_dic = db_dic['assetdb']
    fav_machines = utils_load_JSON_file(cfg.FAV_MACHINES_PATH.getPath())
    # Nothing to do if there are no Favourites.
    if not fav_machines:
        kodi_notify('MAME Favourites empty')
        return
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Checking/Updating MAME Favourites...', len(fav_machines))
    for iteration, fav_key in enumerate(sorted(fav_machines), start = 1):
        log_debug('Checking machine "{}"'.format(fav_key))
        if fav_key in machines:
            machine = machines[fav_key]
            render = renderdb_dic[fav_key]
            assets = assets_dic[fav_key]
        else:
            # Machine not found in DB. Create an empty one to update the database
            # fields. The user can delete it later.
            log_debug('Machine "{}" not found in MAME main DB'.format(fav_key))
            machine = db_new_machine_dic()
            render = db_new_machine_render_dic()
            assets = db_new_MAME_asset()
            # Change plot to warn user this machine is not found in database.
            warn_t = 'Machine {} missing'.format(fav_key)
            render['description'] = warn_t
            assets['plot'] = warn_t
        fav_machines[fav_key] = db_get_MAME_Favourite_full(fav_key, machine, render, assets, control_dic)
        log_debug('Updated machine "{}"'.format(fav_key))
        pDialog.updateProgress(iteration)
    utils_write_JSON_file(cfg.FAV_MACHINES_PATH.getPath(), fav_machines)
    pDialog.endProgress()
def mame_update_MAME_MostPlay_objects(cfg, db_dic):
    """Check and update the MAME Most Played machine objects against the
    current MAME database, preserving each entry's launch count, then save
    the Most Played JSON file."""
    control_dic = db_dic['control_dic']
    machines = db_dic['machines']
    renderdb_dic = db_dic['renderdb']
    assets_dic = db_dic['assetdb']
    most_played_roms_dic = utils_load_JSON_file(cfg.MAME_MOST_PLAYED_FILE_PATH.getPath())
    # Nothing to do if there are no Most Played machines.
    if not most_played_roms_dic:
        kodi_notify('MAME Most Played empty')
        return
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Checking/Updating MAME Most Played machines...', len(most_played_roms_dic))
    for iteration, fav_key in enumerate(sorted(most_played_roms_dic), start = 1):
        log_debug('Checking machine "{}"'.format(fav_key))
        # Keep the launch count of the old object (default 1 if field missing).
        launch_count = most_played_roms_dic[fav_key].get('launch_count', 1)
        if fav_key in machines:
            machine = machines[fav_key]
            render = renderdb_dic[fav_key]
            assets = assets_dic[fav_key]
        else:
            # Machine not in the DB. Build placeholder objects with a warning plot.
            log_debug('Machine "{}" not found in MAME main DB'.format(fav_key))
            machine = db_new_machine_dic()
            render = db_new_machine_render_dic()
            assets = db_new_MAME_asset()
            warn_t = 'Machine {} missing'.format(fav_key)
            render['description'] = warn_t
            assets['plot'] = warn_t
        new_fav = db_get_MAME_Favourite_full(fav_key, machine, render, assets, control_dic)
        new_fav['launch_count'] = launch_count
        most_played_roms_dic[fav_key] = new_fav
        log_debug('Updated machine "{}"'.format(fav_key))
        pDialog.updateProgress(iteration)
    utils_write_JSON_file(cfg.MAME_MOST_PLAYED_FILE_PATH.getPath(), most_played_roms_dic)
    pDialog.endProgress()
def mame_update_MAME_RecentPlay_objects(cfg, db_dic):
    """Check and update the MAME Recently Played machine objects against the
    current MAME database, then save the Recently Played JSON file."""
    control_dic = db_dic['control_dic']
    machines = db_dic['machines']
    renderdb_dic = db_dic['renderdb']
    assets_dic = db_dic['assetdb']
    recent_roms_list = utils_load_JSON_file(cfg.MAME_RECENT_PLAYED_FILE_PATH.getPath(), [])
    # Nothing to do if there are no Recently Played machines.
    if not recent_roms_list:
        kodi_notify('MAME Recently Played empty')
        return
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Checking/Updating MAME Recently Played machines...', len(recent_roms_list))
    for index, recent_rom in enumerate(recent_roms_list):
        fav_key = recent_rom['name']
        log_debug('Checking machine "{}"'.format(fav_key))
        if fav_key in machines:
            machine = machines[fav_key]
            render = renderdb_dic[fav_key]
            assets = assets_dic[fav_key]
        else:
            # Machine not in the DB. Build placeholder objects with a warning plot.
            log_debug('Machine "{}" not found in MAME main DB'.format(fav_key))
            machine = db_new_machine_dic()
            render = db_new_machine_render_dic()
            assets = db_new_MAME_asset()
            warn_t = 'Machine {} missing'.format(fav_key)
            render['description'] = warn_t
            assets['plot'] = warn_t
        recent_roms_list[index] = db_get_MAME_Favourite_full(fav_key, machine, render, assets, control_dic)
        log_debug('Updated machine "{}"'.format(fav_key))
        pDialog.updateProgress(index + 1)
    utils_write_JSON_file(cfg.MAME_RECENT_PLAYED_FILE_PATH.getPath(), recent_roms_list)
    pDialog.endProgress()
def mame_update_SL_Fav_objects(cfg, db_dic):
    """Refresh the SL Favourites database with current Software List data.

    Every SL Favourite is re-read from the per-SL item/asset databases so
    stale favourites pick up changes after a database rebuild. Items no
    longer present in the SL databases are replaced with empty placeholder
    objects whose plot warns the user, so they can be deleted manually later.

    cfg    -- addon configuration object (provides file paths).
    db_dic -- dictionary with the open databases ('control_dic', 'SL_index').
    """
    control_dic = db_dic['control_dic']
    SL_index = db_dic['SL_index']
    pDialog = KodiProgressDialog()
    # BUG FIX: the dialog and notification texts said "SL Most Played"
    # (copy-paste from mame_update_SL_MostPlay_objects) although this function
    # updates the SL Favourites database (FAV_SL_ROMS_PATH).
    pDialog.startProgress('Loading SL Favourites JSON DB...')
    fav_SL_roms = utils_load_JSON_file(cfg.FAV_SL_ROMS_PATH.getPath())
    if len(fav_SL_roms) < 1:
        kodi_notify_warn('SL Favourites empty')
        return
    pDialog.resetProgress('Checking SL Favourites', len(fav_SL_roms))
    for fav_SL_key in sorted(fav_SL_roms):
        # Favourites created by older versions use the key 'SL_ROM_name'.
        if 'ROM_name' in fav_SL_roms[fav_SL_key]:
            fav_ROM_name = fav_SL_roms[fav_SL_key]['ROM_name']
        elif 'SL_ROM_name' in fav_SL_roms[fav_SL_key]:
            fav_ROM_name = fav_SL_roms[fav_SL_key]['SL_ROM_name']
        else:
            raise TypeError('Cannot find SL ROM name')
        fav_SL_name = fav_SL_roms[fav_SL_key]['SL_name']
        log_debug('Checking SL Favourite "{}" / "{}"'.format(fav_SL_name, fav_ROM_name))
        pDialog.updateProgressInc('Checking SL Favourites...\nItem "{}"'.format(fav_ROM_name))
        # --- Load SL ROMs DB and assets ---
        file_name = SL_index[fav_SL_name]['rom_DB_noext'] + '_items.json'
        SL_DB_FN = cfg.SL_DB_DIR.pjoin(file_name)
        assets_file_name = SL_index[fav_SL_name]['rom_DB_noext'] + '_assets.json'
        SL_asset_DB_FN = cfg.SL_DB_DIR.pjoin(assets_file_name)
        SL_roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
        SL_assets_dic = utils_load_JSON_file(SL_asset_DB_FN.getPath(), verbose = False)
        # --- Check ---
        if fav_ROM_name in SL_roms:
            SL_ROM = SL_roms[fav_ROM_name]
            SL_assets = SL_assets_dic[fav_ROM_name]
        else:
            # Machine not found in DB. Create an empty one to update the database fields.
            # The user can delete it later.
            log_debug('Machine "{}" / "{}" not found in SL main DB'.format(fav_ROM_name, fav_SL_name))
            SL_ROM = db_new_SL_ROM()
            SL_assets = db_new_SL_asset()
            # Change plot to warn user this machine is not found in database.
            t = 'Item "{}" missing'.format(fav_ROM_name)
            SL_ROM['description'] = t
            SL_ROM['plot'] = t
        new_fav_ROM = db_get_SL_Favourite(fav_SL_name, fav_ROM_name, SL_ROM, SL_assets, control_dic)
        fav_SL_roms[fav_SL_key] = new_fav_ROM
        log_debug('Updated SL Favourite "{}" / "{}"'.format(fav_SL_name, fav_ROM_name))
    utils_write_JSON_file(cfg.FAV_SL_ROMS_PATH.getPath(), fav_SL_roms)
    pDialog.endProgress()
def mame_update_SL_MostPlay_objects(cfg, db_dic):
    """Refresh every SL Most Played item with data from the current SL databases.

    Items missing from the SL databases are replaced with empty placeholders
    whose plot warns the user. The launch counter of each item is preserved.
    """
    control_dic = db_dic['control_dic']
    SL_index = db_dic['SL_index']
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Loading SL Most Played JSON DB...')
    most_played_roms_dic = utils_load_JSON_file(cfg.SL_MOST_PLAYED_FILE_PATH.getPath())
    if len(most_played_roms_dic) < 1:
        kodi_notify_warn('SL Most Played empty')
        return
    pDialog.resetProgress('Checking SL Most Played', len(most_played_roms_dic))
    for db_key in sorted(most_played_roms_dic):
        fav_entry = most_played_roms_dic[db_key]
        # Entries created by older versions use the key 'SL_ROM_name'.
        if 'ROM_name' in fav_entry:
            item_name = fav_entry['ROM_name']
        elif 'SL_ROM_name' in fav_entry:
            item_name = fav_entry['SL_ROM_name']
        else:
            raise TypeError('Cannot find SL ROM name')
        # Keep the launch counter of the old entry (default to 1 if absent).
        launch_count = fav_entry['launch_count'] if 'launch_count' in fav_entry else 1
        SL_name = fav_entry['SL_name']
        log_debug('Checking SL Most Played "{}" / "{}"'.format(SL_name, item_name))
        # Update progress dialog.
        pDialog.updateProgressInc('Checking SL Most Played...\nItem "{}"'.format(item_name))
        # Open the per-SL item and asset databases.
        noext = SL_index[SL_name]['rom_DB_noext']
        SL_DB_FN = cfg.SL_DB_DIR.pjoin(noext + '_items.json')
        SL_asset_DB_FN = cfg.SL_DB_DIR.pjoin(noext + '_assets.json')
        SL_roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
        SL_assets_dic = utils_load_JSON_file(SL_asset_DB_FN.getPath(), verbose = False)
        # Grab current data or build a placeholder if the item is gone.
        if item_name in SL_roms:
            SL_ROM = SL_roms[item_name]
            SL_assets = SL_assets_dic[item_name]
        else:
            log_debug('Machine "{}" / "{}" not found in SL main DB'.format(item_name, SL_name))
            SL_ROM = db_new_SL_ROM()
            SL_assets = db_new_SL_asset()
            t = 'Item "{}" missing'.format(item_name)
            SL_ROM['description'] = t
            SL_ROM['plot'] = t
        new_fav_ROM = db_get_SL_Favourite(SL_name, item_name, SL_ROM, SL_assets, control_dic)
        new_fav_ROM['launch_count'] = launch_count
        most_played_roms_dic[db_key] = new_fav_ROM
        log_debug('Updated SL Most Played "{}" / "{}"'.format(SL_name, item_name))
    utils_write_JSON_file(cfg.SL_MOST_PLAYED_FILE_PATH.getPath(), most_played_roms_dic)
    pDialog.endProgress()
def mame_update_SL_RecentPlay_objects(cfg, db_dic):
    """Refresh every SL Recently Played item with data from the current SL databases.

    Items missing from the SL databases are replaced with empty placeholders
    whose plot warns the user.
    """
    control_dic = db_dic['control_dic']
    SL_index = db_dic['SL_index']
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Loading SL Recently Played JSON DB...')
    recent_roms_list = utils_load_JSON_file(cfg.SL_RECENT_PLAYED_FILE_PATH.getPath(), [])
    if len(recent_roms_list) < 1:
        kodi_notify_warn('SL Recently Played empty')
        return
    pDialog.resetProgress('Checking SL Recently Played', len(recent_roms_list))
    for index, old_entry in enumerate(recent_roms_list):
        # Entries created by older versions use the key 'SL_ROM_name'.
        if 'ROM_name' in old_entry:
            item_name = old_entry['ROM_name']
        elif 'SL_ROM_name' in old_entry:
            item_name = old_entry['SL_ROM_name']
        else:
            raise TypeError('Cannot find SL ROM name')
        SL_name = old_entry['SL_name']
        log_debug('Checking SL Recently Played "{}" / "{}"'.format(SL_name, item_name))
        pDialog.updateProgressInc('Checking SL Recently Played...\nItem "{}"'.format(item_name))
        # Open the per-SL item and asset databases.
        noext = SL_index[SL_name]['rom_DB_noext']
        SL_DB_FN = cfg.SL_DB_DIR.pjoin(noext + '_items.json')
        SL_asset_DB_FN = cfg.SL_DB_DIR.pjoin(noext + '_assets.json')
        SL_roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
        SL_assets_dic = utils_load_JSON_file(SL_asset_DB_FN.getPath(), verbose = False)
        # Grab current data or build a placeholder if the item is gone.
        if item_name in SL_roms:
            SL_ROM = SL_roms[item_name]
            SL_assets = SL_assets_dic[item_name]
        else:
            log_debug('Machine "{}" / "{}" not found in SL main DB'.format(item_name, SL_name))
            SL_ROM = db_new_SL_ROM()
            SL_assets = db_new_SL_asset()
            t = 'Item "{}" missing'.format(item_name)
            SL_ROM['description'] = t
            SL_ROM['plot'] = t
        recent_roms_list[index] = db_get_SL_Favourite(SL_name, item_name, SL_ROM, SL_assets, control_dic)
        log_debug('Updated SL Recently Played "{}" / "{}"'.format(SL_name, item_name))
    utils_write_JSON_file(cfg.SL_RECENT_PLAYED_FILE_PATH.getPath(), recent_roms_list)
    pDialog.endProgress()
# ------------------------------------------------------------------------------------------------
# Build MAME and SL plots
# ------------------------------------------------------------------------------------------------
# Generate plot for MAME machines.
# Line 1) Controls are {Joystick}
# Line 2) {One Vertical Raster screen}
# Line 3) Machine [is|is not] mechanical and driver is neogeo.hpp
# Line 4) Machine has [no coin slots| N coin slots]
# Line 5) Artwork, Manual, History, Info, Gameinit, Command
# Line 6) Machine [supports|does not support] a Software List.
def mame_MAME_plot_slits(mname, m, assets_dic,
    history_info_set, mameinfo_info_set, gameinit_idx_dic, command_idx_dic):
    """Return the plot of machine mname as a list of text lines.

    Lines: controls, screen, mechanical/driver, coin slots, available
    DAT/asset flags (optional), supported Software Lists (optional).
    """
    # Flags line: which extra assets/DAT entries exist for this machine.
    flags = []
    if assets_dic[mname]['artwork']: flags.append('Artwork')
    if assets_dic[mname]['manual']: flags.append('Manual')
    if mname in history_info_set: flags.append('History')
    if mname in mameinfo_info_set: flags.append('Info')
    if mname in gameinit_idx_dic: flags.append('Gameinit')
    if mname in command_idx_dic: flags.append('Command')
    flags_line = ', '.join(flags)
    # Controls line. m['input'] may be empty/None for some machines.
    ctrl_types = [ctrl['type'] for ctrl in m['input']['control_list']] if m['input'] else []
    if ctrl_types:
        controls_line = 'Controls {}'.format(misc_get_mame_control_str(ctrl_types))
    else:
        controls_line = 'No controls'
    mecha_line = 'Mechanical' if m['isMechanical'] else 'Non-mechanical'
    coin_slots = m['input']['att_coins'] if m['input'] else 0
    if coin_slots > 0:
        coin_line = 'Machine has {} coin slots'.format(coin_slots)
    else:
        coin_line = 'Machine has no coin slots'
    SL_csv = ', '.join(m['softwarelists']) if m['softwarelists'] else ''
    plot_lines = [
        controls_line,
        '{}'.format(misc_get_mame_screen_str(mname, m)),
        '{} / Driver is {}'.format(mecha_line, m['sourcefile']),
        coin_line,
    ]
    if flags_line: plot_lines.append(flags_line)
    if SL_csv: plot_lines.append('SL {}'.format(SL_csv))
    return plot_lines
# Setting id="MAME_plot" values="Info|History DAT|Info + History DAT"
def mame_build_MAME_plots(cfg, db_dic_in):
    """Generate the plot text of every MAME machine and save the asset DB.

    Builds the plot with mame_MAME_plot_slits(), stores it in the asset
    database, timestamps control_dic and saves both files.
    """
    log_info('mame_build_MAME_plots() Building machine plots/descriptions ...')
    control_dic = db_dic_in['control_dic']
    machines = db_dic_in['machines']
    renderdb_dic = db_dic_in['renderdb']
    assetdb_dic = db_dic_in['assetdb']
    history_idx_dic = db_dic_in['history_idx_dic']
    mameinfo_idx_dic = db_dic_in['mameinfo_idx_dic']
    gameinit_idx_dic = db_dic_in['gameinit_idx_dic']
    command_idx_dic = db_dic_in['command_idx_dic']
    # Do not crash if DAT files are not configured (empty index dictionaries).
    history_info_set = set(history_idx_dic['mame']['machines']) if history_idx_dic else set()
    mameinfo_info_set = set(mameinfo_idx_dic['mame']) if mameinfo_idx_dic else set()
    # --- Built machine plots ---
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Generating MAME machine plots...', len(machines))
    for mname, m in machines.items():
        pDialog.updateProgressInc()
        assetdb_dic[mname]['plot'] = '\n'.join(mame_MAME_plot_slits(mname, m, assetdb_dic,
            history_info_set, mameinfo_info_set, gameinit_idx_dic, command_idx_dic))
    pDialog.endProgress()
    # Timestamp, save the MAME asset database. Save control_dic at the end.
    db_safe_edit(control_dic, 't_MAME_plots_build', time.time())
    db_save_files([
        (assetdb_dic, 'MAME machine assets', cfg.ASSET_DB_PATH.getPath()),
        (control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()),
    ])
# ---------------------------------------------------------------------------------------------
# Generate plot for Software Lists
# Line 1) SL item has {} parts
# Line 2) {} ROMs and {} disks
# Line 3) Manual, History
# Line 4) Machines: machine list ...
# ---------------------------------------------------------------------------------------------
def mame_build_SL_plots(cfg, SL_dic):
    """Generate the plot of every SL item and save the per-SL item databases.

    Plot lines: number of parts, number of ROMs/disks, available flags
    (Manual, History). Timestamps control_dic when finished.
    """
    control_dic = SL_dic['control_dic']
    SL_index_dic = SL_dic['SL_index']
    SL_machines_dic = SL_dic['SL_machines']
    History_idx_dic = SL_dic['history_idx_dic']
    d_text = 'Generating SL item plots ...'
    pDialog = KodiProgressDialog()
    pDialog.startProgress(d_text, len(SL_index_dic))
    for SL_name in sorted(SL_index_dic):
        pDialog.updateProgressInc('{}\nSoftware List {}'.format(d_text, SL_name))
        # Open the item, asset and ROM audit databases of this Software List.
        prefix = SL_index_dic[SL_name]['rom_DB_noext']
        SL_ROMs_FN = cfg.SL_DB_DIR.pjoin(prefix + '_items.json')
        SL_assets_FN = cfg.SL_DB_DIR.pjoin(prefix + '_assets.json')
        SL_ROM_audit_FN = cfg.SL_DB_DIR.pjoin(prefix + '_ROM_audit.json')
        SL_roms = utils_load_JSON_file(SL_ROMs_FN.getPath(), verbose = False)
        SL_assets_dic = utils_load_JSON_file(SL_assets_FN.getPath(), verbose = False)
        SL_ROM_audit_dic = utils_load_JSON_file(SL_ROM_audit_FN.getPath(), verbose = False)
        if SL_name in History_idx_dic:
            History_SL_set = set(History_idx_dic[SL_name]['machines'])
        else:
            History_SL_set = set()
        # Machine_list = [ m['machine'] for m in SL_machines_dic[SL_name] ]
        # Machines_str = 'Machines: {}'.format(', '.join(sorted(Machine_list)))
        # Traverse SL ROMs and make plot.
        for rom_key in sorted(SL_roms):
            num_parts = len(SL_roms[rom_key]['parts'])
            if num_parts == 0:
                parts_str = 'SL item has no parts'
            elif num_parts == 1:
                parts_str = 'SL item has {} part'.format(num_parts)
            else:
                parts_str = 'SL item has {} parts'.format(num_parts)
            # Count ROMs and disks in the audit database of this item.
            num_ROMs = 0
            num_disks = 0
            for audit_rom in SL_ROM_audit_dic[rom_key]:
                if audit_rom['type'] == 'ROM': num_ROMs += 1
                elif audit_rom['type'] == 'DISK': num_disks += 1
            roms_str = '{} {} and {} {}'.format(
                num_ROMs, 'ROM' if num_ROMs == 1 else 'ROMs',
                num_disks, 'disk' if num_disks == 1 else 'disks')
            flag_list = []
            if SL_assets_dic[rom_key]['manual']: flag_list.append('Manual')
            if rom_key in History_SL_set: flag_list.append('History')
            Flag_str = ', '.join(flag_list)
            # SL_roms[rom_key]['plot'] = '\n'.join([parts_str, roms_str, Flag_str, Machines_str])
            SL_roms[rom_key]['plot'] = '\n'.join([parts_str, roms_str, Flag_str])
        utils_write_JSON_file(SL_ROMs_FN.getPath(), SL_roms, verbose = False)
    pDialog.endProgress()
    # --- Timestamp ---
    db_safe_edit(control_dic, 't_SL_plots_build', time.time())
    utils_write_JSON_file(cfg.MAIN_CONTROL_PATH.getPath(), control_dic)
# -------------------------------------------------------------------------------------------------
# MAME ROM/CHD audit code
# -------------------------------------------------------------------------------------------------
# This code is very un-optimised! But it is better to get something that works
# and then optimise. "Premature optimization is the root of all evil" -- Donald Knuth
#
# MAME loads ROMs by hash, not by filename. This is the reason MAME is able to load ROMs even
# if they have a wrong name and providing they are in the correct ZIP file (parent or clone set).
#
# Adds new field 'status': ROMS 'OK', 'OK (invalid ROM)', 'ZIP not found', 'Bad ZIP file',
# 'ROM not in ZIP', 'ROM bad size', 'ROM bad CRC'.
# DISKS 'OK', 'OK (invalid CHD)', 'CHD not found', 'CHD bad SHA1'
# Adds fields 'status_colour'.
#
# rom_list = [
# {'type' : several types, 'name' : 'avph.03d', 'crc' : '01234567', 'location' : 'avsp/avph.03d'}, ...
# {'type' : 'ROM_TYPE_DISK', 'name' : 'avph.03d', 'sha1' : '012...', 'location' : 'avsp/avph.03d'}, ...
# ]
#
# I'm not sure if the CHD sha1 value in MAME XML is the sha1 of the uncompressed data OR
# the sha1 of the CHD file. If the former, then AML can open the CHD file, get the sha1 from the
# header and verify it. See:
# http://www.mameworld.info/ubbthreads/showflat.php?Cat=&Number=342940&page=0&view=expanded&sb=5&o=&vc=1
#
# Status codes stored in the z_cache_status dictionaries of the audit
# functions below (one entry per ROM set ZIP file).
ZIP_NOT_FOUND = 0
BAD_ZIP_FILE = 1
ZIP_FILE_OK = 2
def mame_audit_MAME_machine(cfg, rom_list, audit_dic):
    """Audit the ROMs, CHDs and samples of a single MAME machine.

    Mutates every dictionary in rom_list in place, adding the fields
    'status' (an AUDIT_STATUS_* constant) and 'status_colour' (the status
    wrapped in Kodi [COLOR] tags). Also fills audit_dic with the summary
    flags machine_has_ROMs_or_CHDs, machine_has_ROMs, machine_has_SAMPLES,
    machine_has_CHDs, machine_ROMs_are_OK, machine_SAMPLES_are_OK,
    machine_CHDs_are_OK and machine_is_OK.

    cfg       -- addon configuration object (settings dictionary with
                 'op_mode', ROM/sample/CHD paths).
    rom_list  -- list of ROM/CHD/sample dictionaries of this machine
                 (see the comment block above for the expected fields).
    audit_dic -- dictionary where the audit summary flags are written.

    Raises TypeError if cfg.settings['op_mode'] is not a known mode.
    """
    # Pick the ROM directory according to the operation mode.
    if cfg.settings['op_mode'] == OP_MODE_VANILLA:
        rom_path = cfg.settings['rom_path_vanilla']
    elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS:
        rom_path = cfg.settings['rom_path_2003_plus']
    else:
        raise TypeError('Unknown op_mode "{}"'.format(cfg.settings['op_mode']))
    # --- Cache the ROM set ZIP files and detect wrong named files by CRC ---
    # 1) Traverse ROMs, determine the set ZIP files, open ZIP files and put ZIPs in the cache.
    # 2) If a ZIP file is not in the cache is because the ZIP file was not found
    # 3) z_cache_exists is used to check if the ZIP file has been found the first time or not.
    #
    # z_cache = {
    #     'zip_filename' : {
    #         'fname' : {'size' : int, 'crc' : text_type},
    #         'fname' : {'size' : int, 'crc' : text_type}, ...
    #     }
    # }
    #
    # z_cache_status = {
    #     'zip_filename' : ZIP_NOT_FOUND, BAD_ZIP_FILE, ZIP_FILE_OK
    # }
    #
    z_cache = {}
    z_cache_status = {}
    for m_rom in rom_list:
        # Skip CHDs.
        if m_rom['type'] == ROM_TYPE_DISK: continue
        # Process ROM ZIP files.
        # 'location' is 'set_name/rom_name' (see the comment block above the function).
        set_name = m_rom['location'].split('/')[0]
        if m_rom['type'] == ROM_TYPE_SAMPLE:
            zip_FN = FileName(cfg.settings['samples_path']).pjoin(set_name + '.zip')
        else:
            zip_FN = FileName(rom_path).pjoin(set_name + '.zip')
        zip_path = zip_FN.getPath()
        # ZIP file encountered for the first time. Skip ZIP files already in the cache.
        if zip_path not in z_cache_status:
            if zip_FN.exists():
                # >> Scan files in ZIP file and put them in the cache
                # log_debug('Caching ZIP file {}'.format(zip_path))
                try:
                    zip_f = z.ZipFile(zip_path, 'r')
                except z.BadZipfile as e:
                    z_cache_status[zip_path] = BAD_ZIP_FILE
                    continue
                # log_debug('ZIP {} files {}'.format(m_rom['location'], z_file_list))
                zip_file_dic = {}
                for zfile in zip_f.namelist():
                    # >> NOTE CRC32 in Python is a decimal number: CRC32 4225815809
                    # >> However, MAME encodes it as an hexadecimal number: CRC32 0123abcd
                    z_info = zip_f.getinfo(zfile)
                    z_info_file_size = z_info.file_size
                    z_info_crc_hex_str = '{0:08x}'.format(z_info.CRC)
                    zip_file_dic[zfile] = {'size' : z_info_file_size, 'crc' : z_info_crc_hex_str}
                    # log_debug('ZIP CRC32 {} | CRC hex {} | size {}'.format(z_info.CRC, z_crc_hex, z_info.file_size))
                    # log_debug('ROM CRC hex {} | size {}'.format(m_rom['crc'], 0))
                zip_f.close()
                z_cache[zip_path] = zip_file_dic
                z_cache_status[zip_path] = ZIP_FILE_OK
            else:
                # >> Mark ZIP file as not found
                z_cache_status[zip_path] = ZIP_NOT_FOUND
    # --- Audit ROM by ROM ---
    # Each iteration sets m_rom['status'] and m_rom['status_colour'] exactly once
    # and continues to the next ROM on the first failed check.
    for m_rom in rom_list:
        if m_rom['type'] == ROM_TYPE_DISK:
            split_list = m_rom['location'].split('/')
            set_name = split_list[0]
            disk_name = split_list[1]
            # log_debug('Testing CHD {}'.format(m_rom['name']))
            # log_debug('location {}'.format(m_rom['location']))
            # log_debug('set_name "{}"'.format(set_name))
            # log_debug('disk_name "{}"'.format(disk_name))
            # >> Invalid CHDs
            # CHDs without a SHA1 in the XML cannot be verified and count as OK.
            if not m_rom['sha1']:
                m_rom['status'] = AUDIT_STATUS_OK_INVALID_CHD
                m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> Test if DISK file exists
            chd_FN = FileName(cfg.settings['chd_path']).pjoin(set_name).pjoin(disk_name + '.chd')
            # log_debug('chd_FN P {}'.format(chd_FN.getPath()))
            if not chd_FN.exists():
                m_rom['status'] = AUDIT_STATUS_CHD_NO_FOUND
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> Open CHD file and check SHA1 hash.
            # NOTE(review): _mame_stat_chd() presumably reads the SHA1 from the
            # CHD header (see the comment block above the function) -- not visible here.
            chd_info = _mame_stat_chd(chd_FN.getPath())
            if chd_info['status'] == CHD_BAD_CHD:
                m_rom['status'] = AUDIT_STATUS_BAD_CHD_FILE
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            if chd_info['status'] == CHD_BAD_VERSION:
                m_rom['status'] = AUDIT_STATUS_CHD_BAD_VERSION
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            if chd_info['sha1'] != m_rom['sha1']:
                m_rom['status'] = AUDIT_STATUS_CHD_BAD_SHA1
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> DISK is OK
            m_rom['status'] = AUDIT_STATUS_OK
            m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
        elif m_rom['type'] == ROM_TYPE_SAMPLE:
            split_list = m_rom['location'].split('/')
            set_name = split_list[0]
            sample_name = split_list[1] + '.wav'
            # log_debug('Testing SAMPLE {}'.format(m_rom['name']))
            # log_debug('location {}'.format(m_rom['location']))
            # log_debug('set_name {}'.format(set_name))
            # log_debug('sample_name {}'.format(sample_name))
            # Test if ZIP file exists (use cached data). ZIP file must be in the cache always
            # at this point.
            zip_FN = FileName(cfg.settings['samples_path']).pjoin(set_name + '.zip')
            zip_path = zip_FN.getPath()
            # log_debug('ZIP {}'.format(zip_FN.getPath()))
            if z_cache_status[zip_path] == ZIP_NOT_FOUND:
                m_rom['status'] = AUDIT_STATUS_ZIP_NO_FOUND
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            elif z_cache_status[zip_path] == BAD_ZIP_FILE:
                m_rom['status'] = AUDIT_STATUS_BAD_ZIP_FILE
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> ZIP file is good and data was cached.
            zip_file_dic = z_cache[zip_path]
            # >> At this point the ZIP file is in the cache (if it was open)
            # Samples are matched by filename only (no CRC in the sample data).
            if sample_name not in zip_file_dic:
                # >> File not found by filename. Check if it has renamed by looking at CRC.
                # >> ROM not in ZIP (not even under other filename)
                m_rom['status'] = AUDIT_STATUS_SAMPLE_NOT_IN_ZIP
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> SAMPLE is OK
            m_rom['status'] = AUDIT_STATUS_OK
            m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
        else:
            # Any other type is a regular ROM.
            split_list = m_rom['location'].split('/')
            set_name = split_list[0]
            rom_name = split_list[1]
            # log_debug('Testing ROM {}'.format(m_rom['name']))
            # log_debug('location {}'.format(m_rom['location']))
            # log_debug('set_name {}'.format(set_name))
            # log_debug('rom_name {}'.format(rom_name))
            # >> Invalid ROMs are not in the ZIP file
            if not m_rom['crc']:
                m_rom['status'] = AUDIT_STATUS_OK_INVALID_ROM
                m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
                continue
            # Test if ZIP file exists (use cached data). ZIP file must be in the cache always
            # at this point.
            zip_FN = FileName(rom_path).pjoin(set_name + '.zip')
            zip_path = zip_FN.getPath()
            # log_debug('ZIP {}'.format(zip_FN.getPath()))
            if z_cache_status[zip_path] == ZIP_NOT_FOUND:
                m_rom['status'] = AUDIT_STATUS_ZIP_NO_FOUND
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            elif z_cache_status[zip_path] == BAD_ZIP_FILE:
                m_rom['status'] = AUDIT_STATUS_BAD_ZIP_FILE
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> ZIP file is good and data was cached.
            zip_file_dic = z_cache[zip_path]
            # >> At this point the ZIP file is in the cache (if it was open)
            if rom_name in zip_file_dic:
                # >> File has correct name
                if zip_file_dic[rom_name]['size'] != m_rom['size']:
                    m_rom['status'] = AUDIT_STATUS_ROM_BAD_SIZE
                    m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                    continue
                if zip_file_dic[rom_name]['crc'] != m_rom['crc']:
                    m_rom['status'] = AUDIT_STATUS_ROM_BAD_CRC
                    m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                    continue
            else:
                # >> File not found by filename. Check if it has renamed by looking at CRC.
                # MAME loads ROMs by hash, so a wrong-named ROM with the right CRC is OK.
                rom_OK_name = ''
                for fn in zip_file_dic:
                    if m_rom['crc'] == zip_file_dic[fn]['crc']:
                        rom_OK_name = fn
                        break
                if rom_OK_name:
                    # >> File found by CRC
                    m_rom['status'] = AUDIT_STATUS_OK_WRONG_NAME_ROM
                    m_rom['status_colour'] = '[COLOR orange]OK (named {})[/COLOR]'.format(rom_OK_name)
                    continue
                else:
                    # >> ROM not in ZIP (not even under other filename)
                    m_rom['status'] = AUDIT_STATUS_ROM_NOT_IN_ZIP
                    m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                    continue
            # >> ROM is OK
            m_rom['status'] = AUDIT_STATUS_OK
            m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
    # >> Audit results
    # >> Naive and slow code, but better safe than sorry.
    # Summarise the per-ROM statuses into the audit_dic flags. A machine class
    # (ROMs/SAMPLES/CHDs) with no members counts as OK.
    ROM_OK_status_list = []
    SAM_OK_status_list = []
    CHD_OK_status_list = []
    audit_dic['machine_has_ROMs_or_CHDs'] = False
    audit_dic['machine_has_ROMs'] = False
    audit_dic['machine_has_SAMPLES'] = False
    audit_dic['machine_has_CHDs'] = False
    for m_rom in rom_list:
        audit_dic['machine_has_ROMs_or_CHDs'] = True
        if m_rom['type'] == ROM_TYPE_DISK:
            audit_dic['machine_has_CHDs'] = True
            if m_rom['status'] == AUDIT_STATUS_OK or m_rom['status'] == AUDIT_STATUS_OK_INVALID_CHD:
                CHD_OK_status_list.append(True)
            else:
                CHD_OK_status_list.append(False)
        elif m_rom['type'] == ROM_TYPE_SAMPLE:
            audit_dic['machine_has_SAMPLES'] = True
            if m_rom['status'] == AUDIT_STATUS_OK:
                SAM_OK_status_list.append(True)
            else:
                SAM_OK_status_list.append(False)
        else:
            audit_dic['machine_has_ROMs'] = True
            if m_rom['status'] == AUDIT_STATUS_OK or \
               m_rom['status'] == AUDIT_STATUS_OK_INVALID_ROM or \
               m_rom['status'] == AUDIT_STATUS_OK_WRONG_NAME_ROM:
                ROM_OK_status_list.append(True)
            else:
                ROM_OK_status_list.append(False)
    audit_dic['machine_ROMs_are_OK'] = all(ROM_OK_status_list) if audit_dic['machine_has_ROMs'] else True
    audit_dic['machine_SAMPLES_are_OK'] = all(SAM_OK_status_list) if audit_dic['machine_has_SAMPLES'] else True
    audit_dic['machine_CHDs_are_OK'] = all(CHD_OK_status_list) if audit_dic['machine_has_CHDs'] else True
    audit_dic['machine_is_OK'] = audit_dic['machine_ROMs_are_OK'] and \
        audit_dic['machine_SAMPLES_are_OK'] and audit_dic['machine_CHDs_are_OK']
# -------------------------------------------------------------------------------------------------
# SL ROM/CHD audit code
# -------------------------------------------------------------------------------------------------
def mame_audit_SL_machine(SL_ROM_path_FN, SL_CHD_path_FN, SL_name, item_name, rom_list, audit_dic):
    """Audit the ROMs and CHDs of a single Software List item.

    Mutates every dictionary in rom_list in place, adding the fields
    'status' (an AUDIT_STATUS_* constant) and 'status_colour' (the status
    wrapped in Kodi [COLOR] tags). Also fills audit_dic with the summary
    flags machine_has_ROMs_or_CHDs, machine_has_ROMs, machine_has_CHDs,
    machine_ROMs_are_OK, machine_CHDs_are_OK and machine_is_OK.

    SL_ROM_path_FN -- FileName of the SL ROMs root directory.
    SL_CHD_path_FN -- FileName of the SL CHDs root directory.
    SL_name        -- Software List name (used in error logging; note it is
                      also reassigned from each ROM 'location' below).
    item_name      -- SL item name (used in error logging; also reassigned).
    rom_list       -- list of ROM/CHD dictionaries of this SL item.
    audit_dic      -- dictionary where the audit summary flags are written.
    """
    # --- Cache the ROM set ZIP files and detect wrong named files by CRC ---
    # >> Look at mame_audit_MAME_machine() for comments.
    z_cache = {}
    z_cache_status = {}
    for m_rom in rom_list:
        # >> Skip CHDs
        if m_rom['type'] == ROM_TYPE_DISK: continue
        # >> Process ROM ZIP files
        # SL ROM 'location' is 'SL_name/item_name/rom_name'. ZIPs are named
        # after the item and live inside a per-SL subdirectory.
        split_list = m_rom['location'].split('/')
        SL_name = split_list[0]
        zip_name = split_list[1] + '.zip'
        zip_FN = SL_ROM_path_FN.pjoin(SL_name).pjoin(zip_name)
        zip_path = zip_FN.getPath()
        # >> ZIP file encountered for the first time. Skip ZIP files already in the cache.
        if zip_path not in z_cache_status:
            if zip_FN.exists():
                # >> Scan files in ZIP file and put them in the cache
                # log_debug('Caching ZIP file {}'.format(zip_path))
                try:
                    zip_f = z.ZipFile(zip_path, 'r')
                except z.BadZipfile as e:
                    z_cache_status[zip_path] = BAD_ZIP_FILE
                    continue
                # log_debug('ZIP {} files {}'.format(m_rom['location'], z_file_list))
                zip_file_dic = {}
                for zfile in zip_f.namelist():
                    # >> NOTE CRC32 in Python is a decimal number: CRC32 4225815809
                    # >> However, MAME encodes it as an hexadecimal number: CRC32 0123abcd
                    z_info = zip_f.getinfo(zfile)
                    z_info_file_size = z_info.file_size
                    z_info_crc_hex_str = '{0:08x}'.format(z_info.CRC)
                    # Unicode filenames in ZIP files cause problems later in this function.
                    # zfile has type Unicode and it's not encoded in utf-8.
                    # How to know encoding of ZIP files?
                    # https://stackoverflow.com/questions/15918314/how-to-detect-string-byte-encoding/15918519
                    try:
                        # zfile sometimes has type Unicode, sometimes str. If type is str then
                        # try to decode it as UTF-8.
                        if type(zfile) == text_type:
                            zfile_unicode = zfile
                        else:
                            zfile_unicode = zfile.decode('utf-8')
                    except UnicodeDecodeError:
                        log_error('mame_audit_SL_machine() Exception UnicodeDecodeError')
                        log_error('type(zfile) = {}'.format(type(zfile)))
                        log_error('SL_name "{}", item_name "{}", rom name "{}"'.format(SL_name, item_name, m_rom['name']))
                    except UnicodeEncodeError:
                        log_error('mame_audit_SL_machine() Exception UnicodeEncodeError')
                        log_error('type(zfile) = {}'.format(type(zfile)))
                        log_error('SL_name "{}", item_name "{}", rom name "{}"'.format(SL_name, item_name, m_rom['name']))
                    else:
                        # For now, do not add non-ASCII ROMs so the audit will fail for this ROM.
                        zip_file_dic[zfile_unicode] = {'size' : z_info_file_size, 'crc' : z_info_crc_hex_str}
                    # log_debug('ZIP CRC32 {} | CRC hex {} | size {}'.format(z_info.CRC, z_crc_hex, z_info.file_size))
                    # log_debug('ROM CRC hex {} | size {}'.format(m_rom['crc'], 0))
                zip_f.close()
                z_cache[zip_path] = zip_file_dic
                z_cache_status[zip_path] = ZIP_FILE_OK
            else:
                # >> Mark ZIP file as not found
                z_cache_status[zip_path] = ZIP_NOT_FOUND
    # --- Audit ROM by ROM ---
    # Each iteration sets m_rom['status'] and m_rom['status_colour'] exactly once
    # and continues to the next ROM on the first failed check.
    for m_rom in rom_list:
        if m_rom['type'] == ROM_TYPE_DISK:
            # --- Audit CHD ----------------------------------------------------------------------
            split_list = m_rom['location'].split('/')
            SL_name = split_list[0]
            item_name = split_list[1]
            disk_name = split_list[2]
            # log_debug('Testing CHD "{}"'.format(m_rom['name']))
            # log_debug('location "{}"'.format(m_rom['location']))
            # log_debug('SL_name "{}"'.format(SL_name))
            # log_debug('item_name "{}"'.format(item_name))
            # log_debug('disk_name "{}"'.format(disk_name))
            # >> Invalid CHDs
            # CHDs without a SHA1 in the XML cannot be verified and count as OK.
            if not m_rom['sha1']:
                m_rom['status'] = AUDIT_STATUS_OK_INVALID_CHD
                m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> Test if DISK file exists
            chd_FN = SL_CHD_path_FN.pjoin(SL_name).pjoin(item_name).pjoin(disk_name + '.chd')
            # log_debug('chd_FN P {}'.format(chd_FN.getPath()))
            if not chd_FN.exists():
                m_rom['status'] = AUDIT_STATUS_CHD_NO_FOUND
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> Open CHD file and check SHA1 hash.
            chd_info = _mame_stat_chd(chd_FN.getPath())
            if chd_info['status'] == CHD_BAD_CHD:
                m_rom['status'] = AUDIT_STATUS_BAD_CHD_FILE
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            if chd_info['status'] == CHD_BAD_VERSION:
                m_rom['status'] = AUDIT_STATUS_CHD_BAD_VERSION
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            if chd_info['sha1'] != m_rom['sha1']:
                m_rom['status'] = AUDIT_STATUS_CHD_BAD_SHA1
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> DISK is OK
            m_rom['status'] = AUDIT_STATUS_OK
            m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
        else:
            # --- Audit ROM ----------------------------------------------------------------------
            split_list = m_rom['location'].split('/')
            SL_name = split_list[0]
            item_name = split_list[1]
            rom_name = split_list[2]
            # log_debug('Testing ROM "{}"'.format(m_rom['name']))
            # log_debug('location "{}"'.format(m_rom['location']))
            # log_debug('SL_name "{}"'.format(SL_name))
            # log_debug('item_name "{}"'.format(item_name))
            # log_debug('rom_name "{}"'.format(rom_name))
            # >> Invalid ROMs are not in the ZIP file
            if not m_rom['crc']:
                m_rom['status'] = AUDIT_STATUS_OK_INVALID_ROM
                m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> Test if ZIP file exists
            zip_FN = SL_ROM_path_FN.pjoin(SL_name).pjoin(item_name + '.zip')
            zip_path = zip_FN.getPath()
            # log_debug('zip_FN P {}'.format(zip_FN.getPath()))
            if z_cache_status[zip_path] == ZIP_NOT_FOUND:
                m_rom['status'] = AUDIT_STATUS_ZIP_NO_FOUND
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            elif z_cache_status[zip_path] == BAD_ZIP_FILE:
                m_rom['status'] = AUDIT_STATUS_BAD_ZIP_FILE
                m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                continue
            # >> ZIP file is good and data was cached.
            zip_file_dic = z_cache[zip_path]
            # >> At this point the ZIP file is in the cache (if it was open)
            if rom_name in zip_file_dic:
                # >> File has correct name
                if zip_file_dic[rom_name]['size'] != m_rom['size']:
                    m_rom['status'] = AUDIT_STATUS_ROM_BAD_SIZE
                    m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                    continue
                if zip_file_dic[rom_name]['crc'] != m_rom['crc']:
                    m_rom['status'] = AUDIT_STATUS_ROM_BAD_CRC
                    m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                    continue
            else:
                # >> File not found by filename. Check if it has renamed by looking at CRC.
                # MAME loads ROMs by hash, so a wrong-named ROM with the right CRC is OK.
                rom_OK_name = ''
                for fn in zip_file_dic:
                    if m_rom['crc'] == zip_file_dic[fn]['crc']:
                        rom_OK_name = fn
                        break
                if rom_OK_name:
                    # >> File found by CRC
                    m_rom['status'] = AUDIT_STATUS_OK_WRONG_NAME_ROM
                    m_rom['status_colour'] = '[COLOR orange]OK (named {})[/COLOR]'.format(rom_OK_name)
                    continue
                else:
                    # >> ROM not in ZIP (not even under other filename)
                    m_rom['status'] = AUDIT_STATUS_ROM_NOT_IN_ZIP
                    m_rom['status_colour'] = '[COLOR red]{}[/COLOR]'.format(m_rom['status'])
                    continue
            # >> ROM is OK
            m_rom['status'] = AUDIT_STATUS_OK
            m_rom['status_colour'] = '[COLOR green]{}[/COLOR]'.format(m_rom['status'])
            # log_debug('{}'.format(AUDIT_STATUS_OK))
    # >> Currently exactly same code as in mame_audit_MAME_machine()
    # >> Audit results
    # >> Naive and slow code, but better safe than sorry.
    # Summarise the per-ROM statuses into the audit_dic flags. A class
    # (ROMs/CHDs) with no members counts as OK.
    ROM_OK_status_list = []
    CHD_OK_status_list = []
    audit_dic['machine_has_ROMs_or_CHDs'] = False
    audit_dic['machine_has_ROMs'] = False
    audit_dic['machine_has_CHDs'] = False
    for m_rom in rom_list:
        audit_dic['machine_has_ROMs_or_CHDs'] = True
        if m_rom['type'] == ROM_TYPE_DISK:
            audit_dic['machine_has_CHDs'] = True
            if m_rom['status'] == AUDIT_STATUS_OK or \
               m_rom['status'] == AUDIT_STATUS_OK_INVALID_CHD:
                CHD_OK_status_list.append(True)
            else:
                CHD_OK_status_list.append(False)
        else:
            audit_dic['machine_has_ROMs'] = True
            if m_rom['status'] == AUDIT_STATUS_OK or \
               m_rom['status'] == AUDIT_STATUS_OK_INVALID_ROM or \
               m_rom['status'] == AUDIT_STATUS_OK_WRONG_NAME_ROM:
                ROM_OK_status_list.append(True)
            else:
                ROM_OK_status_list.append(False)
    audit_dic['machine_ROMs_are_OK'] = all(ROM_OK_status_list) if audit_dic['machine_has_ROMs'] else True
    audit_dic['machine_CHDs_are_OK'] = all(CHD_OK_status_list) if audit_dic['machine_has_CHDs'] else True
    audit_dic['machine_is_OK'] = audit_dic['machine_ROMs_are_OK'] and audit_dic['machine_CHDs_are_OK']
def mame_audit_MAME_all(cfg, db_dic_in):
    """Audit the ROMs, CHDs and Samples of every MAME machine.

    Generates the Full/Good/Error audit reports (overall, ROM, Samples and CHD
    variants), writes the report files to disk and updates the audit statistics
    in control_dic, which is then saved as JSON.

    db_dic_in must contain the 'control_dic', 'renderdb' and 'audit_roms'
    databases.
    """
    log_debug('mame_audit_MAME_all() Initialising...')
    control_dic = db_dic_in['control_dic']
    renderdb_dic = db_dic_in['renderdb']
    audit_roms_dic = db_dic_in['audit_roms']
    # Go machine by machine and audit ZIPs and CHDs. Adds new column 'status' to each ROM.
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Auditing MAME ROMs and CHDs...', len(renderdb_dic))
    machine_audit_dic = {}
    for m_name in sorted(renderdb_dic):
        pDialog.updateProgressInc()
        if pDialog.isCanceled(): break
        # Only audit machine if it has ROMs. However, add all machines to machine_audit_dic.
        # audit_roms_dic[m_name] is mutable and edited inside mame_audit_MAME_machine()
        audit_dic = db_new_audit_dic()
        if m_name in audit_roms_dic:
            mame_audit_MAME_machine(cfg, audit_roms_dic[m_name], audit_dic)
        machine_audit_dic[m_name] = audit_dic
    pDialog.endProgress()
    # Audit statistics.
    audit_MAME_machines_with_arch = 0
    audit_MAME_machines_with_arch_OK = 0
    audit_MAME_machines_with_arch_BAD = 0
    audit_MAME_machines_without = 0
    audit_MAME_machines_with_ROMs = 0
    audit_MAME_machines_with_ROMs_OK = 0
    audit_MAME_machines_with_ROMs_BAD = 0
    audit_MAME_machines_without_ROMs = 0
    audit_MAME_machines_with_SAMPLES = 0
    audit_MAME_machines_with_SAMPLES_OK = 0
    audit_MAME_machines_with_SAMPLES_BAD = 0
    audit_MAME_machines_without_SAMPLES = 0
    audit_MAME_machines_with_CHDs = 0
    audit_MAME_machines_with_CHDs_OK = 0
    audit_MAME_machines_with_CHDs_BAD = 0
    audit_MAME_machines_without_CHDs = 0
    for m_name in renderdb_dic:
        render_dic = renderdb_dic[m_name]
        audit_dic = machine_audit_dic[m_name]
        # Skip unrunnable (device) machines
        if render_dic['isDevice']: continue
        if audit_dic['machine_has_ROMs_or_CHDs']:
            audit_MAME_machines_with_arch += 1
            if audit_dic['machine_is_OK']: audit_MAME_machines_with_arch_OK += 1
            else: audit_MAME_machines_with_arch_BAD += 1
        else:
            audit_MAME_machines_without += 1
        if audit_dic['machine_has_ROMs']:
            audit_MAME_machines_with_ROMs += 1
            if audit_dic['machine_ROMs_are_OK']: audit_MAME_machines_with_ROMs_OK += 1
            else: audit_MAME_machines_with_ROMs_BAD += 1
        else:
            audit_MAME_machines_without_ROMs += 1
        if audit_dic['machine_has_SAMPLES']:
            audit_MAME_machines_with_SAMPLES += 1
            if audit_dic['machine_SAMPLES_are_OK']: audit_MAME_machines_with_SAMPLES_OK += 1
            else: audit_MAME_machines_with_SAMPLES_BAD += 1
        else:
            audit_MAME_machines_without_SAMPLES += 1
        if audit_dic['machine_has_CHDs']:
            audit_MAME_machines_with_CHDs += 1
            if audit_dic['machine_CHDs_are_OK']: audit_MAME_machines_with_CHDs_OK += 1
            else: audit_MAME_machines_with_CHDs_BAD += 1
        else:
            audit_MAME_machines_without_CHDs += 1
    # --- Report header and statistics ---
    report_full_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows full audit report',
    ]
    report_good_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with good ROMs and/or CHDs',
    ]
    report_error_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with bad/missing ROMs and/or CHDs',
    ]
    ROM_report_good_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with good ROMs',
    ]
    ROM_report_error_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with bad/missing ROMs',
    ]
    SAMPLES_report_good_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with good Samples',
    ]
    SAMPLES_report_error_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with bad/missing Samples',
    ]
    CHD_report_good_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with good CHDs',
    ]
    CHD_report_error_list = [
        '*** Advanced MAME Launcher MAME audit report ***',
        'This report shows machines with bad/missing CHDs',
    ]
    h_list = [
        'There are {} machines in total'.format(len(renderdb_dic)),
        'Of those, {} are runnable machines'.format(control_dic['stats_audit_MAME_machines_runnable']),
    ]
    report_full_list.extend(h_list)
    report_good_list.extend(h_list)
    report_error_list.extend(h_list)
    ROM_report_good_list.extend(h_list)
    ROM_report_error_list.extend(h_list)
    SAMPLES_report_good_list.extend(h_list)
    SAMPLES_report_error_list.extend(h_list)
    CHD_report_good_list.extend(h_list)
    CHD_report_error_list.extend(h_list)
    h_list = [
        'Of those, {} require ROMs and/or CHDs'.format(audit_MAME_machines_with_arch),
        'Of those, {} are OK and {} have bad/missing ROMs and/or CHDs'.format(
            audit_MAME_machines_with_arch_OK, audit_MAME_machines_with_arch_BAD ),
    ]
    report_good_list.extend(h_list)
    report_error_list.extend(h_list)
    h_list = [
        'Of those, {} require ROMs'.format(audit_MAME_machines_with_ROMs),
        'Of those, {} are OK and {} have bad/missing ROMs'.format(
            audit_MAME_machines_with_ROMs_OK, audit_MAME_machines_with_ROMs_BAD ),
    ]
    ROM_report_good_list.extend(h_list)
    ROM_report_error_list.extend(h_list)
    # Statistics header of the Samples reports (consistent with the ROM/CHD reports).
    h_list = [
        'Of those, {} require Samples'.format(audit_MAME_machines_with_SAMPLES),
        'Of those, {} are OK and {} have bad/missing Samples'.format(
            audit_MAME_machines_with_SAMPLES_OK, audit_MAME_machines_with_SAMPLES_BAD ),
    ]
    SAMPLES_report_good_list.extend(h_list)
    SAMPLES_report_error_list.extend(h_list)
    h_list = [
        'Of those, {} require CHDs'.format(audit_MAME_machines_with_CHDs),
        'Of those, {} are OK and {} have bad/missing CHDs'.format(
            audit_MAME_machines_with_CHDs_OK, audit_MAME_machines_with_CHDs_BAD ),
    ]
    CHD_report_good_list.extend(h_list)
    CHD_report_error_list.extend(h_list)
    report_full_list.append('')
    report_good_list.append('')
    report_error_list.append('')
    ROM_report_good_list.append('')
    ROM_report_error_list.append('')
    SAMPLES_report_good_list.append('')
    SAMPLES_report_error_list.append('')
    CHD_report_good_list.append('')
    CHD_report_error_list.append('')
    # Generate report.
    pDialog.startProgress('Generating audit reports...', len(renderdb_dic))
    for m_name in sorted(renderdb_dic):
        pDialog.updateProgressInc()
        # Skip ROMless and/or CHDless machines from reports, except the full report
        description = renderdb_dic[m_name]['description']
        cloneof = renderdb_dic[m_name]['cloneof']
        if m_name not in audit_roms_dic:
            head_list = []
            head_list.append('Machine {} "{}"'.format(m_name, description))
            if cloneof:
                clone_desc = renderdb_dic[cloneof]['description']
                head_list.append('Cloneof {} "{}"'.format(cloneof, clone_desc))
            head_list.append('This machine has no ROMs and/or CHDs')
            report_full_list.extend(head_list)
            continue
        rom_list = audit_roms_dic[m_name]
        if not rom_list: continue
        # >> Check if audit was canceled. Note the break skips the for-else footer.
        # log_debug(text_type(rom_list))
        if 'status' not in rom_list[0]:
            report_full_list.append('Audit was canceled at machine {}'.format(m_name))
            break
        # >> Machine header (in all reports).
        head_list = []
        head_list.append('Machine {} "{}"'.format(m_name, description))
        if cloneof:
            clone_desc = renderdb_dic[cloneof]['description']
            head_list.append('Cloneof {} "{}"'.format(cloneof, clone_desc))
        # ROM/CHD report.
        table_str = [ ['right', 'left', 'right', 'left', 'left', 'left'] ]
        for m_rom in rom_list:
            if m_rom['type'] == ROM_TYPE_DISK:
                table_row = [m_rom['type'], m_rom['name'], '', m_rom['sha1'][0:8],
                    m_rom['location'], m_rom['status']]
            elif m_rom['type'] == ROM_TYPE_SAMPLE:
                table_row = [m_rom['type'], m_rom['name'], '', '', m_rom['location'], m_rom['status']]
            else:
                table_row = [m_rom['type'], m_rom['name'], text_type(m_rom['size']), m_rom['crc'],
                    m_rom['location'], m_rom['status']]
            table_str.append(table_row)
        local_str_list = text_render_table_NO_HEADER(table_str)
        local_str_list.append('')
        # --- At this point all machines have ROMs and/or CHDs ---
        # >> Full, ROMs and/or CHDs report.
        audit_dic = machine_audit_dic[m_name]
        report_full_list.extend(head_list + local_str_list)
        if audit_dic['machine_is_OK']:
            report_good_list.extend(head_list + local_str_list)
        else:
            report_error_list.extend(head_list + local_str_list)
        # >> ROM report
        if audit_dic['machine_has_ROMs']:
            if audit_dic['machine_ROMs_are_OK']:
                ROM_report_good_list.extend(head_list + local_str_list)
            else:
                ROM_report_error_list.extend(head_list + local_str_list)
        # >> Samples report
        if audit_dic['machine_has_SAMPLES']:
            if audit_dic['machine_SAMPLES_are_OK']:
                SAMPLES_report_good_list.extend(head_list + local_str_list)
            else:
                SAMPLES_report_error_list.extend(head_list + local_str_list)
        # >> CHD report.
        if audit_dic['machine_has_CHDs']:
            if audit_dic['machine_CHDs_are_OK']:
                CHD_report_good_list.extend(head_list + local_str_list)
            else:
                CHD_report_error_list.extend(head_list + local_str_list)
    else:
        # for-else: only append the footer if the audit was not canceled (no break).
        a = '*** MAME audit finished ***'
        report_full_list.append(a)
        report_good_list.append(a)
        report_error_list.append(a)
        ROM_report_good_list.append(a)
        ROM_report_error_list.append(a)
        SAMPLES_report_good_list.append(a)
        SAMPLES_report_error_list.append(a)
        CHD_report_good_list.append(a)
        CHD_report_error_list.append(a)
    pDialog.endProgress()
    # --- Write reports ---
    num_items = 9
    pDialog.startProgress('Writing report files...', num_items)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_FULL_PATH.getPath(), report_full_list)
    pDialog.updateProgress(1)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_GOOD_PATH.getPath(), report_good_list)
    pDialog.updateProgress(2)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_ERRORS_PATH.getPath(), report_error_list)
    pDialog.updateProgress(3)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_ROM_GOOD_PATH.getPath(), ROM_report_good_list)
    pDialog.updateProgress(4)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_ROM_ERRORS_PATH.getPath(), ROM_report_error_list)
    pDialog.updateProgress(5)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_SAMPLES_GOOD_PATH.getPath(), SAMPLES_report_good_list)
    pDialog.updateProgress(6)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_SAMPLES_ERRORS_PATH.getPath(), SAMPLES_report_error_list)
    pDialog.updateProgress(7)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_CHD_GOOD_PATH.getPath(), CHD_report_good_list)
    pDialog.updateProgress(8)
    utils_write_slist_to_file(cfg.REPORT_MAME_AUDIT_CHD_ERRORS_PATH.getPath(), CHD_report_error_list)
    pDialog.endProgress()
    # Update MAME audit statistics.
    db_safe_edit(control_dic, 'audit_MAME_machines_with_arch', audit_MAME_machines_with_arch)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_arch_OK', audit_MAME_machines_with_arch_OK)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_arch_BAD', audit_MAME_machines_with_arch_BAD)
    db_safe_edit(control_dic, 'audit_MAME_machines_without', audit_MAME_machines_without)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_ROMs', audit_MAME_machines_with_ROMs)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_ROMs_OK', audit_MAME_machines_with_ROMs_OK)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_ROMs_BAD', audit_MAME_machines_with_ROMs_BAD)
    db_safe_edit(control_dic, 'audit_MAME_machines_without_ROMs', audit_MAME_machines_without_ROMs)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_SAMPLES', audit_MAME_machines_with_SAMPLES)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_SAMPLES_OK', audit_MAME_machines_with_SAMPLES_OK)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_SAMPLES_BAD', audit_MAME_machines_with_SAMPLES_BAD)
    db_safe_edit(control_dic, 'audit_MAME_machines_without_SAMPLES', audit_MAME_machines_without_SAMPLES)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_CHDs', audit_MAME_machines_with_CHDs)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_CHDs_OK', audit_MAME_machines_with_CHDs_OK)
    db_safe_edit(control_dic, 'audit_MAME_machines_with_CHDs_BAD', audit_MAME_machines_with_CHDs_BAD)
    db_safe_edit(control_dic, 'audit_MAME_machines_without_CHDs', audit_MAME_machines_without_CHDs)
    # Update timestamp of ROM audit.
    db_safe_edit(control_dic, 't_MAME_audit', time.time())
    utils_write_JSON_file(cfg.MAIN_CONTROL_PATH.getPath(), control_dic)
def mame_audit_SL_all(cfg, db_dic_in):
    """Audit the ROMs and CHDs of every Software List item.

    Iterates all SL databases, audits each item, generates the Full/Good/Error
    audit reports (overall, ROM and CHD variants), writes the report files to
    disk and updates the SL audit statistics in control_dic, which is then
    saved as JSON.

    db_dic_in must contain the 'control_dic' and 'SL_index' databases.
    """
    log_debug('mame_audit_SL_all() Initialising ...')
    control_dic = db_dic_in['control_dic']
    SL_index_dic = db_dic_in['SL_index']
    # Report header and statistics
    report_full_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows full Software Lists audit report',
    ]
    report_good_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows SL items with good ROMs and/or CHDs',
    ]
    report_error_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows SL items with errors in ROMs and/or CHDs',
    ]
    ROM_report_good_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows SL items with good ROMs',
    ]
    ROM_report_error_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows SL items with errors in ROMs',
    ]
    CHD_report_good_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows SL items with good CHDs',
    ]
    CHD_report_error_list = [
        '*** Advanced MAME Launcher Software Lists audit report ***',
        'This report shows SL items with errors in CHDs',
    ]
    h_list = [
        'There are {} software lists'.format(len(SL_index_dic)),
        '',
    ]
    report_full_list.extend(h_list)
    report_good_list.extend(h_list)
    report_error_list.extend(h_list)
    ROM_report_good_list.extend(h_list)
    ROM_report_error_list.extend(h_list)
    CHD_report_good_list.extend(h_list)
    CHD_report_error_list.extend(h_list)
    # DEBUG code
    # SL_index_dic = {
    #     "32x" : {
    #         "display_name" : "Sega 32X cartridges",
    #         "num_with_CHDs" : 0,
    #         "num_with_ROMs" : 203,
    #         "rom_DB_noext" : "32x"
    #     }
    # }
    # SL audit statistics.
    audit_SL_items_runnable = 0
    audit_SL_items_with_arch = 0
    audit_SL_items_with_arch_OK = 0
    audit_SL_items_with_arch_BAD = 0
    audit_SL_items_without_arch = 0
    audit_SL_items_with_arch_ROM = 0
    audit_SL_items_with_arch_ROM_OK = 0
    audit_SL_items_with_arch_ROM_BAD = 0
    audit_SL_items_without_arch_ROM = 0
    audit_SL_items_with_CHD = 0
    audit_SL_items_with_CHD_OK = 0
    audit_SL_items_with_CHD_BAD = 0
    audit_SL_items_without_CHD = 0
    # Iterate all SL databases and audit ROMs.
    d_text = 'Auditing Software Lists ROMs and CHDs...'
    pDialog = KodiProgressDialog()
    pDialog.startProgress(d_text, len(SL_index_dic))
    SL_ROM_path_FN = FileName(cfg.settings['SL_rom_path'])
    SL_CHD_path_FN = FileName(cfg.settings['SL_chd_path'])
    for SL_name in sorted(SL_index_dic):
        pDialog.updateProgressInc('{}\nSoftware List {}'.format(d_text, SL_name))
        SL_dic = SL_index_dic[SL_name]
        SL_DB_FN = cfg.SL_DB_DIR.pjoin(SL_dic['rom_DB_noext'] + '_items.json')
        SL_AUDIT_ROMs_DB_FN = cfg.SL_DB_DIR.pjoin(SL_dic['rom_DB_noext'] + '_ROM_audit.json')
        roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
        audit_roms = utils_load_JSON_file(SL_AUDIT_ROMs_DB_FN.getPath(), verbose = False)
        # Iterate SL ROMs
        for rom_key in sorted(roms):
            # audit_roms_list and audit_dic are mutable and edited inside the function()
            audit_rom_list = audit_roms[rom_key]
            audit_dic = db_new_audit_dic()
            mame_audit_SL_machine(SL_ROM_path_FN, SL_CHD_path_FN, SL_name, rom_key, audit_rom_list, audit_dic)
            # Audit statistics
            audit_SL_items_runnable += 1
            if audit_dic['machine_has_ROMs_or_CHDs']:
                audit_SL_items_with_arch += 1
                if audit_dic['machine_is_OK']: audit_SL_items_with_arch_OK += 1
                else: audit_SL_items_with_arch_BAD += 1
            else:
                audit_SL_items_without_arch += 1
            if audit_dic['machine_has_ROMs']:
                audit_SL_items_with_arch_ROM += 1
                if audit_dic['machine_ROMs_are_OK']: audit_SL_items_with_arch_ROM_OK += 1
                else: audit_SL_items_with_arch_ROM_BAD += 1
            else:
                audit_SL_items_without_arch_ROM += 1
            if audit_dic['machine_has_CHDs']:
                audit_SL_items_with_CHD += 1
                if audit_dic['machine_CHDs_are_OK']: audit_SL_items_with_CHD_OK += 1
                else: audit_SL_items_with_CHD_BAD += 1
            else:
                audit_SL_items_without_CHD += 1
            # Software/machine header.
            # WARNING: Kodi crashes with a 22 MB text file with colours. No problem if TXT file has not colours.
            rom = roms[rom_key]
            cloneof = rom['cloneof']
            head_list = []
            if cloneof:
                head_list.append('SL {} ROM {} (cloneof {})'.format(SL_name, rom_key, cloneof))
            else:
                head_list.append('SL {} ROM {}'.format(SL_name, rom_key))
            # ROM/CHD report.
            table_str = [ ['right', 'left', 'left', 'left', 'left'] ]
            for m_rom in audit_rom_list:
                if m_rom['type'] == ROM_TYPE_DISK:
                    table_row = [m_rom['type'], '',
                        m_rom['sha1'][0:8], m_rom['location'], m_rom['status']]
                else:
                    table_row = [m_rom['type'], text_type(m_rom['size']),
                        m_rom['crc'], m_rom['location'], m_rom['status']]
                table_str.append(table_row)
            local_str_list = text_render_table_NO_HEADER(table_str)
            local_str_list.append('')
            # Full, ROMs and CHDs report.
            report_full_list.extend(head_list + local_str_list)
            if audit_dic['machine_is_OK']:
                report_good_list.extend(head_list + local_str_list)
            else:
                report_error_list.extend(head_list + local_str_list)
            # ROM report
            if audit_dic['machine_has_ROMs']:
                if audit_dic['machine_ROMs_are_OK']:
                    ROM_report_good_list.extend(head_list + local_str_list)
                else:
                    ROM_report_error_list.extend(head_list + local_str_list)
            # CHD report.
            if audit_dic['machine_has_CHDs']:
                if audit_dic['machine_CHDs_are_OK']:
                    CHD_report_good_list.extend(head_list + local_str_list)
                else:
                    CHD_report_error_list.extend(head_list + local_str_list)
    # Footer of all reports.
    a = '*** Software Lists audit finished ***'
    report_full_list.append(a)
    report_good_list.append(a)
    report_error_list.append(a)
    ROM_report_good_list.append(a)
    ROM_report_error_list.append(a)
    CHD_report_good_list.append(a)
    CHD_report_error_list.append(a)
    pDialog.endProgress()
    # Write reports.
    num_items = 7
    pDialog.startProgress('Writing SL audit reports...', num_items)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_FULL_PATH.getPath(), report_full_list)
    pDialog.updateProgress(1)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_GOOD_PATH.getPath(), report_good_list)
    pDialog.updateProgress(2)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_ERRORS_PATH.getPath(), report_error_list)
    pDialog.updateProgress(3)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_ROMS_GOOD_PATH.getPath(), ROM_report_good_list)
    pDialog.updateProgress(4)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_ROMS_ERRORS_PATH.getPath(), ROM_report_error_list)
    pDialog.updateProgress(5)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_CHDS_GOOD_PATH.getPath(), CHD_report_good_list)
    pDialog.updateProgress(6)
    utils_write_slist_to_file(cfg.REPORT_SL_AUDIT_CHDS_ERRORS_PATH.getPath(), CHD_report_error_list)
    pDialog.endProgress()
    # Update SL audit statistics.
    db_safe_edit(control_dic, 'audit_SL_items_runnable', audit_SL_items_runnable)
    db_safe_edit(control_dic, 'audit_SL_items_with_arch', audit_SL_items_with_arch)
    db_safe_edit(control_dic, 'audit_SL_items_with_arch_OK', audit_SL_items_with_arch_OK)
    db_safe_edit(control_dic, 'audit_SL_items_with_arch_BAD', audit_SL_items_with_arch_BAD)
    db_safe_edit(control_dic, 'audit_SL_items_without_arch', audit_SL_items_without_arch)
    db_safe_edit(control_dic, 'audit_SL_items_with_arch_ROM', audit_SL_items_with_arch_ROM)
    db_safe_edit(control_dic, 'audit_SL_items_with_arch_ROM_OK', audit_SL_items_with_arch_ROM_OK)
    db_safe_edit(control_dic, 'audit_SL_items_with_arch_ROM_BAD', audit_SL_items_with_arch_ROM_BAD)
    db_safe_edit(control_dic, 'audit_SL_items_without_arch_ROM', audit_SL_items_without_arch_ROM)
    db_safe_edit(control_dic, 'audit_SL_items_with_CHD', audit_SL_items_with_CHD)
    db_safe_edit(control_dic, 'audit_SL_items_with_CHD_OK', audit_SL_items_with_CHD_OK)
    db_safe_edit(control_dic, 'audit_SL_items_with_CHD_BAD', audit_SL_items_with_CHD_BAD)
    db_safe_edit(control_dic, 'audit_SL_items_without_CHD', audit_SL_items_without_CHD)
    # Update timestamp and save control_dic.
    db_safe_edit(control_dic, 't_SL_audit', time.time())
    utils_write_JSON_file(cfg.MAIN_CONTROL_PATH.getPath(), control_dic)
# -------------------------------------------------------------------------------------------------
# MAME database building
# -------------------------------------------------------------------------------------------------
# 1) Scan MAME hash dir for XML files.
# 2) For each XML file, read the first XML_READ_LINES lines.
# 3) Search for the line <softwarelist name="32x" description="Sega 32X cartridges">
# 4) Create the file SL_NAMES_PATH with a dictionary {sl_name : description, ... }
#
# <softwarelist name="32x" description="Sega 32X cartridges">
# <softwarelist name="vsmile_cart" description="VTech V.Smile cartridges">
# <softwarelist name="vsmileb_cart" description="VTech V.Smile Baby cartridges">
def mame_build_SL_names(cfg):
    """Scan the MAME hash directory for Software List XML files and save the
    SL_NAMES_PATH JSON database with a dictionary { sl_name : description }.

    Only the first XML_READ_LINES lines of each XML file are read, looking for
    the <softwarelist name="..." description="..."> tag.
    """
    XML_READ_LINES = 600
    log_debug('mame_build_SL_names() Starting...')
    # If MAME hash path is not configured then create an empty file.
    SL_names_dic = {}
    hash_dir_FN = FileName(cfg.settings['SL_hash_path'])
    if not hash_dir_FN.exists():
        log_info('mame_build_SL_names() MAME hash path does not exist.')
        log_info('mame_build_SL_names() Creating empty SL_NAMES_PATH')
        utils_write_JSON_file(cfg.SL_NAMES_PATH.getPath(), SL_names_dic)
        return
    # MAME hash path exists. Carry on.
    file_list = os.listdir(hash_dir_FN.getPath())
    log_debug('mame_build_SL_names() Found {} files'.format(len(file_list)))
    xml_files = [fname for fname in file_list if fname.endswith('.xml')]
    log_debug('mame_build_SL_names() Found {} XML files'.format(len(xml_files)))
    for f_name in xml_files:
        XML_FN = hash_dir_FN.pjoin(f_name)
        # log_debug('Inspecting file "{}"'.format(XML_FN.getPath()))
        # Read first XML_READ_LINES lines.
        try:
            f = io.open(XML_FN.getPath(), 'r', encoding = 'utf-8')
        except IOError:
            log_error('(IOError) Exception opening {}'.format(XML_FN.getPath()))
            continue
        # f.readlines(XML_READ_LINES) does not work well for some files
        # content_list = f.readlines(XML_READ_LINES)
        line_count = 1
        content_list = []
        try:
            for line in f:
                content_list.append(line)
                line_count += 1
                if line_count > XML_READ_LINES: break
        except UnicodeDecodeError:
            log_error('Exception UnicodeDecodeError on line {} of file "{}"'.format(line_count, XML_FN.getBase()))
            # Guard: the decode error may happen on the very first line.
            if content_list:
                log_error('Previous line "{}"'.format(content_list[-1]))
            raise TypeError
        finally:
            # Close the file even when the decode error aborts the scan.
            f.close()
        content_list = [x.strip() for x in content_list]
        for line in content_list:
            # Search for SL name
            if not line.startswith('<softwarelist'): continue
            m = re.search(r'<softwarelist name="([^"]+?)" description="([^"]+?)"', line)
            if not m: continue
            SL_name, SL_desc = m.group(1), m.group(2)
            # log_debug('SL "{}" -> "{}"'.format(SL_name, SL_desc))
            # Substitute SL description (long name).
            if SL_desc in SL_better_name_dic:
                old_SL_desc = SL_desc
                SL_desc = SL_better_name_dic[SL_desc]
                log_debug('Substitute SL "{}" with "{}"'.format(old_SL_desc, SL_desc))
            SL_names_dic[SL_name] = SL_desc
            break
    # Save database
    log_debug('mame_build_SL_names() Extracted {} Software List names'.format(len(SL_names_dic)))
    utils_write_JSON_file(cfg.SL_NAMES_PATH.getPath(), SL_names_dic)
# -------------------------------------------------------------------------------------------------
# Reads and processes MAME.xml
#
# The ROM location in the non-merged set is unique and can be used as a unique dictionary key.
# Include only ROMs and not CHDs.
#
# roms_sha1_dic = {
# rom_nonmerged_location_1 : sha1_hash,
# rom_nonmerged_location_2 : sha1_hash,
# ...
#
# }
#
# Saves:
# MAIN_DB_PATH
# RENDER_DB_PATH
# ROMS_DB_PATH
# MAIN_ASSETS_DB_PATH (empty JSON file)
# MAIN_PCLONE_DIC_PATH
# MAIN_CONTROL_PATH (updated and then JSON file saved)
# ROM_SHA1_HASH_DB_PATH
#
def _get_stats_dic():
return {
'parents' : 0,
'clones' : 0,
'devices' : 0,
'devices_parents' : 0,
'devices_clones' : 0,
'runnable' : 0,
'runnable_parents' : 0,
'runnable_clones' : 0,
'samples' : 0,
'samples_parents' : 0,
'samples_clones' : 0,
'BIOS' : 0,
'BIOS_parents' : 0,
'BIOS_clones' : 0,
'coin' : 0,
'coin_parents' : 0,
'coin_clones' : 0,
'nocoin' : 0,
'nocoin_parents' : 0,
'nocoin_clones' : 0,
'mechanical' : 0,
'mechanical_parents' : 0,
'mechanical_clones' : 0,
'dead' : 0,
'dead_parents' : 0,
'dead_clones' : 0,
}
def _update_stats(stats, machine, m_render, runnable):
if m_render['cloneof']: stats['clones'] += 1
else: stats['parents'] += 1
if m_render['isDevice']:
stats['devices'] += 1
if m_render['cloneof']:
stats['devices_clones'] += 1
else:
stats['devices_parents'] += 1
if runnable:
stats['runnable'] += 1
if m_render['cloneof']:
stats['runnable_clones'] += 1
else:
stats['runnable_parents'] += 1
if machine['sampleof']:
stats['samples'] += 1
if m_render['cloneof']:
stats['samples_clones'] += 1
else:
stats['samples_parents'] += 1
if m_render['isBIOS']:
stats['BIOS'] += 1
if m_render['cloneof']:
stats['BIOS_clones'] += 1
else:
stats['BIOS_parents'] += 1
if runnable:
if machine['input']['att_coins'] > 0:
stats['coin'] += 1
if m_render['cloneof']:
stats['coin_clones'] += 1
else:
stats['coin_parents'] += 1
else:
stats['nocoin'] += 1
if m_render['cloneof']:
stats['nocoin_clones'] += 1
else:
stats['nocoin_parents'] += 1
if machine['isMechanical']:
stats['mechanical'] += 1
if m_render['cloneof']:
stats['mechanical_clones'] += 1
else:
stats['mechanical_parents'] += 1
if machine['isDead']:
stats['dead'] += 1
if m_render['cloneof']:
stats['dead_clones'] += 1
else:
stats['dead_parents'] += 1
def mame_build_MAME_main_database(cfg, st_dic):
# Use for debug purposes. This number must be much bigger than the actual number of machines
# when releasing.
STOP_AFTER_MACHINES = 250000
DATS_dir_FN = FileName(cfg.settings['dats_path'])
ALLTIME_FN = DATS_dir_FN.pjoin(ALLTIME_INI)
ARTWORK_FN = DATS_dir_FN.pjoin(ARTWORK_INI)
BESTGAMES_FN = DATS_dir_FN.pjoin(BESTGAMES_INI)
CATEGORY_FN = DATS_dir_FN.pjoin(CATEGORY_INI)
CATLIST_FN = DATS_dir_FN.pjoin(CATLIST_INI)
CATVER_FN = DATS_dir_FN.pjoin(CATVER_INI)
GENRE_FN = DATS_dir_FN.pjoin(GENRE_INI)
MATURE_FN = DATS_dir_FN.pjoin(MATURE_INI)
NPLAYERS_FN = DATS_dir_FN.pjoin(NPLAYERS_INI)
SERIES_FN = DATS_dir_FN.pjoin(SERIES_INI)
COMMAND_FN = DATS_dir_FN.pjoin(COMMAND_DAT)
GAMEINIT_FN = DATS_dir_FN.pjoin(GAMEINIT_DAT)
HISTORY_XML_FN = DATS_dir_FN.pjoin(HISTORY_XML)
HISTORY_DAT_FN = DATS_dir_FN.pjoin(HISTORY_DAT)
MAMEINFO_FN = DATS_dir_FN.pjoin(MAMEINFO_DAT)
# --- Print user configuration for debug ---
if cfg.settings['op_mode'] == OP_MODE_VANILLA:
rom_path = cfg.settings['rom_path_vanilla']
elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS:
rom_path = cfg.settings['rom_path_2003_plus']
else:
raise TypeError('Unknown op_mode "{}"'.format(cfg.settings['op_mode']))
log_info('mame_build_MAME_main_database() Starting...')
log_info('--- Paths ---')
log_info('mame_prog = "{}"'.format(cfg.settings['mame_prog']))
log_info('ROM path = "{}"'.format(rom_path))
log_info('assets_path = "{}"'.format(cfg.settings['assets_path']))
log_info('DATs_path = "{}"'.format(cfg.settings['dats_path']))
log_info('CHD_path = "{}"'.format(cfg.settings['chd_path']))
log_info('samples_path = "{}"'.format(cfg.settings['samples_path']))
log_info('SL_hash_path = "{}"'.format(cfg.settings['SL_hash_path']))
log_info('SL_rom_path = "{}"'.format(cfg.settings['SL_rom_path']))
log_info('SL_chd_path = "{}"'.format(cfg.settings['SL_chd_path']))
log_info('--- INI paths ---')
log_info('alltime_path = "{}"'.format(ALLTIME_FN.getPath()))
log_info('artwork_path = "{}"'.format(ARTWORK_FN.getPath()))
log_info('bestgames_path = "{}"'.format(BESTGAMES_FN.getPath()))
log_info('category_path = "{}"'.format(CATEGORY_FN.getPath()))
log_info('catlist_path = "{}"'.format(CATLIST_FN.getPath()))
log_info('catver_path = "{}"'.format(CATVER_FN.getPath()))
log_info('genre_path = "{}"'.format(GENRE_FN.getPath()))
log_info('mature_path = "{}"'.format(MATURE_FN.getPath()))
log_info('nplayers_path = "{}"'.format(NPLAYERS_FN.getPath()))
log_info('series_path = "{}"'.format(SERIES_FN.getPath()))
log_info('--- DAT paths ---')
log_info('command_path = "{}"'.format(COMMAND_FN.getPath()))
log_info('gameinit_path = "{}"'.format(GAMEINIT_FN.getPath()))
log_info('history_xml_path = "{}"'.format(HISTORY_XML_FN.getPath()))
log_info('history_dat_path = "{}"'.format(HISTORY_DAT_FN.getPath()))
log_info('mameinfo_path = "{}"'.format(MAMEINFO_FN.getPath()))
# --- Automatically extract and/or process MAME XML ---
# After this block of code we have:
# 1) a valid XML_control_dic and the XML control file is created and/or current.
# 2) valid and verified for existence MAME_XML_path.
MAME_XML_path, XML_control_FN = mame_init_MAME_XML(cfg, st_dic)
if st_dic['abort']: return
XML_control_dic = utils_load_JSON_file(XML_control_FN.getPath())
# Main progress dialog.
pDialog = KodiProgressDialog()
# --- Build SL_NAMES_PATH if available, to be used later in the catalog building ---
if cfg.settings['global_enable_SL']:
pDialog.startProgress('Creating list of Software List names...')
mame_build_SL_names(cfg)
pDialog.endProgress()
else:
log_info('SL globally disabled, not creating SL names.')
# --- Load INI files to include category information ---
num_items = 10
pd_line1 = 'Processing INI files...'
pDialog.startProgress(pd_line1, num_items)
pDialog.updateProgress(0, '{}\nFile {}'.format(pd_line1, ALLTIME_INI))
alltime_dic = mame_load_INI_datfile_simple(ALLTIME_FN.getPath())
pDialog.updateProgress(1, '{}\nFile {}'.format(pd_line1, ARTWORK_INI))
artwork_dic = mame_load_INI_datfile_simple(ARTWORK_FN.getPath())
pDialog.updateProgress(2, '{}\nFile {}'.format(pd_line1, BESTGAMES_INI))
bestgames_dic = mame_load_INI_datfile_simple(BESTGAMES_FN.getPath())
pDialog.updateProgress(3, '{}\nFile {}'.format(pd_line1, CATEGORY_INI))
category_dic = mame_load_INI_datfile_simple(CATEGORY_FN.getPath())
pDialog.updateProgress(4, '{}\nFile {}'.format(pd_line1, CATLIST_INI))
catlist_dic = mame_load_INI_datfile_simple(CATLIST_FN.getPath())
pDialog.updateProgress(5, '{}\nFile {}'.format(pd_line1, CATVER_INI))
(catver_dic, veradded_dic) = mame_load_Catver_ini(CATVER_FN.getPath())
pDialog.updateProgress(6, '{}\nFile {}'.format(pd_line1, GENRE_INI))
genre_dic = mame_load_INI_datfile_simple(GENRE_FN.getPath())
pDialog.updateProgress(7, '{}\nFile {}'.format(pd_line1, MATURE_INI))
mature_dic = mame_load_Mature_ini(MATURE_FN.getPath())
pDialog.updateProgress(8, '{}\nFile {}'.format(pd_line1, NPLAYERS_INI))
nplayers_dic = mame_load_nplayers_ini(NPLAYERS_FN.getPath())
pDialog.updateProgress(9, '{}\nFile {}'.format(pd_line1, SERIES_INI))
series_dic = mame_load_INI_datfile_simple(SERIES_FN.getPath())
pDialog.endProgress()
# --- Load DAT files to include category information ---
num_items = 4
pd_line1 = 'Processing DAT files...'
pDialog.startProgress(pd_line1, num_items)
pDialog.updateProgress(0, '{}\nFile {}'.format(pd_line1, COMMAND_DAT))
command_dic = mame_load_Command_DAT(COMMAND_FN.getPath())
pDialog.updateProgress(1, '{}\nFile {}'.format(pd_line1, GAMEINIT_DAT))
gameinit_dic = mame_load_GameInit_DAT(GAMEINIT_FN.getPath())
# First try to load History.xml. If not found, then try History.dat
if HISTORY_XML_FN.exists():
pDialog.updateProgress(2, '{}\nFile {}'.format(pd_line1, HISTORY_XML))
history_dic = mame_load_History_xml(HISTORY_XML_FN.getPath())
else:
pDialog.updateProgress(2, '{}\nFile {}'.format(pd_line1, HISTORY_DAT))
history_dic = mame_load_History_DAT(HISTORY_DAT_FN.getPath())
pDialog.updateProgress(3, '{}\nFile {}'.format(pd_line1, MAMEINFO_DAT))
mameinfo_dic = mame_load_MameInfo_DAT(MAMEINFO_FN.getPath())
pDialog.endProgress()
# --- Verify that INIs comply with the data model ---
# In MAME 0.209 only artwork, category and series are lists. Other INIs define
# machine-unique categories (each machine belongs to one category only).
log_info('alltime_dic unique_categories {}'.format(alltime_dic['unique_categories']))
log_info('artwork_dic unique_categories {}'.format(artwork_dic['unique_categories']))
log_info('bestgames_dic unique_categories {}'.format(bestgames_dic['unique_categories']))
log_info('category_dic unique_categories {}'.format(category_dic['unique_categories']))
log_info('catlist_dic unique_categories {}'.format(catlist_dic['unique_categories']))
log_info('catver_dic unique_categories {}'.format(catver_dic['unique_categories']))
log_info('genre_dic unique_categories {}'.format(genre_dic['unique_categories']))
log_info('mature_dic unique_categories {}'.format(mature_dic['unique_categories']))
log_info('nplayers_dic unique_categories {}'.format(nplayers_dic['unique_categories']))
log_info('series_dic unique_categories {}'.format(series_dic['unique_categories']))
log_info('veradded_dic unique_categories {}'.format(veradded_dic['unique_categories']))
# ---------------------------------------------------------------------------------------------
# Incremental Parsing approach B (from [1])
# ---------------------------------------------------------------------------------------------
# Do not load whole MAME XML into memory! Use an iterative parser to
# grab only the information we want and discard the rest.
# See [1] http://effbot.org/zone/element-iterparse.htm
log_info('Loading XML "{}"'.format(MAME_XML_path.getPath()))
xml_iter = ET.iterparse(MAME_XML_path.getPath(), events = ("start", "end"))
event, root = next(xml_iter)
if cfg.settings['op_mode'] == OP_MODE_VANILLA:
mame_version_str = root.attrib['build']
mame_version_int = mame_get_numerical_version(mame_version_str)
elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS:
mame_version_str = '0.78 (RA2003Plus)'
mame_version_int = mame_get_numerical_version(mame_version_str)
else:
raise ValueError
log_info('mame_build_MAME_main_database() MAME string version "{}"'.format(mame_version_str))
log_info('mame_build_MAME_main_database() MAME numerical version {}'.format(mame_version_int))
# --- Process MAME XML ---
total_machines = XML_control_dic['total_machines']
processed_machines = 0
pDialog.startProgress('Building main MAME database...', total_machines)
stats = _get_stats_dic()
log_info('mame_build_MAME_main_database() total_machines {:,}'.format(total_machines))
machines, renderdb_dic, machines_roms, machines_devices = {}, {}, {}, {}
roms_sha1_dic = {}
log_info('mame_build_MAME_main_database() Parsing MAME XML file ...')
num_iteration = 0
for event, elem in xml_iter:
# Debug the elements we are iterating from the XML file
# log_debug('event "{}"'.format(event))
# log_debug('elem.tag "{}" | elem.text "{}" | elem.attrib "{}"'.format(elem.tag, elem.text, text_type(elem.attrib)))
# <machine> tag start event includes <machine> attributes
if event == 'start' and (elem.tag == 'machine' or elem.tag == 'game'):
processed_machines += 1
machine = db_new_machine_dic()
m_render = db_new_machine_render_dic()
m_roms = db_new_roms_object()
device_list = []
runnable = False
num_displays = 0
# --- Process <machine> attributes ----------------------------------------------------
# name is #REQUIRED attribute
if 'name' not in elem.attrib:
log_error('name attribute not found in <machine> tag.')
raise ValueError('name attribute not found in <machine> tag')
m_name = elem.attrib['name']
# In modern MAME sourcefile attribute is always present
if cfg.settings['op_mode'] == OP_MODE_VANILLA:
# sourcefile #IMPLIED attribute
if 'sourcefile' not in elem.attrib:
log_error('sourcefile attribute not found in <machine> tag.')
raise ValueError('sourcefile attribute not found in <machine> tag.')
# Remove trailing '.cpp' from driver name
machine['sourcefile'] = elem.attrib['sourcefile']
# In MAME 2003 Plus sourcefile attribute does not exists.
elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS:
machine['sourcefile'] = '[ Not set ]'
else:
raise ValueError
# Optional, default no
if 'isbios' not in elem.attrib:
m_render['isBIOS'] = False
else:
m_render['isBIOS'] = True if elem.attrib['isbios'] == 'yes' else False
if 'isdevice' not in elem.attrib:
m_render['isDevice'] = False
else:
m_render['isDevice'] = True if elem.attrib['isdevice'] == 'yes' else False
if 'ismechanical' not in elem.attrib:
machine['isMechanical'] = False
else:
machine['isMechanical'] = True if elem.attrib['ismechanical'] == 'yes' else False
# Optional, default yes
if 'runnable' not in elem.attrib:
runnable = True
else:
runnable = False if elem.attrib['runnable'] == 'no' else True
# cloneof is #IMPLIED attribute
if 'cloneof' in elem.attrib: m_render['cloneof'] = elem.attrib['cloneof']
# romof is #IMPLIED attribute
if 'romof' in elem.attrib: machine['romof'] = elem.attrib['romof']
# sampleof is #IMPLIED attribute
if 'sampleof' in elem.attrib: machine['sampleof'] = elem.attrib['sampleof']
# --- Add catver/catlist/genre ---
machine['alltime'] = alltime_dic['data'][m_name] if m_name in alltime_dic['data'] else '[ Not set ]'
machine['artwork'] = artwork_dic['data'][m_name] if m_name in artwork_dic['data'] else [ '[ Not set ]' ]
machine['bestgames'] = bestgames_dic['data'][m_name] if m_name in bestgames_dic['data'] else '[ Not set ]'
machine['category'] = category_dic['data'][m_name] if m_name in category_dic['data'] else [ '[ Not set ]' ]
machine['catlist'] = catlist_dic['data'][m_name] if m_name in catlist_dic['data'] else '[ Not set ]'
machine['catver'] = catver_dic['data'][m_name] if m_name in catver_dic['data'] else '[ Not set ]'
machine['genre'] = genre_dic['data'][m_name] if m_name in genre_dic['data'] else '[ Not set ]'
machine['series'] = series_dic['data'][m_name] if m_name in series_dic['data'] else [ '[ Not set ]' ]
machine['veradded'] = veradded_dic['data'][m_name] if m_name in veradded_dic['data'] else '[ Not set ]'
# Careful, nplayers goes into render database.
m_render['nplayers'] = nplayers_dic['data'][m_name] if m_name in nplayers_dic['data'] else '[ Not set ]'
elif event == 'start' and elem.tag == 'description':
m_render['description'] = text_type(elem.text)
elif event == 'start' and elem.tag == 'year':
m_render['year'] = text_type(elem.text)
elif event == 'start' and elem.tag == 'manufacturer':
m_render['manufacturer'] = text_type(elem.text)
# Check in machine has BIOS
# <biosset> name and description attributes are mandatory
elif event == 'start' and elem.tag == 'biosset':
# --- Add BIOS to ROMS_DB_PATH ---
bios = db_new_bios_dic()
bios['name'] = text_type(elem.attrib['name'])
bios['description'] = text_type(elem.attrib['description'])
m_roms['bios'].append(bios)
# Check in machine has ROMs
# A) ROM is considered to be valid if SHA1 has exists.
# Are there ROMs with no sha1? There are a lot, for example
# machine 1941j <rom name="yi22b.1a" size="279" status="nodump" region="bboardplds" />
#
# B) A ROM is unique to that machine if the <rom> tag does not have the 'merge' attribute.
# For example, snes and snespal both have <rom> tags that point to exactly the same
# BIOS. However, in a split set only snes.zip ROM set exists.
# snes -> <rom name="spc700.rom" size="64" crc="44bb3a40" ... >
# snespal -> <rom name="spc700.rom" merge="spc700.rom" size="64" crc="44bb3a40" ... >
#
# C) In AML, hasROM actually means "machine has it own ROMs not found somewhere else".
elif event == 'start' and elem.tag == 'rom':
# --- Research ---
# if not 'sha1' in elem.attrib:
# raise GeneralError('ROM with no sha1 (machine {})'.format(machine_name))
# --- Add BIOS to ROMS_DB_PATH ---
rom = db_new_rom_dic()
rom['name'] = text_type(elem.attrib['name'])
rom['merge'] = text_type(elem.attrib['merge']) if 'merge' in elem.attrib else ''
rom['bios'] = text_type(elem.attrib['bios']) if 'bios' in elem.attrib else ''
rom['size'] = int(elem.attrib['size']) if 'size' in elem.attrib else 0
rom['crc'] = text_type(elem.attrib['crc']) if 'crc' in elem.attrib else ''
m_roms['roms'].append(rom)
# --- ROMs SHA1 database ---
sha1 = text_type(elem.attrib['sha1']) if 'sha1' in elem.attrib else ''
# Only add valid ROMs, ignore invalid.
if sha1:
rom_nonmerged_location = m_name + '/' + rom['name']
roms_sha1_dic[rom_nonmerged_location] = sha1
# Check in machine has CHDs
# A) CHD is considered valid if and only if SHA1 hash exists.
# Keep in mind that there can be multiple disks per machine, some valid, some invalid.
# Just one valid CHD is OK.
# B) A CHD is unique to a machine if the <disk> tag does not have the 'merge' attribute.
# See comments for ROMs avobe.
elif event == 'start' and elem.tag == 'disk':
# <!ATTLIST disk name CDATA #REQUIRED>
# if 'sha1' in elem.attrib and 'merge' in elem.attrib: machine['CHDs_merged'].append(elem.attrib['name'])
# if 'sha1' in elem.attrib and 'merge' not in elem.attrib: machine['CHDs'].append(elem.attrib['name'])
# Add BIOS to ROMS_DB_PATH.
disk = db_new_disk_dic()
disk['name'] = text_type(elem.attrib['name'])
disk['merge'] = text_type(elem.attrib['merge']) if 'merge' in elem.attrib else ''
disk['sha1'] = text_type(elem.attrib['sha1']) if 'sha1' in elem.attrib else ''
m_roms['disks'].append(disk)
# Machine devices
elif event == 'start' and elem.tag == 'device_ref':
device_list.append(text_type(elem.attrib['name']))
# Machine samples
elif event == 'start' and elem.tag == 'sample':
sample = { 'name' : text_type(elem.attrib['name']) }
m_roms['samples'].append(sample)
# Chips define CPU and audio circuits.
elif event == 'start' and elem.tag == 'chip':
if elem.attrib['type'] == 'cpu':
machine['chip_cpu_name'].append(elem.attrib['name'])
# Some machines have more than one display tag (for example aquastge has 2).
# Other machines have no display tag (18w)
elif event == 'start' and elem.tag == 'display':
rotate_str = elem.attrib['rotate'] if 'rotate' in elem.attrib else '0'
width_str = elem.attrib['width'] if 'width' in elem.attrib else 'Undefined'
height_str = elem.attrib['height'] if 'height' in elem.attrib else 'Undefined'
# All attribute lists have same length, event if data is empty.
# machine['display_tag'].append(elem.attrib['tag'])
machine['display_type'].append(elem.attrib['type'])
machine['display_rotate'].append(rotate_str)
machine['display_width'].append(width_str)
machine['display_height'].append(height_str)
machine['display_refresh'].append(elem.attrib['refresh'])
num_displays += 1
# Some machines have no controls at all.
# 1) <control> reqbuttons attribute, pang uses it (has 2 buttons but only 1 is required
# 2) <control> reqbuttons ways2, bcclimbr uses it. Sometimes ways attribute is a string!
#
# machine['input'] = {
# 'att_players' CDATA #REQUIRED
# 'att_coins' CDATA #IMPLIED
# 'att_service' (yes|no) "no"
# 'att_tilt' (yes|no) "no"
# 'control_list' : [
# {
# 'type' : string CDATA #REQUIRED
# 'player' : int CDATA #IMPLIED
# 'buttons' : int CDATA #IMPLIED
# 'ways' : [ ways string, ways2 string, ways3 string ] CDATA #IMPLIED
# }, ...
# ]
# }
#
# In MAME 2003 Plus bios machines are not runnable and only have <description>,
# <year>, <manufacturer>, <biosset> and <rom> tags. For example, machine neogeo.
#
elif event == 'start' and elem.tag == 'input':
# In the archaic MAMEs used by Retroarch the control structure is different
# and this code must be adapted.
vanilla_mame_input_mode = True
# --- <input> attributes ---
# Attribute list in the same order as in the DTD
att_service = False
if 'service' in elem.attrib and elem.attrib['service'] == 'yes':
att_service = True
att_tilt = False
if 'tilt' in elem.attrib and elem.attrib['tilt'] == 'yes':
att_tilt = True
att_players = int(elem.attrib['players']) if 'players' in elem.attrib else 0
# "control" attribute only in MAME 2003 Plus.
# Note that in some machines with valid controls, for example 88games, <input> control
# attribute is empty and must be given a default value.
att_control = '[ Undefined control type ]'
if 'control' in elem.attrib:
vanilla_mame_input_mode = False
att_control = elem.attrib['control']
# "buttons" attribute only in MAME 2003 Plus.
att_buttons = 0
if 'buttons' in elem.attrib:
vanilla_mame_input_mode = False
att_buttons = int(elem.attrib['buttons'])
att_coins = int(elem.attrib['coins']) if 'coins' in elem.attrib else 0
# --- Create control_list ---
control_list = []
if vanilla_mame_input_mode:
# --- Vanilla MAME mode ---
# <input> child tags.
for control_child in elem:
attrib = control_child.attrib
# Skip non <control> tags. Process <control> tags only.
if control_child.tag != 'control': continue
# Error test. "type" is the only required attribute.
if 'type' not in attrib:
raise TypeError('<input> -> <control> has not "type" attribute')
ctrl_dic = {'type' : '', 'player' : -1, 'buttons' : -1, 'ways' : []}
ctrl_dic['type'] = attrib['type']
ctrl_dic['player'] = int(attrib['player']) if 'player' in attrib else -1
ctrl_dic['buttons'] = int(attrib['buttons']) if 'buttons' in attrib else -1
ways_list = []
if 'ways' in attrib: ways_list.append(attrib['ways'])
if 'ways2' in attrib: ways_list.append(attrib['ways2'])
if 'ways3' in attrib: ways_list.append(attrib['ways3'])
ctrl_dic['ways'] = ways_list
control_list.append(ctrl_dic)
# Fix player field when implied.
if att_players == 1:
for control in control_list: control['player'] = 1
else:
# --- MAME 2003 Plus mode ---
# Create a simulated control_list.
for i in range(att_players):
control_list.append({
'type' : att_control,
'player' : i + 1,
'buttons' : att_buttons,
'ways' : [],
})
# Add new input dictionary.
machine['input'] = {
'att_service' : att_service,
'att_tilt' : att_tilt,
'att_players' : att_players,
'att_coins' : att_coins,
'control_list' : control_list,
}
elif event == 'start' and elem.tag == 'driver':
# status is #REQUIRED attribute
m_render['driver_status'] = text_type(elem.attrib['status'])
elif event == 'start' and elem.tag == 'softwarelist':
# name is #REQUIRED attribute
machine['softwarelists'].append(elem.attrib['name'])
# Device tag for machines that support loading external files
elif event == 'start' and elem.tag == 'device':
att_type = elem.attrib['type'] # The only mandatory attribute
att_tag = elem.attrib['tag'] if 'tag' in elem.attrib else ''
att_mandatory = elem.attrib['mandatory'] if 'mandatory' in elem.attrib else ''
att_interface = elem.attrib['interface'] if 'interface' in elem.attrib else ''
# Transform device_mandatory into bool
if att_mandatory and att_mandatory == '1': att_mandatory = True
else: att_mandatory = False
# Iterate children of <device> and search for <instance> tags
instance_tag_found = False
inst_name = ''
inst_briefname = ''
ext_names = []
for device_child in elem:
if device_child.tag == 'instance':
# Stop if <device> tag has more than one <instance> tag. In MAME 0.190 no
# machines trigger this.
if instance_tag_found:
raise GeneralError('Machine {} has more than one <instance> inside <device>')
inst_name = device_child.attrib['name']
inst_briefname = device_child.attrib['briefname']
instance_tag_found = True
elif device_child.tag == 'extension':
ext_names.append(device_child.attrib['name'])
# NOTE Some machines have no instance inside <device>, for example 2020bb
# I don't know how to launch those machines
# if not instance_tag_found:
# log_warning('<instance> tag not found inside <device> tag (machine {})'.format(m_name))
# device_type = '{} (NI)'.format(device_type)
# Add device to database
device_dic = {
'att_type' : att_type,
'att_tag' : att_tag,
'att_mandatory' : att_mandatory,
'att_interface' : att_interface,
'instance' : { 'name' : inst_name, 'briefname' : inst_briefname },
'ext_names' : ext_names
}
machine['devices'].append(device_dic)
# --- <machine>/<game> tag closing. Add new machine to database ---
elif event == 'end' and (elem.tag == 'machine' or elem.tag == 'game'):
# Checks in modern MAME
if cfg.settings['op_mode'] == OP_MODE_VANILLA:
# Assumption 1: isdevice = True if and only if runnable = False
if m_render['isDevice'] == runnable:
log_error("Machine {}: machine['isDevice'] == runnable".format(m_name))
raise ValueError
# Are there machines with more than 1 <display> tag. Answer: YES
# if num_displays > 1:
# log_error("Machine {}: num_displays = {}".format(m_name, num_displays))
# raise ValueError
# All machines with 0 displays are mechanical? NO, 24cdjuke has no screen and
# is not mechanical. However 24cdjuke is a preliminary driver.
# if num_displays == 0 and not machine['ismechanical']:
# log_error("Machine {}: num_displays == 0 and not machine['ismechanical']".format(m_name))
# raise ValueError
# Checks in Retroarch MAME 2003 Plus
elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS:
# In MAME 2003 Plus XML some <year> tags are empty.
# Set a default value.
if not m_render['year']: m_render['year'] = '[ Not set ]'
else:
raise ValueError
# Mark dead machines. A machine is dead if Status is preliminary AND have no controls.
if m_render['driver_status'] == 'preliminary' and not machine['input']['control_list']:
machine['isDead'] = True
# --- Delete XML element once it has been processed to conserve memory ---
elem.clear()
# --- Compute statistics ---
_update_stats(stats, machine, m_render, runnable)
# Add new machine
machines[m_name] = machine
renderdb_dic[m_name] = m_render
machines_roms[m_name] = m_roms
machines_devices[m_name] = device_list
# --- Print something to prove we are doing stuff ---
num_iteration += 1
if num_iteration % 1000 == 0:
pDialog.updateProgress(processed_machines)
# log_debug('Processed {:10d} events ({:6d} machines so far) ...'.format(
# num_iteration, processed_machines))
# log_debug('processed_machines = {}'.format(processed_machines))
# log_debug('total_machines = {}'.format(total_machines))
# Stop after STOP_AFTER_MACHINES machines have been processed for debug.
if processed_machines >= STOP_AFTER_MACHINES: break
pDialog.endProgress()
log_info('Processed {:,} MAME XML events'.format(num_iteration))
log_info('Processed machines {:,} ({:,} parents, {:,} clones)'.format(
processed_machines, stats['parents'], stats['clones']))
log_info('Dead machines {:,} ({:,} parents, {:,} clones)'.format(
stats['dead'], stats['dead_parents'], stats['dead_clones']))
# ---------------------------------------------------------------------------------------------
# Main parent-clone list
# ---------------------------------------------------------------------------------------------
# Create a couple of data struct for quickly know the parent of a clone game and
# all clones of a parent.
# main_pclone_dic = { 'parent_name' : ['clone_name', 'clone_name', ... ] , ... }
# main_clone_to_parent_dic = { 'clone_name' : 'parent_name', ... }
log_info('Making PClone list...')
main_pclone_dic = {}
main_clone_to_parent_dic = {}
for machine_name in renderdb_dic:
m_render = renderdb_dic[machine_name]
if m_render['cloneof']:
parent_name = m_render['cloneof']
# If parent already in main_pclone_dic then add clone to parent list.
# If parent not there, then add parent first and then add clone.
if parent_name not in main_pclone_dic: main_pclone_dic[parent_name] = []
main_pclone_dic[parent_name].append(machine_name)
# Add clone machine to main_clone_to_parent_dic
main_clone_to_parent_dic[machine_name] = parent_name
continue
# Machine is a parent. Add to main_pclone_dic if not already there.
if machine_name not in main_pclone_dic: main_pclone_dic[machine_name] = []
# ---------------------------------------------------------------------------------------------
# Initialise asset list
# ---------------------------------------------------------------------------------------------
log_debug('Initializing MAME asset database...')
log_debug('Option generate_history_infolabel is {}'.format(cfg.settings['generate_history_infolabel']))
assetdb_dic = {key : db_new_MAME_asset() for key in machines}
if cfg.settings['generate_history_infolabel'] and history_idx_dic:
log_debug('Adding History.DAT to MAME asset database.')
for m_name in assetdb_dic:
asset = assetdb_dic[m_name]
asset['flags'] = db_initial_flags(machines[m_name], renderdb_dic[m_name], machines_roms[m_name])
if m_name in history_idx_dic['mame']['machines']:
d_name, db_list, db_machine = history_idx_dic['mame']['machines'][m_name].split('|')
asset['history'] = history_dic[db_list][db_machine]
else:
log_debug('Not including History.DAT in MAME asset database.')
for m_name in assetdb_dic:
assetdb_dic[m_name]['flags'] = db_initial_flags(machines[m_name],
renderdb_dic[m_name], machines_roms[m_name])
# ---------------------------------------------------------------------------------------------
# Improve information fields in Main Render database
# ---------------------------------------------------------------------------------------------
if mature_dic:
log_info('MAME machine Mature information available.')
for machine_name in renderdb_dic:
renderdb_dic[machine_name]['isMature'] = True if machine_name in mature_dic['data'] else False
else:
log_info('MAME machine Mature flag not available.')
# Add genre infolabel into render database.
if genre_dic:
log_info('Using genre.ini for MAME genre information.')
for machine_name in renderdb_dic:
renderdb_dic[machine_name]['genre'] = machines[machine_name]['genre']
elif categories_dic:
log_info('Using catver.ini for MAME genre information.')
for machine_name in renderdb_dic:
renderdb_dic[machine_name]['genre'] = machines[machine_name]['catver']
elif catlist_dic:
log_info('Using catlist.ini for MAME genre information.')
for machine_name in renderdb_dic:
renderdb_dic[machine_name]['genre'] = machines[machine_name]['catlist']
# ---------------------------------------------------------------------------------------------
# Improve name in DAT indices and machine names
# ---------------------------------------------------------------------------------------------
# --- History DAT categories are Software List names ---
if history_dic:
log_debug('Updating History DAT categories and machine names ...')
SL_names_dic = utils_load_JSON_file(cfg.SL_NAMES_PATH.getPath())
for cat_name in history_dic['index']:
if cat_name == 'mame':
# Improve MAME machine names
history_dic['index'][cat_name]['name'] = 'MAME'
for machine_name in history_dic['index'][cat_name]['machines']:
if machine_name not in renderdb_dic: continue
# Rebuild the CSV string.
m_str = history_dic['index'][cat_name]['machines'][machine_name]
old_display_name, db_list_name, db_machine_name = m_str.split('|')
display_name = renderdb_dic[machine_name]['description']
m_str = misc_build_db_str_3(display_name, db_list_name, db_machine_name)
history_dic['index'][cat_name]['machines'][machine_name] = m_str
elif cat_name in SL_names_dic:
# Improve SL machine names. This must be done when building the SL databases
# and not here.
history_dic['index'][cat_name]['name'] = SL_names_dic[cat_name]
# MameInfo DAT machine names.
if mameinfo_dic['index']:
log_debug('Updating Mameinfo DAT machine names ...')
for cat_name in mameinfo_dic['index']:
for machine_key in mameinfo_dic['index'][cat_name]:
if machine_key not in renderdb_dic: continue
mameinfo_dic['index'][cat_name][machine_key] = renderdb_dic[machine_key]['description']
# GameInit DAT machine names.
if gameinit_dic['index']:
log_debug('Updating GameInit DAT machine names ...')
for machine_key in gameinit_dic['index']:
if machine_key not in renderdb_dic: continue
gameinit_dic['index'][machine_key] = renderdb_dic[machine_key]['description']
# Command DAT machine names.
if command_dic['index']:
log_debug('Updating Command DAT machine names ...')
for machine_key in command_dic['index']:
if machine_key not in renderdb_dic: continue
command_dic['index'][machine_key] = renderdb_dic[machine_key]['description']
# ---------------------------------------------------------------------------------------------
# Update/Reset MAME control dictionary
# Create a new control_dic. This effectively resets AML status.
# The XML control file is required to create the new control_dic.
# ---------------------------------------------------------------------------------------------
log_info('Creating new control_dic.')
log_info('AML version string "{}"'.format(cfg.addon.info_version))
log_info('AML version int {}'.format(cfg.addon_version_int))
control_dic = db_new_control_dic()
db_safe_edit(control_dic, 'op_mode_raw', cfg.settings['op_mode_raw'])
db_safe_edit(control_dic, 'op_mode', cfg.settings['op_mode'])
# Information from the XML control file.
db_safe_edit(control_dic, 'stats_total_machines', total_machines)
# Addon and MAME version strings
db_safe_edit(control_dic, 'ver_AML_str', cfg.addon.info_version)
db_safe_edit(control_dic, 'ver_AML_int', cfg.addon_version_int)
db_safe_edit(control_dic, 'ver_mame_str', mame_version_str)
db_safe_edit(control_dic, 'ver_mame_int', mame_version_int)
# INI files
db_safe_edit(control_dic, 'ver_alltime', alltime_dic['version'])
db_safe_edit(control_dic, 'ver_artwork', artwork_dic['version'])
db_safe_edit(control_dic, 'ver_bestgames', bestgames_dic['version'])
db_safe_edit(control_dic, 'ver_category', category_dic['version'])
db_safe_edit(control_dic, 'ver_catlist', catlist_dic['version'])
db_safe_edit(control_dic, 'ver_catver', catver_dic['version'])
db_safe_edit(control_dic, 'ver_genre', genre_dic['version'])
db_safe_edit(control_dic, 'ver_mature', mature_dic['version'])
db_safe_edit(control_dic, 'ver_nplayers', nplayers_dic['version'])
db_safe_edit(control_dic, 'ver_series', series_dic['version'])
# DAT files
db_safe_edit(control_dic, 'ver_command', command_dic['version'])
db_safe_edit(control_dic, 'ver_gameinit', gameinit_dic['version'])
db_safe_edit(control_dic, 'ver_history', history_dic['version'])
db_safe_edit(control_dic, 'ver_mameinfo', mameinfo_dic['version'])
# Statistics
db_safe_edit(control_dic, 'stats_processed_machines', processed_machines)
db_safe_edit(control_dic, 'stats_parents', stats['parents'])
db_safe_edit(control_dic, 'stats_clones', stats['clones'])
db_safe_edit(control_dic, 'stats_runnable', stats['runnable'])
db_safe_edit(control_dic, 'stats_runnable_parents', stats['runnable_parents'])
db_safe_edit(control_dic, 'stats_runnable_clones', stats['runnable_clones'])
# Main filters
db_safe_edit(control_dic, 'stats_coin', stats['coin'])
db_safe_edit(control_dic, 'stats_coin_parents', stats['coin_parents'])
db_safe_edit(control_dic, 'stats_coin_clones', stats['coin_clones'])
db_safe_edit(control_dic, 'stats_nocoin', stats['nocoin'])
db_safe_edit(control_dic, 'stats_nocoin_parents', stats['nocoin_parents'])
db_safe_edit(control_dic, 'stats_nocoin_clones', stats['nocoin_clones'])
db_safe_edit(control_dic, 'stats_mechanical', stats['mechanical'])
db_safe_edit(control_dic, 'stats_mechanical_parents', stats['mechanical_parents'])
db_safe_edit(control_dic, 'stats_mechanical_clones', stats['mechanical_clones'])
db_safe_edit(control_dic, 'stats_dead', stats['dead'])
db_safe_edit(control_dic, 'stats_dead_parents', stats['dead_parents'])
db_safe_edit(control_dic, 'stats_dead_clones', stats['dead_clones'])
db_safe_edit(control_dic, 'stats_devices', stats['devices'])
db_safe_edit(control_dic, 'stats_devices_parents', stats['devices_parents'])
db_safe_edit(control_dic, 'stats_devices_clones', stats['devices_clones'])
# Binary filters
db_safe_edit(control_dic, 'stats_BIOS', stats['BIOS'])
db_safe_edit(control_dic, 'stats_BIOS_parents', stats['BIOS_parents'])
db_safe_edit(control_dic, 'stats_BIOS_clones', stats['BIOS_clones'])
db_safe_edit(control_dic, 'stats_samples', stats['samples'])
db_safe_edit(control_dic, 'stats_samples_parents', stats['samples_parents'])
db_safe_edit(control_dic, 'stats_samples_clones', stats['samples_clones'])
# --- Timestamp ---
db_safe_edit(control_dic, 't_MAME_DB_build', time.time())
# ---------------------------------------------------------------------------------------------
# Build main distributed hashed database
# ---------------------------------------------------------------------------------------------
# This saves the hash files in the database directory.
# At this point the main hashed database is complete but the asset hashed DB is empty.
db_build_main_hashed_db(cfg, control_dic, machines, renderdb_dic)
db_build_asset_hashed_db(cfg, control_dic, assetdb_dic)
# --- Save databases ---
log_info('Saving database JSON files...')
db_files = [
[machines, 'MAME machines main', cfg.MAIN_DB_PATH.getPath()],
[renderdb_dic, 'MAME render DB', cfg.RENDER_DB_PATH.getPath()],
[assetdb_dic, 'MAME asset DB', cfg.ASSET_DB_PATH.getPath()],
[machines_roms, 'MAME machine ROMs', cfg.ROMS_DB_PATH.getPath()],
[machines_devices, 'MAME machine devices', cfg.DEVICES_DB_PATH.getPath()],
[main_pclone_dic, 'MAME PClone dictionary', cfg.MAIN_PCLONE_DB_PATH.getPath()],
[roms_sha1_dic, 'MAME ROMs SHA1 dictionary', cfg.SHA1_HASH_DB_PATH.getPath()],
# --- DAT files ---
[history_dic['index'], 'History DAT index', cfg.HISTORY_IDX_PATH.getPath()],
[history_dic['data'], 'History DAT database', cfg.HISTORY_DB_PATH.getPath()],
[mameinfo_dic['index'], 'MAMEInfo DAT index', cfg.MAMEINFO_IDX_PATH.getPath()],
[mameinfo_dic['data'], 'MAMEInfo DAT database', cfg.MAMEINFO_DB_PATH.getPath()],
[gameinit_dic['index'], 'Gameinit DAT index', cfg.GAMEINIT_IDX_PATH.getPath()],
[gameinit_dic['data'], 'Gameinit DAT database', cfg.GAMEINIT_DB_PATH.getPath()],
[command_dic['index'], 'Command DAT index', cfg.COMMAND_IDX_PATH.getPath()],
[command_dic['data'], 'Command DAT database', cfg.COMMAND_DB_PATH.getPath()],
# --- Save control_dic after everything is saved ---
[control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()],
]
db_save_files(db_files)
# Return a dictionary with references to the objects just in case they are needed after
# this function (in "Build everything", for example). This saves time, because databases do not
# need to be reloaded, and apparently memory as well.
return {
'machines' : machines,
'renderdb' : renderdb_dic,
'assetdb' : assetdb_dic,
'roms' : machines_roms,
'devices' : machines_devices,
'main_pclone_dic' : main_pclone_dic,
'history_idx_dic' : history_dic['index'],
'mameinfo_idx_dic' : mameinfo_dic['index'],
'gameinit_idx_dic' : gameinit_dic['index'],
'command_idx_dic' : command_dic['index'],
'history_data_dic' : history_dic['data'],
'control_dic' : control_dic,
}
# -------------------------------------------------------------------------------------------------
# Generates the ROM audit database. This database contains invalid ROMs also to display information
# in "View / Audit", "View MAME machine ROMs" context menu. This database also includes
# device ROMs (<device_ref> ROMs).
def _get_ROM_type(rom):
    # Classify a ROM for the audit database from its 'bios' and 'merge' flags.
    # The four flag combinations map to BROM/XROM/MROM/ROM respectively.
    has_bios = bool(rom['bios'])
    has_merge = bool(rom['merge'])
    if has_bios:
        return ROM_TYPE_BROM if has_merge else ROM_TYPE_XROM
    return ROM_TYPE_MROM if has_merge else ROM_TYPE_ROM
# Finds merged ROM merged_name in the parent ROM set roms (list of dictionaries).
# Returns a dictionary (first item of the returned list) or None if the merged ROM cannot
# be found in the ROMs of the parent.
def _get_merged_rom(roms, merged_name):
merged_rom_list = [r for r in roms if r['name'] == merged_name]
if len(merged_rom_list) > 0:
return merged_rom_list[0]
else:
return None
# Traverses the ROM hierarchy and returns the ROM location and name.
def _get_ROM_location(rom_set, rom, m_name, machines, renderdb_dic, machine_roms):
# In the Merged set all Parent and Clone ROMs are in the parent archive.
# What about BIOS and Device ROMs?
# However, according to the Pleasuredome DATs, ROMs are organised like
# this:
# clone_name_a/clone_rom_1
# clone_name_b/clone_rom_1
# parent_rom_1
# parent_rom_2
if rom_set == 'MERGED':
cloneof = renderdb_dic[m_name]['cloneof']
if cloneof:
location = cloneof + '/' + m_name + '/' + rom['name']
else:
location = m_name + '/' + rom['name']
elif rom_set == 'SPLIT':
machine = machines[m_name]
cloneof = renderdb_dic[m_name]['cloneof']
if not cloneof:
# --- Parent machine ---
# 1. In the Split set non-merged ROMs are in the machine archive and merged ROMs
# are in the parent archive.
if rom['merge']:
romof = machine['romof']
bios_name = romof
bios_roms = machine_roms[bios_name]['roms']
bios_rom_merged_name = rom['merge']
bios_merged_rom = _get_merged_rom(bios_roms, bios_rom_merged_name)
if bios_merged_rom['merge']:
bios_romof = machines[bios_name]['romof']
parent_bios_name = bios_romof
parent_bios_roms = machine_roms[parent_bios_name]['roms']
parent_bios_rom_merged_name = bios_merged_rom['merge']
parent_bios_merged_rom = _get_merged_rom(parent_bios_roms, parent_bios_rom_merged_name)
location = parent_bios_name + '/' + parent_bios_merged_rom['name']
else:
location = bios_name + '/' + bios_merged_rom['name']
else:
location = m_name + '/' + rom['name']
else:
# --- Clone machine ---
# 1. In the Split set, non-merged ROMs are in the machine ZIP archive and
# merged ROMs are in the parent archive.
# 2. If ROM is a BIOS it is located in the romof of the parent. BIOS ROMs
# always have the merge attribute.
# 3. Some machines (notably mslugN) also have non-BIOS common ROMs merged in
# neogeo.zip BIOS archive.
# 4. Some machines (notably XXXXX) have all ROMs merged. In other words, do not
# have their own ROMs.
# 5. Special case: there could be duplicate ROMs with different regions.
# For example, in neogeo.zip
# <rom name="sm1.sm1" size="131072" crc="94416d67" sha1="42f..." />
# <rom name="sm1.sm1" size="131072" crc="94416d67" sha1="42f..." />
#
# Furthermore, some machines may have more than 2 identical ROMs:
# <machine name="aa3000" sourcefile="aa310.cpp" cloneof="aa310" romof="aa310">
# <rom name="cmos_riscos3.bin" merge="cmos_riscos3.bin" bios="300" size="256" crc="0da2d31d" />
# <rom name="cmos_riscos3.bin" merge="cmos_riscos3.bin" bios="310" size="256" crc="0da2d31d" />
# <rom name="cmos_riscos3.bin" merge="cmos_riscos3.bin" bios="311" size="256" crc="0da2d31d" />
# <rom name="cmos_riscos3.bin" merge="cmos_riscos3.bin" bios="319" size="256" crc="0da2d31d" />
#
# 6. In MAME 0.206, clone machine adonisce has a merged ROM 'setchip v4.04.09.u7'
# that is not found on the parent machine adoins ROMs.
# AML WARN : Clone machine "adonisce"
# AML WARN : ROM "setchip v4.04.09.u7" MERGE "setchip v4.04.09.u7"
# AML WARN : Cannot be found on parent "adonis" ROMs
# By looking to the XML, the ROM "setchip v4.04.09.u7" is on the BIOS aristmk5
# More machines with same issue: adonisu, bootsctnu, bootsctnua, bootsctnub, ...
# and a lot more machines related to BIOS aristmk5.
#
if rom['merge']:
# >> Get merged ROM from parent
parent_name = cloneof
parent_roms = machine_roms[parent_name]['roms']
clone_rom_merged_name = rom['merge']
parent_merged_rom = _get_merged_rom(parent_roms, clone_rom_merged_name)
# >> Clone merged ROM cannot be found in parent ROM set. This is likely a MAME
# >> XML bug. In this case, treat the clone marged ROM as a non-merged ROM.
if parent_merged_rom is None:
log_warning('Clone machine "{}" parent_merged_rom is None'.format(m_name))
log_warning('ROM "{}" MERGE "{}"'.format(rom['name'], rom['merge']))
log_warning('Cannot be found on parent "{}" ROMs'.format(parent_name))
# >> Check if merged ROM is in the BIOS machine.
bios_name = machines[parent_name]['romof']
if bios_name:
log_warning('Parent machine "{}" has BIOS machine "{}"'.format(parent_name, bios_name))
log_warning('Searching for clone merged ROM "{}" in BIOS ROMs'.format(clone_rom_merged_name))
bios_roms = machine_roms[bios_name]['roms']
bios_merged_rom = _get_merged_rom(bios_roms, clone_rom_merged_name)
location = bios_name + '/' + bios_merged_rom['name']
else:
TypeError
# >> Check if clone merged ROM is also merged in parent (BIOS ROM)
elif parent_merged_rom['merge']:
parent_romof = machines[parent_name]['romof']
bios_name = parent_romof
bios_roms = machine_roms[bios_name]['roms']
bios_rom_merged_name = parent_merged_rom['merge']
bios_merged_rom = _get_merged_rom(bios_roms, bios_rom_merged_name)
# At least in one machine (0.196) BIOS ROMs can be merged in another
# BIOS ROMs (1 level of recursion in BIOS ROM merging).
if bios_merged_rom['merge']:
bios_romof = machines[bios_name]['romof']
parent_bios_name = bios_romof
parent_bios_roms = machine_roms[parent_bios_name]['roms']
parent_bios_rom_merged_name = bios_merged_rom['merge']
parent_bios_merged_rom = _get_merged_rom(parent_bios_roms, parent_bios_rom_merged_name)
location = parent_bios_name + '/' + parent_bios_merged_rom['name']
else:
location = bios_name + '/' + bios_merged_rom['name']
else:
location = parent_name + '/' + parent_merged_rom['name']
else:
location = m_name + '/' + rom['name']
# In the Non-Merged set all ROMs are in the machine archive ZIP archive, with
# the exception of BIOS ROMs and device ROMs.
elif rom_set == 'NONMERGED':
location = m_name + '/' + rom['name']
# In the Fully Non-Merged sets all ROMs are in the machine ZIP archive, including
# BIOS ROMs and device ROMs.
# Note that PD ROM sets are named Non-Merged but actually they are Fully Non-merged.
elif rom_set == 'FULLYNONMERGED':
location = m_name + '/' + rom['name']
else:
raise TypeError
return location
def _get_CHD_location(chd_set, disk, m_name, machines, renderdb_dic, machine_roms):
if chd_set == 'MERGED':
machine = machines[m_name]
cloneof = renderdb_dic[m_name]['cloneof']
romof = machine['romof']
if not cloneof:
# --- Parent machine ---
if disk['merge']:
location = romof + '/' + disk['merge']
else:
location = m_name + '/' + disk['name']
else:
# --- Clone machine ---
if disk['merge']:
# Get merged ROM from parent
parent_name = cloneof
parent_romof = machines[parent_name]['romof']
parent_disks = machine_roms[parent_name]['disks']
clone_disk_merged_name = disk['merge']
# Pick ROMs with same name and choose the first one.
parent_merged_disk_l = [r for r in parent_disks if r['name'] == clone_disk_merged_name]
parent_merged_disk = parent_merged_disk_l[0]
# Check if clone merged ROM is also merged in parent
if parent_merged_disk['merge']:
# ROM is in the 'romof' archive of the parent ROM
super_parent_name = parent_romof
super_parent_disks = machine_roms[super_parent_name]['disks']
parent_disk_merged_name = parent_merged_disk['merge']
# Pick ROMs with same name and choose the first one.
super_parent_merged_disk_l = [r for r in super_parent_disks if r['name'] == parent_disk_merged_name]
super_parent_merged_disk = super_parent_merged_disk_l[0]
location = super_parent_name + '/' + super_parent_merged_disk['name']
else:
location = parent_name + '/' + parent_merged_disk['name']
else:
location = cloneof + '/' + disk['name']
elif chd_set == 'SPLIT':
machine = machines[m_name]
cloneof = renderdb_dic[m_name]['cloneof']
romof = machine['romof']
if not cloneof:
# --- Parent machine ---
if disk['merge']:
location = romof + '/' + disk['name']
else:
location = m_name + '/' + disk['name']
else:
# --- Clone machine ---
parent_romof = machines[cloneof]['romof']
if disk['merge']:
location = romof + '/' + disk['name']
else:
location = m_name + '/' + disk['name']
elif chd_set == 'NONMERGED':
location = m_name + '/' + disk['name']
else:
raise TypeError
return location
# Returns a unique and alphabetically sorted list of ROM ZIP files.
# This list is different depending on the ROM set (Merged, Split or Non-merged).
def mame_get_ROM_ZIP_list(machine_archives_dic):
    """Returns a unique and alphabetically sorted list of ROM ZIP archive names.

    The list is different depending on the ROM set (Merged, Split or Non-merged).
    """
    rom_list = []
    # Only the values are needed, so iterate values() instead of items().
    for machine in machine_archives_dic.values():
        rom_list.extend(machine['ROMs'])
    # sorted() already returns a list; no extra list() wrapper needed.
    return sorted(set(rom_list))
def mame_get_Sample_ZIP_list(machine_archives_dic):
    """Returns a unique and alphabetically sorted list of Sample ZIP archive names."""
    rom_list = []
    # Only the values are needed, so iterate values() instead of items().
    for machine in machine_archives_dic.values():
        rom_list.extend(machine['Samples'])
    # sorted() already returns a list; no extra list() wrapper needed.
    return sorted(set(rom_list))
def mame_get_CHD_list(machine_archives_dic):
    """Returns a unique and alphabetically sorted list of CHD file names."""
    rom_list = []
    # Only the values are needed, so iterate values() instead of items().
    for machine in machine_archives_dic.values():
        rom_list.extend(machine['CHDs'])
    # sorted() already returns a list; no extra list() wrapper needed.
    return sorted(set(rom_list))
#
# Checks for errors before building the MAME ROM audit databases.
# Displays a Kodi dialog if an error is found.
# Updates st_dic in place; currently a stub that performs no checks and
# returns nothing.
#
#
def mame_check_before_build_ROM_audit_databases(cfg, st_dic, control_dic):
    """Checks preconditions before building the ROM audit databases.

    Resets st_dic in place via kodi_reset_status() and returns None.
    """
    kodi_reset_status(st_dic)
    # --- Check that MAME database have been built ---
    # NOTE(review): the check is not implemented yet; the function currently
    # always leaves st_dic in its reset (no-error) state.
    pass
#
# Builds the ROM/CHD/Samples audit database and more things.
# Updates statistics in control_dic and saves it.
# The audit database changes for the Merged, Split and Non-merged sets (the location of the ROMs changes).
# The audit database is used when auditing MAME machines.
#
# audit_roms_dic = {
# 'machine_name ' : [
# {
# 'crc' : string,
# 'location' : 'zip_name/rom_name.rom'
# 'name' : string,
# 'size' : int,
# 'type' : 'ROM' or 'BROM' or 'MROM' or 'XROM'
# }, ...,
# {
# 'location' : 'machine_name/chd_name.chd'
# 'name' : string,
# 'sha1' : string,
# 'type' : 'DISK'
# }, ...,
# {
# 'location' : 'machine_name/sample_name'
# 'name' : string,
# 'type' : 'SAM'
# }, ...,
# ], ...
# }
#
# This function also builds the machine files database.
#
# A) For every machine stores the ROM ZIP/CHD/Samples ZIP files required to run the machine.
# B) A ROM ZIP/CHD exists if and only if it has valid ROMs (CRC and SHA1 exist).
# C) Used by the ROM scanner to check how many machines may be run or not depending of the
# ROM ZIPs/CHDs/Sample ZIPs you have.
# D) ROM ZIPs and CHDs are mandatory to run a machine. Samples are not. This function kind of
# thinks that Samples are also mandatory.
#
# machine_archives_dic = {
# 'machine_name ' : {
# 'ROMs' : [name1, name2, ...],
# 'CHDs' : [dir/name1, dir/name2, ...],
# 'Samples' : [name1, name2, ...],
# }, ...
# }
#
# Saved files:
# ROM_AUDIT_DB_PATH
# ROM_SET_MACHINE_FILES_DB_PATH
# MAIN_CONTROL_PATH (control_dic)
#
# Add the following fields to db_dic_in:
# audit_roms
# machine_archives
#
def mame_build_ROM_audit_databases(cfg, st_dic, db_dic_in):
    """Builds the ROM/CHD/Samples audit database and the machine archives database.

    Mutates machine_roms (db_dic_in['roms']) in place: adds 'type' and
    'location' fields to every ROM/disk/sample, and later pops the 'merge'
    and 'bios' fields before the databases are saved to disk.
    Updates audit statistics in control_dic, saves the databases, and adds
    the 'audit_roms' and 'machine_archives' fields to db_dic_in.
    """
    log_info('mame_build_ROM_audit_databases() Initialising ...')
    control_dic = db_dic_in['control_dic']
    machines = db_dic_in['machines']
    renderdb_dic = db_dic_in['renderdb']
    devices_db_dic = db_dic_in['devices']
    machine_roms = db_dic_in['roms']
    # --- Initialize ---
    # This must match the values defined in settings.xml, "ROM sets" tab.
    rom_set = ['MERGED', 'SPLIT', 'NONMERGED', 'FULLYNONMERGED'][cfg.settings['mame_rom_set']]
    rom_set_str = ['Merged', 'Split', 'Non-merged', 'Fully Non-merged'][cfg.settings['mame_rom_set']]
    chd_set = ['MERGED', 'SPLIT', 'NONMERGED'][cfg.settings['mame_chd_set']]
    chd_set_str = ['Merged', 'Split', 'Non-merged'][cfg.settings['mame_chd_set']]
    log_info('mame_build_ROM_audit_databases() ROM set is {}'.format(rom_set))
    log_info('mame_build_ROM_audit_databases() CHD set is {}'.format(chd_set))
    # ---------------------------------------------------------------------------------------------
    # Audit database
    # ---------------------------------------------------------------------------------------------
    log_info('mame_build_ROM_audit_databases() Starting...')
    log_info('Building {} ROM/Sample audit database...'.format(rom_set_str))
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Building {} ROM set...'.format(rom_set_str), len(machines))
    stats_audit_MAME_machines_runnable = 0
    audit_roms_dic = {}
    for m_name in sorted(machines):
        pDialog.updateProgressInc()
        # --- ROMs ---
        # Skip device machines.
        if renderdb_dic[m_name]['isDevice']: continue
        stats_audit_MAME_machines_runnable += 1
        m_roms = machine_roms[m_name]['roms']
        machine_rom_set = []
        for rom in m_roms:
            # Note: this mutates the ROM dicts inside machine_roms in place.
            rom['type'] = _get_ROM_type(rom)
            rom['location'] = _get_ROM_location(rom_set, rom, m_name, machines, renderdb_dic, machine_roms)
            machine_rom_set.append(rom)
        # --- Device ROMs ---
        device_roms_list = []
        for device in devices_db_dic[m_name]:
            device_roms_dic = machine_roms[device]
            for rom in device_roms_dic['roms']:
                rom['type'] = ROM_TYPE_DROM
                rom['location'] = device + '/' + rom['name']
                device_roms_list.append(rom)
        if device_roms_list: machine_rom_set.extend(device_roms_list)
        # --- Samples ---
        # NOTE(review): if a machine has samples but an empty/None 'sampleof',
        # the concatenation below would fail or produce a '/...' location --
        # presumably 'sampleof' is always set when samples exist. TODO confirm.
        sampleof = machines[m_name]['sampleof']
        m_samples = machine_roms[m_name]['samples']
        samples_list = []
        for sample in m_samples:
            sample['type'] = ROM_TYPE_SAMPLE
            sample['location'] = sampleof + '/' + sample['name']
            samples_list.append(sample)
        if samples_list: machine_rom_set.extend(samples_list)
        # Add ROMs to main DB
        audit_roms_dic[m_name] = machine_rom_set
    pDialog.endProgress()
    # --- CHD set (refactored code) ---------------------------------------------------------------
    log_info('Building {} CHD audit database...'.format(chd_set_str))
    pDialog.startProgress('Building {} CHD set...'.format(chd_set_str), len(machines))
    for m_name in sorted(machines):
        pDialog.updateProgressInc()
        # Skip Device Machines
        if renderdb_dic[m_name]['isDevice']: continue
        m_disks = machine_roms[m_name]['disks']
        machine_chd_set = []
        for disk in m_disks:
            disk['type'] = ROM_TYPE_DISK
            disk['location'] = _get_CHD_location(chd_set, disk, m_name, machines, renderdb_dic, machine_roms)
            machine_chd_set.append(disk)
        if m_name in audit_roms_dic:
            audit_roms_dic[m_name].extend(machine_chd_set)
        else:
            audit_roms_dic[m_name] = machine_chd_set
    pDialog.endProgress()
    # ---------------------------------------------------------------------------------------------
    # Machine files and ROM ZIP/Sample ZIP/CHD lists.
    # ---------------------------------------------------------------------------------------------
    # NOTE roms_dic and chds_dic may have invalid ROMs/CHDs. However, machine_archives_dic must
    # have only valid ROM archives (ZIP/7Z).
    # For every machine, it goes ROM by ROM and makes a list of ZIP archive locations. Then, it
    # transforms the list into a set to have a list with unique elements.
    # roms_dic/chds_dic have invalid ROMs. Skip invalid ROMs.
    log_info('Building ROM ZIP/Sample ZIP/CHD file lists...')
    pDialog.startProgress('Building ROM, Sample and CHD archive lists...', len(machines))
    machine_archives_dic = {}
    full_ROM_archive_set = set()
    full_Sample_archive_set = set()
    full_CHD_archive_set = set()
    # Per-category counters, split into parent/clone for the statistics report.
    machine_archives_ROM = 0
    machine_archives_ROM_parents = 0
    machine_archives_ROM_clones = 0
    machine_archives_Samples = 0
    machine_archives_Samples_parents = 0
    machine_archives_Samples_clones = 0
    machine_archives_CHD = 0
    machine_archives_CHD_parents = 0
    machine_archives_CHD_clones = 0
    archive_less = 0
    archive_less_parents = 0
    archive_less_clones = 0
    ROMs_total = 0
    ROMs_valid = 0
    ROMs_invalid = 0
    CHDs_total = 0
    CHDs_valid = 0
    CHDs_invalid = 0
    for m_name in audit_roms_dic:
        pDialog.updateProgressInc()
        isClone = True if renderdb_dic[m_name]['cloneof'] else False
        rom_list = audit_roms_dic[m_name]
        machine_rom_archive_set = set()
        machine_sample_archive_set = set()
        machine_chd_archive_set = set()
        # --- Iterate ROMs/CHDs ---
        for rom in rom_list:
            if rom['type'] == ROM_TYPE_DISK:
                CHDs_total += 1
                # Skip invalid CHDs
                if not rom['sha1']:
                    CHDs_invalid += 1
                    continue
                CHDs_valid += 1
                # For CHDs the full 'dir/name' location is used as the archive name.
                chd_name = rom['location']
                machine_chd_archive_set.add(chd_name)
                full_CHD_archive_set.add(rom['location'])
            elif rom['type'] == ROM_TYPE_SAMPLE:
                # For ROMs/Samples only the ZIP name (first path component) is used.
                sample_str_list = rom['location'].split('/')
                zip_name = sample_str_list[0]
                machine_sample_archive_set.add(zip_name)
                archive_str = rom['location'].split('/')[0]
                full_Sample_archive_set.add(archive_str)
            else:
                ROMs_total += 1
                # Skip invalid ROMs
                if not rom['crc']:
                    ROMs_invalid += 1
                    continue
                ROMs_valid += 1
                rom_str_list = rom['location'].split('/')
                zip_name = rom_str_list[0]
                machine_rom_archive_set.add(zip_name)
                archive_str = rom['location'].split('/')[0]
                # if not archive_str: continue
                full_ROM_archive_set.add(archive_str)
        machine_archives_dic[m_name] = {
            'ROMs' : list(machine_rom_archive_set),
            'Samples' : list(machine_sample_archive_set),
            'CHDs' : list(machine_chd_archive_set),
        }
        # --- Statistics ---
        if machine_rom_archive_set:
            machine_archives_ROM += 1
            if isClone:
                machine_archives_ROM_clones += 1
            else:
                machine_archives_ROM_parents += 1
        if machine_sample_archive_set:
            machine_archives_Samples += 1
            if isClone:
                machine_archives_Samples_clones += 1
            else:
                machine_archives_Samples_parents += 1
        if machine_chd_archive_set:
            machine_archives_CHD += 1
            if isClone:
                machine_archives_CHD_clones += 1
            else:
                machine_archives_CHD_parents += 1
        if not (machine_rom_archive_set or machine_sample_archive_set or machine_chd_archive_set):
            archive_less += 1
            if isClone:
                archive_less_clones += 1
            else:
                archive_less_parents += 1
    pDialog.endProgress()
    # ---------------------------------------------------------------------------------------------
    # machine_roms dictionary is passed as argument and not save in this function.
    # It is modified in this function to create audit_roms_dic.
    # Remove unused fields to save memory before saving the audit_roms_dic JSON file.
    # Do not remove earlier because 'merge' is used in the _get_XXX_location() functions.
    # ---------------------------------------------------------------------------------------------
    log_info('Cleaning audit database before saving it to disk...')
    pDialog.startProgress('Cleaning audit database...', len(machines))
    for m_name in sorted(machines):
        pDialog.updateProgressInc()
        # --- Skip devices and process ROMs and CHDs ---
        if renderdb_dic[m_name]['isDevice']: continue
        for rom in machine_roms[m_name]['roms']:
            # Remove unused fields to save space in JSON database, but remove from the copy!
            rom.pop('merge')
            rom.pop('bios')
        for disk in machine_roms[m_name]['disks']:
            disk.pop('merge')
    pDialog.endProgress()
    # ---------------------------------------------------------------------------------------------
    # Update control dictionary.
    # ---------------------------------------------------------------------------------------------
    db_safe_edit(control_dic, 'stats_audit_MAME_machines_runnable', stats_audit_MAME_machines_runnable)
    db_safe_edit(control_dic, 'stats_audit_MAME_ROM_ZIP_files', len(full_ROM_archive_set))
    db_safe_edit(control_dic, 'stats_audit_MAME_Sample_ZIP_files', len(full_Sample_archive_set))
    db_safe_edit(control_dic, 'stats_audit_MAME_CHD_files', len(full_CHD_archive_set))
    db_safe_edit(control_dic, 'stats_audit_machine_archives_ROM', machine_archives_ROM)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_ROM_parents', machine_archives_ROM_parents)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_ROM_clones', machine_archives_ROM_clones)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_CHD', machine_archives_CHD)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_CHD_parents', machine_archives_CHD_parents)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_CHD_clones', machine_archives_CHD_clones)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_Samples', machine_archives_Samples)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_Samples_parents', machine_archives_Samples_parents)
    db_safe_edit(control_dic, 'stats_audit_machine_archives_Samples_clones', machine_archives_Samples_clones)
    db_safe_edit(control_dic, 'stats_audit_archive_less', archive_less)
    db_safe_edit(control_dic, 'stats_audit_archive_less_parents', archive_less_parents)
    db_safe_edit(control_dic, 'stats_audit_archive_less_clones', archive_less_clones)
    db_safe_edit(control_dic, 'stats_audit_ROMs_total', ROMs_total)
    db_safe_edit(control_dic, 'stats_audit_ROMs_valid', ROMs_valid)
    db_safe_edit(control_dic, 'stats_audit_ROMs_invalid', ROMs_invalid)
    db_safe_edit(control_dic, 'stats_audit_CHDs_total', CHDs_total)
    db_safe_edit(control_dic, 'stats_audit_CHDs_valid', CHDs_valid)
    db_safe_edit(control_dic, 'stats_audit_CHDs_invalid', CHDs_invalid)
    db_safe_edit(control_dic, 't_MAME_Audit_DB_build', time.time())
    # --- Save databases ---
    db_files = [
        [audit_roms_dic, 'MAME ROM Audit', cfg.ROM_AUDIT_DB_PATH.getPath()],
        [machine_archives_dic, 'Machine file list', cfg.ROM_SET_MACHINE_FILES_DB_PATH.getPath()],
        # --- Save control_dic after everything is saved ---
        [control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()],
    ]
    db_save_files(db_files)
    # Add data generated in this function to dictionary for caller code use.
    db_dic_in['audit_roms'] = audit_roms_dic
    db_dic_in['machine_archives'] = machine_archives_dic
# Checks for errors before building the MAME catalogs.
# Displays a Kodi dialog if an error is found.
# Updates st_dic in place; currently a stub that performs no checks and
# returns nothing.
def mame_check_before_build_MAME_catalogs(cfg, st_dic, control_dic):
    """Checks preconditions before building the MAME catalogs.

    Resets st_dic in place via kodi_reset_status() and returns None.
    """
    kodi_reset_status(st_dic)
    # --- Check that database exists ---
    # NOTE(review): the check is not implemented yet; the function currently
    # always leaves st_dic in its reset (no-error) state.
    pass
#
# Updates db_dic_in and adds cache_index field.
#
# A) Builds the following catalog files
# CATALOG_MAIN_PARENT_PATH
# CATALOG_MAIN_ALL_PATH
# CATALOG_CATVER_PARENT_PATH
# CATALOG_CATVER_ALL_PATH
# ...
#
# main_catalog_parents = {
# 'cat_key' : [ parent1, parent2, ... ]
# }
#
# main_catalog_all = {
# 'cat_key' : [ machine1, machine2, ... ]
# }
#
# B) Cache index:
# CACHE_INDEX_PATH
#
# cache_index_dic = {
# 'catalog_name' : { --> 'Main', 'Binary', ...
# 'cat_key' : {
# 'num_machines' : int,
# 'num_parents' : int,
# 'hash' : text_type
# }, ...
# }, ...
# }
#
def mame_build_MAME_catalogs(cfg, st_dic, db_dic_in):
control_dic = db_dic_in['control_dic']
machines = db_dic_in['machines']
renderdb_dic = db_dic_in['renderdb']
assetdb_dic = db_dic_in['assetdb']
machine_roms = db_dic_in['roms']
main_pclone_dic = db_dic_in['main_pclone_dic']
# --- Machine count ---
cache_index_dic = {
# Virtual Main filter catalog
'Main' : {},
# Virtual Binary filter catalog
'Binary' : {},
# INI/DAT based catalogs
'Catver' : {},
'Catlist' : {},
'Genre' : {},
'Category' : {},
'NPlayers' : {},
'Bestgames' : {},
'Series' : {},
'Alltime' : {},
'Artwork' : {},
'Version' : {},
# MAME XML extracted catalogs
'Controls_Expanded' : {},
'Controls_Compact' : {},
'Devices_Expanded' : {},
'Devices_Compact' : {},
'Display_Type' : {},
'Display_VSync' : {},
'Display_Resolution' : {},
'CPU' : {},
'Driver' : {},
'Manufacturer' : {},
'ShortName' : {},
'LongName' : {},
'BySL' : {},
'Year' : {},
}
NUM_CATALOGS = len(cache_index_dic)
NORMAL_DRIVER_SET = {
'88games.cpp',
'asteroid.cpp',
'cball.cpp',
}
UNUSUAL_DRIVER_SET = {
'aristmk5.cpp',
'adp.cpp',
'cubo.cpp',
'mpu4vid.cpp',
'peplus.cpp',
'sfbonus.cpp',
}
# --- Progress dialog ---
diag_line1 = 'Building catalogs...'
pDialog = KodiProgressDialog()
processed_filters = 0
# ---------------------------------------------------------------------------------------------
# Main filters (None catalog) -----------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
pDialog.startProgress('{}\n{}'.format(diag_line1, 'Main catalog'), NUM_CATALOGS)
main_catalog_parents, main_catalog_all = {}, {}
# --- Normal and Unusual machine list ---
# Machines with Coin Slot and Non Mechanical and not Dead and not Device
log_info('Making None catalog - Coin index ...')
normal_parent_dic, normal_all_dic, unusual_parent_dic, unusual_all_dic = {}, {}, {}, {}
for parent_name in main_pclone_dic:
machine_main = machines[parent_name]
machine_render = renderdb_dic[parent_name]
n_coins = machine_main['input']['att_coins'] if machine_main['input'] else 0
if machine_main['isMechanical']: continue
if n_coins == 0: continue
if machine_main['isDead']: continue
if machine_render['isDevice']: continue
# Make list of machine controls.
if machine_main['input']:
control_list = [ctrl_dic['type'] for ctrl_dic in machine_main['input']['control_list']]
else:
control_list = []
        # --- Determine if machine is Normal or Unusual ---
# Standard machines.
if ('only_buttons' in control_list and len(control_list) > 1) \
or machine_main['sourcefile'] in NORMAL_DRIVER_SET:
normal_parent_dic[parent_name] = machine_render['description']
normal_all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, normal_all_dic)
#
# Unusual machines. Most of them you don't wanna play.
# No controls or control_type has "only_buttons" or "gambling" or "hanafuda" or "mahjong"
#
elif not control_list \
or 'only_buttons' in control_list or 'gambling' in control_list \
or 'hanafuda' in control_list or 'mahjong' in control_list \
or machine_main['sourcefile'] in UNUSUAL_DRIVER_SET:
unusual_parent_dic[parent_name] = machine_render['description']
unusual_all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, unusual_all_dic)
#
# What remains go to the Normal/Standard list.
#
else:
normal_parent_dic[parent_name] = machine_render['description']
normal_all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, normal_all_dic)
main_catalog_parents['Normal'] = normal_parent_dic
main_catalog_all['Normal'] = normal_all_dic
main_catalog_parents['Unusual'] = unusual_parent_dic
main_catalog_all['Unusual'] = unusual_all_dic
# --- NoCoin list ---
# A) Machines with No Coin Slot and Non Mechanical and not Dead and not Device
log_info('Making NoCoin index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine_main = machines[parent_name]
machine_render = renderdb_dic[parent_name]
n_coins = machine_main['input']['att_coins'] if machine_main['input'] else 0
if machine_main['isMechanical']: continue
if n_coins > 0: continue
if machine_main['isDead']: continue
if machine_render['isDevice']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
main_catalog_parents['NoCoin'] = parent_dic
main_catalog_all['NoCoin'] = all_dic
# --- Mechanical machines ---
# Mechanical machines and not Dead and not Device
log_info('Making Mechanical index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine_main = machines[parent_name]
machine_render = renderdb_dic[parent_name]
if not machine_main['isMechanical']: continue
if machine_main['isDead']: continue
if machine_render['isDevice']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
main_catalog_parents['Mechanical'] = parent_dic
main_catalog_all['Mechanical'] = all_dic
# --- Dead machines ---
log_info('Making Dead Machines index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine_main = machines[parent_name]
machine_render = renderdb_dic[parent_name]
if not machine_main['isDead']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
main_catalog_parents['Dead'] = parent_dic
main_catalog_all['Dead'] = all_dic
# --- Device machines ---
log_info('Making Device Machines index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine_render = renderdb_dic[parent_name]
if not machine_render['isDevice']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
main_catalog_parents['Devices'] = parent_dic
main_catalog_all['Devices'] = all_dic
# --- Build ROM cache index and save Main catalog JSON file ---
mame_cache_index_builder('Main', cache_index_dic, main_catalog_all, main_catalog_parents)
utils_write_JSON_file(cfg.CATALOG_MAIN_ALL_PATH.getPath(), main_catalog_all)
utils_write_JSON_file(cfg.CATALOG_MAIN_PARENT_PATH.getPath(), main_catalog_parents)
processed_filters += 1
# ---------------------------------------------------------------------------------------------
# Binary filters ------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Binary catalog'))
binary_catalog_parents, binary_catalog_all = {}, {}
# --- CHD machines ---
log_info('Making CHD Machines index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine = machines[parent_name]
machine_render = renderdb_dic[parent_name]
if machine_render['isDevice']: continue # >> Skip device machines
if not machine_roms[parent_name]['disks']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
binary_catalog_parents['CHD'] = parent_dic
binary_catalog_all['CHD'] = all_dic
# --- Machines with samples ---
log_info('Making Samples Machines index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine = machines[parent_name]
machine_render = renderdb_dic[parent_name]
if machine_render['isDevice']: continue # >> Skip device machines
if not machine['sampleof']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
binary_catalog_parents['Samples'] = parent_dic
binary_catalog_all['Samples'] = all_dic
# --- Software List machines ---
log_info('Making Software List Machines index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine = machines[parent_name]
machine_render = renderdb_dic[parent_name]
if machine_render['isDevice']: continue # >> Skip device machines
if not machine['softwarelists']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
binary_catalog_parents['SoftwareLists'] = parent_dic
binary_catalog_all['SoftwareLists'] = all_dic
# --- BIOS ---
log_info('Making BIOS Machines index ...')
parent_dic, all_dic = {}, {}
for parent_name in main_pclone_dic:
machine_render = renderdb_dic[parent_name]
if machine_render['isDevice']: continue # Skip device machines
if not machine_render['isBIOS']: continue
parent_dic[parent_name] = machine_render['description']
all_dic[parent_name] = machine_render['description']
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, all_dic)
binary_catalog_parents['BIOS'] = parent_dic
binary_catalog_all['BIOS'] = all_dic
# Build cache index and save Binary catalog JSON file
mame_cache_index_builder('Binary', cache_index_dic, binary_catalog_all, binary_catalog_parents)
utils_write_JSON_file(cfg.CATALOG_BINARY_ALL_PATH.getPath(), binary_catalog_all)
utils_write_JSON_file(cfg.CATALOG_BINARY_PARENT_PATH.getPath(), binary_catalog_parents)
processed_filters += 1
# ---------------------------------------------------------------------------------------------
# Cataloged machine lists ---------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# --- Catver catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Catver catalog'))
log_info('Making Catver catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Catver)
mame_cache_index_builder('Catver', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CATVER_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CATVER_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Catlist catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Catlist catalog'))
log_info('Making Catlist catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Catlist)
mame_cache_index_builder('Catlist', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CATLIST_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CATLIST_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Genre catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Genre catalog'))
log_info('Making Genre catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Genre)
mame_cache_index_builder('Genre', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_GENRE_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_GENRE_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Category catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Category catalog'))
log_info('Making Category catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Category)
mame_cache_index_builder('Category', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CATEGORY_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CATEGORY_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Nplayers catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Nplayers catalog'))
log_info('Making Nplayers catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
renderdb_dic, renderdb_dic, main_pclone_dic, mame_catalog_key_NPlayers)
mame_cache_index_builder('NPlayers', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_NPLAYERS_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_NPLAYERS_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Bestgames catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Bestgames catalog'))
log_info('Making Bestgames catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Bestgames)
mame_cache_index_builder('Bestgames', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_BESTGAMES_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_BESTGAMES_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Series catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Series catalog'))
log_info('Making Series catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Series)
mame_cache_index_builder('Series', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_SERIES_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_SERIES_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Alltime catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Alltime catalog'))
log_info('Making Alltime catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Alltime)
mame_cache_index_builder('Alltime', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_ALLTIME_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_ALLTIME_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Artwork catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Artwork catalog'))
log_info('Making Artwork catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Artwork)
mame_cache_index_builder('Artwork', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_ARTWORK_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_ARTWORK_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Version catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Version catalog'))
log_info('Making Version catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_VerAdded)
mame_cache_index_builder('Version', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_VERADDED_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_VERADDED_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Control catalog (Expanded) ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Control Expanded catalog'))
log_info('Making Control Expanded catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Controls_Expanded)
mame_cache_index_builder('Controls_Expanded', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CONTROL_EXPANDED_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CONTROL_EXPANDED_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Control catalog (Compact) ---
# In this catalog one machine may be in several categories if the machine has more than
# one control.
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Control Compact catalog'))
log_info('Making Control Compact catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Controls_Compact)
mame_cache_index_builder('Controls_Compact', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CONTROL_COMPACT_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CONTROL_COMPACT_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- <device> / Device Expanded catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, '<device> Expanded catalog'))
log_info('Making <device> tag Expanded catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Devices_Expanded)
mame_cache_index_builder('Devices_Expanded', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DEVICE_EXPANDED_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DEVICE_EXPANDED_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- <device> / Device Compact catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, '<device> Compact catalog'))
log_info('Making <device> tag Compact catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Devices_Compact)
mame_cache_index_builder('Devices_Compact', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DEVICE_COMPACT_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DEVICE_COMPACT_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Display Type catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Display Type catalog'))
log_info('Making Display Type catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Display_Type)
mame_cache_index_builder('Display_Type', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DISPLAY_TYPE_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DISPLAY_TYPE_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Display VSync catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Display VSync catalog'))
log_info('Making Display VSync catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Display_VSync)
mame_cache_index_builder('Display_VSync', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DISPLAY_VSYNC_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DISPLAY_VSYNC_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Display Resolution catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Display Resolution catalog'))
log_info('Making Display Resolution catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Display_Resolution)
mame_cache_index_builder('Display_Resolution', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DISPLAY_RES_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DISPLAY_RES_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- CPU catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'CPU catalog'))
log_info('Making CPU catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_CPU)
mame_cache_index_builder('CPU', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CPU_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_CPU_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Driver catalog ---
# This catalog cannot use mame_build_catalog_helper() because of the driver
# name substitution.
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Driver catalog'))
log_info('Making Driver catalog ...')
catalog_parents, catalog_all = {}, {}
# mame_build_catalog_helper(catalog_parents, catalog_all,
# machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Driver)
for parent_name in main_pclone_dic:
render = renderdb_dic[parent_name]
if render['isDevice']: continue # Skip device machines in catalogs.
c_key = machines[parent_name]['sourcefile']
# Some drivers get a prettier name.
c_key = mame_driver_better_name_dic[c_key] if c_key in mame_driver_better_name_dic else c_key
catalog_key_list = [c_key]
for catalog_key in catalog_key_list:
if catalog_key in catalog_parents:
catalog_parents[catalog_key][parent_name] = render['description']
catalog_all[catalog_key][parent_name] = render['description']
else:
catalog_parents[catalog_key] = { parent_name : render['description'] }
catalog_all[catalog_key] = { parent_name : render['description'] }
for clone_name in main_pclone_dic[parent_name]:
catalog_all[catalog_key][clone_name] = renderdb_dic[clone_name]['description']
mame_cache_index_builder('Driver', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DRIVER_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_DRIVER_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Manufacturer catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Manufacturer catalog'))
log_info('Making Manufacturer catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Manufacturer)
mame_cache_index_builder('Manufacturer', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_MANUFACTURER_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_MANUFACTURER_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- MAME short name catalog ---
# This catalog cannot use mame_build_catalog_helper() because of the special name
# of the catalog (it is not the plain description).
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Short name catalog'))
log_info('Making MAME short name catalog...')
catalog_parents, catalog_all = {}, {}
for parent_name in main_pclone_dic:
render = renderdb_dic[parent_name]
if render['isDevice']: continue
catalog_key = parent_name[0]
t = '{} "{}"'.format(parent_name, render['description'])
if catalog_key in catalog_parents:
catalog_parents[catalog_key][parent_name] = t
catalog_all[catalog_key][parent_name] = t
else:
catalog_parents[catalog_key] = { parent_name : t }
catalog_all[catalog_key] = { parent_name : t }
for clone_name in main_pclone_dic[parent_name]:
t = '{} "{}"'.format(clone_name, renderdb_dic[clone_name]['description'])
catalog_all[catalog_key][clone_name] = t
mame_cache_index_builder('ShortName', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_SHORTNAME_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_SHORTNAME_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- MAME long name catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Long name catalog'))
log_info('Making MAME long name catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_LongName)
mame_cache_index_builder('LongName', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_LONGNAME_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_LONGNAME_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Software List (BySL) catalog ---
# This catalog cannot use mame_build_catalog_helper() because of the name change of the SLs.
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Software List catalog'))
log_info('Making Software List catalog ...')
# Load proper Software List proper names, if available
SL_names_dic = utils_load_JSON_file(cfg.SL_NAMES_PATH.getPath())
catalog_parents, catalog_all = {}, {}
for parent_name in main_pclone_dic:
machine = machines[parent_name]
render = renderdb_dic[parent_name]
if render['isDevice']: continue
for sl_name in machine['softwarelists']:
catalog_key = sl_name
if catalog_key in SL_names_dic: catalog_key = SL_names_dic[catalog_key]
if catalog_key in catalog_parents:
catalog_parents[catalog_key][parent_name] = render['description']
catalog_all[catalog_key][parent_name] = render['description']
else:
catalog_parents[catalog_key] = { parent_name : render['description'] }
catalog_all[catalog_key] = { parent_name : render['description'] }
mame_catalog_add_clones(parent_name, main_pclone_dic, renderdb_dic, catalog_all[catalog_key])
# Add orphaned Software Lists (SL that do not have an associated machine).
for sl_name in SL_names_dic:
catalog_key = sl_name
if catalog_key in SL_names_dic: catalog_key = SL_names_dic[catalog_key]
if catalog_key in catalog_parents: continue
catalog_parents[catalog_key] = {}
catalog_all[catalog_key] = {}
mame_cache_index_builder('BySL', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_SL_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_SL_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# --- Year catalog ---
pDialog.updateProgress(processed_filters, '{}\n{}'.format(diag_line1, 'Year catalog'))
log_info('Making Year catalog ...')
catalog_parents, catalog_all = {}, {}
mame_build_catalog_helper(catalog_parents, catalog_all,
machines, renderdb_dic, main_pclone_dic, mame_catalog_key_Year)
mame_cache_index_builder('Year', cache_index_dic, catalog_all, catalog_parents)
utils_write_JSON_file(cfg.CATALOG_YEAR_PARENT_PATH.getPath(), catalog_parents)
utils_write_JSON_file(cfg.CATALOG_YEAR_ALL_PATH.getPath(), catalog_all)
processed_filters += 1
# Close progress dialog.
pDialog.endProgress()
# --- Create properties database with default values ------------------------------------------
# Now overwrites all properties when the catalog is rebuilt.
    # New versions must keep user-set properties!
# This code is disabled
# mame_properties_dic = {}
# for catalog_name in CATALOG_NAME_LIST:
# catalog_dic = db_get_cataloged_dic_parents(cfg, catalog_name)
# for category_name in sorted(catalog_dic):
# prop_key = '{} - {}'.format(catalog_name, category_name)
# mame_properties_dic[prop_key] = {'vm' : VIEW_MODE_PCLONE}
# utils_write_JSON_file(cfg.MAIN_PROPERTIES_PATH.getPath(), mame_properties_dic)
# log_info('mame_properties_dic has {} entries'.format(len(mame_properties_dic)))
# --- Compute main filter statistics ---
stats_MF_Normal_Total, stats_MF_Normal_Total_parents = 0, 0
stats_MF_Normal_Good, stats_MF_Normal_Good_parents = 0, 0
stats_MF_Normal_Imperfect, stats_MF_Normal_Imperfect_parents = 0, 0
stats_MF_Normal_Nonworking, stats_MF_Normal_Nonworking_parents = 0, 0
stats_MF_Unusual_Total, stats_MF_Unusual_Total_parents = 0, 0
stats_MF_Unusual_Good, stats_MF_Unusual_Good_parents = 0, 0
stats_MF_Unusual_Imperfect, stats_MF_Unusual_Imperfect_parents = 0, 0
stats_MF_Unusual_Nonworking, stats_MF_Unusual_Nonworking_parents = 0, 0
stats_MF_Nocoin_Total, stats_MF_Nocoin_Total_parents = 0, 0
stats_MF_Nocoin_Good, stats_MF_Nocoin_Good_parents = 0, 0
stats_MF_Nocoin_Imperfect, stats_MF_Nocoin_Imperfect_parents = 0, 0
stats_MF_Nocoin_Nonworking, stats_MF_Nocoin_Nonworking_parents = 0, 0
stats_MF_Mechanical_Total, stats_MF_Mechanical_Total_parents = 0, 0
stats_MF_Mechanical_Good, stats_MF_Mechanical_Good_parents = 0, 0
stats_MF_Mechanical_Imperfect, stats_MF_Mechanical_Imperfect_parents = 0, 0
stats_MF_Mechanical_Nonworking, stats_MF_Mechanical_Nonworking_parents = 0, 0
stats_MF_Dead_Total, stats_MF_Dead_Total_parents = 0, 0
stats_MF_Dead_Good, stats_MF_Dead_Good_parents = 0, 0
stats_MF_Dead_Imperfect, stats_MF_Dead_Imperfect_parents = 0, 0
stats_MF_Dead_Nonworking, stats_MF_Dead_Nonworking_parents = 0, 0
NUM_FILTERS = 5
processed_filters = 0
pDialog.startProgress('Computing statistics ...', NUM_FILTERS)
for m_name in main_catalog_all['Normal']:
driver_status = renderdb_dic[m_name]['driver_status']
stats_MF_Normal_Total += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Normal_Total_parents += 1
if driver_status == 'good':
stats_MF_Normal_Good += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Normal_Good_parents += 1
elif driver_status == 'imperfect':
stats_MF_Normal_Imperfect += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Normal_Imperfect_parents += 1
elif driver_status == 'preliminary':
stats_MF_Normal_Nonworking += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Normal_Nonworking_parents += 1
# Found in mame2003-plus.xml, machine quizf1 and maybe others.
elif driver_status == 'protection': pass
# Are there machines with undefined status?
elif driver_status == '': pass
else:
log_error('Machine {}, unrecognised driver_status {}'.format(m_name, driver_status))
raise TypeError
processed_filters += 1
pDialog.updateProgress(processed_filters)
for m_name in main_catalog_all['Unusual']:
driver_status = renderdb_dic[m_name]['driver_status']
stats_MF_Unusual_Total += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Unusual_Total_parents += 1
if driver_status == 'good':
stats_MF_Unusual_Good += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Unusual_Good_parents += 1
elif driver_status == 'imperfect':
stats_MF_Unusual_Imperfect += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Unusual_Imperfect_parents += 1
elif driver_status == 'preliminary':
stats_MF_Unusual_Nonworking += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Unusual_Nonworking_parents += 1
elif driver_status == 'protection': pass
elif driver_status == '': pass
else:
log_error('Machine {}, unrecognised driver_status {}'.format(m_name, driver_status))
raise TypeError
processed_filters += 1
pDialog.updateProgress(processed_filters)
for m_name in main_catalog_all['NoCoin']:
driver_status = renderdb_dic[m_name]['driver_status']
stats_MF_Nocoin_Total += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Nocoin_Total_parents += 1
if driver_status == 'good':
stats_MF_Nocoin_Good += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Nocoin_Good_parents += 1
elif driver_status == 'imperfect':
stats_MF_Nocoin_Imperfect += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Nocoin_Imperfect_parents += 1
elif driver_status == 'preliminary':
stats_MF_Nocoin_Nonworking += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Nocoin_Nonworking_parents += 1
elif driver_status == 'protection': pass
elif driver_status == '': pass
else:
log_error('Machine {}, unrecognised driver_status {}'.format(m_name, driver_status))
raise TypeError
processed_filters += 1
pDialog.updateProgress(processed_filters)
for m_name in main_catalog_all['Mechanical']:
driver_status = renderdb_dic[m_name]['driver_status']
stats_MF_Mechanical_Total += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Mechanical_Total_parents += 1
if driver_status == 'good':
stats_MF_Mechanical_Good += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Mechanical_Good_parents += 1
elif driver_status == 'imperfect':
stats_MF_Mechanical_Imperfect += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Mechanical_Imperfect_parents += 1
elif driver_status == 'preliminary':
stats_MF_Mechanical_Nonworking += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Mechanical_Nonworking_parents += 1
elif driver_status == 'protection': pass
elif driver_status == '': pass
else:
log_error('Machine {}, unrecognised driver_status {}'.format(m_name, driver_status))
raise TypeError
processed_filters += 1
pDialog.updateProgress(processed_filters)
for m_name in main_catalog_all['Dead']:
driver_status = renderdb_dic[m_name]['driver_status']
stats_MF_Dead_Total += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Dead_Total_parents += 1
if driver_status == 'good':
stats_MF_Dead_Good += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Dead_Good_parents += 1
elif driver_status == 'imperfect':
stats_MF_Dead_Imperfect += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Dead_Imperfect_parents += 1
elif driver_status == 'preliminary':
stats_MF_Dead_Nonworking += 1
if not renderdb_dic[m_name]['cloneof']: stats_MF_Dead_Nonworking_parents += 1
elif driver_status == 'protection': pass
elif driver_status == '': pass
else:
log_error('Machine {}, unrecognised driver_status {}'.format(m_name, driver_status))
raise TypeError
pDialog.endProgress()
# --- Update statistics ---
db_safe_edit(control_dic, 'stats_MF_Normal_Total', stats_MF_Normal_Total)
db_safe_edit(control_dic, 'stats_MF_Normal_Good', stats_MF_Normal_Good)
db_safe_edit(control_dic, 'stats_MF_Normal_Imperfect', stats_MF_Normal_Imperfect)
db_safe_edit(control_dic, 'stats_MF_Normal_Nonworking', stats_MF_Normal_Nonworking)
db_safe_edit(control_dic, 'stats_MF_Unusual_Total', stats_MF_Unusual_Total)
db_safe_edit(control_dic, 'stats_MF_Unusual_Good', stats_MF_Unusual_Good)
db_safe_edit(control_dic, 'stats_MF_Unusual_Imperfect', stats_MF_Unusual_Imperfect)
db_safe_edit(control_dic, 'stats_MF_Unusual_Nonworking', stats_MF_Unusual_Nonworking)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Total', stats_MF_Nocoin_Total)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Good', stats_MF_Nocoin_Good)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Imperfect', stats_MF_Nocoin_Imperfect)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Nonworking', stats_MF_Nocoin_Nonworking)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Total', stats_MF_Mechanical_Total)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Good', stats_MF_Mechanical_Good)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Imperfect', stats_MF_Mechanical_Imperfect)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Nonworking', stats_MF_Mechanical_Nonworking)
db_safe_edit(control_dic, 'stats_MF_Dead_Total', stats_MF_Dead_Total)
db_safe_edit(control_dic, 'stats_MF_Dead_Good', stats_MF_Dead_Good)
db_safe_edit(control_dic, 'stats_MF_Dead_Imperfect', stats_MF_Dead_Imperfect)
db_safe_edit(control_dic, 'stats_MF_Dead_Nonworking', stats_MF_Dead_Nonworking)
db_safe_edit(control_dic, 'stats_MF_Normal_Total_parents', stats_MF_Normal_Total_parents)
db_safe_edit(control_dic, 'stats_MF_Normal_Good_parents', stats_MF_Normal_Good_parents)
db_safe_edit(control_dic, 'stats_MF_Normal_Imperfect_parents', stats_MF_Normal_Imperfect_parents)
db_safe_edit(control_dic, 'stats_MF_Normal_Nonworking_parents', stats_MF_Normal_Nonworking_parents)
db_safe_edit(control_dic, 'stats_MF_Unusual_Total_parents', stats_MF_Unusual_Total_parents)
db_safe_edit(control_dic, 'stats_MF_Unusual_Good_parents', stats_MF_Unusual_Good_parents)
db_safe_edit(control_dic, 'stats_MF_Unusual_Imperfect_parents', stats_MF_Unusual_Imperfect_parents)
db_safe_edit(control_dic, 'stats_MF_Unusual_Nonworking_parents', stats_MF_Unusual_Nonworking_parents)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Total_parents', stats_MF_Nocoin_Total_parents)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Good_parents', stats_MF_Nocoin_Good_parents)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Imperfect_parents', stats_MF_Nocoin_Imperfect_parents)
db_safe_edit(control_dic, 'stats_MF_Nocoin_Nonworking_parents', stats_MF_Nocoin_Nonworking_parents)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Total_parents', stats_MF_Mechanical_Total_parents)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Good_parents', stats_MF_Mechanical_Good_parents)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Imperfect_parents', stats_MF_Mechanical_Imperfect_parents)
db_safe_edit(control_dic, 'stats_MF_Mechanical_Nonworking_parents', stats_MF_Mechanical_Nonworking_parents)
db_safe_edit(control_dic, 'stats_MF_Dead_Total_parents', stats_MF_Dead_Total_parents)
db_safe_edit(control_dic, 'stats_MF_Dead_Good_parents', stats_MF_Dead_Good_parents)
db_safe_edit(control_dic, 'stats_MF_Dead_Imperfect_parents', stats_MF_Dead_Imperfect_parents)
db_safe_edit(control_dic, 'stats_MF_Dead_Nonworking_parents', stats_MF_Dead_Nonworking_parents)
# --- Update timestamp ---
db_safe_edit(control_dic, 't_MAME_Catalog_build', time.time())
# --- Save stuff ------------------------------------------------------------------------------
db_files = [
[cache_index_dic, 'MAME cache index', cfg.CACHE_INDEX_PATH.getPath()],
[control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()],
]
db_save_files(db_files)
db_dic_in['cache_index'] = cache_index_dic
# -------------------------------------------------------------------------------------------------
# Software Lists and ROM audit database building function
# -------------------------------------------------------------------------------------------------
#
# https://www.mess.org/mess/swlist_format
# The basic idea (which leads basically the whole format) is that each <software> entry should
# correspond to a game box you could have bought in a shop, and that each <part> entry should
# correspond to a piece (i.e. a cart, a disk or a tape) that you would have found in such a box.
#
# --- Example 1: 32x.xml-chaotix ---
# Stored as: SL_ROMS/32x/chaotix.zip
#
# <part name="cart" interface="_32x_cart">
# <dataarea name="rom" size="3145728">
# <rom name="knuckles' chaotix (europe).bin" size="3145728" crc="41d63572" sha1="5c1...922" offset="000000" />
# </dataarea>
# </part>
#
# --- Example 2: 32x.xml-doom ---
# Stored as: SL_ROMS/32x/doom.zip
#
# <part name="cart" interface="_32x_cart">
# <feature name="pcb" value="171-6885A" />
# <dataarea name="rom" size="3145728">
# <rom name="mpr-17351-f.ic1" size="2097152" crc="e0ef6ebc" sha1="302...79d" offset="000000" />
# <rom name="mpr-17352-f.ic2" size="1048576" crc="c7079709" sha1="0f2...33b" offset="0x200000" />
# </dataarea>
# </part>
#
# --- Example 3: a800.xml-diamond3 ---
# Stored as: SL_ROMS/a800/diamond3.zip (all ROMs from all parts)
#
# <part name="cart" interface="a8bit_cart">
# <feature name="slot" value="a800_diamond" />
# <dataarea name="rom" size="65536">
# <rom name="diamond gos v3.0.rom" size="65536" crc="0ead07f8" sha1="e92...730" offset="0" />
# </dataarea>
# </part>
# <part name="flop1" interface="floppy_5_25">
# <dataarea name="flop" size="92176">
# <rom name="diamond paint.atr" size="92176" crc="d2994282" sha1="be8...287" offset="0" />
# </dataarea>
# </part>
# <part name="flop2" interface="floppy_5_25">
# <dataarea name="flop" size="92176">
# <rom name="diamond write.atr" size="92176" crc="e1e5b235" sha1="c3c...db5" offset="0" />
# </dataarea>
# </part>
# <part name="flop3" interface="floppy_5_25">
# <dataarea name="flop" size="92176">
# <rom name="diamond utilities.atr" size="92176" crc="bb48082d" sha1="eb7...4e4" offset="0" />
# </dataarea>
# </part>
#
# --- Example 4: a2600.xml-harmbios ---
# Stored as: SL_ROMS/a2600/harmbios.zip (all ROMs from all dataareas)
#
# <part name="cart" interface="a2600_cart">
# <feature name="slot" value="a26_harmony" />
# <dataarea name="rom" size="0x8000">
# <rom name="bios_updater_NTSC.cu" size="0x8000" crc="03153eb2" sha1="cd9...009" offset="0" />
# </dataarea>
# <dataarea name="bios" size="0x21400">
# <rom name="hbios_106_NTSC_official_beta.bin" size="0x21400" crc="1e1d237b" sha1="8fd...1da" offset="0" />
# <rom name="hbios_106_NTSC_beta_2.bin" size="0x21400" crc="807b86bd" sha1="633...e9d" offset="0" />
# <rom name="eeloader_104e_PAL60.bin" size="0x36f8" crc="58845532" sha1="255...71c" offset="0" />
# </dataarea>
# </part>
#
# --- Example 5: psx.xml-traid ---
# Stored as: SL_CHDS/psx/traid/tomb raider (usa) (v1.6).chd
#
# <part name="cdrom" interface="psx_cdrom">
# <diskarea name="cdrom">
# <disk name="tomb raider (usa) (v1.6)" sha1="697...3ac"/>
# </diskarea>
# </part>
#
# --- Example 6: psx.xml-traida cloneof=traid ---
# Stored as: SL_CHDS/psx/traid/tomb raider (usa) (v1.5).chd
#
# <part name="cdrom" interface="psx_cdrom">
# <diskarea name="cdrom">
# <disk name="tomb raider (usa) (v1.5)" sha1="d48...0a9"/>
# </diskarea>
# </part>
#
# --- Example 7: pico.xml-sanouk5 ---
# Stored as: SL_ROMS/pico/sanouk5.zip (mpr-18458-t.ic1 ROM)
# Stored as: SL_CHDS/pico/sanouk5/imgpico-001.chd
#
# <part name="cart" interface="pico_cart">
# <dataarea name="rom" size="524288">
# <rom name="mpr-18458-t.ic1" size="524288" crc="6340c18a" sha1="101..." offset="000000" loadflag="load16_word_swap" />
# </dataarea>
# <diskarea name="cdrom">
# <disk name="imgpico-001" sha1="c93...10d" />
# </diskarea>
# </part>
#
# -------------------------------------------------------------------------------------------------
# A) One part may have a dataarea, a diskarea, or both.
#
# B) One part may have more than one dataarea with different names.
#
# SL_roms = {
# 'sl_rom_name' : [
# {
# 'part_name' : string,
# 'part_interface' : string,
# 'dataarea' : [
# {
# 'name' : string,
# 'roms' : [
# {
# 'name' : string, 'size' : int, 'crc' : string
# },
# ]
# }
# ]
# 'diskarea' : [
# {
# 'name' : string,
# 'disks' : [
# {
# 'name' : string, 'sha1' : string
# },
# ]
# }
# ]
# }, ...
# ], ...
# }
#
# -------------------------------------------------------------------------------------------------
# --- SL List ROM Audit database ---
#
# A) For each SL ROM entry, create a list of the ROM files and CHD files, names, sizes, crc/sha1
# and location.
# SL_roms = {
# 'sl_rom_name' : [
# {
# 'type' : string,
# 'name' : string,
#         'size' : int,
#         'crc' : string,
# 'location' : string
# }, ...
# ], ...
# }
#
# SL_disks = {
# 'sl_rom_name' : [
# {
# 'type' : string,
# 'name' : string,
#         'sha1' : string,
# 'location' : string
# }, ...
# ], ...
# }
#
def _new_SL_Data_dic():
return {
'items' : {},
'SL_roms' : {},
'display_name' : '',
'num_with_ROMs' : 0,
'num_with_CHDs' : 0,
'num_items' : 0,
'num_parents' : 0,
'num_clones' : 0,
}
# Get ROMs in dataarea.
def _get_SL_dataarea_ROMs(SL_name, item_name, part_child, dataarea_dic):
    """Parse the <rom> children of a <dataarea> XML element into dataarea_dic.

    SL_name      -- str, Software List name (only used in log messages).
    item_name    -- str, SL item (software) name (only used in log messages).
    part_child   -- <dataarea> XML element; its children are <rom> elements.
    dataarea_dic -- dict with a 'roms' list, modified in place.

    Returns the number of ROMs appended to dataarea_dic['roms'].
    Raises CriticalError on unknown status or merge attributes, and
    ValueError on unknown loadflag values.
    """
    __DEBUG_SL_ROM_PROCESSING = False
    dataarea_num_roms = 0
    for dataarea_child in part_child:
        rom_dic = { 'name' : '', 'size' : '', 'crc' : '', 'sha1' : '' }
        # Base 0 forces Python to guess the base of the conversion looking at 0x prefixes.
        size_int = 0
        if 'size' in dataarea_child.attrib:
            size_int = int(dataarea_child.attrib['size'], 0)
        rom_dic['size'] = size_int
        rom_dic['name'] = dataarea_child.attrib['name'] if 'name' in dataarea_child.attrib else ''
        rom_dic['crc'] = dataarea_child.attrib['crc'] if 'crc' in dataarea_child.attrib else ''
        rom_dic['sha1'] = dataarea_child.attrib['sha1'] if 'sha1' in dataarea_child.attrib else ''
        # In the nes.xml SL some ROM names have a trailing dot '.'. For example (MAME 0.196):
        #
        # ROM      131072  028bfc44   nes/kingey/0.prg  OK
        # ROM      131072  1aca7960   nes/kingey/king ver 1.3 vid.  ROM not in ZIP
        #
        # PD torrents do not have the trailing dot because these files cause trouble in Windows.
        # To correctly audit PD torrents, remove the trailing dot from filenames.
        # Have a look here http://forum.pleasuredome.org.uk/index.php?showtopic=32701&p=284925
        # I will create a PR to MAME repo to fix these names (and then the next couple of lines
        # must be commented).
        if len(rom_dic['name']) > 2 and rom_dic['name'][-1] == '.':
            rom_dic['name'] = rom_dic['name'][:-1]
        # Some CRCs are in upper case. Store always lower case in AML DB.
        if rom_dic['crc']: rom_dic['crc'] = rom_dic['crc'].lower()
        # Just in case there are SHA1 hashes in upper case (not verified).
        if rom_dic['sha1']: rom_dic['sha1'] = rom_dic['sha1'].lower()
        # If ROM has attribute status="nodump" then ignore this ROM.
        if 'status' in dataarea_child.attrib:
            status = dataarea_child.attrib['status']
            if status == 'nodump':
                if __DEBUG_SL_ROM_PROCESSING:
                    log_debug('SL "{}" item "{}" status="nodump". Skipping ROM.'.format(SL_name, item_name))
                continue
            elif status == 'baddump':
                # baddump ROMs are kept in the database (only logged).
                if __DEBUG_SL_ROM_PROCESSING:
                    log_debug('SL "{}" item "{}" status="baddump".'.format(SL_name, item_name))
                pass
            else:
                log_error('SL "{}" item "{}" Unknown status = {}'.format(SL_name, item_name, status))
                raise CriticalError('DEBUG')
        # Fix "fake" SL ROMs with loadflag="continue".
        # For example, SL neogeo, SL item aof
        if 'loadflag' in dataarea_child.attrib:
            loadflag = dataarea_child.attrib['loadflag']
            if loadflag == 'continue':
                # This ROM is not valid (not a valid ROM file).
                # Size must be added to previous ROM.
                if __DEBUG_SL_ROM_PROCESSING:
                    log_debug('SL "{}" item "{}" loadflag="continue" case. Adding size {} to previous ROM.'.format(
                        SL_name, item_name, rom_dic['size']))
                previous_rom = dataarea_dic['roms'][-1]
                previous_rom['size'] += rom_dic['size']
                continue
            elif loadflag == 'ignore':
                # Non-zero sized "ignore" entries also extend the previous ROM; zero-sized
                # entries are dropped. Either way this entry is not added as a ROM.
                if rom_dic['size'] > 0:
                    if __DEBUG_SL_ROM_PROCESSING:
                        log_debug('SL "{}" item "{}" loadflag="ignore" case. Adding size {} to previous ROM.'.format(
                            SL_name, item_name, rom_dic['size']))
                    previous_rom = dataarea_dic['roms'][-1]
                    previous_rom['size'] += rom_dic['size']
                else:
                    if __DEBUG_SL_ROM_PROCESSING:
                        log_debug('SL "{}" item "{}" loadflag="ignore" case and size = 0. Skipping ROM.'.format(
                            SL_name, item_name))
                continue
            elif loadflag == 'reload':
                if __DEBUG_SL_ROM_PROCESSING:
                    log_debug('SL "{}" item "{}" loadflag="reload" case. Skipping ROM.'.format(
                        SL_name, item_name))
                continue
            elif loadflag == 'reload_plain':
                if __DEBUG_SL_ROM_PROCESSING:
                    log_debug('SL "{}" item "{}" loadflag="reload_plain" case. Skipping ROM.'.format(
                        SL_name, item_name))
                continue
            elif loadflag == 'fill':
                if __DEBUG_SL_ROM_PROCESSING:
                    log_debug('SL "{}" item "{}" loadflag="fill" case. Skipping ROM.'.format(
                        SL_name, item_name))
                continue
            elif loadflag == 'load16_word_swap':
                pass
            elif loadflag == 'load16_byte':
                pass
            elif loadflag == 'load32_word':
                pass
            elif loadflag == 'load32_byte':
                pass
            elif loadflag == 'load32_word_swap':
                pass
            else:
                t = 'SL "{}" item "{}" unknown loadflag="{}"'.format(SL_name, item_name, loadflag)
                log_error(t)
                raise ValueError(t)
        # --- Add ROM to DB ---
        dataarea_dic['roms'].append(rom_dic)
        dataarea_num_roms += 1
        # --- DEBUG: Error if rom has merge attribute ---
        if 'merge' in dataarea_child.attrib:
            log_error('SL {}, Item {}'.format(SL_name, item_name))
            log_error('ROM {} has merge attribute'.format(dataarea_child.attrib['name']))
            raise CriticalError('DEBUG')
    return dataarea_num_roms
# Get CHDs in diskarea.
def _get_SL_dataarea_CHDs(SL_name, item_name, part_child, diskarea_dic):
da_num_disks = 0
for diskarea_child in part_child:
disk_dic = { 'name' : '', 'sha1' : '' }
disk_dic['name'] = diskarea_child.attrib['name'] if 'name' in diskarea_child.attrib else ''
disk_dic['sha1'] = diskarea_child.attrib['sha1'] if 'sha1' in diskarea_child.attrib else ''
diskarea_dic['disks'].append(disk_dic)
da_num_disks += 1
return da_num_disks
def _mame_load_SL_XML(xml_filename):
    """Load and parse one Software List hash XML file.

    xml_filename -- str, full path of the SL XML file (e.g. '.../hash/32x.xml').

    Returns a dictionary with the shape of _new_SL_Data_dic():
    'items' maps item names to SL item databases, 'SL_roms' maps item names
    to per-part ROM/disk lists, plus the display name and item counters.
    Returns an empty skeleton if the file does not exist or cannot be parsed.
    """
    __debug_xml_parser = False
    SLData = _new_SL_Data_dic()
    # If file does not exist return empty dictionary.
    if not os.path.isfile(xml_filename): return SLData
    (head, SL_name) = os.path.split(xml_filename)
    # Parse using ElementTree.
    # If the XML has errors (invalid characters, etc.) this will raise an exception,
    # in which case an empty database is returned.
    # log_debug('_mame_load_SL_XML() Loading XML file "{}"'.format(xml_filename))
    try:
        xml_tree = ET.parse(xml_filename)
    except:
        return SLData
    xml_root = xml_tree.getroot()
    SL_desc = xml_root.attrib['description']
    # Substitute SL description (long name).
    if SL_desc in SL_better_name_dic:
        old_SL_desc = SL_desc
        SL_desc = SL_better_name_dic[SL_desc]
        log_debug('Substitute SL "{}" with "{}"'.format(old_SL_desc, SL_desc))
    SLData['display_name'] = SL_desc
    for root_element in xml_root:
        if __debug_xml_parser: log_debug('Root child {}'.format(root_element.tag))
        # Only process 'software' elements
        if root_element.tag != 'software':
            log_warning('In SL {}, unrecognised XML tag <{}>'.format(SL_name, root_element.tag))
            continue
        SL_item = db_new_SL_ROM()
        SL_rom_list = []
        num_roms = 0
        num_disks = 0
        item_name = root_element.attrib['name']
        if 'cloneof' in root_element.attrib: SL_item['cloneof'] = root_element.attrib['cloneof']
        # SL items are not expected to have a 'romof' attribute; abort if found.
        if 'romof' in root_element.attrib:
            raise TypeError('SL {} item {}, "romof" in root_element.attrib'.format(SL_name, item_name))
        for rom_child in root_element:
            # By default read strings
            xml_text = rom_child.text if rom_child.text is not None else ''
            xml_tag = rom_child.tag
            if __debug_xml_parser: log_debug('{} --> {}'.format(xml_tag, xml_text))
            # --- Only pick tags we want ---
            if xml_tag == 'description' or xml_tag == 'year' or xml_tag == 'publisher':
                SL_item[xml_tag] = xml_text
            elif xml_tag == 'part':
                # <part name="cart" interface="_32x_cart">
                part_dic = db_new_SL_ROM_part()
                part_dic['name'] = rom_child.attrib['name']
                part_dic['interface'] = rom_child.attrib['interface']
                SL_item['parts'].append(part_dic)
                SL_roms_dic = {
                    'part_name' : rom_child.attrib['name'],
                    'part_interface' : rom_child.attrib['interface']
                }
                # --- Count number of <dataarea> and <diskarea> tags inside this <part tag> ---
                num_dataarea = 0
                num_diskarea = 0
                for part_child in rom_child:
                    if part_child.tag == 'dataarea':
                        dataarea_dic = { 'name' : part_child.attrib['name'], 'roms' : [] }
                        da_num_roms = _get_SL_dataarea_ROMs(SL_name, item_name, part_child, dataarea_dic)
                        if da_num_roms > 0:
                            # >> dataarea is valid ONLY if it contains valid ROMs
                            num_dataarea += 1
                            num_roms += da_num_roms
                            if 'dataarea' not in SL_roms_dic: SL_roms_dic['dataarea'] = []
                            SL_roms_dic['dataarea'].append(dataarea_dic)
                    elif part_child.tag == 'diskarea':
                        diskarea_dic = { 'name' : part_child.attrib['name'], 'disks' : [] }
                        da_num_disks = _get_SL_dataarea_CHDs(SL_name, item_name, part_child, diskarea_dic)
                        if da_num_disks > 0:
                            # >> diskarea is valid ONLY if it contains valid CHDs
                            num_diskarea += 1
                            num_disks += da_num_disks
                            if 'diskarea' not in SL_roms_dic: SL_roms_dic['diskarea'] = []
                            SL_roms_dic['diskarea'].append(diskarea_dic)
                    elif part_child.tag == 'feature':
                        pass
                    elif part_child.tag == 'dipswitch':
                        pass
                    else:
                        raise TypeError('SL {} item {}, inside <part>, unrecognised tag <{}>'.format(
                            SL_name, item_name, part_child.tag))
                # --- Add ROMs/disks ---
                SL_rom_list.append(SL_roms_dic)
        # --- DEBUG/Research code ---
        # if num_dataarea > 1:
        #     log_error('{} -> num_dataarea = {}'.format(item_name, num_dataarea))
        #     raise TypeError('DEBUG')
        # if num_diskarea > 1:
        #     log_error('{} -> num_diskarea = {}'.format(item_name, num_diskarea))
        #     raise TypeError('DEBUG')
        # if num_dataarea and num_diskarea:
        #     log_error('{} -> num_dataarea = {}'.format(item_name, num_dataarea))
        #     log_error('{} -> num_diskarea = {}'.format(item_name, num_diskarea))
        #     raise TypeError('DEBUG')
        # --- Finished processing of <software> element ---
        SLData['num_items'] += 1
        if SL_item['cloneof']: SLData['num_clones'] += 1
        else: SLData['num_parents'] += 1
        if num_roms:
            SL_item['hasROMs'] = True
            SL_item['status_ROM'] = '?'
            SLData['num_with_ROMs'] += 1
        else:
            SL_item['hasROMs'] = False
            SL_item['status_ROM'] = '-'
        if num_disks:
            SL_item['hasCHDs'] = True
            SL_item['status_CHD'] = '?'
            SLData['num_with_CHDs'] += 1
        else:
            SL_item['hasCHDs'] = False
            SL_item['status_CHD'] = '-'
        # Add <software> item (SL_item) to database and software ROM/CHDs to database.
        SLData['items'][item_name] = SL_item
        SLData['SL_roms'][item_name] = SL_rom_list
    return SLData
def _get_SL_parent_ROM_dic(parent_name, SL_ROMs):
parent_rom_dic = {}
for part_dic in SL_ROMs[parent_name]:
if not 'dataarea' in part_dic: continue
for dataarea_dic in part_dic['dataarea']:
for rom_dic in dataarea_dic['roms']:
parent_rom_dic[rom_dic['crc']] = rom_dic['name']
return parent_rom_dic
def _get_SL_ROM_location(rom_set, SL_name, SL_item_name, rom_dic, SL_Items, parent_rom_dic):
# Some SL invalid ROMs do not have name attribute (and not CRC and SHA1).
# For those, set the location to empty.
if not rom_dic['name']: return ''
# In the SL ROM MERGED set all ROMs are stored in the parent ZIP file:
#
# PATH/32x/chaotix.zip/knuckles' chaotix (europe).bin
# PATH/32x/chaotix.zip/chaotixju/chaotix ~ knuckles' chaotix (japan, usa).bin
# PATH/32x/chaotix.zip/chaotixjup/knuckles' chaotix (prototype 214 - feb 14, 1995, 06.46).bin
#
if rom_set == 'MERGED':
cloneof = SL_Items[SL_item_name]['cloneof']
if cloneof:
location = SL_name + '/' + cloneof + '/' + SL_item_name + '/' + rom_dic['name']
else:
location = SL_name + '/' + SL_item_name + '/' + rom_dic['name']
# In the SL ROM SPLIT set each item ROMs are in their own file:
#
# PATH/32x/chaotix.zip/knuckles' chaotix (europe).bin
# PATH/32x/chaotixju.zip/chaotix ~ knuckles' chaotix (japan, usa).bin
# PATH/32x/chaotixjup.zip/knuckles' chaotix (prototype 214 - feb 14, 1995, 06.46).bin
#
# NOTE that ClrMAME Pro (and hence PD torrents) do implicit ROM merging. SL XMLs do not have
# the merge attribute. However, an implicit ROM merge is done if a ROM with the same
# CRC is found in the parent. Implicit merging only affects clones. A dictionary
# of the parent ROMs with key the CRC hash and value the ROM name is required.
#
elif rom_set == 'SPLIT':
cloneof = SL_Items[SL_item_name]['cloneof']
if cloneof:
if rom_dic['crc'] in parent_rom_dic:
location = SL_name + '/' + cloneof + '/' + parent_rom_dic[rom_dic['crc']]
else:
location = SL_name + '/' + SL_item_name + '/' + rom_dic['name']
else:
location = SL_name + '/' + SL_item_name + '/' + rom_dic['name']
elif rom_set == 'NONMERGED':
location = SL_name + '/' + SL_item_name + '/' + rom_dic['name']
else:
raise TypeError
return location
def _get_SL_CHD_location(chd_set, SL_name, SL_item_name, disk_dic, SL_Items):
# In the SL CHD MERGED set all CHDs are in the directory of the parent:
#
# ffant9 --> parent with 4 DISKS (v1.1)
# ffant9a --> parent with 4 DISKS (v1.0)
#
# [parent traid] PATH/psx/traid/tomb raider (usa) (v1.6).chd
# [clone traida] PATH/psx/traid/tomb raider (usa) (v1.5).chd
# [clone traiddm] PATH/psx/traid/tr1.chd
#
if chd_set == 'MERGED':
cloneof = SL_Items[SL_item_name]['cloneof']
archive_name = cloneof if cloneof else SL_item_name
location = SL_name + '/' + archive_name + '/' + disk_dic['name']
# In the SL CHD SPLIT set CHD of each machine are in their own directory.
# This is not confirmed since I do not have the PD DAT file for the SL CHD SPLIT set.
#
# [parent traid] PATH/psx/traid/tomb raider (usa) (v1.6).chd
# [clone traida] PATH/psx/traida/tomb raider (usa) (v1.5).chd
# [clone traiddm] PATH/psx/traiddm/tr1.chd
#
elif chd_set == 'SPLIT':
location = SL_name + '/' + SL_rom + '/' + disk_dic['name']
else:
raise TypeError
return location
# -------------------------------------------------------------------------------------------------
# Checks for errors before scanning for SL ROMs.
# Display a Kodi dialog if an error is found.
# -------------------------------------------------------------------------------------------------
def mame_check_before_build_SL_databases(cfg, st_dic, control_dic):
    """Verify preconditions for building the SL databases.

    Resets st_dic and, on the first failed check, stores an error message
    in st_dic (shown later as a Kodi dialog) and returns early.
    """
    kodi_reset_status(st_dic)
    # The Software Lists hash path setting is mandatory.
    if not cfg.settings['SL_hash_path']:
        kodi_set_error_status(st_dic, 'Software Lists hash path not set. '
            'Open AML addon settings and configure the location of the MAME hash path in the '
            '"Paths" tab.')
        return
    # The MAME main database must exist (built from the MAME executable).
    if not cfg.MAIN_DB_PATH.exists():
        kodi_set_error_status(st_dic, 'MAME Main database not found. '
            'Open AML addon settings and configure the location of the MAME executable in the '
            '"Paths" tab.')
        return
#
# Modifies dictionary db_dic_in.
#
# SL_catalog_dic = { 'name' : {
# 'display_name': u'',
# 'num_clones' : int,
# 'num_items' : int,
# 'num_parents' : int,
# 'num_with_CHDs' : int,
# 'num_with_ROMs' : int,
# 'rom_DB_noext' : u''
# },
# }
#
# Saves:
# SL_INDEX_PATH,
# SL_MACHINES_PATH,
# SL_PCLONE_DIC_PATH,
# per-SL database (32x.json)
# per-SL database (32x_ROMs.json)
# per-SL ROM audit database (32x_ROM_audit.json)
# per-SL item archives (ROMs and CHDs) (32x_ROM_archives.json)
#
def mame_build_SoftwareLists_databases(cfg, st_dic, db_dic_in):
control_dic = db_dic_in['control_dic']
machines = db_dic_in['machines']
renderdb_dic = db_dic_in['renderdb']
SL_dir_FN = FileName(cfg.settings['SL_hash_path'])
log_debug('mame_build_SoftwareLists_databases() SL_dir_FN "{}"'.format(SL_dir_FN.getPath()))
# --- Scan all XML files in Software Lists directory and save SL catalog and SL databases ---
log_info('Processing Software List XML files...')
SL_file_list = SL_dir_FN.scanFilesInPath('*.xml')
# DEBUG code for development, only process first SL file (32x).
# SL_file_list = [ sorted(SL_file_list)[0] ]
total_SL_files = len(SL_file_list)
num_SL_with_ROMs = 0
num_SL_with_CHDs = 0
SL_catalog_dic = {}
processed_files = 0
diag_line = 'Building Sofware Lists item databases...'
pDialog = KodiProgressDialog()
pDialog.startProgress(diag_line, total_SL_files)
for file in sorted(SL_file_list):
# Progress dialog
FN = FileName(file)
pDialog.updateProgress(processed_files,
'{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(diag_line, FN.getBase()))
# Open software list XML and parse it. Then, save data fields we want in JSON.
# log_debug('mame_build_SoftwareLists_databases() Processing "{}"'.format(file))
SL_path_FN = FileName(file)
SLData = _mame_load_SL_XML(SL_path_FN.getPath())
utils_write_JSON_file(cfg.SL_DB_DIR.pjoin(FN.getBaseNoExt() + '_items.json').getPath(),
SLData['items'], verbose = False)
utils_write_JSON_file(cfg.SL_DB_DIR.pjoin(FN.getBaseNoExt() + '_ROMs.json').getPath(),
SLData['SL_roms'], verbose = False)
# Add software list to catalog
num_SL_with_ROMs += SLData['num_with_ROMs']
num_SL_with_CHDs += SLData['num_with_CHDs']
SL = {
'display_name' : SLData['display_name'],
'num_with_ROMs' : SLData['num_with_ROMs'],
'num_with_CHDs' : SLData['num_with_CHDs'],
'num_items' : SLData['num_items'],
'num_parents' : SLData['num_parents'],
'num_clones' : SLData['num_clones'],
'rom_DB_noext' : FN.getBaseNoExt(),
}
SL_catalog_dic[FN.getBaseNoExt()] = SL
# Update progress
processed_files += 1
pDialog.endProgress()
# --- Make the SL ROM/CHD unified Audit databases ---
log_info('Building Software List ROM Audit database...')
rom_set = ['MERGED', 'SPLIT', 'NONMERGED'][cfg.settings['SL_rom_set']]
chd_set = ['MERGED', 'SPLIT', 'NONMERGED'][cfg.settings['SL_chd_set']]
log_info('mame_build_SoftwareLists_databases() SL ROM set is {}'.format(rom_set))
log_info('mame_build_SoftwareLists_databases() SL CHD set is {}'.format(chd_set))
total_files = len(SL_file_list)
processed_files = 0
stats_audit_SL_items_runnable = 0
stats_audit_SL_items_with_arch = 0
stats_audit_SL_items_with_arch_ROM = 0
stats_audit_SL_items_with_CHD = 0
diag_line = 'Building Software List ROM audit databases...'
pDialog.startProgress(diag_line, total_files)
for file in sorted(SL_file_list):
# Update progress
FN = FileName(file)
SL_name = FN.getBaseNoExt()
pDialog.updateProgress(processed_files, '{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(
diag_line, FN.getBase()))
# Filenames of the databases
# log_debug('mame_build_SoftwareLists_databases() Processing "{}"'.format(file))
SL_Items_DB_FN = cfg.SL_DB_DIR.pjoin(FN.getBaseNoExt() + '_items.json')
SL_ROMs_DB_FN = cfg.SL_DB_DIR.pjoin(FN.getBaseNoExt() + '_ROMs.json')
SL_ROM_Audit_DB_FN = cfg.SL_DB_DIR.pjoin(FN.getBaseNoExt() + '_ROM_audit.json')
SL_Soft_Archives_DB_FN = cfg.SL_DB_DIR.pjoin(FN.getBaseNoExt() + '_ROM_archives.json')
SL_Items = utils_load_JSON_file(SL_Items_DB_FN.getPath(), verbose = False)
SL_ROMs = utils_load_JSON_file(SL_ROMs_DB_FN.getPath(), verbose = False)
# --- First add the SL item ROMs to the audit database ---
SL_Audit_ROMs_dic = {}
for SL_item_name in SL_ROMs:
# >> If SL item is a clone then create parent_rom_dic. This is only needed in the
# >> SPLIT set, so current code is a bit inefficient for other sets.
# >> key : CRC -> value : rom name
cloneof = SL_Items[SL_item_name]['cloneof']
if cloneof:
parent_rom_dic = _get_SL_parent_ROM_dic(cloneof, SL_ROMs)
else:
parent_rom_dic = {}
# >> Iterate Parts in a SL Software item. Then iterate dataareas on each part.
# >> Finally, iterate ROM on each dataarea.
set_roms = []
for part_dic in SL_ROMs[SL_item_name]:
if not 'dataarea' in part_dic: continue
for dataarea_dic in part_dic['dataarea']:
for rom_dic in dataarea_dic['roms']:
location = _get_SL_ROM_location(rom_set, SL_name, SL_item_name,
rom_dic, SL_Items, parent_rom_dic)
rom_audit_dic = db_new_SL_ROM_audit_dic()
rom_audit_dic['type'] = ROM_TYPE_ROM
rom_audit_dic['name'] = rom_dic['name']
rom_audit_dic['size'] = rom_dic['size']
rom_audit_dic['crc'] = rom_dic['crc']
rom_audit_dic['location'] = location
set_roms.append(rom_audit_dic)
SL_Audit_ROMs_dic[SL_item_name] = set_roms
# --- Second add the SL item CHDs to the audit database ---
for SL_item_name in SL_ROMs:
set_chds = []
for part_dic in SL_ROMs[SL_item_name]:
if not 'diskarea' in part_dic: continue
for diskarea_dic in part_dic['diskarea']:
for disk_dic in diskarea_dic['disks']:
location = _get_SL_CHD_location(chd_set, SL_name, SL_item_name, disk_dic, SL_Items)
disk_audit_dic = db_new_SL_DISK_audit_dic()
disk_audit_dic['type'] = ROM_TYPE_DISK
disk_audit_dic['name'] = disk_dic['name']
disk_audit_dic['sha1'] = disk_dic['sha1']
disk_audit_dic['location'] = location
set_chds.append(disk_audit_dic)
# >> Extend ROM list with CHDs.
if SL_item_name in SL_Audit_ROMs_dic:
SL_Audit_ROMs_dic[SL_item_name].extend(set_chds)
else:
SL_Audit_ROMs_dic[SL_item_name] = set_chds
# --- Machine archives ---
# There is not ROMs and CHDs sets for Software List Items (not necessary).
SL_Item_Archives_dic = {}
for SL_item_name in SL_Audit_ROMs_dic:
rom_list = SL_Audit_ROMs_dic[SL_item_name]
machine_rom_archive_set = set()
machine_chd_archive_set = set()
# --- Iterate ROMs/CHDs ---
for rom in rom_list:
if rom['type'] == ROM_TYPE_DISK:
# >> Skip invalid CHDs
if not rom['sha1']: continue
chd_name = rom['location']
machine_chd_archive_set.add(chd_name)
else:
# >> Skip invalid ROMs
if not rom['crc']: continue
rom_str_list = rom['location'].split('/')
zip_name = rom_str_list[0] + '/' + rom_str_list[1]
machine_rom_archive_set.add(zip_name)
SL_Item_Archives_dic[SL_item_name] = {
'ROMs' : list(machine_rom_archive_set),
'CHDs' : list(machine_chd_archive_set)
}
# --- SL Audit database statistics ---
stats_audit_SL_items_runnable += 1
if SL_Item_Archives_dic[SL_item_name]['ROMs'] or SL_Item_Archives_dic[SL_item_name]['CHDs']:
stats_audit_SL_items_with_arch += 1
if SL_Item_Archives_dic[SL_item_name]['ROMs']: stats_audit_SL_items_with_arch_ROM += 1
if SL_Item_Archives_dic[SL_item_name]['CHDs']: stats_audit_SL_items_with_CHD += 1
# --- Save databases ---
utils_write_JSON_file(SL_ROM_Audit_DB_FN.getPath(), SL_Audit_ROMs_dic, verbose = False)
utils_write_JSON_file(SL_Soft_Archives_DB_FN.getPath(), SL_Item_Archives_dic, verbose = False)
processed_files += 1
pDialog.endProgress()
# --- Make SL Parent/Clone databases ---
log_info('Building Software List PClone list...')
total_files = len(SL_catalog_dic)
processed_files = 0
SL_PClone_dic = {}
total_SL_XML_files = 0
total_SL_software_items = 0
diag_line = 'Building Software List PClone list...'
pDialog.startProgress(diag_line, total_files)
for sl_name in sorted(SL_catalog_dic):
pDialog.updateProgress(processed_files, '{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(
diag_line, sl_name))
total_SL_XML_files += 1
pclone_dic = {}
SL_database_FN = cfg.SL_DB_DIR.pjoin(sl_name + '_items.json')
ROMs = utils_load_JSON_file(SL_database_FN.getPath(), verbose = False)
for rom_name in ROMs:
total_SL_software_items += 1
ROM = ROMs[rom_name]
if ROM['cloneof']:
parent_name = ROM['cloneof']
if parent_name not in pclone_dic: pclone_dic[parent_name] = []
pclone_dic[parent_name].append(rom_name)
else:
if rom_name not in pclone_dic: pclone_dic[rom_name] = []
SL_PClone_dic[sl_name] = pclone_dic
processed_files += 1
pDialog.endProgress()
# --- Make a list of machines that can launch each SL ---
log_info('Making Software List machine list...')
total_SL = len(SL_catalog_dic)
processed_SL = 0
SL_machines_dic = {}
diag_line = 'Building Software List machine list...'
pDialog.startProgress(diag_line, total_SL)
for SL_name in sorted(SL_catalog_dic):
pDialog.updateProgress(processed_SL, '{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(
diag_line, SL_name))
SL_machine_list = []
for machine_name in machines:
# if not machines[machine_name]['softwarelists']: continue
for machine_SL_name in machines[machine_name]['softwarelists']:
if machine_SL_name == SL_name:
SL_machine_dic = {
'machine' : machine_name,
'description' : renderdb_dic[machine_name]['description'],
'devices' : machines[machine_name]['devices']
}
SL_machine_list.append(SL_machine_dic)
SL_machines_dic[SL_name] = SL_machine_list
processed_SL += 1
pDialog.endProgress()
# --- Empty SL asset DB ---
log_info('Making Software List (empty) asset databases...')
total_SL = len(SL_catalog_dic)
processed_SL = 0
diag_line = 'Building Software List (empty) asset databases...'
pDialog.startProgress(diag_line, total_SL)
for SL_name in sorted(SL_catalog_dic):
pDialog.updateProgress(processed_SL, '{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(
diag_line, SL_name))
# --- Load SL databases ---
file_name = SL_catalog_dic[SL_name]['rom_DB_noext'] + '_items.json'
SL_DB_FN = cfg.SL_DB_DIR.pjoin(file_name)
SL_roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
assets_file_name = SL_catalog_dic[SL_name]['rom_DB_noext'] + '_assets.json'
SL_asset_DB_FN = cfg.SL_DB_DIR.pjoin(assets_file_name)
# --- Second pass: substitute artwork ---
SL_assets_dic = {}
for rom_key in sorted(SL_roms):
SL_assets_dic[rom_key] = db_new_SL_asset()
# --- Write SL asset JSON ---
utils_write_JSON_file(SL_asset_DB_FN.getPath(), SL_assets_dic, verbose = False)
processed_SL += 1
pDialog.endProgress()
# --- Create properties database with default values ---
# --- Make SL properties DB ---
# >> Allows customisation of every SL list window
# >> Not used at the moment -> Global properties
# SL_properties_dic = {}
# for sl_name in SL_catalog_dic:
# # 'vm' : VIEW_MODE_NORMAL or VIEW_MODE_ALL
# prop_dic = {'vm' : VIEW_MODE_NORMAL}
# SL_properties_dic[sl_name] = prop_dic
# utils_write_JSON_file(cfg.SL_MACHINES_PROP_PATH.getPath(), SL_properties_dic)
# log_info('SL_properties_dic has {} items'.format(len(SL_properties_dic)))
# >> One of the MAME catalogs has changed, and so the property names.
# >> Not used at the moment -> Global properties
# mame_properties_dic = {}
# for catalog_name in CATALOG_NAME_LIST:
# catalog_dic = db_get_cataloged_dic_parents(cfg, catalog_name)
# for category_name in sorted(catalog_dic):
# prop_key = '{} - {}'.format(catalog_name, category_name)
# mame_properties_dic[prop_key] = {'vm' : VIEW_MODE_NORMAL}
# utils_write_JSON_file(cfg.MAIN_PROPERTIES_PATH.getPath(), mame_properties_dic)
# log_info('mame_properties_dic has {} items'.format(len(mame_properties_dic)))
# -----------------------------------------------------------------------------
# Update MAME control dictionary
# -----------------------------------------------------------------------------
# --- SL item database ---
db_safe_edit(control_dic, 'stats_SL_XML_files', total_SL_XML_files)
db_safe_edit(control_dic, 'stats_SL_software_items', total_SL_software_items)
db_safe_edit(control_dic, 'stats_SL_items_with_ROMs', num_SL_with_ROMs)
db_safe_edit(control_dic, 'stats_SL_items_with_CHDs', num_SL_with_CHDs)
# --- SL audit database statistics ---
db_safe_edit(control_dic, 'stats_audit_SL_items_runnable', stats_audit_SL_items_runnable)
db_safe_edit(control_dic, 'stats_audit_SL_items_with_arch', stats_audit_SL_items_with_arch)
db_safe_edit(control_dic, 'stats_audit_SL_items_with_arch_ROM', stats_audit_SL_items_with_arch_ROM)
db_safe_edit(control_dic, 'stats_audit_SL_items_with_CHD', stats_audit_SL_items_with_CHD)
# --- SL build timestamp ---
db_safe_edit(control_dic, 't_SL_DB_build', time.time())
# --- Save modified/created stuff in this function ---
db_files = [
# Fix this list of files!!!
[SL_catalog_dic, 'Software Lists index', cfg.SL_INDEX_PATH.getPath()],
[SL_PClone_dic, 'Software Lists P/Clone', cfg.SL_PCLONE_DIC_PATH.getPath()],
[SL_machines_dic, 'Software Lists machines', cfg.SL_MACHINES_PATH.getPath()],
# Save control_dic after everything is saved.
[control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()],
]
db_save_files(db_files)
db_dic_in['SL_index'] = SL_catalog_dic
db_dic_in['SL_machines'] = SL_machines_dic
db_dic_in['SL_PClone_dic'] = SL_PClone_dic
# -------------------------------------------------------------------------------------------------
# ROM/CHD and asset scanner
# -------------------------------------------------------------------------------------------------
# Checks for errors before scanning for MAME ROMs.
# Displays a Kodi dialog if an error is found.
def mame_check_before_scan_MAME_ROMs(cfg, st_dic, options_dic, control_dic):
    # Verifies the MAME ROM scanner preconditions. The ROM directory is
    # mandatory (a fatal error is flagged in st_dic if unset or missing).
    # CHD and Samples directories are optional: when absent, the user is
    # notified and the corresponding scan flag in options_dic is cleared.
    log_info('mame_check_before_scan_MAME_ROMs() Starting...')
    kodi_reset_status(st_dic)
    # Resolve the ROM path according to the current operational mode.
    op_mode = cfg.settings['op_mode']
    if op_mode == OP_MODE_VANILLA:
        rom_path = cfg.settings['rom_path_vanilla']
    elif op_mode == OP_MODE_RETRO_MAME2003PLUS:
        rom_path = cfg.settings['rom_path_2003_plus']
    else:
        raise TypeError('Unknown op_mode "{}"'.format(op_mode))
    # ROM scanning is mandatory, even if the ROM directory is empty.
    if not rom_path:
        kodi_set_error_status(st_dic, 'ROM directory not configured. Aborting scanner.')
        return
    if not FileName(rom_path).isdir():
        kodi_set_error_status(st_dic, 'ROM directory does not exist. Aborting scanner.')
        return
    # Scanning of CHDs is optional.
    chd_path = cfg.settings['chd_path']
    if not chd_path:
        kodi_dialog_OK('CHD directory not configured. CHD scanning disabled.')
        options_dic['scan_CHDs'] = False
    elif not FileName(chd_path).isdir():
        kodi_dialog_OK('CHD directory does not exist. CHD scanning disabled.')
        options_dic['scan_CHDs'] = False
    else:
        options_dic['scan_CHDs'] = True
    # Scanning of Samples is optional.
    samples_path = cfg.settings['samples_path']
    if not samples_path:
        kodi_dialog_OK('Samples directory not configured. Samples scanning disabled.')
        options_dic['scan_Samples'] = False
    elif not FileName(samples_path).isdir():
        kodi_dialog_OK('Samples directory does not exist. Samples scanning disabled.')
        options_dic['scan_Samples'] = False
    else:
        options_dic['scan_Samples'] = True
#
# Saves control_dic and assets_dic.
#
# PROBLEM with samples scanning.
# Most samples are stored in ZIP files. However, the samples shipped with MAME executable
# are uncompressed:
# MAME_DIR/samples/floppy/35_seek_12ms.wav
# MAME_DIR/samples/floppy/35_seek_20ms.wav
# ...
# MAME_DIR/samples/MM1_keyboard/beep.wav
# MAME_DIR/samples/MM1_keyboard/power_switch.wav
#
def mame_scan_MAME_ROMs(cfg, st_dic, options_dic, db_dic_in):
    # Scans the MAME machine archives (ROM ZIPs, Sample ZIPs and CHDs), sets
    # the ROM/Sample/CHD flags of every machine in the asset database, writes
    # the scanner report files and updates the scanner statistics counters in
    # control_dic. Finally saves control_dic and the asset database.
    # options_dic['scan_CHDs'] and options_dic['scan_Samples'] must have been
    # filled in by mame_check_before_scan_MAME_ROMs(), which also validates
    # the directories used here.
    #
    # --- Convenient variables for databases ---
    control_dic = db_dic_in['control_dic']
    machines = db_dic_in['machines']
    renderdb = db_dic_in['renderdb']
    assetdb = db_dic_in['assetdb']
    machine_archives_dic = db_dic_in['machine_archives']
    log_info('mame_scan_MAME_ROMs() Starting...')
    kodi_reset_status(st_dic)
    # At this point paths have been verified and exists.
    if cfg.settings['op_mode'] == OP_MODE_VANILLA:
        rom_path = cfg.settings['rom_path_vanilla']
    elif cfg.settings['op_mode'] == OP_MODE_RETRO_MAME2003PLUS:
        rom_path = cfg.settings['rom_path_2003_plus']
    else:
        raise TypeError('Unknown op_mode "{}"'.format(cfg.settings['op_mode']))
    ROM_path_FN = FileName(rom_path)
    log_info('mame_scan_MAME_ROMs() ROM dir OP {}'.format(ROM_path_FN.getOriginalPath()))
    log_info('mame_scan_MAME_ROMs() ROM dir P {}'.format(ROM_path_FN.getPath()))
    # CHD and Samples paths are only valid if their respective scan is enabled.
    if options_dic['scan_CHDs']:
        CHD_path_FN = FileName(cfg.settings['chd_path'])
        log_info('mame_scan_MAME_ROMs() CHD dir OP {}'.format(CHD_path_FN.getOriginalPath()))
        log_info('mame_scan_MAME_ROMs() CHD dir P {}'.format(CHD_path_FN.getPath()))
    else:
        CHD_path_FN = FileName('')
        log_info('Scan of CHDs disabled.')
    if options_dic['scan_Samples']:
        Samples_path_FN = FileName(cfg.settings['samples_path'])
        log_info('mame_scan_MAME_ROMs() Samples OP {}'.format(Samples_path_FN.getOriginalPath()))
        log_info('mame_scan_MAME_ROMs() Samples P {}'.format(Samples_path_FN.getPath()))
    else:
        Samples_path_FN = FileName('')
        log_info('Scan of Samples disabled.')
    # --- Create auxiliary databases ---
    # Flat lists of all the ROM ZIPs, Sample ZIPs and CHDs referenced by any machine.
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Creating auxiliary databases...', 3)
    ROM_ZIP_list = mame_get_ROM_ZIP_list(machine_archives_dic)
    pDialog.updateProgressInc()
    Sample_ZIP_list = mame_get_Sample_ZIP_list(machine_archives_dic)
    pDialog.updateProgressInc()
    CHD_list = mame_get_CHD_list(machine_archives_dic)
    pDialog.endProgress()
    # --- Create a cache of files ---
    # utils_file_cache_add_dir() creates a set with all files in a given directory.
    # That set is stored in a function internal cache associated with the path.
    # Files in the cache can be searched with misc_search_file_cache()
    # utils_file_cache_add_dir() accepts invalid/empty paths, just do not add them to the cache.
    ROM_path_str = ROM_path_FN.getPath()
    CHD_path_str = CHD_path_FN.getPath()
    Samples_path_str = Samples_path_FN.getPath()
    STUFF_PATH_LIST = [ROM_path_str, CHD_path_str, Samples_path_str]
    pDialog.startProgress('Listing files in ROM/CHD/Samples directories...', len(STUFF_PATH_LIST))
    utils_file_cache_clear()
    for asset_dir in STUFF_PATH_LIST:
        pDialog.updateProgressInc()
        utils_file_cache_add_dir(asset_dir)
    pDialog.endProgress()
    # --- Scan machine archives ---
    # Traverses all machines and scans if all required files exist.
    scan_march_ROM_total = 0
    scan_march_ROM_have = 0
    scan_march_ROM_missing = 0
    scan_march_SAM_total = 0
    scan_march_SAM_have = 0
    scan_march_SAM_missing = 0
    scan_march_CHD_total = 0
    scan_march_CHD_have = 0
    scan_march_CHD_missing = 0
    r_full_list = []
    r_have_list = []
    r_miss_list = []
    dial_line = 'Scanning MAME machine archives (ROMs, CHDs and Samples)...'
    pDialog.startProgress(dial_line, len(renderdb))
    for key in sorted(renderdb):
        pDialog.updateProgressInc()
        # --- Initialise machine ---
        # log_info('mame_scan_MAME_ROMs() Checking machine {}'.format(key))
        if renderdb[key]['isDevice']: continue # Skip Devices
        m_have_str_list = []
        m_miss_str_list = []
        # --- ROMs ---
        # A machine has flag 'R' only when every required ROM archive is present.
        rom_list = machine_archives_dic[key]['ROMs']
        if rom_list:
            scan_march_ROM_total += 1
            have_rom_list = [False] * len(rom_list)
            for i, rom in enumerate(rom_list):
                ROM_FN = utils_file_cache_search(ROM_path_str, rom, MAME_ROM_EXTS)
                if ROM_FN:
                    have_rom_list[i] = True
                    m_have_str_list.append('HAVE ROM {}'.format(rom))
                else:
                    m_miss_str_list.append('MISS ROM {}'.format(rom))
            if all(have_rom_list):
                # --- All ZIP files required to run this machine exist ---
                scan_march_ROM_have += 1
                ROM_flag = 'R'
            else:
                scan_march_ROM_missing += 1
                ROM_flag = 'r'
        else:
            ROM_flag = '-'
        db_set_ROM_flag(assetdb[key], ROM_flag)
        # --- Samples ---
        sample_list = machine_archives_dic[key]['Samples']
        if sample_list and options_dic['scan_Samples']:
            scan_march_SAM_total += 1
            have_sample_list = [False] * len(sample_list)
            for i, sample in enumerate(sample_list):
                Sample_FN = utils_file_cache_search(Samples_path_str, sample, MAME_SAMPLE_EXTS)
                # BUG FIX: this previously tested ROM_FN (copy-paste error), so the
                # Sample result depended on the last ROM lookup instead of the Sample lookup.
                if Sample_FN:
                    have_sample_list[i] = True
                    m_have_str_list.append('HAVE SAM {}'.format(sample))
                else:
                    m_miss_str_list.append('MISS SAM {}'.format(sample))
            if all(have_sample_list):
                scan_march_SAM_have += 1
                Sample_flag = 'S'
            else:
                scan_march_SAM_missing += 1
                Sample_flag = 's'
        elif sample_list and not options_dic['scan_Samples']:
            # Samples required but Sample scanning disabled: count as missing.
            scan_march_SAM_total += 1
            scan_march_SAM_missing += 1
            Sample_flag = 's'
        else:
            Sample_flag = '-'
        db_set_Sample_flag(assetdb[key], Sample_flag)
        # --- Disks ---
        # Machines with CHDs: 2spicy, sfiii2
        chd_list = machine_archives_dic[key]['CHDs']
        if chd_list and options_dic['scan_CHDs']:
            scan_march_CHD_total += 1
            has_chd_list = [False] * len(chd_list)
            for idx, chd_name in enumerate(chd_list):
                # log_debug('Testing CHD "{}"'.format(chd_name))
                CHD_FN = utils_file_cache_search(CHD_path_str, chd_name, MAME_CHD_EXTS)
                if CHD_FN:
                    has_chd_list[idx] = True
                    m_have_str_list.append('HAVE CHD {}'.format(chd_name))
                else:
                    m_miss_str_list.append('MISS CHD {}'.format(chd_name))
            if all(has_chd_list):
                scan_march_CHD_have += 1
                CHD_flag = 'C'
            else:
                scan_march_CHD_missing += 1
                CHD_flag = 'c'
        elif chd_list and not options_dic['scan_CHDs']:
            # CHDs required but CHD scanning disabled: count as missing.
            scan_march_CHD_total += 1
            scan_march_CHD_missing += 1
            CHD_flag = 'c'
        else:
            CHD_flag = '-'
        db_set_CHD_flag(assetdb[key], CHD_flag)
        # Build FULL, HAVE and MISSING reports.
        r_full_list.append('Machine {} "{}"'.format(key, renderdb[key]['description']))
        if renderdb[key]['cloneof']:
            cloneof = renderdb[key]['cloneof']
            r_full_list.append('cloneof {} "{}"'.format(cloneof, renderdb[cloneof]['description']))
        if not rom_list and not sample_list and not chd_list:
            r_full_list.append('Machine has no ROMs, Samples and/or CHDs')
        else:
            r_full_list.extend(m_have_str_list)
            r_full_list.extend(m_miss_str_list)
        r_full_list.append('')
        # In the HAVE report include machines if and only if every required file is there.
        if m_have_str_list and not m_miss_str_list:
            r_have_list.append('Machine {} "{}"'.format(key, renderdb[key]['description']))
            if renderdb[key]['cloneof']:
                cloneof = renderdb[key]['cloneof']
                r_have_list.append('cloneof {} "{}"'.format(cloneof, renderdb[cloneof]['description']))
            r_have_list.extend(m_have_str_list)
            r_have_list.extend(m_miss_str_list)
            r_have_list.append('')
        # In the MISSING report include machines if anything is missing.
        if m_miss_str_list:
            r_miss_list.append('Machine {} "{}"'.format(key, renderdb[key]['description']))
            if renderdb[key]['cloneof']:
                cloneof = renderdb[key]['cloneof']
                r_miss_list.append('cloneof {} "{}"'.format(cloneof, renderdb[cloneof]['description']))
            r_miss_list.extend(m_have_str_list)
            r_miss_list.extend(m_miss_str_list)
            r_miss_list.append('')
    pDialog.endProgress()
    # Write MAME scanner reports
    reports_total = 3
    pDialog.startProgress('Saving scanner reports...', reports_total)
    log_info('Writing report "{}"'.format(cfg.REPORT_MAME_SCAN_MACHINE_ARCH_FULL_PATH.getPath()))
    report_slist = [
        '*** Advanced MAME Launcher MAME machines scanner report ***',
        'This report shows all the scanned MAME machines.',
        '',
        'MAME ROM path     "{}"'.format(ROM_path_str),
        'MAME Samples path "{}"'.format(Samples_path_str),
        'MAME CHD path     "{}"'.format(CHD_path_str),
        '',
    ]
    report_slist.extend(r_full_list)
    utils_write_slist_to_file(cfg.REPORT_MAME_SCAN_MACHINE_ARCH_FULL_PATH.getPath(), report_slist)
    pDialog.updateProgress(1)
    log_info('Writing report "{}"'.format(cfg.REPORT_MAME_SCAN_MACHINE_ARCH_HAVE_PATH.getPath()))
    report_slist = [
        '*** Advanced MAME Launcher MAME machines scanner report ***',
        'This report shows MAME machines that have all the required',
        'ROM ZIP files, Sample ZIP files and CHD files.',
        'Machines that require no files are not listed.',
        '',
        'MAME ROM path     "{}"'.format(ROM_path_str),
        'MAME Samples path "{}"'.format(Samples_path_str),
        'MAME CHD path     "{}"'.format(CHD_path_str),
        '',
    ]
    if not r_have_list:
        r_have_list.append('Ouch!!! You do not have any ROM ZIP files and/or CHDs.')
    report_slist.extend(r_have_list)
    utils_write_slist_to_file(cfg.REPORT_MAME_SCAN_MACHINE_ARCH_HAVE_PATH.getPath(), report_slist)
    pDialog.updateProgress(2)
    log_info('Writing report "{}"'.format(cfg.REPORT_MAME_SCAN_MACHINE_ARCH_MISS_PATH.getPath()))
    report_slist = [
        '*** Advanced MAME Launcher MAME machines scanner report ***',
        'This report shows MAME machines that miss all or some of the required',
        'ROM ZIP files, Sample ZIP files or CHD files.',
        'Machines that require no files are not listed.',
        '',
        'MAME ROM path     "{}"'.format(ROM_path_str),
        'MAME Samples path "{}"'.format(Samples_path_str),
        'MAME CHD path     "{}"'.format(CHD_path_str),
        '',
    ]
    if not r_miss_list:
        r_miss_list.append('Congratulations!!! You have no missing ROM ZIP and/or CHDs files.')
    report_slist.extend(r_miss_list)
    utils_write_slist_to_file(cfg.REPORT_MAME_SCAN_MACHINE_ARCH_MISS_PATH.getPath(), report_slist)
    pDialog.endProgress()
    # --- ROM ZIP file list ---
    # Scan each ROM archive only once, independently of the machine scan above.
    scan_ROM_ZIP_files_total = 0
    scan_ROM_ZIP_files_have = 0
    scan_ROM_ZIP_files_missing = 0
    r_list = [
        '*** Advanced MAME Launcher MAME machines scanner report ***',
        'This report shows all missing MAME machine ROM ZIP files.',
        'Each missing ROM ZIP appears only once, but more than one machine may be affected.',
        '',
        'MAME ROM path     "{}"'.format(ROM_path_str),
        'MAME Samples path "{}"'.format(Samples_path_str),
        'MAME CHD path     "{}"'.format(CHD_path_str),
        '',
    ]
    pDialog.startProgress('Scanning MAME ROM ZIPs...', len(ROM_ZIP_list))
    for rom_name in ROM_ZIP_list:
        pDialog.updateProgressInc()
        scan_ROM_ZIP_files_total += 1
        ROM_FN = utils_file_cache_search(ROM_path_str, rom_name, MAME_ROM_EXTS)
        if ROM_FN:
            scan_ROM_ZIP_files_have += 1
        else:
            scan_ROM_ZIP_files_missing += 1
            r_list.append('Missing ROM {}'.format(rom_name))
    pDialog.endProgress()
    log_info('Writing report "{}"'.format(cfg.REPORT_MAME_SCAN_ROM_LIST_MISS_PATH.getPath()))
    if scan_ROM_ZIP_files_missing == 0:
        r_list.append('Congratulations!!! You have no missing ROM ZIP files.')
    utils_write_slist_to_file(cfg.REPORT_MAME_SCAN_ROM_LIST_MISS_PATH.getPath(), r_list)
    # --- Sample ZIP file list ---
    scan_Samples_ZIP_total = 0
    scan_Samples_ZIP_have = 0
    scan_Samples_ZIP_missing = 0
    r_list = [
        '*** Advanced MAME Launcher MAME machines scanner report ***',
        'This report shows all missing MAME machine Sample ZIP files.',
        'Each missing Sample ZIP appears only once, but more than one machine may be affected.',
        '',
        'MAME ROM path     "{}"'.format(ROM_path_str),
        'MAME Samples path "{}"'.format(Samples_path_str),
        'MAME CHD path     "{}"'.format(CHD_path_str),
        '',
    ]
    pDialog.startProgress('Scanning MAME Sample ZIPs...', len(Sample_ZIP_list))
    for sample_name in Sample_ZIP_list:
        pDialog.updateProgressInc()
        scan_Samples_ZIP_total += 1
        Sample_FN = utils_file_cache_search(Samples_path_str, sample_name, MAME_SAMPLE_EXTS)
        if Sample_FN:
            scan_Samples_ZIP_have += 1
        else:
            scan_Samples_ZIP_missing += 1
            r_list.append('Missing Sample {}'.format(sample_name))
    pDialog.endProgress()
    log_info('Writing report "{}"'.format(cfg.REPORT_MAME_SCAN_SAM_LIST_MISS_PATH.getPath()))
    if scan_Samples_ZIP_missing == 0:
        r_list.append('Congratulations!!! You have no missing Sample ZIP files.')
    utils_write_slist_to_file(cfg.REPORT_MAME_SCAN_SAM_LIST_MISS_PATH.getPath(), r_list)
    # --- CHD file list ---
    scan_CHD_files_total = 0
    scan_CHD_files_have = 0
    scan_CHD_files_missing = 0
    r_list = [
        '*** Advanced MAME Launcher MAME machines scanner report ***',
        'This report shows all missing MAME machine CHDs',
        'Each missing CHD appears only once, but more than one machine may be affected.',
        '',
        'MAME ROM path     "{}"'.format(ROM_path_str),
        'MAME Samples path "{}"'.format(Samples_path_str),
        'MAME CHD path     "{}"'.format(CHD_path_str),
        '',
    ]
    pDialog.startProgress('Scanning MAME CHDs...', len(CHD_list))
    for chd_name in CHD_list:
        pDialog.updateProgressInc()
        scan_CHD_files_total += 1
        CHD_FN = utils_file_cache_search(CHD_path_str, chd_name, MAME_CHD_EXTS)
        if CHD_FN:
            scan_CHD_files_have += 1
        else:
            scan_CHD_files_missing += 1
            r_list.append('Missing CHD {}'.format(chd_name))
    pDialog.endProgress()
    log_info('Writing report "{}"'.format(cfg.REPORT_MAME_SCAN_CHD_LIST_MISS_PATH.getPath()))
    if scan_CHD_files_missing == 0:
        r_list.append('Congratulations!!! You have no missing CHD files.')
    utils_write_slist_to_file(cfg.REPORT_MAME_SCAN_CHD_LIST_MISS_PATH.getPath(), r_list)
    # --- Update statistics ---
    db_safe_edit(control_dic, 'scan_machine_archives_ROM_total', scan_march_ROM_total)
    db_safe_edit(control_dic, 'scan_machine_archives_ROM_have', scan_march_ROM_have)
    db_safe_edit(control_dic, 'scan_machine_archives_ROM_missing', scan_march_ROM_missing)
    db_safe_edit(control_dic, 'scan_machine_archives_Samples_total', scan_march_SAM_total)
    db_safe_edit(control_dic, 'scan_machine_archives_Samples_have', scan_march_SAM_have)
    db_safe_edit(control_dic, 'scan_machine_archives_Samples_missing', scan_march_SAM_missing)
    db_safe_edit(control_dic, 'scan_machine_archives_CHD_total', scan_march_CHD_total)
    db_safe_edit(control_dic, 'scan_machine_archives_CHD_have', scan_march_CHD_have)
    db_safe_edit(control_dic, 'scan_machine_archives_CHD_missing', scan_march_CHD_missing)
    db_safe_edit(control_dic, 'scan_ROM_ZIP_files_total', scan_ROM_ZIP_files_total)
    db_safe_edit(control_dic, 'scan_ROM_ZIP_files_have', scan_ROM_ZIP_files_have)
    db_safe_edit(control_dic, 'scan_ROM_ZIP_files_missing', scan_ROM_ZIP_files_missing)
    db_safe_edit(control_dic, 'scan_Samples_ZIP_total', scan_Samples_ZIP_total)
    db_safe_edit(control_dic, 'scan_Samples_ZIP_have', scan_Samples_ZIP_have)
    db_safe_edit(control_dic, 'scan_Samples_ZIP_missing', scan_Samples_ZIP_missing)
    db_safe_edit(control_dic, 'scan_CHD_files_total', scan_CHD_files_total)
    db_safe_edit(control_dic, 'scan_CHD_files_have', scan_CHD_files_have)
    db_safe_edit(control_dic, 'scan_CHD_files_missing', scan_CHD_files_missing)
    # --- Scanner timestamp ---
    db_safe_edit(control_dic, 't_MAME_ROMs_scan', time.time())
    # --- Save databases ---
    db_files = [
        [control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()],
        [assetdb, 'MAME machine assets', cfg.ASSET_DB_PATH.getPath()],
    ]
    db_save_files(db_files)
#
# Checks for errors before scanning for MAME assets.
# Caller function displays a Kodi dialog if an error is found and scanning must be aborted.
#
def mame_check_before_scan_MAME_assets(cfg, st_dic, control_dic):
    # Precondition check for the MAME asset scanner. The asset directory
    # must be configured and must exist on disk; otherwise an error is
    # flagged in st_dic and the caller aborts the scan.
    kodi_reset_status(st_dic)
    assets_path = cfg.settings['assets_path']
    if not assets_path:
        kodi_set_error_status(st_dic, 'MAME asset directory not configured. Aborting.')
        return
    if not FileName(assets_path).isdir():
        kodi_set_error_status(st_dic, 'MAME asset directory does not exist. Aborting.')
        return
#
# Note that MAME is able to use clone artwork from parent machines. Mr. Do's Artwork ZIP files
# are provided only for parents.
# First pass: search for on-disk assets.
# Second pass: do artwork substitution
# A) A clone may use assets from parent.
# B) A parent may use assets from a clone.
#
def mame_scan_MAME_assets(cfg, st_dic, db_dic_in):
    # Scans the MAME asset/artwork directories and fills in assetdb_dic for
    # every machine. First pass records assets found on disk; second pass
    # substitutes missing artwork using the parent/clone family. Writes the
    # MAME asset report, updates control_dic statistics by assignment and
    # saves control_dic plus the asset database.
    control_dic = db_dic_in['control_dic']
    renderdb_dic = db_dic_in['renderdb']
    assetdb_dic = db_dic_in['assetdb']
    main_pclone_dic = db_dic_in['main_pclone_dic']
    Asset_path_FN = FileName(cfg.settings['assets_path'])
    log_info('mame_scan_MAME_assets() Asset path {}'.format(Asset_path_FN.getPath()))
    # Iterate machines, check if assets/artwork exist.
    # table_str feeds text_render_table(): first row is the column
    # justification, second row the column headers; one row per machine follows.
    table_str = []
    table_str.append([
        'left',
        'left', 'left', 'left', 'left', 'left', 'left', 'left',
        'left', 'left', 'left', 'left', 'left', 'left', 'left'])
    table_str.append([
        'Name',
        '3DB', 'Apr', 'Art', 'Cab', 'Clr', 'CPa', 'Fan',
        'Fly', 'Man', 'Mar', 'PCB', 'Snp', 'Tit', 'Tra'])
    # --- Create a cache of assets ---
    # One cached directory listing per asset type in ASSET_MAME_T_LIST.
    asset_dirs = [''] * len(ASSET_MAME_T_LIST)
    pDialog = KodiProgressDialog()
    pDialog.startProgress('Listing files in asset directories...', len(ASSET_MAME_T_LIST))
    utils_file_cache_clear()
    for i, asset_tuple in enumerate(ASSET_MAME_T_LIST):
        pDialog.updateProgressInc()
        asset_dir = asset_tuple[1]
        full_asset_dir_FN = Asset_path_FN.pjoin(asset_dir)
        asset_dir_str = full_asset_dir_FN.getPath()
        asset_dirs[i] = asset_dir_str
        utils_file_cache_add_dir(asset_dir_str)
    pDialog.endProgress()
    # --- First pass: search for on-disk assets ---
    ondisk_assets_dic = {}
    pDialog.startProgress('Scanning MAME assets/artwork (first pass)...', len(renderdb_dic))
    for m_name in sorted(renderdb_dic):
        pDialog.updateProgressInc()
        machine_assets = db_new_MAME_asset()
        for idx, asset_tuple in enumerate(ASSET_MAME_T_LIST):
            asset_key = asset_tuple[0]
            asset_dir = asset_tuple[1]
            # Pick the allowed file extensions for this asset type.
            if asset_key == 'artwork':
                asset_FN = utils_file_cache_search(asset_dirs[idx], m_name, ASSET_ARTWORK_EXTS)
            elif asset_key == 'manual':
                asset_FN = utils_file_cache_search(asset_dirs[idx], m_name, ASSET_MANUAL_EXTS)
            elif asset_key == 'trailer':
                asset_FN = utils_file_cache_search(asset_dirs[idx], m_name, ASSET_TRAILER_EXTS)
            else:
                asset_FN = utils_file_cache_search(asset_dirs[idx], m_name, ASSET_IMAGE_EXTS)
            # Low level debug.
            # if m_name == '005':
            #     log_debug('asset_key "{}"'.format(asset_key))
            #     log_debug('asset_dir "{}"'.format(asset_dir))
            #     log_debug('asset_dirs[idx] "{}"'.format(asset_dirs[idx]))
            #     log_debug('asset_FN "{}"'.format(asset_FN))
            # Empty string means the asset was not found on disk.
            machine_assets[asset_key] = asset_FN.getOriginalPath() if asset_FN else ''
        ondisk_assets_dic[m_name] = machine_assets
    pDialog.endProgress()
    # --- Second pass: substitute artwork ---
    have_count_list = [0] * len(ASSET_MAME_T_LIST)
    alternate_count_list = [0] * len(ASSET_MAME_T_LIST)
    pDialog.startProgress('Scanning MAME assets/artwork (second pass)...', len(renderdb_dic))
    for m_name in sorted(renderdb_dic):
        pDialog.updateProgressInc()
        # Report codes per asset: YES on-disk, CLO from a clone, PAR from the
        # parent, CLX from a sibling clone, --- missing everywhere.
        asset_row = ['---'] * len(ASSET_MAME_T_LIST)
        for idx, asset_tuple in enumerate(ASSET_MAME_T_LIST):
            asset_key = asset_tuple[0]
            asset_dir = asset_tuple[1]
            # Reset asset
            assetdb_dic[m_name][asset_key] = ''
            # If artwork exists on disk set it on database
            if ondisk_assets_dic[m_name][asset_key]:
                assetdb_dic[m_name][asset_key] = ondisk_assets_dic[m_name][asset_key]
                have_count_list[idx] += 1
                asset_row[idx] = 'YES'
            # If artwork does not exist on disk ...
            else:
                # if machine is a parent search in the clone list
                # NOTE(review): presence in main_pclone_dic is used as the
                # parent test here — assumes all parents are keys of that dict.
                if m_name in main_pclone_dic:
                    for clone_key in main_pclone_dic[m_name]:
                        if ondisk_assets_dic[clone_key][asset_key]:
                            assetdb_dic[m_name][asset_key] = ondisk_assets_dic[clone_key][asset_key]
                            have_count_list[idx] += 1
                            alternate_count_list[idx] += 1
                            asset_row[idx] = 'CLO'
                            break
                # if machine is a clone search in the parent first, then search in the clones
                else:
                    # Search parent
                    parent_name = renderdb_dic[m_name]['cloneof']
                    if ondisk_assets_dic[parent_name][asset_key]:
                        assetdb_dic[m_name][asset_key] = ondisk_assets_dic[parent_name][asset_key]
                        have_count_list[idx] += 1
                        alternate_count_list[idx] += 1
                        asset_row[idx] = 'PAR'
                    # Search clones
                    else:
                        for clone_key in main_pclone_dic[parent_name]:
                            if clone_key == m_name: continue
                            if ondisk_assets_dic[clone_key][asset_key]:
                                assetdb_dic[m_name][asset_key] = ondisk_assets_dic[clone_key][asset_key]
                                have_count_list[idx] += 1
                                alternate_count_list[idx] += 1
                                asset_row[idx] = 'CLX'
                                break
        table_row = [m_name] + asset_row
        table_str.append(table_row)
    pDialog.endProgress()
    # --- Asset statistics and report ---
    # Each tuple is (have, missing, alternate) counts for one asset type.
    total_machines = len(renderdb_dic)
    # This must match the order of ASSET_MAME_T_LIST defined in disk_IO.py
    box3D = (have_count_list[0], total_machines - have_count_list[0], alternate_count_list[0])
    Artp = (have_count_list[1], total_machines - have_count_list[1], alternate_count_list[1])
    Art = (have_count_list[2], total_machines - have_count_list[2], alternate_count_list[2])
    Cab = (have_count_list[3], total_machines - have_count_list[3], alternate_count_list[3])
    Clr = (have_count_list[4], total_machines - have_count_list[4], alternate_count_list[4])
    CPan = (have_count_list[5], total_machines - have_count_list[5], alternate_count_list[5])
    Fan = (have_count_list[6], total_machines - have_count_list[6], alternate_count_list[6])
    Fly = (have_count_list[7], total_machines - have_count_list[7], alternate_count_list[7])
    Man = (have_count_list[8], total_machines - have_count_list[8], alternate_count_list[8])
    Mar = (have_count_list[9], total_machines - have_count_list[9], alternate_count_list[9])
    PCB = (have_count_list[10], total_machines - have_count_list[10], alternate_count_list[10])
    Snap = (have_count_list[11], total_machines - have_count_list[11], alternate_count_list[11])
    Tit = (have_count_list[12], total_machines - have_count_list[12], alternate_count_list[12])
    Tra = (have_count_list[13], total_machines - have_count_list[13], alternate_count_list[13])
    pDialog.startProgress('Creating MAME asset report...')
    report_slist = []
    report_slist.append('*** Advanced MAME Launcher MAME machines asset scanner report ***')
    report_slist.append('Total MAME machines {}'.format(total_machines))
    report_slist.append('Have 3D Boxes    {:5d} (Missing {:5d}, Alternate {:5d})'.format(*box3D))
    report_slist.append('Have Artpreview  {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Artp))
    report_slist.append('Have Artwork     {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Art))
    report_slist.append('Have Cabinets    {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Cab))
    report_slist.append('Have Clearlogos  {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Clr))
    report_slist.append('Have CPanels     {:5d} (Missing {:5d}, Alternate {:5d})'.format(*CPan))
    report_slist.append('Have Fanarts     {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Fan))
    report_slist.append('Have Flyers      {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Fly))
    report_slist.append('Have Manuals     {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Man))
    report_slist.append('Have Marquees    {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Mar))
    report_slist.append('Have PCBs        {:5d} (Missing {:5d}, Alternate {:5d})'.format(*PCB))
    report_slist.append('Have Snaps       {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Snap))
    report_slist.append('Have Titles      {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Tit))
    report_slist.append('Have Trailers    {:5d} (Missing {:5d}, Alternate {:5d})'.format(*Tra))
    report_slist.append('')
    table_str_list = text_render_table(table_str)
    report_slist.extend(table_str_list)
    log_info('Writing MAME asset report file "{}"'.format(cfg.REPORT_MAME_ASSETS_PATH.getPath()))
    utils_write_slist_to_file(cfg.REPORT_MAME_ASSETS_PATH.getPath(), report_slist)
    pDialog.endProgress()
    # Update control_dic by assigment (will be saved in caller)
    db_safe_edit(control_dic, 'assets_num_MAME_machines', total_machines)
    db_safe_edit(control_dic, 'assets_3dbox_have', box3D[0])
    db_safe_edit(control_dic, 'assets_3dbox_missing', box3D[1])
    db_safe_edit(control_dic, 'assets_3dbox_alternate', box3D[2])
    db_safe_edit(control_dic, 'assets_artpreview_have', Artp[0])
    db_safe_edit(control_dic, 'assets_artpreview_missing', Artp[1])
    db_safe_edit(control_dic, 'assets_artpreview_alternate', Artp[2])
    db_safe_edit(control_dic, 'assets_artwork_have', Art[0])
    db_safe_edit(control_dic, 'assets_artwork_missing', Art[1])
    db_safe_edit(control_dic, 'assets_artwork_alternate', Art[2])
    db_safe_edit(control_dic, 'assets_cabinets_have', Cab[0])
    db_safe_edit(control_dic, 'assets_cabinets_missing', Cab[1])
    db_safe_edit(control_dic, 'assets_cabinets_alternate', Cab[2])
    db_safe_edit(control_dic, 'assets_clearlogos_have', Clr[0])
    db_safe_edit(control_dic, 'assets_clearlogos_missing', Clr[1])
    db_safe_edit(control_dic, 'assets_clearlogos_alternate', Clr[2])
    db_safe_edit(control_dic, 'assets_cpanels_have', CPan[0])
    db_safe_edit(control_dic, 'assets_cpanels_missing', CPan[1])
    db_safe_edit(control_dic, 'assets_cpanels_alternate', CPan[2])
    db_safe_edit(control_dic, 'assets_fanarts_have', Fan[0])
    db_safe_edit(control_dic, 'assets_fanarts_missing', Fan[1])
    db_safe_edit(control_dic, 'assets_fanarts_alternate', Fan[2])
    db_safe_edit(control_dic, 'assets_flyers_have', Fly[0])
    db_safe_edit(control_dic, 'assets_flyers_missing', Fly[1])
    db_safe_edit(control_dic, 'assets_flyers_alternate', Fly[2])
    db_safe_edit(control_dic, 'assets_manuals_have', Man[0])
    db_safe_edit(control_dic, 'assets_manuals_missing', Man[1])
    db_safe_edit(control_dic, 'assets_manuals_alternate', Man[2])
    db_safe_edit(control_dic, 'assets_marquees_have', Mar[0])
    db_safe_edit(control_dic, 'assets_marquees_missing', Mar[1])
    db_safe_edit(control_dic, 'assets_marquees_alternate', Mar[2])
    db_safe_edit(control_dic, 'assets_PCBs_have', PCB[0])
    db_safe_edit(control_dic, 'assets_PCBs_missing', PCB[1])
    db_safe_edit(control_dic, 'assets_PCBs_alternate', PCB[2])
    db_safe_edit(control_dic, 'assets_snaps_have', Snap[0])
    db_safe_edit(control_dic, 'assets_snaps_missing', Snap[1])
    db_safe_edit(control_dic, 'assets_snaps_alternate', Snap[2])
    db_safe_edit(control_dic, 'assets_titles_have', Tit[0])
    db_safe_edit(control_dic, 'assets_titles_missing', Tit[1])
    db_safe_edit(control_dic, 'assets_titles_alternate', Tit[2])
    db_safe_edit(control_dic, 'assets_trailers_have', Tra[0])
    db_safe_edit(control_dic, 'assets_trailers_missing', Tra[1])
    db_safe_edit(control_dic, 'assets_trailers_alternate', Tra[2])
    db_safe_edit(control_dic, 't_MAME_assets_scan', time.time())
    # --- Save databases ---
    db_files = [
        [control_dic, 'Control dictionary', cfg.MAIN_CONTROL_PATH.getPath()],
        [assetdb_dic, 'MAME machine assets', cfg.ASSET_DB_PATH.getPath()],
    ]
    db_save_files(db_files)
# -------------------------------------------------------------------------------------------------
#
# Checks for errors before scanning for SL ROMs.
# Displays a Kodi dialog if an error is found.
# Fatal errors are reported in st_dic. Fills in options_dic:
#   options_dic['scan_SL_CHDs'] is set because scanning of SL CHDs is optional.
#
def mame_check_before_scan_SL_ROMs(cfg, st_dic, options_dic, control_dic):
    # Validates the addon settings before the Software List ROM scan.
    # On a fatal problem st_dic is loaded with an error message and the scan aborts.
    # options_dic['scan_SL_CHDs'] is always set so the scanner knows whether
    # CHDs must be looked for as well (CHD scanning is optional).
    kodi_reset_status(st_dic)
    # Software Lists support must be enabled globally.
    if not cfg.settings['global_enable_SL']:
        kodi_set_error_status(st_dic, 'Software Lists globally disabled. SL ROM scanning aborted.')
        return
    # The SL hash path is mandatory.
    if not cfg.settings['SL_hash_path']:
        kodi_set_error_status(st_dic, 'Software Lists hash path not set. SL ROM scanning aborted.')
        return
    # The SL ROM path is mandatory.
    if not cfg.settings['SL_rom_path']:
        kodi_set_error_status(st_dic, 'Software Lists ROM path not set. SL ROM scanning aborted.')
        return
    # CHD scanning is optional: warn (non-fatal dialog) and disable it when the
    # CHD directory is not configured or does not exist on disk.
    chd_dir = cfg.settings['SL_chd_path']
    if not chd_dir:
        kodi_dialog_OK('SL CHD directory not configured. SL CHD scanning disabled.')
        options_dic['scan_SL_CHDs'] = False
        return
    if FileName(chd_dir).isdir():
        options_dic['scan_SL_CHDs'] = True
    else:
        kodi_dialog_OK('SL CHD directory does not exist. SL CHD scanning disabled.')
        options_dic['scan_SL_CHDs'] = False
# Saves SL JSON databases, MAIN_CONTROL_PATH.
def mame_scan_SL_ROMs(cfg, st_dic, options_dic, SL_dic):
    # Scans the Software List ROM ZIP archives and CHDs on disk, updates the
    # 'status_ROM'/'status_CHD' flags of every SL item, writes three scanner
    # reports (FULL/HAVE/MISS) and saves the per-SL item databases plus
    # MAIN_CONTROL_PATH.
    #
    # cfg         -> addon configuration object (paths and settings).
    # st_dic      -> Kodi status dictionary (not modified by this function;
    #                pre-scan checks are done in mame_check_before_scan_SL_ROMs()).
    # options_dic -> options_dic['scan_SL_CHDs'] tells whether CHDs are scanned.
    # SL_dic      -> dictionary holding 'control_dic' and the 'SL_index' database.
    log_info('mame_scan_SL_ROMs() Starting...')
    control_dic = SL_dic['control_dic']
    SL_index_dic = SL_dic['SL_index']
    # Paths have been verified at this point
    SL_hash_dir_FN = cfg.SL_DB_DIR
    log_info('mame_scan_SL_ROMs() SL hash dir OP {}'.format(SL_hash_dir_FN.getOriginalPath()))
    log_info('mame_scan_SL_ROMs() SL hash dir P {}'.format(SL_hash_dir_FN.getPath()))
    SL_ROM_dir_FN = FileName(cfg.settings['SL_rom_path'])
    log_info('mame_scan_SL_ROMs() SL ROM dir OP {}'.format(SL_ROM_dir_FN.getOriginalPath()))
    log_info('mame_scan_SL_ROMs() SL ROM dir P {}'.format(SL_ROM_dir_FN.getPath()))
    if options_dic['scan_SL_CHDs']:
        SL_CHD_path_FN = FileName(cfg.settings['SL_chd_path'])
        log_info('mame_scan_SL_ROMs() SL CHD dir OP {}'.format(SL_CHD_path_FN.getOriginalPath()))
        log_info('mame_scan_SL_ROMs() SL CHD dir P {}'.format(SL_CHD_path_FN.getPath()))
    else:
        # CHD scanning disabled: use an empty path so cache lookups find nothing.
        SL_CHD_path_FN = FileName('')
        log_info('Scan of SL CHDs disabled.')
    # --- Add files to cache ---
    # The file cache avoids hitting the filesystem once per SL item file.
    SL_ROM_path_str = SL_ROM_dir_FN.getPath()
    SL_CHD_path_str = SL_CHD_path_FN.getPath()
    pDialog = KodiProgressDialog()
    d_text = 'Listing Sofware Lists ROM ZIPs and CHDs...'
    pDialog.startProgress('{}\n{}'.format(d_text, 'Listing SL ROM ZIP path'), 2)
    utils_file_cache_clear()
    utils_file_cache_add_dir(SL_ROM_path_str, verbose = True)
    pDialog.updateProgress(1, '{}\n{}'.format(d_text, 'Listing SL CHD path'))
    utils_file_cache_add_dir(SL_CHD_path_str, verbose = True)
    pDialog.endProgress()
    # --- SL ROM ZIP archives and CHDs ---
    # Traverse the Software Lists, check if ROMs ZIPs and CHDs exists for every SL item,
    # update and save database.
    SL_ROMs_have = 0
    SL_ROMs_missing = 0
    SL_ROMs_total = 0
    SL_CHDs_have = 0
    SL_CHDs_missing = 0
    SL_CHDs_total = 0
    # Line buffers for the FULL/HAVE/MISS text reports.
    r_all_list = []
    r_have_list = []
    r_miss_list = []
    d_text = 'Scanning Sofware Lists ROM ZIPs and CHDs...'
    pDialog.startProgress(d_text, len(SL_index_dic))
    for SL_name in sorted(SL_index_dic):
        pDialog.updateProgressInc('{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(d_text, SL_name))
        # Load SL databases
        SL_DB_FN = SL_hash_dir_FN.pjoin(SL_name + '_items.json')
        SL_SOFT_ARCHIVES_DB_FN = SL_hash_dir_FN.pjoin(SL_name + '_ROM_archives.json')
        sl_roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
        soft_archives = utils_load_JSON_file(SL_SOFT_ARCHIVES_DB_FN.getPath(), verbose = False)
        # Scan
        for rom_key in sorted(sl_roms):
            # Per-item HAVE/MISS lines for the reports.
            m_have_str_list = []
            m_miss_str_list = []
            rom = sl_roms[rom_key]
            # --- ROMs ---
            # status_ROM: 'R' all ROM ZIPs present, 'r' some missing, '-' item has no ROMs.
            rom_list = soft_archives[rom_key]['ROMs']
            if rom_list:
                have_rom_list = [False] * len(rom_list)
                for i, rom_file in enumerate(rom_list):
                    SL_ROMs_total += 1
                    SL_ROM_FN = utils_file_cache_search(SL_ROM_path_str, rom_file, SL_ROM_EXTS)
                    if SL_ROM_FN:
                        have_rom_list[i] = True
                        m_have_str_list.append('HAVE ROM {}'.format(rom_file))
                    else:
                        m_miss_str_list.append('MISS ROM {}'.format(rom_file))
                if all(have_rom_list):
                    rom['status_ROM'] = 'R'
                    SL_ROMs_have += 1
                else:
                    rom['status_ROM'] = 'r'
                    SL_ROMs_missing += 1
            else:
                rom['status_ROM'] = '-'
            # --- Disks ---
            # status_CHD: 'C' all CHDs present, 'c' some/all missing (or CHD scan
            # disabled), '-' item has no CHDs.
            chd_list = soft_archives[rom_key]['CHDs']
            if chd_list:
                if options_dic['scan_SL_CHDs']:
                    SL_CHDs_total += 1
                    has_chd_list = [False] * len(chd_list)
                    for idx, chd_file in enumerate(chd_list):
                        SL_CHD_FN = utils_file_cache_search(SL_CHD_path_str, chd_file, SL_CHD_EXTS)
                        # CHD_path = SL_CHD_path_str + '/' + chd_file
                        if SL_CHD_FN:
                            has_chd_list[idx] = True
                            m_have_str_list.append('HAVE CHD {}'.format(chd_file))
                        else:
                            m_miss_str_list.append('MISS CHD {}'.format(chd_file))
                    if all(has_chd_list):
                        rom['status_CHD'] = 'C'
                        SL_CHDs_have += 1
                    else:
                        rom['status_CHD'] = 'c'
                        SL_CHDs_missing += 1
                else:
                    # CHD scanning disabled: mark all items with CHDs as missing.
                    rom['status_CHD'] = 'c'
                    SL_CHDs_missing += 1
            else:
                rom['status_CHD'] = '-'
            # --- Build report ---
            # NOTE(review): clone_description is only assigned when clone_name is
            # truthy in the FULL report branch; the HAVE/MISS branches reuse it and
            # rely on the same clone_name guard.
            description = sl_roms[rom_key]['description']
            clone_name = sl_roms[rom_key]['cloneof']
            r_all_list.append('SL {} item {} "{}"'.format(SL_name, rom_key, description))
            if clone_name:
                clone_description = sl_roms[clone_name]['description']
                r_all_list.append('cloneof {} "{}"'.format(clone_name, clone_description))
            if m_have_str_list:
                r_all_list.extend(m_have_str_list)
            if m_miss_str_list:
                r_all_list.extend(m_miss_str_list)
            r_all_list.append('')
            if m_have_str_list:
                r_have_list.append('SL {} item {} "{}"'.format(SL_name, rom_key, description))
                if clone_name:
                    r_have_list.append('cloneof {} "{}"'.format(clone_name, clone_description))
                r_have_list.extend(m_have_str_list)
                if m_miss_str_list: r_have_list.extend(m_miss_str_list)
                r_have_list.append('')
            if m_miss_str_list:
                r_miss_list.append('SL {} item {} "{}"'.format(SL_name, rom_key, description))
                if clone_name:
                    r_miss_list.append('cloneof {} "{}"'.format(clone_name, clone_description))
                r_miss_list.extend(m_miss_str_list)
                if m_have_str_list: r_miss_list.extend(m_have_str_list)
                r_miss_list.append('')
        # Save SL database to update flags and update progress.
        utils_write_JSON_file(SL_DB_FN.getPath(), sl_roms, verbose = False)
    pDialog.endProgress()
    # Write SL scanner reports
    reports_total = 3
    pDialog.startProgress('Writing scanner reports...', reports_total)
    log_info('Writing SL ROM ZIPs/CHDs FULL report')
    log_info('Report file "{}"'.format(cfg.REPORT_SL_SCAN_MACHINE_ARCH_FULL_PATH.getPath()))
    sl = [
        '*** Advanced MAME Launcher Software Lists scanner report ***',
        'This report shows all the scanned SL items',
        '',
    ]
    sl.extend(r_all_list)
    utils_write_slist_to_file(cfg.REPORT_SL_SCAN_MACHINE_ARCH_FULL_PATH.getPath(), sl)
    pDialog.updateProgressInc()
    log_info('Writing SL ROM ZIPs and/or CHDs HAVE report')
    log_info('Report file "{}"'.format(cfg.REPORT_SL_SCAN_MACHINE_ARCH_HAVE_PATH.getPath()))
    sl = [
        '*** Advanced MAME Launcher Software Lists scanner report ***',
        'This reports shows the SL items with ROM ZIPs and/or CHDs with HAVE status',
        '',
    ]
    if r_have_list:
        sl.extend(r_have_list)
    else:
        sl.append('You do not have any ROM ZIP or CHD files!')
    utils_write_slist_to_file(cfg.REPORT_SL_SCAN_MACHINE_ARCH_HAVE_PATH.getPath(), sl)
    pDialog.updateProgressInc()
    log_info('Writing SL ROM ZIPs/CHDs MISS report')
    log_info('Report file "{}"'.format(cfg.REPORT_SL_SCAN_MACHINE_ARCH_MISS_PATH.getPath()))
    sl = [
        '*** Advanced MAME Launcher Software Lists scanner report ***',
        'This reports shows the SL items with ROM ZIPs and/or CHDs with MISSING status',
        '',
    ]
    if r_miss_list:
        sl.extend(r_miss_list)
    else:
        sl.append('Congratulations! No missing SL ROM ZIP or CHD files.')
    utils_write_slist_to_file(cfg.REPORT_SL_SCAN_MACHINE_ARCH_MISS_PATH.getPath(), sl)
    pDialog.endProgress()
    # Update statistics, timestamp and save control_dic.
    db_safe_edit(control_dic, 'scan_SL_archives_ROM_total', SL_ROMs_total)
    db_safe_edit(control_dic, 'scan_SL_archives_ROM_have', SL_ROMs_have)
    db_safe_edit(control_dic, 'scan_SL_archives_ROM_missing', SL_ROMs_missing)
    db_safe_edit(control_dic, 'scan_SL_archives_CHD_total', SL_CHDs_total)
    db_safe_edit(control_dic, 'scan_SL_archives_CHD_have', SL_CHDs_have)
    db_safe_edit(control_dic, 'scan_SL_archives_CHD_missing', SL_CHDs_missing)
    db_safe_edit(control_dic, 't_SL_ROMs_scan', time.time())
    utils_write_JSON_file(cfg.MAIN_CONTROL_PATH.getPath(), control_dic)
#
# Checks for errors before scanning for SL assets.
# Display a Kodi dialog if an error is found and returns True if scanning must be aborted.
# Returns False if no errors.
#
def mame_check_before_scan_SL_assets(cfg, st_dic, control_dic):
    # Validates the addon settings before the Software List asset scan.
    # On failure st_dic is loaded with an error message so the caller aborts.
    kodi_reset_status(st_dic)
    settings = cfg.settings
    # Software Lists support must be enabled globally.
    if not settings['global_enable_SL']:
        kodi_set_error_status(st_dic, 'Software Lists globally disabled. SL ROM scanning aborted.')
        return
    # The asset directory must be configured and exist on disk.
    asset_dir = settings['assets_path']
    if not asset_dir:
        kodi_set_error_status(st_dic, 'Asset directory not configured. Aborting.')
        return
    if not FileName(asset_dir).isdir():
        kodi_set_error_status(st_dic, 'Asset directory does not exist. Aborting.')
        return
def mame_scan_SL_assets(cfg, st_dic, SL_dic):
    # Scans the Software List assets/artwork on disk in two passes:
    #   1) record which asset files exist for each SL item,
    #   2) substitute missing artwork from the parent/clone group.
    # Writes the per-SL asset JSON databases, the asset report, updates the
    # asset statistics in control_dic and saves MAIN_CONTROL_PATH.
    #
    # cfg    -> addon configuration object (paths and settings).
    # st_dic -> Kodi status dictionary (not modified here; pre-scan checks
    #           are done in mame_check_before_scan_SL_assets()).
    # SL_dic -> dictionary with 'control_dic', 'SL_index' and 'SL_PClone_dic'.
    log_debug('mame_scan_SL_assets() Starting...')
    control_dic = SL_dic['control_dic']
    SL_index_dic = SL_dic['SL_index']
    SL_pclone_dic = SL_dic['SL_PClone_dic']
    # At this point assets_path is configured and the directory exists.
    Asset_path_FN = FileName(cfg.settings['assets_path'])
    log_info('mame_scan_SL_assets() SL asset path {}'.format(Asset_path_FN.getPath()))
    # --- Traverse Software List, check if ROM exists, update and save database ---
    # table_str accumulates one row per SL item for the text report.
    table_str = []
    table_str.append(['left', 'left', 'left', 'left', 'left', 'left', 'left', 'left', 'left'])
    table_str.append(['Soft', 'Name', '3DB', 'Tit', 'Snap', 'Bft', 'Fan', 'Tra', 'Man'])
    # Per-asset-type counters, indexed like ASSET_SL_T_LIST.
    have_count_list = [0] * len(ASSET_SL_T_LIST)
    alternate_count_list = [0] * len(ASSET_SL_T_LIST)
    SL_item_count = 0
    # DEBUG code
    # SL_index_dic = {
    #     "32x" :
    #     { "display_name" : "Sega 32X cartridges", "num_with_CHDs" : 0, "num_with_ROMs" : 203, "rom_DB_noext" : "32x" }
    # }
    d_text = 'Scanning Sofware Lists assets/artwork...'
    pDialog = KodiProgressDialog()
    pDialog.startProgress(d_text, len(SL_index_dic))
    for SL_name in sorted(SL_index_dic):
        pDialog.updateProgressInc('{}\nSoftware List [COLOR orange]{}[/COLOR]'.format(d_text, SL_name))
        # --- Load SL databases ---
        file_name = SL_index_dic[SL_name]['rom_DB_noext'] + '_items.json'
        SL_DB_FN = cfg.SL_DB_DIR.pjoin(file_name)
        SL_roms = utils_load_JSON_file(SL_DB_FN.getPath(), verbose = False)
        # --- Cache files ---
        # One cached directory listing per asset type for this Software List.
        utils_file_cache_clear(verbose = False)
        num_assets = len(ASSET_SL_T_LIST)
        asset_dirs = [''] * num_assets
        for i, asset_tuple in enumerate(ASSET_SL_T_LIST):
            asset_dir = asset_tuple[1]
            full_asset_dir_FN = Asset_path_FN.pjoin(asset_dir).pjoin(SL_name)
            asset_dir_str = full_asset_dir_FN.getPath()
            asset_dirs[i] = asset_dir_str
            utils_file_cache_add_dir(asset_dir_str, verbose = False)
        # --- First pass: scan for on-disk assets ---
        assets_file_name = SL_index_dic[SL_name]['rom_DB_noext'] + '_assets.json'
        SL_asset_DB_FN = cfg.SL_DB_DIR.pjoin(assets_file_name)
        # log_info('Assets JSON "{}"'.format(SL_asset_DB_FN.getPath()))
        ondisk_assets_dic = {}
        for rom_key in sorted(SL_roms):
            SL_assets = db_new_SL_asset()
            for idx, asset_tuple in enumerate(ASSET_SL_T_LIST):
                asset_key = asset_tuple[0]
                asset_dir = asset_tuple[1]
                full_asset_dir_FN = Asset_path_FN.pjoin(asset_dir).pjoin(SL_name)
                # Manuals and trailers use their own extension lists.
                if asset_key == 'manual':
                    asset_FN = utils_file_cache_search(asset_dirs[idx], rom_key, ASSET_MANUAL_EXTS)
                elif asset_key == 'trailer':
                    asset_FN = utils_file_cache_search(asset_dirs[idx], rom_key, ASSET_TRAILER_EXTS)
                else:
                    asset_FN = utils_file_cache_search(asset_dirs[idx], rom_key, ASSET_IMAGE_EXTS)
                # log_info('Testing P "{}"'.format(asset_FN.getPath()))
                SL_assets[asset_key] = asset_FN.getOriginalPath() if asset_FN else ''
            ondisk_assets_dic[rom_key] = SL_assets
        # --- Second pass: substitute artwork ---
        # Parents borrow missing artwork from clones; clones borrow from their
        # parent first and then from sibling clones.
        main_pclone_dic = SL_pclone_dic[SL_name]
        SL_assets_dic = {}
        for rom_key in sorted(SL_roms):
            SL_item_count += 1
            SL_assets_dic[rom_key] = db_new_SL_asset()
            # asset_row: '---' missing, 'YES' on disk, 'CLO'/'PAR'/'CLX' substituted.
            asset_row = ['---'] * len(ASSET_SL_T_LIST)
            for idx, asset_tuple in enumerate(ASSET_SL_T_LIST):
                asset_key = asset_tuple[0]
                asset_dir = asset_tuple[1]
                # >> Reset asset
                SL_assets_dic[rom_key][asset_key] = ''
                # TODO Refactor this to reduce indentation.
                # >> If artwork exists on disk set it on database
                if ondisk_assets_dic[rom_key][asset_key]:
                    SL_assets_dic[rom_key][asset_key] = ondisk_assets_dic[rom_key][asset_key]
                    have_count_list[idx] += 1
                    asset_row[idx] = 'YES'
                # >> If artwork does not exist on disk ...
                else:
                    # >> if machine is a parent search in the clone list
                    if rom_key in main_pclone_dic:
                        for clone_key in main_pclone_dic[rom_key]:
                            if ondisk_assets_dic[clone_key][asset_key]:
                                SL_assets_dic[rom_key][asset_key] = ondisk_assets_dic[clone_key][asset_key]
                                have_count_list[idx] += 1
                                alternate_count_list[idx] += 1
                                asset_row[idx] = 'CLO'
                                break
                    # >> if machine is a clone search in the parent first, then search in the clones
                    else:
                        # >> Search parent
                        # NOTE(review): assumes every item not in main_pclone_dic has a
                        # non-empty 'cloneof' present in ondisk_assets_dic — confirm the
                        # PClone database guarantees this.
                        parent_name = SL_roms[rom_key]['cloneof']
                        if ondisk_assets_dic[parent_name][asset_key]:
                            SL_assets_dic[rom_key][asset_key] = ondisk_assets_dic[parent_name][asset_key]
                            have_count_list[idx] += 1
                            alternate_count_list[idx] += 1
                            asset_row[idx] = 'PAR'
                        # >> Search clones
                        else:
                            for clone_key in main_pclone_dic[parent_name]:
                                if clone_key == rom_key: continue
                                if ondisk_assets_dic[clone_key][asset_key]:
                                    SL_assets_dic[rom_key][asset_key] = ondisk_assets_dic[clone_key][asset_key]
                                    have_count_list[idx] += 1
                                    alternate_count_list[idx] += 1
                                    asset_row[idx] = 'CLX'
                                    break
            table_row = [SL_name, rom_key] + asset_row
            table_str.append(table_row)
        # --- Write SL asset JSON ---
        utils_write_JSON_file(SL_asset_DB_FN.getPath(), SL_assets_dic, verbose = False)
    pDialog.endProgress()
    # Asset statistics and report.
    # This must match the order of ASSET_SL_T_LIST defined in disk_IO.py
    # Each tuple is (have, missing, alternate) for one asset type.
    _3db = (have_count_list[0], SL_item_count - have_count_list[0], alternate_count_list[0])
    Tit = (have_count_list[1], SL_item_count - have_count_list[1], alternate_count_list[1])
    Snap = (have_count_list[2], SL_item_count - have_count_list[2], alternate_count_list[2])
    Boxf = (have_count_list[3], SL_item_count - have_count_list[3], alternate_count_list[3])
    Fan = (have_count_list[4], SL_item_count - have_count_list[4], alternate_count_list[4])
    Tra = (have_count_list[5], SL_item_count - have_count_list[5], alternate_count_list[5])
    Man = (have_count_list[6], SL_item_count - have_count_list[6], alternate_count_list[6])
    pDialog.startProgress('Creating SL asset report...')
    report_slist = []
    report_slist.append('*** Advanced MAME Launcher Software List asset scanner report ***')
    report_slist.append('Total SL items {}'.format(SL_item_count))
    report_slist.append('Have 3D Boxes  {:6d} (Missing {:6d}, Alternate {:6d})'.format(*_3db))
    report_slist.append('Have Titles    {:6d} (Missing {:6d}, Alternate {:6d})'.format(*Tit))
    report_slist.append('Have Snaps     {:6d} (Missing {:6d}, Alternate {:6d})'.format(*Snap))
    report_slist.append('Have Boxfronts {:6d} (Missing {:6d}, Alternate {:6d})'.format(*Boxf))
    report_slist.append('Have Fanarts   {:6d} (Missing {:6d}, Alternate {:6d})'.format(*Fan))
    report_slist.append('Have Trailers  {:6d} (Missing {:6d}, Alternate {:6d})'.format(*Tra))
    report_slist.append('Have Manuals   {:6d} (Missing {:6d}, Alternate {:6d})'.format(*Man))
    report_slist.append('')
    table_str_list = text_render_table(table_str)
    report_slist.extend(table_str_list)
    log_info('Writing SL asset report file "{}"'.format(cfg.REPORT_SL_ASSETS_PATH.getPath()))
    utils_write_slist_to_file(cfg.REPORT_SL_ASSETS_PATH.getPath(), report_slist)
    pDialog.endProgress()
    # Update control_dic by assigment (will be saved in caller) and save JSON.
    db_safe_edit(control_dic, 'assets_SL_num_items', SL_item_count)
    db_safe_edit(control_dic, 'assets_SL_3dbox_have', _3db[0])
    db_safe_edit(control_dic, 'assets_SL_3dbox_missing', _3db[1])
    db_safe_edit(control_dic, 'assets_SL_3dbox_alternate', _3db[2])
    db_safe_edit(control_dic, 'assets_SL_titles_have', Tit[0])
    db_safe_edit(control_dic, 'assets_SL_titles_missing', Tit[1])
    db_safe_edit(control_dic, 'assets_SL_titles_alternate', Tit[2])
    db_safe_edit(control_dic, 'assets_SL_snaps_have', Snap[0])
    db_safe_edit(control_dic, 'assets_SL_snaps_missing', Snap[1])
    db_safe_edit(control_dic, 'assets_SL_snaps_alternate', Snap[2])
    db_safe_edit(control_dic, 'assets_SL_boxfronts_have', Boxf[0])
    db_safe_edit(control_dic, 'assets_SL_boxfronts_missing', Boxf[1])
    db_safe_edit(control_dic, 'assets_SL_boxfronts_alternate', Boxf[2])
    db_safe_edit(control_dic, 'assets_SL_fanarts_have', Fan[0])
    db_safe_edit(control_dic, 'assets_SL_fanarts_missing', Fan[1])
    db_safe_edit(control_dic, 'assets_SL_fanarts_alternate', Fan[2])
    db_safe_edit(control_dic, 'assets_SL_trailers_have', Tra[0])
    db_safe_edit(control_dic, 'assets_SL_trailers_missing', Tra[1])
    db_safe_edit(control_dic, 'assets_SL_trailers_alternate', Tra[2])
    db_safe_edit(control_dic, 'assets_SL_manuals_have', Man[0])
    db_safe_edit(control_dic, 'assets_SL_manuals_missing', Man[1])
    db_safe_edit(control_dic, 'assets_SL_manuals_alternate', Man[2])
    db_safe_edit(control_dic, 't_SL_assets_scan', time.time())
    utils_write_JSON_file(cfg.MAIN_CONTROL_PATH.getPath(), control_dic)
|
Wintermute0110/plugin.program.advanced.MAME.launcher
|
resources/mame.py
|
Python
|
gpl-2.0
| 403,732
|
[
"CRYSTAL"
] |
ce4761eac66bac89ccff1e43d56594df0ccb1e290daf89c2683e765d2e1ced4d
|
# +
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import warnings
from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel
from bigdl.chronos.autots.utils import recalculate_n_sampling
# -
class AutoProphet:
    # Automated hyperparameter search wrapper around a Prophet model.
    # Delegates the search to bigdl.orca's AutoEstimator and keeps the best
    # fitted model in self.best_model.

    def __init__(self,
                 changepoint_prior_scale=None,
                 seasonality_prior_scale=None,
                 holidays_prior_scale=None,
                 seasonality_mode=None,
                 changepoint_range=None,
                 metric='mse',
                 metric_mode=None,
                 logs_dir="/tmp/auto_prophet_logs",
                 cpus_per_trial=1,
                 name="auto_prophet",
                 remote_dir=None,
                 load_dir=None,
                 **prophet_config
                 ):
        """
        Create an automated Prophet Model.

        Users need to specify either the exact value or the search space of the
        Prophet model hyperparameters. For details of the Prophet model hyperparameters, refer to
        https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning.

        :param changepoint_prior_scale: Int or hp sampling function from an integer space
            for hyperparameter changepoint_prior_scale for the Prophet model.
            For hp sampling, see bigdl.chronos.orca.automl.hp for more details.
            e.g. hp.loguniform(0.001, 0.5).
        :param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the
            Prophet model.
            e.g. hp.loguniform(0.01, 10).
        :param holidays_prior_scale: hyperparameter holidays_prior_scale for the
            Prophet model.
            e.g. hp.loguniform(0.01, 10).
        :param seasonality_mode: hyperparameter seasonality_mode for the
            Prophet model.
            e.g. hp.choice(['additive', 'multiplicative']).
        :param changepoint_range: hyperparameter changepoint_range for the
            Prophet model.
            e.g. hp.uniform(0.8, 0.95).
        :param metric: String or customized evaluation metric function.
            If string, metric is the evaluation metric name to optimize, e.g. "mse".
            If callable function, its signature should be func(y_true, y_pred), where y_true
            and y_pred are numpy ndarray. The function should return a float value
            as evaluation result.
        :param metric_mode: One of ["min", "max"]. "max" means greater metric value is better.
            You have to specify metric_mode if you use a customized metric function.
            You don't have to specify metric_mode if you use the built-in metric in
            bigdl.orca.automl.metrics.Evaluator.
        :param logs_dir: Local directory to save logs and results. It defaults to
            "/tmp/auto_prophet_logs"
        :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
        :param name: name of the AutoProphet. It defaults to "auto_prophet"
        :param remote_dir: String. Remote directory to sync training results and checkpoints. It
            defaults to None and doesn't take effects while running in local. While running in
            cluster, it defaults to "hdfs:///tmp/{name}".
        :param load_dir: Load the ckpt from load_dir. The value defaults to None.
        :param prophet_config: Other Prophet hyperparameters.
        """
        # Restore a previously-saved best model if a checkpoint dir is given.
        if load_dir:
            self.best_model = ProphetModel()
            self.best_model.restore(load_dir)
        # The automl machinery is optional; without bigdl-orca[automl] only
        # the restored model (if any) is usable and fit() is unavailable.
        try:
            from bigdl.orca.automl.auto_estimator import AutoEstimator
            import bigdl.orca.automl.hp as hp
            # For each hyperparameter: use the user-supplied value/space if
            # given, otherwise fall back to a default search space.
            self.search_space = {
                "changepoint_prior_scale": hp.grid_search([0.005, 0.05, 0.1, 0.5])
                if changepoint_prior_scale is None
                else changepoint_prior_scale,
                "seasonality_prior_scale": hp.grid_search([0.01, 0.1, 1.0, 10.0])
                if seasonality_prior_scale is None
                else seasonality_prior_scale,
                "holidays_prior_scale": hp.loguniform(0.01, 10)
                if holidays_prior_scale is None
                else holidays_prior_scale,
                "seasonality_mode": hp.choice(['additive', 'multiplicative'])
                if seasonality_mode is None
                else seasonality_mode,
                "changepoint_range": hp.uniform(0.8, 0.95)
                if changepoint_range is None
                else changepoint_range
            }
            self.search_space.update(prophet_config)  # update other configs
            self.metric = metric
            self.metric_mode = metric_mode
            model_builder = ProphetBuilder()
            self.auto_est = AutoEstimator(model_builder=model_builder,
                                          logs_dir=logs_dir,
                                          resources_per_trial={"cpu": cpus_per_trial},
                                          remote_dir=remote_dir,
                                          name=name)
        except ImportError:
            warnings.warn("You need to install `bigdl-orca[automl]` to use `fit` function.")

    def fit(self,
            data,
            cross_validation=True,
            expect_horizon=None,
            freq=None,
            metric_threshold=None,
            n_sampling=16,
            search_alg=None,
            search_alg_params=None,
            scheduler=None,
            scheduler_params=None,
            ):
        """
        Automatically fit the model and search for the best hyperparameters.

        :param data: training data, a pandas dataframe with Td rows,
            and 2 columns, with column 'ds' indicating date and column 'y' indicating value
            and Td is the time dimension
        :param cross_validation: bool, if the eval result comes from cross_validation.
            The value is set to True by default. Setting this option to False to
            speed up the process.
        :param expect_horizon: int, validation data will be automatically split from training
            data, and expect_horizon is the horizon you may need to use once the model is
            fitted. The value defaults to None, where 10% of training data will be taken
            as the validation data.
        :param freq: the frequency of the training dataframe. The frequency can be anything
            from the pandas list of frequency strings here:
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
            Defaults to None, where an unreliable frequency will be inferred implicitly.
        :param metric_threshold: a trial will be terminated when metric threshold is met
        :param n_sampling: Number of trials to evaluate in total. Defaults to 16.
            If hp.grid_search is in search_space, the grid will be run n_sampling of trials
            and round up n_sampling according to hp.grid_search.
            If this is -1, (virtually) infinite samples are generated
            until a stopping condition is met.
        :param search_alg: str, all supported searcher provided by ray tune
            (i.e."variant_generator", "random", "ax", "dragonfly", "skopt",
            "hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and
            "sigopt")
        :param search_alg_params: extra parameters for searcher algorithm besides search_space,
            metric and searcher mode
        :param scheduler: str, all supported scheduler provided by ray tune
        :param scheduler_params: parameters for scheduler
        """
        # Default validation horizon: 10% of the training data.
        if expect_horizon is None:
            expect_horizon = int(0.1*len(data))
        # Infer the sampling frequency from the first two timestamps when not given.
        if freq is None:
            assert len(data) >= 2, "The training dataframe should contains more than 2 records."
            assert pd.api.types.is_datetime64_any_dtype(data["ds"].dtypes), \
                "The 'ds' col should be in datetime 64 type, or you need to set `freq` in fit."
            self._freq = data["ds"].iloc[1] - data["ds"].iloc[0]
        else:
            self._freq = pd.Timedelta(freq)
        expect_horizon_str = str(self._freq * expect_horizon)
        self.search_space.update({"expect_horizon": expect_horizon_str,
                                  "cross_validation": cross_validation})
        # Without cross-validation, hold out the last expect_horizon rows.
        train_data = data if cross_validation else data[:len(data)-expect_horizon]
        validation_data = None if cross_validation else data[len(data)-expect_horizon:]
        # Round up n_sampling to cover any grid_search entries (-1 = unlimited).
        n_sampling = recalculate_n_sampling(self.search_space,
                                            n_sampling) if n_sampling != -1 else -1
        self.auto_est.fit(data=train_data,
                          validation_data=validation_data,
                          metric=self.metric,
                          metric_mode=self.metric_mode,
                          metric_threshold=metric_threshold,
                          n_sampling=n_sampling,
                          search_space=self.search_space,
                          search_alg=search_alg,
                          search_alg_params=search_alg_params,
                          scheduler=scheduler,
                          scheduler_params=scheduler_params
                          )
        # use the best config to fit a new prophet model on whole data
        self.best_model = ProphetBuilder().build(self.auto_est.get_best_config())
        self.best_model.model.fit(data)

    def predict(self, horizon=1, freq="D", ds_data=None):
        """
        Predict using the best model after HPO.

        :param horizon: the number of steps forward to predict
        :param freq: the frequency of the predicted dataframe, defaulted to day("D"),
            the frequency can be anything from the pandas list of frequency strings here:
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
        :param ds_data: a dataframe that has 1 column 'ds' indicating date.
        """
        if self.best_model.model is None:
            raise RuntimeError(
                "You must call fit or restore first before calling predict!")
        return self.best_model.predict(horizon=horizon, freq=freq, ds_data=ds_data)

    def evaluate(self, data, metrics=['mse']):
        """
        Evaluate using the best model after HPO.

        :param data: evaluation data, a pandas dataframe with Td rows,
            and 2 columns, with column 'ds' indicating date and column 'y' indicating value
            and Td is the time dimension
        :param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]
            If callable function, its signature should be func(y_true, y_pred), where y_true
            and y_pred are numpy ndarray. The function should return a float value as
            evaluation result.
        """
        if data is None:
            raise ValueError("Input invalid data of None")
        if self.best_model.model is None:
            raise RuntimeError(
                "You must call fit or restore first before calling evaluate!")
        return self.best_model.evaluate(target=data,
                                        metrics=metrics)

    def save(self, checkpoint_file):
        """
        Save the best model after HPO.

        :param checkpoint_file: The location you want to save the best model, should be a json file
        """
        if self.best_model.model is None:
            raise RuntimeError(
                "You must call fit or restore first before calling save!")
        self.best_model.save(checkpoint_file)

    def restore(self, checkpoint_file):
        """
        Restore the best model after HPO.

        :param checkpoint_file: The checkpoint file location you want to load the best model.
        """
        self.best_model.restore(checkpoint_file)

    def get_best_model(self):
        """
        Get the best Prophet model.
        """
        return self.best_model.model
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/autots/model/auto_prophet.py
|
Python
|
apache-2.0
| 12,589
|
[
"ORCA"
] |
e5dacb2f2fd2c6e6470ee90a5b4018a33c306102755f090a5a4dcce4f0901b00
|
from __future__ import print_function
import os
import Bio
import re
import sys
from Bio import SeqIO
from Bio.Blast import NCBIXML
from Bio import Restriction
from Bio.Restriction import *
from Bio.Alphabet.IUPAC import IUPACAmbiguousDNA
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio import SeqFeature
from Bio.SeqFeature import *
import random
import itertools
import multiprocessing
import time
from collections import Counter
import operator
#import MySQLdb
from numpy import *
from pylab import *
def al_plot_complexity(list):
    '''
    Takes a list of sequences and plots library complexity: the cumulative
    fraction of unique spacer sequences (x) versus the percent of total
    reads/molecules they account for (y).

    Notes on this revision:
    - Removed dead code from the original: an unused `freqs` list (which
      appended the whole Counter instead of each item's count), an unused
      `x = xrange(...)` and a discarded `zip(y, rangelist)` call.
    - `xrange` replaced with `range` so the function also runs on Python 3
      (on Python 2 `range` is equivalent here, just not lazy).
    - `rangelist` is still published as a module-level global because other
      code may rely on it (the original declared `global rangelist`).
    - Returns None; the plot is produced as a side effect via pylab.
    '''
    global rangelist
    # Tally duplicates, then order unique sequences by ascending abundance.
    tally = Counter(list)
    counts = [count for seq, count in sorted(tally.items(), key=operator.itemgetter(1))]
    # x axis: cumulative fraction of unique sequences, 0.00 to 0.98 in 0.02 steps.
    rangelist = [a / 100.0 for a in range(0, 100, 2)]
    if not counts:
        # Empty input: nothing to plot (original raised ZeroDivisionError here).
        return
    total = float(sum(counts))
    l = len(counts)
    y = []
    for p in rangelist:
        # Take the top fraction p of unique sequences (the most abundant ones,
        # i.e. the tail of the ascending-sorted list) and compute what percent
        # of all reads they represent.
        a = int(l * p)
        y.append(sum(counts[l - a:l]) / total * 100)
    #get_ipython().magic(u'pylab inline')
    figure()
    plot(rangelist, y, "o", color="gray")
    xlabel("Cumulative fraction of unique spacers")
    ylabel("Percent of total reads (molecules) in library")
def al_string2feat(queryseq, ampsdict): #lib5pr is subjectseq; t7 is queryseq
    '''
    This function accepts a query seq and a dictionary of subjectseqs, where the key (amp)
    is contained in a field in queryseq, highlighting the location of queryseq in it.

    queryseq is indexed as queryseq[0] (a record with a .seq attribute) and
    queryseq[1] = (amp_key, loc_start, loc_end) — presumably BLAST hit
    coordinates; TODO confirm against the caller.

    Returns a SeqRecord (not a string, as the original docstring claimed) with
    one "cutsite_fwd" or "cutsite_rev" SeqFeature appended per hit location.
    '''
    subjectseq = SeqRecord(ampsdict[queryseq[1][0]])
    #for seqrecord in subjectseq:
    locstart = queryseq[1][1]
    #print queryseq
    locend = queryseq[1][2]
    fwdlocs = []
    revlocs = []
    # Figure out which strand the BLAST hit is on
    # start <= end -> forward strand; start > end -> reverse strand.
    if locstart <= locend:
        fwdlocs.append(locstart)
    if locstart > locend:
        revlocs.append(locend)
    for item in fwdlocs:
        start = ExactPosition(int(item))
        # NOTE(review): forward features span len(query)+1 bases while reverse
        # features below span exactly len(query) — possibly an off-by-one;
        # confirm which extent is intended.
        end = ExactPosition(int((item) + len(queryseq[0].seq) + 1))
        location = FeatureLocation(start, end)
        feature = SeqFeature(location,type=str("cutsite_fwd"), strand = +1)
        subjectseq.features.append(feature)
    for item in revlocs:
        start = ExactPosition(int(item))
        end = ExactPosition(start + len(queryseq[0].seq))
        location = FeatureLocation(start, end)
        feature = SeqFeature(location,type=str("cutsite_rev"), strand = -1)
        subjectseq.features.append(feature)
    #print subjectseq.features
    return subjectseq
def al_print_features(inputseq, addpamcutters, directlabel):
    '''
    Renders a SeqRecord and its features as a fixed-width ASCII map, 100 bases
    per line, and returns the whole map as one string.

    Takes 3 arguments:
    inputseq: SeqRecord to draw
    addpamcutters: int - 0 or 1. If 1, also draw HpaII/BfaI/ScrFI sites on map.
    directlabel: int, 0 or 1. Recommend 1. Changes position of feature labels (1 = on markings, 0 = below them)
    '''
    # Optional line of restriction-site markers ("<EnzymeName" at each cut).
    if addpamcutters == 1:
        cutline = list(" " * len(inputseq))
        HpaIIsites = HpaII.search(inputseq.seq)
        BfaIsites = BfaI.search(inputseq.seq)
        ScrFIsites = ScrFI.search(inputseq.seq)
        for cut in HpaIIsites:
            cutline[cut-1:cut + len("HpaII")] = "<HpaII"
        for cut in BfaIsites:
            cutline[cut-1:cut + len("BfaI")] = "<BfaI"
        for cut in ScrFIsites:
            cutline[cut-1:cut + len("ScrFI")] = "<ScrFI"
        cutline = "".join(cutline)
    # Marker strands: a ruler of "---------^" repeated every 10 bases. A new
    # strand is appended when a feature would overwrite an existing marking.
    mask = [list((("-" * 9) + "^" )* int(round(len(inputseq.seq)/10.0)))]
    newmaskline = list((("-" * 9) + "^" )* int(round(len(inputseq.seq)/10.0)))
    for feature in inputseq.features:
        # Make a new marker strand if any features overlap. All marker strands can be elements of a list.
        featstart = int(feature.location.start)
        featend = int(feature.location.end)
        if featstart > featend:
            print("Error! Feature end must be after feature start. Use strand to specify direction! Feature " + feature.type + " will not be displayed!")
        #if "<" in mask[-1][featstart:featend] or ">" in mask[-1][featstart:featend]:
        #mask.append(newmaskline)
        # clean == 1 while only untouched ruler chars ("-"/"^") are seen; any
        # other char means another feature already occupies this span, so a
        # fresh strand is appended. NOTE(review): the same `newmaskline` list
        # object is appended each time; this is safe only because mask[-1] is
        # always rebound (never mutated in place) below — confirm if editing.
        clean = 0
        for item in mask[-1][featstart:featend]:
            if item == "-":
                clean = 1
            elif item == "^":
                clean = 1
            else:
                clean = 0
                mask.append(newmaskline)
                break
        #print mask[-1][0:50]
        # Draw the feature as ">"/"<" runs; with directlabel the type name is
        # written directly on top of the run.
        # NOTE(review): the slices use featstart-1 — for a feature starting at
        # position 0 this becomes [:-1] and wraps; possible off-by-one, confirm.
        if feature.strand == 1:
            mask[-1] = mask[-1][:featstart-1] + [">"] * int(featend - featstart + 1) + mask[-1][featend:]
            if directlabel == 1:
                mask[-1] = mask[-1][:featstart] + list(str(feature.type)) + mask[-1][featstart + len(str(feature.type)):]
        if feature.strand == -1:
            mask[-1] = mask[-1][:featstart-1] + ["<"] * int(featend+1 - featstart) + mask[-1][featend:]
            if directlabel == 1:
                mask[-1] = mask[-1][:featstart+1] + list(str(feature.type)) + mask[-1][featstart+2 + len(str(feature.type)):]
    #if addpamcutters = 1:
    #cutline = list(" " * len(inputseq)
    #HpaIIsites = HpaII.search(inputseq.seq)
    # Join each marker strand into a string.
    for index, maskline in enumerate(mask):
        maskline = "".join(maskline)
        mask[index] = maskline
    # add labels
    # With directlabel == 0 a separate label line is built below the markers.
    if directlabel == 0:
        masklab = list(" " * (len(inputseq.seq)))
        for feature in inputseq.features:
            featstart = int(feature.location.start)
            featend = int(feature.location.end)
            featname = str(feature.type)  # assigned but unused; label uses feature.type below
            masklab = masklab[:featstart] + list(str(feature.type)) + list(" " * (featend-1 - featstart - len(feature.type))) + masklab[featend-1:]
        masklab = "".join(masklab)
    # Emit the map 100 bases per line: optional cutter line, sequence line,
    # every marker strand, optional label line, then a blank separator.
    lines = int(round(len(inputseq.seq) / 100)) + 1
    i = 0
    outstring = list(inputseq.name + "\n")  # NOTE: immediately discarded by the next line (kept from original)
    #print inputseq.name
    outstring = []
    while i < lines:
        indexstart = i*100
        indexend = (i+1) * 100
        if indexend > len(inputseq.seq):
            indexend = len(inputseq.seq)
        if addpamcutters ==1:
            outstring.extend((str(indexstart + 1)) + " " + cutline[indexstart:indexend] + " " + str(indexend)+ "\n")
            #print (str(indexstart + 1)) + " " + cutline[indexstart:indexend] + " " + str(indexend)
        outstring.extend(str(indexstart+1) + " " + inputseq.seq[indexstart:indexend] + " " + str(indexend)+ "\n")
        #print str(indexstart+1) + " " + inputseq.seq[indexstart:indexend] + " " + str(indexend)
        for maskline in mask:
            outstring.extend((str(indexstart + 1)) + " " + maskline[indexstart:indexend] + " " + str(indexend)+ "\n")
            #print (str(indexstart + 1)) + " " + maskline[indexstart:indexend] + " " + str(indexend)
        if directlabel == 0:
            outstring.extend(str(indexstart +1) + " " + masklab[indexstart:indexend] + " " + str(indexend)+ "\n")
            #print str(indexstart +1) + " " + masklab[indexstart:indexend] + " " + str(indexend)
        outstring.extend("\n")
        #print "\n"
        i = i + 1
    outstring = "".join(outstring)
    return outstring
def al_digesttarget(list_of_targets, process_to_file=0):
    '''
    Digest substrate sequences into candidate CRISPR spacer 20mers.

    list_of_targets: a list of SeqRecords (i.e. scaffolds, or whatever).
    process_to_file [optional]: an integer; if 1 will avoid building the list in
    memory and will write FASTA records to "genome_allspacers.fa" instead
    (returning None). Defaults to 0, building the list in memory and returning
    a flat iterator of SeqRecords.

    Output records carry their information as follows:
    ID is a count of the potential spacer along the scaffold, starting from
    zero, suffixed _F/_R for the forward/reverse strand.
    Name is the base position on the scaffold.
    Description is the enzyme that generated the spacer-end (ScrFI/HpaII/BfaI).
    dbxrefs is the name the scaffold was given in the input SeqRecord.
    '''
    if process_to_file == 1:
        # Truncate the output file once up front; each scaffold then appends.
        # (The original opened it unconditionally, clobbering the file even in
        # in-memory mode and leaking the handle.)
        open("genome_allspacers.fa", "w").close()
    # NOTE(review): `substrate` was declared global in the original; retained
    # in case downstream code inspects it after this call.
    global substrate
    scaffoldcuts = []
    for substrate in list_of_targets:
        cutslist = []
        # Digest with each enzyme. Restriction .search() returns cut positions,
        # which here mark the right boundary of each fragment; the sequence end
        # is appended as the final boundary. Enzyme order (HpaII, BfaI, ScrFI)
        # is preserved so record IDs match the original implementation.
        for enzyme, enzyme_name in ((HpaII, "HpaII"), (BfaI, "BfaI"), (ScrFI, "ScrFI")):
            boundaries = enzyme.search(substrate.seq)
            boundaries.append(len(substrate.seq))
            boundaries = iter(boundaries)
            for fragment in enzyme.catalyze(substrate.seq):
                # next(boundaries) rather than boundaries.next(): works on
                # Python 2.6+ and Python 3.
                cutslist.append([fragment, enzyme_name, int(next(boundaries))])
        # The above gets the results of a catalyze operation (i.e. tuples) into
        # a list format. Next part makes them into SeqRecords.
        cutslistrecords = []
        for i, item in enumerate(cutslist):
            cutslistrecords.append(SeqRecord(item[0], id=str(i), description=str(item[1]), name=str(item[2]), dbxrefs=[str(substrate.id)]))
        cutslist = cutslistrecords[:]
        # This part takes the 3' 20nt of each fragment and makes a new sequence with it.
        # For the 5' end, the Mung-Bean treatment is simulated by removing two more nt
        # (for HpaII and BfaI), or one nt for ScrFI; these would be the 5' overhang.
        # Then we take the reverse-complement of the sequence.
        # The Restriction module just returns sequences as if the top strand only was
        # being cut. In other words, no bases are deleted from consecutive fragments.
        twentymers = []
        for record2 in cutslist:
            try:
                # A second run of this code on already-mutable seqs seems to
                # fail; ignore that case rather than reverting to non-mutables.
                record2.seq = record2.seq.tomutable()
            except:
                pass
            if record2.description == "ScrFI":
                # Offset (1:21) simulates MBN digestion of ScrFI's 1-nt 5'
                # overhang. Because entry.names are rooted on the right of each
                # fragment, the fragment length is subtracted to get the
                # desired left position for the "reverse" targets.
                entry = record2[1:21].reverse_complement(description=True, id=True, name=True)
                entry.name = str(int(record2.name) + 1 - len(record2.seq))
            else:
                # HpaII/BfaI leave a 2-nt 5' overhang, hence offset 2:22.
                entry = record2[2:22].reverse_complement(description=True, id=True, name=True)
                entry.name = str(int(record2.name) + 2 - len(record2.seq))
            entry.id = str("%s_R" % record2.id)
            twentymers.append(entry)
            # Forward-strand 20mer: the 3'-most 20 nt of the fragment.
            record2.seq = record2.seq.toseq()
            record2.id = str("%s_F" % record2.id)
            entry = record2[-20:]
            entry.name = str(int(record2.name) - 20)
            twentymers.append(entry)
        for item in twentymers:
            item.dbxrefs = [substrate.id]
        # The ends of the fragments aren't bonafide CRISPR targets; drop the
        # first and last 20mer for each enzyme.
        noends = []
        for enzyme_name in ("HpaII", "BfaI", "ScrFI"):
            per_enzyme = [item for item in twentymers if item.description == enzyme_name]
            noends.append(per_enzyme[1:-1])
        if process_to_file != 1:
            scaffoldcuts.append(itertools.chain.from_iterable(noends))
        else:
            with open("genome_allspacers.fa", "a") as f:
                for item in itertools.chain.from_iterable(noends):
                    f.write(">lcl|" + str(substrate.id) + "|" + str(item.description) + "|" + str(item.name) + "|" + str(item.id) + "\n")
                    f.write(str(item.seq) + "\n")
    if process_to_file != 1:
        # chain.from_iterable flattens the per-scaffold generators into a
        # single stream of SeqRecords.
        return itertools.chain.from_iterable(scaffoldcuts)
def al_scoreguides(guides, genome, genomedict, hits=50, return_blast =1):
    '''
    To produce a score for a given guide in a given BLAST database. Annotates each
    guide SeqRecord with its score (higher = fewer/weaker off-targets).
    Only evaluates a max of 500 hit sequences (i.e. really bad guides will not necessarily have a super accurate score.)
    This version only searches the plus strand of the BLAST db, meant to be used with PAM BLAST DBs generated such that
    all potential guide hits are on the plus strand.
    Arguments: guides = a SeqRecord object containing a proposed guide sequence (or a list of SeqRecords)
               genome = a BLAST database set up on this machine.
               genomedict = a dict made from a FASTA file identical to the one used to make the BLAST DB. Dict keys should be
               BLAST db hit_def.
               hits = NOTE(review): currently unused -- TODO confirm intent.
               return_blast = if 1, attach the per-hit score details to each guide's
               annotations["blastdata"].
    Returns: the input guides, annotated in place with "score", "blast_filesize",
             "order_in_xml" and optionally "blastdata".
    '''
    if isinstance(guides, list) == False:
        guides = [guides]
    # name is unused beyond this point -- presumably diagnostic leftover from
    # multiprocessing runs; TODO confirm.
    name = multiprocessing.current_process().name
    # M: per-position mismatch penalty weights (position 1..20, PAM-distal to
    # PAM-proximal) for the Zhang-lab off-target scoring algorithm used below.
    M = [0, 0, 0.014, 0, 0, 0.395, 0.317, 0 ,0.389, 0.079, 0.445, 0.508, 0.613, 0.851, 0.732, 0.828, 0.615, 0.804, 0.685, 0.583]
    #import random
    #random.jumpahead(1)
    #filesuffix = str(int(random.random()*100000000))
    # Temp filenames are keyed on the first guide's id so parallel workers
    # don't collide.
    filesuffix = str(guides[0].id)
    filename = str("currseq" + filesuffix + ".tmp")
    Bio.SeqIO.write(guides, filename, "fasta")
    # Added dust="no" to stop inflating scores from polyN repeats...
    blastn_cline = NcbiblastnCommandline(query=filename, db=genome, task = "blastn-short",outfmt=5, out=filename + ".blast", max_target_seqs=100, num_threads = 7, evalue = 10, dust="no")
    #timeit.timeit(blastn_cline, number =1)
    blastn_cline()
    result_handle = open(filename + ".blast")
    blasts = NCBIXML.parse(result_handle)
    # This generates an generator of BLAST objects, each corresponding to a guide query. Loop through those.
    for guideindex, item in enumerate(blasts):
        firstmatch = 0
        # notfound/pamlist are written but never read -- leftover bookkeeping.
        notfound = 1
        scorelist = []
        pamlist = []
        # Within each queried guide, loop through the aligned scaffolds
        for scaffold in item.alignments:
            # Within each aligned scaffold, loop through invidivual HSPs on that scaffold.
            for pair in scaffold.hsps:
                # Distance from the end of the HSP to the guide's 3' end;
                # assumes all guides are the same length as guides[0] -- TODO confirm.
                hit_threeprime_offset = len(guides[0]) - pair.query_end
                start = pair.sbjct_start
                end = pair.sbjct_end
                # Locate the 3 nt immediately 3' of the hit (the putative PAM),
                # on whichever strand the hit lies.
                if end > start:
                    pamstart = end + hit_threeprime_offset
                    pam = genomedict[scaffold.hit_def][pamstart:pamstart+3]
                elif start > end:
                    pamstart = end - hit_threeprime_offset
                    pam = genomedict[scaffold.hit_def][pamstart-4:pamstart-1].reverse_complement()
                # Construct a bar-based match string, padded to query length,
                # where bar = match position and space is non-match
                mmloc = []
                mmstr = list(pair.match)
                if pair.query_start > 1:
                    mmstr = list(" " * (pair.query_start - 1)) + mmstr
                if pair.query_end < 20:
                    mmstr = mmstr + list(" " * (20 - pair.query_end))
                mmstr = "".join(mmstr)
                # Test for PAM adjacency: NGG or NAG (case-insensitive).
                if len(pam) == 3:
                    if (pam[1] == "G" or pam[1] == "A" or pam[1] == "g" or pam[1] == "a") and (pam[2] == "G" or pam[2] == "g"):
                        # 17-19 matching bases: score the off-target by the
                        # Zhang lab mismatch-position algorithm.
                        if (pair.positives >16 and pair.positives < 20):
                            # Collect mismatch positions, counted 20 (5') down to 1 (3').
                            pos = 20
                            for linestring in mmstr:
                                if linestring != "|":
                                    mmloc.append(pos)
                                pos = pos - 1
                            # Actually implement Zhang lab algorithm
                            mmscore = [21 -x for x in mmloc]
                            # t1: product of per-position weights for each mismatch.
                            t1 = 1
                            for mismatchlocation in mmscore:
                                t1 = t1 * (1.0 - float(M[mismatchlocation - 1]))
                            # d: mean pairwise distance between mismatches.
                            if len(mmscore) > 1:
                                d = (float(max(mmscore)) - float(min(mmscore))) / float((len(mmscore) - 1))
                            else:
                                d = 19
                            t2 = 1 / ( (((19.0 - d)/19.0) * 4) + 1)
                            t3 = float(1)/ float(pow(len(mmloc), 2))
                            scorelist.append({"match_score": float(t1 * t2 * t3 * 100), "scaffold": scaffold.hit_def, "hit_location":pair.sbjct_start, "hit_sequence": pair.sbjct, "pam":str(pam.seq), "match_bars":mmstr})
                            #pamlist.append(pam)
                        # Zhang lab algorithm doesn't handle perfect matches (I think?): give it a 50 if it's perfect
                        # (the first perfect match is assumed to be the on-target
                        # site itself and is not counted).
                        if pair.positives >= 20: #changed from == 20; miight be worth keeping in mind for bugs
                            if firstmatch != 0:
                                scorelist.append({"match_score": float(50), "scaffold": scaffold.hit_def, "hit_location":pair.sbjct_start, "hit_sequence": pair.sbjct, "pam":str(pam.seq), "match_bars":mmstr})
                                #pamlist.append(pam)
                            firstmatch = 1
                            notfound = 0
                    else:
                        # Hit has no adjacent PAM: contributes zero to the score.
                        scorelist.append({"match_score": float(0), "scaffold": scaffold.hit_def, "hit_location":pair.sbjct_start, "hit_sequence": pair.sbjct, "pam":str(pam.seq), "match_bars":mmstr})
                        #pamlist.append(pam)
        # Only include the scorelist items that have a score greater than zero
        if return_blast == 1:
            guides[guideindex].annotations["blastdata"] = [s for s in scorelist if s["match_score"] > 0]
        # Aggregate: more/stronger off-target hits -> lower final score (max 100).
        finalscore = int(10000.000 / (100.000 + float(sum(item["match_score"] for item in scorelist))))
        guides[guideindex].annotations["score"] = finalscore
        guides[guideindex].annotations["blast_filesize"] = os.path.getsize(filename + ".blast")
        guides[guideindex].annotations["order_in_xml"] = guideindex
    result_handle.close()
    os.remove(filename) # want to keep BLAST results? comment out these two lines.
    #os.remove(filename + ".blast")
    #return hits
    return guides
def db_connect(dbname):
    '''
    Open (or create) the sqlite database used to store guide scores.

    dbname: the filename stem of the sqlite db (".db" is appended). Can be an
    existing database, in which case new entries are appended; otherwise a new
    db with this name is created and the tables are set up.
    Returns: (cursor, connection) tuple; all rows fetched through the cursor
    come back as {column_name: value} dicts.
    '''
    import sqlite3

    def dict_factory(cursor, row):
        # Map a raw row tuple to {column_name: value} via the cursor description.
        return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}

    # Connection/Cursor subclasses so every cursor produced by this connection
    # yields dict rows. (Class name keeps the original's spelling for
    # compatibility with any external references.)
    class DictConnnection(sqlite3.Connection):
        def __init__(self, *args, **kwargs):
            sqlite3.Connection.__init__(self, *args, **kwargs)

        def cursor(self):
            return DictCursor(self)

    class DictCursor(sqlite3.Cursor):
        def __init__(self, *args, **kwargs):
            sqlite3.Cursor.__init__(self, *args, **kwargs)
            self.row_factory = lambda cur, row: dict_factory(self, row)

    db_path = str(dbname + '.db')
    if os.path.isfile(db_path):
        print("Database " + dbname + ".db exists. Appending to this database.")
        just_made = 0
    else:
        print("Creating " + dbname + ".db.")
        just_made = 1
    con = sqlite3.connect(db_path, factory=DictConnnection)
    con.row_factory = dict_factory
    cur = con.cursor()
    if just_made == 1:
        try:
            cur.execute('''CREATE TABLE scores(sequence text, genome text, score real, version real, id integer PRIMARY KEY)''')
            cur.execute('''CREATE UNIQUE INDEX seq_genome ON scores(sequence, genome, version) ''')
            cur.execute('''CREATE TABLE locations(id integer, sequence text, genome text, loc_start integer, scaffold text, enzyme text, rel_name text, PRIMARY KEY(loc_start, enzyme, genome))''')
            print("Creating tables.")
        except sqlite3.OperationalError:
            # Narrowed from a bare except: "table already exists" is the only
            # expected failure mode here.
            print("Tables already set up.")
    return cur, con
def db_add_guide(cur, con, guide, genome, version):
    '''
    Insert a guide's score and genomic location into the sqlite guide-score DB.

    cur, con: cursor/connection pair from db_connect (rows fetch as dicts).
    guide: a SeqRecord-like object; reads .seq, .name (scaffold position),
           .dbxrefs[0] (scaffold), .description (enzyme), .id (relative name)
           and .annotations['score'].
    genome: name of the genome the score was computed against.
    version: scoring-strategy version tag.
    Returns None. Duplicate scores/locations are silently ignored (the tables'
    unique constraints enforce one row per key).
    '''
    import sqlite3
    seq = str(guide.seq)
    # Parameterized queries (rather than string .format) avoid SQL injection
    # and quoting problems with arbitrary sequence/description text.
    select_score = "SELECT * FROM scores WHERE sequence = ? AND genome = ? AND version = ?"
    data = []
    # See if the sequence/genome/version combo is already in the database.
    try:
        cur.execute(select_score, (seq, genome, version))
        data = cur.fetchall()
    except sqlite3.Error:
        pass
    # If it's not, add it and re-fetch to obtain its id.
    if len(data) == 0:
        try:
            cur.execute("INSERT INTO scores(sequence, genome, score, version) VALUES(?, ?, ?, ?)",
                        (seq, genome, guide.annotations['score'], version))
            con.commit()
            cur.execute(select_score, (seq, genome, version))
            data = cur.fetchall()
        except (sqlite3.Error, KeyError):
            print("Error: Couldn't find or deposit score" + str(len(data)))
    # Using the score row's ID, record where on the genome this guide occurs.
    # The locations PRIMARY KEY (loc_start, enzyme, genome) makes duplicate
    # deposits fail, which we deliberately ignore.
    try:
        cur.execute("INSERT INTO locations(id, sequence, genome, loc_start, scaffold, enzyme, rel_name) VALUES (?, ?, ?, ?, ?, ?, ?)",
                    (data[0]["id"], seq, genome, guide.name, guide.dbxrefs[0], guide.description, guide.id))
        con.commit()
    except (sqlite3.Error, IndexError):
        pass
    return None
def guide2DB(guide, genome, log_blast = 0, relativeserver="localhost", version=2):
    '''
    Enter a scored SeqRecord into the MySQL "guidescores" database.

    guide: SeqRecord carrying annotations['score'], ['blast_filesize'],
           ['order_in_xml'] and (optionally) ['blastdata'] from al_scoreguides.
    genome: name of the genome/BLAST db the score was computed against.
    log_blast: if 1, also deposit the per-hit BLAST details into `blastdata`.
    relativeserver: MySQL host to connect to.
    version: optional tag for distinguishing multiple scoring strategies in
             the database.
    Returns: the matching row(s) from `scores` (tuple of dicts).
    '''
    con = MySQLdb.connect(relativeserver, 'root', 'pass', 'guidescores')
    cur = con.cursor(MySQLdb.cursors.DictCursor)
    seq = str(guide.seq)
    # Parameterized queries (%s placeholders) instead of string .format:
    # avoids SQL injection and quoting bugs with arbitrary sequence text.
    # Insert a new unique-score row; fails if there's already an entry for the
    # same sequence/genome/version, which is fine.
    try:
        cur.execute(
            "INSERT INTO scores(sequence, genome, score, version, filesize, order_in_xml) "
            "VALUES(%s, %s, %s, %s, %s, %s)",
            (seq, genome, guide.annotations['score'], version,
             guide.annotations['blast_filesize'], guide.annotations['order_in_xml']))
        con.commit()
    except MySQLdb.Error:
        # Duplicate row: reuse the existing entry below.
        pass
    # Pull the specified score record out to grab its ID.
    cur.execute("SELECT * FROM scores WHERE sequence = %s AND genome = %s AND version = %s",
                (seq, genome, version))
    con.commit()
    data = cur.fetchall()
    # Using its ID, add into locations a reference for this score.
    try:
        cur.execute(
            "INSERT INTO locations(id, sequence, genome, loc_start, scaffold, enzyme, rel_name) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s)",
            (data[0]["id"], seq, genome, guide.name, guide.dbxrefs[0],
             guide.description, guide.id))
        con.commit()
    except MySQLdb.Error:
        # Duplicate location (same loc_start/enzyme/genome): ignore.
        pass
    # Insert the annotation blast data if present.
    if log_blast == 1:
        blastdata = guide.annotations.get("blastdata", [])
        if len(blastdata) > 0:
            rows = [(int(data[0]["id"]), item["hit_sequence"], item["match_bars"],
                     item["scaffold"], item["hit_location"], str(item["match_score"]),
                     item["pam"], genome)
                    for item in blastdata]
            try:
                # executemany with placeholders replaces the original
                # hand-concatenated multi-row VALUES string.
                cur.executemany(
                    "INSERT INTO blastdata (id, hit_sequence, match_bars, scaffold, "
                    "hit_location, match_score, pam, genome) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)", rows)
            except MySQLdb.Error as e:
                print("%s" % e)
        con.commit()
    return data
class ProgressBar:
    """Simple single-line terminal progress bar.

    Renders as '[****    50%    ] 5 of 10 complete', redrawn in place by
    animate() via a carriage return.
    """

    def __init__(self, iterations):
        # Total number of iterations the bar represents.
        self.iterations = iterations
        self.prog_bar = '[]'
        self.fill_char = '*'
        self.width = 50
        self.__update_amount(0)

    def animate(self, iter):
        """Redraw the bar in place, then advance to iteration iter + 1."""
        print('\r', self, end='')
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def update_iteration(self, elapsed_iter):
        """Rebuild the bar for elapsed_iter completed iterations."""
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        # Rebuild the bar body (filled region + padding between brackets),
        # then splice the percentage text into its middle.
        percent_done = int(round((new_amount / 100.0) * 100.0))
        inner_width = self.width - 2
        filled = int(round((percent_done / 100.0) * inner_width))
        bar = '[%s%s]' % (self.fill_char * filled, ' ' * (inner_width - filled))
        pct_string = '%d%%' % percent_done
        pct_place = (len(bar) // 2) - len(str(percent_done))
        self.prog_bar = bar[:pct_place] + pct_string + bar[pct_place + len(pct_string):]

    def __str__(self):
        return str(self.prog_bar)
def dbpresencecheck(guide, genome, con, cur, version="0"):
    """Return 1 if a score for this guide/genome/version is already stored, else 0.

    Works with any DB-API cursor that exposes execute/fetchall; any database
    error (missing table, dead connection) is treated as "not present".
    """
    query = "SELECT * FROM scores WHERE sequence = '{}' AND genome = '{}' AND version = '{}'".format(guide.seq, genome, version)
    try:
        cur.execute(query)
        rows = cur.fetchall()
    except:
        return 0
    return 1 if len(rows) > 0 else 0
def dbpresencecheck_location(guide, genome, con, cur):
    """Return 1 if this guide's location is already stored in `locations`, else 0.

    Matches on the columns as written by db_add_guide/guide2DB: rel_name is the
    guide's relative name (guide.id, e.g. "5_F") and scaffold is the scaffold
    name (guide.dbxrefs[0]). The original passed these two swapped, so it could
    never match a deposited row; fixed here.
    Any database error is treated as "not present".
    """
    try:
        cur.execute("SELECT * FROM locations WHERE rel_name='{}' AND genome = '{}' AND scaffold='{}'" .format(guide.id, genome, guide.dbxrefs[0]))
        if len(cur.fetchall()) > 0:
            return 1
        else:
            return 0
    except:
        return 0
class Amplicon:
    '''
    A complete Amplicon for use in EATING; the class contains methods for designing
    amplifying primers and describing the guides within it.

    NOTE(review): relies on module-level globals set elsewhere in the file
    (scores_and_details, genomename, chromosome, annotchrom) -- TODO confirm
    they are populated before instantiation.

    properties:
    .start, .end: count of runs along chromosome
    .fiveprimeabut, .threeprimeabut: count of guides along chromosome that abut good guides
    .guides: list of good guides in run
    .length: permissible region distance
    .guidecount: number of guides in list (=len(self.guides))
    .genomename: genomename
    .chromosome
    .left_outside: the id of the guide to the left of the outside
    .left_inside: ...
    .permissible_start: absolute numbering vs chromosome
    .required_start_absolute: absolute numbering vs chromosome
    .required_start_relative
    .permissible_region
    '''
    def __init__(self, run):
        # run is a (start, end) pair of indexes into scores_and_details
        # delimiting a run of good guides.
        self.start = run[0]
        self.end = run[1]
        try:
            # Guides immediately flanking the run.
            self.fiveprimeabut = scores_and_details[self.start-1]
            self.threeprimeabut= scores_and_details[self.end]
        except:
            # Run touches the chromosome edge: no abutting guide.
            # NOTE(review): these string placeholders lack .name, so .length
            # below would raise in this case -- TODO confirm intended.
            self.fiveprimeabut = "0"
            self.threeprimeabut = "None"
        self.guides = scores_and_details[self.start:self.end]
        self.length = int(self.threeprimeabut.name)-int(self.fiveprimeabut.name)
        self.guidecount = self.end - self.start
        self.genomename = genomename
        self.chromosome = chromosome
    # Define the sequences necessary
    def pullsequences(self):
        # F/R suffix of the abutting guide and of the outermost guides in the
        # run determine how far the primer-permissible and required regions
        # extend; the numeric offsets below encode the enzyme cut-site
        # geometry for each orientation pairing -- assumed from the EATING
        # design, TODO confirm against the paper/protocol.
        self.left_outside = self.fiveprimeabut.id[-1]
        self.left_inside = self.guides[0].id[-1]
        if self.left_outside == "F" and self.left_inside == "R":
            self.permissible_start = int(self.fiveprimeabut.name) + 10
            self.required_start_absolute = int(self.guides[0].name) +14
        elif self.left_outside == "R" and self.left_inside == "R":
            self.permissible_start = int(self.fiveprimeabut.name) + 1
            self.required_start_absolute = int(self.guides[0].name) +14
        elif self.left_outside == "R" and self.left_inside == "F":
            self.permissible_start = int(self.fiveprimeabut.name) + 1
            self.required_start_absolute = int(self.guides[0].name) +18
        elif self.left_outside == "F" and self.left_inside == "F":
            self.permissible_start = int(self.fiveprimeabut.name) + 10
            self.required_start_absolute = int(self.guides[0].name) +18
        else:
            print("error on left")
        # (fiveprimeabuttingguide, threeprimeabuttingguide), end-start, scores_and_details[start:end]))
        #self.right_inside = item[2][-1][1].id[-1]
        self.right_inside = self.guides[-1].id[-1]
        self.right_outside = self.threeprimeabut.id[-1]
        if self.right_outside == "F" and self.right_inside == "R":
            self.permissible_end = int(self.threeprimeabut.name) + 19
            self.required_end_absolute = int(self.guides[-1].name) + 2
        elif self.right_outside == "R" and self.right_inside == "F":
            self.permissible_end = int(self.threeprimeabut.name) + 10
            self.required_end_absolute = int(self.guides[-1].name) + 8
        elif self.right_outside == "R" and self.right_inside == "R":
            self.permissible_end = int(self.threeprimeabut.name) + 10
            self.required_end_absolute = int(self.guides[-1].name) + 2
        elif self.right_outside == "F" and self.right_inside == "F":
            self.permissible_end = int(self.threeprimeabut.name) + 19
            self.required_end_absolute = int(self.guides[-1].name) + 8
        else:
            print("error on right")
        # Slice of the (annotated) chromosome in which primers may bind.
        self.permissible_region = annotchrom[self.permissible_start:self.permissible_end]
        # Bounds that need to be included in PCR product :
        self.required_start_relative = self.required_start_absolute-self.permissible_start
        self.required_end_relative = self.required_end_absolute - self.permissible_start
        #self.amp.dbxrefs=((self.required_start_relative, self.required_end_relative))
        # Set up some other stuff: stash position/guide-count metadata on the
        # SeqRecord so downstream primer design can read it.
        self.permissible_region.name =str(self.fiveprimeabut.name)
        self.permissible_region.id =str(self.fiveprimeabut.name)
        self.permissible_region.description=str(self.guidecount)
        self.permissible_region.seq.alphabet = IUPACAmbiguousDNA()
'''
The following three functions are used to design primers
'''
def al_collect_good_primers(template, primerdict):
    '''
    Screen primer3's candidate primer pairs for a template and record the first
    pair that passes the BLAST screen (al_screen_primer) into primerlist.txt.

    template: SeqRecord the primers were designed against (.name/.description
              and len(.seq) are written to the output row).
    primerdict: dict of primer3 BoulderIO output (from al_primersearch).
    Returns None; results are appended to "primerlist.txt" as tab-separated rows.
    '''
    i = 0
    # badlist is never used -- leftover bookkeeping.
    badlist = []
    # NOTE(review): if primer3 returned zero pairs, the post-loop check below
    # raises NameError (left_bad/right_bad unbound), which the broad except
    # converts into the "Primer3 failed" message -- intentional-looking but
    # fragile; TODO confirm.
    try:
        while i < primerdict["PRIMER_PAIR_NUM_RETURNED"]:
            bad = 0
            # Pull the i-th candidate pair's sequence/position/quality fields.
            # PRIMER_LEFT_i is "start,length".
            leftprimer = primerdict[str("PRIMER_LEFT_" + str(i) + "_SEQUENCE")]
            leftprimer_start = str(primerdict[str("PRIMER_LEFT_"+ str(i))].split(",")[0])
            leftprimer_length = str(primerdict[str("PRIMER_LEFT_"+ str(i))].split(",")[1])
            leftprimer_gc = str(primerdict[str("PRIMER_LEFT_"+ str(i) + "_GC_PERCENT")])
            leftprimer_tm = str(primerdict[str("PRIMER_LEFT_"+ str(i) + "_TM")])
            rightprimer = primerdict[str("PRIMER_RIGHT_" + str(i) + "_SEQUENCE")]
            rightprimer_start = str(primerdict[str("PRIMER_RIGHT_"+ str(i))].split(",")[0])
            rightprimer_length = str(primerdict[str("PRIMER_RIGHT_"+ str(i))].split(",")[1])
            rightprimer_gc = str(primerdict[str("PRIMER_RIGHT_"+ str(i) + "_GC_PERCENT")])
            rightprimer_tm = str(primerdict[str("PRIMER_RIGHT_"+ str(i) + "_TM")])
            product_len = int(rightprimer_start) + int(rightprimer_length) - int(leftprimer_start)
            # BLAST-screen both primers for off-target binding.
            left_bad = al_screen_primer(leftprimer)
            right_bad = al_screen_primer(rightprimer)
            #print bad
            if left_bad == 0 and right_bad == 0:
                # First clean pair wins: write it out and stop.
                with open("primerlist.txt", "a") as primerlist:
                    primerlist.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \
                    (template.name,leftprimer,leftprimer_start,leftprimer_length,leftprimer_tm,leftprimer_gc,\
                    rightprimer,rightprimer_start,rightprimer_length,rightprimer_tm,rightprimer_gc,\
                    len(template.seq),str(product_len),template.description))
                    # Redundant: the with-block already closes the file.
                    primerlist.close()
                print("Success!")
                break
            if left_bad ==1:
                print("iteration" + str(i) + "left primer" + leftprimer + "is bad")
            if right_bad == 1:
                print("iteration" + str(i) + "right primer" + rightprimer + "is bad")
            i = i +1
        # Only reports all-bad when the *last* candidate pair had both primers
        # bad -- earlier mixed failures fall through silently.
        if left_bad ==1 and right_bad ==1 and i ==primerdict["PRIMER_PAIR_NUM_RETURNED"]:
            with open("primerlist.txt", "a") as primerlist:
                primerlist.write("All the primers were bad for this amplicon!\n")
                primerlist.close()
    except:
        # Reached when primer3 returned nothing (missing dict keys / NameError).
        with open("primerlist.txt", "a") as primerlist:
            primerlist.write("Primer3 failed to find any primers for this amplicon! " + primerdict["SEQUENCE_PRIMER_PAIR_OK_REGION_LIST"] + "\n")
            primerlist.close()
        print("Primer3 failed to find any primers for this amplicon! " + primerdict["SEQUENCE_PRIMER_PAIR_OK_REGION_LIST"] + "\n")
        print(sys.exc_info())
def al_screen_primer(primer):
    '''
    BLAST a candidate primer against the "xl71" database and flag promiscuous ones.

    primer: the primer sequence as a string.
    Returns 1 if the primer has too many near-matches in the genome
    (empirical cutoffs: >40 hits with >14 matching bases, or >10 hits with
    >15 matching bases), otherwise 0.
    Side effects: writes "currprimer.fa" and "primerblast.tmp" in the cwd.
    '''
    # Write the primer as a one-record FASTA query (with-block closes the file
    # even on error; the original leaked the handle).
    with open("currprimer.fa", "w") as currfile:
        currfile.write(">" + str(primer) + "\n")
        currfile.write(str(primer))
    blastn_cline = NcbiblastnCommandline(query="currprimer.fa", db="xl71", \
    task = "blastn-short", outfmt=5, out="primerblast.tmp", max_target_seqs=100, num_threads = 8)
    # (Removed the original's bare `blastn_cline` expression line -- a no-op.)
    blastn_cline()
    # Parse data; single query, so NCBIXML.read (not .parse) is correct.
    with open("primerblast.tmp") as result_handle:
        blast_record = NCBIXML.read(result_handle)
    # Count HSPs with more than 14 / more than 15 matching bases in one pass
    # (the original made two identical sweeps).
    match14 = 0
    match15 = 0
    for alignment in blast_record.alignments:
        for hsp in alignment.hsps:
            if hsp.positives > 14:
                match14 = match14 + 1
            if hsp.positives > 15:
                match15 = match15 + 1
    # Apply the empirical promiscuity cutoffs.
    badprimer = 0
    if match14 > 40:
        badprimer = 1
    elif match15 > 10:
        badprimer = 1
    return badprimer
def al_primersearch(current_amp):
    '''
    Run primer3 on an amplicon and return its BoulderIO output as a dict.

    current_amp: SeqRecord whose dbxrefs[0]/dbxrefs[1] give the start/end (relative
    to the amplicon) of the region the PCR product must contain; primers are
    constrained to the flanks outside that region.
    Returns: dict mapping primer3 output keys to values (converted to float
    where possible).
    Side effects: writes "current_amp.boulder"; requires primer3_core and
    primer3_global_parameters.txt to be available.
    '''
    start_of_required_region = current_amp.dbxrefs[0]
    end_of_required_region = current_amp.dbxrefs[1]
    length_of_required_region = end_of_required_region - start_of_required_region
    # Primers may bind anywhere left of the required region or right of it.
    primeableregionleft_start = str(0)
    primeableregionleft_length = str(start_of_required_region)
    primeableregionright_start = str(end_of_required_region)
    primeableregionright_length = str(len(current_amp)-end_of_required_region)
    # with-block guarantees the boulder file is flushed/closed before primer3
    # reads it (the original leaked the handle on error).
    with open("current_amp.boulder", "w") as boulder:
        boulder.write("SEQUENCE_ID=" + current_amp.id + "\n")
        boulder.write("SEQUENCE_TEMPLATE=" + str(current_amp.seq) + "\n")
        #boulder.write("SEQUENCE_INCLUDED_REGION=" + "0," + str(len(current_amp.seq)) + "\n")
        #boulder.write("SEQUENCE_TARGET=" + str(current_amp.dbxrefs[0]) + "," + str(current_amp.dbxrefs[1] - current_amp.dbxrefs[0]) + "\n")
        boulder.write("SEQUENCE_PRIMER_PAIR_OK_REGION_LIST=" + primeableregionleft_start + "," + primeableregionleft_length + ","\
        + primeableregionright_start + "," + primeableregionright_length + "\n")
        boulder.write("PRIMER_PRODUCT_SIZE_RANGE=" + str(length_of_required_region) + "-" + str(len(current_amp)) + "\n")
        boulder.write("PRIMER_PRODUCT_OPT_SIZE=" + str(length_of_required_region) + "\n")
        #boulder.write("P3_FILE_FLAG=1\n")
        boulder.write("=\n")
    primer_output = subprocess.check_output(["primer3_core", "current_amp.boulder",\
    "-p3_settings_file=primer3_global_parameters.txt"])
    # check_output returns bytes on Python 3; decode so the parsing below works
    # on both 2 and 3.
    if isinstance(primer_output, bytes):
        primer_output = primer_output.decode()
    primerdict = {}
    # Last three lines are the terminating "=" record and trailing blanks.
    for item in primer_output.split("\n")[0:-3]:
        # Split on the first '=' only, so values containing '=' stay intact.
        key, _, val = item.partition("=")
        try:
            val = float(val)
        except ValueError:
            pass
        primerdict[key] = val
    return primerdict
|
eatingcrispr/VirtualEating
|
archive/Simulating and generating 3MB Xenopus library/eating.py
|
Python
|
apache-2.0
| 37,977
|
[
"BLAST"
] |
ebe6a961ff3a7d27b400d4e0e2b10fd2ccbbbcafc3349de8b8ab09f7beecfea3
|
import uuid
from Firefly import aliases, logging
from Firefly.automation.triggers import Triggers
from Firefly.const import COMMAND_NOTIFY, SERVICE_NOTIFICATION, TYPE_AUTOMATION, API_ALEXA_VIEW, API_FIREBASE_VIEW, API_INFO_REQUEST
from Firefly.helpers.action import Action
from Firefly.helpers.conditions import Conditions
from Firefly.helpers.events import Command, Event, Request
from Firefly.helpers.automation.automation_interface import AutomationInterface
# TODO(zpriddy): These should be in const file
LABEL_ACTIONS = 'actions'
LABEL_CONDITIONS = 'conditions'
LABEL_DELAYS = 'delays'
LABEL_DEVICES = 'devices'
LABEL_MESSAGES = 'messages'
LABEL_TRIGGERS = 'triggers'
LABEL_TRIGGER_ACTION = 'trigger_actions'
INTERFACE_LABELS = [LABEL_ACTIONS, LABEL_CONDITIONS, LABEL_DELAYS, LABEL_DEVICES, LABEL_MESSAGES, LABEL_TRIGGERS, LABEL_TRIGGER_ACTION]
from typing import Callable, Any
class Automation(object):
def __init__(self, firefly, package: str, event_handler: Callable, metadata: dict = {}, interface: dict = {}, **kwargs):
self.actions = {}
self.command_map = {}
self.conditions = {}
self.delays = {}
self.devices = {}
self.event_handler = event_handler
self.firefly = firefly
self.interface = interface
self.messages = {}
self.metadata = metadata
self.package = package
self.triggers = {}
self.trigger_actions = {}
# TODO(zpriddy): Should should be a shared function in a lib somewhere.
# Alias and id functions
ff_id = kwargs.get('ff_id')
alias = kwargs.get('alias')
# If alias given but no ID look at config files for ID.
if not ff_id and alias:
if aliases.get_device_id(alias):
ff_id = aliases.get_device_id(alias)
elif ff_id and not alias:
if aliases.get_alias(ff_id):
alias = aliases.get_alias(ff_id)
# If no ff_id ID given -> generate random ID.
if not ff_id:
ff_id = str(uuid.uuid4())
self.id = ff_id
self.alias = alias if alias else ff_id
self.new_interface = AutomationInterface(firefly, self.id, self.interface)
self.new_interface.build_interface()
#self.build_interfaces()
def event(self, event: Event, **kwargs):
logging.info('[AUTOMATION] %s - Receiving event: %s' % (self.id, event))
# Check each triggerList in triggers.
for trigger_index, trigger in self.new_interface.triggers.items():
if trigger.check_triggers(event):
# Check if there are conditions with the same index, if so check them.
if self.new_interface.conditions.get(trigger_index):
if not self.new_interface.conditions.get(trigger_index).check_conditions(self.firefly):
logging.info('[AUTOMATION] failed condition checks.')
continue
# Call the event handler passing in the trigger_index and return.
logging.info('[AUTOMATION] no conditions. executing event handler.')
return self.event_handler(event, trigger_index, **kwargs)
def request(self, request: Request) -> Any:
"""Function to request data from the ff_id.
The returned data can be in any format. Common formats should be:
str, int, dict
Args:
request (Request): Request object
Returns:
Requested Data
"""
logging.debug('[AUTOMATION] %s: Got Request %s' % (self.id, request))
if request.request == API_INFO_REQUEST:
return self.get_api_info()
if request.request == API_FIREBASE_VIEW:
return self.get_firebase_views()
if request.request == API_ALEXA_VIEW:
return self.get_alexa_view()
return None
  def get_api_info(self, **kwargs):
    """Return API info for this automation; base implementation returns an
    empty dict (presumably overridden by subclasses -- TODO confirm)."""
    return {}
  def get_firebase_views(self, **kwargs):
    """Return Firebase view data; base implementation returns an empty dict
    (presumably overridden by subclasses -- TODO confirm)."""
    return {}
  def get_alexa_view(self, **kwargs):
    """Return Alexa view data; base implementation logs and returns an empty dict."""
    logging.info('[AUTOMATION] no alexa view')
    return {}
def export(self, **kwargs):
"""
Export ff_id config with options current values to a dictionary.
Args:
Returns:
(dict): A dict of the ff_id config.
"""
export_data = {
'alias': self.alias, # 'commands': self.command_map.keys(),
'ff_id': self.id,
'interface': self.new_interface.export(),
'metadata': self.metadata,
'package': self.package,
'type': self.type
}
return export_data
def command(self, command, **kwargs):
"""
Function that is called to send a command to a ff_id.
Commands can be used to reset times or other items if the automation needs it.
Args:
command (Command): The command to be sent in a Command object
Returns:
(bool): Command successful.
"""
logging.debug('%s: Got Command: %s' % (self.id, command.command))
if command.command in self.command_map.keys():
try:
self.command_map[command.command](**command.args)
return True
except:
return False
return False
def build_interfaces(self, **kwargs):
"""
builds the interfaces (actions, conditions, delays, triggers) using the metadata and config information.
Args:
**kwargs:
Returns:
"""
meta_interfaces = self.metadata.get('interface')
if not meta_interfaces:
return
for label in INTERFACE_LABELS:
interface_data = meta_interfaces.get(label)
if not interface_data:
continue
if label == LABEL_ACTIONS:
self.build_actions_interface(interface_data)
if label == LABEL_TRIGGERS:
self.build_triggers_interface(interface_data)
if label == LABEL_CONDITIONS:
self.build_conditions_interface(interface_data)
if label == LABEL_DELAYS:
self.build_delays_interface(interface_data)
if label == LABEL_DEVICES:
self.build_devices_interface(interface_data)
if label == LABEL_MESSAGES:
self.build_messages_interface(interface_data)
if label == LABEL_TRIGGER_ACTION:
self.build_trigger_actions_interface(interface_data)
def build_actions_interface(self, interface_data: dict, **kwargs):
for action_index in interface_data.keys():
self.actions[action_index] = []
# TODO(zpriddy): Do we want to keep the add_action function?
if not self.interface.get(LABEL_ACTIONS):
continue
for action in self.interface.get(LABEL_ACTIONS).get(action_index):
self.actions[action_index].append(Action(**action))
def build_triggers_interface(self, interface_data: dict, **kwargs):
for trigger_index in interface_data.keys():
if not self.interface.get(LABEL_TRIGGERS):
continue
self.triggers[trigger_index] = Triggers(self.firefly, self.id)
self.triggers[trigger_index].import_triggers(self.interface.get(LABEL_TRIGGERS).get(trigger_index))
def build_trigger_actions_interface(self, interface_data: dict, **kwargs):
    """Populate ``self.trigger_actions`` from the saved config.

    Args:
        interface_data (dict): trigger-action section of the metadata; only
            its keys are used here.
    """
    # Hoisted: the config lookup is loop-invariant; the original re-read it
    # (and tested it) on every iteration.
    trigger_actions_config = self.interface.get(LABEL_TRIGGER_ACTION)
    if not trigger_actions_config:
        return
    for trigger_action_index in interface_data.keys():
        self.trigger_actions[trigger_action_index] = trigger_actions_config.get(trigger_action_index)
def build_conditions_interface(self, interface_data: dict, **kwargs):
    """Populate ``self.conditions``; indexes with no saved config stay None.

    Args:
        interface_data (dict): conditions section of the metadata; only its
            keys are used here.
    """
    # Hoisted: the original performed this lookup twice per iteration.
    # ``or {}`` keeps the per-index default-to-None behavior intact.
    conditions_config = self.interface.get(LABEL_CONDITIONS) or {}
    for condition_index in interface_data.keys():
        self.conditions[condition_index] = None
        condition_data = conditions_config.get(condition_index)
        if not condition_data:
            continue
        self.conditions[condition_index] = Conditions(**condition_data)
def build_delays_interface(self, interface_data: dict, **kwargs):
    """Populate ``self.delays`` from the saved config.

    Args:
        interface_data (dict): delays section of the metadata; only its keys
            are used here.
    """
    # Hoisted: the config lookup is loop-invariant.
    delays_config = self.interface.get(LABEL_DELAYS)
    if not delays_config:
        return
    for delay_index in interface_data.keys():
        self.delays[delay_index] = delays_config.get(delay_index)
def build_devices_interface(self, interface_data: dict, **kwargs):
    """Populate ``self.devices`` from the saved config.

    Args:
        interface_data (dict): devices section of the metadata; only its
            keys are used here.
    """
    # Hoisted: the config lookup is loop-invariant.
    devices_config = self.interface.get(LABEL_DEVICES)
    if not devices_config:
        return
    for device_index in interface_data.keys():
        self.devices[device_index] = devices_config.get(device_index)
def build_messages_interface(self, interface_data: dict, **kwargs):
    """Populate ``self.messages`` from the saved config.

    Args:
        interface_data (dict): messages section of the metadata; only its
            keys are used here.
    """
    # Hoisted: the config lookup is loop-invariant.
    messages_config = self.interface.get(LABEL_MESSAGES)
    if not messages_config:
        return
    for message_index in interface_data.keys():
        self.messages[message_index] = messages_config.get(message_index)
def export_interface(self, **kwargs):
    """Serialize the interface (triggers, actions, conditions, messages,
    delays, devices, trigger actions) into a plain dict, skipping entries
    whose stored value is None."""
    exported_triggers = {index: trigger.export()
                         for index, trigger in self.triggers.items()
                         if trigger is not None}
    exported_actions = {index: [action.export() for action in action_list]
                        for index, action_list in self.actions.items()}
    exported_conditions = {index: condition.export()
                           for index, condition in self.conditions.items()
                           if condition is not None}
    exported_messages = {index: message
                         for index, message in self.messages.items()
                         if message is not None}
    exported_delays = {index: delay
                       for index, delay in self.delays.items()
                       if delay is not None}
    exported_devices = {index: device
                        for index, device in self.devices.items()
                        if device is not None}
    exported_trigger_actions = {index: trigger_action
                                for index, trigger_action in self.trigger_actions.items()
                                if trigger_action is not None}
    return {
        LABEL_TRIGGERS: exported_triggers,
        LABEL_ACTIONS: exported_actions,
        LABEL_CONDITIONS: exported_conditions,
        LABEL_MESSAGES: exported_messages,
        LABEL_DELAYS: exported_delays,
        LABEL_DEVICES: exported_devices,
        LABEL_TRIGGER_ACTION: exported_trigger_actions,
    }
def add_command(self, command: str, function: Callable) -> None:
    """Register *function* as the handler for *command* on this ff_id.

    Args:
        command (str): name of the command to support.
        function (Callable): callable invoked when the command arrives.
    """
    self.command_map[command] = function
def execute_actions(self, action_index: str, **kwargs) -> bool:
    """Run every action registered under *action_index*.

    Returns:
        bool: True when actions existed and were executed, else False.
    """
    actions = self.new_interface.actions.get(action_index)
    if not actions:
        return False
    for action in actions:
        action.execute_action(self.firefly)
    return True
def send_messages(self, message_index: str, **kwargs) -> bool:
    """Dispatch the notification message registered under *message_index*.

    Returns:
        bool: True when a message was found and sent, else False.
    """
    message = self.new_interface.messages.get(message_index)
    if not message:
        return False
    notify = Command(SERVICE_NOTIFICATION, self.id, COMMAND_NOTIFY, message=message)
    self.firefly.send_command(notify)
    return True
def event_handler(self, event: Event = None, trigger_index="", **kwargs):
    """Handle an incoming event for this automation.

    NOTE(review): stub — the base implementation only logs an error;
    presumably subclasses override this. Confirm against callers.
    """
    logging.error('EVENT HANDLER NOT CREATED')
@property
def type(self):
    # Intentionally shadows the ``type`` builtin: this is the ff_id type tag.
    return TYPE_AUTOMATION
|
Firefly-Automation/Firefly
|
Firefly/helpers/automation/automation.py
|
Python
|
apache-2.0
| 10,789
|
[
"Firefly"
] |
230b0ae023ce78b3106ffd06b1d0f942374b0976d2c3183ed86b698753c9c0dc
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3, IS_WINDOWS)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)

# Names exported by ``from ... import *`` for this module.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when running on a 64-bit Python (8-byte pointers)."""
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """Return True if *p1* and *p2* refer to the same file.

    Uses ``os.path.samefile`` when both paths exist and the platform
    provides it; otherwise falls back to comparing normalized path strings.
    """
    if hasattr(os.path, 'samefile') and os.path.exists(p1) and os.path.exists(p2):
        return os.path.samefile(p1, p2)

    def canonical(path):
        return os.path.normpath(os.path.normcase(path))

    return canonical(p1) == canonical(p2)
# Text/bytes helpers whose implementation differs between Python 2 and 3.
# ``PY2`` and ``unicode`` come from setuptools.compat (imported above).
if PY2:
    def _to_ascii(s):
        # On Python 2, str is already a byte string; nothing to do.
        return s

    def isascii(s):
        """Return True if *s* can be decoded as ASCII."""
        try:
            unicode(s, 'ascii')
            return True
        except UnicodeError:
            return False
else:
    def _to_ascii(s):
        # On Python 3, encode the text to ASCII bytes.
        return s.encode('ascii')

    def isascii(s):
        """Return True if *s* can be encoded as ASCII."""
        try:
            s.encode('ascii')
            return True
        except UnicodeError:
            return False
class easy_install(Command):
    """Manage a download/build/install process"""

    description = "Find/get/install Python packages"
    # Positional args are requirement specs/URLs/paths, not option values.
    command_consumes_arguments = True

    # distutils option table: (long-name, short-name, help) triples.
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    # Only offer --user where per-user site-packages are enabled.
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory for the package index; overridable by subclasses/tests.
    create_index = PackageIndex
def initialize_options(self):
    """Set every command option to its pre-parse default (mostly None)."""
    # the --user option seems to be an opt-in one,
    # so the default should be False.
    self.user = 0
    self.zip_ok = self.local_snapshots_ok = None
    self.install_dir = self.script_dir = self.exclude_scripts = None
    self.index_url = None
    self.find_links = None
    self.build_directory = None
    self.args = None
    self.optimize = self.record = None
    self.upgrade = self.always_copy = self.multi_version = None
    self.editable = self.no_deps = self.allow_hosts = None
    self.root = self.prefix = self.no_report = None
    self.version = None
    self.install_purelib = None  # for pure module distributions
    self.install_platlib = None  # non-pure (dists w/ extensions)
    self.install_headers = None  # for C/C++ headers
    self.install_lib = None  # set to either purelib or platlib
    self.install_scripts = None
    self.install_data = None
    self.install_base = None
    self.install_platbase = None
    if site.ENABLE_USER_SITE:
        self.install_userbase = site.USER_BASE
        self.install_usersite = site.USER_SITE
    else:
        self.install_userbase = None
        self.install_usersite = None
    self.no_find_links = None

    # Options not specifiable via command line
    self.package_index = None
    self.pth_file = self.always_copy_from = None
    self.site_dirs = None
    self.installed_projects = {}
    self.sitepy_installed = False
    # Always read easy_install options, even if we are subclassed, or have
    # an independent instance created. This ensures that defaults will
    # always come from the standard configuration file(s)' "easy_install"
    # section, even if this is a "develop" or "install" command, or some
    # other embedding.
    self._dry_run = None
    self.verbose = self.distribution.verbose
    self.distribution._set_command_options(
        self, self.distribution.get_option_dict('easy_install')
    )
def delete_blockers(self, blockers):
    """Delete each path in *blockers* that exists or is a (possibly
    dangling) symlink."""
    for filename in blockers:
        if os.path.exists(filename) or os.path.islink(filename):
            self._delete_path(filename)
def _delete_path(self, path):
    """Remove *path* — a file, link, or directory tree — honoring dry-run."""
    log.info("Deleting %s", path)
    if self.dry_run:
        return
    if os.path.isdir(path) and not os.path.islink(path):
        rmtree(path)
    else:
        os.unlink(path)
def finalize_options(self):
    """Resolve option values: wire in defaults from the install_lib,
    install_scripts and install commands, validate --site-dirs, and build
    the package index.  Prints the version and exits when --version is set.
    """
    if self.version:
        print('setuptools %s' % get_distribution('setuptools').version)
        sys.exit()

    py_version = sys.version.split()[0]
    prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')

    # Substitution variables usable in --install-dir et al. ($py_version...)
    self.config_vars = {
        'dist_name': self.distribution.get_name(),
        'dist_version': self.distribution.get_version(),
        'dist_fullname': self.distribution.get_fullname(),
        'py_version': py_version,
        'py_version_short': py_version[0:3],
        'py_version_nodot': py_version[0] + py_version[2],
        'sys_prefix': prefix,
        'prefix': prefix,
        'sys_exec_prefix': exec_prefix,
        'exec_prefix': exec_prefix,
        # Only python 3.2+ has abiflags
        'abiflags': getattr(sys, 'abiflags', ''),
    }

    if site.ENABLE_USER_SITE:
        self.config_vars['userbase'] = self.install_userbase
        self.config_vars['usersite'] = self.install_usersite

    self._fix_install_dir_for_user_site()

    self.expand_basedirs()
    self.expand_dirs()

    self._expand('install_dir', 'script_dir', 'build_directory',
                 'site_dirs')
    # If a non-default installation directory was specified, default the
    # script directory to match it.
    if self.script_dir is None:
        self.script_dir = self.install_dir

    if self.no_find_links is None:
        self.no_find_links = False

    # Let install_dir get set by install_lib command, which in turn
    # gets its info from the install command, and takes into account
    # --prefix and --home and all that other crud.
    self.set_undefined_options(
        'install_lib', ('install_dir', 'install_dir')
    )
    # Likewise, set default script_dir from 'install_scripts.install_dir'
    self.set_undefined_options(
        'install_scripts', ('install_dir', 'script_dir')
    )

    if self.user and self.install_purelib:
        self.install_dir = self.install_purelib
        self.script_dir = self.install_scripts
    # default --record from the install command
    self.set_undefined_options('install', ('record', 'record'))

    # BUG FIX: on Python 3 ``map()`` returns a one-shot iterator; repeated
    # ``in`` membership tests in the --site-dirs loop below exhausted it
    # after the first probe, silently mis-validating later entries.
    # Materialize it as a list so every test sees the full sys.path.
    normpath = list(map(normalize_path, sys.path))
    self.all_site_dirs = get_site_dirs()
    if self.site_dirs is not None:
        site_dirs = [
            os.path.expanduser(s.strip()) for s in
            self.site_dirs.split(',')
        ]
        for d in site_dirs:
            if not os.path.isdir(d):
                log.warn("%s (in --site-dirs) does not exist", d)
            elif normalize_path(d) not in normpath:
                raise DistutilsOptionError(
                    d + " (in --site-dirs) is not on sys.path"
                )
            else:
                self.all_site_dirs.append(normalize_path(d))
    if not self.editable:
        self.check_site_dir()
    self.index_url = self.index_url or "https://pypi.python.org/simple"
    self.shadow_path = self.all_site_dirs[:]
    for path_item in self.install_dir, normalize_path(self.script_dir):
        if path_item not in self.shadow_path:
            self.shadow_path.insert(0, path_item)

    if self.allow_hosts is not None:
        hosts = [s.strip() for s in self.allow_hosts.split(',')]
    else:
        hosts = ['*']
    if self.package_index is None:
        self.package_index = self.create_index(
            self.index_url, search_path=self.shadow_path, hosts=hosts,
        )
    self.local_index = Environment(self.shadow_path + sys.path)

    if self.find_links is not None:
        if isinstance(self.find_links, basestring):
            self.find_links = self.find_links.split()
    else:
        self.find_links = []
    if self.local_snapshots_ok:
        self.package_index.scan_egg_links(self.shadow_path + sys.path)
    if not self.no_find_links:
        self.package_index.add_find_links(self.find_links)
    self.set_undefined_options('install_lib', ('optimize', 'optimize'))
    if not isinstance(self.optimize, int):
        try:
            self.optimize = int(self.optimize)
            if not (0 <= self.optimize <= 2):
                raise ValueError
        except ValueError:
            raise DistutilsOptionError("--optimize must be 0, 1, or 2")

    if self.editable and not self.build_directory:
        raise DistutilsArgError(
            "Must specify a build directory (-b) when using --editable"
        )
    if not self.args:
        raise DistutilsArgError(
            "No urls, filenames, or requirements specified (see --help)")

    self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
    """Expand ``~`` and config-var references in each named attribute that
    has a value set."""
    for attr in attrs:
        val = getattr(self, attr)
        if val is None:
            continue
        if os.name == 'posix' or IS_WINDOWS:
            val = os.path.expanduser(val)
        setattr(self, attr, subst_vars(val, self.config_vars))
def expand_basedirs(self):
    """Calls `os.path.expanduser` on install_base, install_platbase and
    root."""
    # Delegates to _expand_attrs, which also substitutes config vars.
    self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
    """Calls `os.path.expanduser` on install dirs."""
    # Delegates to _expand_attrs, which also substitutes config vars.
    self._expand_attrs(['install_purelib', 'install_platlib',
                        'install_lib', 'install_headers',
                        'install_scripts', 'install_data', ])
def run(self):
    """Install every requirement spec in ``self.args``; optionally write the
    --record file listing all installed files afterwards."""
    if self.verbose != self.distribution.verbose:
        log.set_verbosity(self.verbose)
    try:
        for spec in self.args:
            self.easy_install(spec, not self.no_deps)
        if self.record:
            outputs = self.outputs
            if self.root:  # strip any package prefix
                root_len = len(self.root)
                for counter in range(len(outputs)):
                    outputs[counter] = outputs[counter][root_len:]
            from distutils import file_util

            self.execute(
                file_util.write_file, (self.record, outputs),
                "writing list of installed files to '%s'" %
                self.record
            )
        self.warn_deprecated_options()
    finally:
        # Restore the distribution's verbosity regardless of outcome.
        log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
    """Return a pseudo-tempname base in the install directory.

    This code is intentionally naive; if a malicious party can write to
    the target directory you're already in deep doodoo.
    """
    try:
        pid = os.getpid()
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.  Fall back to a random id on
        # platforms where getpid() is unavailable or fails.
        pid = random.randint(0, maxsize)
    return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
    """Hook for warning about deprecated options; currently a no-op."""
    pass
def check_site_dir(self):
    """Verify that self.install_dir is .pth-capable dir, if needed.

    Side effects: may set self.pth_file, self.sitepy_installed, and
    normalizes self.install_dir.  Raises DistutilsError when a non-multi
    install targets a directory that cannot process .pth files.
    """
    instdir = normalize_path(self.install_dir)
    pth_file = os.path.join(instdir, 'easy-install.pth')

    # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
    is_site_dir = instdir in self.all_site_dirs

    if not is_site_dir and not self.multi_version:
        # No? Then directly test whether it does .pth file processing
        is_site_dir = self.check_pth_processing()
    else:
        # make sure we can write to target dir
        testfile = self.pseudo_tempname() + '.write-test'
        test_exists = os.path.exists(testfile)
        try:
            if test_exists:
                os.unlink(testfile)
            open(testfile, 'w').close()
            os.unlink(testfile)
        except (OSError, IOError):
            self.cant_write_to_target()

    if not is_site_dir and not self.multi_version:
        # Can't install non-multi to non-site dir
        raise DistutilsError(self.no_default_version_msg())

    if is_site_dir:
        if self.pth_file is None:
            self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
    else:
        self.pth_file = None

    PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
        # only PYTHONPATH dirs need a site.py, so pretend it's there
        self.sitepy_installed = True
    elif self.multi_version and not os.path.exists(pth_file):
        self.sitepy_installed = True  # don't need site.py in this case
        self.pth_file = None  # and don't create a .pth file
    self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
    """Raise DistutilsError explaining why install_dir can't be written,
    including the active exception and a directory-specific hint."""
    msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
    if os.path.exists(self.install_dir):
        msg += '\n' + self.__access_msg
    else:
        msg += '\n' + self.__not_exists_id
    raise DistutilsError(msg)
def check_pth_processing(self):
    """Empirically verify whether .pth files are supported in inst. dir.

    Writes a throwaway .pth file that, when processed by a spawned child
    interpreter at startup, creates a companion ``.ok`` file; the presence
    of that file proves .pth processing works.  All temp files are removed.
    """
    instdir = self.install_dir
    log.info("Checking .pth file support in %s", instdir)
    pth_file = self.pseudo_tempname() + ".pth"
    ok_file = pth_file + '.ok'
    ok_exists = os.path.exists(ok_file)
    try:
        if ok_exists:
            os.unlink(ok_file)
        dirname = os.path.dirname(ok_file)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        f = open(pth_file, 'w')
    except (OSError, IOError):
        self.cant_write_to_target()
    else:
        try:
            # The .pth file's one-liner runs at interpreter startup and
            # creates ok_file as the success marker.
            f.write("import os; f = open(%r, 'w'); f.write('OK'); "
                    "f.close()\n" % (ok_file,))
            f.close()
            f = None
            executable = sys.executable
            if os.name == 'nt':
                dirname, basename = os.path.split(executable)
                alt = os.path.join(dirname, 'pythonw.exe')
                if (basename.lower() == 'python.exe' and
                        os.path.exists(alt)):
                    # use pythonw.exe to avoid opening a console window
                    executable = alt

            from distutils.spawn import spawn

            spawn([executable, '-E', '-c', 'pass'], 0)

            if os.path.exists(ok_file):
                log.info(
                    "TEST PASSED: %s appears to support .pth files",
                    instdir
                )
                return True
        finally:
            # Always clean up the probe files, pass or fail.
            if f:
                f.close()
            if os.path.exists(ok_file):
                os.unlink(ok_file)
            if os.path.exists(pth_file):
                os.unlink(pth_file)
    if not self.multi_version:
        log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
    return False
def install_egg_scripts(self, dist):
    """Write all the scripts for `dist`, unless scripts are excluded"""
    if not self.exclude_scripts and dist.metadata_isdir('scripts'):
        for script_name in dist.metadata_listdir('scripts'):
            if dist.metadata_isdir('scripts/' + script_name):
                # Skip directories — likely a Python 3 __pycache__ dir.
                continue
            script_text = dist.get_metadata('scripts/' + script_name)
            self.install_script(dist, script_name, script_text)
    self.install_wrapper_scripts(dist)
def add_output(self, path):
    """Record *path* in self.outputs — every file beneath it if it is a
    directory, otherwise the path itself."""
    if not os.path.isdir(path):
        self.outputs.append(path)
        return
    for base, dirs, files in os.walk(path):
        self.outputs.extend(os.path.join(base, filename) for filename in files)
def not_editable(self, spec):
    """Reject filename/URL specs when --editable was requested."""
    if not self.editable:
        return
    raise DistutilsArgError(
        "Invalid argument %r: you can't use filenames or URLs "
        "with --editable (except via the --find-links option)."
        % (spec,)
    )
def check_editable(self, spec):
    """With --editable, refuse specs whose checkout dir already exists."""
    if not self.editable:
        return
    target = os.path.join(self.build_directory, spec.key)
    if os.path.exists(target):
        raise DistutilsArgError(
            "%r already exists in %s; can't do a checkout there" %
            (spec.key, self.build_directory)
        )
def easy_install(self, spec, deps=False):
    """Fetch and install *spec* (a Requirement object, URL, or local path).

    Returns the installed Distribution (or the result of install_item).
    All download/build work happens in a temp dir that is always removed.
    """
    tmpdir = tempfile.mkdtemp(prefix="easy_install-")
    download = None
    if not self.editable:
        self.install_site_py()

    try:
        if not isinstance(spec, Requirement):
            if URL_SCHEME(spec):
                # It's a url, download it to tmpdir and process
                self.not_editable(spec)
                download = self.package_index.download(spec, tmpdir)
                return self.install_item(None, download, tmpdir, deps,
                                         True)

            elif os.path.exists(spec):
                # Existing file or directory, just process it directly
                self.not_editable(spec)
                return self.install_item(None, spec, tmpdir, deps, True)
            else:
                spec = parse_requirement_arg(spec)

        self.check_editable(spec)
        dist = self.package_index.fetch_distribution(
            spec, tmpdir, self.upgrade, self.editable,
            not self.always_copy, self.local_index
        )
        if dist is None:
            msg = "Could not find suitable distribution for %r" % spec
            if self.always_copy:
                msg += " (--always-copy skips system and development eggs)"
            raise DistutilsError(msg)
        elif dist.precedence == DEVELOP_DIST:
            # .egg-info dists don't need installing, just process deps
            self.process_distribution(spec, dist, deps, "Using")
            return dist
        else:
            return self.install_item(spec, dist.location, tmpdir, deps)

    finally:
        if os.path.exists(tmpdir):
            rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
    """Install *download*, or reuse an already-installed local egg.

    Returns the Distribution that satisfies *spec*, or None when *spec* is
    None or nothing matched.
    """
    # Installation is also needed if file in tmpdir or is not an egg
    install_needed = install_needed or self.always_copy
    install_needed = install_needed or os.path.dirname(download) == tmpdir
    install_needed = install_needed or not download.endswith('.egg')
    install_needed = install_needed or (
        self.always_copy_from is not None and
        os.path.dirname(normalize_path(download)) ==
        normalize_path(self.always_copy_from)
    )

    if spec and not install_needed:
        # at this point, we know it's a local .egg, we just don't know if
        # it's already installed.
        for dist in self.local_index[spec.project_name]:
            if dist.location == download:
                break
        else:
            install_needed = True  # it's not in the local index

    log.info("Processing %s", os.path.basename(download))

    if install_needed:
        dists = self.install_eggs(spec, download, tmpdir)
        for dist in dists:
            self.process_distribution(spec, dist, deps)
    else:
        dists = [self.egg_distribution(download)]
        self.process_distribution(spec, dists[0], deps, "Using")

    if spec is not None:
        for dist in dists:
            if dist in spec:
                return dist
def select_scheme(self, name):
    """Fill in any unset install_* directory from the named install scheme.

    It's the caller's problem if they supply a bad name!
    """
    scheme = INSTALL_SCHEMES[name]
    for key in SCHEME_KEYS:
        attr = 'install_' + key
        if getattr(self, attr) is None:
            setattr(self, attr, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
    """Register *dist* in the pth file and indexes, install its scripts,
    and — unless *deps* is false — resolve and install its dependencies.

    *info* is extra words for the installation report (e.g. "Using").
    """
    self.update_pth(dist)
    self.package_index.add(dist)
    if dist in self.local_index[dist.key]:
        self.local_index.remove(dist)
    self.local_index.add(dist)
    self.install_egg_scripts(dist)
    self.installed_projects[dist.key] = dist
    log.info(self.installation_report(requirement, dist, *info))
    if (dist.has_metadata('dependency_links.txt') and
            not self.no_find_links):
        self.package_index.add_find_links(
            dist.get_metadata_lines('dependency_links.txt')
        )
    if not deps and not self.always_copy:
        return
    elif requirement is not None and dist.key != requirement.key:
        log.warn("Skipping dependencies for %s", dist)
        return  # XXX this is not the distribution we were looking for
    elif requirement is None or dist not in requirement:
        # if we wound up with a different version, resolve what we've got
        distreq = dist.as_requirement()
        requirement = requirement or distreq
        requirement = Requirement(
            distreq.project_name, distreq.specs, requirement.extras
        )
    log.info("Processing dependencies for %s", requirement)
    try:
        distros = WorkingSet([]).resolve(
            [requirement], self.local_index, self.easy_install
        )
    except DistributionNotFound as e:
        raise DistutilsError(
            "Could not find required distribution %s" % e.args
        )
    except VersionConflict as e:
        raise DistutilsError(e.report())
    if self.always_copy or self.always_copy_from:
        # Force all the relevant distros to be copied or activated
        for dist in distros:
            if dist.key not in self.installed_projects:
                self.easy_install(dist.as_requirement())
    log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
    """Decide whether *dist* should be installed unzipped.

    --zip-ok / --always-unzip wins when given; otherwise unzip unless the
    egg is explicitly marked zip-safe.
    """
    if self.zip_ok is not None:
        return not self.zip_ok
    return (dist.has_metadata('not-zip-safe') or
            not dist.has_metadata('zip-safe'))
def maybe_move(self, spec, dist_filename, setup_base):
    """Move the build tree into self.build_directory/spec.key (-b option).

    Returns the directory where the build now lives; if the destination
    already exists the original *setup_base* is kept instead.
    """
    dst = os.path.join(self.build_directory, spec.key)
    if os.path.exists(dst):
        msg = ("%r already exists in %s; build directory %s will not be "
               "kept")
        log.warn(msg, spec.key, self.build_directory, setup_base)
        return setup_base
    if os.path.isdir(dist_filename):
        setup_base = dist_filename
    else:
        if os.path.dirname(dist_filename) == setup_base:
            os.unlink(dist_filename)  # get it out of the tmp dir
        contents = os.listdir(setup_base)
        if len(contents) == 1:
            dist_filename = os.path.join(setup_base, contents[0])
            if os.path.isdir(dist_filename):
                # if the only thing there is a directory, move it instead
                setup_base = dist_filename
    ensure_directory(dst)
    shutil.move(setup_base, dst)
    return dst
def install_wrapper_scripts(self, dist):
    """Write entry-point wrapper scripts for *dist* unless excluded."""
    if self.exclude_scripts:
        return
    for args in ScriptWriter.best().get_args(dist):
        self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
    """Generate a legacy script wrapper and install it"""
    spec = str(dist.as_requirement())
    is_script = is_python_script(script_text, script_name)

    if is_script:
        # NOTE(review): the template is filled via ``% locals()``, so the
        # local variable names here (e.g. ``spec``) are part of the
        # template contract — do not rename them.
        script_text = (ScriptWriter.get_header(script_text) +
                       self._load_template(dev_path) % locals())
    self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
    """
    There are a couple of template scripts in the package. This
    function loads one of them and prepares it for use.
    """
    # See https://bitbucket.org/pypa/setuptools/issue/134 for info
    # on script file naming and downstream issues with SVR4
    template_name = 'script.tmpl'
    if dev_path:
        template_name = template_name.replace('.tmpl', ' (dev).tmpl')
    return resource_string('setuptools', template_name).decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
    """Write an executable file to the scripts directory.

    *mode* is the open() mode suffix ("t" or "b"); *blockers* are stale
    sibling files (old .py/.pyw wrappers) removed first.
    """
    self.delete_blockers(  # clean up old .py/.pyw w/o a script
        [os.path.join(self.script_dir, x) for x in blockers]
    )
    log.info("Installing %s script to %s", script_name, self.script_dir)
    target = os.path.join(self.script_dir, script_name)
    self.add_output(target)

    mask = current_umask()
    if not self.dry_run:
        ensure_directory(target)
        if os.path.exists(target):
            os.unlink(target)
        f = open(target, "w" + mode)
        f.write(contents)
        f.close()
        # Grant every permission the umask allows (executable script).
        chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
    """Install *dist_filename*, building from source when necessary.

    Returns a list of installed Distributions: one for an .egg/.exe; a
    source build may produce several (or none in --editable mode).
    """
    # .egg dirs or files are already built, so just return them
    if dist_filename.lower().endswith('.egg'):
        return [self.install_egg(dist_filename, tmpdir)]
    elif dist_filename.lower().endswith('.exe'):
        return [self.install_exe(dist_filename, tmpdir)]

    # Anything else, try to extract and build
    setup_base = tmpdir
    if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
        unpack_archive(dist_filename, tmpdir, self.unpack_progress)
    elif os.path.isdir(dist_filename):
        setup_base = os.path.abspath(dist_filename)

    if (setup_base.startswith(tmpdir)  # something we downloaded
            and self.build_directory and spec is not None):
        setup_base = self.maybe_move(spec, dist_filename, setup_base)

    # Find the setup.py file
    setup_script = os.path.join(setup_base, 'setup.py')

    if not os.path.exists(setup_script):
        setups = glob(os.path.join(setup_base, '*', 'setup.py'))
        if not setups:
            raise DistutilsError(
                "Couldn't find a setup script in %s" %
                os.path.abspath(dist_filename)
            )
        if len(setups) > 1:
            raise DistutilsError(
                "Multiple setup scripts in %s" %
                os.path.abspath(dist_filename)
            )
        setup_script = setups[0]

    # Now run it, and return the result
    if self.editable:
        log.info(self.report_editable(spec, setup_script))
        return []
    else:
        return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
    """Return a Distribution for the egg at *egg_path* (dir or zipfile)."""
    if os.path.isdir(egg_path):
        meta = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
    else:
        meta = EggMetadata(zipimport.zipimporter(egg_path))
    return Distribution.from_filename(egg_path, metadata=meta)
def install_egg(self, egg_path, tmpdir):
    """Move, copy, or extract the egg at *egg_path* into install_dir and
    return the resulting Distribution."""
    destination = os.path.join(self.install_dir,
                               os.path.basename(egg_path))
    destination = os.path.abspath(destination)
    if not self.dry_run:
        ensure_directory(destination)

    dist = self.egg_distribution(egg_path)
    if not samefile(egg_path, destination):
        # Clear whatever currently occupies the destination.
        if os.path.isdir(destination) and not os.path.islink(destination):
            dir_util.remove_tree(destination, dry_run=self.dry_run)
        elif os.path.exists(destination):
            self.execute(os.unlink, (destination,), "Removing " +
                         destination)
        try:
            new_dist_is_zipped = False
            if os.path.isdir(egg_path):
                if egg_path.startswith(tmpdir):
                    f, m = shutil.move, "Moving"
                else:
                    f, m = shutil.copytree, "Copying"
            elif self.should_unzip(dist):
                self.mkpath(destination)
                f, m = self.unpack_and_compile, "Extracting"
            else:
                new_dist_is_zipped = True
                if egg_path.startswith(tmpdir):
                    f, m = shutil.move, "Moving"
                else:
                    f, m = shutil.copy2, "Copying"
            self.execute(f, (egg_path, destination),
                         (m + " %s to %s") %
                         (os.path.basename(egg_path),
                          os.path.dirname(destination)))
            update_dist_caches(destination,
                               fix_zipimporter_caches=new_dist_is_zipped)
        except:
            # Bare except is deliberate: on ANY failure, undo the cache
            # update and then re-raise the original exception.
            update_dist_caches(destination, fix_zipimporter_caches=False)
            raise

    self.add_output(destination)
    return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
    """Convert a bdist_wininst .exe installer into an egg and install it.

    Raises DistutilsError when the file is not a valid wininst installer.
    """
    # See if it's valid, get data
    cfg = extract_wininst_cfg(dist_filename)
    if cfg is None:
        raise DistutilsError(
            "%s is not a valid distutils Windows .exe" % dist_filename
        )
    # Create a dummy distribution object until we build the real distro
    dist = Distribution(
        None,
        project_name=cfg.get('metadata', 'name'),
        version=cfg.get('metadata', 'version'), platform=get_platform(),
    )

    # Convert the .exe to an unpacked egg
    egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
                                            '.egg')
    egg_tmp = egg_path + '.tmp'
    _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
    pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
    ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
    dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
    self.exe_to_egg(dist_filename, egg_tmp)

    # Write EGG-INFO/PKG-INFO
    if not os.path.exists(pkg_inf):
        f = open(pkg_inf, 'w')
        f.write('Metadata-Version: 1.0\n')
        for k, v in cfg.items('metadata'):
            if k != 'target_version':
                f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
        f.close()

    script_dir = os.path.join(_egg_info, 'scripts')
    # delete entry-point scripts to avoid duping
    self.delete_blockers(
        [os.path.join(script_dir, args[0]) for args in
         ScriptWriter.get_args(dist)]
    )
    # Build .egg file from tmpdir
    bdist_egg.make_zipfile(
        egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
    )
    # install the .egg
    return self.install_egg(egg_path, tmpdir)
    def exe_to_egg(self, dist_filename, egg_tmp):
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []    # .py destinations to byte-compile afterwards
        native_libs = []   # remapped source paths of .pyd/.dll files
        top_level = {}     # top-level module/package names (used as a set)

        def process(src, dst):
            # Remap archive path `src` through the prefix translations and
            # return its destination inside the egg, or None to skip it.
            s = src.lower()
            for old, new in prefixes:
                if s.startswith(old):
                    src = new + src[len(old):]
                    parts = src.split('/')
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        # native extension: strip platform decorations and
                        # remember it for stub generation below
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old != 'SCRIPTS/':
                        # `old` here is the prefix that matched on this
                        # loop iteration (scripts are not byte-compiled)
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            return None

        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)  # compile .py's
        bdist_egg.write_safety_flag(
            os.path.join(egg_tmp, 'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag

        # Write EGG-INFO/top_level.txt and native_libs.txt.  NOTE: the
        # locals() lookup keys off the loop variable value matching the
        # local list/dict names above -- do not rename those locals.
        for name in 'top_level', 'native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
                if not os.path.exists(txt):
                    f = open(txt, 'w')
                    f.write('\n'.join(locals()[name]) + '\n')
                    f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        # NOTE: the final `msg % locals()` interpolation keys off these
        # exact local names (what, eggloc, name, version, extras) --
        # do not rename them.
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
            if self.install_dir not in map(normalize_path, sys.path):
                msg += '\n' + self.__id_warning

        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        return msg % locals()
    # Message shown after extracting an editable checkout; interpolated via
    # locals() in report_editable() below.
    __editable_msg = textwrap.dedent("""
        Extracted editable version of %(spec)s to %(dirname)s
        If it uses setuptools in its setup script, you can activate it in
        "development" mode by going to that directory and running::
            %(python)s setup.py develop
        See the setuptools documentation for the "develop" command for more info.
        """).lstrip()

    def report_editable(self, spec, setup_script):
        # The `locals()` interpolation relies on these exact names
        # (spec, dirname, python) -- do not rename them.
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
    def update_pth(self, dist):
        """Record `dist` in the easy-install.pth file (no-op when the
        .pth mechanism is disabled for this install)."""
        if self.pth_file is None:
            return

        for d in self.pth_file[dist.key]:  # drop old entries
            if self.multi_version or d.location != dist.location:
                log.info("Removing %s from easy-install.pth file", d)
                self.pth_file.remove(d)
                if d.location in self.shadow_path:
                    self.shadow_path.remove(d.location)

        # single-version installs make the new dist the active one
        if not self.multi_version:
            if dist.location in self.pth_file.paths:
                log.info(
                    "%s is already the active version in easy-install.pth",
                    dist
                )
            else:
                log.info("Adding %s to easy-install.pth file", dist)
                self.pth_file.add(dist)  # add new entry
                if dist.location not in self.shadow_path:
                    self.shadow_path.append(dist.location)

        if not self.dry_run:
            self.pth_file.save()

            if dist.key == 'setuptools':
                # Ensure that setuptools itself never becomes unavailable!
                # XXX should this check for latest version?
                filename = os.path.join(self.install_dir, 'setuptools.pth')
                if os.path.islink(filename):
                    os.unlink(filename)
                f = open(filename, 'wt')
                f.write(self.pth_file.make_relative(dist.location) + '\n')
                f.close()
    def unpack_progress(self, src, dst):
        """Progress filter for unpacking: log each file and accept it."""
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
    def byte_compile(self, to_compile):
        """Byte-compile `to_compile`, honoring --optimize and dry-run."""
        # respect PYTHONDONTWRITEBYTECODE / python -B
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return

        from distutils.util import byte_compile

        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)
            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                # second pass at the requested optimization level
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
    def create_home_path(self):
        """Create directories under ~."""
        # only applies to --user style installs
        if not self.user:
            return
        home = convert_path(os.path.expanduser("~"))
        for name, path in iteritems(self.config_vars):
            # create any config-var directory that lives under $HOME
            if path.startswith(home) and not os.path.isdir(path):
                self.debug_print("os.makedirs('%s', 0o700)" % path)
                os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
    """Return the normalized list of 'site' directories for this interpreter."""
    # start with any explicit PYTHONPATH entries
    sitedirs = [entry
                for entry in os.environ.get('PYTHONPATH', '').split(os.pathsep)
                if entry]

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)

    for prefix in prefixes:
        if not prefix:
            continue
        if sys.platform in ('os2emx', 'riscos'):
            sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
        elif os.sep == '/':
            sitedirs.append(os.path.join(
                prefix, "lib", "python" + sys.version[:3], "site-packages"))
            sitedirs.append(os.path.join(prefix, "lib", "site-python"))
        else:
            sitedirs.append(prefix)
            sitedirs.append(os.path.join(prefix, "lib", "site-packages"))

        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(os.path.join(
                        home, 'Library', 'Python', sys.version[:3],
                        'site-packages'))

    # the interpreter's own purelib/platlib locations
    for site_lib in (get_path('purelib'), get_path('platlib')):
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    return list(map(normalize_path, sitedirs))
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages

    Yields (dirname, filenames) pairs for each input directory and for
    every existing, non-duplicate directory named by a non-import line of
    any uncontrolled .pth file found there.
    """
    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue
        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file; `with` guarantees the handle is closed
            # even if yield_lines raises (the original leaked it then)
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if line.startswith("import"):
                    continue
                line = normalize_path(line.rstrip())
                if line in seen:
                    continue
                seen[line] = 1
                if not os.path.isdir(line):
                    continue
                yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a ConfigParser.RawConfigParser, or None when the file does not
    look like a valid wininst installer.
    """
    # `with` replaces the manual try/finally close of the original
    with open(dist_filename, 'rb') as f:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # the wininst payload lives in the data prepended before the zip part
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        from setuptools.compat import StringIO, ConfigParser
        import struct

        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        cfg = ConfigParser.RawConfigParser(
            {'version': '', 'target_version': ''})
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(StringIO(config))
        except ConfigParser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""
    # fixed translations for the standard wininst layout directories
    prefixes = [
        ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    archive = zipfile.ZipFile(exe_filename)
    try:
        for member in archive.infolist():
            entry = member.filename
            pieces = entry.split('/')
            if len(pieces) == 3 and pieces[2] == 'PKG-INFO':
                # a top-level *.egg-info dir supplies the EGG-INFO contents
                if pieces[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(pieces[:2]), 'EGG-INFO/'))
                    break
            if len(pieces) != 2 or not entry.endswith('.pth'):
                continue
            if entry.endswith('-nspkg.pth'):
                continue
            if pieces[0].upper() not in ('PURELIB', 'PLATLIB'):
                continue
            # each non-import line of the .pth adds another translation
            contents = archive.read(entry)
            if PY3:
                contents = contents.decode()
            for pth in yield_lines(contents):
                pth = pth.strip().replace('\\', '/')
                if not pth.startswith('import'):
                    prefixes.append((('%s/%s/' % (pieces[0], pth)), ''))
    finally:
        archive.close()
    # longest/most-specific prefixes must be tried first
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
def parse_requirement_arg(spec):
    """Parse a command-line argument as a requirement spec.

    Raises DistutilsError (with a user-oriented message) when `spec` is
    not a valid requirement string.
    """
    try:
        return Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec,)
        )
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True once in-memory state differs from what is on disk
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # register every distribution found on the loaded paths
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Parse the .pth file into self.paths, dropping stale entries."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                # blank lines and comments are kept verbatim
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()

        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # drop trailing blank lines
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        data = '\n'.join(map(self.make_relative, self.paths))
        if data:
            log.debug("Saving %s", self.filename)
            # wrap the paths with import hooks so entries get inserted at
            # the position recorded in sys.__egginsert
            data = (
                "import sys; sys.__plen = len(sys.path)\n"
                "%s\n"
                "import sys; new=sys.path[sys.__plen:];"
                " del sys.path[sys.__plen:];"
                " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
                " sys.__egginsert = p+len(new)\n"
            ) % data

            if os.path.islink(self.filename):
                os.unlink(self.filename)
            f = open(self.filename, 'wt')
            f.write(data)
            f.close()

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return `path` expressed relative to basedir (as './...') when it
        lies underneath it; otherwise return it unchanged."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        # walk upward until we either hit basedir or pass above it
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """rmtree onerror handler: on Windows, make the file writable and retry
    os.remove; otherwise re-raise the pending exception with added context."""
    if func is os.remove and IS_WINDOWS:
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): ev[0]/ev[1] indexing assumes Python-2-style exception
    # args tuples; exceptions are not subscriptable on Python 3 -- confirm
    # this path is only reached where that holds.
    reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.
    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    # set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    # normalize once so all cache-key comparisons below are consistent
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.
    """
    prefix_len = len(normalized_path)

    def related(entry):
        # match the path itself, or anything nested directly beneath it
        np = normalize_path(entry)
        return (np.startswith(normalized_path) and
                np[prefix_len:prefix_len + 1] in (os.sep, ''))

    return [entry for entry in cache if related(entry)]
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to
    zip archives embedded in other zip archives.

    `updater` is called with a cache entry key and the original entry (after
    the entry has already been removed from the cache); it may update that
    entry and return a replacement to be inserted in its place.  Returning
    None -- or passing no updater at all -- leaves the entry removed.
    """
    for key in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation
        # does not support the complete dict interface: it supports neither
        # item assignment for plain removal workflows nor dict.pop(), which
        # forces the get/del pattern used here.  For details see:
        # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        old_entry = cache[key]
        del cache[key]
        new_entry = updater(key, old_entry) if updater else None
        if new_entry is not None:
            cache[key] = new_entry
def _uncache(normalized_path, cache):
    """Drop all cache entries related to `normalized_path` (no updater, so
    entries are simply removed)."""
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear and drop cached zip directory data under `normalized_path`."""
    def wipe(path, old_entry):
        # empty the shared dict so stale zipimporters fail fast; returning
        # None keeps the entry removed from the cache
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=wipe)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:

    def _replace_zip_directory_cache_data(normalized_path):
        """Refresh cached zip directory data under `normalized_path` in
        place, so existing zipimporter instances see the new archive."""
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """Return True when `text` compiles as a valid Python script."""
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError):
        return False
    return True
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with io.open(executable, encoding='latin-1') as fp:
            first_two = fp.read(2)
    except (OSError, IOError):
        # unreadable file: preserve the historical quirk of returning the
        # path itself (truthy) rather than a boolean
        return executable
    return first_two == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # delegate to the stdlib implementation of the MS C runtime rules
    return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
    """Return True if this text, as a whole, is a Python script (as opposed
    to shell/bat/etc.)."""
    if filename.endswith(('.py', '.pyw')):
        # extension says it's Python
        return True
    if is_python(script_text, filename):
        # it's syntactically valid Python
        return True
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        first_line = script_text.splitlines()[0]
        return 'python' in first_line.lower()
    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: os.chmod may be unavailable, fall back to a no-op
    def _chmod(*args):
        pass
def chmod(path, mode):
    """Best-effort chmod wrapper: failures are logged, never raised."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
    """Deprecated shim: adapt a shebang executable line for Jython.

    Kept for backward compatibility; use JythonCommandSpec instead.
    """
    warnings.warn("Use JythonCommandSpec", DeprecationWarning, stacklevel=2)

    if not JythonCommandSpec.relevant():
        return executable
    cmd = CommandSpec.best().from_param(executable)
    cmd.install_options(options)
    return cmd.as_header().lstrip('#!').rstrip('\n')
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    options = []          # extra interpreter options for the header
    split_args = dict()   # keyword args for shlex.split in from_string()

    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        if JythonCommandSpec.relevant():
            return JythonCommandSpec
        return cls

    @classmethod
    def _sys_executable(cls):
        # honor the venv launcher override when present
        default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', default)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        return cls([cls._sys_executable()])

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        return cls(shlex.split(string, **cls.split_args))

    def install_options(self, script_text):
        """Adopt any interpreter options found on the script's #! line;
        prepend -x when the command line is not pure ASCII."""
        self.options = shlex.split(self._extract_options(script_text))
        if not isascii(subprocess.list2cmdline(self)):
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        """Render this command spec as a '#!...' header line."""
        return self._render(self + list(self.options))

    @staticmethod
    def _render(items):
        return '#!' + subprocess.list2cmdline(items) + '\n'
# For pbr compat; will be removed in a future version.
# Module-level snapshot of the default interpreter path for script headers.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    # Windows command lines are not POSIX-parseable; disable POSIX mode
    # for the shlex.split call in from_string()
    split_args = dict(posix=False)
class JythonCommandSpec(CommandSpec):
    """CommandSpec variant that works around Jython's sys.executable
    sometimes being a shell script."""

    @classmethod
    def relevant(cls):
        # Only applies when running on Jython, and not on Linux.
        return (
            sys.platform.startswith('java')
            and
            __import__('java').lang.System.getProperty('os.name') != 'Linux'
        )

    @classmethod
    def from_string(cls, string):
        # keep the command as one unsplit argument
        return cls([string])

    def as_header(self):
        """
        Workaround Jython's sys.executable being a .sh (an invalid
        shebang line interpreter)
        """
        if not is_sh(self[0]):
            return super(JythonCommandSpec, self).as_header()

        if self.options:
            # Can't apply the workaround, leave it broken
            log.warn(
                "WARNING: Unable to adapt shebang line for Jython,"
                " the following script is NOT executable\n"
                " see http://bugs.jython.org/issue1112 for"
                " more information.")
            return super(JythonCommandSpec, self).as_header()

        # route the .sh interpreter through /usr/bin/env instead
        return self._render(['/usr/bin/env'] + self + list(self.options))
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Script body template.  Interpolated via `locals()` in get_args(), so
    # it depends on the exact local names `spec`, `group` and `name` there.
    template = textwrap.dedent("""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import sys
        from pkg_resources import load_entry_point
        if __name__ == '__main__':
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    # CommandSpec subclass used to build the #! header
    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's entrypoints
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                # the template interpolation below keys off the local names
                # spec/group/name -- do not rename them
                script_text = cls.template % locals()
                for res in cls._get_script_args(type_, name, header,
                                                script_text):
                    yield res

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        return WindowsScriptWriter.best() if IS_WINDOWS else cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    # ScriptWriter variant producing scripts Windows recognizes as executable.
    command_spec_class = WindowsCommandSpec

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        # NOTE(review): an unrecognized SETUPTOOLS_LAUNCHER value raises
        # KeyError here -- confirm that is the intended failure mode.
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        # NOTE(review): '.pya' (not '.py') is used for console scripts here;
        # presumably deliberate (it also appears in the blockers list below).
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            warnings.warn("%s not listed in PATHEXT; scripts will not be "
                          "recognized as executables." % ext, UserWarning)
        # Stale sibling files with these suffixes are listed as "blockers"
        # so they don't shadow the freshly written script.
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @staticmethod
    def _adjust_header(type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        # Strip the leading '#!' and trailing newline, plus any quoting,
        # to get the bare interpreter path for the existence check.
        clean_header = new_header[2:-1].strip('"')
        if sys.platform == 'win32' and not os.path.exists(clean_header):
            # the adjusted version doesn't exist, so return the original
            return orig_header
        return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        # The -script.py(w) stub, written in text mode ('t').
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
# Module-level aliases kept for callers that predate the ScriptWriter API.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    # Build the resource name piece by piece: base name, optional ARM
    # marker, then the bitness suffix inserted before the extension.
    name = '%s.exe' % type
    if platform.machine().lower() == 'arm':
        name = name.replace(".", "-arm.")
    bitness = "-64." if is_64bit() else "-32."
    name = name.replace(".", bitness)
    return resource_string('setuptools', name)
def load_launcher_manifest(name):
    """Return the launcher manifest XML with the script name interpolated."""
    # Read the manifest template shipped alongside this module; decode to
    # text on Python 3 before %-formatting with the local variables.
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if not PY2:
        manifest = manifest.decode('utf-8')
    return manifest % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Recursively delete a directory tree.

    This code is taken from the Python 2.4 version of 'shutil', because
    the 2.3 version doesn't really work right.

    Errors are reported via onerror(func, path, exc_info), suppressed
    entirely when ignore_errors is true, or re-raised when onerror is None.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise

    entries = []
    try:
        entries = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())

    for entry in entries:
        child = os.path.join(path, entry)
        try:
            child_mode = os.lstat(child).st_mode
        except os.error:
            child_mode = 0
        if stat.S_ISDIR(child_mode):
            # Recurse into subdirectories with the same error policy.
            rmtree(child, ignore_errors, onerror)
            continue
        try:
            os.remove(child)
        except os.error:
            onerror(os.remove, child, sys.exc_info())

    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def current_umask():
    """Return the process umask without permanently changing it."""
    # os.umask both sets and returns the previous mask, so set a dummy
    # value, capture the real mask, and immediately restore it.
    mask = os.umask(0o022)
    os.umask(mask)
    return mask
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # Directory that contains the setuptools package -- the egg itself.
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    # NOTE(review): appending the egg path presumably makes main() treat it
    # as the requirement to install -- confirm.
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    # Entry point for running easy_install as a standalone program.
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # Suppress the generic distutils usage banner; _patch_usage()
        # substitutes an easy_install-specific one while help is shown.
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands, **kw
        )
@contextlib.contextmanager
def _patch_usage():
    # Temporarily replace distutils' usage text generator with one phrased
    # in terms of easy_install's command line; the original function is
    # restored on exit, even when an exception propagates.
    import distutils.core
    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
           or: %(script)s --help
    """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        distutils.core.gen_usage = saved
|
EnviroCentre/jython-upgrade
|
jython/lib/site-packages/setuptools/command/easy_install.py
|
Python
|
mit
| 86,129
|
[
"VisIt"
] |
b50f153b44b27be3f7e26766565448a6d8f491c0793f296ab3bd52204ded969c
|
import scipy as sp
import scipy as sp
import scipy.linalg as spla
import numpy as np
from functools import reduce
import pyscf
from pyscf import gto, scf, ao2mo, fci, mp, ao2mo
def myump2(mf):
    """Compute the UMP2 correlation energy from a converged SCF object."""
    # As UHF objects, mo_energy, mo_occ, mo_coeff are two-item lists
    # (the first item for alpha spin, the second for beta spin).
    mo_energy = mf.mo_energy
    mo_occ = mf.mo_occ
    mo_coeff = mf.mo_coeff
    # Occupied (o) and virtual (v) MO coefficients: alpha block stacked
    # first, then beta, along the column axis.
    o = np.hstack((mo_coeff[0][:,mo_occ[0]>0] ,mo_coeff[1][:,mo_occ[1]>0]))
    v = np.hstack((mo_coeff[0][:,mo_occ[0]==0],mo_coeff[1][:,mo_occ[1]==0]))
    # Matching orbital energies for the occupied/virtual blocks.
    eo = np.hstack((mo_energy[0][mo_occ[0]>0] ,mo_energy[1][mo_occ[1]>0]))
    ev = np.hstack((mo_energy[0][mo_occ[0]==0],mo_energy[1][mo_occ[1]==0]))
    no = o.shape[1]
    nv = v.shape[1]
    # Counts of alpha occupied/virtual orbitals (the leading blocks above).
    noa = sum(mo_occ[0]>0)
    nva = sum(mo_occ[0]==0)
    # (ov|ov) integrals over the stacked spin-orbital coefficients.
    eri = ao2mo.general(mf.mol, (o,v,o,v)).reshape(no,nv,no,nv)
    # Zero the cross-spin blocks (alpha paired with beta) -- presumably
    # removing spin-forbidden terms the spatial transform can't know about.
    eri[:noa,nva:] = eri[noa:,:nva] = eri[:,:,:noa,nva:] = eri[:,:,noa:,:nva] = 0
    # Antisymmetrized integrals g = (ia|jb) - (ib|ja).
    g = eri - eri.transpose(0,3,2,1)
    # Energy denominators: eov[i,a] = e_i - e_a, then the full
    # 1/(e_i - e_a + e_j - e_b) tensor reshaped to match g.
    eov = eo.reshape(-1,1) - ev.reshape(-1)
    de = 1/(eov.reshape(-1,1) + eov.reshape(-1)).reshape(g.shape)
    # MP2 correlation energy: 1/4 * sum g^2 / denominators.
    emp2 = .25 * np.einsum('iajb,iajb,iajb->', g, g, de)
    return emp2
# Water molecule, minimal STO-3G basis; coordinates are in Bohr (unit='b').
mol = gto.M(
    atom = [['O', (0.000000000000, -0.143225816552, 0.000000000000)],
            ['H', (1.638036840407, 1.136548822547, -0.000000000000)],
            ['H', (-1.638036840407, 1.136548822547, -0.000000000000)]],
    basis = 'STO-3G',
    verbose = 1,
    unit='b'
)
# Restricted Hartree-Fock reference; kernel() returns the total SCF energy.
myhf = scf.RHF(mol)
E = myhf.kernel()
# Full in-core AO->MO two-electron integral transformation.
ijkl = ao2mo.incore.full(pyscf.scf._vhf.int2e_sph(mol._atm, mol._bas, mol._env), myhf.mo_coeff, compact=False)
# Use print() calls instead of the Python-2-only `print E` statement so the
# script runs unchanged under both Python 2 and Python 3.
print(E)
print(myump2(myhf))
|
shivupa/pyci
|
methods/misc/testmp2.py
|
Python
|
gpl-3.0
| 1,635
|
[
"PySCF"
] |
ecd40056eff9d6324e9d3efdae873d6785b39ff24e51a8925b601386d9064b0a
|
# Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Expansion of realms_config.Realm into a flat form."""
import collections
from components.auth.proto import realms_pb2
from components.config import validation as cfg_validation
from proto import realms_config_pb2
from realms import common
from realms import permissions
from realms import validation
def expand_realms(db, project_id, realms_cfg):
    """Expands realms_config_pb2.RealmsCfg into a flat realms_pb2.Realms.

    The returned realms_pb2.Realms contains realms and permissions of a single
    project only. Permissions not mentioned in the project's realms are omitted.
    All realms_pb2.Permission messages have names only (no metadata). api_version
    field is omitted.

    All such realms_pb2.Realms messages across all projects (plus a list of all
    defined permissions with all their metadata) are later merged together into
    a final universal realms_pb2.Realms by realms.merge(...) in
    components/auth/replication.py.

    Args:
      db: a permissions.DB instance with current permissions and roles.
      project_id: ID of a LUCI project to use as a prefix in realm names.
      realms_cfg: an instance of realms_config_pb2.RealmsCfg to expand.

    Returns:
      realms_pb2.Realms with expanded realms (with caveats mentioned above).

    Raises:
      ValueError if the validation fails.
    """
    # `internal` is True when expanding internal realms (defined in a service
    # config file). Such realms can use internal roles and permissions and they
    # do not have implicit root bindings (since they are not associated with
    # any "project:<X>" identity used in implicit root bindings).
    internal = project_id == common.INTERNAL_PROJECT

    # The server code could have changed since the config passed the validation
    # and realms_cfg may not be valid anymore. Verify it still is. The code below
    # depends crucially on the validity of realms_cfg.
    validation.Validator(
        cfg_validation.Context.raise_on_error(), db, internal,
    ).validate(realms_cfg)

    # Make sure @root realm exist and append implicit bindings to it. We need to
    # do it before enumerating conditions below to actually instantiate all
    # Condition objects that we'll need to visit (some of them may come from
    # implicit bindings). Pre-instantiating them is important because we rely
    # on their unique and stable id(...) for faster hash map lookups.
    realms_map = to_realms_map(
        realms_cfg,
        db.implicit_root_bindings(project_id) if not internal else [])

    # We'll need to visit realms in sorted order twice. Sort once and remember.
    realms_list = sorted(realms_map.items())

    # Prepopulate `conds_set` with all conditions mentioned in all bindings to
    # normalize, dedup and map them to integers. Integers are faster to work
    # with and we'll need them for the final proto message.
    conds_set = ConditionsSet()
    for _, realm in realms_list:
        for binding in realm.bindings:
            for cond in binding.conditions:
                conds_set.add_condition(cond)
    all_conditions = conds_set.finalize()

    # A lazily populated {role -> tuple of permissions} mapping.
    roles_expander = RolesExpander(db.roles, realms_cfg.custom_roles)
    # A helper to traverse the realms graph.
    realms_expander = RealmsExpander(roles_expander, conds_set, realms_map)

    # Visit all realms and build preliminary bindings as pairs of
    # (a tuple with permission indexes, a list of principals who have them). The
    # bindings are preliminary since we don't know final permission indexes yet
    # and instead use some internal indexes as generated by RolesExpander. We
    # need to finish this first pass to gather the list of ALL used permissions,
    # so we can calculate final indexes. This is done inside of `roles_expander`.
    realms = []  # [(name, {(permissions tuple, conditions tuple) => [principal]}]
    for name, _ in realms_list:
        # Build a mapping from a principal+conditions to the permissions set.
        #
        # Each map entry `(principal, tuple(conds)) => set(perms)` means `principal`
        # is granted the given set of permissions if all given conditions allow it.
        #
        # This step essentially deduplicates permission bindings that result from
        # expanding realms and roles inheritance chains.
        principal_to_perms = collections.defaultdict(set)
        for principal, perms, conds in realms_expander.per_principal_bindings(name):
            principal_to_perms[(principal, conds)].update(perms)

        # Combine entries with the same set of permissions+conditions into one.
        #
        # Each map entry `(tuple(perms), tuple(conds)) => list(principal)` means
        # all `principals` are granted all given permissions if all given conditions
        # allow it.
        #
        # This step merges principal sets of identical bindings to have a more
        # compact final representation.
        perms_to_principals = collections.defaultdict(list)
        for (principal, conds), perms in principal_to_perms.items():
            perms_norm = tuple(sorted(perms))
            perms_to_principals[(perms_norm, conds)].append(principal)

        # perms_to_principals is essentially a set of all binding in a realm.
        realms.append((name, perms_to_principals))

    # We now know all permissions ever used by all realms. Convert them into the
    # form suitable for realm_pb2 by sorting alphabetically. Keep the mapping
    # between old and new indexes, to be able to change indexes in permission
    # tuples we stored in `realms`.
    perms, index_map = roles_expander.sorted_permissions()

    # Build the final sorted form of all realms by relabeling permissions
    # according to the index_map and by sorting stuff.
    return realms_pb2.Realms(
        permissions=[realms_pb2.Permission(name=p) for p in perms],
        conditions=all_conditions,
        realms=[
            realms_pb2.Realm(
                name='%s:%s' % (project_id, name),
                bindings=to_normalized_bindings(perms_to_principals, index_map),
                data=realms_expander.realm_data(name),
            )
            for name, perms_to_principals in realms
        ])
class RolesExpander(object):
    """Keeps track of permissions and `role => [permission]` expansions.

    Permissions are represented internally as integers to speed up set operations.
    The mapping from a permission to a corresponding integer is lazily built and
    should be considered arbitrary (it depends on the order of method calls). But
    it doesn't matter since in the end we relabel all permissions according to
    their indexes in the final sorted list of permissions.

    Should be used only with validated realms_config_pb2.RealmsCfg, may cause
    stack overflow or raise random exceptions otherwise.
    """

    def __init__(self, builtin_roles, custom_roles):
        self._builtin_roles = builtin_roles
        self._custom_roles = {r.name: r for r in custom_roles}
        self._permissions = {}  # permission name => its index
        self._roles = {}  # role name => set indexes of permissions

    def _perm_index(self, name):
        """Returns an internal index that represents the given permission string."""
        idx = self._permissions.get(name)
        if idx is None:
            # First time this permission is seen: assign the next free index.
            idx = len(self._permissions)
            self._permissions[name] = idx
        return idx

    def _perm_indexes(self, iterable):
        """Yields indexes of given permission strings."""
        return (self._perm_index(p) for p in iterable)

    def role(self, role):
        """Returns an unsorted tuple of indexes of permissions of the role."""
        perms = self._roles.get(role)
        if perms is not None:
            return perms  # already expanded and memoized
        if role.startswith(permissions.BUILTIN_ROLE_PREFIX):
            perms = self._perm_indexes(self._builtin_roles[role].permissions)
        elif role.startswith(permissions.CUSTOM_ROLE_PREFIX):
            # Custom roles may extend other roles: expand them recursively.
            custom_role = self._custom_roles[role]
            perms = set(self._perm_indexes(custom_role.permissions))
            for parent in custom_role.extends:
                perms.update(self.role(parent))
        else:
            raise AssertionError('Impossible role %s' % (role,))
        perms = tuple(perms)
        self._roles[role] = perms
        return perms

    def sorted_permissions(self):
        """Returns a sorted list of permission and a old->new index mapping list.

        See to_normalized_bindings below for how it is used.
        """
        perms = sorted(self._permissions)
        mapping = [None] * len(perms)
        for new_idx, p in enumerate(perms):
            old_idx = self._permissions[p]
            mapping[old_idx] = new_idx
        assert all(v is not None for v in mapping), mapping
        return perms, mapping
class ConditionsSet(object):
    """Normalizes and dedups conditions, maps them to integers.

    Assumes all incoming realms_config_pb2.Condition are immutable and dedups
    them by *identity* (using id(...) function), as well as by normalized values.

    Also assumes the set of all possible *objects* ever passed to indexes(...) was
    also passed to add_condition(...) first (so it could build id => index map).
    This makes hot indexes(...) function fast by allowing to lookup ids instead
    of (potentially huge) protobuf message values.
    """

    def __init__(self):
        # A mapping from a serialized normalized realms_pb2.Condition to a pair
        # (normalized realms_pb2.Condition, its unique index).
        self._normalized = {}
        # A mapping from id(realms_config_pb2.Condition) to its matching index.
        self._mapping = {}
        # A list of all different objects ever passed to add_condition, to retain
        # pointers to them to make sure their id(...)s are not reallocated by
        # Python to point to other objects.
        self._retain = []
        # True if finalize() was already called.
        self._finalized = False

    def add_condition(self, cond):
        """Adds realms_config_pb2.Condition to the set if not already there."""
        assert not self._finalized
        assert isinstance(cond, realms_config_pb2.Condition), cond

        # Check if we already processed this exact object before.
        if id(cond) in self._mapping:
            return

        # Normalize realms_config_pb2.Condition into a realms_pb2.Condition.
        norm = realms_pb2.Condition()
        if cond.HasField('restrict'):
            norm.restrict.attribute = cond.restrict.attribute
            norm.restrict.values.extend(sorted(set(cond.restrict.values)))
        else:
            # Note: this should not be happening, we validated all inputs already.
            raise ValueError('Invalid empty condition %r' % cond)

        # Get a key for the dictionary, since `norm` itself is unhashable and
        # can't be used as a key.
        key = norm.SerializeToString()

        # Append it to the set of unique conditions if not already there.
        idx = self._normalized.setdefault(key, (norm, len(self._normalized)))[1]

        # Remember that we mapped this particular `cond` *object* to this index.
        self._mapping[id(cond)] = idx
        self._retain.append(cond)

    def finalize(self):
        """Finalizes the set preventing any future add_condition(...) calls.

        Sorts the list of stored conditions according to some stable order and
        returns the final sorted list of realms_pb2.Condition. Indexes returned by
        indexes(...) will refer to indexes in this list.
        """
        assert not self._finalized
        self._finalized = True

        # Sort according to their binary representations. The order doesn't
        # matter as long as it is reproducible.
        #
        # Fix: the original used `key=lambda (key, _): key`, which relies on
        # Python-2-only tuple parameter unpacking (removed by PEP 3113 and a
        # SyntaxError on Python 3). Indexing the pair works identically on
        # both versions since keys are unique.
        conds = [
            val for _, val in
            sorted(self._normalized.items(), key=lambda item: item[0])
        ]
        self._normalized = None  # won't need it anymore

        # Here `conds` is a list of pairs (cond, its old index). We'll need
        # to change self._mapping to use new indexes (matching the new order in
        # `conds`). Build the remapping dict {old index => new index}.
        old_to_new = {old: new for new, (_, old) in enumerate(conds)}
        assert len(old_to_new) == len(conds)

        # Change indexes in _mapping to use the new order.
        for key, old in self._mapping.items():
            self._mapping[key] = old_to_new[old]

        # Return the final list of conditions in the new order.
        return [cond for cond, _ in conds]

    def indexes(self, conds):
        """Given a list of realms_config_pb2.Condition returns a sorted index tuple.

        Can be called only after finalize(). All given conditions must have
        previously been put into the set via add_condition(...). The returned
        tuple can have fewer elements if some conditions in `conds` are
        equivalent.

        The returned tuple is essentially a compact encoding of the overall AND
        condition expression in a binding.
        """
        assert self._finalized
        # Skip function calls for two most common cases.
        if not conds:
            return ()
        if len(conds) == 1:
            return (self._mapping[id(conds[0])],)
        return tuple(sorted(set(self._mapping[id(cond)] for cond in conds)))
class RealmsExpander(object):
    """Helper to traverse the realm inheritance graph."""

    def __init__(self, roles, conds_set, realms_map):
        self._roles = roles  # RolesExpander used to expand role names
        self._conds_set = conds_set  # finalized ConditionsSet
        self._realms = realms_map  # name -> realms_config_pb2.Realm
        self._data = {}  # name -> realms_pb2.RealmData, memoized

    @staticmethod
    def _parents(realm):
        """Given a realms_config_pb2.Realm yields names of immediate parents."""
        if realm.name == common.ROOT_REALM:
            return
        # Every non-root realm implicitly extends the root realm.
        yield common.ROOT_REALM
        for name in realm.extends:
            if name != common.ROOT_REALM:
                yield name

    def per_principal_bindings(self, realm):
        """Yields tuples (a single principal, permissions tuple, conditions tuple).

        Visits all bindings in the realm and its parent realms. Returns a lot of
        duplicates. It's the caller's job to skip them.
        """
        r = self._realms[realm]
        assert r.name == realm

        for b in r.bindings:
            perms = self._roles.role(b.role)  # the tuple of permissions of the role
            conds = self._conds_set.indexes(b.conditions)  # the tuple with conditions
            for principal in b.principals:
                yield principal, perms, conds

        # Recurse into parent realms and re-yield their bindings too.
        for parent in self._parents(r):
            for principal, perms, conds in self.per_principal_bindings(parent):
                yield principal, perms, conds

    def realm_data(self, name):
        """Returns calculated realms_pb2.RealmData for a realm."""
        if name not in self._data:
            realm = self._realms[name]
            # Combine with data inherited from parents; memoize the result.
            extends = [self.realm_data(p) for p in self._parents(realm)]
            self._data[name] = derive_realm_data(realm, [x for x in extends if x])
        return self._data[name]
def to_realms_map(realms_cfg, implicit_root_bindings):
    """Returns a map {realm name => realms_config_pb2.Realm}.

    Makes sure the @root realm is defined there, adding it if necessary.
    Appends the given list of bindings to the root realm.

    Args:
      realms_cfg: the original realms_config_pb2.Realms message.
      implicit_root_bindings: a list of realms_config_pb2.Binding to add to @root.
    """
    by_name = {r.name: r for r in realms_cfg.realms}

    # Build a fresh @root message and copy the config's definition into it
    # (when present), so the config message itself is never mutated.
    root = realms_config_pb2.Realm(name=common.ROOT_REALM)
    declared_root = by_name.get(common.ROOT_REALM)
    if declared_root is not None:
        root.CopyFrom(declared_root)
    root.bindings.extend(implicit_root_bindings)

    by_name[common.ROOT_REALM] = root
    return by_name
def to_normalized_bindings(perms_to_principals, index_map):
    """Produces a sorted list of realms_pb2.Binding.

    Bindings are given as a map from (permission tuple, conditions tuple) to
    a list of principals that should have all given permission if all given
    conditions allow.

    Permissions are specified through their internal indexes as produced by
    RolesExpander. We convert them into "public" ones (the ones that correspond
    to the sorted permissions list in the realms_pb2.Realms proto). The mapping
    from an old to a new index is given by `new = index_map[old]`.

    Conditions are specified as indexes in ConditionsSet, we use them as they are,
    since by construction of ConditionsSet, all conditions are in use and we don't
    need any extra filtering (and consequently index remapping to skip gaps) as we
    do for permissions.

    Args:
      perms_to_principals: {(permissions tuple, conditions tuple) => [principal]}.
      index_map: defines how to remap permission indexes (old -> new).

    Returns:
      A sorted list of realm_pb2.Binding.
    """
    # Relabel permission indexes and sort principals, collecting plain
    # (perms, conds, principals) triples first so they can be sorted as a whole.
    triples = []
    for (perms, conds), principals in perms_to_principals.items():
        public_perms = sorted(index_map[idx] for idx in perms)
        triples.append((public_perms, conds, sorted(principals)))
    triples.sort()

    return [
        realms_pb2.Binding(
            permissions=perms,
            principals=principals,
            conditions=conds)
        for perms, conds, principals in triples
    ]
def derive_realm_data(realm, extends):
    """Calculates realms_pb2.RealmData from the realm config and parent data.

    Args:
      realm: realms_config_pb2.Realm to calculate the data for.
      extends: a list of realms_pb2.RealmData it extends from.

    Returns:
      realms_pb2.RealmData or None if empty.
    """
    # Union of the realm's own services with everything inherited.
    services = set(realm.enforce_in_service)
    for parent_data in extends:
        services |= set(parent_data.enforce_in_service)
    if not services:
        return None
    return realms_pb2.RealmData(enforce_in_service=sorted(services))
|
luci/luci-py
|
appengine/auth_service/realms/rules.py
|
Python
|
apache-2.0
| 17,117
|
[
"VisIt"
] |
d6a118ef054ae6ffe9f2f85aac0ec6c8e19f149f11a4a995b9a8fa80ddee50b8
|
import os
from fontbakery.profiles.universal import UNIVERSAL_PROFILE_CHECKS
from fontbakery.checkrunner import Section, INFO, WARN, ERROR, SKIP, PASS, FAIL
from fontbakery.callable import check, disable
from fontbakery.message import Message
from fontbakery.fonts_profile import profile_factory
from fontbakery.constants import (NameID,
PlatformID,
WindowsEncodingID,
WindowsLanguageID,
MacintoshEncodingID,
MacintoshLanguageID)
from .googlefonts_conditions import * # pylint: disable=wildcard-import,unused-wildcard-import
# Profile definition for Google Fonts. `profile_imports` presumably tells
# fontbakery to also load the universal profile's checks -- confirm against
# the fontbakery profile-loading docs.
profile_imports = ('fontbakery.profiles.universal',)
profile = profile_factory(default_section=Section("Google Fonts"))
# Checks validating the METADATA.pb file of a font family.
METADATA_CHECKS = [
    'com.google.fonts/check/metadata/parses',
    'com.google.fonts/check/metadata/unknown_designer',
    'com.google.fonts/check/metadata/multiple_designers',
    'com.google.fonts/check/metadata/designer_values',
    'com.google.fonts/check/metadata/listed_on_gfonts',
    'com.google.fonts/check/metadata/unique_full_name_values',
    'com.google.fonts/check/metadata/unique_weight_style_pairs',
    'com.google.fonts/check/metadata/license',
    'com.google.fonts/check/metadata/menu_and_latin',
    'com.google.fonts/check/metadata/subsets_order',
    'com.google.fonts/check/metadata/includes_production_subsets',
    'com.google.fonts/check/metadata/copyright',
    'com.google.fonts/check/metadata/familyname',
    'com.google.fonts/check/metadata/has_regular',
    'com.google.fonts/check/metadata/regular_is_400',
    'com.google.fonts/check/metadata/nameid/family_name',
    'com.google.fonts/check/metadata/nameid/post_script_name',
    'com.google.fonts/check/metadata/nameid/full_name',
    'com.google.fonts/check/metadata/nameid/family_and_full_names', # FIXME! This seems redundant!
    'com.google.fonts/check/metadata/nameid/copyright',
    'com.google.fonts/check/metadata/nameid/font_name', # FIXME! This looks suspiciously similar to com.google.fonts/check/metadata/nameid/family_name
    'com.google.fonts/check/metadata/match_fullname_postscript',
    'com.google.fonts/check/metadata/match_filename_postscript',
    'com.google.fonts/check/metadata/match_weight_postscript',
    'com.google.fonts/check/metadata/valid_name_values',
    'com.google.fonts/check/metadata/valid_full_name_values',
    'com.google.fonts/check/metadata/valid_filename_values',
    'com.google.fonts/check/metadata/valid_post_script_name_values',
    'com.google.fonts/check/metadata/valid_copyright',
    'com.google.fonts/check/metadata/reserved_font_name',
    'com.google.fonts/check/metadata/copyright_max_length',
    'com.google.fonts/check/metadata/filenames',
    'com.google.fonts/check/metadata/italic_style',
    'com.google.fonts/check/metadata/normal_style',
    'com.google.fonts/check/metadata/fontname_not_camel_cased',
    'com.google.fonts/check/metadata/match_name_familyname',
    'com.google.fonts/check/metadata/canonical_weight_value',
    'com.google.fonts/check/metadata/os2_weightclass',
    'com.google.fonts/check/metadata/canonical_style_names',
    'com.google.fonts/check/metadata/broken_links',
    'com.google.fonts/check/metadata/undeclared_fonts',
    'com.google.fonts/check/metadata/category',
    'com.google.fonts/check/metadata/gf-axisregistry_valid_tags',
    'com.google.fonts/check/metadata/gf-axisregistry_bounds',
    'com.google.fonts/check/metadata/consistent_axis_enumeration',
    'com.google.fonts/check/metadata/escaped_strings'
]

# Checks validating the DESCRIPTION.en_us.html file.
DESCRIPTION_CHECKS = [
    'com.google.fonts/check/description/broken_links',
    'com.google.fonts/check/description/valid_html',
    'com.google.fonts/check/description/min_length',
    'com.google.fonts/check/description/max_length',
    'com.google.fonts/check/description/git_url',
    'com.google.fonts/check/description/eof_linebreak'
]

# Checks run once per family (across all of its font files).
FAMILY_CHECKS = [
    # 'com.google.fonts/check/family/equal_numbers_of_glyphs',
    # 'com.google.fonts/check/family/equal_glyph_names',
    'com.google.fonts/check/family/has_license',
    'com.google.fonts/check/family/control_chars',
    'com.google.fonts/check/family/tnum_horizontal_metrics',
]

# Checks focused on the OpenType `name` table entries.
NAME_TABLE_CHECKS = [
    'com.google.fonts/check/name/unwanted_chars',
    'com.google.fonts/check/name/license',
    'com.google.fonts/check/name/license_url',
    'com.google.fonts/check/name/family_and_style_max_length',
    'com.google.fonts/check/name/line_breaks',
    'com.google.fonts/check/name/rfn',
]

# Checks on the upstream repository layout and licensing files.
REPO_CHECKS = [
    'com.google.fonts/check/repo/dirname_matches_nameid_1',
    'com.google.fonts/check/repo/vf_has_static_fonts',
    'com.google.fonts/check/repo/fb_report',
    'com.google.fonts/check/repo/zip_files',
    'com.google.fonts/check/license/OFL_copyright'
]

# Checks run on each individual font binary.
FONT_FILE_CHECKS = [
    'com.google.fonts/check/glyph_coverage',
    'com.google.fonts/check/canonical_filename',
    'com.google.fonts/check/usweightclass',
    'com.google.fonts/check/fstype',
    'com.google.fonts/check/vendor_id',
    'com.google.fonts/check/ligature_carets',
    'com.google.fonts/check/production_glyphs_similarity',
    'com.google.fonts/check/fontv',
    #DISABLED: 'com.google.fonts/check/production_encoded_glyphs',
    'com.google.fonts/check/varfont/generate_static',
    'com.google.fonts/check/kerning_for_non_ligated_sequences',
    'com.google.fonts/check/name/description_max_length',
    'com.google.fonts/check/fvar_name_entries',
    'com.google.fonts/check/version_bump',
    'com.google.fonts/check/epar',
    'com.google.fonts/check/font_copyright',
    'com.google.fonts/check/italic_angle',
    'com.google.fonts/check/has_ttfautohint_params',
    'com.google.fonts/check/name/version_format',
    'com.google.fonts/check/name/familyname_first_char',
    'com.google.fonts/check/hinting_impact',
    'com.google.fonts/check/varfont/has_HVAR',
    'com.google.fonts/check/name/typographicfamilyname',
    'com.google.fonts/check/name/subfamilyname',
    'com.google.fonts/check/name/typographicsubfamilyname',
    'com.google.fonts/check/gasp',
    'com.google.fonts/check/name/familyname',
    'com.google.fonts/check/name/mandatory_entries',
    'com.google.fonts/check/name/copyright_length',
    'com.google.fonts/check/fontdata_namecheck',
    'com.google.fonts/check/name/ascii_only_entries',
    'com.google.fonts/check/varfont_has_instances',
    'com.google.fonts/check/varfont_weight_instances',
    'com.google.fonts/check/old_ttfautohint',
    'com.google.fonts/check/vttclean',
    'com.google.fonts/check/name/postscriptname',
    'com.google.fonts/check/aat',
    'com.google.fonts/check/name/fullfontname',
    'com.google.fonts/check/mac_style',
    'com.google.fonts/check/fsselection',
    'com.google.fonts/check/smart_dropout',
    'com.google.fonts/check/integer_ppem_if_hinted',
    'com.google.fonts/check/unitsperem_strict',
    'com.google.fonts/check/contour_count',
    'com.google.fonts/check/vertical_metrics_regressions',
    'com.google.fonts/check/cjk_vertical_metrics',
    'com.google.fonts/check/varfont_instance_coordinates',
    'com.google.fonts/check/varfont_instance_names',
    'com.google.fonts/check/varfont_duplicate_instance_names',
    'com.google.fonts/check/varfont/consistent_axes',
    'com.google.fonts/check/varfont/unsupported_axes',
    'com.google.fonts/check/STAT/gf-axisregistry',
    'com.google.fonts/check/STAT/axis_order'
]
# Full Google Fonts check list: the universal checks plus every
# Google-Fonts-specific group defined above.
GOOGLEFONTS_PROFILE_CHECKS = (
    UNIVERSAL_PROFILE_CHECKS
    + METADATA_CHECKS
    + DESCRIPTION_CHECKS
    + FAMILY_CHECKS
    + NAME_TABLE_CHECKS
    + REPO_CHECKS
    + FONT_FILE_CHECKS
)
@check(
    id = 'com.google.fonts/check/canonical_filename',
    rationale = """
    A font's filename must be composed in the following manner:
    <familyname>-<stylename>.ttf
    - Nunito-Regular.ttf,
    - Oswald-BoldItalic.ttf
    Variable fonts must list the axis tags in alphabetical order in square brackets and separated by commas:
    - Roboto[wdth,wght].ttf
    - Familyname-Italic[wght].ttf
    """
)
def com_google_fonts_check_canonical_filename(font):
    """Checking file is named canonically."""
    from fontTools.ttLib import TTFont
    from .shared_conditions import is_variable_font
    from .googlefonts_conditions import canonical_stylename
    from fontbakery.utils import suffix
    from fontbakery.constants import (STATIC_STYLE_NAMES,
                                      MacStyle)

    def variable_font_filename(ttFont):
        # Expected filename for a variable font: family name (typographic
        # name preferred) without spaces, optional -Italic, then the sorted
        # fvar axis tags in square brackets.
        from fontbakery.utils import get_name_entry_strings
        familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)[0]
        typo_familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
        familyname = typo_familynames[0] if typo_familynames else familyname
        familyname = "".join(familyname.split(' '))  #remove spaces
        if bool(ttFont["head"].macStyle & MacStyle.ITALIC):
            familyname+="-Italic"
        tags = ttFont["fvar"].axes
        tags = list(map(lambda t: t.axisTag, tags))
        tags.sort()
        tags = "[{}]".format(",".join(tags))
        return f"{familyname}{tags}.ttf"

    failed = False
    # Underscores are never allowed in filenames; bail out early.
    if "_" in os.path.basename(font):
        failed = True
        yield FAIL,\
              Message("invalid-char",
                      f'font filename "{font}" is invalid.'
                      f' It must not contain underscore characters!')
        return

    ttFont = TTFont(font)
    if is_variable_font(ttFont):
        if suffix(font) in STATIC_STYLE_NAMES:
            failed = True
            yield FAIL,\
                  Message("varfont-with-static-filename",
                          "This is a variable font, but it is using"
                          " a naming scheme typical of a static font.")
        expected = variable_font_filename(ttFont)
        font_filename = os.path.basename(font)
        if font_filename != expected:
            failed = True
            yield FAIL,\
                  Message("bad-varfont-filename",
                          f"The file '{font_filename}' must be renamed"
                          f" to '{expected}' according to the"
                          f" Google Fonts naming policy for variable fonts.")
    else:
        if not canonical_stylename(font):
            failed = True
            style_names = '", "'.join(STATIC_STYLE_NAMES)
            yield FAIL,\
                  Message("bad-static-filename",
                          f'Style name used in "{font}" is not canonical.'
                          f' You should rebuild the font using'
                          f' any of the following'
                          f' style names: "{style_names}".')

    if not failed:
        yield PASS, f"{font} is named canonically."
@check(
    id = 'com.google.fonts/check/description/broken_links',
    conditions = ['description_html'],
    rationale = """
The snippet of HTML in the DESCRIPTION.en_us.html file is added to the font family webpage on the Google Fonts website. For that reason, all hyperlinks in it must be properly working.
"""
)
def com_google_fonts_check_description_broken_links(description_html):
    """Does DESCRIPTION file contain broken links?"""
    # Probes every <a href="..."> in the description snippet:
    # - email links (mailto:) are reported as INFO,
    # - HTTP(S) links are HEAD-requested; timeouts yield a WARN and any
    #   other request failure or unexpected status marks the link broken.
    import requests
    broken_links = []
    unique_links = []
    for a_href in description_html.iterfind('.//a[@href]'):
        link = a_href.get("href")

        # avoid requesting (or reporting) the same URL more than once
        if link in unique_links:
            continue

        if link.startswith("mailto:") and \
           "@" in link and \
           "." in link.split("@")[1]:
            # FIX: mailto links were previously never added to unique_links,
            # so a repeated email address was reported once per occurrence.
            unique_links.append(link)
            yield INFO,\
                  Message("email",
                          f"Found an email address: {link}")
            continue

        unique_links.append(link)
        try:
            response = requests.head(link, allow_redirects=True, timeout=10)
            code = response.status_code
            # Status 429: "Too Many Requests" is acceptable
            # because it means the website is probably ok and
            # we're just perhaps being too aggressive in probing the server!
            if code not in [requests.codes.ok,
                            requests.codes.too_many_requests]:
                broken_links.append(f"{link} (status code: {code})")
        except requests.exceptions.Timeout:
            yield WARN,\
                  Message("timeout",
                          f"Timedout while attempting to access: '{link}'."
                          f" Please verify if that's a broken link.")
        except requests.exceptions.RequestException:
            broken_links.append(link)

    if len(broken_links) > 0:
        broken_links_list = '\n\t'.join(broken_links)
        yield FAIL,\
              Message("broken-links",
                      f"The following links are broken"
                      f" in the DESCRIPTION file:\n\t"
                      f"{broken_links_list}")
    else:
        yield PASS, "All links in the DESCRIPTION file look good!"
@condition
def description_html(description):
    """Parse the DESCRIPTION.en_us.html snippet into an lxml element tree.

    The snippet is wrapped in a dummy <html> root element so that it parses
    as a single document. Returns None when parsing fails, which disables
    all checks conditioned on 'description_html'.
    """
    from lxml import etree
    try:
        return etree.fromstring("<html>" + description + "</html>")
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Malformed markup simply means the condition
        # is not met, so we return None instead of raising.
        return None
@check(
    id = 'com.google.fonts/check/description/git_url',
    conditions = ['description_html'],
    rationale = """
The contents of the DESCRIPTION.en-us.html file are displayed on the Google Fonts website in the about section of each font family specimen page.
Since all of the Google Fonts collection is composed of libre-licensed fonts, this check enforces a policy that there must be a hypertext link in that page directing users to the repository where the font project files are made available.
Such hosting is typically done on sites like Github, Gitlab, GNU Savannah or any other git-based version control service.
"""
)
def com_google_fonts_check_description_git_url(description_html):
    """Does DESCRIPTION file contain a upstream Git repo URL?"""
    # Collect every hyperlink whose URL points at a git-hosted repository.
    git_urls = []
    for anchor in description_html.iterfind('.//a[@href]'):
        link = anchor.get("href")
        if "://git" not in link:
            continue
        git_urls.append(link)
        yield INFO,\
              Message("url-found",
                      f"Found a git repo URL: {link}")

    if not git_urls:
        yield FAIL,\
              Message("lacks-git-url",
                      "Please host your font project on a public Git repo"
                      " (such as GitHub or GitLab) and place a link"
                      " in the DESCRIPTION.en_us.html file.")
    else:
        yield PASS, "Looks great!"
@check(
    id = 'com.google.fonts/check/description/valid_html',
    conditions = ['description'],
    rationale = """
Sometimes people write malformed HTML markup. This check should ensure the file is good.
Additionally, when packaging families for being pushed to the `google/fonts` git repo, if there is no DESCRIPTION.en_us.html file, some older versions of the `add_font.py` tool insert a dummy description file which contains invalid html. This file needs to either be replaced with an existing description file or edited by hand.
""",
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2664'
    }
)
def com_google_fonts_check_description_valid_html(descfile, description):
    """Is this a proper HTML snippet?"""
    passed = True
    if "<html>" in description or "</html>" in description:
        # FIX: this FAIL previously did not flip `passed`, so a description
        # containing an <html> tag could emit both a FAIL and a PASS.
        passed = False
        yield FAIL,\
              Message("html-tag",
                      f"{descfile} should not have an <html> tag,"
                      f" since it should only be a snippet that will"
                      f" later be included in the Google Fonts"
                      f" font family specimen webpage.")

    # The snippet must parse when wrapped in a dummy root element.
    from lxml import etree
    try:
        etree.fromstring("<html>" + description + "</html>")
    except Exception as e:
        passed = False
        yield FAIL,\
              Message("malformed-snippet",
                      f"{descfile} does not look like a proper HTML snippet."
                      f" Please look for syntax errors."
                      f" Maybe the following parser error message can help"
                      f" you find what's wrong:\n"
                      f"----------------\n"
                      f"{e}\n"
                      f"----------------\n")

    if "<p>" not in description or "</p>" not in description:
        passed = False
        yield FAIL,\
              Message("lacks-paragraph",
                      f"{descfile} does not include an HTML <p> tag.")

    if passed:
        yield PASS, f"{descfile} is a proper HTML file."
@check(
    id = 'com.google.fonts/check/description/min_length',
    conditions = ['description']
)
def com_google_fonts_check_description_min_length(description):
    """DESCRIPTION.en_us.html must have more than 200 bytes."""
    # A description this short is most likely a stub or placeholder.
    if len(description) > 200:
        yield PASS, "DESCRIPTION.en_us.html is larger than 200 bytes."
    else:
        yield FAIL,\
              Message("too-short",
                      "DESCRIPTION.en_us.html must"
                      " have size larger than 200 bytes.")
@check(
    id = 'com.google.fonts/check/description/max_length',
    conditions = ['description']
)
def com_google_fonts_check_description_max_length(description):
    """DESCRIPTION.en_us.html must have less than 1000 bytes."""
    # Overly long descriptions clutter the family specimen page.
    if len(description) < 1000:
        yield PASS, "DESCRIPTION.en_us.html is smaller than 1000 bytes."
    else:
        yield FAIL,\
              Message("too-long",
                      "DESCRIPTION.en_us.html must"
                      " have size smaller than 1000 bytes.")
@check(
    id = 'com.google.fonts/check/description/eof_linebreak',
    conditions = ['description'],
    rationale = """
Some older text-handling tools sometimes misbehave if the last line of data in a text file is not terminated with a newline character (also known as '\\n').
We know that this is a very small detail, but for the sake of keeping all DESCRIPTION.en_us.html files uniformly formatted throughout the GFonts collection, we chose to adopt the practice of placing this final linebreak char on them.
""",
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2879'
    }
)
def com_google_fonts_check_description_eof_linebreak(description):
    """DESCRIPTION.en_us.html should end in a linebreak."""
    # Inspect only the very last character of the file contents.
    if description[-1] == '\n':
        yield PASS, ":-)"
    else:
        yield WARN,\
              Message("missing-eof-linebreak",
                      "The last characther on DESCRIPTION.en_us.html"
                      " is not a line-break. Please add it.")
@check(
    id = 'com.google.fonts/check/metadata/parses',
    conditions = ['family_directory'],
    rationale = """
The purpose of this check is to ensure that the METADATA.pb file is not malformed.
"""
)
def com_google_fonts_check_metadata_parses(family_directory):
    """Check METADATA.pb parse correctly."""
    from google.protobuf import text_format
    from fontbakery.utils import get_FamilyProto_Message
    try:
        # get_FamilyProto_Message raises text_format.ParseError on
        # malformed protobuf text, and FileNotFoundError when absent.
        pb_file = os.path.join(family_directory, "METADATA.pb")
        get_FamilyProto_Message(pb_file)
        yield PASS, "METADATA.pb parsed successfuly."
    except text_format.ParseError as e:
        yield FAIL,\
              Message("parsing-error",
                      f"Family metadata at {family_directory} failed to parse.\n"
                      f"TRACEBACK:\n{e}")
    except FileNotFoundError:
        # A missing file is not a parse failure; other checks deal with
        # the presence of METADATA.pb, so this one is skipped.
        yield SKIP,\
              Message("file-not-found",
                      f"Font family at '{family_directory}' lacks a METADATA.pb file.")
@check(
    id = 'com.google.fonts/check/metadata/unknown_designer',
    conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_unknown_designer(family_metadata):
    """Font designer field in METADATA.pb must not be 'unknown'."""
    # Case-insensitive comparison against the 'unknown' placeholder.
    designer = family_metadata.designer
    if designer.lower() != 'unknown':
        yield PASS, "Font designer field is not 'unknown'."
    else:
        yield FAIL,\
              Message("unknown-designer",
                      f"Font designer field is '{designer}'.")
@check(
    id = 'com.google.fonts/check/metadata/multiple_designers',
    conditions = ['family_metadata'],
    rationale = """
For a while the string "Multiple designers" was used as a placeholder on METADATA.pb files. We should replace all those instances with actual designer names so that proper credits are displayed on the Google Fonts family specimen pages.
If there's more than a single designer, the designer names must be separated by commas.
"""
)
def com_google_fonts_check_metadata_multiple_designers(family_metadata):
    """Font designer field in METADATA.pb must not contain 'Multiple designers'."""
    # Case-insensitive detection of the legacy placeholder string.
    designer = family_metadata.designer
    if 'multiple designer' not in designer.lower():
        yield PASS, "Looks good."
    else:
        yield FAIL,\
              Message("multiple-designers",
                      f"Font designer field is '{designer}'."
                      f" Please add an explicit comma-separated list of designer names.")
@check(
    id = 'com.google.fonts/check/metadata/designer_values',
    conditions = ['family_metadata'],
    rationale = """
We must use commas instead of forward slashes because the server-side code at the fonts.google.com directory will segment the string on the commas into a list of names and display the first item in the list as the "principal designer" while the remaining names are identified as "contributors".
See eg https://fonts.google.com/specimen/Rubik
"""
)
def com_google_fonts_check_metadata_designer_values(family_metadata):
    """Multiple values in font designer field in
    METADATA.pb must be separated by commas."""
    # The specimen-page code splits on commas, never on slashes.
    if '/' not in family_metadata.designer:
        yield PASS, "Looks good."
    else:
        yield FAIL,\
              Message("slash",
                      f"Font designer field contains a forward slash"
                      f" '{family_metadata.designer}'."
                      f" Please use commas to separate multiple names instead.")
@check(
    id = 'com.google.fonts/check/metadata/broken_links',
    conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_broken_links(family_metadata):
    """Does METADATA.pb copyright field contain broken links?"""
    # Scans the copyright string of every font entry:
    # - email addresses (mailto:) are reported as INFO,
    # - embedded URLs are HEAD-requested; timeouts yield a WARN and any
    #   other request failure or unexpected status marks the link broken.
    import requests
    broken_links = []
    unique_links = []
    for font_metadata in family_metadata.fonts:
        copyright = font_metadata.copyright
        if "mailto:" in copyright:
            # avoid reporting more than once
            if copyright in unique_links:
                continue

            unique_links.append(copyright)
            yield INFO,\
                  Message("email",
                          f"Found an email address: {copyright}")
            continue

        if "http" in copyright:
            # Extract the URL substring from the copyright text: everything
            # from the first "http" up to the next space or ')' delimiter.
            link = "http" + copyright.split("http")[1]

            for endchar in [' ', ')']:
                if endchar in link:
                    link = link.split(endchar)[0]

            # avoid requesting the same URL more than once
            if link in unique_links:
                continue

            unique_links.append(link)
            try:
                response = requests.head(link, allow_redirects=True, timeout=10)
                code = response.status_code
                # Status 429: "Too Many Requests" is acceptable
                # because it means the website is probably ok and
                # we're just perhaps being too aggressive in probing the server!
                if code not in [requests.codes.ok,
                                requests.codes.too_many_requests]:
                    broken_links.append(("{} (status code: {})").format(link, code))
            except requests.exceptions.Timeout:
                yield WARN,\
                      Message("timeout",
                              f"Timed out while attempting to access: '{link}'."
                              f" Please verify if that's a broken link.")
            except requests.exceptions.RequestException:
                broken_links.append(link)

    # All broken links are aggregated into a single FAIL message.
    if len(broken_links) > 0:
        broken_links_list = '\n\t'.join(broken_links)
        yield FAIL,\
              Message("broken-links",
                      f"The following links are broken"
                      f" in the METADATA.pb file:\n\t"
                      f"{broken_links_list}")
    else:
        yield PASS, "All links in the METADATA.pb file look good!"
@check(
    id = 'com.google.fonts/check/metadata/undeclared_fonts',
    conditions = ['family_metadata'],
    rationale = """
The set of font binaries available, except the ones on a "static" subdir, must match exactly those declared on the METADATA.pb file.
Also, to avoid confusion, we expect that font files (other than statics) are not placed on subdirectories.
"""
)
def com_google_fonts_check_metadata_undeclared_fonts(family_metadata, family_directory):
    """Ensure METADATA.pb lists all font binaries."""
    # Filenames declared in METADATA.pb:
    pb_binaries = []
    for font_metadata in family_metadata.fonts:
        pb_binaries.append(font_metadata.filename)

    passed = True
    # Font binaries actually present on the family root directory:
    binaries = []
    for entry in os.listdir(family_directory):
        if entry != "static" and os.path.isdir(os.path.join(family_directory, entry)):
            # Fonts inside subdirectories (other than "static") are a
            # packaging mistake and are warned about individually.
            for filename in os.listdir(os.path.join(family_directory, entry)):
                if filename[-4:] in [".ttf", ".otf"]:
                    path = os.path.join(family_directory, entry, filename)
                    passed = False
                    yield WARN,\
                          Message("font-on-subdir",
                                  f'The file "{path}" is a font binary'
                                  f' in a subdirectory.\n'
                                  f'Please keep all font files (except VF statics) directly'
                                  f' on the root directory side-by-side'
                                  f' with its corresponding METADATA.pb file.')
        else:
            # Note: This does not include any font binaries placed in a "static" subdir!
            if entry[-4:] in [".ttf", ".otf"]:
                binaries.append(entry)

    # Declared but not present:
    for filename in sorted(set(pb_binaries) - set(binaries)):
        passed = False
        # FIX: the messages below previously printed a literal placeholder
        # instead of interpolating the offending filename.
        yield FAIL,\
              Message("file-missing",
                      f'The file "{filename}" declared on METADATA.pb'
                      f' is not available in this directory.')

    # Present but not declared:
    for filename in sorted(set(binaries) - set(pb_binaries)):
        passed = False
        yield FAIL,\
              Message("file-not-declared",
                      f'The file "{filename}" is not declared on METADATA.pb')

    if passed:
        yield PASS, "OK"
@check(
    id = 'com.google.fonts/check/metadata/category',
    conditions = ['family_metadata'],
    rationale = """
There are only five acceptable values for the category field in a METADATA.pb file:
- MONOSPACE
- SANS_SERIF
- SERIF
- DISPLAY
- HANDWRITING
This check is meant to avoid typos in this field.
""",
    misc_metadata = {
        'request': "https://github.com/googlefonts/fontbakery/issues/2972"
    }
)
def com_google_fonts_check_metadata_category(family_metadata):
    """Ensure METADATA.pb category field is valid."""
    # The closed set of category values accepted by the spec.
    VALID_CATEGORIES = ("MONOSPACE",
                        "SANS_SERIF",
                        "SERIF",
                        "DISPLAY",
                        "HANDWRITING")
    if family_metadata.category in VALID_CATEGORIES:
        yield PASS, "OK!"
    else:
        yield FAIL,\
              Message('bad-value',
                      f'The field category has "{family_metadata.category}"'
                      f' which is not valid.')
@disable # TODO: re-enable after addressing issue #1998
@check(
    id = 'com.google.fonts/check/family/equal_numbers_of_glyphs',
    conditions = ['are_ttf',
                  'stylenames_are_canonical']
)
def com_google_fonts_check_family_equal_numbers_of_glyphs(ttFonts):
    """Fonts have equal numbers of glyphs?"""
    from .googlefonts_conditions import canonical_stylename
    # ttFonts is an iterator, so here we make a list from it
    # because we'll have to iterate twice in this check implementation:
    the_ttFonts = list(ttFonts)

    failed = False
    # First pass: find the family member with the largest glyph count;
    # it is used as the reference that the others are compared against.
    max_stylename = None
    max_count = 0
    max_glyphs = None
    for ttFont in the_ttFonts:
        fontname = ttFont.reader.file.name
        stylename = canonical_stylename(fontname)
        this_count = len(ttFont['glyf'].glyphs)
        if this_count > max_count:
            max_count = this_count
            max_stylename = stylename
            max_glyphs = set(ttFont['glyf'].glyphs)

    # Second pass: report every member whose glyph count diverges,
    # listing (a sample of) the glyphs that differ.
    for ttFont in the_ttFonts:
        fontname = ttFont.reader.file.name
        stylename = canonical_stylename(fontname)
        these_glyphs = set(ttFont['glyf'].glyphs)
        this_count = len(these_glyphs)
        if this_count != max_count:
            failed = True
            all_glyphs = max_glyphs.union(these_glyphs)
            common_glyphs = max_glyphs.intersection(these_glyphs)
            diff = all_glyphs - common_glyphs
            diff_count = len(diff)
            # Cap the listing at 10 glyph names to keep the message readable.
            if diff_count < 10:
                diff = ", ".join(diff)
            else:
                diff = ", ".join(list(diff)[:10]) + " (and more)"

            yield FAIL,\
                  Message("glyph-count-diverges",
                          f"{stylename} has {this_count} glyphs while"
                          f" {max_stylename} has {max_count} glyphs."
                          f" There are {diff_count} different glyphs"
                          f" among them: {sorted(diff)}")

    if not failed:
        yield PASS, ("All font files in this family have"
                     " an equal total ammount of glyphs.")
@disable # TODO: re-enable after addressing issue #1998
@check(
    id = 'com.google.fonts/check/family/equal_glyph_names',
    conditions = ['are_ttf']
)
def com_google_fonts_check_family_equal_glyph_names(ttFonts):
    """Fonts have equal glyph names?"""
    from .googlefonts_conditions import style
    fonts = list(ttFonts)

    # The union of all glyph names across every family member:
    all_glyphnames = set()
    for ttFont in fonts:
        all_glyphnames |= set(ttFont["glyf"].glyphs.keys())

    # For each glyph name, track which fonts have it and which lack it.
    missing = {}
    available = {}
    for glyphname in all_glyphnames:
        missing[glyphname] = []
        available[glyphname] = []

    failed = False
    for ttFont in fonts:
        fontname = ttFont.reader.file.name
        these_ones = set(ttFont["glyf"].glyphs.keys())
        for glyphname in all_glyphnames:
            if glyphname not in these_ones:
                failed = True
                missing[glyphname].append(fontname)
            else:
                available[glyphname].append(fontname)

    for gn in sorted(missing.keys()):
        if missing[gn]:
            available_styles = [style(k) for k in available[gn]]
            missing_styles = [style(k) for k in missing[gn]]
            if None not in available_styles + missing_styles:
                # if possible, use stylenames in the log messages.
                # FIX: was `sorted(vailable_styles)` — a NameError typo that
                # would crash this branch whenever all styles were known.
                avail = ', '.join(sorted(available_styles))
                miss = ', '.join(sorted(missing_styles))
            else:
                # otherwise, print filenames:
                avail = ', '.join(sorted(available[gn]))
                miss = ', '.join(sorted(missing[gn]))

            yield FAIL,\
                  Message("missing-glyph",
                          f"Glyphname '{gn}' is defined on {avail}"
                          f" but is missing on {miss}.")

    if not failed:
        yield PASS, "All font files have identical glyph names."
@check(
    id = 'com.google.fonts/check/fstype',
    rationale = """
The fsType in the OS/2 table is a legacy DRM-related field. Fonts in the Google Fonts collection must have it set to zero (also known as "Installable Embedding"). This setting indicates that the fonts can be embedded in documents and permanently installed by applications on remote systems.
More detailed info is available at:
https://docs.microsoft.com/en-us/typography/opentype/spec/os2#fstype
"""
)
def com_google_fonts_check_fstype(ttFont):
    """Checking OS/2 fsType does not impose restrictions."""
    value = ttFont['OS/2'].fsType
    if value != 0:
        # Descriptions of the individual fsType restriction bits, per the
        # OpenType OS/2 spec. FIX: all entries now carry a "* " bullet and
        # are newline-joined below; previously only the first entry had a
        # bullet and multiple set bits ran together into one unreadable line.
        FSTYPE_RESTRICTIONS = {
            0x0002: ("* The font must not be modified, embedded or exchanged in"
                     " any manner without first obtaining permission of"
                     " the legal owner."),
            0x0004: ("* The font may be embedded, and temporarily loaded on the"
                     " remote system, but documents that use it must"
                     " not be editable."),
            0x0008: ("* The font may be embedded but must only be installed"
                     " temporarily on other systems."),
            0x0100: ("* The font may not be subsetted prior to embedding."),
            0x0200: ("* Only bitmaps contained in the font may be embedded."
                     " No outline data may be embedded.")
        }
        restrictions = ""
        for bit_mask in FSTYPE_RESTRICTIONS.keys():
            if value & bit_mask:
                restrictions += FSTYPE_RESTRICTIONS[bit_mask] + "\n"

        # Bits outside the documented masks are reserved and must be zero.
        if value & 0b1111110011110001:
            restrictions += ("* There are reserved bits set,"
                             " which indicates an invalid setting.")

        yield FAIL,\
              Message("drm",
                      f"In this font fsType is set to {value} meaning that:\n"
                      f"{restrictions}\n"
                      f"\n"
                      f"No such DRM restrictions can be enabled on the"
                      f" Google Fonts collection, so the fsType field"
                      f" must be set to zero (Installable Embedding) instead.")
    else:
        yield PASS, "OS/2 fsType is properly set to zero."
@check(
    id = 'com.google.fonts/check/vendor_id',
    conditions = ['registered_vendor_ids'],
    rationale = """
Microsoft keeps a list of font vendors and their respective contact info. This list is updated regularly and is indexed by a 4-char "Vendor ID" which is stored in the achVendID field of the OS/2 table.
Registering your ID is not mandatory, but it is a good practice since some applications may display the type designer / type foundry contact info on some dialog and also because that info will be visible on Microsoft's website:
https://docs.microsoft.com/en-us/typography/vendors/
This check verifies whether or not a given font's vendor ID is registered in that list or if it has some of the default values used by the most common font editors.
Each new FontBakery release includes a cached copy of that list of vendor IDs. If you registered recently, you're safe to ignore warnings emitted by this check, since your ID will soon be included in one of our upcoming releases.
"""
)
def com_google_fonts_check_vendor_id(ttFont, registered_vendor_ids):
    """Checking OS/2 achVendID."""
    SUGGEST_MICROSOFT_VENDORLIST_WEBSITE = (
        "If you registered it recently, then it's safe to ignore this warning message."
        " Otherwise, you should set it to your own unique 4 character code,"
        " and register it with Microsoft at"
        " https://www.microsoft.com/typography/links/vendorlist.aspx\n")

    # Vendor IDs written by font editors as placeholders/defaults:
    FONT_EDITOR_DEFAULTS = ('UKWN', 'ukwn', 'PfEd')

    vid = ttFont['OS/2'].achVendID
    if vid is None:
        yield WARN,\
              Message("not-set",
                      f"OS/2 VendorID is not set."
                      f" {SUGGEST_MICROSOFT_VENDORLIST_WEBSITE}")
    elif vid in FONT_EDITOR_DEFAULTS:
        yield WARN,\
              Message("bad",
                      f"OS/2 VendorID is '{vid}', a font editor default."
                      f" {SUGGEST_MICROSOFT_VENDORLIST_WEBSITE}")
    elif vid not in registered_vendor_ids.keys():
        yield WARN,\
              Message("unknown",
                      f"OS/2 VendorID value '{vid}' is not yet recognized."
                      f" {SUGGEST_MICROSOFT_VENDORLIST_WEBSITE}")
    else:
        yield PASS, f"OS/2 VendorID '{vid}' looks good!"
@check(
    id = 'com.google.fonts/check/glyph_coverage',
    rationale = """
Google Fonts expects that fonts in its collection support at least the minimal set of characters defined in the `GF-latin-core` glyph-set.
"""
)
def com_google_fonts_check_glyph_coverage(ttFont):
    """Check `Google Fonts Latin Core` glyph coverage."""
    from fontbakery.utils import pretty_print_list
    from fontbakery.constants import GF_latin_core

    # Collect every codepoint mapped by the Windows / Unicode-BMP
    # cmap subtables of this font.
    font_codepoints = set()
    for table in ttFont['cmap'].tables:
        if (table.platformID == PlatformID.WINDOWS and
            table.platEncID == WindowsEncodingID.UNICODE_BMP):
            font_codepoints.update(table.cmap.keys())

    required_codepoints = set(GF_latin_core.keys())
    diff = required_codepoints - font_codepoints
    if bool(diff):
        # The FAIL message lists at most 4 entries; when more are missing,
        # the follow-up INFO message carries the full list.
        missing = ['0x%04X (%s)' % (c, GF_latin_core[c][1]) for c in sorted(diff)]
        yield FAIL,\
              Message("missing-codepoints",
                      f"Missing required codepoints:"
                      f" {pretty_print_list(missing, shorten=4)}")
        if len(missing) > 4:
            missing_list = "\n\t\t".join(missing)
            yield INFO,\
                  Message("missing-codepoints-verbose",
                          f"Here's the full list of required codepoints"
                          f" still missing:\n\t\t{missing_list}")
    else:
        yield PASS, "OK"
@check(
    id = 'com.google.fonts/check/name/unwanted_chars'
)
def com_google_fonts_check_name_unwanted_chars(ttFont):
    """Substitute copyright, registered and trademark
    symbols in name table entries."""
    # Symbols that must appear as their ASCII equivalents instead.
    replacement_map = [("\u00a9", '(c)'),
                       ("\u00ae", '(r)'),
                       ("\u2122", '(tm)')]
    passed = True
    for name in ttFont['name'].names:
        decoded = str(name.string, encoding=name.getEncoding())
        for mark, ascii_repl in replacement_map:
            if mark in decoded:
                passed = False
                yield FAIL,\
                      Message("unwanted-chars",
                              f"NAMEID #{name.nameID} contains symbols that"
                              f" should be replaced by '{ascii_repl}'.")

    if passed:
        yield PASS, ("No need to substitute copyright, registered and"
                     " trademark symbols in name table entries of this font.")
@check(
    id = 'com.google.fonts/check/usweightclass',
    conditions=['expected_style'],
    rationale = """
Google Fonts expects variable fonts, static ttfs and static otfs to have differing OS/2 usWeightClass values.
For Variable Fonts, Thin-Black must be 100-900
For static ttfs, Thin-Black can be 100-900 or 250-900
For static otfs, Thin-Black must be 250-900
If static otfs are set lower than 250, text may appear blurry in legacy Windows applications.
Glyphsapp users can change the usWeightClass value of an instance by adding a 'weightClass' customParameter.
"""
)
def com_google_fonts_check_usweightclass(ttFont, expected_style):
    """Checking OS/2 usWeightClass."""
    from fontbakery.profiles.shared_conditions import (
        is_ttf,
        is_cff,
        is_variable_font
    )
    failed = False
    # The expected value/name are derived from the style by the
    # 'expected_style' condition.
    expected_value = expected_style.usWeightClass
    weight_name = expected_style.name
    value = ttFont['OS/2'].usWeightClass
    has_expected_value = value == expected_value
    fail_message = \
        "OS/2 usWeightClass is '{}' when it should be '{}'."
    if is_variable_font(ttFont):
        # Variable fonts must use the exact expected value (100-900).
        if not has_expected_value:
            failed = True
            yield FAIL,\
                  Message("bad-value",
                          fail_message.format(value, expected_value))
    # overrides for static Thin and ExtaLight fonts
    # for static ttfs, we don't mind if Thin is 250 and ExtraLight is 275.
    # However, if the values are incorrect we will recommend they set Thin
    # to 100 and ExtraLight to 250.
    # for static otfs, Thin must be 250 and ExtraLight must be 275
    elif "Thin" in weight_name:
        if is_ttf(ttFont) and value not in [100, 250]:
            failed = True
            yield FAIL,\
                  Message("bad-value",
                          fail_message.format(value, expected_value))
        if is_cff(ttFont) and value != 250:
            failed = True
            yield FAIL,\
                  Message("bad-value",
                          fail_message.format(value, 250))
    elif "ExtraLight" in weight_name:
        if is_ttf(ttFont) and value not in [200, 275]:
            failed = True
            yield FAIL,\
                  Message("bad-value",
                          fail_message.format(value, expected_value))
        if is_cff(ttFont) and value != 275:
            failed = True
            yield FAIL,\
                  Message("bad-value",
                          fail_message.format(value, 275))
    elif not has_expected_value:
        # All other static weights must match the expected value exactly.
        failed = True
        yield FAIL,\
              Message("bad-value",
                      fail_message.format(value, expected_value))
    if not failed:
        yield PASS, "OS/2 usWeightClass is good"
@check(
    id = 'com.google.fonts/check/family/has_license',
    conditions=['gfonts_repo_structure'],
)
def com_google_fonts_check_family_has_license(licenses):
    """Check font has a license."""
    from fontbakery.utils import pretty_print_list

    # Exactly one license file is expected per family directory.
    license_count = len(licenses)
    if license_count == 1:
        yield PASS, f"Found license at '{licenses[0]}'"
    elif license_count == 0:
        yield FAIL,\
              Message("no-license",
                      "No license file was found."
                      " Please add an OFL.txt or a LICENSE.txt file."
                      " If you are running fontbakery on a Google Fonts"
                      " upstream repo, which is fine, just make sure"
                      " there is a temporary license file in the same folder.")
    else:
        filenames = [os.path.basename(f) for f in licenses]
        yield FAIL,\
              Message("multiple",
                      f"More than a single license file found:"
                      f" {pretty_print_list(filenames)}")
@check(
    id = 'com.google.fonts/check/license/OFL_copyright',
    conditions = ['license_contents'],
    rationale = """
An OFL.txt file's first line should be the font copyright e.g:
"Copyright 2019 The Montserrat Project Authors (https://github.com/julietaula/montserrat)"
""",
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2764'
    }
)
def com_google_fonts_check_license_OFL_copyright(license_contents):
    """Check license file has good copyright string."""
    import re
    # Only the first line is inspected, lower-cased for a
    # case-insensitive match against the expected pattern.
    first_line = license_contents.strip().split('\n')[0].lower()
    pattern = r'copyright [0-9]{4}(\-[0-9]{4})? the .* project authors \([^\@]*\)'
    if re.search(pattern, first_line):
        yield PASS, "looks good"
    else:
        yield FAIL, (f'First line in license file does not match expected format:'
                     f' "{first_line}"')
@check(
    id = 'com.google.fonts/check/name/license',
    conditions = ['license'],
    rationale = """
A known licensing description must be provided in the NameID 14 (LICENSE DESCRIPTION) entries of the name table.
The source of truth for this check (to determine which license is in use) is a file placed side-by-side to your font project including the licensing terms.
Depending on the chosen license, one of the following string snippets is expected to be found on the NameID 13 (LICENSE DESCRIPTION) entries of the name table:
- "This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is available with a FAQ at: https://scripts.sil.org/OFL"
- "Licensed under the Apache License, Version 2.0"
- "Licensed under the Ubuntu Font Licence 1.0."
Currently accepted licenses are Apache or Open Font License.
For a small set of legacy families the Ubuntu Font License may be acceptable as well.
When in doubt, please choose OFL for new font projects.
"""
)
def com_google_fonts_check_name_license(ttFont, license):
    """Check copyright namerecords match license file."""
    # `license` is the basename of the license file detected side-by-side
    # with the font project; it selects the expected placeholder text.
    from fontbakery.constants import PLACEHOLDER_LICENSING_TEXT
    failed = False
    http_warn = False
    placeholder = PLACEHOLDER_LICENSING_TEXT[license]
    entry_found = False
    for i, nameRecord in enumerate(ttFont["name"].names):
        if nameRecord.nameID == NameID.LICENSE_DESCRIPTION:
            entry_found = True
            value = nameRecord.toUnicode()
            if "http://" in value:
                yield WARN,\
                      Message("http-in-description",
                              f'Please consider using HTTPS URLs at'
                              f' name table entry [plat={nameRecord.platformID},'
                              f' enc={nameRecord.platEncID},'
                              f' name={nameRecord.nameID}]')
                # Normalize to https before comparing against the
                # placeholder, so an http URL alone only WARNs.
                value = "https://".join(value.split("http://"))
                http_warn = True

            if value != placeholder:
                failed = True
                yield FAIL,\
                      Message("wrong", \
                              f'License file {license} exists but'
                              f' NameID {NameID.LICENSE_DESCRIPTION}'
                              f' (LICENSE DESCRIPTION) value on platform'
                              f' {nameRecord.platformID}'
                              f' ({PlatformID(nameRecord.platformID).name})'
                              f' is not specified for that.'
                              f' Value was: "{value}"'
                              f' Must be changed to "{placeholder}"')
    if http_warn:
        yield WARN,\
              Message("http",
                      "For now we're still accepting http URLs,"
                      " but you should consider using https instead.\n")

    if not entry_found:
        yield FAIL,\
              Message("missing", \
                      f"Font lacks NameID {NameID.LICENSE_DESCRIPTION}"
                      f" (LICENSE DESCRIPTION). A proper licensing"
                      f" entry must be set.")
    elif not failed:
        yield PASS, "Licensing entry on name table is correctly set."
@check(
  id = 'com.google.fonts/check/name/license_url',
  rationale = """
  A known license URL must be provided in the NameID 14 (LICENSE INFO URL) entry of the name table.
  The source of truth for this check is the licensing text found on the NameID 13 entry (LICENSE DESCRIPTION).
  The string snippets used for detecting licensing terms are:
  - "This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is available with a FAQ at: https://scripts.sil.org/OFL"
  - "Licensed under the Apache License, Version 2.0"
  - "Licensed under the Ubuntu Font Licence 1.0."
  Currently accepted licenses are Apache or Open Font License.
  For a small set of legacy families the Ubuntu Font License may be acceptable as well.
  When in doubt, please choose OFL for new font projects.
  """,
  conditions = ['familyname']
)
def com_google_fonts_check_name_license_url(ttFont, familyname):
  """License URL matches License text on name table?"""
  from fontbakery.constants import PLACEHOLDER_LICENSING_TEXT
  # Families grandfathered into the Ubuntu Font License on Google Fonts.
  LEGACY_UFL_FAMILIES = ["Ubuntu", "UbuntuCondensed", "UbuntuMono"]
  LICENSE_URL = {
    'OFL.txt': 'https://scripts.sil.org/OFL',
    'LICENSE.txt': 'https://www.apache.org/licenses/LICENSE-2.0',
    'UFL.txt': 'https://www.ubuntu.com/legal/terms-and-policies/font-licence'
  }
  LICENSE_NAME = {
    'OFL.txt': 'Open Font',
    'LICENSE.txt': 'Apache',
    'UFL.txt': 'Ubuntu Font License'
  }
  # Step 1: infer which license this font declares by comparing every
  # LICENSE DESCRIPTION name entry against the known placeholder texts.
  detected_license = False
  http_warn = False
  for license in ['OFL.txt', 'LICENSE.txt', 'UFL.txt']:
    placeholder = PLACEHOLDER_LICENSING_TEXT[license]
    for nameRecord in ttFont['name'].names:
      string = nameRecord.string.decode(nameRecord.getEncoding())
      if nameRecord.nameID == NameID.LICENSE_DESCRIPTION:
        if "http://" in string:
          yield WARN,\
                Message("http-in-description",
                        f'Please consider using HTTPS URLs at'
                        f' name table entry [plat={nameRecord.platformID},'
                        f' enc={nameRecord.platEncID},'
                        f' name={nameRecord.nameID}]')
          # Normalize to https so the placeholder comparison still matches.
          string = "https://".join(string.split("http://"))
          http_warn = True
        if string == placeholder:
          detected_license = license
          break

  if detected_license == "UFL.txt" and familyname not in LEGACY_UFL_FAMILIES:
    yield FAIL,\
          Message("ufl",
                  "The Ubuntu Font License is only acceptable on"
                  " the Google Fonts collection for legacy font"
                  " families that already adopted such license."
                  # Fixed typo: "eigther" -> "either"
                  " New Families should use either Apache or"
                  " Open Font License.")
  else:
    found_good_entry = False
    if not detected_license:
      yield SKIP, ("Could not infer the font license."
                   " Please ensure NameID 13 (LICENSE DESCRIPTION) is properly set.")
      return
    else:
      # Step 2: every LICENSE INFO URL entry must point to the URL
      # that corresponds to the detected license.
      failed = False
      expected = LICENSE_URL[detected_license]
      for nameRecord in ttFont['name'].names:
        if nameRecord.nameID == NameID.LICENSE_INFO_URL:
          string = nameRecord.string.decode(nameRecord.getEncoding())
          if "http://" in string:
            yield WARN,\
                  Message("http-in-license-info",
                          f'Please consider using HTTPS URLs at'
                          f' name table entry [plat={nameRecord.platformID},'
                          f' enc={nameRecord.platEncID},'
                          f' name={nameRecord.nameID}]')
            string = "https://".join(string.split("http://"))
          if string == expected:
            found_good_entry = True
          else:
            failed = True
            yield FAIL,\
                  Message("licensing-inconsistency",
                          f"Licensing inconsistency in name table entries!"
                          f" NameID={NameID.LICENSE_DESCRIPTION}"
                          f" (LICENSE DESCRIPTION) indicates"
                          f" {LICENSE_NAME[detected_license]} licensing,"
                          f" but NameID={NameID.LICENSE_INFO_URL}"
                          f" (LICENSE URL) has '{string}'."
                          f" Expected: '{expected}'")
      if http_warn:
        yield WARN,\
              Message("http",
                      "For now we're still accepting http URLs,"
                      " but you should consider using https instead.\n")
      if not found_good_entry:
        yield FAIL,\
              Message("no-license-found",
                      f"A known license URL must be provided in"
                      f" the NameID {NameID.LICENSE_INFO_URL}"
                      f" (LICENSE INFO URL) entry."
                      f" Currently accepted licenses are"
                      f" Apache: '{LICENSE_URL['LICENSE.txt']}'"
                      f" or Open Font License: '{LICENSE_URL['OFL.txt']}'"
                      f"\n"
                      f"For a small set of legacy families the Ubuntu"
                      f" Font License '{LICENSE_URL['UFL.txt']}' may be"
                      f" acceptable as well."
                      f"\n"
                      f"When in doubt, please choose OFL for"
                      f" new font projects.")
      else:
        if failed:
          yield FAIL,\
                Message("bad-entries",
                        f"Even though a valid license URL was seen in the"
                        f" name table, there were also bad entries."
                        f" Please review NameIDs {NameID.LICENSE_DESCRIPTION}"
                        f" (LICENSE DESCRIPTION) and {NameID.LICENSE_INFO_URL}"
                        f" (LICENSE INFO URL).")
        else:
          yield PASS, "Font has a valid license URL in NAME table."
@check(
  id = 'com.google.fonts/check/name/description_max_length',
  rationale = """
  An old FontLab version had a bug which caused it to store copyright notices in nameID 10 entries.
  In order to detect those and distinguish them from actual legitimate usage of this name table entry, we expect that such strings do not exceed a reasonable length of 200 chars.
  Longer strings are likely instances of the FontLab bug.
  """
)
def com_google_fonts_check_name_description_max_length(ttFont):
  """Description strings in the name table must not exceed 200 characters."""
  # any() short-circuits on the first overlong DESCRIPTION entry,
  # exactly like an explicit loop with a `break` would.
  found_long_entry = any(
      record.nameID == NameID.DESCRIPTION
      and len(record.string.decode(record.getEncoding())) > 200
      for record in ttFont['name'].names)
  if found_long_entry:
    yield WARN,\
          Message("too-long",
                  f"A few name table entries with ID={NameID.DESCRIPTION}"
                  f" (NameID.DESCRIPTION) are longer than 200 characters."
                  f" Please check whether those entries are copyright"
                  f" notices mistakenly stored in the description"
                  f" string entries by a bug in an old FontLab version."
                  f" If that's the case, then such copyright notices"
                  f" must be removed from these entries.")
  else:
    yield PASS, "All description name records have reasonably small lengths."
@check(
  id = 'com.google.fonts/check/hinting_impact',
  conditions = ['hinting_stats'],
  rationale = """
  This check is merely informative, displaying and useful comparison of filesizes of hinted versus unhinted font files.
  """
)
def com_google_fonts_check_hinting_impact(font, hinting_stats):
  """Show hinting filesize impact."""
  def _human_readable(num_bytes):
    # Pick the largest unit that keeps the value readable.
    if num_bytes < 1024:
      return f"{num_bytes} bytes"
    if num_bytes < 1024*1024:
      return f"{num_bytes/1024:.1f}kb"
    return f"{num_bytes/(1024*1024):.1f}Mb"

  hinted_bytes = hinting_stats["hinted_size"]
  dehinted_bytes = hinting_stats["dehinted_size"]
  # Relative growth of the hinted file over the dehinted one, in percent.
  change = (float(hinted_bytes)/dehinted_bytes - 1) * 100
  hinted_size = _human_readable(hinted_bytes)
  dehinted_size = _human_readable(dehinted_bytes)
  increase = _human_readable(hinted_bytes - dehinted_bytes)
  yield INFO,\
        Message("size-impact",
                f"Hinting filesize impact:\n"
                f"\n"
                f"\t| | {font} |\n"
                f"\t|:--- | ---:|\n"
                f"\t| Dehinted Size | {dehinted_size} |\n"
                f"\t| Hinted Size | {hinted_size} |\n"
                f"\t| Increase | {increase} |\n"
                f"\t| Change | {change:.1f} % |\n")
@check(
  id = 'com.google.fonts/check/name/version_format'
)
def com_google_fonts_check_name_version_format(ttFont):
  """Version format is correct in 'name' table?"""
  from fontbakery.utils import get_name_entry_strings
  import re
  # Accepts "Version X.Y" with X >= 1 (leading zeros tolerated).
  version_pattern = re.compile(r'Version\s0*[1-9][0-9]*\.\d+')
  all_good = True
  entries = get_name_entry_strings(ttFont, NameID.VERSION_STRING)
  if not entries:
    all_good = False
    yield FAIL,\
          Message("no-version-string",
                  f"Font lacks a NameID.VERSION_STRING"
                  f" (nameID={NameID.VERSION_STRING}) entry")
  for entry in entries:
    if version_pattern.match(entry) is None:
      all_good = False
      yield FAIL,\
            Message("bad-version-strings",
                    f'The NameID.VERSION_STRING'
                    f' (nameID={NameID.VERSION_STRING}) value must'
                    f' follow the pattern "Version X.Y" with X.Y'
                    f' greater than or equal to 1.000.'
                    f' Current version string is: "{entry}"')
  if all_good:
    yield PASS, "Version format in NAME table entries is correct."
@check(
  id = 'com.google.fonts/check/has_ttfautohint_params',
)
def com_google_fonts_check_has_ttfautohint_params(ttFont):
  """Font has ttfautohint params?"""
  from fontbakery.utils import get_name_entry_strings
  import re

  def _parse_ttfa(text):
    # example string:
    #'Version 1.000; ttfautohint (v0.93) -l 8 -r 50 -G 200 -x 14 -w "G"
    found = re.search(r'ttfautohint \(v(.*)\) ([^;]*)', text)
    if found:
      return found.group(1), found.group(2)

  lacking_params = True
  for entry in get_name_entry_strings(ttFont, NameID.VERSION_STRING):
    parsed = _parse_ttfa(entry)
    if parsed is None:
      # No ttfautohint comment at all: treat as not hinted via ttfautohint.
      yield SKIP,\
            Message("not-hinted",
                    "Font appears to our heuristic as"
                    " not hinted using ttfautohint.")
      lacking_params = False
    else:
      _ttfa_version, params = parsed
      if params:
        yield PASS,\
              Message("ok",
                      f"Font has ttfautohint params ({params})")
        lacking_params = False
  if lacking_params:
    yield FAIL,\
          Message("lacks-ttfa-params",
                  "Font is lacking ttfautohint params on its"
                  " version strings on the name table.")
@check(
  id = 'com.google.fonts/check/old_ttfautohint',
  conditions = ['is_ttf'],
  rationale = """
  This check finds which version of ttfautohint was used, by inspecting name table entries and then finds which version of ttfautohint is currently installed in the system.
  """
)
def com_google_fonts_check_old_ttfautohint(ttFont, hinting_stats):
  """Font has old ttfautohint applied?"""
  from fontbakery.utils import get_name_entry_strings

  # Returns the version substring of the first ttfautohint comment found
  # among the given name-table strings, or None if no string matches.
  def ttfautohint_version(values):
    import re
    for value in values:
      results = re.search(r'ttfautohint \(v(.*)\)', value)
      if results:
        return results.group(1)

  # Compares two dotted version strings numerically, component by component.
  # May raise ValueError (from int()) on non-numeric components; the caller
  # handles that below.
  def installed_version_is_newer(installed, used):
    # development versions may include a git commit hash
    # for now we will simply ignore it:
    installed = installed.split("-")[0]
    used = used.split("-")[0]
    installed = list(map(int, installed.split(".")))
    used = list(map(int, used.split(".")))
    return installed > used

  # hinting_stats carries the locally-installed ttfautohint version;
  # without it there is nothing to compare against.
  if not hinting_stats:
    yield ERROR,\
          Message("not-available",
                  "ttfautohint is not available.")
    return

  version_strings = get_name_entry_strings(ttFont, NameID.VERSION_STRING)
  ttfa_version = ttfautohint_version(version_strings)
  if len(version_strings) == 0:
    yield FAIL,\
          Message("lacks-version-strings",
                  "This font file lacks mandatory "
                  "version strings in its name table.")
  elif ttfa_version is None:
    yield INFO,\
          Message("version-not-detected",
                  f"Could not detect which version of"
                  f" ttfautohint was used in this font."
                  f" It is typically specified as a comment"
                  f" in the font version entries of the 'name' table."
                  f" Such font version strings are currently:"
                  f" {version_strings}")
  else:
    installed_ttfa = hinting_stats["version"]
    try:
      if installed_version_is_newer(installed_ttfa,
                                    ttfa_version):
        yield WARN,\
              Message("old-ttfa",
                      f"ttfautohint used in font = {ttfa_version};"
                      f" installed = {installed_ttfa};"
                      f" Need to re-run with the newer version!")
      else:
        # NOTE(review): this PASS message says the installed version "is
        # older than" the one in the font, but this branch is also reached
        # when both versions are equal — confirm whether the wording is
        # intentional.
        yield PASS, (f"ttfautohint available in the system ({installed_ttfa})"
                     f" is older than the one used in the font"
                     f" ({ttfa_version}).")
    except ValueError:
      yield FAIL,\
            Message("parse-error",
                    f"Failed to parse ttfautohint version values:"
                    f" installed = '{installed_ttfa}';"
                    f" used_in_font = '{ttfa_version}'")
@check(
  id = 'com.google.fonts/check/epar',
  rationale = """
  The EPAR table is/was a way of expressing common licensing permissions and restrictions in metadata; while almost nothing supported it, Dave Crossland wonders that adding it to everything in Google Fonts could help make it more popular.
  More info is available at:
  https://davelab6.github.io/epar/
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/226'
  }
)
def com_google_fonts_check_epar(ttFont):
  """EPAR table present in font?"""
  if "EPAR" in ttFont:
    yield PASS, "EPAR table present in font."
  else:
    # Purely informative: EPAR adoption is encouraged, not required.
    yield INFO,\
          Message("lacks-EPAR",
                  "EPAR table not present in font. To learn more see"
                  " https://github.com/googlefonts/fontbakery/issues/818")
@check(
  id = 'com.google.fonts/check/gasp',
  conditions = ['is_ttf'],
  rationale = """
  Traditionally version 0 'gasp' tables were set so that font sizes below 8 ppem had no grid fitting but did have antialiasing. From 9-16 ppem, just grid fitting. And fonts above 17ppem had both antialiasing and grid fitting toggled on. The use of accelerated graphics cards and higher resolution screens make this approach obsolete. Microsoft's DirectWrite pushed this even further with much improved rendering built into the OS and apps.
  In this scenario it makes sense to simply toggle all 4 flags ON for all font sizes.
  """
)
def com_google_fonts_check_gasp(ttFont):
  """Is the Grid-fitting and Scan-conversion Procedure ('gasp') table
  set to optimize rendering?"""
  NON_HINTING_MESSAGE = ("If you are dealing with an unhinted font,"
                         " it can be fixed by running the fonts through"
                         " the command 'gftools fix-nonhinting'\n"
                         "GFTools is available at"
                         " https://pypi.org/project/gftools/")
  if "gasp" not in ttFont.keys():
    yield FAIL,\
          Message("lacks-gasp",
                  "Font is missing the 'gasp' table."
                  " Try exporting the font with autohinting enabled.\n" + \
                  NON_HINTING_MESSAGE)
  else:
    if not isinstance(ttFont["gasp"].gaspRange, dict):
      yield FAIL,\
            Message("empty",
                    "The 'gasp' table has no values.\n" + \
                    NON_HINTING_MESSAGE)
    else:
      failed = False
      # A single 0xFFFF range means "applies to all font sizes".
      if 0xFFFF not in ttFont["gasp"].gaspRange:
        yield WARN,\
              Message("lacks-ffff-range",
                      "The 'gasp' table does not have an entry"
                      " that applies for all font sizes."
                      " The gaspRange value for such entry should"
                      " be set to 0xFFFF.")
      else:
        gasp_meaning = {
          0x01: "- Use grid-fitting",
          0x02: "- Use grayscale rendering",
          0x04: "- Use gridfitting with ClearType symmetric smoothing",
          0x08: "- Use smoothing along multiple axes with ClearType®"
        }
        # Render a human-readable summary of the declared ranges.
        table = []
        for key in ttFont["gasp"].gaspRange.keys():
          value = ttFont["gasp"].gaspRange[key]
          meaning = []
          for flag, info in gasp_meaning.items():
            if value & flag:
              meaning.append(info)
          meaning = "\n\t".join(meaning)
          table.append(f"PPM <= {key}:\n\tflag = 0x{value:02X}\n\t{meaning}")
        table = "\n".join(table)
        yield INFO,\
              Message("ranges",
                      f"These are the ppm ranges declared on"
                      f" the gasp table:\n\n{table}\n")
        for key in ttFont["gasp"].gaspRange.keys():
          if key != 0xFFFF:
            yield WARN,\
                  Message("non-ffff-range",
                          # Fixed typo: "unneccessary" -> "unnecessary"
                          f"The gasp table has a range of {key}"
                          f" that may be unnecessary.")
            failed = True
          else:
            # The catch-all range should have all 4 rendering flags set.
            value = ttFont["gasp"].gaspRange[0xFFFF]
            if value != 0x0F:
              failed = True
              yield WARN,\
                    Message("unset-flags",
                            f"The gasp range 0xFFFF value 0x{value:02X}"
                            f" should be set to 0x0F.")
        if not failed:
          yield PASS, ("The 'gasp' table is correctly set, with one "
                       "gaspRange:value of 0xFFFF:0x0F.")
@check(
  id = 'com.google.fonts/check/name/familyname_first_char',
  rationale = """
  Font family names which start with a numeral are often not discoverable in Windows applications.
  """
)
def com_google_fonts_check_name_familyname_first_char(ttFont):
  """Make sure family name does not begin with a digit."""
  from fontbakery.utils import get_name_entry_strings
  all_good = True
  for familyname in get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME):
    if familyname[0] in "0123456789":
      all_good = False
      yield FAIL,\
            Message("begins-with-digit",
                    f"Font family name '{familyname}' begins with a digit!")
  if all_good:
    yield PASS, "Font family name first character is not a digit."
@check(
  id = 'com.google.fonts/check/name/ascii_only_entries',
  rationale = """
  The OpenType spec requires ASCII for the POSTSCRIPT_NAME (nameID 6).
  For COPYRIGHT_NOTICE (nameID 0) ASCII is required because that string should be the same in CFF fonts which also have this requirement in the OpenType spec.
  Note:
  A common place where we find non-ASCII strings is on name table entries with NameID > 18, which are expressly for localising the ASCII-only IDs into Hindi / Arabic / etc.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1663'
  }
)
def com_google_fonts_check_name_ascii_only_entries(ttFont):
  """Are there non-ASCII characters in ASCII-only NAME table entries?"""
  bad_entries = []
  for name in ttFont["name"].names:
    if name.nameID == NameID.COPYRIGHT_NOTICE or \
       name.nameID == NameID.POSTSCRIPT_NAME:
      string = name.string.decode(name.getEncoding())
      try:
        string.encode('ascii')
      # Narrowed from a bare `except:`; only an encode failure means
      # the entry contains non-ASCII characters.
      except UnicodeEncodeError:
        bad_entries.append(name)
        # Escape the offending characters so they can be shown in the log.
        # (badstring is bytes, so the message shows its b'...' repr.)
        badstring = string.encode("ascii",
                                  errors='xmlcharrefreplace')
        yield FAIL,\
              Message("bad-string",
                      (f"Bad string at"
                       f" [nameID {name.nameID}, '{name.getEncoding()}']:"
                       f" '{badstring}'"))
  if len(bad_entries) > 0:
    yield FAIL,\
          Message("non-ascii-strings",
                  (f"There are {len(bad_entries)} strings containing"
                    " non-ASCII characters in the ASCII-only"
                    " NAME table entries."))
  else:
    # Fixed typo: "characteres" -> "characters"
    yield PASS, ("None of the ASCII-only NAME table entries"
                 " contain non-ASCII characters.")
@check(
  id = 'com.google.fonts/check/metadata/listed_on_gfonts',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_listed_on_gfonts(listed_on_gfonts_api):
  """METADATA.pb: Fontfamily is listed on Google Fonts API?"""
  if listed_on_gfonts_api:
    yield PASS, "Font is properly listed via Google Fonts API."
  else:
    yield WARN,\
          Message("not-found",
                  "Family not found via Google Fonts API.")
# Temporarily disabled as requested at
# https://github.com/googlefonts/fontbakery/issues/1728
@disable
@check(
  id = 'com.google.fonts/check/metadata/profiles_csv',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_profiles_csv(family_metadata):
  """METADATA.pb: Designer exists in Google Fonts profiles.csv?"""
  PROFILES_GIT_URL = ("https://github.com/google/"
                      "fonts/blob/master/designers/profiles.csv")
  PROFILES_RAW_URL = ("https://raw.githubusercontent.com/google/"
                      "fonts/master/designers/profiles.csv")
  if family_metadata.designer == "":
    yield FAIL,\
          Message("empty",
                  'METADATA.pb field "designer" MUST NOT be empty!')
  elif family_metadata.designer == "Multiple Designers":
    yield SKIP,\
          Message("multiple",
                  'Found "Multiple Designers" at METADATA.pb, which'
                  ' is OK, so we won\'t look for it at profiles.csv')
  else:
    from urllib import request
    import csv
    try:
      handle = request.urlopen(PROFILES_RAW_URL)
      # Bug fix: urlopen yields bytes, but csv.reader requires text on
      # Python 3 (and the old per-cell .decode() would fail on str).
      # Decode the whole payload once, then parse the lines.
      text = handle.read().decode("utf-8")
      designers = []
      for row in csv.reader(text.splitlines()):
        if not row:
          continue
        designers.append(row[0])
      if family_metadata.designer not in designers:
        yield WARN,\
              Message("not-listed",
                      f'METADATA.pb:'
                      f' Designer "{family_metadata.designer}" is'
                      f' not listed in CSV file at {PROFILES_GIT_URL}')
      else:
        # Bug fix: this message was missing its f-prefix, so the
        # placeholder used to be emitted literally.
        yield PASS, (f'Found designer "{family_metadata.designer}"'
                     f' at profiles.csv')
    # Narrowed from a bare `except:` so e.g. KeyboardInterrupt is not
    # swallowed; any fetch/parse error is still reported as a WARN.
    except Exception:
      yield WARN,\
            Message("csv-not-fetched",
                    f'Could not fetch "{PROFILES_RAW_URL}"')
@check(
  id = 'com.google.fonts/check/metadata/unique_full_name_values',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_unique_full_name_values(family_metadata):
  """METADATA.pb: check if fonts field only has
  unique "full_name" values.
  """
  # Duplicates collapse in the set, so a size mismatch reveals them.
  distinct_names = {f.full_name for f in family_metadata.fonts}
  if len(distinct_names) != len(family_metadata.fonts):
    yield FAIL,\
          Message("duplicated",
                  'Found duplicated "full_name" values'
                  ' in METADATA.pb fonts field.')
  else:
    yield PASS, ('METADATA.pb "fonts" field only has'
                 ' unique "full_name" values.')
@check(
  id = 'com.google.fonts/check/metadata/unique_weight_style_pairs',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_unique_weight_style_pairs(family_metadata):
  """METADATA.pb: check if fonts field
  only contains unique style:weight pairs.
  """
  # Duplicate pairs collapse in the set, so a size mismatch reveals them.
  distinct_pairs = {f"{f.style}:{f.weight}" for f in family_metadata.fonts}
  if len(distinct_pairs) != len(family_metadata.fonts):
    yield FAIL,\
          Message("duplicated",
                  "Found duplicated style:weight pair"
                  " in METADATA.pb fonts field.")
  else:
    yield PASS, ("METADATA.pb \"fonts\" field only has"
                 " unique style:weight pairs.")
@check(
  id = 'com.google.fonts/check/metadata/license',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_license(family_metadata):
  """METADATA.pb license is "APACHE2", "UFL" or "OFL"?"""
  expected_licenses = ["APACHE2", "OFL", "UFL"]
  if family_metadata.license not in expected_licenses:
    yield FAIL,\
          Message("bad-license",
                  f'METADATA.pb license field ("{family_metadata.license}")'
                  f' must be one of the following: {expected_licenses}')
  else:
    yield PASS, (f'Font license is declared in METADATA.pb'
                 f' as "{family_metadata.license}"')
@check(
  id = 'com.google.fonts/check/metadata/menu_and_latin',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_menu_and_latin(family_metadata):
  """METADATA.pb should contain at least "menu" and "latin" subsets."""
  missing = [subset for subset in ("menu", "latin")
             if subset not in list(family_metadata.subsets)]
  if missing:
    # Describe either the single missing subset or both.
    label = "both" if len(missing) == 2 else f'"{missing[0]}"'
    yield FAIL,\
          Message("missing",
                  f'Subsets "menu" and "latin" are mandatory,'
                  f' but METADATA.pb is missing {label}.')
  else:
    yield PASS, 'METADATA.pb contains "menu" and "latin" subsets.'
@check(
  id = 'com.google.fonts/check/metadata/subsets_order',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_subsets_order(family_metadata):
  """METADATA.pb subsets should be alphabetically ordered."""
  declared = list(family_metadata.subsets)
  expected = sorted(declared)
  if declared == expected:
    yield PASS, "METADATA.pb subsets are sorted in alphabetical order."
  else:
    yield FAIL,\
          Message("not-sorted",
                  ("METADATA.pb subsets are not sorted "
                   "in alphabetical order: Got ['{}']"
                   " and expected ['{}']"
                   "").format("', '".join(family_metadata.subsets),
                              "', '".join(expected)))
@check(
  id = 'com.google.fonts/check/metadata/includes_production_subsets',
  conditions = ['family_metadata',
                'production_metadata',
                'listed_on_gfonts_api'],
  rationale = """
  Check METADATA.pb file includes the same subsets as the family in production.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2989'
  }
)
def com_google_fonts_check_metadata_includes_production_subsets(family_metadata, production_metadata):
  """Check METADATA.pb includes production subsets."""
  # Index production families by name, then diff the subset sets.
  by_family = {entry['family']: entry
               for entry in production_metadata["familyMetadataList"]}
  prod_subsets = set(by_family[family_metadata.name]["subsets"])
  missing_subsets = prod_subsets - set(family_metadata.subsets)
  if missing_subsets:
    yield FAIL,\
          Message("missing-subsets",
                  f"The following subsets are missing [{', '.join(sorted(missing_subsets))}]")
  else:
    yield PASS, "No missing subsets"
@check(
  id = 'com.google.fonts/check/metadata/copyright',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_copyright(family_metadata):
  """METADATA.pb: Copyright notice is the same in all fonts?"""
  # Compare each font's notice against the previously seen one;
  # an empty/missing previous value skips the comparison.
  previous_notice = None
  inconsistent = False
  for font in family_metadata.fonts:
    if previous_notice and font.copyright != previous_notice:
      inconsistent = True
    previous_notice = font.copyright
  if inconsistent:
    yield FAIL,\
          Message("inconsistency",
                  "METADATA.pb: Copyright field value"
                  " is inconsistent across family")
  else:
    yield PASS, "Copyright is consistent across family"
@check(
  id = 'com.google.fonts/check/metadata/familyname',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_familyname(family_metadata):
  """Check that METADATA.pb family values are all the same."""
  # Compare each font's name against the previously seen one;
  # an empty previous value skips the comparison.
  previous_name = ""
  inconsistent = False
  for font in family_metadata.fonts:
    if previous_name and font.name != previous_name:
      inconsistent = True
    previous_name = font.name
  if inconsistent:
    yield FAIL,\
          Message("inconsistency",
                  'METADATA.pb: Family name is not the same'
                  ' in all metadata "fonts" items.')
  else:
    yield PASS, ('METADATA.pb: Family name is the same'
                 ' in all metadata "fonts" items.')
@check(
  id = 'com.google.fonts/check/metadata/has_regular',
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_has_regular(family_metadata):
  """METADATA.pb: According Google Fonts standards,
  families should have a Regular style.
  """
  from .googlefonts_conditions import has_regular_style
  if not has_regular_style(family_metadata):
    yield FAIL,\
          Message("lacks-regular",
                  "This family lacks a Regular"
                  " (style: normal and weight: 400)"
                  " as required by Google Fonts standards.")
  else:
    yield PASS, "Family has a Regular style."
@check(
  id = 'com.google.fonts/check/metadata/regular_is_400',
  conditions = ['family_metadata',
                'has_regular_style']
)
def com_google_fonts_check_metadata_regular_is_400(family_metadata):
  """METADATA.pb: Regular should be 400."""
  # Collect every declared Regular whose weight deviates from 400.
  badfonts = [f"{f.filename} (weight: {f.weight})"
              for f in family_metadata.fonts
              if f.full_name.endswith("Regular") and f.weight != 400]
  if badfonts:
    yield FAIL,\
          Message("not-400",
                  f'METADATA.pb: Regular font weight must be 400.'
                  f' Please fix these: {", ".join(badfonts)}')
  else:
    yield PASS, "Regular has weight = 400."
@check(
  id = 'com.google.fonts/check/metadata/nameid/family_name',
  conditions=['font_metadata']
)
def com_google_fonts_check_metadata_nameid_family_name(ttFont, font_metadata):
  """Checks METADATA.pb font.name field matches
  family name declared on the name table.
  """
  from fontbakery.utils import get_name_entry_strings
  # Prefer the typographic family name; fall back to the plain one.
  familynames = (get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
                 or get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME))
  if len(familynames) == 0:
    yield FAIL,\
          Message("missing",
                  (f"This font lacks a FONT_FAMILY_NAME entry"
                   f" (nameID = {NameID.FONT_FAMILY_NAME})"
                   f" in the name table."))
  elif font_metadata.name not in familynames:
    yield FAIL,\
          Message("mismatch",
                  (f'Unmatched family name in font:'
                   f' TTF has "{familynames[0]}" while METADATA.pb'
                   f' has "{font_metadata.name}"'))
  else:
    yield PASS, (f'Family name "{font_metadata.name}" is identical'
                 f' in METADATA.pb and on the TTF file.')
@check(
  id = 'com.google.fonts/check/metadata/nameid/post_script_name',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_nameid_post_script_name(ttFont, font_metadata):
  """Checks METADATA.pb font.post_script_name matches
  postscript name declared on the name table.
  """
  from fontbakery.utils import get_name_entry_strings
  problems_found = False
  postscript_names = get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME)
  if not postscript_names:
    problems_found = True
    yield FAIL,\
          Message("missing",
                  (f"This font lacks a POSTSCRIPT_NAME entry"
                   f" (nameID = {NameID.POSTSCRIPT_NAME})"
                   f" in the name table."))
  else:
    # Every POSTSCRIPT_NAME record must agree with the metadata value.
    for psname in postscript_names:
      if psname != font_metadata.post_script_name:
        problems_found = True
        yield FAIL,\
              Message("mismatch",
                      (f'Unmatched postscript name in font:'
                       f' TTF has "{psname}" while METADATA.pb has'
                       f' "{font_metadata.post_script_name}".'))
  if not problems_found:
    yield PASS, (f'Postscript name "{font_metadata.post_script_name}"'
                 f' is identical in METADATA.pb and on the TTF file.')
@check(
  id = 'com.google.fonts/check/metadata/nameid/full_name',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_nameid_full_name(ttFont, font_metadata):
  """METADATA.pb font.full_name value matches
  fullname declared on the name table?
  """
  from fontbakery.utils import get_name_entry_strings
  full_fontnames = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
  if not full_fontnames:
    yield FAIL,\
          Message("lacks-entry",
                  (f"This font lacks a FULL_FONT_NAME entry"
                   f" (nameID = {NameID.FULL_FONT_NAME})"
                   f" in the name table."))
  # One PASS/FAIL result is emitted per FULL_FONT_NAME record.
  for full_fontname in full_fontnames:
    if full_fontname == font_metadata.full_name:
      yield PASS, (f'Font fullname "{full_fontname}" is identical'
                   f' in METADATA.pb and on the TTF file.')
    else:
      yield FAIL,\
            Message("mismatch",
                    (f'Unmatched fullname in font:'
                     f' TTF has "{full_fontname}" while METADATA.pb'
                     f' has "{font_metadata.full_name}".'))
@check(
  id = 'com.google.fonts/check/metadata/nameid/font_name',
  conditions=['font_metadata', 'style']
)
def com_google_fonts_check_metadata_nameid_font_name(ttFont, style, font_metadata):
  """METADATA.pb font.name value should be same as
  the family name declared on the name table.
  """
  from fontbakery.utils import get_name_entry_strings
  from fontbakery.constants import RIBBI_STYLE_NAMES
  # RIBBI styles use the plain family name; others the typographic one.
  if style in RIBBI_STYLE_NAMES:
    nameid = NameID.FONT_FAMILY_NAME
  else:
    nameid = NameID.TYPOGRAPHIC_FAMILY_NAME
  font_familynames = get_name_entry_strings(ttFont, nameid)
  if not font_familynames:
    yield FAIL,\
          Message("lacks-entry",
                  f"This font lacks a {NameID(nameid).name} entry"
                  f" (nameID = {nameid}) in the name table.")
  else:
    for font_familyname in font_familynames:
      if font_familyname == font_metadata.name:
        yield PASS, (f'OK: Family name "{font_metadata.name}" is identical'
                     f' in METADATA.pb and on the TTF file.')
      else:
        yield FAIL,\
              Message("mismatch",
                      f'Unmatched familyname in font:'
                      f' TTF has familyname = "{font_familyname}" while'
                      f' METADATA.pb has font.name = "{font_metadata.name}".')
@check(
  id = 'com.google.fonts/check/metadata/match_fullname_postscript',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_match_fullname_postscript(font_metadata):
  """METADATA.pb font.full_name and font.post_script_name
  fields have equivalent values ?
  """
  import re

  def _normalize(value):
    # Strip every non-word character so that spacing/punctuation
    # differences do not matter in the comparison.
    return re.sub(r"\W", "", value)

  if _normalize(font_metadata.full_name) == _normalize(font_metadata.post_script_name):
    yield PASS, ('METADATA.pb font fields "full_name" and'
                 ' "post_script_name" have equivalent values.')
  else:
    yield FAIL,\
          Message("mismatch",
                  f'METADATA.pb font full_name = "{font_metadata.full_name}"'
                  f' does not match'
                  f' post_script_name = "{font_metadata.post_script_name}"')
@check(
  id = 'com.google.fonts/check/metadata/match_filename_postscript',
  conditions = ['font_metadata',
                'not is_variable_font']
  # FIXME: We'll want to review this once
  # naming rules for varfonts are settled.
)
def com_google_fonts_check_metadata_match_filename_postscript(font_metadata):
  """METADATA.pb font.filename and font.post_script_name
  fields have equivalent values?
  """
  # The filename minus its extension should equal the postscript name.
  basename, _extension = os.path.splitext(font_metadata.filename)
  if font_metadata.post_script_name == basename:
    yield PASS, ('METADATA.pb font fields "filename" and'
                 ' "post_script_name" have equivalent values.')
  else:
    yield FAIL,\
          Message("mismatch",
                  f'METADATA.pb font filename = "{font_metadata.filename}"'
                  f' does not match'
                  f' post_script_name="{font_metadata.post_script_name}".')
@check(
  id = 'com.google.fonts/check/metadata/valid_name_values',
  conditions = ['style',
                'font_metadata']
)
def com_google_fonts_check_metadata_valid_name_values(style,
                                                      font_metadata,
                                                      font_familynames,
                                                      typographic_familynames):
  """METADATA.pb font.name field contains font name in right format?"""
  from fontbakery.constants import RIBBI_STYLE_NAMES
  # RIBBI styles are validated against the plain family names,
  # everything else against the typographic family names.
  familynames = (font_familynames
                 if style in RIBBI_STYLE_NAMES
                 else typographic_familynames)
  all_good = True
  for font_familyname in familynames:
    if font_familyname not in font_metadata.name:
      all_good = False
      yield FAIL,\
            Message("mismatch",
                    f'METADATA.pb font.name field ("{font_metadata.name}")'
                    f' does not match'
                    f' correct font name format ("{font_familyname}").')
  if all_good:
    yield PASS, ("METADATA.pb font.name field contains"
                 " font name in right format.")
@check(
  id = 'com.google.fonts/check/metadata/valid_full_name_values',
  conditions = ['style',
                'font_metadata']
)
def com_google_fonts_check_metadata_valid_full_name_values(style,
                                                           font_metadata,
                                                           font_familynames,
                                                           typographic_familynames):
  """METADATA.pb font.full_name field contains font name in right format?"""
  from fontbakery.constants import RIBBI_STYLE_NAMES
  # RIBBI styles are validated against the plain family names,
  # everything else against the typographic family names.
  # With an empty list we emit a SKIP and the loop below does nothing.
  if style in RIBBI_STYLE_NAMES:
    familynames = font_familynames
    if familynames == []:
      yield SKIP, "No FONT_FAMILYNAME"
  else:
    familynames = typographic_familynames
    if familynames == []:
      yield SKIP, "No TYPOGRAPHIC_FAMILYNAME"
  for font_familyname in familynames:
    if font_familyname not in font_metadata.full_name:
      yield FAIL,\
            Message("mismatch",
                    f'METADATA.pb font.full_name field'
                    f' ("{font_metadata.full_name}")'
                    f' does not match correct font name format'
                    f' ("{font_familyname}").')
    else:
      yield PASS, (f'METADATA.pb font.full_name field contains'
                   f' font name in right format.'
                   f' ("{font_familyname}" in "{font_metadata.full_name}")')
@check(
  id = 'com.google.fonts/check/metadata/valid_filename_values',
  conditions = ['style', # This means the font filename
                         # (source of truth here) is good
                'family_metadata']
)
def com_google_fonts_check_metadata_valid_filename_values(font,
                                                          family_metadata):
  """METADATA.pb font.filename field contains font name in right format?"""
  expected = os.path.basename(font)
  # One match among the family's declared fonts is enough.
  matched = False
  for font_metadata in family_metadata.fonts:
    if font_metadata.filename == expected:
      matched = True
      yield PASS, ("METADATA.pb filename field contains"
                   " font name in right format.")
      break
  if not matched:
    yield FAIL,\
          Message("bad-field",
                  f'None of the METADATA.pb filename fields match'
                  f' correct font name format ("{expected}").')
@check(
  id = 'com.google.fonts/check/metadata/valid_post_script_name_values',
  conditions = ['font_metadata',
                'font_familynames']
)
def com_google_fonts_check_metadata_valid_post_script_name_values(font_metadata,
                                                                  font_familynames):
  """METADATA.pb font.post_script_name field
  contains font name in right format?
  """
  # Compare the whitespace-stripped family name against the
  # hyphen-stripped postscript name; one result per family name.
  for font_familyname in font_familynames:
    condensed_name = "".join(str(font_familyname).split())
    dehyphenated_psname = "".join(font_metadata.post_script_name.split("-"))
    if condensed_name in dehyphenated_psname:
      yield PASS, ("METADATA.pb postScriptName field"
                   " contains font name in right format.")
    else:
      yield FAIL,\
            Message("mismatch",
                    f'METADATA.pb'
                    f' postScriptName ("{font_metadata.post_script_name}")'
                    f' does not match'
                    f' correct font name format ("{font_familyname}").')
@check(
  id = 'com.google.fonts/check/metadata/valid_copyright',
  conditions = ['font_metadata'],
  rationale = """
  The expected pattern for the copyright string adheres to the following rules:
  * It must say "Copyright" followed by a 4 digit year (optionally followed by a hyphen and another 4 digit year)
  * Then it must say "The <familyname> Project Authors"
  * And within parentheses, a URL for a git repository must be provided
  * The check is case insensitive and does not validate whether the familyname is correct, even though we'd expect it is (and we may soon update the check to validate that aspect as well!)
  Here is an example of a valid copyright string:
  "Copyright 2017 The Archivo Black Project Authors (https://github.com/Omnibus-Type/ArchivoBlack)"
  """
)
def com_google_fonts_check_metadata_valid_copyright(font_metadata):
  """Copyright notices match canonical pattern in METADATA.pb"""
  import re
  # Lowercasing the input makes the all-lowercase regex below
  # effectively a case-insensitive match.
  string = font_metadata.copyright.lower()
  # The "[^\@]*" inside the parentheses rejects any '@' character there,
  # presumably to refuse e-mail addresses where a git URL is expected.
  does_match = re.search(r'copyright [0-9]{4}(\-[0-9]{4})? the .* project authors \([^\@]*\)',
                         string)
  if does_match:
    yield PASS, "METADATA.pb copyright string is good"
  else:
    yield FAIL,\
          Message("bad-notice-format",
                  f'METADATA.pb: Copyright notices should match'
                  f' a pattern similar to:\n'
                  f' "Copyright 2020 The Familyname Project Authors (git url)"'
                  f'\n'
                  f'But instead we have got:\n"{string}"')
@check(
  id = 'com.google.fonts/check/font_copyright',
)
def com_google_fonts_check_font_copyright(ttFont):
  """Copyright notices match canonical pattern in fonts"""
  import re
  from fontbakery.utils import get_name_entry_strings
  failed = False
  # Unlike the METADATA.pb counterpart check, this regex is case-sensitive
  # and is applied to every COPYRIGHT_NOTICE entry on the name table.
  for string in get_name_entry_strings(ttFont, NameID.COPYRIGHT_NOTICE):
    does_match = re.search(r'Copyright [0-9]{4}(\-[0-9]{4})? The .* Project Authors \([^\@]*\)',
                           string)
    if does_match:
      yield PASS, (f"Name Table entry: Copyright field '{string}'"
                   f" matches canonical pattern.")
    else:
      failed = True
      yield FAIL,\
            Message("bad-notice-format",
                    f'Name Table entry: Copyright notices should match'
                    f' a pattern similar to: "Copyright 2019'
                    f' The Familyname Project Authors (git url)"\n'
                    f'But instead we have got:\n"{string}"')
  # A summary PASS is emitted only when no single entry failed.
  if not failed:
    yield PASS, "Name table copyright entries are good"
@check(
  id = 'com.google.fonts/check/metadata/reserved_font_name',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_reserved_font_name(font_metadata):
  """Copyright notice on METADATA.pb should not contain 'Reserved Font Name'."""
  # Guard-style: the common, good case first.
  if "Reserved Font Name" not in font_metadata.copyright:
    yield PASS, ('METADATA.pb copyright field'
                 ' does not contain "Reserved Font Name".')
  else:
    yield WARN,\
          Message("rfn",
                  f'METADATA.pb:'
                  f' copyright field ("{font_metadata.copyright}")'
                  f' contains "Reserved Font Name".'
                  f' This is an error except in a few specific rare cases.')
@check(
  id = 'com.google.fonts/check/metadata/copyright_max_length',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_copyright_max_length(font_metadata):
  """METADATA.pb: Copyright notice shouldn't exceed 500 chars."""
  # FIX: corrected typos in the user-facing message
  # ("lengh" -> "length", "characteres" -> "characters").
  if len(font_metadata.copyright) > 500:
    yield FAIL,\
          Message("max-length",
                  "METADATA.pb: Copyright notice exceeds"
                  " maximum allowed length of 500 characters.")
  else:
    yield PASS, "Copyright notice string is shorter than 500 chars."
@check(
  id = 'com.google.fonts/check/metadata/filenames',
  rationale = """
  Note:
  This check only looks for files in the current directory.
  Font files in subdirectories are checked by these other two checks:
   - com.google.fonts/check/metadata/undeclared_fonts
   - com.google.fonts/check/repo/vf_has_static_fonts
  We may want to merge them all into a single check.
  """,
  conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_filenames(fonts, family_directory, family_metadata):
  """METADATA.pb: Font filenames match font.filename entries?"""
  passed = True
  metadata_filenames = []
  # NOTE(review): the extension filter is lowercase-only, so files named
  # e.g. "*.TTF" would be silently ignored here — confirm this is intended.
  font_filenames = [f for f in os.listdir(family_directory) if f[-4:] in [".ttf", ".otf"]]
  # First direction: every declared font.filename must exist on disk.
  for font_metadata in family_metadata.fonts:
    if font_metadata.filename not in font_filenames:
      passed = False
      yield FAIL,\
            Message("file-not-found",
                    f'Filename "{font_metadata.filename}" is listed on'
                    f' METADATA.pb but an actual font file'
                    f' with that name was not found.')
    metadata_filenames.append(font_metadata.filename)
  # Second direction: every font file on disk must be declared.
  for font in font_filenames:
    if font not in metadata_filenames:
      passed = False
      yield FAIL,\
            Message("file-not-declared",
                    f'Filename "{font}" is not declared'
                    f' on METADATA.pb as a font.filename entry.')
  if passed:
    yield PASS, "Filenames in METADATA.pb look good."
@check(
  id = 'com.google.fonts/check/metadata/italic_style',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_italic_style(ttFont, font_metadata):
  """METADATA.pb font.style "italic" matches font internals?"""
  from fontbakery.utils import get_name_entry_strings
  from fontbakery.constants import MacStyle
  if font_metadata.style != "italic":
    yield SKIP, "This check only applies to italic fonts."
  else:
    font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
    if len(font_fullname) == 0:
      yield SKIP, "Font lacks fullname entries in name table."
      # this fail scenario was already checked above
      # (passing those previous checks is a prerequisite for this one)
      # FIXME: Could we pack this into a condition ?
    else:
      # FIXME: here we only check the first name entry.
      #        Should we iterate over them all ? Or should we check
      #        if they're all the same?
      font_fullname = font_fullname[0]
      # First requirement: the ITALIC bit must be set on head.macStyle.
      if not bool(ttFont["head"].macStyle & MacStyle.ITALIC):
        yield FAIL,\
              Message("bad-macstyle",
                      ("METADATA.pb style has been set to italic"
                       " but font macStyle is improperly set."))
      # Second requirement: the last hyphen-separated token of the
      # full font name must end with "Italic".
      elif not font_fullname.split("-")[-1].endswith("Italic"):
        yield FAIL,\
              Message("bad-fullfont-name",
                      (f'Font macStyle Italic bit is set'
                       f' but nameID {NameID.FULL_FONT_NAME}'
                       f' ("{font_fullname}") is not'
                       f' ended with "Italic".'))
      else:
        yield PASS, ('OK: METADATA.pb font.style "italic"'
                     ' matches font internals.')
@check(
  id = 'com.google.fonts/check/metadata/normal_style',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_normal_style(ttFont, font_metadata):
  """METADATA.pb font.style "normal" matches font internals?"""
  from fontbakery.utils import get_name_entry_strings
  from fontbakery.constants import MacStyle
  if font_metadata.style != "normal":
    yield SKIP, "This check only applies to normal fonts."
    # FIXME: declare a common condition called "normal_style"
  else:
    font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
    font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
    if len(font_familyname) == 0 or len(font_fullname) == 0:
      yield SKIP, ("Font lacks familyname and/or"
                   " fullname entries in name table.")
      # FIXME: This is the same SKIP condition as in check/metadata/italic_style
      #        so we definitely need to address them with a common condition!
    else:
      # Only the first entry of each kind is inspected (see the FIXME
      # on the italic_style counterpart of this check).
      font_familyname = font_familyname[0]
      font_fullname = font_fullname[0]
      # A "normal" font must NOT have the ITALIC macStyle bit set...
      if bool(ttFont["head"].macStyle & MacStyle.ITALIC):
        yield FAIL,\
              Message("bad-macstyle",
                      ("METADATA.pb style has been set to normal"
                       " but font macStyle is improperly set."))
      # ...and neither its family name nor its full name may end in "Italic".
      elif font_familyname.split("-")[-1].endswith('Italic'):
        yield FAIL,\
              Message("familyname-italic",
                      (f'Font macStyle indicates a non-Italic font,'
                       f' but nameID {NameID.FONT_FAMILY_NAME}'
                       f' (FONT_FAMILY_NAME: "{font_familyname}")'
                       f' ends with "Italic".'))
      elif font_fullname.split("-")[-1].endswith("Italic"):
        yield FAIL,\
              Message("fullfont-italic",
                      (f'Font macStyle indicates a non-Italic font,'
                       f' but nameID {NameID.FULL_FONT_NAME}'
                       f' (FULL_FONT_NAME: "{font_fullname}")'
                       f' ends with "Italic".'))
      else:
        yield PASS, ('METADATA.pb font.style "normal"'
                     ' matches font internals.')
@check(
  id = 'com.google.fonts/check/metadata/nameid/family_and_full_names',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_nameid_family_and_full_names(ttFont, font_metadata):
  """METADATA.pb font.name and font.full_name fields match
     the values declared on the name table?
  """
  from fontbakery.utils import get_name_entry_strings
  # Prefer the typographic family name; fall back to the basic one.
  font_familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
  if font_familynames:
    font_familyname = font_familynames[0]
  else:
    # NOTE(review): both [0] indexings below raise IndexError when the
    # font lacks these entries entirely — presumably earlier checks (or
    # the conditions) guarantee their presence; confirm.
    font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)[0]
  font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)[0]
  # FIXME: common condition/name-id check as in the two previous checks.
  if font_fullname != font_metadata.full_name:
    yield FAIL,\
          Message("fullname-mismatch",
                  (f'METADATA.pb: Fullname "{font_metadata.full_name}"'
                   f' does not match name table entry "{font_fullname}"!'))
  elif font_familyname != font_metadata.name:
    yield FAIL,\
          Message("familyname-mismatch",
                  (f'METADATA.pb Family name "{font_metadata.name}"'
                   f' does not match name table entry "{font_familyname}"!'))
  else:
    yield PASS, ("METADATA.pb familyname and fullName fields"
                 " match corresponding name table entries.")
@check(
  id = 'com.google.fonts/check/metadata/fontname_not_camel_cased',
  conditions = ['font_metadata',
                'not whitelist_camelcased_familyname']
)
def com_google_fonts_check_metadata_fontname_not_camel_cased(font_metadata):
  """METADATA.pb: Check if fontname is not camel cased."""
  import re
  # Two or more Capitalized words glued together, e.g. "ArchivoBlack".
  camelcase_pattern = r'([A-Z][a-z]+){2,}'
  if re.match(camelcase_pattern, font_metadata.name):
    yield FAIL,\
          Message("camelcase",
                  f'METADATA.pb: "{font_metadata.name}" is a CamelCased name.'
                  f' To solve this, simply use spaces'
                  f' instead in the font name.')
  else:
    yield PASS, "Font name is not camel-cased."
@check(
  id = 'com.google.fonts/check/metadata/match_name_familyname',
  conditions = ['family_metadata', # that's the family-wide metadata!
                'font_metadata'] # and this one's specific to a single file
)
def com_google_fonts_check_metadata_match_name_familyname(family_metadata, font_metadata):
  """METADATA.pb: Check font name is the same as family name."""
  # Both values live in METADATA.pb: one family-wide, one per font file.
  if font_metadata.name == family_metadata.name:
    yield PASS, "Font name is the same as family name."
  else:
    yield FAIL,\
          Message("mismatch",
                  f'METADATA.pb: {font_metadata.filename}:\n'
                  f'  Family name "{family_metadata.name}"'
                  f' does not match'
                  f' font name: "{font_metadata.name}"')
@check(
  id = 'com.google.fonts/check/metadata/canonical_weight_value',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_canonical_weight_value(font_metadata):
  """METADATA.pb: Check that font weight has a canonical value."""
  weight = font_metadata.weight
  # Canonical weights are exactly the multiples of 100 from 100 to 900.
  is_canonical = (weight % 100 == 0) and (100 <= weight <= 900)
  if is_canonical:
    yield PASS, "Font weight has a canonical value."
  else:
    yield FAIL,\
          Message("bad-weight",
                  f"METADATA.pb: The weight is declared"
                  f" as {font_metadata.weight} which is not a"
                  f" multiple of 100 between 100 and 900.")
@check(
  id = 'com.google.fonts/check/metadata/os2_weightclass',
  rationale = """
  Check METADATA.pb font weights are correct.
  For static fonts, the metadata weight should be the same as the static font's OS/2 usWeightClass.
  For variable fonts, the weight value should be 400 if the font's wght axis range includes 400, otherwise it should be the value closest to 400.
  """,
  conditions = ['font_metadata'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2683'
  }
)
def com_google_fonts_check_metadata_os2_weightclass(ttFont,
                                                    font_metadata):
  """Check METADATA.pb font weights are correct."""
  from .shared_conditions import is_variable_font
  # Weight name to value mapping:
  GF_API_WEIGHT_NAMES = {100: "Thin",
                         200: "ExtraLight",
                         250: "Thin", # Legacy. Pre-vf epoch
                         275: "ExtraLight", # Legacy. Pre-vf epoch
                         300: "Light",
                         400: "Regular",
                         500: "Medium",
                         600: "SemiBold",
                         700: "Bold",
                         800: "ExtraBold",
                         900: "Black"}
  CSS_WEIGHT_NAMES = {
    100: "Thin",
    200: "ExtraLight",
    300: "Light",
    400: "Regular",
    500: "Medium",
    600: "SemiBold",
    700: "Bold",
    800: "ExtraBold",
    900: "Black"
  }
  if is_variable_font(ttFont):
    axes = {f.axisTag: f for f in ttFont["fvar"].axes}
    # if there isn't a wght axis, use the OS/2.usWeightClass
    if 'wght' not in axes:
      # FIX: this previously read `f['OS/2']`, but `f` is only a
      # comprehension variable (comprehension scopes do not leak in
      # Python 3), so this branch raised NameError. It must read
      # the value from ttFont instead.
      font_weight = ttFont["OS/2"].usWeightClass
    else:
      wght = axes['wght']
      # if the wght range includes 400, use 400
      if wght.minValue <= 400 <= wght.maxValue:
        font_weight = 400
      # if 400 isn't in the wght axis range, use the value closest to 400
      elif abs(wght.minValue - 400) < abs(wght.maxValue - 400):
        font_weight = wght.minValue
      else:
        font_weight = wght.maxValue
  else:
    # Static fonts: compare straight against OS/2 usWeightClass.
    font_weight = ttFont["OS/2"].usWeightClass
  gf_weight = GF_API_WEIGHT_NAMES.get(font_weight,
                                      "bad Google Fonts API weight value")
  css_weight = CSS_WEIGHT_NAMES.get(font_metadata.weight,
                                    "bad CSS weight value")
  if gf_weight != css_weight:
    yield FAIL,\
          Message("mismatch",
                  f'OS/2 usWeightClass'
                  f' ({ttFont["OS/2"].usWeightClass}:"{gf_weight}")'
                  f' does not match weight specified'
                  f' at METADATA.pb ({font_metadata.weight}:"{css_weight}").')
  else:
    yield PASS, ("OS/2 usWeightClass or wght axis value matches"
                 " weight specified at METADATA.pb")
@check(
  id = 'com.google.fonts/check/metadata/match_weight_postscript',
  conditions = ['font_metadata',
                'not is_variable_font']
)
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata):
  """METADATA.pb weight matches postScriptName for static fonts."""
  # Each canonical weight value maps to exactly two style names
  # (upright + italic), so `pair` below ends up with 0 or 2 entries
  # and the pair[1] accesses further down are safe.
  WEIGHTS = {
    "Thin": 100,
    "ThinItalic": 100,
    "ExtraLight": 200,
    "ExtraLightItalic": 200,
    "Light": 300,
    "LightItalic": 300,
    "Regular": 400,
    "Italic": 400,
    "Medium": 500,
    "MediumItalic": 500,
    "SemiBold": 600,
    "SemiBoldItalic": 600,
    "Bold": 700,
    "BoldItalic": 700,
    "ExtraBold": 800,
    "ExtraBoldItalic": 800,
    "Black": 900,
    "BlackItalic": 900
  }
  # Collect the (style-name, value) pairs matching the declared weight.
  pair = []
  for k, weight in WEIGHTS.items():
    if weight == font_metadata.weight:
      pair.append((k, weight))
  if not pair:
    yield FAIL, ("METADATA.pb: Font weight value ({})"
                 " is invalid.").format(font_metadata.weight)
  # The postScriptName must end with "-<StyleName>" for either of the
  # two style names corresponding to the declared weight.
  elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or
            font_metadata.post_script_name.endswith('-' + pair[1][0])):
    yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
                 " and weight value ({}). The name must be"
                 " ended with \"{}\" or \"{}\"."
                 "").format(font_metadata.post_script_name,
                            pair[0][1],
                            pair[0][0],
                            pair[1][0])
  else:
    yield PASS, "Weight value matches postScriptName."
@check(
  id = 'com.google.fonts/check/metadata/canonical_style_names',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_canonical_style_names(ttFont, font_metadata):
  """METADATA.pb: Font styles are named canonically?"""
  from fontbakery.constants import MacStyle
  def find_italic_in_name_table():
    # Scans standard name records (nameID < 256, i.e. below the
    # font-specific range) for the substring "italic" in any casing.
    for entry in ttFont["name"].names:
      if entry.nameID < 256 and "italic" in entry.string.decode(entry.getEncoding()).lower():
        return True
    return False
  def is_italic():
    # The font counts as italic if ANY of these three signals is present:
    # macStyle ITALIC bit, a non-zero post.italicAngle, or an "italic"
    # substring somewhere in the name table.
    return (ttFont["head"].macStyle & MacStyle.ITALIC or
            ttFont["post"].italicAngle or
            find_italic_in_name_table())
  if font_metadata.style not in ["italic", "normal"]:
    yield SKIP, ('This check only applies to font styles declared'
                 ' as "italic" or "normal" on METADATA.pb.')
  else:
    if is_italic() and font_metadata.style != "italic":
      yield FAIL,\
            Message("italic",
                    f'The font style is "{font_metadata.style}"'
                    f' but it should be "italic".')
    elif not is_italic() and font_metadata.style != "normal":
      yield FAIL,\
            Message("normal",
                    f'The font style is "{font_metadata.style}"'
                    f' but it should be "normal".')
    else:
      yield PASS, "Font styles are named canonically."
@check(
  id = 'com.google.fonts/check/unitsperem_strict',
  rationale = """
  Even though the OpenType spec allows unitsPerEm to be any value between 16 and 16384, the Google Fonts project aims at a narrower set of reasonable values.
  The spec suggests usage of powers of two in order to get some performance improvements on legacy renderers, so those values are acceptable.
  But values of 500 or 1000 are also acceptable, with the added benefit that it makes upm math easier for designers, while the performance hit of not using a power of two is most likely negligible nowadays.
  Additionally, values above 2048 would likely result in unreasonable filesize increases.
  """
)
def com_google_fonts_check_unitsperem_strict(ttFont):
  """ Stricter unitsPerEm criteria for Google Fonts. """
  # Powers of two plus the designer-friendly 500/1000/2000 values.
  ACCEPTABLE = [16, 32, 64, 128, 256, 500,
                512, 1000, 1024, 2000, 2048]
  upm_height = ttFont["head"].unitsPerEm
  if 2048 < upm_height <= 4096:
    # Large but not absurd: only warn about potential filesize bloat.
    yield WARN,\
          Message("large-value",
                  f"Font em size (unitsPerEm) is {upm_height}"
                  f" which may be too large (causing filesize bloat),"
                  f" unless you are sure that the detail level"
                  f" in this font requires that much precision.")
  elif upm_height in ACCEPTABLE:
    yield PASS, f"Font em size is good (unitsPerEm = {upm_height})."
  else:
    yield FAIL,\
          Message("bad-value",
                  f"Font em size (unitsPerEm) is {upm_height}."
                  f" If possible, please consider using 1000."
                  f" Good values for unitsPerEm,"
                  f" though, are typically these: {ACCEPTABLE}.")
@check(
  id = 'com.google.fonts/check/version_bump',
  conditions = ['api_gfonts_ttFont',
                'github_gfonts_ttFont']
)
def com_google_fonts_check_version_bump(ttFont,
                                        api_gfonts_ttFont,
                                        github_gfonts_ttFont):
  """Version number has increased since previous release on Google Fonts?"""
  v_number = ttFont["head"].fontRevision
  api_gfonts_v_number = api_gfonts_ttFont["head"].fontRevision
  github_gfonts_v_number = github_gfonts_ttFont["head"].fontRevision
  failed = False
  # Equality and regression are reported as separate FAILs against each of
  # the two upstream sources (production API and GitHub repo), so a single
  # run may emit up to two FAIL messages.
  if v_number == api_gfonts_v_number:
    failed = True
    yield FAIL, (f"Version number {v_number} is equal to"
                 f" version on Google Fonts.")
  if v_number < api_gfonts_v_number:
    failed = True
    yield FAIL, (f"Version number {v_number} is less than"
                 f" version on Google Fonts ({api_gfonts_v_number}).")
  if v_number == github_gfonts_v_number:
    failed = True
    yield FAIL, (f"Version number {v_number} is equal to"
                 f" version on Google Fonts GitHub repo.")
  if v_number < github_gfonts_v_number:
    failed = True
    yield FAIL, (f"Version number {v_number} is less than"
                 f" version on Google Fonts GitHub repo ({github_gfonts_v_number}).")
  if not failed:
    yield PASS, (f"Version number {v_number} is greater than"
                 f" version on Google Fonts GitHub ({github_gfonts_v_number})"
                 f" and production servers ({api_gfonts_v_number}).")
@check(
  id = 'com.google.fonts/check/production_glyphs_similarity',
  conditions = ['api_gfonts_ttFont']
)
def com_google_fonts_check_production_glyphs_similarity(ttFont, api_gfonts_ttFont):
  """Glyphs are similiar to Google Fonts version?"""
  def glyphs_surface_area(ttFont):
    """Calculate the surface area of a glyph's ink"""
    from fontTools.pens.areaPen import AreaPen
    glyphs = {}
    glyph_set = ttFont.getGlyphSet()
    area_pen = AreaPen(glyph_set)
    for glyph in glyph_set.keys():
      glyph_set[glyph].draw(area_pen)
      area = area_pen.value
      # The pen accumulates, so it must be reset between glyphs.
      area_pen.value = 0
      glyphs[glyph] = area
    return glyphs
  bad_glyphs = []
  these_glyphs = glyphs_surface_area(ttFont)
  gfonts_glyphs = glyphs_surface_area(api_gfonts_ttFont)
  # Only glyphs present in both fonts can be compared.
  shared_glyphs = set(these_glyphs) & set(gfonts_glyphs)
  this_upm = ttFont['head'].unitsPerEm
  gfonts_upm = api_gfonts_ttFont['head'].unitsPerEm
  for glyph in shared_glyphs:
    # Normalize area difference against comparison's upm
    this_glyph_area = (these_glyphs[glyph] / this_upm) * gfonts_upm
    gfont_glyph_area = (gfonts_glyphs[glyph] / gfonts_upm) * this_upm
    # NOTE(review): 7000 area units is the divergence threshold;
    # presumably an empirically chosen value — confirm its provenance.
    if abs(this_glyph_area - gfont_glyph_area) > 7000:
      bad_glyphs.append(glyph)
  if bad_glyphs:
    yield WARN, ("Following glyphs differ greatly from"
                 " Google Fonts version: [{}]").format(", ".join(sorted(bad_glyphs)))
  else:
    yield PASS, ("Glyphs are similar in"
                 " comparison to the Google Fonts version.")
@check(
  id = 'com.google.fonts/check/fsselection',
  conditions = ['style']
)
def com_google_fonts_check_fsselection(ttFont, style):
  """Checking OS/2 fsSelection value."""
  from fontbakery.utils import check_bit_entry
  from fontbakery.constants import (STATIC_STYLE_NAMES,
                                    RIBBI_STYLE_NAMES,
                                    FsSelection)
  # REGULAR is expected for any "Regular" style, and also for any
  # non-RIBBI, non-italic static style.
  regular_expected = "Regular" in style or \
                     (style in STATIC_STYLE_NAMES and
                      style not in RIBBI_STYLE_NAMES and
                      "Italic" not in style)
  # Each entry: (expected bit state, bitmask, human-readable bit name).
  bit_checks = [(regular_expected, FsSelection.REGULAR, "REGULAR"),
                ("Italic" in style, FsSelection.ITALIC, "ITALIC"),
                (style in ["Bold", "BoldItalic"], FsSelection.BOLD, "BOLD")]
  for expected, bitmask, bitname in bit_checks:
    yield check_bit_entry(ttFont, "OS/2", "fsSelection",
                          expected,
                          bitmask=bitmask,
                          bitname=bitname)
@check(
  id = 'com.google.fonts/check/italic_angle',
  conditions = ['style'],
  rationale = """
  The 'post' table italicAngle property should be a reasonable amount, likely not more than -20°, never more than -30°, and never greater than 0°. Note that in the OpenType specification, the value is negative for a lean rightwards.
  https://docs.microsoft.com/en-us/typography/opentype/spec/post
  """
)
def com_google_fonts_check_italic_angle(ttFont, style):
  """Checking post.italicAngle value."""
  failed = False
  value = ttFont["post"].italicAngle
  # Checking that italicAngle <= 0
  if value > 0:
    failed = True
    yield FAIL,\
          Message("positive",
                  (f"The value of post.italicAngle is positive, which"
                   f" is likely a mistake and should become negative,"
                   f" from {value} to {-value}."))
  # Checking that italicAngle is less than 20° (not good) or 30° (bad)
  # Also note we invert the value to check it in a clear way
  # NOTE(review): abs() means large POSITIVE angles also trigger these
  # branches (in addition to the "positive" FAIL above) — confirm intended.
  if abs(value) > 30:
    failed = True
    yield FAIL,\
          Message("over-minus30-degrees",
                  (f"The value of post.italicAngle ({value}) is very high"
                   f" (over -30°!) and should be confirmed."))
  elif abs(value) > 20:
    failed = True
    yield WARN,\
          Message("over-minus20-degrees",
                  (f"The value of post.italicAngle ({value}) seems very high"
                   f" (over -20°!) and should be confirmed."))
  # Checking if italicAngle matches font style:
  if "Italic" in style:
    if ttFont['post'].italicAngle == 0:
      failed = True
      yield FAIL,\
            Message("zero-italic",
                    ("Font is italic, so post.italicAngle"
                     " should be non-zero."))
  else:
    if ttFont["post"].italicAngle != 0:
      failed = True
      yield FAIL,\
            Message("non-zero-normal",
                    ("Font is not italic, so post.italicAngle"
                     " should be equal to zero."))
  if not failed:
    yield PASS, (f'Value of post.italicAngle is {value}'
                 f' with style="{style}".')
@check(
  id = 'com.google.fonts/check/mac_style',
  conditions = ['style'],
  rationale = """
  The values of the flags on the macStyle entry on the 'head' OpenType table that describe whether a font is bold and/or italic must be coherent with the actual style of the font as inferred by its filename.
  """
)
def com_google_fonts_check_mac_style(ttFont, style):
  """Checking head.macStyle value."""
  from fontbakery.utils import check_bit_entry
  from fontbakery.constants import MacStyle
  # Each entry: (expected bit state, bitmask, human-readable bit name).
  bit_checks = [("Italic" in style, MacStyle.ITALIC, "ITALIC"),
                (style in ["Bold", "BoldItalic"], MacStyle.BOLD, "BOLD")]
  for expected, bitmask, bitname in bit_checks:
    yield check_bit_entry(ttFont, "head", "macStyle",
                          expected,
                          bitmask=bitmask,
                          bitname=bitname)
@check(
  id = 'com.google.fonts/check/contour_count',
  conditions = ['is_ttf',
                'not is_variable_font'],
  rationale = """
  Visually QAing thousands of glyphs by hand is tiring. Most glyphs can only be constructured in a handful of ways. This means a glyph's contour count will only differ slightly amongst different fonts, e.g a 'g' could either be 2 or 3 contours, depending on whether its double story or single story.
  However, a quotedbl should have 2 contours, unless the font belongs to a display family.
  This check currently does not cover variable fonts because there's plenty of alternative ways of constructing glyphs with multiple outlines for each feature in a VarFont. The expected contour count data for this check is currently optimized for the typical construction of glyphs in static fonts.
  """
)
def com_google_fonts_check_contour_count(ttFont):
  """Check if each glyph has the recommended amount of contours.
  This check is useful to assure glyphs aren't incorrectly constructed.
  The desired_glyph_data module contains the 'recommended' countour count
  for encoded glyphs. The contour counts are derived from fonts which were
  chosen for their quality and unique design decisions for particular glyphs.
  In the future, additional glyph data can be included. A good addition would
  be the 'recommended' anchor counts for each glyph.
  """
  from fontbakery.glyphdata import desired_glyph_data as glyph_data
  from fontbakery.utils import (get_font_glyph_data,
                                pretty_print_list)
  def in_PUA_range(codepoint):
    """
    In Unicode, a Private Use Area (PUA) is a range of code points that,
    by definition, will not be assigned characters by the Unicode Consortium.
    Three private use areas are defined:
    one in the Basic Multilingual Plane (U+E000–U+F8FF),
    and one each in, and nearly covering, planes 15 and 16
    (U+F0000–U+FFFFD, U+100000–U+10FFFD).
    """
    return (codepoint >= 0xE000 and codepoint <= 0xF8FF) or \
           (codepoint >= 0xF0000 and codepoint <= 0xFFFFD) or \
           (codepoint >= 0x100000 and codepoint <= 0x10FFFD)
  # rearrange data structure:
  desired_glyph_data_by_codepoint = {}
  desired_glyph_data_by_glyphname = {}
  for glyph in glyph_data:
    desired_glyph_data_by_glyphname[glyph['name']] = glyph
    # since the glyph in PUA ranges have unspecified meaning,
    # it doesnt make sense for us to have an expected contour cont for them
    if not in_PUA_range(glyph['unicode']):
      desired_glyph_data_by_codepoint[glyph['unicode']] = glyph
  bad_glyphs = []
  # NOTE(review): 'contours' entries are used below with `not in`,
  # so each is presumably a collection of acceptable counts — confirm
  # against fontbakery.glyphdata.
  desired_glyph_contours_by_codepoint = {f: desired_glyph_data_by_codepoint[f]['contours']
                                         for f in desired_glyph_data_by_codepoint}
  desired_glyph_contours_by_glyphname = {f: desired_glyph_data_by_glyphname[f]['contours']
                                         for f in desired_glyph_data_by_glyphname}
  font_glyph_data = get_font_glyph_data(ttFont)
  if font_glyph_data is None:
    yield FAIL,\
          Message("lacks-cmap",
                  "This font lacks cmap data.")
  else:
    font_glyph_contours_by_codepoint = {f['unicode']: list(f['contours'])[0]
                                        for f in font_glyph_data}
    font_glyph_contours_by_glyphname = {f['name']: list(f['contours'])[0]
                                        for f in font_glyph_data}
    # The comparison is done twice: once keyed by codepoint and once by
    # glyph name, so a divergent glyph may be reported under both keys.
    shared_glyphs_by_codepoint = set(desired_glyph_contours_by_codepoint) & \
                                 set(font_glyph_contours_by_codepoint)
    for glyph in sorted(shared_glyphs_by_codepoint):
      if font_glyph_contours_by_codepoint[glyph] not in desired_glyph_contours_by_codepoint[glyph]:
        bad_glyphs.append([glyph,
                           font_glyph_contours_by_codepoint[glyph],
                           desired_glyph_contours_by_codepoint[glyph]])
    shared_glyphs_by_glyphname = set(desired_glyph_contours_by_glyphname) & \
                                 set(font_glyph_contours_by_glyphname)
    for glyph in sorted(shared_glyphs_by_glyphname):
      if font_glyph_contours_by_glyphname[glyph] not in desired_glyph_contours_by_glyphname[glyph]:
        bad_glyphs.append([glyph,
                           font_glyph_contours_by_glyphname[glyph],
                           desired_glyph_contours_by_glyphname[glyph]])
    if len(bad_glyphs) > 0:
      cmap = ttFont['cmap'].getcmap(PlatformID.WINDOWS,
                                    WindowsEncodingID.UNICODE_BMP).cmap
      def _glyph_name(cmap, name):
        # Maps a codepoint key back to its glyph name when possible;
        # glyphname keys pass through unchanged.
        if name in cmap:
          return cmap[name]
        else:
          return name
      bad_glyphs_name = [
        f"Glyph name: {_glyph_name(cmap, name)}\t"
        f"Contours detected: {count}\t"
        f"Expected: {pretty_print_list(expected, shorten=None, glue='or')}"
        for name, count, expected in bad_glyphs
      ]
      bad_glyphs_name = '\n'.join(bad_glyphs_name)
      yield WARN,\
            Message("contour-count",
                    f"This check inspects the glyph outlines and detects the"
                    f" total number of contours in each of them. The expected"
                    f" values are infered from the typical ammounts of"
                    f" contours observed in a large collection of reference"
                    f" font families. The divergences listed below may simply"
                    f" indicate a significantly different design on some of"
                    f" your glyphs. On the other hand, some of these may flag"
                    f" actual bugs in the font such as glyphs mapped to an"
                    f" incorrect codepoint. Please consider reviewing"
                    f" the design and codepoint assignment of these to make"
                    f" sure they are correct.\n"
                    f"\n"
                    f"The following glyphs do not have the recommended"
                    f" number of contours:\n"
                    f"\n"
                    f"{bad_glyphs_name}")
    else:
      yield PASS, "All glyphs have the recommended amount of contours"
# FIXME!
# Temporarily disabled since GFonts hosted Cabin files seem to have changed in ways
# that break some of the assumptions in the check implementation below.
# More info at https://github.com/googlefonts/fontbakery/issues/2581
@disable
@check(
  id = 'com.google.fonts/check/production_encoded_glyphs',
  conditions = ['api_gfonts_ttFont']
)
def com_google_fonts_check_production_encoded_glyphs(ttFont, api_gfonts_ttFont):
  """Check font has same encoded glyphs as version hosted on
  fonts.google.com"""
  # (3, 1) is the Windows/Unicode-BMP cmap subtable.
  cmap = ttFont['cmap'].getcmap(3, 1).cmap
  gf_cmap = api_gfonts_ttFont['cmap'].getcmap(3, 1).cmap
  # Codepoints present upstream but missing from the candidate font.
  missing_codepoints = set(gf_cmap.keys()) - set(cmap.keys())
  if missing_codepoints:
    # Format each as a zero-padded uppercase hex value, e.g. "0x00C1".
    hex_codepoints = ['0x' + hex(c).upper()[2:].zfill(4) for c
                      in sorted(missing_codepoints)]
    yield FAIL,\
          Message("lost-glyphs",
                  f"Font is missing the following glyphs"
                  f" from the previous release"
                  f" [{', '.join(hex_codepoints)}]")
  else:
    yield PASS, ('Font has all the glyphs from the previous release')
@check(
  id = 'com.google.fonts/check/metadata/nameid/copyright',
  conditions = ['font_metadata']
)
def com_google_fonts_check_metadata_nameid_copyright(ttFont, font_metadata):
  """Copyright field for this font on METADATA.pb matches
     all copyright notice entries on the name table ?"""
  failed = False
  for record in ttFont['name'].names:
    notice = record.string.decode(record.getEncoding())
    # Only COPYRIGHT_NOTICE records are compared against METADATA.pb.
    is_copyright_record = record.nameID == NameID.COPYRIGHT_NOTICE
    if is_copyright_record and notice != font_metadata.copyright:
      failed = True
      yield FAIL,\
            Message("mismatch",
                    f'Copyright field for this font on METADATA.pb'
                    f' ("{font_metadata.copyright}") differs from'
                    f' a copyright notice entry on the name table:'
                    f' "{notice}"')
  if not failed:
    yield PASS, ("Copyright field for this font on METADATA.pb matches"
                 " copyright notice entries on the name table.")
@check(
  id = 'com.google.fonts/check/name/mandatory_entries',
  conditions = ['style']
)
def com_google_fonts_check_name_mandatory_entries(ttFont, style):
  """Font has all mandatory 'name' table entries?"""
  from fontbakery.utils import get_name_entry_strings
  from fontbakery.constants import RIBBI_STYLE_NAMES
  required_nameIDs = [NameID.FONT_FAMILY_NAME,
                      NameID.FONT_SUBFAMILY_NAME,
                      NameID.FULL_FONT_NAME,
                      NameID.POSTSCRIPT_NAME]
  # Non-RIBBI styles additionally require the typographic
  # (a.k.a. "preferred") family/subfamily name entries.
  if style not in RIBBI_STYLE_NAMES:
    required_nameIDs += [NameID.TYPOGRAPHIC_FAMILY_NAME,
                         NameID.TYPOGRAPHIC_SUBFAMILY_NAME]
  failed = False
  # The font must have at least these name IDs:
  for nameId in required_nameIDs:
    if len(get_name_entry_strings(ttFont, nameId)) == 0:
      failed = True
      yield FAIL,\
            Message("missing-entry",
                    f"Font lacks entry with nameId={nameId}"
                    f" ({NameID(nameId).name})")
  if not failed:
    yield PASS, "Font contains values for all mandatory name table entries."
@check(
  id = 'com.google.fonts/check/name/familyname',
  conditions = ['style',
                'familyname_with_spaces'],
  rationale = """
  Checks that the family name infered from the font filename matches the string at nameID 1 (NAMEID_FONT_FAMILY_NAME) if it conforms to RIBBI and otherwise checks that nameID 1 is the family name + the style name.
  """
)
def com_google_fonts_check_name_familyname(ttFont, style, familyname_with_spaces):
  """Check name table: FONT_FAMILY_NAME entries."""
  from fontbakery.utils import name_entry_id
  def get_only_weight(value):
    # Strips the "Italic" part off combined style names so only the
    # weight remains. Note that "BoldItalic" maps to the empty string,
    # since RIBBI styles carry no weight suffix on nameID 1.
    onlyWeight = {"BlackItalic": "Black",
                  "BoldItalic": "",
                  "ExtraBold": "ExtraBold",
                  "ExtraBoldItalic": "ExtraBold",
                  "ExtraLightItalic": "ExtraLight",
                  "LightItalic": "Light",
                  "MediumItalic": "Medium",
                  "SemiBoldItalic": "SemiBold",
                  "ThinItalic": "Thin"}
    return onlyWeight.get(value, value)
  failed = False
  only_weight = get_only_weight(style)
  for name in ttFont['name'].names:
    if name.nameID == NameID.FONT_FAMILY_NAME:
      if name.platformID == PlatformID.MACINTOSH:
        expected_value = familyname_with_spaces
      elif name.platformID == PlatformID.WINDOWS:
        # RIBBI styles: nameID 1 is just the family name.
        if style in ['Regular',
                     'Italic',
                     'Bold',
                     'Bold Italic']:
          expected_value = familyname_with_spaces
        else:
          # Non-RIBBI: family name + weight (strip() handles the
          # empty-weight case, e.g. "BoldItalic" -> "").
          expected_value = " ".join([familyname_with_spaces,
                                     only_weight]).strip()
      else:
        # Any other platform should not carry a FONT_FAMILY_NAME entry.
        failed = True
        yield FAIL,\
              Message("lacks-name",
                      f"Font should not have a "
                      f"{name_entry_id(name)} entry!")
        continue
      string = name.string.decode(name.getEncoding()).strip()
      if string != expected_value:
        failed = True
        yield FAIL,\
              Message("mismatch",
                      f'Entry {name_entry_id(name)} on the "name" table:'
                      f' Expected "{expected_value}"'
                      f' but got "{string}".')
  if not failed:
    yield PASS,\
          Message("ok",
                  "FONT_FAMILY_NAME entries are all good.")
@check(
  id = 'com.google.fonts/check/name/subfamilyname',
  conditions = ['expected_style']
)
def com_google_fonts_check_name_subfamilyname(ttFont, expected_style):
  """Check name table: FONT_SUBFAMILY_NAME entries.

  Compares the Mac and Windows FONT_SUBFAMILY_NAME records against the
  style names expected for this font. The Mac record is optional; the
  Windows record is mandatory.
  """
  failed = False
  nametable = ttFont['name']
  win_name = nametable.getName(NameID.FONT_SUBFAMILY_NAME,
                               PlatformID.WINDOWS,
                               WindowsEncodingID.UNICODE_BMP,
                               WindowsLanguageID.ENGLISH_USA)
  mac_name = nametable.getName(NameID.FONT_SUBFAMILY_NAME,
                               PlatformID.MACINTOSH,
                               MacintoshEncodingID.ROMAN,
                               MacintoshLanguageID.ENGLISH)
  # Mac record is optional, but must match when present.
  if mac_name and mac_name.toUnicode() != expected_style.mac_style_name:
    failed = True
    yield FAIL,\
          Message("bad-familyname",
                  f'SUBFAMILY_NAME for Mac "{mac_name.toUnicode()}"'
                  f' must be "{expected_style.mac_style_name}"')
  # BUGFIX: previously win_name.toUnicode() was called unguarded, so a
  # font lacking the Windows record crashed the check with an
  # AttributeError on None instead of FAILing cleanly.
  if win_name is None:
    failed = True
    yield FAIL,\
          Message("missing-entry",
                  'Font lacks a WINDOWS/UNICODE_BMP/ENGLISH_USA'
                  ' FONT_SUBFAMILY_NAME entry.')
  elif win_name.toUnicode() != expected_style.win_style_name:
    failed = True
    yield FAIL,\
          Message("bad-familyname",
                  f'SUBFAMILY_NAME for Win "{win_name.toUnicode()}"'
                  f' must be "{expected_style.win_style_name}"')
  if not failed:
    yield PASS, "FONT_SUBFAMILY_NAME entries are all good."
@check(
  id = 'com.google.fonts/check/name/fullfontname',
  conditions = ['style_with_spaces',
                'familyname_with_spaces']
)
def com_google_fonts_check_name_fullfontname(ttFont,
                                             style_with_spaces,
                                             familyname_with_spaces):
  """Check name table: FULL_FONT_NAME entries.

  Every FULL_FONT_NAME record must be the family name followed by the
  style name, separated by a single space. A Regular font whose record
  omits the "Regular" suffix only gets a WARN (see issue #1436).
  """
  from fontbakery.utils import name_entry_id
  expected_value = f"{familyname_with_spaces} {style_with_spaces}"
  passed = True
  for entry in ttFont['name'].names:
    if entry.nameID != NameID.FULL_FONT_NAME:
      continue
    actual = entry.string.decode(entry.getEncoding()).strip()
    if actual == expected_value:
      continue
    passed = False
    # special case
    # see https://github.com/googlefonts/fontbakery/issues/1436
    if style_with_spaces == "Regular" and actual == familyname_with_spaces:
      yield WARN,\
            Message("lacks-regular",
                    f'Entry {name_entry_id(entry)} on the "name" table:'
                    f' Got "{actual}" which lacks "Regular",'
                    f' but it is probably OK in this case.')
    else:
      yield FAIL,\
            Message("bad-entry",
                    f'Entry {name_entry_id(entry)} on the "name" table:'
                    f' Expected "{expected_value}" '
                    f' but got "{actual}".')
  if passed:
    yield PASS, "FULL_FONT_NAME entries are all good."
@check(
  id = 'com.google.fonts/check/name/postscriptname',
  conditions = ['style',
                'familyname']
)
def com_google_fonts_check_name_postscriptname(ttFont, style, familyname):
  """Check name table: POSTSCRIPT_NAME entries.

  Every POSTSCRIPT_NAME record must be "<familyname>-<style>"
  (both without spaces).
  """
  from fontbakery.utils import name_entry_id
  failed = False
  for name in ttFont['name'].names:
    if name.nameID == NameID.POSTSCRIPT_NAME:
      expected_value = f"{familyname}-{style}"
      string = name.string.decode(name.getEncoding()).strip()
      if string != expected_value:
        failed = True
        yield FAIL,\
              Message("bad-entry",
                      f'Entry {name_entry_id(name)} on the "name" table:'
                      f' Expected "{expected_value}"'
                      f' but got "{string}".')
  if not failed:
    # BUGFIX: the PASS message previously misspelled "POSTCRIPT_NAME".
    yield PASS, "POSTSCRIPT_NAME entries are all good."
@check(
  id = 'com.google.fonts/check/name/typographicfamilyname',
  conditions = ['style',
                'familyname_with_spaces']
)
def com_google_fonts_check_name_typographicfamilyname(ttFont, style, familyname_with_spaces):
  """Check name table: TYPOGRAPHIC_FAMILY_NAME entries.

  RIBBI fonts (Regular/Italic/Bold/BoldItalic) must NOT have a
  TYPOGRAPHIC_FAMILY_NAME entry; all other styles must have one whose
  value is exactly the family name (with spaces).
  """
  from fontbakery.utils import name_entry_id
  failed = False
  if style in ['Regular',
               'Italic',
               'Bold',
               'BoldItalic']:
    # RIBBI case: the presence of any TYPOGRAPHIC_FAMILY_NAME is an error.
    for name in ttFont['name'].names:
      if name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
        failed = True
        yield FAIL,\
              Message("ribbi",
                      (f'Font style is "{style}" and, for that reason,'
                       f' it is not expected to have a '
                       f'{name_entry_id(name)} entry!'))
  else:
    # Non-RIBBI case: at least one entry with the exact family name
    # must exist; any entry with a different value is a FAIL.
    expected_value = familyname_with_spaces
    has_entry = False
    for name in ttFont['name'].names:
      if name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
        string = name.string.decode(name.getEncoding()).strip()
        if string == expected_value:
          has_entry = True
        else:
          failed = True
          yield FAIL,\
                Message("non-ribbi-bad-value",
                        (f'Entry {name_entry_id(name)} on the "name" table:'
                         f' Expected "{expected_value}"'
                         f' but got "{string}".'))
    # Only report the missing-entry FAIL when no bad-value FAIL was
    # already emitted, to avoid redundant noise.
    if not failed and not has_entry:
      failed = True
      yield FAIL,\
            Message("non-ribbi-lacks-entry",
                    ("Non-RIBBI fonts must have a TYPOGRAPHIC_FAMILY_NAME"
                     " entry on the name table."))
  if not failed:
    yield PASS, "TYPOGRAPHIC_FAMILY_NAME entries are all good."
@check(
  id = 'com.google.fonts/check/name/typographicsubfamilyname',
  conditions=['expected_style']
)
def com_google_fonts_check_name_typographicsubfamilyname(ttFont, expected_style):
  """Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries.

  Validates the Win and Mac TYPOGRAPHIC_SUBFAMILY_NAME records:
  - when both exist, they must agree with each other;
  - for RIBBI styles the records are redundant, but if present must
    match the platform style names;
  - for non-RIBBI styles the Win record is mandatory and both records
    must equal the expected typographic style name.
  """
  failed = False
  nametable = ttFont['name']
  win_name = nametable.getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                               PlatformID.WINDOWS,
                               WindowsEncodingID.UNICODE_BMP,
                               WindowsLanguageID.ENGLISH_USA)
  mac_name = nametable.getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                               PlatformID.MACINTOSH,
                               MacintoshEncodingID.ROMAN,
                               MacintoshLanguageID.ENGLISH)
  # Cross-platform consistency: only checked when both records exist.
  if all([win_name, mac_name]):
    if win_name.toUnicode() != mac_name.toUnicode():
      failed = True
      yield FAIL,\
            Message("mismatch",
                    f'TYPOGRAPHIC_SUBFAMILY_NAME entry'
                    f' for Win "{win_name.toUnicode()}"'
                    f' and Mac "{mac_name.toUnicode()}" do not match.')
  if expected_style.is_ribbi:
    # RIBBI: records are optional (and deletable), but must be correct
    # when present.
    if win_name and win_name.toUnicode() != expected_style.win_style_name:
      failed = True
      yield FAIL,\
            Message("bad-win-name",
                    f'TYPOGRAPHIC_SUBFAMILY_NAME entry'
                    f' for Win "{win_name.toUnicode()}"'
                    f' must be "{expected_style.win_style_name}".'
                    f' Please note, since the font style is RIBBI,'
                    f' this record can be safely deleted.')
    if mac_name and mac_name.toUnicode() != expected_style.mac_style_name:
      failed = True
      yield FAIL,\
            Message("bad-mac-name",
                    f'TYPOGRAPHIC_SUBFAMILY_NAME entry'
                    f' for Mac "{mac_name.toUnicode()}"'
                    f' must be "{expected_style.mac_style_name}".'
                    f' Please note, since the font style is RIBBI,'
                    f' this record can be safely deleted.')
  # Non-RIBBI: expected_style.typo_style_name carries the expected
  # typographic style name (truthy only for non-RIBBI styles).
  if expected_style.typo_style_name:
    # The Win record is required; the Mac record is optional but must
    # match when present.
    if not win_name:
      failed = True
      yield FAIL,\
            Message("missing-typo-win",
                    f'TYPOGRAPHIC_SUBFAMILY_NAME for Win is missing.'
                    f' It must be "{expected_style.typo_style_name}".')
    elif win_name.toUnicode() != expected_style.typo_style_name:
      failed = True
      yield FAIL,\
            Message("bad-typo-win",
                    f'TYPOGRAPHIC_SUBFAMILY_NAME for Win'
                    f' "{win_name.toUnicode()}" is incorrect.'
                    f' It must be "{expected_style.typo_style_name}".')
    if mac_name and mac_name.toUnicode() != expected_style.typo_style_name:
      failed = True
      yield FAIL,\
            Message("bad-typo-mac",
                    f'TYPOGRAPHIC_SUBFAMILY_NAME for Mac'
                    f' "{mac_name.toUnicode()}" is incorrect.'
                    f' It must be "{expected_style.typo_style_name}".'
                    f' Please note, this record can be safely deleted.')
  if not failed:
    yield PASS, "TYPOGRAPHIC_SUBFAMILY_NAME entries are all good."
@check(
  id = 'com.google.fonts/check/name/copyright_length',
  rationale = """
  This is an arbitrary max length for the copyright notice field of the name table. We simply don't want such notices to be too long. Typically such notices are actually much shorter than this with a length of roughly 70 or 80 characters.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1603'
  }
)
def com_google_fonts_check_name_copyright_length(ttFont):
  """Length of copyright notice must not exceed 500 characters."""
  from fontbakery.utils import get_name_entries
  passed = True
  for entry in get_name_entries(ttFont, NameID.COPYRIGHT_NOTICE):
    notice = entry.string.decode(entry.getEncoding())
    if len(notice) <= 500:
      continue
    passed = False
    yield FAIL,\
          Message("too-long",
                  f'The length of the following copyright notice'
                  f' ({len(notice)}) exceeds 500 chars:'
                  f' "{notice}"')
  if passed:
    yield PASS, ("All copyright notice name entries on the"
                 " 'name' table are shorter than 500 characters.")
@check(
  id = 'com.google.fonts/check/fontdata_namecheck',
  rationale = """
  We need to check names are not already used, and today the best place to check that is http://namecheck.fontdata.com
  """,
  conditions = ["familyname"],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/494'
  }
)
def com_google_fonts_check_fontdata_namecheck(ttFont, familyname):
  """Familyname must be unique according to namecheck.fontdata.com"""
  import requests
  FB_ISSUE_TRACKER = "https://github.com/googlefonts/fontbakery/issues"
  NAMECHECK_URL = "http://namecheck.fontdata.com"
  try:
    # Since October 2019, it seems that we need to fake our user-agent
    # in order to get correct query results
    FAKE = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)'
    response = requests.post(NAMECHECK_URL,
                             params={'q': familyname},
                             headers={'User-Agent': FAKE},
                             timeout=10)
    data = response.content.decode("utf-8")
    if "fonts by that exact name" in data:
      yield INFO,\
            Message("name-collision",
                    f'The family name "{familyname}" seems'
                    f' to be already in use.\n'
                    f'Please visit {NAMECHECK_URL} for more info.')
    else:
      yield PASS, "Font familyname seems to be unique."
  # BUGFIX: the bare `except:` previously used here would also swallow
  # SystemExit and KeyboardInterrupt, preventing the user from aborting
  # the run; catch only proper exceptions instead.
  except Exception:
    import sys
    yield ERROR,\
          Message("namecheck-service",
                  f"Failed to access: {NAMECHECK_URL}.\n"
                  f"\t\tThis check relies on the external service"
                  f" http://namecheck.fontdata.com via the internet."
                  f" While the service cannot be reached or does not"
                  f" respond this check is broken.\n"
                  f"\n"
                  f"\t\tYou can exclude this check with the command line"
                  f" option:\n"
                  f"\t\t-x com.google.fonts/check/fontdata_namecheck\n"
                  f"\n"
                  f"\t\tOr you can wait until the service is available again.\n"
                  f"\t\tIf the problem persists please report this issue"
                  f" at: {FB_ISSUE_TRACKER}\n"
                  f"\n"
                  f"\t\tOriginal error message:\n"
                  f"\t\t{sys.exc_info()[0]}")
@check(
  id = 'com.google.fonts/check/fontv',
  rationale = """
  The git sha1 tagging and dev/release features of Source Foundry `font-v` tool are awesome and we would love to consider upstreaming the approach into fontmake someday. For now we only emit a WARN if a given font does not yet follow the experimental versioning style, but at some point we may start enforcing it.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1563'
  }
)
def com_google_fonts_check_fontv(ttFont):
  """Check for font-v versioning."""
  from fontv.libfv import FontVersion
  font_version = FontVersion(ttFont)
  # A well-formed font-v string has a version number plus either
  # a "dev" or a "release" state tag.
  is_tagged = font_version.is_development or font_version.is_release
  if font_version.version and is_tagged:
    yield PASS, "Font version string looks GREAT!"
  else:
    yield INFO,\
          Message("bad-format",
                  f'Version string is: "{font_version.get_name_id5_version_string()}"\n'
                  f'The version string must ideally include a git commit hash'
                  f' and either a "dev" or a "release" suffix such as in the'
                  f' example below:\n'
                  f'"Version 1.3; git-0d08353-release"')
# Disabling this check since the previous implementation was
# bogus due to the way fonttools encodes the data into the TTF
# files and the new attempt at targeting the real problem is
# still not quite right.
# FIXME: reimplement this addressing the actual root cause of the issue.
# See also ongoing discussion at:
# https://github.com/googlefonts/fontbakery/issues/1727
@disable
@check(
  id = 'com.google.fonts/check/negative_advance_width',
  rationale = """
  Advance width values in the Horizontal Metrics (htmx) table cannot be negative since they are encoded as unsigned 16-bit values. But some programs may infer and report a negative advance by looking up the x-coordinates of the glyphs directly on the glyf table.
  There are reports of broken versions of Glyphs.app causing this kind of problem as reported at
  https://github.com/googlefonts/fontbakery/issues/1720 and
  https://github.com/fonttools/fonttools/pull/1198
  This check detects and reports such malformed glyf table entries.
  """,
  conditions = ['is_ttf'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1720'
  }
)
def com_google_fonts_check_negative_advance_width(ttFont):
  """Check that advance widths cannot be inferred as negative.

  NOTE: this check is @disable'd (see comment above) because this
  implementation was found to be bogus with respect to how fonttools
  encodes glyf data.
  """
  failed = False
  for glyphName in ttFont["glyf"].glyphs:
    coords = ttFont["glyf"][glyphName].coordinates
    # NOTE(review): these fixed negative indices presumably target the
    # horizontal phantom points at the end of the coordinate list, but
    # that assumption is exactly what made this check bogus — confirm
    # against the fonttools glyf representation before re-enabling.
    rightX = coords[-3][0]
    leftX = coords[-4][0]
    advwidth = rightX - leftX
    if advwidth < 0:
      failed = True
      yield FAIL,\
            Message("bad-coordinates",
                    f'Glyph "{glyphName}" has bad coordinates on the glyf'
                    f' table, which may lead to the advance width to be'
                    f' interpreted as a negative value ({advwidth}).')
  if not failed:
    yield PASS, "The x-coordinates of all glyphs look good."
@check(
  id = 'com.google.fonts/check/varfont/consistent_axes',
  rationale = """
  In order to facilitate the construction of intuitive and friendly user interfaces, all variable font files in a given family should have the same set of variation axes. Also, each axis must have a consistent setting of min/max value ranges accross all the files.
  """,
  conditions = ['VFs'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2810'
  }
)
def com_google_fonts_check_varfont_consistent_axes(VFs):
  """Ensure that all variable font files have the same set of axes and axis ranges."""
  # Reference set: the union of all axis tags seen across the family.
  ref_ranges = {}
  for vf in VFs:
    ref_ranges.update({axis.axisTag: (axis.minValue, axis.maxValue)
                       for axis in vf['fvar'].axes})
  passed = True
  # Every file must declare every axis in the reference set.
  for vf in VFs:
    axis_tags = {axis.axisTag for axis in vf['fvar'].axes}
    for tag in ref_ranges:
      if tag not in axis_tags:
        passed = False
        yield FAIL, \
              Message("missing-axis",
                      f"{os.path.basename(vf.reader.file.name)}:"
                      f" lacks a '{tag}' variation axis.")
  # For each axis tag, collect the distinct (min, max) ranges found
  # across the family; more than one distinct range is an inconsistency.
  # BUGFIX: the previous code called list.index() with a string tag on a
  # list of Axis objects (ValueError) and tested string membership
  # against Axis objects (always False), iterated the dict without
  # .items(), and omitted the f-prefix on the message string so the
  # placeholders were printed literally.
  expected_ranges = {tag: {(axis.minValue, axis.maxValue)
                           for vf in VFs
                           for axis in vf['fvar'].axes
                           if axis.axisTag == tag}
                     for tag in ref_ranges}
  for tag, ranges in expected_ranges.items():
    if len(ranges) > 1:
      passed = False
      yield FAIL, \
            Message("inconsistent-axis-range",
                    f"Axis '{tag}' has diverging ranges"
                    f" across the family: {ranges}.")
  if passed:
    yield PASS, "All looks good!"
@check(
  id = 'com.google.fonts/check/varfont/generate_static',
  rationale = """
  Google Fonts may serve static fonts which have been generated from variable fonts. This test will attempt to generate a static ttf using fontTool's varLib mutator.
  The target font will be the mean of each axis e.g:
  **VF font axes**
  - min weight, max weight = 400, 800
  - min width, max width = 50, 100
  **Target Instance**
  - weight = 600
  - width = 75
  """,
  conditions = ['is_variable_font'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1727'
  }
)
def com_google_fonts_check_varfont_generate_static(ttFont):
  """Check a static ttf can be generated from a variable font."""
  import tempfile
  from fontTools.varLib import mutator
  try:
    # Instantiate at the midpoint of every axis.
    loc = {k.axisTag: float((k.maxValue + k.minValue) / 2)
           for k in ttFont['fvar'].axes}
    with tempfile.TemporaryFile() as instance:
      font = mutator.instantiateVariableFont(ttFont, loc)
      font.save(instance)
      yield PASS, ("fontTools.varLib.mutator"
                   " generated a static font instance")
  except Exception as e:
    # Any error from varLib is reported verbatim to aid debugging.
    # BUGFIX: message previously read "to generated" (grammar).
    yield FAIL,\
          Message("varlib-mutator",
                  f"fontTools.varLib.mutator failed"
                  f" to generate a static font instance\n"
                  f"{repr(e)}")
@check(
  id = 'com.google.fonts/check/varfont/has_HVAR',
  rationale = """
  Not having a HVAR table can lead to costly text-layout operations on some platforms, which we want to avoid.
  So, all variable fonts on the Google Fonts collection should have an HVAR with valid values.
  More info on the HVAR table can be found at:
  https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#variation-data-tables-and-miscellaneous-requirements
  """, # FIX-ME: clarify which platforms suffer from costly
       # text-layout operations when an HVAR table is missing!
  conditions = ['is_variable_font'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2119'
  }
)
def com_google_fonts_check_varfont_has_HVAR(ttFont):
  """Check that variable fonts have an HVAR table."""
  # Simple table-presence check; values are not validated here.
  if "HVAR" not in ttFont.keys():
    yield FAIL,\
          Message("lacks-HVAR",
                  "All variable fonts on the Google Fonts collection"
                  " must have a properly set HVAR table in order"
                  " to avoid costly text-layout operations on"
                  " certain platforms.")
  else:
    yield PASS, ("This variable font contains an HVAR table.")
# Temporarily disabled.
# See: https://github.com/googlefonts/fontbakery/issues/2118#issuecomment-432283698
@disable
@check(
  id = 'com.google.fonts/check/varfont/has_MVAR',
  rationale = """
  Per the OpenType spec, the MVAR tables contain variation data for metadata otherwise in tables such as OS/2 and hhea; if not present, then the default values in those tables will apply to all instances, which can effect text layout.
  Thus, MVAR tables should be present and correct in all variable fonts since text layout software depends on these values.
  """, # FIX-ME: Clarify this rationale text.
  # See: https://github.com/googlefonts/fontbakery/issues/2118#issuecomment-432108560
  conditions = ['is_variable_font'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2118'
  }
)
def com_google_fonts_check_varfont_has_MVAR(ttFont):
  """Check that variable fonts have an MVAR table."""
  # Simple table-presence check (currently @disable'd — see comment above).
  if "MVAR" not in ttFont.keys():
    yield FAIL,\
          Message("lacks-MVAR",
                  "All variable fonts on the Google Fonts collection"
                  " must have a properly set MVAR table because"
                  " text-layout software depends on it.")
  else:
    yield PASS, ("This variable font contains an MVAR table.")
@check(
  id = 'com.google.fonts/check/smart_dropout',
  conditions = ['is_ttf',
                'not VTT_hinted'],
  rationale = """
  This setup is meant to ensure consistent rendering quality for fonts across all devices (with different rendering/hinting capabilities).
  Below is the snippet of instructions we expect to see in the fonts:
  B8 01 FF    PUSHW 0x01FF
  85          SCANCTRL (unconditinally turn on
                        dropout control mode)
  B0 04       PUSHB 0x04
  8D          SCANTYPE (enable smart dropout control)
  "Smart dropout control" means activating rules 1, 2 and 5:
  Rule 1: If a pixel's center falls within the glyph outline,
          that pixel is turned on.
  Rule 2: If a contour falls exactly on a pixel's center,
          that pixel is turned on.
  Rule 5: If a scan line between two adjacent pixel centers
          (either vertical or horizontal) is intersected
          by both an on-Transition contour and an off-Transition
          contour and neither of the pixels was already turned on
          by rules 1 and 2, turn on the pixel which is closer to
          the midpoint between the on-Transition contour and
          off-Transition contour. This is "Smart" dropout control.
  For more detailed info (such as other rules not enabled in this snippet), please refer to the TrueType Instruction Set documentation.
  """
)
def com_google_fonts_check_smart_dropout(ttFont):
  """Font enables smart dropout control in "prep" table instructions?"""
  # Bytecode for: PUSHW 0x01FF; SCANCTRL; PUSHB 0x04; SCANTYPE
  INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d"
  has_smart_dropout = ("prep" in ttFont
                       and INSTRUCTIONS in ttFont["prep"].program.getBytecode())
  if not has_smart_dropout:
    yield FAIL,\
          Message("lacks-smart-dropout",
                  "The 'prep' table does not contain TrueType"
                  " instructions enabling smart dropout control."
                  " To fix, export the font with autohinting enabled,"
                  " or run ttfautohint on the font, or run the"
                  " `gftools fix-nonhinting` script.")
  else:
    yield PASS, ("'prep' table contains instructions"
                 " enabling smart dropout control.")
@check(
  id = 'com.google.fonts/check/vttclean',
  rationale = """
  The goal here is to reduce filesizes and improve pageloading when dealing with webfonts.
  The VTT Talk sources are not necessary at runtime and endup being just dead weight when left embedded in the font binaries. The sources should be kept on the project files but stripped out when building release binaries.
  """
)
def com_google_fonts_check_vtt_clean(ttFont, vtt_talk_sources):
  """There must not be VTT Talk sources in the font."""
  # vtt_talk_sources: list of table tags holding VTT sources (condition).
  if not vtt_talk_sources:
    yield PASS, "There are no tables with VTT Talk sources embedded in the font."
  else:
    yield FAIL,\
          Message("has-vtt-sources",
                  f"Some tables containing VTT Talk (hinting) sources"
                  f" were found in the font and should be removed in order"
                  f" to reduce total filesize:"
                  f" {', '.join(vtt_talk_sources)}")
@check(
  id = 'com.google.fonts/check/aat',
  rationale = """
  Apple's TrueType reference manual [1] describes SFNT tables not in the Microsoft OpenType specification [2] and these can sometimes sneak into final release files, but Google Fonts should only have OpenType tables.
  [1] https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6.html
  [2] https://docs.microsoft.com/en-us/typography/opentype/spec/
  """
)
def com_google_fonts_check_aat(ttFont):
  """Are there unwanted Apple tables?"""
  # AAT-specific table tags that should not ship in Google Fonts binaries.
  UNWANTED_TABLES = {
    'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc',
    'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just',
    'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop',
    'trak', 'xref'
  }
  unwanted_tables_found = [table for table in ttFont.keys()
                           if table in UNWANTED_TABLES]
  if unwanted_tables_found:
    yield FAIL,\
          Message("has-unwanted-tables",
                  f"Unwanted AAT tables were found"
                  f" in the font and should be removed, either by"
                  f" fonttools/ttx or by editing them using the tool"
                  f" they built with:"
                  f" {', '.join(unwanted_tables_found)}")
  else:
    yield PASS, "There are no unwanted AAT tables."
@check(
  id = 'com.google.fonts/check/fvar_name_entries',
  conditions = ['is_variable_font'],
  rationale = """
  The purpose of this check is to make sure that all name entries referenced by variable font instances do exist in the name table.
  """
)
def com_google_fonts_check_fvar_name_entries(ttFont):
  """All name entries referenced by fvar instances exist on the name table?"""
  passed = True
  for instance in ttFont["fvar"].instances:
    # Each named instance points at a name-table record via subfamilyNameID.
    has_entry = any(entry.nameID == instance.subfamilyNameID
                    for entry in ttFont["name"].names)
    if not has_entry:
      passed = False
      yield FAIL,\
            Message("missing-name",
                    f"Named instance with coordinates {instance.coordinates}"
                    f" lacks an entry on the name table"
                    f" (nameID={instance.subfamilyNameID}).")
  if passed:
    yield PASS, "OK"
@check(
  id = 'com.google.fonts/check/varfont_has_instances',
  conditions = ['is_variable_font'],
  rationale = """
  Named instances must be present in all variable fonts in order not to frustrate the users' typical expectations of a traditional static font workflow.
  """
)
def com_google_fonts_check_varfont_has_instances(ttFont):
  """A variable font must have named instances."""
  # An empty instance list is the failure condition.
  if not ttFont["fvar"].instances:
    yield FAIL,\
          Message("lacks-named-instances",
                  "This variable font lacks"
                  " named instances on the fvar table.")
  else:
    yield PASS, "OK"
@check(
  id = 'com.google.fonts/check/varfont_weight_instances',
  conditions = ['is_variable_font'],
  rationale = """
  The named instances on the weight axis of a variable font must have coordinates that are multiples of 100 on the design space.
  """
)
def com_google_fonts_check_varfont_weight_instances(ttFont):
  """Variable font weight coordinates must be multiples of 100."""
  passed = True
  for instance in ttFont["fvar"].instances:
    wght = instance.coordinates.get('wght')
    # Instances without a 'wght' coordinate are out of scope here.
    if wght is None or wght % 100 == 0:
      continue
    passed = False
    yield FAIL,\
          Message("bad-coordinate",
                  f"Found a variable font instance with"
                  f" 'wght'={wght}."
                  f" This should instead be a multiple of 100.")
  if passed:
    yield PASS, "OK"
@check(
  id = 'com.google.fonts/check/family/tnum_horizontal_metrics',
  conditions = ['RIBBI_ttFonts'],
  rationale = """
  Tabular figures need to have the same metrics in all styles in order to allow tables to be set with proper typographic control, but to maintain the placement of decimals and numeric columns between rows.
  Here's a good explanation of this:
  https://www.typography.com/techniques/fonts-for-financials/#tabular-figs
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2278'
  }
)
def com_google_fonts_check_family_tnum_horizontal_metrics(RIBBI_ttFonts):
  """All tabular figures must have the same width across the RIBBI-family."""
  # Map each advance width to the list of ".tnum"-suffixed glyph names
  # that use it, aggregated across all RIBBI styles of the family.
  tnum_widths = {}
  for ttFont in RIBBI_ttFonts:
    glyphs = ttFont.getGlyphSet()
    tnum_glyphs = [(glyph_id, glyphs[glyph_id])
                   for glyph_id in glyphs.keys()
                   if glyph_id.endswith(".tnum")]
    for glyph_id, glyph in tnum_glyphs:
      if glyph.width not in tnum_widths:
        tnum_widths[glyph.width] = [glyph_id]
      else:
        tnum_widths[glyph.width].append(glyph_id)
  # More than one distinct width means the tabular figures disagree.
  if len(tnum_widths.keys()) > 1:
    # Find the width shared by the most glyphs; it is presumed correct.
    max_num = 0
    most_common_width = None
    # NOTE: 'glyphs' here intentionally shadows the glyph-set variable
    # from the loop above; it now holds a list of glyph names.
    for width, glyphs in tnum_widths.items():
      if len(glyphs) > max_num:
        max_num = len(glyphs)
        most_common_width = width
    # Drop the majority width so the report lists only the outliers.
    del tnum_widths[most_common_width]
    yield FAIL,\
          Message("inconsistent-widths",
                  f"The most common tabular glyph width is"
                  f" {most_common_width}. But there are other"
                  f" tabular glyphs with different widths"
                  f" such as the following ones:\n\t{tnum_widths}.")
  else:
    yield PASS, "OK"
@check(
  id = 'com.google.fonts/check/integer_ppem_if_hinted',
  conditions = ['is_hinted'],
  rationale = """
  Hinted fonts must have head table flag bit 3 set.
  Per https://docs.microsoft.com/en-us/typography/opentype/spec/head, bit 3 of Head::flags decides whether PPEM should be rounded. This bit should always be set for hinted fonts.
  Note:
  Bit 3 = Force ppem to integer values for all internal scaler math;
          May use fractional ppem sizes if this bit is clear;
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2338'
  }
)
def com_google_fonts_check_integer_ppem_if_hinted(ttFont):
  """PPEM must be an integer on hinted fonts."""
  # head.flags bit 3: "force ppem to integer values".
  if ttFont["head"].flags & (1 << 3):
    yield PASS, "OK"
  else:
    # BUGFIX: the adjacent string literals below previously lacked "\n"
    # separators, so the shell commands in the message were rendered
    # fused together (e.g. "# create virtualenvpython3 -m venv venv").
    yield FAIL,\
          Message("bad-flags",
                  ("This is a hinted font, so it must have bit 3 set"
                   " on the flags of the head table, so that"
                   " PPEM values will be rounded into an integer"
                   " value.\n"
                   "\n"
                   "This can be accomplished by using the"
                   " 'gftools fix-hinting' command.\n"
                   "\n"
                   "# create virtualenv\n"
                   "python3 -m venv venv\n"
                   "\n"
                   "# activate virtualenv\n"
                   "source venv/bin/activate\n"
                   "\n"
                   "# install gftools\n"
                   "pip install git+https://www.github.com"
                   "/googlefonts/tools"))
@check(
  id = 'com.google.fonts/check/ligature_carets',
  conditions = ['ligature_glyphs'],
  rationale = """
  All ligatures in a font must have corresponding caret (text cursor) positions defined in the GDEF table, otherwhise, users may experience issues with caret rendering.
  If using GlyphsApp, ligature carets can be set directly on canvas by accessing the `Glyph -> Set Anchors` menu option or by pressing the `Cmd+U` keyboard shortcut.
  If designing with UFOs, (as of Oct 2020) ligature carets are not yet compiled by ufo2ft, and therefore will not build via FontMake. See googlefonts/ufo2ft/issues/329
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1225'
  }
)
def com_google_fonts_check_ligature_carets(ttFont, ligature_glyphs):
  """Are there caret positions declared for every ligature?

  ligature_glyphs: list of ligature glyph names provided by the
  condition, or -1 as a sentinel when ligature lookup failed.
  """
  # Sentinel value: the ligature-harvesting condition could not parse
  # this font (see issue #1596).
  if ligature_glyphs == -1:
    yield FAIL,\
          Message("malformed",
                  ("Failed to lookup ligatures."
                   " This font file seems to be malformed."
                   " For more info, read:"
                   " https://github.com/googlefonts/fontbakery/issues/1596"))
  elif "GDEF" not in ttFont:
    yield WARN,\
          Message("GDEF-missing",
                  ("GDEF table is missing, but it is mandatory"
                   " to declare it on fonts that provide ligature"
                   " glyphs because the caret (text cursor)"
                   " positioning for each ligature must be"
                   " provided in this table."))
  else:
    lig_caret_list = ttFont["GDEF"].table.LigCaretList
    # Ligature glyphs not covered by the GDEF LigCaretList lack
    # caret positions.
    if lig_caret_list is None:
      missing = set(ligature_glyphs)
    else:
      missing = set(ligature_glyphs) - set(lig_caret_list.Coverage.glyphs)
    if lig_caret_list is None or lig_caret_list.LigGlyphCount == 0:
      # No caret data at all: a blanket WARN.
      yield WARN,\
            Message("lacks-caret-pos",
                    "This font lacks caret position values"
                    " for ligature glyphs on its GDEF table.")
    elif missing:
      # Partial caret data: list exactly which ligatures lack positions.
      missing = "\n\t- ".join(sorted(missing))
      yield WARN,\
            Message("incomplete-caret-pos-data",
                    f"This font lacks caret positioning"
                    f" values for these ligature glyphs:"
                    f"\n\t- {missing}\n\n  ")
    else:
      yield PASS, "Looks good!"
@check(
  id = 'com.google.fonts/check/kerning_for_non_ligated_sequences',
  conditions = ['ligatures',
                'has_kerning_info'],
  rationale = """
  Fonts with ligatures should have kerning on the corresponding non-ligated sequences for text where ligatures aren't used (eg https://github.com/impallari/Raleway/issues/14).
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1145'
  }
)
def com_google_fonts_check_kerning_for_non_ligated_sequences(ttFont, ligatures, has_kerning_info):
  """Is there kerning info for non-ligated sequences?

  ligatures: mapping of first-glyph -> list of component sequences
  (or -1 as a sentinel when ligature lookup failed).
  """
  def look_for_nonligated_kern_info(table):
    # Remove from ligature_pairs every glyph pair for which this
    # (PairPos) lookup declares a kerning value.
    for pairpos in table.SubTable:
      # Guard before touching PairSet: other subtable formats
      # (e.g. class-based PairPos) do not carry one.
      if not hasattr(pairpos, 'PairSet'):
        continue
      for i, glyph in enumerate(pairpos.Coverage.glyphs):
        for pairvalue in pairpos.PairSet[i].PairValueRecord:
          kern_pair = (glyph, pairvalue.SecondGlyph)
          if kern_pair in ligature_pairs:
            ligature_pairs.remove(kern_pair)
  def ligatures_str(pairs):
    # Human-readable bullet list of glyph pairs.
    result = [f"\t- {first} + {second}" for first, second in pairs]
    return "\n".join(result)
  if ligatures == -1:
    yield FAIL,\
          Message("malformed",
                  "Failed to lookup ligatures."
                  " This font file seems to be malformed."
                  " For more info, read:"
                  " https://github.com/googlefonts/fontbakery/issues/1596")
  else:
    # Decompose each ligature into its sequence of adjacent glyph pairs.
    ligature_pairs = []
    for first, comp in ligatures.items():
      for components in comp:
        while components:
          pair = (first, components[0])
          if pair not in ligature_pairs:
            ligature_pairs.append(pair)
          first = components[0]
          components.pop(0)
    # BUGFIX: kerning (the 'kern' feature with PairPos lookups) lives in
    # the GPOS table, not GSUB. The previous code scanned GSUB, where no
    # PairSet-bearing lookups exist, so no pair was ever removed and the
    # check always WARNed whenever the font had ligatures. The messages
    # below already (correctly) refer to GPOS.
    for record in ttFont["GPOS"].table.FeatureList.FeatureRecord:
      if record.FeatureTag == 'kern':
        for index in record.Feature.LookupListIndex:
          lookup = ttFont["GPOS"].table.LookupList.Lookup[index]
          look_for_nonligated_kern_info(lookup)
    if ligature_pairs:
      yield WARN,\
            Message("lacks-kern-info",
                    f"GPOS table lacks kerning info for the following"
                    f" non-ligated sequences:\n"
                    f"{ligatures_str(ligature_pairs)}\n\n  ")
    else:
      yield PASS, ("GPOS table provides kerning info for "
                   "all non-ligated sequences.")
@check(
  id = 'com.google.fonts/check/name/family_and_style_max_length',
  rationale = """
  According to a GlyphsApp tutorial [1], in order to make sure all versions of Windows recognize it as a valid font file, we must make sure that the concatenated length of the familyname (NameID.FONT_FAMILY_NAME) and style (NameID.FONT_SUBFAMILY_NAME) strings in the name table do not exceed 20 characters.
  After discussing the problem in more detail at `FontBakery issue #2179 [2] we decided that allowing up to 27 chars would still be on the safe side, though.
  [1] https://glyphsapp.com/tutorials/multiple-masters-part-3-setting-up-instances
  [2] https://github.com/googlefonts/fontbakery/issues/2179
  """,
  misc_metadata = {
    # Somebody with access to Windows should make some experiments
    # and confirm that this is really the case.
    'affects': [('Windows', 'unspecified')],
    'request': 'https://github.com/googlefonts/fontbakery/issues/1488',
  }
)
def com_google_fonts_check_name_family_and_style_max_length(ttFont):
  """Combined length of family and style must not exceed 27 characters.

  Pairs every FONT_FAMILY_NAME entry with every FONT_SUBFAMILY_NAME
  entry on the same platform and WARNs for each combination whose
  concatenated length exceeds 27 characters.
  """
  from fontbakery.utils import (get_name_entries,
                                get_name_entry_strings)
  failed = False
  for familyname in get_name_entries(ttFont,
                                     NameID.FONT_FAMILY_NAME):
    # we'll only match family/style name entries with the same platform ID:
    plat = familyname.platformID
    familyname_str = familyname.string.decode(familyname.getEncoding())
    for stylename_str in get_name_entry_strings(ttFont,
                                                NameID.FONT_SUBFAMILY_NAME,
                                                platformID=plat):
      if len(familyname_str + stylename_str) > 27:
        failed = True
        yield WARN,\
              Message("too-long",
                      f"The combined length of family and style"
                      f" exceeds 27 chars in the following"
                      f" '{PlatformID(plat).name}' entries:\n"
                      f" FONT_FAMILY_NAME = '{familyname_str}' /"
                      f" SUBFAMILY_NAME = '{stylename_str}'\n"
                      f"\n"
                      f"Please take a look at the conversation at"
                      f" https://github.com/googlefonts/fontbakery/issues/2179"
                      f" in order to understand the reasoning behind these"
                      f" name table records max-length criteria.")
  if not failed:
    yield PASS, "All name entries are good."
@check(
id = 'com.google.fonts/check/name/line_breaks',
rationale = """
There are some entries on the name table that may include more than one line of text. The Google Fonts team, though, prefers to keep the name table entries short and simple without line breaks.
For instance, some designers like to include the full text of a font license in the "copyright notice" entry, but for the GFonts collection this entry should only mention year, author and other basic info in a manner enforced by com.google.fonts/check/font_copyright
"""
)
def com_google_fonts_check_name_line_breaks(ttFont):
  """Name table entries should not contain line-breaks."""
  # Decode each name record with its own declared encoding and flag any
  # record whose text contains a newline character.
  clean = True
  for entry in ttFont["name"].names:
    text = entry.string.decode(entry.getEncoding())
    if "\n" not in text:
      continue
    clean = False
    yield FAIL,\
          Message("line-break",
                  f"Name entry {NameID(entry.nameID).name}"
                  f" on platform {PlatformID(entry.platformID).name}"
                  f" contains a line-break.")
  if clean:
    yield PASS, ("Name table entries are all single-line"
                 " (no line-breaks found).")
@check(
id = 'com.google.fonts/check/name/rfn',
rationale = """
Some designers adopt the "Reserved Font Name" clause of the OFL license. This means that the original author reserves the rights to the family name and other people can only distribute modified versions using a different family name.
Google Fonts published updates to the fonts in the collection in order to fix issues and/or implement further improvements to the fonts. It is important to keep the family name so that users of the webfonts can benefit from the updates. Since it would forbid such usage scenario, all families in the GFonts collection are required to not adopt the RFN clause.
This check ensures "Reserved Font Name" is not mentioned in the name table.
"""
)
def com_google_fonts_check_name_rfn(ttFont):
  """Name table strings must not contain the string 'Reserved Font Name'."""
  # Case-insensitive scan over every name record; one FAIL per offending record.
  found_rfn = False
  for record in ttFont["name"].names:
    text = record.toUnicode()
    if "reserved font name" not in text.lower():
      continue
    found_rfn = True
    yield FAIL,\
          Message("rfn",
                  f'Name table entry ("{text}")'
                  f' contains "Reserved Font Name".'
                  f' This is an error except in a few specific rare cases.')
  if not found_rfn:
    yield PASS, 'None of the name table strings contain "Reserved Font Name".'
@check(
id='com.google.fonts/check/family/control_chars',
conditions=['are_ttf'],
rationale="""
Use of some unacceptable control characters in the U+0000 - U+001F range can lead to rendering issues on some platforms.
Acceptable control characters are defined as .null (U+0000) and CR (U+000D) for this test.
"""
)
def com_google_fonts_check_family_control_chars(ttFonts):
  """Does font file include unacceptable control character glyphs?"""
  # Glyph names for the disallowed C0 control characters. The definition
  # covers the entire control character Unicode block (U+0001..U+001F) except:
  # - .null (U+0000)
  # - CR (U+000D)
  # Generated programmatically instead of the previous hand-written
  # 30-entry literal list (same names, same ascending order).
  unacceptable_cc_list = [f"uni{codepoint:04X}"
                          for codepoint in range(0x0001, 0x0020)
                          if codepoint != 0x000D]
  # Maps the path of each failing font to the list of unacceptable
  # glyph names found in it.
  failed_font_dict = {}
  for ttFont in ttFonts:
    glyph_name_set = set(ttFont["glyf"].glyphs.keys())
    fontname = ttFont.reader.file.name
    # Iterate the reference list (not the set) so the report preserves
    # ascending codepoint order.
    unacceptable_glyphs_in_set = [glyph_name
                                  for glyph_name in unacceptable_cc_list
                                  if glyph_name in glyph_name_set]
    if unacceptable_glyphs_in_set:
      failed_font_dict[fontname] = unacceptable_glyphs_in_set
  if len(failed_font_dict) > 0:
    msg_unacceptable = "The following unacceptable control characters were identified:\n"
    for fnt in failed_font_dict.keys():
      msg_unacceptable += f" {fnt}: {', '.join(failed_font_dict[fnt])}\n"
    yield FAIL,\
          Message("unacceptable",
                  f"{msg_unacceptable}")
  else:
    yield PASS, ("Unacceptable control characters were not identified.")
@check(
id = 'com.google.fonts/check/repo/dirname_matches_nameid_1',
conditions = ['gfonts_repo_structure',
'not is_variable_font'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/2302'
}
)
def com_google_fonts_check_repo_dirname_match_nameid_1(fonts,
                                                       gfonts_repo_structure):
  """Directory name in GFonts repo structure must
     match NameID 1 of the regular."""
  from fontTools.ttLib import TTFont
  from fontbakery.utils import (get_name_entry_strings,
                                get_absolute_path,
                                get_regular)
  regular = get_regular(fonts)
  if not regular:
    yield FAIL,\
          Message("lacks-regular",
                  "The font seems to lack a regular.")
    return
  entry = get_name_entry_strings(TTFont(regular), NameID.FONT_FAMILY_NAME)[0]
  # Expected directory name: the family name, lowercased, with spaces and
  # hyphens removed.
  expected = entry.lower().replace(' ', '').replace('-', '')
  # Only the family directory path component (second-to-last) is needed.
  # The previous implementation unpacked three components, shadowing the
  # `license` builtin and leaving two unused locals.
  familypath = get_absolute_path(regular).split(os.path.sep)[-2]
  if familypath == expected:
    yield PASS, "OK"
  else:
    yield FAIL,\
          Message("mismatch",
                  f"Family name on the name table ('{entry}') does not match"
                  f" directory name in the repo structure ('{familypath}')."
                  f" Expected '{expected}'.")
@check(
id = 'com.google.fonts/check/repo/vf_has_static_fonts',
conditions = ['family_directory',
'gfonts_repo_structure',
'is_variable_font'],
rationale="""
Variable font family directories kept in the google/fonts git repo must include a static/ subdir containing static fonts.
These files are meant to be served for users that still lack support for variable fonts in their web browsers.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/2654'
}
)
def com_google_fonts_check_repo_vf_has_static_fonts(family_directory):
  """A static fonts directory with at least two fonts must accompany variable fonts"""
  # GFonts repo convention: a "static/" subdirectory next to the VF files.
  static_dir = os.path.join(family_directory, 'static')
  if not os.path.exists(static_dir):
    yield FAIL, \
          Message("missing",
                  'Please create a subdirectory called "static/"'
                  ' and include in it static font files.')
    return
  # The directory exists; it must contain at least one TTF file.
  if any(entry.endswith('.ttf') for entry in os.listdir(static_dir)):
    yield PASS, 'OK'
  else:
    yield FAIL, \
          Message("empty",
                  'Static dir is empty')
@check(
id = 'com.google.fonts/check/repo/fb_report',
conditions = ['family_directory'],
rationale="""
A FontBakery report is ephemeral and so should be used for posting issues on a bug-tracker instead of being hosted in the font project repository.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/2888'
}
)
def com_google_fonts_check_repo_fb_report(family_directory):
  """A font repository should not include fontbakery report files"""
  from fontbakery.utils import filenames_ending_in

  def _looks_like_fb_report(path):
    # A FontBakery JSON report contains a "result" key. Use a context
    # manager so the file handle is closed promptly (the previous
    # implementation leaked one open file object per candidate).
    with open(path) as candidate:
      return '"result"' in candidate.read()

  has_report_files = any(_looks_like_fb_report(f)
                         for f in filenames_ending_in(".json", family_directory))
  if not has_report_files:
    yield PASS, 'OK'
  else:
    yield WARN, \
          Message("fb-report",
                  "There's no need to keep a copy of Font Bakery reports in the repository,"
                  " since they are ephemeral; FB has a 'github markdown' output mode"
                  " to make it easy to file reports as issues.")
@check(
id = 'com.google.fonts/check/repo/zip_files',
conditions = ['family_directory'],
rationale="""
Sometimes people check in ZIPs into their font project repositories. While we accept the practice of checking in binaries, we believe that a ZIP is a step too far ;)
Note: a source purist position is that only source files and build scripts should be checked in.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/2903'
}
)
def com_google_fonts_check_repo_zip_files(family_directory):
  """A font repository should not include ZIP files"""
  from fontbakery.utils import (filenames_ending_in,
                                pretty_print_list)
  COMMON_ZIP_EXTENSIONS = [".zip", ".7z", ".rar"]
  # Gather all archive files, grouped by extension in the order above.
  zip_files = [path
               for extension in COMMON_ZIP_EXTENSIONS
               for path in filenames_ending_in(extension, family_directory)]
  if zip_files:
    files_list = pretty_print_list(zip_files,
                                   shorten=None,
                                   sep='\n\t* ')
    yield FAIL, \
          Message("zip-files",
                  f"Please do not host ZIP files on the project repository."
                  f" These files were detected:\n"
                  f"\t* {files_list}")
  else:
    yield PASS, 'OK'
@check(
id = 'com.google.fonts/check/vertical_metrics_regressions',
conditions = ['remote_styles'],
rationale="""
If the family already exists on Google Fonts, we need to ensure that the checked family's vertical metrics are similar. This check will test the following schema which was outlined in Fontbakery issue #1162 [1]:
- The family should visually have the same vertical metrics as the Regular style hosted on Google Fonts.
- If the family on Google Fonts has differing hhea and typo metrics, the family being checked should use the typo metrics for both the hhea and typo entries.
- If the family on Google Fonts has use typo metrics not enabled and the family being checked has it enabled, the hhea and typo metrics should use the family on Google Fonts winAscent and winDescent values.
- If the upms differ, the values must be scaled so the visual appearance is the same.
[1] https://github.com/googlefonts/fontbakery/issues/1162
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/1162'
}
)
def com_google_fonts_check_vertical_metrics_regressions(ttFonts, remote_styles):
  """Check if the vertical metrics of a family are similar to the same
  family hosted on Google Fonts."""
  import math
  from .shared_conditions import (is_variable_font,
                                  get_instance_axis_value,
                                  typo_metrics_enabled)
  failed = False
  ttFonts = list(ttFonts)
  # Pick the reference font from the family already hosted on Google Fonts:
  # prefer the static "Regular" style; otherwise a variable font that has a
  # "Regular" instance on the wght axis; otherwise the first remote style.
  if "Regular" in remote_styles:
    gf_font = remote_styles["Regular"]
  else:
    gf_font = None
  # NOTE(review): `remote_styles` is used as a mapping above, but is unpacked
  # as (style, font) pairs here and indexed as remote_styles[0][1] below.
  # These usages look mutually inconsistent — confirm its actual type.
  for style, font in remote_styles:
    if is_variable_font(font):
      if get_instance_axis_value(font, "Regular", "wght"):
        gf_font = font
  if not gf_font:
    gf_font = remote_styles[0][1]
  # Scale factor so metrics can be compared across differing units-per-em.
  upm_scale = ttFonts[0]['head'].unitsPerEm / gf_font['head'].unitsPerEm
  gf_has_typo_metrics = typo_metrics_enabled(gf_font)
  fam_has_typo_metrics = typo_metrics_enabled(ttFonts[0])
  # If the USE_TYPO_METRICS setting agrees between the hosted and local
  # families, compare against the hosted typo metrics; otherwise compare
  # against the hosted win metrics (descender sign flipped, since
  # usWinDescent is positive while sTypoDescender is negative).
  if (gf_has_typo_metrics and fam_has_typo_metrics) or \
     (not gf_has_typo_metrics and not fam_has_typo_metrics):
    expected_ascender = math.ceil(gf_font['OS/2'].sTypoAscender * upm_scale)
    expected_descender = math.ceil(gf_font['OS/2'].sTypoDescender * upm_scale)
  else:
    expected_ascender = math.ceil(gf_font["OS/2"].usWinAscent * upm_scale)
    expected_descender = -math.ceil(gf_font["OS/2"].usWinDescent * upm_scale)
  # Every font of the local family must match the expected values in both
  # its OS/2 typo metrics and its hhea metrics.
  for ttFont in ttFonts:
    # (4, 3, 1, 1033): nameID 4, Windows platform, Unicode BMP, en-US.
    full_font_name = ttFont['name'].getName(4, 3, 1, 1033).toUnicode()
    typo_ascender = ttFont['OS/2'].sTypoAscender
    typo_descender = ttFont['OS/2'].sTypoDescender
    hhea_ascender = ttFont['hhea'].ascent
    hhea_descender = ttFont['hhea'].descent
    if typo_ascender != expected_ascender:
      failed = True
      yield FAIL,\
            Message("bad-typo-ascender",
                    f"{full_font_name}:"
                    f" OS/2 sTypoAscender is {typo_ascender}"
                    f" when it should be {expected_ascender}")
    if typo_descender != expected_descender:
      failed = True
      yield FAIL,\
            Message("bad-typo-descender",
                    f"{full_font_name}:"
                    f" OS/2 sTypoDescender is {typo_descender}"
                    f" when it should be {expected_descender}")
    if hhea_ascender != expected_ascender:
      failed = True
      yield FAIL,\
            Message("bad-hhea-ascender",
                    f"{full_font_name}:"
                    f" hhea Ascender is {hhea_ascender}"
                    f" when it should be {expected_ascender}")
    if hhea_descender != expected_descender:
      failed = True
      yield FAIL,\
            Message("bad-hhea-descender",
                    f"{full_font_name}:"
                    f" hhea Descender is {hhea_descender}"
                    f" when it should be {expected_descender}")
  if not failed:
    yield PASS, "Vertical metrics have not regressed."
@check(
id = 'com.google.fonts/check/cjk_vertical_metrics',
conditions = ['is_cjk_font',
'not remote_styles'],
rationale="""
CJK fonts have different vertical metrics when compared to Latin fonts. We follow the schema developed by dr Ken Lunde for Source Han Sans and the Noto CJK fonts.
Our documentation includes further information: https://github.com/googlefonts/gf-docs/tree/master/Spec#cjk-vertical-metrics
"""
)
def com_google_fonts_check_cjk_vertical_metrics(ttFont):
  """Check font follows the Google Fonts CJK vertical metric schema"""
  # NOTE(review): `is_cjk_font` is imported but never used here (it is the
  # check's condition); `filename` is computed but never used — the FAIL
  # message below says "(unknown)" instead. Possibly leftover scaffolding.
  from .shared_conditions import is_cjk_font, typo_metrics_enabled
  filename = os.path.basename(ttFont.reader.file.name)
  # Check necessary tables are present.
  missing_tables = False
  required = ["OS/2", "hhea", "head"]
  for key in required:
    if key not in ttFont:
      missing_tables = True
      yield FAIL,\
            Message(f'lacks-{key}',
                    f"(unknown) lacks a '{key}' table.")
  if missing_tables:
    return
  font_upm = ttFont['head'].unitsPerEm
  # Snapshot of the font's actual vertical metrics, keyed by "table.field".
  font_metrics = {
    'OS/2.sTypoAscender': ttFont['OS/2'].sTypoAscender,
    'OS/2.sTypoDescender': ttFont['OS/2'].sTypoDescender,
    'OS/2.sTypoLineGap': ttFont['OS/2'].sTypoLineGap,
    'hhea.ascent': ttFont['hhea'].ascent,
    'hhea.descent': ttFont['hhea'].descent,
    'hhea.lineGap': ttFont['hhea'].lineGap,
    'OS/2.usWinAscent': ttFont['OS/2'].usWinAscent,
    'OS/2.usWinDescent': ttFont['OS/2'].usWinDescent
  }
  # CJK schema: typo ascender/descender pinned at +88%/-12% of the UPM,
  # and both line gaps must be zero.
  expected_metrics = {
    'OS/2.sTypoAscender': font_upm * 0.88,
    'OS/2.sTypoDescender': font_upm * -0.12,
    'OS/2.sTypoLineGap': 0,
    'hhea.lineGap': 0,
  }
  failed = False
  warn = False
  # Check fsSelection bit 7 is not enabled
  if typo_metrics_enabled(ttFont):
    failed = True
    yield FAIL, \
          Message('bad-fselection-bit7',
                  'OS/2 fsSelection bit 7 must be disabled')
  # Check typo metrics and hhea lineGap match our expected values
  for k in expected_metrics:
    if font_metrics[k] != expected_metrics[k]:
      failed = True
      yield FAIL, \
            Message(f'bad-{k}',
                    f'{k} is "{font_metrics[k]}" it should be {expected_metrics[k]}')
  # Check hhea and win values match
  if font_metrics['hhea.ascent'] != font_metrics['OS/2.usWinAscent']:
    failed = True
    yield FAIL, \
          Message('ascent-mismatch',
                  'hhea.ascent must match OS/2.usWinAscent')
  if abs(font_metrics['hhea.descent']) != font_metrics['OS/2.usWinDescent']:
    failed = True
    yield FAIL, \
          Message('descent-mismatch',
                  'hhea.descent must match absolute value of OS/2.usWinDescent')
  # Check the sum of the hhea metrics is between 1.1-1.5x of the font's upm
  hhea_sum = (font_metrics['hhea.ascent'] +
              abs(font_metrics['hhea.descent']) +
              font_metrics['hhea.lineGap']) / font_upm
  # NOTE(review): the condition accepts up to 1.5x while the WARN message
  # says "between 1.1-1.4x" — one of the two is inconsistent; confirm intent.
  if not failed and not 1.1 < hhea_sum <= 1.5:
    warn = True
    yield WARN, \
          Message('bad-hhea-range',
                  f"We recommend the absolute sum of the hhea metrics should be"
                  f" between 1.1-1.4x of the font's upm. This font has {hhea_sum}x")
  if not failed and not warn:
    yield PASS, 'Vertical metrics are good'
@check(
id = 'com.google.fonts/check/varfont_instance_coordinates',
conditions = ['is_variable_font'],
)
def com_google_fonts_check_varfont_instance_coordinates(ttFont):
  """Check variable font instances have correct coordinate values"""
  from fontbakery.parse import instance_parse
  from fontbakery.constants import SHOW_GF_DOCS_MSG
  any_mismatch = False
  for instance in ttFont['fvar'].instances:
    name = ttFont['name'].getName(
      instance.subfamilyNameID,
      PlatformID.WINDOWS,
      WindowsEncodingID.UNICODE_BMP,
      WindowsLanguageID.ENGLISH_USA
    ).toUnicode()
    # The expected axis coordinates are derived from the instance name itself.
    expected_instance = instance_parse(name)
    for axis, value in instance.coordinates.items():
      # Axes the parser does not recognize from the name are ignored.
      if axis not in expected_instance.coordinates:
        continue
      expected_value = expected_instance.coordinates[axis]
      if value == expected_value:
        continue
      any_mismatch = True
      yield FAIL,\
            Message("bad-coordinate",
                    f'Instance "{name}" {axis} value '
                    f'is "{value}". '
                    f'It should be "{expected_value}"')
  if any_mismatch:
    yield FAIL, f"{SHOW_GF_DOCS_MSG}#axes"
  else:
    yield PASS, "Instance coordinates are correct"
@check(
id = 'com.google.fonts/check/varfont_instance_names',
conditions = ['is_variable_font'],
)
def com_google_fonts_check_varfont_instance_names(ttFont):
  """Check variable font instances have correct names"""
  # This check and the fontbakery.parse module used to be more complicated.
  # On 2020-06-26, we decided to only allow Thin-Black + Italic instances.
  # If we decide to add more particles to instance names, It's worthwhile
  # revisiting our previous implementation which can be found in commits
  # earlier than or equal to ca71d787eb2b8b5a9b111884080dde5d45f5579f
  from fontbakery.parse import instance_parse
  from fontbakery.constants import SHOW_GF_DOCS_MSG

  def _subfamily_name(instance):
    # Windows / Unicode BMP / en-US name-table string for this instance.
    return ttFont['name'].getName(
      instance.subfamilyNameID,
      PlatformID.WINDOWS,
      WindowsEncodingID.UNICODE_BMP,
      WindowsLanguageID.ENGLISH_USA
    ).toUnicode()

  instance_names = [_subfamily_name(instance)
                    for instance in ttFont['fvar'].instances]
  # A name is acceptable iff it round-trips through the parser unchanged.
  failed = [name for name in instance_names
            if instance_parse(name).name != name]
  if failed:
    failed_instances = "\n\t- ".join([""] + failed)
    yield FAIL,\
          Message('bad-instance-names',
                  f'Following instances are not supported: {failed_instances}\n'
                  f'\n'
                  f'{SHOW_GF_DOCS_MSG}#fvar-instances')
  else:
    yield PASS, "Instance names are correct"
@check(
id = 'com.google.fonts/check/varfont_duplicate_instance_names',
rationale = """
This check's purpose is to detect duplicate named instances names in a given variable font.
Repeating instance names may be the result of instances for several VF axes defined in `fvar`, but since currently only weight+italic tokens are allowed in instance names as per GF specs, they ended up repeating.
Instead, only a base set of fonts for the most default representation of the family can be defined through instances in the `fvar` table, all other instances will have to be left to access through the `STAT` table.
""",
conditions = ['is_variable_font'],
)
def com_google_fonts_check_varfont_duplicate_instance_names(ttFont):
  """Check variable font instances don't have duplicate names"""
  # (removed an unused import of SHOW_GF_DOCS_MSG)
  # `seen` is a set for O(1) membership tests; `duplicate` stays an ordered
  # list so the report lists names in the order they first repeat. Unlike
  # the previous implementation, a name occurring three or more times is
  # now reported only once.
  seen = set()
  duplicate = []
  for instance in ttFont['fvar'].instances:
    name = ttFont['name'].getName(
      instance.subfamilyNameID,
      PlatformID.WINDOWS,
      WindowsEncodingID.UNICODE_BMP,
      WindowsLanguageID.ENGLISH_USA
    ).toUnicode()
    if name in seen:
      if name not in duplicate:
        duplicate.append(name)
    else:
      seen.add(name)
  if duplicate:
    duplicate_instances = "\n\t- ".join([""] + duplicate)
    yield FAIL,\
          Message('duplicate-instance-names',
                  f'Following instances names are duplicate: {duplicate_instances}\n')
  else:
    yield PASS, "Instance names are unique"
@check(
id = 'com.google.fonts/check/varfont/unsupported_axes',
rationale = """
The 'ital' axis is not supported yet in Google Chrome. The 'opsz' axis also has patchy support.
For the time being, we need to ensure that VFs do not contain either of these axes. Once browser support is better, we can deprecate this check.
For more info regarding ital and opsz browser support, see:
https://arrowtype.github.io/vf-slnt-test/
""",
conditions = ['is_variable_font'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/2866'
}
)
def com_google_fonts_check_varfont_unsupported_axes(ttFont):
  """ Ensure VFs do not contain slnt or ital axes. """
  from fontbakery.profiles.shared_conditions import slnt_axis, ital_axis
  # At most one FAIL per font; 'ital' takes precedence over 'slnt'.
  for axis_tag, has_axis in (("ital", ital_axis), ("slnt", slnt_axis)):
    if has_axis(ttFont):
      yield FAIL,\
            Message(f"unsupported-{axis_tag}",
                    f'The "{axis_tag}" axis is not yet well supported'
                    ' on Google Chrome.')
      return
  yield PASS, "Looks good!"
@check(
id = 'com.google.fonts/check/metadata/gf-axisregistry_bounds',
rationale = """
Each axis range in a METADATA.pb file must be registered, and within the bounds of the axis definition in the Google Fonts Axis Registry, available at https://github.com/google/fonts/tree/master/axisregistry
""",
conditions = ['is_variable_font',
'GFAxisRegistry'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/3010'
}
)
def com_google_fonts_check_gf_axisregistry_bounds(family_metadata, GFAxisRegistry):
  """ Validate METADATA.pb axes values are within gf-axisregistry bounds. """
  all_within_bounds = True
  for axis in family_metadata.axes:
    # Unregistered tags are the concern of the valid_tags check, not this one.
    if axis.tag not in GFAxisRegistry:
      continue
    expected = GFAxisRegistry[axis.tag]["message"]
    range_ok = (expected.min_value <= axis.min_value
                and axis.max_value <= expected.max_value)
    if range_ok:
      continue
    all_within_bounds = False
    yield FAIL,\
          Message('bad-axis-range',
                  f"The range in the font variation axis '{axis.tag}' ({expected.display_name}"
                  f" min:{axis.min_value} max:{axis.max_value})"
                  f" does not comply with the expected maximum range, as defined on"
                  f" Google Fonts Axis Registry (min:{expected.min_value} max:{expected.max_value}).")
  if all_within_bounds:
    yield PASS, "OK"
@check(
id = 'com.google.fonts/check/metadata/gf-axisregistry_valid_tags',
rationale = """
Ensure all axes in a METADATA.pb file are registered in the Google Fonts Axis Registry, available at https://github.com/google/fonts/tree/master/axisregistry
Why does Google Fonts have its own Axis Registry?
We support a superset of the OpenType axis registry axis set, and use additional metadata for each axis. Axes present in a font file but not in this registry will not function via our API. No variable font is expected to support all of the axes here.
Any font foundry or distributor library that offers variable fonts has a implicit, latent, de-facto axis registry, which can be extracted by scanning the library for axes' tags, labels, and min/def/max values. While in 2016 Microsoft originally offered to include more axes in the OpenType 1.8 specification (github.com/microsoft/OpenTypeDesignVariationAxisTags), as of August 2020, this effort has stalled. We hope more foundries and distributors will publish documents like this that make their axes explicit, to encourage of adoption of variable fonts throughout the industry, and provide source material for a future update to the OpenType specification's axis registry.
""",
conditions = ['is_variable_font',
'GFAxisRegistry'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/3022'
}
)
def com_google_fonts_check_gf_axisregistry_valid_tags(family_metadata, GFAxisRegistry):
  """ Validate METADATA.pb axes tags are defined in gf-axisregistry. """
  # Collect unregistered tags first, then report them one by one.
  unregistered = [axis.tag for axis in family_metadata.axes
                  if axis.tag not in GFAxisRegistry]
  for tag in unregistered:
    yield FAIL,\
          Message('bad-axis-tag',
                  f"The font variation axis '{tag}'"
                  f" is not yet registered on Google Fonts Axis Registry.")
  if not unregistered:
    yield PASS, "OK"
@check(
id = 'com.google.fonts/check/STAT/gf-axisregistry',
rationale = """
Check that particle names and values on STAT table match the fallback names in each axis registry at the Google Fonts Axis Registry, available at https://github.com/google/fonts/tree/master/axisregistry
""",
conditions = ['is_variable_font',
'GFAxisRegistry'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/3022'
}
)
def com_google_fonts_check_STAT_gf_axisregistry_names(ttFont, GFAxisRegistry):
  """ Validate STAT particle names and values match the fallback names in GFAxisRegistry. """
  def normalize_name(name):
    # Name comparison ignores spaces.
    return ''.join(name.split(' '))

  passed = True
  for axis_value in ttFont['STAT'].table.AxisValueArray.AxisValue:
    axis = ttFont['STAT'].table.DesignAxisRecord.Axis[axis_value.AxisIndex]
    # Axes not registered on the GF Axis Registry are skipped here.
    if axis.AxisTag not in GFAxisRegistry.keys():
      continue
    fallbacks = GFAxisRegistry[axis.AxisTag]["fallbacks"]
    # Here we assume that it is enough to check for only the Windows, English USA entry corresponding
    # to a given nameID. It is up to other checks to ensure all different platform/encoding entries
    # with a given nameID are consistent in the name table.
    name_entry = ttFont['name'].getName(axis_value.ValueNameID,
                                        PlatformID.WINDOWS,
                                        WindowsEncodingID.UNICODE_BMP,
                                        WindowsLanguageID.ENGLISH_USA)
    # Here "name_entry" has the user-friendly name of the current AxisValue
    # We want to ensure that this string shows up as a "fallback" name
    # on the GF Axis Registry for this specific variation axis tag.
    name = normalize_name(name_entry.toUnicode())
    expected_names = [normalize_name(n) for n in fallbacks.keys()]
    if name not in expected_names:
      expected_names = ", ".join(expected_names)
      passed = False
      yield FAIL, \
            Message('invalid-name',
                    f"On the font variation axis '{axis.AxisTag}', the name '{name_entry.toUnicode()}'"
                    f" is not among the expected ones ({expected_names}) according"
                    f" to the Google Fonts Axis Registry.")
    elif axis_value.Value != fallbacks[name_entry.toUnicode()]:
      passed = False
      # Bug fix: the expected value used to be emitted as the literal text
      # 'fallbacks[name_entry.toUnicode()]' because the f-string was
      # missing its braces; it is now properly interpolated.
      yield FAIL, \
            Message("bad-coordinate",
                    (f"Axis Value for '{axis.AxisTag}':'{name_entry.toUnicode()}' is"
                     f" expected to be '{fallbacks[name_entry.toUnicode()]}'"
                     f" but this font has '{name_entry.toUnicode()}'='{axis_value.Value}'."))
  if passed:
    yield PASS, "OK"
@check(
id = 'com.google.fonts/check/metadata/consistent_axis_enumeration',
rationale = """
All font variation axes present in the font files must be properly declared on METADATA.pb so that they can be served by the GFonts API.
""",
conditions = ['is_variable_font'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/3051'
}
)
def com_google_fonts_check_metadata_consistent_axis_enumeration(family_metadata, ttFont):
  """ Validate VF axes match the ones declared on METADATA.pb. """
  from fontbakery.utils import pretty_print_list
  passed = True
  # Compare the axis tag sets declared in METADATA.pb and in the fvar table.
  md_axes = set(axis.tag for axis in family_metadata.axes)
  fvar_axes = set(axis.axisTag for axis in ttFont['fvar'].axes)
  missing = sorted(fvar_axes - md_axes)
  extra = sorted(md_axes - fvar_axes)
  if missing:
    passed = False
    yield FAIL,\
          Message('missing-axes',
                  f"The font variation axes {pretty_print_list(missing)}"
                  f" are present in the font's fvar table but are not"
                  f" declared on the METADATA.pb file.")
  if extra:
    passed = False
    # Message fix: previously read "not supported but this family".
    yield FAIL,\
          Message('extra-axes',
                  f"The METADATA.pb file lists font variation axes that"
                  f" are not supported by this family: {pretty_print_list(extra)}")
  if passed:
    yield PASS, "OK"
@check(
id = 'com.google.fonts/check/STAT/axis_order',
rationale = """
This is (for now) a merely informative check to detect what's the axis ordering declared on the STAT table of fonts in the Google Fonts collection.
We may later update this to enforce some unified axis ordering scheme, yet to be determined.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/3049'
}
)
def com_google_fonts_check_STAT_axis_order(fonts):
  """ Check axis ordering on the STAT table. """
  from collections import Counter
  from fontTools.ttLib import TTFont
  no_stat = 0  # number of fonts lacking a STAT table
  summary = []  # one "tag-tag-..." ordering string per font with a STAT table
  for font in fonts:
    try:
      ttFont = TTFont(font)
      if 'STAT' in ttFont:
        # Record this font's axis tags sorted by their AxisOrdering values.
        order = {}
        for axis in ttFont['STAT'].table.DesignAxisRecord.Axis:
          order[axis.AxisTag] = axis.AxisOrdering
        summary.append('-'.join(sorted(order.keys(), key=order.get)))
      else:
        no_stat += 1
        yield SKIP,\
              Message('missing-STAT',
                      f"This font does not have a STAT table: {font}")
    # Fixed: a bare `except:` would also have swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
      yield INFO,\
            Message('bad-font',
                    f"Something wrong with {font}")
  report = "\n\t".join(map(str, Counter(summary).most_common()))
  yield INFO,\
        Message('summary',
                f"From a total of {len(fonts)} font files,"
                f" {no_stat} of them ({100.0*no_stat/len(fonts):.2f}%)"
                f" lack a STAT table.\n"
                f"\n"
                f"\tAnd these are the most common STAT axis orderings:\n"
                f"\t{report}")
@check(
id = 'com.google.fonts/check/metadata/escaped_strings',
rationale = """
In some cases we've seen designer names and other fields with escaped strings in METADATA files.
Nowadays the strings can be full unicode strings and do not need escaping.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/2932'
}
)
def com_google_fonts_check_metadata_escaped_strings(metadata_file):
  """Ensure METADATA.pb does not use escaped strings."""
  passed = True
  # Read via a context manager so the file handle is closed promptly
  # (the previous implementation leaked it).
  with open(metadata_file, "r") as metadata:
    lines = metadata.readlines()
  for line in lines:
    # A quoted value is any text between the first and second occurrence
    # of either quote character on the line.
    for quote_char in ["'", "\""]:
      segments = line.split(quote_char)
      if len(segments) >= 3:
        a_string = segments[1]
        if "\\" in a_string:
          passed = False
          yield FAIL,\
                Message('escaped-strings',
                        f"Found escaped chars at '{a_string}'."
                        f" Please use an unicode string instead.")
  if passed:
    yield PASS, "OK"
###############################################################################
def is_librebarcode(font):
    """Return True if `font` names one of the LibreBarcode family font
    files (these are exempted from some checks by check_skip_filter)."""
    font_filenames = (
        "LibreBarcode39-Regular.ttf",
        "LibreBarcode39Text-Regular.ttf",
        "LibreBarcode128-Regular.ttf",
        "LibreBarcode128Text-Regular.ttf",
        "LibreBarcode39Extended-Regular.ttf",
        "LibreBarcode39ExtendedText-Regular.ttf"
    )
    # Substring match, so full paths also work. Returns an explicit bool;
    # the previous implementation implicitly returned None on no-match.
    return any(font_filename in font for font_filename in font_filenames)
def check_skip_filter(checkid, font=None, **iterargs):
    """Skip checks that LibreBarcode fonts fail by design.

    See: https://github.com/graphicore/librebarcode/issues/3
    """
    blacklisted_checks = (
        'com.google.fonts/check/monospace',
        'com.google.fonts/check/gpos_kerning_info',
        'com.google.fonts/check/currency_chars',
        'com.google.fonts/check/whitespace_ink')
    if font and is_librebarcode(font) and checkid in blacklisted_checks:
        return False, ('LibreBarcode is blacklisted for this check, see '
                       'https://github.com/graphicore/librebarcode/issues/3')
    return True, None
# Install the profile-wide skip filter (the LibreBarcode exemptions above),
# register every check defined in this module, and verify that the set of
# registered checks matches GOOGLEFONTS_PROFILE_CHECKS exactly.
profile.check_skip_filter = check_skip_filter
profile.auto_register(globals())
profile.test_expected_checks(GOOGLEFONTS_PROFILE_CHECKS, exclusive=True)
|
graphicore/fontbakery
|
Lib/fontbakery/profiles/googlefonts.py
|
Python
|
apache-2.0
| 211,482
|
[
"VisIt"
] |
e11032645ab24b95b50603c74ef18f251281f6ed5bb7174b0388d9727a44bb72
|
"""Support for Konnected devices."""
import asyncio
import hmac
import json
import logging
import voluptuous as vol
from aiohttp.hdrs import AUTHORIZATION
from aiohttp.web import Request, Response
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.components.discovery import SERVICE_KONNECTED
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, HTTP_BAD_REQUEST, HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED, CONF_DEVICES, CONF_BINARY_SENSORS, CONF_SWITCHES,
CONF_HOST, CONF_PORT, CONF_ID, CONF_NAME, CONF_TYPE, CONF_PIN, CONF_ZONE,
CONF_ACCESS_TOKEN, ATTR_ENTITY_ID, ATTR_STATE, STATE_ON)
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, dispatcher_send)
from homeassistant.helpers import discovery
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
# PyPI client library used to talk to Konnected devices.
REQUIREMENTS = ['konnected==0.1.4']
DOMAIN = 'konnected'
# Configuration keys specific to this component.
CONF_ACTIVATION = 'activation'
CONF_API_HOST = 'api_host'
CONF_MOMENTARY = 'momentary'
CONF_PAUSE = 'pause'
CONF_REPEAT = 'repeat'
CONF_INVERSE = 'inverse'
CONF_BLINK = 'blink'
CONF_DISCOVERY = 'discovery'
# Output activation levels accepted for switches.
STATE_LOW = 'low'
STATE_HIGH = 'high'
# Mapping between the device's physical pin numbers and its zone labels;
# pin 8 is the dedicated 'out' pin. ZONE_TO_PIN is the inverse mapping.
PIN_TO_ZONE = {1: 1, 2: 2, 5: 3, 6: 4, 7: 5, 8: 'out', 9: 6}
ZONE_TO_PIN = {zone: pin for pin, zone in PIN_TO_ZONE.items()}
# Schema for one binary-sensor entry. The input is identified by either
# `pin` or `zone` (vol.Exclusive makes them mutually exclusive and
# cv.has_at_least_one_key requires that one of them is present); valid
# values are the keys of PIN_TO_ZONE / ZONE_TO_PIN respectively.
_BINARY_SENSOR_SCHEMA = vol.All(
    vol.Schema({
        vol.Exclusive(CONF_PIN, 's_pin'): vol.Any(*PIN_TO_ZONE),
        vol.Exclusive(CONF_ZONE, 's_pin'): vol.Any(*ZONE_TO_PIN),
        vol.Required(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_INVERSE, default=False): cv.boolean,
    }), cv.has_at_least_one_key(CONF_PIN, CONF_ZONE)
)
# Schema for one switch/actuator entry. Same pin/zone exclusivity as above.
# `momentary` and `pause` must be >= 10, `repeat` >= -1 — NOTE(review):
# units and the meaning of repeat=-1 are presumed device semantics; confirm
# against the Konnected documentation.
_SWITCH_SCHEMA = vol.All(
    vol.Schema({
        vol.Exclusive(CONF_PIN, 'a_pin'): vol.Any(*PIN_TO_ZONE),
        vol.Exclusive(CONF_ZONE, 'a_pin'): vol.Any(*ZONE_TO_PIN),
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ACTIVATION, default=STATE_HIGH):
            vol.All(vol.Lower, vol.Any(STATE_HIGH, STATE_LOW)),
        vol.Optional(CONF_MOMENTARY):
            vol.All(vol.Coerce(int), vol.Range(min=10)),
        vol.Optional(CONF_PAUSE):
            vol.All(vol.Coerce(int), vol.Range(min=10)),
        vol.Optional(CONF_REPEAT):
            vol.All(vol.Coerce(int), vol.Range(min=-1)),
    }), cv.has_at_least_one_key(CONF_PIN, CONF_ZONE)
)
# pylint: disable=no-value-for-parameter
# Top-level configuration: an access token plus a list of devices, each
# identified by a 12-hex-digit id (the device MAC address without
# punctuation, per ConfiguredDevice.device_id) with optional sensor/switch
# definitions and optional static host/port for manual discovery.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema({
            vol.Required(CONF_ACCESS_TOKEN): cv.string,
            vol.Optional(CONF_API_HOST): vol.Url(),
            vol.Required(CONF_DEVICES): [{
                vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
                vol.Optional(CONF_BINARY_SENSORS): vol.All(
                    cv.ensure_list, [_BINARY_SENSOR_SCHEMA]),
                vol.Optional(CONF_SWITCHES): vol.All(
                    cv.ensure_list, [_SWITCH_SCHEMA]),
                vol.Optional(CONF_HOST): cv.string,
                vol.Optional(CONF_PORT): cv.port,
                vol.Optional(CONF_BLINK, default=True): cv.boolean,
                vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
            }],
        }),
    },
    extra=vol.ALLOW_EXTRA,
)
# The HTTP component must be set up first so the update view can register.
DEPENDENCIES = ['http']
ENDPOINT_ROOT = '/api/konnected'
# Per-device update endpoint, e.g. /api/konnected/device/<device_id>.
UPDATE_ENDPOINT = (ENDPOINT_ROOT + r'/device/{device_id:[a-zA-Z0-9]+}')
# Dispatcher signal name template (filled in with a specific id at runtime).
SIGNAL_SENSOR_UPDATE = 'konnected.{}.update'
async def async_setup(hass, config):
    """Set up the Konnected platform."""
    import konnected

    cfg = config.get(DOMAIN)
    if cfg is None:
        cfg = {}

    access_token = cfg.get(CONF_ACCESS_TOKEN)
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {
            CONF_ACCESS_TOKEN: access_token,
            CONF_API_HOST: cfg.get(CONF_API_HOST)
        }

    def setup_device(host, port):
        """Set up a Konnected device at `host` listening on `port`."""
        discovered = DiscoveredDevice(hass, host, port)
        if discovered.is_configured:
            discovered.setup()
        else:
            _LOGGER.warning("Konnected device %s was discovered on the network"
                            " but not specified in configuration.yaml",
                            discovered.device_id)

    def device_discovered(service, info):
        """Call when a Konnected device has been discovered."""
        host = info.get(CONF_HOST)
        port = info.get(CONF_PORT)
        setup_device(host, port)

    async def manual_discovery(event):
        """Init devices on the network with manually assigned addresses."""
        # Default to an empty list so a missing 'devices' key cannot raise
        # a TypeError while iterating (cfg may be an empty dict).
        specified = [dev for dev in cfg.get(CONF_DEVICES, []) if
                     dev.get(CONF_HOST) and dev.get(CONF_PORT)]
        while specified:
            # Iterate over a copy: the previous implementation removed
            # items from the same list it was iterating, which skips the
            # element following each successful setup.
            for dev in list(specified):
                _LOGGER.debug("Discovering Konnected device %s at %s:%s",
                              dev.get(CONF_ID),
                              dev.get(CONF_HOST),
                              dev.get(CONF_PORT))
                try:
                    await hass.async_add_executor_job(setup_device,
                                                      dev.get(CONF_HOST),
                                                      dev.get(CONF_PORT))
                    specified.remove(dev)
                except konnected.Client.ClientError as err:
                    _LOGGER.error(err)
            await asyncio.sleep(10)  # try again in 10 seconds

    # Initialize devices specified in the configuration on boot
    for device in cfg.get(CONF_DEVICES, []):
        ConfiguredDevice(hass, device, config).save_data()

    discovery.async_listen(
        hass,
        SERVICE_KONNECTED,
        device_discovered)
    hass.http.register_view(KonnectedView(access_token))
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, manual_discovery)
    return True
class ConfiguredDevice:
    """A Konnected device as specified in configuration.yaml.

    Translates the YAML device entry into the runtime structures stored
    in ``hass.data[DOMAIN][CONF_DEVICES]`` and loads the binary_sensor
    and switch platforms for it.
    """

    def __init__(self, hass, config, hass_config):
        """Initialize the Konnected device."""
        self.hass = hass
        self.config = config      # this device's validated YAML entry
        self.hass_config = hass_config  # full hass config, for platforms

    @property
    def device_id(self):
        """Device id is the MAC address as string with punctuation removed."""
        return self.config.get(CONF_ID)

    @staticmethod
    def _pin_for(entity):
        """Return the pin number for an entity config keyed by pin or zone."""
        # The schemas guarantee exactly one of CONF_ZONE/CONF_PIN is set.
        if CONF_ZONE in entity:
            return ZONE_TO_PIN[entity[CONF_ZONE]]
        return entity[CONF_PIN]

    def save_data(self):
        """Save the device configuration to `hass.data`."""
        # Sensors are stored as a dict keyed by pin number.
        sensors = {}
        for entity in self.config.get(CONF_BINARY_SENSORS) or []:
            pin = self._pin_for(entity)
            sensors[pin] = {
                CONF_TYPE: entity[CONF_TYPE],
                CONF_NAME: entity.get(CONF_NAME, 'Konnected {} Zone {}'.format(
                    self.device_id[6:], PIN_TO_ZONE[pin])),
                CONF_INVERSE: entity.get(CONF_INVERSE),
                ATTR_STATE: None
            }
            _LOGGER.debug('Set up sensor %s (initial state: %s)',
                          sensors[pin].get('name'),
                          sensors[pin].get(ATTR_STATE))
        # Actuators (switches) are stored as a list of dicts.
        actuators = []
        for entity in self.config.get(CONF_SWITCHES) or []:
            # CONSISTENCY FIX: the original used hard-coded 'zone'/'pin'
            # strings here while the sensor loop used the CONF_*
            # constants; both now share _pin_for().
            pin = self._pin_for(entity)
            act = {
                CONF_PIN: pin,
                CONF_NAME: entity.get(
                    CONF_NAME, 'Konnected {} Actuator {}'.format(
                        self.device_id[6:], PIN_TO_ZONE[pin])),
                ATTR_STATE: None,
                CONF_ACTIVATION: entity[CONF_ACTIVATION],
                CONF_MOMENTARY: entity.get(CONF_MOMENTARY),
                CONF_PAUSE: entity.get(CONF_PAUSE),
                CONF_REPEAT: entity.get(CONF_REPEAT)}
            actuators.append(act)
            _LOGGER.debug('Set up actuator %s', act)
        device_data = {
            CONF_BINARY_SENSORS: sensors,
            CONF_SWITCHES: actuators,
            CONF_BLINK: self.config.get(CONF_BLINK),
            CONF_DISCOVERY: self.config.get(CONF_DISCOVERY)
        }
        if CONF_DEVICES not in self.hass.data[DOMAIN]:
            self.hass.data[DOMAIN][CONF_DEVICES] = {}
        _LOGGER.debug('Storing data in hass.data[%s][%s][%s]: %s',
                      DOMAIN, CONF_DEVICES, self.device_id, device_data)
        self.hass.data[DOMAIN][CONF_DEVICES][self.device_id] = device_data
        for platform in ('binary_sensor', 'switch'):
            discovery.load_platform(
                self.hass, platform, DOMAIN,
                {'device_id': self.device_id}, self.hass_config)
class DiscoveredDevice:
    """A representation of a discovered Konnected device.

    Wraps the konnected HTTP client and reconciles the device's live
    status against the configuration stored in `hass.data`.
    """
    def __init__(self, hass, host, port):
        """Initialize the Konnected device.

        NOTE(review): `get_status()` performs blocking network I/O, so
        this constructor must not run on the event loop (async_setup
        calls it via an executor).
        """
        self.hass = hass
        self.host = host
        self.port = port
        import konnected
        self.client = konnected.Client(host, str(port))
        # Cached status snapshot (mac, sensors, actuators, endpoint, ...)
        # taken once at construction time.
        self.status = self.client.get_status()
    def setup(self):
        """Set up a newly discovered Konnected device."""
        _LOGGER.info('Discovered Konnected device %s. Open http://%s:%s in a '
                     'web browser to view device status.',
                     self.device_id, self.host, self.port)
        self.save_data()
        self.update_initial_states()
        self.sync_device_config()
    def save_data(self):
        """Save the discovery information to `hass.data`."""
        # Assumes the device already has an entry in
        # hass.data[DOMAIN][CONF_DEVICES] (created by ConfiguredDevice);
        # callers check `is_configured` first.
        self.stored_configuration['client'] = self.client
        self.stored_configuration['host'] = self.host
        self.stored_configuration['port'] = self.port
    @property
    def device_id(self):
        """Device id is the MAC address as string with punctuation removed."""
        return self.status['mac'].replace(':', '')
    @property
    def is_configured(self):
        """Return true if device_id is specified in the configuration."""
        return bool(self.hass.data[DOMAIN][CONF_DEVICES].get(self.device_id))
    @property
    def stored_configuration(self):
        """Return the configuration stored in `hass.data` for this device."""
        return self.hass.data[DOMAIN][CONF_DEVICES].get(self.device_id)
    def sensor_configuration(self):
        """Return the configuration map for syncing sensors."""
        # Stored sensors are keyed by pin number.
        return [{'pin': p} for p in
                self.stored_configuration[CONF_BINARY_SENSORS]]
    def actuator_configuration(self):
        """Return the configuration map for syncing actuators."""
        # trigger: 0 = active-low, 1 = active-high (the device default).
        return [{'pin': data.get(CONF_PIN),
                 'trigger': (0 if data.get(CONF_ACTIVATION) in [0, STATE_LOW]
                             else 1)}
                for data in self.stored_configuration[CONF_SWITCHES]]
    def update_initial_states(self):
        """Update the initial state of each sensor from status poll."""
        for sensor_data in self.status.get('sensors'):
            sensor_config = self.stored_configuration[CONF_BINARY_SENSORS]. \
                get(sensor_data.get(CONF_PIN), {})
            # NOTE(review): entity_id is set later by the binary_sensor
            # platform; it may still be None here — confirm the signal
            # consumer tolerates that.
            entity_id = sensor_config.get(ATTR_ENTITY_ID)
            state = bool(sensor_data.get(ATTR_STATE))
            if sensor_config.get(CONF_INVERSE):
                state = not state
            dispatcher_send(
                self.hass,
                SIGNAL_SENSOR_UPDATE.format(entity_id),
                state)
    def sync_device_config(self):
        """Sync the new pin configuration to the Konnected device.

        Compares the desired sensor/actuator/endpoint/flag settings
        against the device's reported status and pushes the settings
        only if anything differs (a settings push reboots the device).
        """
        desired_sensor_configuration = self.sensor_configuration()
        current_sensor_configuration = [
            {'pin': s[CONF_PIN]} for s in self.status.get('sensors')]
        _LOGGER.debug('%s: desired sensor config: %s', self.device_id,
                      desired_sensor_configuration)
        _LOGGER.debug('%s: current sensor config: %s', self.device_id,
                      current_sensor_configuration)
        desired_actuator_config = self.actuator_configuration()
        current_actuator_config = self.status.get('actuators')
        _LOGGER.debug('%s: desired actuator config: %s', self.device_id,
                      desired_actuator_config)
        _LOGGER.debug('%s: current actuator config: %s', self.device_id,
                      current_actuator_config)
        # The explicit CONF_API_HOST override wins over hass's base_url.
        desired_api_host = \
            self.hass.data[DOMAIN].get(CONF_API_HOST) or \
            self.hass.config.api.base_url
        desired_api_endpoint = desired_api_host + ENDPOINT_ROOT
        current_api_endpoint = self.status.get('endpoint')
        _LOGGER.debug('%s: desired api endpoint: %s', self.device_id,
                      desired_api_endpoint)
        _LOGGER.debug('%s: current api endpoint: %s', self.device_id,
                      current_api_endpoint)
        if (desired_sensor_configuration != current_sensor_configuration) or \
                (current_actuator_config != desired_actuator_config) or \
                (current_api_endpoint != desired_api_endpoint) or \
                (self.status.get(CONF_BLINK) !=
                 self.stored_configuration.get(CONF_BLINK)) or \
                (self.status.get(CONF_DISCOVERY) !=
                 self.stored_configuration.get(CONF_DISCOVERY)):
            _LOGGER.info('pushing settings to device %s', self.device_id)
            self.client.put_settings(
                desired_sensor_configuration,
                desired_actuator_config,
                self.hass.data[DOMAIN].get(CONF_ACCESS_TOKEN),
                desired_api_endpoint,
                blink=self.stored_configuration.get(CONF_BLINK),
                discovery=self.stored_configuration.get(CONF_DISCOVERY)
            )
class KonnectedView(HomeAssistantView):
    """View creates an endpoint to receive push updates from the device.

    GET  /api/konnected/device/{device_id}?pin=N  -> current switch state
    PUT  /api/konnected/device/{device_id}        -> sensor state update
    """

    url = UPDATE_ENDPOINT
    extra_urls = [UPDATE_ENDPOINT + '/{pin_num}/{state}']
    name = 'api:konnected'
    requires_auth = False  # Uses access token from configuration

    def __init__(self, auth_token):
        """Initialize the view with the configured bearer token."""
        self.auth_token = auth_token

    @staticmethod
    def binary_value(state, activation):
        """Return binary value for GPIO based on state and activation."""
        if activation == STATE_HIGH:
            return 1 if state == STATE_ON else 0
        return 0 if state == STATE_ON else 1

    async def get(self, request: Request, device_id) -> Response:
        """Return the current binary state of a switch."""
        hass = request.app['hass']
        pin_num = int(request.query.get('pin'))
        data = hass.data[DOMAIN]
        device = data[CONF_DEVICES][device_id]
        if not device:
            return self.json_message(
                'Device ' + device_id + ' not configured',
                status_code=HTTP_NOT_FOUND)
        # Find the switch configured on the requested pin, if any.
        try:
            pin = next(filter(
                lambda switch: switch[CONF_PIN] == pin_num,
                device[CONF_SWITCHES]))
        except StopIteration:
            pin = None
        if not pin:
            # BUG FIX: pin_num is an int; the original str + int
            # concatenation raised TypeError instead of returning 404.
            return self.json_message(
                'Switch on pin {} not configured'.format(pin_num),
                status_code=HTTP_NOT_FOUND)
        return self.json(
            {'pin': pin_num,
             'state': self.binary_value(
                 hass.states.get(pin[ATTR_ENTITY_ID]).state,
                 pin[CONF_ACTIVATION])})

    async def put(self, request: Request, device_id,
                  pin_num=None, state=None) -> Response:
        """Receive a sensor update via PUT request and async set state."""
        hass = request.app['hass']
        data = hass.data[DOMAIN]
        try:  # Konnected 2.2.0 and above supports JSON payloads
            payload = await request.json()
            pin_num = payload['pin']
            state = payload['state']
        except json.decoder.JSONDecodeError:
            # Older firmware passes pin/state in the URL path instead.
            _LOGGER.warning(("Your Konnected device software may be out of "
                             "date. Visit https://help.konnected.io for "
                             "updating instructions."))
        # BUG FIX: default to '' instead of None — hmac.compare_digest
        # raises TypeError on None, turning a missing Authorization
        # header into a 500 instead of a 401.
        auth = request.headers.get(AUTHORIZATION, '')
        if not hmac.compare_digest('Bearer {}'.format(self.auth_token), auth):
            return self.json_message(
                "unauthorized", status_code=HTTP_UNAUTHORIZED)
        pin_num = int(pin_num)
        device = data[CONF_DEVICES].get(device_id)
        if device is None:
            return self.json_message('unregistered device',
                                     status_code=HTTP_BAD_REQUEST)
        pin_data = device[CONF_BINARY_SENSORS].get(pin_num)
        if pin_data is None:
            return self.json_message('unregistered sensor/actuator',
                                     status_code=HTTP_BAD_REQUEST)
        entity_id = pin_data.get(ATTR_ENTITY_ID)
        if entity_id is None:
            return self.json_message('uninitialized sensor/actuator',
                                     status_code=HTTP_NOT_FOUND)
        state = bool(int(state))
        if pin_data.get(CONF_INVERSE):
            state = not state
        async_dispatcher_send(
            hass, SIGNAL_SENSOR_UPDATE.format(entity_id), state)
        return self.json_message('ok')
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/konnected/__init__.py
|
Python
|
apache-2.0
| 17,327
|
[
"VisIt"
] |
ed44c7f87798a0a340e9a2fc1dda4af004afefad45bb4855da77d8242369ae63
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAnnotationhub(RPackage):
    """Client to access AnnotationHub resources
    This package provides a client for the Bioconductor AnnotationHub web
    resource. The AnnotationHub web resource provides a central location
    where genomic files (e.g., VCF, bed, wig) and other resources from
    standard locations (e.g., UCSC, Ensembl) can be discovered. The resource
    includes metadata about each resource, e.g., a textual description,
    tags, and date of modification. The client creates and manages a local
    cache of files retrieved by the user, helping with quick and
    reproducible access."""
    homepage = "https://bioconductor.org/packages/AnnotationHub"
    git = "https://git.bioconductor.org/packages/AnnotationHub.git"
    # Versions are pinned to commits on the Bioconductor git mirror
    # rather than release tarballs.
    version('2.22.0', commit='3ab7dceebbc31ac14ca931f66c662cf9538b7d0a')
    version('2.16.1', commit='f8cefaae603b782e1c1ad277a3fb89d44e3aa1ed')
    version('2.14.5', commit='993a98ce3de04a0bbddcbde5b1ab2a9550275a12')
    version('2.12.1', commit='471407bd9cdc612e01deb071c91bd9e5f1ea5e55')
    version('2.10.1', commit='b7cb668de9b9625ac2beb3dcde1fa39e289eec29')
    version('2.8.3', commit='8aa9c64262a8d708d2bf1c82f82dfc3d7d4ccc0c')
    # R package dependencies; `when` restricts a dependency to the
    # AnnotationHub releases that actually require it.
    depends_on('r-biocgenerics@0.15.10:', type=('build', 'run'))
    depends_on('r-biocfilecache@1.5.1:', when='@2.16.1:', type=('build', 'run'))
    depends_on('r-rsqlite', type=('build', 'run'))
    depends_on('r-biocmanager', when='@2.14.5:', type=('build', 'run'))
    depends_on('r-biocversion', when='@2.22.0:', type=('build', 'run'))
    depends_on('r-curl', when='@2.10.1:', type=('build', 'run'))
    depends_on('r-rappdirs', when='@2.16.1:', type=('build', 'run'))
    depends_on('r-annotationdbi@1.31.19:', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-interactivedisplaybase', type=('build', 'run'))
    depends_on('r-httr', type=('build', 'run'))
    depends_on('r-yaml', type=('build', 'run'))
    depends_on('r-dplyr', when='@2.16.1:', type=('build', 'run'))
    # BiocInstaller was replaced by BiocManager in newer releases.
    depends_on('r-biocinstaller', when='@:2.16.1', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-annotationhub/package.py
|
Python
|
lgpl-2.1
| 2,334
|
[
"Bioconductor"
] |
7b9550c9479a2266278142b93446ee0594e654724930f20d9148e2f3ba0f59a6
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""SPM wrappers for preprocessing data
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range
import os
from copy import deepcopy
# Third-party imports
import numpy as np
# Local imports
from ...utils.filemanip import (fname_presuffix, filename_to_list,
list_to_filename, split_filename)
from ..base import (OutputMultiPath, TraitedSpec, isdefined,
traits, InputMultiPath, File)
from .base import (SPMCommand, scans_for_fname, func_is_3d,
scans_for_fnames, SPMCommandInputSpec)
__docformat__ = 'restructuredtext'
class SliceTimingInputSpec(SPMCommandInputSpec):
    # Input spec for SPM slice-timing correction; each `field` value is
    # the key the trait maps to in SPM's batch job structure.
    in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
                                            File(exists=True)), field='scans',
                              desc='list of filenames to apply slice timing',
                              mandatory=True, copyfile=False)
    num_slices = traits.Int(field='nslices',
                            desc='number of slices in a volume',
                            mandatory=True)
    time_repetition = traits.Float(field='tr',
                                   desc=('time between volume acquisitions'
                                         '(start to start time)'),
                                   mandatory=True)
    time_acquisition = traits.Float(field='ta',
                                    desc=('time of volume acquisition. usually'
                                          'calculated as TR-(TR/num_slices)'),
                                    mandatory=True)
    slice_order = traits.List(traits.Int(), field='so',
                              desc=('1-based order in which slices are '
                                    'acquired'),
                              mandatory=True)
    ref_slice = traits.Int(field='refslice',
                           desc='1-based Number of the reference slice',
                           mandatory=True)
    # Prefix SPM prepends to each corrected output file.
    out_prefix = traits.String('a', field='prefix', usedefault=True,
                               desc='slicetimed output prefix')
class SliceTimingOutputSpec(TraitedSpec):
    # Mirrors the nesting of `in_files` (list-of-lists for multi-session
    # inputs, single filenames otherwise).
    timecorrected_files = OutputMultiPath(
        traits.Either(traits.List(File(exists=True)), File(exists=True)),
        desc='slice time corrected files')
class SliceTiming(SPMCommand):
    """Use spm to perform slice timing correction.
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19
    Examples
    --------
    >>> from nipype.interfaces.spm import SliceTiming
    >>> st = SliceTiming()
    >>> st.inputs.in_files = 'functional.nii'
    >>> st.inputs.num_slices = 32
    >>> st.inputs.time_repetition = 6.0
    >>> st.inputs.time_acquisition = 6. - 6./32.
    >>> st.inputs.slice_order = list(range(32,0,-1))
    >>> st.inputs.ref_slice = 1
    >>> st.run() # doctest: +SKIP
    """
    input_spec = SliceTimingInputSpec
    output_spec = SliceTimingOutputSpec
    _jobtype = 'temporal'
    _jobname = 'st'
    def _format_arg(self, opt, spec, val):
        """Translate an input trait into the SPM batch representation."""
        if opt != 'in_files':
            return super(SliceTiming, self)._format_arg(opt, spec, val)
        # Each entry is a separate session; 4-D files are expanded.
        return scans_for_fnames(
            filename_to_list(val), keep4d=False, separate_sessions=True)
    def _list_outputs(self):
        """Predict the output filenames from the inputs and out_prefix."""
        outputs = self._outputs().get()
        prefix = self.inputs.out_prefix
        corrected = []
        for entry in filename_to_list(self.inputs.in_files):
            if isinstance(entry, list):
                # Multi-file session: prefix every file in the run.
                corrected.append(
                    [fname_presuffix(name, prefix=prefix) for name in entry])
            else:
                corrected.append(fname_presuffix(entry, prefix=prefix))
        outputs['timecorrected_files'] = corrected
        return outputs
class RealignInputSpec(SPMCommandInputSpec):
    # Input spec for spm_realign. `eoptions.*` fields are estimation
    # options; `roptions.*` fields are reslice (write) options.
    in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
                                            File(exists=True)), field='data',
                              mandatory=True, copyfile=True,
                              desc='list of filenames to realign')
    jobtype = traits.Enum('estwrite', 'estimate', 'write',
                          desc='one of: estimate, write, estwrite',
                          usedefault=True)
    quality = traits.Range(low=0.0, high=1.0, field='eoptions.quality',
                           desc='0.1 = fast, 1.0 = precise')
    fwhm = traits.Range(low=0.0, field='eoptions.fwhm',
                        desc='gaussian smoothing kernel width')
    separation = traits.Range(low=0.0, field='eoptions.sep',
                              desc='sampling separation in mm')
    register_to_mean = traits.Bool(field='eoptions.rtm',
                                   desc=('Indicate whether realignment is '
                                         'done to the mean image'))
    weight_img = File(exists=True, field='eoptions.weight',
                      desc='filename of weighting image')
    interp = traits.Range(low=0, high=7, field='eoptions.interp',
                          desc='degree of b-spline used for interpolation')
    wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
                       field='eoptions.wrap',
                       desc='Check if interpolation should wrap in [x,y,z]')
    # [reslice_all, reslice_mean]; see Realign._list_outputs.
    write_which = traits.ListInt([2, 1], field='roptions.which',
                                 minlen=2, maxlen=2, usedefault=True,
                                 desc='determines which images to reslice')
    write_interp = traits.Range(low=0, high=7, field='roptions.interp',
                                desc=('degree of b-spline used for '
                                      'interpolation'))
    write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
                             field='roptions.wrap',
                             desc=('Check if interpolation should wrap in '
                                   '[x,y,z]'))
    write_mask = traits.Bool(field='roptions.mask',
                             desc='True/False mask output image')
    out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
                               desc='realigned output prefix')
class RealignOutputSpec(TraitedSpec):
    # Outputs of spm_realign; which of these are populated depends on
    # `jobtype` and `write_which` (see Realign._list_outputs).
    mean_image = File(exists=True, desc='Mean image file from the realignment')
    modified_in_files = OutputMultiPath(traits.Either(
        traits.List(File(exists=True)), File(exists=True)),
        desc=('Copies of all files passed to '
              'in_files. Headers will have '
              'been modified to align all '
              'images with the first, or '
              'optionally to first do that, '
              'extract a mean image, and '
              're-align to that mean image.'))
    realigned_files = OutputMultiPath(traits.Either(
        traits.List(File(exists=True)), File(exists=True)),
        desc=('If jobtype is write or estwrite, '
              'these will be the resliced files.'
              ' Otherwise, they will be copies '
              'of in_files that have had their '
              'headers rewritten.'))
    realignment_parameters = OutputMultiPath(File(exists=True),
                                             desc=('Estimated translation and '
                                                   'rotation parameters'))
class Realign(SPMCommand):
    """Use spm_realign for estimating within modality rigid body alignment
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> realign = spm.Realign()
    >>> realign.inputs.in_files = 'functional.nii'
    >>> realign.inputs.register_to_mean = True
    >>> realign.run() # doctest: +SKIP
    """
    input_spec = RealignInputSpec
    output_spec = RealignOutputSpec
    _jobtype = 'spatial'
    _jobname = 'realign'
    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'in_files':
            # For a pure `write` job all files are resliced together;
            # otherwise each entry is treated as a separate session.
            if self.inputs.jobtype == "write":
                separate_sessions = False
            else:
                separate_sessions = True
            return scans_for_fnames(val,
                                    keep4d=False,
                                    separate_sessions=separate_sessions)
        return super(Realign, self)._format_arg(opt, spec, val)
    def _parse_inputs(self):
        """validate spm realign options if set to None ignore
        """
        # SPM expects the options nested under the jobtype key.
        einputs = super(Realign, self)._parse_inputs()
        return [{'%s' % (self.inputs.jobtype): einputs[0]}]
    def _list_outputs(self):
        # Predict output filenames without running SPM.
        outputs = self._outputs().get()
        # write_which is [reslice_all, reslice_mean]; > 0 enables each.
        resliced_all = self.inputs.write_which[0] > 0
        resliced_mean = self.inputs.write_which[1] > 0
        if self.inputs.jobtype != "write":
            if isdefined(self.inputs.in_files):
                outputs['realignment_parameters'] = []
            for imgf in self.inputs.in_files:
                if isinstance(imgf, list):
                    tmp_imgf = imgf[0]
                else:
                    tmp_imgf = imgf
                # SPM writes one rp_<name>.txt per session.
                outputs['realignment_parameters'].append(
                    fname_presuffix(tmp_imgf, prefix='rp_', suffix='.txt',
                                    use_ext=False))
                # A series of 3-D files yields a single rp_ file for the
                # whole series, so stop after the first.
                if not isinstance(imgf, list) and func_is_3d(imgf):
                    break
            if self.inputs.jobtype == "estimate":
                # Estimation only rewrites headers in place.
                outputs['realigned_files'] = self.inputs.in_files
            if (self.inputs.jobtype == "estimate" or
                    self.inputs.jobtype == "estwrite"):
                outputs['modified_in_files'] = self.inputs.in_files
        if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
            if isinstance(self.inputs.in_files[0], list):
                first_image = self.inputs.in_files[0][0]
            else:
                first_image = self.inputs.in_files[0]
            if resliced_mean:
                # SPM names the mean image mean<first_input>.
                outputs['mean_image'] = fname_presuffix(first_image,
                                                        prefix='mean')
            if resliced_all:
                outputs['realigned_files'] = []
                for idx, imgf in enumerate(
                        filename_to_list(self.inputs.in_files)):
                    realigned_run = []
                    if isinstance(imgf, list):
                        for i, inner_imgf in enumerate(filename_to_list(imgf)):
                            newfile = fname_presuffix(
                                inner_imgf, prefix=self.inputs.out_prefix)
                            realigned_run.append(newfile)
                    else:
                        realigned_run = fname_presuffix(
                            imgf, prefix=self.inputs.out_prefix)
                    outputs['realigned_files'].append(realigned_run)
        return outputs
class CoregisterInputSpec(SPMCommandInputSpec):
target = File(exists=True, field='ref', mandatory=True,
desc='reference file to register to', copyfile=False)
source = InputMultiPath(File(exists=True), field='source',
desc='file to register to target', copyfile=True,
mandatory=True)
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
apply_to_files = InputMultiPath(File(exists=True), field='other',
desc='files to apply transformation to',
copyfile=True)
cost_function = traits.Enum('mi', 'nmi', 'ecc', 'ncc',
field='eoptions.cost_fun',
desc="""cost function, one of:
'mi' - Mutual Information,
'nmi' - Normalised Mutual Information,
'ecc' - Entropy Correlation Coefficient,
'ncc' - Normalised Cross Correlation""")
fwhm = traits.List(traits.Float(), minlen=2, maxlen=2,
field='eoptions.fwhm',
desc='gaussian smoothing kernel width (mm)')
separation = traits.List(traits.Float(), field='eoptions.sep',
desc='sampling separation in mm')
tolerance = traits.List(traits.Float(), field='eoptions.tol',
desc='acceptable tolerance for each of 12 params')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc=('degree of b-spline used for '
'interpolation'))
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc=('Check if interpolation should wrap in '
'[x,y,z]'))
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='coregistered output prefix')
class CoregisterOutputSpec(TraitedSpec):
    # Predicted output filenames; see Coregister._list_outputs for
    # which are populated per jobtype.
    coregistered_source = OutputMultiPath(File(exists=True),
                                          desc='Coregistered source files')
    coregistered_files = OutputMultiPath(File(exists=True),
                                         desc='Coregistered other files')
class Coregister(SPMCommand):
    """Use spm_coreg for estimating cross-modality rigid body alignment
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> coreg = spm.Coregister()
    >>> coreg.inputs.target = 'functional.nii'
    >>> coreg.inputs.source = 'structural.nii'
    >>> coreg.run() # doctest: +SKIP
    """
    input_spec = CoregisterInputSpec
    output_spec = CoregisterOutputSpec
    _jobtype = 'spatial'
    _jobname = 'coreg'
    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        # target (and source, unless only writing) are single scans.
        if (opt == 'target' or
                (opt == 'source' and self.inputs.jobtype != "write")):
            return scans_for_fnames(filename_to_list(val),
                                    keep4d=True)
        if opt == 'apply_to_files':
            return np.array(filename_to_list(val), dtype=object)
        if opt == 'source' and self.inputs.jobtype == "write":
            # When only writing, source and apply_to_files are resliced
            # together as one list.
            if isdefined(self.inputs.apply_to_files):
                return scans_for_fnames(val + self.inputs.apply_to_files)
            else:
                return scans_for_fnames(val)
        return super(Coregister, self)._format_arg(opt, spec, val)
    def _parse_inputs(self):
        """validate spm coregister options if set to None ignore
        """
        if self.inputs.jobtype == "write":
            einputs = (super(Coregister, self)
                       ._parse_inputs(skip=('jobtype', 'apply_to_files')))
        else:
            # BUG FIX: ('jobtype') is just the string 'jobtype' (no
            # tuple), so the skip membership test degraded to substring
            # matching; pass a real one-element tuple.
            einputs = super(Coregister, self)._parse_inputs(skip=('jobtype',))
        jobtype = self.inputs.jobtype
        # SPM expects the options nested under the jobtype key.
        return [{'%s' % (jobtype): einputs[0]}]
    def _list_outputs(self):
        # Predict output filenames without running SPM.
        outputs = self._outputs().get()
        if self.inputs.jobtype == "estimate":
            # Estimation rewrites headers in place; filenames unchanged.
            if isdefined(self.inputs.apply_to_files):
                outputs['coregistered_files'] = self.inputs.apply_to_files
            outputs['coregistered_source'] = self.inputs.source
        elif (self.inputs.jobtype == "write" or
              self.inputs.jobtype == "estwrite"):
            # Resliced files are written with the out_prefix.
            if isdefined(self.inputs.apply_to_files):
                outputs['coregistered_files'] = []
                for imgf in filename_to_list(self.inputs.apply_to_files):
                    (outputs['coregistered_files']
                     .append(fname_presuffix(imgf,
                                             prefix=self.inputs.out_prefix)))
            outputs['coregistered_source'] = []
            for imgf in filename_to_list(self.inputs.source):
                (outputs['coregistered_source']
                 .append(fname_presuffix(imgf, prefix=self.inputs.out_prefix)))
        return outputs
class NormalizeInputSpec(SPMCommandInputSpec):
    # Input spec for spm_normalise (SPM8-style). Estimation requires
    # either template+source, or a precomputed parameter_file — the
    # `xor` constraints enforce this.
    template = File(exists=True, field='eoptions.template',
                    desc='template file to normalize to',
                    mandatory=True, xor=['parameter_file'],
                    copyfile=False)
    source = InputMultiPath(File(exists=True), field='subj.source',
                            desc='file to normalize to template',
                            xor=['parameter_file'],
                            mandatory=True, copyfile=True)
    jobtype = traits.Enum('estwrite', 'est', 'write', usedefault=True,
                          desc='Estimate, Write or do both')
    apply_to_files = InputMultiPath(
        traits.Either(File(exists=True), traits.List(File(exists=True))),
        field='subj.resample',
        desc='files to apply transformation to',
        copyfile=True)
    parameter_file = File(field='subj.matname', mandatory=True,
                          xor=['source', 'template'],
                          desc='normalization parameter file*_sn.mat',
                          copyfile=False)
    source_weight = File(field='subj.wtsrc',
                         desc='name of weighting image for source',
                         copyfile=False)
    template_weight = File(field='eoptions.weight',
                           desc='name of weighting image for template',
                           copyfile=False)
    source_image_smoothing = traits.Float(field='eoptions.smosrc',
                                          desc='source smoothing')
    template_image_smoothing = traits.Float(field='eoptions.smoref',
                                            desc='template smoothing')
    affine_regularization_type = traits.Enum('mni', 'size', 'none',
                                             field='eoptions.regtype',
                                             desc='mni, size, none')
    DCT_period_cutoff = traits.Float(field='eoptions.cutoff',
                                     desc='Cutoff of for DCT bases')
    nonlinear_iterations = traits.Int(field='eoptions.nits',
                                      desc=('Number of iterations of '
                                            'nonlinear warping'))
    nonlinear_regularization = traits.Float(field='eoptions.reg',
                                            desc=('the amount of the '
                                                  'regularization for the '
                                                  'nonlinear part of the '
                                                  'normalization'))
    write_preserve = traits.Bool(field='roptions.preserve',
                                 desc='True/False warped images are modulated')
    write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3,
                                                 maxlen=3),
                                     field='roptions.bb', minlen=2, maxlen=2,
                                     desc='3x2-element list of lists')
    write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox',
                                    minlen=3, maxlen=3,
                                    desc='3-element list')
    write_interp = traits.Range(low=0, high=7, field='roptions.interp',
                                desc=('degree of b-spline used for '
                                      'interpolation'))
    write_wrap = traits.List(traits.Int(), field='roptions.wrap',
                             desc=('Check if interpolation should wrap in '
                                   '[x,y,z] - list of bools'))
    out_prefix = traits.String('w', field='roptions.prefix', usedefault=True,
                               desc='normalized output prefix')
class NormalizeOutputSpec(TraitedSpec):
    # Predicted outputs; which are populated depends on jobtype
    # (see Normalize._list_outputs).
    normalization_parameters = OutputMultiPath(File(exists=True),
                                               desc=('MAT files containing '
                                                     'the normalization '
                                                     'parameters'))
    normalized_source = OutputMultiPath(File(exists=True),
                                        desc='Normalized source files')
    normalized_files = OutputMultiPath(File(exists=True),
                                       desc='Normalized other files')
class Normalize(SPMCommand):
    """use spm_normalise for warping an image to a template
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=203
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> norm = spm.Normalize()
    >>> norm.inputs.source = 'functional.nii'
    >>> norm.run() # doctest: +SKIP
    """
    input_spec = NormalizeInputSpec
    output_spec = NormalizeOutputSpec
    _jobtype = 'spatial'
    _jobname = 'normalise'
    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'template':
            return scans_for_fname(filename_to_list(val))
        if opt == 'source':
            return scans_for_fname(filename_to_list(val))
        if opt == 'apply_to_files':
            return scans_for_fnames(filename_to_list(val))
        if opt == 'parameter_file':
            return np.array([list_to_filename(val)], dtype=object)
        if opt in ['write_wrap']:
            # The trait has no maxlen; validate the length here instead.
            if len(val) != 3:
                raise ValueError('%s must have 3 elements' % opt)
        return super(Normalize, self)._format_arg(opt, spec, val)
    def _parse_inputs(self):
        """Validate spm normalize options if set to None ignore
        """
        einputs = super(Normalize, self)._parse_inputs(skip=('jobtype',
                                                             'apply_to_files'))
        # `subj.resample` must contain both the files the transform is
        # applied to and the source itself, so it is assembled manually
        # rather than via the trait's `field`.
        if isdefined(self.inputs.apply_to_files):
            inputfiles = deepcopy(self.inputs.apply_to_files)
            if isdefined(self.inputs.source):
                inputfiles.extend(self.inputs.source)
            einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
        jobtype = self.inputs.jobtype
        if jobtype in ['estwrite', 'write']:
            # Writing with no apply_to_files resamples the source alone.
            if not isdefined(self.inputs.apply_to_files):
                if isdefined(self.inputs.source):
                    einputs[0]['subj']['resample'] = scans_for_fname(
                        self.inputs.source)
        return [{'%s' % (jobtype): einputs[0]}]
    def _list_outputs(self):
        # Predict output filenames without running SPM.
        outputs = self._outputs().get()
        jobtype = self.inputs.jobtype
        if jobtype.startswith('est'):
            # Estimation writes one <source>_sn.mat per source image.
            outputs['normalization_parameters'] = []
            for imgf in filename_to_list(self.inputs.source):
                outputs['normalization_parameters'].append(
                    fname_presuffix(imgf, suffix='_sn.mat', use_ext=False))
            outputs['normalization_parameters'] = list_to_filename(
                outputs['normalization_parameters'])
        if self.inputs.jobtype == "estimate":
            if isdefined(self.inputs.apply_to_files):
                outputs['normalized_files'] = self.inputs.apply_to_files
            outputs['normalized_source'] = self.inputs.source
        elif 'write' in self.inputs.jobtype:
            # Modulated (preserve) outputs gain an extra 'm' prefix.
            if (isdefined(self.inputs.write_preserve) and
                    self.inputs.write_preserve):
                prefixNorm = ''.join(['m', self.inputs.out_prefix])
            else:
                prefixNorm = self.inputs.out_prefix
            outputs['normalized_files'] = []
            if isdefined(self.inputs.apply_to_files):
                filelist = filename_to_list(self.inputs.apply_to_files)
                for f in filelist:
                    if isinstance(f, list):
                        run = [fname_presuffix(in_f, prefix=prefixNorm)
                               for in_f in f]
                    else:
                        run = [fname_presuffix(f, prefix=prefixNorm)]
                    outputs['normalized_files'].extend(run)
            if isdefined(self.inputs.source):
                outputs['normalized_source'] = []
                for imgf in filename_to_list(self.inputs.source):
                    outputs['normalized_source'].append(
                        fname_presuffix(imgf, prefix=prefixNorm))
        return outputs
class Normalize12InputSpec(SPMCommandInputSpec):
    # Input spec for SPM12's Normalise. Estimation needs an image to
    # align (optionally against a custom tpm); writing alone needs a
    # precomputed deformation_file — `xor` enforces the exclusivity.
    image_to_align = File(exists=True, field='subj.vol',
                          desc=('file to estimate normalization parameters '
                                'with'),
                          xor=['deformation_file'],
                          mandatory=True, copyfile=True)
    apply_to_files = InputMultiPath(
        traits.Either(File(exists=True), traits.List(File(exists=True))),
        field='subj.resample',
        desc='files to apply transformation to',
        copyfile=True)
    deformation_file = File(field='subj.def', mandatory=True,
                            xor=['image_to_align', 'tpm'],
                            desc=('file y_*.nii containing 3 deformation '
                                  'fields for the deformation in x, y and z '
                                  'dimension'),
                            copyfile=False)
    jobtype = traits.Enum('estwrite', 'est', 'write', usedefault=True,
                          desc='Estimate, Write or do Both')
    bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1,
                                      10, field='eoptions.biasreg',
                                      desc='no(0) - extremely heavy (10)')
    bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130,
                            140, 150, 'Inf', field='eoptions.biasfwhm',
                            desc='FWHM of Gaussian smoothness of bias')
    tpm = File(exists=True, field='eoptions.tpm',
               desc=('template in form of tissue probablitiy maps to '
                     'normalize to'),
               xor=['deformation_file'],
               copyfile=False)
    affine_regularization_type = traits.Enum('mni', 'size', 'none',
                                             field='eoptions.affreg',
                                             desc='mni, size, none')
    warping_regularization = traits.List(traits.Float(), field='eoptions.reg',
                                         minlen=5, maxlen=5,
                                         desc=('controls balance between '
                                               'parameters and data'))
    smoothness = traits.Float(field='eoptions.fwhm',
                              desc=('value (in mm) to smooth the data before '
                                    'normalization'))
    sampling_distance = traits.Float(field='eoptions.samp',
                                     desc=('Sampling distance on data for '
                                           'parameter estimation'))
    write_bounding_box = traits.List(traits.List(traits.Float(),
                                                 minlen=3, maxlen=3),
                                     field='woptions.bb', minlen=2, maxlen=2,
                                     desc=('3x2-element list of lists '
                                           'representing the bounding box '
                                           '(in mm) to be written'))
    write_voxel_sizes = traits.List(traits.Float(), field='woptions.vox',
                                    minlen=3, maxlen=3,
                                    desc=('3-element list representing the '
                                          'voxel sizes (in mm) of the written '
                                          'normalised images'))
    write_interp = traits.Range(low=0, high=7, field='woptions.interp',
                                desc=('degree of b-spline used for '
                                      'interpolation'))
    out_prefix = traits.String('w', field='woptions.prefix', usedefault=True,
                               desc='Normalized output prefix')
class Normalize12OutputSpec(TraitedSpec):
    """Output specification for the ``Normalize12`` interface."""
    # y_* file(s) written when jobtype includes estimation
    deformation_field = OutputMultiPath(File(exists=True),
                                        desc=('NIfTI file containing 3 '
                                              'deformation fields for the '
                                              'deformation in x, y and z '
                                              'dimension'))
    # w-prefixed version of image_to_align
    normalized_image = OutputMultiPath(File(exists=True),
                                       desc=('Normalized file that needed to '
                                             'be aligned'))
    # w-prefixed versions of apply_to_files
    normalized_files = OutputMultiPath(File(exists=True),
                                       desc='Normalized other files')
class Normalize12(SPMCommand):
    """uses SPM12's new Normalise routine for warping an image to a template.
    Spatial normalisation is now done via the segmentation routine (which was
    known as ``New Segment`` in SPM8). Note that the normalisation in SPM12
    is done towards a file containing multiple tissue probability maps, which
    was not the case in SPM8.
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=49
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> norm12 = spm.Normalize12()
    >>> norm12.inputs.image_to_align = 'structural.nii'
    >>> norm12.inputs.apply_to_files = 'functional.nii'
    >>> norm12.run() # doctest: +SKIP
    """
    input_spec = Normalize12InputSpec
    output_spec = Normalize12OutputSpec
    _jobtype = 'spatial'
    _jobname = 'normalise'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'tpm':
            return scans_for_fname(filename_to_list(val))
        if opt == 'image_to_align':
            return scans_for_fname(filename_to_list(val))
        if opt == 'apply_to_files':
            return scans_for_fnames(filename_to_list(val))
        if opt == 'deformation_file':
            # SPM expects a cell array of strings; object dtype keeps the
            # filename as a single matlab cell entry
            return np.array([list_to_filename(val)], dtype=object)
        # BUGFIX: this check used the opt name 'nonlinear_regularization',
        # which is not an input of this interface (it belongs to the SPM8
        # Normalize); the 5-element validation applies to
        # 'warping_regularization' (field eoptions.reg).
        if opt == 'warping_regularization':
            if len(val) != 5:
                raise ValueError('%s must have 5 elements' % opt)
        return super(Normalize12, self)._format_arg(opt, spec, val)

    def _parse_inputs(self, skip=()):
        """validate spm normalize options if set to None ignore
        """
        einputs = super(Normalize12, self)._parse_inputs(
            skip=('jobtype', 'apply_to_files'))
        if isdefined(self.inputs.apply_to_files):
            # resample both the extra files and (when given) the image
            # being aligned
            inputfiles = deepcopy(self.inputs.apply_to_files)
            if isdefined(self.inputs.image_to_align):
                inputfiles.extend([self.inputs.image_to_align])
            einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
        jobtype = self.inputs.jobtype
        if jobtype in ['estwrite', 'write']:
            if not isdefined(self.inputs.apply_to_files):
                if isdefined(self.inputs.image_to_align):
                    einputs[0]['subj']['resample'] = scans_for_fname(
                        self.inputs.image_to_align)
        # the whole batch is nested under the jobtype key
        return [{'%s' % (jobtype): einputs[0]}]

    def _list_outputs(self):
        outputs = self._outputs().get()
        jobtype = self.inputs.jobtype
        if jobtype.startswith('est'):
            # estimation writes one y_* deformation field per input image
            outputs['deformation_field'] = []
            for imgf in filename_to_list(self.inputs.image_to_align):
                outputs['deformation_field'].append(
                    fname_presuffix(imgf, prefix='y_'))
            outputs['deformation_field'] = list_to_filename(
                outputs['deformation_field'])
        # NOTE(review): 'estimate' is not a member of the jobtype enum
        # ('estwrite', 'est', 'write'), so this branch is unreachable —
        # confirm whether 'est' was intended before changing behavior.
        if self.inputs.jobtype == "estimate":
            if isdefined(self.inputs.apply_to_files):
                outputs['normalized_files'] = self.inputs.apply_to_files
            outputs['normalized_image'] = fname_presuffix(
                self.inputs.image_to_align, prefix='w')
        elif 'write' in self.inputs.jobtype:
            # written outputs carry the 'w' prefix
            outputs['normalized_files'] = []
            if isdefined(self.inputs.apply_to_files):
                filelist = filename_to_list(self.inputs.apply_to_files)
                for f in filelist:
                    if isinstance(f, list):
                        run = [fname_presuffix(in_f, prefix='w') for in_f in f]
                    else:
                        run = [fname_presuffix(f, prefix='w')]
                    outputs['normalized_files'].extend(run)
            if isdefined(self.inputs.image_to_align):
                outputs['normalized_image'] = fname_presuffix(
                    self.inputs.image_to_align, prefix='w')
        return outputs
class SegmentInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`Segment` interface.

    The three ``*_output_type`` inputs each take three booleans that map to
    SPM's [Modulated, Unmodulated-normalised, Native] output switches.
    """
    data = InputMultiPath(File(exists=True), field='data',
                          desc='one scan per subject',
                          copyfile=False, mandatory=True)
    gm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3,
                                 field='output.GM',
                                 desc="""Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
    wm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3,
                                 field='output.WM',
                                 desc="""
Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
    csf_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3,
                                  field='output.CSF',
                                  desc="""
Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
    save_bias_corrected = traits.Bool(field='output.biascor',
                                      desc=('True/False produce a bias '
                                            'corrected image'))
    clean_masks = traits.Enum('no', 'light', 'thorough',
                              field='output.cleanup',
                              desc=("clean using estimated brain mask "
                                    "('no','light','thorough')"))
    tissue_prob_maps = traits.List(File(exists=True), field='opts.tpm',
                                   desc=('list of gray, white & csf prob. '
                                         '(opt,)'))
    gaussians_per_class = traits.List(traits.Int(), field='opts.ngaus',
                                      desc=('num Gaussians capture intensity '
                                            'distribution'))
    affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', '',
                                        field='opts.regtype',
                                        desc=('Possible options: "mni", '
                                              '"eastern", "subj", "none" '
                                              '(no reguralisation), "" '
                                              '(no affine registration)'))
    warping_regularization = traits.Float(field='opts.warpreg',
                                          desc=('Controls balance between '
                                                'parameters and data'))
    warp_frequency_cutoff = traits.Float(field='opts.warpco',
                                         desc='Cutoff of DCT bases')
    bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001,
                                      0.01, 0.1, 1, 10, field='opts.biasreg',
                                      desc='no(0) - extremely heavy (10)')
    bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130,
                            'Inf', field='opts.biasfwhm',
                            desc='FWHM of Gaussian smoothness of bias')
    sampling_distance = traits.Float(field='opts.samp',
                                     desc=('Sampling distance on data for '
                                           'parameter estimation'))
    mask_image = File(exists=True, field='opts.msk',
                      desc='Binary image to restrict parameter estimation ')
class SegmentOutputSpec(TraitedSpec):
    """Output specification for the :class:`Segment` interface.

    File prefixes follow SPM conventions: c* (native), wc* (normalised),
    mwc* (modulated normalised), m* (bias corrected).
    """
    native_gm_image = File(desc='native space grey probability map')
    normalized_gm_image = File(desc='normalized grey probability map',)
    modulated_gm_image = File(desc=('modulated, normalized grey '
                                    'probability map'))
    native_wm_image = File(desc='native space white probability map')
    normalized_wm_image = File(desc='normalized white probability map')
    modulated_wm_image = File(desc=('modulated, normalized white '
                                    'probability map'))
    native_csf_image = File(desc='native space csf probability map')
    normalized_csf_image = File(desc='normalized csf probability map')
    modulated_csf_image = File(desc=('modulated, normalized csf '
                                     'probability map'))
    # kept for backwards compatibility; superseded by bias_corrected_image
    modulated_input_image = File(deprecated='0.10',
                                 new_name='bias_corrected_image',
                                 desc='bias-corrected version of input image')
    bias_corrected_image = File(desc='bias-corrected version of input image')
    transformation_mat = File(exists=True, desc='Normalization transformation')
    inverse_transformation_mat = File(exists=True,
                                      desc='Inverse normalization info')
class Segment(SPMCommand):
    """use spm_segment to separate structural images into different
    tissue classes.
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=209
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> seg = spm.Segment()
    >>> seg.inputs.data = 'structural.nii'
    >>> seg.run() # doctest: +SKIP
    """
    input_spec = SegmentInputSpec
    output_spec = SegmentOutputSpec

    def __init__(self, **inputs):
        # SPM12 moved the old segmentation routine to the 'oldseg' tool
        # node; earlier SPM versions expose it as spatial/preproc.
        version = SPMCommand().version
        if version and '12.' in version:
            self._jobtype, self._jobname = 'tools', 'oldseg'
        else:
            self._jobtype, self._jobname = 'spatial', 'preproc'
        SPMCommand.__init__(self, **inputs)

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt in ('data', 'tissue_prob_maps'):
            if isinstance(val, list):
                return scans_for_fnames(val)
            return scans_for_fname(val)
        if 'output_type' in opt:
            # SPM wants 0/1 integers, not booleans
            return [int(flag) for flag in val]
        if opt == 'mask_image':
            return scans_for_fname(val)
        if opt == 'clean_masks':
            return {'no': 0, 'light': 1, 'thorough': 2}[val]
        return super(Segment, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = self._outputs().get()
        first_scan = self.inputs.data[0]
        # one tissue class per c1/c2/c3 prefix number
        for class_idx, tissue in enumerate(['gm', 'wm', 'csf'], start=1):
            flags = getattr(self.inputs, '%s_output_type' % tissue)
            if not isdefined(flags):
                continue
            variants = [('modulated', 'mw'), ('normalized', 'w'),
                        ('native', '')]
            for flag, (image, prefix) in zip(flags, variants):
                if flag:
                    outputs['%s_%s_image' % (image, tissue)] = \
                        fname_presuffix(
                            first_scan,
                            prefix='%sc%d' % (prefix, class_idx))
        if (isdefined(self.inputs.save_bias_corrected) and
                self.inputs.save_bias_corrected):
            outputs['bias_corrected_image'] = fname_presuffix(first_scan,
                                                              prefix='m')
        outputs['transformation_mat'] = fname_presuffix(
            first_scan, suffix='_seg_sn.mat', use_ext=False)
        outputs['inverse_transformation_mat'] = fname_presuffix(
            first_scan, suffix='_seg_inv_sn.mat', use_ext=False)
        return outputs
class NewSegmentInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`NewSegment` interface."""
    channel_files = InputMultiPath(File(exists=True),
                                   desc="A list of files to be segmented",
                                   field='channel', copyfile=False,
                                   mandatory=True)
    # shares field 'channel' with channel_files; both are merged into one
    # matlab struct in NewSegment._format_arg
    channel_info = traits.Tuple(traits.Float(), traits.Float(),
                                traits.Tuple(traits.Bool, traits.Bool),
                                desc="""A tuple with the following fields:
- bias reguralisation (0-10)
- FWHM of Gaussian smoothness of bias
- which maps to save (Corrected, Field) - a tuple of two boolean values""",
                                field='channel')
    tissues = traits.List(
        traits.Tuple(traits.Tuple(File(exists=True), traits.Int()),
                     traits.Int(), traits.Tuple(traits.Bool, traits.Bool),
                     traits.Tuple(traits.Bool, traits.Bool)),
        desc="""A list of tuples (one per tissue) with the following fields:
- tissue probability map (4D), 1-based index to frame
- number of gaussians
- which maps to save [Native, DARTEL] - a tuple of two boolean values
- which maps to save [Unmodulated, Modulated] - a tuple of two boolean values""",
        field='tissue')
    affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none',
                                        field='warp.affreg',
                                        desc='mni, eastern, subj, none ')
    warping_regularization = traits.Either(traits.List(traits.Float(),
                                                       minlen=5, maxlen=5),
                                           traits.Float(),
                                           field='warp.reg',
                                           desc=('Warping regularization '
                                                 'parameter(s). Accepts float '
                                                 'or list of floats (the '
                                                 'latter is required by '
                                                 'SPM12)'))
    sampling_distance = traits.Float(field='warp.samp',
                                     desc=('Sampling distance on data for '
                                           'parameter estimation'))
    write_deformation_fields = traits.List(traits.Bool(), minlen=2, maxlen=2,
                                           field='warp.write',
                                           desc=("Which deformation fields to "
                                                 "write:[Inverse, Forward]"))
class NewSegmentOutputSpec(TraitedSpec):
    """Output specification for the :class:`NewSegment` interface.

    The per-class outputs are lists of lists: one inner list per tissue
    class, each holding one file per input channel file.
    """
    native_class_images = traits.List(traits.List(File(exists=True)),
                                      desc='native space probability maps')
    dartel_input_images = traits.List(traits.List(File(exists=True)),
                                      desc='dartel imported class images')
    normalized_class_images = traits.List(traits.List(File(exists=True)),
                                          desc='normalized class images')
    modulated_class_images = traits.List(traits.List(File(exists=True)),
                                         desc=('modulated+normalized class '
                                               'images'))
    transformation_mat = OutputMultiPath(File(exists=True),
                                         desc='Normalization transformation')
    bias_corrected_images = OutputMultiPath(File(exists=True),
                                            desc='bias corrected images')
    bias_field_images = OutputMultiPath(File(exists=True),
                                        desc='bias field images')
    forward_deformation_field = OutputMultiPath(File(exists=True))
    inverse_deformation_field = OutputMultiPath(File(exists=True))
class NewSegment(SPMCommand):
    """Use spm_preproc8 (New Segment) to separate structural images into
    different tissue classes. Supports multiple modalities.
    NOTE: This interface currently supports single channel input only
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=43
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> seg = spm.NewSegment()
    >>> seg.inputs.channel_files = 'structural.nii'
    >>> seg.inputs.channel_info = (0.0001, 60, (True, True))
    >>> seg.run() # doctest: +SKIP
    For VBM pre-processing [http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf],
    TPM.nii should be replaced by /path/to/spm8/toolbox/Seg/TPM.nii
    >>> seg = NewSegment()
    >>> seg.inputs.channel_files = 'structural.nii'
    >>> tissue1 = (('TPM.nii', 1), 2, (True,True), (False, False))
    >>> tissue2 = (('TPM.nii', 2), 2, (True,True), (False, False))
    >>> tissue3 = (('TPM.nii', 3), 2, (True,False), (False, False))
    >>> tissue4 = (('TPM.nii', 4), 2, (False,False), (False, False))
    >>> tissue5 = (('TPM.nii', 5), 2, (False,False), (False, False))
    >>> seg.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5]
    >>> seg.run() # doctest: +SKIP
    """
    input_spec = NewSegmentInputSpec
    output_spec = NewSegmentOutputSpec
    def __init__(self, **inputs):
        # SPM12 ships New Segment as the default spatial/preproc node;
        # in SPM8 it lives in tools/preproc8.
        _local_version = SPMCommand().version
        if _local_version and '12.' in _local_version:
            self._jobtype = 'spatial'
            self._jobname = 'preproc'
        else:
            self._jobtype = 'tools'
            self._jobname = 'preproc8'
        SPMCommand.__init__(self, **inputs)
    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt in ['channel_files', 'channel_info']:
            # structure have to be recreated because of some weird traits error
            # channel_files and channel_info share the SPM field 'channel',
            # so both are folded into one struct here.
            new_channel = {}
            new_channel['vols'] = scans_for_fnames(self.inputs.channel_files)
            if isdefined(self.inputs.channel_info):
                info = self.inputs.channel_info
                new_channel['biasreg'] = info[0]
                new_channel['biasfwhm'] = info[1]
                new_channel['write'] = [int(info[2][0]), int(info[2][1])]
            return [new_channel]
        elif opt == 'tissues':
            new_tissues = []
            for tissue in val:
                new_tissue = {}
                # SPM addresses a frame of the 4D TPM as 'filename,index'
                new_tissue['tpm'] = np.array([','.join([tissue[0][0],
                                                        str(tissue[0][1])])],
                                             dtype=object)
                new_tissue['ngaus'] = tissue[1]
                new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])]
                new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])]
                new_tissues.append(new_tissue)
            return new_tissues
        elif opt == 'write_deformation_fields':
            # booleans converted to the 0/1 integers SPM expects
            return super(NewSegment, self)._format_arg(opt, spec,
                                                       [int(val[0]),
                                                        int(val[1])])
        else:
            return super(NewSegment, self)._format_arg(opt, spec, val)
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['native_class_images'] = []
        outputs['dartel_input_images'] = []
        outputs['normalized_class_images'] = []
        outputs['modulated_class_images'] = []
        outputs['transformation_mat'] = []
        outputs['bias_corrected_images'] = []
        outputs['bias_field_images'] = []
        outputs['inverse_deformation_field'] = []
        outputs['forward_deformation_field'] = []
        # SPM defaults to 5 tissue classes when none are specified
        n_classes = 5
        if isdefined(self.inputs.tissues):
            n_classes = len(self.inputs.tissues)
        # one inner list per tissue class
        for i in range(n_classes):
            outputs['native_class_images'].append([])
            outputs['dartel_input_images'].append([])
            outputs['normalized_class_images'].append([])
            outputs['modulated_class_images'].append([])
        for filename in self.inputs.channel_files:
            pth, base, ext = split_filename(filename)
            if isdefined(self.inputs.tissues):
                # file prefixes: c (native), rc (DARTEL import),
                # wc (normalised), mwc (modulated normalised)
                for i, tissue in enumerate(self.inputs.tissues):
                    if tissue[2][0]:
                        outputs['native_class_images'][i].append(
                            os.path.join(pth, "c%d%s.nii" % (i + 1, base)))
                    if tissue[2][1]:
                        outputs['dartel_input_images'][i].append(
                            os.path.join(pth, "rc%d%s.nii" % (i + 1, base)))
                    if tissue[3][0]:
                        outputs['normalized_class_images'][i].append(
                            os.path.join(pth, "wc%d%s.nii" % (i + 1, base)))
                    if tissue[3][1]:
                        outputs['modulated_class_images'][i].append(
                            os.path.join(pth, "mwc%d%s.nii" % (i + 1, base)))
            else:
                # without explicit tissues only native maps are predictable
                for i in range(n_classes):
                    outputs['native_class_images'][i].append(
                        os.path.join(pth, "c%d%s.nii" % (i + 1, base)))
            outputs['transformation_mat'].append(
                os.path.join(pth, "%s_seg8.mat" % base))
            if isdefined(self.inputs.write_deformation_fields):
                # iy_* is the inverse field, y_* the forward field
                if self.inputs.write_deformation_fields[0]:
                    outputs['inverse_deformation_field'].append(
                        os.path.join(pth, "iy_%s.nii" % base))
                if self.inputs.write_deformation_fields[1]:
                    outputs['forward_deformation_field'].append(
                        os.path.join(pth, "y_%s.nii" % base))
            if isdefined(self.inputs.channel_info):
                if self.inputs.channel_info[2][0]:
                    outputs['bias_corrected_images'].append(
                        os.path.join(pth, "m%s.nii" % (base)))
                if self.inputs.channel_info[2][1]:
                    outputs['bias_field_images'].append(
                        os.path.join(pth, "BiasField_%s.nii" % (base)))
        return outputs
class SmoothInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`Smooth` interface."""
    in_files = InputMultiPath(File(exists=True), field='data',
                              desc='list of files to smooth',
                              mandatory=True, copyfile=False)
    # a scalar fwhm is expanded to an isotropic 3-vector in Smooth._format_arg
    fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
                         traits.Float(), field='fwhm',
                         desc='3-list of fwhm for each dimension')
    data_type = traits.Int(field='dtype',
                           desc='Data type of the output images')
    implicit_masking = traits.Bool(field='im',
                                   desc=('A mask implied by a particular'
                                         'voxel value'))
    out_prefix = traits.String('s', field='prefix', usedefault=True,
                               desc='smoothed output prefix')
class SmoothOutputSpec(TraitedSpec):
    """Output specification for the :class:`Smooth` interface."""
    smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files')
class Smooth(SPMCommand):
    """Use spm_smooth for 3D Gaussian smoothing of image volumes.
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=55
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> smooth = spm.Smooth()
    >>> smooth.inputs.in_files = 'functional.nii'
    >>> smooth.inputs.fwhm = [4, 4, 4]
    >>> smooth.run() # doctest: +SKIP
    """
    input_spec = SmoothInputSpec
    output_spec = SmoothOutputSpec
    _jobtype = 'spatial'
    _jobname = 'smooth'

    def _format_arg(self, opt, spec, val):
        if opt == 'in_files':
            return scans_for_fnames(filename_to_list(val))
        if opt == 'fwhm':
            # SPM needs one fwhm value per dimension: expand scalars and
            # single-element lists to an isotropic 3-vector
            if isinstance(val, list):
                return [val[0]] * 3 if len(val) == 1 else val
            return [val, val, val]
        return super(Smooth, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = self._outputs().get()
        prefix = self.inputs.out_prefix
        outputs['smoothed_files'] = [
            fname_presuffix(imgf, prefix=prefix)
            for imgf in filename_to_list(self.inputs.in_files)]
        return outputs
class DARTELInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`DARTEL` interface."""
    # outer list: tissue classes; inner list: one image per subject
    image_files = traits.List(traits.List(File(exists=True)),
                              desc="A list of files to be segmented",
                              field='warp.images', copyfile=False,
                              mandatory=True)
    template_prefix = traits.Str('Template', usedefault=True,
                                 field='warp.settings.template',
                                 desc='Prefix for template')
    regularization_form = traits.Enum('Linear', 'Membrane', 'Bending',
                                      field='warp.settings.rform',
                                      desc=('Form of regularization energy '
                                            'term'))
    iteration_parameters = traits.List(traits.Tuple(traits.Range(1, 10),
                                                    traits.Tuple(traits.Float,
                                                                 traits.Float,
                                                                 traits.Float),
                                                    traits.Enum(1, 2, 4, 8, 16,
                                                                32, 64, 128,
                                                                256, 512),
                                                    traits.Enum(0, 0.5, 1, 2,
                                                                4, 8, 16, 32)),
                                       minlen=3,
                                       maxlen=12,
                                       field='warp.settings.param',
                                       desc="""List of tuples for each iteration
- Inner iterations
- Regularization parameters
- Time points for deformation model
- smoothing parameter
""")
    optimization_parameters = traits.Tuple(traits.Float, traits.Range(1, 8),
                                           traits.Range(1, 8),
                                           field='warp.settings.optim',
                                           desc="""
Optimization settings a tuple
- LM regularization
- cycles of multigrid solver
- relaxation iterations
""")
class DARTELOutputSpec(TraitedSpec):
    """Output specification for the :class:`DARTEL` interface."""
    final_template_file = File(exists=True, desc='final DARTEL template')
    template_files = traits.List(File(exists=True),
                                 desc=('Templates from different stages of '
                                       'iteration'))
    dartel_flow_fields = traits.List(File(exists=True),
                                     desc='DARTEL flow fields')
class DARTEL(SPMCommand):
    """Use spm DARTEL to create a template and flow fields
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=185
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> dartel = spm.DARTEL()
    >>> dartel.inputs.image_files = [['rc1s1.nii','rc1s2.nii'],['rc2s1.nii', 'rc2s2.nii']]
    >>> dartel.run() # doctest: +SKIP
    """
    input_spec = DARTELInputSpec
    output_spec = DARTELOutputSpec
    _jobtype = 'tools'
    _jobname = 'dartel'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'image_files':
            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
        if opt == 'regularization_form':
            return {'Linear': 0, 'Membrane': 1, 'Bending': 2}[val]
        if opt == 'iteration_parameters':
            # unpack each (its, rparam, K, slam) tuple into an SPM struct
            return [{'its': inner_its,
                     'rparam': list(reg_params),
                     'K': time_points,
                     'slam': smoothing}
                    for inner_its, reg_params, time_points, smoothing in val]
        if opt == 'optimization_parameters':
            lmreg, cycles, relax_its = val
            return [{'lmreg': lmreg, 'cyc': cycles, 'its': relax_its}]
        return super(DARTEL, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = self._outputs().get()
        template = self.inputs.template_prefix
        # DARTEL writes <prefix>_1.nii .. <prefix>_6.nii; number 6 is final
        outputs['template_files'] = [
            os.path.realpath('%s_%d.nii' % (template, step))
            for step in range(1, 7)]
        outputs['final_template_file'] = os.path.realpath(
            '%s_6.nii' % template)
        # one u_* flow field per subject, named after the first-class images
        outputs['dartel_flow_fields'] = []
        for filename in self.inputs.image_files[0]:
            _, base, ext = split_filename(filename)
            outputs['dartel_flow_fields'].append(os.path.realpath(
                'u_%s_%s%s' % (base, template, ext)))
        return outputs
class DARTELNorm2MNIInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`DARTELNorm2MNI` interface."""
    template_file = File(exists=True,
                         desc="DARTEL template",
                         field='mni_norm.template', copyfile=False,
                         mandatory=True)
    flowfield_files = InputMultiPath(File(exists=True),
                                     desc="DARTEL flow fields u_rc1*",
                                     field='mni_norm.data.subjs.flowfields',
                                     mandatory=True)
    apply_to_files = InputMultiPath(File(exists=True),
                                    desc="Files to apply the transform to",
                                    field='mni_norm.data.subjs.images',
                                    mandatory=True, copyfile=False)
    voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
                              desc="Voxel sizes for output file",
                              field='mni_norm.vox')
    # BUGFIX: desc previously read "Voxel sizes for output file" — a
    # copy-paste of the voxel_size description; this is the bounding box
    # (field mni_norm.bb).
    bounding_box = traits.Tuple(traits.Float, traits.Float, traits.Float,
                                traits.Float, traits.Float, traits.Float,
                                desc="Bounding box for output file",
                                field='mni_norm.bb')
    modulate = traits.Bool(field='mni_norm.preserve',
                           desc=("Modulate out images - no modulation "
                                 "preserves concentrations"))
    fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
                         traits.Float(), field='mni_norm.fwhm',
                         desc='3-list of fwhm for each dimension')
class DARTELNorm2MNIOutputSpec(TraitedSpec):
    """Output specification for the :class:`DARTELNorm2MNI` interface."""
    normalized_files = OutputMultiPath(File(exists=True),
                                       desc='Normalized files in MNI space')
    normalization_parameter_file = File(exists=True,
                                        desc=('Transform parameters to MNI '
                                              'space'))
class DARTELNorm2MNI(SPMCommand):
    """Use spm DARTEL to normalize data to MNI space
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=188
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> nm = spm.DARTELNorm2MNI()
    >>> nm.inputs.template_file = 'Template_6.nii'
    >>> nm.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s3_Template.nii']
    >>> nm.inputs.apply_to_files = ['c1s1.nii', 'c1s3.nii']
    >>> nm.inputs.modulate = True
    >>> nm.run() # doctest: +SKIP
    """
    input_spec = DARTELNorm2MNIInputSpec
    output_spec = DARTELNorm2MNIOutputSpec
    _jobtype = 'tools'
    _jobname = 'dartel'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'template_file':
            # SPM expects a cell array of strings
            return np.array([val], dtype=object)
        elif opt == 'flowfield_files':
            return scans_for_fnames(val, keep4d=True)
        elif opt == 'apply_to_files':
            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
        elif opt in ('voxel_size', 'bounding_box'):
            # tuples become plain lists for the matlab job struct
            return list(val)
        elif opt == 'fwhm':
            return val if isinstance(val, list) else [val, val, val]
        return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = self._outputs().get()
        pth, base, ext = split_filename(self.inputs.template_file)
        outputs['normalization_parameter_file'] = os.path.realpath(
            base + '_2mni.mat')
        outputs['normalized_files'] = []
        prefix = "w"
        if isdefined(self.inputs.modulate) and self.inputs.modulate:
            prefix = 'm' + prefix
        # BUGFIX: the original compared ``self.inputs.fwhm > 0`` directly,
        # but fwhm may be a 3-element list; that comparison raises TypeError
        # on Python 3 (and was always True on Python 2).
        fwhm = self.inputs.fwhm
        if not isdefined(fwhm):
            # no fwhm given: outputs are still smoothed with SPM's default,
            # so they carry the 's' prefix (matches original behavior)
            smoothed = True
        elif isinstance(fwhm, list):
            smoothed = any(f > 0 for f in fwhm)
        else:
            smoothed = fwhm > 0
        if smoothed:
            prefix = 's' + prefix
        for filename in self.inputs.apply_to_files:
            pth, base, ext = split_filename(filename)
            outputs['normalized_files'].append(
                os.path.realpath('%s%s%s' % (prefix, base, ext)))
        return outputs
class CreateWarpedInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`CreateWarped` interface."""
    image_files = InputMultiPath(File(exists=True),
                                 desc="A list of files to be warped",
                                 field='crt_warped.images', copyfile=False,
                                 mandatory=True)
    flowfield_files = InputMultiPath(File(exists=True),
                                     desc="DARTEL flow fields u_rc1*",
                                     field='crt_warped.flowfields',
                                     copyfile=False,
                                     mandatory=True)
    iterations = traits.Range(low=0, high=9,
                              desc=("The number of iterations: log2(number of "
                                    "time steps)"),
                              field='crt_warped.K')
    interp = traits.Range(low=0, high=7, field='crt_warped.interp',
                          desc='degree of b-spline used for interpolation')
    # when set, output files get the 'mw' prefix instead of 'w'
    modulate = traits.Bool(field='crt_warped.jactransf',
                           desc="Modulate images")
class CreateWarpedOutputSpec(TraitedSpec):
    """Output specification for the :class:`CreateWarped` interface."""
    warped_files = traits.List(File(exists=True, desc='final warped files'))
class CreateWarped(SPMCommand):
    """Apply a flow field estimated by DARTEL to create warped images
    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=190
    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> create_warped = spm.CreateWarped()
    >>> create_warped.inputs.image_files = ['rc1s1.nii', 'rc1s2.nii']
    >>> create_warped.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s2_Template.nii']
    >>> create_warped.run() # doctest: +SKIP
    """
    input_spec = CreateWarpedInputSpec
    output_spec = CreateWarpedOutputSpec
    _jobtype = 'tools'
    _jobname = 'dartel'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'image_files':
            return scans_for_fnames(val, keep4d=True,
                                    separate_sessions=True)
        if opt == 'flowfield_files':
            return scans_for_fnames(val, keep4d=True)
        return super(CreateWarped, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = self._outputs().get()
        # modulated output gets an extra 'm' before the 'w' prefix
        modulated = (isdefined(self.inputs.modulate) and
                     self.inputs.modulate)
        out_prefix = 'mw' if modulated else 'w'
        outputs['warped_files'] = []
        for filename in self.inputs.image_files:
            _, base, ext = split_filename(filename)
            outputs['warped_files'].append(
                os.path.realpath('%s%s%s' % (out_prefix, base, ext)))
        return outputs
class ApplyDeformationFieldInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`ApplyDeformations` interface."""
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              field='fnames')
    # first component of the composition: the deformation itself
    deformation_field = File(exists=True, mandatory=True, field='comp{1}.def')
    # second component: identity transform in the space of this volume
    reference_volume = File(exists=True, mandatory=True,
                            field='comp{2}.id.space')
    interp = traits.Range(low=0, high=7, field='interp',
                          desc='degree of b-spline used for interpolation')
class ApplyDeformationFieldOutputSpec(TraitedSpec):
    """Output specification for the :class:`ApplyDeformations` interface."""
    out_files = OutputMultiPath(File(exists=True))
class ApplyDeformations(SPMCommand):
    """Apply a deformation field to a set of images via SPM's util/defs
    batch node; outputs are written with a 'w' prefix.
    """
    input_spec = ApplyDeformationFieldInputSpec
    output_spec = ApplyDeformationFieldOutputSpec
    _jobtype = 'util'
    _jobname = 'defs'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'deformation_field':
            return scans_for_fnames([val], keep4d=True,
                                    separate_sessions=False)
        if opt == 'reference_volume':
            return scans_for_fnames([val], keep4d=False,
                                    separate_sessions=False)
        if opt == 'in_files':
            return scans_for_fnames(val, keep4d=False,
                                    separate_sessions=False)
        return super(ApplyDeformations, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_files'] = []
        for in_file in self.inputs.in_files:
            _, fname = os.path.split(in_file)
            outputs['out_files'].append(os.path.realpath('w%s' % fname))
        return outputs
class VBMSegmentInputSpec(SPMCommandInputSpec):
    """Input specification for the :class:`VBMSegment` (VBM8) interface."""
    in_files = InputMultiPath(
        File(exists=True),
        desc="A list of files to be segmented",
        field='estwrite.data', copyfile=False, mandatory=True)
    tissues = File(
        exists=True, field='estwrite.tpm',
        desc='tissue probability map')
    gaussians_per_class = traits.Tuple(
        (2, 2, 2, 3, 4, 2), *([traits.Int()] * 6),
        usedefault=True,
        desc='number of gaussians for each tissue class')
    bias_regularization = traits.Enum(
        0.0001,
        (0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10),
        field='estwrite.opts.biasreg', usedefault=True,
        desc='no(0) - extremely heavy (10)')
    bias_fwhm = traits.Enum(
        60,
        (30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'),
        field='estwrite.opts.biasfwhm',
        usedefault=True,
        desc='FWHM of Gaussian smoothness of bias')
    sampling_distance = traits.Float(
        3, usedefault=True, field='estwrite.opts.samp',
        desc='Sampling distance on data for parameter estimation')
    warping_regularization = traits.Float(
        4, usedefault=True, field='estwrite.opts.warpreg',
        desc='Controls balance between parameters and data')
    # 'high' selects DARTEL-based normalization (see dartel_template)
    spatial_normalization = traits.Enum(
        'high', 'low', usedefault=True,)
    dartel_template = File(
        exists=True,
        field='estwrite.extopts.dartelwarp.normhigh.darteltpm')
    use_sanlm_denoising_filter = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.extopts.sanlm',
        desc="0=No denoising, 1=denoising,2=denoising multi-threaded")
    mrf_weighting = traits.Float(
        0.15, usedefault=True, field='estwrite.extopts.mrf')
    cleanup_partitions = traits.Int(
        1, usedefault=True, field='estwrite.extopts.cleanup',
        desc="0=None,1=light,2=thorough")
    display_results = traits.Bool(
        True, usedefault=True, field='estwrite.extopts.print')
    gm_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.GM.native',)
    gm_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.GM.warped',)
    gm_modulated_normalized = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.output.GM.modulated',
        desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
    gm_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.GM.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    wm_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.WM.native',)
    wm_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.WM.warped',)
    wm_modulated_normalized = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.output.WM.modulated',
        desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
    wm_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.WM.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    csf_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.CSF.native',)
    csf_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.CSF.warped',)
    csf_modulated_normalized = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.output.CSF.modulated',
        desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
    csf_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.CSF.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    bias_corrected_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.bias.native',)
    bias_corrected_normalized = traits.Bool(
        True, usedefault=True, field='estwrite.output.bias.warped',)
    bias_corrected_affine = traits.Bool(
        False, usedefault=True, field='estwrite.output.bias.affine',)
    pve_label_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.label.native')
    pve_label_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.label.warped')
    pve_label_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.label.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    jacobian_determinant = traits.Bool(
        False, usedefault=True, field='estwrite.jacobian.warped')
    deformation_field = traits.Tuple(
        (0, 0), traits.Bool, traits.Bool, usedefault=True,
        field='estwrite.output.warps',
        desc='forward and inverse field')
class VBMSegmentOuputSpec(TraitedSpec):
    """Outputs of the VBM8 segmentation interface.

    NOTE(review): the class name misspells "Output"; it is kept as-is for
    backward compatibility with existing imports.
    """
    # Per-tissue lists: the outer list index is the tissue class
    # (0=GM, 1=WM, 2=CSF), the inner list holds one file per input image.
    native_class_images = traits.List(traits.List(File(exists=True)),
                                      desc='native space probability maps')
    dartel_input_images = traits.List(traits.List(File(exists=True)),
                                      desc='dartel imported class images')
    normalized_class_images = traits.List(traits.List(File(exists=True)),
                                          desc='normalized class images')
    modulated_class_images = traits.List(traits.List(File(exists=True)),
                                         desc=('modulated+normalized class '
                                               'images'))
    # One entry per input file.
    transformation_mat = OutputMultiPath(File(exists=True),
                                         desc='Normalization transformation')
    bias_corrected_images = OutputMultiPath(
        File(exists=True),
        desc='bias corrected images')
    normalized_bias_corrected_images = OutputMultiPath(
        File(exists=True),
        desc='bias corrected images')
    # Partial-volume-estimation label images and deformation fields.
    pve_label_native_images = OutputMultiPath(File(exists=True))
    pve_label_normalized_images = OutputMultiPath(File(exists=True))
    pve_label_registered_images = OutputMultiPath(File(exists=True))
    forward_deformation_field = OutputMultiPath(File(exists=True))
    inverse_deformation_field = OutputMultiPath(File(exists=True))
    jacobian_determinant_images = OutputMultiPath(File(exists=True))
class VBMSegment(SPMCommand):
    """Use VBM8 toolbox to separate structural images into different
    tissue classes.
    Example
    -------
    >>> import nipype.interfaces.spm as spm
    >>> seg = spm.VBMSegment()
    >>> seg.inputs.tissues = 'TPM.nii'
    >>> seg.inputs.dartel_template = 'Template_1_IXI550_MNI152.nii'
    >>> seg.inputs.bias_corrected_native = True
    >>> seg.inputs.gm_native = True
    >>> seg.inputs.wm_native = True
    >>> seg.inputs.csf_native = True
    >>> seg.inputs.pve_label_native = True
    >>> seg.inputs.deformation_field = (True, False)
    >>> seg.run() # doctest: +SKIP
    """
    input_spec = VBMSegmentInputSpec
    output_spec = VBMSegmentOuputSpec
    _jobtype = 'tools'
    _jobname = 'vbm8'

    def _list_outputs(self):
        """Predict the files VBM8 will write for each input image.

        Filenames follow the SPM/VBM8 prefix conventions visible below:
        'p<i>' native tissue maps, 'rp<i>' DARTEL imports, 'w'/'mw'/'m0w'
        normalized/modulated maps (with an 'r' infix when high-dimensional
        DARTEL warping is enabled), 'm'/'wm' bias-corrected images and
        'y_'/'iy_' deformation fields.
        """
        outputs = self._outputs().get()
        do_dartel = self.inputs.spatial_normalization
        dartel_px = ''
        if do_dartel:
            # high-dimensional (DARTEL) warping adds an 'r' infix
            dartel_px = 'r'
        outputs['native_class_images'] = [[], [], []]
        outputs['dartel_input_images'] = [[], [], []]
        outputs['normalized_class_images'] = [[], [], []]
        outputs['modulated_class_images'] = [[], [], []]
        outputs['transformation_mat'] = []
        outputs['bias_corrected_images'] = []
        outputs['normalized_bias_corrected_images'] = []
        outputs['inverse_deformation_field'] = []
        outputs['forward_deformation_field'] = []
        outputs['jacobian_determinant_images'] = []
        outputs['pve_label_native_images'] = []
        outputs['pve_label_normalized_images'] = []
        outputs['pve_label_registered_images'] = []
        for filename in self.inputs.in_files:
            pth, base, ext = split_filename(filename)
            outputs['transformation_mat'].append(
                os.path.join(pth, "%s_seg8.mat" % base))
            for i, tis in enumerate(['gm', 'wm', 'csf']):
                # native space
                if getattr(self.inputs, '%s_native' % tis):
                    outputs['native_class_images'][i].append(
                        os.path.join(pth, "p%d%s.nii" % (i + 1, base)))
                if getattr(self.inputs, '%s_dartel' % tis) == 1:
                    outputs['dartel_input_images'][i].append(
                        os.path.join(pth, "rp%d%s.nii" % (i + 1, base)))
                elif getattr(self.inputs, '%s_dartel' % tis) == 2:
                    outputs['dartel_input_images'][i].append(
                        os.path.join(pth, "rp%d%s_affine.nii" % (i + 1, base)))
                # normalized space
                if getattr(self.inputs, '%s_normalized' % tis):
                    outputs['normalized_class_images'][i].append(
                        os.path.join(pth,
                                     "w%sp%d%s.nii" % (dartel_px,
                                                       i + 1, base)))
                if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1:
                    outputs['modulated_class_images'][i].append(os.path.join(
                        pth, "mw%sp%d%s.nii" % (dartel_px, i + 1, base)))
                elif getattr(self.inputs,
                             '%s_modulated_normalized' % tis) == 2:
                    # BUGFIX: 'm0w*' files are modulated (non-linear only)
                    # maps and belong in 'modulated_class_images'; they were
                    # previously appended to 'normalized_class_images'.
                    outputs['modulated_class_images'][i].append(os.path.join(
                        pth, "m0w%sp%d%s.nii" % (dartel_px, i + 1, base)))
            if self.inputs.pve_label_native:
                outputs['pve_label_native_images'].append(
                    os.path.join(pth, "p0%s.nii" % (base)))
            if self.inputs.pve_label_normalized:
                outputs['pve_label_normalized_images'].append(
                    os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base)))
            if self.inputs.pve_label_dartel == 1:
                outputs['pve_label_registered_images'].append(
                    os.path.join(pth, "rp0%s.nii" % (base)))
            elif self.inputs.pve_label_dartel == 2:
                outputs['pve_label_registered_images'].append(
                    os.path.join(pth, "rp0%s_affine.nii" % (base)))
            if self.inputs.bias_corrected_native:
                outputs['bias_corrected_images'].append(
                    os.path.join(pth, "m%s.nii" % (base)))
            if self.inputs.bias_corrected_normalized:
                outputs['normalized_bias_corrected_images'].append(
                    os.path.join(pth, "wm%s%s.nii" % (dartel_px, base)))
            if self.inputs.deformation_field[0]:
                outputs['forward_deformation_field'].append(
                    os.path.join(pth, "y_%s%s.nii" % (dartel_px, base)))
            if self.inputs.deformation_field[1]:
                outputs['inverse_deformation_field'].append(
                    os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base)))
            if self.inputs.jacobian_determinant and do_dartel:
                outputs['jacobian_determinant_images'].append(
                    os.path.join(pth, "jac_wrp1%s.nii" % (base)))
        return outputs

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm.
        """
        if opt in ['in_files']:
            return scans_for_fnames(val, keep4d=True)
        elif opt in ['spatial_normalization']:
            if val == 'low':
                return {'normlow': []}
        elif opt in ['dartel_template']:
            return np.array([val], dtype=object)
        elif opt in ['deformation_field']:
            # SPM expects 0/1 integers rather than Python booleans.
            return super(VBMSegment, self)._format_arg(
                opt, spec, [int(val[0]), int(val[1])])
        else:
            return super(VBMSegment, self)._format_arg(opt, spec, val)

    def _parse_inputs(self):
        """Assemble the SPM job structure, handling low-dimensional ('low')
        spatial normalization as a special case.
        """
        if self.inputs.spatial_normalization == 'low':
            einputs = super(VBMSegment, self)._parse_inputs(
                skip=('spatial_normalization', 'dartel_template'))
            einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1}
            return einputs
        else:
            # BUGFIX: skip must be a tuple -- a bare ('spatial_normalization')
            # is just a string, for which `in` does substring matching and
            # could silently skip unrelated traits.
            return super(VBMSegment, self)._parse_inputs(
                skip=('spatial_normalization',))
|
carolFrohlich/nipype
|
nipype/interfaces/spm/preprocess.py
|
Python
|
bsd-3-clause
| 80,978
|
[
"Gaussian"
] |
772eb8788723bf0cb268759c12b38fc57f8e20d7795650a8499f447abc45cf02
|
import os
import shutil

from mdtraj import version

# Deploy the built docs under a directory named after the release (or
# 'development' for unreleased builds) so multiple versions can coexist.
if version.release:
    docversion = version.short_version
else:
    docversion = 'development'

# exist_ok guards against reruns of the deploy step on the same worker;
# the previous bare os.mkdir() raised FileExistsError in that case.
os.makedirs("docs/_deploy", exist_ok=True)
shutil.copytree("docs/_build/html", "docs/_deploy/{docversion}"
                .format(docversion=docversion))
|
swails/mdtraj
|
devtools/travis-ci/set_doc_version.py
|
Python
|
lgpl-2.1
| 286
|
[
"MDTraj"
] |
4a6c0242f73801d2a7f951a494e4f4a2a3c06e5fee406b1c68b19d451301dd80
|
from __future__ import print_function
import os
import glob
import argparse
import mdtraj as md
import multiprocessing as mp
from AdaptivePELE.utilities import utilities
def parse_arguments():
    """Parse the command-line options of the postprocessing script.

    Returns a tuple (trajectory_name, path, processors, dont_image).
    Note that --dont_image is a store_false flag, so the returned value is
    True (perform imaging) unless the flag is passed.
    """
    desc = "Program that performs simple postprocessing of MD simulations."
    parser = argparse.ArgumentParser(description=desc)
    # BUGFIX: corrected help-text typo "specfied" -> "specified".
    parser.add_argument("--dont_image", action="store_false", help="Flag to set whether trajectories should be imaged before the alignment (if not specified performs the imaging)")
    parser.add_argument("--processors", type=int, default=4, help="Number of cpus to use")
    parser.add_argument("--trajectory_name", default="trajectory", help="Name of the trajectory files")
    parser.add_argument("--path", default=".", help="Path to the simulation files")
    args = parser.parse_args()
    return args.trajectory_name, args.path, args.processors, args.dont_image
def process_traj(inputs):
    """Image (optionally), align on the backbone and save one trajectory.

    *inputs* is a tuple (topology index, trajectory path, epoch folder,
    trajectory number, imaging flag) as assembled by main().
    """
    top_ind, traj_name, epoch, traj_num, imaging = inputs
    extension = os.path.splitext(traj_name)[1]
    utilities.print_unbuffered("Processing trajectory", traj_name)
    reference = md.load("topologies/topology_%s.pdb" % top_ind)
    backbone_atoms = reference.top.select("backbone")
    trajectory = md.load(traj_name, top="topologies/system_%s.prmtop" % top_ind)
    if imaging:
        trajectory.image_molecules(inplace=True)
    trajectory.superpose(reference, atom_indices=backbone_atoms)
    out_name = "trajectory_postprocessed_%d%s" % (traj_num, extension)
    trajectory.save(os.path.join(epoch, out_name))
def main(trajectory_name, path, n_processors, imaging):
    """Post-process every trajectory of every epoch in parallel.

    Reads each epoch's topologyMapping.txt to pair trajectories with their
    topologies, then dispatches process_traj over a worker pool.
    """
    epochs = utilities.get_epoch_folders(path)
    to_process = []
    trajectory_glob = trajectory_name + "_*"
    for epoch in epochs:
        with open(os.path.join(epoch, "topologyMapping.txt")) as f:
            # mapping file is a single ':'-separated line, one entry per
            # trajectory (1-based trajectory numbers)
            top_map = f.read().rstrip().split(":")
        for traj in glob.glob(os.path.join(path, epoch, trajectory_glob)):
            traj_num = utilities.getTrajNum(traj)
            to_process.append((top_map[traj_num-1], traj, epoch, traj_num, imaging))
    pool = mp.Pool(n_processors)
    try:
        pool.map(process_traj, to_process)
    finally:
        # close+join lets workers exit cleanly even on error; the previous
        # close()+terminate() killed idle workers abruptly and skipped
        # cleanup when map() raised.
        pool.close()
        pool.join()
# Script entry point: parse CLI options, then post-process all epochs.
if __name__ == "__main__":
    traj_names, path_files, proc_num, image = parse_arguments()
    main(traj_names, path_files, proc_num, image)
|
AdaptivePELE/AdaptivePELE
|
AdaptivePELE/analysis/simple_postprocessing.py
|
Python
|
mit
| 2,213
|
[
"MDTraj"
] |
29bd9de8731f622416c7a524d9a0d27aa274a5107fd5152bd32fa5568176207b
|
#==============================================================================
#
# Program: ParaView
# Module: build.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
# Prefer the stdlib argparse; Python <= 2.6 lacks it, so a bundled copy
# (_argparse) is used there as a drop-in replacement.
try:
    import argparse
except ImportError:
    # since Python 2.6 and earlier don't have argparse, we simply provide
    # the source for the same as _argparse and we use it instead.
    import _argparse as argparse
import shutil
import StringIO
import string
import os
from datetime import date
import rjsmin
# Command-line interface: input files (-i), banner template (-b), version
# string (-v) and the two output paths (-o / -m).
parser = argparse.ArgumentParser(description="Concatenation and minimize Javascript files")
parser.add_argument('-b', help="Javascript banner")
parser.add_argument('-v', help="Version string to add to the header")
parser.add_argument('-i', nargs='*', help="Files to concatenate and minimize")
parser.add_argument('-o', help="Output file")
parser.add_argument('-m', help="Minimized output file")
args = parser.parse_args()
output = StringIO.StringIO()

# Concatenate every input file into a single in-memory buffer.
for input_path in args.i:
    with open(input_path, 'r') as fp:
        output.write(fp.read())

# Generate banner from the template, filling in version/date/year.
with open(args.b, 'r') as fp:
    template = string.Template(fp.read())
d = date.today()
subst = dict(version=args.v,
             date=d.strftime("%Y-%m-%d"),
             year=d.strftime("%Y"))
banner = template.substitute(subst)

# NOTE(review): the concatenated (un-minimized) text is written to args.m
# and the minimized text to args.o, which is the opposite of what the
# '-o'/'-m' help strings suggest. Behavior is kept as-is; confirm which
# was intended before swapping.
out_dir = os.path.dirname(args.m)
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
with open(args.m, "w") as fp:
    fp.write(banner)
    fp.write(output.getvalue())

# write minimized output to file
out_dir = os.path.dirname(args.o)
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
with open(args.o, "w") as fp:
    fp.write(banner)
    fp.write(rjsmin.jsmin(output.getvalue()))
|
biddisco/VTK
|
Web/JavaScript/Minimizer/build.py
|
Python
|
bsd-3-clause
| 2,063
|
[
"ParaView"
] |
120b58d35bc5d948a20cfb1b724450d6acbf59b8f2f223a0742c33a719bfc8e3
|
#!/usr/bin/env python
import wx
import wx.html2 as webview
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Minimal browser panel demonstrating wx.html2.WebView.

    Builds a toolbar (open/back/forward/stop/refresh plus a location
    combo box) above a WebView and wires up its navigation events.
    """
    def __init__(self, parent, log, frame=None):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        # current: last URL loaded (updated again when a page finishes)
        self.current = "http://wxPython.org"
        self.frame = frame
        if frame:
            # remember the title so ShutdownDemo can restore it
            self.titleBase = frame.GetTitle()
        sizer = wx.BoxSizer(wx.VERTICAL)
        btnSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.wv = webview.WebView.New(self)
        self.Bind(webview.EVT_WEBVIEW_NAVIGATING, self.OnWebViewNavigating, self.wv)
        self.Bind(webview.EVT_WEBVIEW_LOADED, self.OnWebViewLoaded, self.wv)
        btn = wx.Button(self, -1, "Open", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnOpenButton, btn)
        btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
        btn = wx.Button(self, -1, "<--", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnPrevPageButton, btn)
        btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
        # EVT_UPDATE_UI keeps the button enabled only when history allows
        self.Bind(wx.EVT_UPDATE_UI, self.OnCheckCanGoBack, btn)
        btn = wx.Button(self, -1, "-->", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnNextPageButton, btn)
        btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
        self.Bind(wx.EVT_UPDATE_UI, self.OnCheckCanGoForward, btn)
        btn = wx.Button(self, -1, "Stop", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnStopButton, btn)
        btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
        btn = wx.Button(self, -1, "Refresh", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnRefreshPageButton, btn)
        btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
        txt = wx.StaticText(self, -1, "Location:")
        btnSizer.Add(txt, 0, wx.CENTER|wx.ALL, 2)
        self.location = wx.ComboBox(
            self, -1, "", style=wx.CB_DROPDOWN|wx.TE_PROCESS_ENTER)
        self.location.AppendItems(['http://wxPython.org',
                                   'http://wxwidgets.org',
                                   'http://google.com'])
        self.Bind(wx.EVT_COMBOBOX, self.OnLocationSelect, self.location)
        self.location.Bind(wx.EVT_TEXT_ENTER, self.OnLocationEnter)
        btnSizer.Add(self.location, 1, wx.EXPAND|wx.ALL, 2)
        sizer.Add(btnSizer, 0, wx.EXPAND)
        sizer.Add(self.wv, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.wv.LoadURL(self.current)
    def ShutdownDemo(self):
        """Restore the host frame's original title on demo teardown."""
        # put the frame title back
        if self.frame:
            self.frame.SetTitle(self.titleBase)
    # WebView events
    def OnWebViewNavigating(self, evt):
        """Veto navigation to a specific URL as a demo of evt.Veto()."""
        # this event happens prior to trying to get a resource
        if evt.GetURL() == 'http://www.microsoft.com/':
            if wx.MessageBox("Are you sure you want to visit Microsoft?",
                             style=wx.YES_NO|wx.ICON_QUESTION) == wx.NO:
                # This is how you can cancel loading a page.
                evt.Veto()
    def OnWebViewLoaded(self, evt):
        """Sync the location box with the URL that finished loading."""
        # The full document has loaded
        self.current = evt.GetURL()
        self.location.SetValue(self.current)
    # Control bar events
    def OnLocationSelect(self, evt):
        """Load the URL chosen from the combo box's drop-down list."""
        url = self.location.GetStringSelection()
        self.log.write('OnLocationSelect: %s\n' % url)
        self.wv.LoadURL(url)
    def OnLocationEnter(self, evt):
        """Load the URL typed into the combo box and remember it."""
        url = self.location.GetValue()
        self.location.Append(url)
        self.wv.LoadURL(url)
    def OnOpenButton(self, event):
        """Prompt for a URL or local path and load it."""
        dlg = wx.TextEntryDialog(self, "Open Location",
                                 "Enter a full URL or local path",
                                 self.current, wx.OK|wx.CANCEL)
        dlg.CentreOnParent()
        if dlg.ShowModal() == wx.ID_OK:
            self.current = dlg.GetValue()
            self.wv.LoadURL(self.current)
        dlg.Destroy()
    def OnPrevPageButton(self, event):
        self.wv.GoBack()
    def OnNextPageButton(self, event):
        self.wv.GoForward()
    def OnCheckCanGoBack(self, event):
        event.Enable(self.wv.CanGoBack())
    def OnCheckCanGoForward(self, event):
        event.Enable(self.wv.CanGoForward())
    def OnStopButton(self, evt):
        self.wv.Stop()
    def OnRefreshPageButton(self, evt):
        self.wv.Reload()
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Demo-framework entry point: create and return the demo panel."""
    panel = TestPanel(nb, log)
    return panel
#----------------------------------------------------------------------
# HTML shown in the demo framework's "Overview" tab for this demo.
overview = """<html><body>
<h2><center>DemoName</center></h2>
Say something nice here
</body></html>
"""
# Allow running this demo standalone through the wxPython demo runner.
if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
dnxbjyj/python-basic
|
gui/wxpython/wxPython-demo-4.0.1/demo/HTML2_WebView.py
|
Python
|
mit
| 4,783
|
[
"VisIt"
] |
8651332d64ecf753012ae7f65c0e24d81c113705796afeb04b3b299d04fcccec
|
# coding: utf-8
# Copyright (c) AiiDA Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides conversion between AiiDA StructureData object and
pymatgen Molecule/Structure objects.
"""
__author__ = "Andrius Merkys"
__copyright__ = "Copyright 2015, AiiDA Development Team"
__version__ = "1.0"
__maintainer__ = "Andrius Merkys"
__email__ = "andrius.merkys@gmail.com"
__date__ = "Oct 9, 2015"
import warnings
from monty.dev import requires
# Probe for a working (pre-1.0) AiiDA installation; on failure fall back
# to a deprecation warning and disable the adaptor below.
try:
    from aiida.common.exceptions import MissingPluginError
    from aiida.orm import DataFactory
    try:
        StructureData = DataFactory("structure")
    except MissingPluginError:
        # treat a missing structure plugin the same as a missing package
        raise ImportError
    aiida_loaded = True
except ImportError:
    aiida_loaded = False
    # BUGFIX: the first fragment previously ended "...work with" without a
    # trailing space, producing "withAiida" via implicit concatenation.
    warnings.warn(
        "The pymatgen.io.aiida module is deprecated and does not work with "
        "Aiida >= 1.0. It will be removed in pmg 2020. Please install the "
        "aiida package if you need to convert pmg objects to Aiida objects."
    )
@requires(
    aiida_loaded,
    "To use the AiidaStructureAdaptor, you need to have " "aiida installed.",
)
class AiidaStructureAdaptor:
    """Bridge between pymatgen Molecule/Structure objects and AiiDA
    StructureData nodes.
    """
    @staticmethod
    def get_structuredata(structure):
        """Build an AiiDA StructureData node from a pymatgen object.
        Args:
            structure: pymatgen.core.structure.Structure or
                pymatgen.core.structure.Molecule
        Returns:
            AiiDA StructureData object
        """
        return StructureData(pymatgen=structure)
    @staticmethod
    def get_molecule(structuredata):
        """Convert an AiiDA StructureData node to a pymatgen Molecule.
        Args:
            structuredata: AiiDA StructureData object
        Returns:
            pymatgen.core.structure.Molecule
        """
        molecule = structuredata.get_pymatgen_molecule()
        return molecule
    @staticmethod
    def get_structure(structuredata):
        """Convert an AiiDA StructureData node to a pymatgen Structure.
        Args:
            structuredata: AiiDA StructureData object
        Returns:
            pymatgen.core.structure.Structure
        """
        periodic_structure = structuredata.get_pymatgen_structure()
        return periodic_structure
|
gmatteo/pymatgen
|
pymatgen/io/aiida.py
|
Python
|
mit
| 2,320
|
[
"pymatgen"
] |
a6fcd27489f7f662b2bfe3e4d4fce4aa4d1e6f3f1e38a586bdef26c8f67ef22a
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
import re
import warnings
import numpy as np
from skbio.util import cardinal_to_ordinal
# Precompiled patterns used when sanitizing FASTA/FASTQ-style headers:
# any single whitespace char (applied to IDs) and literal newlines
# (applied to descriptions).
_whitespace_regex = re.compile(r'\s')
_newline_regex = re.compile(r'\n')
def _chunk_str(s, n, char):
    """Insert `char` character every `n` characters in string `s`.
    Canonically pronounced "chunkster".
    """
    # Modified from http://stackoverflow.com/a/312464/3776794
    if n < 1:
        raise ValueError(
            "Cannot split string into chunks with n=%d. n must be >= 1." % n)
    pieces = (s[start:start + n] for start in range(0, len(s), n))
    return char.join(pieces)
def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
    """Decode an ASCII-encoded quality string into Phred scores.

    Returns a numpy integer array; raises ValueError if a decoded score
    falls outside the range implied by `variant` or `phred_offset`.
    """
    phred_offset, phred_range = _get_phred_offset_and_range(
        variant, phred_offset,
        ["Must provide either `variant` or `phred_offset` in order to decode "
         "quality scores.",
         "Decoding Solexa quality scores is not currently supported, "
         "as quality scores are always stored as Phred scores in "
         "scikit-bio. Please see the following scikit-bio issue to "
         "track progress on this:\n\t"
         "https://github.com/biocore/scikit-bio/issues/719"])
    # BUGFIX: np.fromstring on text is deprecated (removed in new numpy);
    # encode to ASCII bytes and use the supported np.frombuffer instead.
    qual = np.frombuffer(qual_str.encode('ascii'), dtype=np.uint8) - phred_offset
    if np.any((qual > phred_range[1]) | (qual < phred_range[0])):
        raise ValueError("Decoded Phred score is out of range [%d, %d]."
                         % (phred_range[0], phred_range[1]))
    return qual


def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
    """Encode an iterable of Phred scores as an ASCII quality string.

    Scores above the variant's maximum are clamped with a warning; scores
    below the minimum raise ValueError.
    """
    phred_offset, phred_range = _get_phred_offset_and_range(
        variant, phred_offset,
        ["Must provide either `variant` or `phred_offset` in order to encode "
         "Phred scores.",
         "Encoding Solexa quality scores is not currently supported. "
         "Please see the following scikit-bio issue to track progress "
         "on this:\n\t"
         "https://github.com/biocore/scikit-bio/issues/719"])
    qual_chars = []
    for score in phred:
        if score < phred_range[0]:
            raise ValueError("Phred score %d is out of range [%d, %d]."
                             % (score, phred_range[0], phred_range[1]))
        if score > phred_range[1]:
            warnings.warn(
                "Phred score %d is out of targeted range [%d, %d]. Converting "
                "to %d." % (score, phred_range[0], phred_range[1],
                            phred_range[1]), UserWarning)
            score = phred_range[1]
        qual_chars.append(chr(score + phred_offset))
    return ''.join(qual_chars)


def _get_phred_offset_and_range(variant, phred_offset, errors):
    """Resolve (phred_offset, (min, max)) from a variant name or an
    explicit offset.

    `errors` supplies the two caller-specific messages: errors[0] when
    neither argument is given, errors[1] for the unsupported 'solexa'
    variant. Exactly one of `variant`/`phred_offset` must be provided.
    """
    if variant is None and phred_offset is None:
        raise ValueError(errors[0])
    if variant is not None and phred_offset is not None:
        raise ValueError(
            "Cannot provide both `variant` and `phred_offset`.")
    if variant is not None:
        if variant == 'sanger':
            phred_offset = 33
            phred_range = (0, 93)
        elif variant == 'illumina1.3':
            phred_offset = 64
            phred_range = (0, 62)
        elif variant == 'illumina1.8':
            phred_offset = 33
            phred_range = (0, 62)
        elif variant == 'solexa':
            # Solexa scores are not Phred scores; deliberately unsupported.
            phred_offset = 64
            phred_range = (-5, 62)
            raise NotImplementedError(errors[1])
        else:
            raise ValueError("Unrecognized variant %r." % variant)
    else:
        if not (33 <= phred_offset <= 126):
            raise ValueError(
                "`phred_offset` %d is out of printable ASCII character range."
                % phred_offset)
        phred_range = (0, 126 - phred_offset)
    return phred_offset, phred_range
def _get_nth_sequence(generator, seq_num):
    """Return the *seq_num*-th (1-based) item yielded by *generator*.

    The generator is always closed before returning; a ValueError is
    raised when *seq_num* is invalid or the generator runs out first.
    """
    # reached starts as None (not 0) so an exhausted/empty generator can
    # never compare equal to a valid seq_num below.
    reached = None
    if seq_num is None or seq_num < 1:
        raise ValueError('Invalid sequence number (`seq_num`=%s). `seq_num`'
                         ' must be between 1 and the number of sequences in'
                         ' the file.' % str(seq_num))
    try:
        for reached, record in zip(range(1, seq_num + 1), generator):
            pass
    finally:
        generator.close()
    if reached == seq_num:
        return record
    raise ValueError('Reached end of file before finding the %s sequence.'
                     % cardinal_to_ordinal(seq_num))
def _parse_fasta_like_header(line):
    """Split a '>'-style header line into an (id, description) pair.

    The leading marker character is dropped; an ID is only recognized when
    the header does not start with whitespace.
    """
    content = line[1:].rstrip()
    if not content:
        return '', ''
    if content[0].isspace():
        # no id -- the whole header is a description
        return '', content.lstrip()
    tokens = content.split(None, 1)
    if len(tokens) == 2:
        return tokens[0], tokens[1]
    # no description
    return tokens[0], ''
def _format_fasta_like_records(generator, id_whitespace_replacement,
                               description_newline_replacement, require_qual):
    """Yield (header, sequence-string, quality) triples for writers.

    IDs and descriptions are sanitized with the supplied replacement
    strings; empty sequences are rejected, as are sequences without
    quality scores when *require_qual* is set.
    """
    bad_id = (id_whitespace_replacement is not None and
              '\n' in id_whitespace_replacement)
    bad_desc = (description_newline_replacement is not None and
                '\n' in description_newline_replacement)
    if bad_id or bad_desc:
        raise ValueError(
            "Newline character (\\n) cannot be used to replace whitespace in "
            "sequence IDs, nor to replace newlines in sequence descriptions.")
    for idx, seq in enumerate(generator):
        if len(seq) < 1:
            raise ValueError(
                "%s sequence does not contain any characters (i.e., it is an "
                "empty/blank sequence). Writing empty sequences is not "
                "supported." % cardinal_to_ordinal(idx + 1))
        seq_id = seq.id
        if id_whitespace_replacement is not None:
            seq_id = _whitespace_regex.sub(id_whitespace_replacement, seq_id)
        description = seq.description
        if description_newline_replacement is not None:
            description = _newline_regex.sub(description_newline_replacement,
                                             description)
        if require_qual and seq.quality is None:
            raise ValueError(
                "Cannot write %s sequence because it does not have quality "
                "scores associated with it." % cardinal_to_ordinal(idx + 1))
        header = '%s %s' % (seq_id, description) if description else seq_id
        yield header, str(seq), seq.quality
def _line_generator(fh, skip_blanks=False):
    """Yield each line of *fh* stripped of surrounding whitespace.

    With ``skip_blanks=True``, lines that are empty after stripping are
    dropped.
    """
    if skip_blanks:
        for raw in fh:
            stripped = raw.strip()
            if stripped:
                yield stripped
    else:
        for raw in fh:
            yield raw.strip()
def _too_many_blanks(fh, max_blanks):
    """Return True if *fh* begins with more than *max_blanks* blank lines.

    The handle is rewound to the start before returning, so subsequent
    reads see the whole file.
    """
    exceeded = False
    blank_count = 0
    # Equivalent to iterating _line_generator(fh, skip_blanks=False):
    # each raw line is stripped before the blank test.
    for raw in fh:
        if raw.strip():
            break
        blank_count += 1
        if blank_count > max_blanks:
            exceeded = True
            break
    fh.seek(0)
    return exceeded
|
jensreeder/scikit-bio
|
skbio/io/_base.py
|
Python
|
bsd-3-clause
| 7,311
|
[
"scikit-bio"
] |
229642aa135d7426dd6fcb5520291ca039e06f07c6a17dee4fabc06bd2b85589
|
#!/usr/bin/env python3
import argparse
import logging
import os.path
import vtk
import json
import sys
import numpy as np
from vtk.util.numpy_support import vtk_to_numpy as v2n
from vtk.util.numpy_support import numpy_to_vtk as n2v
"""Evaluates a function on a given mesh, using the VTK Calculator."""
"""
This calculator can calculate vector or scalar field on given mesh.
Example usage
Scalar calculation and writing to given file
vtk_calculator.py -m inputmesh.vtk -f "exp(cos(x)+sin(y))" --data "e^(cos(x)+sin(y))" -o outputmesh.vtk
Vector field calculation and appends to input mesh
vtk_calculator.py -m "inputmesh.vtk" -f "x*iHat+cos(y)*jHat-sin(z)*kHat" -d "MyVectorField"
There is also a diff mode which provides statistic between input data and function calculated
(Note that it only works for scalar data)
For example to calculate difference between given function "x+y"
and existing data "sin(x)" and override the result to "sin(x)" and to save statistics into a file
following command is used.
vtk_calculator.py -m inputmesh.vtu -f "x+y" -d "sin(x)" --diff --stats
If you don't want to override "sin(x)" and prefer to save the newly generated
data into another variable "difference" following command can be used.
vtk_calculator.py -m inputmesh.vtu -f "x+y" -d "difference" --diffdata "sin(x)" --diff --stats
"""
def parse_args():
    """Build and evaluate this script's command-line interface."""
    parser = argparse.ArgumentParser(description=__doc__)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--mesh", "-m", dest="in_meshname",
                       help="The mesh (VTK Unstructured Grid) used as input")
    # BUGFIX: corrected help typo "evalutate" -> "evaluate".
    parser.add_argument("--function", "-f", dest="function", default="eggholder3d",
                        help="""The function to evaluate on the mesh.
    Syntax is the same as used in the calculator object, coordinates are given as e.g. 'cos(x)+y'.
    Alternatively, you can use predefined function
    Default is Eggholder function in 3D (eggholder3d).""")
    group.add_argument(
        "--list-functions",
        dest="listfunctions",
        action="store_true",
        help="Prints list of predefined functions.")
    parser.add_argument("--output", "-o", dest="out_meshname", default=None, help="""The output meshname.
    Default is the same as for the input mesh""")
    parser.add_argument("--data", "-d", dest="data", help="The name of output data.")
    parser.add_argument("--diffdata", "-diffd", dest="diffdata", help="""The name of difference data.
    Required in diff mode.""")
    parser.add_argument("--log", "-l", dest="logging", default="INFO",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="""Set the log level.
    Default is INFO""")
    parser.add_argument("--directory", "-dir", dest="directory", default=None,
                        help="Directory for output files (optional)")
    # BUGFIX: added the missing space between "specified" and "function"
    # (implicit string concatenation previously produced "specifiedfunction").
    parser.add_argument(
        "--diff",
        action='store_true',
        help="Calculate the difference between \"--diffdata\" and the specified "
        "function \"--function\"")
    parser.add_argument("--stats", "-s", action='store_true',
                        help="Store stats of the difference calculation as the separate file inputmesh.stats.json")
    args = parser.parse_args()
    return args
# 2D variants of the predefined functions; the {first}/{second} placeholders
# are filled in with coordinate-axis names below.
twoDFunctions = {
    "franke2d": "0.75*exp(-((9*{first}-2)^2+(9*{second}-2)^2)/4)"
    "+0.75*exp(-(9*{first}+1)^2/49-(9*{second}+1)/10)"
    "+0.5*exp(-((9*{first}-7)^2+(9*{second}-3)^2)/4)"
    "-0.2*exp(-(9*{first}-4)^2-(9*{second}-7)^2)",
    "eggholder2d": "-{first}*sin(sqrt(abs({first}-{second}-47)))"
    "-({second}+47)*sin(sqrt(abs(0.5*{first}+{second}+47)))",
    "rosenbrock2d": "(100*({second}-{first}^2)^2+({first}-1)^2)"
}
# Instantiate every 2D function for each coordinate plane, e.g. "franke2d(xy)".
preDef2DFunctions = {
    f"{name}({arg})": twoDFunctions[name].format(first=arg[0], second=arg[1])
    for name in ["franke2d", "eggholder2d", "rosenbrock2d"]
    for arg in ["xy", "xz", "yz"]
}
# Full 3D predefined functions, keyed by the name accepted by --function.
preDefFunctions = {
    "franke3d": "0.75*exp(-((9*x-2)^2+(9*y-2)^2+(9*z-2)^2)/4)"
    "+0.75*exp(-(9*x+1)^2/49-(9*y+1)/10-(9*z+1)/10)"
    "+0.5*exp(-((9*x-7)^2+(9*y-3)^2+(9*y-5)^2)/4)"
    "-0.2*exp(-(9*x-4)^2-(9*y-7)^2-(9*z-5)^2)",
    "eggholder3d": "-x*sin(sqrt(abs(x-y-47)))-(y+47)*sin(sqrt(abs(0.5*x+y+47)))"
    "-y*sin(sqrt(abs(y-z-47)))-(z+47)*sin(sqrt(abs(0.5*y+z+47)))",
    "rosenbrock3d": "(100*(y-x^2)^2+(x-1)^2)+(100*(z-y^2)^2+(y-1)^2)"
}
preDefFunctions.update(preDef2DFunctions)
# Human-readable blurbs printed by --list-functions.
functionDefinitions = {
    "Franke": "Franke's function has two Gaussian peaks of different heights, and a smaller dip.",
    "Eggholder": "A function has many local maxima. It is difficult to optimize.",
    "Rosenbrock": "A function that is unimodal, and the global minimum lies"
    " in a narrow, parabolic valley."}
def print_predef_functions():
    """Print the predefined functions and their descriptions to stdout."""
    def _dump(mapping):
        # pad keys to the longest one so the ':=' columns line up
        width = max(map(len, mapping.keys()))
        for key, value in mapping.items():
            print(f"{key:{width}} := {value}")

    print("Available predefined functions are:")
    _dump(preDefFunctions)
    print("Definitions of functions are:")
    _dump(functionDefinitions)
def main():
    """Evaluate --function on the input mesh (or, with --diff, the
    difference between --diffdata and the function) and write the result
    to --data on the output mesh, optionally saving difference statistics.
    """
    args = parse_args()
    logging.basicConfig(level=getattr(logging, args.logging))
    if args.listfunctions:
        print_predef_functions()
        return
    assert os.path.isfile(args.in_meshname), "Input mesh file not found. Please check your input mesh \"--mesh\"."
    assert args.data, "Dataname \"--data\" is missing. Please give an dataname for given input."
    out_meshname = args.out_meshname
    if args.out_meshname is None:
        logging.info("No output mesh name is given {} will be used.".format(args.in_meshname))
        out_meshname = args.in_meshname
    if args.diff:
        assert args.diffdata, """The \"--diffdata\" argument is required when running in difference mode (using the \"--diff\" argument).
        Please add a valid \"--diffdata\" argument or type \"--help\" for more information."""
        diffdata = args.diffdata
    if args.function in preDefFunctions:
        inputfunc = preDefFunctions[args.function]
    else:
        inputfunc = args.function
    extension = os.path.splitext(args.in_meshname)[1]
    if (extension == ".vtu"):
        reader = vtk.vtkXMLUnstructuredGridReader()
    elif (extension == ".vtk"):
        reader = vtk.vtkUnstructuredGridReader()
    else:
        # BUGFIX: corrected message typos ("Unkown" -> "Unknown",
        # "hype" -> "type").
        logging.warning("Unknown input file extension please check your input file or type \"--help\" for more information.")
        sys.exit()
    reader.SetFileName(args.in_meshname)
    reader.Update()
    vtk_dataset = reader.GetOutput()
    logging.info("Mesh contains {} points.".format(vtk_dataset.GetNumberOfPoints()))
    calc = vtk.vtkArrayCalculator()
    calc.SetInputData(vtk_dataset)
    # Expose the point coordinates to calculator expressions as x, y, z.
    calc.AddCoordinateScalarVariable("x", 0)
    calc.AddCoordinateScalarVariable("y", 1)
    calc.AddCoordinateScalarVariable("z", 2)
    if args.diff:
        # Check VTK file has dataname
        if not vtk_dataset.GetPointData().HasArray(diffdata):
            logging.warning(
                "Given mesh \"{}\" has no data with given name \"{}\".\nABORTING!\n".format(
                    args.in_meshname, diffdata))
            sys.exit()
        else:
            data = v2n(vtk_dataset.GetPointData().GetAbstractArray(diffdata))
        # Calculate given function on the mesh
        calc.SetFunction(inputfunc)
        calc.SetResultArrayName("function")
        calc.Update()
        func = v2n(calc.GetOutput().GetPointData().GetAbstractArray("function"))
        difference = data - func
        logging.info("Evaluated \"{}\"-\"({})\" on the mesh \"{}\".".format(diffdata, inputfunc, args.in_meshname))
        # Calculate Statistics
        abs_diff = np.absolute(difference)
        num_points = vtk_dataset.GetNumberOfPoints()
        cnt, abs_min, signed_min, abs_max, signed_max = num_points, np.nanmin(
            abs_diff), np.nanmin(difference), np.nanmax(abs_diff), np.nanmax(difference)
        p99, p95, p90, median = np.percentile(abs_diff, [99, 95, 90, 50])
        relative = np.sqrt(np.nansum(np.square(abs_diff)) / abs_diff.size)
        logging.info("Vertex count {}".format(cnt))
        logging.info("Relative l2 error {}".format(relative))
        logging.info("Maximum absolute error per vertex {}".format(abs_max))
        logging.info("Maximum signed error per vertex {}".format(signed_max))
        logging.info("Minimum absolute error per vertex {}".format(abs_min))
        logging.info("Minimum signed error per vertex {}".format(signed_min))
        logging.info("Median absolute error per vertex {}".format(median))
        logging.info("99th percentile of absolute error per vertex {}".format(p99))
        logging.info("95th percentile of absolute error per vertex {}".format(p95))
        logging.info("90th percentile of absolute error per vertex {}".format(p90))
        if args.stats:
            stat_file = os.path.splitext(out_meshname)[0] + ".stats.json"
            logging.info("Saving stats data to \"{}\"".format(stat_file))
            json.dump({
                "count": cnt,
                "abs_min": abs_min,
                "abs_max": abs_max,
                # BUGFIX: the key was previously misspelled "signed_min:"
                # (stray trailing colon) in the emitted JSON.
                "signed_min": signed_min,
                "signed_max": signed_max,
                "median(abs)": median,
                "relative-l2": relative,
                "99th percentile(abs)": p99,
                "95th percentile(abs)": p95,
                "90th percentile(abs)": p90
            }, open(stat_file, "w"))
    else:
        calc.SetFunction(inputfunc)
        logging.info("Evaluated \"{}\" on the input mesh \"{}\".".format(inputfunc, args.in_meshname))
        calc.SetResultArrayName(args.data)
        calc.Update()
    logging.info("Evaluated function saved to \"{}\" variable on output mesh \"{}\"".format(args.data, out_meshname))
    if os.path.splitext(out_meshname)[1] == ".vtk":
        writer = vtk.vtkUnstructuredGridWriter()
        writer.SetFileTypeToBinary()
    elif os.path.splitext(out_meshname)[1] == ".vtu":
        writer = vtk.vtkXMLUnstructuredGridWriter()
    else:
        # BUGFIX: message previously said "and" where "or" was meant.
        raise Exception("Output mesh extension should be '.vtk' or '.vtu'")
    if args.diff:
        diff_vtk = n2v(difference)
        diff_vtk.SetName(args.data)
        vtk_dataset.GetPointData().AddArray(diff_vtk)
        writer.SetInputData(vtk_dataset)
    else:
        writer.SetInputData(calc.GetOutput())
    out_meshname = os.path.basename(os.path.normpath(out_meshname))
    if args.directory:
        directory = os.path.abspath(args.directory)
        os.makedirs(directory, exist_ok=True)
        out_meshname = os.path.join(directory, out_meshname)
    writer.SetFileName(out_meshname)
    writer.Write()
    logging.info("Written output to \"{}\".".format(out_meshname))
    return
# Script entry point.
if __name__ == "__main__":
    main()
|
precice/aste
|
src/vtk_calculator.py
|
Python
|
gpl-3.0
| 10,995
|
[
"Gaussian",
"VTK"
] |
698006bd0e33e0e59f84cb9dea5d9f88495330ffdb56f7eb09c5c912855b639c
|
# -*- coding: utf-8 -*-
"""
ChemPy is a Python package useful for solving problems in chemistry.
"""
from ._url import __url__
from ._release import __version__
from .chemistry import (
Substance,
Reaction,
Equilibrium,
Species,
balance_stoichiometry,
mass_fractions,
)
from .reactionsystem import ReactionSystem
from .henry import Henry
from .util.periodic import atomic_number
from .kinetics import EyringParam, EyringHS, MassAction
from .util.pyutil import ChemPyDeprecationWarning
from . import henry
import sys
# Warn (rather than fail) on interpreters older than 3.5, where this
# release of chempy is unsupported.
if sys.version_info < (3, 5, 0):
    import warnings
    warnings.warn(
        "Use 'chempy<0.7' if using python versions < 3.5", ChemPyDeprecationWarning
    )
|
bjodah/aqchem
|
chempy/__init__.py
|
Python
|
bsd-2-clause
| 709
|
[
"ChemPy"
] |
493fabfa2f95bbb1861f45cfa2856a25087aaf4ad9e613edbf1179a3cb4f6c00
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Automatically find data aggregation window and suggest whether to use TimeOfDay
and DayOfWeek encoder.
Example usage:
(timestamps, values) = read_csv_files('example_data/art_daily_flatmiddle.csv')
(med_sampling_interval, new_sampling_interval, useTimeOfDay,
useDayOfWeek) = get_suggested_timescale_and_encoder(timestamps, values)
"""
import csv
import numpy as np
# Maps the first letter of a convolution mode name to the integer code
# used by NumPy's low-level correlate: 'v'(alid), 's'(ame), 'f'(ull).
_mode_from_name_dict = {
    'v': 0,
    's': 1,
    'f': 2
}
def _convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode `same` returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode `valid` returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
References
----------
.. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
"""
a, v = np.array(a, ndmin=1), np.array(v, ndmin=1)
if len(v) > len(a):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return np.core.multiarray.correlate(a, v[::-1], mode)
def _mode_from_name(mode):
if isinstance(mode, basestring):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def _ricker_wavelet(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-t^2/a^2)``,
where ``A = 2/sqrt(3a)pi^1/3``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
"""
A = 2 / (np.sqrt(3 * a) * (np.pi ** 0.25))
wsq = a ** 2
vec = np.arange(0, points) - (points - 1.0) / 2
tsq = vec ** 2
mod = (1 - tsq / wsq)
gauss = np.exp(-tsq / (2 * wsq))
total = A * mod * gauss
return total
def _cwt(data, wavelet, widths):
  """
  Continuous wavelet transform of `data`.

  Convolves `data` with `wavelet(length, width)` for each entry of `widths`,
  where the kernel length is capped at ``len(data)``.

  Parameters
  ----------
  data : (N,) ndarray
    Data on which to perform the transform.
  wavelet : function
    Wavelet function taking (number_of_points, width); see `_ricker_wavelet`.
  widths : (M,) sequence
    Widths to use for the transform.

  Returns
  -------
  (M, N) ndarray
    One row of 'same'-mode convolution output per width.
  """
  n_samples = len(data)
  out = np.zeros([len(widths), n_samples])
  for row, width in enumerate(widths):
    kernel = wavelet(min(10 * width, n_samples), width)
    out[row, :] = _convolve(data, kernel, mode='same')
  return out
def read_csv_files(fileName):
  """
  Read a csv data file; it must have two columns with header
  "timestamp" and "value".

  :param fileName: path to the csv file
  :returns: (timestamps, values) where timestamps is a numpy datetime64
            array and values is a float32 array
  """
  timestamps = []
  values = []
  # Bug fixes: use a context manager so the file handle is always closed
  # (the original leaked it), and the builtin next() to skip the header
  # (fileReader.next() is Python-2 only).
  with open(fileName, 'r') as f:
    fileReader = csv.reader(f)
    next(fileReader)  # skip header line
    for row in fileReader:
      timestamps.append(row[0])
      values.append(row[1])
  timestamps = np.array(timestamps, dtype='datetime64')
  values = np.array(values, dtype='float32')
  return timestamps, values
def resample_data(timestamp, sig, new_sampling_interval):
  """
  Resample time series data at a new sampling interval using linear
  interpolation.

  Note: the resampling function is using interpolation,
  it may not be appropriate for aggregation purpose.

  :param timestamp: timestamps as a numpy datetime64 array
  :param sig: value of the time series
  :param new_sampling_interval: new sampling interval (numpy timedelta64)
  :returns: (timestamp_new, sig_new) resampled timestamps (datetime64[s])
            and interpolated values
  """
  # Number of samples on the new uniform grid, inclusive of the start point.
  nSampleNew = np.floor((timestamp[-1] - timestamp[0])
                        / new_sampling_interval).astype('int') + 1
  # Bug fix / vectorization: the original filled the array in an xrange
  # loop; xrange is Python-2 only and raises NameError on Python 3.
  timestamp_new = (timestamp[0]
                   + np.arange(nSampleNew) * new_sampling_interval
                   ).astype('datetime64[s]')
  sig_new = np.interp((timestamp_new - timestamp[0]).astype('float32'),
                      (timestamp - timestamp[0]).astype('float32'), sig)
  return timestamp_new, sig_new
def calculate_cwt(sampling_interval, value):
  """
  Calculate the continuous wavelet transformation (CWT) of a series.

  Returns the (edge-trimmed) coefficient matrix, the normalized variance of
  the coefficients over time, and the time scale of each row.

  :param sampling_interval: sampling interval of the time series
  :param value: value of the time series
  :returns: (cwtmatr, cwt_var, time_scale)
  """
  widths = np.logspace(0, np.log10(len(value) / 20), 50)
  T = int(widths[-1])
  # Ricker-wavelet CWT; drop 4*T samples from each end to remove edge effects.
  cwtmatr = _cwt(value, _ricker_wavelet, widths)[:, 4 * T:-4 * T]
  time_scale = widths * sampling_interval * 4
  # Normalized variance of the wavelet power over time.
  cwt_var = np.var(np.abs(cwtmatr), axis=1)
  cwt_var = cwt_var / np.sum(cwt_var)
  return cwtmatr, cwt_var, time_scale
def get_local_maxima(cwt_var, time_scale):
  """
  Find local maxima from the wavelet coefficient variance spectrum.

  A strong maximum is defined as one that is
  (1) at least 10% higher than the nearest local minimum, and
  (2) above the baseline value (uniform spectrum level ``1/len(cwt_var)``).

  The algorithm suggests an encoder if its corresponding periodicity is
  close to a strong maximum:
  (1) horizontally it must lie within the nearest local minima, and
  (2) vertically its variance must be within 50% of the peak.

  :param cwt_var: normalized variance of the CWT coefficients
  :param time_scale: time scale (seconds) associated with each entry
  :returns: (useTimeOfDay, useDayOfWeek, local_min, local_max,
             strong_local_max)
  """
  # Peak & valley detection via sign changes of the first difference.
  local_min = (np.diff(np.sign(np.diff(cwt_var))) > 0).nonzero()[0] + 1
  local_max = (np.diff(np.sign(np.diff(cwt_var))) < 0).nonzero()[0] + 1
  baseline_value = 1.0 / len(cwt_var)
  dayPeriod = 86400.0
  weekPeriod = 604800.0
  cwt_var_at_dayPeriod = np.interp(dayPeriod, time_scale, cwt_var)
  cwt_var_at_weekPeriod = np.interp(weekPeriod, time_scale, cwt_var)
  useTimeOfDay = False
  useDayOfWeek = False
  strong_local_max = []
  # range: xrange is Python-2 only and raises NameError on Python 3.
  for i in range(len(local_max)):
    left_local_min = np.where(np.less(local_min, local_max[i]))[0]
    if len(left_local_min) == 0:
      left_local_min = 0
      left_local_min_value = cwt_var[0]
    else:
      left_local_min = local_min[left_local_min[-1]]
      left_local_min_value = cwt_var[left_local_min]
    right_local_min = np.where(np.greater(local_min, local_max[i]))[0]
    if len(right_local_min) == 0:
      right_local_min = len(cwt_var) - 1
      right_local_min_value = cwt_var[-1]
    else:
      right_local_min = local_min[right_local_min[0]]
      right_local_min_value = cwt_var[right_local_min]
    local_max_value = cwt_var[local_max[i]]
    # Bug fix: np.max(a, b) treats the second argument as `axis` and raises
    # TypeError for float arguments; the intended scalar maximum is the
    # builtin max().
    nearest_local_min_value = max(left_local_min_value,
                                  right_local_min_value)
    if ((local_max_value - nearest_local_min_value) / nearest_local_min_value
        > 0.1 and local_max_value > baseline_value):
      strong_local_max.append(local_max[i])
      if (time_scale[left_local_min] < dayPeriod < time_scale[right_local_min]
          and cwt_var_at_dayPeriod > local_max_value * 0.5):
        useTimeOfDay = True
      if (time_scale[left_local_min] < weekPeriod < time_scale[right_local_min]
          and cwt_var_at_weekPeriod > local_max_value * 0.5):
        useDayOfWeek = True
  return useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max
def determine_aggregation_window(time_scale, cum_cwt_var, thresh,
                                 dt_sec, data_length):
  """
  Suggest a data-aggregation window (in seconds) from the cumulative
  distribution of the wavelet coefficient variance.

  :param time_scale: time scale (seconds) of each spectrum entry
  :param cum_cwt_var: cumulative distribution of the CWT variance
  :param thresh: threshold on the cumulative distribution
  :param dt_sec: raw sampling interval in seconds
  :param data_length: number of records in the series
  :returns: suggested aggregation time scale in seconds
  """
  # Time scale at which the cumulative variance first reaches the threshold.
  cutoff_idx = np.where(cum_cwt_var >= thresh)[0][0]
  window = time_scale[cutoff_idx] / 10.0
  # Never aggregate below four raw sampling intervals.
  window = max(window, dt_sec * 4)
  if data_length < 1000:
    # Short series: keep the original sampling interval.
    window = dt_sec
  else:
    # Make sure more than 1000 records remain after aggregation.
    dt_max = float(data_length) / 1000.0 * dt_sec
    if window > dt_max > dt_sec:
      window = dt_max
  return window
def get_suggested_timescale_and_encoder(timestamp, value, thresh=0.2):
  """
  Recommend aggregation timescales and encoder types for time series data.

  :param timestamp: sampling times of the time series
  :param value: value of the time series
  :param thresh: aggregation threshold
        (default value based on experiments with NAB data)
  :returns: tuple (med_sampling_interval, new_sampling_interval,
        useTimeOfDay, useDayOfWeek) where med_sampling_interval is the
        median sampling interval in seconds, new_sampling_interval is a
        string for the suggested sampling interval (e.g. "300000ms"), and
        the two booleans indicate whether to use the TimeOfDay and
        DayOfWeek encoders
  """
  # The data may be sampled inhomogeneously; take the median of the
  # sampling intervals and resample onto a uniform grid at that interval.
  dt = np.median(np.diff(timestamp))
  med_sampling_interval = dt.astype('float32')
  timestamp, value = resample_data(timestamp, value, dt)
  _, cwt_var, time_scale = calculate_cwt(med_sampling_interval, value)
  cum_cwt_var = np.cumsum(cwt_var)
  # Pick the aggregation window from the cumulative variance spectrum.
  window_sec = determine_aggregation_window(time_scale,
                                            cum_cwt_var,
                                            thresh,
                                            med_sampling_interval,
                                            len(value))
  new_sampling_interval = str(int(window_sec * 1000)) + 'ms'
  # Decide whether TimeOfDay and DayOfWeek encoders are warranted.
  useTimeOfDay, useDayOfWeek = get_local_maxima(cwt_var, time_scale)[:2]
  return (med_sampling_interval, new_sampling_interval, useTimeOfDay,
          useDayOfWeek)
|
ywcui1990/nupic.research
|
htmresearch/frameworks/utils/param_finder.py
|
Python
|
agpl-3.0
| 12,312
|
[
"Gaussian"
] |
8fdad3b776081a70d71bec36bba028eb632bd8cbbf4084974b8b3044cec1bdcf
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit
"""
import os
import shutil
import time
import abc
import collections
import numpy as np
import copy
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .nodes import Dependency, Node, NodeError, NodeResults, FileNode, check_spectator
from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, ElasticTask, DdkTask,
BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask, TaskManager,
DteTask, EphTask, CollinearThenNonCollinearScfTask)
from .utils import Directory
from .netcdf import ETSF_Reader, NetcdfReader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
"PhononWfkqWork",
"GKKPWork",
"BecWork",
"DteWork",
]
class WorkResults(NodeResults):
    """Results object associated with a :class:`Work` node."""
    JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
    @classmethod
    def from_node(cls, work):
        """Initialize an instance from a :class:`Work` instance."""
        new = super().from_node(work)
        # Register every file found in outdir in GridFs.
        # Warning: the files are assumed to be binary.
        gridfs_files = {os.path.basename(path): path
                        for path in work.outdir.list_filepaths()}
        new.register_gridfs_files(**gridfs_files)
        return new
class WorkError(NodeError):
    """Base class for the exceptions raised by :class:`Work` objects."""
class BaseWork(Node, metaclass=abc.ABCMeta):
    """
    Abstract base class for containers of tasks.

    Provides a `subprocess.Popen`-like control interface (poll/wait/
    communicate), core-usage accounting over the contained tasks, helpers to
    pick the next runnable task, and the dispatcher-signal plumbing used to
    detect when all tasks have completed.
    """
    Error = WorkError
    Results = WorkResults
    # interface modeled after subprocess.Popen
    @property
    @abc.abstractmethod
    def processes(self):
        """Return a list of objects that support the `subprocess.Popen` protocol."""
    def poll(self):
        """
        Check if all child processes have terminated. Set and return returncode attribute.
        """
        return [task.poll() for task in self]
    def wait(self):
        """
        Wait for child processed to terminate. Set and return returncode attribute.
        """
        return [task.wait() for task in self]
    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processed, or None, if no data should be
        sent to the children.
        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]
    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]
    @property
    def ncores_reserved(self):
        """
        Returns the number of cores reserved in this moment.
        A core is reserved if it's still not running but
        we have submitted the task to the queue manager.
        """
        return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)
    @property
    def ncores_allocated(self):
        """
        Returns the number of CPUs allocated in this moment.
        A core is allocated if it's running a task or if we have
        submitted a task to the queue manager but the job is still pending.
        """
        return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
    @property
    def ncores_used(self):
        """
        Returns the number of cores used in this moment.
        A core is used if there's a job that is running on it.
        """
        return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or
        None if no task can be submitted at present"
        Raises:
            `StopIteration` if all tasks are done.
        """
        # All the tasks are done so raise an exception
        # that will be handled by the client code.
        if all(task.is_completed for task in self):
            raise StopIteration("All tasks completed.")
        for task in self:
            if task.can_run:
                return task
        # No task found, this usually happens when we have dependencies.
        # Beware of possible deadlocks here!
        logger.warning("Possible deadlock in fetch_task_to_run!")
        return None
    def fetch_alltasks_to_run(self):
        """
        Returns a list with all the tasks that can be submitted.
        Empty list if not task has been found.
        """
        return [task for task in self if task.can_run]
    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""
    def _setup(self, *args, **kwargs):
        # Internal hook invoked by the framework; simply forwards to setup().
        self.setup(*args, **kwargs)
    def connect_signals(self):
        """
        Connect the signals within the work.
        The :class:`Work` is responsible for catching the important signals raised from
        its task and raise new signals when some particular condition occurs.
        """
        for task in self:
            dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
    def disconnect_signals(self):
        """
        Disable the signals within the work. This function reverses the process of `connect_signals`
        """
        for task in self:
            try:
                dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task)
            except dispatcher.errors.DispatcherKeyError as exc:
                logger.debug(str(exc))
    @property
    def all_ok(self):
        """True if every task in the work has reached status `S_OK`."""
        return all(task.status == task.S_OK for task in self)
    #@check_spectator
    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It executes on_all_ok when all tasks in self have reached `S_OK`.
        """
        logger.debug("in on_ok with sender %s" % sender)
        if self.all_ok:
            if self.finalized:
                return AttrDict(returncode=0, message="Work has been already finalized")
            else:
                # Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
                self.finalized = True
                try:
                    results = AttrDict(**self.on_all_ok())
                except Exception as exc:
                    self.history.critical("on_all_ok raises %s" % str(exc))
                    self.finalized = False
                    raise
                # Signal to possible observers that the `Work` reached S_OK
                self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
                # NOTE(review): reads the private _finalized flag rather than
                # the `finalized` attribute set above -- presumably a property
                # backed by _finalized on Node; confirm they stay in sync.
                if self._finalized:
                    self.send_signal(self.S_OK)
                return results
        return AttrDict(returncode=1, message="Not all tasks are OK!")
    #@check_spectator
    def on_all_ok(self):
        """
        This method is called once the `Work` is completed i.e. when all tasks
        have reached status S_OK. Subclasses should provide their own implementation
        Returns:
            Dictionary that must contain at least the following entries:
                returncode:
                    0 on success.
                message:
                    a string that should provide a human-readable description of what has been performed.
        """
        return dict(returncode=0, message="Calling on_all_ok of the base class!")
    def get_results(self, **kwargs):
        """
        Method called once the calculations are completed.
        The base version returns a dictionary task_name: TaskResults for each task in self.
        """
        results = self.Results.from_node(self)
        return results
    def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
        """
        Generate task graph in the DOT language (only parents and children of this work).
        Args:
            engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
            graph_attr: Mapping of (attribute, value) pairs for the graph.
            node_attr: Mapping of (attribute, value) pairs set for all nodes.
            edge_attr: Mapping of (attribute, value) pairs set for all edges.
        Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
        """
        from graphviz import Digraph
        fg = Digraph("work", #filename="work_%s.gv" % os.path.basename(self.workdir),
            engine="fdp" if engine == "automatic" else engine)
        # Set graph attributes.
        # https://www.graphviz.org/doc/info/
        #fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
        fg.attr(label=repr(self))
        #fg.attr(fontcolor="white", bgcolor='purple:pink')
        fg.attr(rankdir="LR", pagedir="BL")
        #fg.attr(constraint="false", pack="true", packMode="clust")
        fg.node_attr.update(color='lightblue2', style='filled')
        #fg.node_attr.update(ranksep='equally')
        # Add input attributes.
        if graph_attr is not None:
            fg.graph_attr.update(**graph_attr)
        if node_attr is not None:
            fg.node_attr.update(**node_attr)
        if edge_attr is not None:
            fg.edge_attr.update(**edge_attr)
        def node_kwargs(node):
            # Common node styling: color from the node's status, label from
            # its position string when available.
            return dict(
                #shape="circle",
                color=node.color_hex,
                label=(str(node) if not hasattr(node, "pos_str") else
                       node.pos_str + "\n" + node.__class__.__name__),
            )
        edge_kwargs = dict(arrowType="vee", style="solid")
        cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
        # Build cluster with tasks in *this* work
        cluster_name = "cluster%s" % self.name
        with fg.subgraph(name=cluster_name) as wg:
            wg.attr(**cluster_kwargs)
            wg.attr(label="%s (%s)" % (self.__class__.__name__, self.name))
            for task in self:
                wg.node(task.name, **node_kwargs(task))
                # Connect task to children
                for child in task.get_children():
                    # Test if child is in this cluster (self).
                    myg = wg if child in self else fg
                    myg.node(child.name, **node_kwargs(child))
                    # Find file extensions required by this task
                    i = [dep.node for dep in child.deps].index(task)
                    edge_label = "+".join(child.deps[i].exts)
                    myg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
                             **edge_kwargs)
                # Connect task to parents
                for parent in task.get_parents():
                    # Test if parent is in this cluster (self).
                    myg = wg if parent in self else fg
                    myg.node(parent.name, **node_kwargs(parent))
                    # Find file extensions required by this task
                    i = [dep.node for dep in task.deps].index(parent)
                    edge_label = "+".join(task.deps[i].exts)
                    myg.edge(parent.name, task.name, label=edge_label, color=parent.color_hex,
                             **edge_kwargs)
        # Treat the case in which we have a work producing output for tasks in *this* work.
        #for work in self.flow:
        #    children = work.get_children()
        #    if not children or all(child not in self for child in children):
        #        continue
        #    cluster_name = "cluster%s" % work.name
        #    seen = set()
        #    for child in children:
        #        if child not in self: continue
        #        # This is not needed, too much confusing
        #        #fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
        #        # Find file extensions required by work
        #        i = [dep.node for dep in child.deps].index(work)
        #        for ext in child.deps[i].exts:
        #            out = "%s (%s)" % (ext, work.name)
        #            fg.node(out)
        #            fg.edge(out, child.name, **edge_kwargs)
        #            key = (cluster_name, out)
        #            if key not in seen:
        #                fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
        #                seen.add(key)
        return fg
class NodeContainer(metaclass=abc.ABCMeta):
    """
    Mixin for `Work` and `Flow` objects that provides convenience wrappers
    for registering tasks of a specific type. Each helper forces the
    appropriate `task_class` and delegates to the container's `register_task`.
    """
    # TODO: Abstract protocol for containers
    @abc.abstractmethod
    def register_task(self, *args, **kwargs):
        """
        Register a task in the container.
        """
        # TODO: shall flow.register_task return a Task or a Work?
    # Type-specific helpers; all share the same delegation pattern.
    def register_scf_task(self, *args, **kwargs):
        """Register a Scf task."""
        kwargs.update(task_class=ScfTask)
        return self.register_task(*args, **kwargs)
    def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs):
        """Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2"""
        kwargs.update(task_class=CollinearThenNonCollinearScfTask)
        return self.register_task(*args, **kwargs)
    def register_nscf_task(self, *args, **kwargs):
        """Register a nscf task."""
        kwargs.update(task_class=NscfTask)
        return self.register_task(*args, **kwargs)
    def register_relax_task(self, *args, **kwargs):
        """Register a task for structural optimization."""
        kwargs.update(task_class=RelaxTask)
        return self.register_task(*args, **kwargs)
    def register_phonon_task(self, *args, **kwargs):
        """Register a phonon task."""
        kwargs.update(task_class=PhononTask)
        return self.register_task(*args, **kwargs)
    def register_elastic_task(self, *args, **kwargs):
        """Register an elastic task."""
        kwargs.update(task_class=ElasticTask)
        return self.register_task(*args, **kwargs)
    def register_ddk_task(self, *args, **kwargs):
        """Register a ddk task."""
        kwargs.update(task_class=DdkTask)
        return self.register_task(*args, **kwargs)
    def register_scr_task(self, *args, **kwargs):
        """Register a screening task."""
        kwargs.update(task_class=ScrTask)
        return self.register_task(*args, **kwargs)
    def register_sigma_task(self, *args, **kwargs):
        """Register a sigma task."""
        kwargs.update(task_class=SigmaTask)
        return self.register_task(*args, **kwargs)
    def register_dde_task(self, *args, **kwargs):
        """Register a Dde task."""
        kwargs.update(task_class=DdeTask)
        return self.register_task(*args, **kwargs)
    def register_dte_task(self, *args, **kwargs):
        """Register a Dte task."""
        kwargs.update(task_class=DteTask)
        return self.register_task(*args, **kwargs)
    def register_bec_task(self, *args, **kwargs):
        """Register a BEC task."""
        kwargs.update(task_class=BecTask)
        return self.register_task(*args, **kwargs)
    def register_bse_task(self, *args, **kwargs):
        """Register a Bethe-Salpeter task."""
        kwargs.update(task_class=BseTask)
        return self.register_task(*args, **kwargs)
    def register_eph_task(self, *args, **kwargs):
        """Register an electron-phonon task."""
        kwargs.update(task_class=EphTask)
        return self.register_task(*args, **kwargs)
    def walknset_vars(self, task_class=None, *args, **kwargs):
        """
        Set the values of the ABINIT variables in the input files of the nodes.
        Args:
            task_class: If not None, only the input files of the tasks belonging
                to class `task_class` are modified.
        Example:
            flow.walknset_vars(ecut=10, kptopt=4)
        """
        def matches(task):
            # When task_class is given, restrict to exact class matches.
            return task_class is None or task.__class__ is task_class
        if self.is_work:
            tasks = list(self)
        elif self.is_flow:
            tasks = self.iflat_tasks()
        else:
            raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)
        for task in tasks:
            if matches(task):
                task.set_vars(*args, **kwargs)
class Work(BaseWork, NodeContainer):
"""
A Work is a list of (possibly connected) tasks.
"""
def __init__(self, workdir=None, manager=None):
"""
Args:
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
super(Work, self).__init__()
self._tasks = []
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
def set_manager(self, manager):
"""Set the :class:`TaskManager` to use to launch the :class:`Task`."""
self.manager = manager.deepcopy()
for task in self:
task.set_manager(manager)
@property
def flow(self):
"""The flow containing this :class:`Work`."""
return self._flow
def set_flow(self, flow):
"""Set the flow associated to this :class:`Work`."""
if not hasattr(self, "_flow"):
self._flow = flow
else:
if self._flow != flow:
raise ValueError("self._flow != flow")
@lazy_property
def pos(self):
"""The position of self in the :class:`Flow`"""
for i, work in enumerate(self.flow):
if self == work:
return i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Directories with (input|output|temporary) data.
# The work will use these directories to connect
# itself to other works and/or to produce new data
# that will be used by its children.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
self.wdir = Directory(self.workdir)
def chroot(self, new_workdir):
self.set_workdir(new_workdir, chroot=True)
for i, task in enumerate(self):
new_tdir = os.path.join(self.workdir, "t" + str(i))
task.set_workdir(new_tdir, chroot=True)
def __len__(self):
return len(self._tasks)
def __iter__(self):
return self._tasks.__iter__()
def __getitem__(self, slice):
return self._tasks[slice]
    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.indir.path_in("in_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.outdir.path_in("out_" + ext)
@property
def processes(self):
return [task.process for task in self]
@property
def all_done(self):
"""True if all the :class:`Task` objects in the :class:`Work` are done."""
return all(task.status >= task.S_DONE for task in self)
@property
def isnc(self):
"""True if norm-conserving calculation."""
return all(task.isnc for task in self)
@property
def ispaw(self):
"""True if PAW calculation."""
return all(task.ispaw for task in self)
@property
def status_counter(self):
"""
Returns a `Counter` object that counts the number of task with
given status (use the string representation of the status as key).
"""
counter = collections.Counter()
for task in self:
counter[str(task.status)] += 1
return counter
def allocate(self, manager=None):
"""
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
"""
for i, task in enumerate(self):
if not hasattr(task, "manager"):
# Set the manager
# Use the one provided in input else the one of the work/flow.
if manager is not None:
task.set_manager(manager)
else:
# Look first in work and then in the flow.
if hasattr(self, "manager"):
task.set_manager(self.manager)
else:
task.set_manager(self.flow.manager)
task_workdir = os.path.join(self.workdir, "t" + str(i))
if not hasattr(task, "workdir"):
task.set_workdir(task_workdir)
else:
if task.workdir != task_workdir:
raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))
def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
"""
Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.
Args:
obj: :class:`AbinitInput` instance or `Task` object.
deps: Dictionary specifying the dependency of this node or list of dependencies
None means that this obj has no dependency.
required_files: List of strings with the path of the files used by the task.
Note that the files must exist when the task is registered.
Use the standard approach based on Works, Tasks and deps
if the files will be produced in the future.
manager:
The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
the `TaskManager` specified during the creation of the :class:`Work`.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
:class:`Task` object
"""
task_workdir = None
if hasattr(self, "workdir"):
task_workdir = os.path.join(self.workdir, "t" + str(len(self)))
if isinstance(obj, Task):
task = obj
else:
# Set the class
if task_class is None:
task_class = AbinitTask
task = task_class.from_input(obj, task_workdir, manager)
self._tasks.append(task)
# Handle possible dependencies given either as dict or list.
if deps is not None:
if hasattr(deps, "items"):
deps = [Dependency(node, exts) for node, exts in deps.items()]
task.add_deps(deps)
# Handle possible dependencies.
if required_files is not None:
task.add_required_files(required_files)
return task
# Needed by NodeContainer
register_task = register
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the working directory."""
return os.path.join(self.workdir, filename)
def setup(self, *args, **kwargs):
    """
    Method called before running the calculations.
    The default implementation is empty.

    Subclasses may override this hook to prepare extra files or state
    before the tasks are built and submitted.
    """
def build(self, *args, **kwargs):
    """Creates the top level directory."""
    # Create the directories of the work (input, output and temporary dirs).
    self.indir.makedirs()
    self.outdir.makedirs()
    self.tmpdir.makedirs()

    # Build dirs and files of each task.
    for task in self:
        task.build(*args, **kwargs)

    # Connect signals within the work.
    self.connect_signals()
@property
def status(self):
    """
    The status of the work, defined as the minimum over the statuses
    of the tasks it contains.
    """
    return self.get_all_status(only_min=True)
def get_all_status(self, only_min=False):
    """
    Returns a list with the status of the tasks in self.

    Args:
        only_min: If True, the minimum of the status is returned.
    """
    if not len(self):
        # No tasks registered yet: report the initialized state.
        init_status = [self.S_INIT]
        return init_status[0] if only_min else init_status

    # Refresh the status of the tasks before collecting it.
    self.check_status()
    status_list = [t.status for t in self]
    return min(status_list) if only_min else status_list
def check_status(self):
    """Check the status of the tasks."""
    # Recompute the status of the tasks
    # Ignore OK and LOCKED tasks.
    for task in self:
        if task.status in (task.S_OK, task.S_LOCKED): continue
        task.check_status()

    # Take into account possible dependencies. Use a list instead of generators
    for task in self:
        if task.status == task.S_LOCKED: continue
        # Promote to READY tasks that have not been submitted yet and whose
        # dependencies have all completed successfully.
        if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status):
            task.set_status(task.S_READY, "Status set to Ready")
def rmtree(self, exclude_wildcard=""):
    """
    Remove all files and directories in the working directory

    Args:
        exclude_wildcard: Optional string with regular expressions separated by `|`.
            Files matching one of the regular expressions will be preserved.
            example: exclude_wildard="*.nc|*.txt" preserves all the files
            whose extension is in ["nc", "txt"].
    """
    if not exclude_wildcard:
        # Nothing to preserve: wipe the whole tree in one call.
        shutil.rmtree(self.workdir)
        return

    keep = WildCard(exclude_wildcard)
    for root, _dirnames, fnames in os.walk(self.workdir):
        for fname in fnames:
            if not keep.match(fname):
                os.remove(os.path.join(root, fname))
def rm_indatadir(self):
    """Remove all the indata directories."""
    # Delegate to each task; tasks own their own indata dirs.
    for task in self:
        task.rm_indatadir()
def rm_outdatadir(self):
    """Remove all the outdata directories."""
    for task in self:
        # FIX: was task.rm_outatadir() (typo) which would raise AttributeError
        # at runtime; docstring also wrongly said "indata".
        task.rm_outdatadir()
def rm_tmpdatadir(self):
    """Remove all the tmpdata directories."""
    # Delegate to each task; tasks own their own tmpdata dirs.
    for task in self:
        task.rm_tmpdatadir()
def move(self, dest, isabspath=False):
    """
    Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
    The destination path must not already exist. If the destination already exists
    but is not a directory, it may be overwritten depending on os.rename() semantics.

    By default, dest is located in the parent directory of self.workdir; use isabspath=True
    to specify an absolute path.
    """
    target = dest if isabspath else os.path.join(os.path.dirname(self.workdir), dest)
    shutil.move(self.workdir, target)
def submit_tasks(self, wait=False):
    """
    Submits the task in self and wait.
    TODO: change name.

    Args:
        wait: If True, block until every submitted task has completed.
    """
    for task in self:
        task.start()

    if wait:
        for task in self: task.wait()
def start(self, *args, **kwargs):
    """
    Start the work. Calls build and _setup first, then submit the tasks.
    Non-blocking call unless wait is set to True

    Keyword Args:
        wait: popped from kwargs (default False); forwarded to submit_tasks
            so the call blocks until completion when True.
    """
    wait = kwargs.pop("wait", False)

    # Initial setup
    self._setup(*args, **kwargs)

    # Build dirs and files.
    self.build(*args, **kwargs)

    # Submit tasks (does not block)
    self.submit_tasks(wait=wait)
def read_etotals(self, unit="Ha"):
    """
    Reads the total energy from the GSR file produced by the task.

    Return a numpy array with the total energies in Hartree
    The array element is set to np.inf if an exception is raised while reading the GSR file.
    """
    if not self.all_done:
        raise self.Error("Some task is still in running/submitted state")

    etotals = []
    for task in self:
        # Open the GSR file and read etotal (Hartree)
        gsr_path = task.outdir.has_abiext("GSR")
        # Sentinel used when the GSR file is missing.
        etot = np.inf
        if gsr_path:
            with ETSF_Reader(gsr_path) as r:
                etot = r.read_value("etotal")
        etotals.append(etot)

    # Convert from Hartree to the requested unit before returning.
    return EnergyArray(etotals, "Ha").to(unit)
def parse_timers(self):
    """
    Parse the TIMER section reported in the ABINIT output files.

    Returns:
        :class:`AbinitTimerParser` object
    """
    # Keep only the output files that actually exist on disk.
    all_paths = [task.output_file.path for task in self]
    existing_paths = [p for p in all_paths if os.path.exists(p)]

    parser = AbinitTimerParser()
    parser.parse(existing_paths)
    return parser
class BandStructureWork(Work):
    """Work for band structure calculations."""

    def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
        """
        Args:
            scf_input: Input for the SCF run
            nscf_input: Input for the NSCF run defining the band structure calculation.
            dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
            workdir: Working directory.
            manager: :class:`TaskManager` object.
        """
        super(BandStructureWork, self).__init__(workdir=workdir, manager=manager)

        # Register the GS-SCF run.
        self.scf_task = self.register_scf_task(scf_input)

        # Register the NSCF run and its dependency.
        self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})

        # Add DOS computation(s) if requested.
        self.dos_tasks = []
        if dos_inputs is not None:
            if not isinstance(dos_inputs, (list, tuple)):
                dos_inputs = [dos_inputs]
            for dos_input in dos_inputs:
                dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
                self.dos_tasks.append(dos_task)

    def plot_ebands(self, **kwargs):
        """
        Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.

        Returns:
            `matplotlib` figure
        """
        with self.nscf_task.open_gsr() as gsr:
            return gsr.ebands.plot(**kwargs)

    def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
        """
        Plot the band structure and the DOS.

        Args:
            dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.
            kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.

        Returns:
            `matplotlib` figure.
        """
        # Bands along the path come from the NSCF task; DOS bands from the selected DOS task.
        with self.nscf_task.open_gsr() as gsr:
            gs_ebands = gsr.ebands
        with self.dos_tasks[dos_pos].open_gsr() as gsr:
            dos_ebands = gsr.ebands

        edos = dos_ebands.get_edos(method=method, step=step, width=width)
        return gs_ebands.plot_with_edos(edos, **kwargs)

    def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
        """
        Plot the band structure and the DOS.

        Args:
            dos_pos: Index of the task from which the DOS should be obtained.
                None is all DOSes should be displayed. Accepts integer or list of integers.
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.
            kwargs: Keyword arguments passed to `plot` method to customize the plot.

        Returns:
            `matplotlib` figure.
        """
        # Normalize dos_pos to a list so membership tests below work uniformly.
        if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos]

        from abipy.electrons.ebands import ElectronDosPlotter
        plotter = ElectronDosPlotter()
        for i, task in enumerate(self.dos_tasks):
            if dos_pos is not None and i not in dos_pos: continue
            with task.open_gsr() as gsr:
                edos = gsr.ebands.get_edos(method=method, step=step, width=width)
                ngkpt = task.get_inpvar("ngkpt")
                # Label each DOS curve with the k-mesh used by the task.
                plotter.add_edos("ngkpt %s" % str(ngkpt), edos)

        return plotter.combiplot(**kwargs)
class RelaxWork(Work):
    """
    Work for structural relaxations. The first task relaxes the atomic position
    while keeping the unit cell parameters fixed. The second task uses the final
    structure to perform a structural relaxation in which both the atomic positions
    and the lattice parameters are optimized.
    """
    def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None):
        """
        Args:
            ion_input: Input for the relaxation of the ions (cell is fixed)
            ioncell_input: Input for the relaxation of the ions and the unit cell.
            workdir: Working directory.
            manager: :class:`TaskManager` object.
            target_dilatmx: Optional target value of the ABINIT dilatmx variable.
                If given, on_ok restarts the ion-cell task with reduced dilatmx
                until the target value is reached.
        """
        super(RelaxWork, self).__init__(workdir=workdir, manager=manager)

        self.ion_task = self.register_relax_task(ion_input)

        # Note:
        # 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
        #    different unit cell parameters if paral_kgb == 1
        #paral_kgb = ion_input[0]["paral_kgb"]
        #if paral_kgb == 1:
        #deps = {self.ion_task: "WFK"}  # --> FIXME: Problem in rwwf
        #deps = {self.ion_task: "DEN"}
        deps = None

        self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)

        # Lock ioncell_task as ion_task should communicate to ioncell_task that
        # the calculation is OK and pass the final structure.
        self.ioncell_task.lock(source_node=self)
        # Flag used to transfer the relaxed structure only once.
        self.transfer_done = False

        self.target_dilatmx = target_dilatmx

    #@check_spectator
    def on_ok(self, sender):
        """
        This callback is called when one task reaches status S_OK.
        If sender == self.ion_task, we update the initial structure
        used by self.ioncell_task and we unlock it so that the job can be submitted.
        """
        logger.debug("in on_ok with sender %s" % sender)

        if sender == self.ion_task and not self.transfer_done:
            # Get the relaxed structure from ion_task
            ion_structure = self.ion_task.get_final_structure()

            # Transfer it to the ioncell task (we do it only once).
            self.ioncell_task._change_structure(ion_structure)
            self.transfer_done = True

            # Unlock ioncell_task so that we can submit it.
            self.ioncell_task.unlock(source_node=self)

        elif sender == self.ioncell_task and self.target_dilatmx:
            # Reduce dilatmx and restart the ion-cell relaxation until the target is reached.
            actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
            if self.target_dilatmx < actual_dilatmx:
                self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
                self.history.info('Converging dilatmx. Value reduce from {} to {}.'
                                  .format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
                self.ioncell_task.reset_from_scratch()

        return super(RelaxWork, self).on_ok(sender)

    def plot_ion_relaxation(self, **kwargs):
        """
        Plot the history of the ion-cell relaxation.
        kwargs are passed to the plot method of :class:`HistFile`

        Return `matplotlib` figure or None if hist file is not found.
        """
        with self.ion_task.open_hist() as hist:
            return hist.plot(**kwargs) if hist else None

    def plot_ioncell_relaxation(self, **kwargs):
        """
        Plot the history of the ion-cell relaxation.
        kwargs are passed to the plot method of :class:`HistFile`

        Return `matplotlib` figure or None if hist file is not found.
        """
        with self.ioncell_task.open_hist() as hist:
            return hist.plot(**kwargs) if hist else None
class G0W0Work(Work):
    """
    Work for general G0W0 calculations.
    All input can be either single inputs or lists of inputs
    """
    def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs,
                 workdir=None, manager=None):
        """
        Args:
            scf_inputs: Input(s) for the SCF run, if it is a list add all but only link
                to the last input (used for convergence studies on the KS band gap)
            nscf_inputs: Input(s) for the NSCF run, if it is a list add all but only
                link to the last (i.e. additional DOS and BANDS)
            scr_inputs: Input for the screening run
            sigma_inputs: List of :class:`AbinitInput` for the self-energy run.
                if scr and sigma are lists of the same length, every sigma gets its own screening.
                if there is only one screening all sigma inputs are linked to this one
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager` object.
        """
        super(G0W0Work, self).__init__(workdir=workdir, manager=manager)

        # True when each sigma input is paired one-to-one with its own screening input.
        spread_scr = (isinstance(sigma_inputs, (list, tuple)) and
                      isinstance(scr_inputs, (list, tuple)) and
                      len(sigma_inputs) == len(scr_inputs))
        #print("spread_scr", spread_scr)

        self.sigma_tasks = []

        # Register the GS-SCF run.
        # register all scf_inputs but link the nscf only the last scf in the list
        # multiple scf_inputs can be provided to perform convergence studies
        if isinstance(scf_inputs, (list, tuple)):
            for scf_input in scf_inputs:
                self.scf_task = self.register_scf_task(scf_input)
        else:
            self.scf_task = self.register_scf_task(scf_inputs)

        # Register the NSCF run (s).
        # Note: only the last registered NSCF task is kept as self.nscf_task.
        if isinstance(nscf_inputs, (list, tuple)):
            for nscf_input in nscf_inputs:
                self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
        else:
            self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"})

        # Register the SCR and SIGMA run(s).
        if spread_scr:
            # One screening task per sigma task.
            for scr_input, sigma_input in zip(scr_inputs, sigma_inputs):
                scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
                sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
                self.sigma_tasks.append(sigma_task)
        else:
            # Sigma work(s) connected to the same screening.
            scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"})
            if isinstance(sigma_inputs, (list, tuple)):
                for inp in sigma_inputs:
                    task = self.register_sigma_task(inp, deps={nscf_task: "WFK", scr_task: "SCR"})
                    self.sigma_tasks.append(task)
            else:
                task = self.register_sigma_task(sigma_inputs, deps={nscf_task: "WFK", scr_task: "SCR"})
                self.sigma_tasks.append(task)
class SigmaConvWork(Work):
    """
    Work for self-energy convergence studies.
    """
    def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
        """
        Args:
            wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
            scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
            sigma_inputs: List of :class:`AbinitInput` for the self-energy runs.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager` object.
        """
        # Cast to node instances.
        wfk_node = Node.as_node(wfk_node)
        scr_node = Node.as_node(scr_node)

        super(SigmaConvWork, self).__init__(workdir=workdir, manager=manager)

        # Accept a single input as well as a list of inputs.
        if not isinstance(sigma_inputs, (list, tuple)):
            sigma_inputs = [sigma_inputs]

        # One SIGMA task per input, each depending on the WFK and SCR nodes.
        for sigma_input in sigma_inputs:
            self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
class BseMdfWork(Work):
    """
    Work for simple BSE calculations in which the self-energy corrections
    are approximated by the scissors operator and the screening is modeled
    with the model dielectric function.
    """
    def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
        """
        Args:
            scf_input: Input for the SCF run.
            nscf_input: Input for the NSCF run.
            bse_inputs: List of Inputs for the BSE run.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager`.
        """
        super(BseMdfWork, self).__init__(workdir=workdir, manager=manager)

        # Register the GS-SCF run.
        self.scf_task = self.register_scf_task(scf_input)

        # Construct the input for the NSCF run.
        self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})

        # Construct the input(s) for the BSE run.
        if not isinstance(bse_inputs, (list, tuple)):
            bse_inputs = [bse_inputs]
        for bse_input in bse_inputs:
            self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})

    def get_mdf_robot(self):
        """Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files."""
        # NOTE(review): the import path "abilab.robots" looks suspicious for an
        # abipy-based module — verify it should not be e.g. "abipy.abilab".
        from abilab.robots import MdfRobot
        robot = MdfRobot()
        # self[2:] skips the SCF and NSCF tasks registered first; the remaining
        # tasks are the BSE tasks whose MDF files we want to collect.
        for task in self[2:]:
            mdf_path = task.outdir.has_abiext(robot.EXT)
            if mdf_path:
                robot.add_file(str(task), mdf_path)
        return robot
class QptdmWork(Work):
    """
    This work parallelizes the calculation of the q-points of the screening.
    It also provides the callback `on_all_ok` that calls mrgscr to merge
    all the partial screening files produced.
    """
    def create_tasks(self, wfk_file, scr_input):
        """
        Create the SCR tasks and register them in self.

        Args:
            wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
            scr_input: Input for the screening calculation.
        """
        assert len(self) == 0
        wfk_file = self.wfk_file = os.path.abspath(wfk_file)

        # Build a temporary work in the tmpdir that will use a shell manager
        # to run ABINIT in order to get the list of q-points for the screening.
        shell_manager = self.manager.to_shell_manager(mpi_procs=1)

        w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)

        fake_input = scr_input.deepcopy()
        fake_task = w.register(fake_input)
        w.allocate()
        w.build()

        # Create the symbolic link and add the magic value
        # nqpdm = -1 to the input to get the list of q-points.
        fake_task.inlink_file(wfk_file)
        fake_task.set_vars({"nqptdm": -1})
        fake_task.start_and_wait()

        # Parse the section with the q-points
        with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
            qpoints = reader.read_value("reduced_coordinates_of_kpoints")
        #print("qpoints")

        # Now we can register the task for the different q-points
        for qpoint in qpoints:
            qptdm_input = scr_input.deepcopy()
            qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
            new_task = self.register_scr_task(qptdm_input, manager=self.manager)
            # Add the garbage collector.
            if self.flow.gc is not None:
                new_task.set_gc(self.flow.gc)

        self.allocate()

    def merge_scrfiles(self, remove_scrfiles=True):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` in sequential on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        If remove_scrfiles is True, the partial SCR files are removed after the merge.
        """
        scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))

        self.history.info("Will call mrgscr to merge %s SCR files:\n" % len(scr_files))
        # Every task must have produced its partial SCR file.
        assert len(scr_files) == len(self)

        mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1)
        final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out")

        if remove_scrfiles:
            for scr_file in scr_files:
                try:
                    os.remove(scr_file)
                except IOError:
                    # Best effort: ignore files that cannot be removed.
                    pass

        return final_scr

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` in sequential on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        """
        final_scr = self.merge_scrfiles()
        return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
# TODO: MergeDdb --> DfptWork(Work) postpone it because it may break pickle.
class MergeDdb:
    """Mixin class for Works that have to merge the DDB files produced by the tasks."""

    def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):
        """
        Build tasks for the computation of Born effective charges and add them to the work.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
                None to use AbiPy default.
            ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
                None to use AbiPy default.

        Return: (ddk_tasks, bec_tasks)
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        # DDK calculations (self-consistent to get electric field).
        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)

        ddk_tasks = []
        for ddk_inp in multi_ddk:
            ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
            ddk_tasks.append(ddk_task)

        # Build the list of inputs for electric field perturbation and phonons
        # Each BEC task is connected to all the previous DDK task and to the scf_task.
        bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        bec_deps.update({scf_task: "WFK"})

        bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance)
        bec_tasks = []
        for bec_inp in bec_inputs:
            bec_task = self.register_bec_task(bec_inp, deps=bec_deps)
            bec_tasks.append(bec_task)

        return ddk_tasks, bec_tasks

    def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True,
                        exclude_tasks=None, include_tasks=None):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.

        Args:
            delete_source_ddbs: True if input DDB should be removed once final DDB is created.
            only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work
                Useful e.g. for finite stress corrections in which the stress in the
                initial configuration should be merged in the final DDB.
            exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
            include_tasks: List of tasks that should be included when merging the partial DDB files.
                Mutually exclusive with exclude_tasks.

        Returns:
            path to the output DDB file
        """
        if exclude_tasks:
            my_tasks = [task for task in self if task not in exclude_tasks]
        elif include_tasks:
            my_tasks = [task for task in self if task in include_tasks]
        else:
            my_tasks = [task for task in self]

        if only_dfpt_tasks:
            ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks
                                           if isinstance(task, DfptTask)]))
        else:
            ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks]))

        self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files))
        # DDB files are always produced so this should never happen!
        if not ddb_files:
            # FIX: the original format string had no %s placeholder, so this line
            # raised "TypeError: not all arguments converted during string
            # formatting" instead of the intended RuntimeError message.
            raise RuntimeError("Cannot find any DDB file to merge by the task of %s" % self)

        # Final DDB file will be produced in the outdir of the work.
        out_ddb = self.outdir.path_in("out_DDB")

        if len(ddb_files) == 1:
            # Avoid the merge. Just copy the DDB file to the outdir of the work.
            shutil.copy(ddb_files[0], out_ddb)
        else:
            # Call mrgddb
            desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
            mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
            mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc,
                         delete_source_ddbs=delete_source_ddbs)

        return out_ddb

    def merge_pot1_files(self, delete_source=True):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgdvdb` in sequential on the local machine to produce
        the final DVDB file in the outdir of the `Work`.

        Args:
            delete_source: True if POT1 files should be removed after (successful) merge.

        Returns:
            path to the output DVDB file. None if no DFPT POT file is found.
        """
        natom = len(self[0].input.structure)
        max_pertcase = 3 * natom

        pot1_files = []
        for task in self:
            if not isinstance(task, DfptTask): continue
            paths = task.outdir.list_filepaths(wildcard="*_POT*")
            for path in paths:
                # Include only atomic perturbations i.e. files whose ext <= 3 * natom
                i = path.rindex("_POT")
                pertcase = int(path[i + 4:].replace(".nc", ""))
                if pertcase <= max_pertcase:
                    pot1_files.append(path)

        # prtpot = 0 disables the output of the DFPT POT files so an empty list is not fatal here.
        if not pot1_files: return None

        self.history.info("Will call mrgdvdb to merge %s files:" % len(pot1_files))

        # Final DVDB file will be produced in the outdir of the work.
        out_dvdb = self.outdir.path_in("out_DVDB")

        if len(pot1_files) == 1:
            # Avoid the merge. Just copy the POT file to the outdir of the work.
            shutil.copy(pot1_files[0], out_dvdb)
        else:
            # FIXME: The merge may require a non-negligible amount of memory if lots of qpts.
            # Besides there are machines such as lemaitre3 that are problematic when
            # running MPI applications on the front-end
            mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
            mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)

        return out_dvdb
class PhononWork(Work, MergeDdb):
    """
    This work consists of nirred Phonon tasks where nirred is
    the number of irreducible atomic perturbations for a given set of q-points.
    It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
    all the partial DDB (POT) files produced. The two files are available in the
    output directory of the Work.
    """

    @classmethod
    def from_scf_task(cls, scf_task, qpoints, is_ngqpt=False, tolerance=None, with_becs=False,
                      ddk_tolerance=None, manager=None):
        """
        Construct a `PhononWork` from a :class:`ScfTask` object.
        The input file for phonons is automatically generated from the input of the ScfTask.
        Each phonon task depends on the WFK file produced by the `scf_task`.

        Args:
            scf_task: ScfTask object.
            qpoints: q-points in reduced coordinates. Accepts single q-point, list of q-points
                or three integers defining the q-mesh if `is_ngqpt`.
            is_ngqpt: True if `qpoints` should be interpreted as divisions instead of q-points.
            tolerance: dict {"varname": value} with the tolerance to be used in the phonon run.
                None to use AbiPy default.
            with_becs: Activate calculation of Electric field and Born effective charges.
            ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs.
                None to use AbiPy default.
            manager: :class:`TaskManager` object.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        if is_ngqpt:
            # Interpret qpoints as mesh divisions and generate the IBZ q-points.
            qpoints = scf_task.input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
        qpoints = np.reshape(qpoints, (-1, 3))

        new = cls(manager=manager)
        if with_becs:
            new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance)

        for qpt in qpoints:
            # Gamma perturbations are already handled by the BECS tasks.
            if with_becs and np.sum(qpt ** 2) < 1e-12: continue
            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
            for ph_inp in multi:
                new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})

        return new

    @classmethod
    def from_scf_input(cls, scf_input, qpoints, is_ngqpt=False, tolerance=None,
                       with_becs=False, ddk_tolerance=None, manager=None):
        """
        Similar to `from_scf_task`, the difference is that this method requires
        an input for SCF calculation. A new ScfTask is created and added to the Work.
        This API should be used if the DDB of the GS task should be merged.
        """
        if is_ngqpt:
            qpoints = scf_input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
        qpoints = np.reshape(qpoints, (-1, 3))

        new = cls(manager=manager)
        # Create ScfTask
        scf_task = new.register_scf_task(scf_input)
        if with_becs:
            new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance)

        for qpt in qpoints:
            # Gamma perturbations are already handled by the BECS tasks.
            if with_becs and np.sum(qpt ** 2) < 1e-12: continue
            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
            for ph_inp in multi:
                new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})

        return new

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()
        # Merge DVDB files.
        out_dvdb = self.merge_pot1_files()

        return self.Results(node=self, returncode=0, message="DDB merge done")
class PhononWfkqWork(Work, MergeDdb):
    """
    This work computes phonons with DFPT on an arbitrary q-mesh (usually denser than the k-mesh for electrons)
    by computing WKQ files for each q-point.
    The number of irreducible atomic perturbations for each q-point are taken into account.
    It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
    all the partial DDB (POT) files produced. The two files are available in the
    output directory of the Work. The WKQ files are removed at runtime.
    """

    @classmethod
    def from_scf_task(cls, scf_task, ngqpt, ph_tolerance=None, tolwfr=1.0e-22, nband=None,
                      with_becs=False, ddk_tolerance=None, shiftq=(0, 0, 0), is_ngqpt=True, remove_wfkq=True,
                      manager=None):
        """
        Construct a `PhononWfkqWork` from a :class:`ScfTask` object.
        The input files for WFQ and phonons are automatically generated from the input of the ScfTask.
        Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file.

        Args:
            scf_task: ScfTask object.
            ngqpt: three integers defining the q-mesh
            with_becs: Activate calculation of Electric field and Born effective charges.
            ph_tolerance: dict {"varname": value} with the tolerance for the phonon run.
                None to use AbiPy default.
            tolwfr: tolerance used to compute WFQ.
            nband: Number of bands for the WFQ NSCF runs; a buffer of ~10% extra bands is added.
            ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs.
                None to use AbiPy default.
            shiftq: Q-mesh shift. Multiple shifts are not supported.
            is_ngqpt: the ngqpt is interpreted as a set of integers defining the q-mesh, otherwise
                is an explicit list of q-points
            remove_wfkq: Remove WKQ files when the children are completed.
            manager: :class:`TaskManager` object.

        .. note:
            Use k-meshes with one shift and q-meshes that are multiple of ngkpt
            to decrease the number of WFQ files to be computed.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        shiftq = np.reshape(shiftq, (3,))
        if is_ngqpt:
            # Generate the IBZ q-points from the mesh divisions.
            qpoints = scf_task.input.abiget_ibz(ngkpt=ngqpt, shiftk=shiftq, kptopt=1).points
        else:
            qpoints = ngqpt

        new = cls(manager=manager)
        new.remove_wfkq = remove_wfkq
        new.wfkq_tasks = []
        # Map wfkq_task --> phonon tasks depending on it (used by on_ok to clean up WFQ files).
        new.wfkq_task_children = collections.defaultdict(list)

        if with_becs:
            # Add DDK and BECS.
            new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)

        # Get ngkpt, shift for electrons from input.
        # Won't try to skip WFQ if multiple shifts or off-diagonal kptrlatt
        ngkpt, shiftk = scf_task.input.get_ngkpt_shiftk()
        try_to_skip_wfkq = True
        # FIX: the original assigned True here, making the guard a no-op; the
        # shortcut below would then evaluate `qpt * ngqpt` even when ngkpt is
        # unknown or ngqpt is an explicit list of q-points (not mesh divisions).
        # Also disable the shortcut when is_ngqpt is False for the same reason.
        if ngkpt is None or len(shiftk) > 1 or not is_ngqpt:
            try_to_skip_wfkq = False

        # TODO: One could avoid kptopt 3 by computing WFK in the IBZ and then rotating.
        # but this has to be done inside Abinit.
        for qpt in qpoints:
            is_gamma = np.sum(qpt ** 2) < 1e-12
            # Gamma perturbations are already handled by the BECS tasks.
            if with_becs and is_gamma: continue

            # Avoid WFQ if k + q = k (requires ngkpt, multiple shifts are not supported)
            need_wfkq = True
            if is_gamma:
                need_wfkq = False
            elif try_to_skip_wfkq:
                # k = (i + shiftk) / ngkpt
                qinds = np.rint(qpt * ngqpt - shiftq)
                f = (qinds * ngkpt) % ngqpt
                need_wfkq = np.any(f != 0)

            if need_wfkq:
                nscf_inp = scf_task.input.new_with_vars(qpt=qpt, nqpt=1, iscf=-2, kptopt=3, tolwfr=tolwfr)
                if nband:
                    # FIX: keep nband/nbdbuf integral; max(2, nband * 0.1) could
                    # produce a float (e.g. 10.0) in the ABINIT input file.
                    nbdbuf = max(2, int(nband * 0.1))
                    nscf_inp.set_vars(nband=nband + nbdbuf, nbdbuf=nbdbuf)
                wfkq_task = new.register_nscf_task(nscf_inp, deps={scf_task: ["DEN", "WFK"]})
                new.wfkq_tasks.append(wfkq_task)

            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=ph_tolerance)
            for ph_inp in multi:
                deps = {scf_task: "WFK", wfkq_task: "WFQ"} if need_wfkq else {scf_task: "WFK"}
                #ph_inp["prtwf"] = -1
                t = new.register_phonon_task(ph_inp, deps=deps)
                if need_wfkq:
                    new.wfkq_task_children[wfkq_task].append(t)

        return new

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It removes the WFKQ file if all its children have reached `S_OK`.
        """
        if self.remove_wfkq:
            for task in self.wfkq_tasks:
                if task.status != task.S_OK: continue
                children = self.wfkq_task_children[task]
                if all(child.status == child.S_OK for child in children):
                    path = task.outdir.has_abiext("WFQ")
                    if path:
                        self.history.info("Removing WFQ: %s" % path)
                        os.remove(path)

        return super(PhononWfkqWork, self).on_ok(sender)

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()
        # Merge DVDB files.
        out_dvdb = self.merge_pot1_files()

        return self.Results(node=self, returncode=0, message="DDB merge done")
class GKKPWork(Work):
    """
    This work computes electron-phonon matrix elements for all the q-points
    present in a DVDB and DDB file.
    """

    @classmethod
    def from_den_ddb_dvdb(cls, inp, den_path, ddb_path, dvdb_path, mpiprocs=1, remove_wfkq=True,
                          qpath=None, with_ddk=True, expand=True, manager=None):
        """
        Construct a `GKKPWork` from a DEN, a DDB and a DVDB file.
        For each q found, a WFQ task and an EPH task computing the matrix elements are created.

        Args:
            inp: AbinitInput used as template for the NSCF/EPH tasks.
            den_path: Path to the ground-state DEN file.
            ddb_path: Path to the DDB file.
            dvdb_path: Path to the DVDB file.
            mpiprocs: Number of MPI processes used by the parallel tasks.
            remove_wfkq: True to remove WFQ files once all their children are done.
            qpath: Optional list of q-points used to interpolate the DVDB
                (the DDB/DVDB must then correspond to a regular grid).
                If None, the q-points found in the DDB file are used.
            with_ddk: True to also compute the DDK wavefunctions.
            expand: True to expand the WFK to the full BZ.
            manager: :class:`TaskManager` object.
        """
        import abipy.abilab as abilab

        # Create file nodes for the precomputed ab-initio files.
        den_file = FileNode(den_path)
        ddb_file = FileNode(ddb_path)
        dvdb_file = FileNode(dvdb_path)

        # Create new work.
        new = cls(manager=manager)
        new.remove_wfkq = remove_wfkq
        new.wfkq_tasks = []
        new.wfkq_task_children = collections.defaultdict(list)
        if manager is None: manager = TaskManager.from_user_config()
        tm = manager.new_with_fixed_mpi_omp(mpiprocs, 1)

        # Create a WFK task (kptopt=1 is enough if the WFK is expanded later on).
        kptopt = 1 if expand else 3
        nscf_inp = inp.new_with_vars(iscf=-2, kptopt=kptopt)
        wfk_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"}, manager=tm)
        new.wfkq_tasks.append(wfk_task)
        new.wfk_task = wfk_task

        # Read q-point path and regular grid from the DDB file.
        with abilab.abiopen(ddb_path) as ddb:
            q_frac_coords = np.array([k.frac_coords for k in ddb.qpoints])
            ddb_ngqpt = ddb.guessed_ngqpt

        # If qpath is set, we read the list of q-points to be used to interpolate the DVDB file.
        # The DVDB and DDB file have to correspond to a regular grid.
        dvdb = dvdb_file
        if qpath is None:
            qpath = q_frac_coords
        else:
            interp_inp = inp.new_with_vars(optdriver=7, eph_task=-5, ddb_ngqpt=ddb_ngqpt,
                                           ph_nqpath=len(qpath), ph_qpath=qpath, prtphdos=0)
            dvdb = new.register_eph_task(interp_inp, deps={wfk_task: "WFK", ddb_file: "DDB", dvdb_file: "DVDB"},
                                        manager=tm)

        # Create a WFK expansion task (serial run; replaces wfk_task from now on).
        if expand:
            fbz_nscf_inp = inp.new_with_vars(optdriver=8)
            fbz_nscf_inp.set_spell_check(False)
            fbz_nscf_inp.set_vars(wfk_task="wfk_fullbz")
            tm_serial = manager.new_with_fixed_mpi_omp(1, 1)
            wfk_task = new.register_nscf_task(fbz_nscf_inp, deps={wfk_task: "WFK", den_file: "DEN"},
                                              manager=tm_serial)
            new.wfkq_tasks.append(wfk_task)
            new.wfk_task = wfk_task

        if with_ddk:
            kptopt = 3 if expand else 1
            ddk_inp = inp.new_with_vars(optdriver=8, kptopt=kptopt)
            ddk_inp.set_spell_check(False)
            ddk_inp.set_vars(wfk_task="wfk_ddk")
            ddk_task = new.register_nscf_task(ddk_inp, deps={wfk_task: "WFK", den_file: "DEN"}, manager=tm)
            new.wfkq_tasks.append(ddk_task)

        # For each q-point: one WFQ task (unless q == Gamma) and one EPH task.
        for qpt in qpath:
            is_gamma = np.sum(qpt ** 2) < 1e-12
            if is_gamma:
                # At Gamma the WFK doubles as WFQ (a link is created in on_ok).
                wfkq_task = wfk_task
                deps = {wfk_task: ["WFK", "WFQ"], ddb_file: "DDB", dvdb: "DVDB"}
            else:
                # Create a WFQ task shifted by qpt.
                nscf_inp = nscf_inp.new_with_vars(kptopt=3, qpt=qpt, nqpt=1)
                wfkq_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"}, manager=tm)
                new.wfkq_tasks.append(wfkq_task)
                deps = {wfk_task: "WFK", wfkq_task: "WFQ", ddb_file: "DDB", dvdb: "DVDB"}

            # Create the EPH task computing the matrix elements for this q-point.
            eph_inp = inp.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, kptopt=3,
                                        ddb_ngqpt=[1, 1, 1], nqpt=1, qpt=qpt)
            t = new.register_eph_task(eph_inp, deps=deps, manager=tm)
            new.wfkq_task_children[wfkq_task].append(t)

        return new

    @classmethod
    def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars=None, remove_wfkq=True, with_ddk=True, manager=None):
        """
        Construct a `GKKPWork` from a `PhononWfkqWork` object.
        The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands.

        Args:
            phononwfkq_work: PhononWfkqWork providing the DDB/DVDB files and q-points.
            nscf_vars: optional dict of extra NSCF variables. Default changed from a
                mutable ``{}`` (shared across calls — a latent bug) to ``None``.
                NOTE(review): this argument is currently not used by the body.
            remove_wfkq: True to remove WFQ files once all their children are done.
            with_ddk: kept for interface compatibility (currently unused here).
            manager: :class:`TaskManager` object.
        """
        if nscf_vars is None:
            nscf_vars = {}

        # Get the list of q-points from the phonon tasks in the source work.
        qpoints = []
        qpoints_deps = []
        for task in phononwfkq_work:
            if isinstance(task, PhononTask):
                # Store qpoints
                qpt = task.input.get("qpt", [0, 0, 0])
                qpoints.append(qpt)
                # Store dependencies
                qpoints_deps.append(task.deps)

        # Create file nodes for the merged files produced by the source work.
        ddb_path = phononwfkq_work.outdir.has_abiext("DDB")
        dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB")
        ddb_file = FileNode(ddb_path)
        dvdb_file = FileNode(dvdb_path)

        # Get scf_task from first q-point.
        # NOTE(review): assumes at least one dep is an ScfTask providing WFK — confirm.
        for dep in qpoints_deps[0]:
            if isinstance(dep.node, ScfTask) and dep.exts[0] == 'WFK':
                scf_task = dep.node

        # Create new work.
        new = cls(manager=manager)
        new.remove_wfkq = remove_wfkq
        new.wfkq_tasks = []
        new.wfk_task = []

        # Add one EPH task per q-point.
        for qpt, qpoint_deps in zip(qpoints, qpoints_deps):
            # Create the EPH task.
            eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2,
                                                     ddb_ngqpt=[1, 1, 1], nqpt=1, qpt=qpt)
            deps = {ddb_file: "DDB", dvdb_file: "DVDB"}
            for dep in qpoint_deps:
                deps[dep.node] = dep.exts[0]

            # If no WFQ is among the deps, link the WFK file with a WFQ extension
            # so that abinit finds the file it expects.
            if 'WFQ' not in deps.values():
                inv_deps = dict((v, k) for k, v in deps.items())
                wfk_task = inv_deps['WFK']
                wfk_path = wfk_task.outdir.has_abiext("WFK")
                # Preserve the extension (e.g. ".nc" for netcdf files).
                filename, extension = os.path.splitext(wfk_path)
                infile = 'out_WFQ' + extension
                wfq_path = os.path.join(os.path.dirname(wfk_path), infile)
                if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path)
                deps[FileNode(wfq_path)] = 'WFQ'

            new.register_eph_task(eph_input, deps=deps)

        return new

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It removes the WFKQ file if all its children have reached `S_OK`.
        """
        if self.remove_wfkq:
            for task in self.wfkq_tasks:
                if task.status != task.S_OK: continue
                children = self.wfkq_task_children[task]
                if all(child.status == child.S_OK for child in children):
                    path = task.outdir.has_abiext("WFQ")
                    if path:
                        self.history.info("Removing WFQ: %s" % path)
                        os.remove(path)

        # If the sender is the WFK task, create a WFQ link so abinit is happy.
        if sender == self.wfk_task:
            wfk_path = self.wfk_task.outdir.has_abiext("WFK")
            # Preserve the extension (e.g. ".nc" for netcdf files).
            filename, extension = os.path.splitext(wfk_path)
            infile = 'out_WFQ' + extension
            infile = os.path.join(os.path.dirname(wfk_path), infile)
            os.symlink(wfk_path, infile)

        return super(GKKPWork, self).on_ok(sender)
class BecWork(Work, MergeDdb):
    """
    Work for the computation of the Born effective charges.

    This work consists of DDK tasks and phonon + electric field perturbation.
    It provides the callback method (on_all_ok) that calls mrgddb to merge the
    partial DDB files produced by the work.
    """

    @classmethod
    def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):
        """
        Build tasks for the computation of Born effective charges from a ground-state task.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
            ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
                None to use AbiPy default.
            manager: :class:`TaskManager` object.

        Raises:
            TypeError: if `scf_task` does not inherit from ScfTask.
        """
        # Fail early with a clear message (same sanity check as in DteWork).
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        new = cls(manager=manager)
        new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
        return new

    def on_all_ok(self):
        """
        This method is called when all tasks reach S_OK.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge the partial DDB files produced by the tasks of this work.
        out_ddb = self.merge_ddb_files()
        return self.Results(node=self, returncode=0, message="DDB merge done")
class DteWork(Work, MergeDdb):
    """
    Work for the computation of the third derivative of the energy.

    This work consists of DDK tasks and electric field perturbation.
    It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced
    """

    @classmethod
    def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None):
        """
        Build a DteWork from a ground-state task.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
            manager: :class:`TaskManager` object.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        new = cls(manager=manager)

        # DDK calculations: one task per direction, each depending on the GS WFK.
        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
        ddk_tasks = [new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
                     for ddk_inp in multi_ddk]

        # Electric-field (DDE) perturbations: every DDE task is connected to all
        # DDK tasks and to the GS WFK. Symmetries are disabled because all
        # perturbation directions are needed for the nonlinear coefficients.
        multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)

        dde_deps = dict.fromkeys(ddk_tasks, "DDK")
        dde_deps[scf_task] = "WFK"
        dde_tasks = [new.register_dde_task(dde_inp, deps=dde_deps)
                     for dde_inp in multi_dde]

        # DTE calculations: each task depends on the GS WFK/DEN and on all DDE results.
        dte_deps = {scf_task: "WFK DEN"}
        for dde_task in dde_tasks:
            dte_deps[dde_task] = "1WF 1DEN"

        for dte_inp in scf_task.input.make_dte_inputs():
            new.register_dte_task(dte_inp, deps=dte_deps)

        return new

    def on_all_ok(self):
        """
        Called when all tasks reach S_OK. Runs `mrgddb` in sequential on the
        local machine to produce the final DDB file in the outdir of the `Work`.
        """
        out_ddb = self.merge_ddb_files()
        return self.Results(node=self, returncode=0, message="DDB merge done")
|
montoyjh/pymatgen
|
pymatgen/io/abinit/works.py
|
Python
|
mit
| 74,870
|
[
"ABINIT",
"Gaussian",
"NetCDF",
"pymatgen"
] |
734240432f475c74243771425e3b029b03a57cf0bc57061cae1868125ac93366
|
__all__ = ['fsvd']
import numpy as np
def fsvd(A, k, i=1, usePowerMethod=False):
    """
    FSVD Fast (randomized) truncated Singular Value Decomposition.

    U, S, V = fsvd(A, k, i, usePowerMethod) computes the truncated singular
    value decomposition of the input matrix A up to rank k using i levels of
    the Krylov method as given in [1], p. 3.

    If usePowerMethod is True, then only exponent i is used (i.e. as power
    method). See [2] p. 9, Randomized PCA algorithm, for details.

    [1] Halko, N., Martinsson, P. G., Shkolnisky, Y., & Tygert, M. (2010).
        An algorithm for the principal component analysis of large data sets.
        Arxiv preprint arXiv:1007.5510, 0526. http://arxiv.org/abs/1007.5510.

    [2] Halko, N., Martinsson, P. G., & Tropp, J. A. (2009). Finding
        structure with randomness: Probabilistic algorithms for constructing
        approximate matrix decompositions. http://arxiv.org/abs/0909.4061.

    Copyright 2011 Ismail Ari, http://ismailari.com.

    Bug fixes w.r.t. the previous Python port:
      * the Krylov branch used ``np.append(A*G)`` (TypeError, and an invalid
        elementwise product) — the blocks are now built with matrix products
        and stacked column-wise;
      * ``np.linalg.svd`` returns V transposed (``Vh``), so the left factor is
        ``Q @ Vh.T`` (the missing transpose gave wrong singular vectors);
      * the truncation ``[:, 0:k-1]`` kept only k-1 columns — now ``[:, :k]``;
      * the economy SVD (``full_matrices=False``) is used;
      * ``i`` defaults to 1 instead of being silently overwritten.

    Args:
        A (ndarray): Matrix to be decomposed.
        k (int): rank of the truncated decomposition.
        i (int, optional): number of levels of the Krylov method, or the
            exponent when ``usePowerMethod`` is True. Defaults to 1.
        usePowerMethod (bool, optional): use the power-method variant.

    Returns:
        (U, S, V): U, V have k columns and S holds the k leading singular
        values, such that ``U @ np.diag(S) @ V.T`` approximates A.
    """
    A = np.asarray(A)
    s = A.shape

    # Take the transpose if necessary: it makes H smaller, thus
    # leading the computations to be faster.
    isTransposed = False
    if s[0] < s[1]:
        A = A.T
        isTransposed = True

    n = A.shape[1]
    n_rand = k + 2  # small oversampling over the target rank, cf. [1]

    # Form a real n x l matrix G whose entries are iid Gaussian r.v.s of
    # zero mean and unit variance.
    G = np.random.randn(n, n_rand)

    if usePowerMethod:
        # Power method: H = (A A^T)^(i-1) A G, using only the given exponent.
        H = np.dot(A, G)
        for _ in range(2, i + 1):
            H = np.dot(A, np.dot(A.T, H))
    else:
        # Krylov blocks H^(0) = A G, H^(j) = A A^T H^(j-1),
        # stacked column-wise: H = [H^(0), ..., H^(i-1)] ([1], p. 3).
        blocks = [np.dot(A, G)]
        for _ in range(1, i):
            blocks.append(np.dot(A, np.dot(A.T, blocks[-1])))
        H = np.hstack(blocks)

    # Using the QR decomposition, form a matrix Q with orthonormal columns
    # spanning the range of H (H = Q R).
    Q, _ = np.linalg.qr(H)

    # Compute the product matrix T = A^T Q.
    T = np.dot(A.T, Q)

    # Economy SVD of T: T = Vt diag(St) Wh, where Wh is W transposed.
    Vt, St, Wh = np.linalg.svd(T, full_matrices=False)

    # Left factor: Ut = Q W = Q Wh^T, since
    # A ~= Q Q^T A = Q T^T = (Q Wh^T) diag(St) Vt^T.
    Ut = np.dot(Q, Wh.T)

    # Retrieve the k leading singular triplets; undo the initial transposition
    # (the SVD of A^T swaps the roles of U and V).
    if isTransposed:
        V = Ut[:, :k]
        U = Vt[:, :k]
    else:
        U = Ut[:, :k]
        V = Vt[:, :k]

    S = St[:k]
    return U, S, V
|
aasensio/pyiacsun
|
pyiacsun/linalg/fsvd.py
|
Python
|
mit
| 3,055
|
[
"Gaussian"
] |
59f7c6fe94424767cc9c1cfdc57c6cc68a77191b9bde51c09ad9a93b0707f171
|
import unicodedata
import numpy as np
from .. import conventions, Variable
from ..core import ops
from ..core.pycompat import basestring, unicode_type, OrderedDict
# Special characters that are permitted in netCDF names except in the
# 0th position of the string
_specialchars = '_.@+- !"#$%&\()*,:;<=>?[]^`{|}~'

# The following are reserved names in CDL and may not be used as names of
# variables, dimension, attributes.
# Bug fix: 'float' and 'real' were previously written as 'float' 'real'
# (missing comma), so implicit string concatenation put the bogus entry
# 'floatreal' in the set and neither CDL keyword was actually reserved.
_reserved_names = set(['byte', 'char', 'short', 'ushort', 'int', 'uint',
                       'int64', 'uint64', 'float', 'real', 'double', 'bool',
                       'string'])

# These data-types aren't supported by netCDF3, so they are automatically
# coerced instead as indicated by the "coerce_nc3_dtype" function
_nc3_dtype_coercions = {'int64': 'int32', 'bool': 'int8'}
def coerce_nc3_dtype(arr):
    """Coerce an array to a data type that can be stored in a netCDF-3 file

    This function performs the following dtype conversions (driven by the
    module-level `_nc3_dtype_coercions` table):
        int64 -> int32
        bool -> int8
        unicode -> utf-8 encoded bytes

    Raises ValueError if the coerced values do not round-trip back to the
    original values (i.e. the cast would lose information).
    """
    dtype = str(arr.dtype)
    if dtype in _nc3_dtype_coercions:
        new_dtype = _nc3_dtype_coercions[dtype]
        # TODO: raise a warning whenever casting the data-type instead?
        cast_arr = arr.astype(new_dtype)
        # Integer/unicode data must round-trip exactly; float data is compared
        # with allclose_or_equiv (NaNs treated as equivalent).
        # NOTE(review): the 'float' branch is currently unreachable because
        # _nc3_dtype_coercions contains no float entries — confirm intent.
        if ((('int' in dtype or 'U' in dtype) and
             not (cast_arr == arr).all())
                or ('float' in dtype and
                    not ops.allclose_or_equiv(cast_arr, arr))):
            raise ValueError('could not safely cast array from dtype %s to %s'
                             % (dtype, new_dtype))
        arr = cast_arr
    elif arr.dtype.kind == 'U':
        # netCDF-3 has no unicode type: encode to utf-8 bytes.
        arr = np.core.defchararray.encode(arr, 'utf-8')
    return arr
def maybe_convert_to_char_array(data, dims):
    """Split fixed-width byte strings (itemsize > 1) into a char array with an
    extra 'stringN' dimension; any other input is returned unchanged."""
    if data.dtype.kind != 'S' or data.dtype.itemsize <= 1:
        return data, dims
    converted = conventions.string_to_char(data)
    return converted, dims + ('string%s' % converted.shape[-1],)
def encode_nc3_attr_value(value):
    """Encode one attribute value for netCDF-3: strings are decoded to
    unicode, anything else is coerced to a 1D array with an nc3-safe dtype."""
    if not isinstance(value, basestring):
        value = coerce_nc3_dtype(np.atleast_1d(value))
        if value.ndim > 1:
            raise ValueError("netCDF attributes must be 1-dimensional")
    elif not isinstance(value, unicode_type):
        # Byte string: interpret as utf-8 encoded text.
        value = value.decode('utf-8')
    return value
def encode_nc3_attrs(attrs):
    """Return an OrderedDict with every attribute value encoded for netCDF-3."""
    encoded = OrderedDict()
    for name, value in attrs.items():
        encoded[name] = encode_nc3_attr_value(value)
    return encoded
def encode_nc3_variable(var):
    """Return a Variable whose data, dims and attrs are netCDF-3 safe."""
    coerced = coerce_nc3_dtype(var.data)
    data, dims = maybe_convert_to_char_array(coerced, var.dims)
    return Variable(dims, data, encode_nc3_attrs(var.attrs), var.encoding)
def _isalnumMUTF8(c):
"""Return True if the given UTF-8 encoded character is alphanumeric
or multibyte.
Input is not checked!
"""
return c.isalnum() or (len(c.encode('utf-8')) > 1)
def is_valid_nc3_name(s):
    """Test whether an object can be validly converted to a netCDF-3
    dimension, variable or attribute name

    Earlier versions of the netCDF C-library reference implementation
    enforced a more restricted set of characters in creating new names,
    but permitted reading names containing arbitrary bytes. This
    specification extends the permitted characters in names to include
    multi-byte UTF-8 encoded Unicode and additional printing characters
    from the US-ASCII alphabet. The first character of a name must be
    alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for
    special names with meaning to implementations, such as the
    "_FillValue" attribute). Subsequent characters may also include
    printing special characters, except for '/' which is not allowed in
    names. Names that have trailing space characters are also not
    permitted.
    """
    if not isinstance(s, basestring):
        return False
    if not isinstance(s, unicode_type):
        s = s.decode('utf-8')
    num_bytes = len(s.encode('utf-8'))
    # Bug fix: the empty-string guard used to be `num_bytes >= 0`, which is
    # always true, so `s[-1]` raised IndexError for s == ''. Requiring at
    # least one byte short-circuits the conjunction and returns False instead.
    return ((unicodedata.normalize('NFC', s) == s) and
            (s not in _reserved_names) and
            (num_bytes >= 1) and
            ('/' not in s) and
            (s[-1] != ' ') and
            (_isalnumMUTF8(s[0]) or (s[0] == '_')) and
            all((_isalnumMUTF8(c) or c in _specialchars for c in s)))
|
cpaulik/xray
|
xray/backends/netcdf3.py
|
Python
|
apache-2.0
| 4,438
|
[
"NetCDF"
] |
ba82c14dcc41ab9b2972d26155885a5e50d1b8d9a19731431dd1a6bba0e4cdde
|
#!/usr/bin/env python
#
# $File: genoStru.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
# Build a diploid population: two subpopulations of sizes 2 and 3, two
# chromosomes ('Chr1' with 5 loci, 'Chr2' with 10 loci) whose positions are
# given explicitly, and four named alleles.
pop = sim.Population(size=[2, 3], ploidy=2, loci=[5, 10],
    lociPos=list(range(0, 5)) + list(range(0, 20, 2)), chromNames=['Chr1', 'Chr2'],
    alleleNames=['A', 'C', 'T', 'G'])
# access genotypic information from the sim.Population
pop.ploidy()
pop.ploidyName()
pop.numChrom()
pop.locusPos(2)
pop.alleleName(1)
# access from an individual (individuals share the population's structure)
ind = pop.individual(2)
ind.numLoci(1)
ind.chromName(0)
ind.locusName(1)
# utility functions
ind.chromBegin(1)
ind.chromByName('Chr2')
# loci pos can be unordered within each chromosome
pop = sim.Population(loci=[2, 3], lociPos=[3, 1, 1, 3, 2],
    lociNames=['loc%d' % x for x in range(5)])
pop.lociPos()
pop.lociNames()
|
BoPeng/simuPOP
|
docs/genoStru.py
|
Python
|
gpl-2.0
| 1,769
|
[
"VisIt"
] |
4266a0bfbe28ca561d183b13619078b3c273a0645fc13115d9ccd389c34f8d3e
|
from sympy import (meijerg, I, S, integrate, Integral, oo, gamma,
hyperexpand, exp, simplify, sqrt, pi, erf, sin, cos,
exp_polar, polygamma, hyper, log, expand_func)
from sympy.integrals.meijerint import (_rewrite_single, _rewrite1,
meijerint_indefinite, _inflate_g, _create_lookup_table,
meijerint_definite, meijerint_inversion)
from sympy.utilities import default_sort_key
from sympy.utilities.randtest import (verify_numerically,
random_complex_number as randcplx)
from sympy.abc import x, y, a, b, c, d, s, t, z
def test_rewrite_single():
    """Check _rewrite_single: rewriting an expression as a single G-function."""
    def t(expr, c, m):
        # The rewrite must succeed and yield a meijerg with argument c*m.
        e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x)
        assert e is not None
        assert isinstance(e[0][0][2], meijerg)
        assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,))

    def tn(expr):
        # These arguments cannot be rewritten as a single G-function.
        assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None

    t(x, 1, x)
    t(x**2, 1, x**2)
    t(x**2 + y*x**2, y + 1, x**2)
    tn(x**2 + x)
    tn(x**y)

    def u(expr, x):
        # Numerically verify the rewrite by summing fac*g and comparing.
        from sympy import Add, exp, exp_polar
        r = _rewrite_single(expr, x)
        e = Add(*[res[0]*res[2] for res in r[0]]).replace(
            exp_polar, exp)  # XXX Hack?
        assert verify_numerically(e, expr, x)

    u(exp(-x)*sin(x), x)

    # The following has stopped working because hyperexpand changed slightly.
    # It is probably not worth fixing
    #u(exp(-x)*sin(x)*cos(x), x)

    # This one cannot be done numerically, since it comes out as a g-function
    # of argument 4*pi
    # NOTE This also tests a bug in inverse mellin transform (which used to
    # turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of
    # exp_polar).
    #u(exp(x)*sin(x), x)
    assert _rewrite_single(exp(x)*sin(x), x) == \
        ([(-sqrt(2)/(2*sqrt(pi)), 0,
           meijerg(((-S(1)/2, 0, S(1)/4, S(1)/2, S(3)/4), (1,)),
                   ((), (-S(1)/2, 0)), 64*exp_polar(-4*I*pi)/x**4))], True)
def test_rewrite1():
    """_rewrite1 factors out constants and powers of x from the G-function."""
    assert _rewrite1(x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5, x) == \
        (5, x**3, [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], True)
def test_meijerint_indefinite_numerically():
    """Numerically verify that d/dx of meijerint_indefinite(g) equals g."""
    def t(fac, arg):
        g = meijerg([a], [b], [c], [d], arg)*fac
        # Random (small) complex values for the G-function parameters.
        subs = {a: randcplx()/10, b: randcplx()/10 + I,
                c: randcplx(), d: randcplx()}
        integral = meijerint_indefinite(g, x)
        assert integral is not None
        assert verify_numerically(g.subs(subs), integral.diff(x).subs(subs), x)
    t(1, x)
    t(2, x)
    t(1, 2*x)
    t(1, x**2)
    t(5, x**S('3/2'))
    t(x**3, x)
    t(3*x**S('3/2'), 4*x**S('7/3'))
def test_meijerint_definite():
    """Degenerate integration ranges (zero length) must give zero with
    valid antecedents."""
    value, cond = meijerint_definite(x, x, 0, 0)
    assert value.is_zero and cond is True
    value, cond = meijerint_definite(x, x, oo, oo)
    assert value.is_zero and cond is True
def test_inflate():
    """_inflate_g(m, n) must preserve the value of the G-function."""
    subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(),
            d: randcplx(), y: randcplx()/10}

    def t(a, b, arg, n):
        from sympy import Mul
        m1 = meijerg(a, b, arg)
        m2 = Mul(*_inflate_g(m1, n))
        # NOTE: (the random number)**9 must still be on the principal sheet.
        # Thus make b&d small to create random numbers of small imaginary part.
        return verify_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1)
    assert t([[a], [b]], [[c], [d]], x, 3)
    assert t([[a, y], [b]], [[c], [d]], x, 3)
    assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3)
def test_recursive():
    """Products of Gaussians: recursive application of the meijerg algorithm."""
    from sympy import symbols
    a, b, c = symbols('a b c', positive=True)
    r = exp(-(x - a)**2)*exp(-(x - b)**2)
    e = integrate(r, (x, 0, oo), meijerg=True)
    assert simplify(e.expand()) == (
        sqrt(2)*sqrt(pi)*(
        (erf(sqrt(2)*(a + b)/2) + 1)*exp(-a**2/2 + a*b - b**2/2))/4)
    e = integrate(exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo), meijerg=True)
    assert simplify(e) == (
        sqrt(2)*sqrt(pi)*(erf(sqrt(2)*(2*a + 2*b + c)/4) + 1)*exp(-a**2 - b**2
        + (2*a + 2*b + c)**2/8)/4)
    # Shifted Gaussians reduce to erf of the (summed) shift.
    assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo), meijerg=True)) == \
        sqrt(pi)/2*(1 + erf(a + b + c))
    assert simplify(integrate(exp(-(x + a + b + c)**2), (x, 0, oo), meijerg=True)) == \
        sqrt(pi)/2*(1 - erf(a + b + c))
def test_meijerint():
    """Exercise meijerint_definite/meijerint_indefinite and the meijerg
    code path of integrate() on a collection of known integrals."""
    from sympy import symbols, expand, arg
    s, t, mu = symbols('s t mu', real=True)
    assert integrate(meijerg([], [], [0], [], s*t)
                     *meijerg([], [], [mu/2], [-mu/2], t**2/4),
                     (t, 0, oo)).is_Piecewise
    s = symbols('s', positive=True)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) == \
        gamma(s + 1)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo),
                     meijerg=True) == gamma(s + 1)
    assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x),
                                (x, 0, oo), meijerg=False),
                      Integral)

    assert meijerint_indefinite(exp(x), x) == exp(x)

    # TODO what simplifications should be done automatically?
    # This tests "extra case" for antecedents_1.
    a, b = symbols('a b', positive=True)
    assert simplify(meijerint_definite(x**a, x, 0, b)[0]) == \
        b**(a + 1)/(a + 1)

    # This tests various conditions and expansions.
    # Bug fix: the comparison below was previously a bare expression (its
    # result was discarded), so the check never actually ran.
    assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True)

    # Again, how about simplifications?
    sigma, mu = symbols('sigma mu', positive=True)
    i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo)
    assert simplify(i) == sqrt(pi)*sigma*(erf(mu/(2*sigma)) + 1)
    assert c == True

    i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo)
    # TODO it would be nice to test the condition
    assert simplify(i) == 1/(mu - sigma)

    # Test substitutions to change limits
    assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True)
    assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1
    assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \
        1 - exp(-exp(I*arg(x))*abs(x))

    # Test -oo to oo
    assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True)
    assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True)
    assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \
        (sqrt(pi)/2, True)
    assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True)
    assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2),
                              x, -oo, oo) == (1, True)

    # Test one of the extra conditions for 2 g-functinos
    assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S(1)/2, True)

    # Test a bug
    def res(n):
        return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n
    for n in range(6):
        assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \
            res(n)

    # This used to test trigexpand... now it is done by linear substitution
    assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True)
                    ) == sqrt(2)*sin(a + pi/4)/2

    # Test the condition 14 from prudnikov.
    # (This is besselj*besselj in disguise, to stop the product from being
    # recognised in the tables.)
    a, b, s = symbols('a b s')
    from sympy import And, re
    assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4)
                              *meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo) == \
        (4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
         /(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
           *gamma(a/2 + b/2 - s + 1)),
         And(0 < -2*re(4*s) + 8, 0 < re(a/2 + b/2 + s), re(2*s) < 1))

    # test a bug
    assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \
        Integral(sin(x**a)*sin(x**b), (x, 0, oo))

    # test better hyperexpand
    assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \
        (sqrt(pi)*polygamma(0, S(1)/2)/4).expand()

    # Test hyperexpand bug.
    from sympy import lowergamma
    n = symbols('n', integer=True)
    assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \
        lowergamma(n + 1, x)

    # Test a bug with argument 1/x
    alpha = symbols('alpha', positive=True)
    assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \
        (sqrt(pi)*alpha*gamma(alpha + 1)*meijerg(((), (alpha/2 + S(1)/2,
         alpha/2 + 1)), ((0, 0, S(1)/2), (-S(1)/2,)), alpha**S(2)/16)/4, True)

    # test a bug related to 3016
    a, s = symbols('a s', positive=True)
    assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \
        a**(-s/2 - S(1)/2)*((-1)**s + 1)*gamma(s/2 + S(1)/2)/2
def test_bessel():
    """Definite and indefinite integrals involving Bessel functions."""
    from sympy import besselj, besseli
    # Orthogonality-type integrals of besselj products.
    assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == \
        2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b))
    assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == 1/(2*a)

    # TODO more orthogonality integrals

    assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S(1)/2)),
                              (x, 1, oo), meijerg=True, conds='none')
                    *2/((z/2)**y*sqrt(pi)*gamma(S(1)/2 - y))) == \
        besselj(y, z)

    # Werner Rosenheinrich
    # SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS
    assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x)
    assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x)
    # TODO can do higher powers, but come out as high order ... should they be
    # reduced to order 0, 1?
    assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x)
    assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \
        -(besselj(0, x)**2 + besselj(1, x)**2)/2

    # TODO more besseli when tables are extended or recursive mellin works
    assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \
        -2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \
        + 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x
    assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        -besselj(0, x)**2/2
    assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        x**2*besselj(1, x)**2/2
    assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \
        (x*besselj(0, x)**2 + x*besselj(1, x)**2 -
         besselj(0, x)*besselj(1, x))
    # TODO how does besselj(0, a*x)*besselj(0, b*x) work?
    # TODO how does besselj(0, x)**2*besselj(1, x)**2 work?
    # TODO sin(x)*besselj(0, x) etc come out a mess
    # TODO can x*log(x)*besselj(0, x) be done?
    # TODO how does besselj(1, x)*besselj(0, x+a) work?
    # TODO more indefinite integrals when struve functions etc are implemented

    # test a substitution
    assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \
        -besselj(0, x**2)/2
def test_inversion():
    """Inverse Laplace transforms computed via meijerint_inversion."""
    from sympy import piecewise_fold, besselj, sqrt, sin, cos, Heaviside

    def inv(f):
        return piecewise_fold(meijerint_inversion(f, s, t))
    assert inv(1/(s**2 + 1)) == sin(t)*Heaviside(t)
    assert inv(s/(s**2 + 1)) == cos(t)*Heaviside(t)
    assert inv(exp(-s)/s) == Heaviside(t - 1)
    assert inv(1/sqrt(1 + s**2)) == besselj(0, t)*Heaviside(t)

    # Test some antecedents checking: these transforms must be rejected.
    assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None
    assert inv(exp(s**2)) is None
    assert meijerint_inversion(exp(-s**2), s, t) is None
def test_lookup_table():
    """Every entry of the meijerint lookup table must be hyperexpandable and
    must agree numerically with its G-function expansion."""
    from random import uniform, randrange
    from sympy import Add
    from sympy.integrals.meijerint import z as z_dummy
    table = {}
    _create_lookup_table(table)
    for _, l in sorted(table.items()):
        for formula, terms, cond, hint in sorted(l, key=default_sort_key):
            subs = {}
            for a in list(formula.free_symbols) + [z_dummy]:
                if hasattr(a, 'properties') and a.properties:
                    # these Wilds match positive integers
                    subs[a] = randrange(1, 10)
                else:
                    subs[a] = uniform(1.5, 2.0)
            if not isinstance(terms, list):
                terms = terms(subs)

            # First test that hyperexpand can do this.
            expanded = [hyperexpand(g) for (_, g) in terms]
            assert all(x.is_Piecewise or not x.has(meijerg) for x in expanded)

            # Now test that the meijer g-function is indeed as advertised.
            # Compare numerically with a relative tolerance when the values
            # are large, absolute otherwise.
            expanded = Add(*[f*x for (f, x) in terms])
            a, b = formula.n(subs=subs), expanded.n(subs=subs)
            r = min(abs(a), abs(b))
            if r < 1:
                assert abs(a - b).n() <= 1e-10
            else:
                assert (abs(a - b)/r).n() <= 1e-10
def test_branch_bug():
    """Branch-cut handling in the erf(x**3) antiderivative (polar powdenest)."""
    from sympy import powdenest, lowergamma
    # TODO combsimp cannot prove that the factor is unity
    assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x),
                     polar=True) == 2*erf(x**3)*gamma(S(2)/3)/3/gamma(S(5)/3)
    assert integrate(erf(x**3), x, meijerg=True) == \
        2*x*erf(x**3)*gamma(S(2)/3)/(3*gamma(S(5)/3)) \
        - 2*gamma(S(2)/3)*lowergamma(S(2)/3, x**6)/(3*sqrt(pi)*gamma(S(5)/3))
def test_linear_subs():
    """Integrands with linearly shifted arguments are handled by a linear
    substitution inside the meijerg code path."""
    from sympy import besselj
    cases = [
        (sin(x - 1), -cos(1 - x)),
        (besselj(1, x - 1), -besselj(0, 1 - x)),
    ]
    for integrand, antiderivative in cases:
        assert integrate(integrand, x, meijerg=True) == antiderivative
def test_probability():
# various integrals from probability theory
from sympy.abc import x, y
from sympy import symbols, Symbol, Abs, expand_mul, combsimp, powsimp, sin
mu1, mu2 = symbols('mu1 mu2', real=True, nonzero=True, finite=True)
sigma1, sigma2 = symbols('sigma1 sigma2', real=True, nonzero=True,
finite=True, positive=True)
rate = Symbol('lambda', real=True, positive=True, finite=True)
def normal(x, mu, sigma):
return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2)
def exponential(x, rate):
return rate*exp(-rate*x)
assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \
mu1
assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**2 + sigma1**2
assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**3 + 3*mu1*sigma1**2
assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1
assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2
assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2
assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2
assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
-1 + mu1 + mu2
i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True)
assert not i.has(Abs)
assert simplify(i) == mu1**2 + sigma1**2
assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
sigma2**2 + mu2**2
assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1
assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \
1/rate
assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \
2/rate**2
def E(expr):
res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(x, 0, oo), (y, -oo, oo), meijerg=True)
res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(y, -oo, oo), (x, 0, oo), meijerg=True)
assert expand_mul(res1) == expand_mul(res2)
return res1
assert E(1) == 1
assert E(x*y) == mu1/rate
assert E(x*y**2) == mu1**2/rate + sigma1**2/rate
ans = sigma1**2 + 1/rate**2
assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans
assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans
assert simplify(E((x + y)**2) - E(x + y)**2) == ans
# Beta' distribution
alpha, beta = symbols('alpha beta', positive=True)
betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \
/gamma(alpha)/gamma(beta)
assert integrate(betadist, (x, 0, oo), meijerg=True) == 1
i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert (combsimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta)
j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert j[1] == (1 < beta - 1)
assert combsimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \
/(beta - 2)/(beta - 1)**2
# Beta distribution
# NOTE: this is evaluated using antiderivatives. It also tests that
# meijerint_indefinite returns the simplest possible answer.
a, b = symbols('a b', positive=True)
betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b))
assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1
assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \
a/(a + b)
assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \
a*(a + 1)/(a + b)/(a + b + 1)
assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \
gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y)
# Chi distribution
k = Symbol('k', integer=True, positive=True)
chi = 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \
sqrt(2)*gamma((k + 1)/2)/gamma(k/2)
assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k
# Chi^2 distribution
chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2 - 1)*exp(-x/2)
assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k
assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \
k*(k + 2)
assert combsimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo),
meijerg=True)) == 2*sqrt(2)/sqrt(k)
# Dagum distribution
a, b, p = symbols('a b p', positive=True)
# XXX (x/b)**a does not work
dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p + 1)
assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1
# XXX conditions are a mess
arg = x*dagum
assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b*gamma(1 - 1/a)*gamma(p + 1 + 1/a)/(
(a*p + 1)*gamma(p))
assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b**2*gamma(1 - 2/a)*gamma(p + 1 + 2/a)/(
(a*p + 2)*gamma(p))
# F-distribution
d1, d2 = symbols('d1 d2', positive=True)
f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \
/gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2)
assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1
# TODO conditions are a mess
assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none')
) == d2/(d2 - 2)
assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none')
) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2)
# TODO gamma, rayleigh
# inverse gaussian
lamda, mu = symbols('lamda mu', positive=True)
dist = sqrt(lamda/2/pi)*x**(-S(3)/2)*exp(-lamda*(x - mu)**2/x/2/mu**2)
mysimp = lambda expr: simplify(expr.rewrite(exp))
assert mysimp(integrate(dist, (x, 0, oo))) == 1
assert mysimp(integrate(x*dist, (x, 0, oo))) == mu
assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda
assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2
# Levi
c = Symbol('c', positive=True)
assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'),
(x, mu, oo)) == 1
# higher moments oo
# log-logistic
distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \
(1 + x**beta/alpha**beta)**2
assert simplify(integrate(distn, (x, 0, oo))) == 1
# NOTE the conditions are a mess, but correctly state beta > 1
assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \
pi*alpha/beta/sin(pi/beta)
# (similar comment for conditions applies)
assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \
pi*alpha**y*y/beta/sin(pi*y/beta)
# weibull
k = Symbol('k', positive=True)
n = Symbol('n', positive=True)
distn = k/lamda*(x/lamda)**(k - 1)*exp(-(x/lamda)**k)
assert simplify(integrate(distn, (x, 0, oo))) == 1
assert simplify(integrate(x**n*distn, (x, 0, oo))) == \
lamda**n*gamma(1 + n/k)
# rice distribution
from sympy import besseli
nu, sigma = symbols('nu sigma', positive=True)
rice = x/sigma**2*exp(-(x**2 + nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2)
assert integrate(rice, (x, 0, oo), meijerg=True) == 1
# can someone verify higher moments?
# Laplace distribution
mu = Symbol('mu', real=True)
b = Symbol('b', positive=True)
laplace = exp(-abs(x - mu)/b)/2/b
assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1
assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu
assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \
2*b**2 + mu**2
# TODO are there other distributions supported on (-oo, oo) that we can do?
# misc tests
k = Symbol('k', positive=True)
assert combsimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k),
(x, 0, oo)))) == polygamma(0, k)
def test_expint():
    """ Test various exponential integrals. """
    from sympy import (expint, unpolarify, Symbol, Ci, Si, Shi, Chi,
                       sin, cos, sinh, cosh, Ei)

    # Generic power: integrate(exp(-z*x)/x**y) over [1, oo) is expint(y, z).
    res = integrate(exp(-z*x)/x**y, (x, 1, oo), meijerg=True, conds='none')
    assert simplify(unpolarify(res.rewrite(expint).expand(func=True))) == \
        expint(y, z)

    def as_expint(e):
        # Helper: express a meijerg result through expint and expand it.
        return e.rewrite(expint).expand()

    assert as_expint(integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True,
                               conds='none')) == expint(1, z)
    assert as_expint(integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True,
                               conds='none')) == \
        expint(2, z).rewrite(Ei).rewrite(expint)
    assert as_expint(integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True,
                               conds='none')) == \
        expint(3, z).rewrite(Ei).rewrite(expint).expand()

    # Trigonometric integrals Ci/Si over a half-line.
    tpos = Symbol('t', positive=True)
    assert integrate(-cos(x)/x, (x, tpos, oo), meijerg=True).expand() == \
        Ci(tpos)
    assert integrate(-sin(x)/x, (x, tpos, oo), meijerg=True).expand() == \
        Si(tpos) - pi/2
    assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z)
    assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z)

    # Indefinite integrals of exp(-x)/x**n in terms of expint.
    assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \
        I*pi - expint(1, x)
    assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \
        == expint(1, x) - exp(-x)/x - I*pi

    # Polar variable: only the u-dependent factor is compared.
    u = Symbol('u', polar=True)
    assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Ci(u)
    assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Chi(u)

    # Antiderivatives of expint itself.
    assert as_expint(integrate(expint(1, x), x, meijerg=True)) == \
        x*expint(1, x) - exp(-x)
    assert as_expint(integrate(expint(2, x), x, meijerg=True)) == \
        -x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2
    assert simplify(unpolarify(integrate(expint(y, x), x, meijerg=True
                                         ).rewrite(expint).expand(func=True))) \
        == -expint(y + 1, x)

    # Antiderivatives of the trig/hyperbolic integral functions.
    assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x)
    assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u)
    assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x)
    assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u)

    # Two definite integrals with closed-form values.
    assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4
    assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2
def test_messy():
    """Transforms/integrals whose results or conditions come out messy."""
    from sympy import (laplace_transform, Si, Shi, Chi, atan, Piecewise,
                       acoth, E1, besselj, acosh, asin, And, re,
                       fourier_transform, sqrt)

    assert laplace_transform(Si(x), x, s) == ((-atan(s) + pi/2)/s, 0, True)
    assert laplace_transform(Shi(x), x, s) == (acoth(s)/s, 1, True)

    # where should the logs be simplified?
    assert laplace_transform(Chi(x), x, s) == \
        ((log(s**(-2)) - log((s**2 - 1)/s**2))/(2*s), 1, True)

    # TODO maybe simplify the inequalities?
    assert laplace_transform(besselj(a, x), x, s)[1:] == \
        (0, And(S(0) < re(a/2) + S(1)/2, S(0) < re(a/2) + 1))

    # NOTE s < 0 can be done, but argument reduction is not good enough yet
    ft = fourier_transform(besselj(1, x)/x, x, s, noconds=False)
    assert ft == (Piecewise((0, 4*abs(pi**2*s**2) > 1),
                            (2*sqrt(-4*pi**2*s**2 + 1), True)), s > 0)

    # TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons)
    #      - folding could be better
    assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) == \
        log(1 + sqrt(2))
    assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) == \
        log(S(1)/2 + sqrt(2)/2)

    assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \
        Piecewise((-acosh(1/x), 1 < abs(x**(-2))), (I*asin(1/x), True))
def test_issue_6122():
    """Gaussian with purely imaginary exponent (Fresnel-type integral)."""
    result = integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True)
    assert result == -I*sqrt(pi)*exp(I*pi/4)
def test_issue_6252():
    """The meijerg antiderivative of 1/x/(a + b*x)**(1/3) must be hyper-free."""
    expr = 1/x/(a + b*x)**(S(1)/3)
    anti = integrate(expr, x, meijerg=True)
    # BUG FIX: the assertion previously checked ``expr`` (the integrand),
    # which trivially never contains ``hyper`` — it must inspect ``anti``.
    assert not anti.has(hyper)
    # XXX the expression is a mess, but actually upon differentiation and
    # putting in numerical values seems to work...
def test_issue_6348():
    """Classic residue-theorem value: integral of exp(I*x)/(1+x^2) is pi/e."""
    res = integrate(exp(I*x)/(1 + x**2), (x, -oo, oo))
    assert res.simplify().rewrite(exp) == pi*exp(-1)
def test_fresnel():
    """Antiderivatives of cos/sin(pi*x**2/2) are the Fresnel integrals."""
    from sympy import fresnels, fresnelc
    assert expand_func(integrate(cos(pi*x**2/2), x)) == fresnelc(x)
    assert expand_func(integrate(sin(pi*x**2/2), x)) == fresnels(x)
def test_issue_6860():
    """meijerint must give up (return None) on x**x**x, not raise."""
    result = meijerint_indefinite(x**x**x, x)
    assert result is None
|
AunShiLord/sympy
|
sympy/integrals/tests/test_meijerint.py
|
Python
|
bsd-3-clause
| 27,164
|
[
"Gaussian"
] |
27c46b3a95be5945fe9223f22e53f03c1c55c4b84c24cf1318e46555069d3813
|
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged.

    Marks a string for later translation without importing
    django.utils.translation, which itself depends on the settings.
    """
    return s
####################
# CORE #
####################
# Master debug switch; must never be enabled on a production deployment.
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
    ('af', gettext_noop('Afrikaans')),
    ('ar', gettext_noop('Arabic')),
    ('ar-dz', gettext_noop('Algerian Arabic')),
    ('ast', gettext_noop('Asturian')),
    ('az', gettext_noop('Azerbaijani')),
    ('bg', gettext_noop('Bulgarian')),
    ('be', gettext_noop('Belarusian')),
    ('bn', gettext_noop('Bengali')),
    ('br', gettext_noop('Breton')),
    ('bs', gettext_noop('Bosnian')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('dsb', gettext_noop('Lower Sorbian')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('en-au', gettext_noop('Australian English')),
    ('en-gb', gettext_noop('British English')),
    ('eo', gettext_noop('Esperanto')),
    ('es', gettext_noop('Spanish')),
    ('es-ar', gettext_noop('Argentinian Spanish')),
    ('es-co', gettext_noop('Colombian Spanish')),
    ('es-mx', gettext_noop('Mexican Spanish')),
    ('es-ni', gettext_noop('Nicaraguan Spanish')),
    ('es-ve', gettext_noop('Venezuelan Spanish')),
    ('et', gettext_noop('Estonian')),
    ('eu', gettext_noop('Basque')),
    ('fa', gettext_noop('Persian')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('fy', gettext_noop('Frisian')),
    ('ga', gettext_noop('Irish')),
    ('gd', gettext_noop('Scottish Gaelic')),
    ('gl', gettext_noop('Galician')),
    ('he', gettext_noop('Hebrew')),
    ('hi', gettext_noop('Hindi')),
    ('hr', gettext_noop('Croatian')),
    ('hsb', gettext_noop('Upper Sorbian')),
    ('hu', gettext_noop('Hungarian')),
    ('hy', gettext_noop('Armenian')),
    ('ia', gettext_noop('Interlingua')),
    ('id', gettext_noop('Indonesian')),
    ('ig', gettext_noop('Igbo')),
    ('io', gettext_noop('Ido')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('ka', gettext_noop('Georgian')),
    ('kab', gettext_noop('Kabyle')),
    ('kk', gettext_noop('Kazakh')),
    ('km', gettext_noop('Khmer')),
    ('kn', gettext_noop('Kannada')),
    ('ko', gettext_noop('Korean')),
    ('ky', gettext_noop('Kyrgyz')),
    ('lb', gettext_noop('Luxembourgish')),
    ('lt', gettext_noop('Lithuanian')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('ml', gettext_noop('Malayalam')),
    ('mn', gettext_noop('Mongolian')),
    ('mr', gettext_noop('Marathi')),
    ('my', gettext_noop('Burmese')),
    ('nb', gettext_noop('Norwegian Bokmål')),
    ('ne', gettext_noop('Nepali')),
    ('nl', gettext_noop('Dutch')),
    ('nn', gettext_noop('Norwegian Nynorsk')),
    ('os', gettext_noop('Ossetic')),
    ('pa', gettext_noop('Punjabi')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sq', gettext_noop('Albanian')),
    ('sr', gettext_noop('Serbian')),
    ('sr-latn', gettext_noop('Serbian Latin')),
    ('sv', gettext_noop('Swedish')),
    ('sw', gettext_noop('Swahili')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('tg', gettext_noop('Tajik')),
    ('th', gettext_noop('Thai')),
    ('tk', gettext_noop('Turkmen')),
    ('tr', gettext_noop('Turkish')),
    ('tt', gettext_noop('Tatar')),
    ('udm', gettext_noop('Udmurt')),
    ('uk', gettext_noop('Ukrainian')),
    ('ur', gettext_noop('Urdu')),
    ('uz', gettext_noop('Uzbek')),
    ('vi', gettext_noop('Vietnamese')),
    ('zh-hans', gettext_noop('Simplified Chinese')),
    ('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Extra filesystem directories searched for translation catalogs.
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
# TLS (STARTTLS) and SSL are mutually exclusive; the cert/key files
# configure an optional client certificate for the SMTP connection.
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
# Template engine configuration: a list of engine dicts (BACKEND, DIRS, ...).
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
# Per-model overrides for get_absolute_url(), keyed by "app_label.model_name".
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
    '%H:%M:%S', # '14:30:59'
    '%H:%M:%S.%f', # '14:30:59.000200'
    '%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default primary key field type.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'DENY'
# Whether to trust the X-Forwarded-Host / X-Forwarded-Port headers set
# by a front-end proxy instead of the server-derived values.
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the HttpOnly flag.
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', 'None', or False to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# Key prefix, page timeout (seconds) and cache alias used by the
# per-site cache middleware (UpdateCacheMiddleware/FetchFromCacheMiddleware).
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
# Where @login_required redirects to, and the post-login/post-logout targets.
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of seconds a password reset link is valid for (default: 3 days).
PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter'
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
# Defaults consumed by django.middleware.security.SecurityMiddleware
# (XSS filter header, nosniff, HSTS, SSL redirect, Referrer-Policy).
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
elena/django
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 22,349
|
[
"VisIt"
] |
0b431190b591f2546741b5958e2156c3795765292f9c027b3ed261979336757f
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from six import string_types
from six.moves import configparser
from ansible.parsing.splitter import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
    """Coerce an arbitrary config value to a bool; None counts as False."""
    if value is None:
        return False
    # Anything whose string form is one of these (case-insensitive) is True.
    truthy = ("true", "t", "y", "1", "yes")
    return str(value).lower() in truthy
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
    ''' return a configuration variable with casting '''
    result = _get_config(p, section, key, env_var, default)
    if boolean:
        result = mk_boolean(result)
    # Falsy values (False, '', None, 0) are returned as-is: no cast applies.
    if not result:
        return result
    if integer:
        return int(result)
    if floating:
        return float(result)
    if islist:
        # Only strings are split; an already-listy value passes through.
        if isinstance(result, string_types):
            return [piece.strip() for piece in result.split(',')]
        return result
    if isinstance(result, string_types):
        return unquote(result)
    return result
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
    ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
    parser = configparser.ConfigParser()
    # ANSIBLE_CONFIG may point at either a file or a directory containing
    # ansible.cfg.
    env_path = os.getenv("ANSIBLE_CONFIG", None)
    if env_path is not None:
        env_path = os.path.expanduser(env_path)
        if os.path.isdir(env_path):
            env_path += "/ansible.cfg"
    candidates = [
        env_path,
        os.getcwd() + "/ansible.cfg",
        os.path.expanduser("~/.ansible.cfg"),
        "/etc/ansible/ansible.cfg",
    ]
    # First existing candidate wins; later ones are never consulted.
    for candidate in candidates:
        if candidate is None or not os.path.exists(candidate):
            continue
        try:
            parser.read(candidate)
        except configparser.Error as e:
            raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
        return parser, candidate
    return None, ''
def shell_expand_path(path):
    ''' shell_expand_path is needed as os.path.expanduser does not work
    when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
    # Falsy inputs (None, '') are returned untouched.
    if not path:
        return path
    return os.path.expanduser(os.path.expandvars(path))
# ``p`` is the parsed config (or None when no config file was found) and
# CONFIG_FILE the path actually loaded ('' when none).
p, CONFIG_FILE = load_config_file()
# login name of the effective user; used as the remote_user fallback below
active_user = pwd.getpwuid(os.geteuid())[0]
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# sections in config file
DEFAULTS='defaults'
# generally configurable things
# NOTE: entries passing ``None`` for env_var (module_name, pattern,
# ansible_managed) have no environment-variable override.
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ANSIBLE_CUSTOM_MODULES = shell_expand_path(get_config(p, DEFAULTS, 'custom_modules', 'ANSIBLE_CUSTOM_MODULES', None))
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None))
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
# default become method honours the legacy sudo/su flags above
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# Plugin paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# fact caching
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024
TREE_DIR = None
|
sirkubax/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 16,496
|
[
"Galaxy"
] |
ffa8d5acb6f67ee4f907c96a5133d30ac56bb97dfeee699b94d5cf4a278f0d7e
|
#! /usr/bin/python
# Copyright (C) 2010-2021 ABINIT group
#
# Written by Matthieu Verstraete in python (compatible v1.9).
# This is free software, and you are welcome to redistribute it
# under certain conditions (GNU General Public License,
# see ~abinit/COPYING or http://www.gnu.org/copyleft/gpl.txt).
#
# ABINIT is a project of the Universite Catholique de Louvain,
# Corning Inc. and other collaborators, see ~abinit/doc/developers/contributors.txt.
# Please read ~abinit/doc/biblio/generated_files/bib_acknow.html for suggested
# acknowledgments of the ABINIT effort.
#
# For more information, see https://www.abinit.org .
#
# This script is to be used with the PHON code (or equivalent)
# to calculate frozen phonon frequencies, free energies, etc...
# It takes a DISP file of atomic displacements and an SPOSCAR file
# with a supercell structure (VASP/PHON formats), and creates
# the necessary lines for an abinit input file to calculate the
# forces in displaced configurations.
# See http://chianti.geol.ucl.ac.uk/~dario or
# D. Alfe, Computer Physics Communications 180,2622-2633 (2009)
#
# NOTE: the symmetries in the present (1.28 8/2010) version of PHON
# are not functioning properly in some cases. It is your own
# responsibility to check it, and has nothing to do with ABINIT.
#
# How to use:
#
# 1) run abinit for the relaxed structure with prtposcar 1.
# this creates a reference XXX_POSCAR file, which you should rename
# POSCAR.
# 2) run phon with LSUPER=.true. or (e.g.) phonopy -d --dim="2 2 3"
# to create the SPOSCAR and DISP files
# 3) run this script (phondisp2abi.py).
# 4) copy script output to the abinit input file (removing duplicate
# input variables etc...)
# 5) run abinit for each of the given datasets (and prtposcar 1 still)
# 6) concatenate the resulting XXX_FORCES files into one FORCES file
# You also need to include the header lines for each displacement,
# which are given by phondisp2abi.py in comments for each dataset
# 7) run phon again to get the desired phonons and properties.
#
#
import re
import string
import numpy
import numpy.linalg
#
# convert PHON DISP and SPOSCAR files into ABINIT datasets with appropriately displaced atoms
#
# NOTE(review): this script is Python 2 only -- ``print`` statements,
# list-returning ``map`` indexing and ``string.split`` all predate Python 3.
# Read the PHON/phonopy displacement list and the supercell description.
fp_disp = open('DISP')
lines_disp = fp_disp.readlines()
fp_sposcar = open('SPOSCAR')
lines_sposcar = fp_sposcar.readlines()
# make unit cell input line
# SPOSCAR lines 3-5 hold the lattice vectors; stored column-wise so that
# rprimd[:, idir] is the idir-th vector (matches the rprim printing below).
rprimd = numpy.zeros((3,3))
for idir in range(3):
    line = lines_sposcar[2+idir]
    tokens = map(float,string.split(line))
    rprimd[0][idir] = tokens[0]
    rprimd[1][idir] = tokens[1]
    rprimd[2][idir] = tokens[2]
# get equilibrium positions (reduced coordinates) from line 8 of SPOSCAR on
equilxred=[]
for line in lines_sposcar[7:]:
    equilxred.append(numpy.array(map(float,string.split(line))))
# output unit cell input line
print "# Add this to the abinit input file to do the PHON displacements"
print "# given in the DISP file, with respect to the supercell in SPOSCAR"
print "#"
print "# Remember the POSCAR files have sorted the atomic types so the positions"
print "# and displacements are now type ordered (fix typat, spinat, etc!)"
print "#"
print "ndtset ", len(lines_disp)
print "# supercell lattice vectors "
print "acell 1 1 1 Angstr"
print "rprim"
print " %24.14f %24.14f %24.14f" % (rprimd[0][0], rprimd[1][0], rprimd[2][0])
print " %24.14f %24.14f %24.14f" % (rprimd[0][1], rprimd[1][1], rprimd[2][1])
print " %24.14f %24.14f %24.14f" % (rprimd[0][2], rprimd[1][2], rprimd[2][2])
idtset=1
# for each displacement, emit one dataset with a single displaced atom
for line in lines_disp:
    tokens = string.split(line)
    # get displacement in reduced coordinates; DISP line layout appears to be
    # <tag> <atom index, 1-based> <dx> <dy> <dz> -- TODO confirm against PHON docs
    iatom = int(tokens[1])
    dispred = numpy.array(map(float,tokens[2:5]))
    # add displacement to correct atom; a shallow list copy suffices because
    # the displaced entry is rebound (not mutated in place)
    xred = list(equilxred)
    xred[iatom-1] = xred[iatom-1] + dispred
    # output xred for this dataset
    print "# add the following line, without the #, to the FORCES file for this dtset, when concatenating"
    print "# %d %24.14f %24.14f %24.14f" % (iatom, dispred[0], dispred[1], dispred[2])
    print "xred%d" % (idtset,)
    for xred_1at in xred:
        print " %24.14f %24.14f %24.14f" % (xred_1at[0], xred_1at[1], xred_1at[2])
    # increment dataset counter
    idtset=idtset+1
|
abinit/abinit
|
scripts/post_processing/phondisp2abi.py
|
Python
|
gpl-3.0
| 4,163
|
[
"ABINIT",
"VASP",
"phonopy"
] |
b226caa3976f266c68454ecdf65435f015773d7234feecd55c72d013d6c58f82
|
r"""
Details about the continum and numerical model equations can be found on:
Agnaou, M., Sadeghi, M. A., Tranter, T. G., & Gostick, J. (2020).
Modeling transport of charged species in pore networks: solution of the
Nernst-Planck equations coupled with fluid flow and charge conservation
equations. Computers & Geosciences, 104505.
"""
import openpnm as op
from openpnm.phase import mixtures
import numpy as np
# Workspace / project scaffolding; ``export`` gates the final file dump.
ws = op.Workspace()
proj = ws.new_project()
export = False
# network, geometry, phase
np.random.seed(0)
net = op.network.Cubic(shape=[8, 8, 1], spacing=9e-4, project=proj)
# Corner pores (intersections of two boundary labels) are trimmed along with
# surface throats before building the geometry.
Ps = (net['pore.back'] * net['pore.right']
      + net['pore.back'] * net['pore.left']
      + net['pore.front'] * net['pore.right']
      + net['pore.front'] * net['pore.left'])
Ts = net['throat.surface']
op.topotools.trim(network=net, pores=net.Ps[Ps], throats=net.Ts[Ts])
geo = op.geometry.SpheresAndCylinders(network=net, pores=net.Ps, throats=net.Ts)
# Uniform pore/throat diameters via constant models.
pore_d = op.models.misc.constant
throat_d = op.models.misc.constant
geo.add_model(propname='pore.diameter', model=pore_d, value=1.5e-4)
geo.add_model(propname='throat.diameter', model=throat_d, value=1e-4)
geo.regenerate_models()
sw = mixtures.SalineWater(network=net)
# Retrieve handles to each species for use below
Na = sw.components['Na_' + sw.name]
Cl = sw.components['Cl_' + sw.name]
H2O = sw.components['H2O_' + sw.name]
# physics: hydraulic, ionic and per-species diffusive conductances
phys = op.physics.GenericPhysics(network=net, phase=sw, geometry=geo)
flow = op.models.physics.hydraulic_conductance.hagen_poiseuille
phys.add_model(propname='throat.hydraulic_conductance',
               pore_viscosity='pore.viscosity',
               throat_viscosity='throat.viscosity',
               model=flow, regen_mode='normal')
current = op.models.physics.ionic_conductance.electroneutrality
phys.add_model(propname='throat.ionic_conductance', ions=[Na.name, Cl.name],
               model=current, regen_mode='normal')
eA_dif = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance.' + Na.name,
               pore_diffusivity='pore.diffusivity.' + Na.name,
               throat_diffusivity='throat.diffusivity.' + Na.name,
               model=eA_dif, regen_mode='normal')
eB_dif = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance.' + Cl.name,
               pore_diffusivity='pore.diffusivity.' + Cl.name,
               throat_diffusivity='throat.diffusivity.' + Cl.name,
               model=eB_dif, regen_mode='normal')
# advection-diffusion-migration conductance, one model per ion
s_scheme = 'powerlaw'
ad_dif_mig_Na = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
phys.add_model(propname='throat.ad_dif_mig_conductance.' + Na.name,
               pore_pressure='pore.pressure', model=ad_dif_mig_Na,
               ion=Na.name, s_scheme=s_scheme)
ad_dif_mig_Cl = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
phys.add_model(propname='throat.ad_dif_mig_conductance.' + Cl.name,
               pore_pressure='pore.pressure', model=ad_dif_mig_Cl,
               ion=Cl.name, s_scheme=s_scheme)
# settings for algorithms: setts1 for the inner (steady) solvers, setts2 for
# the outer transient multiphysics coupling loop
setts1 = {'solver_max_iter': 5, 'solver_tol': 1e-08, 'solver_rtol': 1e-08,
          'nlin_max_iter': 10, 'cache': False}
setts2 = {'g_tol': 1e-4, 'g_max_iter': 4, 't_output': 5000, 't_step': 500,
          't_final': 20000, 't_scheme': 'implicit'}
# algorithms
# Stokes flow drives the pressure field used by the migration conductances.
sf = op.algorithms.StokesFlow(network=net, phase=sw, settings=setts1)
sf.set_value_BC(pores=net.pores('back'), values=0.01)
sf.set_value_BC(pores=net.pores('front'), values=0.00)
sf.run()
sw.update(sf.results())
p = op.algorithms.TransientIonicConduction(network=net, phase=sw,
                                           settings=setts1)
p.set_value_BC(pores=net.pores('left'), values=0.1)
p.set_value_BC(pores=net.pores('right'), values=0.00)
p.settings['charge_conservation'] = 'electroneutrality'
# One Nernst-Planck transport algorithm per ionic species.
eA = op.algorithms.TransientNernstPlanck(network=net, phase=sw, ion=Na.name,
                                         settings=setts1)
eA.set_value_BC(pores=net.pores('back'), values=100)
eA.set_value_BC(pores=net.pores('front'), values=90)
eB = op.algorithms.TransientNernstPlanck(network=net, phase=sw, ion=Cl.name,
                                         settings=setts1)
eB.set_value_BC(pores=net.pores('back'), values=100)
eB.set_value_BC(pores=net.pores('front'), values=90)
# Outer solver couples the potential field and both ion transport problems.
it = op.algorithms.TransientNernstPlanckMultiphysicsSolver(network=net,
                                                           phase=sw,
                                                           settings=setts2)
it.settings["potential_field"] = p.name
it.settings["ions"] = [eA.name, eB.name]
it.run()
sw.update(sf.results())
sw.update(p.results())
sw.update(eA.results())
sw.update(eB.results())
# output results to a vtk file for visualization on Paraview
if export:
    proj.export_data(phases=[sw], filename='out', filetype='xdmf')
|
PMEAL/OpenPNM
|
scripts/example_transient_nernst_planck.py
|
Python
|
mit
| 4,890
|
[
"ParaView",
"VTK"
] |
dcd82c179116c0a52db429531f3cd270b9fe191805e9fe75996f585249603248
|
from __future__ import print_function
import matplotlib
import numpy as np
import copy
import re
import warnings
from astropy import log
from astropy import units as u
from six.moves import xrange
from six import string_types
from ..config import mycfg
from ..config import ConfigDescriptor as cfgdec
from . import units
from . import models
from ..specwarnings import warn
from . import interactive
from . import history
from . import widgets
class Registry(object):
    """
    This class is a simple wrapper to prevent fitter properties from being globals
    """

    def __init__(self):
        # fitter name -> number of model parameters
        self.npars = {}
        # fitter name -> multi-component fitter function
        self.multifitters = {}
        #to delete
        self.peakbgfitters = {}
        # interactive-mode key -> fitter name
        self.fitkeys = {}
        # fitter name -> interactive-mode key (inverse of fitkeys)
        self.associatedkeys = {}
        self._interactive_help_message_root = """
'?' will print this help message again. The keys denoted by surrounding / / are
mnemonics.
1. Left-click or hit 'p' (/p/ick) with the cursor over the plot at both of the
two desired X-values to select a fitting range. You can e/x/clude parts of the
spectrum by hitting 'x' at two positions.
2. Then /m/iddle-click or hit 'm' twice to select (/m/ark) a peak and width -
the first mark should be on the peak of the line, the second should be at the
approximate half-max point on the curve.
3. When you're done, right-click or hit 'd' to perform the fit and disconnect
the mouse and keyboard (/d/isconnect because you're /d/one). Any time before
you're /d/one, you can select a different fitter (see below).
To /c/ancel or /c/lear all connections, press 'c'
'?' : get help (this message)
'c' : cancel / clear
'p','1' : pick / selection region for fitting
'm','2' : mark / identify a peak
'd','3' : done / do the fit, then disconnect the fitter
'i' : individual components / show each fitted component
You can select different fitters to use with the interactive fitting routine.
The default is gaussian ('g'), all options are listed below:
"""
        self._make_interactive_help_message()

    def __copy__(self):
        # http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            setattr(result, k, copy.deepcopy(v, memo))
        return result

    def add_fitter(self, name, function, npars, override=False, key=None,
                   multisingle=None):
        '''
        Register a fitter function.
        Parameters
        ----------
        name: string
            The fit function name.
        function: function
            The fitter function. Single-fitters should take npars + 1 input
            parameters, where the +1 is for a 0th order baseline fit. They
            should accept an X-axis and data and standard fitting-function
            inputs (see, e.g., gaussfitter). Multi-fitters should take N *
            npars, but should also operate on X-axis and data arguments.
        npars: int
            How many parameters does the function being fit accept?
        Other Parameters
        ----------------
        override: True | False
            Whether to override any existing type if already present.
        key: char
            Key to select the fitter in interactive mode
        '''
        if multisingle is not None:
            warn("The 'multisingle' keyword is no longer required.",
                 DeprecationWarning)
        if name not in self.peakbgfitters or override:
            self.peakbgfitters[name] = function
        if name not in self.multifitters or override:
            self.multifitters[name] = function
        if key is not None:
            self.fitkeys[key] = name
            self._make_interactive_help_message()
        self.npars[name] = npars
        # BUG FIX: this previously assigned ``self.associated_keys`` (with an
        # underscore) while __init__ initialized ``self.associatedkeys``, so
        # the initialized attribute was never updated.  Keep both names
        # pointing at the same mapping for backward compatibility.
        inverse = dict(zip(self.fitkeys.values(), self.fitkeys.keys()))
        self.associatedkeys = inverse
        self.associated_keys = inverse

    def _make_interactive_help_message(self):
        """
        Generate the interactive help message from the fitkeys
        """
        self.interactive_help_message = (
            self._interactive_help_message_root +
            "\n" +
            "\n".join(["'%s' - select fitter %s" % (key,name) for key,name in self.fitkeys.items()]) +
            "\n" # trailing \n so that users' input is on a fresh line
        )
# Declare default registry built in for all spectra
default_Registry = Registry()
# Single-letter keys select each fitter in interactive mode.
default_Registry.add_fitter('ammonia',models.ammonia_model(),6,key='a')
default_Registry.add_fitter('cold_ammonia',models.ammonia.cold_ammonia_model(),6)
default_Registry.add_fitter('ammonia_tau',models.ammonia_model_vtau(),6)
# not implemented default_Registry.add_fitter(Registry,'ammonia',models.ammonia_model( ),6, ,key='A')
default_Registry.add_fitter('formaldehyde',models.formaldehyde_fitter,3,key='F') # CAN'T USE f! reserved for fitting
# don't override default_Registry.add_fitter('formaldehyde',models.formaldehyde_vheight_fitter,3)
default_Registry.add_fitter('gaussian',models.gaussian_fitter(),3,key='g')
default_Registry.add_fitter('vheightgaussian',models.gaussian_vheight_fitter(),4)
default_Registry.add_fitter('voigt',models.voigt_fitter(),4,key='v')
default_Registry.add_fitter('lorentzian',models.lorentzian_fitter(),3,key='L')
#default_Registry.add_fitter('hill5',models.hill5infall.hill5_fitter,5)
#default_Registry.add_fitter('hcn',models.hcn.hcn_vtau_fitter,4)
class Specfit(interactive.Interactive):
def __init__(self, Spectrum, Registry=None):
super(Specfit, self).__init__(Spectrum,
interactive_help_message=Registry.interactive_help_message)
self.model = None
self.parinfo = None
self.modelpars = None
self.modelerrs = None
self.modelplot = []
self.modelcomponents = None
self._plotted_components = []
self.npeaks = 0
#self.nclicks_b1 = 0
#self.nclicks_b2 = 0
#self.xmin = 0
#self.xmax = Spectrum.data.shape[0]
self.button2action = self.guesspeakwidth
self.guesses = []
self.click = 0
self.fitkwargs = {}
self.auto = False
self.fitleg=None
self.residuals=None
self.setfitspec()
self.fittype = 'gaussian'
self.measurements = None
self.vheight=False # vheight must be a boolean, can't be none
self._component_kwargs = {}
self.Registry = Registry
self.autoannotate = mycfg['autoannotate']
self.EQW_plots = []
#self.seterrspec()
    @property
    def fitter(self):
        # ``_fitter`` is only assigned via the setter below (after a fit has
        # been run), so guard with hasattr and raise a descriptive error
        # instead of a bare AttributeError on ``_fitter``.
        if hasattr(self, '_fitter'):
            return self._fitter
        else:
            raise AttributeError("The 'specfit' object has no 'fitter' yet. "
                                 "This means you haven't yet run a fit. The "
                                 "fitter is not accessible until after a fit "
                                 "has been run.")

    @fitter.setter
    def fitter(self, value):
        # Stash the active fitter; read it back through the property above.
        self._fitter = value
@cfgdec
def __call__(self, interactive=False, usemoments=True,
fittype=None,
clear_all_connections=True, debug=False, guesses='moments',
parinfo=None, save=True, annotate=None, show_components=None,
use_lmfit=False, verbose=True, clear=True,
reset_selection=True,
fit_plotted_area=True, use_window_limits=None, vheight=None,
exclude=None, **kwargs):
"""
Fit model functions to a spectrum
Parameters
----------
interactive : boolean
The plotter window will go into interactive mode. See
self.interactive_help_message for details on how to use the
interactive fitter.
fittype : str
[passed to fitting codes; defaults to gaussian]
The model to use. Model must be registered in self.Registry.
gaussian, lorentzian, and voigt profiles are registered by default
guesses : list or 'moments'
A list of guesses. Guesses must have length = n*number of parameters
in model. Guesses are *required* for multifit fits (there is no
automated guessing for most models)
EXAMPLE: for single-fit gaussian
guesses = [height,amplitude,center,width]
for multi-fit gaussian, it is
[amplitude, center, width]
You can also pass the keyword string 'moments' to have the moments
be used to automatically determine the guesses for a *single* peak
parinfo : `pyspeckit.spectrum.parinfo.ParinfoList`
An alternative way to specify guesses. Supercedes guesses.
use_lmfit : boolean
If lmfit-py (https://github.com/newville/lmfit-py) is installed, you
can use it instead of the pure-python (but slow) mpfit.
reset_selection : boolean
Override any selections previously made using `fit_plotted_area` or
other keywords?
fit_plotted_area : boolean
If no other limits are specified, the plotter's xmin/xmax will be
used to define the fit region. Only respects the x-axis limits,
not the y-axis limits.
use_window_limits : boolean
If ``fit_plotted_area==True`` and no other limits are specified,
will use the displayed window area (as set by the zoom tools) as
the fitting range. Only respects the x-axis limits, not the y-axis
limits.
exclude : None or list
Passed to selectregion; specifies regions to exclude in xarr units
Plotter-related Parameters
--------------------------
annotate : None or boolean
If None, will use the default stored in self.annotate, otherwise
overwrites. Annotations will appear on the plot if a plotter
exists.
show_components : boolean
Show the individual components of a multi-component fit (defaults
to blue)
clear : boolean
Clear previous fitter plots before overplotting the fit?
Advanced Parameters
-------------------
clear_all_connections : boolean
Clear all of the interactive connections from a previous interactive
session (e.g., a baseline fitting session) before continuing?
usemoments : boolean
Use the moments of the spectrum as input guesses. Only works
for gaussian and gaussian-like models. Only works for single-fit
mode (not multifit)
DEPRECATED
debug : boolean
Print debug statements?
save : boolean
Save best fits in the FITS header as keywords? ONLY IMPLEMENTED
FOR GAUSSIANS
verbose : boolean
Print out extra stuff
vheight : None or boolean
if None, defaults to self.vheight, otherwise overrides
Determines whether a 0th order baseline will be fit along with the
line
"""
if clear:
self.clear()
if reset_selection:
self.selectregion(verbose=verbose, debug=debug,
fit_plotted_area=fit_plotted_area,
exclude=exclude,
use_window_limits=use_window_limits, **kwargs)
for arg in ['xmin','xmax','xtype','reset']:
if arg in kwargs:
kwargs.pop(arg)
if fittype is not None:
self.fittype = fittype
self.fitter = self.Registry.multifitters[self.fittype]
if 'multifit' in kwargs:
kwargs.pop('multifit')
log.warning("The multifit keyword is no longer required. All fits "
"allow for multiple components.", DeprecationWarning)
if 'guess' in kwargs:
if guesses is None:
guesses = kwargs.pop('guess')
log.warning("The keyword 'guess' is nonstandard; please use 'guesses'")
else:
raise ValueError("Received keywords 'guess' and 'guesses'. "
"Please only use 'guesses'")
self.npeaks = 0
self.fitkwargs = kwargs
log.debug("Additional keyword arguments passed to fitter are: {0}"
.format(kwargs))
if interactive:
if self.Spectrum.plotter.axis is None:
raise Exception("Interactive fitting requires a plotter.")
# reset button count & guesses on every __call__
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.guesses = []
self.start_interactive(clear_all_connections=clear_all_connections,
reset_selection=True,
debug=debug, **kwargs)
elif (self.fittype in self.Registry.multifitters
or guesses is not None
or parinfo is not None):
if guesses is None and parinfo is None:
raise ValueError("You must input guesses when using multifit."
" Also, baseline (continuum fit) first!")
elif parinfo is not None:
self.guesses = parinfo.values
self.parinfo = parinfo
self.multifit(show_components=show_components, verbose=verbose,
debug=debug, use_lmfit=use_lmfit,
annotate=annotate, parinfo=parinfo,
guesses=None, **kwargs)
elif guesses is not None:
if isinstance(guesses, tuple):
guesses = list(guesses)
self.guesses = guesses
self.multifit(show_components=show_components, verbose=verbose,
debug=debug, use_lmfit=use_lmfit,
fittype=fittype,
guesses=guesses, annotate=annotate, **kwargs)
else:
raise ValueError("Guess and parinfo were somehow invalid.")
else:
raise ValueError("Can't fit with given fittype {0}:"
" it is not Registered as a fitter.".format(self.fittype))
if save:
self.savefit()
def EQW(self, plot=False, plotcolor='g', fitted=True, continuum=None,
components=False, annotate=False, alpha=0.5, loc='lower left',
xmin=None, xmax=None, xunits=None, continuum_as_baseline=False,
xunit='pixel', midpt_location='plot-center'):
"""
Returns the equivalent width (integral of "baseline" or "continuum"
minus the spectrum) over the selected range
(the selected range defaults to self.xmin:self.xmax, so it may include
multiple lines!)
Parameters
----------
plot : bool
Plots a box indicating the EQW if plot==True (i.e., it will have a
width equal to the equivalent width, and a height equal to the
measured continuum)
fitted : bool
Use the fitted model? If false, uses the data
continuum : None or float
Can specify a fixed continuum with this keyword, otherwise will use
the fitted baseline. WARNING: continuum=0 will still "work", but
will give numerically invalid results. Similarly, a negative continuum
will work, but will yield results with questionable physical meaning.
continuum_as_baseline : bool
Replace the baseline with the specified continuum when computing
the absorption depth of the line
components : bool
If your fit is multi-component, will attempt to acquire centroids
for each component and print out individual EQWs
xmin : float
xmax : float
The range over which to compute the EQW
xunit : str
The units of xmin/xmax
midpt_location : 'fitted', 'plot-center'
If 'plot' is set, this determines where the EQW will be drawn. It
can be the fitted centroid or the plot-center, i.e. (xmin+xmax)/2
Returns
-------
Equivalent Width, or widths if components=True
"""
if continuum is not None:
# if continuum is specified, don't bother with checks
if np.median(self.Spectrum.baseline.basespec) == 0:
raise ValueError("Baseline / continuum is zero: equivalent width is undefined.")
elif np.median(self.Spectrum.baseline.basespec) < 0:
if mycfg.WARN: warn( "WARNING: Baseline / continuum is negative: equivalent width is poorly defined." )
if xunits is not None and xunit=='pixel':
# todo: deprecation warning
xunit = xunits
# determine range to use
if xmin is None:
xmin = self.xmin #self.Spectrum.xarr.x_to_pix(self.xmin)
else:
xmin = self.Spectrum.xarr.x_to_pix(xmin, xval_units=xunit)
if xmax is None:
xmax = self.xmax #self.Spectrum.xarr.x_to_pix(self.xmax)
else:
xmax = self.Spectrum.xarr.x_to_pix(xmax, xval_units=xunit)
dx = np.abs(self.Spectrum.xarr[xmin:xmax].cdelt(approx=True).value)
log.debug("xmin={0} xmax={1} dx={2} continuum={3}"
.format(xmin, xmax, dx, continuum))
if components:
centroids = self.fitter.analytic_centroids()
integrals = self.fitter.component_integrals(self.Spectrum.xarr[xmin:xmax],dx=dx)
eqw = []
for cen,integ in zip(centroids,integrals):
center_pix = self.Spectrum.xarr.x_to_pix(cen)
if continuum is None:
continuum = self.Spectrum.baseline.basespec[center_pix]
elif continuum_as_baseline:
integrals[-1] += -(self.Spectrum.baseline.basespec[xmin:xmax] - continuum).sum() * dx
eqw.append(-integ / continuum)
if plot:
plot = False
if mycfg.WARN:
warn("Cannot plot multiple Equivalent Widths")
elif fitted:
model = self.get_model(self.Spectrum.xarr[xmin:xmax],
add_baseline=False)
# EQW is positive for absorption lines
# fitted components are assume to be continuum-subtracted
integral = (-model).sum() * dx
if continuum is None:
# centroid in data units
# (may fail if model has pos + neg values)
center = (model*self.Spectrum.xarr[xmin:xmax]).sum()/model.sum()
center_pix = self.Spectrum.xarr.x_to_pix(center)
continuum = self.Spectrum.baseline.basespec[center_pix]
elif continuum_as_baseline:
integral += -(self.Spectrum.baseline.basespec[xmin:xmax] - continuum).sum() * dx
eqw = integral / continuum
else:
if continuum_as_baseline:
diffspec = (continuum - self.Spectrum.data)
elif self.Spectrum.baseline.subtracted is False:
diffspec = (self.Spectrum.baseline.basespec - self.Spectrum.data)
else:
diffspec = -self.Spectrum.data
sumofspec = diffspec[xmin:xmax].sum() * dx
if continuum is None:
continuum = np.median(self.Spectrum.baseline.basespec)
eqw = sumofspec / continuum
if plot and self.Spectrum.plotter.axis:
if midpt_location == 'plot-center':
midpt_pixel = int(np.round((xmin+xmax)/2.0))
midpt = self.Spectrum.xarr[midpt_pixel].value
elif midpt_location == 'fitted':
try:
shifts = np.array([self.Spectrum.specfit.parinfo[x].value
for x in self.Spectrum.specfit.parinfo.keys()
if 'SHIFT' in x])
except AttributeError:
raise AttributeError("Can only specify midpt_location="
"fitted if there is a SHIFT parameter"
"for the fitted model")
# We choose to display the eqw fit at the center of the fitted
# line set, closest to the passed window.
# Note that this has the potential to show a eqw "rectangle"
# centered on a fitted line other than the one measured for the
# eqw call, if there are more than one fitted lines within the
# window.
midpt_pixel = int((xmin+xmax)/2)
midval = self.Spectrum.xarr[midpt_pixel].value
midpt_index = np.argmin(np.abs(shifts-midval))
midpt = shifts[midpt_index]
midpt_pixel = self.Spectrum.xarr.x_to_pix(midpt)
else:
raise ValueError("midpt_location must be 'plot-center' or "
"fitted")
if continuum_as_baseline:
midpt_level = continuum
else:
midpt_level = self.Spectrum.baseline.basespec[midpt_pixel]
log.debug("EQW plotting: midpt={0}, midpt_pixel={1}, "
"midpt_level={2}, eqw={3}".format(midpt, midpt_pixel,
midpt_level, eqw))
self.EQW_plots.append(self.Spectrum.plotter.axis.fill_between(
[midpt-eqw/2.0,midpt+eqw/2.0], [0,0],
[midpt_level,midpt_level], color=plotcolor, alpha=alpha,
label='EQW: %0.3g' % eqw))
if annotate:
self.Spectrum.plotter.axis.legend(
[(matplotlib.collections.CircleCollection([0],facecolors=[plotcolor],edgecolors=[plotcolor]))],
[('EQW: %0.3g' % eqw)],
markerscale=0.01, borderpad=0.1, handlelength=0.1,
handletextpad=0.1, loc=loc)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
if hasattr(self.Spectrum,'header'):
history.write_history(self.Spectrum.header, "EQW for %s: %s" %
(self.fittype,eqw))
return eqw
    def register_fitter(self,*args,**kwargs):
        """
        Register a model fitter
        """
        # thin pass-through to the instance's Registry
        self.Registry.add_fitter(*args,**kwargs)
    # append the registry's own parameter documentation so that
    # help(register_fitter) describes the accepted arguments
    register_fitter.__doc__ += Registry.add_fitter.__doc__
    def seterrspec(self, usestd=None, useresiduals=True):
        """
        Simple wrapper function to set the error spectrum; will either use the
        input spectrum or determine the error using the RMS of the residuals,
        depending on whether the residuals exist.

        Parameters
        ----------
        usestd : bool or None
            If truthy, ignore the spectrum's stored error array and fall back
            to residual/data standard deviations.
        useresiduals : bool
            Allow residuals from a previous fit to define the error estimate.
        """
        if (self.Spectrum.error is not None) and not usestd:
            if (self.Spectrum.error == 0).all():
                # the stored error array is all-zero, so it cannot be used as
                # weights directly; synthesize a usable error spectrum
                if self.residuals is not None and useresiduals:
                    residuals_std = self.residuals.std()
                    if residuals_std == 0:
                        self.errspec = np.ones(self.spectofit.shape[0])
                        warnings.warn("Residuals have 0 standard deviation. "
                                      "That's probably too good to be true.")
                    else:
                        self.errspec = np.ones(self.spectofit.shape[0]) * residuals_std
                elif type(self.Spectrum.error) is np.ma.masked_array:
                    # force errspec to be a non-masked array of ones
                    self.errspec = self.Spectrum.error.data + 1
                else:
                    # all-zero plain array: shift it to all-ones
                    self.errspec = self.Spectrum.error + 1
            else:
                # this is the default behavior if spectrum.error is set
                self.errspec = self.Spectrum.error.copy()
        elif self.residuals is not None and useresiduals:
            # no usable error array: estimate from the residual RMS
            self.errspec = np.ones(self.spectofit.shape[0]) * self.residuals.std()
        else:
            # last resort: use the RMS of the data itself
            self.errspec = np.ones(self.spectofit.shape[0]) * self.spectofit.std()
    def setfitspec(self):
        """
        Set the spectrum that will be fit.  This is primarily to remove NANs
        from consideration: if you simply remove the data from both the X-axis
        and the Y-axis, it will not be considered for the fit, and a linear
        X-axis is not needed for fitting.

        However, it may be possible to do this using masked arrays instead of
        setting errors to be 1e10....

        Sets ``self.spectofit``, ``self.errspec``, and the ``self._valid``
        flag consumed by multifit.
        """
        if self.Spectrum.data.sum() is np.ma.masked:
            # fully-masked spectrum: flag as unfittable and bail out
            self.spectofit = np.zeros_like(self.Spectrum.data)
            self.errspec = np.zeros_like(self.Spectrum.data)
            self._valid = False
            return
        # see https://github.com/numpy/numpy/issues/3474
        self.spectofit = np.ma.copy(self.Spectrum.data)
        if hasattr(self.Spectrum.data, 'mask') and hasattr(self.spectofit,
                                                           'mask'):
            # sanity check: the copy must preserve the mask
            assert np.all(self.Spectrum.data.mask == self.spectofit.mask)
        self._valid = True
        if hasattr(self.Spectrum,'baseline'):
            if ((not self.Spectrum.baseline.subtracted and
                 self.Spectrum.baseline.basespec is not None and
                 len(self.spectofit) == len(self.Spectrum.baseline.basespec))):
                # remove the not-yet-subtracted baseline before fitting
                self.spectofit -= self.Spectrum.baseline.basespec
        OKmask = np.isfinite(self.spectofit)
        with warnings.catch_warnings():
            # catch a specific np1.7 futurewarning relating to masks
            warnings.simplefilter("ignore")
            self.spectofit[~OKmask] = 0
        self.seterrspec()
        # the "OK" mask is just checking that the values are finite
        self.errspec[~OKmask] = 1e10
        # if an includemask is set *and* there are some included values, "mask out" the rest
        # otherwise, if *all* data are excluded, we should assume that means the includemask
        # simply hasn't been initialized
        if self.includemask is not None and (self.includemask.shape == self.errspec.shape) and any(self.includemask):
            self.errspec[~self.includemask] = 1e10*self.errspec.max()
@property
def mask(self):
""" Mask: True means "exclude" """
if ((hasattr(self.spectofit, 'mask') and
self.spectofit.shape==self.spectofit.mask.shape)):
mask = self.spectofit.mask
else:
mask = np.zeros_like(self.spectofit, dtype='bool')
return mask
    @property
    def dof(self):
        """ degrees of freedom in fit """
        if not hasattr(self, 'npix_fitted'):
            raise AttributeError('No fit has been run, so npix_fitted is not '
                                 'defined and dof cannot be computed.')
        # dof = (# of fitted pixels)
        #       - (1 if a constant offset was fit, since vheight is a bool)
        #       - (free parameters = npeaks * pars-per-component)
        #       + (parameters held fixed) + (parameters tied to others)
        return (self.npix_fitted - self.vheight - self.npeaks *
                self.Registry.npars[self.fittype] + np.sum(self.parinfo.fixed) +
                np.sum([x != '' for x in self.parinfo.tied]))
        #self.dof = self.includemask.sum()-self.npeaks*self.Registry.npars[self.fittype]-vheight+np.sum(self.parinfo.fixed)
    @property
    def mask_sliced(self):
        """ Sliced (subset) Mask: True means "exclude" """
        # restrict the exclusion mask to the current fitting region
        return self.mask[self.xmin:self.xmax]
def multifit(self, fittype=None, renormalize='auto', annotate=None,
show_components=None, verbose=True, color=None,
guesses=None, parinfo=None, reset_fitspec=True,
use_window_limits=None, use_lmfit=False, plot=True, **kwargs):
"""
Fit multiple gaussians (or other profiles)
Parameters
----------
fittype : str
What function will be fit? fittype must have been Registryed in the
peakbgfitters dict. Uses default ('gaussian') if not specified
renormalize : 'auto' or bool
if 'auto' or True, will attempt to rescale small data (<1e-9) to be
closer to 1 (scales by the median) so that the fit converges better
parinfo : `~parinfo` structure
Guess structure; supercedes ``guesses``
guesses : list or 'moments'
Either a list of guesses matching the number of parameters * the
number of peaks for the model, or 'moments' to fit a single
spectrum with the moments as guesses
"""
if reset_fitspec:
self.setfitspec()
if not self._valid:
raise ValueError("Data are invalid; cannot be fit.")
#if self.fitkwargs.has_key('negamp'): self.fitkwargs.pop('negamp') # We now do this in gaussfitter.py
if fittype is not None:
self.fittype = fittype
bad_kws = ['fittype','plot']
for kw in bad_kws:
if kw in self.fitkwargs:
del self.fitkwargs[kw]
if guesses is not None and parinfo is not None:
raise ValueError("Both guesses and parinfo were specified, "
"but only one of these is allowed.")
if guesses is None:
if parinfo is not None:
guesses = list(parinfo.values)
else:
guesses = list(self.guesses)
elif isinstance(guesses, string_types) and guesses in ('moment', 'moments'):
guesses = self.moments(vheight=False, **kwargs)
else:
guesses = list(guesses) # needs to be mutable, but needs to be a copy!!
if len(guesses) < self.Registry.npars[self.fittype]:
raise ValueError("Too few parameters input. Need at least %i for %s models" % (self.Registry.npars[self.fittype],self.fittype))
self.npeaks = len(guesses)/self.Registry.npars[self.fittype]
self.fitter = self.Registry.multifitters[self.fittype]
self.vheight = False
if self.fitter.vheight:
# Need to reset the parinfo if vheight has previously been set,
# otherwise npars will disagree, which causes problems if
# renormalization happens
self.fitter.vheight = False
self.fitter.npeaks = self.npeaks
self.fitter._make_parinfo(npeaks=self.npeaks)
# add kwargs to fitkwargs
self.fitkwargs.update(kwargs)
if 'renormalize' in self.fitkwargs:
del self.fitkwargs['renormalize']
# if parinfo was specified, we use it and ignore guesses otherwise, we
# make a parinfo so we can test 'scaleable' below
if parinfo is not None:
pinf_for_scaling = parinfo
else:
pinf_for_scaling, _ = self.fitter._make_parinfo(parvalues=guesses,
npeaks=self.npeaks,
**self.fitkwargs)
scalefactor = 1.0
if renormalize in ('auto',True):
datarange = np.nanmax(self.spectofit[self.xmin:self.xmax]) - np.nanmin(self.spectofit[self.xmin:self.xmax])
if abs(datarange) < 1e-9:
scalefactor = np.nanmedian(np.abs(self.spectofit))
if not np.isfinite(scalefactor):
raise ValueError("non-finite scalefactor = {0} encountered.".format(scalefactor))
elif scalefactor == 0:
raise ValueError("scalefactor = {0} encountered, which will result "
"in divide-by-zero errors".format(scalefactor))
log.info("Renormalizing data by factor %e to improve fitting procedure"
% scalefactor)
self.spectofit /= scalefactor
self.errspec /= scalefactor
# this error should be unreachable, but is included as a sanity check
if self.fitter.npeaks * self.fitter.npars != len(pinf_for_scaling):
raise ValueError("Length of parinfo doesn't agree with "
" npeaks * npars = {0}"
.format(self.fitter.npeaks *
self.fitter.npars))
if len(guesses) != len(pinf_for_scaling):
raise ValueError("Length of parinfo doens't match length of guesses")
# zip guesses with parinfo: truncates parinfo if len(parinfo) > len(guesses)
# actually not sure how/when/if this should happen; this might be a bad hack
# revisit with tests!!
for jj,(guess,par) in enumerate(zip(guesses,pinf_for_scaling)):
if par.scaleable:
guesses[jj] /= scalefactor
# if parinfo was passed in, this will change it
# if it was not, it will change only the placeholder
# (becuase we are passing by reference above)
par.value /= scalefactor
par.limits = [lim / scalefactor for lim in par.limits]
log.debug("Rescaled guesses to {0}".format(guesses))
# all fit data must be float64, otherwise the optimizers may take steps
# less than the precision of the data and get stuck
xtofit = self.Spectrum.xarr[self.xmin:self.xmax][~self.mask_sliced].astype('float64')
spectofit = self.spectofit[self.xmin:self.xmax][~self.mask_sliced].astype('float64')
err = self.errspec[self.xmin:self.xmax][~self.mask_sliced].astype('float64')
if np.all(err == 0):
raise ValueError("Errors are all zero. This should not occur and "
"is a bug. (if you set the errors to all zero, "
"they should be overridden and set to 1)")
if parinfo is not None:
self._validate_parinfo(parinfo, mode='fix')
else:
pinf, _ = self.fitter._make_parinfo(parvalues=guesses,
npeaks=self.npeaks,
**self.fitkwargs)
new_guesses = self._validate_parinfo(pinf, 'guesses')
if any((x!=y) for x,y in zip(guesses, new_guesses)):
warn("Guesses have been changed from {0} to {1}"
.format(guesses, new_guesses))
guesses = new_guesses
mpp,model,mpperr,chi2 = self.fitter(xtofit, spectofit, err=err,
npeaks=self.npeaks,
parinfo=parinfo, # the user MUST be allowed to override parinfo.
params=guesses,
use_lmfit=use_lmfit,
**self.fitkwargs)
any_out_of_range = self._validate_parinfo(self.fitter.parinfo, mode='check')
if any(any_out_of_range):
warn("The fitter returned values that are outside the "
"parameter limits. DEBUG INFO: {0}".format(any_out_of_range))
self.spectofit *= scalefactor
self.errspec *= scalefactor
if hasattr(self.fitter.mp,'status'):
self.mpfit_status = models.mpfit_messages[self.fitter.mp.status]
if model is None:
raise ValueError("Model was not set by fitter. Examine your fitter.")
self.chi2 = chi2
self.model = model * scalefactor
self.parinfo = self.fitter.parinfo
# rescale any scaleable parameters
for par in self.parinfo:
if par.scaleable:
par.value *= scalefactor
if par.error is not None:
par.error *= scalefactor
if par.limits is not None:
par.limits = [lim*scalefactor for lim in par.limits]
self.modelpars = self.parinfo.values
self.modelerrs = self.parinfo.errors
self.residuals = spectofit - self.model
if self.Spectrum.plotter.axis is not None and plot:
if color is not None:
kwargs.update({'composite_fit_color':color})
self.plot_fit(annotate=annotate,
show_components=show_components,
use_window_limits=use_window_limits,
**kwargs)
# Re-organize modelerrs so that any parameters that are tied to others inherit the errors of the params they are tied to
if 'tied' in self.fitkwargs:
for ii, element in enumerate(self.fitkwargs['tied']):
if not element.strip():
continue
if '[' in element and ']' in element:
i1 = element.index('[') + 1
i2 = element.index(']')
loc = int(element[i1:i2])
else: # assume lmfit version
varnames = re.compile('([a-zA-Z][a-zA-Z_0-9]*)').search(element).groups()
if not varnames:
continue
elif len(varnames) > 1:
warn("The 'tied' parameter {0} is not simple enough for error propagation".format(element))
continue
else:
varname = varnames[0]
loc = self.parinfo.names.index(varname)
self.modelerrs[ii] = self.modelerrs[loc]
# make sure the full model is populated
self._full_model()
# calculate the number of pixels included in the fit. This should
# *only* be done when fitting, not when selecting data.
# (see self.dof)
self.npix_fitted = self.includemask.sum() - self.mask.sum()
self.history_fitpars()
    def refit(self, use_lmfit=False):
        """ Redo a fit using the current parinfo as input """
        # reset_fitspec=False keeps the spectrum/error arrays from the
        # previous fit instead of rebuilding them
        return self.multifit(parinfo=self.parinfo, use_lmfit=use_lmfit,
                             reset_fitspec=False)
    def history_fitpars(self):
        """
        Record the fitted profile type, chi^2/DOF, and every fitted parameter
        in the spectrum's header history.  No-op if there is no header.
        """
        if hasattr(self.Spectrum,'header'):
            history.write_history(self.Spectrum.header, "SPECFIT: Fitted "
                                  "profile of type %s" % (self.fittype))
            history.write_history(self.Spectrum.header, "Chi^2: %g DOF: %i" %
                                  (self.chi2, self.dof))
            for par in self.parinfo:
                history.write_history(self.Spectrum.header, str(par))
def peakbgfit(self, usemoments=True, annotate=None, vheight=True, height=0,
negamp=None, fittype=None, renormalize='auto', color=None,
use_lmfit=False, show_components=None, debug=False,
use_window_limits=True, guesses=None,
nsigcut_moments=None, plot=True, parinfo=None, **kwargs):
"""
Fit a single peak (plus a background)
Parameters
----------
usemoments : bool
The initial guess will be set by the fitter's 'moments' function
(this overrides 'guesses')
annotate : bool
Make a legend?
vheight : bool
Fit a (constant) background as well as a peak?
height : float
initial guess for background
negamp : bool
If True, assumes amplitude is negative. If False, assumes positive. If
None, can be either.
fittype : bool
What function will be fit? fittype must have been Registryed in the
peakbgfitters dict
renormalize : 'auto' or bool
if 'auto' or True, will attempt to rescale small data (<1e-9) to be
closer to 1 (scales by the median) so that the fit converges better
nsigcut_moments : bool
pass to moment guesser; can do a sigma cut for moment guessing
"""
self.npeaks = 1
self.auto = True
self.setfitspec()
if fittype is not None:
self.fittype=fittype
NP = self.Registry.peakbgfitters[self.fittype].default_npars
if guesses is not None:
log.debug("Using user-specified guesses.")
self.guesses = guesses
if len(guesses) != NP + vheight:
raise ValueError("Invalid guesses specified for single-fitter."
"Expected {0}, got {1}. Perhaps you should "
"use the multifitter (multifit=True)?"
.format(NP+vheight, len(guesses)))
elif usemoments: # this can be done within gaussfit but I want to save them
# use this INDEPENDENT of fittype for now (voigt and gauss get same guesses)
log.debug("Using moment-based guesses.")
moments_f = self.Registry.peakbgfitters[self.fittype].moments
self.guesses = moments_f(self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax],
vheight=vheight,
negamp=negamp,
nsigcut=nsigcut_moments,
**kwargs)
else:
if negamp:
self.guesses = [height,-1,0,1]
else:
self.guesses = [height,1,0,1]
# If we're fitting anything but a simple Gaussian, we need the length
# of guesses to be right so we pad with appended zeros
# BUT, if the guesses from the moments have the right number of
# parameters, we don't need to do this.
if NP > len(self.guesses):
for ii in xrange(len(self.guesses),NP):
self.guesses += [0.0]
self.fitter = self.Registry.peakbgfitters[self.fittype]
log.debug("n(guesses): %s Guesses: %s vheight: %s " %
(len(self.guesses),self.guesses,vheight))
scalefactor = 1.0
if renormalize in ('auto',True):
datarange = self.spectofit[self.xmin:self.xmax].max() - self.spectofit[self.xmin:self.xmax].min()
if abs(datarange) < 1e-9:
scalefactor = np.median(np.abs(self.spectofit))
log.info("Renormalizing data by factor %e to improve fitting procedure"
% scalefactor)
self.spectofit /= scalefactor
self.errspec /= scalefactor
self.guesses[0] /= scalefactor
if vheight:
self.guesses[1] /= scalefactor
log.debug("Guesses before fit: {0}".format(self.guesses))
if 'debug' in self.fitkwargs:
debug = self.fitkwargs['debug']
del self.fitkwargs['debug']
mpp,model,mpperr,chi2 = self.fitter(
self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax],
err=self.errspec[self.xmin:self.xmax], vheight=vheight,
params=self.guesses, parinfo=parinfo, debug=debug,
use_lmfit=use_lmfit, **self.fitkwargs)
log.debug("1. Guesses, fits after: {0}, {1}".format(self.guesses, mpp))
self.spectofit *= scalefactor
self.errspec *= scalefactor
if hasattr(self.fitter.mp,'status'):
self.mpfit_status = models.mpfit_messages[self.fitter.mp.status]
self.parinfo = self.fitter.parinfo
if model is None:
raise ValueError("Model was not set by fitter. Examine your fitter.")
self.chi2 = chi2
self.vheight=vheight
if vheight:
self.Spectrum.baseline.order = 0
self.Spectrum.baseline.baselinepars = [mpp[0]*scalefactor] # first item in list form
self.Spectrum.baseline.basespec = self.Spectrum.data*0 + mpp[0]*scalefactor
self.model = model*scalefactor - mpp[0]*scalefactor
# I removed this recently for some reason, but more code depends on it being in place
# Need to figure out *WHY* anything would want an extra parameter
if len(mpp) == self.fitter.npars+1:
mpp = mpp[1:]
else:
self.model = model*scalefactor
self.residuals = self.spectofit[self.xmin:self.xmax] - self.model*scalefactor
self.modelpars = mpp
self.modelerrs = mpperr
# rescale any scaleable parameters
for par in self.parinfo:
if par.scaleable:
par.value = par.value * scalefactor
if par.error is not None:
par.error = par.error * scalefactor
if self.Spectrum.plotter.axis is not None and plot:
if color is not None:
kwargs.update({'composite_fit_color':color})
self.plot_fit(annotate=annotate,
use_window_limits=use_window_limits,
show_components=show_components,
**kwargs)
# make sure the full model is populated
self._full_model(debug=debug)
self.npix_fitted = self.includemask.sum() - self.mask.sum()
log.debug("2. Guesses, fits after vheight removal: {0},{1}"
.format(self.guesses, mpp))
self.history_fitpars()
    def _full_model(self, debug=False, **kwargs):
        """
        Compute the model for the whole spectrum
        """
        self.fullmodel = self.get_full_model(debug=debug,**kwargs)
        # residuals over the full axis (not just the fitted region)
        self.fullresiduals = self.Spectrum.data - self.fullmodel
    def get_full_model(self, debug=False,**kwargs):
        """ compute the model over the full axis """
        # delegate to get_model with the spectrum's own x-axis
        return self.get_model(self.Spectrum.xarr, debug=debug,**kwargs)
def get_model(self, xarr, pars=None, debug=False, add_baseline=None):
""" Compute the model over a given axis """
if pars is None:
return self.get_model_frompars(xarr=xarr, pars=self.parinfo,
add_baseline=add_baseline, debug=debug)
else:
return self.get_model_frompars(xarr=xarr, pars=pars,
add_baseline=add_baseline, debug=debug)
def get_model_frompars(self, xarr, pars, debug=False, add_baseline=None):
""" Compute the model over a given axis """
if ((add_baseline is None and (self.Spectrum.baseline.subtracted or self.vheight))
or add_baseline is False):
return self.fitter.n_modelfunc(pars,**self.fitter.modelfunc_kwargs)(xarr)
else:
return (self.fitter.n_modelfunc(pars,
**self.fitter.modelfunc_kwargs)(xarr)
+ self.Spectrum.baseline.get_model(np.arange(xarr.size)))
    def plot_model(self, pars, offset=0.0, annotate=False, clear=False, **kwargs):
        """
        Plot a model from specified input parameters
        (see plot_fit for kwarg specification)

        annotate is set to "false" because arbitrary annotations are not yet
        implemented
        """
        # really, plot_fit should be thin on top of plot_model, but that's
        # not how I wrote it, so it will have to wait for a refactor
        if clear: self.clear()
        return self.plot_fit(pars=pars, offset=offset, annotate=False, **kwargs)
#def assess_npeaks(self):
# """
# Attempt to determine whether any of the peaks are unnecessary
# """
# if self.npeaks <= 1:
# return
# npars = self.fitter.npars
# perpeakpars = [self.parinfo.values[ii*npars:(ii+1)*npars] for ii in
# range(self.npeaks)]
# parsets = [((x[0][0],x[1][0]),x[0][1]+x[1][1]) for x in
# itertools.combinations(perpeakpars, self.npeaks-1)]
# parsets = [x
# for y in itertools.combinations(perpeakpars, self.npeaks-1)
# for x in y]
# chi2_without = [(self.spectofit[self.xmin:self.xmax] -
# self.get_model_frompars(self.xarr, self.pars[ii*npars:
    def plot_fit(self, xarr=None, annotate=None, show_components=None,
                 composite_fit_color='red', lw=0.5,
                 composite_lw=0.75, pars=None, offset=None,
                 use_window_limits=None, show_hyperfine_components=None,
                 plotkwargs={}, **kwargs):
        """
        Plot the fit.  Must have fitted something before calling this!

        It will be automatically called whenever a spectrum is fit (assuming an
        axis for plotting exists)

        kwargs are passed to the fitter's components attribute

        Parameters
        ----------
        xarr : None
            If none, will use the spectrum's xarr.  Otherwise, plot the
            specified xarr.  This is useful if you want to plot a well-sampled
            model when the input spectrum is undersampled
        annotate : None or bool
            Annotate the plot?  If not specified, defaults to self.autoannotate
        show_components : None or bool
        show_hyperfine_components : None or bool
            Show the individual gaussian components overlaid on the composite
            fit
        use_window_limits : None or bool
            If False, will reset the window to include the whole spectrum.  If
            True, leaves the window as is.  Defaults to self.use_window_limits
            if None.
        pars : parinfo
            A parinfo structure or list of model parameters.  If none, uses
            best-fit
        offset : None or float
            Y-offset.  If none, uses the default self.Spectrum.plotter offset,
            otherwise, uses the specified float.
        """
        #if self.Spectrum.baseline.subtracted is False and self.Spectrum.baseline.basespec is not None:
        #    # don't display baseline if it's included in the fit
        #    plot_offset = self.Spectrum.plotter.offset+(self.Spectrum.baseline.basespec * (~self.vheight))
        #else:
        if offset is None:
            plot_offset = self.Spectrum.plotter.offset
        else:
            plot_offset = offset
        if xarr is None:
            xarr = self.Spectrum.xarr
        if pars is not None:
            # evaluate the model at the explicitly-provided parameters
            model = self.get_model_frompars(xarr, pars)
        else:
            # otherwise refresh and use the stored best-fit model
            self._full_model()
            model = self.fullmodel
        self.modelplot += self.Spectrum.plotter.axis.plot(xarr,
                                                          model + plot_offset,
                                                          color=composite_fit_color,
                                                          linewidth=lw,
                                                          **plotkwargs)
        # Plot components
        if show_components or show_hyperfine_components:
            self.plot_components(xarr=xarr,
                                 show_hyperfine_components=show_hyperfine_components,
                                 pars=pars, plotkwargs=plotkwargs)
        uwl = use_window_limits if use_window_limits is not None else self.use_window_limits
        # plotter kwargs are kwargs for the Spectrum.plotter,
        # whereas plotkwargs are for the matplotlib plot command
        plotterkwargs = {}
        plotterkwargs.update(self.Spectrum.plotter.plotkwargs)
        plotterkwargs['use_window_limits'] = uwl
        self.Spectrum.plotter.reset_limits(**plotterkwargs)
        if self.Spectrum.plotter.autorefresh:
            self.Spectrum.plotter.refresh()
        if annotate or ((annotate is None) and self.autoannotate):
            self.annotate()
            if self.vheight: self.Spectrum.baseline.annotate()
    def plot_components(self, xarr=None, show_hyperfine_components=None,
                        component_yoffset=0.0, component_lw=0.75, pars=None,
                        component_fit_color='blue', component_kwargs={},
                        add_baseline=False, plotkwargs={}, **kwargs):
        """
        Overplot the individual components of a fit
        Parameters
        ----------
        xarr : None
            If none, will use the spectrum's xarr. Otherwise, plot the
            specified xarr. This is useful if you want to plot a well-sampled
            model when the input spectrum is undersampled
        show_hyperfine_components : None | bool
            Keyword argument to pass to component codes; determines whether to return
            individual (e.g., hyperfine) components of a composite model
        component_yoffset : float
            Vertical (y-direction) offset to add to the components when plotting
        component_lw : float
            Line width of component lines
        component_fitcolor : color
            Color of component lines
        component_kwargs : dict
            Keyword arguments to pass to the fitter.components method
        add_baseline : bool
            Add the fit to the components before plotting. Makes sense to use
            if self.Spectrum.baseline.subtracted == False
        pars : parinfo
            A parinfo structure or list of model parameters. If none, uses
            best-fit
        """
        plot_offset = self.Spectrum.plotter.offset
        if xarr is None:
            xarr = self.Spectrum.xarr
        # forward the hyperfine switch to the fitter under its expected name
        if show_hyperfine_components is not None:
            component_kwargs['return_hyperfine_components'] = show_hyperfine_components
        # stash for later inspection/debugging
        self._component_kwargs = component_kwargs
        if pars is None:
            pars = self.modelpars
        self.modelcomponents = self.fitter.components(xarr=xarr, pars=pars, **component_kwargs)
        yoffset = plot_offset + component_yoffset
        if add_baseline:
            yoffset += self.Spectrum.baseline.basespec
        for data in self.modelcomponents:
            # can have multidimensional components
            if len(data.shape) > 1:
                for d in data:
                    self._plotted_components += self.Spectrum.plotter.axis.plot(xarr,
                        d + yoffset,
                        color=component_fit_color, linewidth=component_lw, **plotkwargs)
            else:
                self._plotted_components += self.Spectrum.plotter.axis.plot(xarr,
                    data + yoffset,
                    color=component_fit_color, linewidth=component_lw, **plotkwargs)
def fullsizemodel(self):
"""
If the model was fit to a sub-region of the spectrum, expand it (with
zeros wherever the model was not defined) to fill the spectrum.
Examples
--------
>>> noise = np.random.randn(100)
>>> xarr = np.linspace(-50,50,100)
>>> signal = np.exp(-(xarr-5)**2/(2*3.**2))
>>> sp = pyspeckit.Spectrum(data=noise + signal, xarr=xarr, xarrkwargs={'units':'km/s'})
>>> sp.specfit(xmin=-25,xmax=25)
>>> sp.specfit.model.shape
(48,)
>>> sp.specfit.fullsizemodel()
>>> sp.specfit.model.shape
(100,)
"""
if self.model.shape != self.Spectrum.data.shape:
temp = np.zeros(self.Spectrum.data.shape)
temp[self.xmin:self.xmax] = self.model
self.model = temp
self.residuals = self.spectofit - self.model
self.selectregion(reset=True)
    def plotresiduals(self, fig=2, axis=None, clear=True, color='k',
                      linewidth=0.5, drawstyle='steps-mid', yoffset=0.0,
                      label=True, pars=None, zeroline=None,
                      set_limits=True, **kwargs):
        """
        Plot residuals of the fit. Specify a figure or
        axis; defaults to figure(2).
        Parameters
        ----------
        fig : int
            Figure number. Overridden by axis
        axis : axis
            The axis to plot on
        pars : None or parlist
            If set, the residuals will be computed for the input parameters
        zeroline : bool or None
            Plot the "zero" line through the center of the residuals. If None,
            defaults to "True if yoffset!=0, False otherwise"
        kwargs are passed to matplotlib plot
        """
        # recompute the full model (and thus fullresiduals) first
        self._full_model(pars=pars)
        if axis is None:
            # no axis given: use (or create) the numbered figure's current axes
            if isinstance(fig,int):
                fig=matplotlib.pyplot.figure(fig)
            self.residualaxis = matplotlib.pyplot.gca()
            if clear:
                self.residualaxis.clear()
        else:
            self.residualaxis = axis
            if clear:
                self.residualaxis.clear()
        self.residualplot = self.residualaxis.plot(self.Spectrum.xarr,
                                                   self.fullresiduals+yoffset,
                                                   drawstyle=drawstyle,
                                                   linewidth=linewidth,
                                                   color=color, **kwargs)
        # dashed reference line at the (possibly offset) zero level
        if zeroline or (zeroline is None and yoffset != 0):
            self.residualplot += self.residualaxis.plot(self.Spectrum.xarr,
                                                        (np.zeros_like(self.Spectrum.xarr.value)+yoffset),
                                                        linestyle='--',
                                                        color='k',
                                                        alpha=0.5)
        # match the residual panel's limits to the main plotter's window
        if set_limits:
            if ((self.Spectrum.plotter.xmin is not None) and
                    (self.Spectrum.plotter.xmax is not None)):
                self.residualaxis.set_xlim(self.Spectrum.plotter.xmin.value,
                                           self.Spectrum.plotter.xmax.value)
            if ((self.Spectrum.plotter.ymin is not None) and
                    (self.Spectrum.plotter.ymax is not None)):
                self.residualaxis.set_ylim(self.Spectrum.plotter.ymin,
                                           self.Spectrum.plotter.ymax)
        if label:
            self.residualaxis.set_xlabel(self.Spectrum.plotter.xlabel)
            self.residualaxis.set_ylabel(self.Spectrum.plotter.ylabel)
            self.residualaxis.set_title("Residuals")
        if self.Spectrum.plotter.autorefresh:
            self.residualaxis.figure.canvas.draw()
    def annotate(self,loc='upper right',labelspacing=0.25, markerscale=0.01,
                 borderpad=0.1, handlelength=0.1, handletextpad=0.1,
                 fontsize=10,
                 frameon=False, chi2=None, optimal_chi2_kwargs={}, **kwargs):
        """
        Add a legend to the plot showing the fitted parameters
        _clearlegend() will remove the legend
        chi2 : {True or 'reduced' or 'optimal' or 'allthree'}
        kwargs passed to legend
        """
        self._clearlegend()
        # dummy artist used as the (invisible) legend handle for every label
        pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
        if hasattr(self.fitter,'annotations'):
            self._annotation_labels = self.fitter.annotations()
        else:
            raise Exception("Fitter %s has no annotations." % self.fitter)
        # map the x-axis physical type to the symbol used in annotations
        #xtypename = units.unit_type_dict[self.Spectrum.xarr.xtype]
        xcharconv = units.SmartCaseNoSpaceDict({u.Hz.physical_type:'\\nu',
                                                u.m.physical_type:'\\lambda',
                                                (u.km/u.s).physical_type:'v',
                                                'pixels':'x',
                                                u.dimensionless_unscaled:'x',
                                                'dimensionless':'x',
                                               })
        try:
            xchar = xcharconv[self.Spectrum.xarr.unit.physical_type]
        except AttributeError:
            # xarr.unit may be a plain string rather than an astropy Unit
            unit_key = self.Spectrum.xarr.unit
            xchar = xcharconv[u.Unit(unit_key).physical_type]
        # substitute the axis symbol into labels of the form "$x...$"
        self._annotation_labels = [L.replace('x',xchar) if L[1]=='x' else L for
                                   L in self._annotation_labels]
        if chi2 is not None:
            chi2n_label = '$\\chi^2/\\nu = %0.2g$' % (self.chi2/self.dof)
            chi2opt_label = '$\\chi^2_o/\\nu = %0.2g$' % self.optimal_chi2(**optimal_chi2_kwargs)
            chi2_label = '$\\chi^2 = %0.2g$' % self.chi2
            if chi2 == 'allthree':
                self._annotation_labels.append("\n".join([chi2n_label,
                                                          chi2_label,
                                                          chi2opt_label]))
            elif chi2 == 'reduced':
                self._annotation_labels.append(chi2n_label)
            elif chi2 == 'optimal':
                self._annotation_labels.append(chi2opt_label)
            else:
                self._annotation_labels.append(chi2_label)
        if self.Spectrum.plotter.axis:
            try:
                self.fitleg = self.Spectrum.plotter.axis.legend(
                    tuple([pl]*len(self._annotation_labels)),
                    self._annotation_labels, loc=loc, markerscale=markerscale,
                    borderpad=borderpad, handlelength=handlelength,
                    handletextpad=handletextpad, labelspacing=labelspacing,
                    frameon=frameon, fontsize=fontsize, **kwargs)
                self.Spectrum.plotter.axis.add_artist(self.fitleg)
            except TypeError as ex:
                print("Error {0} was raised, which may indicate an outdated mpl version".format(ex))
            try:
                # draggable legends require an interactive backend
                self.fitleg.set_draggable(True)
            except AttributeError:
                # wrong version and/or non-interactive backend
                pass
        if self.Spectrum.plotter.autorefresh:
            self.Spectrum.plotter.refresh()
def print_fit(self, print_baseline=True, **kwargs):
"""
Print the best-fit parameters to the command line
"""
if self.Spectrum.baseline.baselinepars is not None and print_baseline:
print("Baseline: " + " + ".join(["%12g x^%i" % (x,i) for i,x in enumerate(self.Spectrum.baseline.baselinepars[::-1])]))
for i,p in enumerate(self.parinfo):
print("%15s: %12g +/- %12g" % (p['parname'],p['value'],p['error']))
    def clear(self, legend=True, components=True):
        """
        Remove the fitted model from the plot
        Also removes the legend by default
        """
        if self.Spectrum.plotter.axis is not None:
            # hide rather than remove the composite-model lines
            for p in self.modelplot:
                p.set_visible(False)
            if legend:
                self._clearlegend()
            if components:
                self._clearcomponents()
            if self.Spectrum.plotter.autorefresh:
                self.Spectrum.plotter.refresh()
        # Empty the modelplot array to free memory
        self.modelplot = []
        # remove residuals from self if they're there.
        if hasattr(self,'residualplot'):
            for L in self.residualplot:
                # NOTE(review): Axes.lines.remove() is deprecated in recent
                # matplotlib (use Line2D.remove()) — confirm supported mpl range
                if L in self.Spectrum.plotter.axis.lines:
                    self.Spectrum.plotter.axis.lines.remove(L)
    def _clearcomponents(self):
        """Hide and remove the per-component model lines from the plot."""
        for pc in self._plotted_components:
            pc.set_visible(False)
            # NOTE(review): Axes.lines.remove() is deprecated in recent
            # matplotlib (use Line2D.remove()) — confirm supported mpl range
            if pc in self.Spectrum.plotter.axis.lines:
                self.Spectrum.plotter.axis.lines.remove(pc)
        if self.Spectrum.plotter.autorefresh:
            self.Spectrum.plotter.refresh()
        # Empty the plotted components array to free memory
        self._plotted_components = []
    def _clearlegend(self):
        """
        Remove the legend from the plot window
        """
        axis = self.Spectrum.plotter.axis
        # detach the legend if it is the axis's active legend
        if axis and axis.legend_ == self.fitleg:
            axis.legend_ = None
        if axis and self.fitleg is not None:
            # don't remove fitleg unless it's in the current axis
            # self.fitleg.set_visible(False)
            if self.fitleg in axis.artists:
                axis.artists.remove(self.fitleg)
        if self.Spectrum.plotter.autorefresh:
            self.Spectrum.plotter.refresh()
def savefit(self):
"""
Save the fit parameters from a Gaussian fit to the FITS header
.. todo::
THESE SHOULD BE WRITTEN FOR EACH TYPE OF MODEL TO BE FIT
"""
if self.modelpars is not None and hasattr(self.Spectrum,'header'):
for ii,p in enumerate(self.modelpars):
try:
if ii % 3 == 0:
self.Spectrum.header['AMP%1i' % (ii/3)] = (p,"Gaussian best fit amplitude #%i" % (ii/3))
elif ii % 3 == 1:
self.Spectrum.header['CEN%1i' % (ii/3)] = (p,"Gaussian best fit center #%i" % (ii/3))
elif ii % 3 == 2:
self.Spectrum.header['WID%1i' % (ii/3)] = (p,"Gaussian best fit width #%i" % (ii/3))
except ValueError as ex:
log.info("Failed to save fit to header: {0}".format(ex))
def downsample(self,factor):
"""
Downsample the model spectrum (and the spectofit spectra)
This should only be done when Spectrum.smooth is called
"""
if self.model is not None:
self.model = self.model[::factor]
if self.residuals is not None:
self.residuals = self.residuals[::factor]
self.spectofit = self.spectofit[::factor]
self.errspec = self.errspec[::factor]
self.includemask = self.includemask[::factor]
def crop(self,x1pix,x2pix):
"""
When spectrum.crop is called, this must be too
"""
if self.model is not None:
self.model = self.model[x1pix:x2pix]
if hasattr(self,'fullmodel'):
self.fullmodel = self.fullmodel[x1pix:x2pix]
self.includemask = self.includemask[x1pix:x2pix]
self.setfitspec()
    def integral(self, analytic=False, direct=False, threshold='auto',
                 integration_limits=None, integration_limit_units='pixels',
                 return_error=False, **kwargs):
        """
        Return the integral of the fitted spectrum
        Parameters
        ----------
        analytic : bool
            Return the analytic integral of the fitted function?
            .. WARNING:: This approach is only implemented for some models
            .. todo:: Implement error propagation for this approach
        direct : bool
            Return the integral of the *spectrum* (as opposed to the *fit*)
            over a range defined by the `integration_limits` if specified or
            `threshold` otherwise
        threshold : 'auto' or 'error' or float
            Determines what data to be included in the integral based off of where
            the model is greater than this number
            If 'auto', the threshold will be set to peak_fraction * the peak
            model value.
            If 'error', uses the error spectrum as the threshold
            See `self.get_model_xlimits` for details
        integration_limits : None or 2-tuple
            Manually specify the limits in `integration_limit_units` units
        return_error : bool
            Return the error on the integral if set.
            The error computed by
            sigma = sqrt(sum(sigma_i^2)) * dx
        kwargs :
            passed to `self.fitter.integral` if ``not(direct)``
        Returns
        -------
        np.scalar or np.ndarray with the integral or integral & error
        """
        if analytic:
            # analytic path short-circuits everything else
            return self.fitter.analytic_integral(modelpars=self.parinfo.values)
        # pixel limits where the model is significant
        xmin,xmax = self.get_model_xlimits(units='pixels', threshold=threshold)
        if integration_limits is None:
            integration_limits = [xmin,xmax]
        # NOTE(review): the converted integration_limits are never assigned
        # back to xmin/xmax, so manual limits appear to have no effect on the
        # integration range below — confirm against upstream intent
        integration_limits = [
            self.Spectrum.xarr.x_to_pix(x,xval_units=integration_limit_units)
            for x in integration_limits]
        if xmax - xmin > 1: # can only get cdelt if there's more than 1 pixel
            dx = self.Spectrum.xarr[xmin:xmax].cdelt().value
        else:
            dx = None
        if dx is None:
            # irregular axis: use the per-pixel spacing array instead
            #dx = np.abs(np.concatenate([np.diff(self.Spectrum.xarr),[0]]))
            #warn("Irregular X-axis.  The last pixel is ignored.")
            self.Spectrum.xarr.make_dxarr()
            dx = self.Spectrum.xarr.dxarr.value
        else:
            # shouldn't shape be a 'property'?
            # regular axis: broadcast the scalar spacing to the full spectrum
            dx = np.repeat(np.abs(dx), self.Spectrum.shape)
        if direct:
            # integrate the data itself (minus any unsubtracted baseline)
            integrand = self.Spectrum.data[xmin:xmax]
            if not self.Spectrum.baseline.subtracted:
                integrand -= self.Spectrum.baseline.basespec[xmin:xmax]
            integ = (integrand * dx[xmin:xmax]).sum()
            if return_error:
                # compute error assuming a "known mean" (not a sample mean).  If sample mean, multiply
                # by sqrt(len(dx)/(len(dx)-1)) (which should be very near 1)
                error = np.sqrt((dx[xmin:xmax] * self.Spectrum.error[xmin:xmax]**2).sum() / dx[xmin:xmax].sum())
                return np.array([integ,error])
            else:
                return integ
            #OK = np.abs( fullmodel ) > threshold
            #integ = (self.spectofit[OK] * dx[OK]).sum()
            #error = np.sqrt((self.errspec[OK]**2 * dx[OK]).sum()/dx[OK].sum())
        else:
            if not hasattr(self.fitter,'integral'):
                raise AttributeError("The fitter %s does not have an integral implemented" % self.fittype)
            # the model considered here must NOT include the baseline!
            # if it does, you'll get the integral of the continuum
            #fullmodel = self.get_full_model(add_baseline=False)
            if self.Spectrum.xarr.cdelt() is not None:
                # regular axis: collapse dx to a single scalar spacing
                dx = np.median(dx)
                integ = self.fitter.integral(self.modelpars, dx=dx, **kwargs)
                if return_error:
                    if mycfg.WARN:
                        warn("WARNING: The computation of the error "
                             "on the integral is not obviously "
                             "correct or robust... it's just a guess.")
                    OK = self.model_mask(threshold=threshold, add_baseline=False)
                    error = np.sqrt((self.errspec[OK]**2).sum()) * dx
                    #raise NotImplementedError("We haven't written up correct error estimation for integrals of fits")
            else:
                integ = 0
                error = 0
                # NOTE(review): "integal" is a typo in this user-facing
                # message (left unchanged here; comments-only edit)
                warn("An analytic integal could not be computed because the X-axis is irregular.  Try direct=True when integrating, or find a way to linearize the X-axis")
        if return_error:
            return integ,error
        else:
            return integ
def model_mask(self, **kwargs):
"""
Get a mask (boolean array) of the region where the fitted model is
significant
Parameters
----------
threshold : 'auto' or 'error' or float
The threshold to compare the model values to for selecting the mask
region.
* auto: uses `peak_fraction` times the model peak
* error: use the spectrum error
* float: any floating point number as an absolute threshold
peak_fraction : float
Parameter used if ``threshold=='auto'`` to determine fraction of
model peak to set threshold at
add_baseline : bool
Add the fitted baseline to the model before comparing to threshold?
Returns
-------
mask : `~numpy.ndarray`
A boolean mask array with the same size as the spectrum, set to
``True`` where the fitted model has values above a specified
threshold
"""
return self._compare_to_threshold(**kwargs)
def _compare_to_threshold(self, threshold='auto', peak_fraction=0.01,
add_baseline=False):
"""
Identify pixels that are above some threshold
"""
model = self.get_full_model(add_baseline=add_baseline)
# auto-set threshold from some fraction of the model peak
if threshold=='auto':
threshold = peak_fraction * np.abs(model).max()
elif threshold=='error':
threshold = self.errspec
OK = np.abs(model) > threshold
return OK
def get_model_xlimits(self, threshold='auto', peak_fraction=0.01,
add_baseline=False, unit='pixels', units=None):
"""
Return the x positions of the first and last points at which the model
is above some threshold
Parameters
----------
threshold : 'auto' or 'error' or float
If 'auto', the threshold will be set to peak_fraction * the peak
model value.
If 'error', uses the error spectrum as the threshold
peak_fraction : float
ignored unless threshold == 'auto'
add_baseline : bool
Include the baseline when computing whether the model is above the
threshold? default FALSE. Passed to get_full_model.
units : str
A valid unit type, e.g. 'pixels' or 'angstroms'
"""
OK = self._compare_to_threshold(threshold=threshold,
peak_fraction=peak_fraction,
add_baseline=add_baseline)
# find the first & last "True" values
xpixmin = OK.argmax()
xpixmax = len(OK) - OK[::-1].argmax() - 1
if units is not None and unit =='pixels':
# todo: deprecate
unit = units
if unit == 'pixels':
return [xpixmin,xpixmax]
else:
return self.Spectrum.xarr[[xpixmin,xpixmax]].as_unit(units)
def shift_pars(self, frame=None):
"""
Shift the velocity / wavelength / frequency of the fitted parameters
into a different frame
Right now this only takes care of redshift and only if redshift is defined.
It should be extended to do other things later
"""
for ii,pi in enumerate(self.parinfo):
for partype in ('shift','offset','velo'):
if partype in str.lower(pi['parname']):
if frame is not None:
self.modelpars[ii] = self.Spectrum.xarr.x_in_frame(self.modelpars[ii], frame)
def moments(self, fittype=None, **kwargs):
"""
Return the moments
see the :mod:`~pyspeckit.spectrum.moments` module
Parameters
----------
fittype : None or str
The registered fit type to use for moment computation
"""
if fittype is None:
fittype = self.fittype
return list(self.Registry.multifitters[fittype].moments(
self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax], **kwargs))
    def button3action(self, event, debug=False, nwidths=1):
        """
        Disconnect the interactiveness
        Perform the fit (or die trying)
        Hide the guesses
        """
        if self.nclicks_b1 == 0:
            # there has been no selection
            # therefore, we assume *everything* is selected
            self.includemask[:] = True
        # detach the interactive mouse/keyboard handlers
        self.Spectrum.plotter.figure.canvas.mpl_disconnect(self.click)
        self.Spectrum.plotter.figure.canvas.mpl_disconnect(self.keyclick)
        if self.npeaks > 0:
            if hasattr(self, 'fitter'):
                # let the active fitter reinterpret the 3-parameter guesses
                self.guesses = self.fitter.parse_3par_guesses(self.guesses)
            else:
                # default fitter is a Gaussian, which has 3 parameters
                if len(self.guesses) % 3 != 0:
                    log.error("Default fitter is Gaussian, and there were "
                              "{0} guess parameters, which is not a "
                              "multiple of 3.".format(len(self.guesses)))
            log.info("{0} Guesses : {1}  X channel range: {2}-{3}"
                     .format(len(self.guesses), self.guesses, self.xmin,
                             self.xmax))
            self.multifit(use_window_limits=True)
        # hide the guess markers drawn during interactive selection
        for p in self.button2plot + self.button1plot:
            p.set_visible(False)
        # disconnect interactive window (and more importantly, reconnect to
        # original interactive cmds)
        self.clear_all_connections()
def copy(self, parent=None, registry=None):
"""
Create a copy of the spectral fit - includes copies of the _full_model,
the registry, the fitter, parinfo, modelpars, modelerrs, model, npeaks
Parameters
----------
parent : `pyspeckit.classes.Spectrum`
A `~Spectrum` instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
if registry is None:
if hasattr(parent, 'Registry'):
registry = parent.Registry
else:
# only make a copy if we're not already given a specific registry
# to inherit
copy.deepcopy(self.Registry)
newspecfit = Specfit(parent, Registry=registry)
newspecfit.parinfo = copy.deepcopy(self.parinfo)
if newspecfit.parinfo is None:
newspecfit.modelpars = None
newspecfit.modelerrs = None
else:
newspecfit.modelpars = newspecfit.parinfo.values
newspecfit.modelerrs = newspecfit.parinfo.errors
newspecfit.includemask = self.includemask.copy()
newspecfit.model = copy.copy(self.model)
newspecfit.npeaks = self.npeaks
if hasattr(self,'fitter'):
newspecfit.fitter = copy.deepcopy(self.fitter)
newspecfit.fitter.parinfo = newspecfit.parinfo
if hasattr(self,'fullmodel'):
newspecfit._full_model()
# this is ridiculous, absurd, and infuriating...
newspecfit.button2action = newspecfit.guesspeakwidth
if parent is not None:
newspecfit.Spectrum.plotter = parent.plotter
else:
newspecfit.Spectrum.plotter = None
return newspecfit
def __copy__(self):
return self.copy(parent=self.Spectrum)
    def add_sliders(self, parlimitdict=None, **kwargs):
        """
        Add a Sliders window in a new figure appropriately titled
        Parameters
        ----------
        parlimitdict: dict
            Each parameter needs to have displayed limits; these are set in
            min-max pairs.  If this is left empty, the widget will try to guess
            at reasonable limits, but the guessing is not very sophisticated
            yet.
        .. todo:: Add a button in the navbar that makes this window pop up
        http://stackoverflow.com/questions/4740988/add-new-navigate-modes-in-matplotlib
        """
        if parlimitdict is None:
            # try to create a reasonable parlimit dict
            parlimitdict = {}
        for param in self.parinfo:
            if not param.parname in parlimitdict:
                # heuristics based on the parameter name:
                # position-like parameters span the fitted x-range
                if any( (x in param['parname'].lower() for x in ('shift','xoff')) ):
                    lower, upper = (self.Spectrum.xarr[self.includemask].min().value,
                                    self.Spectrum.xarr[self.includemask].max().value)
                # width-like parameters range from 0 to the x-extent
                elif any( (x in param['parname'].lower() for x in ('width','fwhm')) ):
                    xvalrange = (self.Spectrum.xarr[self.includemask].max().value -
                                 self.Spectrum.xarr[self.includemask].min().value)
                    lower,upper = (0,xvalrange)
                # amplitude-like parameters span +/- the data range
                elif any( (x in param['parname'].lower() for x in ('amp','peak','height')) ):
                    datarange = self.spectofit.max() - self.spectofit.min()
                    lower,upper = (param['value']-datarange, param['value']+datarange)
                else:
                    # fallback: an order of magnitude around the current value
                    lower = param['value'] * 0.1
                    upper = param['value'] * 10
                # override guesses with limits
                if param.limited[0]:
                    # nextafter -> next representable float
                    lower = np.nextafter(param.limits[0], param.limits[0]+1)
                if param.limited[1]:
                    upper = np.nextafter(param.limits[1], param.limits[1]-1)
                parlimitdict[param.parname] = (lower,upper)
        if hasattr(self,'fitter'):
            self.SliderWidget = widgets.FitterSliders(self,
                                                      self.Spectrum.plotter.figure,
                                                      npars=self.fitter.npars,
                                                      parlimitdict=parlimitdict,
                                                      **kwargs)
        else:
            log.error("Must have a fitter instantiated before creating sliders")
def optimal_chi2(self, reduced=True, threshold='error', **kwargs):
"""
Compute an "optimal" :math:`\\chi^2` statistic, i.e. one in which only pixels in
which the model is statistically significant are included
Parameters
----------
reduced : bool
Return the reduced :math:`\\chi^2`
threshold : 'auto' or 'error' or float
If 'auto', the threshold will be set to peak_fraction * the peak
model value, where peak_fraction is a kwarg passed to
get_model_xlimits reflecting the fraction of the model peak
to consider significant
If 'error', uses the error spectrum as the threshold
kwargs : dict
passed to :meth:`get_model_xlimits`
Returns
-------
chi2 : float
:math:`\\chi^2` statistic or reduced :math:`\\chi^2` statistic (:math:`\\chi^2/n`)
.. math::
\\chi^2 = \\sum( (d_i - m_i)^2 / e_i^2 )
"""
modelmask = self._compare_to_threshold(threshold=threshold, **kwargs)
chi2 = np.sum((self.fullresiduals[modelmask]/self.errspec[modelmask])**2)
if reduced:
# vheight included here or not? assuming it should be...
return chi2/self.dof
else:
return chi2
def get_pymc(self, **kwargs):
"""
Create a pymc MCMC sampler from the current fitter. Defaults to 'uninformative' priors
`kwargs` are passed to the fitter's get_pymc method, with parameters defined below.
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.get_pymc()
>>> MCwithpriors = sp.specfit.get_pymc(use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
if hasattr(self.fitter,'get_pymc'):
return self.fitter.get_pymc(self.Spectrum.xarr, self.spectofit,
self.errspec, **kwargs)
else:
raise AttributeError("Fitter %r does not have pymc implemented." % self.fitter)
def get_emcee(self, nwalkers=None, **kwargs):
"""
Get an emcee walker ensemble for the data & model using the current model type
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use. Defaults to 2 * self.fitters.npars
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_ensemble = sp.specfit.get_emcee()
>>> p0 = emcee_ensemble.p0 * (np.random.randn(*emcee_ensemble.p0.shape) / 10. + 1.0)
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
import emcee
if hasattr(self.fitter,'get_emcee_ensemblesampler'):
nwalkers = (self.fitter.npars * self.fitter.npeaks + self.fitter.vheight) * 2
emc = self.fitter.get_emcee_ensemblesampler(self.Spectrum.xarr,
self.spectofit,
self.errspec, nwalkers)
emc.nwalkers = nwalkers
emc.p0 = np.array([self.parinfo.values] * emc.nwalkers)
return emc
def get_components(self, **kwargs):
"""
If a model has been fitted, return the components of the model
Parameters
----------
kwargs are passed to self.fitter.components
"""
if self.modelpars is not None:
self.modelcomponents = self.fitter.components(self.Spectrum.xarr,
self.modelpars, **kwargs)
return self.modelcomponents
    def measure_approximate_fwhm(self, threshold='error', emission=True,
                                 interpolate_factor=1, plot=False,
                                 grow_threshold=2, **kwargs):
        """
        Measure the FWHM of a fitted line
        This procedure is designed for multi-component *blended* lines; if the
        true FWHM is known (i.e., the line is well-represented by a single
        gauss/voigt/lorentz profile), use that instead.  Do not use this for
        multiple independently peaked profiles.
        This MUST be run AFTER a fit has been performed!
        Parameters
        ----------
        threshold : 'error' | float
            The threshold above which the spectrum will be interpreted as part
            of the line.  This threshold is applied to the *model*.  If it is
            'noise', self.error will be used.
        emission : bool
            Is the line absorption or emission?
        interpolate_factor : integer
            Magnification factor for determining sub-pixel FWHM.  If used,
            "zooms-in" by using linear interpolation within the line region
        plot : bool
            Overplot a line at the FWHM indicating the FWHM.  kwargs
            are passed to matplotlib.plot
        grow_threshold : int
            Minimum number of valid points.  If the total # of points above the
            threshold is <= to this number, it will be grown by 1 pixel on each side
        Returns
        -------
        The approximated FWHM, if it can be computed
        If there are <= 2 valid pixels, a fwhm cannot be computed
        """
        if threshold == 'error':
            threshold = self.Spectrum.error
            # fall back to a small fraction of the data peak if the error
            # spectrum is identically zero
            if np.all(self.Spectrum.error==0):
                threshold = 1e-3*self.Spectrum.data.max()
        if self.Spectrum.baseline.subtracted is False:
            data = self.Spectrum.data - self.Spectrum.baseline.basespec
        else:
            # * 1 forces a copy so later in-place edits don't touch the original
            data = self.Spectrum.data * 1
        model = self.get_full_model(add_baseline=False)
        if np.count_nonzero(model) == 0:
            raise ValueError("The model is all zeros.  No FWHM can be "
                             "computed.")
        # can modify inplace because data is a copy of self.Spectrum.data
        if not emission:
            data *= -1
            model *= -1
        line_region = model > threshold
        if line_region.sum() == 0:
            raise ValueError("No valid data included in FWHM computation")
        if line_region.sum() <= grow_threshold:
            # widen the region by one pixel on each side of the True run
            line_region[line_region.argmax()-1:line_region.argmax()+1] = True
            reverse_argmax = len(line_region) - line_region.argmax() - 1
            line_region[reverse_argmax-1:reverse_argmax+1] = True
            log.warning("Fewer than {0} pixels were identified as part of the fit."
                        " To enable statistical measurements, the range has been"
                        " expanded by 2 pixels including some regions below the"
                        " threshold.".format(grow_threshold))
        # determine peak (because data is neg if absorption, always use max)
        peak = data[line_region].max()
        xarr = self.Spectrum.xarr[line_region]
        xarr.make_dxarr()
        # smallest pixel spacing within the line region
        cd = xarr.dxarr.min()
        if interpolate_factor > 1:
            # resample the line region on a finer, regular grid
            newxarr = units.SpectroscopicAxis(np.arange(xarr.min().value-cd.value,
                                                        xarr.max().value+cd.value,
                                                        cd.value /
                                                        float(interpolate_factor)
                                                        ),
                                              unit=xarr.unit,
                                              equivalencies=xarr.equivalencies
                                              )
            # load the metadata from xarr
            # newxarr._update_from(xarr)
            data = np.interp(newxarr,xarr,data[line_region])
            xarr = newxarr
        else:
            data = data[line_region]
        # need the peak location so we can find left/right half-max locations
        peakloc = data.argmax()
        # nearest sample to half maximum on each side of the peak
        hm_left = np.argmin(np.abs(data[:peakloc]-peak/2.))
        hm_right = np.argmin(np.abs(data[peakloc:]-peak/2.)) + peakloc
        deltax = xarr[hm_right]-xarr[hm_left]
        if plot:
            # for plotting, use a negative if absorption
            sign = 1 if emission else -1
            # shift with baseline if baseline is plotted
            if not self.Spectrum.baseline.subtracted:
                basespec = self.Spectrum.baseline.get_model(xarr)
                yoffleft = self.Spectrum.plotter.offset + basespec[hm_left]
                yoffright = self.Spectrum.plotter.offset + basespec[hm_right]
            else:
                yoffleft = yoffright = self.Spectrum.plotter.offset
            log.debug("peak={2} yoffleft={0} yoffright={1}".format(yoffleft, yoffright, peak))
            log.debug("hm_left={0} hm_right={1} xarr[hm_left]={2} xarr[hm_right]={3}".format(hm_left, hm_right, xarr[hm_left], xarr[hm_right]))
            self.Spectrum.plotter.axis.plot([xarr[hm_right].value,
                                             xarr[hm_left].value],
                                            np.array([sign*peak/2.+yoffleft,
                                                      sign*peak/2.+yoffright]),
                                            **kwargs)
            self.Spectrum.plotter.refresh()
        # debug print hm_left,hm_right,"FWHM: ",deltax
        # debug self.Spectrum.plotter.axis.plot(xarr,data,color='magenta')
        # debug self.Spectrum.plotter.refresh()
        # debug raise TheDead
        return deltax
def _validate_parinfo(self, parinfo, mode='fix'):
assert mode in ('fix','raise','check','guesses')
any_out_of_range = []
for param in parinfo:
if (param.limited[0] and (param.value < param.limits[0])):
if (np.allclose(param.value, param.limits[0])):
# nextafter -> next representable float
if mode in ('fix', 'guesses'):
warn("{0} is less than the lower limit {1}, but very close."
" Converting to {1}+ULP".format(param.value,
param.limits[0]))
param.value = np.nextafter(param.limits[0], param.limits[0]+1)
elif mode == 'raise':
raise ValueError("{0} is less than the lower limit {1}, but very close."
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append("lt:close",)
else:
raise ValueError("{0} is less than the lower limit {1}"
.format(param.value, param.limits[0]))
elif mode == 'check':
any_out_of_range.append(False)
if (param.limited[1] and (param.value > param.limits[1])):
if (np.allclose(param.value, param.limits[1])):
if mode in ('fix', 'guesses'):
param.value = np.nextafter(param.limits[1], param.limits[1]-1)
warn("{0} is greater than the upper limit {1}, but very close."
" Converting to {1}-ULP".format(param.value,
param.limits[1]))
elif mode == 'raise':
raise ValueError("{0} is greater than the upper limit {1}, but very close."
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append("gt:close")
else:
raise ValueError("{0} is greater than the upper limit {1}"
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append(False)
if mode == 'guesses':
return parinfo.values
return any_out_of_range
|
pyspeckit/pyspeckit
|
pyspeckit/spectrum/fitters.py
|
Python
|
mit
| 94,508
|
[
"Gaussian"
] |
072e02984ff07a15d4cbf5f32f382096954104971d069686516a26cbc430a04b
|
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
    """Initialize an instance.

    Optional argument x controls seeding, as for Random.seed().
    """
    self.seed(x)
    # gauss_next caches the spare gaussian deviate produced by gauss()
    self.gauss_next = None
def seed(self, a=None):
    """Initialize internal state from hashable object.

    None or no argument seeds from an operating-system-specific
    randomness source if available, falling back to the current time.

    If a is not None and not an int, hash(a) is used instead.
    """
    if a is None:
        try:
            # 16 bytes of OS entropy -> 128-bit integer seed
            a = int(_hexlify(_urandom(16)), 16)
        except NotImplementedError:
            import time
            a = int(time.time() * 256) # use fractional seconds
    super().seed(a)
    # discard any cached gaussian so old state cannot leak through
    self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple( x % (2**32) for x in internalstate )
except ValueError as e:
raise TypeError from e
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1<<BPF):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int', 'default', and 'maxwidth' arguments.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is default:
if istart > 0:
if istart >= maxwidth:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*width)
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*width)
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
if n >= maxwidth:
return istart + istep*self._randbelow(n)
return istart + istep*int(self.random() * n)
def randint(self, a, b):
    """Return random integer in range [a, b], including both end points.
    """
    # delegate to randrange with an exclusive upper bound of b+1
    return self.randrange(a, b+1)
def _randbelow(self, n, _log=_log, int=int, _maxwidth=1<<BPF,
_Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
"""Return a random int in the range [0,n)
Handles the case where n has more bits than returned
by a single call to the underlying generator.
"""
try:
getrandbits = self.getrandbits
except AttributeError:
pass
else:
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
# This assures that the two methods correspond.
if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
r = getrandbits(k)
while r >= n:
r = getrandbits(k)
return r
if n >= _maxwidth:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large")
return int(self.random() * n)
## -------------------- sequence methods -------------------
def choice(self, seq):
    """Choose a random element from a non-empty sequence."""
    # scale a uniform float into a valid index; an empty sequence
    # raises IndexError here, as documented
    idx = int(self.random() * len(seq))
    return seq[idx]
def shuffle(self, x, random=None, int=int):
    """x, random=random.random -> shuffle list x in place; return None.

    Optional arg random is a 0-argument function returning a random
    float in [0.0, 1.0); by default, the standard random.random.
    """
    rand = self.random if random is None else random
    # Fisher-Yates: walk down from the last slot, swapping each with a
    # uniformly chosen slot at or below it
    for i in range(len(x) - 1, 0, -1):
        j = int(rand() * (i + 1))
        x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, (set, frozenset)):
population = tuple(population)
if not hasattr(population, '__getitem__') or hasattr(population, 'keys'):
raise TypeError("Population must be a sequence or set. For dicts, use dict.keys().")
random = self.random
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population")
_int = int
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = _int(random() * (n-i))
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = _int(random() * n)
while j in selected:
j = _int(random() * n)
selected_add(j)
result[i] = population[j]
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
    """Get a random number in the range [a, b)."""
    # NOTE(review): floating-point rounding of a + (b-a)*x can
    # occasionally yield b itself, so the half-open range is not
    # strictly guaranteed
    return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
    """Triangular distribution.

    Continuous distribution bounded by given lower and upper limits,
    and having a given mode value in-between.

    http://en.wikipedia.org/wiki/Triangular_distribution
    """
    u = self.random()
    try:
        c = 0.5 if mode is None else (mode - low) / (high - low)
    except ZeroDivisionError:
        # degenerate case high == low: the distribution collapses to a
        # single point (previously raised ZeroDivisionError when a mode
        # was supplied; matches the later upstream CPython fix)
        return low
    if u > c:
        # reflect so we always sample the left-hand triangle
        u = 1.0 - u
        c = 1.0 - c
        low, high = high, low
    return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
    """Normal distribution.

    mu is the mean, and sigma is the standard deviation.
    """
    # mu = mean, sigma = standard deviation

    # Uses Kinderman and Monahan method. Reference: Kinderman,
    # A.J. and Monahan, J.F., "Computer generation of random
    # variables using the ratio of uniform deviates", ACM Trans
    # Math Software, 3, (1977), pp257-260.

    random = self.random
    while 1:
        u1 = random()
        # 1.0 - random() keeps u2 strictly positive for the log below
        u2 = 1.0 - random()
        z = NV_MAGICCONST*(u1-0.5)/u2
        zz = z*z/4.0
        # ratio-of-uniforms acceptance test
        if zz <= -_log(u2):
            break
    return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
    """Exponential distribution.

    lambd is 1.0 divided by the desired mean.  (The parameter would be
    called "lambda", but that is a reserved word in Python.)  Returned
    values range from 0 to positive infinity.
    """
    # lambd: rate lambd = 1/mean
    # ('lambda' is a Python reserved word)
    rnd = self.random
    # reject draws so close to zero that log(u) would blow up
    while True:
        u = rnd()
        if u > 1e-7:
            break
    return -_log(u) / lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
r = (1.0 + b * b)/(2.0 * b)
while 1:
u1 = random()
z = _cos(_pi * u1)
f = (1.0 + r * z)/(r + z)
c = kappa * (r - f)
u2 = random()
if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
break
u3 = random()
if u3 > 0.5:
theta = (mu % TWOPI) + _acos(f)
else:
theta = (mu % TWOPI) - _acos(f)
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
    """Gaussian distribution.

    mu is the mean, and sigma is the standard deviation.  This is
    slightly faster than the normalvariate() function.

    Not thread-safe without a lock around calls.
    """
    # Box-Muller transform: when x and y are two variables from [0, 1),
    # uniformly distributed, then
    #
    #    cos(2*pi*x)*sqrt(-2*log(1-y))
    #    sin(2*pi*x)*sqrt(-2*log(1-y))
    #
    # are two *independent* variables with normal distribution
    # (mu = 0, sigma = 1).
    # (Lambert Meertens)
    # (corrected version; bug discovered by Mike Miller, fixed by LM)

    # Multithreading note: When two threads call this function
    # simultaneously, it is possible that they will receive the
    # same return value.  The window is very small though.  To
    # avoid this, you have to use a lock around all calls.  (I
    # didn't want to slow this down in the serial case by using a
    # lock here.)

    random = self.random
    # consume the deviate cached by the previous call, if any
    z = self.gauss_next
    self.gauss_next = None
    if z is None:
        x2pi = random() * TWOPI
        g2rad = _sqrt(-2.0 * _log(1.0 - random()))
        z = _cos(x2pi) * g2rad
        # stash the sine component for the next call
        self.gauss_next = _sin(x2pi) * g2rad

    return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
    """Beta distribution.

    Conditions on the parameters are alpha > 0 and beta > 0.
    Returned values range between 0 and 1.
    """
    # This version due to Janne Sinkkonen, and matches all the std
    # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
    y = self.gammavariate(alpha, 1.)
    if y == 0:
        # guard the degenerate draw to avoid 0/0 below
        return 0.0
    else:
        return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
    """Pareto distribution.  alpha is the shape parameter."""
    # Jain, pg. 495
    # invert the CDF: 1 - random() is uniform on (0, 1]
    tail = 1.0 - self.random()
    scaled = pow(tail, 1.0/alpha)
    return 1.0 / scaled
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
    """Weibull distribution.

    alpha is the scale parameter and beta is the shape parameter.
    """
    # Jain, pg. 499; bug fix courtesy Bill Arms
    # invert the CDF: 1 - random() is uniform on (0, 1]
    shape_inv = 1.0/beta
    tail = 1.0 - self.random()
    return alpha * pow(-_log(tail), shape_inv)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).

     Not available on all systems (see os.urandom() for details).
    """

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 bytes = 56 bits of entropy; shift out 3 so exactly BPF (53)
        # bits remain, then scale into [0.0, 1.0)
        return (int(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        # renamed from 'bytes' to avoid shadowing the builtin type
        numbytes = (k + 7) // 8                     # bits / 8 and rounded up
        x = int(_hexlify(_urandom(numbytes)), 16)
        return x >> (numbytes * 8 - k)              # trim excess bits

    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None

    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Smoke-test every module-level distribution N times.

    Prints timing and summary statistics via _test_generator; makes no
    assertions.  Exercised when the module is run as a script.
    """
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    # gammavariate spans all three internal algorithm branches
    # (alpha < 1, alpha == 1, alpha > 1)
    _test_generator(N, gammavariate, (0.01, 1.0))
    _test_generator(N, gammavariate, (0.1, 1.0))
    _test_generator(N, gammavariate, (0.1, 2.0))
    _test_generator(N, gammavariate, (0.5, 1.0))
    _test_generator(N, gammavariate, (0.9, 1.0))
    _test_generator(N, gammavariate, (1.0, 1.0))
    _test_generator(N, gammavariate, (2.0, 1.0))
    _test_generator(N, gammavariate, (20.0, 1.0))
    _test_generator(N, gammavariate, (200.0, 1.0))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if __name__ == '__main__':
_test()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.0/Lib/random.py
|
Python
|
mit
| 25,561
|
[
"Gaussian"
] |
8cc9cf2fd19d5c8a9c02c24db5ca6b1fb459d6ac301e398cc09e1e1f3103bb60
|
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
from mox3 import mox
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
from oslo_utils import netutils
import six
import testtools
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import network as network_obj
from nova.objects import virtual_interface as vif_obj
from nova import quota
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_ldap
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_floating_ip
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_service
from nova.tests.unit import utils as test_utils
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
    """Build a FlatManager wired to the real db module, logging to a
    temp directory, with a non-admin request context."""
    super(FlatNetworkTestCase, self).setUp()
    self.tempdir = self.useFixture(fixtures.TempDir()).path
    self.flags(log_dir=self.tempdir)
    self.network = network_manager.FlatManager(host=HOST)
    self.network.instance_dns_domain = ''
    self.network.db = db
    self.context = context.RequestContext('testuser', 'testproject',
                                          is_admin=False)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_get_instance_nw_info_fake(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
    """Creating a /24 with default arguments reserves 4 addresses.

    NOTE(review): presumably the network, gateway, dhcp server and
    broadcast addresses -- confirm against create_networks.
    """
    context_admin = context.RequestContext('testuser', 'testproject',
                                           is_admin=True)
    nets = self.network.create_networks(context_admin, 'fake',
                                        '192.168.0.0/24', False, 1,
                                        256, None, None, None, None, None)
    self.assertEqual(1, len(nets))
    network = nets[0]
    self.assertEqual(4, db.network_count_reserved_ips(context_admin,
                                                      network['id']))
    def test_validate_reserved_start_end(self):
        """allowed_start/allowed_end widen the reserved range and pick the
        gateway, vpn and dhcp addresses without collisions."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, dhcp_server='192.168.0.11',
                                            allowed_start='192.168.0.10',
                                            allowed_end='192.168.0.245')
        self.assertEqual(1, len(nets))
        network = nets[0]
        # gateway defaults to beginning of allowed_start
        self.assertEqual('192.168.0.10', network['gateway'])
        # vpn_server doesn't conflict with dhcp_start
        self.assertEqual('192.168.0.12', network['vpn_private_address'])
        # dhcp_start doesn't conflict with dhcp_server
        self.assertEqual('192.168.0.13', network['dhcp_start'])
        # NOTE(vish): 10 from the beginning, 10 from the end, and
        #             1 for the gateway, 1 for the dhcp server,
        #             1 for the vpn server
        self.assertEqual(23, db.network_count_reserved_ips(context_admin,
                         network['id']))
    def test_validate_reserved_start_out_of_range(self):
        """An allowed_start outside the CIDR raises AddressOutOfRange."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.AddressOutOfRange,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 256, allowed_start='192.168.1.10')
    def test_validate_reserved_end_invalid(self):
        """A non-IP allowed_end raises InvalidAddress."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidAddress,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 256, allowed_end='invalid')
    def test_validate_cidr_invalid(self):
        """A malformed CIDR raises InvalidCidr."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidCidr,
                          self.network.create_networks,
                          context_admin, 'fake', 'invalid', False,
                          1, 256)
    def test_validate_non_int_size(self):
        """A non-integer network_size raises InvalidIntValue."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidIntValue,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 'invalid')
    def test_validate_networks_none_requested_networks(self):
        # None means "no specific networks requested"; always validates.
        self.network.validate_networks(self.context, None)
    def test_validate_networks_empty_requested_networks(self):
        # An empty request list validates successfully.
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_invalid_fixed_ip(self):
        """A malformed fixed IP string raises FixedIpInvalid."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)
    def test_validate_networks_empty_fixed_ip(self):
        """An empty-string fixed IP raises FixedIpInvalid."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '')]
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)
    def test_validate_networks_none_fixed_ip(self):
        """A None fixed IP is accepted (no specific address requested)."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               None)]
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    def test_get_instance_nw_info(self, get):
        """NW info preserves the order fixed IPs are returned in."""
        def make_ip(index):
            # Build a FixedIP with a minimal VIF/network for subnet 'index'.
            vif = objects.VirtualInterface(uuid=index, address=index)
            network = objects.Network(uuid=index,
                                      bridge=index,
                                      label=index,
                                      project_id=index,
                                      injected=False,
                                      netmask='255.255.255.0',
                                      dns1=None,
                                      dns2=None,
                                      cidr_v6=None,
                                      gateway_v6=None,
                                      broadcast_v6=None,
                                      netmask_v6=None,
                                      rxtx_base=None,
                                      gateway='192.168.%s.1' % index,
                                      dhcp_server='192.168.%s.1' % index,
                                      broadcast='192.168.%s.255' % index,
                                      cidr='192.168.%s.0/24' % index)
            return objects.FixedIP(virtual_interface=vif,
                                   network=network,
                                   floating_ips=objects.FloatingIPList(),
                                   address='192.168.%s.2' % index)
        # Deliberately out-of-order indexes; the result must match this order.
        objs = [make_ip(index) for index in ('3', '1', '2')]
        get.return_value = objects.FixedIPList(objects=objs)
        nw_info = self.network.get_instance_nw_info(self.context, None,
                                                    None, None)
        for i, vif in enumerate(nw_info):
            self.assertEqual(vif['network']['bridge'], objs[i].network.bridge)
    @mock.patch.object(objects.Network, 'get_by_id')
    def test_add_fixed_ip_instance_using_id_without_vpn(self, get_by_id):
        """Adding a fixed IP by network *id* looks the network up by id."""
        # Allocate a fixed ip from a network and assign it to an instance.
        # Network is given by network id.
        network_id = networks[0]['id']
        with mock.patch.object(self.network,
                               'allocate_fixed_ip') as allocate_fixed_ip:
            self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                                  network_id)
        # Assert that we fetched the network by id, not uuid
        get_by_id.assert_called_once_with(self.context,
                                          network_id,
                                          project_only='allow_none')
        # Assert that we called allocate_fixed_ip for the given network and
        # instance. We should not have requested a specific address from the
        # network.
        allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID,
                                                  get_by_id.return_value,
                                                  address=None)
    @mock.patch.object(objects.Network, 'get_by_uuid')
    def test_add_fixed_ip_instance_using_uuid_without_vpn(self, get_by_uuid):
        """Adding a fixed IP by network *uuid* uses an elevated context."""
        # Allocate a fixed ip from a network and assign it to an instance.
        # Network is given by network uuid.
        network_uuid = networks[0]['uuid']
        with mock.patch.object(self.network,
                               'allocate_fixed_ip') as allocate_fixed_ip,\
            mock.patch.object(self.context, 'elevated',
                              return_value=mock.sentinel.elevated):
            self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                                  network_uuid)
        # Assert that we fetched the network by uuid, not id, and with elevated
        # context
        get_by_uuid.assert_called_once_with(mock.sentinel.elevated,
                                            network_uuid)
        # Assert that we called allocate_fixed_ip for the given network and
        # instance. We should not have requested a specific address from the
        # network.
        allocate_fixed_ip.assert_called_once_with(self.context,
                                                  FAKEUUID,
                                                  get_by_uuid.return_value,
                                                  address=None)
    def test_mini_dns_driver(self):
        """End-to-end exercise of the minimal instance DNS driver:
        create, delete, modify, query by address/name, bad record type."""
        zone1 = "example.org"
        zone2 = "example.com"
        driver = self.network.instance_dns_manager
        driver.create_entry("hostone", "10.0.0.1", "A", zone1)
        driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
        driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
        driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
        driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
        driver.delete_entry("hostone", zone1)
        driver.modify_address("hostfour", "10.0.0.1", zone1)
        driver.modify_address("hostthree", "10.0.0.1", zone1)
        # Both modified hosts now resolve from 10.0.0.1 in zone1.
        names = driver.get_entries_by_address("10.0.0.1", zone1)
        self.assertEqual(len(names), 2)
        self.assertIn('hostthree', names)
        self.assertIn('hostfour', names)
        names = driver.get_entries_by_address("10.0.0.5", zone2)
        self.assertEqual(len(names), 1)
        self.assertIn('hostfive', names)
        addresses = driver.get_entries_by_name("hosttwo", zone1)
        self.assertEqual(len(addresses), 1)
        self.assertIn('10.0.0.2', addresses)
        # An unknown record type is rejected.
        self.assertRaises(exception.InvalidInput,
                          driver.create_entry,
                          "hostname",
                          "10.10.10.10",
                          "invalidtype",
                          zone1)
    def test_mini_dns_driver_with_mixed_case(self):
        """Mixed-case entries can still be found by address and deleted."""
        zone1 = "example.org"
        driver = self.network.instance_dns_manager
        driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 1)
        for n in addresses:
            driver.delete_entry(n, zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 0)
    def test_allocate_fixed_ip_instance_dns(self):
        """Allocating a fixed IP registers DNS entries for name and uuid."""
        # Test DNS entries are created when allocating a fixed IP.
        # Allocate a fixed IP to an instance. Ensure that dns entries have been
        # created for the instance's name and uuid.
        network = network_obj.Network._from_db_object(
            self.context, network_obj.Network(), test_network.fake_network)
        network.save = mock.MagicMock()
        # Create a minimal instance object
        instance_params = {
            'display_name': HOST,
            'security_groups': []
        }
        instance = fake_instance.fake_instance_obj(
            context.RequestContext('ignore', 'ignore'),
            expected_attrs=instance_params.keys(), **instance_params)
        instance.save = mock.MagicMock()
        # We don't specify a specific address, so we should get a FixedIP
        # automatically allocated from the pool. Fix its value here.
        fip = objects.FixedIP(address='192.168.0.101')
        fip.save = mock.MagicMock()
        with mock.patch.object(objects.Instance, 'get_by_uuid',
                               return_value=instance),\
            mock.patch.object(objects.FixedIP, 'associate_pool',
                              return_value=fip):
            self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
        instance_manager = self.network.instance_dns_manager
        expected_addresses = ['192.168.0.101']
        # Assert that we have a correct entry by instance display name
        addresses = instance_manager.get_entries_by_name(HOST,
                self.network.instance_dns_domain)
        self.assertEqual(expected_addresses, addresses)
        # Assert that we have a correct entry by instance uuid
        addresses = instance_manager.get_entries_by_name(FAKEUUID,
                self.network.instance_dns_domain)
        self.assertEqual(expected_addresses, addresses)
    def test_allocate_floating_ip(self):
        # Floating IP operations on this manager return None.
        self.assertIsNone(self.network.allocate_floating_ip(self.context,
                                                            1, None))
    def test_deallocate_floating_ip(self):
        self.assertIsNone(self.network.deallocate_floating_ip(self.context,
                                                              1, None))
    def test_associate_floating_ip(self):
        self.assertIsNone(self.network.associate_floating_ip(self.context,
                                                             None, None))
    def test_disassociate_floating_ip(self):
        self.assertIsNone(self.network.disassociate_floating_ip(self.context,
                                                                None, None))
    def test_get_networks_by_uuids_ordering(self):
        """_get_networks_by_uuids returns nets in the requested uuid order,
        not in database order."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)
        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_allocate_calculates_quota_auth(self, util_method, reserve,
                                            get_by_uuid):
        """OverQuota from quota reserve surfaces as FixedIpLimitExceeded,
        and quota ids are derived from the instance."""
        inst = objects.Instance()
        inst['uuid'] = 'nosuch'
        get_by_uuid.return_value = inst
        usages = {'fixed_ips': {'in_use': 10, 'reserved': 1}}
        reserve.side_effect = exception.OverQuota(overs='testing',
                                                  quotas={'fixed_ips': 10},
                                                  usages=usages)
        util_method.return_value = ('foo', 'bar')
        self.assertRaises(exception.FixedIpLimitExceeded,
                          self.network.allocate_fixed_ip,
                          self.context, 123, {'uuid': 'nosuch'})
        util_method.assert_called_once_with(self.context, inst)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_deallocate_calculates_quota_auth(self, util_method, reserve,
                                              get_by_address):
        """Quota ids are derived from the instance on deallocation too."""
        inst = objects.Instance(uuid='fake-uuid')
        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              virtual_interface_id=1)
        get_by_address.return_value = fip
        util_method.return_value = ('foo', 'bar')
        # This will fail right after the reserve call when it tries
        # to look up the fake instance we created above
        self.assertRaises(exception.InstanceNotFound,
                          self.network.deallocate_fixed_ip,
                          self.context, '1.2.3.4', instance=inst)
        util_method.assert_called_once_with(self.context, inst)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress argument is stringified before associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                          address=netaddr.IPAddress('1.2.3.4'))
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1,
                                               vif_id=1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
def test_allocate_fixed_ip_cleanup(self,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
mock_fixedip_associate.return_value = fip
instance = objects.Instance(context=self.context)
instance.create()
mock_instance_get.return_value = instance
mock_vif_get.return_value = vif_obj.VirtualInterface(
instance_uuid='fake-uuid', id=1)
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
mock_setup_network.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=address)
mock_dns_manager.delete_entry.assert_has_calls([
mock.call(instance.display_name, ''),
mock.call(instance.uuid, '')
])
mock_fixedip_disassociate.assert_called_once_with(self.context)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate_pool')
@mock.patch('nova.network.manager.NetworkManager._add_virtual_interface')
def test_allocate_fixed_ip_create_new_vifs(self,
mock_add,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1000)
net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}
instance = objects.Instance(context=self.context)
instance.create()
vif = objects.VirtualInterface(context,
id=1000,
address='00:00:00:00:00:00',
instance_uuid=instance.uuid,
network_id=net['id'],
uuid='nosuch')
mock_fixedip_associate.return_value = fip
mock_add.return_value = vif
mock_instance_get.return_value = instance
mock_vif_get.return_value = None
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
self.network.allocate_fixed_ip(self.context, instance['uuid'],
net)
mock_add.assert_called_once_with(self.context, instance['uuid'],
net['id'])
self.assertEqual(fip.virtual_interface_id, vif.id)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network',
                       return_value=None)
    @mock.patch('nova.objects.fixed_ip.FixedIP')
    def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip,
                                             mock_get_vif, mock_instance_get):
        # Tests that we don't try to do anything with fixed IPs if
        # _add_virtual_interface fails.
        instance = fake_instance.fake_instance_obj(self.context)
        mock_instance_get.return_value = instance
        network = {'cidr': '24', 'id': 1,
                   'uuid': '398399b3-f696-4859-8695-a6560e14cb02'}
        vif_error = exception.VirtualInterfaceMacAddressException()
        # mock out quotas because we don't care in this test
        with mock.patch.object(self.network, 'quotas_cls', objects.QuotasNoOp):
            with mock.patch.object(self.network, '_add_virtual_interface',
                                   side_effect=vif_error):
                self.assertRaises(
                    exception.VirtualInterfaceMacAddressException,
                    self.network.allocate_fixed_ip, self.context,
                    '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', network)
        # The mocked FixedIP class must never have been touched.
        self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls))
class FlatDHCPNetworkTestCase(test.TestCase):
    """Tests for network_manager.FlatDHCPManager."""
    REQUIRES_LOCKING = True
    def setUp(self):
        super(FlatDHCPNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatDHCPManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host defers iptables changes into one single apply."""
        def get_by_id(context, fixed_ip_id, **kwargs):
            # Map the two fake floating IPs to fixed IPs on one bridge.
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)
        def fake_apply():
            # Count how many times iptables rules are actually applied.
            fake_apply.count += 1
        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt
        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id
        self.network.init_host()
        # Two floating IPs, but only one iptables apply.
        self.assertEqual(1, fake_apply.count)
class VlanNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
    def setUp(self):
        """Set up a VlanManager plus user and admin request contexts."""
        super(VlanNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.network = network_manager.VlanManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)
    def test_quota_driver_type(self):
        # VlanManager is expected to use the no-op quotas driver.
        self.assertEqual(objects.QuotasNoOp,
                         self.network.quotas_cls)
    def test_vpn_allocate_fixed_ip(self):
        """vpn=True associates the network's reserved vpn_private_address."""
        self.mox.StubOutWithMock(db, 'fixed_ip_associate')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        db.fixed_ip_associate(mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              network_id=mox.IgnoreArg(),
                              reserved=True,
                              virtual_interface_id=vifs[0]['id']
                              ).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()
        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
                                       vpn=True)
    def test_allocate_fixed_ip(self):
        """Non-VPN allocation draws an address from the network's pool."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None,
                                   virtual_interface_id=vifs[0]['id']
                                   ).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()
        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
    @mock.patch('nova.network.manager.VlanManager._setup_network_on_host')
    @mock.patch('nova.network.manager.VlanManager.'
                '_validate_instance_zone_for_dns_domain')
    @mock.patch('nova.network.manager.VlanManager.'
                '_do_trigger_security_group_members_refresh_for_instance')
    @mock.patch('nova.network.manager.VlanManager._add_virtual_interface')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    @mock.patch('nova.objects.VirtualInterface.get_by_instance_and_network')
    def test_allocate_fixed_ip_return_none(self, mock_get,
            mock_associate, mock_get_uuid, mock_add, mock_trigger,
            mock_validate, mock_setup):
        """When no VIF exists for the network a fresh one is created."""
        net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}
        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              address=netaddr.IPAddress('1.2.3.4'),
                              virtual_interface_id=1)
        instance = objects.Instance(context=self.context)
        instance.create()
        vif = objects.VirtualInterface(self.context,
                                       id=1000,
                                       address='00:00:00:00:00:00',
                                       instance_uuid=instance.uuid,
                                       network_id=net['id'],
                                       uuid='nosuch')
        mock_associate.return_value = fip
        mock_add.return_value = vif
        # No existing VIF is found, forcing creation.
        mock_get.return_value = None
        mock_get_uuid.return_value = instance
        mock_validate.return_value = False
        self.network.allocate_fixed_ip(self.context_admin, instance.uuid, net)
        mock_add.assert_called_once_with(self.context_admin, instance.uuid,
                                         net['id'])
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress argument is stringified before associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                          address=netaddr.IPAddress('1.2.3.4'))
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1,
                                               vif_id=1)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
                                                         mock_get):
        """With vpn=1 the vpn_private_address is stringified and reserved."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch',
                           'vpn_private_address': netaddr.IPAddress('1.2.3.4')
                           }, vpn=1)
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1, reserved=True,
                                               vif_id=1)
    @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network',
                       return_value=None)
    @mock.patch('nova.objects.fixed_ip.FixedIP')
    def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip,
                                             mock_get_vif):
        # Tests that we don't try to do anything with fixed IPs if
        # _add_virtual_interface fails.
        vif_error = exception.VirtualInterfaceMacAddressException()
        with mock.patch.object(self.network, '_add_virtual_interface',
                               side_effect=vif_error):
            self.assertRaises(exception.VirtualInterfaceMacAddressException,
                              self.network.allocate_fixed_ip, self.context,
                              '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0',
                              networks[0])
        # The mocked FixedIP class must never have been touched.
        self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls))
    def test_create_networks_too_big(self):
        # Requesting this many VLAN networks is rejected with ValueError
        # (presumably exceeds the usable VLAN ID space — see manager code).
        self.assertRaises(ValueError, self.network.create_networks, None,
                          num_networks=4094, vlan_start=1)
    def test_create_networks_too_many(self):
        # 100 networks of size 100 cannot be carved out of a /24.
        self.assertRaises(ValueError, self.network.create_networks, None,
                          num_networks=100, vlan_start=1,
                          cidr='192.168.0.1/24', network_size=100)
    def test_duplicate_vlan_raises(self):
        """Forcing creation on an in-use VLAN id raises DuplicateVlan."""
        # VLAN 100 is already used and we force the network to be created
        # in that vlan (vlan=100).
        self.assertRaises(exception.DuplicateVlan,
                          self.network.create_networks,
                          self.context_admin, label="fake", num_networks=1,
                          vlan=100, cidr='192.168.0.1/24', network_size=100)
    def test_vlan_start(self):
        # VLAN 100 and 101 are used, so this network should be created in 102
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=1,
            vlan_start=100, cidr='192.168.3.1/24',
            network_size=100)
        self.assertEqual(networks[0]["vlan"], 102)
    def test_vlan_start_multiple(self):
        # VLAN 100 and 101 are used, so these networks should be created in
        # 102 and 103
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=2,
            vlan_start=100, cidr='192.168.3.1/24',
            network_size=100)
        self.assertEqual(networks[0]["vlan"], 102)
        self.assertEqual(networks[1]["vlan"], 103)
    def test_vlan_start_used(self):
        # VLAN 100 and 101 are used, but vlan_start=99.
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=1,
            vlan_start=99, cidr='192.168.3.1/24',
            network_size=100)
        # Allocation still skips past the used VLANs to 102.
        self.assertEqual(networks[0]["vlan"], 102)
    def test_vlan_parameter(self):
        """The vlan id must lie within the valid 802.1Q range 1..4094."""
        # vlan parameter cannot be greater than 4094
        exc = self.assertRaises(ValueError,
                                self.network.create_networks,
                                self.context_admin, label="fake",
                                num_networks=1,
                                vlan=4095, cidr='192.168.0.1/24')
        error_msg = 'The vlan number cannot be greater than 4094'
        self.assertIn(error_msg, six.text_type(exc))
        # vlan parameter cannot be less than 1
        exc = self.assertRaises(ValueError,
                                self.network.create_networks,
                                self.context_admin, label="fake",
                                num_networks=1,
                                vlan=0, cidr='192.168.0.1/24')
        error_msg = 'The vlan number cannot be less than 1'
        self.assertIn(error_msg, six.text_type(exc))
    def test_vlan_be_integer(self):
        """A non-integer vlan value is rejected with a clear message."""
        # vlan must be an integer
        exc = self.assertRaises(ValueError,
                                self.network.create_networks,
                                self.context_admin, label="fake",
                                num_networks=1,
                                vlan='fake', cidr='192.168.0.1/24')
        error_msg = 'vlan must be an integer'
        self.assertIn(error_msg, six.text_type(exc))
    def test_vlan_multiple_without_dhcp_server(self):
        # Without an explicit dhcp_server each network gets its own
        # first address as DHCP server.
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=2,
            vlan_start=100, cidr='192.168.3.1/24',
            network_size=100)
        self.assertEqual(networks[0]["dhcp_server"], "192.168.3.1")
        self.assertEqual(networks[1]["dhcp_server"], "192.168.3.129")
    def test_vlan_multiple_with_dhcp_server(self):
        # An explicit dhcp_server is shared by all created networks.
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=2,
            vlan_start=100, cidr='192.168.3.1/24',
            network_size=100, dhcp_server='192.168.3.1')
        self.assertEqual(networks[0]["dhcp_server"], "192.168.3.1")
        self.assertEqual(networks[1]["dhcp_server"], "192.168.3.1")
    def test_validate_networks(self):
        """validate_networks passes for unallocated fixed IPs (VLAN mode)."""
        self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[1]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[1]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed1)
        db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[0]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[0]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed2)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_none_requested_networks(self):
        # None means "no specific networks requested"; always validates.
        self.network.validate_networks(self.context, None)
    def test_validate_networks_empty_requested_networks(self):
        # An empty request list validates successfully.
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_invalid_fixed_ip(self):
        """A malformed fixed IP string raises FixedIpInvalid."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)
    def test_validate_networks_empty_fixed_ip(self):
        """An empty-string fixed IP raises FixedIpInvalid."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)
    def test_validate_networks_none_fixed_ip(self):
        """A None fixed IP is accepted (no specific address requested)."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_floating_ip_owned_by_project(self):
        """_floating_ip_owned_by_project raises Forbidden unless the IP
        belongs to the context's project or the context is admin."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        # raises because floating_ip project_id is None
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=None)
        self.assertRaises(exception.Forbidden,
                          self.network._floating_ip_owned_by_project,
                          ctxt,
                          floating_ip)
        # raises because floating_ip project_id is not equal to ctxt project_id
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=ctxt.project_id + '1')
        self.assertRaises(exception.Forbidden,
                          self.network._floating_ip_owned_by_project,
                          ctxt,
                          floating_ip)
        # does not raise (floating ip is owned by ctxt project)
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=ctxt.project_id)
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)
        ctxt = context.RequestContext(None, None,
                                      is_admin=True)
        # does not raise (ctxt is admin)
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=None)
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)
        # does not raise (ctxt is admin)
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id='testproject')
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)
    def test_allocate_floating_ip(self):
        """Allocation succeeds when the pool exists and the DB hands out
        an address."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        self.stubs.Set(self.network, '_floating_ip_pool_exists',
                       lambda _x, _y: True)
        def fake_allocate_address(*args, **kwargs):
            return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
        self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
                       fake_allocate_address)
        self.network.allocate_floating_ip(ctxt, ctxt.project_id)
    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.commit')
    def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
        """Deallocation is refused while the floating IP is associated,
        and commits the quota reservation once it succeeds."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake1(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip)
        # floating ip still associated with a fixed ip
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=1)
        # unassociated floating ip owned by the context's project
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)
        self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # this time should raise because floating ip is associated to fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpAssociated,
                          self.network.deallocate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
        mock_reserve.return_value = 'reserve'
        # this time should not raise
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
        mock_commit.assert_called_once_with(ctxt, 'reserve',
                                            project_id='testproject')
    @mock.patch('nova.db.fixed_ip_get')
    def test_associate_floating_ip(self, fixed_get):
        """Exercise associate_floating_ip paths: missing interface,
        already-associated address, remote-host RPC and local execution.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake1(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        network=test_network.fake_network)
        # floating ip that's already associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1)
        # floating ip that isn't associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)
        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)
        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='jibberjabber')
        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)
        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')
        def fake6(ctxt, method, **kwargs):
            # records that the work went over RPC rather than locally
            self.local = False
        def fake7(*args, **kwargs):
            self.local = True
        def fake8(*args, **kwargs):
            raise processutils.ProcessExecutionError('',
                    'Cannot find device "em0"\n')
        def fake9(*args, **kwargs):
            raise test.TestingException()
        # raises because interface doesn't exist
        self.stubs.Set(self.network.db,
                       'floating_ip_fixed_ip_associate',
                       fake1)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
        self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
        self.assertRaises(exception.NoFloatingIpInterface,
                          self.network._associate_floating_ip,
                          ctxt,
                          '1.2.3.4',
                          '1.2.3.5',
                          mox.IgnoreArg(),
                          mox.IgnoreArg())
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is already associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address='1.2.3.4',
                                      instance_uuid='fake_uuid',
                                      network=test_network.fake_network)
        # doesn't raise because we exit early if the address is the same
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
        # raises because we call disassociate which is mocked
        self.assertRaises(test.TestingException,
                          self.network.associate_floating_ip,
                          ctxt,
                          mox.IgnoreArg(),
                          'new')
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertFalse(self.local)
        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_associate_floating_ip', fake7)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertTrue(self.local)
    def test_add_floating_ip_nat_before_bind(self):
        """ensure_floating_forward (NAT) must run before bind_floating_ip."""
        # Tried to verify order with documented mox record/verify
        # functionality, but it doesn't seem to work since I can't make it
        # fail. I'm using stubs and a flag for now, but if this mox feature
        # can be made to work, it would be a better way to test this.
        #
        # self.mox.StubOutWithMock(self.network.driver,
        #                          'ensure_floating_forward')
        # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
        #
        # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg())
        # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
        #                                      mox.IgnoreArg())
        # self.mox.ReplayAll()
        nat_called = [False]
        def fake_nat(*args, **kwargs):
            nat_called[0] = True
        def fake_bind(*args, **kwargs):
            # fails if NAT was not set up before the bind
            self.assertTrue(nat_called[0])
        self.stubs.Set(self.network.driver,
                       'ensure_floating_forward',
                       fake_nat)
        self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
        self.network.l3driver.add_floating_ip('fakefloat',
                                              'fakefixed',
                                              'fakeiface',
                                              'fakenet')
    @mock.patch('nova.db.floating_ip_get_all_by_host')
    @mock.patch('nova.db.fixed_ip_get')
    def _test_floating_ip_init_host(self, fixed_get, floating_get,
                                    public_interface, expected_arg):
        """Helper: init_host re-adds only floating IPs whose fixed IP
        still exists, passing expected_arg as the interface.
        """
        floating_get.return_value = [
            dict(test_floating_ip.fake_floating_ip,
                 interface='foo',
                 address='1.2.3.4'),
            dict(test_floating_ip.fake_floating_ip,
                 interface='fakeiface',
                 address='1.2.3.5',
                 fixed_ip_id=1),
            dict(test_floating_ip.fake_floating_ip,
                 interface='bar',
                 address='1.2.3.6',
                 fixed_ip_id=2),
            ]
        def fixed_ip_get(_context, fixed_ip_id, get_network):
            # only fixed ip 1 still exists; id 2 simulates a stale record
            if fixed_ip_id == 1:
                return dict(test_fixed_ip.fake_fixed_ip,
                            address='1.2.3.4',
                            network=test_network.fake_network)
            raise exception.FixedIpNotFound(id=fixed_ip_id)
        fixed_get.side_effect = fixed_ip_get
        self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
        self.flags(public_interface=public_interface)
        self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
                                              netaddr.IPAddress('1.2.3.4'),
                                              expected_arg,
                                              mox.IsA(objects.Network))
        self.mox.ReplayAll()
        self.network.init_host_floating_ips()
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
    def test_floating_ip_init_host_without_public_interface(self):
        """Without public_interface, each floating IP's own interface wins."""
        self._test_floating_ip_init_host(public_interface=False,
                                         expected_arg='fakeiface')
    def test_floating_ip_init_host_with_public_interface(self):
        """A configured public_interface overrides the per-IP interface."""
        self._test_floating_ip_init_host(public_interface='fooiface',
                                         expected_arg='fooiface')
    def test_disassociate_floating_ip(self):
        """Disassociation raises when the IP is unmapped or auto-assigned,
        and runs remotely or locally depending on the hosting network.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake1(*args, **kwargs):
            pass
        # floating ip that isn't associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)
        # floating ip that is associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        project_id=ctxt.project_id)
        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)
        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False,
                        host='jibberjabber')
        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)
        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')
        def fake6(ctxt, method, **kwargs):
            # records that the work went over RPC rather than locally
            self.local = False
        def fake7(*args, **kwargs):
            self.local = True
        # auto-assigned floating ip, which users may not disassociate
        def fake8(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        auto_assigned=True,
                        project_id=ctxt.project_id)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is not associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpNotAssociated,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertFalse(self.local)
        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertTrue(self.local)
        # raises because auto_assigned floating IP cannot be disassociated
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
        self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        """add_fixed_ip_to_instance allocates a pool address on the vif
        and rebuilds the instance's network info.
        """
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None,
                                   virtual_interface_id=vifs[0]['id']
                                   ).AndReturn(fixed)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    def test_ip_association_and_allocation_of_other_project(self, net_get,
                                                            fixed_get):
        """Makes sure that we cannot deallocate or disassociate
        a public ip of other project.
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        context1 = context.RequestContext('user', 'project1')
        context2 = context.RequestContext('user', 'project2')
        float_ip = db.floating_ip_create(context1.elevated(),
                                         {'address': '1.2.3.4',
                                          'project_id': context1.project_id})
        float_addr = float_ip['address']
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
                                              1, instance['uuid']).address
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        # Associate the IP with non-admin user context
        self.assertRaises(exception.Forbidden,
                          self.network.associate_floating_ip,
                          context2,
                          float_addr,
                          fix_addr)
        # Deallocate address from other project
        self.assertRaises(exception.Forbidden,
                          self.network.deallocate_floating_ip,
                          context2,
                          float_addr)
        # Now Associates the address to the actual project
        self.network.associate_floating_ip(context1, float_addr, fix_addr)
        # Now try dis-associating from other project
        self.assertRaises(exception.Forbidden,
                          self.network.disassociate_floating_ip,
                          context2,
                          float_addr)
        # Clean up the ip addresses
        self.network.disassociate_floating_ip(context1, float_addr)
        self.network.deallocate_floating_ip(context1, float_addr)
        self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
        db.floating_ip_destroy(context1.elevated(), float_addr)
        db.fixed_ip_disassociate(context1.elevated(), fix_addr)
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
        """Verify that release is called properly.

        Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        def vif_get(_context, _vif_id):
            return vifs[0]
        self.stubs.Set(db, 'virtual_interface_get', vif_get)
        context1 = context.RequestContext('user', 'project1')
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      instance_uuid=instance.uuid,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.flags(force_dhcp_release=True)
        self.mox.StubOutWithMock(linux_net, 'release_dhcp')
        # force_dhcp_release=True must trigger a dhcp release for the vif
        linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
                               'DE:AD:BE:EF:00:00')
        self.mox.ReplayAll()
        self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def _deallocate_fixed_with_dhcp(self, mock_dev_exists, fixed_update,
                                    net_get, fixed_get):
        """Helper: deallocate a fixed IP and check dhcp_release is issued
        only when the bridge device exists.
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        def vif_get(_context, _vif_id):
            return vifs[0]
        with contextlib.nested(
            mock.patch.object(db, 'virtual_interface_get', vif_get),
            mock.patch.object(
                utils, 'execute',
                side_effect=processutils.ProcessExecutionError()),
        ) as (_vif_get, _execute):
            context1 = context.RequestContext('user', 'project1')
            instance = db.instance_create(context1,
                                          {'project_id': 'project1'})
            elevated = context1.elevated()
            fix_addr = db.fixed_ip_associate_pool(elevated, 1,
                                                  instance['uuid'])
            fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                          address=fix_addr.address,
                                          instance_uuid=instance.uuid,
                                          allocated=True,
                                          virtual_interface_id=3,
                                          network=dict(
                                              test_network.fake_network,
                                              **networks[1]))
            self.flags(force_dhcp_release=True)
            self.network.deallocate_fixed_ip(context1, fix_addr.address,
                                             'fake')
            fixed_update.assert_called_once_with(context1, fix_addr.address,
                                                 {'allocated': False})
            mock_dev_exists.assert_called_once_with(networks[1]['bridge'])
            if mock_dev_exists.return_value:
                _execute.assert_called_once_with('dhcp_release',
                                                 networks[1]['bridge'],
                                                 fix_addr.address,
                                                 'DE:AD:BE:EF:00:00',
                                                 run_as_root=True)
    @mock.patch('nova.network.linux_net.device_exists', return_value=True)
    def test_deallocate_fixed_with_dhcp(self, mock_dev_exists):
        """Bridge device present: dhcp_release should be executed."""
        self._deallocate_fixed_with_dhcp(mock_dev_exists)
    @mock.patch('nova.network.linux_net.device_exists', return_value=False)
    def test_deallocate_fixed_without_dhcp(self, mock_dev_exists):
        """Bridge device absent: dhcp_release should be skipped."""
        self._deallocate_fixed_with_dhcp(mock_dev_exists)
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
elevated.read_deleted = 'no'
elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
        """Verify that deallocate doesn't raise when no vif is returned.

        Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        def vif_get(_context, _vif_id):
            # simulate a fixed IP whose virtual interface is gone
            return None
        self.stubs.Set(db, 'virtual_interface_get', vif_get)
        context1 = context.RequestContext('user', 'project1')
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.flags(force_dhcp_release=True)
        fixed_update.return_value = fixed_get.return_value
        self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
        # Verify IP is not deallocated if the security group refresh fails.
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        context1 = context.RequestContext('user', 'project1')
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        elevated = context1.elevated()
        fix_addr = objects.FixedIP.associate_pool(elevated, 1,
                                                  instance['uuid'])
        def fake_refresh(instance_uuid):
            raise test.TestingException()
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                fake_refresh)
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.assertRaises(test.TestingException,
                          self.network.deallocate_fixed_ip,
                          context1, str(fix_addr.address), 'fake')
        # the failure must abort before fixed_ip_update is ever reached
        self.assertFalse(fixed_update.called)
    def test_get_networks_by_uuids_ordering(self):
        """Networks come back in the order the uuids were requested."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)
        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host batches iptables work into a single _apply call."""
        def get_by_id(context, fixed_ip_id, **kwargs):
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)
        def fake_apply():
            # counts how many times iptables rules are actually applied
            fake_apply.count += 1
        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt
        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id
        self.network.init_host()
        self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
class FakeNetwork(object):
    """Dict-like network stub; kwargs become attributes, vlan defaults to
    None, and item access falls through to attribute access.
    """
    def __init__(self, **kwargs):
        self.vlan = None
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
    def __getitem__(self, item):
        return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
    def setUp(self):
        """Create a request context and select the rfc2462 ipv6 backend."""
        super(CommonNetworkTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        self.flags(ipv6_backend='rfc2462')
        ipv6.reset_backend()
    def test_validate_instance_zone_for_dns_domain(self):
        """Instance in the same AZ as its DNS domain validates True."""
        domain = 'example.com'
        az = 'test_az'
        domains = {
            domain: _TestDomainObject(
                domain=domain,
                availability_zone=az)}
        def dnsdomain_get(context, instance_domain):
            return domains.get(instance_domain)
        self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
        fake_instance = {'uuid': FAKEUUID,
                         'availability_zone': az}
        manager = network_manager.NetworkManager()
        res = manager._validate_instance_zone_for_dns_domain(self.context,
                                                             fake_instance)
        self.assertTrue(res)
    def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
                              extra_reserved=None, bottom_reserved=0,
                              top_reserved=0):
        """Stub for NetworkManager._create_fixed_ips; skips IP creation."""
        return None
    def test_get_instance_nw_info_client_exceptions(self):
        """InstanceNotFound surfaces as messaging.ExpectedException."""
        manager = network_manager.NetworkManager()
        self.mox.StubOutWithMock(manager.db,
                                 'fixed_ip_get_by_instance')
        manager.db.fixed_ip_get_by_instance(
                self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
                                                 instance_id=FAKEUUID))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          manager.get_instance_nw_info,
                          self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
    @mock.patch('nova.db.instance_get')
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_deallocate_for_instance_passes_host_info(self, fixed_get,
                                                      instance_get):
        """deallocate_for_instance forwards the instance host to
        deallocate_fixed_ip for each fixed address.
        """
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        instance_get.return_value = fake_inst(uuid='ignoreduuid')
        db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('igonre', 'igonre')
        fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
                                       address='1.2.3.4',
                                       network_id=123)]
        manager.deallocate_for_instance(
            ctx, instance=objects.Instance._from_db_object(self.context,
                objects.Instance(), instance_get.return_value))
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_deallocate_for_instance_passes_host_info_with_update_dns_entries(
            self, fixed_get):
        """With update_dns_entries set, DNS is updated for the network."""
        self.flags(update_dns_entries=True)
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('igonre', 'igonre')
        fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
                                       address='1.2.3.4',
                                       network_id=123)]
        with mock.patch.object(manager.network_rpcapi,
                               'update_dns') as mock_update_dns:
            manager.deallocate_for_instance(
                ctx, instance=fake_instance.fake_instance_obj(ctx))
            mock_update_dns.assert_called_once_with(ctx, ['123'])
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    def test_deallocate_for_instance_with_requested_networks(self):
        """Only fixed addresses named in requested_networks are released."""
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('igonre', 'igonre')
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in [('123', '1.2.3.4'), ('123', '4.3.2.1'),
                               ('123', None)]])
        manager.deallocate_for_instance(
            ctx,
            instance=fake_instance.fake_instance_obj(ctx),
            requested_networks=requested_networks)
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    def test_deallocate_for_instance_with_update_dns_entries(self):
        """Requested-network deallocation also updates DNS when enabled."""
        self.flags(update_dns_entries=True)
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('igonre', 'igonre')
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
        with mock.patch.object(manager.network_rpcapi,
                               'update_dns') as mock_update_dns:
            manager.deallocate_for_instance(
                ctx,
                instance=fake_instance.fake_instance_obj(ctx),
                requested_networks=requested_networks)
            mock_update_dns.assert_called_once_with(ctx, ['123'])
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    @mock.patch('nova.db.fixed_ip_disassociate')
    def test_remove_fixed_ip_from_instance(self, disassociate, get):
        """Removing a fixed IP both deallocates and disassociates it."""
        manager = fake_network.FakeNetworkManager()
        get.return_value = [
            dict(test_fixed_ip.fake_fixed_ip, **x)
            for x in manager.db.fixed_ip_get_by_instance(None,
                                                         FAKEUUID)]
        manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
                                              HOST,
                                              '10.0.0.1')
        self.assertEqual(manager.deallocate_called, '10.0.0.1')
        disassociate.assert_called_once_with(self.context, '10.0.0.1')
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_remove_fixed_ip_from_instance_bad_input(self, get):
        """An address the instance does not own raises FixedIpNotFound..."""
        manager = fake_network.FakeNetworkManager()
        get.return_value = []
        self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
                          manager.remove_fixed_ip_from_instance,
                          self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
        """An in-use /24 inside the requested /16 is skipped over."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.0/24')]
        nets = manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16',
                                       False, 4, 256, None, None, None,
                                       None, None)
        self.assertEqual(4, len(nets))
        cidrs = [str(net['cidr']) for net in nets]
        exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                     '192.168.4.0/24']
        for exp_cidr in exp_cidrs:
            self.assertIn(exp_cidr, cidrs)
        self.assertNotIn('192.168.2.0/24', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
        """A /24 overlapping an existing smaller /25 raises CidrConflict."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.9/25')]
        # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
        # existing smaller cidr
        args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
                1, 256, None, None, None, None, None)
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
        """Splitting a /16 avoids the /24 containing an in-use /25."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.0/25')]
        nets = manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16',
                                       False, 4, 256, None, None, None, None,
                                       None)
        self.assertEqual(4, len(nets))
        cidrs = [str(net['cidr']) for net in nets]
        exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                     '192.168.4.0/24']
        for exp_cidr in exp_cidrs:
            self.assertIn(exp_cidr, cidrs)
        self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_all_in_use(self, get_all):
        """Raise CidrConflict when in-use subnets leave too few free."""
        manager = fake_network.FakeNetworkManager()
        in_use = [dict(test_network.fake_network, **values) for values in
                  [{'id': 1, 'cidr': '192.168.2.9/29'},
                   {'id': 2, 'cidr': '192.168.2.64/26'},
                   {'id': 3, 'cidr': '192.168.2.128/26'}]]
        get_all.return_value = in_use
        args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
                3, 64, None, None, None, None, None)
        # CidrConflict: Not enough subnets avail to satisfy requested num_
        # networks - some subnets in requested range already
        # in use
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_already_used(self, get_all):
        """Requesting a cidr identical to an existing network raises."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     cidr='192.168.0.0/24')]
        # CidrConflict: cidr already in use
        args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
                1, 256, None, None, None, None, None)
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_conflict_existing_supernet(self, get_all):
        """A requested cidr inside an existing supernet raises."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.0.0/8')]
        args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
                1, 256, None, None, None, None, None)
        # CidrConflict: requested cidr (192.168.0.0/24) conflicts
        # with existing supernet
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
    def test_create_networks_with_uuid(self):
        """A caller-supplied uuid is preserved on the created network."""
        cidr = '192.168.0.0/24'
        uuid = FAKEUUID
        manager = fake_network.FakeNetworkManager()
        self.stubs.Set(manager, '_create_fixed_ips',
                       self.fake_create_fixed_ips)
        args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
                'fd00::/48', None, None, None, None, None]
        kwargs = {'uuid': uuid}
        nets = manager.create_networks(*args, **kwargs)
        self.assertEqual(1, len(nets))
        net = nets[0]
        self.assertEqual(uuid, net['uuid'])
    @mock.patch('nova.db.network_get_all')
    def test_create_networks_cidr_already_used(self, get_all):
        """Creating over an existing network's cidr raises CidrConflict."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.0.0/24')]
        args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
                'fd00::/48', None, None, None, None, None]
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ips_by_virtual_interface')
    def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
        """Regex IP filters return the matching instances' uuids."""
        manager = fake_network.FakeNetworkManager(self.stubs)
        fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
        _vifs = manager.db.virtual_interface_get_all(None)
        fake_context = context.RequestContext('user', 'project')
        network_get.return_value = dict(test_network.fake_network,
                                        **manager.db.network_get(None, 1))
        # Greedy get everything
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '.*'})
        self.assertEqual(len(res), len(_vifs))
        # Doesn't exist
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '10.0.0.1'})
        self.assertFalse(res)
        # Get instance 1
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '172.16.0.2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
        # Get instance 2
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '173.16.0.2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
        # Get instance 0 and 1
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '172.16.0.*'})
        self.assertTrue(res)
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
        self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
        # Get instance 1 and 2
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '17..16.0.2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
        self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
def _network_get(context, network_id, **args):
return dict(test_network.fake_network,
**manager.db.network_get(context, network_id))
network_get.side_effect = _network_get
# Greedy get eveything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
fake_networks = [dict(test_network.fake_network, **n)
for n in networks]
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()
).MultipleTimes().AndReturn(fake_networks)
self.mox.ReplayAll()
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
new_network_obj = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **new_network))
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network_obj)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
    @mock.patch('nova.objects.quotas.Quotas.rollback')
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
    @mock.patch('nova.network.manager.NetworkManager.'
                '_do_trigger_security_group_members_refresh_for_instance')
    def test_fixed_ip_cleanup_rollback(self, fake_trig,
                                       fixed_get, rollback):
        """A failure mid-deallocation must roll the quota reservation back."""
        manager = network_manager.NetworkManager()
        # Force the security-group refresh step to blow up so the
        # deallocation aborts partway through.
        fake_trig.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          manager.deallocate_fixed_ip,
                          self.context, 'fake', 'fake',
                          instance=fake_inst(uuid='ignoreduuid'))
        # The quota reservation taken for the deallocation is rolled back.
        rollback.assert_called_once_with()
def test_fixed_cidr_out_of_range(self):
manager = network_manager.NetworkManager()
ctxt = context.get_admin_context()
self.assertRaises(exception.AddressOutOfRange,
manager.create_networks, ctxt, label="fake",
cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
        network_manager.NetworkManager):
    """Dummy manager mixing RPCAllocateFixedIP into NetworkManager for tests."""
class RPCAllocateTestCase(test.NoDBTestCase):
    """Tests nova.network.manager.RPCAllocateFixedIP."""

    def setUp(self):
        super(RPCAllocateTestCase, self).setUp()
        self.rpc_fixed = TestRPCFixedManager()
        self.context = context.RequestContext('fake', 'fake')

    def test_rpc_allocate(self):
        """Regression test for bug 855030.

        _rpc_allocate_fixed_ip must return a value so the RPC call
        returns properly and the greenpool completes.
        """
        address = '10.10.10.10'
        self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip',
                       lambda *args, **kwargs: address)
        self.stubs.Set(self.rpc_fixed.db, 'network_get',
                       lambda *args, **kwargs: test_network.fake_network)
        rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
                                                     'fake_instance',
                                                     'fake_network')
        self.assertEqual(address, rval)
class TestFloatingIPManager(floating_ips.FloatingIP,
        network_manager.NetworkManager):
    """Dummy manager mixing FloatingIP into NetworkManager for tests."""
class AllocateTestCase(test.TestCase):
    """Allocation tests run against the sample-network fixture."""

    REQUIRES_LOCKING = True

    def setUp(self):
        super(AllocateTestCase, self).setUp()
        dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
        self.flags(instance_dns_manager=dns)
        self.useFixture(test.SampleNetworks())
        self.conductor = self.start_service(
            'conductor', manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.network = self.start_service('network')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.user_context = context.RequestContext('testuser',
                                                   'testproject')

    def _take_over_networks(self):
        """Point every sample network at this test's network host."""
        for net in db.network_get_all(self.context):
            db.network_update(self.context, net['id'],
                              {'host': self.network.host})

    def test_allocate_for_instance(self):
        """A full allocate/deallocate cycle yields a valid IPv4 fixed IP."""
        address = "10.10.10.10"
        self.flags(auto_assign_floating_ip=True)
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        inst = objects.Instance(context=self.context)
        inst.host = self.compute.host
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create()
        self._take_over_networks()
        nw_info = self.network.allocate_for_instance(
            self.user_context, instance_id=inst['id'],
            instance_uuid=inst['uuid'], host=inst['host'], vpn=None,
            rxtx_factor=3, project_id=self.user_context.project_id,
            macs=None)
        self.assertEqual(1, len(nw_info))
        fixed_ip = nw_info.fixed_ips()[0]['address']
        self.assertTrue(netutils.is_valid_ipv4(fixed_ip))
        self.network.deallocate_for_instance(self.context,
                                             instance=inst)

    def test_allocate_for_instance_illegal_network(self):
        """Requesting networks owned by other projects must fail."""
        networks = db.network_get_all(self.context)
        requested_networks = []
        for network in networks:
            # Set all networks to other projects.
            db.network_update(self.context, network['id'],
                              {'host': self.network.host,
                               'project_id': 'otherid'})
            requested_networks.append((network['uuid'], None))
        # Set the first network to our project.
        db.network_update(self.context, networks[0]['id'],
                          {'project_id': self.user_context.project_id})
        inst = objects.Instance(context=self.context)
        inst.host = self.compute.host
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create()
        self.assertRaises(exception.NetworkNotFoundForProject,
                          self.network.allocate_for_instance,
                          self.user_context, instance_id=inst['id'],
                          instance_uuid=inst['uuid'], host=inst['host'],
                          vpn=None, rxtx_factor=3,
                          project_id=self.context.project_id, macs=None,
                          requested_networks=requested_networks)

    def test_allocate_for_instance_with_mac(self):
        """The supplied MAC must be the one assigned to the new VIF."""
        available_macs = set(['ca:fe:de:ad:be:ef'])
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        self._take_over_networks()
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(
            self.user_context, instance_id=inst['id'],
            instance_uuid=inst['uuid'], host=inst['host'], vpn=None,
            rxtx_factor=3, project_id=project_id, macs=available_macs)
        assigned_macs = [vif['address'] for vif in nw_info]
        self.assertEqual(1, len(assigned_macs))
        self.assertEqual(available_macs.pop(), assigned_macs[0])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=inst['id'],
                                             host=self.network.host,
                                             project_id=project_id)

    def test_allocate_for_instance_not_enough_macs(self):
        """Allocation with an empty MAC pool must fail VIF creation."""
        available_macs = set()
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        self._take_over_networks()
        project_id = self.context.project_id
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self.network.allocate_for_instance,
                          self.user_context, instance_id=inst['id'],
                          instance_uuid=inst['uuid'], host=inst['host'],
                          vpn=None, rxtx_factor=3,
                          project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
REQUIRES_LOCKING = True
    def setUp(self):
        """Build a FloatingIP manager wired to the real DB API and a
        non-admin request context for 'testproject'.
        """
        super(FloatingIPTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)
    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.service_get_by_host_and_binary')
    @mock.patch('nova.db.floating_ip_get_by_address')
    def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
                                                       service_get,
                                                       inst_get, net_get,
                                                       fixed_get):
        """On a multi_host network with the instance on another host,
        disassociate_floating_ip must RPC _disassociate_floating_ip to
        that host (mock.patch decorators apply bottom-up, hence the
        parameter order).
        """
        floating_ip = dict(test_floating_ip.fake_floating_ip,
                           fixed_ip_id=12)
        fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                        network_id=None,
                        instance_uuid='instance-uuid')
        network = dict(test_network.fake_network,
                       multi_host=True)
        instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        self.stubs.Set(self.network,
                       '_floating_ip_owned_by_project',
                       lambda _x, _y: True)
        floating_get.return_value = floating_ip
        fixed_get.return_value = fixed_ip
        net_get.return_value = network
        inst_get.return_value = instance
        service_get.return_value = test_service.fake_service
        # Make the remote host's network service appear alive.
        self.stubs.Set(self.network.servicegroup_api,
                       'service_is_up',
                       lambda _x: True)
        # mox expectation: the RPC must target 'some-other-host'.
        self.mox.StubOutWithMock(
            self.network.network_rpcapi, '_disassociate_floating_ip')
        self.network.network_rpcapi._disassociate_floating_ip(
            ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
        self.mox.ReplayAll()
        self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.floating_ip_get_by_address')
    def test_associate_floating_ip_multi_host_calls(self, floating_get,
                                                    inst_get, net_get,
                                                    fixed_get):
        """On a multi_host network with the instance on another host,
        associate_floating_ip must RPC _associate_floating_ip to that
        host (mock.patch decorators apply bottom-up, hence the
        parameter order).
        """
        floating_ip = dict(test_floating_ip.fake_floating_ip,
                           fixed_ip_id=None)
        fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                        network_id=None,
                        instance_uuid='instance-uuid')
        network = dict(test_network.fake_network,
                       multi_host=True)
        instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        self.stubs.Set(self.network,
                       '_floating_ip_owned_by_project',
                       lambda _x, _y: True)
        floating_get.return_value = floating_ip
        fixed_get.return_value = fixed_ip
        net_get.return_value = network
        inst_get.return_value = instance
        # mox expectation: the RPC must target 'some-other-host'.
        self.mox.StubOutWithMock(
            self.network.network_rpcapi, '_associate_floating_ip')
        self.network.network_rpcapi._associate_floating_ip(
            ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
            'instance-uuid')
        self.mox.ReplayAll()
        self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.instance_create(self.context,
{"project_id": self.project_id})
# Run it twice to make it fault if it does not handle
# instances without fixed networks
# If this fails in either, it does not handle having no addresses
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
    def test_deallocate_floating_ip_quota_rollback(self):
        """deallocate_floating_ip reserves a quota decrement and then
        rolls the reservation back (mox verifies the exact call order:
        reserve -> ownership check -> db deallocate -> rollback).
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake(*args, **kwargs):
            # Unassociated floating IP owned by the test project.
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
        self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
        self.mox.StubOutWithMock(self.network,
                                 '_floating_ip_owned_by_project')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        quota.QUOTAS.reserve(self.context,
                             floating_ips=-1,
                             project_id='testproject').AndReturn('fake-rsv')
        self.network._floating_ip_owned_by_project(self.context,
                                                   mox.IgnoreArg())
        db.floating_ip_deallocate(mox.IgnoreArg(),
                                  mox.IgnoreArg()).AndReturn(None)
        quota.QUOTAS.rollback(self.context, 'fake-rsv',
                              project_id='testproject')
        self.mox.ReplayAll()
        self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.deleted = True
instance.create()
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.create()
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.floating_ip_get_by_address')
    @mock.patch('nova.db.floating_ip_update')
    def test_migrate_instance_start(self, floating_update, floating_get,
                                    fixed_get):
        """migrate_instance_start tears down floating IPs via the l3
        driver for exactly two of the three migrated addresses; the one
        flagged by _is_stale_floating_ip_address ('172.24.4.23') is
        presumably handled differently — the count assertion below is
        what pins this.
        """
        called = {'count': 0}
        def fake_floating_ip_get_by_address(context, address):
            return dict(test_floating_ip.fake_floating_ip,
                        address=address,
                        fixed_ip_id=0)
        def fake_is_stale_floating_ip_address(context, floating_ip):
            # Only the first migrated address is reported stale.
            return str(floating_ip.address) == '172.24.4.23'
        floating_get.side_effect = fake_floating_ip_get_by_address
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      instance_uuid='fake_uuid',
                                      address='10.0.0.2',
                                      network=test_network.fake_network)
        floating_update.return_value = fake_floating_ip_get_by_address(
            None, '1.2.3.4')
        def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
                                    network):
            called['count'] += 1
        def fake_clean_conntrack(fixed_ip):
            # conntrack cleanup must only ever see the fixed IP above.
            if not str(fixed_ip) == "10.0.0.2":
                raise exception.FixedIpInvalid(address=fixed_ip)
        self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                       fake_is_stale_floating_ip_address)
        self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
                       fake_remove_floating_ip)
        self.stubs.Set(self.network.driver, 'clean_conntrack',
                       fake_clean_conntrack)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_start(self.context,
                                            instance_uuid=FAKEUUID,
                                            floating_addresses=addresses,
                                            rxtx_factor=3,
                                            project_id=self.project_id,
                                            source='fake_source',
                                            dest='fake_dest')
        self.assertEqual(called['count'], 2)
    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.floating_ip_update')
    def test_migrate_instance_finish(self, floating_update, fixed_get):
        """migrate_instance_finish re-plumbs floating IPs via the l3
        driver for exactly two of the three migrated addresses; the one
        flagged by _is_stale_floating_ip_address ('172.24.4.23') is
        presumably handled differently — the count assertion below is
        what pins this.
        """
        called = {'count': 0}
        def fake_floating_ip_get_by_address(context, address):
            return dict(test_floating_ip.fake_floating_ip,
                        address=address,
                        fixed_ip_id=0)
        def fake_is_stale_floating_ip_address(context, floating_ip):
            # Only the first migrated address is reported stale.
            return str(floating_ip.address) == '172.24.4.23'
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      instance_uuid='fake_uuid',
                                      address='10.0.0.2',
                                      network=test_network.fake_network)
        floating_update.return_value = fake_floating_ip_get_by_address(
            None, '1.2.3.4')
        def fake_add_floating_ip(floating_addr, fixed_addr, interface,
                                 network):
            called['count'] += 1
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
                       fake_floating_ip_get_by_address)
        self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                       fake_is_stale_floating_ip_address)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       fake_add_floating_ip)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_finish(self.context,
                                             instance_uuid=FAKEUUID,
                                             floating_addresses=addresses,
                                             host='fake_dest',
                                             rxtx_factor=3,
                                             project_id=self.project_id,
                                             source='fake_source')
        self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
self.network.create_public_dns_domain(self.context, domain1,
'testproject')
self.network.create_public_dns_domain(self.context, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 2)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[1]['domain'], domain2)
self.assertEqual(domains[0]['project'], 'testproject')
self.assertEqual(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
self.network.delete_dns_domain(self.context, domain1)
self.network.delete_dns_domain(self.context, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
    def test_mac_conflicts(self):
        """VIF creation retries with a fresh MAC when the generated
        address collides with an existing VIF.
        """
        # Make sure MAC collisions are retried.
        self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': 'fake_uuid',
            'network_id': 123,
            'uuid': 'fake_uuid',
            }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()
        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
            # NOTE(russellb) The VirtualInterface object requires an ID to be
            # set, and we expect it to get set automatically when we do the
            # save.
            vif.id = 1
        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
        self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
    """Verify disassociate_floating_ip wraps FloatingIpNotFoundForAddress."""
    # Stub the DB lookup to raise the "not found" error for the address.
    self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
    self.network.db.floating_ip_get_by_address(
        self.context, '1.2.3.4').AndRaise(
        exception.FloatingIpNotFoundForAddress(address='fake'))
    self.mox.ReplayAll()
    self.assertRaises(messaging.ExpectedException,
                      self.network.disassociate_floating_ip,
                      self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
    """Verify get_floating_ip wraps FloatingIpNotFound.

    Here the lookup is by id (floating_ip_get) rather than by address,
    but the same ExpectedException wrapping must apply.
    """
    self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
    self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
        exception.FloatingIpNotFound(id='fake'))
    self.mox.ReplayAll()
    self.assertRaises(messaging.ExpectedException,
                      self.network.get_floating_ip,
                      self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
    """Drive _associate_floating_ip into an L3-driver failure.

    Both DB association helpers are stubbed to succeed, while the l3
    driver's add_floating_ip raises a ProcessExecutionError carrying
    *stdout*; the test asserts *expected_exception* reaches the caller.
    """
    def _db_success(*args, **kwargs):
        # Pretend the DB (dis)association worked and hand back a fixed IP
        # record with its network attached.
        return dict(test_fixed_ip.fake_fixed_ip,
                    network=test_network.fake_network)

    def _driver_failure(*args, **kwargs):
        raise processutils.ProcessExecutionError(stdout)

    for db_method in ('floating_ip_fixed_ip_associate',
                      'floating_ip_disassociate'):
        self.stubs.Set(self.network.db, db_method, _db_success)
    self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                   _driver_failure)
    self.assertRaises(expected_exception,
                      self.network._associate_floating_ip, self.context,
                      '1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
    # With no recognizable driver output the raw ProcessExecutionError
    # propagates unchanged.
    self._test_associate_floating_ip_failure(None,
        processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
    # "Cannot find device" in the driver output is translated into the
    # more specific NoFloatingIpInterface exception.
    self._test_associate_floating_ip_failure('Cannot find device',
        exception.NoFloatingIpInterface)
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
    """get_floating_ip_by_address passes straight through to objects."""
    mock_get.return_value = mock.sentinel.floating
    result = self.network.get_floating_ip_by_address(self.context,
                                                     mock.sentinel.address)
    self.assertEqual(mock.sentinel.floating, result)
    mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
    """The manager queries floating IPs for the context's own project."""
    mock_get.return_value = mock.sentinel.floatings
    result = self.network.get_floating_ips_by_project(self.context)
    self.assertEqual(mock.sentinel.floatings, result)
    mock_get.assert_called_once_with(self.context, self.context.project_id)
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
def test_get_floating_ips_by_fixed_address(self, mock_get):
    """Bare address strings are returned, not FloatingIP objects."""
    mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
                             objects.FloatingIP(address='5.6.7.8')]
    addresses = self.network.get_floating_ips_by_fixed_address(
        self.context, mock.sentinel.address)
    self.assertEqual(['1.2.3.4', '5.6.7.8'], addresses)
    mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.db.floating_ip_get_pools')
def test_floating_ip_pool_exists(self, floating_ip_get_pools):
    """A pool listed by the DB is reported as existing."""
    floating_ip_get_pools.return_value = [{'name': 'public'}]
    exists = self.network._floating_ip_pool_exists(self.context, 'public')
    self.assertTrue(exists)
@mock.patch('nova.db.floating_ip_get_pools')
def test_floating_ip_pool_does_not_exist(self, floating_ip_get_pools):
    """With no pools in the DB the lookup reports non-existence."""
    floating_ip_get_pools.return_value = []
    exists = self.network._floating_ip_pool_exists(self.context, 'public')
    self.assertFalse(exists)
class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS."""

    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        # Route log output into a per-test temp dir so tests do not
        # leave files behind on the host.
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        # Non-admin context: instance DNS operations should not need
        # admin rights.
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    def test_dns_domains_private(self):
        """Create, list, verify and delete a private DNS domain."""
        zone1 = 'testzone'
        domain1 = 'example.org'
        self.network.create_private_dns_domain(self.context, domain1, zone1)
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(len(domains), 1)
        self.assertEqual(domains[0]['domain'], domain1)
        self.assertEqual(domains[0]['availability_zone'], zone1)
        self.network.delete_dns_domain(self.context, domain1)
# Module-level domains shared by the LdapDNSTestCase fixture below.
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.NoDBTestCase):
    """Tests nova.network.ldapdns.LdapDNS."""

    def setUp(self):
        super(LdapDNSTestCase, self).setUp()
        # Replace the real python-ldap module with an in-memory fake so
        # no actual LDAP server is needed.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.network.ldapdns.ldap',
            fake_ldap))
        dns_class = 'nova.network.ldapdns.LdapDNS'
        self.driver = importutils.import_object(dns_class)
        # Seed the fake directory with a root entry the driver can hang
        # domain entries off.
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'associateddomain': ['root'],
                 'dc': ['root']}
        self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
        # Every test starts with both module-level domains present.
        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)

    def tearDown(self):
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)
        super(LdapDNSTestCase, self).tearDown()

    def test_ldap_dns_domains(self):
        """The two domains created in setUp are listed back."""
        domains = self.driver.get_domains()
        self.assertEqual(len(domains), 2)
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)

    def test_ldap_dns_create_conflict(self):
        """Creating the same (name, domain) entry twice raises."""
        address1 = "10.10.10.11"
        name1 = "foo"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)

    def test_ldap_dns_create_and_get(self):
        """Entries can be looked up both by address and by name."""
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        # Nothing registered yet for this address.
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)
        # Two names pointing at the same address.
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.assertEqual(entries[0], name1)
        self.assertEqual(entries[1], name2)
        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], address1)

    def test_ldap_dns_delete(self):
        """Deleting one of two entries leaves the other; re-delete raises."""
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        LOG.debug("entries: %s" % entries)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], name2)
        # Deleting an entry that is already gone must raise NotFound.
        self.assertRaises(exception.NotFound,
                          self.driver.delete_entry,
                          name1, domain1)
class NetworkManagerNoDBTestCase(test.NoDBTestCase):
    """Tests nova.network.manager.NetworkManager without a database."""

    def setUp(self):
        super(NetworkManagerNoDBTestCase, self).setUp()
        self.context = context.RequestContext('fake-user', 'fake-project')
        self.manager = network_manager.NetworkManager()

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    def test_release_fixed_ip_not_associated(self, mock_fip_get_by_addr):
        # Tests that the method is a no-op when the fixed IP is not associated
        # to an instance.
        # NOTE(review): mock_fip_get_by_addr.return_value is never set to
        # fip here — verify the manager actually operates on this object.
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fake_network.next_fixed_ip(1))
        fip.instance_uuid = None
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address)
        self.assertFalse(mock_disassociate.called,
                         str(mock_disassociate.mock_calls))

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    def test_release_fixed_ip_allocated(self, mock_fip_get_by_addr):
        # Tests that the fixed IP is not disassociated if it's allocated.
        # NOTE(review): as above, the patched get_by_address is never wired
        # to return fip — confirm intent.
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fake_network.next_fixed_ip(1))
        fip.leased = False
        fip.allocated = True
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address)
        self.assertFalse(mock_disassociate.called,
                         str(mock_disassociate.mock_calls))

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_address')
    def test_release_fixed_ip_mac_matches_associated_instance(self,
            mock_vif_get_by_addr, mock_fip_get_by_addr):
        # Tests that the fixed IP is disassociated when the mac passed to
        # release_fixed_ip matches the VIF which has the same instance_uuid
        # as the instance associated to the FixedIP object. Also tests
        # that the fixed IP is marked as not leased in the database if it was
        # currently leased.
        instance = fake_instance.fake_instance_obj(self.context)
        fip = fake_network.next_fixed_ip(1)
        fip['instance_uuid'] = instance.uuid
        fip['leased'] = True
        # The VIF belongs to the same instance as the fixed IP, so the
        # release should be honored.
        vif = fip['virtual_interface']
        vif['instance_uuid'] = instance.uuid
        vif = objects.VirtualInterface._from_db_object(
            self.context, objects.VirtualInterface(), vif)
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        mock_vif_get_by_addr.return_value = vif
        with mock.patch.object(fip, 'save') as mock_fip_save:
            with mock.patch.object(fip, 'disassociate') as mock_disassociate:
                self.manager.release_fixed_ip(
                    self.context, fip.address, vif.address)
        # The leased flag flip must be persisted.
        mock_fip_save.assert_called_once_with()
        self.assertFalse(fip.leased)
        mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address)
        mock_disassociate.assert_called_once_with()

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_address',
                       return_value=None)
    def test_release_fixed_ip_vif_not_found_for_mac(self, mock_vif_get_by_addr,
                                                    mock_fip_get_by_addr):
        # Tests that the fixed IP is disassociated when the fixed IP is marked
        # as deallocated and there is no VIF found in the database for the mac
        # passed in.
        fip = fake_network.next_fixed_ip(1)
        fip['leased'] = False
        mac = fip['virtual_interface']['address']
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address, mac)
        mock_vif_get_by_addr.assert_called_once_with(self.context, mac)
        mock_disassociate.assert_called_once_with()

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    def test_release_fixed_ip_no_mac(self, mock_fip_get_by_addr):
        # Tests that the fixed IP is disassociated when the fixed IP is
        # deallocated and there is no mac address passed in (like before
        # the network rpc api version bump to pass it in).
        fip = fake_network.next_fixed_ip(1)
        fip['leased'] = False
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address)
        mock_disassociate.assert_called_once_with()

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_address')
    def test_release_fixed_ip_mac_mismatch_associated_instance(self,
            mock_vif_get_by_addr, mock_fip_get_by_addr):
        # Tests that the fixed IP is not disassociated when the VIF for the mac
        # passed to release_fixed_ip does not have an instance_uuid that
        # matches fixed_ip.instance_uuid.
        old_instance = fake_instance.fake_instance_obj(self.context)
        new_instance = fake_instance.fake_instance_obj(self.context)
        fip = fake_network.next_fixed_ip(1)
        fip['instance_uuid'] = new_instance.uuid
        fip['leased'] = False
        # The VIF still points at the old instance, so the release must
        # be ignored.
        vif = fip['virtual_interface']
        vif['instance_uuid'] = old_instance.uuid
        vif = objects.VirtualInterface._from_db_object(
            self.context, objects.VirtualInterface(), vif)
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        mock_vif_get_by_addr.return_value = vif
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(
                self.context, fip.address, vif.address)
        mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address)
        self.assertFalse(mock_disassociate.called,
                         str(mock_disassociate.mock_calls))

    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_id')
    @mock.patch.object(objects.Quotas, 'reserve')
    def test_deallocate_fixed_ip_explicit_disassociate(self,
                                                       mock_quota_reserve,
                                                       mock_vif_get_by_id,
                                                       mock_fip_get_by_addr):
        # Tests that we explicitly call FixedIP.disassociate when the fixed IP
        # is not leased and has an associated instance (race with dnsmasq).
        self.flags(force_dhcp_release=True)
        fake_inst = fake_instance.fake_instance_obj(self.context)
        fip = fake_network.next_fixed_ip(1)
        fip['instance_uuid'] = fake_inst.uuid
        fip['leased'] = False
        vif = fip['virtual_interface']
        vif['instance_uuid'] = fake_inst.uuid
        vif = objects.VirtualInterface._from_db_object(
            self.context, objects.VirtualInterface(), vif)
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        fip.network = fake_network.fake_network_obj(self.context,
                                                    fip.network_id)
        mock_fip_get_by_addr.return_value = fip
        mock_vif_get_by_id.return_value = vif

        # The remaining collaborators are patched on a nested function so
        # the fip-bound patches can target the object built above.
        @mock.patch.object(self.manager,
                '_do_trigger_security_group_members_refresh_for_instance')
        @mock.patch.object(self.manager,
                           '_validate_instance_zone_for_dns_domain',
                           return_value=False)
        @mock.patch.object(self.manager, '_teardown_network_on_host')
        @mock.patch.object(fip, 'save')
        @mock.patch.object(fip, 'disassociate')
        def do_test(mock_disassociate, mock_fip_save,
                    mock_teardown_network_on_host, mock_validate_zone,
                    mock_trigger_secgroup_refresh):
            self.assertEqual(fake_inst.uuid, fip.instance_uuid)
            self.assertFalse(fip.leased)
            self.manager.deallocate_fixed_ip(
                self.context, fip['address'], instance=fake_inst)
            mock_trigger_secgroup_refresh.assert_called_once_with(
                fake_inst.uuid)
            mock_teardown_network_on_host.assert_called_once_with(
                self.context, fip.network)
            mock_disassociate.assert_called_once_with()

        do_test()
|
scripnichenko/nova
|
nova/tests/unit/network/test_manager.py
|
Python
|
apache-2.0
| 167,629
|
[
"FEFF"
] |
9a43618bd50be780cd537dd1790e051c3f2d14d76784830a9ac193a7fa3de923
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
""":mod:`MDAnalysis.analysis.legacy` --- Legacy analysis code
==========================================================
.. versionadded:: 0.16.0
The :mod:`MDAnalysis.analysis.legacy` package contains analysis
modules that are not or only incompletely tested and not regularly
maintained. They nevertheless still provide useful and sometimes
unique analysis capabilities and are therefore provided **as is**.
.. warning::
Code in this module is not regularly maintained. Please use it very
carefully.
If you want to use code from this module then you will have to import
it explicitly. For example, ::
from MDAnalysis.analysis.legacy import x3dna
(For further discussion, see `Issue 743`_.)
.. _Issue 743: https://github.com/MDAnalysis/mdanalysis/issues/743
"""
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/analysis/legacy/__init__.py
|
Python
|
gpl-2.0
| 1,837
|
[
"MDAnalysis"
] |
ba00d652dd0aa3d03202027eb0dd2dc1327f6ed5983d0a077569062fc3945873
|
# !!! This uses the https://newsapi.org/ api. TO comply with the TOU
# !!! we must link back to this site whenever we display results.
import json
import requests
import webbrowser
from colorama import Fore
from plugin import plugin, require
@require(network=True)
@plugin('news')
class News:
    """Fetch and display news via the https://newsapi.org/ API.

    NewsAPI's terms of use require linking back to newsapi.org whenever
    results are displayed; parse_articles() prints that attribution.
    The user's API key and selected sources are persisted through the
    jarvis get_data/update_data store.
    """

    def __init__(self):
        # Curated list of source ids understood by newsapi.org.
        self.sources = [
            'bloomberg',
            'financial-times',
            'cnbc',
            'reuters',
            'al-jazeera-english',
            'the-wall-street-journal',
            'the-huffington-post',
            'business-insider',
            'the-new-york-times',
            'abc-news',
            'fox-news',
            'cnn',
            'google-news',
            'wired']
        # Map 1-based string indexes to source ids for menu selection.
        # (enumerate avoids the O(n^2) list.index() lookups of the old code.)
        self.source_dict = {}
        for position, source in enumerate(self.sources, start=1):
            self.source_dict[str(position)] = source

    def __call__(self, jarvis, s):
        """Dispatch the ``news`` sub-commands; see ``news help``."""
        if s == "updatekey":
            key = jarvis.input(
                "Please enter your NEWS API key (q or Enter go back): ")
            if key.lower() == "q" or key.lower() == "":
                jarvis.say("Could not update the NEWS API key! ", Fore.RED)
            else:
                self.update_api_key(jarvis, key)
                jarvis.say("NEWS API key successfully updated! ", Fore.GREEN)
        elif s == "configure":
            self.configure(jarvis)
        elif s == "remove":
            self.remove_source(jarvis)
        elif s == "help":
            jarvis.say("-------------------------------------")
            jarvis.say("Command\t\t | Description")
            jarvis.say("-------------------------------------")
            jarvis.say("news\t\t : Finds top headlines")
            jarvis.say(
                "news updatekey\t : Updates the news API key of the user")
            jarvis.say(
                "news configure\t : Configures the news channel of the user")
            jarvis.say("news sources\t : List the configured news sources")
            jarvis.say(
                "news remove\t : Removes a source from the news channel of the user")
            jarvis.say("news [word]\t : Finds articles related to that word")
        elif s == "sources":
            sources = self.get_news_sources(jarvis)
            if not sources:
                jarvis.say(
                    "No sources configured. Use 'news configure' to add sources.",
                    Fore.RED)
            else:
                # Print the configured sources as a 1-based numbered list.
                dic = {}
                for position, source in enumerate(sources, start=1):
                    dic[str(position)] = source
                for index in sorted([int(x) for x in dic.keys()]):
                    jarvis.say(str(index) + " : " + dic[str(index)])
        elif self.get_api_key(jarvis) is None:
            # Everything below needs the API key, so bail out early.
            jarvis.say("Missing API key", Fore.RED)
            jarvis.say("Visit https://newsapi.org/ to get the key", Fore.RED)
            jarvis.say(
                "Use \'news updatekey\' command to add a key\n",
                Fore.RED)
        elif s == "" or s == " ":
            self.parse_articles(self.get_headlines(jarvis), jarvis)
        else:
            # Treat any other input as search terms.
            searchlist = s.split(" ")
            if "" in searchlist:
                searchlist.remove("")
            if " " in searchlist:
                searchlist.remove(" ")
            self.parse_articles(self.get_news(jarvis, searchlist), jarvis)

    @staticmethod
    def get_api_key(jarvis):
        """
        will return either the news_api key of the user, already stored in the memory.json
        file or None in case the user does not have his own api
        """
        return jarvis.get_data("news-settings")

    def update_api_key(self, jarvis, api_key):
        """
        the user might have a news api key and they might want to add to memory.json or update an old one
        """
        jarvis.update_data("news-settings", api_key)
        return self.get_api_key(jarvis)

    def get_news_sources(self, jarvis):
        """
        returns a list of all the new sources added to the news channel of the user
        """
        sources = jarvis.get_data("news-sources")
        if sources is None:
            sources = []
        return sources

    def add_source(self, jarvis, news_source):
        """
        adds a new source (if it does not exist) to the news channel of the user
        """
        sources = self.get_news_sources(jarvis)
        if news_source not in sources:
            sources.append(news_source)
            jarvis.update_data("news-sources", sources)
            jarvis.say(
                news_source
                + " has been successfully been added to your sources!",
                Fore.GREEN)
        else:
            jarvis.say(
                news_source
                + " was already included in your sources!",
                Fore.GREEN)
        return self.get_news_sources(jarvis)

    def remove_source(self, jarvis):
        """
        removes a new source from the news channel of the user
        """
        sources = self.get_news_sources(jarvis)
        # Show the current sources as a 1-based numbered list.
        dic = {}
        for position, source in enumerate(sources, start=1):
            dic[str(position)] = source
        for index in sorted([int(x) for x in dic.keys()]):
            jarvis.say(str(index) + " : " + dic[str(index)])
        index_list = jarvis.input(
            "Type the indexes of the sources you would like to remove from your channel separated by "
            "space: ")
        index_list = index_list.split(" ")
        if " " in index_list:
            index_list.remove(" ")
        if "" in index_list:
            index_list.remove("")
        for index in index_list:
            if str(index) in dic:
                source = dic[str(index)]
                sources.remove(source)
                jarvis.update_data("news-sources", sources)
                jarvis.say(
                    source
                    + " has been successfully removed from your news channel!",
                    Fore.GREEN)
            else:
                jarvis.say("Index not found!", Fore.RED)
        return self.get_news_sources(jarvis)

    def configure(self, jarvis):
        """
        configures the news channel of the user
        """
        for index in sorted([int(x) for x in self.source_dict.keys()]):
            jarvis.say(str(index) + ": " + self.source_dict.get(str(index)))
        index_list = jarvis.input(
            "Type the indexes of the sources you would like to add to your channel separated by "
            "space: ")
        index_list = index_list.split(" ")
        if " " in index_list:
            index_list.remove(" ")
        if "" in index_list:
            index_list.remove("")
        for index in index_list:
            if index in self.source_dict.keys():
                self.add_source(jarvis, self.source_dict.get(index, index))
            else:
                jarvis.say(index + " is not a valid index", Fore.RED)

    def get_headlines(self, jarvis):
        """
        gets top headlines for a quick lookup of the world news, based on the news channel of the user (if it exists)
        """
        sources = self.get_news_sources(jarvis)
        if len(sources) == 0:
            jarvis.say(
                "You have not configured any source. Getting top headlines\n",
                Fore.GREEN)
            url = "https://newsapi.org/v2/top-headlines?country=us&apiKey=" + \
                self.get_api_key(jarvis)
        else:
            url = "https://newsapi.org/v2/top-headlines?sources="
            for source in sources:
                url += source + ","
            url += "&apiKey=" + self.get_api_key(jarvis)
        return self._get(jarvis, url)

    def get_news(self, jarvis, searchlist):
        """
        gets top news based on a particular search list , based on the news channel of the user (if it exists)
        """
        sources = self.get_news_sources(jarvis)
        url = "https://newsapi.org/v2/everything?q="
        for i in searchlist:
            url += i + "%20"
        if len(sources) != 0:
            url += "&sources="
            for source in sources:
                url += source + ","
        url += "&apiKey=" + self.get_api_key(jarvis)
        return self._get(jarvis, url)

    def _get(self, jarvis, url):
        """Fetch *url*; return the decoded JSON dict, or None on any error."""
        response = requests.get(url)
        if response.status_code == requests.codes.ok:
            data = json.loads(response.text)
            return data
        else:
            if response.status_code == 401:
                jarvis.say("API key not valid", Fore.RED)
            else:
                # BUG FIX: the old code concatenated the string with
                # response.raise_for_status(), which returns None (or
                # raises), so the error path itself crashed with a
                # TypeError. Report the numeric status code instead.
                jarvis.say("An error occurred: Error code: "
                           + str(response.status_code), Fore.RED)
            return None

    def parse_articles(self, data, jarvis):
        """Print the fetched articles and interactively expand one of them."""
        article_list = {}
        index = 1
        if data is None:
            jarvis.say("No Articles", Fore.RED)
            return
        # jarvis.say articles with their index
        if not data['articles']:
            jarvis.say("No Articles matching the word(s)", Fore.RED)
            return
        for article in data['articles']:
            jarvis.say(str(index) + ": " + article['title'])
            article_list[index] = article
            index += 1
        # Attribution link for News API to comply with TOU
        jarvis.say("\nPowered by News API. Type NewsAPI to learn more")
        jarvis.say("\nType index to expand news, 0 to return to jarvis prompt\n")
        # Check to see if index or NewsAPI was enterd
        idx = jarvis.input()
        if idx.lower() == "newsapi":
            webbrowser.open('https://newsapi.org/')
            return
        # check if we have a valid index
        try:
            # Only int() can fail here, so catch the specific ValueError
            # instead of the old blanket BaseException.
            int(idx)
            if int(idx) > (index - 1):
                jarvis.say(str(idx) + " is not a valid index", Fore.RED)
                return
            elif int(idx) == 0:
                return
        except ValueError:
            jarvis.say("Not a valid index", Fore.RED)
            return
        # if index valid jarvis.say article description
        jarvis.say(article_list[int(idx)]['description'])
        jarvis.say("Do you want to read more? (yes/no): ")
        i = jarvis.input()
        # if user wants to read more open browser to article url
        if i.lower() == "yes" or i.lower() == 'y':
            webbrowser.open(article_list[int(idx)]['url'])
        return
|
sukeesh/Jarvis
|
jarviscli/plugins/news.py
|
Python
|
mit
| 10,494
|
[
"VisIt"
] |
d0bc37c58988c4a132db23acd03e941f24b0b425f725a93739ad929e1245247d
|
""" Summary plots of SHAP values across a whole dataset.
"""
from __future__ import division
import warnings
import numpy as np
from scipy.stats import gaussian_kde
try:
import matplotlib.pyplot as pl
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from ._labels import labels
from . import colors
# TODO: remove unused title argument / use title argument
# TODO: Add support for hclustering based explanations where we sort the leaf order by magnitude and then show the dendrogram to the left
def violin(shap_values, features=None, feature_names=None, max_display=None, plot_type="violin",
color=None, axis_color="#333333", title=None, alpha=1, show=True, sort=True,
color_bar=True, plot_size="auto", layered_violin_max_num_bins=20, class_names=None,
class_inds=None,
color_bar_label=labels["FEATURE_VALUE"],
cmap=colors.red_blue,
# depreciated
auto_size_plot=None,
use_log_scale=False):
"""Create a SHAP beeswarm plot, colored by feature values when they are provided.
Parameters
----------
shap_values : numpy.array
For single output explanations this is a matrix of SHAP values (# samples x # features).
For multi-output explanations this is a list of such matrices of SHAP values.
features : numpy.array or pandas.DataFrame or list
Matrix of feature values (# samples x # features) or a feature_names list as shorthand
feature_names : list
Names of the features (length # features)
max_display : int
How many top features to include in the plot (default is 20, or 7 for interaction plots)
plot_type : "dot" (default for single output), "bar" (default for multi-output), "violin",
or "compact_dot".
What type of summary plot to produce. Note that "compact_dot" is only used for
SHAP interaction values.
plot_size : "auto" (default), float, (float, float), or None
What size to make the plot. By default the size is auto-scaled based on the number of
features that are being displayed. Passing a single float will cause each row to be that
many inches high. Passing a pair of floats will scale the plot by that
number of inches. If None is passed then the size of the current figure will be left
unchanged.
"""
# support passing an explanation object
if str(type(shap_values)).endswith("Explanation'>"):
shap_exp = shap_values
base_value = shap_exp.expected_value
shap_values = shap_exp.values
if features is None:
features = shap_exp.data
if feature_names is None:
feature_names = shap_exp.feature_names
# if out_names is None: # TODO: waiting for slicer support of this
# out_names = shap_exp.output_names
# deprecation warnings
if auto_size_plot is not None:
warnings.warn("auto_size_plot=False is deprecated and is now ignored! Use plot_size=None instead.")
multi_class = False
if isinstance(shap_values, list):
multi_class = True
if plot_type is None:
plot_type = "bar" # default for multi-output explanations
assert plot_type == "bar", "Only plot_type = 'bar' is supported for multi-output explanations!"
else:
if plot_type is None:
plot_type = "dot" # default for single output explanations
assert len(shap_values.shape) != 1, "Summary plots need a matrix of shap_values, not a vector."
# default color:
if color is None:
if plot_type == 'layered_violin':
color = "coolwarm"
elif multi_class:
color = lambda i: colors.red_blue_circle(i/len(shap_values))
else:
color = colors.blue_rgb
idx2cat = None
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = features.columns
# feature index to category flag
idx2cat = features.dtypes.astype(str).isin(["object", "category"]).tolist()
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif (features is not None) and len(features.shape) == 1 and feature_names is None:
feature_names = features
features = None
num_features = (shap_values[0].shape[1] if multi_class else shap_values.shape[1])
if features is not None:
shape_msg = "The shape of the shap_values matrix does not match the shape of the " \
"provided data matrix."
if num_features - 1 == features.shape[1]:
assert False, shape_msg + " Perhaps the extra column in the shap_values matrix is the " \
"constant offset? Of so just pass shap_values[:,:-1]."
else:
assert num_features == features.shape[1], shape_msg
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)])
if use_log_scale:
pl.xscale('symlog')
# plotting SHAP interaction values
if not multi_class and len(shap_values.shape) == 3:
if plot_type == "compact_dot":
new_shap_values = shap_values.reshape(shap_values.shape[0], -1)
new_features = np.tile(features, (1, 1, features.shape[1])).reshape(features.shape[0], -1)
new_feature_names = []
for c1 in feature_names:
for c2 in feature_names:
if c1 == c2:
new_feature_names.append(c1)
else:
new_feature_names.append(c1 + "* - " + c2)
return summary(
new_shap_values, new_features, new_feature_names,
max_display=max_display, plot_type="dot", color=color, axis_color=axis_color,
title=title, alpha=alpha, show=show, sort=sort,
color_bar=color_bar, plot_size=plot_size, class_names=class_names,
color_bar_label="*" + color_bar_label
)
if max_display is None:
max_display = 7
else:
max_display = min(len(feature_names), max_display)
sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))
# get plotting limits
delta = 1.0 / (shap_values.shape[1] ** 2)
slow = np.nanpercentile(shap_values, delta)
shigh = np.nanpercentile(shap_values, 100 - delta)
v = max(abs(slow), abs(shigh))
slow = -v
shigh = v
pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))
pl.subplot(1, max_display, 1)
proj_shap_values = shap_values[:, sort_inds[0], sort_inds]
proj_shap_values[:, 1:] *= 2 # because off diag effects are split in half
summary(
proj_shap_values, features[:, sort_inds] if features is not None else None,
feature_names=feature_names[sort_inds],
sort=False, show=False, color_bar=False,
plot_size=None,
max_display=max_display
)
pl.xlim((slow, shigh))
pl.xlabel("")
title_length_limit = 11
pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))
for i in range(1, min(len(sort_inds), max_display)):
ind = sort_inds[i]
pl.subplot(1, max_display, i + 1)
proj_shap_values = shap_values[:, ind, sort_inds]
proj_shap_values *= 2
proj_shap_values[:, i] /= 2 # because only off diag effects are split in half
summary(
proj_shap_values, features[:, sort_inds] if features is not None else None,
sort=False,
feature_names=["" for i in range(len(feature_names))],
show=False,
color_bar=False,
plot_size=None,
max_display=max_display
)
pl.xlim((slow, shigh))
pl.xlabel("")
if i == min(len(sort_inds), max_display) // 2:
pl.xlabel(labels['INTERACTION_VALUE'])
pl.title(shorten_text(feature_names[ind], title_length_limit))
pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)
pl.subplots_adjust(hspace=0, wspace=0.1)
if show:
pl.show()
return
if max_display is None:
max_display = 20
if sort:
# order features by the sum of their effect magnitudes
if multi_class:
feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=1), axis=0))
else:
feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))
feature_order = feature_order[-min(max_display, len(feature_order)):]
else:
feature_order = np.flip(np.arange(min(max_display, num_features)), 0)
row_height = 0.4
if plot_size == "auto":
pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5)
elif type(plot_size) in (list, tuple):
pl.gcf().set_size_inches(plot_size[0], plot_size[1])
elif plot_size is not None:
pl.gcf().set_size_inches(8, len(feature_order) * plot_size + 1.5)
pl.axvline(x=0, color="#999999", zorder=-1)
if plot_type == "dot":
for pos, i in enumerate(feature_order):
pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
shaps = shap_values[:, i]
values = None if features is None else features[:, i]
inds = np.arange(len(shaps))
np.random.shuffle(inds)
if values is not None:
values = values[inds]
shaps = shaps[inds]
colored_feature = True
try:
if idx2cat is not None and idx2cat[i]: # check categorical feature
colored_feature = False
else:
values = np.array(values, dtype=np.float64) # make sure this can be numeric
except:
colored_feature = False
N = len(shaps)
# hspacing = (np.max(shaps) - np.min(shaps)) / 200
# curr_bin = []
nbins = 100
quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-8))
inds = np.argsort(quant + np.random.randn(N) * 1e-6)
layer = 0
last_bin = -1
ys = np.zeros(N)
for ind in inds:
if quant[ind] != last_bin:
layer = 0
ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1)
layer += 1
last_bin = quant[ind]
ys *= 0.9 * (row_height / np.max(ys + 1))
if features is not None and colored_feature:
assert features.shape[0] == len(shaps), "Feature and SHAP matrices must have the same number of rows!"
# Get nan values:
nan_mask = np.isnan(values)
# Trim the value and color range to percentiles
vmin, vmax, cvals = _trim_crange(values, nan_mask)
# plot the nan values in the interaction feature as grey
pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color="#777777", vmin=vmin,
vmax=vmax, s=16, alpha=alpha, linewidth=0,
zorder=3, rasterized=len(shaps) > 500)
# plot the non-nan values colored by the trimmed feature value
pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)],
cmap=cmap, vmin=vmin, vmax=vmax, s=16,
c=cvals, alpha=alpha, linewidth=0,
zorder=3, rasterized=len(shaps) > 500)
else:
pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3,
color=color if colored_feature else "#777777", rasterized=len(shaps) > 500)
elif plot_type == "violin":
for pos, i in enumerate(feature_order):
pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
if features is not None:
global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)
global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)
for pos, i in enumerate(feature_order):
shaps = shap_values[:, i]
shap_min, shap_max = np.min(shaps), np.max(shaps)
rng = shap_max - shap_min
xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)
if np.std(shaps) < (global_high - global_low) / 100:
ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs)
else:
ds = gaussian_kde(shaps)(xs)
ds /= np.max(ds) * 3
values = features[:, i]
window_size = max(10, len(values) // 20)
smooth_values = np.zeros(len(xs) - 1)
sort_inds = np.argsort(shaps)
trailing_pos = 0
leading_pos = 0
running_sum = 0
back_fill = 0
for j in range(len(xs) - 1):
while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:
running_sum += values[sort_inds[leading_pos]]
leading_pos += 1
if leading_pos - trailing_pos > 20:
running_sum -= values[sort_inds[trailing_pos]]
trailing_pos += 1
if leading_pos - trailing_pos > 0:
smooth_values[j] = running_sum / (leading_pos - trailing_pos)
for k in range(back_fill):
smooth_values[j - k - 1] = smooth_values[j]
else:
back_fill += 1
# Get nan values:
nan_mask = np.isnan(values)
# Trim the value and color range to percentiles
vmin, vmax, cvals = _trim_crange(values, nan_mask)
# plot the nan values in the interaction feature as grey
pl.scatter(shaps[nan_mask], np.ones(shap_values[nan_mask].shape[0]) * pos,
color="#777777", vmin=vmin, vmax=vmax, s=9,
alpha=alpha, linewidth=0, zorder=1)
# plot the non-nan values colored by the trimmed feature value
pl.scatter(shaps[np.invert(nan_mask)], np.ones(shap_values[np.invert(nan_mask)].shape[0]) * pos,
cmap=cmap, vmin=vmin, vmax=vmax, s=9,
c=cvals, alpha=alpha, linewidth=0, zorder=1)
# smooth_values -= nxp.nanpercentile(smooth_values, 5)
# smooth_values /= np.nanpercentile(smooth_values, 95)
smooth_values -= vmin
if vmax - vmin > 0:
smooth_values /= vmax - vmin
for i in range(len(xs) - 1):
if ds[i] > 0.05 or ds[i + 1] > 0.05:
pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]],
[pos - ds[i], pos - ds[i + 1]], color=colors.red_blue_no_bounds(smooth_values[i]),
zorder=2)
else:
parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False,
widths=0.7,
showmeans=False, showextrema=False, showmedians=False)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor('none')
pc.set_alpha(alpha)
elif plot_type == "layered_violin": # courtesy of @kodonnell
num_x_points = 200
bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype(
'int') # the indices of the feature data corresponding to each bin
shap_min, shap_max = np.min(shap_values), np.max(shap_values)
x_points = np.linspace(shap_min, shap_max, num_x_points)
# loop through each feature and plot:
for pos, ind in enumerate(feature_order):
# decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
# to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
feature = features[:, ind]
unique, counts = np.unique(feature, return_counts=True)
if unique.shape[0] <= layered_violin_max_num_bins:
order = np.argsort(unique)
thesebins = np.cumsum(counts[order])
thesebins = np.insert(thesebins, 0, 0)
else:
thesebins = bins
nbins = thesebins.shape[0] - 1
# order the feature data so we can apply percentiling
order = np.argsort(feature)
# x axis is located at y0 = pos, with pos being there for offset
y0 = np.ones(num_x_points) * pos
# calculate kdes:
ys = np.zeros((nbins, num_x_points))
for i in range(nbins):
# get shap values in this bin:
shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
# if there's only one element, then we can't
if shaps.shape[0] == 1:
warnings.warn(
"not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot."
% (i, feature_names[ind]))
# to ignore it, just set it to the previous y-values (so the area between them will be zero). Not ys is already 0, so there's
# nothing to do if i == 0
if i > 0:
ys[i, :] = ys[i - 1, :]
continue
# save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
# scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
# do nothing, but when we've gone with the unqique option, this will matter - e.g. if 99% are male and 1%
# female, we want the 1% to appear a lot smaller.
size = thesebins[i + 1] - thesebins[i]
bin_size_if_even = features.shape[0] / nbins
relative_bin_size = size / bin_size_if_even
ys[i, :] *= relative_bin_size
# now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
# instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no
# whitespace
ys = np.cumsum(ys, axis=0)
width = 0.8
scale = ys.max() * 2 / width # 2 is here as we plot both sides of x axis
for i in range(nbins - 1, -1, -1):
y = ys[i, :] / scale
c = pl.get_cmap(color)(i / (
nbins - 1)) if color in pl.cm.datad else color # if color is a cmap, use it, otherwise use a color
pl.fill_between(x_points, pos - y, pos + y, facecolor=c)
pl.xlim(shap_min, shap_max)
elif not multi_class and plot_type == "bar":
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
global_shap_values = np.abs(shap_values).mean(0)
pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color)
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
elif multi_class and plot_type == "bar":
if class_names is None:
class_names = ["Class "+str(i) for i in range(len(shap_values))]
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
left_pos = np.zeros(len(feature_inds))
if class_inds is None:
class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
elif class_inds == "original":
class_inds = range(len(shap_values))
for i, ind in enumerate(class_inds):
global_shap_values = np.abs(shap_values[ind]).mean(0)
pl.barh(
y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center',
color=color(i), label=class_names[ind]
)
left_pos += global_shap_values[feature_inds]
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
pl.legend(frameon=False, fontsize=12)
# draw the color bar
if color_bar and features is not None and plot_type != "bar" and \
(plot_type != "layered_violin" or color in pl.cm.datad):
import matplotlib.cm as cm
m = cm.ScalarMappable(cmap=cmap if plot_type != "layered_violin" else pl.get_cmap(color))
m.set_array([0, 1])
cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)
cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])
cb.set_label(color_bar_label, size=12, labelpad=0)
cb.ax.tick_params(labelsize=11, length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.9) * 20)
# cb.draw_all()
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().spines['left'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color)
pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13)
if plot_type != "bar":
pl.gca().tick_params('y', length=20, width=0.5, which='major')
pl.gca().tick_params('x', labelsize=11)
pl.ylim(-1, len(feature_order))
if plot_type == "bar":
pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13)
else:
pl.xlabel(labels['VALUE'], fontsize=13)
if show:
pl.show()
def _trim_crange(values, nan_mask):
"""Trim the color range, but prevent the color range from collapsing."""
# Get vmin and vmax as 5. and 95. percentiles
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax: # if percentile range is equal, take 1./99. perc.
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax: # if still equal, use min/max
vmin = np.min(values)
vmax = np.max(values)
if vmin > vmax: # fixes rare numerical precision issues
vmin = vmax
# Get color values depnding on value range
cvals = values[np.invert(nan_mask)].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
cvals[cvals_imp > vmax] = vmax
cvals[cvals_imp < vmin] = vmin
return vmin, vmax, cvals
def shorten_text(text, length_limit):
    """Return *text* unchanged when it fits, else truncate with an ellipsis.

    The returned string never exceeds *length_limit* characters; the last
    three characters of a truncated result are "...".
    """
    if len(text) <= length_limit:
        return text
    return text[:length_limit - 3] + "..."
|
slundberg/shap
|
shap/plots/_violin.py
|
Python
|
mit
| 23,938
|
[
"Gaussian"
] |
e50a9b331d35215f602a0fce7ccbf405b99d6a2bf9c9b6cff668d18e9b1887b0
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from customdelegate import CustomDelegate, DocumentationMetaclass, ValueLoading
from camelot.view.controls import editors
from camelot.core.utils import variant_to_pyobject
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
class DateTimeDelegate(CustomDelegate):
    """Delegate that paints datetime values with the locale's short
    date/time format, right-aligned in the cell."""

    __metaclass__ = DocumentationMetaclass

    editor = editors.DateTimeEditor

    def __init__(self, parent=None, editable=True, **kwargs):
        CustomDelegate.__init__(self, parent, editable=editable, **kwargs)
        # Resolve the locale-dependent display format once, at construction.
        self.datetime_format = QtCore.QLocale().dateTimeFormat(QtCore.QLocale.ShortFormat)

    def paint(self, painter, option, index):
        """Draw the cell background and the formatted datetime text."""
        painter.save()
        self.drawBackground(painter, option, index)
        py_value = variant_to_pyobject(index.model().data(index, Qt.EditRole))
        if py_value in (None, ValueLoading):
            text = u''
        else:
            # Convert the Python datetime-like value into a QDateTime so the
            # locale format string can be applied.
            qdt = QtCore.QDateTime(py_value.year, py_value.month, py_value.day,
                                   py_value.hour, py_value.minute, py_value.second)
            text = qdt.toString(self.datetime_format)
        self.paint_text(painter, option, index, text, horizontal_align=Qt.AlignRight)
        painter.restore()
|
kurtraschke/camelot
|
camelot/view/controls/delegates/datetimedelegate.py
|
Python
|
gpl-2.0
| 2,386
|
[
"VisIt"
] |
49083c3d5f8ee815747f3f1083d6fc6346d4d73f62bf567036d79aef5d39cb1e
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image Transformer model with model and data parallelism using MTF.
Integration of Mesh tensorflow with Image Transformer to do model parallelism.
Currently, this supports unconditional image generation. Specify a particular
architecture layout in the hparams that specifies how different dimensions are
split or replicated along the mesh dimensions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
from tensor2tensor.mesh_tensorflow import mtf_layers
from tensor2tensor.mesh_tensorflow import mtf_model
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_model
class MtfImageTransformer(mtf_model.MtfModel):
  """Image Transformer in mesh_tensorflow.

  The properties below declare the named mtf.Dimension objects along which
  tensors are laid out; the hparams "layout" string decides which of these
  dimensions are split across the mesh.
  """

  @property
  def inputs_vocab_dim(self):
    # Vocabulary of the conditioning inputs (class labels); only meaningful
    # when the problem provides inputs.
    assert self.has_input
    return mtf.Dimension("inputs_vocab", self._hparams.num_classes)

  @property
  def targets_vocab_dim(self):
    # Target vocabulary size comes from the problem's target modality.
    return mtf.Dimension(
        "vocab", self._problem_hparams.target_modality._vocab_size)  # pylint: disable=protected-access

  @property
  def outputs_vocab_dim(self):
    # Output logits are always over 256 values (8-bit pixel intensities).
    return mtf.Dimension("output_vocab", 256)

  @property
  def rows_dim(self):
    return mtf.Dimension("rows", self._hparams.img_len)

  @property
  def cols_dim(self):
    # Columns flattened together with the channel dimension.
    return mtf.Dimension(
        "cols", self._hparams.img_len*self._hparams.num_channels)

  @property
  def orig_cols_dim(self):
    # Column dimension before channels were folded in (used for the final
    # reshape of the logits).
    return mtf.Dimension("orig_cols", self._hparams.img_len)

  @property
  def channels_dim(self):
    return mtf.Dimension("channels", self._hparams.num_channels)

  @property
  def model_dim(self):
    return mtf.Dimension("d_model", self._hparams.hidden_size)

  @property
  def max_length_dim(self):
    return mtf.Dimension("max_length", self._hparams.max_length)

  @property
  def length_dim(self):
    return mtf.Dimension("length", self._hparams.max_length)

  @property
  def heads_dim(self):
    return mtf.Dimension("heads", self._hparams.num_heads)

  @property
  def kv_dim(self):
    return mtf.Dimension("d_kv", self._hparams.d_kv)

  @property
  def feedforward_dim(self):
    return mtf.Dimension("d_ff", self._hparams.d_ff)

  @property
  def activation_type(self):
    """Map hparams.activation_dtype to the corresponding tf dtype."""
    hparams = self._hparams
    if hparams.activation_dtype == "float32":
      activation_dtype = tf.float32
    elif hparams.activation_dtype == "float16":
      activation_dtype = tf.float16
    elif hparams.activation_dtype == "bfloat16":
      activation_dtype = tf.bfloat16
    else:
      raise ValueError(
          "unknown hparams.activation_dtype %s" % hparams.activation_dtype)
    return activation_dtype

  def create_positional_emb_2d(self, targets):
    """Learned 2d positional embedding for images.

    Separate row and column embedding tables are gathered and broadcast to
    a [rows, cols, d_model] shape, then summed.
    """
    mesh = targets.mesh
    positional_emb_rows_var = mtf.get_variable(
        mesh, "positional_emb_rows",
        mtf.Shape([self.max_length_dim, self.model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=self.activation_type)
    positional_emb_cols_var = mtf.get_variable(
        mesh, "positional_emb_cols",
        mtf.Shape([self.max_length_dim, self.model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=self.activation_type)
    targets_position_x = mtf.range(mesh, self.rows_dim, dtype=tf.int32)
    targets_position_y = mtf.range(mesh, self.cols_dim, dtype=tf.int32)
    position_x = mtf.broadcast(
        mtf.gather(positional_emb_rows_var, targets_position_x,
                   self.max_length_dim),
        mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
    position_y = mtf.broadcast(
        mtf.gather(positional_emb_cols_var, targets_position_y,
                   self.max_length_dim),
        mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
    return position_x + position_y

  def mtf_model_fn(self, features, mesh):
    """Build the decoder-only Image Transformer graph.

    Returns a (logits, loss) pair of mtf Tensors.
    """
    features = copy.copy(features)
    tf.logging.info("features = %s" % features)
    hparams = self._hparams
    activation_dtype = self.activation_type
    # We assume fixed vocab size for targets
    targets = tf.to_int32(features["targets"])
    # Image preprocessing, reshape into a 1D sequence and shift right.
    length = hparams.img_len*hparams.img_len*hparams.num_channels
    targets = tf.reshape(targets, [hparams.batch_size, length])
    shifted_targets = common_layers.shift_right_2d(targets)
    # Declare all the dimensions
    batch_dim = mtf.Dimension("batch", hparams.batch_size)

    def import_to_batch_by_length(x, name):
      # Lift a tf tensor onto the mesh with shape [batch, length].
      return mtf.import_tf_tensor(
          mesh, x, mtf.Shape([batch_dim, self.length_dim]), name=name)

    def layer_prepostprocess_dropout(x):
      # Dropout with noise broadcast over the length dimension.
      return mtf.dropout(
          x, keep_prob=1.0 - hparams.layer_prepostprocess_dropout,
          noise_shape=mtf.Shape([batch_dim, self.model_dim]))

    targets = import_to_batch_by_length(targets, "targets")
    shifted_targets = import_to_batch_by_length(
        shifted_targets, "shifted_targets")
    extra_losses = []
    # Create targets content and position embeddings.
    # Create embedding var for targets and positions and do a gather.
    targets_embedding_var = mtf.get_variable(
        mesh, "targets_embedding",
        mtf.Shape([self.targets_vocab_dim, self.model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=activation_dtype)
    x = mtf.gather(targets_embedding_var,
                   shifted_targets, self.targets_vocab_dim)
    # Add positional embeddings
    x += mtf.reshape(self.create_positional_emb_2d(targets),
                     [self.length_dim, self.model_dim])
    # If conditional and input is given, add the input embedding to the target.
    # TODO(nikip): Verify conditional.
    if self.has_input and not hparams.unconditional:
      inputs = tf.squeeze(tf.to_int32(features["inputs"]), [2, 3])
      inputs = import_to_batch_by_length(inputs, "inputs")
      # Input embeddings
      inputs_embedding_var = mtf_layers.embedding(
          mesh, "input_embedding",
          mtf.Shape([self.inputs_vocab_dim, self.model_dim]),
          activation_dtype=activation_dtype)
      inputs_emb = mtf.gather(
          inputs_embedding_var, inputs, self.inputs_vocab_dim)
      x += inputs_emb
    # Image Transformer Decoder
    # [ self attention - ffn - residual + dropout] x n
    for layer in range(hparams.num_decoder_layers):
      layer_name = "decoder_layer_%d" % layer
      with tf.variable_scope(layer_name):
        # Self attention layer
        x += layer_prepostprocess_dropout(
            mtf_layers.masked_local_attention_1d(
                mtf_layers.layer_norm(x, self.model_dim, name="layer_norm_att"),
                None,
                self.kv_dim,
                self.heads_dim,
                block_length=hparams.block_length,
                name="self_att"))
        # ffn layer
        x += layer_prepostprocess_dropout(mtf_layers.dense_relu_dense(
            mtf_layers.layer_norm(x, self.model_dim, name="layer_norm_ffn"),
            self.feedforward_dim,
            hparams.dropout,
            dropout_broadcast_dims=[self.length_dim]))
    x = mtf_layers.layer_norm(x, self.model_dim, name="final_layer_norm")
    # Calculate the logits and loss.
    logits = mtf_layers.dense(x, self.outputs_vocab_dim, name="logits")
    soft_targets = mtf.one_hot(
        targets, self.outputs_vocab_dim, dtype=activation_dtype)
    loss = mtf_layers.softmax_cross_entropy_with_logits(
        logits, soft_targets, self.outputs_vocab_dim)
    loss = mtf.reduce_mean(loss)
    for l in extra_losses:
      loss += l
    # Reshape logits to original target shape.
    logits = mtf.reshape(
        logits,
        mtf.Shape([batch_dim, self.rows_dim, self.orig_cols_dim,
                   self.channels_dim, self.outputs_vocab_dim]))
    return logits, loss
@registry.register_hparams
def mtf_image_transformer_base():
  """Base set of hyperparameters for the mesh-TF Image Transformer."""
  hparams = common_hparams.basic_params1()
  hparams.no_data_parallelism = True
  hparams.use_fixed_batch_size = True
  hparams.batch_size = 1
  hparams.max_length = 3072
  hparams.hidden_size = 256
  hparams.label_smoothing = 0.0
  # 8-way data-parallelism: the batch dimension is split over 8 mesh slices.
  hparams.add_hparam("mesh_shape", "batch:8")
  hparams.add_hparam("layout", "batch:batch")
  hparams.add_hparam("mtf_mode", True)
  hparams.add_hparam("num_heads", 8)
  hparams.add_hparam("filter_size", 1024)
  hparams.add_hparam("num_encoder_layers", 0)
  hparams.add_hparam("num_decoder_layers", 6)
  hparams.add_hparam("attention_key_size", 256)
  hparams.add_hparam("attention_value_size", 256)
  # Share weights between input and target embeddings
  hparams.shared_embedding = True
  # mixture of experts hparams
  hparams.add_hparam("ffn_layer", "dense_relu_dense")
  hparams.add_hparam("moe_overhead_train", 1.0)
  hparams.add_hparam("moe_overhead_eval", 2.0)
  hparams.moe_num_experts = 16
  hparams.moe_loss_coef = 1e-3
  hparams.shared_embedding_and_softmax_weights = True
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000
  hparams.add_hparam("d_kv", 64)
  hparams.add_hparam("d_ff", 2048)
  # Image related hparams
  hparams.add_hparam("img_len", 32)
  hparams.add_hparam("num_channels", 3)
  hparams.add_hparam("unconditional", True)
  hparams.add_hparam("block_length", 128)
  return hparams
@registry.register_hparams
def mtf_image_transformer_tiny():
  """Small model for catching bugs locally."""
  hparams = mtf_image_transformer_base()
  hparams.hidden_size = 128
  hparams.d_ff = 256
  hparams.batch_size = 4
  hparams.num_encoder_layers = 1
  hparams.num_decoder_layers = 4
  hparams.num_heads = 4
  hparams.attention_key_size = 128
  hparams.attention_value_size = 128
  hparams.block_length = 32
  # 2-way data-parallelism over the batch dimension.
  hparams.mesh_shape = "batch:2"
  hparams.layout = "batch:batch"
  return hparams
@registry.register_hparams
def mtf_image_transformer_single():
  """Tiny model run on a single device (empty mesh shape and layout)."""
  hparams = mtf_image_transformer_tiny()
  hparams.mesh_shape = ""
  hparams.layout = ""
  hparams.hidden_size = 32
  hparams.filter_size = 32
  hparams.batch_size = 1
  hparams.num_encoder_layers = 1
  hparams.num_decoder_layers = 1
  hparams.num_heads = 2
  hparams.attention_key_size = 32
  hparams.attention_value_size = 32
  hparams.block_length = 16
  return hparams
@registry.register_hparams
def mtf_image_transformer_base_single():
  """Base-sized model on a single device (empty mesh shape and layout)."""
  hparams = mtf_image_transformer_base()
  hparams.num_decoder_layers = 6
  hparams.filter_size = 256
  hparams.block_length = 128
  hparams.mesh_shape = ""
  hparams.layout = ""
  return hparams
@registry.register_hparams
def mtf_image_transformer_base_cifar():
  """Data parallel CIFAR parameters (8-way batch split)."""
  hparams = mtf_image_transformer_base()
  hparams.mesh_shape = "batch:8"
  hparams.layout = "batch:batch"
  hparams.learning_rate_decay_steps = 13600  # one epoch
  hparams.batch_size = 32
  hparams.num_heads = 4
  hparams.num_decoder_layers = 12
  hparams.block_length = 256
  hparams.hidden_size = 512
  hparams.d_ff = 2048
  hparams.learning_rate = 0.5
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.3
  hparams.unconditional = True
  return hparams
@registry.register_hparams
def mtf_image_transformer_cifar_4x():
  """Data parallel CIFAR parameters, scaled to a 32-way batch split."""
  hparams = mtf_image_transformer_base_cifar()
  hparams.mesh_shape = "batch:32"
  hparams.layout = "batch:batch"
  hparams.batch_size = 128
  return hparams
@registry.register_hparams
def mtf_image_transformer_cifar_mp_4x():
  """Model- and data-parallel CIFAR parameters (4-way model, 8-way batch)."""
  hparams = mtf_image_transformer_base_cifar()
  hparams.mesh_shape = "model:4;batch:8"
  # d_ff and heads are split over the model axis; batch over the batch axis.
  hparams.layout = "batch:batch;d_ff:model;heads:model"
  hparams.batch_size = 32
  hparams.num_heads = 8
  hparams.d_ff = 8192
  return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet():
  """Data parallel ImageNet parameters (32-way batch split)."""
  hparams = mtf_image_transformer_base_cifar()
  hparams.mesh_shape = "batch:32"
  hparams.layout = "batch:batch"
  hparams.batch_size = 128
  hparams.d_ff = 2048
  hparams.hidden_size = 512
  hparams.num_decoder_layers = 12
  hparams.learning_rate = 0.5
  hparams.learning_rate_warmup_steps = 31250
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.unconditional = True
  return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet_mp():
  """Model parallel ImageNet parameters (4-way model, 8-way batch)."""
  hparams = mtf_image_transformer_base_imagenet()
  hparams.mesh_shape = "model:4;batch:8"
  # d_ff and heads are split over the model axis; batch over the batch axis.
  hparams.layout = "batch:batch;d_ff:model;heads:model"
  hparams.batch_size = 32
  hparams.num_heads = 8
  hparams.d_ff = 8192
  hparams.learning_rate_warmup_steps = 31250
  hparams.unconditional = True
  return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_moe():
  """Tiny model with a mixture-of-experts feed-forward layer."""
  hparams = mtf_image_transformer_tiny()
  hparams.mesh_shape = "all:4"
  hparams.layout = "batch:all,experts:all"
  hparams.ffn_layer = "moe"
  return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_8gpu():
  """Tiny model sharded over 8 devices along vocab/filter/heads dims."""
  hparams = mtf_image_transformer_tiny()
  hparams.mesh_shape = "all:8"
  hparams.layout = "vocab:all;filter_size:all;heads:all"
  return hparams
@registry.register_hparams
def mtf_image_transformer_length_sharded():
  """Tiny model with the sequence-length dimension split over 2 devices."""
  hparams = mtf_image_transformer_tiny()
  hparams.mesh_shape = "all:2"
  hparams.layout = "length:all"
  return hparams
|
vthorsteinsson/tensor2tensor
|
tensor2tensor/mesh_tensorflow/mtf_image_transformer.py
|
Python
|
apache-2.0
| 14,371
|
[
"MOE"
] |
6b52b8ff8a973e809ca8f9178d3e26455d50524650dc4f05bee87e0f7caf807f
|
"""Support for Ecobee binary sensors."""
from homeassistant.components.binary_sensor import (
BinarySensorDevice,
DEVICE_CLASS_OCCUPANCY,
)
from .const import DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER, _LOGGER
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Old way of setting up ecobee binary sensors (no-op; kept for YAML compat)."""
    return None
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up ecobee binary (occupancy) sensors."""
    data = hass.data[DOMAIN]
    # One entity per occupancy-capable remote sensor, across all thermostats.
    entities = [
        EcobeeBinarySensor(data, sensor["name"], index)
        for index in range(len(data.ecobee.thermostats))
        for sensor in data.ecobee.get_remote_sensors(index)
        for item in sensor["capability"]
        if item["type"] == "occupancy"
    ]
    async_add_entities(entities, True)
class EcobeeBinarySensor(BinarySensorDevice):
    """Occupancy sensor backed by an ecobee remote sensor or thermostat."""

    def __init__(self, data, sensor_name, sensor_index):
        """Initialize the Ecobee sensor."""
        self.data = data
        self._name = sensor_name + " Occupancy"
        self.sensor_name = sensor_name
        self.index = sensor_index
        self._state = None

    @property
    def name(self):
        """Return the name of the Ecobee sensor."""
        return self._name.rstrip()

    @property
    def unique_id(self):
        """Return a unique identifier for this sensor."""
        for remote in self.data.ecobee.get_remote_sensors(self.index):
            if remote["name"] != self.sensor_name:
                continue
            if "code" in remote:
                # Remote room sensors carry their own hardware code.
                return f"{remote['code']}-{self.device_class}"
            # Thermostat-internal sensor: key off the thermostat identifier.
            thermostat = self.data.ecobee.get_thermostat(self.index)
            return f"{thermostat['identifier']}-{remote['id']}-{self.device_class}"

    @property
    def device_info(self):
        """Return device information for this sensor."""
        identifier = None
        model = None
        for remote in self.data.ecobee.get_remote_sensors(self.index):
            if remote["name"] != self.sensor_name:
                continue
            if "code" in remote:
                identifier = remote["code"]
                model = "ecobee Room Sensor"
            else:
                thermostat = self.data.ecobee.get_thermostat(self.index)
                identifier = thermostat["identifier"]
                try:
                    model = (
                        f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
                    )
                except KeyError:
                    # Unknown model: leave model as None so no device info is
                    # reported, and ask the user to file the model number.
                    _LOGGER.error(
                        "Model number for ecobee thermostat %s not recognized. "
                        "Please visit this link and provide the following information: "
                        "https://github.com/home-assistant/home-assistant/issues/27172 "
                        "Unrecognized model number: %s",
                        thermostat["name"],
                        thermostat["modelNumber"],
                    )
            break
        if identifier is None or model is None:
            return None
        return {
            "identifiers": {(DOMAIN, identifier)},
            "name": self.sensor_name,
            "manufacturer": MANUFACTURER,
            "model": model,
        }

    @property
    def is_on(self):
        """Return the status of the sensor."""
        return self._state == "true"

    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        return DEVICE_CLASS_OCCUPANCY

    async def async_update(self):
        """Get the latest state of the sensor."""
        await self.data.update()
        for remote in self.data.ecobee.get_remote_sensors(self.index):
            if remote["name"] != self.sensor_name:
                continue
            # Record the first occupancy capability of each matching sensor.
            occupancy = next(
                (cap for cap in remote["capability"] if cap["type"] == "occupancy"),
                None,
            )
            if occupancy is not None:
                self._state = occupancy["value"]
|
joopert/home-assistant
|
homeassistant/components/ecobee/binary_sensor.py
|
Python
|
apache-2.0
| 4,119
|
[
"VisIt"
] |
95fc81af5ea087653234c8c329ea158b41b7eb8fd53312a503c04383755bd3a5
|
#!/usr/bin/env python
"""
@package npt2
*** This code is for the new CUDA platform! ***
NPT simulation in OpenMM. Runs a simulation to compute bulk properties
(for example, the density or the enthalpy of vaporization) and compute the
derivative with respect to changing the force field parameters.
The basic idea is this: First we run a density simulation to determine
the average density. This quantity of course has some uncertainty,
and in general we want to avoid evaluating finite-difference
derivatives of noisy quantities. The key is to realize that the
densities are sampled from a Boltzmann distribution, so the analytic
derivative can be computed if the potential energy derivative is
accessible. We compute the potential energy derivative using
finite-difference of snapshot energies and apply a simple formula to
compute the density derivative.
The enthalpy of vaporization should come just as easily.
This script borrows from John Chodera's ideal gas simulation in PyOpenMM.
References
[1] Shirts MR, Mobley DL, Chodera JD, and Pande VS. Accurate and efficient corrections for
missing dispersion interactions in molecular simulations. JPC B 111:13052, 2007.
[2] Ahn S and Fessler JA. Standard errors of mean, variance, and standard deviation estimators.
Technical Report, EECS Department, The University of Michigan, 2003.
Copyright And License
@author Lee-Ping Wang <leeping@stanford.edu>
@author John D. Chodera <jchodera@gmail.com>
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but without any
warranty; without even the implied warranty of merchantability or fitness for a
particular purpose. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
#================#
# Global Imports #
#================#
import os
import sys
import numpy as np
from simtk.unit import *
from simtk.openmm import *
from simtk.openmm.app import *
from forcebalance.forcefield import FF
from forcebalance.nifty import col, flat, lp_dump, lp_load, printcool, printcool_dictionary
from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, f1d7p
from forcebalance.molecule import Molecule
from forcebalance.openmmio import *
import argparse
#======================================================#
# Global, user-tunable variables (simulation settings) #
#======================================================#
parser = argparse.ArgumentParser()
parser.add_argument('liquid_pdbfile', help='PDB File for the liquid')
parser.add_argument('openmm_xmlfile', help='OpenMM Parameter XML File for the liquid, contained within forcebalance.p')
parser.add_argument('liquid_prod_steps', type=int, help='Number of time steps for the liquid production simulation')
parser.add_argument('liquid_timestep', type=float, help='Length of the time step for the liquid simulation, in femtoseconds')
parser.add_argument('liquid_interval', type=float, help='Time interval for saving the liquid coordinates, in picoseconds')
parser.add_argument('temperature',type=float, help='Temperature (K)')
parser.add_argument('pressure',type=float, help='Pressure (Atm)')
# Other optional arguments
parser.add_argument('--liquid_equ_steps', type=int, help='Number of time steps used for equilibration', default=100000)
# NOTE(review): the default is a PDB file ("mono.pdb"); the previous help text said
# "TINKER .xyz file", which was copied from the TINKER version of this script.
parser.add_argument('--gas_pdbfile', help='PDB file for the gas', type=str, default="mono.pdb")
# Fixed help text: this option controls the gas-phase *equilibration*, not production.
parser.add_argument('--gas_equ_steps', type=int, help='Number of time steps for the gas-phase equilibration simulation', default=100000)
parser.add_argument('--gas_prod_steps', type=int, help='Number of time steps for the gas-phase production simulation', default=1000000)
parser.add_argument('--gas_timestep', type=float, help='Length of the time step for the gas-phase simulation, in femtoseconds', default=0.5)
parser.add_argument('--gas_interval', type=float, help='Time interval for saving the gas-phase coordinates, in picoseconds', default=0.1)
parser.add_argument('--force_cuda', action='store_true', help='Crash immediately if CUDA platform is not available')
args = parser.parse_args()

# The convention of using the "iteration" as a fundamental unit comes from the OpenMM script.
timestep = args.liquid_timestep * femtosecond # timestep for integration in femtosecond
faststep = 0.25 * femtosecond                 # "fast" timestep (for MTS integrator, if used)
# Number of time steps per interval (or "iteration") for saving coordinates (in steps)
nsteps = int(1000 * args.liquid_interval / args.liquid_timestep)
# Floor division keeps the iteration counts integral (they are used with range()),
# matching the Python 2 integer-division behavior of the original.
nequiliterations = args.liquid_equ_steps // nsteps # Number of iterations set aside for equilibration
niterations = args.liquid_prod_steps // nsteps     # Number of production iterations
print(timestep)
print(faststep)
print(nsteps)
print(nequiliterations)
print(niterations)
print("I will perform %i iterations of %i x %.3f fs time steps each" % (niterations, nsteps, args.liquid_timestep))

# Simulation settings for the monomer.
m_timestep = args.gas_timestep * femtosecond
m_nsteps = int(1000 * args.gas_interval / args.gas_timestep)
m_nequiliterations = args.gas_equ_steps // m_nsteps
m_niterations = args.gas_prod_steps // m_nsteps

temperature = args.temperature * kelvin  # temperature in kelvin
pressure = args.pressure * atmospheres   # pressure in atmospheres
collision_frequency = 1.0 / picosecond   # Langevin barostat friction / random force parameter
barostat_frequency = 10                  # number of steps between MC volume adjustments
# Flag to set verbose debug output
verbose = True

# Keyword-argument presets passed to ForceField.createSystem for each
# force field / phase combination (AMOEBA mutual/direct/nonpolarizable, TIP3P,
# and the corresponding gas-phase "mono_" variants with no cutoff).
amoeba_mutual_kwargs = {'nonbondedMethod' : PME, 'nonbondedCutoff' : 0.7*nanometer,
                        'constraints' : None, 'rigidWater' : False, 'vdwCutoff' : 0.85,
                        'aEwald' : 5.4459052, 'pmeGridDimensions' : [24,24,24],
                        'mutualInducedTargetEpsilon' : 1e-6, 'useDispersionCorrection' : True}
amoeba_direct_kwargs = {'nonbondedMethod' : PME, 'nonbondedCutoff' : 0.7*nanometer,
                        'constraints' : None, 'rigidWater' : False, 'vdwCutoff' : 0.85,
                        'aEwald' : 5.4459052, 'pmeGridDimensions' : [24,24,24],
                        'polarization' : 'direct', 'useDispersionCorrection' : True}
amoeba_nonpol_kwargs = {'nonbondedMethod' : PME, 'nonbondedCutoff' : 0.7*nanometer,
                        'constraints' : None, 'rigidWater' : False, 'vdwCutoff' : 0.85,
                        'aEwald' : 5.4459052, 'pmeGridDimensions' : [24,24,24],
                        'useDispersionCorrection' : True}
tip3p_kwargs = {'nonbondedMethod' : PME, 'nonbondedCutoff' : 0.85*nanometer,
                'vdwCutoff' : 0.9, 'aEwald' : 5.4459052, 'pmeGridDimensions' : [24,24,24], 'useDispersionCorrection' : True}
mono_tip3p_kwargs = {'nonbondedMethod' : NoCutoff}
mono_direct_kwargs = {'nonbondedMethod' : NoCutoff, 'constraints' : None,
                      'rigidWater' : False, 'polarization' : 'direct'}
mono_mutual_kwargs = {'nonbondedMethod' : NoCutoff, 'constraints' : None,
                      'rigidWater' : False, 'mutualInducedTargetEpsilon' : 1e-6}
mono_nonpol_kwargs = {'nonbondedMethod' : NoCutoff, 'constraints' : None,
                      'rigidWater' : False}
def generateMaxwellBoltzmannVelocities(system, temperature):
    """ Generate velocities from a Maxwell-Boltzmann distribution.

    @param[in] system OpenMM System (provides particle count and masses)
    @param[in] temperature Target temperature (simtk Quantity in kelvin)
    @return velocities Quantity array of shape (natoms, 3) in nm/ps
    """
    n_atoms = system.getNumParticles()
    # Row i of this array holds the velocity vector of atom i.
    velocities = Quantity(np.zeros([n_atoms, 3], np.float32), nanometer / picosecond)
    # Thermal energy and inverse temperature from the requested temperature.
    kB = BOLTZMANN_CONSTANT_kB * AVOGADRO_CONSTANT_NA
    kT = kB * temperature
    beta = 1.0 / kT
    # Each Cartesian component is an independent Gaussian of width sqrt(kT/m).
    for iatom in range(n_atoms):
        width = sqrt(kT / system.getParticleMass(iatom))
        for axis in range(3):
            velocities[iatom, axis] = width * np.random.normal()
    return velocities
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3):
    """
    Compute the (cross) statistical inefficiency of (two) timeseries.

    Notes
      The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
      The fast method described in Ref [1] is used to compute g.

    References
      [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
      histogram analysis method for the analysis of simulated and parallel tempering simulations.
      JCTC 3(1):26-41, 2007.

    Examples
      Compute statistical inefficiency of timeseries data with known correlation time.
      >>> import timeseries
      >>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
      >>> g = statisticalInefficiency(A_n, fast=True)

    @param[in] A_n (required, numpy array) - A_n[n] is nth value of
    timeseries A.  Length is deduced from vector.

    @param[in] B_n (optional, numpy array) - B_n[n] is nth value of
    timeseries B.  Length is deduced from vector.  If supplied, the
    cross-correlation of timeseries A and B will be estimated instead of
    the autocorrelation of timeseries A.

    @param[in] fast (optional, boolean) - if True, will use faster (but
    less accurate) method to estimate correlation time, described in
    Ref. [1] (default: False)

    @param[in] mintime (optional, int) - minimum amount of correlation
    function to compute (default: 3) The algorithm terminates after
    computing the correlation time out to mintime when the correlation
    function first goes negative.  Note that this time may need to be
    increased if there is a strong initial negative peak in the
    correlation function.

    @return g The estimated statistical inefficiency (equal to 1 + 2
    tau, where tau is the correlation time).  We enforce g >= 1.0.

    @raise ValueError if A_n and B_n have different dimensions.
    """
    # Create numpy copies of input arguments.
    A_n = np.array(A_n)
    if B_n is not None:
        B_n = np.array(B_n)
    else:
        B_n = np.array(A_n)
    # Get the length of the timeseries.
    N = A_n.size
    # Be sure A_n and B_n have the same dimensions.
    if(A_n.shape != B_n.shape):
        # Fixed: previously raised 'ParameterError', a name never defined in this
        # script, which would have produced a NameError instead of a useful error.
        raise ValueError('A_n and B_n must have same dimensions.')
    # Initialize statistical inefficiency estimate with uncorrelated value.
    g = 1.0
    # Compute mean of each timeseries.
    mu_A = A_n.mean()
    mu_B = B_n.mean()
    # Make temporary copies of fluctuation from mean.
    dA_n = A_n.astype(np.float64) - mu_A
    dB_n = B_n.astype(np.float64) - mu_B
    # Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
    sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
    # Trap the case where this covariance is zero, and we cannot proceed.
    if(sigma2_AB == 0):
        print('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency')
        return 1.0
    # Accumulate the integrated correlation time by computing the normalized correlation time at
    # increasing values of t.  Stop accumulating if the correlation function goes negative, since
    # this is unlikely to occur unless the correlation function has decayed to the point where it
    # is dominated by noise and indistinguishable from zero.
    t = 1
    increment = 1
    while (t < N-1):
        # compute normalized fluctuation correlation function at time t
        C = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
        # Terminate if the correlation function has crossed zero and we've computed the correlation
        # function at least out to 'mintime'.
        if (C <= 0.0) and (t > mintime):
            break
        # Accumulate contribution to the statistical inefficiency.
        g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
        # Increment t and the amount by which we increment t.
        t += increment
        # Increase the interval if "fast mode" is on.
        if fast: increment += 1
    # g must be at least unity
    if (g < 1.0): g = 1.0
    # Return the computed statistical inefficiency.
    return g
def compute_volume(box_vectors):
    """ Compute the total volume of an OpenMM system.

    @param[in] box_vectors Three unit-bearing box vectors (a, b, c)
    @return volume Box volume as a Quantity in the cube of the first vector's unit
    """
    a, b, c = box_vectors
    # Strip units (all three rows are expressed in the unit of the first vector)
    # and assemble the 3x3 box matrix.
    box_matrix = np.array([a / a.unit, b / a.unit, c / a.unit])
    # The parallelepiped volume is the determinant of the box matrix.
    return np.linalg.det(box_matrix) * a.unit ** 3
def compute_mass(system):
    """ Compute the total mass of an OpenMM system.

    @param[in] system OpenMM System (provides particle count and masses)
    @return mass Total mass as a Quantity in amu
    """
    # Sum the per-particle masses, seeding the accumulator with zero amu so the
    # result carries units even for an empty system.
    return sum((system.getParticleMass(idx) for idx in range(system.getNumParticles())),
               0.0 * amu)
def get_dipole(simulation,q=None,positions=None):
    """ Return the current dipole moment in Debye.

    Note that this quantity is meaningless if the system carries a net charge.

    @param[in] simulation OpenMM Simulation object
    @param[in] q (optional, numpy array) Per-particle charges; if None they are
        read from the NonbondedForce (if present).
    @param[in] positions (optional) Particle positions; if None they are taken
        from the current simulation context.
    @return [dx,dy,dz] Dipole components in Debye
    """
    dx = 0.0
    dy = 0.0
    dz = 0.0
    enm_debye = 48.03204255928332   # Conversion factor from e*nm to Debye
    for frc in simulation.system.getForces():
        if frc.__class__.__name__ == "AmoebaMultipoleForce":
            # AMOEBA reports the system multipole moments directly.
            mm = frc.getSystemMultipoleMoments(simulation.context)
            dx += mm[1]
            dy += mm[2]
            dz += mm[3]
        if frc.__class__.__name__ == "NonbondedForce":
            # Get array of charges.
            # Fixed: use 'is None' -- 'q == None' on a numpy array performs an
            # elementwise comparison, which raises in boolean context.
            if q is None:
                q = np.array([frc.getParticleParameters(j)[0]._value for j in range(frc.getNumParticles())])
            # Get array of positions in nanometers.
            if positions is None:
                positions = simulation.context.getState(getPositions=True).getPositions()
            x = np.array([j._value for j in positions])
            # Multiply charges by positions to get dipole moment.
            dip = enm_debye * np.sum(x*q.reshape(-1,1),axis=0)
            dx += dip[0]
            dy += dip[1]
            dz += dip[2]
    return [dx,dy,dz]
def MTSVVVRIntegrator(temperature, collision_rate, timestep, system, ninnersteps=4):
    """
    Create a multiple timestep velocity verlet with velocity randomization (VVVR) integrator.

    ARGUMENTS

    temperature (numpy.unit.Quantity compatible with kelvin) - the temperature
    collision_rate (numpy.unit.Quantity compatible with 1/picoseconds) - the collision rate
    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep
    system (simtk.openmm.System) - system whose forces will be partitioned
    ninnersteps (int) - number of inner timesteps (default: 4)

    RETURNS

    integrator (openmm.CustomIntegrator) - a VVVR integrator

    NOTES

    This integrator is equivalent to a Langevin integrator in the velocity Verlet discretization with a
    timestep correction to ensure that the field-free diffusion constant is timestep invariant.  The inner
    velocity Verlet discretization is transformed into a multiple timestep algorithm.

    REFERENCES

    VVVR Langevin integrator:
    * http://arxiv.org/abs/1301.3800
    * http://arxiv.org/abs/1107.2967 (to appear in PRX 2013)

    TODO

    Move initialization of 'sigma' to setting the per-particle variables.
    """
    # Multiple timestep Langevin integrator.
    # Partition forces: long-range nonbonded forces go in group 1 ("slow",
    # evaluated once per outer step); everything else in group 0 ("fast").
    for frc in system.getForces():
        if frc.__class__.__name__ in ["NonbondedForce", "CustomNonbondedForce", "AmoebaVdwForce", "AmoebaMultipoleForce"]:
            # Slow force.
            print("%s is a Slow Force" % frc.__class__.__name__)
            frc.setForceGroup(1)
        else:
            # Fast force.
            print("%s is a Fast Force" % frc.__class__.__name__)
            frc.setForceGroup(0)

    kB = BOLTZMANN_CONSTANT_kB * AVOGADRO_CONSTANT_NA
    kT = kB * temperature

    integrator = openmm.CustomIntegrator(timestep)
    integrator.addGlobalVariable("dt_fast", timestep/float(ninnersteps)) # fast inner timestep
    integrator.addGlobalVariable("kT", kT) # thermal energy
    # Fixed: the original referenced the bare name 'numpy', but this script only
    # does 'import numpy as np', so 'numpy.exp' would raise a NameError unless
    # a star-import happened to re-export it.  Use the np alias explicitly.
    integrator.addGlobalVariable("a", np.exp(-collision_rate*timestep)) # velocity mixing parameter
    integrator.addGlobalVariable("b", np.sqrt((2/(collision_rate*timestep)) * np.tanh(collision_rate*timestep/2))) # timestep correction parameter
    integrator.addPerDofVariable("sigma", 0)
    integrator.addPerDofVariable("x1", 0) # position before application of constraints
    #
    # Pre-computation.
    # This only needs to be done once, but it needs to be done for each degree of freedom.
    # Could move this to initialization?
    #
    integrator.addComputePerDof("sigma", "sqrt(kT/m)")
    #
    # Velocity perturbation.
    #
    integrator.addComputePerDof("v", "sqrt(a)*v + sqrt(1-a)*sigma*gaussian")
    integrator.addConstrainVelocities()
    #
    # Symplectic inner multiple timestep.
    #
    integrator.addUpdateContextState()
    integrator.addComputePerDof("v", "v + 0.5*b*dt*f1/m")
    for innerstep in range(ninnersteps):
        # Fast inner symplectic timestep.
        integrator.addComputePerDof("v", "v + 0.5*b*dt_fast*f0/m")
        integrator.addComputePerDof("x", "x + v*b*dt_fast")
        integrator.addComputePerDof("x1", "x")
        integrator.addConstrainPositions()
        integrator.addComputePerDof("v", "v + 0.5*b*dt_fast*f0/m + (x-x1)/dt_fast")
    integrator.addComputePerDof("v", "v + 0.5*b*dt*f1/m") # TODO: Additional velocity constraint correction?
    integrator.addConstrainVelocities()
    #
    # Velocity randomization
    #
    integrator.addComputePerDof("v", "sqrt(a)*v + sqrt(1-a)*sigma*gaussian")
    integrator.addConstrainVelocities()
    return integrator
def create_simulation_object(pdb, settings, pbc=True, precision="single"):
    """ Create an OpenMM Simulation object and its System.

    Tries the CUDA platform first and falls back to the Reference platform
    (unless --force_cuda was given, in which case failure is fatal).

    @param[in] pdb OpenMM PDB object providing the topology
    @param[in] settings Keyword arguments forwarded to ForceField.createSystem
    @param[in] pbc (bool) If True, add a Monte Carlo barostat (NPT ensemble)
    @param[in] precision (str) CUDA precision model ("single" / "double")
    @return (simulation, system) The Simulation object and its System
    """
    #================================#
    # Create the simulation platform #
    #================================#
    # Name of the simulation platform (Reference, Cuda, OpenCL)
    try:
        PlatName = 'CUDA'
        print("Setting Platform to %s" % PlatName)
        platform = Platform.getPlatformByName(PlatName)
        # Set the device to the environment variable or zero otherwise
        device = os.environ.get('CUDA_DEVICE',"0")
        print("Setting Device to %s" % device)
        platform.setPropertyDefaultValue("CudaDeviceIndex", device)
        # Setting CUDA precision to double appears to improve performance of derivatives.
        platform.setPropertyDefaultValue("CudaPrecision", precision)
        platform.setPropertyDefaultValue("OpenCLDeviceIndex", device)
    except Exception:
        # Fixed: was a bare 'except:', which would also swallow KeyboardInterrupt
        # and SystemExit; we only want to trap platform-lookup failures here.
        if args.force_cuda:
            raise Exception('Force CUDA option is enabled but CUDA platform not available')
        PlatName = "Reference"
        print("Setting Platform to %s" % PlatName)
        platform = Platform.getPlatformByName(PlatName)
    # Create the test system.  sys.argv[2] is the OpenMM force field XML file
    # (the same value as args.openmm_xmlfile).
    forcefield = ForceField(sys.argv[2])
    system = forcefield.createSystem(pdb.topology, **settings)
    if pbc:
        barostat = MonteCarloBarostat(pressure, temperature, barostat_frequency)
        # Add barostat.
        system.addForce(barostat)
    # Create integrator.  The MTS-VVVR integrator is available but disabled by default.
    NewIntegrator = False
    if not NewIntegrator:
        integrator = LangevinIntegrator(temperature, collision_frequency, timestep)
    else:
        print("Using new multiple-timestep velocity-verlet with velocity randomization (MTS-VVVR) integrator.")
        integrator = MTSVVVRIntegrator(temperature, collision_frequency, timestep, system, ninnersteps=int(timestep/faststep))
    # Stuff for figuring out the ewald error tolerance.
    print("There are %i forces" % system.getNumForces())
    for i in range(system.getNumForces()):
        Frc = system.getForce(i)
        print(Frc.__class__.__name__)
        if Frc.__class__.__name__ == 'AmoebaMultipoleForce':
            print("The Ewald error tolerance is: %s" % Frc.getEwaldErrorTolerance())
    # Create simulation object.
    simulation = Simulation(pdb.topology, system, integrator, platform)
    return simulation, system
def run_simulation(pdb,settings,pbc=True,Trajectory=True):
    """ Run a NPT simulation and gather statistics.

    Minimizes, assigns Maxwell-Boltzmann velocities, equilibrates for
    nequiliterations intervals, then runs niterations production intervals,
    recording energies, volume, density, temperature, coordinates, box
    vectors, and dipoles at every interval.

    @param[in] pdb OpenMM PDB object with initial coordinates/topology
    @param[in] settings Keyword arguments forwarded to createSystem
    @param[in] pbc (bool) Periodic boundary conditions (NPT) if True;
        gas phase (volume/density reported as zero) if False
    @param[in] Trajectory (bool) If True, write production frames to 'dynamics.dcd'
    @return (data, xyzs, boxes, rhos, energies, volumes, dipoles, simulation)
        data: dict of unit-bearing per-iteration statistics;
        xyzs/boxes: per-frame coordinates and box vectors;
        rhos (kg/m^3), energies (kJ/mol), volumes (nm^3), dipoles (Debye)
        as numpy arrays; simulation: the OpenMM Simulation object.

    Uses module-level globals: temperature, verbose, nequiliterations,
    niterations, nsteps, timestep.
    """
    simulation, system = create_simulation_object(pdb, settings, pbc, "single")
    # Set initial positions.
    simulation.context.setPositions(pdb.positions)
    print "Minimizing the energy... (starting energy % .3f kJ/mol)" % simulation.context.getState(getEnergy=True).getPotentialEnergy().value_in_unit(kilojoule_per_mole),
    simulation.minimizeEnergy()
    print "Done (final energy % .3f kJ/mol)" % simulation.context.getState(getEnergy=True).getPotentialEnergy().value_in_unit(kilojoule_per_mole)
    # Assign velocities.
    velocities = generateMaxwellBoltzmannVelocities(system, temperature)
    simulation.context.setVelocities(velocities)
    if verbose:
        # Print out the platform used by the context
        print "I'm using the platform", simulation.context.getPlatform().getName()
        # Print out the properties of the platform
        printcool_dictionary({i:simulation.context.getPlatform().getPropertyValue(simulation.context,i) for i in simulation.context.getPlatform().getPropertyNames()},title="Platform %s has properties:" % simulation.context.getPlatform().getName())
    # Serialize the system if we want (debugging aid; off by default).
    Serialize = 0
    if Serialize:
        serial = XmlSerializer.serializeSystem(system)
        with open('system.xml','w') as f: f.write(serial)
    #==========================================#
    # Computing a bunch of initial values here #
    #==========================================#
    if pbc:
        # Show initial system volume.
        box_vectors = system.getDefaultPeriodicBoxVectors()
        volume = compute_volume(box_vectors)
        if verbose: print "initial system volume = %.1f nm^3" % (volume / nanometers**3)
    # Determine number of degrees of freedom.
    kB = BOLTZMANN_CONSTANT_kB * AVOGADRO_CONSTANT_NA
    # The center of mass motion remover is also a constraint (hence the -3).
    ndof = 3*system.getNumParticles() - system.getNumConstraints() - 3
    # Compute total mass.
    mass = compute_mass(system).in_units_of(gram / mole) /  AVOGADRO_CONSTANT_NA # total system mass in g
    # Initialize statistics: one unit-bearing array per observable,
    # sized to the number of production iterations.
    data = dict()
    data['time'] = Quantity(np.zeros([niterations], np.float64), picoseconds)
    data['potential'] = Quantity(np.zeros([niterations], np.float64), kilojoules_per_mole)
    data['kinetic'] = Quantity(np.zeros([niterations], np.float64), kilojoules_per_mole)
    data['volume'] = Quantity(np.zeros([niterations], np.float64), angstroms**3)
    data['density'] = Quantity(np.zeros([niterations], np.float64), kilogram / meters**3)
    data['kinetic_temperature'] = Quantity(np.zeros([niterations], np.float64), kelvin)
    # More data structures; stored coordinates, box sizes, densities, and potential energies
    xyzs = []
    boxes = []
    rhos = []
    energies = []
    volumes = []
    dipoles = []
    #========================#
    # Now run the simulation #
    #========================#
    # Equilibrate.  Equilibration frames are printed but NOT stored.
    if verbose: print "Using timestep", timestep, "and %i steps per data record" % nsteps
    if verbose: print "Special note: getVelocities and getForces has been turned off."
    if verbose: print "Equilibrating..."
    for iteration in range(nequiliterations):
        simulation.step(nsteps)
        state = simulation.context.getState(getEnergy=True,getPositions=True,getVelocities=False,getForces=False)
        kinetic = state.getKineticEnergy()
        potential = state.getPotentialEnergy()
        if pbc:
            box_vectors = state.getPeriodicBoxVectors()
            volume = compute_volume(box_vectors)
            density = (mass / volume).in_units_of(kilogram / meter**3)
        else:
            # Gas phase: no box, so report zero volume/density.
            volume = 0.0 * nanometers ** 3
            density = 0.0 * kilogram / meter ** 3
        # Instantaneous temperature from equipartition: (1/2) ndof * kB * T = KE.
        kinetic_temperature = 2.0 * kinetic / kB / ndof # (1/2) ndof * kB * T = KE
        if verbose:
            print "%6d %9.3f %9.3f % 13.3f %10.4f %13.4f" % (iteration, state.getTime() / picoseconds,
                                                             kinetic_temperature / kelvin, potential / kilojoules_per_mole,
                                                             volume / nanometers**3, density / (kilogram / meter**3))
    # Collect production data.
    if verbose: print "Production..."
    if Trajectory:
        simulation.reporters.append(DCDReporter('dynamics.dcd', nsteps))
    for iteration in range(niterations):
        # Propagate dynamics.
        simulation.step(nsteps)
        # Compute properties.
        state = simulation.context.getState(getEnergy=True,getPositions=True,getVelocities=False,getForces=False)
        kinetic = state.getKineticEnergy()
        potential = state.getPotentialEnergy()
        if pbc:
            box_vectors = state.getPeriodicBoxVectors()
            volume = compute_volume(box_vectors)
            density = (mass / volume).in_units_of(kilogram / meter**3)
        else:
            volume = 0.0 * nanometers ** 3
            density = 0.0 * kilogram / meter ** 3
        kinetic_temperature = 2.0 * kinetic / kB / ndof
        if verbose:
            print "%6d %9.3f %9.3f % 13.3f %10.4f %13.4f" % (iteration, state.getTime() / picoseconds, kinetic_temperature / kelvin, potential / kilojoules_per_mole, volume / nanometers**3, density / (kilogram / meter**3))
        # Store properties (unit-bearing dict plus plain lists for the caller).
        data['time'][iteration] = state.getTime()
        data['potential'][iteration] = potential
        data['kinetic'][iteration] = kinetic
        data['volume'][iteration] = volume
        data['density'][iteration] = density
        data['kinetic_temperature'][iteration] = kinetic_temperature
        xyzs.append(state.getPositions())
        boxes.append(state.getPeriodicBoxVectors())
        rhos.append(density.value_in_unit(kilogram / meter**3))
        energies.append(potential / kilojoules_per_mole)
        volumes.append(volume / nanometer**3)
        dipoles.append(get_dipole(simulation,positions=xyzs[-1]))
    return data, xyzs, boxes, np.array(rhos), np.array(energies), np.array(volumes), np.array(dipoles), simulation
def analyze(data):
    """Analyze the data from the run_simulation function.

    Computes statistical inefficiencies for each observable, forms means and
    uncertainties (standard error corrected for correlation), prints a summary,
    and returns the quantities needed by the caller.

    @param[in] data Dictionary of unit-bearing timeseries from run_simulation
    @return (rho, drho, PE, dPE, pV_mean, pV_err) Density mean/uncertainty
        (kg/m^3), potential energy mean/uncertainty (kJ/mol), and the
        pressure-volume term mean/uncertainty (kJ/mol).

    Uses module-level globals: niterations, nequiliterations, nsteps,
    timestep, pressure.
    """
    #===========================================================================================#
    # Compute statistical inefficiencies to determine effective number of uncorrelated samples. #
    #===========================================================================================#
    data['g_potential'] = statisticalInefficiency(data['potential'] / kilojoules_per_mole)
    data['g_kinetic'] = statisticalInefficiency(data['kinetic'] / kilojoules_per_mole, fast=True)
    data['g_volume'] = statisticalInefficiency(data['volume'] / angstroms**3, fast=True)
    data['g_density'] = statisticalInefficiency(data['density'] / (kilogram / meter**3), fast=True)
    data['g_kinetic_temperature'] = statisticalInefficiency(data['kinetic_temperature'] / kelvin, fast=True)
    #=========================================#
    # Compute expectations and uncertainties. #
    #=========================================#
    statistics = dict()
    # Kinetic energy.
    statistics['KE'] = (data['kinetic'] / kilojoules_per_mole).mean() * kilojoules_per_mole
    statistics['dKE'] = (data['kinetic'] / kilojoules_per_mole).std() / np.sqrt(niterations / data['g_kinetic']) * kilojoules_per_mole
    statistics['g_KE'] = data['g_kinetic'] * nsteps * timestep
    # Potential energy.
    statistics['PE'] = (data['potential'] / kilojoules_per_mole).mean() * kilojoules_per_mole
    statistics['dPE'] = (data['potential'] / kilojoules_per_mole).std() / np.sqrt(niterations / data['g_potential']) * kilojoules_per_mole
    statistics['g_PE'] = data['g_potential'] * nsteps * timestep
    # Density
    unit = (kilogram / meter**3)
    statistics['density'] = (data['density'] / unit).mean() * unit
    statistics['ddensity'] = (data['density'] / unit).std() / np.sqrt(niterations / data['g_density']) * unit
    statistics['g_density'] = data['g_density'] * nsteps * timestep
    # Volume
    unit = nanometer**3
    statistics['volume'] = (data['volume'] / unit).mean() * unit
    statistics['dvolume'] = (data['volume'] / unit).std() / np.sqrt(niterations / data['g_volume']) * unit
    statistics['g_volume'] = data['g_volume'] * nsteps * timestep
    statistics['std_volume'] = (data['volume'] / unit).std() * unit
    statistics['dstd_volume'] = (data['volume'] / unit).std() / np.sqrt((niterations / data['g_volume'] - 1) * 2.0) * unit # uncertainty expression from Ref [1].
    # Kinetic temperature
    unit = kelvin
    statistics['kinetic_temperature'] = (data['kinetic_temperature'] / unit).mean() * unit
    statistics['dkinetic_temperature'] = (data['kinetic_temperature'] / unit).std() / np.sqrt(niterations / data['g_kinetic_temperature']) * unit
    statistics['g_kinetic_temperature'] = data['g_kinetic_temperature'] * nsteps * timestep
    #==========================#
    # Print summary statistics #
    #==========================#
    print("Summary statistics (%.3f ns equil, %.3f ns production)" % (nequiliterations * nsteps * timestep / nanoseconds, niterations * nsteps * timestep / nanoseconds))
    print("")
    # Kinetic energies
    print("Average kinetic energy: %11.6f +- %11.6f kj/mol (g = %11.6f ps)" % (statistics['KE'] / kilojoules_per_mole, statistics['dKE'] / kilojoules_per_mole, statistics['g_KE'] / picoseconds))
    # Potential energies
    print("Average potential energy: %11.6f +- %11.6f kj/mol (g = %11.6f ps)" % (statistics['PE'] / kilojoules_per_mole, statistics['dPE'] / kilojoules_per_mole, statistics['g_PE'] / picoseconds))
    # Kinetic temperature
    unit = kelvin
    print("Average kinetic temperature: %11.6f +- %11.6f K (g = %11.6f ps)" % (statistics['kinetic_temperature'] / unit, statistics['dkinetic_temperature'] / unit, statistics['g_kinetic_temperature'] / picoseconds))
    unit = (nanometer**3)
    print("Volume: mean %11.6f +- %11.6f nm^3 g = %11.6f ps" % (statistics['volume'] / unit, statistics['dvolume'] / unit, statistics['g_volume'] / picoseconds))
    unit = (kilogram / meter**3)
    # Fixed unit label: the density is reported in kg/m^3; the original line
    # said "nm^3", a copy-paste of the volume line above.
    print("Density: mean %11.6f +- %11.6f kg/m^3 g = %11.6f ps" % (statistics['density'] / unit, statistics['ddensity'] / unit, statistics['g_density'] / picoseconds))
    # Pressure-volume term, converted to kJ/mol for the enthalpy of vaporization.
    unit_rho = (kilogram / meter**3)
    unit_ene = kilojoules_per_mole
    pV_mean = (statistics['volume'] * pressure * AVOGADRO_CONSTANT_NA).value_in_unit(kilojoule_per_mole)
    pV_err = (statistics['dvolume'] * pressure * AVOGADRO_CONSTANT_NA).value_in_unit(kilojoule_per_mole)
    return statistics['density'] / unit_rho, statistics['ddensity'] / unit_rho, statistics['PE'] / unit_ene, statistics['dPE'] / unit_ene, pV_mean, pV_err
def energy_driver(mvals,pdb,FF,xyzs,settings,simulation,boxes=None,verbose=False,dipole=False):
    """
    Compute a set of snapshot energies (and optionally dipoles) as a function of the force field parameters.

    This is a combined OpenMM and ForceBalance function.  Note (importantly) that this
    function creates a new force field XML file in the run directory.

    ForceBalance creates the force field, OpenMM reads it in, and we loop through the snapshots
    to compute the energies.

    @todo I should be able to generate the OpenMM force field object without writing an external file.
    @todo This is a sufficiently general function to be merged into openmmio.py?
    @param[in] mvals Mathematical parameter values
    @param[in] pdb OpenMM PDB object
    @param[in] FF ForceBalance force field object
    @param[in] xyzs List of OpenMM positions
    @param[in] settings OpenMM settings for creating the System
    @param[in] simulation Existing OpenMM Simulation object to update and reuse
    @param[in] boxes Periodic box vectors (one per snapshot), or None for gas phase
    @param[in] verbose (bool) If True, print the energy list when done
    @param[in] dipole (bool) If True, also compute dipoles; changes the return shape
    @return E A numpy array of energies in kilojoules per mole, or (if
        dipole=True) an Nx4 array of [energy, dx, dy, dz] per snapshot
    """
    # Print the force field XML from the ForceBalance object, with modified parameters.
    FF.make(mvals)
    # Load the force field XML file to make the OpenMM object.
    forcefield = ForceField(sys.argv[2])
    # Create the system, setup the simulation.
    system = forcefield.createSystem(pdb.topology, **settings)
    UpdateSimulationParameters(system, simulation)
    E = []
    D = []
    # Grab per-particle charges once (needed only for the dipole calculation).
    q = None
    for frc in simulation.system.getForces():
        if frc.__class__.__name__ == "NonbondedForce":
            q = np.array([frc.getParticleParameters(j)[0]._value for j in range(frc.getNumParticles())])
    # Loop through the snapshots.  Use 'is None' -- '== None' would do an
    # elementwise comparison if boxes were ever a numpy array.
    if boxes is None:
        for xyz in xyzs:
            # Set the positions and the box vectors
            simulation.context.setPositions(xyz)
            # Compute the potential energy and append to list
            Energy = simulation.context.getState(getEnergy=True).getPotentialEnergy() / kilojoules_per_mole
            E.append(Energy)
            if dipole:
                D.append(get_dipole(simulation,q=q,positions=xyz))
    else:
        for xyz,box in zip(xyzs,boxes):
            # Set the positions and the box vectors
            simulation.context.setPositions(xyz)
            simulation.context.setPeriodicBoxVectors(box[0],box[1],box[2])
            # Compute the potential energy and append to list
            Energy = simulation.context.getState(getEnergy=True).getPotentialEnergy() / kilojoules_per_mole
            E.append(Energy)
            if dipole:
                D.append(get_dipole(simulation,q=q,positions=xyz))
            # Progress indicator: rewind to start of line.
            sys.stdout.write("\r")
    if verbose: print(E)
    if dipole:
        # Return a Nx4 array with energies in the first column and dipole in columns 2-4.
        return np.hstack((np.array(E).reshape(-1,1), np.array(D).reshape(-1,3)))
    else:
        return np.array(E)
def energy_derivatives(mvals,h,pdb,FF,xyzs,settings,simulation,boxes=None,AGrad=True):
    """
    Compute the first derivatives of a set of snapshot energies with
    respect to the force field parameters.

    This basically calls the finite difference subroutine on the
    energy_driver subroutine also in this script.  (The docstring
    previously claimed second derivatives as well; only the first
    derivative is computed and returned.)

    @todo This is a sufficiently general function to be merged into openmmio.py?
    @param[in] mvals Mathematical parameter values
    @param[in] h Finite difference step size
    @param[in] pdb OpenMM PDB object
    @param[in] FF ForceBalance force field object
    @param[in] xyzs List of OpenMM positions
    @param[in] settings OpenMM settings for creating the System
    @param[in] simulation Existing OpenMM Simulation object
    @param[in] boxes Periodic box vectors
    @param[in] AGrad (bool) If False, skip the computation and return zeros
    @return G First derivative of the energies in a N_param x N_coord array
    """
    G = np.zeros((FF.np,len(xyzs)))
    if not AGrad:
        return G
    # Baseline energies; reused as f0 by the 3-point central difference.
    E0 = energy_driver(mvals, pdb, FF, xyzs, settings, simulation, boxes)
    CheckFDPts = False
    for i in range(FF.np):
        # Central difference of the energies with respect to parameter i.
        G[i,:], _ = f12d3p(fdwrap(energy_driver,mvals,i,key=None,pdb=pdb,FF=FF,xyzs=xyzs,settings=settings,simulation=simulation,boxes=boxes),h,f0=E0)
        if CheckFDPts:
            # Check whether the number of finite difference points is sufficient.  Forward difference still gives residual error of a few percent.
            G1 = f1d7p(fdwrap(energy_driver,mvals,i,key=None,pdb=pdb,FF=FF,xyzs=xyzs,settings=settings,simulation=simulation,boxes=boxes),h)
            dG = G1 - G[i,:]
            dGfrac = dG / G[i,:]
            print("Parameter %3i 7-pt vs. central derivative : RMS, Max error (fractional) = % .4e % .4e (% .4e % .4e)" % (i, np.sqrt(np.mean(dG**2)), max(np.abs(dG)), np.sqrt(np.mean(dGfrac**2)), max(np.abs(dGfrac))))
    return G
def energy_dipole_derivatives(mvals,h,pdb,FF,xyzs,settings,simulation,boxes=None,AGrad=True):
    """
    Compute the first derivatives of a set of snapshot energies AND dipole
    components with respect to the force field parameters.

    This basically calls the finite difference subroutine on the
    energy_driver subroutine also in this script (with dipole=True, so each
    evaluation yields an Nx4 array of [energy, dx, dy, dz]).

    @todo This is a sufficiently general function to be merged into openmmio.py?
    @param[in] mvals Mathematical parameter values
    @param[in] h Finite difference step size
    @param[in] pdb OpenMM PDB object
    @param[in] FF ForceBalance force field object
    @param[in] xyzs List of OpenMM positions
    @param[in] settings OpenMM settings for creating the System
    @param[in] simulation Existing OpenMM Simulation object
    @param[in] boxes Periodic box vectors
    @param[in] AGrad (bool) If False, skip the computation and return zeros
    @return (G, GDx, GDy, GDz) First derivatives of the energies and of the
        x/y/z dipole components, each a N_param x N_coord array
    """
    G   = np.zeros((FF.np,len(xyzs)))
    GDx = np.zeros((FF.np,len(xyzs)))
    GDy = np.zeros((FF.np,len(xyzs)))
    GDz = np.zeros((FF.np,len(xyzs)))
    if not AGrad:
        return G, GDx, GDy, GDz
    # Baseline energy+dipole array; reused as f0 by the central difference.
    ED0 = energy_driver(mvals, pdb, FF, xyzs, settings, simulation, boxes, dipole=True)
    for i in range(FF.np):
        # Column 0 is the energy derivative; columns 1-3 are the dipole derivatives.
        EDG, _ = f12d3p(fdwrap(energy_driver,mvals,i,key=None,pdb=pdb,FF=FF,xyzs=xyzs,settings=settings,simulation=simulation,boxes=boxes,dipole=True),h,f0=ED0)
        G[i,:]   = EDG[:,0]
        GDx[i,:] = EDG[:,1]
        GDy[i,:] = EDG[:,2]
        GDz[i,:] = EDG[:,3]
    return G, GDx, GDy, GDz
def bzavg(obs,boltz):
    """ Compute the Boltzmann-weighted average of an observable.

    @param[in] obs Observable values: 1-D (one value per snapshot) or 2-D with
        the snapshot axis matching len(boltz).  Accepts any array-like
        (converted with np.asarray; previously a plain list would fail on .ndim).
    @param[in] boltz Boltzmann weights, one per snapshot
    @return Weighted average: a scalar for 1-D obs, or a vector averaged over
        the snapshot axis for 2-D obs
    @raise Exception if the snapshot axis is ambiguous or no axis matches
    """
    obs = np.asarray(obs)
    boltz = np.asarray(boltz)
    if obs.ndim == 2:
        if obs.shape[0] == len(boltz) and obs.shape[1] == len(boltz):
            raise Exception('Error - both dimensions have length equal to number of snapshots, now confused!')
        elif obs.shape[0] == len(boltz):
            return np.sum(obs*boltz.reshape(-1,1),axis=0)/np.sum(boltz)
        elif obs.shape[1] == len(boltz):
            return np.sum(obs*boltz,axis=1)/np.sum(boltz)
        else:
            raise Exception('The dimensions are wrong!')
    elif obs.ndim == 1:
        # np.sum for consistency with the 2-D branches (was builtin sum).
        return np.dot(obs,boltz)/np.sum(boltz)
    else:
        raise Exception('The number of dimensions can only be 1 or 2!')
def property_derivatives(mvals,h,pdb,FF,xyzs,settings,simulation,kT,property_driver,property_kwargs,boxes=None,AGrad=True):
    """Numerically differentiate an ensemble-averaged property with respect
    to the force field parameters using a central difference of step h.

    For each parameter the snapshot energies are re-evaluated at +h and -h,
    converted into Boltzmann reweighting factors exp(-dE/kT), and the
    property is recomputed with those weights; the derivative is
    (P(+h) - P(-h)) / (2h).

    @param[in] mvals Mathematical parameter values
    @param[in] h Finite difference step size
    @param[in] pdb OpenMM PDB object
    @param[in] FF ForceBalance force field object
    @param[in] xyzs List of OpenMM positions
    @param[in] settings OpenMM settings for creating the System
    @param[in] simulation OpenMM Simulation object
    @param[in] kT Thermal energy in the same units as the snapshot energies
    @param[in] property_driver Callable computing the property from weights b
    @param[in] property_kwargs Extra data for property_driver; the 'h_' and
               'd_' entries are mutated in place during the loop and restored
               before returning
    @param[in] boxes Periodic box vectors (None for gas phase)
    @param[in] AGrad If False, return zeros without doing any work
    @return G First derivative of the property in an N_param array
    """
    G = np.zeros(FF.np)
    if not AGrad:
        return G
    # Reference energies and dipoles at the unperturbed parameter values.
    ED0 = energy_driver(mvals, pdb, FF, xyzs, settings, simulation, boxes, dipole=True)
    E0 = ED0[:,0]
    D0 = ED0[:,1:]
    # NOTE(review): P0 is computed but never used below; presumably kept for
    # debugging or to prime property_kwargs — confirm before removing.
    P0 = property_driver(None, **property_kwargs)
    if 'h_' in property_kwargs:
        H0 = property_kwargs['h_'].copy()
    for i in range(FF.np):
        # Forward perturbation (+h) of parameter i.
        ED1 = fdwrap(energy_driver,mvals,i,key=None,pdb=pdb,FF=FF,xyzs=xyzs,settings=settings,simulation=simulation,boxes=boxes,dipole=True)(h)
        E1 = ED1[:,0]
        D1 = ED1[:,1:]
        # Normalized reweighting factors from the energy change.
        b = np.exp(-(E1-E0)/kT)
        b /= np.sum(b)
        if 'h_' in property_kwargs:
            property_kwargs['h_'] = H0.copy() + (E1-E0)
        if 'd_' in property_kwargs:
            property_kwargs['d_'] = D1.copy()
        # Effective sample size from the entropy of the weights; warn when
        # the reweighted average rests on too few snapshots.
        S = -1*np.dot(b,np.log(b))
        InfoContent = np.exp(S)
        if InfoContent / len(E0) < 0.1:
            print "Warning: Effective number of snapshots: % .1f (out of %i)" % (InfoContent, len(E0))
        P1 = property_driver(b=b,**property_kwargs)
        # Backward perturbation (-h) of parameter i.
        EDM1 = fdwrap(energy_driver,mvals,i,key=None,pdb=pdb,FF=FF,xyzs=xyzs,settings=settings,simulation=simulation,boxes=boxes,dipole=True)(-h)
        EM1 = EDM1[:,0]
        DM1 = EDM1[:,1:]
        b = np.exp(-(EM1-E0)/kT)
        b /= np.sum(b)
        if 'h_' in property_kwargs:
            property_kwargs['h_'] = H0.copy() + (EM1-E0)
        if 'd_' in property_kwargs:
            property_kwargs['d_'] = DM1.copy()
        S = -1*np.dot(b,np.log(b))
        InfoContent = np.exp(S)
        if InfoContent / len(E0) < 0.1:
            print "Warning: Effective number of snapshots: % .1f (out of %i)" % (InfoContent, len(E0))
        PM1 = property_driver(b=b,**property_kwargs)
        # Central difference.
        G[i] = (P1-PM1)/(2*h)
    # Restore the caller's property_kwargs to their unperturbed values.
    if 'h_' in property_kwargs:
        property_kwargs['h_'] = H0.copy()
    if 'd_' in property_kwargs:
        property_kwargs['d_'] = D0.copy()
    return G
def main():
"""
Usage: (runcuda.sh) npt.py protein.pdb forcefield.xml <temperature> <pressure>
This program is meant to be called automatically by ForceBalance on
a GPU cluster (specifically, subroutines in openmmio.py). It is
not easy to use manually. This is because the force field is read
in from a ForceBalance 'FF' class.
I wrote this program because automatic fitting of the density (or
other equilibrium properties) is computationally intensive, and the
calculations need to be distributed to the queue. The main instance
of ForceBalance (running on my workstation) queues up a bunch of these
jobs (using Work Queue). Then, I submit a bunch of workers to GPU
clusters (e.g. Certainty, Keeneland). The worker scripts connect to
the main instance and receives one of these jobs.
This script can also be executed locally, if you want to (e.g. for
debugging). Just make sure you have the pickled 'forcebalance.p'
file.
"""
# Create an OpenMM PDB object so we may make the Simulation class.
pdb = PDBFile(sys.argv[1])
# The number of molecules can be determined here.
NMol = len(list(pdb.topology.residues()))
# Load the force field in from the ForceBalance pickle.
FF,mvals,h,AGrad = lp_load(open('forcebalance.p'))
# Create the force field XML files.
FF.make(mvals)
# This creates a system from a force field XML file.
forcefield = ForceField(sys.argv[2])
# Try to detect if we're using an AMOEBA system.
if any(['Amoeba' in i.__class__.__name__ for i in forcefield._forces]):
print "Detected AMOEBA system!"
if FF.amoeba_pol == "mutual":
print "Setting mutual polarization"
Settings = amoeba_mutual_kwargs
mSettings = mono_mutual_kwargs
elif FF.amoeba_pol == "direct":
print "Setting direct polarization"
Settings = amoeba_direct_kwargs
mSettings = mono_direct_kwargs
else:
print "No polarization"
Settings = amoeba_nonpol_kwargs
mSettings = mono_nonpol_kwargs
else:
Settings = {'nonbondedMethod':PME}
mSettings = {}
# if 'tip3p' in sys.argv[2]:
# print "Using TIP3P settings."
# Settings = tip3p_kwargs
# mSettings = mono_tip3p_kwargs
# timestep = 1.0 * femtosecond
# nsteps = 100
# else:
# raise Exception('Encountered a force field that I did not expect!')
#=================================================================#
# Run the simulation for the full system and analyze the results. #
#=================================================================#
Data, Xyzs, Boxes, Rhos, Energies, Volumes, Dips, Sim = run_simulation(pdb, Settings, Trajectory=False)
# Get statistics from our simulation.
Rho_avg, Rho_err, Pot_avg, Pot_err, pV_avg, pV_err = analyze(Data)
# Now that we have the coordinates, we can compute the energy derivatives.
# First create a double-precision simulation object.
DoublePrecisionDerivatives = True
if DoublePrecisionDerivatives and AGrad:
print "Creating Double Precision Simulation for parameter derivatives"
Sim, _ = create_simulation_object(pdb, Settings, pbc=True, precision="double")
G, GDx, GDy, GDz = energy_dipole_derivatives(mvals, h, pdb, FF, Xyzs, Settings, Sim, Boxes, AGrad)
# The density derivative can be computed using the energy derivative.
N = len(Xyzs)
kB = BOLTZMANN_CONSTANT_kB * AVOGADRO_CONSTANT_NA
T = temperature / kelvin
mBeta = (-1 / (temperature * kB)).value_in_unit(mole / kilojoule)
Beta = (1 / (temperature * kB)).value_in_unit(mole / kilojoule)
# Build the first density derivative .
GRho = mBeta * (flat(np.mat(G) * col(Rhos)) / N - np.mean(Rhos) * np.mean(G, axis=1))
#==============================================#
# Now run the simulation for just the monomer. #
#==============================================#
global timestep, nsteps, niterations, nequiliterations
timestep = m_timestep
nsteps = m_nsteps
nequiliterations = m_nequiliterations
niterations = m_niterations
mpdb = PDBFile('mono.pdb')
mData, mXyzs, _trash, _crap, mEnergies, _nah, _dontneed, mSim = run_simulation(mpdb, mSettings, pbc=False, Trajectory=False)
# Get statistics from our simulation.
_trash, _crap, mPot_avg, mPot_err, __trash, __crap = analyze(mData)
# Now that we have the coordinates, we can compute the energy derivatives.
if DoublePrecisionDerivatives and AGrad:
print "Creating Double Precision Simulation for parameter derivatives"
mSim, _ = create_simulation_object(mpdb, mSettings, pbc=False, precision="double")
mG = energy_derivatives(mvals, h, mpdb, FF, mXyzs, mSettings, mSim, None, AGrad)
# pV_avg and mean(pV) are exactly the same.
pV = (pressure * Data['volume'] * AVOGADRO_CONSTANT_NA).value_in_unit(kilojoule_per_mole)
kT = (kB * temperature).value_in_unit(kilojoule_per_mole)
# The enthalpy of vaporization in kJ/mol.
Hvap_avg = mPot_avg - Pot_avg / NMol + kT - np.mean(pV) / NMol
Hvap_err = np.sqrt(Pot_err**2 / NMol**2 + mPot_err**2 + pV_err**2/NMol**2)
# Build the first Hvap derivative.
# We don't pass it back, but nice for printing.
GHvap = np.mean(G,axis=1)
GHvap += mBeta * (flat(np.mat(G) * col(Energies)) / N - Pot_avg * np.mean(G, axis=1))
GHvap /= NMol
GHvap -= np.mean(mG,axis=1)
GHvap -= mBeta * (flat(np.mat(mG) * col(mEnergies)) / N - mPot_avg * np.mean(mG, axis=1))
GHvap *= -1
GHvap -= mBeta * (flat(np.mat(G) * col(pV)) / N - np.mean(pV) * np.mean(G, axis=1)) / NMol
print "The finite difference step size is:",h
Sep = printcool("Density: % .4f +- % .4f kg/m^3, Analytic Derivative" % (Rho_avg, Rho_err))
FF.print_map(vals=GRho)
print Sep
H = Energies + pV
V = np.array(Volumes)
numboots = 1000
L = len(H)
FDCheck = False
def calc_rho(b = None, **kwargs):
if b == None: b = np.ones(L,dtype=float)
if 'r_' in kwargs:
r_ = kwargs['r_']
return bzavg(r_,b)
# No need to calculate error using bootstrap, but here it is anyway
# Rhoboot = []
# for i in range(numboots):
# boot = np.random.randint(L,size=L)
# Rhoboot.append(calc_rho(None,**{'r_':Rhos[boot]}))
# Rhoboot = np.array(Rhoboot)
# Rho_err = np.std(Rhoboot)
if FDCheck:
Sep = printcool("Numerical Derivative:")
GRho1 = property_derivatives(mvals, h, pdb, FF, Xyzs, Settings, Sim, kT, calc_rho, {'r_':Rhos}, Boxes)
FF.print_map(vals=GRho1)
Sep = printcool("Difference (Absolute, Fractional):")
absfrac = ["% .4e % .4e" % (i-j, (i-j)/j) for i,j in zip(GRho, GRho1)]
FF.print_map(vals=absfrac)
print "Box energy:", np.mean(Energies)
print "Monomer energy:", np.mean(mEnergies)
Sep = printcool("Enthalpy of Vaporization: % .4f +- %.4f kJ/mol, Derivatives below" % (Hvap_avg, Hvap_err))
FF.print_map(vals=GHvap)
print Sep
# Define some things to make the analytic derivatives easier.
Gbar = np.mean(G,axis=1)
def deprod(vec):
return flat(np.mat(G)*col(vec))/N
def covde(vec):
return flat(np.mat(G)*col(vec))/N - Gbar*np.mean(vec)
def avg(vec):
return np.mean(vec)
## Thermal expansion coefficient and bootstrap error estimation
def calc_alpha(b = None, **kwargs):
if b == None: b = np.ones(L,dtype=float)
if 'h_' in kwargs:
h_ = kwargs['h_']
if 'v_' in kwargs:
v_ = kwargs['v_']
return 1/(kT*T) * (bzavg(h_*v_,b)-bzavg(h_,b)*bzavg(v_,b))/bzavg(v_,b)
Alpha = calc_alpha(None, **{'h_':H, 'v_':V})
Alphaboot = []
for i in range(numboots):
boot = np.random.randint(L,size=L)
Alphaboot.append(calc_alpha(None, **{'h_':H[boot], 'v_':V[boot]}))
Alphaboot = np.array(Alphaboot)
Alpha_err = np.std(Alphaboot) * max([np.sqrt(statisticalInefficiency(V)),np.sqrt(statisticalInefficiency(H))])
## Thermal expansion coefficient analytic derivative
GAlpha1 = -1 * Beta * deprod(H*V) * avg(V) / avg(V)**2
GAlpha2 = +1 * Beta * avg(H*V) * deprod(V) / avg(V)**2
GAlpha3 = deprod(V)/avg(V) - Gbar
GAlpha4 = Beta * covde(H)
GAlpha = (GAlpha1 + GAlpha2 + GAlpha3 + GAlpha4)/(kT*T)
Sep = printcool("Thermal expansion coefficient: % .4e +- %.4e K^-1\nAnalytic Derivative:" % (Alpha, Alpha_err))
FF.print_map(vals=GAlpha)
if FDCheck:
GAlpha_fd = property_derivatives(mvals, h, pdb, FF, Xyzs, Settings, Sim, kT, calc_alpha, {'h_':H,'v_':V}, Boxes)
Sep = printcool("Numerical Derivative:")
FF.print_map(vals=GAlpha_fd)
Sep = printcool("Difference (Absolute, Fractional):")
absfrac = ["% .4e % .4e" % (i-j, (i-j)/j) for i,j in zip(GAlpha, GAlpha_fd)]
FF.print_map(vals=absfrac)
## Isothermal compressibility
bar_unit = 1.0*bar*nanometer**3/kilojoules_per_mole/item
def calc_kappa(b=None, **kwargs):
if b == None: b = np.ones(L,dtype=float)
if 'v_' in kwargs:
v_ = kwargs['v_']
return bar_unit / kT * (bzavg(v_**2,b)-bzavg(v_,b)**2)/bzavg(v_,b)
Kappa = calc_kappa(None,**{'v_':V})
Kappaboot = []
for i in range(numboots):
boot = np.random.randint(L,size=L)
Kappaboot.append(calc_kappa(None,**{'v_':V[boot]}))
Kappaboot = np.array(Kappaboot)
Kappa_err = np.std(Kappaboot) * np.sqrt(statisticalInefficiency(V))
## Isothermal compressibility analytic derivative
Sep = printcool("Isothermal compressibility: % .4e +- %.4e bar^-1\nAnalytic Derivative:" % (Kappa, Kappa_err))
GKappa1 = +1 * Beta**2 * avg(V**2) * deprod(V) / avg(V)**2
GKappa2 = -1 * Beta**2 * avg(V) * deprod(V**2) / avg(V)**2
GKappa3 = +1 * Beta**2 * covde(V)
GKappa = bar_unit*(GKappa1 + GKappa2 + GKappa3)
FF.print_map(vals=GKappa)
if FDCheck:
GKappa_fd = property_derivatives(mvals, h, pdb, FF, Xyzs, Settings, Sim, kT, calc_kappa, {'v_':V}, Boxes)
Sep = printcool("Numerical Derivative:")
FF.print_map(vals=GKappa_fd)
Sep = printcool("Difference (Absolute, Fractional):")
absfrac = ["% .4e % .4e" % (i-j, (i-j)/j) for i,j in zip(GKappa, GKappa_fd)]
FF.print_map(vals=absfrac)
## Isobaric heat capacity
def calc_cp(b=None, **kwargs):
if b == None: b = np.ones(L,dtype=float)
if 'h_' in kwargs:
h_ = kwargs['h_']
Cp_ = 1/(NMol*kT*T) * (bzavg(h_**2,b) - bzavg(h_,b)**2)
Cp_ *= 1000 / 4.184
return Cp_
Cp = calc_cp(None,**{'h_':H})
Cpboot = []
for i in range(numboots):
boot = np.random.randint(L,size=L)
Cpboot.append(calc_cp(None,**{'h_':H[boot]}))
Cpboot = np.array(Cpboot)
Cp_err = np.std(Cpboot) * np.sqrt(statisticalInefficiency(H))
## Isobaric heat capacity analytic derivative
GCp1 = 2*covde(H) * 1000 / 4.184 / (NMol*kT*T)
GCp2 = mBeta*covde(H**2) * 1000 / 4.184 / (NMol*kT*T)
GCp3 = 2*Beta*avg(H)*covde(H) * 1000 / 4.184 / (NMol*kT*T)
GCp = GCp1 + GCp2 + GCp3
Sep = printcool("Isobaric heat capacity: % .4e +- %.4e cal mol-1 K-1\nAnalytic Derivative:" % (Cp, Cp_err))
FF.print_map(vals=GCp)
if FDCheck:
GCp_fd = property_derivatives(mvals, h, pdb, FF, Xyzs, Settings, Sim, kT, calc_cp, {'h_':H}, Boxes)
Sep = printcool("Numerical Derivative:")
FF.print_map(vals=GCp_fd)
Sep = printcool("Difference (Absolute, Fractional):")
absfrac = ["% .4e % .4e" % (i-j, (i-j)/j) for i,j in zip(GCp,GCp_fd)]
FF.print_map(vals=absfrac)
## Dielectric constant
eps0 = 8.854187817620e-12 * coulomb**2 / newton / meter**2
epsunit = 1.0*(debye**2) / nanometer**3 / BOLTZMANN_CONSTANT_kB / kelvin
prefactor = epsunit/eps0/3
def calc_eps0(b=None, **kwargs):
if b == None: b = np.ones(L,dtype=float)
if 'd_' in kwargs: # Dipole moment vector.
d_ = kwargs['d_']
if 'v_' in kwargs: # Volume.
v_ = kwargs['v_']
b0 = np.ones(L,dtype=float)
dx = d_[:,0]
dy = d_[:,1]
dz = d_[:,2]
D2 = bzavg(dx**2,b)-bzavg(dx,b)**2
D2 += bzavg(dy**2,b)-bzavg(dy,b)**2
D2 += bzavg(dz**2,b)-bzavg(dz,b)**2
return prefactor*D2/bzavg(v_,b)/T
Eps0 = calc_eps0(None,**{'d_':Dips, 'v_':V})
Eps0boot = []
for i in range(numboots):
boot = np.random.randint(L,size=L)
Eps0boot.append(calc_eps0(None,**{'d_':Dips[boot], 'v_':V[boot]}))
Eps0boot = np.array(Eps0boot)
Eps0_err = np.std(Eps0boot)*np.sqrt(np.mean([statisticalInefficiency(Dips[:,0]),statisticalInefficiency(Dips[:,1]),statisticalInefficiency(Dips[:,2])]))
## Dielectric constant analytic derivative
Dx = Dips[:,0]
Dy = Dips[:,1]
Dz = Dips[:,2]
D2 = avg(Dx**2)+avg(Dy**2)+avg(Dz**2)-avg(Dx)**2-avg(Dy)**2-avg(Dz)**2
GD2 = 2*(flat(np.mat(GDx)*col(Dx))/N - avg(Dx)*(np.mean(GDx,axis=1))) - Beta*(covde(Dx**2) - 2*avg(Dx)*covde(Dx))
GD2 += 2*(flat(np.mat(GDy)*col(Dy))/N - avg(Dy)*(np.mean(GDy,axis=1))) - Beta*(covde(Dy**2) - 2*avg(Dy)*covde(Dy))
GD2 += 2*(flat(np.mat(GDz)*col(Dz))/N - avg(Dz)*(np.mean(GDz,axis=1))) - Beta*(covde(Dz**2) - 2*avg(Dz)*covde(Dz))
GEps0 = prefactor*(GD2/avg(V) - mBeta*covde(V)*D2/avg(V)**2)/T
Sep = printcool("Dielectric constant: % .4e +- %.4e\nAnalytic Derivative:" % (Eps0, Eps0_err))
FF.print_map(vals=GEps0)
if FDCheck:
GEps0_fd = property_derivatives(mvals, h, pdb, FF, Xyzs, Settings, Sim, kT, calc_eps0, {'d_':Dips,'v_':V}, Boxes)
Sep = printcool("Numerical Derivative:")
FF.print_map(vals=GEps0_fd)
Sep = printcool("Difference (Absolute, Fractional):")
absfrac = ["% .4e % .4e" % (i-j, (i-j)/j) for i,j in zip(GEps0,GEps0_fd)]
FF.print_map(vals=absfrac)
## Print the final force field.
pvals = FF.make(mvals)
with open(os.path.join('npt_result.p'),'w') as f: lp_dump((Rhos, Volumes, Energies, Dips, G, [GDx, GDy, GDz], mEnergies, mG, Rho_err, Hvap_err, Alpha_err, Kappa_err, Cp_err, Eps0_err, NMol),f)
if __name__ == "__main__":
    # Entry point when this script is launched by ForceBalance's remote
    # driver (runcuda.sh) or run by hand for debugging.
    main()
|
kyleabeauchamp/DBayes
|
ommfb/targets/wat-liquid/npt.py
|
Python
|
gpl-2.0
| 54,693
|
[
"Gaussian",
"OpenMM",
"TINKER"
] |
e60a5eb81266297fab24ae82c89e2e437afc3c307cecc3343e8f7c48d1ff37df
|
# -*- coding: utf-8 -*-
"""
This module implements the GenNetwork class, which implements generic network
logic.
"""
from neuron import h
from ouropy.gendendrite import GenDendrite
import numpy as np
class GenNeuron(object):
    """This is the model of a generic neuron.

    Attributes are created lazily by the mk_* methods; the class defines
    no __init__.

    Attributes
    ----------
    soma - nrn.Section (absent until mk_soma is called)
        The soma
    dendrites - list (absent until mk_dendrite is called)
        A list of gendendrite.GenDendrite
    all_secs - list (absent until mk_soma is called)
        A list of all sections

    Methods
    -------
    mk_soma
    mk_dendrite
    get_segs_by_name
    insert_mechs

    Use cases
    ---------
    >>> myNeuron = GenNeuron()
    >>> myNeuron.mk_soma()
    >>> myNeuron.mk_dendrite()
    Ball-and-stick neuron with default geometry
    """

    def mk_soma(self, diam=None, L=None, name=None):
        """Assign self.soma a hoc section with dimensions diam and L.

        Uses nrn defaults when None. Name defaults to 'soma'.

        Parameters
        ----------
        diam - numeric (Default from NEURON)
            diameter of the soma
        L - numeric (Default from NEURON)
            length of the soma
        name - str (Default 'soma')
            name of the section

        Returns
        -------
        None

        Use cases
        ---------
        >>> self.mk_soma()
        self.soma becomes section with default values
        >>> self.mk_soma(name = 'myFirstSoma', diam = 5, L = 100)
        self.soma becomes section with name = 'myFirstSoma', diam = 5 and
        L = 100
        """
        if not name:
            name = 'soma'
        # Replacing an existing soma: drop the old section from the
        # bookkeeping list so it is not reported twice.
        if hasattr(self, 'soma'):
            self.all_secs.remove(self.soma)
        self.soma = h.Section(name=name)
        if diam:
            self.soma.diam = diam
        if L:
            self.soma.L = L
        if not hasattr(self, 'all_secs'):
            self.all_secs = []
        self.all_secs.append(self.soma)

    def mk_dendrite(self, n_secs=1, dend_name=None, sec_names=None, diam=None,
                    L=None, soma_loc=1):
        """Add a dendrite to list self.dendrites.

        Automatically connects the first section of the dendrite to the
        soma at soma(soma_loc). Raises an error if no soma exists yet.

        Parameters
        ----------
        n_secs - int
            number of sections on the dendrite
        dend_name - str
            the name
        sec_names - list of str
            the names of the sections in order
        diam - list of numerics
            the diameters of the sections
        L - list of numerics
            the lengths of the sections
        soma_loc - numeric
            position on the soma (0..1) where the dendrite attaches

        Returns
        -------
        None

        Use cases
        ---------
        >>> self.mk_dendrite()
        Create a dendrite with 1 section and default values
        >>> self.mk_dendrite(4, 'myDendrite',
                             ['prox1', 'prox2', 'dist1', 'dist2'],
                             [5,3,3,5], [100,50,50,50])
        Create a dendrite with 4 sections named 'myDendrite' and custom
        geometry and section naming.
        """
        if not hasattr(self, 'dendrites'):
            self.dendrites = []
        # BUGFIX: if mk_soma() was never called the attribute does not exist
        # at all, so the original `if not self.soma:` raised a confusing
        # AttributeError instead of the intended error. RuntimeError replaces
        # the Python-2-only StandardError (under Python 2, RuntimeError is a
        # subclass of StandardError, so existing handlers still catch it).
        if not getattr(self, 'soma', None):
            raise RuntimeError("No soma created yet.")
        curr_dend = GenDendrite(dend_name, n_secs, sec_names, diam, L)
        curr_dend.conn_soma(self.soma, soma_loc)
        self.dendrites.append(curr_dend)
        for x in curr_dend:
            self.all_secs.append(x)

    def get_segs_by_name(self, name):
        """Return the sections whose .name() matches the name parameter.

        Parameters
        ----------
        name - str or list of str
            The names to get. If 'all' occurs in name (including as a
            substring of a single string), every section is returned
            as a plain list.

        Returns
        -------
        result - numpy object array (or list for the 'all' case)
            Sections whose .name() matches the name parameter

        Use cases
        ---------
        >>> self.get_segs_by_name('proxd')
        Returns segments named 'proxd'
        """
        if 'all' in name:
            return list(self.all_secs)
        result = []
        if type(name) == str:
            for x in self.all_secs:
                if x.name() == name:
                    result.append(x)
        else:
            for x in name:
                if not (type(x) == str):
                    raise TypeError("All elements of name must be str")
                for y in self.all_secs:
                    if y.name() == x:
                        result.append(y)
        return np.array(result, dtype=np.dtype(object))

    def insert_mechs(self, parameters):
        """Insert the parameters into the sections of the cell.

        See ouropy.parameters

        Parameters
        ----------
        parameters - ouropy.parameters.Parameter or ParameterSet
            A parameter or parameterset contains the mechanism, the segment
            and the value to assign.

        Returns
        -------
        None

        Use Cases
        ---------
        >>> import ouropy.parameters
        >>> myParameters = ouropy.parameters.read_parameters(filename)
        >>> self.insert_mechs(myParameters)
        Insert the parameters loaded from filename into self. See
        ouropy.parameters for details.
        """
        # First pass: insert every mechanism into its target sections.
        mechanisms = parameters.get_mechs()
        for x in mechanisms.keys():
            sections = self.get_segs_by_name(x)
            for y in sections:
                for z in mechanisms[x]:
                    y.insert(z)
        # Second pass: assign the parameter values.
        for x in parameters:
            sections = self.get_segs_by_name(x.sec_name)
            for y in sections:
                setattr(y, x.mech_name, x.value)

    def _current_clamp_soma(self, amp=0.3, dur=500, delay=500):
        """Setup a current clamp at the soma(0.5) section.

        Parameters
        ----------
        amp - numeric
            amplitude of current injection in neurons unit of current (nA)
        dur - numeric
            duration of current injection in neurons unit of time (ms)
        delay - numeric
            start of current injection in neurons unit of time (ms)

        Returns
        -------
        stim - h.IClamp
            the current clamp point process

        Use Cases
        ---------
        >>> self._current_clamp_soma()
        Setup a default current clamp.
        """
        if not hasattr(self, 'stim'):
            self.stim = []
        stim = h.IClamp(self.soma(0.5))
        stim.amp = amp  # Too high amps crash the simulation w/o error!
        stim.dur = dur
        stim.delay = delay
        # Keep a reference so the point process survives garbage collection.
        self.stim.append(stim)
        return stim

    def _voltage_recording(self):
        """Record the voltage at the soma of GenNeuron or a subclass.

        Parameters
        ----------
        None

        Returns
        -------
        soma_v_vec - h.Vector
            vector recording the somatic membrane potential

        Use Cases
        ---------
        >>> myNeuron = GenNeuron()
        >>> myNeuron.mk_soma()
        >>> myNeuron._voltage_recording()
        """
        soma_v_vec = h.Vector()
        # NOTE(review): t_vec is set up to record h._ref_t but is not
        # returned or stored; presumably dead code — confirm whether the
        # time vector should be returned alongside soma_v_vec.
        t_vec = h.Vector()
        soma_v_vec.record(self.soma(0.5)._ref_v)
        t_vec.record(h._ref_t)
        return soma_v_vec

    def _AP_counter(self, thr=0):
        """Action Potential counter at the soma of GenNeuron or a subclass.

        Parameters
        ----------
        thr - numeric
            the threshold value for action potential detection

        Returns
        -------
        time_vec - h.Vector
            vector of spike time stamps
        ap - h.APCount
            the ap counter object that counts number and records time stamps

        Use Cases
        ---------
        >>> myNeuron = GenNeuron()
        >>> myNeuron.mk_soma()
        >>> myNeuron._AP_counter()
        """
        ap = h.APCount(self.soma(0.5))
        self.ap = ap
        self.ap.thresh = thr
        self.time_vec = h.Vector()
        self.ap.record(self.time_vec)
        return self.time_vec, self.ap

    def _current_ladder(self, currents, start, stim_dur, stim_interval):
        # Schedule a sequence of current injections of increasing amplitude,
        # one every stim_interval ms starting at `start`.
        for idx, x in enumerate(currents):
            delay = (idx * stim_interval) + start
            self._current_clamp_soma(amp=x, dur=stim_dur, delay=delay)

    def _SEClamp(self, dur1=200, amp1=0, rs=0.001):
        # Single-electrode voltage clamp at the soma midpoint.
        self.vclamp = h.SEClamp(self.soma(0.5))
        self.vclamp.dur1 = dur1
        self.vclamp.amp1 = amp1
        self.vclamp.rs = rs
        return self.vclamp

    def __iter__(self):
        # NOTE(review): the instance itself is the iterator and the cursor
        # self.i is shared, so nested iteration over the same neuron does
        # not work; behavior kept as-is for compatibility.
        return self

    def __next__(self):
        if not hasattr(self, 'i'):
            self.i = 0
        if self.i < (len(self.all_secs)):
            i = self.i
            self.i += 1
            return self.all_secs[i]
        else:
            # Reset the cursor so the neuron can be iterated again.
            self.i = 0
            raise StopIteration()

    def next(self):
        # Python 2 iterator protocol delegates to __next__.
        return self.__next__()
|
danielmuellernai/ouropy
|
genneuron.py
|
Python
|
mit
| 9,082
|
[
"NEURON"
] |
94ef536f3b5f82573bf92b90e15d459e69c75706654ee5729d1c0e981bea2b5b
|
#!/usr/bin/env python
"""
Introduces velocities/kinetic energy in a system, to match a given
temperature (in Kelvin). Equivalent to an NVT equilibration with
position restraints on non-solvent heavy atoms. Lipid bilayers
are restrained on phosphate atoms in the Z dimension only.
Raises the temperature of the system gradually up to the desired
target value (e.g. 50, 100, 150, 200, ..., 300, 310), running a
number of MD steps at each stage.
Outputs a portable state (.xml) file with positions and velocities,
to allow restarting and/or continuation.
.2019. joaor@stanford.edu
"""
from __future__ import print_function, division
import argparse
import logging
import math
import os
import random
import re
import sys
import numpy as np
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as units
import _utils
import _restraints
# Format logger
logging.basicConfig(stream=sys.stdout,
                    level=logging.INFO,
                    format='[%(asctime)s] %(message)s',
                    datefmt='%Y/%m/%d %H:%M:%S')

##
# Parse user input and options
ap = argparse.ArgumentParser(description=__doc__)

# Mandatory
ap.add_argument('structure', help='Input coordinate file (.cif)')

# Options
ap.add_argument('--output', type=str, default=None,
                help='Root name for output files. Default is input file name.')
ap.add_argument('--forcefield', type=str, default='amber14-all.xml',
                help='Force field to build the system with (XML format).')
ap.add_argument('--solvent', type=str, default='amber14/tip3p.xml',
                help='Solvent model to use in minimization (XML format).')
ap.add_argument('--xyz-frequency', dest='xyz_freq', type=int, default=None,
                help='Frequency (number of steps) to write coordinates.')
ap.add_argument('--log-frequency', dest='log_freq', type=int, default=None,
                help='Frequency (number of steps) to log run parameters.')
ap.add_argument('--platform', type=str, default=None,
                choices=('OpenCL', 'CUDA', 'CPU', 'Reference'),
                help='Platform to run calculations on.')
ap.add_argument('--seed', type=int, default=917,
                help='Seed number for random number generator(s).')
ap.add_argument('--temperature', default=310, type=float,
                help='Target temperature, in Kelvin. Default is 310.')
ap.add_argument('--ladder-step-temperature', default=50, type=int,
                help='Temperature increase per heating stage. Default is 50 K')
ap.add_argument('--ladder-num-steps', default=1000, type=int,
                help='Number of MD steps per heating stage. Default is 1000')
ap.add_argument('--restraint-k', default=500, type=int,
                help='Force constant for position restraints. Default is 500')
ap.add_argument('--hmr', action='store_true', default=False,
                help='Use Hydrogen Mass Repartitioning.')
ap.add_argument('--membrane', action='store_true', default=False,
                help='Enables options for membranes, e.g. restraints, tension')
ap.add_argument('--gentle', action='store_true', default=False,
                help='Auto settings: lower ladder-step-temp and longer -num-steps')
cmd = ap.parse_args()

# BUGFIX: apply the --gentle presets *before* any derived quantity is
# computed. Previously this override only happened after the initial
# temperature, reporter frequencies, and n_stages/totalSteps had already
# been derived from the default ladder values, so --gentle runs started at
# the wrong temperature and reported a wrong stage count/progress.
if cmd.gentle:  # for tricky systems
    cmd.ladder_step_temperature = 10
    cmd.ladder_num_steps = 2000

logging.info('Started')

# Set random seed for reproducibility
random.seed(cmd.seed)

# Figure out platform
platform, plat_properties = _utils.get_platform(cmd.platform)

logging.info('Simulation Details:')
logging.info(f'  random seed  : {cmd.seed}')
logging.info(f'  structure    : {cmd.structure}')
logging.info(f'  force field  : {cmd.forcefield}')
logging.info(f'  solvent model: {cmd.solvent}')
logging.info(f'  temperature  : {cmd.temperature} K')
logging.info(f'  restraints K : {cmd.restraint_k} kcal/mol/A^2')
logging.info(f'  membrane     : {cmd.membrane}')
logging.info(f'  HMR          : {cmd.hmr}')

# Make rootname for output files
basename = os.path.basename(cmd.structure)
fname, fext = os.path.splitext(basename)
if cmd.output is None:
    rootname = fname + '_Heat'
else:
    rootname = cmd.output

# Read in structure data and setup OpenMM system
structure = app.PDBxFile(cmd.structure)
forcefield = app.ForceField(cmd.forcefield, cmd.solvent)

# Baseline MD settings; the integrator starts at the first ladder rung.
md_temp = cmd.ladder_step_temperature * units.kelvin  # initial T
md_step = 2.0*units.femtosecond
md_fric = 1.0/units.picosecond
md_nbct = 1.0*units.nanometer
md_hamu = None
md_cstr = app.HBonds

surface_tension = 0*units.bar*units.nanometer  # amber lipids are tensionless

if cmd.hmr:  # adapt for HMR if necessary
    md_step *= 2.5  # make 5 fs
    md_hamu = 4*units.amu
    md_cstr = app.AllBonds

# Build system & integrator
logging.info('Setting up system and integrator')
system = forcefield.createSystem(structure.topology, nonbondedMethod=app.PME,
                                 nonbondedCutoff=md_nbct,
                                 constraints=md_cstr,
                                 hydrogenMass=md_hamu,
                                 ewaldErrorTolerance=0.0005,
                                 rigidWater=True)

integrator = mm.LangevinIntegrator(md_temp, md_fric, md_step)
integrator.setRandomNumberSeed(cmd.seed)
integrator.setConstraintTolerance(0.00001)

# Restraint heavy atoms
# force = _restraints.make_heavy_atom_restraints(structure, cmd.restraint_k)
force = _restraints.make_heavy_atom_restraints_v2(system, structure,
                                                  cmd.restraint_k)
system.addForce(force)

# Restraint lipid headgroups in Z
if cmd.membrane:
    # force = _restraints.make_lipid_restraints(structure, cmd.restraint_k)
    force = _restraints.make_lipid_restraints_v2(system, structure,
                                                 cmd.restraint_k)
    system.addForce(force)

# Setup simulation
simulation = app.Simulation(structure.topology, system, integrator,
                            platform, plat_properties)
simulation.context.setPositions(structure.positions)
simulation.context.setVelocitiesToTemperature(md_temp)

# Setup writer/logger frequencies.
# Default to roughly 10 reports per heating stage; the same default applies
# with or without HMR (the former if/else had two identical branches).
freq = max(1, math.floor(cmd.ladder_num_steps / 10))
xyz_freq = cmd.xyz_freq if cmd.xyz_freq is not None else freq
log_freq = cmd.log_freq if cmd.log_freq is not None else freq

# Calculate total simulation length in steps
n_stages = math.ceil(cmd.temperature / cmd.ladder_step_temperature)
n_total_steps = n_stages * cmd.ladder_num_steps

# Setup Reporters
dcd_fname = _utils.make_fname(rootname + '.dcd')
cpt_fname = _utils.make_fname(rootname + '.cpt')
log_fname = _utils.make_fname(rootname + '.log')

dcd = app.DCDReporter(dcd_fname, xyz_freq)
cpt = app.CheckpointReporter(cpt_fname, xyz_freq)
state = app.StateDataReporter(log_fname, log_freq,
                              step=True,
                              potentialEnergy=True,
                              kineticEnergy=True,
                              temperature=True,
                              progress=True,
                              remainingTime=True,
                              totalSteps=n_total_steps,
                              speed=True,
                              separator='\t')

simulation.reporters.append(dcd)
simulation.reporters.append(cpt)
simulation.reporters.append(state)

logging.info(f'Writing coordinates to \'{dcd_fname}\'')
logging.info(f'Writing checkpoint file to \'{cpt_fname}\'')
logging.info(f'Writing simulation log to \'{log_fname}\'')

# Run simulation: raise the thermostat one rung per stage until the target
# temperature is reached (the last rung is clamped to the target).
counter = 0
cur_temp = 0
while True:
    counter += 1
    cur_temp += cmd.ladder_step_temperature
    cur_temp = min(cmd.temperature, cur_temp)
    logging.info(f'Stage {counter}/{n_stages}: heating system to {cur_temp}K')
    simulation.integrator.setTemperature(cur_temp * units.kelvin)
    simulation.step(cmd.ladder_num_steps)
    if cur_temp >= cmd.temperature:
        break

# Write state file (without restraining forces)
xml_fname = _utils.make_fname(rootname + '.xml')
logging.info(f'Writing state file to \'{xml_fname}\'')

# Restraint forces were added last, so pop them off the end of the force list.
system = simulation.system
n_rest_forces = 1
if cmd.membrane:
    n_rest_forces += 1
while n_rest_forces:
    system.removeForce(system.getNumForces() - 1)
    n_rest_forces -= 1

# Reinitialize context. Keep velocities, positions.
state = simulation.context.getState(getPositions=True, getVelocities=True)
xyz, vel = state.getPositions(), state.getVelocities()
simulation.context.reinitialize(preserveState=False)
simulation.context.setPositions(xyz)
simulation.context.setVelocities(vel)
simulation.saveState(xml_fname)

# Write last frame as mmCIF
cif_fname = _utils.make_fname(rootname + '.cif')
logging.info(f'Writing final structure to \'{cif_fname}\'')
with open(cif_fname, 'w') as handle:
    app.PDBxFile.writeFile(structure.topology, xyz, handle, keepIds=True)

# Write system without dummy atoms
# Easier to redo system object
# and set positions/velocities manually.
model = app.Modeller(structure.topology, structure.positions)
dummy = [c for c in model.topology.chains() if c.id.startswith('DUM')]
model.delete(dummy)  # delete entire chains
n_ini_atoms = model.topology.getNumAtoms()

logging.info('Writing system without dummy (restraint) atoms')
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME,
                                 nonbondedCutoff=md_nbct,
                                 constraints=md_cstr,
                                 hydrogenMass=md_hamu,
                                 ewaldErrorTolerance=0.0005,
                                 rigidWater=True)
integrator = mm.LangevinIntegrator(md_temp, md_fric, md_step)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(xyz[:n_ini_atoms])
simulation.context.setVelocities(vel[:n_ini_atoms])

xml_fname = _utils.make_fname(rootname + '_noDUM' + '.xml')
logging.info(f'Writing dummy-less state to \'{xml_fname}\'')
simulation.saveState(xml_fname)

# Write last frame as mmCIF
cif_fname = _utils.make_fname(rootname + '_noDUM' + '.cif')
logging.info(f'Writing dummy-less structure to \'{cif_fname}\'')
with open(cif_fname, 'w') as handle:
    app.PDBxFile.writeFile(model.topology, xyz[:n_ini_atoms], handle, keepIds=True)

logging.info('Finished')
|
csblab/md_scripts
|
openmm/amberff/heatSystem.py
|
Python
|
mit
| 10,453
|
[
"Amber",
"OpenMM"
] |
79f2739d05fb12369648f25f253a8dcf15e383001a3accb5d6ee9163ea4f0668
|
#!/usr/bin/env python
import numpy as np
from gpaw.mpi import world
from gpaw.blacs import BlacsGrid, Redistributor
from gpaw.utilities import compiled_with_sl
def test(comm, M, N, mcpus, ncpus, mb, nb):
    """Exercise BLACS redistribution: fill a block-cyclically distributed
    M x N matrix with each process's rank, print which slice every rank
    owns, then collect the matrix onto rank 0 and print the rank map.

    comm is an MPI communicator; (mcpus, ncpus) is the process grid and
    (mb, nb) the block size of the distributed descriptor.
    """
    # Descriptor for the whole matrix living on a 1x1 grid (rank 0 only).
    grid0 = BlacsGrid(comm, 1, 1)
    desc0 = grid0.new_descriptor(M, N, M, N, 0, 0)
    A_mn = desc0.zeros(dtype=float)
    # Sentinel fill: any element still equal to comm.size + 1 after the
    # redistribution was never written by any rank.
    A_mn[:] = comm.size + 1
    # Block-cyclically distributed descriptor on the mcpus x ncpus grid.
    grid1 = BlacsGrid(comm, mcpus, ncpus)
    desc1 = grid1.new_descriptor(M, N, mb, nb, 0, 0)
    B_mn = desc1.zeros(dtype=float)
    # Each rank tags its local blocks with its own rank number.
    B_mn[:] = comm.rank
    if comm.rank == 0:
        msg = 'Slices of global matrix indices by rank'
        print msg
        print '-' * len(msg)
    # Print each rank's blocks in rank order; the barrier serializes output.
    for rank in range(comm.size):
        comm.barrier()
        if rank == comm.rank:
            print 'Rank %d:' % rank
            last_Mstart = -1
            for Mstart, Mstop, Nstart, Nstop, block in desc1.my_blocks(B_mn):
                # Start a new output line whenever the row range advances.
                if Mstart > last_Mstart and last_Mstart >= 0:
                    print
                # Trailing comma: stay on the same line (Python 2 print).
                print '[%3d:%3d, %3d:%3d]' % (Mstart, Mstop, Nstart, Nstop),
                last_Mstart = Mstart
                # Every local block must carry this rank's tag.
                assert (block == comm.rank).all()
                #print block
                #print
            print
            print
    comm.barrier()
    # Gather the distributed matrix onto the 1x1 grid (rank 0).
    redistributor = Redistributor(comm, desc1, desc0)
    redistributor.redistribute(B_mn, A_mn)
    if comm.rank == 0:
        msg = 'Rank where each element of the global matrix is stored'
        print msg
        print '-' * len(msg)
        print A_mn
# Entry point: run only when executed directly (or under gpaw-python, where
# __name__ is '__builtin__'). Requires a ScaLAPACK-enabled build.
# Fix: Python 2 print statements converted to print() calls so the file is
# valid Python 3 (consistent with the print() call in the guard above).
if __name__ in ['__main__', '__builtin__']:
    if not compiled_with_sl():
        print('Not built with ScaLAPACK. Test does not apply.')
    else:
        M, N = 10, 10
        mb, nb = 2, 2
        # Near-square process grid using all available ranks.
        mcpus = int(np.ceil(world.size**0.5))
        ncpus = world.size // mcpus
        if world.rank == 0:
            print('world size: ', world.size)
            print('M x N: ', M, 'x', N)
            print('mcpus x ncpus:', mcpus, 'x', ncpus)
            print('mb x nb: ', mb, 'x', nb)
            print()
        test(world, M, N, mcpus, ncpus, mb, nb)
|
ajylee/gpaw-rtxs
|
gpaw/test/parallel/blacsdist.py
|
Python
|
gpl-3.0
| 2,069
|
[
"GPAW"
] |
084a7e7c7540a8607af2a16057e1f72bc497c7a0d847a367cd9612e20abea20f
|
from flask import Flask, jsonify, render_template, request
import numpy as np
import nest
import os
import json
import anyjson
app = Flask(__name__)
def prep(data):
    """Convert parallel [xs, ys] sequences into a list of {'x', 'y'} points.

    Returns a real list (not a lazy ``map`` object): under Python 3 a map
    iterator cannot be serialized by ``jsonify`` and is exhausted after a
    single traversal.
    """
    return [{'x': x, 'y': y} for x, y in zip(*data)]
@app.template_filter('neurons')
def neurons_filter(nodes):
    """Template filter: keep only neuron models (names without 'generator').

    Returns a list instead of a lazy ``filter`` object so the result can be
    iterated more than once and serialized under Python 3.
    """
    return [node for node in nodes if 'generator' not in node]
@app.template_filter('inputs')
def inputs_filter(nodes):
    """Template filter: keep only input devices (names containing 'generator').

    Returns a list instead of a lazy ``filter`` object so the result can be
    iterated more than once and serialized under Python 3.
    """
    return [node for node in nodes if 'generator' in node]
@app.template_filter('stringify')
def stringify_filter(s):
    """Template filter: turn an identifier into display text (underscores to spaces)."""
    return ' '.join(s.split('_'))
@app.route('/')
@app.route('/<count>')
def main(count=1):
    """Serve the main page with ``count`` neuron panels.

    Reloads the simulation settings and the model whitelist from disk on
    every request, so edits to the JSON files take effect without a restart.
    """
    nest.ResetKernel()
    with open('settings/simulation.json') as data_file:
        sims = anyjson.loads(''.join(data_file.readlines()))
    # nodes = nest.Models('nodes')
    with open('settings/models.json') as data_file:
        PARAMS = json.load(data_file)
    nodes = PARAMS.keys()
    return render_template('multiple_neurons_sd.html', nodes=nodes, sims=sims, count=count)
@app.route('/parameters/', methods=['GET'])
def parameters():
    """Return a model's default parameters (optionally overridden by a named
    preset from parameters/<model>.json) as JSON: {params, presets}."""
    model = request.values['model']
    preset = request.values.get('preset', 'default')
    with open('settings/models.json') as data_file:
        PARAMS = json.load(data_file)
    if model in PARAMS:
        # Expose only the whitelisted parameter names for this model.
        params = dict(zip(PARAMS[model], nest.GetDefaults(model, PARAMS[model])))
    else:
        params = nest.GetDefaults(model)
    # Use list comprehensions rather than map/filter: under Python 3 those
    # return lazy iterators, which (a) cannot be serialized by jsonify and
    # (b) are *always* truthy, silently breaking the ``if data`` check below.
    presets_model = [os.path.splitext(x)[0] for x in os.listdir('parameters')]
    presets = []
    if model in presets_model:
        with open('parameters/%s.json' % model) as data_file:
            data = json.load(data_file)
        presets = [x['label'] for x in data]
        data = [x for x in data if x['label'] == preset]
        if data:
            params.update(data[0]['params'])
    return jsonify(params=params, presets=presets)
@app.route('/init/', methods=['POST'])
def init():
    """Build the network (neurons, input generator, spike detector), run a
    1 s warm-up simulation, and return the recorded spikes as JSON."""
    values = request.get_json()
    # Form values arrive as strings; coerce every parameter to float for NEST.
    values['neuron']['params'] = {k: float(v) for k, v in values['neuron']['params'].items()}
    values['input']['params'] = {k: float(v) for k, v in values['input']['params'].items()}
    nest.ResetKernel()
    # Handles live in module globals so /simulate/ can reuse them later.
    global neuron
    neuron = nest.Create(values['neuron']['model'], values['neuron']['count'], params=values['neuron']['params'])
    global input
    input = nest.Create(values['input']['model'], params=values['input']['params'])
    nest.Connect(input, neuron, 'all_to_all')
    global sd
    sd = nest.Create('spike_detector')
    nest.Connect(neuron, sd, 'all_to_all')
    nest.Simulate(1000.)
    events = nest.GetStatus(sd, 'events')[0]
    nest.SetStatus(sd, {'n_events': 0})
    if events:
        data = prep([events['times'], events['senders']])
    else:
        data = []
    # Fix: return the guarded ``data`` — the original recomputed
    # prep(events...) in the return, making the empty-events fallback dead code.
    return jsonify(data=data, values=values, curtime=nest.GetStatus([0], 'time'))
@app.route('/simulate/', methods=['POST'])
def simulate():
    """Update parameters of the existing network (built by /init/), advance
    the simulation by 1 ms, and return any new spikes as JSON."""
    values = request.get_json()
    # Coerce incoming string values to floats for NEST.
    values['input']['params'] = {k: float(v) for k, v in values['input']['params'].items()}
    values['neuron']['params'] = {k: float(v) for k, v in values['neuron']['params'].items()}
    nest.SetStatus(input, values['input']['params'])
    nest.SetStatus(neuron, values['neuron']['params'])
    nest.Simulate(1.)
    events = nest.GetStatus(sd, 'events')[0]
    nest.SetStatus(sd, {'n_events': 0})
    if events:
        data = prep([events['times'], events['senders']])
    else:
        data = []
    # Fix: return the guarded ``data`` — the original recomputed
    # prep(events...) in the return, making the empty-events fallback dead code.
    return jsonify(data=data, values=values, curtime=nest.GetStatus([0], 'time'))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
babsey/nest-webapp
|
multiple_neurons_sd.py
|
Python
|
mit
| 3,814
|
[
"NEURON"
] |
ed1557490df599e488d3109e24c57273c708aaabde89a3784917df2e450ee17f
|
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import abc
import collections
import errno
import filecmp
import itertools
import os
import subprocess
import sys
import mx
import mx_compdb
import mx_subst
_target_jdk = None
"""JDK for which native projects should be built."""
def _get_target_jdk():
    """Lazily resolve and cache the JDK used for building native projects."""
    global _target_jdk
    if not _target_jdk:
        _target_jdk = mx.get_jdk(tag=mx.DEFAULT_JDK_TAG)
    return _target_jdk
# Support for conditional compilation based on the JDK version.
# Registers a '<jdk_ver>' substitution expanding to the target JDK's
# Java compliance version.
mx_subst.results_substitutions.register_no_arg('jdk_ver', lambda: str(_get_target_jdk().javaCompliance.value))
class lazy_default(object):  # pylint: disable=invalid-name
    """Descriptor computing an instance attribute lazily, on first access.

    The decorated function runs when the attribute is first read through an
    instance; its result is stored in the instance ``__dict__`` under the
    function's name. Being a non-data descriptor, the cached entry shadows
    the descriptor on subsequent lookups. Class-level access returns the
    descriptor itself.
    """
    def __init__(self, init):
        self.init = init

    def __get__(self, instance, owner):
        if instance is None:
            return self
        attr_name = self.init.__name__
        return vars(instance).setdefault(attr_name, self.init(instance))
class lazy_class_default(object):  # pylint: disable=invalid-name
    """Descriptor computing a value once per descriptor (i.e. per class).

    The decorated function is called with the *owner* class on the first
    access (through either the class or an instance); the result is cached
    on the descriptor itself and returned on every later access.
    """
    def __init__(self, init):
        self.init = init

    def __get__(self, instance, owner):
        cache = vars(self)
        attr_name = self.init.__name__
        if attr_name in cache:
            return cache[attr_name]
        return cache.setdefault(attr_name, self.init(owner))
class _Toolchain(object):
def __init__(self, target_arch):
self.target_arch = target_arch
@property
def target(self):
return '{}-{}'.format(mx.get_os(), self.target_arch)
@property
def is_native(self):
return self.target_arch == mx.get_arch()
@property
def is_available(self):
return self.is_native
_registry = {}
@classmethod
def for_(cls, target_arch):
return cls._registry.setdefault(target_arch, _Toolchain(target_arch))
class Ninja(object):
    """Encapsulates access to Ninja (ninja).

    Abstracts the operations of the Ninja build system that are necessary for
    the NinjaBuildTask to build a NinjaProject.
    """
    binary = 'ninja'  # may be rebound to an absolute path of a fetched binary
    default_manifest = 'build.ninja'
    def __init__(self, build_dir, parallelism, targets=None):
        self.build_dir = build_dir  # directory containing the build manifest
        self.parallelism = str(parallelism)  # forwarded verbatim to `ninja -j`
        self.targets = targets or []
    def needs_build(self):
        """Dry-run ninja and return (needs_build, human-readable reason)."""
        out = mx.LinesOutputCapture()
        details = mx.LinesOutputCapture()
        # '-n' is a dry run; '-d explain' makes ninja report on stderr why
        # each out-of-date target would be rebuilt.
        self._run('-n', '-d', 'explain', *self.targets, out=out, err=details)
        if details.lines:
            return True, [l for l in details.lines if l.startswith('ninja explain:')][0]
        else:
            assert out.lines == ['ninja: no work to do.']
            return False, out.lines[0]
    def compdb(self, out):
        # Emit a JSON compilation database for the configured targets.
        self._run('-t', 'compdb', *self.targets, out=out)
    def build(self):
        self._run(*self.targets)
    def clean(self):
        self._run('-t', 'clean', *self.targets)
    def _run(self, *args, **kwargs):
        # Invoke ninja in the build directory, honoring mx verbosity options.
        cmd = [self.binary, '-j', self.parallelism]
        mx_verbose_env = mx.get_env('MX_VERBOSE', None)
        if mx.get_opts().very_verbose or mx_verbose_env:
            cmd += ['-v']
        cmd += args
        out = kwargs.get('out', mx.OutputCapture())
        err = kwargs.get('err', subprocess.STDOUT)
        verbose = mx.get_opts().verbose or mx_verbose_env
        if verbose:
            # Tee captured output to the console, except for dry runs.
            if callable(out) and '-n' not in args:
                out = mx.TeeOutputCapture(out)
            if callable(err):
                err = mx.TeeOutputCapture(err)
        try:
            rc = mx.run(cmd, nonZeroIsFatal=False, out=out, err=err, cwd=self.build_dir)
        except OSError as e:
            if e.errno != errno.EACCES:
                mx.abort('Error executing \'{}\': {}'.format(' '.join(cmd), str(e)))
            # EACCES: a freshly fetched ninja binary may not be executable yet.
            mx.logv('{} is not executable. Trying to change permissions...'.format(self.binary))
            os.chmod(self.binary, 0o755)
            self._run(*args, **kwargs)  # retry
        else:
            not rc or mx.abort(rc if verbose else (out, err))  # pylint: disable=expression-not-assigned
class NativeDependency(mx.Dependency):
    """A Dependency that can be included and linked in when building native projects.

    Attributes
        include_dirs : iterable of str
            Directories with headers provided by this dependency.
        libs : iterable of str
            Libraries provided by this dependency.
    """
    include_dirs = ()
    libs = ()
class MultiarchProject(mx.AbstractNativeProject, NativeDependency):
    """A Project containing native code that can be built for multiple target architectures.

    Attributes
        multiarch : list of str, optional
            Target architectures for which this project can be built (must
            include the host architecture).
            If present, the archivable results for each target architecture
            are in a separate subdir of the archive. Otherwise, the archivable
            results for the host architecture are at the root of the archive.
    """
    def __init__(self, suite, name, subDir, srcDirs, deps, workingSets, d, **kwargs):
        context = 'project ' + name
        if 'multiarch' in kwargs:
            multiarch = mx.Suite._pop_list(kwargs, 'multiarch', context)
            # Apply mx substitutions and deduplicate the architecture list.
            self.multiarch = list(set(mx_subst.results_substitutions.substitute(arch) for arch in multiarch))
            if mx.get_arch() not in self.multiarch:
                mx.abort('"multiarch" must contain the host architecture "{}"'.format(mx.get_arch()), context)
        else:
            self.multiarch = []
        super(MultiarchProject, self).__init__(suite, name, subDir, srcDirs, deps, workingSets, d, **kwargs)
        self.out_dir = self.get_output_root()
    @property
    def _use_multiarch(self):
        # Build for all declared architectures only when the project declares
        # them AND the user enabled the --multiarch option.
        return self.multiarch and mx.get_opts().multiarch
    def getBuildTask(self, args):
        if self._use_multiarch:
            # Compose one sub-task per declared architecture.
            class MultiarchBuildTask(mx.Buildable, mx.TaskSequence):
                subtasks = [self._build_task(target_arch, args) for target_arch in self.multiarch]
                def execute(self):
                    super(MultiarchBuildTask, self).execute()
                    self.built = any(t.built for t in self.subtasks)
                def newestOutput(self):
                    return mx.TimeStampFile.newest(t.newestOutput() for t in self.subtasks)
            return MultiarchBuildTask(self, args)
        else:
            return self._build_task(mx.get_arch(), args)
    @abc.abstractmethod
    def _build_task(self, target_arch, args):
        """:rtype: TargetArchBuildTask"""
    def getArchivableResults(self, use_relpath=True, single=False):
        for target_arch in self.multiarch if self._use_multiarch else [mx.get_arch()]:
            toolchain = _Toolchain.for_(target_arch)
            # Results are nested under '<os>-<arch>' only for multiarch projects.
            target_arch_path = toolchain.target if self.multiarch else ''
            if toolchain.is_native or not single:
                for file_path, archive_path in self._archivable_results(target_arch, use_relpath, single):
                    yield file_path, mx.join(target_arch_path, archive_path)
    @abc.abstractmethod
    def _archivable_results(self, target_arch, use_relpath, single):
        """:rtype: typing.Iterable[(str, str)]"""
class TargetArchBuildTask(mx.AbstractNativeBuildTask):
    """Build task bound to one target architecture of a MultiarchProject."""
    def __init__(self, args, project, target_arch):
        self.target_arch = target_arch
        super(TargetArchBuildTask, self).__init__(args, project)
        # Per-architecture output directory: <project out_dir>/<target_arch>.
        self.out_dir = mx.join(self.subject.out_dir, self.target_arch)
    @property
    def name(self):
        # Disambiguate tasks of the same project built for different arches.
        return '{}_{}'.format(super(TargetArchBuildTask, self).name, self.target_arch)
    def buildForbidden(self):
        forbidden = super(TargetArchBuildTask, self).buildForbidden()
        if not forbidden and not _Toolchain.for_(self.target_arch).is_available:
            self.subject.abort('Missing toolchain for {}.'.format(self.target_arch))
        return forbidden
class NinjaProject(MultiarchProject):
    """A MultiarchProject that is built using the Ninja build system.

    What distinguishes Ninja from other build systems is that its input files
    are not meant to be written by hand. Instead, they should be generated,
    which in this case is the responsibility of the NinjaProject subclasses.

    Subclasses are expected to generate an appropriate build manifest using
    the NinjaManifestGenerator.

    Attributes
        cflags : list of str, optional
            Flags used during compilation step.
        ldflags : list of str, optional
            Flags used during linking step.
        ldlibs : list of str, optional
            Flags used during linking step.
        use_jdk_headers : bool, optional
            Whether to add directories with JDK headers to the list of
            directories searched for header files. Default is False.
    """
    def __init__(self, suite, name, subDir, srcDirs, deps, workingSets, d, **kwargs):
        context = 'project ' + name
        self._cflags = mx.Suite._pop_list(kwargs, 'cflags', context)
        self._ldflags = mx.Suite._pop_list(kwargs, 'ldflags', context)
        self._ldlibs = mx.Suite._pop_list(kwargs, 'ldlibs', context)
        self.use_jdk_headers = kwargs.pop('use_jdk_headers', False)
        super(NinjaProject, self).__init__(suite, name, subDir, srcDirs, deps, workingSets, d, **kwargs)
    def isJDKDependent(self):
        """Returns whether this NinjaProject is JDK dependent.

        A NinjaProject is considered to be JDK dependent if it uses JDK headers
        or `<jdk_ver>` substitution in its `cflags` (presumably for conditional
        compilation).
        """
        return self.use_jdk_headers or any('<jdk_ver>' in f for f in self._cflags)
    def resolveDeps(self):
        super(NinjaProject, self).resolveDeps()
        self.buildDependencies += self._ninja_deps
        if self.use_jdk_headers or self.suite.getMxCompatibility().is_using_jdk_headers_implicitly(self):
            self.buildDependencies += [self._jdk_dep]
    @lazy_class_default
    def _ninja_deps(cls):  # pylint: disable=no-self-argument
        # Resolved once per mx invocation: the NINJA binary (when not found on
        # PATH) and the ninja_syntax module (added to sys.path when fetched).
        deps = []
        try:
            subprocess.check_output(['ninja', '--version'], stderr=subprocess.STDOUT)
        except OSError:
            dep = mx.library('NINJA', False)
            if dep:
                deps.append(dep)
                Ninja.binary = mx.join(dep.get_path(False), 'ninja')
            else:
                # necessary until GR-13214 is resolved
                mx.warn('Make `ninja` binary available via PATH to build native projects.')
        try:
            import ninja_syntax  # pylint: disable=unused-variable, unused-import
        except ImportError:
            dep = mx.library('NINJA_SYNTAX')
            deps.append(dep)
            module_path = mx.join(dep.get_path(False), 'ninja_syntax-{}'.format(dep.version))
            mx.ensure_dir_exists(module_path)  # otherwise, import machinery will ignore it
            sys.path.append(module_path)
        return deps
    @lazy_class_default
    def _jdk_dep(cls):  # pylint: disable=no-self-argument
        # Pseudo-dependency exposing the target JDK's include directories.
        class JavaHome(NativeDependency):
            def __init__(self):
                super(JavaHome, self).__init__(mx.suite('mx'), 'JAVA_HOME', None)
                self.include_dirs = None
            def getBuildTask(self, args):
                # Ensure that the name is set correctly now that JAVA_HOME is definitely configured
                if not self.include_dirs:
                    jdk = _get_target_jdk()
                    self.name = 'JAVA_HOME=' + jdk.home
                    self.include_dirs = jdk.include_dirs
                return mx.NoOpTask(self, args)
            def _walk_deps_visit_edges(self, *args, **kwargs):
                pass
        return JavaHome()
    def _build_task(self, target_arch, args):
        return NinjaBuildTask(args, self, target_arch)
    @abc.abstractmethod
    def generate_manifest(self, path):
        """Generates a Ninja manifest used to build this project."""
    @property
    def cflags(self):
        return self._cflags
    @property
    def ldflags(self):
        return self._ldflags
    @property
    def ldlibs(self):
        return self._ldlibs
    @property
    def source_tree(self):
        return self._source['tree']
    @lazy_default
    def _source(self):
        # Walk the source directories once; cache the relative directory tree
        # and the source files grouped by file extension.
        source_tree = []
        source_files = collections.defaultdict(list)
        for source_dir in self.source_dirs():
            for root, _, files in os.walk(source_dir):
                rel_root = os.path.relpath(root, self.dir)
                source_tree.append(rel_root)
                # group files by extension
                grouping = collections.defaultdict(list)
                for f in files:
                    grouping[os.path.splitext(f)[1]].append(mx.join(rel_root, f))
                for ext in grouping.keys():
                    source_files[ext] += grouping[ext]
        return dict(tree=source_tree, files=source_files)
class NinjaBuildTask(TargetArchBuildTask):
    default_parallelism = 1
    """
    By default, we disable parallelism per project for the following reasons:
        #. It allows mx to build whole projects in parallel, which works well
           for smallish projects like ours.
        #. It is a safe default in terms of compatibility. Although projects
           may explicitly request greater parallelism, that may not work out
           of the box. In particular, the parallelization of debug builds on
           Windows may require special consideration.
    """
    def __init__(self, args, project, target_arch=mx.get_arch(), ninja_targets=None):
        super(NinjaBuildTask, self).__init__(args, project, target_arch)
        self._reason = None  # reason reported by the last needsBuild() call
        self._manifest = mx.join(self.out_dir, Ninja.default_manifest)
        self.ninja = Ninja(self.out_dir, self.parallelism, targets=ninja_targets)
    def __str__(self):
        return 'Building {} with Ninja'.format(self.name)
    def needsBuild(self, newestInput):
        is_needed, self._reason = super(NinjaBuildTask, self).needsBuild(newestInput)
        if is_needed:
            return True, self._reason
        if not mx.exists(self._manifest):
            self._reason = 'no build manifest'
            return True, self._reason
        mx.logv('Checking whether to build {} with Ninja...'.format(self.name))
        # Delegate the fine-grained staleness check to ninja itself.
        is_needed, self._reason = self.ninja.needs_build()
        return is_needed, self._reason
    def newestOutput(self):
        return mx.TimeStampFile.newest([mx.join(self.out_dir, self.subject._target)])
    def build(self):
        # Regenerate the manifest only when it is missing or the rebuild
        # reason mentions the manifest itself (or a phony target).
        if not mx.exists(self._manifest) \
                or self._reason is None \
                or mx.basename(self._manifest) in self._reason \
                or 'phony' in self._reason:
            with mx.SafeFileCreation(self._manifest) as sfc:
                self.subject.generate_manifest(sfc.tmpPath)
                # If the regenerated manifest differs, clean stale outputs.
                if mx.exists(self._manifest) \
                        and not filecmp.cmp(self._manifest, sfc.tmpPath, shallow=False):
                    self.ninja.clean()
        with mx_compdb.CompdbCapture(self.subject.suite) as out:
            if out:
                self.ninja.compdb(out=out)
        self.ninja.build()
    def clean(self, forBuild=False):
        if not forBuild:
            try:
                mx.rmtree(self.out_dir)
            except OSError as e:
                # Already-removed output directory is not an error.
                if e.errno != errno.ENOENT:
                    raise
class NinjaManifestGenerator(object):
    """Abstracts the writing of the Ninja build manifest.

    Essentially, this is a wrapper around the `ninja_syntax.Writer` with
    several methods added to make it easier to write a NinjaProject build
    manifest.

    For more details about Ninja, see https://ninja-build.org/manual.html.
    """
    def __init__(self, project, output):
        import ninja_syntax  # deferred: module may be provided by a resolved library
        self.project = project
        self.n = ninja_syntax.Writer(output)  # pylint: disable=invalid-name
        self._generate()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def newline(self):
        self.n.newline()
    def comment(self, text):
        self.n.comment(text)
    def variables(self, **kwargs):
        for key, value in kwargs.items():
            self.n.variable(key, value)
        self.newline()
    def include_dirs(self, dirs):
        # Emit the '$includes' variable with '-I' flags; quote paths with spaces.
        def quote(path):
            has_spaces = ' ' in path or ('$project' in path and ' ' in self.project.dir)
            return '"{}"'.format(path) if has_spaces else path
        self.variables(includes=['-I' + quote(self._resolve(d)) for d in dirs])
    def include(self, path):
        import ninja_syntax
        self.n.include(ninja_syntax.escape_path(path))
    def cc(self, source_file):  # pylint: disable=invalid-name
        return self.n.build(self._output(source_file), 'cc', self._resolve(source_file))[0]
    def cxx(self, source_file):
        return self.n.build(self._output(source_file), 'cxx', self._resolve(source_file))[0]
    def asm(self, source_file):
        asm_source = self._resolve(source_file)
        # Preprocess the assembly source first when the toolchain requires it.
        if getattr(self.project.toolchain, 'asm_requires_cpp', False):
            asm_source = self.n.build(self._output(source_file, '.asm'), 'cpp', asm_source)
        return self.n.build(self._output(source_file), 'asm', asm_source)[0]
    def ar(self, archive, members):  # pylint: disable=invalid-name
        return self.n.build(archive, 'ar', members)[0]
    def link(self, program, files):
        return self.n.build(program, 'link', files)[0]
    def linkxx(self, program, files):
        return self.n.build(program, 'linkxx', files)[0]
    def close(self):
        self.n.close()
    @staticmethod
    def _output(source_file, ext=None):
        # Map a source path to its object-file path.
        if ext is None:
            ext = '.obj' if mx.is_windows() else '.o'
        return os.path.splitext(source_file)[0] + ext
    @staticmethod
    def _resolve(path):
        # Make the path relative to the '$project' Ninja variable.
        return mx.join('$project', path)
    def _generate(self):
        self.comment('Generated by mx. Do not edit.')
        self.newline()
        self.variables(ninja_required_version='1.3')
        self.comment('Directories')
        self.variables(project=self.project.dir)
        self._generate_mx_interface()
    def _generate_mx_interface(self):
        # Emit phony targets for every source directory plus suite.py, and a
        # 'dry_run' generator rule, so a ninja dry run tells mx when the
        # manifest itself must be regenerated.
        def phony(target):
            return self.n.build(self._resolve(target), 'phony')[0]
        self.comment('Manifest dependencies')
        deps = [phony(d) for d in self.project.source_tree]
        deps += [self.project.suite.suite_py()]
        self.newline()
        self.comment('Used by mx to check...')
        self.n.rule('dry_run',
                    command='DRY_RUN $out',
                    generator=True)
        self.newline()
        self.comment('...whether manifest needs to be regenerated')
        self.n.build(Ninja.default_manifest, 'dry_run', implicit=deps)
        self.newline()
class DefaultNativeProject(NinjaProject):
    """A NinjaProject that makes many assumptions when generating a build manifest.

    It is assumed that:
        #. Directory layout is fixed:
            - `include` is a flat subdir containing public headers, and
            - `src` subdir contains sources and private headers.
        #. There is only one deliverable:
            - Kind is the value of the `native` attribute, and
            - Name is the value of the `deliverable` attribute if it is
              specified, otherwise it is derived from the `name` of the project.
        #. All source files are supported and necessary to build the deliverable.
        #. All `include_dirs` and `libs` provided by build dependencies are
           necessary to build the deliverable.
        #. The deliverable and the public headers are intended for distribution.

    Attributes
        native : {'static_lib', 'shared_lib'}
            Kind of the deliverable.
            Depending on the value, the necessary flags will be prepended to
            `cflags` and `ldflags` automatically.
        deliverable : str, optional
            Name of the deliverable. By default, it is derived from the `name`
            of the project.
    """
    include = 'include'
    src = 'src'
    # Maps the 'native' attribute to how the deliverable file name is derived.
    _kinds = dict(
        static_lib=dict(
            target=lambda name: mx.add_lib_prefix(name) + ('.lib' if mx.is_windows() else '.a'),
        ),
        shared_lib=dict(
            target=lambda name: mx.add_lib_suffix(mx.add_lib_prefix(name)),
        ),
        executable=dict(
            target=mx.exe_suffix,
        ),
    )
    def __init__(self, suite, name, subDir, srcDirs, deps, workingSets, d, kind, **kwargs):
        self.deliverable = kwargs.pop('deliverable', name.split('.')[-1])
        self.toolchain = kwargs.pop('toolchain', 'mx:DEFAULT_NINJA_TOOLCHAIN')
        if srcDirs:
            raise mx.abort('"sourceDirs" is not supported for default native projects')
        srcDirs += [self.include, self.src]
        super(DefaultNativeProject, self).__init__(suite, name, subDir, srcDirs, deps, workingSets, d, **kwargs)
        try:
            self._kind = self._kinds[kind]
        except KeyError:
            raise mx.abort('"native" should be one of {}, but "{}" is given'.format(list(self._kinds.keys()), kind))
        include_dir = mx.join(self.dir, self.include)
        # The public header directory must not contain subdirectories.
        if next(os.walk(include_dir))[1]:
            raise mx.abort('include directory must have a flat structure')
        self.include_dirs = [include_dir]
        if kind == 'static_lib':
            self.libs = [mx.join(self.out_dir, mx.get_arch(), self._target)]
        self.buildDependencies.append(self.toolchain)
    def resolveDeps(self):
        super(DefaultNativeProject, self).resolveDeps()
        # Resolve the toolchain name into an actual mx distribution.
        self.toolchain = mx.distribution(self.toolchain, context=self)
        if not isinstance(self.toolchain, mx.AbstractDistribution) or not self.toolchain.get_output():
            raise mx.abort("Cannot generate manifest: the specified toolchain ({}) must be an AbstractDistribution that returns a value for get_output".format(self.toolchain), context=self)
    @property
    def _target(self):
        # Platform-specific file name of the deliverable.
        return self._kind['target'](self.deliverable)
    @property
    def cflags(self):
        default_cflags = []
        if self._kind == self._kinds['shared_lib']:
            default_cflags += dict(
                windows=['-MD'],
            ).get(mx.get_os(), ['-fPIC'])
        if mx.is_linux() or mx.is_darwin():
            # Do not leak host paths via dwarf debuginfo
            def add_debug_prefix(prefix_dir):
                def quote(path):
                    return '"{}"'.format(path) if ' ' in path else path
                return '-fdebug-prefix-map={}={}'.format(quote(prefix_dir), quote(mx.basename(prefix_dir)))
            default_cflags += [add_debug_prefix(self.suite.vc_dir)]
            default_cflags += [add_debug_prefix(_get_target_jdk().home)]
            default_cflags += ['-gno-record-gcc-switches']
        return default_cflags + super(DefaultNativeProject, self).cflags
    @property
    def ldflags(self):
        default_ldflags = []
        if self._kind == self._kinds['shared_lib']:
            default_ldflags += dict(
                darwin=['-dynamiclib', '-undefined', 'dynamic_lookup'],
                windows=['-dll'],
            ).get(mx.get_os(), ['-shared', '-fPIC'])
        return default_ldflags + super(DefaultNativeProject, self).ldflags
    @property
    def h_files(self):
        return self._source['files'].get('.h', [])
    @property
    def c_files(self):
        return self._source['files'].get('.c', [])
    @property
    def cxx_files(self):
        return self._source['files'].get('.cc', [])
    @property
    def asm_sources(self):
        return self._source['files'].get('.S', [])
    def generate_manifest(self, path):
        """Write a complete build.ninja for the single deliverable to *path*."""
        unsupported_source_files = list(set(self._source['files'].keys()) - {'.h', '.c', '.cc', '.S'})
        if unsupported_source_files:
            mx.abort('{} source files are not supported by default native projects'.format(unsupported_source_files))
        with NinjaManifestGenerator(self, open(path, 'w')) as gen:
            gen.comment("Toolchain configuration")
            gen.include(mx.join(self.toolchain.get_output(), 'toolchain.ninja'))
            gen.newline()
            gen.variables(cflags=[mx_subst.path_substitutions.substitute(cflag) for cflag in self.cflags])
            if self._kind != self._kinds['static_lib']:
                gen.variables(
                    ldflags=[mx_subst.path_substitutions.substitute(ldflag) for ldflag in self.ldflags],
                    ldlibs=self.ldlibs,
                )
            gen.include_dirs(collections.OrderedDict.fromkeys(
                # remove the duplicates while maintaining the ordering
                [mx.dirname(h_file) for h_file in self.h_files] + list(itertools.chain.from_iterable(
                    getattr(d, 'include_dirs', []) for d in self.buildDependencies))
            ).keys())
            gen.comment('Compiled project sources')
            object_files = [gen.cc(f) for f in self.c_files]
            gen.newline()
            object_files += [gen.cxx(f) for f in self.cxx_files]
            gen.newline()
            object_files += [gen.asm(f) for f in self.asm_sources]
            gen.newline()
            gen.comment('Project deliverable')
            if self._kind == self._kinds['static_lib']:
                gen.ar(self._target, object_files)
            else:
                # Use the C++ linker whenever any C++ sources are present.
                link = gen.linkxx if self.cxx_files else gen.link
                dep_libs = list(itertools.chain.from_iterable(getattr(d, 'libs', []) for d in self.buildDependencies))
                link(self._target, object_files + dep_libs)
    def _archivable_results(self, target_arch, use_relpath, single):
        # Yield (absolute file path, path inside archive) for the deliverable
        # and, unless 'single', for every public header.
        def result(base_dir, file_path):
            assert not mx.isabs(file_path)
            archive_path = file_path if use_relpath else mx.basename(file_path)
            return mx.join(base_dir, file_path), archive_path
        yield result(mx.join(self.out_dir, target_arch), self._target)
        if not single:
            for header in os.listdir(mx.join(self.dir, self.include)):
                yield result(self.dir, mx.join(self.include, header))
|
graalvm/mx
|
mx_native.py
|
Python
|
gpl-2.0
| 27,500
|
[
"VisIt"
] |
5475b8977bec7b64bffe933f769c48c6c6e5087dbc0225c5cb77ce54df97704e
|
#!/usr/bin/env python
"""
This code demonstrates some of the features of authkit.authorize.
Start the server with::
python authorize.py
Then visit http://localhost:8080/ and you should see the output from the
``index()`` method which invites you to try some of the links.
Each method linked to is implemented using a different means of checking
the permission.
In the ``__call__`` method, the code which implements the permission
attribute checking also demonstrates the use of authorize ``middleware``.
If you sign in with a user other than ``james``, you will be signed in
but denied access to the resources.
Close your browser to clear the HTTP authentication cache and try the
example again.
"""
from authkit.permissions import UserIn
from authkit.authorize import authorized, authorize, PermissionError
from authkit.authorize import middleware as authorize_middleware
from paste import httpexceptions
class NoSuchActionError(httpexceptions.HTTPNotFound):
    """Raised (rendered as a 404) when the request path maps to no handler method."""
    pass
class AuthorizeExampleApp:
    """WSGI application demonstrating three ways of checking permissions with
    authkit.authorize: a mid-method check, a decorator, and a permission
    attribute handled via authorize middleware in __call__.
    """
    def __call__(self, environ, start_response):
        # Dispatch '/name' to the method 'name'; '/' goes to index().
        if environ['PATH_INFO'] == '/':
            method = 'index'
        else:
            method = environ['PATH_INFO'].split('/')[1]
        if not hasattr(self, method):
            raise NoSuchActionError('No such method')
        app = getattr(self,method)
        # This facilitates an alternative way you might want to check permissions
        # rather than using an authorize() decorator
        if hasattr(app, 'permission'):
            app = authorize_middleware(app, app.permission)
        return app(environ, start_response)
    def index(self, environ, start_response):
        """Unprotected landing page listing the three protected links."""
        start_response('200 OK', [('Content-type','text/html')])
        return ['''
            <html>
            <head>
            <title>AuthKit Authorize Example</title>
            </head>
            <body>
            <h1>Authorize Example</h1>
            <p>Try the following links. You should only be able to sign
            in as user <tt>james</tt> with the password the same as the
            username.</p>
            <ul>
            <li><a href="/mid_method_test">Mid Method</a></li>
            <li><a href="/decorator_test">Decorator</a></li>
            <li><a href="/attribute_test">Attribute (middleware)</a></li>
            </ul>
            <p>Once you have signed in you will need to close your
            browser to clear the authentication cache.</p>
            </body>
            </html>
        ''']
    def mid_method_test(self, environ, start_response):
        """Authorize using a mid-method permissions check"""
        if authorized(environ, UserIn(users=['james'])):
            start_response('200 OK', [('Content-type','text/html')])
            return ['Access granted to /mid_method_test']
        else:
            start_response('200 OK', [('Content-type','text/html')])
            return ['User is not authorized']
    @authorize(UserIn(users=['james']))
    def decorator_test(self, environ, start_response):
        """Authorize using a decorator"""
        start_response('200 OK', [('Content-type','text/html')])
        return ['Access granted to /decorator_test']
    def attribute_test(self, environ, start_response):
        """Authorize using a permission attribute"""
        start_response('200 OK', [('Content-type','text/html')])
        return ['Access granted to /attribute_test']
    # Picked up by __call__, which wraps the handler in authorize middleware.
    attribute_test.permission = UserIn(users=['james'])
# Entry point: wire up HTTP basic authentication and serve the example app.
# Fix: the trailing Python 2 print statement (`print """..."""`) was a
# SyntaxError under Python 3; the parenthesized print("""...""") form behaves
# identically on both Python 2 and Python 3 for a single argument.
if __name__ == '__main__':
    from paste.httpserver import serve
    from authkit.authenticate import middleware
    def valid(environ, username, password):
        """
        Sample, very insecure validation function
        """
        return username == password
    app = httpexceptions.make_middleware(AuthorizeExampleApp())
    app = middleware(
        app,
        setup_method='basic',
        basic_realm='Test Realm',
        basic_authenticate_function=valid
    )
    print("""
Clear the HTTP authentication first by closing your browser if you have been
testing other basic authentication examples on the same port.
You will be able to sign in as any user as long as the password is the same as
the username, but all users apart from `james' will be denied access to the
resources.
""")
    serve(app, host='0.0.0.0', port=8080)
|
nakato/AuthKit
|
examples/authorize.py
|
Python
|
mit
| 4,360
|
[
"VisIt"
] |
487f3d793ba97502509780a0c76d08ca5f1f653a3f05b48afe0e5f06dd094186
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import warnings
from unittest import mock
import pytest
import numpy as np
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
SimplexLSQFitter, SLSQPLSQFitter, LinearLSQFitter, LevMarLSQFitter,
JointFitter, Fitter, FittingWithOutlierRemoval)
from astropy.utils import NumpyRNGContext
from astropy.utils.data import get_pkg_data_filename
from astropy.stats import sigma_clip
from astropy.utils.exceptions import AstropyUserWarning
from astropy.modeling.fitting import populate_entry_points
from . import irafutil
# Optional dependencies: several fitters require scipy.optimize.
try:
    from scipy import optimize
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False
# pkg_resources is needed by the entry-point registration tests.
try:
    from pkg_resources import EntryPoint
    HAS_PKG = True
except ImportError:
    HAS_PKG = False
# Non-linear fitters exercised by the generic fitter tests.
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
# Fixed seed so randomized tests are reproducible.
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
    """Tests for 2D polynomial fitting."""
    def setup_class(self):
        # 2nd-degree 2D polynomial model and a 5x5 evaluation grid.
        self.model = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]
        def poly2(x, y):
            return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
        # Noise-free data generated from known coefficients [1..6].
        self.z = poly2(self.x, self.y)
    def test_poly2D_fitting(self):
        # Compare the linear fitter against a direct lstsq solution built
        # from the model's own derivative (design) matrix.
        fitter = LinearLSQFitter()
        v = self.model.fit_deriv(x=self.x, y=self.y)
        p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
        new_model = fitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model.parameters, p)
    def test_eval(self):
        # The fitted model must reproduce the noise-free input data.
        fitter = LinearLSQFitter()
        new_model = fitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model(self.x, self.y), self.z)
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_polynomial2D_nonlinear_fitting(self):
        # Start from perturbed parameters; the non-linear fitter should
        # recover the true coefficients (and warn that the model is linear).
        self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
        nlfitter = LevMarLSQFitter()
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            new_model = nlfitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D:
    """
    Tests 2D Chebyshev polynomial fitting

    Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
    Fit z using a ICheb2D model
    Evaluate the ICheb2D polynomial and compare with the initial z
    """
    def setup_class(self):
        # Reference data: a default-coefficient 2D polynomial on a 5x5 grid.
        self.pmodel = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]
        self.z = self.pmodel(self.x, self.y)
        self.cheb2 = models.Chebyshev2D(2, 2)
        self.fitter = LinearLSQFitter()
    def test_default_params(self):
        # Fit data generated by the Chebyshev model itself; the recovered
        # parameters must match the precomputed reference values in `p`.
        self.cheb2.parameters = np.arange(9)
        p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
                      128.])
        z = self.cheb2(self.x, self.y)
        model = self.fitter(self.cheb2, self.x, self.y, z)
        assert_almost_equal(model.parameters, p)
    def test_poly2D_cheb2D(self):
        # A Chebyshev fit of polynomial data should reproduce the data.
        model = self.fitter(self.cheb2, self.x, self.y, self.z)
        z1 = model(self.x, self.y)
        assert_almost_equal(self.z, z1)
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_chebyshev2D_nonlinear_fitting(self):
        # The non-linear fitter should recover the true coefficients from a
        # perturbed starting point (warning expected: model is linear).
        cheb2d = models.Chebyshev2D(2, 2)
        cheb2d.parameters = np.arange(9)
        z = cheb2d(self.x, self.y)
        cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
        nlfitter = LevMarLSQFitter()
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            model = nlfitter(cheb2d, self.x, self.y, z)
        assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
                        atol=10**-9)
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_chebyshev2D_nonlinear_fitting_with_weights(self):
        # Same as above but exercising the `weights` keyword; all-ones
        # weights must not change the solution.
        cheb2d = models.Chebyshev2D(2, 2)
        cheb2d.parameters = np.arange(9)
        z = cheb2d(self.x, self.y)
        cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
        nlfitter = LevMarLSQFitter()
        weights = np.ones_like(self.y)
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
        assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
                        atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
    """
    Tests the joint fitting routine using 2 gaussian models
    """
    def setup_class(self):
        """
        Create 2 gaussian models and some data with noise.
        Create a fitter for the two models keeping the amplitude parameter
        common for the two models.
        """
        self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
        self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
        # The joint fitter ties 'amplitude' across both models, with 9.8
        # as the initial value of the shared parameter.
        self.jf = JointFitter([self.g1, self.g2],
                              {self.g1: ['amplitude'],
                               self.g2: ['amplitude']}, [9.8])
        self.x = np.arange(10, 20, .1)
        y1 = self.g1(self.x)
        y2 = self.g2(self.x)
        # Identical noise realization added to both curves (seeded RNG for
        # reproducibility).
        with NumpyRNGContext(_RANDOM_SEED):
            n = np.random.randn(100)
        self.ny1 = y1 + 2 * n
        self.ny2 = y2 + 2 * n
        # Perform the joint fit once; tests inspect the results.
        self.jf(self.x, self.ny1, self.x, self.ny2)
    def test_joint_parameter(self):
        """
        Tests that the amplitude of the two models is the same
        """
        assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
        assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
    def test_joint_fitter(self):
        """
        Tests the fitting routine with similar procedure.
        Compares the fitted parameters.
        """
        p1 = [14.9, .3]
        p2 = [13, .4]
        A = 9.8
        p = np.r_[A, p1, p2]
        # Reference implementation: one shared amplitude followed by
        # per-model (mean, stddev), fitted directly with scipy's leastsq.
        def model(A, p, x):
            return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
        def errfunc(p, x1, y1, x2, y2):
            return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
                                  model(p[0], p[3:], x2) - y2])
        coeff, _ = optimize.leastsq(errfunc, p,
                                    args=(self.x, self.ny1, self.x, self.ny2))
        assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
    """Tests for `LinearLSQFitter`: compound-model rejection, Chebyshev
    fitting of real-world data, model sets, fixed parameters, and masked
    values.
    """
    def test_compound_model_raises_error(self):
        """Test that if an user tries to use a compound model, raises an error"""
        with pytest.raises(ValueError) as excinfo:
            init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
            init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
            init_model_comp = init_model1 + init_model2
            x = np.arange(10)
            y = init_model_comp(x, model_set_axis=False)
            fitter = LinearLSQFitter()
            _ = fitter(init_model_comp, x, y)
        assert "Model must be simple, not compound" in str(excinfo.value)
    def test_chebyshev1D(self):
        """Tests fitting a 1D Chebyshev polynomial to some real world data."""
        test_file = get_pkg_data_filename(os.path.join('data',
                                                       'idcompspec.fits'))
        with open(test_file) as f:
            lines = f.read()
        # Parse the IRAF "identify" record from the fixture and fit a
        # Chebyshev of the recorded order over the recorded domain.
        reclist = lines.split('begin')
        record = irafutil.IdentifyRecord(reclist[1])
        coeffs = record.coeff
        order = int(record.fields['order'])
        initial_model = models.Chebyshev1D(order - 1,
                                           domain=record.get_range())
        fitter = LinearLSQFitter()
        fitted_model = fitter(initial_model, record.x, record.z)
        assert_allclose(fitted_model.parameters, np.array(coeffs),
                        rtol=10e-2)
    def test_linear_fit_model_set(self):
        """Tests fitting multiple models simultaneously."""
        init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
        x = np.arange(10)
        y_expected = init_model(x, model_set_axis=False)
        assert y_expected.shape == (2, 10)
        # Add a bit of random noise
        with NumpyRNGContext(_RANDOM_SEED):
            y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)
        assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
                        rtol=1e-1)
    def test_linear_fit_2d_model_set(self):
        """Tests fitted multiple 2-D models simultaneously."""
        init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
        x = np.arange(10)
        y = np.arange(10)
        z_expected = init_model(x, y, model_set_axis=False)
        assert z_expected.shape == (2, 10)
        # Add a bit of random noise
        with NumpyRNGContext(_RANDOM_SEED):
            z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, z)
        assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
                        rtol=1e-1)
    def test_linear_fit_fixed_parameter(self):
        """
        Tests fitting a polynomial model with a fixed parameter (issue #6135).
        """
        init_model = models.Polynomial1D(degree=2, c1=1)
        init_model.c1.fixed = True
        x = np.arange(10)
        y = 2 + x + 0.5*x*x
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)
        # c1 must stay at its fixed value of 1 while c0 and c2 are fitted.
        assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
    def test_linear_fit_model_set_fixed_parameter(self):
        """
        Tests fitting a polynomial model set with a fixed parameter (#6135).
        """
        init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
        init_model.c1.fixed = True
        x = np.arange(10)
        yy = np.array([2 + x + 0.5*x*x, -2*x])
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, yy)
        # Each model in the set keeps its own fixed c1 value.
        assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
        assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
        assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
    def test_linear_fit_2d_model_set_fixed_parameters(self):
        """
        Tests fitting a 2d polynomial model set with fixed parameters (#6135).
        """
        init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
                                         n_models=2,
                                         fixed={'c1_0': True, 'c0_1': True})
        x, y = np.mgrid[0:5, 0:5]
        zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, zz)
        assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
                        atol=1e-14)
    def test_linear_fit_model_set_masked_values(self):
        """
        Tests model set fitting with masked value(s) (#4824, #6819).
        """
        # NB. For single models, there is an equivalent doctest.
        init_model = models.Polynomial1D(degree=1, n_models=2)
        x = np.arange(10)
        y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
        y[0, 7] = 100.  # throw off fit coefficients if unmasked
        y.mask[0, 7] = True
        y[1, 1:3] = -100.
        y.mask[1, 1:3] = True
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)
        # Masked points must be ignored, recovering the exact coefficients.
        assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
        assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
    def test_linear_fit_2d_model_set_masked_values(self):
        """
        Tests 2D model set fitting with masked value(s) (#4824, #6819).
        """
        init_model = models.Polynomial2D(1, n_models=2)
        x, y = np.mgrid[0:5, 0:5]
        z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
                               mask=np.zeros_like([x, x]))
        z[0, 3, 1] = -1000.  # throw off fit coefficients if unmasked
        z.mask[0, 3, 1] = True
        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, z)
        assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
        assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
        assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
    """Tests non-linear least squares fitting and the SLSQP algorithm."""
    def setup_class(self):
        # True Gaussian parameters: [amplitude, mean, stddev].
        self.initial_values = [100, 5, 1]
        self.xdata = np.arange(0, 10, 0.1)
        sigma = 4. * np.ones_like(self.xdata)
        # Seeded noise for reproducible fits.
        with NumpyRNGContext(_RANDOM_SEED):
            yerror = np.random.normal(0, sigma)
        def func(p, x):
            return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
        self.ydata = func(self.initial_values, self.xdata) + yerror
        self.gauss = models.Gaussian1D(100, 5, stddev=1)
    def test_estimated_vs_analytic_deriv(self):
        """
        Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`.
        """
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata)
        g1e = models.Gaussian1D(100, 5.0, stddev=1)
        efitter = LevMarLSQFitter()
        # estimate_jacobian=True forces numerical derivatives; the results
        # should agree closely with the analytic-derivative fit.
        emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
        assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
    def test_estimated_vs_analytic_deriv_with_weights(self):
        """
        Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`.
        """
        weights = 1.0 / (self.ydata / 10.)
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
        g1e = models.Gaussian1D(100, 5.0, stddev=1)
        efitter = LevMarLSQFitter()
        emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
        assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
    def test_with_optimize(self):
        """
        Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
        """
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True)
        # Reference fit of the same Gaussian with raw scipy.
        def func(p, x):
            return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
        def errfunc(p, x, y):
            return func(p, x) - y
        result = optimize.leastsq(errfunc, self.initial_values,
                                  args=(self.xdata, self.ydata))
        assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
    def test_with_weights(self):
        """
        Tests results from `LevMarLSQFitter` with weights.
        """
        # part 1: weights are equal to 1
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True)
        withw = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True, weights=np.ones_like(self.xdata))
        assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
        # part 2: weights are 0 or 1 (effectively, they are a mask)
        weights = np.zeros_like(self.xdata)
        weights[::2] = 1.
        mask = weights >= 1.
        # Fitting with zero weights must match fitting the masked subset.
        model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
                       estimate_jacobian=True)
        withw = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True, weights=weights)
        assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
    @pytest.mark.filterwarnings(r'ignore:.* Maximum number of iterations reached')
    @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
    @pytest.mark.parametrize('fitter_class', fitters)
    def test_fitter_against_LevMar(self, fitter_class):
        """Tests results from non-linear fitters against `LevMarLSQFitter`."""
        levmar = LevMarLSQFitter()
        fitter = fitter_class()
        # This emits a warning from fitter that we need to ignore with
        # pytest.mark.filterwarnings above.
        new_model = fitter(self.gauss, self.xdata, self.ydata)
        model = levmar(self.gauss, self.xdata, self.ydata)
        assert_allclose(model.parameters, new_model.parameters,
                        rtol=10 ** (-4))
    @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
    def test_LSQ_SLSQP_with_constraints(self):
        """
        Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
        constraints.
        """
        g1 = models.Gaussian1D(100, 5, stddev=1)
        # Fix the mean so both fitters optimize only amplitude and stddev.
        g1.mean.fixed = True
        fitter = LevMarLSQFitter()
        fslsqp = SLSQPLSQFitter()
        slsqp_model = fslsqp(g1, self.xdata, self.ydata)
        model = fitter(g1, self.xdata, self.ydata)
        assert_allclose(model.parameters, slsqp_model.parameters,
                        rtol=10 ** (-4))
    def test_simplex_lsq_fitter(self):
        """A basic test for the `SimplexLSQ` fitter."""
        class Rosenbrock(Fittable2DModel):
            a = Parameter()
            b = Parameter()
            @staticmethod
            def evaluate(x, y, a, b):
                return (a - x) ** 2 + b * (y - x ** 2) ** 2
        x = y = np.linspace(-3.0, 3.0, 100)
        with NumpyRNGContext(_RANDOM_SEED):
            z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
            z += np.random.normal(0., 0.1, size=z.shape)
        fitter = SimplexLSQFitter()
        r_i = Rosenbrock(1, 100)
        r_f = fitter(r_i, x, y, z)
        assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
    def test_param_cov(self):
        """
        Tests that the 'param_cov' fit_info entry gets the right answer for
        *linear* least squares, where the answer is exact
        """
        a = 2
        b = 100
        with NumpyRNGContext(_RANDOM_SEED):
            x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covarience is
            # non-negligible
            y = x*a + b + np.random.randn(len(x))
        # first compute the ordinary least squares covariance matrix
        X = np.vstack([x, np.ones(len(x))]).T
        beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
        s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /
              (len(y) - len(beta)))
        olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
        # now do the non-linear least squares fit
        mod = models.Linear1D(a, b)
        fitter = LevMarLSQFitter()
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            fmod = fitter(mod, x, y)
        # Fitted parameters and covariance must match the exact OLS answer.
        assert_allclose(fmod.parameters, beta.ravel())
        assert_allclose(olscov, fitter.fit_info['param_cov'])
@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint:
    """Tests population of fitting with entry point fitters"""
    def setup_class(self):
        # Sentinel raised when a test's expected warning never fired.
        self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")
    def successfulimport(self):
        # This should work
        class goodclass(Fitter):
            __name__ = "GoodClass"
        return goodclass
    def raiseimporterror(self):
        # This should fail as it raises an Import Error
        raise ImportError
    def returnbadfunc(self):
        def badfunc():
            # This should import but it should fail type check
            pass
        return badfunc
    def returnbadclass(self):
        # This should import But it should fail subclass type check
        class badclass:
            pass
        return badclass
    def test_working(self):
        """This should work fine"""
        mock_entry_working = mock.create_autospec(EntryPoint)
        mock_entry_working.name = "Working"
        mock_entry_working.load = self.successfulimport
        populate_entry_points([mock_entry_working])
    def test_import_error(self):
        """This raises an import error on load to test that it is handled correctly"""
        # filterwarnings('error') turns the expected AstropyUserWarning
        # into a raised exception we can catch and inspect.
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                mock_entry_importerror = mock.create_autospec(EntryPoint)
                mock_entry_importerror.name = "IErr"
                mock_entry_importerror.load = self.raiseimporterror
                populate_entry_points([mock_entry_importerror])
            except AstropyUserWarning as w:
                if "ImportError" in w.args[0]:  # any error for this case should have this in it.
                    pass
                else:
                    raise w
            else:
                raise self.exception_not_thrown
    def test_bad_func(self):
        """This returns a function which fails the type check"""
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                mock_entry_badfunc = mock.create_autospec(EntryPoint)
                mock_entry_badfunc.name = "BadFunc"
                mock_entry_badfunc.load = self.returnbadfunc
                populate_entry_points([mock_entry_badfunc])
            except AstropyUserWarning as w:
                if "Class" in w.args[0]:  # any error for this case should have this in it.
                    pass
                else:
                    raise w
            else:
                raise self.exception_not_thrown
    def test_bad_class(self):
        """This returns a class which doesn't inherit from fitter """
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                mock_entry_badclass = mock.create_autospec(EntryPoint)
                mock_entry_badclass.name = "BadClass"
                mock_entry_badclass.load = self.returnbadclass
                populate_entry_points([mock_entry_badclass])
            except AstropyUserWarning as w:
                if 'modeling.Fitter' in w.args[0]:  # any error for this case should have this in it.
                    pass
                else:
                    raise w
            else:
                raise self.exception_not_thrown
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
    """Outlier-removal fitting of a 1D Gaussian with several fitters."""
    def setup_class(self):
        self.x = np.linspace(-5., 5., 200)
        # True (amplitude, mean, stddev) of the reference Gaussian.
        self.model_params = (3.0, 1.3, 0.8)
        def func(p, x):
            return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
        self.y = func(self.model_params, self.x)
    @pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
    @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
    def test_with_fitters_and_sigma_clip(self):
        import scipy.stats as stats
        np.random.seed(0)
        # Bernoulli mask selects ~25% of points to receive large "salt"
        # noise; NOTE this mutates the shared self.y in place.
        c = stats.bernoulli.rvs(0.25, size=self.x.shape)
        self.y += (np.random.normal(0., 0.2, self.x.shape) +
                   c*np.random.normal(3.0, 5.0, self.x.shape))
        g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
        # test with Levenberg-Marquardt Least Squares fitter
        fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        fitted_model, _ = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
        # test with Sequential Least Squares Programming fitter
        fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        fitted_model, _ = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
        # test with Simplex LSQ fitter
        fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        fitted_model, _ = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
    """Outlier-removal fitting of a 2D Gaussian with several fitters."""
    def setup_class(self):
        self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
        # True (amplitude, x_mean, y_mean, x_stddev, y_stddev).
        self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
        def Gaussian_2D(p, pos):
            return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
                               0.5*(pos[1] - p[1])**2 / p[3]**2)
        self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Computes the centroid of the data as the initial guess for the
        center position, and the data value there as the amplitude guess.

        Fix: this text was previously a no-op string literal placed after
        the first two statements of the body; it is now a real docstring.

        Parameters: ``data`` is the 2D intensity array; ``pos`` is an
        array ``[y, x]`` of coordinate grids matching ``data``'s shape.
        Returns a tuple ``(amplitude, x_mean, y_mean)``.
        """
        y = pos[0]
        x = pos[1]
        # Intensity-weighted centroid in world coordinates.
        wx = x * data
        wy = y * data
        total_intensity = np.sum(data)
        x_mean = np.sum(wx) / total_intensity
        y_mean = np.sum(wy) / total_intensity
        # Convert the centroid from world coordinates to pixel indices so
        # the amplitude can be read off the data array at that position.
        x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
        y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
        x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
        y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
        amplitude = data[y_pos][x_pos]
        return amplitude, x_mean, y_mean
    @pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
    @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
    def test_with_fitters_and_sigma_clip(self):
        import scipy.stats as stats
        np.random.seed(0)
        # Bernoulli mask selects ~25% of pixels to receive large "salt"
        # noise; NOTE this mutates the shared self.z in place.
        c = stats.bernoulli.rvs(0.25, size=self.z.shape)
        self.z += (np.random.normal(0., 0.2, self.z.shape) +
                   c*np.random.normal(self.z, 2.0, self.z.shape))
        guess = self.initial_guess(self.z, np.array([self.y, self.x]))
        g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
                                    y_mean=guess[2], x_stddev=0.75,
                                    y_stddev=1.25)
        # test with Levenberg-Marquardt Least Squares fitter
        fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.)
        fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
        # test with Sequential Least Squares Programming fitter
        fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
                                        sigma=3.)
        fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
        # test with Simplex LSQ fitter
        fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.)
        fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
    """Test model set fitting with outlier removal (issue #6819)"""
    xdata = np.arange(10)
    # Two rows: a line and a quadratic, with one deviant point in row 1.
    ydata = np.array([2.5*xdata - 4, 2*xdata*xdata + xdata + 10])
    ydata[1, 5] = -1000  # outlier
    fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
                                       sigma_clip, sigma=2.5, niter=3,
                                       cenfunc=np.ma.mean, stdfunc=np.ma.std)
    poly_set = models.Polynomial1D(2, n_models=2)
    poly_set, filt_y = fitter(poly_set, xdata, ydata)
    # The clipped fit must recover the exact generating coefficients.
    for attr, expected in (('c0', [-4., 10.]),
                           ('c1', [2.5, 1.]),
                           ('c2', [0., 2.])):
        assert_allclose(getattr(poly_set, attr), expected, atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
    """Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
    yg, xg = np.mgrid[0:5, 0:5]
    # Stack two planes along a trailing model-set axis (axis 2).
    zdata = np.rollaxis(np.array([xg+yg, 1-0.1*xg+0.2*yg]), 0, 3)
    zdata[3, 3:5, 0] = 100.  # outliers
    fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
                                       sigma_clip, sigma=2.5, niter=3,
                                       cenfunc=np.ma.mean, stdfunc=np.ma.std)
    poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
    poly_set, filt_z = fitter(poly_set, xg, yg, zdata)
    # The clipped fit must recover the exact generating coefficients.
    for attr, expected in (('c0_0', [[[0., 1.]]]),
                           ('c1_0', [[[1., -0.1]]]),
                           ('c0_1', [[[1., 0.2]]])):
        assert_allclose(getattr(poly_set, attr), expected, atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
    """Issue #7020 """
    def setup_class(self):
        # values of x,y not important as we fit y(x,y) = p0 model here
        self.y, self.x = np.mgrid[0:20, 0:20]
        self.z = np.mod(self.x + self.y, 2) * 2 - 1  # -1,1 chessboard
        self.weights = np.mod(self.x + self.y, 2) * 2 + 1  # 1,3 chessboard
        self.z[0, 0] = 1000.0  # outlier
        self.z[0, 1] = 1000.0  # outlier
        # Flattened copies for the 1D variants of the same tests.
        self.x1d = self.x.flatten()
        self.z1d = self.z.flatten()
        self.weights1d = self.weights.flatten()
    def test_1d_without_weights_without_sigma_clip(self):
        model = models.Polynomial1D(0)
        fitter = LinearLSQFitter()
        fit = fitter(model, self.x1d, self.z1d)
        assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
    def test_1d_without_weights_with_sigma_clip(self):
        model = models.Polynomial1D(0)
        fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        fit, mask = fitter(model, self.x1d, self.z1d)
        # Exactly the two planted outliers should be rejected.
        assert((~mask).sum() == self.z1d.size - 2)
        assert(mask[0] and mask[1])
        assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))  # with removed outliers mean is 0.0
    def test_1d_with_weights_without_sigma_clip(self):
        model = models.Polynomial1D(0)
        fitter = LinearLSQFitter()
        fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
        assert(fit.parameters[0] > 1.0)  # outliers pulled it high
    def test_1d_with_weights_with_sigma_clip(self):
        """smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
        model = models.Polynomial1D(0)
        fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
        assert(fit.parameters[0] > 10**(-2))  # weights pulled it > 0
        assert(fit.parameters[0] < 1.0)  # outliers didn't pull it out of [-1:1] because they had been removed
    def test_1d_set_with_common_weights_with_sigma_clip(self):
        """added for #6819 (1D model set with weights in common)"""
        model = models.Polynomial1D(0, n_models=2)
        fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        z1d = np.array([self.z1d, self.z1d])
        fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
        assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
    def test_1d_set_with_weights_with_sigma_clip(self):
        """1D model set with separate weights"""
        model = models.Polynomial1D(0, n_models=2)
        fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        z1d = np.array([self.z1d, self.z1d])
        weights = np.array([self.weights1d, self.weights1d])
        fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
        assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
    def test_2d_without_weights_without_sigma_clip(self):
        model = models.Polynomial2D(0)
        fitter = LinearLSQFitter()
        fit = fitter(model, self.x, self.y, self.z)
        assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
    def test_2d_without_weights_with_sigma_clip(self):
        model = models.Polynomial2D(0)
        fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        fit, mask = fitter(model, self.x, self.y, self.z)
        assert((~mask).sum() == self.z.size - 2)
        assert(mask[0, 0] and mask[0, 1])
        assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
    def test_2d_with_weights_without_sigma_clip(self):
        model = models.Polynomial2D(0)
        # Non-linear fitter used here to exercise weighted 2D fitting.
        fitter = LevMarLSQFitter()
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
        assert(fit.parameters[0] > 1.0)  # outliers pulled it high
    def test_2d_linear_with_weights_without_sigma_clip(self):
        model = models.Polynomial2D(0)
        # Same expectation as the LevMar variant above, but with the linear
        # fitter, to confirm it handles 2D weights equivalently (#7035).
        fitter = LinearLSQFitter()
        fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
        assert(fit.parameters[0] > 1.0)  # outliers pulled it high
    def test_2d_with_weights_with_sigma_clip(self):
        """smoke test for #7020 - fails without fitting.py patch because
        weights does not propagate"""
        model = models.Polynomial2D(0)
        fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            fit, filtered = fitter(model, self.x, self.y, self.z,
                                   weights=self.weights)
        assert(fit.parameters[0] > 10**(-2))  # weights pulled it > 0
        assert(fit.parameters[0] < 1.0)  # outliers didn't pull it out of [-1:1] because they had been removed
    def test_2d_linear_with_weights_with_sigma_clip(self):
        """same as test above with a linear fitter."""
        model = models.Polynomial2D(0)
        fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                           niter=3, sigma=3.)
        fit, filtered = fitter(model, self.x, self.y, self.z,
                               weights=self.weights)
        assert(fit.parameters[0] > 10**(-2))  # weights pulled it > 0
        assert(fit.parameters[0] < 1.0)  # outliers didn't pull it out of [-1:1] because they had been removed
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
    """Regression test for issue #5737.

    LevMarLSQFitter must recover known parameters from lightly noisy data
    for both a non-linear (Gaussian2D) and a linear (Polynomial2D) model.
    """
    xgrid, ygrid = np.mgrid[0:21, 0:21]
    fitter = LevMarLSQFitter()
    with NumpyRNGContext(_RANDOM_SEED):
        noise = np.random.normal(0, 0.01, size=xgrid.shape)
    # Non-linear case: fit a perturbed starting Gaussian to noisy data.
    true_gauss = models.Gaussian2D(10, 10, 9, 2, 3)
    gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), xgrid, ygrid,
                  true_gauss(xgrid, ygrid) + noise)
    assert_allclose(gmod.parameters, true_gauss.parameters, atol=10 ** (-2))
    # Linear case: a cubic 2D polynomial (fitter warns that it is linear).
    true_poly = models.Polynomial2D(3)
    true_poly.parameters = np.arange(10)/1.2
    with pytest.warns(AstropyUserWarning,
                      match=r'Model is linear in parameters'):
        pmod = fitter(models.Polynomial2D(3), xgrid, ygrid,
                      true_poly(xgrid, ygrid) + noise)
    assert_allclose(pmod.parameters, true_poly.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
    """Regression test for #7035"""
    xv, yv = np.mgrid[0:21, 0:21]
    lsq = LinearLSQFitter()
    with NumpyRNGContext(_RANDOM_SEED):
        noise = np.random.normal(0, 0.01, size=xv.shape)
    # Known cubic 2D polynomial; fit its noisy evaluation with
    # inverse-variance weights and check the coefficients come back.
    truth = models.Polynomial2D(3)
    truth.parameters = np.arange(10)/1.2
    fitted = lsq(models.Polynomial2D(3), xv, yv, truth(xv, yv) + noise,
                 weights=noise**(-2))
    assert_allclose(fitted.parameters, truth.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights_flat():
    """Same as the above #7035 test but with flattened inputs"""
    gx, gy = np.mgrid[0:21, 0:21]
    # Flatten the coordinate grids to exercise the 1D-input code path.
    xv, yv = gx.flatten(), gy.flatten()
    lsq = LinearLSQFitter()
    with NumpyRNGContext(_RANDOM_SEED):
        noise = np.random.normal(0, 0.01, size=xv.shape)
    truth = models.Polynomial2D(3)
    truth.parameters = np.arange(10)/1.2
    fitted = lsq(models.Polynomial2D(3), xv, yv, truth(xv, yv) + noise,
                 weights=noise**(-2))
    assert_allclose(fitted.parameters, truth.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
def test_fitters_interface():
    """
    Test that ``**kwargs`` work with all optimizers.
    This is a basic smoke test.
    """
    model = models.Gaussian1D(10, 4, .3)
    xdata = np.arange(21)
    ydata = model(xdata)
    # Options shared by all three optimizers; each fitter additionally
    # accepts (or rejects) its own specific keywords.
    common = {'maxiter': 77, 'acc': 1e-6}
    _ = SLSQPLSQFitter()(model, xdata, ydata,
                         verblevel=1, epsilon=1e-2, **common)
    _ = SimplexLSQFitter()(model, xdata, ydata, verblevel=1, **common)
    # LevMar takes epsilon but not verblevel.
    _ = LevMarLSQFitter()(model, xdata, ydata, epsilon=1e-2, **common)
def test_fitting_with_outlier_removal_niter():
    """
    Test that FittingWithOutlierRemoval stops prior to reaching niter if the
    set of masked points has converged and correctly reports the actual number
    of iterations performed.
    """
    # 2 rows with some noise around a constant level and 1 deviant point:
    x = np.arange(25)
    with NumpyRNGContext(_RANDOM_SEED):
        y = np.random.normal(loc=10., scale=1., size=(2, 25))
    y[0, 14] = 100.
    # Fit 2 models with up to 5 iterations (should only take 2):
    fitter = FittingWithOutlierRemoval(
        fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,
        sigma_lower=3., sigma_upper=3., maxiters=1
    )
    model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
    # Confirm that only the deviant point was rejected, in 2 iterations:
    assert_equal(np.where(mask), [[0], [14]])
    assert fitter.fit_info['niter'] == 2
    # Refit just the first row without any rejection iterations, to ensure
    # there are no regressions for that special case:
    fitter = FittingWithOutlierRemoval(
        fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,
        sigma_lower=3., sigma_upper=3., maxiters=1
    )
    model, mask = fitter(models.Chebyshev1D(2), x, y[0])
    # Confirm that there were no iterations or rejected points:
    assert mask.sum() == 0
    assert fitter.fit_info['niter'] == 0
@pytest.mark.skipif('not HAS_SCIPY')
class TestFittingUncertanties:
    """
    Test that parameter covariance is calculated correctly for the fitters
    that do so (currently LevMarLSQFitter, LinearLSQFitter).
    """
    # NOTE(review): the class name keeps the historic 'Uncertanties' spelling;
    # renaming it would be cosmetic but could break external test selection.

    # Pairs of equivalent single models / 2-element model sets consumed by the
    # parametrized 1D test below (zipped positionally).
    example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
    example_1D_sets = [models.Polynomial1D(2, n_models=2, model_set_axis=False),
                       models.Linear1D(n_models=2, slope=[1., 1.], intercept=[0, 0])]

    def setup_class(self):
        # Shared abscissa plus random grids/noise. No seed is set here, so the
        # RNG state is presumably fixed elsewhere in the module — the printing
        # test below asserts specific numeric strings; confirm.
        self.x = np.arange(10)
        self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
        self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
        self.rand_grid = np.random.random(100).reshape(10, 10)
        self.rand = self.rand_grid[0]

    @pytest.mark.parametrize(('single_model', 'model_set'),
                             list(zip(example_1D_models, example_1D_sets)))
    def test_1d_models(self, single_model, model_set):
        """ Test that fitting uncertainties are computed correctly for 1D models
        and 1D model sets. Use covariance/stds given by LevMarLSQFitter as
        a benchmark since they are returned by the numpy fitter.
        """
        levmar_fitter = LevMarLSQFitter(calc_uncertainties=True)
        linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)

        # test 1D single models
        # fit single model w/ nonlinear fitter
        y = single_model(self.x) + self.rand
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            fit_model_levmar = levmar_fitter(single_model, self.x, y)
        cov_model_levmar = fit_model_levmar.cov_matrix.cov_matrix

        # fit single model w/ linlsq fitter
        fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
        cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix

        # check covariance, stds computed correctly computed
        assert_allclose(cov_model_linlsq, cov_model_levmar)
        assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
                        fit_model_linlsq.stds.stds)

        # now test 1D model sets
        # fit set of models w/ linear fitter
        y = model_set(self.x, model_set_axis=False) +\
            np.array([self.rand, self.rand])
        fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
        cov_1d_set_linlsq = [j.cov_matrix for j in
                             fit_1d_set_linlsq.cov_matrix]

        # make sure cov matrix from single model fit w/ levmar fitter matches
        # the cov matrix of first model in the set
        assert_allclose(cov_1d_set_linlsq[0], cov_model_levmar)
        assert_allclose(np.sqrt(np.diag(cov_1d_set_linlsq[0])),
                        fit_1d_set_linlsq.stds[0].stds)

    def test_2d_models(self):
        """
        Test that fitting uncertainties are computed correctly for 2D models
        and 2D model sets. Use covariance/stds given by LevMarLSQFitter as
        a benchmark since they are returned by the numpy fitter.
        """
        levmar_fitter = LevMarLSQFitter(calc_uncertainties=True)
        linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
        single_model = models.Polynomial2D(2, c0_0=2)
        model_set = models.Polynomial2D(degree=2, n_models=2, c0_0=[2, 3],
                                        model_set_axis=False)

        # fit single model w/ nonlinear fitter
        z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
        with pytest.warns(AstropyUserWarning,
                          match=r'Model is linear in parameters'):
            fit_model_levmar = levmar_fitter(single_model, self.x_grid,
                                             self.y_grid, z_grid)
        cov_model_levmar = fit_model_levmar.cov_matrix.cov_matrix

        # fit single model w/ nonlinear fitter
        fit_model_linlsq = linlsq_fitter(single_model, self.x_grid,
                                         self.y_grid, z_grid)
        cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
        assert_allclose(cov_model_levmar, cov_model_linlsq)
        assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
                        fit_model_linlsq.stds.stds)

        # fit 2d model set
        z_grid = model_set(self.x_grid, self.y_grid) + np.array((self.rand_grid,
                                                                 self.rand_grid))
        fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid,
                                          z_grid)
        cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]

        # make sure cov matrix from single model fit w/ levmar fitter matches
        # the cov matrix of first model in the set
        assert_allclose(cov_2d_set_linlsq[0], cov_model_levmar)
        assert_allclose(np.sqrt(np.diag(cov_2d_set_linlsq[0])),
                        fit_2d_set_linlsq.stds[0].stds)

    def test_covariance_std_printing_indexing(self, capsys):
        """
        Test printing methods and indexing.
        """
        # test str representation for Covariance/stds
        fitter = LinearLSQFitter(calc_uncertainties=True)
        mod = models.Linear1D()
        fit_mod = fitter(mod, self.x, mod(self.x)+self.rand)
        print(fit_mod.cov_matrix)
        captured = capsys.readouterr()
        assert "slope | 0.001" in captured.out
        assert "intercept| -0.006, 0.041" in captured.out

        print(fit_mod.stds)
        captured = capsys.readouterr()
        assert "slope | 0.038" in captured.out
        assert "intercept| 0.203" in captured.out

        # test 'pprint' for Covariance/stds
        print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
        captured = capsys.readouterr()
        assert "slope | 0.00144" in captured.out
        assert "intercept" not in captured.out

        print(fit_mod.stds.pprint(max_lines=1, round_val=5))
        captured = capsys.readouterr()
        assert "slope | 0.03799" in captured.out
        assert "intercept" not in captured.out

        # test indexing for Covariance class.
        assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix['slope', 'slope']

        # test indexing for stds class.
        assert fit_mod.stds[1] == fit_mod.stds['intercept']
|
dhomeier/astropy
|
astropy/modeling/tests/test_fitters.py
|
Python
|
bsd-3-clause
| 45,391
|
[
"Gaussian"
] |
7c38a1d4b42230278ad3332e22b7bdce258f4074f01994eec0a315a785550de7
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`elasticsearch.py`
ElasticSearch backend implementation.
"""
import logging
from elasticsearch import Elasticsearch
from slo_generator.constants import NO_DATA
LOGGER = logging.getLogger(__name__)

DEFAULT_DATE_FIELD = '@timestamp'


class ElasticsearchBackend:
    """Backend for querying metrics from ElasticSearch.

    Args:
        client (elasticsearch.ElasticSearch): Existing ES client.
        es_config (dict): ES client configuration.
    """

    def __init__(self, client=None, **es_config):
        self.client = client
        if self.client is None:
            self.client = Elasticsearch(**es_config)

    # pylint: disable=unused-argument
    def good_bad_ratio(self, timestamp, window, slo_config):
        """Query two timeseries, one containing 'good' events, one containing
        'bad' events.

        Args:
            timestamp (int): UNIX timestamp.
            window (int): Window size (in seconds).
            slo_config (dict): SLO configuration.

        Returns:
            tuple: A tuple (good_event_count, bad_event_count).

        Raises:
            Exception: If neither `query_bad` nor `query_valid` is configured.
        """
        measurement = slo_config['spec']['service_level_indicator']
        index = measurement['index']
        query_good = measurement['query_good']
        query_bad = measurement.get('query_bad')
        query_valid = measurement.get('query_valid')
        date_field = measurement.get('date_field', DEFAULT_DATE_FIELD)

        # Build ELK request bodies (build_query returns None for None input)
        good = ES.build_query(query_good, window, date_field)
        bad = ES.build_query(query_bad, window, date_field)
        valid = ES.build_query(query_valid, window, date_field)

        # Get good events count
        response = self.query(index, good)
        good_events_count = ES.count(response)

        # Get bad events count, either directly or as valid - good
        if query_bad is not None:
            response = self.query(index, bad)
            bad_events_count = ES.count(response)
        elif query_valid is not None:
            response = self.query(index, valid)
            bad_events_count = ES.count(response) - good_events_count
        else:
            # BUGFIX: the message previously referred to non-existent
            # `filter_bad` / `filter_valid` keys; the configuration keys
            # actually read above are `query_bad` / `query_valid`.
            raise Exception("`query_bad` or `query_valid` is required.")

        return (good_events_count, bad_events_count)

    def query(self, index, body):
        """Query ElasticSearch server.

        Args:
            index (str): Index to query.
            body (dict): Query body.

        Returns:
            dict: Response.
        """
        return self.client.search(index=index, body=body)

    @staticmethod
    def count(response):
        """Count events in an ElasticSearch response.

        Args:
            response (dict): ElasticSearch query response.

        Returns:
            int: Event count, or NO_DATA when the response carries no hits.
        """
        try:
            return response['hits']['total']['value']
        except KeyError as exception:
            LOGGER.warning("Couldn't find any values in timeseries response")
            LOGGER.debug(exception, exc_info=True)
            return NO_DATA

    @staticmethod
    def build_query(query, window, date_field=DEFAULT_DATE_FIELD):
        """Build ElasticSearch query.

        Add window to existing query.
        Replace window for different error budget steps on-the-fly.

        Args:
            query (dict): Existing query body (the content of a `bool` clause).
            window (int): Window in seconds.
            date_field (str): Field to filter time on (must be an ElasticSearch
                field of type `date`. Defaults to `@timestamp` (Logstash-
                generated date field).

        Returns:
            dict: Query body with range clause added, or None if `query` is
                None.
        """
        if query is None:
            return None
        body = {"query": {"bool": query}, "track_total_hits": True}
        range_query = {
            f"{date_field}": {
                "gte": f"now-{window}s/s",
                "lt": "now/s"
            }
        }
        # If a 'filter' clause already exists, add the range query on top,
        # otherwise create the 'filter' clause.
        if "filter" in body["query"]["bool"]:
            body["query"]["bool"]["filter"]["range"] = range_query
        else:
            # BUGFIX: previously this replaced the entire 'bool' clause,
            # silently discarding the caller's query terms (must/should/...).
            body["query"]["bool"]["filter"] = {"range": range_query}
        return body


ES = ElasticsearchBackend
|
google/slo-generator
|
slo_generator/backends/elasticsearch.py
|
Python
|
apache-2.0
| 4,815
|
[
"Elk"
] |
9cf59879f76646ee8b7efb395b6ddd1fe9298bdec9ca722eea8cfa1dce018bc9
|
#!/usr/bin/env python
########################################################################
# File : dirac_proxy_get_uploaded_info.py
# Author : Adrian Casajus
########################################################################
"""
Print information about uploaded proxies.
Example:
$ dirac-proxy-get-uploaded-info
Checking for DNs /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
--------------------------------------------------------------------------------------------------------
| UserDN | UserGroup | ExpirationTime | PersistentFlag |
--------------------------------------------------------------------------------------------------------
| /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar | dirac_user | 2011-06-29 12:04:25 | True |
--------------------------------------------------------------------------------------------------------
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import sys
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import ProxyManagerClient
from DIRAC.Core.Security import Properties
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
# Module-level selection of the user to query; False means "not set yet".
userName = False


def setUser(arg):
    """Switch callback that records the requested user name.

    :param str arg: user name

    :return: S_OK()
    """
    global userName
    userName = arg
    return S_OK()
@DIRACScript()
def main():
    """Entry point: print the proxies uploaded to the ProxyManager for a user.

    Queries the proxy DB for the DNs of the requested user (oneself by
    default; another user requires the PROXY_MANAGEMENT property) and renders
    the result as an ASCII table.
    """
    global userName
    Script.registerSwitch("u:", "user=", "User to query (by default oneself)", setUser)
    Script.parseCommandLine()

    result = getProxyInfo()
    if not result['OK']:
        gLogger.notice("Do you have a valid proxy?")
        gLogger.notice(result['Message'])
        sys.exit(1)
    proxyProps = result['Value']

    userName = userName or proxyProps.get('username')
    if not userName:
        gLogger.notice("Your proxy don`t have username extension")
        sys.exit(1)

    if userName in Registry.getAllUsers():
        # Without the proxy-management property one may only query oneself.
        if Properties.PROXY_MANAGEMENT not in proxyProps['groupProperties']:
            if userName != proxyProps['username'] and userName != proxyProps['issuer']:
                gLogger.notice("You can only query info about yourself!")
                sys.exit(1)
        result = Registry.getDNForUsername(userName)
        if not result['OK']:
            gLogger.notice("Oops %s" % result['Message'])
            # BUGFIX: previously execution fell through and crashed on
            # result['Value'] below; bail out on lookup failure instead.
            sys.exit(1)
        dnList = result['Value']
        if not dnList:
            gLogger.notice("User %s has no DN defined!" % userName)
            sys.exit(1)
        userDNs = dnList
    else:
        userDNs = [userName]

    gLogger.notice("Checking for DNs %s" % " | ".join(userDNs))
    pmc = ProxyManagerClient()
    result = pmc.getDBContents({'UserDN': userDNs})
    if not result['OK']:
        # BUGFIX: DIRAC error dictionaries carry their text in 'Message',
        # not 'Value' (which is absent on failure).
        gLogger.notice("Could not retrieve the proxy list: %s" % result['Message'])
        sys.exit(1)

    data = result['Value']

    # Compute column widths: at least the header width, widened by any cell.
    colLengths = []
    for pN in data['ParameterNames']:
        colLengths.append(len(pN))
    for row in data['Records']:
        for i in range(len(row)):
            colLengths[i] = max(colLengths[i], len(str(row[i])))

    # Render a simple ASCII table: dashed rule, header, dashed rule, rows,
    # closing dashed rule.
    lines = [""]
    for i in range(len(data['ParameterNames'])):
        pN = data['ParameterNames'][i]
        lines[0] += "| %s " % pN.ljust(colLengths[i])
    lines[0] += "|"
    tL = len(lines[0])
    lines.insert(0, "-" * tL)
    lines.append("-" * tL)
    for row in data['Records']:
        nL = ""
        for i in range(len(row)):
            nL += "| %s " % str(row[i]).ljust(colLengths[i])
        nL += "|"
        lines.append(nL)
    lines.append("-" * tL)
    gLogger.notice("\n".join(lines))


if __name__ == "__main__":
    main()
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_proxy_get_uploaded_info.py
|
Python
|
gpl-3.0
| 3,686
|
[
"DIRAC"
] |
82ee3e49d7a6aaecad41a0d14f0ab8f402fc5ebf7ea648b6114ebdfa9788cf83
|
class Gro:
    """
    the central class in gropy

    Wraps the contents of a GROMACS .gro coordinate file: per-atom records
    (residue id/name, atom name/id, position, velocity) plus the box vector.
    Field widths in read/write follow the fixed-column .gro convention.

    NOTE: the original code used ``xrange``; ``range`` behaves identically
    here under Python 2 and makes the class runnable under Python 3.
    """

    # -- constructor(s) --
    def __init__(self,
                 system_name=None, num_of_atoms=None,
                 residue_id=None, residue_name=None,
                 atom_name=None, atom_id=None,
                 x=None, y=None, z=None,
                 v_x=None, v_y=None, v_z=None,
                 box=None):
        """
        wrap the contents in a GROMACS gro file in a class
        """
        self.system_name = system_name or 'This is a Gro!'
        self.num_of_atoms = num_of_atoms or 0
        self.residue_id = residue_id or []
        self.residue_name = residue_name or []
        self.atom_name = atom_name or []
        self.atom_id = atom_id or []
        self.x = x or []
        self.y = y or []
        self.z = z or []
        self.v_x = v_x or []
        self.v_y = v_y or []
        self.v_z = v_z or []
        self.box = box or [0.0, 0.0, 0.0]

    # -- deconstructor --
    # not mandatory in python

    # -- file i/o --
    def read_gro_file(self, file_name):
        """
        read a gro file and store information in a Gro object
        """
        with open(file_name, 'r') as file_id:
            for i_line, line in enumerate(file_id):
                line = line.replace('\n', '')
                if i_line == 0:
                    # title line
                    self.system_name = line
                    continue
                if i_line == 1:
                    # atom-count line; remember where the atom records end
                    self.num_of_atoms = int(line)
                    final_line_of_atoms = self.num_of_atoms + 1
                    continue
                if i_line <= final_line_of_atoms:
                    # store atom information (fixed-width columns)
                    self.residue_id.append(int(line[0:5]))
                    self.residue_name.append(line[5:10].strip())  # remove leading spaces
                    self.atom_name.append(line[10:15].strip())  # remove leading spaces
                    self.atom_id.append(int(line[15:20]))
                    self.x.append(float(line[20:28]))
                    self.y.append(float(line[28:36]))
                    self.z.append(float(line[36:44]))
                    if len(line) > 44:
                        self.v_x.append(float(line[44:52]))
                        self.v_y.append(float(line[52:60]))
                        self.v_z.append(float(line[60:68]))
                    else:
                        # no velocities present: pad with zeros
                        self.v_x.append(0.0)
                        self.v_y.append(0.0)
                        self.v_z.append(0.0)
                else:
                    # final line: box vector
                    self.box = line.split()
                    self.box = [float(box_size) for box_size in self.box]

    def write_gro_file(self, file_name):
        """
        write a gro file based on a Gro object
        """
        with open(file_name, 'w') as file_id:
            file_id.write("%s\n" % self.system_name)
            file_id.write(" %d\n" % self.num_of_atoms)
            if self.v_x != []:
                for i in range(self.num_of_atoms):
                    file_id.write("%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\n" % (self.residue_id[i], self.residue_name[i],
                                  self.atom_name[i], self.atom_id[i], self.x[i], self.y[i], self.z[i], self.v_x[i], self.v_y[i], self.v_z[i]))
            else:
                # no velocities stored: write zeros
                for i in range(self.num_of_atoms):
                    file_id.write("%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\n" % (self.residue_id[i], self.residue_name[i],
                                  self.atom_name[i], self.atom_id[i], self.x[i], self.y[i], self.z[i], 0.0, 0.0, 0.0))
            file_id.write("%10.5f%10.5f%10.5f\n" % (self.box[0], self.box[1], self.box[2]))

    # -- preservative operations --
    def rename_atoms(self, old_atom_names, new_atom_names):
        """
        rename atoms from the old_atom_names to the new_atom_names
        """
        assert (len(old_atom_names) == len(new_atom_names)), "old_atom_names doesn't have the same length as new_atom_names"
        for i_atom in range(self.num_of_atoms):
            for i_name in range(len(old_atom_names)):
                if self.atom_name[i_atom] == old_atom_names[i_name]:
                    self.atom_name[i_atom] = new_atom_names[i_name]
                    break
    # TODO: may add flexibility to rename atoms with specific residue_names

    def rename_residues(self, old_residue_names, new_residue_names):
        """
        rename residues with an old_residue_name to a new_residue_name
        """
        assert (len(old_residue_names) == len(new_residue_names)), "old_residue_names doesn't have the same length as new_residue_names"
        for i_atom in range(self.num_of_atoms):
            for i_name in range(len(old_residue_names)):
                if self.residue_name[i_atom] == old_residue_names[i_name]:
                    self.residue_name[i_atom] = new_residue_names[i_name]
                    break

    def renumber_atoms(self):
        """
        renumber residue_id and atom_id starting from 1; the original composition of each residue is maintained
        """
        last_old_residue_id = -1  # use negative num to avoid coincidence
        last_old_residue_name = 'to_be_defined'
        last_new_residue_id = 0
        for i_atom in range(self.num_of_atoms):
            self.atom_id[i_atom] = i_atom + 1  # starting from 1
            # BUGFIX: compare against the recorded *old* residue id/name; the
            # original referenced undefined names (last_residue_id /
            # last_residue_name), raising NameError on first use.
            if self.residue_id[i_atom] == last_old_residue_id and \
                    self.residue_name[i_atom] == last_old_residue_name:
                # same residue as the previous atom: reuse the new id
                self.residue_id[i_atom] = last_new_residue_id
            else:
                # new residue: remember its old identity, assign the next id
                last_old_residue_id = self.residue_id[i_atom]
                last_old_residue_name = self.residue_name[i_atom]
                self.residue_id[i_atom] = last_new_residue_id + 1
                last_new_residue_id += 1

    def replace_atom_entry(self, i_atom, another_gro_object, j_atom):
        """
        replace the i-th atom of the current gro object with the j-th atom of another gro object
        """
        self.residue_id[i_atom] = another_gro_object.residue_id[j_atom]
        self.residue_name[i_atom] = another_gro_object.residue_name[j_atom]
        self.atom_name[i_atom] = another_gro_object.atom_name[j_atom]
        self.atom_id[i_atom] = another_gro_object.atom_id[j_atom]
        self.x[i_atom] = another_gro_object.x[j_atom]
        self.y[i_atom] = another_gro_object.y[j_atom]
        self.z[i_atom] = another_gro_object.z[j_atom]
        self.v_x[i_atom] = another_gro_object.v_x[j_atom]
        self.v_y[i_atom] = another_gro_object.v_y[j_atom]
        self.v_z[i_atom] = another_gro_object.v_z[j_atom]

    def sort_residues(self, residue_name_list):
        """
        sort residues in the provided order, attaching other unspecified residues to the end
        """
        sorted_gro = Gro()
        # copy provided to another
        for residue_name in residue_name_list:
            sorted_gro.copy_residues(self, [residue_name])
        # remove provided
        self.remove_residues(residue_name_list)
        # copy unprovided to another
        for i_atom in range(self.num_of_atoms):
            sorted_gro.copy_atom_entry(self, i_atom)
        # delete unprovided
        for i_atom in range(self.num_of_atoms):
            self.remove_atom_entry(0)
        # copy all back from another
        for i_atom in range(sorted_gro.num_of_atoms):
            self.copy_atom_entry(sorted_gro, i_atom)

    # -- additive operations --
    def copy_atom_entry(self, another_gro_object, i_atom):
        """
        copy the i-th atom entry from another gro object and append to the end of current gro object
        """
        self.num_of_atoms += 1
        self.residue_id.append(another_gro_object.residue_id[i_atom])
        self.residue_name.append(another_gro_object.residue_name[i_atom])
        self.atom_name.append(another_gro_object.atom_name[i_atom])
        self.atom_id.append(another_gro_object.atom_id[i_atom])
        self.x.append(another_gro_object.x[i_atom])
        self.y.append(another_gro_object.y[i_atom])
        self.z.append(another_gro_object.z[i_atom])
        self.v_x.append(another_gro_object.v_x[i_atom])
        self.v_y.append(another_gro_object.v_y[i_atom])
        self.v_z.append(another_gro_object.v_z[i_atom])

    def copy_residue_entry(self, another_gro_object, residue_id, residue_name):
        """
        copy atoms of the specified residue from another gro object and append to the end of current gro object
        """
        for i_atom in range(another_gro_object.num_of_atoms):
            if another_gro_object.residue_id[i_atom] == residue_id and another_gro_object.residue_name[i_atom] == residue_name:
                self.copy_atom_entry(another_gro_object, i_atom)

    def copy_atoms(self, another_gro_object, atom_name_list):
        """
        copy atoms with the provided atom names from another gro object and append to the end of current gro object
        """
        for i_atom in range(another_gro_object.num_of_atoms):
            for atom_name in atom_name_list:
                if another_gro_object.atom_name[i_atom] == atom_name:
                    self.copy_atom_entry(another_gro_object, i_atom)
                    break

    def copy_residues(self, another_gro_object, residue_name_list):
        """
        copy atoms with the provided residue names from another gro object and append to the end of current gro object
        """
        for i_atom in range(another_gro_object.num_of_atoms):
            for residue_name in residue_name_list:
                if another_gro_object.residue_name[i_atom] == residue_name:
                    self.copy_atom_entry(another_gro_object, i_atom)
                    break
    # TODO: may add to copy atoms with the providied atom names and residue names
    # Python doesn't allow the overloading of assignment operator "=";
    # In python, copying an object is often achived by utilizing the copy and deepcopy functions in the copy module.

    # -- subtractive operations --
    def remove_atom_entry(self, i_atom):
        """
        remove the i-th atom entry from current gro object
        """
        self.num_of_atoms -= 1
        del self.residue_id[i_atom]
        del self.residue_name[i_atom]
        del self.atom_name[i_atom]
        del self.atom_id[i_atom]
        del self.x[i_atom]
        del self.y[i_atom]
        del self.z[i_atom]
        del self.v_x[i_atom]
        del self.v_y[i_atom]
        del self.v_z[i_atom]

    def remove_residue_entry(self, residue_id, residue_name):
        """
        remove atoms of the specified residue
        """
        atom_indice_to_be_removed = []
        for i_atom in range(self.num_of_atoms):
            if self.residue_id[i_atom] == residue_id and self.residue_name[i_atom] == residue_name:
                atom_indice_to_be_removed.append(i_atom)  # save indice first; direct removal would shrink the atom list
        num_of_atoms_to_be_removed = len(atom_indice_to_be_removed)
        for i_atom in range(num_of_atoms_to_be_removed):
            self.remove_atom_entry(atom_indice_to_be_removed[i_atom] - i_atom)  # shift atom indice to match the shrinkage of atom list

    def remove_atoms(self, atom_name_list):
        """
        remove atoms with the provided atom names
        """
        atom_indice_to_be_removed = []
        for i_atom in range(self.num_of_atoms):
            for atom_name in atom_name_list:
                if self.atom_name[i_atom] == atom_name:
                    atom_indice_to_be_removed.append(i_atom)
                    break
        num_of_atoms_to_be_removed = len(atom_indice_to_be_removed)
        for i_atom in range(num_of_atoms_to_be_removed):
            self.remove_atom_entry(atom_indice_to_be_removed[i_atom] - i_atom)  # shift atom indice to match the shrinkage of atom list

    def remove_residues(self, residue_name_list):
        """
        remove atoms with the provided residue names
        """
        atom_indice_to_be_removed = []
        for i_atom in range(self.num_of_atoms):
            for residue_name in residue_name_list:
                if self.residue_name[i_atom] == residue_name:
                    atom_indice_to_be_removed.append(i_atom)
                    break
        num_of_atoms_to_be_removed = len(atom_indice_to_be_removed)
        for i_atom in range(num_of_atoms_to_be_removed):
            self.remove_atom_entry(atom_indice_to_be_removed[i_atom] - i_atom)  # shift atom indice to match the shrinkage of atom list
    # TODO: may add to copy atoms with the providied atom names and residue names
|
caizkun/gropy
|
gropy/Gro.py
|
Python
|
mit
| 12,814
|
[
"Gromacs"
] |
99a549c857bd1aea642a1bcc77887850da6cfd81b7f3892d2fb229afef7b5f83
|
"""
Single Bubble Model: Natural seep bubble simulations
=====================================================
Use the ``TAMOC`` `single_bubble_model` to simulate the trajectory of a light
hydrocarbon bubble rising through the water column. This script demonstrates
the typical steps involved in running the single bubble model for a natural
seep bubble.
It uses the ambient data stored in the file `../test/output/test_bm54.nc`,
created by the `test_ambient` module. Please make sure all tests have
passed before running this script or modify the script to use a different
source of ambient data.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import seawater
from tamoc import single_bubble_model
from tamoc import dispersed_phases
import numpy as np
if __name__ == '__main__':

    # Open an ambient profile object from the netCDF dataset
    nc = '../../test/output/test_bm54.nc'
    bm54 = ambient.Profile(nc, chem_names='all')
    bm54.close_nc()

    # Initialize a single_bubble_model.Model object with this data
    sbm = single_bubble_model.Model(bm54)

    # Create a light gas bubble to track
    composition = ['methane', 'ethane', 'propane', 'oxygen']
    bub = dbm.FluidParticle(composition, fp_type=0.)

    # Set the mole fractions of each component at release.
    mol_frac = np.array([0.95, 0.03, 0.02, 0.])

    # Specify the remaining particle initial conditions
    de = 0.005          # initial bubble diameter -- presumably m (SI); confirm
    z0 = 1000.          # release depth -- presumably m; confirm
    T0 = 273.15 + 30.   # release temperature in K (30 degC)
    fdis = 1.e-15       # dissolution cut-off passed to simulate() below

    # Also, use the hydrate model from Jun et al. (2015) to set the
    # hydrate shell formation time
    P = bm54.get_values(z0, 'pressure')
    m = bub.masses_by_diameter(de, T0, P, mol_frac)
    t_hyd = dispersed_phases.hydrate_formation_time(bub, z0, m, T0, bm54)

    # Simulate the trajectory through the water column and plot the results
    sbm.simulate(bub, z0, de, mol_frac, T0, K_T=1, fdis=fdis, t_hyd=t_hyd,
                 delta_t=10.)
    sbm.post_process()

    # Save the simulation to a netCDF file
    sbm.save_sim('./seep_bubble.nc', '../../test/output/test_bm54.nc',
                 'Results of ./seep_bubble.py script')

    # Save the data for importing into Matlab
    sbm.save_txt('./seep_bubble.txt', '../../test/output/test_bm54.nc',
                 'Results of ./seep_bubble.py script')
|
socolofs/tamoc
|
bin/sbm/seep_bubble.py
|
Python
|
mit
| 2,477
|
[
"NetCDF"
] |
2fdc01202fd1942bc206ada0ee6b78f7867b8b10746204c35df25e818ab2a3cc
|
from director import filterUtils
from director import vieweventfilter
from director import visualization as vis
from director import vtkAll as vtk
from director import vtkNumpy as vnp
from director.shallowCopy import shallowCopy
from PythonQt import QtCore
import numpy as np
class PointSelector(object):
    '''
    Interactive rubber-band point selection on a vtkPolyData shown in a view.

    Usage:

        selector = PointSelector(view, polyData)

        # hold shift and left click and drag to select points
        # hold shift+control and left click and drag to deselect points

        # disable the selector
        selector.stop()

        # get the selected points
        selection = selector.getSelectedPoints()

        # get the selected point ids of the input points
        pointIds = vnp.getNumpyFromVtk(selection, 'point_ids')
    '''

    class EventFilter(vieweventfilter.ViewEventFilter):
        # Watches mouse presses on the view; `selector` is attached after
        # construction by PointSelector.__init__.

        def onLeftMousePress(self, event):
            # Shift => select (mode 1); Shift+Ctrl, or Shift+Meta on macOS,
            # => deselect (mode 0).
            modifiers = event.modifiers()
            self.selector.modifiers = modifiers
            isShift = modifiers == QtCore.Qt.ShiftModifier
            isShiftAndControl = (
                modifiers == QtCore.Qt.ShiftModifier | QtCore.Qt.ControlModifier
                or modifiers == QtCore.Qt.ShiftModifier | QtCore.Qt.MetaModifier)
            if isShift or isShiftAndControl:
                # Temporarily switch to rubber-band interaction for the drag;
                # pickArea() restores the previous style afterwards.
                self.selector.iren.SetInteractorStyle(self.selector.rubberBandStyle)
                self.selector.selectMode = 1 if isShift else 0

    def __init__(self, view, polyData):
        self.view = view
        self.polyData = shallowCopy(polyData)
        self.selectionObj = None
        self.selectionColor = [1, 0, 0]
        self.selectionPointSize = 3
        self.selectMode = 1
        self.iren = view.renderWindow().GetInteractor()
        # Remember the current style so it can be restored after a drag.
        self.prevStyle = self.iren.GetInteractorStyle()
        self.rubberBandStyle = vtk.vtkInteractorStyleRubberBand3D()
        self.rubberBandStyle.AddObserver('SelectionChangedEvent', self.onRubberBandPickEvent)
        self.eventFilter = PointSelector.EventFilter(view)
        self.eventFilter.selector = self
        # Bookkeeping arrays on the working copy: original point ids and a
        # 0/1 selection flag per point.
        vnp.addNumpyToVtk(self.polyData, np.arange(self.polyData.GetNumberOfPoints(), dtype=int), 'point_ids')
        vnp.addNumpyToVtk(self.polyData, np.zeros(self.polyData.GetNumberOfPoints(), dtype=int), 'is_selected')

    def stop(self):
        # Stop listening for mouse events.
        self.eventFilter.removeEventFilter()

    def start(self):
        # Begin listening for mouse events on the view.
        self.eventFilter.installEventFilter()

    def getSelectedPoints(self):
        # Points whose 'is_selected' flag is 1.
        return filterUtils.thresholdPoints(self.polyData, 'is_selected', [1, 1])

    def getNonSelectedPoints(self):
        # Points whose 'is_selected' flag is 0.
        return filterUtils.thresholdPoints(self.polyData, 'is_selected', [0, 0])

    def onRubberBandPickEvent(self, obj, event):
        self.pickArea(self.rubberBandStyle.GetStartPosition(), self.rubberBandStyle.GetEndPosition())

    def pickArea(self, startPos, endPos):
        # Restore the pre-drag interactor style.
        self.iren.SetInteractorStyle(self.prevStyle)

        # Build a frustum from the screen-space rectangle and extract the
        # geometry inside it.
        picker = vtk.vtkAreaPicker()
        picker.AreaPick(min(startPos[0], endPos[0]),
                        min(startPos[1], endPos[1]),
                        max(startPos[0], endPos[0]),
                        max(startPos[1], endPos[1]),
                        self.view.renderer())
        frustum = picker.GetFrustum()

        extractGeometry = vtk.vtkExtractPolyDataGeometry()
        extractGeometry.SetImplicitFunction(frustum)
        extractGeometry.SetInputData(self.polyData)
        extractGeometry.ExtractBoundaryCellsOn()
        extractGeometry.Update()
        selected = filterUtils.cleanPolyData(extractGeometry.GetOutput())

        if not selected.GetNumberOfPoints():
            return

        # Mark the picked points with the current mode (1 select, 0 deselect)
        # and refresh the highlight object in the scene.
        pickedIds = vnp.getNumpyFromVtk(selected, 'point_ids')
        vnp.getNumpyFromVtk(self.polyData, 'is_selected')[pickedIds] = self.selectMode
        selection = self.getSelectedPoints()

        if not self.selectionObj:
            self.selectionObj = vis.showPolyData(selection, 'selected points', color=self.selectionColor, parent='selection')
            self.selectionObj.setProperty('Point Size', self.selectionPointSize)
        else:
            self.selectionObj.setPolyData(selection)
|
patmarion/director
|
src/python/director/pointselector.py
|
Python
|
bsd-3-clause
| 4,091
|
[
"VTK"
] |
f22e883e8d6d72f007e67ecefea9a1409b952e10ab0b2334ac89ff3676543bc9
|
# Mapping from football-data.co.uk column abbreviations to human-readable
# descriptions (match statistics plus per-bookmaker odds columns).
fi_data_key = {'Div':'League Division',
               'Date':'Match Date (dd/mm/yy)',
               'HomeTeam':'Home Team',
               'AwayTeam':'Away Team',
               'FTHG':'Full Time Home Team Goals',
               'FTAG':'Full Time Away Team Goals',
               'FTR':'Full Time Result (H=Home Win, D=Draw, A=Away Win)',
               'HTHG':'Half Time Home Team Goals',
               'HTAG':'Half Time Away Team Goals',
               'HTR':'Half Time Result (H=Home Win, D=Draw, A=Away Win)',
               'Attendance':'Crowd Attendance',
               'Referee':'Match Referee',
               'HS':'Home Team Shots',
               'AS':'Away Team Shots',
               'HST':'Home Team Shots on Target',
               'AST':'Away Team Shots on Target',
               'HHW':'Home Team Hit Woodwork',
               'AHW':'Away Team Hit Woodwork',
               'HC':'Home Team Corners',
               'AC':'Away Team Corners',
               'HF':'Home Team Fouls Committed',
               'AF':'Away Team Fouls Committed',
               'HO':'Home Team Offsides',
               'AO':'Away Team Offsides',
               'HY':'Home Team Yellow Cards',
               'AY':'Away Team Yellow Cards',
               'HR':'Home Team Red Cards',
               'AR':'Away Team Red Cards',
               'HBP':'Home Team Bookings Points (10 = yellow, 25 = red)',
               'ABP':'Away Team Bookings Points (10 = yellow, 25 = red)',
               'B365H':'Bet365 home win odds',
               'B365D':'Bet365 draw odds',
               'B365A':'Bet365 away win odds',
               'BSH':'Blue Square home win odds',
               'BSD':'Blue Square draw odds',
               'BSA':'Blue Square away win odds',
               'BWH':'Bet&Win home win odds',
               'BWD':'Bet&Win draw odds',
               'BWA':'Bet&Win away win odds',
               'GBH':'Gamebookers home win odds',
               'GBD':'Gamebookers draw odds',
               'GBA':'Gamebookers away win odds',
               'IWH':'Interwetten home win odds',
               'IWD':'Interwetten draw odds',
               'IWA':'Interwetten away win odds',
               'LBH':'Ladbrokes home win odds',
               'LBD':'Ladbrokes draw odds',
               'LBA':'Ladbrokes away win odds',
               'PSH':'Pinnacle Sports home win odds',
               'PSD':'Pinnacle Sports draw odds',
               'PSA':'Pinnacle Sports away win odds',
               'SOH':'Sporting Odds home win odds',
               'SOD':'Sporting Odds draw odds',
               'SOA':'Sporting Odds away win odds',
               'SBH':'Sportingbet home win odds',
               'SBD':'Sportingbet draw odds',
               'SBA':'Sportingbet away win odds',
               'SJH':'Stan James home win odds',
               'SJD':'Stan James draw odds',
               'SJA':'Stan James away win odds',
               'SYH':'Stanleybet home win odds',
               'SYD':'Stanleybet draw odds',
               'SYA':'Stanleybet away win odds',
               'VCH':'VC Bet home win odds',
               'VCD':'VC Bet draw odds',
               'VCA':'VC Bet away win odds',
               'WHH':'William Hill home win odds',
               'WHD':'William Hill draw odds',
               'WHA':'William Hill away win odds',
               'Bb1X2':'Number of BetBrain bookmakers used to calculate match odds averages and maximums',
               'BbMxH':'Betbrain maximum home win odds',
               'BbAvH':'Betbrain average home win odds',
               'BbMxD':'Betbrain maximum draw odds',
               'BbAvD':'Betbrain average draw win odds',
               'BbMxA':'Betbrain maximum away win odds',
               'BbAvA':'Betbrain average away win odds',
               'BbOU':'Number of BetBrain bookmakers used to calculate over/under 2.5 goals (total goals) averages and maximums',
               'BbMx>2.5':'Betbrain maximum over 2.5 goals',
               'BbAv>2.5':'Betbrain average over 2.5 goals',
               'BbMx<2.5':'Betbrain maximum under 2.5 goals',
               'BbAv<2.5':'Betbrain average under 2.5 goals',
               'GB>2.5':'Gamebookers over 2.5 goals',
               'GB<2.5':'Gamebookers under 2.5 goals',
               'B365>2.5':'Bet365 over 2.5 goals',
               'B365<2.5':'Bet365 under 2.5 goals',
               'BbAH':'Number of BetBrain bookmakers used to Asian handicap averages and maximums',
               'BbAHh':'Betbrain size of handicap (home team)',
               'BbMxAHH':'Betbrain maximum Asian handicap home team odds',
               'BbAvAHH':'Betbrain average Asian handicap home team odds',
               'BbMxAHA':'Betbrain maximum Asian handicap away team odds',
               'BbAvAHA':'Betbrain average Asian handicap away team odds',
               'GBAHH':'Gamebookers Asian handicap home team odds',
               'GBAHA':'Gamebookers Asian handicap away team odds',
               'GBAH':'Gamebookers size of handicap (home team)',
               'LBAHH':'Ladbrokes Asian handicap home team odds',
               'LBAHA':'Ladbrokes Asian handicap away team odds',
               'LBAH':'Ladbrokes size of handicap (home team)',
               'B365AHH':'Bet365 Asian handicap home team odds',
               'B365AHA':'Bet365 Asian handicap away team odds',
               'B365AH':'Bet365 size of handicap (home team)'}
# SQL column type for each football-data.co.uk column (same keys as
# fi_data_key). NOTE(review): keys such as 'AS' and 'Date' are SQL reserved
# words — quote these identifiers when building DDL from this mapping.
fi_sql_types = {'Div':'VARCHAR(50)',
                'Date':'DATE',
                'HomeTeam':'VARCHAR(50)',
                'AwayTeam':'VARCHAR(50)',
                'FTHG':'INT',
                'FTAG':'INT',
                'FTR':'VARCHAR(50)',
                'HTHG':'INT',
                'HTAG':'INT',
                'HTR':'VARCHAR(50)',
                'Attendance':'INT',
                'Referee':'VARCHAR(50)',
                'HS':'INT',
                'AS':'INT',
                'HST':'INT',
                'AST':'INT',
                'HHW':'INT',
                'AHW':'INT',
                'HC':'INT',
                'AC':'INT',
                'HF':'INT',
                'AF':'INT',
                'HO':'INT',
                'AO':'INT',
                'HY':'INT',
                'AY':'INT',
                'HR':'INT',
                'AR':'INT',
                'HBP':'INT',
                'ABP':'INT',
                'B365H':'FLOAT',
                'B365D':'FLOAT',
                'B365A':'FLOAT',
                'BSH':'FLOAT',
                'BSD':'FLOAT',
                'BSA':'FLOAT',
                'BWH':'FLOAT',
                'BWD':'FLOAT',
                'BWA':'FLOAT',
                'GBH':'FLOAT',
                'GBD':'FLOAT',
                'GBA':'FLOAT',
                'IWH':'FLOAT',
                'IWD':'FLOAT',
                'IWA':'FLOAT',
                'LBH':'FLOAT',
                'LBD':'FLOAT',
                'LBA':'FLOAT',
                'PSH':'FLOAT',
                'PSD':'FLOAT',
                'PSA':'FLOAT',
                'SOH':'FLOAT',
                'SOD':'FLOAT',
                'SOA':'FLOAT',
                'SBH':'FLOAT',
                'SBD':'FLOAT',
                'SBA':'FLOAT',
                'SJH':'FLOAT',
                'SJD':'FLOAT',
                'SJA':'FLOAT',
                'SYH':'FLOAT',
                'SYD':'FLOAT',
                'SYA':'FLOAT',
                'VCH':'FLOAT',
                'VCD':'FLOAT',
                'VCA':'FLOAT',
                'WHH':'FLOAT',
                'WHD':'FLOAT',
                'WHA':'FLOAT',
                'Bb1X2':'INT',
                'BbMxH':'FLOAT',
                'BbAvH':'FLOAT',
                'BbMxD':'FLOAT',
                'BbAvD':'FLOAT',
                'BbMxA':'FLOAT',
                'BbAvA':'FLOAT',
                'BbOU':'INT',
                'BbMx>2.5':'FLOAT',
                'BbAv>2.5':'FLOAT',
                'BbMx<2.5':'FLOAT',
                'BbAv<2.5':'FLOAT',
                'GB>2.5':'FLOAT',
                'GB<2.5':'FLOAT',
                'B365>2.5':'FLOAT',
                'B365<2.5':'FLOAT',
                'BbAH':'INT',
                'BbAHh':'FLOAT',
                'BbMxAHH':'FLOAT',
                'BbAvAHH':'FLOAT',
                'BbMxAHA':'FLOAT',
                'BbAvAHA':'FLOAT',
                'GBAHH':'FLOAT',
                'GBAHA':'FLOAT',
                'GBAH':'FLOAT',
                'LBAHH':'FLOAT',
                'LBAHA':'FLOAT',
                'LBAH':'FLOAT',
                'B365AHH':'FLOAT',
                'B365AHA':'FLOAT',
                'B365AH':'FLOAT'}
|
Danie1Johnson/football-index
|
fi_data_key.py
|
Python
|
gpl-2.0
| 8,624
|
[
"BWA"
] |
40275fbc20bd23f2ec401f1a2946746ddf40d6f6878e294edc2e5976171b1723
|
#! /usr/bin/env python
import re
import math
import collections
import numpy as np
import time
import operator
from scipy.io import mmread, mmwrite
from random import randint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing as pp
from sklearn.svm import SVR
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.decomposition import ProbabilisticPCA, KernelPCA
from sklearn.decomposition import NMF
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet
import scipy.stats as stats
from sklearn import tree
from sklearn.feature_selection import f_regression
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, f1_score
from sklearn.gaussian_process import GaussianProcess
import features
# working directory
# NOTE(review): `dir` shadows the builtin of the same name; kept as-is because
# every loader/writer in this file references it.
dir = '.'
# Feature columns are 1..label_index-1; the loss target is the last column
# (see train_type / test_type below).
label_index = 770
# load train data
def load_train_fs():
    """Load the training matrix from '<dir>/train_v2_combine_5000.csv'.

    NaN cells are imputed with their column mean and infinities are
    zeroed, so downstream estimators see only finite values.
    """
    # In the validation process, the training data was randomly shuffled firstly.
    # For the prediction process, there is no need to shuffle the dataset.
    # Owing to out of memory problem, Gaussian process only use part of training data, the prediction of gaussian process
    # may be a little different from the model,which the training data was shuffled.
    train_fs = np.genfromtxt(open(dir + '/train_v2_combine_5000.csv','rb'), delimiter=',', skip_header=1)
    # scipy.stats.nanmean was removed in SciPy 0.18; np.nanmean is the
    # drop-in replacement with identical semantics.
    col_mean = np.nanmean(train_fs, axis=0)
    inds = np.where(np.isnan(train_fs))
    train_fs[inds] = np.take(col_mean, inds[1])
    train_fs[np.isinf(train_fs)] = 0
    return train_fs
# load test data
def load_test_fs():
    """Load the test matrix from '<dir>/train_v2.csv'.

    Same cleaning as load_train_fs: column-mean imputation for NaNs,
    infinities replaced with 0.
    """
    test_fs = np.genfromtxt(open(dir + '/train_v2.csv','rb'), delimiter=',', skip_header = 1)
    # scipy.stats.nanmean was removed in SciPy 0.18; np.nanmean is the
    # drop-in replacement with identical semantics.
    col_mean = np.nanmean(test_fs, axis=0)
    inds = np.where(np.isnan(test_fs))
    test_fs[inds] = np.take(col_mean, inds[1])
    test_fs[np.isinf(test_fs)] = 0
    return test_fs
# extract features from test data
def test_type(test_fs):
    """Return the feature block (columns 1..label_index-1) of the test matrix."""
    feature_cols = np.arange(1, label_index)
    return test_fs[:, feature_cols]
# extract features from train data
def train_type(train_fs):
    """Split the raw training matrix into (features, target).

    Features are columns 1..label_index-1; the target (loss) is the
    last column.
    """
    feature_cols = np.arange(1, label_index)
    x = train_fs[:, feature_cols]
    y = train_fs[:, -1]
    return x, y
# transform the loss to the binary form
def toLabels(train_y):
    """Binarize the loss: 1.0 where loss > 0 (default), else 0.0."""
    return (np.asarray(train_y) > 0).astype(float)
# generate the output file based to the predictions
def output_preds(preds):
    """Write predictions to '<dir>/output_combine_5000.csv' as 'id,loss' rows.

    Losses are clipped in place to the valid range [0, 100]; row ids
    start at 105472 (the id of the first test row).
    """
    out_file = dir + '/output_combine_5000.csv'
    fs = open(out_file, 'w')
    fs.write('id,loss\n')
    for i in range(len(preds)):
        # clip to the valid loss range [0, 100]
        if preds[i] > 100:
            preds[i] = 100
        elif preds[i] < 0:
            preds[i] = 0
        # np.float was removed in NumPy 1.24; it was an alias of the
        # builtin float, so this is byte-identical output.
        strs = str(i + 105472) + ',' + str(float(preds[i]))
        fs.write(strs + '\n')
    fs.close()
    return
# get the top feature indexes by invoking f_regression
def getTopFeatures(train_x, train_y, n_features=100):
    """Return indices of the n_features columns with the largest
    univariate F-scores (sklearn f_regression).

    NaN F-values are treated as 0 so uninformative columns sort last.
    Fixes vs. the original: dict.iteritems() (Python 2 only) is gone,
    as is the dead p-value sorting whose result was never used.
    """
    f_val, p_val = f_regression(train_x, train_y)
    # Replace NaN scores with 0 (original behaviour) before ranking.
    f_val = np.where(np.isnan(f_val), 0.0, f_val)
    # Stable descending sort: ties keep ascending column order.
    order = sorted(range(len(f_val)), key=lambda i: f_val[i], reverse=True)
    return order[:n_features]
# generate the new data, based on which features are generated, and used
def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[],
             feature_mul_pair_list=[], feature_divide_pair_list = [], feature_pair_sub_mul_list=[],
             feature_pair_plus_mul_list = [],feature_pair_sub_divide_list = [], feature_minus2_pair_list = [],feature_mul2_pair_list=[],
             feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],feature_square_plus_pair_list=[]):
    # Build a design matrix: the selected base columns first, then derived
    # columns appended in this fixed order: (i-j), (i+j), (i*j), (i/j),
    # ((i-j)*k).  The column order is load-bearing: downstream code indexes
    # columns positionally (e.g. column 16 and column 101).
    #
    # NOTE(review): only the first five pair lists are ever consumed; the
    # parameters from feature_pair_plus_mul_list onwards are accepted but
    # ignored by this implementation.
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # harmless here because they are never mutated, but fragile.
    sub_train_x = train_x[:,feature_indexs]
    # append (i - j) columns
    for i in range(len(feature_minus_pair_list)):
        ind_i = feature_minus_pair_list[i][0]
        ind_j = feature_minus_pair_list[i][1]
        sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i]-train_x[:,ind_j]))
    # append (i + j) columns
    for i in range(len(feature_plus_pair_list)):
        ind_i = feature_plus_pair_list[i][0]
        ind_j = feature_plus_pair_list[i][1]
        sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] + train_x[:,ind_j]))
    # append (i * j) columns
    for i in range(len(feature_mul_pair_list)):
        ind_i = feature_mul_pair_list[i][0]
        ind_j = feature_mul_pair_list[i][1]
        sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] * train_x[:,ind_j]))
    # append (i / j) columns -- may produce inf/NaN if column j has zeros;
    # inputs are pre-cleaned by the loaders but the quotient is not re-checked.
    for i in range(len(feature_divide_pair_list)):
        ind_i = feature_divide_pair_list[i][0]
        ind_j = feature_divide_pair_list[i][1]
        sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] / train_x[:,ind_j]))
    # append ((i - j) * k) columns
    for i in range(len(feature_pair_sub_mul_list)):
        ind_i = feature_pair_sub_mul_list[i][0]
        ind_j = feature_pair_sub_mul_list[i][1]
        ind_k = feature_pair_sub_mul_list[i][2]
        sub_train_x = np.column_stack((sub_train_x, (train_x[:,ind_i]-train_x[:,ind_j]) * train_x[:,ind_k]))
    return sub_train_x
# use gbm classifier to predict whether the loan defaults or not
def gbc_classify(train_x, train_y):
    """Fit a GradientBoostingClassifier on engineered features that
    predicts whether a loan defaults (loss > 0)."""
    top_features = getTopFeatures(train_x, train_y)
    design = get_data(
        train_x,
        top_features[:16],
        features.feature_pair_sub_list,
        features.feature_pair_plus_list,
        features.feature_pair_mul_list,
        features.feature_pair_divide_list[:20],
        features.feature_pair_sub_mul_list[:20],
    )
    model = GradientBoostingClassifier(n_estimators=3000, max_depth=8)
    model.fit(design, toLabels(train_y))
    return model
# use svm to predict the loss, based on the result of gbm classifier
def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
                         feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
                         feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2):
    """Predict loss with an RBF SVR on the test rows the classifier `gbc`
    marks as likely defaults (P(default) > 0.55).

    Returns an array of per-row losses; rows not selected stay 0.
    NOTE(review): mutates sub-matrices in place (log transform of column
    101) and fits the SVR on log(loss), exponentiating on the way out.
    """
    feature_indexs = getTopFeatures(train_x, train_y)
    # Classifier features: same engineered design used when gbc was trained.
    sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
                           ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
    sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
                          ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
    pred_labels = gbc.predict(sub_x_Test)
    pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
    # Rows to regress: predicted default probability above 0.55.
    ind_test = np.where(pred_probs>0.55)[0]
    ind_train = np.where(train_y > 0)[0]
    ind_train0 = np.where(train_y == 0)[0]
    preds_all = np.zeros([len(sub_x_Test)])
    # Column 16 >= 1 flags rows that are excluded from scaling/regression
    # and forced to 0 -- presumably a saturated feature; confirm upstream.
    flag = (sub_x_Test[:,16] >= 1)
    ind_tmp0 = np.where(flag)[0]
    ind_tmp = np.where(~flag)[0]
    # Regression features: a wider design (100 base columns + pairs).
    sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf
                           ,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
    sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf
                          ,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
    # log(1 - x) transform of column 101; valid only for values < 1, which
    # is why flagged test rows (ind_tmp0) are skipped.  Train rows are not
    # guarded -- assumes training column 101 is always < 1; TODO confirm.
    sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101])
    sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101])
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
    svr = SVR(C=16, kernel='rbf', gamma = 0.000122)
    # Fit on defaulting rows only, regressing log(loss).
    svr.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
    preds = svr.predict(sub_x_Test[ind_test])
    # Undo the log target transform.
    preds_all[ind_test] = np.power(np.e, preds)
    preds_all[ind_tmp0] = 0
    return preds_all
# use gbm regression to predict the loss, based on the result of gbm classifier
def gbc_gbr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
                         feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
                         feature_pair_sub_mul_list, feature_pair_sub_list2):
    """Predict loss with a GradientBoostingRegressor on the test rows the
    classifier `gbc` marks as likely defaults (P(default) > 0.55).

    Mirrors gbc_svr_predict_part but without the log(1-x) column transform;
    the regressor is still fit on log(loss) and predictions exponentiated.
    Returns an array of per-row losses; rows not selected stay 0.
    """
    feature_indexs = getTopFeatures(train_x, train_y)
    # Classifier features: same engineered design used when gbc was trained.
    sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
                           ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20],feature_pair_sub_mul_list[:20])
    sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
                          ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
    pred_labels = gbc.predict(sub_x_Test)
    pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
    # Rows to regress: predicted default probability above 0.55.
    ind_test = np.where(pred_probs>0.55)[0]
    ind_train = np.where(train_y > 0)[0]
    ind_train0 = np.where(train_y == 0)[0]
    preds_all = np.zeros([len(sub_x_Test)])
    # Column 16 >= 1 flags rows excluded from scaling and forced to 0 --
    # presumably a saturated feature; confirm upstream.
    flag = (sub_x_Test[:,16] >= 1)
    ind_tmp0 = np.where(flag)[0]
    ind_tmp = np.where(~flag)[0]
    # Regression features: different pair lists than the classifier design.
    sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list2[:70]
                           ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
    sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list2[:70]
                          ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
    gbr1000 = GradientBoostingRegressor(n_estimators=1300, max_depth=4, subsample=0.5, learning_rate=0.05)
    # Fit on defaulting rows only, regressing log(loss).
    gbr1000.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
    preds = gbr1000.predict(sub_x_Test[ind_test])
    # Undo the log target transform.
    preds_all[ind_test] = np.power(np.e, preds)
    preds_all[ind_tmp0] = 0
    return preds_all
# predict the loss based on the Gaussian process regressor, which has been trained
def gp_predict(clf, x_Test):
    """Run `clf.predict` over x_Test in chunks of 3000 rows (bounds the
    Gaussian-process memory footprint) and return exp(predictions).

    The model is assumed to have been fit on log(loss), so predictions
    are exponentiated before being returned.
    """
    size = len(x_Test)
    part_size = 3000
    # Integer ceiling division.  The original used `/`, which under
    # Python 3 yields a float that range() rejects; `//` matches the
    # Python 2 behaviour in both interpreters.
    cnt = (size - 1) // part_size + 1
    preds = []
    for i in range(cnt):
        if i < cnt - 1:
            pred_part = clf.predict(x_Test[i * part_size: (i + 1) * part_size])
        else:
            pred_part = clf.predict(x_Test[i * part_size: size])
        preds.extend(pred_part)
    return np.power(np.e, preds)
# train the gaussian process regressor
def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part):
    """Fit a Gaussian process on (part of) the defaulting training rows and
    predict losses for sub_x_Test_part.

    WARNING: mutates both sub_x_Train and sub_x_Test_part in place
    (log transform of column 16, scaling of test rows).
    """
    #Owing to out of memory, the model was trained by part of training data
    #Attention, this part was trained on the ram of more than 96G
    # log(1 - x) transform of column 16 -- assumes training column 16 is
    # always < 1; TODO confirm.
    sub_x_Train[:,16] = np.log(1-sub_x_Train[:,16])
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    ind_train = np.where(train_y>0)[0]
    # Only 70% of the defaulting rows are used, to bound GP memory.
    part_size= int(0.7 * len(ind_train))
    gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10, corr= 'absolute_exponential')
    # Regress log(loss); gp_predict exponentiates on the way out.
    gp.fit(sub_x_Train[ind_train[:part_size]], np.log(train_y[ind_train[:part_size]]))
    # Column 16 >= 1 flags test rows that are skipped (left at 0).
    flag = (sub_x_Test_part[:,16] >= 1)
    ind_tmp0 = np.where(flag)[0]
    ind_tmp = np.where(~flag)[0]
    sub_x_Test_part[ind_tmp,16] = np.log(1-sub_x_Test_part[ind_tmp,16])
    sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp])
    gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp])
    gp_preds = np.zeros(len(sub_x_Test_part))
    gp_preds[ind_tmp] = gp_preds_tmp
    return gp_preds
# use gbm classifier to predict whether the loan defaults or not, then invoke the function gbc_gp_predict_part
def gbc_gp_predict(train_x, train_y, test_x):
    """Train a GBM default classifier, then run the Gaussian-process loss
    regressor on the test rows classified as likely defaults
    (P(default) > 0.55).  Other rows keep a loss of 0."""
    top = getTopFeatures(train_x, train_y)
    pair_args = (features.feature_pair_sub_list,
                 features.feature_pair_plus_list,
                 features.feature_pair_mul_list,
                 features.feature_pair_divide_list[:20])
    design_train = get_data(train_x, top[:16], *pair_args)
    design_test = get_data(test_x, top[:16], *pair_args)
    clf = GradientBoostingClassifier(n_estimators=3000, max_depth=9)
    clf.fit(design_train, toLabels(train_y))
    default_prob = clf.predict_proba(design_test)[:, 1]
    likely_default = np.where(default_prob > 0.55)[0]
    gp_preds = np.zeros(len(test_x))
    gp_preds[likely_default] = gbc_gp_predict_part(design_train, train_y,
                                                   design_test[likely_default])
    return gp_preds
# invoke the function gbc_svr_predict_part
def gbc_svr_predict(gbc, train_x, train_y, test_x):
    """Thin wrapper: run the SVR loss pipeline with the feature-pair
    lists defined in the `features` module."""
    return gbc_svr_predict_part(
        gbc, train_x, train_y, test_x,
        features.feature_pair_sub_list,
        features.feature_pair_plus_list,
        features.feature_pair_mul_list,
        features.feature_pair_divide_list,
        features.feature_pair_sub_mul_list,
        features.feature_pair_sub_list_sf,
        features.feature_pair_plus_list2,
    )
# invoke the function gbc_gbr_predict_part
def gbc_gbr_predict(gbc, train_x, train_y, test_x):
    """Thin wrapper: run the GBR loss pipeline with the feature-pair
    lists defined in the `features` module."""
    return gbc_gbr_predict_part(
        gbc, train_x, train_y, test_x,
        features.feature_pair_sub_list,
        features.feature_pair_plus_list,
        features.feature_pair_mul_list,
        features.feature_pair_divide_list,
        features.feature_pair_sub_mul_list,
        features.feature_pair_sub_list2,
    )
# the main function
if __name__ == '__main__':
    # Pipeline: load/clean both matrices, train the default classifier once,
    # then blend three loss regressors (SVR, GBR, Gaussian process) that each
    # score only the rows the classifier flags as likely defaults.
    train_fs = load_train_fs()
    test_fs = load_test_fs()
    train_x, train_y = train_type(train_fs)
    test_x = test_type(test_fs)
    gbc = gbc_classify(train_x, train_y)
    svr_preds = gbc_svr_predict(gbc, train_x, train_y, test_x)
    gbr_preds = gbc_gbr_predict(gbc, train_x, train_y, test_x)
    gp_preds = gbc_gp_predict(train_x, train_y, test_x)
    # Fixed blend weights (presumably tuned offline by validation -- not
    # derived anywhere in this file).
    preds_all = svr_preds * 0.4 + gp_preds * 0.25 + gbr_preds * 0.35
    output_preds(preds_all)
|
Goodideax/CS249
|
predict_combine_5000.py
|
Python
|
bsd-3-clause
| 14,608
|
[
"Gaussian"
] |
7bf46a3ff9906d0406c9b46891dd314cfd7ac7eb750d64a42fc05aee1294e0ed
|
from __future__ import print_function
from numpy import *
from rdkit.sping import pid
import math
def DrawSpiral(canvas,startColor,endColor,startRadius,endRadius,nLoops,degsPerSlice=70,degsPerStep=1,
               startAngle=0,centerPos=None,dir=1):
    """Draw a spiral of filled pie-slice polygons on `canvas`.

    The radius interpolates linearly from startRadius to endRadius and the
    fill colour from startColor to endColor over nLoops revolutions.
    `dir` (+1/-1) sets the winding direction; `centerPos` defaults to the
    canvas centre.  NOTE: `dir` shadows the builtin but is part of the
    public signature, so it is kept.
    """
    if centerPos is None:
        centerPos = (canvas.size[0]/2,canvas.size[1]/2)
    nSlices = int(math.ceil(360*nLoops/degsPerSlice))
    radPerStep = math.pi*degsPerStep/180.
    # int() is required: under Python 3 (this module already imports
    # print_function for 2/3 compatibility) `/` is true division and
    # range() below would reject the float.
    stepsPerSlice = int(degsPerSlice/degsPerStep)
    radiusStep = float(endRadius-startRadius)/(stepsPerSlice*nSlices)
    colorStep = (array(endColor,float)-array(startColor,float))/nSlices
    print('INFO:',nSlices,radPerStep,stepsPerSlice,radiusStep,colorStep)
    angle = math.pi*startAngle/180.
    radius = startRadius
    color = array(startColor,float)
    for i in range(nSlices):
        # Each slice is a fan: centre point plus the arc sample points.
        pts = [ (centerPos[0],centerPos[1])]
        for j in range(stepsPerSlice):
            xPos = centerPos[0] + radius*math.cos(angle)
            yPos = centerPos[1] + radius*math.sin(angle)
            pts.append((xPos,yPos))
            angle += dir*radPerStep
            radius += radiusStep
        # Close the arc with one more point at the advanced angle/radius.
        xPos = centerPos[0] + radius*math.cos(angle)
        yPos = centerPos[1] + radius*math.sin(angle)
        pts.append((xPos,yPos) )
        canvas.drawPolygon(pts,edgeColor=pid.transparent,
                           fillColor=pid.Color(color[0],color[1],color[2]),
                           closed=1)
        color += colorStep
if __name__ == '__main__':
    # Demo: render an 8-loop blue spiral, winding clockwise (dir=-1) and
    # shrinking from radius 200 to 50, and save it as an SVG file.
    # The commented alternatives target the PIL and PDF pid backends.
    #from sping.PIL.pidPIL import PILCanvas
    #canv = PILCanvas(size=(600,600),name='test.png')
    from rdkit.sping.SVG.pidSVG import SVGCanvas
    #from rdkit.sping.PDF.pidPDf import PDFCanvas
    canv = SVGCanvas(size=(600,600),name='test.svg')
    #canv = PDFCanvas(size=(600,600),name='test.pdf')
    DrawSpiral(canv,(.2,.2,1),(.9,.9,1.),200,50,8,startAngle=-45,degsPerSlice=50,dir=-1)
    canv.save()
|
AlexanderSavelyev/rdkit
|
rdkit/utils/spiral.py
|
Python
|
bsd-3-clause
| 1,811
|
[
"RDKit"
] |
412c5b39540600751448506b08cd0db727378303c5629eff32681e8f0ec5cc7d
|
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# Radial distribution function g(r) from a DCD trajectory (Python 2 script).
# Reads all atom coordinates for nSteps frames, computes every pairwise
# distance with the minimum-image convention in a hard-coded cubic box,
# histograms the distances, and writes r, raw counts and normalized g(r)
# to 'g_r.dat'.  Cost is O(nSteps * nAtoms^2) in time and memory-heavy.
# USAGE:
# PREAMBLE:
import numpy as np
import sys
import MDAnalysis
psf = sys.argv[1]
traj = sys.argv[2]
# SUBROUTINES:
# MAIN PROGRAM:
nf = open('g_r.dat', 'w')
# set the universe object
u = MDAnalysis.Universe('%s' %(psf), '%s' %(traj), format='DCD')
nAtoms = len(u.atoms)
#nSteps = len(u.trajectory)
#nSteps = 200
nSteps = 1000
print nAtoms, nSteps
# Cubic box edge hard-coded to 28.40 (presumably Angstrom -- confirm
# against the simulation setup); used for minimum-image wrapping below.
box_dim = np.array([28.40, 28.40, 28.40], dtype=np.float32)
dmin, dmax = 0.0, 10.0
# Buffer all coordinates up front: (frames, atoms, xyz).
coor = np.zeros((nSteps,nAtoms, 3))
for ts in u.trajectory[1:nSteps+1]:
    # NOTE(review): maps trajectory frame number to a 0-based index by
    # subtracting 2; frame numbering conventions changed across MDAnalysis
    # versions -- verify this offset for the installed version.
    i = u.trajectory.frame -2
    if i%10==0:
        print('Reading step %d from trajectory file \n' %(i))
    for atom in range(0,nAtoms):
        coor[i][atom] = u.atoms[atom].position
#for i in range(len(coor)):
#	print i, coor[i][0]
dist2_array = []
count = 0.0
component = np.zeros(3)
# All unique atom pairs, every frame: minimum-image squared distances.
for ts in range(len(coor)):
    if ts%10==0:
        print('Working on distance calc for frame = %10d \n' %(ts))
    for atom1 in range(0,nAtoms-1):
        for atom2 in range(atom1+1,nAtoms):
            count += 1.0
            dist2 = 0.0;
            for j in range(0,3):
                component[j] = coor[ts][atom1][j] - coor[ts][atom2][j]
                # minimum-image convention: wrap into [-L/2, L/2]
                if component[j] <-box_dim[j]/2.0:
                    component[j] += box_dim[j]
                if component[j] >box_dim[j]/2.0:
                    component[j] -= box_dim[j]
                dist2 += np.power(component[j],2)
            if dist2 <= 0.0:
                print "negative distance^2"
            dist2_array.append(dist2)
dist_array = np.zeros(len(dist2_array))
np.sqrt(dist2_array, dist_array)
# Histogram the distances and normalize by the ideal-gas shell population
# to obtain g(r).
rdf, edges = np.histogramdd(dist_array, bins = 100, range = [(dmin, dmax)])
radii = 0.5*(edges[0][1:] + edges[0][:-1])
delta_r = edges[0][1] - edges[0][0]
density = nAtoms/np.power(box_dim[0],3)
norm = density*count*delta_r
vol = (4./3.)*np.pi*density*(np.power(edges[0][1:],3) - np.power(edges[0][:-1],3))
print "volume values:", vol
print "density (units Ang^-3)", density
print "total number of distances calculated:", count
print "normalization factor (w/out volume) (units: Ang^-3)", norm
corr_rdf = np.zeros(len(rdf))
for i in range(len(rdf)):
    corr_rdf[i] = rdf[i]/(norm*vol[i])
    # Columns: left bin edge, raw pair count, normalized g(r).
    nf.write('%15.6f %15.6f %15.6f \n' %(edges[0][i], rdf[i], corr_rdf[i]))
nf.close()
|
rbdavid/MolecDynamics
|
Analysis/RDF/radialdist.py
|
Python
|
mit
| 2,218
|
[
"MDAnalysis"
] |
2b81f395b07c30d1426dc474429e862454fb51a39344858bcc25668394ebeed9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for Input/Output, including plotting when required.
@author: Antoine Recanati
"""
from __future__ import print_function
import os
import subprocess
import sys
import csv
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import numpy as np
def oprint(msg, dt=None, cond=True):
    """ Prints msg and time elapsed if condition is satisfied.
    Parameters
    ----------
    msg : string (message to print)
    dt : float (time elapsed)
    cond : bool (condition for message to be printed, e.g. verbosity >= 2)
    """
    if not cond:
        return
    if dt:
        # prefix the message with the elapsed time in seconds
        print("%5.3f s: %s" % (dt, msg), file=sys.stderr)
    else:
        print(msg, file=sys.stderr)
def make_dir(path):
    """ Makes directory given by path if it does not exist yet. """
    import errno
    try:
        os.mkdir(path)
    except OSError as err:
        # Already exists: nothing to do.  Re-raise anything else.
        # EAFP avoids the check-then-create race of the original
        # exists() + mkdir() pair.
        if err.errno != errno.EEXIST:
            raise
def fill_args_opts(args):
    """ Checks options from argument parser args in a dictionary and returns them in dictionary opts. """
    # Assign variables and constants and store them in opts dictionary
    opts = {}
    opts['ROOT_DIR'] = args.root
    opts['W_LEN'] = args.w_len
    opts['W_OVL_LEN'] = args.w_ovl_len
    opts['LEN_THR'] = args.len_thr
    # NOTE(review): the chained assignment also binds a local VERB that is
    # never read afterwards.
    opts['VERB'] = VERB = args.verbosity
    opts['MIN_CC_LEN'] = args.min_cc_len
    opts['N_PROC'] = args.nproc
    opts['TRIM_MARGIN'] = args.trim_margin
    opts['MERGE_MARGIN'] = args.margin
    opts['JULIA_PATH'] = args.julia
    # The Julia ordering script lives next to this module.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    opts['JULIA_SCRIPT'] = dir_path + '/spectral_ordering.jl'
    # Check if SPOA found: try the given path, then relative to this module,
    # then `which` on $PATH; consensus is skipped if none succeeds.
    if os.path.exists(args.spoapath):
        opts['SPOA_PATH'] = os.path.abspath(args.spoapath)
        opts['DO_SPOA'] = True
    else:
        dir_path = os.path.dirname(os.path.realpath(__file__))
        full_path = '%s/%s' % (dir_path, args.spoapath)
        if os.path.exists(full_path):
            opts['SPOA_PATH'] = os.path.abspath(full_path)
            opts['DO_SPOA'] = True
        else:
            # In case spoa is in the $PATH but not explicitely given
            try:
                SPOA_PATH = subprocess.check_output("which %s; exit 0" % args.spoapath,
                                                    stderr=subprocess.STDOUT, shell=True)
                opts['SPOA_PATH'] = SPOA_PATH.split('\n')[0]
                opts['DO_SPOA'] = True
            # Otherwise do not perform consensus
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # narrowing it to (OSError, subprocess.CalledProcessError) would
            # be safer.
            except:
                opts['DO_SPOA'] = False
                msg = "spoa executable not found. Provide it with option"\
                    "--spoapath if you wish to compute consensus sequences"
                oprint(msg)
    # Plot found positions against a reference only when a reference-position
    # CSV was supplied.
    DO_PLOT_POS_V_REF = False
    if args.ref_pos_csvf is not None:
        DO_PLOT_POS_V_REF = True
        opts['REF_POS_CSVF'] = args.ref_pos_csvf
    opts['DO_PLOT_POS_V_REF'] = DO_PLOT_POS_V_REF
    # Check reads format
    READS_FN = args.READS_FN
    suffix = READS_FN.split('/')[-1].split('.')[-1]
    suffix = suffix[-1] # only last letter to handle .fasta and .fa the same way
    if (suffix.lower() == 'a'):
        opts['READS_FMT'] = 'fasta'
    elif (suffix.lower() == 'q'):
        opts['READS_FMT'] = 'fastq'
    else:
        msg = "Input file {} has no standard suffix. Please provide a file that ends in "\
            ".*a or .*q (e.g. .fasta, .fa, .fastq or .fq)".format(READS_FN)
        # NOTE(review): StandardError exists only in Python 2.
        raise StandardError(msg)
    return opts
def plot_cc_pos_v_ref(ref_pos_csvf, cc, bpos_cc, figpath):
    """ Plots position of reads found by algorithm vs reference
    if reference position of the reads is already computed from a reference genome.
    Parameters
    ----------
    ref_pos_csvf : str - path to CSV file with the reference position of every read
    cc : list - reads indices in a given connected component
    bpos_cc : list - leftmost base coordinate for reads in cc
    figpath : str - path to file to save figure.
    """
    # Restrict reference position to cc and convert to numpy array
    with open(ref_pos_csvf, "rb") as posf:
        reader = csv.reader(posf)
        # single-row CSV: one integer position per read
        all_reads_pos = [int(el) for el in list(reader)[0]]
    ref_pos_cc = np.array(all_reads_pos)[cc]
    bpos_cc = np.array(bpos_cc)
    # Remove reads unmapped by BWA
    # (apparently flagged with a huge sentinel position >= 1e7 -- confirm
    # against the script that produces the CSV)
    ok_idx = np.argwhere(ref_pos_cc < 1e7)
    ref_pos_cc = ref_pos_cc[ok_idx]
    bpos_cc = bpos_cc[ok_idx]
    # Remove offset
    # pos_cc -= pos_cc.min()
    # bpos_cc -= bpos_cc.min()
    plt.scatter(ref_pos_cc, bpos_cc, s=0.1)
    plt.xlabel("true position (from BWA)", fontsize=16)
    plt.ylabel("read position found by algorithm", fontsize=16)
    plt.savefig(figpath)
    plt.close()
def plot_cc_pos_found(bpos_cc, figpath):
    """ For debugging. Plot the fine-grained layout found (positions of reads)
    vs the coarse-grained (ordering of the reads). If it does not look
    *roughly* like a line, there may be a problem.
    Parameters
    ----------
    bpos_cc : list (leftmost base coordinate for reads in current connected component)
    figpath : str (path to file to save figure)
    """
    # bpos_cc is already in spectral order, so the x axis is the read rank.
    plt.plot(bpos_cc)
    plt.xlabel("read number (found by spectral ordering)", fontsize=16)
    plt.ylabel("read position found by fine-grained computation", fontsize=16)
    plt.savefig(figpath)
    # close the figure so later plots do not draw on top of this one
    plt.close()
def write_layout_to_file(layout_fn, strands, bpos, epos, cc, read_nb2id):
    """ Writes layout of reads to a file.
    Writes to file lines containing the following:
    read number (in the order of the fasta file), read name, leftmost base
    coordinate, rightmost base coordinate, strand.
    (!) Convention : if strand == '-', the leftmost coordinate corresponds to
    the end of the read (i.e. to the beginning of the reverse component of the
    read).
    Parameters
    ----------
    layout_fn : str (path to file to save layout)
    strands : list (strand for each read)
    bpos : array (leftmost base coordinate for reads in current connected component)
    epos : array (rightmost base coordinate for reads in current connected component)
    cc : list (read number (from original .fasta/q file) for reads in the current connected component)
    read_nb2id : dict (keys : read number, values : read id)
    """
    # Emit reads sorted by leftmost coordinate.
    idx_sort = bpos.argsort()
    # Text mode: the original opened the file in 'wb' while writing str,
    # which raises TypeError on Python 3.  A with-block guarantees the
    # handle is closed even if a lookup below fails.
    with open(layout_fn, 'w') as fh:
        for idx in idx_sort:
            read_nb = cc[idx]
            read_id = read_nb2id[read_nb]
            lmbc = bpos[idx]
            rmbc = epos[idx]
            s = '+' if strands[idx] else '-'
            fh.write("%d\t%s\t%d\t%d\t%s\n" % (read_nb, read_id, lmbc, rmbc, s))
#
# # Sort reads in connected component by exact position
# idx_sort = bpos.argsort()
# bpos = bpos[idx_sort]
# strands = strands[idx_sort]
#
# # Write to file
# fh = open(layout_fn, 'wb')
# for (idx, read_nb) in enumerate(cc):
# read_id = read_nb2id[read_nb]
# lmbc = bpos[idx]
# s = '+' if strands[idx] == 1 else '-'
# fh.write("%d\t%s\t%d\t%s\n" % (read_nb, read_id, lmbc, s))
#
# fh.close()
|
antrec/spectrassembler
|
ioandplots.py
|
Python
|
mit
| 7,001
|
[
"BWA"
] |
e0e118b7cf36f6f7fff45aac6802348181f5f6f9af4f828eb428aa9a70d0472f
|
import xml.etree.ElementTree as ET
from os import makedirs
from os import path as os_path
import CppHeaderParser
# Generate code for the multi-threaded architecture by default; a client
# config can switch this off via USE_SINGLE_TASK_ARCH (see getFilePaths).
# NOTE(review): the name is misspelled ("Archicture") but referenced
# elsewhere in this file -- do not rename without updating all uses.
multiThreadArchicture=True
# Type signature string -> Datatype object (looked up via getDatatype).
datatypes = {}
# Accumulated source text of generated type declarations (see getTypeDeclarations).
datatypeDeclarations = []
# #define-style aliases: name -> underlying type signature.
defines = {}
# Path of the header currently being processed, used in error messages.
currentFile = ""
prefix = "RPC_" # change prefix inside the header with #pragma RPC prefix EXAMPLE_
# change projectname in the server header with #pragma RPC project My
# Project Name
projectname = "RPC"
# change command_id_start in the server header with #pragma RPC
# command_id_start 42
start_command_id = 1
# change version_number in the server header with #pragma RPC version_number 42
version_number = 0
# Functions listed here are skipped / answered-asynchronously / pinned to a
# fixed command ID, all controlled by #pragma RPC directives (evaluatePragmas).
functionIgnoreList = []
functionNoAnswerList = []
functionPredefinedIDs = {}
def evaluatePragmas(pragmas):
    """Apply '#pragma RPC <command> ...' directives from the parsed header.

    Mutates the module-level configuration (prefix, projectname,
    start_command_id, version_number, ignore/noanswer lists, predefined
    IDs).  Pragmas not starting with 'RPC' are silently skipped.
    NOTE(review): validation uses assert, which vanishes under `python -O`.
    """
    for p in pragmas:
        program, command = p.split(" ", 1)
        if program == "RPC":
            try:
                command, target = command.split(" ", 1)
            except ValueError:
                assert False, "Invalid command or parameter: {} in {}".format(
                    command, currentFile)
            if command == "ignore":
                # target must be a single function name
                assert len(
                    target.split(" ")) == 1, "Invalid function name: {} in {}".format(
                    target, currentFile)
                functionIgnoreList.append(target)
            elif command == "noanswer":
                functionNoAnswerList.append(target)
            elif command == "prefix":
                global prefix
                prefix = target
            elif command == "ID":
                assert len(p.split(" ")) == 4, "Custom command pragma must have the form '#pragma RPC ID [functionname] [number]' \
where [functionname] is the name of a function in the header and [number] an even unique number greater than 1"
                function, ID = p.split(" ")[2:4]
                ID = int(ID)
                # IDs are stored halved (request/response pairs use ID and ID+1).
                assert ID >= 2, "Custom command IDs must be at least 2"
                assert ID < 256, "Custom command IDs must be less than 256"
                assert ID % 2 == 0, "Custom command IDs must be even"
                assert int(ID / 2) not in functionPredefinedIDs.values(), "ID {ID} cannot be used for both functions '{f2}' and '{f1}'".format(
                    ID=ID,
                    f1=function,
                    f2=list(functionPredefinedIDs.keys())[
                        list(functionPredefinedIDs.values()).index(int(ID / 2))],
                )
                functionPredefinedIDs[function] = int(ID / 2)
            elif command == "projectname":
                global projectname
                projectname = p.split(" ", 2)[2]
            elif command == "command_id_start":
                startid = int(target)
                assert startid % 2 == 0, "command_id_start must be even"
                assert startid < 256, "command_id_start must be less that 256"
                global start_command_id
                start_command_id = int(startid / 2)
            elif command == "version_number":
                global version_number
                target = int(target)
                assert target < 65536 and target >= 0, "version_number must be positive and less than 65536"
                version_number = target
            else:
                assert False, "Unknown preprocessor command #pragma RPC {} in {}".format(
                    command, currentFile)
def calculateHash(filenames):
    """Return (escaped, hexdigest) of the MD5 over the given files,
    concatenated in order.

    `escaped` is the digest rendered as C-style '\\xAB' escape text for
    embedding in generated source; `hexdigest` is the plain hex string.
    """
    from hashlib import md5
    digest = md5()
    for name in filenames:
        with open(name, "r") as handle:
            digest.update(handle.read().encode("UTF-8"))
    hexdigest = digest.hexdigest()
    escaped = "".join("\\x" + hexdigest[i:i + 2]
                      for i in range(0, len(hexdigest), 2))
    return escaped, hexdigest
def getFilePaths():
    """Parse the client/server config files named on the command line and
    return a dict of all output/input paths and code-generation settings.

    Result is memoized on the function object, so argument parsing and
    config validation happen only once per process.  Side effects: creates
    output directories, chdir()s into the config directories, and mutates
    module globals (currentFile, hashstring, rawhash, multiThreadArchicture
    via the pragmas of the parsed server header).
    """
    try:
        return getFilePaths.retval
    except AttributeError:
        pass
    # get paths for various files that need to be created. all created files start with prefix
    # parse input
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument(
        "ClientConfig",
        help="Configuration file for the client",
        type=str)
    parser.add_argument(
        "ServerConfig",
        help="Configuration file for the server",
        type=str)
    args = parser.parse_args()
    # check if input is valid
    from os.path import isfile, abspath, split
    from os import chdir, makedirs
    assert isfile(args.ClientConfig), "Error: Config file " + \
        args.ClientConfig + " does not exist."
    assert isfile(args.ServerConfig), "Error: Config file " + \
        args.ServerConfig + " does not exist."
    from configparser import ConfigParser
    clientconfigpath = abspath(args.ClientConfig)
    serverconfigpath = abspath(args.ServerConfig)
    retval = {}
    # check client config for validity
    clientconfig = ConfigParser()
    clientconfig.read(args.ClientConfig)
    # Relative paths in the client config are resolved from its directory.
    chdir(split(clientconfigpath)[0])
    retval["CLIENT_CONFIG_PATH"] = split(clientconfigpath)[0]
    if "USE_SINGLE_TASK_ARCH" in clientconfig["configuration"]:
        # print("USE_SINGLE_TASK_ARCH")
        #print(clientconfig["configuration"]['USE_SINGLE_TASK_ARCH']);
        if (clientconfig["configuration"]['USE_SINGLE_TASK_ARCH'].lower() == "true"):
            print("using single thread in client")
            global multiThreadArchicture
            multiThreadArchicture=False
    # DOCDIR is optional; without it no documentation is generated.
    if "DOCDIR" in clientconfig["configuration"]:
        makedirs(clientconfig["configuration"]['DOCDIR'], exist_ok=True)
        retval[
            "CLIENT_" +
            "DOCDIR"] = abspath(
            clientconfig["configuration"]["DOCDIR"])
    else:
        print(
            "Warning in \"" +
            clientconfigpath +
            "\": No DOCDIR specified. No documentation will be generated.")
    # Mandatory client output directories.
    for d in ("SRCDIR", "GENINCDIR", "SPCINCDIR"):
        assert d in clientconfig["configuration"], "Error in \"" + \
            clientconfigpath + "\": No " + d + " specified. Abort."
        makedirs(clientconfig["configuration"][d], exist_ok=True)
        retval["CLIENT_" + d] = abspath(clientconfig["configuration"][d])
    if "CLIENT_FUNCTION_PREFIX" in clientconfig["configuration"]:
        retval["CLIENT_FUNCTION_PREFIX"] = clientconfig["configuration"]["CLIENT_FUNCTION_PREFIX"]
    else:
        retval["CLIENT_FUNCTION_PREFIX"] = ""
    # Collect XMLDIR, XMLDIR1, XMLDIR2, ... entries (plus DOCDIR when set).
    XMLDIR_suffix = ""
    XMLDIR_suffix_num = 0;
    if "CLIENT_DOCDIR" in retval:
        retval["CLIENT_XMLDIR"] = [retval["CLIENT_DOCDIR"]]
    else :
        retval["CLIENT_XMLDIR"] = []
    if "XMLDIR"+XMLDIR_suffix not in clientconfig["configuration"]:
        XMLDIR_suffix = "1"
    while "XMLDIR"+XMLDIR_suffix in clientconfig["configuration"]:
        path = clientconfig["configuration"]["XMLDIR"+XMLDIR_suffix]
        print("creating path client:"+path)
        makedirs(path, exist_ok=True)
        retval["CLIENT_XMLDIR"].append(path)
        XMLDIR_suffix_num = XMLDIR_suffix_num+1
        XMLDIR_suffix = str(XMLDIR_suffix_num)
    #print(retval["CLIENT_" +"XMLDIR_suffix"])
    # check server config for validity
    serverconfig = ConfigParser()
    serverconfig.read(serverconfigpath)
    chdir(split(serverconfigpath)[0])
    retval["SERVER_CONFIG_PATH"] = split(serverconfigpath)[0]
    # NOTE(review): SOURCEHEADER is accessed here *before* the assert below
    # that produces the friendly error message; a missing key raises a bare
    # KeyError instead.
    retval["ServerHeaderName"] = split(
        abspath(serverconfig["configuration"]["SOURCEHEADER"]))[1][:-2]
    assert "SOURCEHEADER" in serverconfig[
        "configuration"], "Error in \"" + serverconfigpath + "\": No SOURCEHEADER specified. Abort."
    assert isfile(serverconfig["configuration"]["SOURCEHEADER"]), "Error in \"" + serverconfigpath + \
        "\": Required file \"" + \
        serverconfig["configuration"]["SOURCEHEADER"] + "\" not found. Abort."
    retval["ServerHeader"] = abspath(
        serverconfig["configuration"]["SOURCEHEADER"])
    global currentFile
    currentFile = retval["ServerHeader"]
    if "INCLUDE_INTO_TYPES" in clientconfig["configuration"]:
        retval["EXTRA_INCLUDE_INTO_CLIENT_TYPES_H"] = clientconfig["configuration"]["INCLUDE_INTO_TYPES"]
    else:
        retval["EXTRA_INCLUDE_INTO_CLIENT_TYPES_H"] = ""
    if "DOCDIR" in serverconfig["configuration"]:
        makedirs(serverconfig["configuration"]['DOCDIR'], exist_ok=True)
        retval[
            "SERVER_" +
            "DOCDIR"] = abspath(
            serverconfig["configuration"]["DOCDIR"])
    else:
        print(
            "Warning in \"" +
            serverconfigpath +
            "\": No DOCDIR specified. No documentation will be generated.")
    # Mandatory server output directories.
    for d in ("SRCDIR", "GENINCDIR"):
        assert d in serverconfig["configuration"], "Error in \"" + \
            serverconfigpath + "\": No " + d + " specified. Abort."
        makedirs(serverconfig["configuration"][d], exist_ok=True)
        retval["SERVER_" + d] = abspath(serverconfig["configuration"][d])
    # Same XMLDIR collection scheme as for the client above.
    XMLDIR_suffix = ""
    XMLDIR_suffix_num = 0;
    if "SERVER_DOCDIR" in retval:
        retval["SERVER_XMLDIR"] = [retval["SERVER_DOCDIR"]]
    else:
        retval["SERVER_XMLDIR"] = []
    if "XMLDIR"+XMLDIR_suffix not in serverconfig["configuration"]:
        XMLDIR_suffix = "1"
    while "XMLDIR"+XMLDIR_suffix in serverconfig["configuration"]:
        path = serverconfig["configuration"]["XMLDIR"+XMLDIR_suffix]
        print("creatint path server:"+ path)
        makedirs(path, exist_ok=True)
        retval["SERVER_XMLDIR"].append(path)
        XMLDIR_suffix_num = XMLDIR_suffix_num+1
        XMLDIR_suffix = str(XMLDIR_suffix_num)
    #print(retval["SERVER_XMLDIR"])
    # Hash of the generator, both configs and the server header -- embedded
    # in generated code so client and server can verify they match.
    global hashstring
    global rawhash
    from sys import argv
    hashstring, rawhash = calculateHash(
        (argv[0],
         clientconfigpath,
         serverconfigpath,
         retval["ServerHeader"]))
    # Parse the server header and apply its '#pragma RPC' directives.
    ast = CppHeaderParser.CppHeader(
        abspath(serverconfig["configuration"]["SOURCEHEADER"]))
    evaluatePragmas(ast.pragmas)
    getFilePaths.retval = retval
    return getFilePaths.retval
def getDatatype(signature, file="???", line="???"):
    """Resolve a type signature string to its registered Datatype object.

    Lookup order: direct entry in `datatypes`, then through one level of
    `defines` aliasing.  `file`/`line` are only used to build error
    messages.  Unknown signatures abort via assert.
    """
    # print(10*"+")
    # print(signature)
    signatureList = signature.split(" ")
    if signature in datatypes:
        return datatypes[signature]
    # one level of alias indirection via #define-style entries
    if signature in defines and defines[signature] in datatypes:
        return datatypes[defines[signature]]
    if len(signatureList) == 2 and (signatureList[
            0] == "struct" or signatureList[0] == "enum"):
        assert signature in datatypes, 'Unknown type "{signature}" in {file}:{line}'.format(
            signature=signature,
            file=file,
            line=line,
        )
        return datatypes[signature]
    # Debug dump of the registry before aborting on an unknown type.
    print(">>>", datatypes)
    assert False, 'THIS Unknown type "{signature}" in {file}:{line}'.format(
        signature=signature,
        file=file,
        line=line,
    )
def getTypeDeclarations():
    """Return all collected enum/struct C declarations joined by newlines."""
    # join() accepts the list directly; the former generator was redundant.
    return "\n".join(datatypeDeclarations)
def isVoidDatatype(datatype):
    """Return True when `datatype` represents C `void` (wire size 0 bytes).

    Objects without a `size_bytes` attribute are never void.
    """
    return getattr(datatype, "size_bytes", None) == 0
# Metatype describing what all datatypes must be capable of
class Datatype:
    """Abstract interface implemented by every concrete datatype class
    (integral, enum, array, pointer, struct).

    Fix: the original methods used `raise NotImplemented`, which raises a
    TypeError in Python 3 because NotImplemented is not an exception;
    NotImplementedError is the correct abstract-method sentinel.
    """
    #__init__ #depending on type
    def setXml(self, xml):
        # adds a description of the data type to the xml entry
        raise NotImplementedError
    def declaration(self, identifier):
        # returns the declaration for the datatype given its identifier such as
        # "i" -> "int i" or "ia" -> "int ia[32][47]"
        raise NotImplementedError
    def stringify(self, identifier, indention):
        # identifier is the name of the identifier we want to stringify, can be an expression
        # indention is the indention level of the code
        # returns code that does the stringifying
        raise NotImplementedError
    def unstringify(self, source, identifier, indention):
        # source is the name of a char * pointing to a buffer
        # identifier is the name of the identifier we want to unstringify, can be an expression
        # indention is the indention level of the code
        # returns code that does the unstringifying
        raise NotImplementedError
    def isInput(self):
        # returns True if this is an input parameter when passed to a function and False otherwise
        # pointers and arrays may be pure output parameters, integers are always input parameters
        # if pointers and arrays are input parameters depends on their
        # identifier name
        raise NotImplementedError
    def isOutput(self):
        # returns True if this is an output parameter when passed to a function and False otherwise
        # pointers and arrays can be output parameters, integers can never be output parameters
        # if pointers and arrays are output parameters depends on their
        # identifier name
        raise NotImplementedError
    def getSize(self):
        # returns the number of bytes required to send this datatype over the network
        # pointers return 0.
        raise NotImplementedError
class IntegralDatatype(Datatype):
    """Fixed-size integer/character type, serialized byte-wise LSB first."""
    def __init__(self, signature, size_bytes):
        self.signature = signature    # C type name, e.g. "uint32_t"
        self.size_bytes = size_bytes  # wire size in bytes (0 for "void")
    def setXml(self, xml):
        """Describe this type (bit width, C type, signedness) in `xml`."""
        xml.set("bits", str(self.size_bytes * 8))
        xml.set("ctype", self.signature)
        if self.signature == "char":
            xml.set("type", "character")
        else:
            xml.set("type", "integer")
            typ = ET.SubElement(xml, "integer")
            # all unsigned signatures start with "u" ("unsigned ...", "uint...")
            signed = "False" if self.signature[0] == "u" else "True"
            typ.set("signed", signed)
    def getByte(number, identifier):
        """Return the C expression extracting byte `number` of `identifier`.

        Uses bitshifts to prevent endianness problems.  The original if/elif
        ladder produced identical strings for every byte position 1-7, so it
        is collapsed to a single return.
        """
        assert number < 8, "Do not know how to portably deal with integers bigger than 64 bit"
        if number == 0:
            return "{0}".format(identifier)
        return "{0} >> {1}".format(identifier, 8 * number)
    def orByte(number, identifier, signature, source):
        """Return the C statement or-ing byte `number` from `source` into `identifier`."""
        assert number < 8, "Do not know how to portably deal with integers bigger than 64 bit"
        if number == 0:
            return "{0} |= ({1}){2}".format(identifier, signature, source)
        elif number < 2:
            return "{0} |= ({1}){2} << {3}".format(identifier, signature, source, 8 * number)
        elif number < 4:
            # NOTE: L/LL suffixes presumably keep large shift constants legal
            # on 16/32 bit targets -- verify against the target compilers.
            return "{0} |= ({1}){2} << {3}L".format(identifier, signature, source, 8 * number)
        elif number < 8:
            return "{0} |= ({1}){2} << {3}LL".format(identifier, signature, source, 8 * number)
    def declaration(self, identifier):
        # "i" -> "uint32_t i"
        return self.signature + " " + identifier
    def stringify(self, identifier, indention):
        """Return C code pushing this value onto the message, byte by byte."""
        return """
{indention}/* writing integral type {type} {identifier} of size {size} */
{datapush}""".format(
            indention=indention * '\t',
            identifier=identifier,
            type=self.signature,
            size=self.size_bytes,
            datapush="".join(
                indention * '\t' + prefix +
                "message_push_byte((unsigned char)(" +
                IntegralDatatype.getByte(i, identifier) + "));\n"
                for i in range(self.size_bytes)),
        )
    def unstringify(self, source, identifier, indention):
        """Return C code reading this value from `source` (a char* cursor)."""
        if self.size_bytes == 0:
            return ""
        return """
{indention}/* reading integral type {signature} {identifier} of size {size} */
{indention}{identifier} = *{source}++;
{deserialization}""".format(
            indention=indention * '\t',
            identifier=identifier,
            source=source,
            signature=self.signature,
            size=self.size_bytes,
            deserialization="".join(
                indention * '\t' +
                IntegralDatatype.orByte(
                    i, identifier, self.signature, "(*" + source + "++)") +
                ";\n" for i in range(1, self.size_bytes)),
        )
    def isInput(self):
        # integers are passed by value: always input, never output
        return True
    def isOutput(self):
        return False
    def getSize(self):
        assert isinstance(self.size_bytes, int)
        return self.size_bytes
class EnumDatatype(Datatype):
    """C enum, transferred on the wire as the integral `transfertype`."""
    def __init__(self, signature, size_bytes,
                 transfertype, values, name, typedef):
        self.signature = signature        # e.g. "Color" or "enum Color"
        self.size_bytes = size_bytes      # wire size of the transfer integer
        self.transfertype = transfertype  # integral C type used for transfer
        self.values = values              # list of {"name": ..., "value": ...}
        self.typedef = typedef            # True when declared via typedef
        self.name = name                  # bare enum name (without "enum ")
    def setXml(self, xml):
        """Describe the enum (size, C type, all enumerators) in `xml`."""
        xml.set("bits", str(self.size_bytes * 8))
        xml.set("ctype", self.declaration("")[:-1])
        xml.set("type", "enum")
        for v in self.values:
            typ = ET.SubElement(xml, "enum")
            typ.set("name", v["name"])
            typ.set("value", str(v["value"]))
    def getTypeDeclaration(self):
        """Return the C declaration text for this enum."""
        declaration = ",\n\t".join(
            "{name} = {value}".format(
                name=v["name"],
                value=v["value"]) for v in self.values)
        if self.typedef:
            declaration = "typedef enum{{\n\t{declaration}\n}} {name};\n".format(
                declaration=declaration, name=self.name)
        else:  # no typedef
            declaration = "enum {name}{{\n\t{declaration}\n}};\n".format(
                declaration=declaration, name=self.name)
        return declaration
    def declaration(self, identifier):
        return self.signature + " " + identifier
    def stringify(self, identifier, indention):
        """Return C code casting the enum to its transfer type and pushing it."""
        if self.size_bytes == 0:
            return ""
        return """
{indention}/* writing enum type {signature} {identifier} of size {size} */
{indention}{{
{indention}\t{transfertype} temp = ({transfertype}){identifier};
{serialization}
{indention}}}""".format(
            indention=indention * '\t',
            identifier=identifier,
            serialization=IntegralDatatype(
                self.transfertype, self.size_bytes).stringify(
                "temp", indention + 1),
            signature=self.signature,
            size=self.size_bytes,
            transfertype=self.transfertype,
        )
    def unstringify(self, source, identifier, indention):
        """Return C code reading the transfer integer and casting it back."""
        if self.size_bytes == 0:
            return ""
        return """
{indention}/* reading enum type {signature}{identifier} of size {size} */
{indention}{{
{indention}\t{transfertype} temp;
{deserialization}
{indention}\t{identifier} = temp;
{indention}}}""".format(
            indention=indention * '\t',
            identifier=identifier,
            source=source,
            signature=self.signature,
            size=self.size_bytes,
            transfertype=self.transfertype,
            deserialization=IntegralDatatype(
                self.transfertype, self.size_bytes).unstringify(
                source, "temp", indention + 1),
        )
    def isInput(self):
        # enums are passed by value: always input, never output
        return True
    def isOutput(self):
        return False
    def getSize(self):
        assert isinstance(self.size_bytes, int)
        return self.size_bytes
class ArrayDatatype(Datatype):
    """Fixed-size C array of another Datatype.

    Input/output direction is derived from the parameter name suffix
    (_in/_out/_inout) unless overridden via the In/Out keyword arguments.
    """
    # need to be mindful of padding, otherwise it is a fixed size loop
    def __init__(self, numberOfElements, datatype,
                 parametername, In=None, Out=None):
        self.numberOfElements = numberOfElements  # element count, kept as string
        self.datatype = datatype                  # element Datatype
        self.In = parametername.endswith("_in") or parametername.endswith(
            "_inout") if In is None else In
        self.Out = parametername.endswith("_out") or parametername.endswith(
            "_inout") if Out is None else Out
    def setXml(self, xml):
        """Describe the array and (recursively) its element type in `xml`."""
        xml.set("bits", str(self.getSize() * 8))
        xml.set("ctype", self.declaration(""))
        xml.set("type", "array")
        typ = ET.SubElement(xml, "array")
        typ.set("elements", self.numberOfElements)
        self.datatype.setXml(typ)
    def declaration(self, identifier):
        # "ia" -> "int ia[32]"
        return self.datatype.declaration(
            identifier + "[" + str(self.numberOfElements) + "]")
    def isInput(self):
        return self.In
    def isOutput(self):
        return self.Out
    def stringify(self, identifier, indention):
        """Return C code serializing every element (a loop, unless size 1)."""
        if self.numberOfElements == "1":
            # no loop required for 1 element; dereference instead of indexing
            return "{0}{1}".format(
                indention * '\t', self.datatype.stringify("(*" + identifier + ")", indention))
        return """
{indention}/* writing array {name} with {numberOfElements} elements */
{indention}{{
{indention} int {prefix}COUNTER_VAR{indentID};
{indention} for ({prefix}COUNTER_VAR{indentID} = 0; {prefix}COUNTER_VAR{indentID} < {numberOfElements}; {prefix}COUNTER_VAR{indentID}++){{
{serialization}
{indention} }}
{indention}}}""".format(
            name=identifier,
            numberOfElements=self.numberOfElements,
            indention=indention * '\t',
            serialization=self.datatype.stringify(
                "" +
                identifier +
                # counter name includes the indention level so nested array
                # loops get distinct loop variables
                "[{}COUNTER_VAR{}]".format(
                    prefix,
                    indention),
                indention +
                2),
            indentID=indention,
            prefix=prefix,
        )
    def unstringify(self, destination, identifier, indention):
        """Return C code deserializing every element (a loop, unless size 1)."""
        if self.numberOfElements == "1":
            # no loop required for 1 element
            return "{0}{1}".format(
                indention * '\t', self.datatype.unstringify(destination, "(*" + identifier + ")", indention))
        return """
{indention}/* reading array {identifier} with {noe} elements */
{indention}{{
{indention} int {prefix}COUNTER_VAR{ID};
{indention} for ({prefix}COUNTER_VAR{ID} = 0; {prefix}COUNTER_VAR{ID} < {noe}; {prefix}COUNTER_VAR{ID}++){{
{payload}
{indention} }}
{indention}}}""".format(
            identifier=identifier,
            noe=self.numberOfElements,
            indention=indention * '\t',
            payload=self.datatype.unstringify(
                destination,
                identifier +
                "[{}COUNTER_VAR{}]".format(
                    prefix,
                    indention),
                indention +
                2),
            ID=indention,
            prefix=prefix,
        )
    def getSize(self):
        """Total wire size: element count times element size."""
        return int(self.numberOfElements) * self.datatype.getSize()
class PointerDatatype(Datatype):
    """Pointer parameter with a runtime element count.  NOT functional yet:
    the constructor unconditionally asserts.

    Whether the pointer is used for input, output or both depends on the
    parameter name, for example p_in, p_out or p_inout.
    """
    # need to be mindful of padding, otherwise it is a variable sized loop
    def __init__(self, signature, datatype, parametername):
        # Deliberately unreachable: pointer support is unfinished.
        assert None, "Code for pointers does not work yet"
        self.signature = signature
        self.datatype = datatype
        self.In = parametername.endswith(
            "_in") or parametername.endswith("_inout")
        self.Out = parametername.endswith(
            "_out") or parametername.endswith("_inout")
    def setXml(self, xml):
        # incomplete, mirroring the unfinished state of this class
        xml.set("bits", "???")
        xml.set("ctype", self.signature)
    def declaration(self, identifier):
        return self.signature + " " + identifier
    def setNumberOfElementsIdentifier(self, numberOfElementsIdentifier):
        # name of the C variable holding the runtime element count
        self.numberOfElementsIdentifier = numberOfElementsIdentifier
    def stringify(self, identifier, indention):
        """Return C code serializing the pointed-to elements in a loop."""
        return """
{indention}/* writing pointer {type}{name} with {index} elements*/
{indention}{{
{indention} int i;
{indention} for (i = 0; i < {index}; i++){{
{serialization}
{indention} }}
{indention}}}""".format(
            name=identifier,
            type=self.signature,
            index=self.numberOfElementsIdentifier,
            indention=indention * '\t',
            serialization=self.datatype.stringify(
                identifier + "[i]", indention + 2),
        )
    def unstringify(self, destination, identifier, indention):
        """Return C code deserializing the pointed-to elements in a loop."""
        return """
{indention}/* reading pointer {signature}{identifier} with [{numberOfElementsIdentifier}] elements*/
{indention}{{
{indention} int i;
{indention} for (i = 0; i < {numberOfElementsIdentifier}; i++){{
{idDeserializer}
{indention} }}
{indention}}}""".format(
            identifier=identifier,
            signature=self.signature,
            numberOfElementsIdentifier=self.numberOfElementsIdentifier,
            indention=indention * '\t',
            idDeserializer=self.datatype.unstringify(
                destination, identifier + "[i]", indention + 2),
        )
    def isInput(self):
        return self.In
    def isOutput(self):
        return self.Out
    def getSize(self):
        # The float 0. (not int 0) marks "variable length"; the size-case
        # generators test isinstance(size, float) to detect it.
        return 0.
class StructDatatype(Datatype):
    """C struct; (de)serialization just handles all members in order."""
    # just call the functions of all the members in order
    def __init__(self, signature, memberList, file, lineNumber):
        self.signature = signature    # e.g. "S" (typedef'd) or "struct S"
        self.memberList = memberList  # list of {"name": ..., "datatype": ...}
        self.file = file              # source file, kept for error reporting
        self.lineNumber = lineNumber  # declaration line, kept for error reporting
    def setXml(self, xml):
        """Describe the struct and all its members (in order) in `xml`."""
        xml.set("bits", str(self.getSize()))
        xml.set("ctype", self.signature)
        xml.set("type", "struct")
        memberpos = 1
        for e in self.memberList:
            member = ET.SubElement(xml, "parameter")
            member.set("memberpos", str(memberpos))
            member.set("name", e["name"])
            memberpos += 1
            e["datatype"].setXml(member)
    def declaration(self, identifier):
        return self.signature + " " + identifier
    def stringify(self, identifier, indention):
        """Return C code serializing all members in declaration order."""
        members = ", ".join(m["datatype"].declaration(m["name"])
                            for m in self.memberList)
        memberStringification = "".join(
            m["datatype"].stringify(
                identifier + "." + m["name"],
                indention + 1) for m in self.memberList)
        return "{indention}/*writing {identifier} of type {type} with members {members}*/\n{indention}{{{memberStringification}\n{indention}}}".format(
            indention=indention * '\t',
            type=self.signature,
            members=members,
            identifier=identifier,
            memberStringification=memberStringification,
        )
    def unstringify(self, destination, identifier, indention):
        """Return C code deserializing all members in declaration order."""
        memberUnstringification = "".join(
            m["datatype"].unstringify(
                destination,
                identifier + "." + m["name"],
                indention + 1) for m in self.memberList)
        return """
{indention}/* reading {signature}{identifier}*/
{indention}{{
{memberdeserialize}
{indention}}}""".format(
            identifier=identifier,
            signature=self.signature,
            indention=indention * '\t',
            memberdeserialize=memberUnstringification,
        )
    def isInput(self):
        # TODO: Go through members and return if for any of them isInput is true
        # Fix: `raise NotImplemented` raised a TypeError in Python 3;
        # NotImplementedError is the correct exception here.
        raise NotImplementedError
    def isOutput(self):
        # TODO: Go through members and return if for any of them isOutput is true
        raise NotImplementedError
    def getSize(self):
        """Return the summed wire size of all members."""
        return sum(m["datatype"].getSize() for m in self.memberList)
    def getTypeDeclaration(self):
        """Return the C declaration text for this struct."""
        siglist = self.signature.split(" ")
        # a one-token signature means the struct was typedef'd
        isTypedefed = len(siglist) == 1
        memberDeclarations = ";\n\t".join(
            m["datatype"].declaration(
                m["name"]) for m in self.memberList)
        form = "typedef struct{{\n\t{members};\n}}{sig};\n" if isTypedefed else "{sig}{{\n\t{members};\n}};\n"
        return form.format(
            sig=self.signature,
            members=memberDeclarations,
        )
class Function:
    """One RPC-able function from the parsed header.

    Generates the client-side serialization function, the server-side
    dispatch cases, XML protocol descriptions and HTML documentation.
    """
    # stringify turns a function call into a string and sends it to the other side
    # unstringify turns a string into arguments to pass to a function
    # assumes the send function has the signature (void *, size_t);
    # requests have even numbers, answers have odd numbers
    def __init__(self, ID, returntype, name, parameterlist, client_function_prefix):
        #print("ID:", ID)
        #print("returntype:", returntype)
        #print("name:", name)
        #print("parameterlist:", parameterlist)
        # returntype can either be a Datatype "void", but no pointer
        self.isVoidReturnType = isVoidDatatype(returntype)
        if not self.isVoidReturnType:
            # Non-void return values are modelled as an extra output
            # parameter "return_value_out": a one-element array.
            returnValueName = "return_value_out"
            rt = ArrayDatatype("1", returntype, returnValueName)
            parameterlist.insert(
                0, {"parameter": rt, "parametername": returnValueName})
        self.name = name
        self.parameterlist = parameterlist
        #print(10*'+' + '\n' + "".join(str(p) for p in parameterlist) + '\n' + 10*'-' + '\n')
        self.ID = ID
        self.client_function_prefix = client_function_prefix
    def getXml(self, entry):
        """Fill `entry` with this function's request/reply parameter layout."""
        if self.name in functionIgnoreList:
            return
        entry.set("name", self.name)
        declaration = ET.SubElement(entry, "declaration")
        declaration.text = self.getDeclaration()
        request = ET.SubElement(entry, "request")
        request.set("ID", str(self.ID * 2))
        i = 1
        for p in self.parameterlist:
            if not p["parameter"].isInput():
                continue
            param = ET.SubElement(request, "parameter")
            param.set("position", str(i))
            param.set("name", p["parametername"])
            i += 1
            p["parameter"].setXml(param)
        if self.name in functionNoAnswerList:
            return
        reply = ET.SubElement(entry, "reply")
        reply.set("ID", str(self.ID * 2 + 1))
        i = 1
        for p in self.parameterlist:
            if not p["parameter"].isOutput():
                continue
            param = ET.SubElement(reply, "parameter")
            param.set("position", str(i))
            param.set("name", p["parametername"])
            i += 1
            p["parameter"].setXml(param)
    def getOriginalDeclaration(self):
        """Return the declaration of the original (unwrapped) C function."""
        returnvalue = "void " if self.isVoidReturnType else self.parameterlist[
            0]["parameter"].declaration("")
        # NOTE(review): strips only "[1]", keeping the preceding space as the
        # separator before the function name -- verify intended.
        if returnvalue.endswith(" [1]"):
            returnvalue = returnvalue[:-3]
        start = 0 if self.isVoidReturnType else 1
        return "{returnvalue}{functionname}({parameterlist});".format(
            returnvalue=returnvalue,
            functionname=self.name,
            parameterlist=", ".join(
                p["parameter"].declaration(
                    p["parametername"]) for p in self.parameterlist[
                    start:]),
        )
    def getParameterDeclaration(self):
        """Return the comma separated parameter declarations ("void" if none)."""
        parameterdeclaration = ", ".join(
            p["parameter"].declaration(
                p["parametername"]) for p in self.parameterlist)
        if parameterdeclaration == "":
            parameterdeclaration = "void"
        return parameterdeclaration
    def getCall(self):
        """Return the C statement that calls the wrapped function server-side."""
        #print(self.name, self.parameterlist)
        returnvalue = "*return_value_out = " if not self.isVoidReturnType else ""
        parameterlist = self.parameterlist if self.isVoidReturnType else self.parameterlist[
            1:]
        return "{returnvalue}{functionname}({parameterlist});".format(
            returnvalue=returnvalue,
            # non-positive IDs call an internal "<name>_impl" variant
            functionname=self.name if self.ID > 0 else self.name + "_impl",
            parameterlist=", ".join(p["parametername"] for p in parameterlist),
        )
    def getDefinition(self,client_function_prefix):
        """Return the client-side C implementation: serialize the request and,
        unless the function is in functionNoAnswerList, wait for and
        deserialize the answer.

        NOTE(review): the `client_function_prefix` argument is unused; the
        format calls below read `self.client_function_prefix` instead.
        """
        # if (multiThreadArchicture):
        # result = """
        if self.name in functionNoAnswerList:
            if (multiThreadArchicture):
                result = """
RPC_RESULT {client_function_prefix_}{functionname}({parameterdeclaration}){{
RPC_RESULT result;
assert({prefix}initialized);
{prefix}mutex_lock(RPC_mutex_caller);
{prefix}mutex_lock(RPC_mutex_in_caller);
/***Serializing***/
{prefix}message_start({messagesize});
{prefix}message_push_byte({requestID}); /* save ID */
{inputParameterSerializationCode}
result = {prefix}message_commit();
/* This function has been set to receive no answer */
{prefix}mutex_unlock(RPC_mutex_in_caller);
{prefix}mutex_unlock(RPC_mutex_caller);
return result;
}}
"""
            else:
                result = """
RPC_RESULT {client_function_prefix_}{functionname}({parameterdeclaration}){{
RPC_RESULT result;
/***Serializing***/
{prefix}message_start({messagesize});
{prefix}message_push_byte({requestID}); /* save ID */
{inputParameterSerializationCode}
result = {prefix}message_commit();
/* This function has been set to receive no answer */
return result;
}}
"""
            result = result.format(
                requestID=self.ID * 2,
                inputParameterSerializationCode="".join(
                    p["parameter"].stringify(
                        p["parametername"],
                        1) for p in self.parameterlist if p["parameter"].isInput()),
                functionname=self.name,
                parameterdeclaration=self.getParameterDeclaration(),
                prefix=prefix,
                client_function_prefix_=self.client_function_prefix,
                # +1 for the leading request ID byte
                messagesize=sum(p["parameter"].getSize() for p in self.parameterlist if p[
                    "parameter"].isInput()) + 1,
            )
            return result
        result = "";
        if (multiThreadArchicture):
            result = """
RPC_RESULT {client_function_prefix_}{functionname}({parameterdeclaration}){{
assert({prefix}initialized);
{prefix}mutex_lock(RPC_mutex_caller);
for (;;){{
{prefix}mutex_lock(RPC_mutex_in_caller);
/***Serializing***/
{prefix}message_start({messagesize});
{prefix}message_push_byte({requestID}); /* save ID */
{inputParameterSerializationCode}
if ({prefix}message_commit() == RPC_SUCCESS){{ /* successfully sent request */
if ({prefix}mutex_lock_timeout(RPC_mutex_answer)){{ /* Wait for answer to arrive */
if (*{prefix}buffer++ != {answerID}){{ /* We got an incorrect answer */
{prefix}mutex_unlock(RPC_mutex_in_caller);
{prefix}mutex_lock(RPC_mutex_parsing_complete);
{prefix}mutex_unlock(RPC_mutex_parsing_complete);
{prefix}mutex_unlock(RPC_mutex_answer);
continue; /* Try if next answer is correct */
}}
/***Deserializing***/
{outputParameterDeserialization}
{prefix}mutex_unlock(RPC_mutex_in_caller);
{prefix}mutex_lock(RPC_mutex_parsing_complete);
{prefix}mutex_unlock(RPC_mutex_parsing_complete);
{prefix}mutex_unlock(RPC_mutex_answer);
{prefix}mutex_unlock(RPC_mutex_caller);
return RPC_SUCCESS;
}}
else {{ /* We failed to get an answer due to timeout */
{prefix}mutex_unlock(RPC_mutex_in_caller);
{prefix}mutex_unlock(RPC_mutex_caller);
return RPC_FAILURE;
}}
}}
else {{ /* Sending request failed */
{prefix}mutex_unlock(RPC_mutex_in_caller);
{prefix}mutex_unlock(RPC_mutex_caller);
return RPC_FAILURE;
}}
}}
/* assert_dead_code; */
}}
"""
        else:
            result = """
RPC_RESULT {client_function_prefix_}{functionname}({parameterdeclaration}){{
assert({prefix}initialized);
for (;;){{
/***Serializing***/
{prefix}message_start({messagesize});
{prefix}message_push_byte({requestID}); /* save ID */
{inputParameterSerializationCode}
if ({prefix}message_commit() == RPC_SUCCESS){{ /* successfully sent request */
if ({prefix}mutex_lock_timeout(RPC_mutex_answer)){{ /* Wait for answer to arrive */
if (*{prefix}buffer++ != {answerID}){{ /* We got an incorrect answer */
continue; /* Try if next answer is correct */
}}
/***Deserializing***/
{outputParameterDeserialization}
return RPC_SUCCESS;
}}
else {{ /* We failed to get an answer due to timeout */
return RPC_FAILURE;
}}
}}
else {{ /* Sending request failed */
return RPC_FAILURE;
}}
}}
/* assert_dead_code; */
}}
"""
        result = result.format(
            requestID=self.ID * 2,
            answerID=self.ID * 2 + 1,
            inputParameterSerializationCode="".join(
                p["parameter"].stringify(
                    p["parametername"],
                    2) for p in self.parameterlist if p["parameter"].isInput()),
            functionname=self.name,
            client_function_prefix_ = self.client_function_prefix,
            parameterdeclaration=self.getParameterDeclaration(),
            outputParameterDeserialization="".join(
                p["parameter"].unstringify(
                    prefix + "buffer",
                    p["parametername"],
                    4) for p in self.parameterlist if p["parameter"].isOutput()),
            prefix=prefix,
            messagesize=sum(p["parameter"].getSize() for p in self.parameterlist if p[
                "parameter"].isInput()) + 1,
        )
        return result;
    def getDeclaration(self):
        """Return the declaration of the generated RPC client function."""
        return "RPC_RESULT {}{}({});".format(
            # prefix,
            self.client_function_prefix,
            self.name,
            self.getParameterDeclaration(),
        )
    def getRequestParseCase(self, buffer):
        """Return the server switch-case parsing a request and calling the function."""
        if self.name in functionNoAnswerList:
            return """
case {ID}: /* {declaration} */
{{
/* Declarations */
{parameterdeclarations}
/* Read input parameters */
{inputParameterDeserialization}
/* Call function */
{functioncall}
/* This function has been set to receive no answer */
}}
break;""".format(
                ID=self.ID * 2,
                declaration=self.getDeclaration(),
                parameterdeclarations="".join(
                    "\t\t\t" +
                    p["parameter"].declaration(
                        p["parametername"]) +
                    ";\n" for p in self.parameterlist),
                inputParameterDeserialization="".join(
                    p["parameter"].unstringify(
                        buffer,
                        p["parametername"],
                        3) for p in self.parameterlist if p["parameter"].isInput()),
                functioncall=self.getCall(),
            )
        return """
case {ID}: /* {declaration} */
{{
/***Declarations***/
{parameterdeclarations}
/***Read input parameters***/
{inputParameterDeserialization}
/***Call function***/
{prefix}reply_cancelled = 0;
{functioncall}
if ({prefix}reply_cancelled == 0){{
/***send return value and output parameters***/
{prefix}message_start({messagesize});
{prefix}message_push_byte({ID_plus_1});
{outputParameterSerialization}
{prefix}message_commit();
}}
}}
break;""".format(
            ID=self.ID * 2,
            declaration=self.getDeclaration(),
            parameterdeclarations="".join(
                "\t\t\t" +
                p["parameter"].declaration(
                    p["parametername"]) +
                ";\n" for p in self.parameterlist),
            inputParameterDeserialization="".join(
                p["parameter"].unstringify(
                    buffer,
                    p["parametername"],
                    3) for p in self.parameterlist if p["parameter"].isInput()),
            functioncall=self.getCall(),
            outputParameterSerialization="".join(
                p["parameter"].stringify(
                    p["parametername"],
                    4) for p in self.parameterlist if p["parameter"].isOutput()),
            ID_plus_1=self.ID * 2 + 1,
            prefix=prefix,
            messagesize=sum(p["parameter"].getSize() for p in self.parameterlist if p[
                "parameter"].isOutput()) + 1,
        )
    def getAnswerSizeCase(self, buffer):
        """Return the case computing the expected size of this function's answer."""
        if self.name in functionNoAnswerList:
            return """\t\t/* case {ID}: {declaration}
\t\t\tThis function has been set to receive no answer */
""".format(
                declaration=self.getDeclaration(),
                ID=self.ID * 2 + 1,
            )
        size = 1 + sum(p["parameter"].getSize()
                       for p in self.parameterlist if p["parameter"].isOutput())
        retvalsetcode = ""
        # A float size marks variable-length messages (pointer types report
        # a float 0.); those carry their length in message bytes 1-2.
        # NOTE(review): the generated C "{buffer}[1] + {buffer}[2] << 8"
        # parses as "(a+b) << 8" due to C precedence -- verify intended.
        if isinstance(size, float):  # variable length
            retvalsetcode += """ if (size_bytes >= 3)
returnvalue.size = {buffer}[1] + {buffer}[2] << 8;
else{{
returnvalue.size = 3;
returnvalue.result = RPC_COMMAND_INCOMPLETE;
}}""".format(buffer=buffer, prefix=prefix)
        else:
            retvalsetcode += "\t\t\treturnvalue.size = " + str(size) + ";"
        return """\t\tcase {ID}: /* {declaration} */
{retvalsetcode}
\t\t\tbreak;
""".format(
            declaration=self.getDeclaration(),
            ID=self.ID * 2 + 1,
            retvalsetcode=retvalsetcode,
        )
    def getAnswerParseCase(self, buffer):
        """Return the (stub) client-side case for parsing this function's answer."""
        if self.name in functionNoAnswerList:
            return ""
        return """\t\tcase {ID}: /* {declaration} */
\t\t\tbreak; /*TODO*/
""".format(
            ID=self.ID * 2 + 1,
            declaration=self.getDeclaration(),
        )
    def getRequestSizeCase(self, buffer):
        """Return the case computing the expected size of this function's request."""
        size = 1 + sum(p["parameter"].getSize()
                       for p in self.parameterlist if p["parameter"].isInput())
        retvalsetcode = ""
        # See getAnswerSizeCase for the float-size convention and the C
        # precedence note on the generated shift expression.
        if isinstance(size, float):  # variable length
            retvalsetcode += """ if (size_bytes >= 3)
returnvalue.size = {buffer}[1] + {buffer}[2] << 8;
else{{
returnvalue.size = 3;
returnvalue.result = RPC_COMMAND_INCOMPLETE;
}}""".format(buffer=buffer, prefix=prefix)
        else:
            retvalsetcode += "\t\t\treturnvalue.size = " + str(size) + ";"
        # NOTE(review): the format key is named answerID but receives the
        # request ID (self.ID * 2) -- verify.
        return """
\t\tcase {answerID}: /* {functiondeclaration} */
{retvalsetcode}
\t\t\tbreak;
""".format(
            answerID=self.ID * 2,
            retvalsetcode=retvalsetcode,
            functiondeclaration=self.getDeclaration(),
        )
    def getDocumentation(self):
        """Return an HTML fragment documenting this function's wire format."""
        class BytePositionCounter:
            # Tracks byte offsets while laying out a message table.
            def __init__(self, start=0):
                self.position = start
            def getBytes(self, length):
                # "3" for single bytes, "3-6" for multi-byte fields
                form = "{start}" if length == 1 else "{start}-{end}"
                self.position += length
                return form.format(start=self.position -
                                   length, end=self.position - 1)
        pos = BytePositionCounter(start=1)
        def stripOneDimensionalArray(vartype):
            # one-element arrays are displayed as the plain element type
            if vartype.endswith(" [1]"):
                vartype = vartype[:-4]
            return vartype
        tableformat = """
<td class="content">{bytes}</td>
<td class="content">{type}</td>
<td class="content">{length}</td>
<td class="content">{varname}</td>"""
        inputvariables = "</tr><tr>".join(tableformat.format(
            length=p["parameter"].getSize(),
            varname=p["parametername"],
            bytes=pos.getBytes(p["parameter"].getSize()),
            type=stripOneDimensionalArray(p["parameter"].declaration("")),
        )
            for p in self.parameterlist if p["parameter"].isInput())
        ID = '<td class="content">0</td><td class="content">uint8_t</td><td class="content">1</td><td class="content">ID = {ID}</td>'.format(
            ID=self.ID * 2)
        inputvariables = ID + "</tr><tr>" + \
            inputvariables if len(inputvariables) > 0 else ID
        pos = BytePositionCounter(start=1)
        def getPredefinedData(name):
            # Documentation values for the predefined function (ID 0).
            if name == "hash_inout":
                return '"' + rawhash + '"'
            elif name == "hash_index_inout":
                return str(0)
            elif name == "start_command_id_out":
                return str(start_command_id)
            elif name == "version_out":
                return str(version_number)
            assert False, "Internal error: invalid name for predefined variable value: " + name
        outputvariables = "</tr><tr>".join(tableformat.format(
            length=p["parameter"].getSize(),
            varname=p["parametername"] if self.ID != 0 else p[
                "parametername"] + ' = ' + getPredefinedData(p["parametername"]),
            bytes=pos.getBytes(p["parameter"].getSize()),
            type=stripOneDimensionalArray(p["parameter"].declaration("")),
        )
            for p in self.parameterlist if p["parameter"].isOutput())
        ID = '<td class="content">0</td><td class="content">uint8_t</td><td class="content">1</td><td class="content">ID = {ID}</td>'.format(
            ID=self.ID * 2 + 1)
        outputvariables = ID + "</tr><tr>" + \
            outputvariables if len(outputvariables) > 0 else ID
        structSet = set()
        def addToStructSet(structSet, parameter):
            # Collect all struct datatypes reachable from `parameter`.
            # NOTE(review): the recursion passes the member dict `m`, not
            # m["datatype"], so nested structs are never matched -- verify.
            if isinstance(parameter, StructDatatype):
                structSet.add(parameter)
                for m in parameter.memberList:
                    addToStructSet(structSet, m)
            elif isinstance(parameter, ArrayDatatype):
                addToStructSet(structSet, parameter.datatype)
        for p in self.parameterlist:
            addToStructSet(structSet, p["parameter"])
        enumSet = set()
        def addToEnumSet(enumSet, parameter):
            # Collect all enum datatypes reachable from `parameter`.
            # NOTE(review): the struct branch recurses with the closed-over
            # `structSet` instead of `enumSet` -- looks like a bug; verify.
            if isinstance(parameter, StructDatatype):
                for m in parameter.memberList:
                    addToEnumSet(structSet, m)
            elif isinstance(parameter, ArrayDatatype):
                addToEnumSet(enumSet, parameter.datatype)
            elif isinstance(parameter, EnumDatatype):
                enumSet.add(parameter)
        for p in self.parameterlist:
            addToEnumSet(enumSet, p["parameter"])
        contentformat = """
<p class="static">{name}</p>
<table>
<tr>
<th>Byte Index</th>
<th>Type</th>
<th>Length</th>
<th>Variable</th>
</tr>
<tr>
{content}
</tr>
</table>"""
        structcontent = ""
        for s in structSet:
            pos = BytePositionCounter()
            structcontent += contentformat.format(name=s.signature, content="</tr><tr>".join(
                tableformat.format(
                    length=m[
                        "datatype"].getSize(),
                    varname=m["name"],
                    bytes=pos.getBytes(
                        m["datatype"].getSize()),
                    type=stripOneDimensionalArray(
                        m["datatype"].declaration("")),
                ) for m in s.memberList)
            )
        enumcontent = ""
        for e in enumSet:
            pos = BytePositionCounter()
            enumcontent += """
<p class="static">{name}</p>
<table>
<tr>
<th>Name</th>
<th>Value</th>
</tr>
<tr>
{content}
</tr>
</table>""".format(name=e.name, content="</tr><tr>".join('<td class="content">{name}</td><td class="content">{value}</td>'.format(
                name=m["name"],
                value=m["value"])
                for m in e.values)
            )
        replycontent = contentformat.format(
            name="Reply", content=outputvariables)
        if self.name in functionNoAnswerList:
            replycontent = '<p class="static">Reply: None</p>'
        return '''<div class="function">
<table class="declarations">
<tr class="declarations">
<td class="static">Original function</td>
<td class="function">{originalfunctiondeclaration}</td>
</tr>
<tr class="declarations">
<td class="static">Generated function</td>
<td class="function">{functiondeclaration}</td>
</tr>
</table>
{requestcontent}
{replycontent}
{structcontent}
{enumcontent}
</table>
</div>'''.format(
            originalfunctiondeclaration=self.getOriginalDeclaration(),
            functiondeclaration=self.getDeclaration(),
            requestcontent=contentformat.format(
                name="Request", content=inputvariables),
            replycontent=replycontent,
            structcontent=structcontent,
            enumcontent=enumcontent,
            requestID=self.ID * 2,
            replyID=self.ID * 2 + 1,
        )
def setIntegralDataType(signature, size_bytes):
    """Register an IntegralDatatype for `signature` in the global registry."""
    datatypes[signature] = IntegralDatatype(signature, size_bytes)
def setBasicDataType(signature, size_bytes):
    """Register a BasicDatatype for `signature` (used for zero-size enums).

    NOTE(review): BasicDatatype is not defined in this part of the file --
    presumably declared earlier; verify it exists.
    """
    datatypes[signature] = BasicDatatype(signature, size_bytes)
def setEnumDataType(signature, size_bytes, transfertype,
                    values, name, typedef):
    """Register an EnumDatatype for `signature` in the global registry."""
    datatypes[signature] = EnumDatatype(
        signature, size_bytes, transfertype, values, name, typedef)
def setPredefinedDataTypes():
    """Register void, char and the fixed-width C integer types with their
    wire sizes (in bytes)."""
    predefined = (
        ("void", 0),
        ("char", 1),
        ("signed char", 1),
        ("unsigned char", 1),
        ("int8_t", 1),
        ("int16_t", 2),
        ("int24_t", 3),
        ("int32_t", 4),
        ("int64_t", 8),
        ("uint8_t", 1),
        ("uint16_t", 2),
        ("uint24_t", 3),
        ("uint32_t", 4),
        ("uint64_t", 8),
    )
    for signature, size_bytes in predefined:
        setIntegralDataType(signature, size_bytes)
def setEnumTypes(enums):
    """Register an EnumDatatype (or a zero-size type) for every parsed enum.

    The smallest fixed-width integer that can hold the enum's value range is
    chosen as the transfer type; unsigned when the minimum value is >= 0.

    Fixes: hoisted the loop-invariant math import out of the loop, narrowed
    the bare `except:` (which also caught KeyboardInterrupt), and removed the
    dead `requiredBytes == 0` branch (valRange >= 1 implies >= 1 byte).
    """
    from math import log, ceil
    for e in enums:
        # calculating minimum and maximum can be done better with map(max,
        # zip(*e["values"])) or something like that
        minimum = maximum = 0
        for v in e["values"]:  # parse the definition of the enum values
            if isinstance(v["value"], int):  # it is just a (default) int
                intValue = v["value"]
            else:
                try:
                    # it is something like "- 1000"
                    intValue = int("".join(v["value"].split(" ")))
                except Exception:
                    # it is something complicated, assume an int has 4 bytes
                    minimum = -2**30
                    intValue = 2 ** 30
            minimum = min(minimum, intValue)
            maximum = max(maximum, intValue)
        valRange = maximum - minimum
        name = e["name"] if e["typedef"] else "enum " + e["name"]
        if valRange < 1:
            # degenerate enum: occupies no space on the wire
            setBasicDataType(name, 0)
            continue
        # +1 because a 2 element enum would result in 0 bit field
        requiredBits = ceil(log(valRange + 1, 2))
        requiredBytes = ceil(requiredBits / 8.)
        if requiredBytes == 1:
            cast = "int8_t"
        elif requiredBytes == 2:
            cast = "int16_t"
        elif requiredBytes == 3 or requiredBytes == 4:
            cast = "int32_t"
        else:
            assert False, "enum " + e["name"] + " appears to require " + str(
                requiredBytes) + "bytes and does not fit in a 32 bit integer"
        if minimum >= 0:
            cast = "u" + cast
        setEnumDataType(
            name,
            requiredBytes,
            cast,
            e["values"],
            e["name"],
            e["typedef"])
        datatypeDeclarations.append(datatypes[name].getTypeDeclaration())
def setStructTypes(structs):
    """Register a StructDatatype for every struct parsed by CppHeaderParser."""
    for s in structs:
        memberList = []
        for t in structs[s]["properties"]["public"]:
            memberList.append(
                {"name": t["name"], "datatype": getStructParameter(t)})
            # print(structs[s][t])
        assert len(memberList) > 0, "struct with no members is not supported"
        # NOTE(review): typedef detection compares the first member's
        # 'property_of_class' against the struct name -- verify against the
        # CppHeaderParser output format.
        isTypedefed = structs[s]["properties"][
            "public"][0]['property_of_class'] != s
        signature = s if isTypedefed else "struct " + s
        datatypes[signature] = StructDatatype(
            signature, memberList, currentFile, structs[s]["line_number"])
        datatypeDeclarations.append(datatypes[signature].getTypeDeclaration())
def getHash():
    """Return the C #defines embedding the protocol hash, start command ID
    and version so client and server can verify protocol compatibility."""
    return """/* This hash is generated from the Python script that generated this file,
the configs passed to it and the source header file specified within the
server config. You can use it to verify that the client and the server
use the same protocol. */
#define {prefix}HASH "{hashstring}"
#define {prefix}HASH_SIZE 16
#define {prefix}START_COMMAND_ID {start_command_id}
#define {prefix}VERSION {version}""".format(prefix=prefix, hashstring=hashstring, start_command_id=start_command_id, version=version_number)
def getStructParameter(parameter):
    """Build the datatype of a struct member.

    Array dimensions are wrapped from the innermost dimension outwards.
    Pointer members are rejected.
    """
    element_type = getDatatype(
        parameter["type"], currentFile, parameter["line_number"])
    member_name = parameter["name"]
    if 'multi_dimensional_array_size' in parameter:
        # Fold all dimensions, last (innermost) first, around the base type.
        wrapped = element_type
        for dimension in reversed(parameter["multi_dimensional_array_size"]):
            wrapped = ArrayDatatype(dimension, wrapped, member_name)
        return wrapped
    if 'array_size' in parameter:
        return ArrayDatatype(parameter["array_size"], element_type, member_name)
    assert parameter["type"][-1] != '*', "Pointers are not allowed in structs"
    return element_type
def setDefines(newdefines):
    """Parse "NAME value..." #define strings into the module-level `defines` dict.

    Each entry is split at the first space: the first token becomes the key,
    the remainder (spacing preserved) the value.  str.partition produces
    exactly the same key/value as the old split(" ")/" ".join round-trip,
    without rebuilding the value.  Non-string entries are skipped, preserving
    the original best-effort behavior, but the former bare `except: pass` is
    narrowed so real bugs are no longer swallowed.
    """
    for d in newdefines:
        try:
            name, _, value = d.partition(" ")
        except AttributeError:
            # Entry from the parser is not a string: ignore it, as before.
            continue
        defines[name] = value
def getFunctionReturnType(function):
    """Resolve a function's return datatype; pointer returns are not supported."""
    assert function["returns_pointer"] == 0, (
        "in function " + function["debug"] + " line "
        + str(function["line_number"]) + ": "
        + "Pointers as return types are not supported")
    return getDatatype(
        function["rtnType"], currentFile, function["line_number"])
def getParameterArraySizes(parameter):
    """Extract the array dimension expressions of a parameter.

    Walks the space-separated "debug" token stream of the enclosing method
    declaration and collects the text between each '[' ']' pair that follows
    the parameter name.  Returns a list of size expressions (strings),
    outermost dimension first.  For struct members (no enclosing method) the
    single "array_size" value is returned as an int instead.
    """
    try:
        tokens = parameter["method"]["debug"].split(" ")
    except KeyError:
        # No enclosing method: the parameter is a struct member.
        if "array_size" in parameter:
            return [int(parameter["array_size"])]
        assert False, "Multidimensional arrays inside structs currently not supported"
    assert parameter["name"] in tokens, "Error: cannot get non-existing parameter " + \
        parameter["name"] + " from declaration " + parameter["method"]["debug"]
    # Advance to the parameter name, then step past it.
    while tokens[0] != parameter["name"]:
        tokens = tokens[1:]
    tokens = tokens[1:]
    parameterSizes = []
    while tokens[0] == '[':
        tokens = tokens[1:]
        parameterSizes.append("")
        # Concatenate every token up to the closing bracket into one expression.
        while tokens[0] != ']':
            parameterSizes[-1] += " " + tokens[0]
            tokens = tokens[1:]
        tokens = tokens[1:]
        # Drop the leading space introduced by the concatenation above.
        parameterSizes[-1] = parameterSizes[-1][1:]
    return parameterSizes
def getFunctionParameter(parameter):
    """Classify one function parameter and build its datatype.

    Returns {"isPointerRequiringSize": bool, "parameter": Datatype}.  A pointer
    parameter sets the flag so the caller can enforce the following
    "<name>_size" companion parameter.  Pointer and array parameters must carry
    an "_in"/"_out"/"_inout" suffix encoding the transfer direction.
    """
    # return (isPointerRequiringSize, DataType)
    if parameter["type"][-1] == '*':  # pointer
        # NOTE(review): index -3 assumes CppHeaderParser spaces the stars
        # ("T * *" for a multipointer) -- confirm against the parser output.
        assert parameter[
            "type"][-3] != '*', "Multipointer as parameter is not allowed"
        assert parameter["name"].endswith("_in") or parameter["name"].endswith("_out") or parameter["name"].endswith("_inout"),\
            'In {1}:{2}: Pointer parameter "{0}" must either have a suffix "_in", "_out", "_inout" or be a fixed size array.'.format(
                parameter["name"], currentFile, parameter["line_number"])
        # Strip the trailing " *" from the type to resolve the pointee type.
        return {"isPointerRequiringSize": True, "parameter": PointerDatatype(parameter["type"], getDatatype(
            parameter["type"][:-2], currentFile, parameter["line_number"]), parameter["name"])}
    basetype = getDatatype(
        parameter["type"],
        currentFile,
        parameter["line_number"])
    if parameter["array"]:  # array
        assert parameter["name"][-3:] == "_in" or parameter["name"][-4:] == "_out" or parameter["name"][-6:] == "_inout",\
            'Array parameter name "' + \
            parameter["name"] + '" must end with "_in", "_out" or "_inout" in {}:{} '.format(
                currentFile, parameter["line_number"])
        # Wrap dimensions innermost (last) to outermost.
        arraySizes = list(reversed(getParameterArraySizes(parameter)))
        current = ArrayDatatype(arraySizes[0], basetype, parameter["name"])
        arraySizes = arraySizes[1:]
        for arraySize in arraySizes:
            current = ArrayDatatype(arraySize, current, parameter["name"])
        return {"isPointerRequiringSize": False, "parameter": current}
    else:  # base type
        return {"isPointerRequiringSize": False, "parameter": basetype}
def getFunctionParameterList(parameters):
    """Convert the parser's parameter list into a list of
    {"parameter": Datatype, "parametername": str} entries.

    Enforces that every pointer parameter "p" is immediately followed by a
    "p_size" parameter; the size name is recorded on the pointer's datatype.
    Void parameters are dropped; unnamed parameters get synthesized names.
    """
    paramlist = []
    isPointerRequiringSize = False
    for p in parameters:
        if isPointerRequiringSize:  # require a size parameter
            # p must be the "<pointer>_size" companion of the previous entry.
            pointername = parameters[len(paramlist) - 1]["name"]
            pointersizename = p["name"]
            sizeParameterErrorText = 'Pointer parameter "{0}" must be followed by a size parameter with the name "{0}_size". Or use a fixed size array instead.'.format(
                pointername)
            assert pointersizename == pointername + "_size", sizeParameterErrorText
            functionparameter = getFunctionParameter(p)
            isPointerRequiringSize = functionparameter[
                "isPointerRequiringSize"]
            parameter = functionparameter["parameter"]
            # The size parameter must not itself be a pointer.
            assert not isPointerRequiringSize, sizeParameterErrorText
            paramlist[-1]["parameter"].setNumberOfElementsIdentifier(
                pointersizename)
            paramlist.append(
                {"parameter": parameter, "parametername": p["name"]})
        else:
            functionparameter = getFunctionParameter(p)
            isPointerRequiringSize = functionparameter[
                "isPointerRequiringSize"]
            parameter = functionparameter["parameter"]
            if isVoidDatatype(parameter):
                continue
            parametername = p["name"]
            if parametername == "" and p["type"] != 'void':
                # Unnamed non-void parameter: synthesize a stable name.
                parametername = "unnamed_parameter" + str(len(paramlist))
            paramlist.append(
                {"parameter": parameter, "parametername": parametername})
    # A trailing pointer with no size parameter after the loop is an error.
    assert not isPointerRequiringSize, 'Pointer parameter "{0}" must be followed by a size parameter with the name "{0}_size". Or use a fixed size array instead.'.format(parameters[
        len(paramlist) - 1]["name"])
    return paramlist
def getFunction(function, client_function_prefix):
    """Create a Function object with a unique command ID.

    IDs come from "#pragma ID" directives (functionPredefinedIDs, consumed via
    pop) when present; otherwise a counter stored as an attribute on this
    function hands out the next free ID starting at start_command_id, skipping
    IDs still reserved by pragmas for other functions.
    """
    functionList = []  # NOTE(review): unused -- confirm before removing.
    name = function["name"]
    ID = functionPredefinedIDs.pop(name, None)
    if ID is None:
        try:
            getFunction.functionID += 1
        except AttributeError:
            # First call: initialize the function-attribute counter.
            getFunction.functionID = start_command_id
        # Skip IDs reserved by #pragma for functions not seen yet.
        while getFunction.functionID in functionPredefinedIDs.values():
            getFunction.functionID += 1
        assert getFunction.functionID < 127, "Too many functions, require changes to allow bigger function ID variable"
        ID = getFunction.functionID
    returntype = getFunctionReturnType(function)
    parameterlist = getFunctionParameterList(function["parameters"])
    return Function(ID, returntype, name, parameterlist, client_function_prefix)
    # (commented-out debugging code that printed the parser dicts used to
    # follow the return statement; removed as unreachable clutter)
def checkDefines(defines):
    """Verify that the #defines required by the RPC runtime are present.

    `checklist` pairs each required define name with the error message shown
    when it is missing.  A define may be function-like ("NAME(args) body"), so
    the candidate name is taken up to the first space or '('.  The former
    manual found-flag loop is replaced by the equivalent any() idiom.
    """
    checklist = (
        (prefix + "SEND",
         "A #define {prefix}SEND is required that takes a const void * and a size and sends data over the network. Example: #define {prefix}SEND send".format(prefix=prefix)),
        (prefix + "SLEEP",
         "A #define {prefix}SLEEP is required that makes the current thread sleep until {prefix}WAKEUP is called or a timeout occured. Returns whether {prefix}WAKEUP was called (and a timeout did not occur)".format(
             prefix=prefix)),
        (prefix + "WAKEUP",
         "A #define {prefix}WAKEUP is required that makes the thread sleeping due to {prefix}SLEEP wake up".format(
             prefix=prefix)),
    )
    for required_name, error_message in checklist:
        assert any(d.split(" ")[0].split("(")[0] == required_name
                   for d in defines), error_message
def getPathAndFile(filepath):
    """Split *filepath* into its (directory, file name) components."""
    import os.path
    return os.path.split(filepath)
def getIncludeFilePath(include):
    """Strip the surrounding quotes or angle brackets from an #include token."""
    return include[1:len(include) - 1]
def setTypes(ast):
    """Register every enum, struct, class and #define found in *ast*."""
    setEnumTypes(ast.enums)
    for record_container in (ast.structs, ast.classes):
        setStructTypes(record_container)
    setDefines(ast.defines)
def getNonstandardTypedefs():
    """Map non-standard bit-width integer names (int7_t, uint19_t, ...) onto
    the next larger standard fixed-width type from <stdint.h>."""
    lines = ["#include <stdint.h>\n"]
    for base in ("int", "uint"):
        for standard_width, widths in ((8, range(1, 8)),
                                       (16, range(9, 16)),
                                       (32, range(17, 32))):
            for width in widths:
                lines.append("typedef {0}{1}_t {0}{2}_t;\n".format(
                    base, standard_width, width))
    return "".join(lines)
def getGenericHeader(version):
    """Return the shared banner: generator version plus the bit-int typedefs."""
    banner = """
/* This file has been generated by RPC Generator {0} */
/* typedefs for non-standard bit integers */
{1}
/* The optional original return value is returned through the first parameter */
"""
    return banner.format(version, getNonstandardTypedefs())
def getSizeFunction(functions, clientHeader,
                    parser_to_generic_path, parser_to_server_header_path):
    """Generate the server-side C prelude: includes, the hash #defines, the
    built-in get_hash/cancel_reply implementations and
    {prefix}get_request_size(), which computes the expected byte size of a
    (partially) received request by switching on the leading message-ID byte.

    clientHeader is currently unused -- NOTE(review): confirm before removing.
    `join` is the module-level `os.path.join` imported by the driver code.
    """
    return """#include "{network_include}"
#include "{parser_include}"
#include "{parser_to_server_header_path}"
#include <string.h>
#include <stdint.h>
uint8_t {prefix}reply_cancelled = 0;
{hash}
/* auto-generated implementation */
void {prefix}get_hash_impl(unsigned char hash_inout[16], uint32_t hash_index_inout[1], unsigned char start_command_id_out[1], uint16_t version_out[1]){{
//For future use: the client's hash is in hash_inout and can be processed.
//For now it is just a placeholder for keeping the protocol compatible once this feature is needed.
//But this should be done in the generator since this is generated code.
//Hash index is used in case the client holds multiple protocol descriptions. The client
//can use the index value to iterate.
memcpy(hash_inout, {prefix}HASH, 16);
*hash_index_inout = 0;
*start_command_id_out = {prefix}START_COMMAND_ID;
*version_out = {prefix}VERSION;
}}
/* auto-generated implementation */
void {prefix}cancel_reply(){{
{prefix}reply_cancelled = 1;
}}
/* Receives a pointer to a (partly) received message and it's size.
Returns a result and a size. If size equals RPC_SUCCESS then size is the
size that the message is supposed to have. If result equals RPC_COMMAND_INCOMPLETE
then more bytes are required to determine the size of the message. In this case
size is the expected number of bytes required to determine the correct size.*/
RPC_SIZE_RESULT {prefix}get_request_size(const void *buffer, size_t size_bytes){{
const unsigned char *current = (const unsigned char *)buffer;
RPC_SIZE_RESULT returnvalue;
returnvalue.result = RPC_COMMAND_INCOMPLETE;
if (size_bytes == 0){{
returnvalue.result = RPC_COMMAND_INCOMPLETE;
returnvalue.size = 1;
return returnvalue;
}}
switch (*current){{ /* switch by message ID */{cases}
default:
returnvalue.size = 0;
returnvalue.result = RPC_COMMAND_UNKNOWN;
return returnvalue;
}}
returnvalue.result = returnvalue.size > size_bytes ? RPC_COMMAND_INCOMPLETE : RPC_SUCCESS;
return returnvalue;
}}
""".format(
        hash=getHash(),
        cases="".join(f.getRequestSizeCase("current") for f in functions),
        prefix=prefix,
        network_include=join(parser_to_generic_path, prefix + "network.h"),
        parser_include=join(parser_to_generic_path, prefix + "parser.h"),
        parser_to_server_header_path=parser_to_server_header_path,
    )
def getRequestParser(functions):
    """Generate {prefix}parse_request(): switch on the request-ID byte, call the
    implementation function and send the answer (one case per function)."""
    buffername = "current"
    return """
/* This function parses RPC requests, calls the original function and sends an
answer. */
void {prefix}parse_request(const void *buffer, size_t size_bytes){{
(void)size_bytes;
const unsigned char *{buffername} = (const unsigned char *)buffer;
switch (*current++){{ /* switch (request ID) */ {cases}
}}
}}""".format(
        cases="".join(f.getRequestParseCase(buffername) for f in functions),
        buffername=buffername,
        prefix=prefix,
    )
def getAnswerParser(functions):
    """Generate {prefix}parse_answer(): hand a fully-received answer to the
    waiting caller.  The mutex handshake depends on the threading model.

    `functions` is currently unused -- NOTE(review): confirm before removing.
    """
    result = """
/* This function pushes the answers to the caller, doing all the necessary synchronization. */
void {prefix}parse_answer(const void *buffer, size_t size_bytes){{
assert({prefix}initialized);
{prefix}buffer = (const unsigned char *)buffer;
assert({prefix}get_answer_length(buffer, size_bytes).result == RPC_SUCCESS);
assert({prefix}get_answer_length(buffer, size_bytes).size <= size_bytes);
""".format(prefix=prefix)
    if(multiThreadArchicture):
        # Multi-threaded: wake the caller, then wait until it finished reading
        # the answer before the parser may continue.
        result += """
{prefix}mutex_unlock(RPC_mutex_answer);
{prefix}mutex_lock(RPC_mutex_in_caller);
{prefix}mutex_unlock(RPC_mutex_parsing_complete);
{prefix}mutex_lock(RPC_mutex_answer);
{prefix}mutex_lock(RPC_mutex_parsing_complete);
{prefix}mutex_unlock(RPC_mutex_in_caller);
}}
""".format(prefix=prefix)
    else:
        # Single-threaded: simply release the answer mutex.
        result += """
{prefix}mutex_unlock(RPC_mutex_answer);
}}
""".format(prefix=prefix)
    return result
def getRPC_Parser_init():
    """Return the C implementation of {prefix}Parser_init().

    The multi-threaded build additionally locks RPC_mutex_parsing_complete;
    otherwise only RPC_mutex_answer is locked.  The two former near-identical
    templates (differing by that single line) are merged, and the stray
    trailing semicolon on the return is gone.
    """
    extra_lock = """{prefix}mutex_lock(RPC_mutex_parsing_complete);
""" if multiThreadArchicture else ""
    template = """
void {prefix}Parser_init(){{
assert(!{prefix}initialized);
if ({prefix}initialized)
return;
{prefix}initialized = 1;
""" + extra_lock + """{prefix}mutex_lock(RPC_mutex_answer);
}}
"""
    return template.format(prefix=prefix)
def getRPC_Parser_exit():
    """Return the C implementation of {prefix}Parser_exit() (mirror of _init).

    The multi-threaded build additionally unlocks RPC_mutex_parsing_complete.
    As with getRPC_Parser_init, the duplicated templates are merged and the
    trailing semicolon removed.
    """
    extra_unlock = """{prefix}mutex_unlock(RPC_mutex_parsing_complete);
""" if multiThreadArchicture else ""
    template = """
void {prefix}Parser_exit(){{
assert({prefix}initialized);
if (!{prefix}initialized)
return;
{prefix}initialized = 0;
""" + extra_unlock + """{prefix}mutex_unlock(RPC_mutex_answer);
}}
"""
    return template.format(prefix=prefix)
def getAnswerSizeChecker(functions):
    """Generate {prefix}get_answer_length(): compute the (expected) size of a
    (partial) answer by switching on the leading answer-ID byte."""
    return """/* Get (expected) size of (partial) answer. */
RPC_SIZE_RESULT {prefix}get_answer_length(const void *buffer, size_t size_bytes){{
assert({prefix}initialized);
RPC_SIZE_RESULT returnvalue = {{RPC_SUCCESS, 0}};
const unsigned char *current = (const unsigned char *)buffer;
if (!size_bytes){{
returnvalue.result = RPC_COMMAND_INCOMPLETE;
returnvalue.size = 1;
return returnvalue;
}}
switch (*current){{
{answercases} default:
returnvalue.result = RPC_COMMAND_UNKNOWN;
return returnvalue;
}}
if (returnvalue.result != RPC_COMMAND_UNKNOWN)
returnvalue.result = returnvalue.size > size_bytes ? RPC_COMMAND_INCOMPLETE : RPC_SUCCESS;
return returnvalue;
}}
""".format(
        answercases="".join(f.getAnswerSizeCase("current") for f in functions),
        prefix=prefix,
    )
def getHashFunction():
    """Build the predefined {prefix}get_hash Function (command ID 0) that
    exchanges the protocol hash, hash index, start command ID and version
    between client and server."""
    return Function(0, getDatatype("void"), prefix + "get_hash",
                    [
                        {
                            'parameter': ArrayDatatype("16",
                                                       getDatatype("unsigned char"),
                                                       "hash_inout",
                                                       Out=True,
                                                       In=True
                                                       ),
                            'parametername': "hash_inout"
                        },
                        {
                            'parameter': ArrayDatatype("1",
                                                       getDatatype("uint32_t"),
                                                       "hash_index_inout",
                                                       Out=True,
                                                       In=True
                                                       ),
                            'parametername': "hash_index_inout"
                        },
                        {
                            'parameter': ArrayDatatype("1",
                                                       getDatatype("unsigned char"),
                                                       "start_command_id_out",
                                                       Out=True
                                                       ),
                            'parametername': "start_command_id_out"
                        },
                        {
                            'parameter': ArrayDatatype("1",
                                                       getDatatype("uint16_t"),
                                                       "version_out",
                                                       Out=True
                                                       ),
                            'parametername': "version_out"
                        }
                    ], ""
                    )
def recurseThroughIncludes(rootfile, st_includes, depth):
    """Parse #include'd headers relative to *rootfile* and register their types,
    recursing up to 10 levels deep.  Missing files produce a warning only."""
    base_dir = os_path.dirname(rootfile)
    for include_token in st_includes.includes:
        include_path = os_path.join(base_dir, getIncludeFilePath(include_token))
        try:
            included_ast = CppHeaderParser.CppHeader(include_path)
            setTypes(included_ast)
            if depth < 10:
                recurseThroughIncludes(include_path, included_ast, depth + 1)
        except FileNotFoundError:
            print('Warning: #include file "{}" not found, skipping'.format(include_path))
def generateCode(file, xml, parser_to_network_path,
                 parser_to_server_header_path, client_function_prefix):
    """Parse the server header, build the Function list and render every
    generated code fragment.

    Returns (rpcHeader, rpcImplementation, requestParserImplementation,
    answerParser, answerSizeChecker, documentation).  Per-function XML
    descriptions are appended to *xml* as a side effect.
    """
    ast = CppHeaderParser.CppHeader(file)
    setPredefinedDataTypes()
    recurseThroughIncludes(file,ast,0)
    # NOTE(review): this binds a *local* name only; the module-level
    # currentFile used by the helper functions is not updated here --
    # confirm whether `global currentFile` was intended.
    currentFile = file
    setTypes(ast)
    # TODO: structs
    # TODO: typedefs
    # The predefined get_hash function always comes first (command ID 0).
    functionlist = [getHashFunction()]
    for f in ast.functions:
        if not f["name"] in functionIgnoreList:
            functionlist.append(getFunction(f, client_function_prefix))
    rpcHeader = "\n".join(f.getDeclaration() for f in functionlist)
    rpcImplementation = "\n".join(f.getDefinition(client_function_prefix) for f in functionlist)
    documentation = ""
    for f in functionlist:
        if f.name in functionIgnoreList:
            continue
        entry = ET.SubElement(xml, "function")
        f.getXml(entry)
        documentation += "\n<hr>\n" + f.getDocumentation()
    from os.path import basename
    requestParserImplementation = externC_intro + '\n' + getSizeFunction(
        functionlist,
        basename(file),
        parser_to_network_path,
        parser_to_server_header_path) + getRequestParser(functionlist) + externC_outro
    answerSizeChecker = getAnswerSizeChecker(functionlist)
    answerParser = getAnswerParser(functionlist)
    return rpcHeader, rpcImplementation, requestParserImplementation, answerParser, answerSizeChecker, documentation
# Banner prepended to every generated file.
doNotModifyHeader = """/* This file has been automatically generated by RPC-Generator
https://github.com/Crystal-Photonics/RPC-Generator
You should not modify this file manually. */
"""
# extern "C" guards so the generated C headers are usable from C++.
externC_intro = """#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
"""
externC_outro = """
#ifdef __cplusplus
}
#endif /* __cplusplus */
"""
def get_rpc_enum():
    """Return the C definition of the RPC_RESULT status enum."""
    enum_template = """typedef enum{{
RPC_SUCCESS,
RPC_FAILURE,
RPC_COMMAND_UNKNOWN,
RPC_COMMAND_INCOMPLETE
}} RPC_RESULT;
"""
    # `prefix` is not referenced by the template but is kept for parity with
    # the other template helpers.
    return enum_template.format(prefix=prefix)
def getRPC_serviceHeader(
        headers, headername, typedeclarations, specific_header_to_types_path):
    """Render the client-facing service header: include guard, hash #defines,
    the generated type declarations and the RPC function declarations.

    headers          -- newline-joined function declarations
    headername       -- basis of the include-guard macro (upper-cased)
    typedeclarations -- generated enum/struct declarations
    specific_header_to_types_path -- relative #include path of RPC_types.h
    """
    headerDefine = headername.upper()
    return """{doNotModify}
{includeguardintro}
{externC_intro}
{hash}
{rpc_declarations}{externC_outro}
{includeguardoutro}
""".format(
        hash=getHash(),
        doNotModify=doNotModifyHeader,
        externC_intro=externC_intro,
        externC_outro=externC_outro,
        includeguardintro="""#ifndef {}
#define {}
""".format(headerDefine, headerDefine),
        includeguardoutro="""#endif /* {} */""".format(headerDefine),
        rpc_declarations="""#include <stddef.h>
#include <inttypes.h>
#include "{specific_header_to_types_path}"
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
These are the payload functions made available by the RPC generator.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
/* //TODO: copy comments for documentation */
{typedeclarations}
{headers}
""".format(
            prefix=prefix,
            typedeclarations=typedeclarations,
            headers=headers,
            specific_header_to_types_path=specific_header_to_types_path,
        ),)
def getNetworkHeader():
    """Return the contents of {prefix}network.h: the functions the user must
    implement (message send, mutexes) plus the generated parser entry points."""
    return """
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
IMPORTANT: The following functions must be implemented by YOU.
They are required for the RPC to work.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
{doNotModifyHeader}
#ifndef {prefix}NETWORK_H
#define {prefix}NETWORK_H
#include "RPC_types.h"
{externC_intro}
void {prefix}message_start(size_t size);
/* This function is called when a new message starts. {{size}} is the number of
bytes the message will require. In the implementation you can allocate a
buffer or write a preamble. The implementation can be empty if you do not
need to do that. */
void {prefix}message_push_byte(unsigned char byte);
/* Pushes a byte to be sent via network. You should put all the pushed bytes
into a buffer and send the buffer when {prefix}message_commit is called. If you run
out of buffer space you can send multiple partial messages as long as the
other side puts them back together. */
RPC_RESULT {prefix}message_commit(void);
/* This function is called when a complete message has been pushed using
{prefix}message_push_byte. Now is a good time to send the buffer over the network,
even if the buffer is not full yet. You may also want to free the buffer that
you may have allocated in the {prefix}message_start function.
{prefix}message_commit should return RPC_SUCCESS if the buffer has been successfully
sent and RPC_FAILURE otherwise. */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You need to define 4 mutexes to implement the {prefix}mutex_* functions below.
See RPC_types.h for a definition of RPC_mutex_id.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void {prefix}mutex_init(void);
/* Initializes all rpc mutexes. */
void {prefix}mutex_lock(RPC_mutex_id mutex_id);
/* Locks the mutex. If it is already locked it yields until it can lock the mutex. */
void {prefix}mutex_unlock(RPC_mutex_id mutex_id);
/* Unlocks the mutex. The mutex is locked when the function is called. */
char {prefix}mutex_lock_timeout(RPC_mutex_id mutex_id);
/* Tries to lock a mutex. Returns 1 if the mutex was locked and 0 if a timeout
occured. The timeout length should be the time you want to wait for an answer
before giving up. If the time is infinite a lost answer will get the calling
thread stuck indefinitely. */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The following functions's implementations are automatically generated.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void {prefix}Parser_init(void);
/* Initializes various states required for the RPC. Must be called before any
other {prefix}* function. Must be called by the parser thread. */
void {prefix}Parser_exit(void);
/* Frees various states required for the RPC. Must be called after any
other {prefix}* function */
RPC_SIZE_RESULT {prefix}get_answer_length(const void *buffer, size_t size);
/* Returns the (expected) length of the beginning of a (partial) message.
If returnvalue.result equals RPC_SUCCESS then returnvalue.size equals the
expected size in bytes.
If returnvalue.result equals RPC_COMMAND_UNKNOWN then the buffer does not point
to the beginning of a recognized message and returnvalue.size has no meaning.
If returnvalue.result equals RPC_COMMAND_INCOMPLETE then returnvalue.size equals
the minimum number of bytes required to figure out the length of the message. */
void {prefix}parse_answer(const void *buffer, size_t size);
/* This function parses answer received from the network. {{buffer}} points to the
buffer that contains the received data and {{size}} contains the number of bytes
that have been received (NOT the size of the buffer!). This function will wake
up {prefix}*-functions below that are waiting for an answer.
Do not call this function with an incomplete message. Use {prefix}get_answer_length
to make sure it is a complete message. */
{externC_outro}
#endif /* {prefix}NETWORK_H */
""".format(
        doNotModifyHeader=doNotModifyHeader,
        externC_intro=externC_intro,
        externC_outro=externC_outro,
        prefix=prefix,
    )
def generateDocumentation(documentation, filename, headerpath):
    """Render the HTML documentation page for the generated RPC interface.

    documentation -- concatenated per-function HTML fragments
    filename      -- base name of the server header; used in the page title
                     and for the name of the accompanying CSS file
    headerpath    -- server header path linked from the page header

    Bug fix: the title and stylesheet link now use the {filename} placeholder.
    The format() call always supplied filename=, but the template contained a
    hard-coded literal instead of the placeholder, so every generated page had
    the wrong title and a dead CSS link (the driver writes the stylesheet as
    prefix + filename + ".css").
    """
    import datetime
    now = datetime.datetime.now()
    return """
<html>
<head>
<title>{filename}.h - RPC Documentation</title>
<link rel="stylesheet" href="{prefix}{filename}.css">
</head>
<body>
<div class="header">
<table style="white-space: nowrap;">
<tr>
<th colspan=2>RPC Documentation<th>
</tr>
<tr>
<td class="static">Generated by:</td><td><a href="https://github.com/Crystal-Photonics/RPC-Generator">RPC-Generator</a></td>
</tr>
<tr>
<td class="static">Server Header File:</td><td><a href="../{headerpath}">{headerpath}</a></td>
</tr>
<tr>
<td class="static">Date of Generation:</td><td>{date}</td>
</tr>
<tr>
<td class="static">Hash:</td><td>{hash}</td>
</tr>
<tr>
<td class="static">Command ID Start:</td><td>{cmdidstart}</td>
</tr>
<tr>
<td class="static">Version:</td><td>{version}</td>
</tr>
</table>
</div>
<div class="content">
{documentation}
</div>
</body>
</html>""".format(
        documentation=documentation,
        filename=filename,
        date=now.strftime("%Y-%m-%d %H:%M"),
        prefix=prefix,
        hash=rawhash,
        cmdidstart=start_command_id,
        version=version_number,
        headerpath=headerpath,
    )
def getCss():
    """Return the stylesheet written alongside the generated HTML documentation."""
    return """p.static {
width: 100%;
font-size: 16px;
color: #000;
margin-left: 20px;
margin-top: 20px;
margin-bottom: 10px;
}
p.function {
font-size: 24px;
font-family: monospace;
font-weight: 700;
color: #224;
margin: 0px;
}
div.content table, div.content td.content {
margin-left: 40px;
border: 1px solid #aaa;
font-size: 16px;
border-collapse: collapse;
padding: 8px;
}
div.content th {
font-weight: 600;
color: #000;
background-color: #aaa;
padding-left: 10px;
padding-right: 10px;
}
hr{
margin-top: 100px;
margin-bottom: 100px;
}
div.content{
margin-left: 6%;
margin-top: 5%;
margin-bottom: 6%;
margin-right: 6%;
}
h1{
font-size: 200%;
font-weight: 500;
font-family: sans-serif;
}
div.header{
margin-top: 6%;
margin-left: 6%;
font-size: 130%;
}
div.header th{
text-align: left;
font-size: 250%;
padding-bottom: 5%;
}
div.header td{
font-size: 130%;
padding-left: 5%;
}
div.function span.static{
font-size: 80%;
font-weight: 500;
margin-right: 10px;
}
div.content td.function{
font-size: 24px;
font-family: monospace;
font-weight: 700;
color: #224;
padding-bottom: 5px;
vertical-align: top;
}
div.content td.static{
padding-right: 10px;
vertical-align: center;
}
div.content table.declarations{
border: 0px;
margin-left: 15px;
margin-bottom: -10px;
}
"""
def getRpcTypesHeader():
    """Return RPC_types.h: RPC_RESULT, RPC_SIZE_RESULT and the mutex-ID enum
    shared by client and server.  An optional user include can be injected via
    the EXTRA_INCLUDE_INTO_CLIENT_TYPES_H config entry."""
    files = getFilePaths()
    return """{doNotModifyHeader}
#ifndef RPC_TYPES_H
#define RPC_TYPES_H
#include <stddef.h>
{extrainclude}
{rpc_enum}
typedef struct {{
RPC_RESULT result;
size_t size;
}} RPC_SIZE_RESULT;
typedef enum {{
RPC_mutex_parsing_complete,
RPC_mutex_caller,
RPC_mutex_in_caller,
RPC_mutex_answer,
RPC_MUTEX_COUNT
}} RPC_mutex_id;
#define RPC_number_of_mutexes 4
#endif /* RPC_TYPES_H */
""".format(
        doNotModifyHeader=doNotModifyHeader,
        rpc_enum=get_rpc_enum(),
        prefix=prefix,
        extrainclude=files["EXTRA_INCLUDE_INTO_CLIENT_TYPES_H"]
    )
def getRequestParserHeader():
    """Return the server-side parser.h declaring get_request_size,
    parse_request and cancel_reply."""
    return """{doNotModifyHeader}
#include "RPC_types.h"
{externC_intro}
/* Receives a pointer to a (partly) received message and it's size.
Returns a result and a size. If size equals RPC_SUCCESS then size is the
size that the message is supposed to have. If result equals RPC_COMMAND_INCOMPLETE
then more bytes are required to determine the size of the message. In this case
size is the expected number of bytes required to determine the correct size.*/
RPC_SIZE_RESULT {prefix}get_request_size(const void *buffer, size_t size_bytes);
/* This function parses RPC requests, calls the original function and sends an
answer. */
void {prefix}parse_request(const void *buffer, size_t size_bytes);
/* If the requested function calls {prefix}cancel_reply() the reply is suppressed
and the client will probably timeout*/
void {prefix}cancel_reply(void);
{externC_outro}
""".format(
        doNotModifyHeader=doNotModifyHeader,
        externC_intro=externC_intro,
        externC_outro=externC_outro,
        prefix=prefix,
    )
# Top-level driver: generate every client- and server-side file.  Any failure
# is reported with a traceback and a non-zero exit code.
try:
    root = ET.Element("RPC")
    files = getFilePaths()
    from os.path import join, split, relpath
    root.set("prefix", prefix)
    root.set("projectname", projectname)
    root.set("version_number", str(version_number))
    root.set("command_id_start", str(start_command_id))
    root.set("hash", rawhash)
    rpcHeader, rpcImplementation, requestParserImplementation, answerParser, answerSizeChecker, documentation = \
        generateCode(
            files["ServerHeader"], root, relpath(
                files["SERVER_GENINCDIR"], files["SERVER_SRCDIR"]), relpath(
                files["ServerHeader"], files["SERVER_SRCDIR"]), files["CLIENT_FUNCTION_PREFIX"])
    # generateCode pops every matched #pragma ID; anything left over names a
    # function that never appeared in the header.
    for function in functionPredefinedIDs:
        print(
            "Warning: #pragma ID {function} {ID} was ignored since no function named {function} was declared".format(
                function=function,
                ID=functionPredefinedIDs[function] *
                2))
    requestParserImplementation = doNotModifyHeader + \
        '\n' + requestParserImplementation
    # Wrap the client implementation with banner, extern "C" and includes.
    rpcImplementation = '''{doNotModify}
{externC_intro}
#include <stdint.h>
#include <assert.h>
#include "{rpc_client_header}"
#include "{network_include}"
static const unsigned char *{prefix}buffer;
static char {prefix}initialized;
{implementation}{externC_outro}
'''.format(
        doNotModify=doNotModifyHeader,
        rpc_client_header=join(
            relpath(
                files["CLIENT_SPCINCDIR"],
                files["CLIENT_SRCDIR"]),
            prefix +
            files["ServerHeaderName"] +
            '.h'),
        implementation=rpcImplementation,
        externC_outro=externC_outro,
        externC_intro=externC_intro,
        prefix=prefix,
        network_include=join(
            relpath(
                files["CLIENT_GENINCDIR"],
                files["CLIENT_SRCDIR"]),
            prefix + 'network.h')
    )
    xml = ET.ElementTree()
    xml._setroot(root)
    # (config directory key, file name, content) triples for the client side.
    dir_name_content = []
    dir_name_content.append(("CLIENT_SPCINCDIR",
                             files["ServerHeaderName"] + ".h",
                             getRPC_serviceHeader(rpcHeader, prefix + files["ServerHeaderName"] + '_H',
                                                  getTypeDeclarations(),
                                                  join(relpath(files["CLIENT_GENINCDIR"],
                                                               files["CLIENT_SPCINCDIR"]),
                                                       "RPC_types.h"))))
    dir_name_content.append(
        ("CLIENT_GENINCDIR", "RPC_types.h", getRpcTypesHeader()))
    dir_name_content.append(
        ("CLIENT_GENINCDIR", "network.h", getNetworkHeader()))
    clientcode = "".join((
        rpcImplementation,
        answerSizeChecker,
        answerParser,
        getRPC_Parser_init(),
        getRPC_Parser_exit(),
        externC_outro),
    )
    dir_name_content.append(
        ("CLIENT_SRCDIR",
         files["ServerHeaderName"] + ".c",
         clientcode))
    # Commented-out former entries, kept for reference:
    """
("documentation", generateDocumentation(documentation, files["ServerHeaderFileName"])),
("style", getCss()),
]
"""
    print("Writing client files relative to " +
          files["CLIENT_CONFIG_PATH"] + ":")
    for dir, name, content in dir_name_content:
        # Generic RPC_* files keep their name; everything else gets the prefix.
        filename = name if name.startswith("RPC_") else prefix + name
        print(
            "\t" +
            relpath(
                join(
                    files[dir],
                    filename),
                files["CLIENT_CONFIG_PATH"]))
        with open(join(files[dir], filename), "w") as f:
            f.write(content)
    # Optional HTML documentation plus stylesheet for the client.
    if "CLIENT_DOCDIR" in files:
        print(
            "\t" +
            relpath(
                join(
                    files["CLIENT_DOCDIR"],
                    prefix +
                    files["ServerHeaderName"] +
                    ".html"),
                files["CLIENT_CONFIG_PATH"]))
        with open(join(files["CLIENT_DOCDIR"], prefix + files["ServerHeaderName"] + ".html"), "w") as f:
            f.write(
                generateDocumentation(
                    documentation,
                    files["ServerHeaderName"],
                    relpath(files["ServerHeader"], files["SERVER_CONFIG_PATH"])))
        print(
            "\t" +
            relpath(
                join(
                    files["CLIENT_DOCDIR"],
                    prefix +
                    files["ServerHeaderName"] +
                    ".css"),
                files["CLIENT_CONFIG_PATH"]))
        with open(join(files["CLIENT_DOCDIR"], prefix + files["ServerHeaderName"] + ".css"), "w") as f:
            f.write(getCss())
    # Descriptive XML per configured client XML directory.
    for path in files["CLIENT_XMLDIR"]:
        xml_path = join(
            path,
            prefix +
            files["ServerHeaderName"] +
            ".xml")
        print("client descriptive xml: "+xml_path)
        xml.write(
            xml_path,
            encoding="UTF-8",
            xml_declaration=True)
    try:
        makedirs(join(files["CLIENT_DOCDIR"], projectname, prefix))
    except:
        # Best effort: the directory may already exist.
        pass
    # Hash-named XML copies (one per client XML directory).
    for path in files["CLIENT_XMLDIR"]:
        xml_path = join(
            path,
            projectname,
            prefix,
            rawhash + ".xml")
        try:
            makedirs(join(path, projectname, prefix))
        except:
            # Best effort: the directory may already exist.
            pass
        print("client rawhash xml: "+xml_path)
        xml.write(
            xml_path,
            encoding="UTF-8",
            xml_declaration=True)
    # Server-side files.
    dir_name_content = []
    dir_name_content.append(
        ("SERVER_GENINCDIR", "RPC_types.h", getRpcTypesHeader()))
    dir_name_content.append(
        ("SERVER_GENINCDIR", "network.h", getNetworkHeader()))
    dir_name_content.append(
        ("SERVER_GENINCDIR",
         "parser.h",
         getRequestParserHeader()))
    dir_name_content.append(
        ("SERVER_SRCDIR", "parser.c", requestParserImplementation))
    print("Writing server files relative to " +
          files["SERVER_CONFIG_PATH"] + ":")
    for dir, name, content in dir_name_content:
        filename = name if name.startswith("RPC_") else prefix + name
        print(
            "\t" +
            relpath(
                join(
                    files[dir],
                    filename),
                files["SERVER_CONFIG_PATH"]))
        with open(join(files[dir], filename), "w") as f:
            f.write(content)
    # Optional HTML documentation plus stylesheet for the server.
    if "SERVER_DOCDIR" in files:
        print(
            "\t" +
            relpath(
                join(
                    files["SERVER_DOCDIR"],
                    prefix +
                    files["ServerHeaderName"] +
                    ".html"),
                files["SERVER_CONFIG_PATH"]))
        with open(join(files["SERVER_DOCDIR"], prefix + files["ServerHeaderName"] + ".html"), "w") as f:
            f.write(
                generateDocumentation(
                    documentation,
                    files["ServerHeaderName"],
                    relpath(files["ServerHeader"], files["SERVER_CONFIG_PATH"])))
        print(
            "\t" +
            relpath(
                join(
                    files["SERVER_DOCDIR"],
                    prefix +
                    files["ServerHeaderName"] +
                    ".css"),
                files["SERVER_CONFIG_PATH"]))
        with open(join(files["SERVER_DOCDIR"], prefix + files["ServerHeaderName"] + ".css"), "w") as f:
            f.write(getCss())
    # Descriptive XML per configured server XML directory.
    for path in files["SERVER_XMLDIR"]:
        xml_path = join(
            path,
            prefix +
            files["ServerHeaderName"] +
            ".xml")
        print("server descriptive xml: "+xml_path)
        xml.write(
            xml_path,
            encoding="UTF-8",
            xml_declaration=True)
    # Hash-named XML copies (one per server XML directory).
    for path in files["SERVER_XMLDIR"]:
        xml_path = join(
            path,
            projectname,
            prefix,
            rawhash + ".xml")
        try:
            makedirs(join(path, projectname, prefix))
        except:
            # Best effort: the directory may already exist.
            pass
        print("server rawhash xml: "+ xml_path)
        xml.write(
            xml_path,
            encoding="UTF-8",
            xml_declaration=True)
except:
    # Catch-all boundary so the user gets a readable traceback and a failure
    # exit code instead of a half-written output tree going unnoticed.
    import traceback
    traceback.print_exc(0)
    exit(-1)
|
Crystal-Photonics/RPC-Generator
|
RPC-gen.py
|
Python
|
lgpl-3.0
| 91,328
|
[
"CRYSTAL"
] |
8fa88ec3659a01b0a4db30b572d2e113c64277d7d37b409b51d2e4abc77eb9c0
|
"""Support for control of ElkM1 tasks ("macros")."""
from __future__ import annotations
from typing import Any
from homeassistant.components.scene import Scene
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ElkAttachedEntity, create_elk_entities
from .const import DOMAIN
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up ElkTask scene entities for a config entry."""
    # Per-entry data is stashed under the integration's DOMAIN key.
    data = hass.data[DOMAIN][config_entry.entry_id]
    controller = data["elk"]
    tasks: list[ElkTask] = []
    create_elk_entities(data, controller.tasks, "task", ElkTask, tasks)
    async_add_entities(tasks, True)
class ElkTask(ElkAttachedEntity, Scene):
    """Elk-M1 task as scene."""
    async def async_activate(self, **kwargs: Any) -> None:
        """Activate the task."""
        # Delegate to the underlying elkm1 element; presumably the panel
        # runs the task and state flows back via the attached-entity
        # callbacks — confirm against ElkAttachedEntity.
        self._element.activate()
|
rohitranjan1991/home-assistant
|
homeassistant/components/elkm1/scene.py
|
Python
|
mit
| 1,024
|
[
"Elk"
] |
c3a777089aac75b4a8580de563fb79695544624dfbcea59f23d288129dd69a37
|
# -*- coding: utf-8 -*-
"""
fem code for practice
check out FEM - Numerical Python pg 690/1311
"""
__author__ = 'Neal Gordon <nealagordon@gmail.com>'
__date__ = '2016-09-06'
from numpy import array, matrix, zeros, linspace, arange
#from matplotlib.pyplot import *
import matplotlib.pyplot as plt
from numpy.linalg import solve, inv
from matplotlib.pyplot import plot, figure, xlim, ylim, title, xlabel, ylabel, show
import numpy as np
plt.rcParams['figure.figsize'] = (10, 8) # (width, height)
def create_mesh():
    """Generate a 10x10 quadratic quad mesh on the unit square and write
    it to ``my_mesh.vtk`` using the third-party ``pyfem`` package."""
    # Bug fix: the original did `import pyfem` but then referenced
    # Block2D and Mesh unqualified, which raised NameError at runtime.
    from pyfem import Block2D, Mesh
    my_block = Block2D()
    my_block.set_coords([[0, 0], [1, 0], [1, 1], [0, 1]])
    my_block.set_quadratic()
    my_block.set_divisions(10, 10)
    my_mesh = Mesh()
    my_mesh.add_blocks(my_block)
    my_mesh.generate()
    my_mesh.write("my_mesh.vtk")
def simple_fem():
    """Solve a small hard-coded 10-DOF demo system and print the nodal
    deformation matrix C*F, where C is the inverse of the stiffness K."""
    from numpy import matrix, linalg
    from math import pi
    bar_length = 1.5
    diameter = 3
    # Force components; only the first DOF carries a load (set below).
    F1 = F2 = F3 = F4 = F5 = F6 = F7 = F8 = F9 = F10 = 0
    # K Matrix of order 10 x 10
    area = (pi / 4) * (diameter * diameter)
    modulus = 200
    # multiplying factor
    factor = (modulus * area) / (2 * 1.414 * bar_length)
    F1 = 4
    F = matrix([[F1], [F2], [F3], [F4], [F5], [F6], [F7], [F8], [F9], [F10]])
    # K Matrix of order 10 x 10
    K = factor * matrix([[2.828, 4, 0, 0, 0, 0, 0, 0, 0, 0],
                         [-2, 4, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 4.828, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 2, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 1.414, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, -2, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 4.828, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 2, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0, 1.414, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0, 0, -2]])
    # U = (Inverse of K) * F
    C = K.I
    # C matrix is the inverse of K
    print("The Deformation matrix is: ")
    print(C * F)
    print("nodal displacements")
def cst_fem(structure='4node'):
    '''
    Gusset plate problem using 8 CST elemetnts. Uniform load across top edge
    is modeled with 2 concentrated forces
    structure = ['truss','4node', '9node']

    Assembles the global stiffness matrix from constant-strain-triangle
    (CST) elements, applies fixed boundary conditions, solves for nodal
    displacements, recovers element strains/stresses, and plots the
    undeformed/deformed mesh plus a stress contour.
    '''
    ## define variables
    E = 10e6          # modulus of elasticity
    L = 20            # length of sketch (see drawing)
    Q = 1000          # pounds/inch load
    plotfactor = 1e2  # displacement factor for plot
    poisson = 0.3     # poisson ratio
    ## Set nodal coordinates and element destination entries.
    #     u1  u2  u3  u4  u5  u6  u7  u8
    #==============================================================================
    if structure == '4node':
        nodexy = array([0, 0, 10, 10, 20, 20, 0, 20])  # global node coordinets (arbitrary)
        nodeBC = array([1, 1, 0, 0, 0, 0, 1, 1])  # boundary conditions, 1 if u=0
        nodex = list(nodexy[0::2])
        nodey = list(nodexy[1::2])
        ####nodexyplot = [nodex, nodey]
        nodexyT = list(zip(nodex, nodey))
        ### list(zip(nodexplot, nodeyplot))
        #### node      0  1  2  3
        adj = array([[0, 1, 0, 1],
                     [0, 0, 1, 1],
                     [0, 0, 0, 1],
                     [0, 0, 0, 0]])
        ####              x  y   x  y   x  y
        ####              u1 u2  u# u#  u# u#
        elnodes = array([[0, 1, 2, 3, 6, 7],
                         [6, 7, 2, 3, 4, 5]])  # 1 element per row, dofs labled CCW (arbitrary)
    #==============================================================================
    elif structure == '9node':
        # 9 nodes
        nodexy = array([0, 0, L/4, L/4, L/2, L/2, 3*L/4, 3*L/4, L,
                        L, L/2, L, L/4, 3*L/4, 0, L, 0, L/2])  # global node coordinets (arbitrary)
        #  u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11 u12
        # NOTE(review): 9 nodes give 18 DOFs but this vector has 20
        # entries; only the first `doftotal` (18) are read in the BC loop
        # below, so the extras are silently ignored — confirm intended.
        nodeBC = array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])  # is dof fixed?
        #                 x   y   x   y   x   y
        #                 u1  u2  u#  u#  u#  u#
        elnodes = array([[ 0,  1,  2,  3, 16, 17],
                         [ 2,  3,  4,  5, 16, 17],
                         [ 4,  5, 12, 13, 16, 17],
                         [ 4,  5, 10, 11, 12, 13],
                         [ 4,  5,  6,  7, 10, 11],
                         [ 6,  7,  8,  9, 10, 11],
                         [12, 13, 10, 11, 14, 15],
                         [16, 17, 12, 13, 14, 15]])  # 1 element per row, dofs labled CCW (arbitrary)
        adj = array([[0,1,0,0,0,0,0,0,1],
                     [0,0,1,0,0,0,0,0,1],
                     [0,0,0,1,0,1,1,0,1],
                     [0,0,0,0,1,1,0,0,0],
                     [0,0,0,0,0,1,0,0,0],
                     [0,0,0,0,0,0,1,1,0],
                     [0,0,0,0,0,0,0,1,1],
                     [0,0,0,0,0,0,0,0,1],
                     [0,0,0,0,0,0,0,0,0]])
    #==============================================================================
    elif structure == 'truss':
        nodexy = array([0,0,1,0,2,0,3,0,4,0,5,0,5,1,4,1,3,1,2,1,1,1,0,1] )
        nodeBC = array([1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1] )
        elnodes = array([ [ 0,  1,  2,  3, 22, 23],
                          [ 2,  3,  4,  5, 20, 21],
                          [ 4,  5,  6,  7, 18, 19],
                          [ 6,  7,  8,  9, 16, 17],
                          [ 8,  9, 10, 11, 14, 15],
                          [10, 11, 12, 13, 14, 15],
                          [ 8,  9, 14, 15, 16, 17],
                          [ 6,  7, 16, 17, 18, 19],
                          [ 4,  5, 18, 19, 20, 21],
                          [ 2,  3, 20, 21, 22, 23]])
        nodes = int(len(nodexy)//2)
        # Build the adjacency matrix from the explicit connectivity list.
        adj = np.zeros((nodes,nodes))
        conmat = array([[0,0, 1, 1,1,2,2,2,3,3,3,4,4,4,5,5,6,7, 8, 9, 10],
                        [1,11,2,10,11,3,9,10,4,8,9,5,7,8,6,7, 7,8, 9, 10, 11]])
        conmat = np.transpose(conmat)
        for i in range(len(conmat)):
            adj[conmat[i,0],conmat[i,1]] = 1
    #### Begin calculations
    nodex = list(nodexy[0::2])
    nodey = list(nodexy[1::2])
    ####nodexyplot = [nodex, nodey]
    nodexyT = list(zip(nodex, nodey))
    ### list(zip(nodexplot, nodeyplot))
    elements = int(len(elnodes))   # Number of elements
    nodes = int(len(nodexy)//2)    # number of nodes
    doftotal = int(nodes*2)        # number of total degrees of freedom
    nodexyplot = zeros((nodes,2))  # global coordinates of nodes for plotting
    nodeplotload = zeros((nodes,2))  # global coordiantes for deflected nodes for plotting
    P = zeros((doftotal,1))        # total load vector
    U = zeros((doftotal,1))        # displacements
    Ue = zeros((6,1))              # displacements per element, 6 for CST
    Ke = zeros((6,6))              # stiffness per element
    K = zeros((doftotal,doftotal)) # totral structure stiffness
    B = zeros((3,6))               # dN/dx , strain = B*u, correct for CST
    D = zeros((3,3))               # Elasticity Matrix (D), correct for CST
    strain = zeros((elements,3))   # Element(row) strain per node (column)
    stress = zeros((elements,3))   # Element(row) stress per node (column)
    pstress = 1                    # pstress >0 plane stress  pstress = 0 plane strain
    ## Load Vector
    # NOTE(review): DOF index 9 is hard-coded; it only matches the
    # intended loaded node for some of the three meshes — confirm.
    P[9] = -2e6  # 10 kips load
    #P[3] = -20000/2 # 10 kips load
    ## Elasticity Matrix
    D[1,0] = poisson
    D[0,1] = poisson
    if pstress == 1:
        print('plane stress condition')
        D[0,0] = 1
        D[1,1] = 1
        D[2,2] = 0.5*(1-poisson)
        D = D*E/(1-poisson*poisson)
    else:
        print('plane strain condition')
        D[0,0] = 1-poisson
        D[1,1] = 1-poisson
        D[2,2] = 0.5*(1-2*poisson)
        D = D*E/((1-2*poisson)*(1+poisson))
    ## loop over each element, build element [B] matrix then element matrix.
    # Assemble element stiffness into structure stiffness by building B matrix
    # B = dN/dx
    # x,y are the local nodal coordinates
    for i in range(elements):  # looping through each element and building shape function
        x1 = nodexy[elnodes[i,0]]
        x2 = nodexy[elnodes[i,2]]
        x3 = nodexy[elnodes[i,4]]
        y1 = nodexy[elnodes[i,1]]
        y2 = nodexy[elnodes[i,3]]
        y3 = nodexy[elnodes[i,5]]
        x13 = x1-x3
        x21 = x2-x1
        x32 = x3-x2
        y12 = y1-y2
        y23 = y2-y3
        y31 = y3-y1
        # Standard CST strain-displacement matrix (entries not set here
        # stay zero from the initial zeros((3,6)) allocation).
        B[0,0] = y23
        B[2,0] = x32
        B[1,1] = x32
        B[2,1] = y23
        B[0,2] = y31
        B[2,2] = x13
        B[1,3] = x13
        B[2,3] = y31
        B[0,4] = y12
        B[2,4] = x21
        B[1,5] = x21
        B[2,5] = y12
        A = 0.5*(x1*y23 + x2*y31 + x3*y12)  # signed element area
        B = B/(2*A)
        Ke = B.T @ D @ B * A  # matrix multiplcation
        # assemble elements into structure stiffness
        for kk in range(6):  # 6 u dof in CST
            ii = elnodes[i,kk]  # u(j) in element i
            for j in range(6):  # 6 v dof in CST
                jj = elnodes[i,j]  # vj in element i
                K[ii,jj] += Ke[kk,j]  # add element to total structure
    ## Apply Boundary Conditions via partition matrix method for 0 displacement only
    Ksol = np.copy(K)
    Psol = np.copy(P)
    for i in range(doftotal):
        if nodeBC[i] == 1:
            # Zero row/column and put 1 on the diagonal so the fixed DOF
            # solves to exactly 0 displacement.
            Ksol[i,:] = 0
            Ksol[:,i] = 0
            Ksol[i,i] = 1
            Psol[i] = 0
    ## Solve displacements
    #U = Ksol\Psol
    U = solve(Ksol,Psol)
    ## retrieve kru of total structure stiffness matrix and get reactions
    R = K @ U
    ## loop over each element and form [B], get strains then stresses
    for i in range(elements):
        Ue = zeros((6,1))
        x1 = nodexy[elnodes[i,0]]
        x2 = nodexy[elnodes[i,2]]
        x3 = nodexy[elnodes[i,4]]
        y1 = nodexy[elnodes[i,1]]
        y2 = nodexy[elnodes[i,3]]
        y3 = nodexy[elnodes[i,5]]
        x13 = x1-x3
        x21 = x2-x1
        x32 = x3-x2
        y12 = y1-y2
        y23 = y2-y3
        y31 = y3-y1
        B[0,0] = y23
        B[2,0] = x32
        B[1,1] = x32
        B[2,1] = y23
        B[0,2] = y31
        B[2,2] = x13
        B[1,3] = x13
        B[2,3] = y31
        B[0,4] = y12
        B[2,4] = x21
        B[1,5] = x21
        B[2,5] = y12
        A = 0.5*(x1*y23 + x2*y31 + x3*y12)
        B = B*0.5/A
        for j in range(6):
            ii = elnodes[i,j]
            Ue[j] = U[ii]
        strain[i,:] = np.transpose(B @ Ue)
        stress[i,:] = np.transpose(D @ B @ Ue)
    # plot shape function
    # NOTE(review): uses x1..y3 and A left over from the LAST element of
    # the loop above — confirm that is the intended element to plot.
    x = linspace(0,20,100)
    y = linspace(0,20,100)
    N1 = (1/(2*A))*( x2*y3-y2*x3 + (y2-y3)*x +(x3-x2)*y )
    N2 = (1/(2*A))*( x3*y1-y3*x1 + (y3-y1)*x +(x1-x3)*y )
    N3 = (1/(2*A))*( x1*y2-y1*x2 + (y1-y2)*x +(x2-x1)*y )
    Nsum = N1 + N2 + N3
    fig1 = figure()
    plot(N1,'-.')
    plot(N2,'--')
    plot(N3,'.')
    plot(Nsum)
    title('CST shape functions')
    show()
    ## plotting of FEA structure for visual confirmation of results
    fig2 = figure()
    # transforms global node coordinates nodexy from 1xc vector to nx2
    #      x    y
    #adj += adj.T
    # nodexy = array([ 0,  0, 10, 10, 20, 20,  0, 20])
    plotmag = 1
    nodexyu = nodexy+U.T[0]*plotmag  # deformed coordinates
    nodexu = list(nodexyu[0::2])
    nodeyu = list(nodexyu[1::2])
    nodexyuT = list(zip(nodexu, nodeyu))
    rlen, clen = adj.shape
    x = [] ; y = []
    # Draw each edge of the adjacency graph: solid = undeformed,
    # dashed = deformed.
    for r in range(rlen):
        for c in range(clen):
            if adj[r,c] == 1:
                x = [nodexyT[r][0], nodexyT[c][0]]
                y = [nodexyT[r][1], nodexyT[c][1]]
                plt.plot(x,y,'b')
                x = [nodexyuT[r][0], nodexyuT[c][0]]
                y = [nodexyuT[r][1], nodexyuT[c][1]]
                plt.plot(x,y,'b--')
    plt.xlim([np.min(nodex)-3, np.max(nodex)+3])
    plt.ylim([np.min(nodey)-3, np.max(nodey)+3])
    # Label nodes and element centroids.
    for i in range(nodes):
        plt.text(nodex[i]+0.5, nodey[i], '$n_%i$' % (i+1),fontsize=20)
    for i in range(elements):
        xtemp = nodexy[elnodes[i][0::2]]
        ytemp = nodexy[elnodes[i][1::2]]
        elnodesx = (max(xtemp) + min(xtemp)) / 2
        elnodesy = (max(ytemp) + min(ytemp)) / 2
        plt.text(elnodesx, elnodesy, '$e_%i$' % (i+1),fontsize=20)
    plt.plot(nodex, nodey, 'o')
    plt.plot(nodexu, nodeyu, '*')
    # plot stress
    elnodeslist = []
    for e in elnodes:
        tmpnodex = list(nodexy[e[0::2]])
        tmpnodey = list(nodexy[e[1::2]])
        elnodeslist.append(list(zip(tmpnodex, tmpnodey)))
    plot_contour(elnodeslist,stress, '$\sigma$')
    #plot_contour(elnodeslist,strain,'$\epsilon$')
def gplot(adj, xy):
    '''
    Plot an undirected graph: `adj` is an (upper-triangular) adjacency
    matrix and `xy` a list of (x, y) node coordinates.

    # for a 2 element triangle
    xy = [(0, 0), (10, 10), (20, 20), (0, 20)]
    adj = [[0, 1, 0, 1],
           [0, 0, 1, 1],
           [0, 0, 0, 1],
           [0, 0, 0, 0]]
    gplot(adj,xy)
    # simple truss
    xy =[(0, 0),
         (1, 0),
         (2, 0),
         (3, 0),
         (4, 0),
         (5, 0),
         (5, 1),
         (4, 1),
         (3, 1),
         (2, 1),
         (1, 1),
         (0, 1)]
    adj = [[ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
           [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 1.],
           [ 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 1., 0.],
           [ 0., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 0.],
           [ 0., 0., 0., 0., 0., 1., 0., 1., 1., 0., 0., 0.],
           [ 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
           [ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
           [ 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
           [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
           [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
           [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
           [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]
    gplot(adj,xy)
    '''
    adj = np.array(adj)
    rlen, clen = adj.shape
    nodex = [k[0] for k in xy]
    nodey = [k[1] for k in xy]
    # (removed unused locals xtemp/ytemp from the original)
    # Draw an edge for every nonzero adjacency entry.
    for r in range(rlen):
        for c in range(clen):
            if adj[r, c] == 1:
                x = [xy[r][0], xy[c][0]]
                y = [xy[r][1], xy[c][1]]
                plt.plot(x, y, 'b')
                #plt.plot(x,y,'b--')
    xlim([np.min(nodex)-3, np.max(nodex)+3])
    ylim([np.min(nodey)-3, np.max(nodey)+3])
    plt.plot(nodex, nodey, 'o')
def plot_contour(xynodes, stress, mytitle='contour plot'):
    """
    Fill each element red (tension) or blue (compression), with opacity
    proportional to the normalized magnitude of the plotted stress.

    xynodes = [[(0, 0), (10, 10), (0, 20)], [(10, 10), (20, 20), (0, 20)]]
    stress = [[-300,-100,-700],[300,350,-350]]
    plot_contour(xynodes, stress)
    """
    stress = np.array(stress)
    # Bug fix: the original called plt.figure() twice, opening a spurious
    # empty figure window.
    fig = plt.figure()
    sigma1 = stress[:, 0]
    sigma2 = stress[:, 1]
    sigma12 = stress[:, 2]
    # Derived stress measures; kept as alternative choices for `pltsigma`.
    vonmises = np.sqrt(sigma1**2 - sigma1*sigma2 + sigma2**2 + 3*sigma12**2)
    prins1 = (sigma1 + sigma2) / 2 + np.sqrt(((sigma1-sigma2)/2)**2 + sigma12**2)
    prins2 = (sigma1 + sigma2) / 2 - np.sqrt(((sigma1-sigma2)/2)**2 + sigma12**2)
    sigma1norm = sigma1 / np.max(np.abs(sigma1))
    sigma2norm = sigma2 / np.max(np.abs(sigma2))
    sigma12norm = sigma12 / np.max(np.abs(sigma12))
    vonmisesnorm = vonmises / np.max(np.abs(vonmises))
    prins1norm = prins1 / np.max(np.abs(prins1))
    prins2norm = prins2 / np.max(np.abs(prins2))
    pltsigma = sigma1norm  # quantity to plot; swap for another *norm above
    for i, xy in enumerate(xynodes):
        x = [k[0] for k in xy]
        y = [k[1] for k in xy]
        # red for tension, blue for compression
        pltcolor = 'r' if pltsigma[i] >= 0 else 'b'
        sigmavalnorm = pltsigma[i] if pltsigma[i] > 0 else pltsigma[i]*-1
        plt.fill(x, y, pltcolor, alpha=sigmavalnorm)
        plt.plot(x, y, 'k')
    title(mytitle)
    tmpx = [x[0] for k in xynodes for x in k]
    tmpy = [x[1] for k in xynodes for x in k]
    plt.xlim([np.min(tmpx)-3, np.max(tmpx)+3])
    plt.ylim([np.min(tmpy)-3, np.max(tmpy)+3])
    plt.show()
if __name__ == '__main__':
    # Run the CST demo on the 9-node gusset-plate mesh.
    # 4node, 9node, truss
    cst_fem('9node')
    #plot_stress()
|
nagordon/mechpy
|
mechpy/fem.py
|
Python
|
mit
| 16,951
|
[
"VTK"
] |
d01361d104af6dfb3057f5fb70ba4c2763d1bfd2c5b3be58e32e3f076c4b1597
|
"""Implements the hashids algorithm in python. For more information, visit
http://www.hashids.org/. Compatible with Python 2.6, 2.7 and 3"""
import warnings
from functools import wraps
from math import ceil
__version__ = '1.1.0'
# Ratios controlling how many separator and guard characters are carved
# out of the alphabet (len(alphabet) / ratio, ceiled).
RATIO_SEPARATORS = 3.5
RATIO_GUARDS = 12
# Python 2/3 compatibility: `basestring` only exists on Python 2; fall
# back to `str` on Python 3.
try:
    StrType = basestring
except NameError:
    StrType = str
def _is_str(candidate):
    """Returns whether a value is a string (`basestring` on Python 2,
    `str` on Python 3)."""
    return isinstance(candidate, StrType)
def _is_uint(number):
"""Returns whether a value is an unsigned integer."""
try:
return number == int(number) and number >= 0
except ValueError:
return False
def _split(string, splitters):
"""Splits a string into parts at multiple characters"""
part = ''
for character in string:
if character in splitters:
yield part
part = ''
else:
part += character
yield part
def _hash(number, alphabet):
"""Hashes `number` using the given `alphabet` sequence."""
hashed = ''
len_alphabet = len(alphabet)
while True:
hashed = alphabet[number % len_alphabet] + hashed
number //= len_alphabet
if not number:
return hashed
def _unhash(hashed, alphabet):
"""Restores a number tuple from hashed using the given `alphabet` index."""
number = 0
len_hash = len(hashed)
len_alphabet = len(alphabet)
for i, character in enumerate(hashed):
position = alphabet.index(character)
number += position * len_alphabet ** (len_hash - i - 1)
return number
def _reorder(string, salt):
"""Reorders `string` according to `salt`."""
len_salt = len(salt)
if len_salt == 0:
return string
i, index, integer_sum = len(string) - 1, 0, 0
while i > 0:
index %= len_salt
integer = ord(salt[index])
integer_sum += integer
j = (integer + index + integer_sum) % i
temp = string[j]
trailer = string[j+1:] if j + 1 < len(string) else ''
string = string[0:j] + string[i] + trailer
string = string[0:i] + temp + string[i+1:]
i -= 1
index += 1
return string
def _index_from_ratio(dividend, divisor):
"""Returns the ceiled ratio of two numbers as int."""
return int(ceil(float(dividend) / divisor))
def _ensure_length(encoded, min_length, alphabet, guards, values_hash):
"""Ensures the minimal hash length"""
len_guards = len(guards)
guard_index = (values_hash + ord(encoded[0])) % len_guards
encoded = guards[guard_index] + encoded
if len(encoded) < min_length:
guard_index = (values_hash + ord(encoded[2])) % len_guards
encoded += guards[guard_index]
split_at = len(alphabet) // 2
while len(encoded) < min_length:
alphabet = _reorder(alphabet, alphabet)
encoded = alphabet[split_at:] + encoded + alphabet[:split_at]
excess = len(encoded) - min_length
if excess > 0:
from_index = excess // 2
encoded = encoded[from_index:from_index+min_length]
return encoded
def _encode(values, salt, min_length, alphabet, separators, guards):
    """Helper function that does the hash building without argument checks."""
    len_alphabet = len(alphabet)
    len_separators = len(separators)
    values_hash = sum(x % (i + 100) for i, x in enumerate(values))
    # The "lottery" character seeds the alphabet shuffles and leads the hash.
    lottery = alphabet[values_hash % len_alphabet]
    pieces = [lottery]
    for i, value in enumerate(values):
        # Reshuffle the alphabet for each value before hashing it.
        alphabet = _reorder(alphabet, (lottery + salt + alphabet)[:len_alphabet])
        hashed = _hash(value, alphabet)
        pieces.append(hashed)
        # Pick a value-dependent separator to place between numbers.
        sep_seed = value % (ord(hashed[0]) + i)
        pieces.append(separators[sep_seed % len_separators])
    encoded = ''.join(pieces[:-1])  # drop the trailing separator
    if len(encoded) >= min_length:
        return encoded
    return _ensure_length(encoded, min_length, alphabet, guards, values_hash)
def _decode(hashid, salt, alphabet, separators, guards):
    """Helper method that restores the values encoded in a hashid without
    argument checks."""
    # Strip guard padding: with 2 or 3 guard-delimited parts the payload
    # is the middle one, otherwise the whole string.
    parts = tuple(_split(hashid, guards))
    hashid = parts[1] if len(parts) in (2, 3) else parts[0]
    if not hashid:
        return
    # First character is the lottery; the rest are separator-delimited hashes.
    lottery_char, hashid = hashid[0], hashid[1:]
    for part in _split(hashid, separators):
        salted = (lottery_char + salt + alphabet)[:len(alphabet)]
        alphabet = _reorder(alphabet, salted)
        yield _unhash(part, alphabet)
def _deprecated(func):
"""A decorator that warns about deprecation when the passed-in function is
invoked."""
@wraps(func)
def with_warning(*args, **kwargs):
warnings.warn(
('The %s method is deprecated and will be removed in v2.*.*' %
func.__name__),
DeprecationWarning
)
return func(*args, **kwargs)
return with_warning
class Hashids(object):
    """Hashes and restores values using the "hashids" algorithm."""
    # Default character set; callers may supply any alphabet with at
    # least 16 unique characters.
    ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
    def __init__(self, salt='', min_length=0, alphabet=ALPHABET):
        """
        Initializes a Hashids object with salt, minimum length, and alphabet.
        :param salt: A string influencing the generated hash ids.
        :param min_length: The minimum length for generated hashes
        :param alphabet: The characters to use for the generated hash ids.
        """
        self._min_length = max(int(min_length), 0)
        self._salt = salt
        # Reserve separator characters (only those present in the alphabet).
        separators = ''.join(x for x in 'cfhistuCFHISTU' if x in alphabet)
        # De-duplicate the alphabet and remove the separator characters.
        alphabet = ''.join(x for i, x in enumerate(alphabet)
                           if alphabet.index(x) == i and x not in separators)
        len_alphabet, len_separators = len(alphabet), len(separators)
        if len_alphabet + len_separators < 16:
            raise ValueError('Alphabet must contain at least 16 '
                             'unique characters.')
        separators = _reorder(separators, salt)
        # Ensure enough separators exist; steal from the alphabet if not.
        min_separators = _index_from_ratio(len_alphabet, RATIO_SEPARATORS)
        if not separators or len_separators < min_separators:
            if min_separators == 1:
                min_separators = 2
            if min_separators > len_separators:
                split_at = min_separators - len_separators
                separators += alphabet[:split_at]
                alphabet = alphabet[split_at:]
                len_alphabet = len(alphabet)
        alphabet = _reorder(alphabet, salt)
        # Carve guard characters (used to pad short hashes) out of the
        # separators when the alphabet is tiny, otherwise the alphabet.
        num_guards = _index_from_ratio(len_alphabet, RATIO_GUARDS)
        if len_alphabet < 3:
            guards = separators[:num_guards]
            separators = separators[num_guards:]
        else:
            guards = alphabet[:num_guards]
            alphabet = alphabet[num_guards:]
        self._alphabet = alphabet
        self._guards = guards
        self._separators = separators
        # Support old API
        self.decrypt = _deprecated(self.decode)
        self.encrypt = _deprecated(self.encode)
    def encode(self, *values):
        """Builds a hash from the passed `values`.
        :param values The values to transform into a hashid
        >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
        >>> hashids.encode(1, 23, 456)
        '1d6216i30h53elk3'
        """
        # Only non-empty sequences of unsigned integers are encodable;
        # everything else produces the empty string.
        if not (values and all(_is_uint(x) for x in values)):
            return ''
        return _encode(values, self._salt, self._min_length, self._alphabet,
                       self._separators, self._guards)
    def decode(self, hashid):
        """Restore a tuple of numbers from the passed `hashid`.
        :param hashid The hashid to decode
        >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
        >>> hashids.decode('1d6216i30h53elk3')
        (1, 23, 456)
        """
        if not hashid or not _is_str(hashid):
            return ()
        try:
            numbers = tuple(_decode(hashid, self._salt, self._alphabet,
                                    self._separators, self._guards))
            # Round-trip check rejects forged or mistyped hashids.
            return numbers if hashid == self.encode(*numbers) else ()
        except ValueError:
            return ()
    def encode_hex(self, hex_str):
        """Converts a hexadecimal string (e.g. a MongoDB id) to a hashid.
        :param hex_str The hexadecimal string to encodes
        >>> Hashids.encode_hex('507f1f77bcf86cd799439011')
        'y42LW46J9luq3Xq9XMly'
        """
        # Split into 12-hex-digit chunks, each prefixed with '1' so that
        # leading zeros survive the int round-trip.
        numbers = (int('1' + hex_str[i:i+12], 16)
                   for i in range(0, len(hex_str), 12))
        try:
            return self.encode(*numbers)
        except ValueError:
            return ''
    def decode_hex(self, hashid):
        """Restores a hexadecimal string (e.g. a MongoDB id) from a hashid.
        :param hashid The hashid to decode
        >>> Hashids.decode_hex('y42LW46J9luq3Xq9XMly')
        '507f1f77bcf86cd799439011'
        """
        # Drop each chunk's '1' prefix added by encode_hex.
        return ''.join(('%x' % x)[1:] for x in self.decode(hashid))
|
BioGRID/IMS
|
operations/database/classes/Hashids.py
|
Python
|
lgpl-3.0
| 9,341
|
[
"VisIt"
] |
536f54df9a2ad94d51b6d6fdb28925ba70a6e57f3a60fa1f58d6e2952decee41
|
"""
"""
import ast
import operator
from collections import deque
from . import ASTOptimization
class DeadCodeElimination(ASTOptimization):
    """AST pass that drops statements made unreachable by a `return` and
    simplifies `if` statements whose body is only `pass`."""
    @property
    def name(self) -> str:
        return 'Dead code elimination'
    @property
    def description(self) -> str:
        return ''
    @property
    def level(self) -> int:
        return 1
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Depth of the node currently being visited.
        self._node_level = 0
        # Depth at which a `return` was seen; later siblings at that
        # depth are removed. None means no pending return.
        self._return_level = None
    def visit(self, node):
        # NOTE(review): truthiness test means a return recorded at level
        # 0 would never trigger elimination — presumably levels start
        # above 0 in practice; confirm against ASTOptimization.visit.
        if self._return_level:
            if self._node_level == self._return_level:
                # Sibling after a `return` at the same depth: unreachable.
                return None
            elif self._node_level < self._return_level:
                # We left the block containing the return; reset.
                self._return_level = None
        if isinstance(node, ast.Return):
            self._return_level = self._node_level
        self._node_level += 1
        node = super().visit(node)
        self._node_level -= 1
        return node
    def visit_If(self, node):
        node = self.generic_visit(node)
        # An else branch that is just `pass` is useless: drop it.
        if (node.orelse
            and len(node.orelse) == 1
            and isinstance(node.orelse[0], ast.Pass)
            ):
            node.orelse = []
        if (len(node.body) == 1
            and isinstance(node.body[0], ast.Pass)
            ):
            if node.orelse:
                # Body is empty but else isn't: invert the test and
                # promote the else branch.
                node_test = ast.UnaryOp(op=ast.Not(), operand=node.test)
                if (len(node.orelse) == 1
                    and isinstance(node.orelse[0], ast.If)
                    ):
                    # Fold `if c: pass elif c2: B else: E` into
                    # `if (not c) and c2: B else: E`.
                    # NOTE(review): this looks unsound — originally E
                    # only ran when both c and c2 were false, but after
                    # the fold E also runs when c is true; confirm.
                    node_test = ast.BoolOp\
                        ( op     = ast.And()
                        , values = [node_test, node.orelse[0].test]
                        )
                    node.test   = ast.copy_location(node_test, node.orelse[0].test)
                    node.body   = node.orelse[0].body
                    node.orelse = node.orelse[0].orelse
                else:
                    node.test   = ast.copy_location(node_test, node.test)
                    node.body   = node.orelse
                    node.orelse = []
            else:
                # Neither branch does anything: delete the whole `if`.
                # NOTE(review): side effects in node.test are discarded
                # too — confirm that is intended.
                node = None
        return node
|
Amper/opyum
|
opyum/optimizations/dead_code_elimination.py
|
Python
|
bsd-3-clause
| 2,174
|
[
"VisIt"
] |
c1cec3f3e0ca88df5d688c4ecd2f1250f14576c4d28575c748b65952c330d823
|
#!/usr/bin/python
import os, subprocess
import textwrap, argparse
if __name__ == '__main__':
    argument_parser = argparse.ArgumentParser(
        prog='stability_all.py',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
        create a graphical functions from input data, all languages
        --------------------------------
        example of use $python3 %(prog)s
        '''))
    argument_parser.add_argument('--inputdir', dest='inputdir', required=True, type=str, help='input directory (required)')
    argument_parser.add_argument('--language', dest='lng', action='append', required=False, type=str, help='language (default all, optional)')
    args = argument_parser.parse_args()
    inputdir = args.inputdir
    lng = args.lng
    # Collect the per-sense directories and the language files they contain.
    list_directories = []
    lang_files = []
    for d1 in os.listdir(inputdir):
        list_directories.append(d1)
        for d2 in os.listdir(os.path.join(inputdir, d1)):
            lng_file = d2.replace('.tab', '')
            if lng_file not in lang_files:
                lang_files.append(lng_file)
    print(lang_files)
    # if no introduce language parameter, all language. otherwise check
    # that every introduced language is present in the files
    if lng is None:
        languages = lang_files
    else:
        for l in lng:
            if l not in lang_files:
                print("Language must be present in files, options:" + str(lang_files) + "\n")
                exit(1)
        languages = lng
    # Bug fix: the original leaked file handles (unclosed open() inside a
    # genexp per comparison and a manually-closed CSV); use `with` blocks.
    with open('stability_senses_all.csv', 'w') as out_file:
        # build header ###############################################
        out_file.write("vs,")
        for lang in languages:
            out_file.write(lang + ",")
        out_file.write("\n")
        # build body #################################################
        visit = []
        for elem_1 in list_directories:
            for elem_2 in list_directories:
                if elem_1 != elem_2 and elem_1 not in visit and elem_2 not in visit:
                    out_file.write(elem_1 + "_vs_" + elem_2 + ",")
                    for lang in languages:
                        path_1 = os.path.join(inputdir, elem_1, lang + ".tab")
                        path_2 = os.path.join(inputdir, elem_2, lang + ".tab")
                        with open(path_1) as f:
                            num_lines_senses_1 = sum(1 for line in f)
                        # NOTE: directory/language names are interpolated
                        # into a shell command; only run on trusted input.
                        subprocess.check_output("sort " + path_1 + " > tmp1.txt", shell=True)
                        subprocess.check_output("sort " + path_2 + " > tmp2.txt", shell=True)
                        equals = subprocess.check_output("comm -1 -2 tmp1.txt tmp2.txt | wc | gawk '{print $1}'", shell=True).strip()
                        # Fraction of elem_1's senses also found in elem_2.
                        out_file.write(str(float(equals) / num_lines_senses_1) + ",")
                    out_file.write("\n")
            # Mark elem_1 visited so each pair is compared only once.
            if elem_1 not in visit:
                visit.append(elem_1)
    # Remove the scratch files directly instead of shelling out to `rm`.
    for tmp in ('tmp1.txt', 'tmp2.txt'):
        if os.path.exists(tmp):
            os.remove(tmp)
|
daniparera/MCR
|
Senses/analisys/others/stability_all.py
|
Python
|
gpl-2.0
| 3,101
|
[
"VisIt"
] |
dbc46c7ecf2c5ec2d76ce65112b78abd9e70274c5d04198fc6e653156ad4358e
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import collections
import logging
import threading
import time
# `return_when` modes accepted by wait(); _AS_COMPLETED is internal and
# used only by as_completed().
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
# Every state a Future can be in.
_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]
# Human-readable state names; both cancelled states read "cancelled"
# (presumably used when formatting a Future — Future is not fully
# visible here, confirm).
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
    """Base class for all future-related exceptions."""
    # Root of the package's exception hierarchy so callers can catch
    # everything from this module with a single except clause.
    pass
class CancelledError(Error):
    """The Future was cancelled."""
    pass
class TimeoutError(Error):
    """The operation exceeded the given deadline."""
    # NOTE: intentionally shadows the builtin TimeoutError within this
    # module's namespace.
    pass
class _Waiter(object):
    """Provides the event that wait() and as_completed() block on."""
    def __init__(self):
        # Signalled by subclasses when their wake-up condition is met.
        self.event = threading.Event()
        # Futures that completed (result, exception, or cancellation).
        self.finished_futures = []
    # The three callbacks only record the future here; subclasses decide
    # when to set the event.  They must stay separate methods (not
    # delegating to one another) because subclasses override each.
    def add_result(self, future):
        self.finished_futures.append(future)
    def add_exception(self, future):
        self.finished_futures.append(future)
    def add_cancelled(self, future):
        self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
    """Used by as_completed()."""
    def __init__(self):
        super().__init__()
        # Guards finished_futures while as_completed() drains the list
        # concurrently with worker callbacks.
        self.lock = threading.Lock()

    def add_result(self, future):
        with self.lock:
            super().add_result(future)
            self.event.set()

    def add_exception(self, future):
        with self.lock:
            super().add_exception(future)
            self.event.set()

    def add_cancelled(self, future):
        with self.lock:
            super().add_cancelled(future)
            self.event.set()
class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED)."""
    # Any single completion (result, exception, or cancellation) wakes
    # the waiter immediately.
    def add_result(self, future):
        super().add_result(future)
        self.event.set()
    def add_exception(self, future):
        super().add_exception(future)
        self.event.set()
    def add_cancelled(self, future):
        super().add_cancelled(future)
        self.event.set()
class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
    def __init__(self, num_pending_calls, stop_on_exception):
        super().__init__()
        # Outstanding futures; the event fires when this reaches zero.
        self.num_pending_calls = num_pending_calls
        # FIRST_EXCEPTION mode: an exception wakes the waiter early.
        self.stop_on_exception = stop_on_exception
        self.lock = threading.Lock()

    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if self.num_pending_calls == 0:
                self.event.set()

    def add_result(self, future):
        super().add_result(future)
        self._decrement_pending_calls()

    def add_exception(self, future):
        super().add_exception(future)
        if not self.stop_on_exception:
            self._decrement_pending_calls()
        else:
            self.event.set()

    def add_cancelled(self, future):
        super().add_cancelled(future)
        self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Build the waiter matching *return_when* and attach it to every
    future in *fs*."""
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    elif return_when in (FIRST_EXCEPTION, ALL_COMPLETED):
        # Count futures that have not yet completed.
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
        waiter = _AllCompletedWaiter(
            pending_count,
            stop_on_exception=(return_when == FIRST_EXCEPTION))
    else:
        raise ValueError("Invalid return condition: %r" % return_when)
    for f in fs:
        f._waiters.append(waiter)
    return waiter
def _yield_finished_futures(fs, waiter, ref_collect):
    """
    Iterate on the list *fs*, yielding finished futures one by one in
    reverse order.
    Before yielding a future, *waiter* is removed from its waiters
    and the future is removed from each set in the collection of sets
    *ref_collect*.
    The aim of this function is to avoid keeping stale references after
    the future is yielded and before the iterator resumes.
    """
    while fs:
        f = fs[-1]
        for futures_set in ref_collect:
            futures_set.remove(f)
        with f._condition:
            f._waiters.remove(waiter)
        # Drop the local name so the future isn't kept alive while this
        # generator is suspended at the yield below.
        del f
        # Careful not to keep a reference to the popped value
        yield fs.pop()
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.
    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
    Returns:
        An iterator that yields the given Futures as they complete (finished or
        cancelled). If any given Futures are duplicated, they will be returned
        once.
    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.monotonic()
    fs = set(fs)
    total_futures = len(fs)
    # Snapshot already-finished futures with every condition held, and
    # install the waiter in the same critical section so no completion
    # can slip between the snapshot and the wait loop.
    with _AcquireFutures(fs):
        finished = set(
                f for f in fs
                if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    finished = list(finished)
    try:
        yield from _yield_finished_futures(finished, waiter,
                                           ref_collect=(fs,))
        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                # Recompute the remaining budget on every pass.
                wait_timeout = end_time - time.monotonic()
                if wait_timeout < 0:
                    raise TimeoutError(
                            '%d (of %d) futures unfinished' % (
                            len(pending), total_futures))
            waiter.event.wait(wait_timeout)
            # Swap out the finished list under the waiter's lock so
            # worker callbacks can keep appending.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()
            # reverse to keep finishing order
            finished.reverse()
            yield from _yield_finished_futures(finished, waiter,
                                               ref_collect=(fs, pending))
    finally:
        # Remove waiter from unfinished futures
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
# Return type of wait(): the futures already done when the wait finished,
# and those still outstanding.
DoneAndNotDoneFutures = collections.namedtuple(
    'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.
    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:
            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED -   Return when all futures finish or are cancelled.
    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    # Take a consistent done/not_done snapshot under all the futures' locks;
    # the early returns below can then be answered without any waiting.
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done
        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)
        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)
        # Install the waiter while the locks are still held so no completion
        # can be missed between the snapshot and the wait below.
        waiter = _create_and_install_waiters(fs, return_when)
    waiter.event.wait(timeout)
    # Uninstall the waiter before reading its results.
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)
    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
    """Represents the result of an asynchronous computation."""
    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        # Single lock guarding all mutable state below; also used to wake
        # threads blocked in result()/exception().
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        # Waiter objects installed by wait()/as_completed().
        self._waiters = []
        # Callables to run (without holding the lock) once the future is done.
        self._done_callbacks = []
    def _invoke_callbacks(self):
        # A failing callback is logged and must not prevent the remaining
        # callbacks from running.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)
    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<%s at %#x state=%s raised %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<%s at %#x state=%s returned %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<%s at %#x state=%s>' % (
                self.__class__.__name__,
                id(self),
                _STATE_TO_DESCRIPTION_MAP[self._state])
    def cancel(self):
        """Cancel the future if possible.
        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                # Idempotent: a repeated cancel() still reports success.
                return True
            self._state = CANCELLED
            self._condition.notify_all()
        # Callbacks run after releasing the lock, so a callback that touches
        # this future cannot deadlock.
        self._invoke_callbacks()
        return True
    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING
    def done(self):
        """Return True of the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
    def __get_result(self):
        # Caller must hold self._condition and have checked state == FINISHED.
        if self._exception:
            raise self._exception
        else:
            return self._result
    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.
        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The callable
                will always be called by a thread in the same process in which
                it was added. If the future has already completed or been
                cancelled then the callable will be called immediately. These
                callables are called in the order that they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        # Already done: invoke immediately, outside the lock.
        fn(self)
    def result(self, timeout=None):
        """Return the result of the call that the future represents.
        Args:
            timeout: The number of seconds to wait for the result if the future
                isn't done. If None, then there is no limit on the wait time.
        Returns:
            The result of the call that the future represents.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            # Not done yet: block until notified or the timeout elapses,
            # then re-check the state exactly once.
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()
    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.
        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.
        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            else:
                raise TimeoutError()
    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.
        Should only be used by Executor implementations and unit tests.
        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (though calls
        to as_completed() or wait()) are notified and False is returned.
        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.
        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.
        Returns:
            False if the Future was cancelled, True otherwise.
        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')
    def set_result(self, result):
        """Sets the return value of work associated with the future.
        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        # Callbacks run outside the lock (see cancel()).
        self._invoke_callbacks()
    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.
        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""
    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.
        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.
        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()
    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).
        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: The size of the chunks the iterable will be broken into
                before being passed to a child process. This argument is only
                used by ProcessPoolExecutor; it is ignored by
                ThreadPoolExecutor.
        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.
        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if timeout is not None:
            end_time = timeout + time.monotonic()
        # Submit everything up front so the calls can run concurrently.
        fs = [self.submit(fn, *args) for args in zip(*iterables)]
        # Yield must be hidden in closure so that the futures are submitted
        # before the first iterator value is required.
        def result_iterator():
            try:
                # reverse to keep finishing order
                fs.reverse()
                while fs:
                    # Careful not to keep a reference to the popped future
                    if timeout is None:
                        yield fs.pop().result()
                    else:
                        # The remaining budget shrinks as results are consumed.
                        yield fs.pop().result(end_time - time.monotonic())
            finally:
                # On early exit (error, timeout, or abandoned generator),
                # cancel whatever has not started yet.
                for future in fs:
                    future.cancel()
        return result_iterator()
    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.
        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.
        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """
        pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        # Do not suppress exceptions raised inside the with-block.
        return False
|
prefetchnta/questlab
|
bin/x64bin/python/36/Lib/concurrent/futures/_base.py
|
Python
|
lgpl-2.1
| 21,847
|
[
"Brian"
] |
63310a112d70f8096759e74908a682137cc1aee8bdbb3aa5eefdbf2fa6d1e541
|
#!/usr/bin/env python
from __future__ import print_function
import vtk
def get_program_parameters():
    """Parse the command line and return the image filename (or None)."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Add a background image at a render window.',
        epilog='''
    Add a background image to a render window.
    ''')
    parser.add_argument('filename', default=None, type=str, nargs='?', help='A required filename.')
    return parser.parse_args().filename
def main():
    """Render a superquadric in front of a background image.

    The background comes from the JPEG file given on the command line, or
    from a synthetic canvas when no file is supplied.
    """
    colors = vtk.vtkNamedColors()
    colors.SetColor('light_cyan', [100, 255, 255, 255])
    colors.SetColor('light_magenta', [255, 100, 255, 255])
    # Verify input arguments
    fn = get_program_parameters()
    if fn:
        # Read the image
        jpeg_reader = vtk.vtkJPEGReader()
        if not jpeg_reader.CanReadFile(fn):
            print("Error reading file:", fn)
            return
        jpeg_reader.SetFileName(fn)
        jpeg_reader.Update()
        image_data = jpeg_reader.GetOutput()
    else:
        # No file given: draw a synthetic test image on a canvas instead.
        canvas_source = vtk.vtkImageCanvasSource2D()
        canvas_source.SetExtent(0, 100, 0, 100, 0, 0)
        canvas_source.SetScalarTypeToUnsignedChar()
        canvas_source.SetNumberOfScalarComponents(3)
        canvas_source.SetDrawColor(colors.GetColor4ub('warm_grey'))
        canvas_source.FillBox(0, 100, 0, 100)
        canvas_source.SetDrawColor(colors.GetColor4ub('light_cyan'))
        canvas_source.FillTriangle(10, 10, 25, 10, 25, 25)
        canvas_source.SetDrawColor(colors.GetColor4ub('light_magenta'))
        canvas_source.FillTube(75, 75, 0, 75, 5.0)
        canvas_source.Update()
        image_data = canvas_source.GetOutput()
    # Create an image actor to display the image
    image_actor = vtk.vtkImageActor()
    image_actor.SetInputData(image_data)
    # Create a renderer to display the image in the background
    background_renderer = vtk.vtkRenderer()
    # Create a superquadric
    superquadric_source = vtk.vtkSuperquadricSource()
    superquadric_source.SetPhiRoundness(1.1)
    superquadric_source.SetThetaRoundness(.2)
    # Create a mapper and actor
    superquadric_mapper = vtk.vtkPolyDataMapper()
    superquadric_mapper.SetInputConnection(superquadric_source.GetOutputPort())
    superquadric_actor = vtk.vtkActor()
    superquadric_actor.SetMapper(superquadric_mapper)
    scene_renderer = vtk.vtkRenderer()
    render_window = vtk.vtkRenderWindow()
    # Set up the render window and renderers such that there is
    # a background layer and a foreground layer
    background_renderer.SetLayer(0)
    background_renderer.InteractiveOff()
    scene_renderer.SetLayer(1)
    render_window.SetNumberOfLayers(2)
    render_window.AddRenderer(background_renderer)
    render_window.AddRenderer(scene_renderer)
    render_window_interactor = vtk.vtkRenderWindowInteractor()
    render_window_interactor.SetRenderWindow(render_window)
    # Add actors to the renderers
    scene_renderer.AddActor(superquadric_actor)
    background_renderer.AddActor(image_actor)
    # Render once to figure out where the background camera will be
    render_window.Render()
    # Set up the background camera to fill the renderer with the image
    origin = image_data.GetOrigin()
    spacing = image_data.GetSpacing()
    extent = image_data.GetExtent()
    camera = background_renderer.GetActiveCamera()
    camera.ParallelProjectionOn()
    # Centre of the image in world coordinates.
    xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]
    yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]
    # xd = (extent[1] - extent[0] + 1) * spacing[0]
    yd = (extent[3] - extent[2] + 1) * spacing[1]
    d = camera.GetDistance()
    # Parallel scale is half the viewport height, so the image fills the view.
    camera.SetParallelScale(0.5 * yd)
    camera.SetFocalPoint(xc, yc, 0.0)
    camera.SetPosition(xc, yc, d)
    # Render again to set the correct view
    render_window.Render()
    # Interact with the window
    render_window_interactor.Start()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/Images/BackgroundImage.py
|
Python
|
apache-2.0
| 3,966
|
[
"VTK"
] |
26b1bb6756fb2529346b71f4a716f225fa2fcccdbe13f6a0c64234378425690c
|
#!/usr/bin/env python
"""Colored sine wave grating in circular mask"""
############################
# Import various modules #
############################
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import *
from VisionEgg.FlowControl import Presentation, FunctionController
from VisionEgg.Gratings import SinGrating2D
from VisionEgg.Textures import Mask2D
from math import *
#####################################
#  Initialize OpenGL window/screen  #
#####################################
screen = get_default_screen()
######################################
#  Create sinusoidal grating object  #
######################################
mask = Mask2D(function='circle',       # also supports 'gaussian'
              radius_parameter=100,    # sigma for gaussian, radius for circle (units: num_samples)
              num_samples=(256, 256))  # this many texture elements in mask (covers whole size specified below)
# NOTE: I am not a color scientist, and I am not familiar with the
# needs of color scientists. Color interpolation is currently done in
# RGB space, but I assume there are other interpolation methods that
# people may want. Please submit any suggestions.
stimulus = SinGrating2D(color1           = (0.5, 0.25, 0.5), # RGB (alpha ignored if given)
                        color2           = (1.0, 0.5, 0.1),  # RGB (alpha ignored if given)
                        contrast         = 0.2,
                        pedestal         = 0.1,
                        mask             = mask,  # optional
                        position         = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
                        anchor           = 'center',
                        size             = ( 300.0 , 300.0 ),
                        spatial_freq     = 20.0 / screen.size[0], # units of cycles/pixel
                        temporal_freq_hz = 1.0,
                        orientation      = 270.0 )
def pedestal_func(t):
    """Return the grating pedestal at time *t* (seconds).

    Oscillates sinusoidally at 0.2 Hz over [0.1, 0.9]; together with
    contrast = 0.2 this keeps the total luminance range inside [0.0, 1.0].
    """
    freq_hz = 0.2
    phase = t*2*pi * freq_hz
    return 0.5 + 0.4 * sin(phase)
###############################################################
#  Create viewport - intermediary between stimuli and screen  #
###############################################################
viewport = Viewport( screen=screen, stimuli=[stimulus] )
########################################
#  Create presentation object and go!  #
########################################
p = Presentation(go_duration=(10.0,'seconds'),viewports=[viewport])
# Drive the grating's 'pedestal' parameter from pedestal_func on every frame.
p.add_controller(stimulus,'pedestal',FunctionController(during_go_func=pedestal_func))
p.go()
|
visionegg/visionegg
|
demo/color_grating.py
|
Python
|
lgpl-2.1
| 2,726
|
[
"Gaussian"
] |
416fd81fb9303e79ae3f2de2113535e44fe0f2bbbb247709c78f7154453d221c
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the Speciation Particle Swarm Optimization algorithm as
presented in *Li, Blackwell, and Branke, 2006, Particle Swarm with Speciation
and Adaptation in a Dynamic Environment.*
"""
import itertools
import math
import operator
import random
import numpy
# Python 2/3 compatibility: on Python 2 shadow the eager builtin map with
# itertools.imap so map() is lazy; on Python 3 map is already lazy.
try:
    from itertools import imap
except ImportError:
    # Python 3: itertools.imap no longer exists; nothing to do.
    # (Catch only ImportError -- a bare except would hide unrelated errors.)
    pass
else:
    map = imap
from deap import base
from deap.benchmarks import movingpeaks
from deap import creator
from deap import tools
scenario = movingpeaks.SCENARIO_2
NDIM = 5
BOUNDS = [scenario["min_coord"], scenario["max_coord"]]
mpb = movingpeaks.MovingPeaks(dim=NDIM, **scenario)
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Particle", list, fitness=creator.FitnessMax, speed=list,
best=None, bestfit=creator.FitnessMax)
def generate(pclass, dim, pmin, pmax, smin, smax):
    """Build one particle: *dim* coordinates drawn uniformly from
    [pmin, pmax], plus a speed vector drawn uniformly from [smin, smax]."""
    coords = [random.uniform(pmin, pmax) for _ in range(dim)]
    particle = pclass(coords)
    particle.speed = [random.uniform(smin, smax) for _ in range(dim)]
    return particle
def convert_quantum(swarm, rcloud, centre):
    """Re-randomize every particle of *swarm* as a "quantum" particle inside
    a cloud of radius *rcloud* around *centre*, using the NUVD distribution.

    Each particle's fitness memory is invalidated so it gets re-evaluated.
    Returns the (mutated) swarm.
    """
    dim = len(swarm[0])
    for part in swarm:
        # Random direction: a Gaussian vector, normalized below by its norm.
        direction = [random.gauss(0, 1) for _ in range(dim)]
        dist = math.sqrt(sum(coord**2 for coord in direction))
        # Gaussian distribution
        # u = abs(random.gauss(0, 1.0/3.0))
        # part[:] = [(rcloud * x * u**(1.0/dim) / dist) + c for x, c in zip(position, centre)]
        # UVD distribution
        # u = random.random()
        # part[:] = [(rcloud * x * u**(1.0/dim) / dist) + c for x, c in zip(position, centre)]
        # NUVD distribution
        u = abs(random.gauss(0, 1.0/3.0))
        part[:] = [(rcloud * coord * u / dist) + c
                   for coord, c in zip(direction, centre)]
        # Wipe the particle's memory so it is evaluated afresh.
        del part.fitness.values
        del part.bestfit.values
        part.best = None
    return swarm
def updateParticle(part, best, chi, c):
    """Constriction-coefficient PSO velocity/position update (in place).

    *best* is the attractor (e.g. the species seed's best), part.best the
    particle's own memory; *chi* is the constriction factor and *c* the
    acceleration coefficient applied to both random pulls.
    """
    new_speed = []
    for i in range(len(part)):
        # Two fresh random weights per dimension, in the same draw order as
        # the classic vectorized formulation (cognitive then social).
        u1 = c * random.uniform(0, 1)
        u2 = c * random.uniform(0, 1)
        accel = chi * (u1 * (best[i] - part[i]) + u2 * (part.best[i] - part[i])) \
                - (1 - chi) * part.speed[i]
        new_speed.append(part.speed[i] + accel)
    part.speed = new_speed
    part[:] = [pos + vel for pos, vel in zip(part, part.speed)]
toolbox = base.Toolbox()
# Particle factory: positions inside the scenario bounds, speeds limited to
# half of the domain's range in each direction.
toolbox.register("particle", generate, creator.Particle, dim=NDIM,
    pmin=BOUNDS[0], pmax=BOUNDS[1], smin=-(BOUNDS[1] - BOUNDS[0])/2.0,
    smax=(BOUNDS[1] - BOUNDS[0])/2.0)
toolbox.register("swarm", tools.initRepeat, list, toolbox.particle)
# chi is the constriction factor, c the acceleration coefficient
# (see updateParticle above).
toolbox.register("update", updateParticle, chi=0.729843788, c=2.05)
toolbox.register("convert", convert_quantum)
# The moving-peaks benchmark instance is the fitness function itself.
toolbox.register("evaluate", mpb)
def main(verbose=True):
    """Run speciated PSO on the moving-peaks benchmark until 5e5 evaluations.

    Args:
        verbose: when True, stream the logbook to stdout each generation.
    """
    NPARTICLES = 100
    RS = (BOUNDS[1] - BOUNDS[0]) / (50**(1.0/NDIM))  # between 1/20 and 1/10 of the domain's range
    PMAX = 10        # maximum particles kept per species
    RCLOUD = 1.0     # 0.5 times the move severity
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    logbook = tools.Logbook()
    logbook.header = "gen", "nswarm", "evals", "error", "offline_error", "avg", "max"
    swarm = toolbox.swarm(n=NPARTICLES)
    generation = 0
    while mpb.nevals < 5e5:
        # Evaluate each particle in the swarm
        for part in swarm:
            part.fitness.values = toolbox.evaluate(part)
            if not part.best or part.bestfit < part.fitness:
                part.best = toolbox.clone(part[:])          # Get the position
                part.bestfit.values = part.fitness.values   # Get the fitness
        # Sort swarm into species, best individual comes first
        sorted_swarm = sorted(swarm, key=lambda ind: ind.bestfit, reverse=True)
        species = []
        while sorted_swarm:
            found = False
            for s in species:
                # Euclidean distance from the candidate's best to the seed's.
                dist = math.sqrt(sum((x1 - x2)**2 for x1, x2 in zip(sorted_swarm[0].best, s[0].best)))
                if dist <= RS:
                    found = True
                    s.append(sorted_swarm[0])
                    break
            if not found:
                # Candidate is far from every seed: it founds a new species.
                species.append([sorted_swarm[0]])
            sorted_swarm.pop(0)
        record = stats.compile(swarm)
        logbook.record(gen=generation, evals=mpb.nevals, nswarm=len(species),
                       error=mpb.currentError(), offline_error=mpb.offlineError(), **record)
        if verbose:
            print(logbook.stream)
        # Detect change
        if any(s[0].bestfit.values != toolbox.evaluate(s[0].best) for s in species):
            # Convert particles to quantum particles
            for s in species:
                s[:] = toolbox.convert(s, rcloud=RCLOUD, centre=s[0].best)
        else:
            # Replace exceeding particles in a species with new particles
            for s in species:
                if len(s) > PMAX:
                    n = len(s) - PMAX
                    del s[PMAX:]
                    s.extend(toolbox.swarm(n=n))
            # Update particles that have not been reinitialized
            for s in species[:-1]:
                for part in s[:PMAX]:
                    toolbox.update(part, s[0].best)
                    del part.fitness.values
        # Return all but the worst species' updated particles to the swarm
        # The worst species is replaced by new particles
        swarm = list(itertools.chain(toolbox.swarm(n=len(species[-1])), *species[:-1]))
        generation += 1
if __name__ == '__main__':
main()
|
DEAP/deap
|
examples/pso/speciation.py
|
Python
|
lgpl-3.0
| 6,580
|
[
"Gaussian"
] |
17d59a78a80eef3eaa2b0b093197a0b689b3e635ae9dc3d12cb921149c9dde2d
|
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Any, Dict, Iterable, List, Optional
from urllib.parse import quote, unquote
from typing_extensions import TypedDict
from django.http import QueryDict
from swh.model.exceptions import ValidationError
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.identifiers import (
CONTENT,
DIRECTORY,
RELEASE,
REVISION,
SNAPSHOT,
ObjectType,
QualifiedSWHID,
)
from swh.web.common import archive
from swh.web.common.exc import BadInputExc
from swh.web.common.typing import (
QueryParameters,
SnapshotContext,
SWHIDContext,
SWHIDInfo,
SWHObjectInfo,
)
from swh.web.common.utils import reverse
def gen_swhid(
    object_type: str,
    object_id: str,
    scheme_version: int = 1,
    metadata: Optional[SWHIDContext] = None,
) -> str:
    """
    Returns the SoftWare Heritage persistent IDentifier for a swh object based on:
        * the object type
        * the object id
        * the SWHID scheme version
    Args:
        object_type: the swh object type
            (content/directory/release/revision/snapshot)
        object_id: the swh object id (hexadecimal representation
            of its hash value)
        scheme_version: the scheme version of the SWHIDs
        metadata: optional qualifiers (origin, visit, anchor, path, ...)
            passed through to QualifiedSWHID
    Returns:
        the SWHID of the object
    Raises:
        BadInputExc: if the provided parameters do not enable to
            generate a valid identifier
    """
    # Use None as the default instead of a mutable {} default argument,
    # which would be shared across every call of the function.
    if metadata is None:
        metadata = {}
    try:
        decoded_object_type = ObjectType[object_type.upper()]
        decoded_object_id = hash_to_bytes(object_id)
        obj_swhid = str(
            QualifiedSWHID(
                object_type=decoded_object_type,
                object_id=decoded_object_id,
                scheme_version=scheme_version,
                **metadata,
            )
        )
    except (ValidationError, KeyError, ValueError) as e:
        raise BadInputExc("Invalid object (%s) for SWHID. %s" % (object_id, e))
    else:
        return obj_swhid
class ResolvedSWHID(TypedDict):
    """A parsed SWHID together with the URL for browsing its target."""
    # the parsed SWHID, context qualifiers included
    swhid_parsed: QualifiedSWHID
    # URL to browse the object according to the SWHID context
    # (None when no browse view could be resolved)
    browse_url: Optional[str]
def resolve_swhid(
    swhid: str, query_params: Optional[QueryParameters] = None
) -> ResolvedSWHID:
    """
    Try to resolve a SoftWare Heritage persistent IDentifier into an url for
    browsing the targeted object.
    Args:
        swhid: a SoftWare Heritage persistent IDentifier
        query_params: optional dict filled with
            query parameters to append to the browse url
    Returns:
        a dict with the following keys:
            * **swhid_parsed**: the parsed identifier
            * **browse_url**: the url for browsing the targeted object
    """
    swhid_parsed = get_swhid(swhid)
    object_type = swhid_parsed.object_type
    object_id = swhid_parsed.object_id
    browse_url = None
    url_args = {}
    query_dict = QueryDict("", mutable=True)
    fragment = ""
    # Line-number fragments only make sense for content objects.
    process_lines = object_type == ObjectType.CONTENT
    if query_params and len(query_params) > 0:
        for k in sorted(query_params.keys()):
            query_dict[k] = query_params[k]
    if swhid_parsed.origin:
        origin_url = unquote(swhid_parsed.origin)
        # Normalize against the archive's canonical origin URL.
        origin_url = archive.lookup_origin({"url": origin_url})["url"]
        query_dict["origin_url"] = origin_url
    if swhid_parsed.path and swhid_parsed.path != b"/":
        query_dict["path"] = swhid_parsed.path.decode("utf8", errors="replace")
        if swhid_parsed.anchor:
            # Resolve the anchor down to a root directory id.
            directory = b""
            if swhid_parsed.anchor.object_type == ObjectType.DIRECTORY:
                directory = swhid_parsed.anchor.object_id
            elif swhid_parsed.anchor.object_type == ObjectType.REVISION:
                revision = archive.lookup_revision(
                    hash_to_hex(swhid_parsed.anchor.object_id)
                )
                directory = revision["directory"]
            elif swhid_parsed.anchor.object_type == ObjectType.RELEASE:
                release = archive.lookup_release(
                    hash_to_hex(swhid_parsed.anchor.object_id)
                )
                if release["target_type"] == REVISION:
                    revision = archive.lookup_revision(release["target"])
                    directory = revision["directory"]
            if object_type == ObjectType.CONTENT:
                if (
                    not swhid_parsed.origin
                    and swhid_parsed.anchor.object_type != ObjectType.REVISION
                ):
                    # when no origin or revision context, content objects need to have
                    # their path prefixed by root directory id for breadcrumbs display
                    query_dict["path"] = hash_to_hex(directory) + query_dict["path"]
                else:
                    # remove leading slash from SWHID content path
                    query_dict["path"] = query_dict["path"][1:]
            elif object_type == ObjectType.DIRECTORY:
                object_id = directory
                # remove leading and trailing slashes from SWHID directory path
                if query_dict["path"].endswith("/"):
                    query_dict["path"] = query_dict["path"][1:-1]
                else:
                    query_dict["path"] = query_dict["path"][1:]
    # snapshot context
    if swhid_parsed.visit:
        if swhid_parsed.visit.object_type != ObjectType.SNAPSHOT:
            raise BadInputExc("Visit must be a snapshot SWHID.")
        query_dict["snapshot"] = hash_to_hex(swhid_parsed.visit.object_id)
        if swhid_parsed.anchor:
            if (
                swhid_parsed.anchor.object_type == ObjectType.REVISION
                and object_type != ObjectType.REVISION
            ):
                query_dict["revision"] = hash_to_hex(swhid_parsed.anchor.object_id)
            elif swhid_parsed.anchor.object_type == ObjectType.RELEASE:
                release = archive.lookup_release(
                    hash_to_hex(swhid_parsed.anchor.object_id)
                )
                if release:
                    query_dict["release"] = release["name"]
    # browsing content or directory without snapshot context
    elif (
        object_type in (ObjectType.CONTENT, ObjectType.DIRECTORY)
        and swhid_parsed.anchor
    ):
        if swhid_parsed.anchor.object_type == ObjectType.REVISION:
            # anchor revision, objects are browsed from its view
            object_type = ObjectType.REVISION
            object_id = swhid_parsed.anchor.object_id
        elif (
            object_type == ObjectType.DIRECTORY
            and swhid_parsed.anchor.object_type == ObjectType.DIRECTORY
        ):
            # a directory is browsed from its root
            object_id = swhid_parsed.anchor.object_id
    # Build the URL arguments expected by the matching browse view.
    if object_type == ObjectType.CONTENT:
        url_args["query_string"] = f"sha1_git:{hash_to_hex(object_id)}"
    elif object_type in (ObjectType.DIRECTORY, ObjectType.RELEASE, ObjectType.REVISION):
        url_args["sha1_git"] = hash_to_hex(object_id)
    elif object_type == ObjectType.SNAPSHOT:
        url_args["snapshot_id"] = hash_to_hex(object_id)
    if swhid_parsed.lines and process_lines:
        lines = swhid_parsed.lines
        fragment += "#L" + str(lines[0])
        if lines[1]:
            fragment += "-L" + str(lines[1])
    if url_args:
        browse_url = (
            reverse(
                f"browse-{object_type.name.lower()}",
                url_args=url_args,
                query_params=query_dict,
            )
            + fragment
        )
    return ResolvedSWHID(swhid_parsed=swhid_parsed, browse_url=browse_url)
def get_swhid(swhid: str) -> QualifiedSWHID:
    """Check if a SWHID is valid and return it parsed.
    Args:
        swhid: a SoftWare Heritage persistent IDentifier.
    Raises:
        BadInputExc: if the provided SWHID can not be parsed.
    Return:
        A parsed SWHID.
    """
    # Lower-case only the core part of the SWHID to avoid parsing errors;
    # the qualifiers after ';' stay untouched (they can be case-sensitive).
    core, sep, qualifiers = swhid.partition(";")
    normalized = core.lower() + sep + qualifiers
    try:
        return QualifiedSWHID.from_string(normalized)
    except ValidationError as ve:
        raise BadInputExc("Error when parsing identifier: %s" % " ".join(ve.messages))
def group_swhids(swhids: Iterable[QualifiedSWHID],) -> Dict[str, List[bytes]]:
    """
    Groups many SoftWare Heritage persistent IDentifiers into a
    dictionary depending on their type.
    Args:
        swhids: an iterable of SoftWare Heritage persistent
            IDentifier objects
    Returns:
        A dictionary with:
            keys: object types
            values: object hashes
    """
    # Pre-seed every known type so callers always find all keys present.
    grouped: Dict[str, List[bytes]] = {
        swhid_type: []
        for swhid_type in (CONTENT, DIRECTORY, REVISION, RELEASE, SNAPSHOT)
    }
    for parsed_swhid in swhids:
        grouped[parsed_swhid.object_type.name.lower()].append(
            hash_to_bytes(parsed_swhid.object_id)
        )
    return grouped
def get_swhids_info(
    swh_objects: Iterable[SWHObjectInfo],
    snapshot_context: Optional[SnapshotContext] = None,
    extra_context: Optional[Dict[str, Any]] = None,
) -> List[SWHIDInfo]:
    """
    Returns a list of dict containing info related to SWHIDs of objects.
    Args:
        swh_objects: an iterable of dict describing archived objects
        snapshot_context: optional dict parameter describing the snapshot in
            which the objects have been found
        extra_context: optional dict filled with extra contextual info about
            the objects
    Returns:
        a list of dict containing SWHIDs info
    """
    swhids_info = []
    for swh_object in swh_objects:
        if not swh_object["object_id"]:
            # No id available: emit an empty placeholder entry.
            swhids_info.append(
                SWHIDInfo(
                    object_type=swh_object["object_type"],
                    object_id="",
                    swhid="",
                    swhid_url="",
                    context={},
                    swhid_with_context=None,
                    swhid_with_context_url=None,
                )
            )
            continue
        object_type = swh_object["object_type"]
        object_id = swh_object["object_id"]
        # Qualifiers (origin/visit/anchor/path) accumulated for this object.
        swhid_context: SWHIDContext = {}
        if snapshot_context:
            if snapshot_context["origin_info"] is not None:
                # Quote the origin URL but keep URL-structure characters.
                swhid_context["origin"] = quote(
                    snapshot_context["origin_info"]["url"], safe="/?:@&"
                )
            if object_type != SNAPSHOT:
                swhid_context["visit"] = gen_swhid(
                    SNAPSHOT, snapshot_context["snapshot_id"]
                )
            if object_type in (CONTENT, DIRECTORY):
                # Prefer the release as the anchor, then the revision.
                if snapshot_context["release_id"] is not None:
                    swhid_context["anchor"] = gen_swhid(
                        RELEASE, snapshot_context["release_id"]
                    )
                elif snapshot_context["revision_id"] is not None:
                    swhid_context["anchor"] = gen_swhid(
                        REVISION, snapshot_context["revision_id"]
                    )
        if object_type in (CONTENT, DIRECTORY):
            if (
                extra_context
                and "revision" in extra_context
                and extra_context["revision"]
                and "anchor" not in swhid_context
            ):
                swhid_context["anchor"] = gen_swhid(REVISION, extra_context["revision"])
            elif (
                extra_context
                and "root_directory" in extra_context
                and extra_context["root_directory"]
                and "anchor" not in swhid_context
                and (
                    # do not anchor a root directory on itself
                    object_type != DIRECTORY
                    or extra_context["root_directory"] != object_id
                )
            ):
                swhid_context["anchor"] = gen_swhid(
                    DIRECTORY, extra_context["root_directory"]
                )
            path = None
            if extra_context and "path" in extra_context:
                path = extra_context["path"] or "/"
                if "filename" in extra_context and object_type == CONTENT:
                    path += extra_context["filename"]
            if object_type == DIRECTORY and path == "/":
                path = None
            if path:
                swhid_context["path"] = quote(path, safe="/?:@&")
        # Plain SWHID first, then the qualified variant when context exists.
        swhid = gen_swhid(object_type, object_id)
        swhid_url = reverse("browse-swhid", url_args={"swhid": swhid})
        swhid_with_context = None
        swhid_with_context_url = None
        if swhid_context:
            swhid_with_context = gen_swhid(
                object_type, object_id, metadata=swhid_context
            )
            swhid_with_context_url = reverse(
                "browse-swhid", url_args={"swhid": swhid_with_context}
            )
        swhids_info.append(
            SWHIDInfo(
                object_type=object_type,
                object_id=object_id,
                swhid=swhid,
                swhid_url=swhid_url,
                context=swhid_context,
                swhid_with_context=swhid_with_context,
                swhid_with_context_url=swhid_with_context_url,
            )
        )
    return swhids_info
|
SoftwareHeritage/swh-web-ui
|
swh/web/common/identifiers.py
|
Python
|
agpl-3.0
| 13,679
|
[
"VisIt"
] |
3022bdbce8f15d3e48515af3824c418bc59f1fbe8a61f0872a192359fdabc8eb
|
"""
"""
#
import time
import pprint
#
#
from models.player import Player
player = Player(player_id=5353462)
player.visit()
|
m3talstorm/foe-bot
|
foe/sabotage.py
|
Python
|
mit
| 130
|
[
"VisIt"
] |
966508da684b206c3288a211888e176db38649af68b60929f27c5323377d1bba
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.utils import Testing
from peacock.Input.BlockInfo import BlockInfo
from peacock.Input.ParameterInfo import ParameterInfo
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
    """Unit tests for BlockInfo tree and parameter manipulation."""
    # Qt requires a QApplication to exist before any widgets are created;
    # one instance is shared by all tests in this module.
    qapp = QtWidgets.QApplication([])

    def testBasic(self):
        """Parent/child bookkeeping and dump() output for types/star nodes."""
        b = BlockInfo(None, "/Foo", False, "")
        b1 = BlockInfo(None, "/Foo/bar", False, "")
        b.addChildBlock(b1)
        self.assertEqual(b1.parent, b)
        self.assertIn(b1.name, b.children_list)
        self.assertIn(b1.name, b.children)
        b.types[b1.name] = b1
        b.star_node = b1
        out = b.dump()
        # dump() output should mention the child path.
        self.assertIn("/Foo/bar", out)

    def testTypes(self):
        """Behaviour of the special "type" parameter and typed sub-blocks."""
        b = BlockInfo(None, "/Foo", False, "")
        self.assertEqual(b.paramValue("type"), None)
        b.setBlockType("t")  # shouldn't do anything: no "type" parameter yet
        p = ParameterInfo(None, "type")
        b.addParameter(p)
        self.assertEqual(p.parent, b)
        self.assertEqual(b.paramValue("type"), "")
        b.setBlockType("t")  # "type" parameter now exists, so this takes effect
        self.assertEqual(b.paramValue("type"), "t")
        b1 = BlockInfo(None, "/Foo/t", False, "")
        p = ParameterInfo(None, "p")
        b1.addParameter(p)
        b.types[b1.name] = b1
        # Parameters of the currently selected type are visible on the block.
        self.assertEqual(b.paramValue("p"), "")
        b.setParamValue("p", "val")
        self.assertEqual(b.paramValue("p"), "val")
        # Switching to an unknown type hides the old type's parameters.
        b.setBlockType("t1")
        self.assertEqual(b.paramValue("p"), None)

    def testUserParams(self):
        """Adding, renaming, moving and removing user-added parameters."""
        b = BlockInfo(None, "/Foo", False, "")
        p = ParameterInfo(None, "p")
        p.user_added = False
        b.addParameter(p)
        b.addUserParam("bar", "val")
        b.addUserParam("bar", "val")  # duplicate add must be harmless
        p = b.getParamInfo("bar")
        self.assertEqual(p.user_added, True)
        self.assertEqual(b.paramValue("bar"), "val")
        b.setParamValue("bar", "val1")
        self.assertEqual(b.paramValue("bar"), "val1")
        b.removeUserParam("bar1")  # unknown name: no-op
        self.assertEqual(b.paramValue("bar"), "val1")
        b.removeUserParam("p")  # not user_added: must not be removed
        self.assertEqual(b.paramValue("p"), "")
        b.removeUserParam("bar")
        self.assertEqual(b.paramValue("bar"), None)
        self.assertNotIn("bar", b.parameters_list)
        self.assertNotIn("bar", b.parameters)
        b.addUserParam("bar", "val")
        b.addUserParam("foo", "val1")
        self.assertEqual(len(b.parameters_list), 3)
        self.assertEqual(b.parameters_list.index("bar"), 1)
        self.assertEqual(b.parameters_list.index("foo"), 2)
        b.moveUserParam("foo", 0)
        self.assertEqual(b.parameters_list.index("bar"), 2)
        self.assertEqual(b.parameters_list.index("foo"), 0)
        b.renameUserParam("bar1", "bar2")  # unknown source name: no-op
        b.renameUserParam("p", "bar2")  # not user_added: no-op
        self.assertEqual(b.paramValue("bar2"), None)
        b.renameUserParam("bar", "bar1")
        self.assertEqual(b.paramValue("bar"), None)
        self.assertEqual(b.paramValue("bar1"), "val")

    def testChild(self):
        """Child block ordering, renaming and removal."""
        b = BlockInfo(None, "/Foo", False, "")
        b2 = BlockInfo(None, "/Foo/bar", False, "")
        b21 = BlockInfo(None, "/Foo/bar/child", False, "")
        b3 = BlockInfo(None, "/Foo/bar1", False, "")
        b2.addChildBlock(b21)
        b.addChildBlock(b2)
        b.addChildBlock(b3)
        self.assertEqual(b2.parent, b)
        self.assertEqual(b3.parent, b)
        self.assertIn(b2.name, b.children)
        self.assertIn(b2.name, b.children_list)
        self.assertEqual(b.children_list.index(b2.name), 0)
        self.assertEqual(b.children_list.index(b3.name), 1)
        b.moveChildBlock(b3.name, 0)
        self.assertEqual(b.children_list.index(b2.name), 1)
        self.assertEqual(b.children_list.index(b3.name), 0)
        # Renaming propagates the new path down to grandchildren.
        b.renameChildBlock("bar", "foo")
        self.assertEqual(b2.path, "/Foo/foo")
        self.assertEqual(b2.children["child"].path, "/Foo/foo/child")
        b.renameChildBlock("foo", "bar1")  # target name already taken: no-op
        self.assertEqual(b2.path, "/Foo/foo")
        b.renameChildBlock("foo1", "bar1")  # unknown source: no-op
        self.assertEqual(b2.path, "/Foo/foo")
        b2.removeChildBlock("foo")  # unknown child: no-op
        b2.removeChildBlock("child")
        self.assertEqual(len(b2.children_list), 0)
        self.assertEqual(len(b2.children.keys()), 0)

    def testCopy(self):
        """copy() must duplicate structure while keeping equal content."""
        b = BlockInfo(None, "/Foo", False, "")
        b.addUserParam("p0", "val0")
        b.addUserParam("p1", "val1")
        b2 = BlockInfo(None, "/Foo/bar", False, "")
        b2.addUserParam("p2", "val2")
        b2.addUserParam("p3", "val3")
        b3 = BlockInfo(None, "/Foo/bar1", False, "")
        b3.addUserParam("p4", "val4")
        b3.addUserParam("p5", "val5")
        b.addChildBlock(b2)
        b.addChildBlock(b3)
        b.setStarInfo(BlockInfo(None, "Foo/star", False, ""))
        b.addBlockType(BlockInfo(None, "Foo/t", False, ""))
        b_copy = b.copy(None)
        # The copy is a distinct object with equal keys/lists; types dicts
        # compare unequal because they hold distinct BlockInfo instances.
        self.assertNotEqual(b_copy, b)
        self.assertEqual(b_copy.name, b.name)
        self.assertEqual(b_copy.types.keys(), b.types.keys())
        self.assertNotEqual(b_copy.types, b.types)
        self.assertEqual(b_copy.children.keys(), b.children.keys())
        self.assertEqual(b_copy.children_list, b.children_list)
        self.assertEqual(b_copy.parameters_list, b.parameters_list)
        self.assertEqual(b_copy.parameters.keys(), b.parameters.keys())
        o = b.dump()
        o1 = b_copy.dump()
        self.assertEqual(o, o1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/input_tab/BlockInfo/test_BlockInfo.py
|
Python
|
lgpl-2.1
| 5,770
|
[
"MOOSE"
] |
efb762856e317caa5632081d83bed64d37d58224d30d0ab739776c91404177d6
|
"""
Implementation of Von-Mises-Fisher Mixture models,
i.e. the equaivalent of mixture of Gaussian on the sphere.
Author: Bertrand Thirion, 2010-2011
"""
import numpy as np
from warnings import warn
# Emit a deprecation warning as soon as this module is imported
# (stacklevel=2 points the warning at the importing module).
# FIX: a space was missing between the two concatenated literals, which
# produced "...von_mises_fisher_mixturedeprecated" in the message.
warn('Module nipy.algorithms.clustering.von_mises_fisher_mixture ' +
     'deprecated, will be removed',
     FutureWarning,
     stacklevel=2)
class VonMisesMixture(object):
    """
    Model for a von Mises-Fisher mixture distribution with fixed precision
    on the two-dimensional sphere (unit sphere embedded in 3d).
    """

    def __init__(self, k, precision, means=None, weights=None,
                 null_class=False):
        """ Initialize Von Mises mixture

        Parameters
        ----------
        k: int,
            number of components
        precision: float,
            the fixed precision parameter
        means: array of shape(self.k, 3), optional
            input component centers
        weights: array of shape(self.k), optional
            input components weights
        null_class: bool, optional
            Inclusion of a null class within the model
            (related to k=0)

        fixme
        -----
        consistency checks
        """
        self.k = k
        # The sphere is a 2-d manifold (dim) embedded in 3-d space (em_dim).
        self.dim = 2
        self.em_dim = 3
        self.means = means
        self.precision = precision
        self.weights = weights
        self.null_class = null_class

    def log_density_per_component(self, x):
        """Compute the per-component log-density of the data

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere

        Returns
        -------
        loglike: array of shape(n, self.k)
            log-density of each sample under each component
            (column 0 is the uniform null class when self.null_class)
        """
        n = x.shape[0]
        # Normalizing constant of the von Mises-Fisher density on the sphere.
        constant = self.precision / (2 * np.pi * (1 - np.exp( - \
                        2 * self.precision)))
        loglike = np.log(constant) + \
            (np.dot(x, self.means.T) - 1) * self.precision
        if self.null_class:
            # Prepend the uniform density on the sphere: 1 / (4 * pi).
            loglike = np.hstack((np.log(1. / (4 * np.pi)) * np.ones((n, 1)),
                                 loglike))
        return loglike

    def density_per_component(self, x):
        """
        Compute the per-component density of the data

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere

        Returns
        -------
        like: array of shape(n, self.k), with non-negative values
            the density
        """
        return np.exp(self.log_density_per_component(x))

    def weighted_density(self, x):
        """ Return weighted density

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere

        Returns
        -------
        like: array
            of shape(n, self.k)
        """
        return(self.density_per_component(x) * self.weights)

    def log_weighted_density(self, x):
        """ Return log weighted density

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere

        Returns
        -------
        log_like: array of shape(n, self.k)
        """
        return(self.log_density_per_component(x) + np.log(self.weights))

    def mixture_density(self, x):
        """ Return mixture density

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere

        Returns
        -------
        like: array of shape(n)
        """
        wl = self.weighted_density(x)
        return np.sum(wl, 1)

    def responsibilities(self, x):
        """ Return responsibilities (posterior component probabilities)

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere

        Returns
        -------
        resp: array of shape(n, self.k)
        """
        lwl = self.log_weighted_density(x)
        # Subtract the per-sample mean before exponentiating for numerical
        # stability; the shift cancels in the normalization below.
        wl = np.exp(lwl.T - lwl.mean(1)).T
        swl = np.sum(wl, 1)
        resp = (wl.T / swl).T
        return resp

    def estimate_weights(self, z):
        """ Calculate and set weights from `z` (M-step for the weights)

        Parameters
        ----------
        z: array of shape(self.k)
        """
        self.weights = np.sum(z, 0) / z.sum()

    def estimate_means(self, x, z):
        """ Calculate and set means from `x` and `z` (M-step for the means)

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere
        z: array of shape(self.k)
        """
        m = np.dot(z.T, x)
        # Project the weighted means back onto the unit sphere.
        self.means = (m.T / np.sqrt(np.sum(m ** 2, 1))).T

    def estimate(self, x, maxiter=100, miniter=1, bias=None):
        """ Fit the model by EM and return the average log density

        Parameters
        ----------
        x: array of shape (n, 3)
            should be on the unit sphere
        maxiter : int, optional
            maximum number of iterations of the algorithms
        miniter : int, optional
            minimum number of iterations
        bias : array of shape(n), optional
            prior probability of being in a non-null class

        Returns
        -------
        ll : float
            average (across samples) log-density
        """
        # initialization with random positions and constant weights
        if self.weights is None:
            self.weights = np.ones(self.k) / self.k
            if self.null_class:
                self.weights = np.ones(self.k + 1) / (self.k + 1)
        if self.means is None:
            # Pick k distinct data points as initial component centers.
            aux = np.arange(x.shape[0])
            np.random.shuffle(aux)
            self.means = x[aux[:self.k]]
        # EM algorithm
        assert not(np.isnan(self.means).any())
        pll = - np.inf
        for i in range(maxiter):
            ll = np.log(self.mixture_density(x)).mean()
            # E-step
            z = self.responsibilities(x)
            assert not(np.isnan(z).any())
            # bias z: re-weight null vs non-null posteriors by the prior.
            if bias is not None:
                z[:, 0] *= (1 - bias)
                z[:, 1:] = ((z[:, 1:].T) * bias).T
                z = (z.T / np.sum(z, 1)).T
            # M-step (the null class has no mean to estimate)
            self.estimate_weights(z)
            if self.null_class:
                self.estimate_means(x, z[:, 1:])
            else:
                self.estimate_means(x, z)
            assert not(np.isnan(self.means).any())
            # Stop when the log-likelihood gain falls below tolerance.
            if (i > miniter) and (ll < pll + 1.e-6):
                break
            pll = ll
        return ll

    def show(self, x):
        """ Visualization utility (scatter plot of labelled data on S^2)

        Parameters
        ----------
        x: array of shape(n, 3)
            should be on the unit sphere
        """
        # label the data with the most probable component
        z = np.argmax(self.responsibilities(x), 1)
        import pylab
        import mpl_toolkits.mplot3d.axes3d as p3
        fig = pylab.figure()
        ax = p3.Axes3D(fig)
        # NOTE(review): relies on Python 2 integer division in
        # (1 + self.k) / 8 -- under Python 3 this would produce a float and
        # break list multiplication.
        colors = (['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'] * \
                 (1 + (1 + self.k) / 8))[:self.k + 1]
        if (self.null_class) and (z == 0).any():
            # Null-class samples are drawn with the first color.
            ax.plot3D(x[z == 0, 0], x[z == 0, 1], x[z == 0, 2], '.',
                      color=colors[0])
        for k in range(self.k):
            if self.null_class:
                # Component k corresponds to label k + 1 when a null class
                # occupies label 0.
                if np.sum(z == (k + 1)) == 0:
                    continue
                uk = z == (k + 1)
                ax.plot3D(x[uk, 0], x[uk, 1], x[uk, 2], '.',
                          color=colors[k + 1])
                ax.plot3D([self.means[k, 0]], [self.means[k, 1]],
                          [self.means[k, 2]], 'o', color=colors[k + 1])
            else:
                if np.sum(z == k) == 0:
                    continue
                ax.plot3D(x[z == k, 0], x[z == k, 1], x[z == k, 2], '.',
                          color=colors[k])
                ax.plot3D([self.means[k, 0]], [self.means[k, 1]],
                          [self.means[k, 2]], 'o', color=colors[k])
        pylab.show()
def estimate_robust_vmm(k, precision, null_class, x, ninit=10, bias=None,
                        maxiter=100):
    """ Return the best von Mises mixture over several random initializations

    Parameters
    ----------
    k: int, number of classes
    precision: float, prior precision parameter
    null_class: bool, optional,
        should a null class be included or not
    x: array of shape(n, 3)
        input data, should be on the unit sphere
    ninit: int, optional,
        number of random initializations
    bias: array of shape(n), optional
        prior probability of being in a non-null class
    maxiter: int, optional,
        maximum number of EM iterations for each initialization

    Returns
    -------
    best_model: VonMisesMixture
        the fitted model with the highest average log-density,
        or None if no initialization produced a finite score
    """
    score = - np.inf
    # FIX: initialize so that a NameError cannot occur if every fit
    # returns -inf / nan.
    best_model = None
    for i in range(ninit):
        aux = VonMisesMixture(k, precision, null_class=null_class)
        # FIX: `maxiter` was accepted but silently ignored before; it is now
        # forwarded to the EM fit.
        ll = aux.estimate(x, maxiter=maxiter, bias=bias)
        if ll > score:
            best_model = aux
            score = ll
    return best_model
def select_vmm(krange, precision, null_class, x, ninit=10, bias=None,
               maxiter=100, verbose=0):
    """Return the best von_mises mixture after several initializations,
    selecting the number of components by BIC over `krange`.

    Parameters
    ----------
    krange: list of ints,
        number of classes to consider
    precision: float,
        fixed precision parameter of the von Mises densities
    null_class: bool,
        whether each candidate model includes a uniform null class
    x: array of shape(n, 3)
        should be on the unit sphere
    ninit: int, optional,
        number of initializations per candidate
    maxiter: int, optional,
        maximum EM iterations per fit
    bias: array of shape(n),
        a prior probability of not being in the null class
    verbose: Bool, optional
    """
    score = - np.inf
    for k in krange:
        aux = estimate_robust_vmm(k, precision, null_class, x, ninit, bias,
                                  maxiter)
        ll = aux.estimate(x)
        # BIC penalty: presumably 3 free parameters per component (2 for the
        # mean on the sphere, 1 for the weight); without a null class one
        # weight is redundant, hence the (k * 3 - 1) term.
        if null_class:
            bic = ll - np.log(x.shape[0]) * k * 3 / x.shape[0]
        else:
            bic = ll - np.log(x.shape[0]) * (k * 3 - 1) / x.shape[0]
        if verbose:
            print k, bic  # Python 2 print statement (legacy module)
        if bic > score:
            best_model = aux
            score = bic
    return best_model
def select_vmm_cv(krange, precision, x, null_class, cv_index,
                  ninit=5, maxiter=100, bias=None, verbose=0):
    """Return the best von_mises mixture after several initializations,
    selecting the number of components by cross-validated log-likelihood.

    Parameters
    ----------
    krange: list of ints,
        number of classes to consider
    precision: float,
        precision parameter of the von-mises densities
    x: array of shape(n, 3)
        should be on the unit sphere
    null_class: bool, whether a null class should be included or not
    cv_index: set of indices for cross validation
    ninit: int, optional,
        number of initializations
    maxiter: int, optional,
    bias: array of shape (n), prior
    """
    score = - np.inf
    mll = []
    for k in krange:
        mll.append( - np.inf)
        # NOTE(review): a single pass -- presumably a placeholder for
        # repeated CV rounds; confirm before removing.
        for j in range(1):
            # NOTE(review): np.float is deprecated since NumPy 1.20;
            # plain `float` is the drop-in replacement.
            ll = np.zeros_like(cv_index).astype(np.float)
            # Leave-one-fold-out: fit on cv_index != i, score on cv_index == i.
            for i in np.unique(cv_index):
                xl = x[cv_index != i]
                xt = x[cv_index == i]
                bias_l = None
                if bias is not None:
                    bias_l = bias[cv_index != i]
                aux = estimate_robust_vmm(k, precision, null_class, xl,
                                          ninit=ninit, bias=bias_l,
                                          maxiter=maxiter)
                if bias is None:
                    ll[cv_index == i] = np.log(aux.mixture_density(xt))
                else:
                    # Mix the null-class density with the biased non-null part.
                    bias_t = bias[cv_index == i]
                    lwd = aux.weighted_density(xt)
                    ll[cv_index == i] = np.log(lwd[:, 0] * (1 - bias_t) + \
                                               lwd[:, 1:].sum(1) * bias_t)
            if ll.mean() > mll[-1]:
                mll[-1] = ll.mean()
        # Refit on the full data for the model that is eventually returned.
        aux = estimate_robust_vmm(k, precision, null_class, x,
                                  ninit, bias=bias, maxiter=maxiter)
        if verbose:
            print k, mll[ - 1]  # Python 2 print statement (legacy module)
        if mll[ - 1] > score:
            best_model = aux
            score = mll[ - 1]
    return best_model
def sphere_density(npoints):
    """Sample npoints ** 2 points on the unit sphere with their areas.

    Returns
    -------
    coords : array of shape (npoints ** 2, 3)
        Cartesian coordinates of the sample points (unit norm).
    area : array of shape (npoints ** 2,)
        surface-area element associated with each sample point.
    """
    # Regular grid in spherical coordinates (endpoint excluded).
    azimuth = np.linspace(0, 2 * np.pi, npoints + 1)[:npoints]
    polar = np.linspace(0, np.pi, npoints + 1)[:npoints]
    sin_polar = np.sin(polar)
    xs = np.outer(np.cos(azimuth), sin_polar).ravel()
    ys = np.outer(np.sin(azimuth), sin_polar).ravel()
    zs = np.outer(np.ones(azimuth.size), np.cos(polar)).ravel()
    coords = np.column_stack((xs, ys, zs))
    # Area element: |sin(theta)| * dtheta * dphi with dtheta*dphi = 2*pi^2/n^2.
    area = np.abs(np.outer(np.ones(azimuth.size), sin_polar).ravel()) * \
        np.pi ** 2 * 2 * 1. / (npoints ** 2)
    return coords, area
def example_noisy():
    """Demo: fit a mixture with a null class on noisy synthetic data."""
    # Three unit-norm cluster centers.
    x1 = [0.6, 0.48, 0.64]
    x2 = [-0.8, 0.48, 0.36]
    x3 = [0.48, 0.64, -0.6]
    x = np.random.randn(200, 3) * .1
    x[:30] += x1
    # NOTE(review): rows 30:40 receive no cluster center, i.e. they remain
    # pure noise -- presumably intentional "outlier" samples for the null
    # class; confirm, otherwise this looks like an off-by-ten slice.
    x[40:150] += x2
    x[150:] += x3
    # Project every sample back onto the unit sphere.
    x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T
    precision = 100.
    vmm = select_vmm(range(2, 7), precision, True, x)
    vmm.show(x)
    # check that the fitted density integrates to ~1 over the sphere
    s, area = sphere_density(100)
    print (vmm.mixture_density(s) * area).sum()  # Python 2 print statement
def example_cv_nonoise():
    """Demo: cross-validated model selection on noise-free synthetic data."""
    # Three unit-norm cluster centers; samples cycle through them.
    x1 = [0.6, 0.48, 0.64]
    x2 = [-0.8, 0.48, 0.36]
    x3 = [0.48, 0.64, -0.6]
    x = np.random.randn(30, 3) * .1
    x[0::3] += x1
    x[1::3] += x2
    x[2::3] += x3
    # Project every sample back onto the unit sphere.
    x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T
    precision = 50.
    # 10 cross-validation folds of 3 consecutive samples each.
    sub = np.repeat(np.arange(10), 3)
    vmm = select_vmm_cv(range(1, 8), precision, x, cv_index=sub,
                        null_class=False, ninit=20)
    vmm.show(x)
    # check that it sums to 1
    s, area = sphere_density(100)
    return vmm
|
arokem/nipy
|
nipy/algorithms/clustering/von_mises_fisher_mixture.py
|
Python
|
bsd-3-clause
| 13,461
|
[
"Gaussian"
] |
dcb2cc35e212403c4f5d3e9c93da1f39f2df9a526e7f09461c3325b5f574609b
|
import os
import errno
import threading
import datetime
import tempfile
import shutil
import json
import time
import socket
import re
from copy import deepcopy
from GangaCore.Utility.Config import getConfig
from GangaCore.Utility.logging import getLogger
from GangaCore.Core.exceptions import GangaException
from GangaCore.GPIDev.Base.Proxy import isType
from GangaCore.GPIDev.Credentials import credential_store
import GangaCore.Utility.execute as gexecute
logger = getLogger()
# Cache
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
# Module-level caches: DIRAC environments (keyed by source file) and the
# concatenated DIRAC helper-function text, loaded once per process.
DIRAC_ENV = {}
DIRAC_INCLUDE = ''
# Locks serialising environment loading, proxy handling and command execution.
Dirac_Env_Lock = threading.Lock()
Dirac_Proxy_Lock = threading.Lock()
Dirac_Exec_Lock = threading.Lock()
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
class GangaDiracError(GangaException):
    """ Exception type which is thrown from problems executing a command against DIRAC

    Attributes:
        dirac_id: DIRAC backend id of the job involved, if any
        job_id: Ganga job id of the job involved, if any
    """

    def __init__(self, message, dirac_id=None, job_id=None):
        GangaException.__init__(self, message)
        # FIX: store the message explicitly -- __str__ below reads
        # self.message, which base Exception does not provide on Python 3.
        self.message = message
        self.dirac_id = dirac_id
        self.job_id = job_id

    def __str__(self):
        # Include job/backend ids when both are known, otherwise defer to the
        # base-class formatting.
        if self.job_id and self.dirac_id:
            return "GangaDiracError, Job %s with Dirac ID %s : %s" % (self.job_id, self.dirac_id, self.message)
        else:
            return GangaException.__str__(self)
def getDiracEnv(sourceFile=None):
    """
    Returns the dirac environment stored in a global dictionary by GangaCore.
    Once loaded and stored this is used for executing all DIRAC code in future

    Args:
        sourceFile (str): This is an optional file path which points to the env which should be sourced for this DIRAC

    Returns:
        dict: the (cached) DIRAC environment for the requested source
    """
    global DIRAC_ENV
    with Dirac_Env_Lock:
        if sourceFile is None:
            # No explicit source requested: use the configured cache/source,
            # stored under the key 'default' in the module-level cache.
            sourceFile = 'default'
            cache_file = getConfig('DIRAC')['DiracEnvJSON']
            source_command = getConfig('DIRAC')['DiracEnvSource']
            if not cache_file and not source_command:
                source_command = getConfig('DIRAC')['DiracEnvFile']
        else:
            # Needed for backwards compatibility with old configs...
            # an explicit sourceFile is itself the command to source.
            cache_file = None
            source_command = sourceFile
        if sourceFile not in DIRAC_ENV:
            # Prefer the JSON cache; fall back to sourcing the command.
            if cache_file:
                DIRAC_ENV[sourceFile] = read_env_cache(cache_file)
            elif source_command:
                DIRAC_ENV[sourceFile] = get_env(source_command)
            else:
                logger.error("'DiracEnvSource' config variable empty")
                logger.error("%s %s" % (getConfig('DIRAC')['DiracEnvJSON'], getConfig('DIRAC')['DiracEnvSource']))
        # In case of custom location: propagate the caller's proxy path.
        if os.getenv('X509_USER_PROXY'):
            DIRAC_ENV[sourceFile]['X509_USER_PROXY'] = os.getenv('X509_USER_PROXY')
        return DIRAC_ENV[sourceFile]
def get_env(env_source):
    """
    Given a source command, return the DIRAC environment that the
    command created.

    Args:
        env_source: a command which can be sourced, providing the desired environment

    Returns:
        dict: the environment

    Raises:
        RuntimeError: if no DIRAC* variable can be found after sourcing
    """
    logger.debug('Running DIRAC source command %s', env_source)
    env = dict(os.environ)
    gexecute.execute('source {0}'.format(env_source), shell=True, env=env, update_env=True)
    if not any(key.startswith('DIRAC') for key in env):
        # Fallback: sourcing did not yield any DIRAC* variable, so try to
        # parse the file directly as KEY=VALUE lines.
        # NOTE(review): values parsed this way keep their trailing newline
        # and any 'export ' prefix on the key -- confirm callers tolerate it.
        fake_dict = {}
        with open(env_source) as _env:
            for _line in _env.readlines():
                split_val = _line.split('=')
                if len(split_val) == 2:
                    key = split_val[0]
                    value = split_val[1]
                    fake_dict[key] = value
        if not any(key.startswith('DIRAC') for key in fake_dict):
            logger.error("Env: %s" % str(env))
            logger.error("Fake: %s" % str(fake_dict))
            raise RuntimeError("'DIRAC*' not found in environment")
        else:
            return fake_dict
    return env
def write_env_cache(env, cache_filename):
    """
    Serialise an environment dict to a JSON cache file, creating the
    parent directory if necessary.

    Args:
        env (dict): the environment
        cache_filename: a full path to a file to store the cache in
    """
    cache_dir = os.path.dirname(cache_filename)
    # FIX: exist_ok avoids the check-then-create race of the previous
    # os.path.exists() test; the guard also skips makedirs('') which would
    # raise when cache_filename has no directory component.
    if cache_dir:
        os.makedirs(cache_dir, exist_ok=True)
    with open(cache_filename, 'w') as cache_file:
        json.dump(env, cache_file)
def read_env_cache(cache_filename):
    """
    Load a previously written JSON environment cache.

    Args:
        cache_filename: a full path to a file to store the cache in

    Returns:
        dict: the cached environment
    """
    logger.debug('Reading DIRAC cache file at %s', cache_filename)
    with open(cache_filename, 'r') as cache_file:
        env = json.load(cache_file)
    # Convert unicode strings to byte strings
    # NOTE(review): this yields *bytes* keys and values. That was the desired
    # type under Python 2, but under Python 3 such a dict cannot be passed as
    # a subprocess environment -- confirm what the callers expect.
    env = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in env.items())
    return env
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
def getDiracCommandIncludes(force=False):
    """
    This helper function returns the Ganga DIRAC helper functions which are called by Ganga code to talk to DIRAC
    These are loaded from disk once and then saved in memory.

    Args:
        force (bool): Triggers a reload from disk when True

    Returns:
        str: concatenated text of all configured DIRAC command files

    Raises:
        RuntimeError: if a configured command file does not exist
    """
    global DIRAC_INCLUDE
    with Dirac_Env_Lock:
        if DIRAC_INCLUDE == '' or force:
            # FIX: rebuild into a fresh buffer -- previously a forced reload
            # appended the files to the already-loaded text, duplicating
            # every helper function.
            includes = ''
            for fname in getConfig('DIRAC')['DiracCommandFiles']:
                if not os.path.exists(fname):
                    raise RuntimeError("Specified Dirac command file '%s' does not exist." % fname)
                with open(fname, 'r') as inc_file:
                    includes += inc_file.read() + '\n'
            DIRAC_INCLUDE = includes
    return DIRAC_INCLUDE
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
def getValidDiracFiles(job, names=None):
    """
    This is a generator for all DiracFiles in a jobs outputfiles
    TODO: Is this still called anywhere?

    Args:
        job (Job): The job which is having it's DiracFiles tested
        names (list): list of strings of names to be matched to namePatterns in outputfiles

    Yields:
        DiracFile: each (sub)file which has an LFN and a matching namePattern
    """
    from GangaDirac.Lib.Files.DiracFile import DiracFile

    def _matching(outputfiles):
        # Yield DiracFiles (or their subfiles) that have an LFN and whose
        # namePattern matches `names` (None matches everything).
        # Extracted to remove the duplicated subjob/no-subjob logic below.
        for df in (f for f in outputfiles if isType(f, DiracFile)):
            if df.subfiles:
                for sf in df.subfiles:
                    if sf.lfn != '' and (names is None or sf.namePattern in names):
                        yield sf
            elif df.lfn != '' and (names is None or df.namePattern in names):
                yield df

    if job.subjobs:
        # Master job: walk every subjob's outputfiles.
        for sj in job.subjobs:
            for valid_file in _matching(sj.outputfiles):
                yield valid_file
    else:
        for valid_file in _matching(job.outputfiles):
            yield valid_file
def execute(command,
            timeout=getConfig('DIRAC')['Timeout'],
            env=None,
            cwd=None,
            shell=False,
            python_setup='',
            eval_includes=None,
            update_env=False,
            return_raw_dict=False,
            cred_req=None,
            new_subprocess=False
            ):
    """
    Execute a command on the local DIRAC server.

    This function blocks until the server returns.

    Args:
        command (str): This is the command we're running within our DIRAC session
        timeout (int): This is the length of time that a DIRAC call has before it's decided some interaction has timed out
        env (dict): an optional environment to execute the DIRAC code in
        cwd (str): an optional string to a valid path where this code should be executed
        shell (bool): Should this code be executed in a new shell environment
        python_setup (str): Optional extra code to pass to python when executing
        eval_includes (???): TODO document me
        update_env (bool): Should this modify the given env object with the env after the command has executed
        return_raw_dict (bool): Should we return the raw dict from the DIRAC interface or parse it here
        cred_req (ICredentialRequirement): What credentials does this call need
        new_subprocess (bool): Do we want to do this in a fresh subprocess or just connect to the DIRAC server process?

    Raises:
        GangaDiracError: on timeout, on a DIRAC reply with OK == False, or
            on a non-dict reply
    """
    if cwd is None:
        # We can in all likelyhood be in a temp folder on a shared (SLOW) filesystem
        # If we are we do NOT want to execute commands which will involve any I/O on the system that isn't needed
        cwd_ = tempfile.mkdtemp()
    else:
        # We know where we want to run, lets just run there
        cwd_ = cwd
    from GangaDirac.BOOT import startDiracProcess
    returnable = ''
    if not new_subprocess:
        with Dirac_Exec_Lock:
            # First check if a Dirac process is running
            from GangaDirac.BOOT import running_dirac_process
            if not running_dirac_process:
                startDiracProcess()
            # Set up a socket to connect to the process
            from GangaDirac.BOOT import dirac_process_ids
            HOST = 'localhost'  # The server's hostname or IP address
            PORT = dirac_process_ids[1]  # The port used by the server
            # Put inside a try/except in case the existing process has timed out
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((HOST, PORT))
            except socket.error as serr:
                # Start a new process and retry the connection once.
                startDiracProcess()
                from GangaDirac.BOOT import dirac_process_ids
                PORT = dirac_process_ids[1]
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((HOST, PORT))
            # Send a random string, then change the directory to carry out the command, then send the command
            command_to_send = str(dirac_process_ids[2])
            command_to_send += 'os.chdir("%s")\n' % cwd_
            command_to_send += command
            s.sendall(('%s###END-TRANS###' % command_to_send).encode('utf-8'))
            # Read until the end-of-transmission marker appears.
            out = ''
            while '###END-TRANS###' not in out:
                data = s.recv(1024)
                out += data.decode("utf-8")
            s.close()
            # Some regex nonsense to deal with the long representations in python 3
            out = re.sub(r'((?:^|\s|,|{|\()\d+)L([^A-Za-z0-9\"\'])', r'\1\2', out)
            # SECURITY NOTE(review): the reply is eval()'d verbatim; this is
            # only acceptable because it comes from the local, trusted DIRAC
            # helper process -- never point this at an untrusted endpoint.
            returnable = eval(out)
    else:
        # Fresh-subprocess path: build the environment and credentials first.
        if env is None:
            if cred_req is None:
                env = getDiracEnv()
            else:
                env = getDiracEnv(cred_req.dirac_env)
        if python_setup == '':
            python_setup = getDiracCommandIncludes()
        if cred_req is not None:
            env['X509_USER_PROXY'] = credential_store[cred_req].location
        if os.getenv('KRB5CCNAME'):
            env['KRB5CCNAME'] = os.getenv('KRB5CCNAME')
        returnable = gexecute.execute(command,
                                      timeout=timeout,
                                      env=env,
                                      cwd=cwd_,
                                      shell=shell,
                                      python_setup=python_setup,
                                      eval_includes=eval_includes,
                                      update_env=update_env)
    # If the time
    if returnable == 'Command timed out!':
        raise GangaDiracError("DIRAC command timed out")
    # TODO we would like some way of working out if the code has been executed correctly
    # Most commands will be OK now that we've added the check for the valid proxy before executing commands here
    if cwd is None:
        # Only remove the working dir if we created it ourselves above.
        shutil.rmtree(cwd_, ignore_errors=True)
    if isinstance(returnable, dict):
        if return_raw_dict:
            # If the output is a dictionary return and it has been requested, then return it
            return returnable
        # If the output is a dictionary allow for automatic error detection
        if returnable['OK']:
            return returnable['Value']
        else:
            raise GangaDiracError(returnable['Message'])
    else:
        # Else raise an exception as it should be a dictionary
        raise GangaDiracError(returnable)
|
ganga-devs/ganga
|
ganga/GangaDirac/Lib/Utilities/DiracUtilities.py
|
Python
|
gpl-3.0
| 12,474
|
[
"DIRAC"
] |
59b68a7707869fa816b033a9876c5c435747c67f956a821a6e077f51a423d54a
|
"""
This file contains Python code illustrating the creation and manipulation of
vtkTable objects.
"""
from vtk import *
#------------------------------------------------------------------------------
# Some Helper Functions
#------------------------------------------------------------------------------
def add_row_to_vtkTable(vtk_table, new_row=None):
""" Python helper function to add a new row of data to a vtkTable object. """
# Just a couple of sanity checks.
if new_row == None:
print "ERROR: No data provided for new table row."
return False
if len(new_row) != vtk_table.GetNumberOfColumns():
print "ERROR: Number of entries in new row does not match # of columns in table."
return False
for i in range(vtk_table.GetNumberOfColumns()):
vtk_table.GetColumn(i).InsertNextValue( new_row[i] )
return True
def get_vtkTableHeaders(vtk_table):
    """ Returns the vtkTable headers (column names) as a Python list """
    n_cols = vtk_table.GetNumberOfColumns()
    return [vtk_table.GetColumn(idx).GetName() for idx in range(n_cols)]
def get_vtkTableRow(vtk_table, row_number):
    """ Returns a row from a vtkTable object as a Python list. """
    n_cols = vtk_table.GetNumberOfColumns()
    return [vtk_table.GetColumn(idx).GetValue(row_number)
            for idx in range(n_cols)]
def get_vtkTableAsDelimitedText(vtk_table, sep="\t"):
    """ return a nicely formatted string version of a vtkTable """
    def _join(values):
        # Concatenate value+sep pairs, then strip the trailing separator,
        # exactly as the line-by-line formatting does.
        text = ""
        for v in values:
            text += "%s%s" % (v, sep)
        return text.rstrip(sep) + "\n"

    out = _join(get_vtkTableHeaders(vtk_table))
    for row_idx in range(vtk_table.GetNumberOfRows()):
        out += _join(str(cell) for cell in get_vtkTableRow(vtk_table, row_idx))
    return out
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE: a bare string here is just a no-op statement, not a docstring.
    """ Main entry point of this python script """

    #----------------------------------------------------------
    # Create an empty table
    T = vtkTable()

    #----------------------------------------------------------
    # Create Column 1 (IDs)
    col1 = vtkIntArray()
    col1.SetName("ID")
    for i in range(1, 8):
        col1.InsertNextValue(i)
    T.AddColumn(col1)

    #----------------------------------------------------------
    # Create Column 2 (Names)
    namesList = ['Bob', 'Ann', 'Sue', 'Bill', 'Joe', 'Jill', 'Rick']
    col2 = vtkStringArray()
    col2.SetName("Name")
    for val in namesList:
        col2.InsertNextValue(val)
    T.AddColumn(col2)

    #----------------------------------------------------------
    # Create Column 3 (Ages)
    agesList = [12, 25, 72, 11, 31, 36, 32]
    col3 = vtkIntArray()
    col3.SetName("Age")
    for val in agesList:
        col3.InsertNextValue(val)
    T.AddColumn(col3)

    #----------------------------------------------------------
    # Add a row to the table
    new_row = [8, "Luis", 68]
    # we can't really use vtkTable.InsertNextRow() since it takes a vtkVariantArray
    # as its argument (and the SetValue, etc. methods on that are not wrapped into
    # Python) We can just append to each of the column arrays.
    if not add_row_to_vtkTable(T, new_row):
        print "Whoops!"

    #----------------------------------------------------------
    # Call PrintSelf() on a VTK object is done simply by printing the object
    print 25*"="
    print "Calling PrintSelf():"
    print T

    #----------------------------------------------------------
    # Here are a couple of ways to print out our table in Python using
    # the helper functions that appear earlier in this script.
    # The accessor methods used here can be adapted to do more interesting
    # things with a vtkTable from within Python.

    # print out our table, row by row (Python 2 print statements)
    print 25*"="
    print "Rows as lists:"
    print get_vtkTableHeaders(T)
    for i in range(T.GetNumberOfRows()):
        print get_vtkTableRow(T,i)
    print ""
    print 25*"="
    print "Delimited text:"
    print get_vtkTableAsDelimitedText(T)

    print "vtkTable Python Example Completed."
|
naucoin/VTKSlicerWidgets
|
Examples/Infovis/Python/tables_adv.py
|
Python
|
bsd-3-clause
| 4,425
|
[
"VTK"
] |
c0a4a1011fa6242407e0ccbc1a5298b5647ec50b56274a91c9c446dfad0d90e0
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.ndimage import map_coordinates
from dipy.viz.colormap import line_colors
# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package
# import vtk
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
ns, have_numpy_support, _ = optional_package('vtk.util.numpy_support')
def set_input(vtk_object, inp):
    """ Generic input function which takes into account VTK 5 or 6

    Parameters
    ----------
    vtk_object: vtk object
    inp: vtkPolyData or vtkImageData or vtkAlgorithmOutput

    Returns
    -------
    vtk_object

    Notes
    -------
    This can be used in the following way::
        from dipy.viz.utils import set_input
        poly_mapper = set_input(vtk.vtkPolyDataMapper(), poly_data)
    """
    if isinstance(inp, (vtk.vtkPolyData, vtk.vtkImageData)):
        # VTK 5 uses SetInput; VTK 6+ renamed it to SetInputData.
        setter = (vtk_object.SetInput if vtk.VTK_MAJOR_VERSION <= 5
                  else vtk_object.SetInputData)
        setter(inp)
    elif isinstance(inp, vtk.vtkAlgorithmOutput):
        vtk_object.SetInputConnection(inp)
    vtk_object.Update()
    return vtk_object
def numpy_to_vtk_points(points):
    """ Numpy points array to a vtk points array

    Parameters
    ----------
    points : ndarray

    Returns
    -------
    vtk_points : vtkPoints()
    """
    point_data = ns.numpy_to_vtk(np.asarray(points), deep=True)
    vtk_points = vtk.vtkPoints()
    vtk_points.SetData(point_data)
    return vtk_points
def numpy_to_vtk_colors(colors):
    """ Numpy color array to a vtk color array

    Parameters
    ----------
    colors: ndarray

    Returns
    -------
    vtk_colors : vtkDataArray

    Notes
    -----
    If colors are not already in UNSIGNED_CHAR you may need to multiply by 255.

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.viz.utils import numpy_to_vtk_colors
    >>> rgb_array = np.random.rand(100, 3)
    >>> vtk_colors = numpy_to_vtk_colors(255 * rgb_array)
    """
    color_array = np.asarray(colors)
    return ns.numpy_to_vtk(color_array, deep=True,
                           array_type=vtk.VTK_UNSIGNED_CHAR)
def map_coordinates_3d_4d(input_array, indices):
    """ Evaluate the input_array data at the given indices
    using trilinear interpolation

    Parameters
    ----------
    input_array : ndarray,
        3D or 4D array
    indices : ndarray

    Returns
    -------
    output : ndarray
        1D or 2D array

    Raises
    ------
    ValueError
        if `input_array` is not 3- or 4-dimensional
    """
    ndim = input_array.ndim
    if ndim <= 2 or ndim >= 5:
        raise ValueError("Input array can only be 3d or 4d")

    if ndim == 3:
        return map_coordinates(input_array, indices.T, order=1)

    # 4D case: interpolate each last-axis volume independently, then stack
    # the per-volume results as columns of the output.
    channels = [map_coordinates(input_array[..., j], indices.T, order=1)
                for j in range(input_array.shape[-1])]
    return np.ascontiguousarray(np.array(channels).T)
def lines_to_vtk_polydata(lines, colors=None):
    """ Create a vtkPolyData with lines and colors

    Parameters
    ----------
    lines : list
        list of N curves represented as 2D ndarrays
    colors : array (N, 3), list of arrays, tuple (3,), array (K,), None
        If None then a standard orientation colormap is used for every line.
        If one tuple of color is used. Then all streamlines will have the same
        colour.
        If an array (N, 3) is given, where N is equal to the number of lines.
        Then every line is coloured with a different RGB color.
        If a list of RGB arrays is given then every point of every line takes
        a different color.
        If an array (K, 3) is given, where K is the number of points of all
        lines then every point is colored with a different RGB color.
        If an array (K,) is given, where K is the number of points of all
        lines then these are considered as the values to be used by the
        colormap.
        If an array (L,) is given, where L is the number of streamlines then
        these are considered as the values to be used by the colormap per
        streamline.
        If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the
        colormap are interpolated automatically using trilinear interpolation.

    Returns
    -------
    poly_data : vtkPolyData
    is_colormap : bool, true if the input color array was a colormap
    """
    # Get the 3d points_array
    points_array = np.vstack(lines)
    nb_lines = len(lines)
    nb_points = len(points_array)
    lines_range = range(nb_lines)

    # Get lines_array in vtk input format
    # cell layout is [len0, id, id, ..., len1, id, ...]
    lines_array = []
    # Using np.intp (instead of int64), because of a bug in numpy:
    # https://github.com/nipy/dipy/pull/789
    # https://github.com/numpy/numpy/issues/4384
    points_per_line = np.zeros([nb_lines], np.intp)
    current_position = 0
    for i in lines_range:
        current_len = len(lines[i])
        points_per_line[i] = current_len
        end_position = current_position + current_len
        lines_array += [current_len]
        lines_array += range(current_position, end_position)
        current_position = end_position
    lines_array = np.array(lines_array)

    # Set Points to vtk array format
    vtk_points = numpy_to_vtk_points(points_array)

    # Set Lines to vtk array format
    vtk_lines = vtk.vtkCellArray()
    vtk_lines.GetData().DeepCopy(ns.numpy_to_vtk(lines_array))
    vtk_lines.SetNumberOfCells(nb_lines)

    is_colormap = False
    # Get colors_array (reformat to have colors for each points)
    # - if/else tested and work in normal simple case
    if colors is None:  # set automatic rgb colors
        cols_arr = line_colors(lines)
        colors_mapper = np.repeat(lines_range, points_per_line, axis=0)
        vtk_colors = numpy_to_vtk_colors(255 * cols_arr[colors_mapper])
    else:
        cols_arr = np.asarray(colors)
        # BUGFIX: the ``np.object`` alias was deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin ``object`` is the equivalent dtype.
        if cols_arr.dtype == object:  # colors is a list of colors
            vtk_colors = numpy_to_vtk_colors(255 * np.vstack(colors))
        else:
            if len(cols_arr) == nb_points:
                if cols_arr.ndim == 1:  # values for every point
                    vtk_colors = ns.numpy_to_vtk(cols_arr, deep=True)
                    is_colormap = True
                elif cols_arr.ndim == 2:  # map color to each point
                    vtk_colors = numpy_to_vtk_colors(255 * cols_arr)
            elif cols_arr.ndim == 1:
                if len(cols_arr) == nb_lines:  # values for every streamline
                    # expand the per-line value to every point of that line
                    cols_arrx = []
                    for (i, value) in enumerate(colors):
                        cols_arrx += lines[i].shape[0] * [value]
                    cols_arrx = np.array(cols_arrx)
                    vtk_colors = ns.numpy_to_vtk(cols_arrx, deep=True)
                    is_colormap = True
                else:  # the same colors for all points
                    vtk_colors = numpy_to_vtk_colors(
                        np.tile(255 * cols_arr, (nb_points, 1)))
            elif cols_arr.ndim == 2:  # map color to each line
                colors_mapper = np.repeat(lines_range, points_per_line, axis=0)
                vtk_colors = numpy_to_vtk_colors(255 * cols_arr[colors_mapper])
            else:  # colormap
                # get colors for each vertex via trilinear interpolation
                cols_arr = map_coordinates_3d_4d(cols_arr, points_array)
                vtk_colors = ns.numpy_to_vtk(cols_arr, deep=True)
                is_colormap = True

    vtk_colors.SetName("Colors")

    # Create the poly_data
    poly_data = vtk.vtkPolyData()
    poly_data.SetPoints(vtk_points)
    poly_data.SetLines(vtk_lines)
    poly_data.GetPointData().SetScalars(vtk_colors)
    return poly_data, is_colormap
def get_polydata_lines(line_polydata):
    """Extract the polylines of a vtkPolyData as a list of 2D ndarrays.

    Parameters
    ----------
    line_polydata : vtkPolyData

    Returns
    -------
    lines : list
        List of N curves represented as 2D ndarrays
    """
    vertices = ns.vtk_to_numpy(line_polydata.GetPoints().GetData())
    cell_data = ns.vtk_to_numpy(line_polydata.GetLines().GetData())
    polylines = []
    pos = 0
    total = len(cell_data)
    # vtk cell layout: [len0, id, id, ..., len1, id, ...]
    while pos < total:
        count = cell_data[pos]
        stop = pos + count + 1
        ids = cell_data[pos + 1: stop]
        polylines.append(vertices[ids])
        pos = stop
    return polylines
def get_polydata_triangles(polydata):
    """Extract the triangles of a vtk polydata as an (N, 3) int ndarray.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        triangles
    """
    cells = ns.vtk_to_numpy(polydata.GetPolys().GetData())
    # every 4th entry is the cell size; it must be 3 for triangles
    assert((cells[::4] == 3).all())
    return np.column_stack([cells[1::4], cells[2::4], cells[3::4]])
def get_polydata_vertices(polydata):
    """Extract the vertices of a vtk polydata as an (N, 3) ndarray.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        points, represented as 2D ndarrays
    """
    points = polydata.GetPoints()
    return ns.vtk_to_numpy(points.GetData())
def get_polydata_normals(polydata):
    """Extract the point normals of a vtk polydata, if any.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        Normals, represented as 2D ndarrays (Nx3). None if there are no
        normals in the vtk polydata.
    """
    normals = polydata.GetPointData().GetNormals()
    return None if normals is None else ns.vtk_to_numpy(normals)
def get_polydata_colors(polydata):
    """Extract the point colors of a vtk polydata, if any.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        Colors. None if the polydata has no point scalars.
    """
    scalars = polydata.GetPointData().GetScalars()
    return None if scalars is None else ns.vtk_to_numpy(scalars)
def set_polydata_triangles(polydata, triangles):
    """ set polydata triangles with a numpy array (ndarrays Nx3 int)

    Parameters
    ----------
    polydata : vtkPolyData
    triangles : array (N, 3)
        triangles, represented as 2D ndarrays (Nx3)

    Returns
    -------
    polydata : vtkPolyData
        the same polydata, for chaining
    """
    # BUGFIX: the ``np.int`` alias was deprecated in NumPy 1.20 and removed
    # in 1.24; the builtin ``int`` is equivalent here.
    # Build the flat vtk cell array: [3, a, b, c, 3, a, b, c, ...]
    vtk_triangles = np.hstack(np.c_[np.ones(len(triangles)).astype(int) * 3,
                                    triangles])
    vtk_triangles = ns.numpy_to_vtkIdTypeArray(vtk_triangles, deep=True)
    vtk_cells = vtk.vtkCellArray()
    vtk_cells.SetCells(len(triangles), vtk_triangles)
    polydata.SetPolys(vtk_cells)
    return polydata
def set_polydata_vertices(polydata, vertices):
    """Set polydata vertices from a numpy array (Nx3).

    Parameters
    ----------
    polydata : vtkPolyData
    vertices : vertices, represented as 2D ndarrays (Nx3)
    """
    points = vtk.vtkPoints()
    points.SetData(ns.numpy_to_vtk(vertices, deep=True))
    polydata.SetPoints(points)
    return polydata
def set_polydata_normals(polydata, normals):
    """Set polydata point normals from a numpy array (Nx3, one per vertex).

    Parameters
    ----------
    polydata : vtkPolyData
    normals : normals, represented as 2D ndarrays (Nx3) (one per vertex)
    """
    normal_array = ns.numpy_to_vtk(normals, deep=True)
    polydata.GetPointData().SetNormals(normal_array)
    return polydata
def set_polydata_colors(polydata, colors):
    """Set polydata point colors from a numpy array (Nx3).

    Parameters
    ----------
    polydata : vtkPolyData
    colors : colors, represented as 2D ndarrays (Nx3)
        colors are uint8 [0,255] RGB for each points
    """
    scalars = ns.numpy_to_vtk(colors, deep=True,
                              array_type=vtk.VTK_UNSIGNED_CHAR)
    scalars.SetNumberOfComponents(3)
    scalars.SetName("RGB")
    polydata.GetPointData().SetScalars(scalars)
    return polydata
def update_polydata_normals(polydata):
    """ generate and update polydata normals

    Runs a vtkPolyDataNormals filter (point and cell normals, splitting
    disabled) and writes the resulting point normals back onto ``polydata``
    in place.

    Parameters
    ----------
    polydata : vtkPolyData
    """
    normals_gen = set_input(vtk.vtkPolyDataNormals(), polydata)
    normals_gen.ComputePointNormalsOn()
    normals_gen.ComputeCellNormalsOn()
    # splitting would duplicate points along sharp edges
    normals_gen.SplittingOff()
    # normals_gen.FlipNormalsOn()
    # normals_gen.ConsistencyOn()
    # normals_gen.AutoOrientNormalsOn()
    normals_gen.Update()
    vtk_normals = normals_gen.GetOutput().GetPointData().GetNormals()
    polydata.GetPointData().SetNormals(vtk_normals)
def get_polymapper_from_polydata(polydata):
    """Build a vtkPolyDataMapper from a vtkPolyData.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    poly_mapper : vtkPolyDataMapper
    """
    mapper = set_input(vtk.vtkPolyDataMapper(), polydata)
    mapper.ScalarVisibilityOn()
    mapper.InterpolateScalarsBeforeMappingOn()
    mapper.Update()
    mapper.StaticOn()
    return mapper
def get_actor_from_polymapper(poly_mapper, light=(0.1, 0.15, 0.05)):
    """Build a vtkActor from a vtkPolyDataMapper.

    Parameters
    ----------
    poly_mapper : vtkPolyDataMapper
    light : tuple of 3 floats
        (ambient, diffuse, specular) lighting coefficients

    Returns
    -------
    actor : vtkActor
    """
    actor = vtk.vtkActor()
    actor.SetMapper(poly_mapper)
    prop = actor.GetProperty()
    # actor.GetProperty().SetRepresentationToWireframe()
    prop.BackfaceCullingOn()
    prop.SetInterpolationToPhong()
    # actor.GetProperty().SetInterpolationToFlat()
    ambient, diffuse, specular = light
    prop.SetAmbient(ambient)  # .3
    prop.SetDiffuse(diffuse)  # .3
    prop.SetSpecular(specular)  # .3
    return actor
def get_actor_from_polydata(polydata):
    """Build a vtkActor directly from a vtkPolyData.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    actor : vtkActor
    """
    mapper = get_polymapper_from_polydata(polydata)
    return get_actor_from_polymapper(mapper)
|
villalonreina/dipy
|
dipy/viz/utils.py
|
Python
|
bsd-3-clause
| 14,226
|
[
"VTK"
] |
8156a26a95233369593dc9ccfcfce244a7fa75399405a0c13e913b6cdc2dde19
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.base import ModelBase
from django_facebook import model_managers, settings as facebook_settings
from open_facebook.utils import json, camel_to_underscore
from datetime import timedelta
from django_facebook.utils import compatible_datetime as datetime, \
get_model_for_attribute, get_user_attribute, get_instance_for_attribute, \
try_get_profile, update_user_attributes
from django_facebook.utils import get_user_model
from open_facebook.exceptions import OAuthException
import logging
import os
logger = logging.getLogger(__name__)
def get_user_model_setting():
    '''
    Return the dotted model path to use for facebook-related relations,
    taken from the AUTH_USER_FACEBOOK setting and falling back to the
    stock 'auth.User'.
    '''
    from django.conf import settings
    return getattr(settings, 'AUTH_USER_FACEBOOK', 'auth.User')
def validate_settings():
    '''
    Checks our Facebook and Django settings and looks for common errors

    Only logs warnings, never raises; skipped entirely when
    FACEBOOK_SKIP_VALIDATE is set.
    '''
    from django.conf import settings
    from django_facebook import settings as facebook_settings
    if facebook_settings.FACEBOOK_SKIP_VALIDATE:
        return
    # check for required settings
    if not facebook_settings.FACEBOOK_APP_ID:
        logger.warn('Warning FACEBOOK_APP_ID isnt specified')
    if not facebook_settings.FACEBOOK_APP_SECRET:
        logger.warn('Warning FACEBOOK_APP_SECRET isnt specified')
    # warn on things which will cause bad performance
    if facebook_settings.FACEBOOK_STORE_LIKES or facebook_settings.FACEBOOK_STORE_FRIENDS:
        if not facebook_settings.FACEBOOK_CELERY_STORE:
            msg = '''Storing friends or likes without using Celery will significantly slow down your login
            Its recommended to enable FACEBOOK_CELERY_STORE or disable FACEBOOK_STORE_FRIENDS and FACEBOOK_STORE_LIKES'''
            logger.warn(msg)
    # make sure the context processors are present
    required = ['django_facebook.context_processors.facebook',
                'django.core.context_processors.request']
    context_processors = settings.TEMPLATE_CONTEXT_PROCESSORS
    for context_processor in required:
        if context_processor not in context_processors:
            logger.warn(
                'Required context processor %s wasnt found', context_processor)
    # the facebook auth backend is mandatory for login to work
    backends = settings.AUTHENTICATION_BACKENDS
    required = 'django_facebook.auth_backends.FacebookBackend'
    if required not in backends:
        logger.warn('Required auth backend %s wasnt found', required)
# Run the sanity checks at import time so misconfiguration shows up early.
validate_settings()

# Upload target for facebook profile images; can be overridden via the
# FACEBOOK_PROFILE_IMAGE_PATH setting.
if facebook_settings.FACEBOOK_PROFILE_IMAGE_PATH:
    PROFILE_IMAGE_PATH = settings.FACEBOOK_PROFILE_IMAGE_PATH
else:
    PROFILE_IMAGE_PATH = os.path.join('images', 'facebook_profiles/%Y/%m/%d')
class FACEBOOK_OG_STATE:
    '''
    Enumeration (implemented as nested classes) of a user's open graph
    sharing state. Note that SHARING subclasses CONNECTED, so
    issubclass-style checks treat a sharing user as connected too.
    '''
    class NOT_CONNECTED:
        '''
        The user has not connected their profile with Facebook
        '''
        pass

    class CONNECTED:
        '''
        The user has connected their profile with Facebook, but isn't
        setup for Facebook sharing
        - sharing is either disabled
        - or we have no valid access token
        '''
        pass

    class SHARING(CONNECTED):
        '''
        The user is connected to Facebook and sharing is enabled
        '''
        pass
@python_2_unicode_compatible
class BaseFacebookModel(models.Model):
    '''
    Abstract class to add to your profile or user model.
    NOTE: If you don't use this this abstract class, make sure you copy/paste
    the fields in.
    '''
    # -- facebook profile data mirrored locally --
    about_me = models.TextField(blank=True, null=True)
    facebook_id = models.BigIntegerField(blank=True, unique=True, null=True)
    access_token = models.TextField(
        blank=True, help_text='Facebook token for offline access', null=True)
    facebook_name = models.CharField(max_length=255, blank=True, null=True)
    facebook_profile_url = models.TextField(blank=True, null=True)
    website_url = models.TextField(blank=True, null=True)
    blog_url = models.TextField(blank=True, null=True)
    date_of_birth = models.DateField(blank=True, null=True)
    gender = models.CharField(max_length=1, choices=(
        ('m', 'Male'), ('f', 'Female')), blank=True, null=True)
    raw_data = models.TextField(blank=True, null=True)
    # the field which controls if we are sharing to facebook
    facebook_open_graph = models.NullBooleanField(
        help_text='Determines if this user want to share via open graph')
    # set to true if we require a new access token
    new_token_required = models.BooleanField(
        default=False,
        help_text='Set to true if the access token is outdated or lacks permissions')

    @property
    def open_graph_new_token_required(self):
        '''
        Shows if we need to (re)authenticate the user for open graph sharing

        True when sharing is enabled but the token was flagged stale, or
        when the user never made a sharing choice (facebook_open_graph is
        None).
        '''
        reauthentication = False
        if self.facebook_open_graph and self.new_token_required:
            reauthentication = True
        elif self.facebook_open_graph is None:
            reauthentication = True
        return reauthentication

    def __str__(self):
        # BUGFIX: __str__ must return text; ``pk`` is an integer, which
        # raises a TypeError on Python 3 (and breaks the py2 decorator).
        # '%s' is a unicode literal here thanks to unicode_literals.
        return '%s' % self.get_user().pk

    class Meta:
        abstract = True

    def refresh(self):
        '''
        Get the latest version of this object from the db
        '''
        return self.__class__.objects.get(id=self.id)

    def get_user(self):
        '''
        Since this mixin can be used both for profile and user models
        '''
        if hasattr(self, 'user'):
            user = self.user
        else:
            user = self
        return user

    def get_user_id(self):
        '''
        Since this mixin can be used both for profile and user_id models
        '''
        if hasattr(self, 'user_id'):
            user_id = self.user_id
        else:
            user_id = self.id
        return user_id

    @property
    def facebook_og_state(self):
        # Map the stored fields onto the FACEBOOK_OG_STATE enumeration.
        if not self.facebook_id:
            state = FACEBOOK_OG_STATE.NOT_CONNECTED
        elif self.access_token and self.facebook_open_graph:
            state = FACEBOOK_OG_STATE.SHARING
        else:
            state = FACEBOOK_OG_STATE.CONNECTED
        return state

    def likes(self):
        '''
        All FacebookLike rows stored for this user
        '''
        likes = FacebookLike.objects.filter(user_id=self.get_user_id())
        return likes

    def friends(self):
        '''
        All FacebookUser (friend) rows stored for this user
        '''
        friends = FacebookUser.objects.filter(user_id=self.get_user_id())
        return friends

    def disconnect_facebook(self):
        '''
        Clear the facebook link; NOTE: does not save, callers must do so.
        '''
        self.access_token = None
        self.new_token_required = False
        self.facebook_id = None

    def clear_access_token(self):
        '''
        Remove the stored token and persist the change immediately
        '''
        self.access_token = None
        self.new_token_required = False
        self.save()

    def update_access_token(self, new_value):
        '''
        Updates the access token

        **Example**::

            # updates to 123 and sets new_token_required to False
            profile.update_access_token(123)

        :param new_value:
            The new value for access_token
        '''
        self.access_token = new_value
        self.new_token_required = False

    def extend_access_token(self):
        '''
        https://developers.facebook.com/roadmap/offline-access-removal/
        We can extend the token only once per day
        Normal short lived tokens last 1-2 hours
        Long lived tokens (given by extending) last 60 days
        The token can be extended multiple times, supposedly on every visit

        Returns None when the extension is delegated to a Celery task.
        '''
        logger.info('extending access token for user %s', self.get_user())
        results = None
        if facebook_settings.FACEBOOK_CELERY_TOKEN_EXTEND:
            from django_facebook import tasks
            tasks.extend_access_token.delay(self, self.access_token)
        else:
            results = self._extend_access_token(self.access_token)
        return results

    def _extend_access_token(self, access_token):
        '''
        Synchronous part of the token extension; saves only when the token
        actually changed and emits facebook_token_extend_finished either way.
        '''
        from open_facebook.api import FacebookAuthorization
        results = FacebookAuthorization.extend_access_token(access_token)
        access_token = results['access_token']
        old_token = self.access_token
        token_changed = access_token != old_token
        message = 'a new' if token_changed else 'the same'
        log_format = 'Facebook provided %s token, which expires at %s'
        expires_delta = timedelta(days=60)
        logger.info(log_format, message, expires_delta)
        if token_changed:
            logger.info('Saving the new access token')
            self.update_access_token(access_token)
            self.save()

        from django_facebook.signals import facebook_token_extend_finished
        facebook_token_extend_finished.send(
            sender=get_user_model(), user=self.get_user(), profile=self,
            token_changed=token_changed, old_token=old_token
        )

        return results

    def get_offline_graph(self):
        '''
        Returns a open facebook graph client based on the access token stored
        in the user's profile

        Returns None when no token is stored.
        '''
        from open_facebook.api import OpenFacebook
        if self.access_token:
            graph = OpenFacebook(access_token=self.access_token)
            graph.current_user_id = self.facebook_id
            return graph
# Backwards-compatible alias; the historical name for the same mixin.
BaseFacebookProfileModel = BaseFacebookModel
class FacebookModel(BaseFacebookModel):
    '''
    the image field really destroys the subclassability of an abstract model
    you always need to customize the upload settings and storage settings
    thats why we stick it in a separate class
    override the BaseFacebookProfile if you want to change the image
    '''
    # facebook profile picture, stored under PROFILE_IMAGE_PATH
    image = models.ImageField(blank=True, null=True,
                              upload_to=PROFILE_IMAGE_PATH, max_length=255)

    def profile_or_self(self):
        '''
        Return the object carrying the facebook fields: ``self`` when the
        facebook fields live on the (custom) user model, otherwise the
        related ``facebookprofile`` instance.
        '''
        user_or_profile_model = get_model_for_attribute('facebook_id')
        user_model = get_user_model()
        if user_or_profile_model == user_model:
            return self
        else:
            return self.facebookprofile

    class Meta:
        abstract = True
# better name for the mixin now that it can also be used for user models
# (the old name is kept above for backwards compatibility)
FacebookProfileModel = FacebookModel
@python_2_unicode_compatible
class FacebookUser(models.Model):
    '''
    Model for storing a users friends
    '''
    # in order to be able to easily move these to an another db,
    # use a user_id and no foreign key
    user_id = models.IntegerField()
    facebook_id = models.BigIntegerField()
    name = models.TextField(blank=True, null=True)
    gender = models.CharField(choices=(
        ('F', 'female'), ('M', 'male')), blank=True, null=True, max_length=1)

    objects = model_managers.FacebookUserManager()

    class Meta:
        # one row per (user, friend) pair
        unique_together = ['user_id', 'facebook_id']

    def __str__(self):
        return u'Facebook user %s' % self.name
class FacebookLike(models.Model):
    '''
    Model for storing all of a users fb likes
    '''
    # in order to be able to easily move these to an another db,
    # use a user_id and no foreign key
    user_id = models.IntegerField()
    facebook_id = models.BigIntegerField()
    name = models.TextField(blank=True, null=True)
    category = models.TextField(blank=True, null=True)
    created_time = models.DateTimeField(blank=True, null=True)

    class Meta:
        # one row per (user, liked page) pair
        unique_together = ['user_id', 'facebook_id']
class FacebookProfile(FacebookProfileModel):
    '''
    Not abstract version of the facebook profile model
    Use this by setting
    AUTH_PROFILE_MODULE = 'django_facebook.FacebookProfile'
    '''
    # one profile row per user
    user = models.OneToOneField(get_user_model_setting())
# Only define the custom user model when the project opted into it through
# AUTH_USER_MODEL; AbstractUser only exists on Django >= 1.5, hence the
# ImportError guard.
if getattr(settings, 'AUTH_USER_MODEL', None) == 'django_facebook.FacebookCustomUser':
    try:
        from django.contrib.auth.models import AbstractUser, UserManager

        class FacebookCustomUser(AbstractUser, FacebookModel):
            '''
            The django 1.5 approach to adding the facebook related fields
            '''
            objects = UserManager()
            # add any customizations you like
            state = models.CharField(max_length=255, blank=True, null=True)
    except ImportError as e:
        logger.info('Couldnt setup FacebookUser, got error %s', e)
class BaseModelMetaclass(ModelBase):
    '''
    Cleaning up the table naming conventions

    Renames db tables from the Django default ``app_modelname`` to the
    underscored ``app_model_name`` form, but only when the model did not
    customize db_table itself.
    '''
    def __new__(cls, name, bases, attrs):
        super_new = ModelBase.__new__(cls, name, bases, attrs)
        # e.g. OpenGraphShare -> open_graph_share
        module_name = camel_to_underscore(name)
        app_label = super_new.__module__.split('.')[-2]
        db_table = '%s_%s' % (app_label, module_name)
        django_default = '%s_%s' % (app_label, name.lower())
        if not getattr(super_new._meta, 'proxy', False):
            db_table_is_default = django_default == super_new._meta.db_table
            # Don't overwrite when people customize the db_table
            if db_table_is_default:
                super_new._meta.db_table = db_table
        return super_new
@python_2_unicode_compatible
class BaseModel(models.Model):
    '''
    Stores the fields common to all incentive models
    '''
    # NOTE(review): __metaclass__ only takes effect on Python 2; on
    # Python 3 the metaclass must be passed in the class statement.
    __metaclass__ = BaseModelMetaclass

    def __str__(self):
        '''
        Looks at some common ORM naming standards and tries to display those before
        default to the django default
        '''
        attributes = ['name', 'title', 'slug']
        name = None
        # the LAST matching attribute wins
        for a in attributes:
            if hasattr(self, a):
                name = getattr(self, a)
        if not name:
            name = repr(self.__class__)
        return name

    class Meta:
        abstract = True
@python_2_unicode_compatible
class CreatedAtAbstractBase(BaseModel):
    '''
    Stores the fields common to all incentive models

    Adds created_at/updated_at timestamps and optional full_clean-on-save.
    '''
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)

    # determine if we should clean this model
    auto_clean = False

    def save(self, *args, **kwargs):
        '''
        Allow for auto clean support
        '''
        if self.auto_clean:
            self.clean()
        saved = models.Model.save(self, *args, **kwargs)
        return saved

    def __str__(self):
        '''
        Looks at some common ORM naming standards and tries to display those before
        default to the django default
        '''
        attributes = ['name', 'title', 'slug']
        name = None
        # the LAST matching attribute wins
        for a in attributes:
            if hasattr(self, a):
                name = getattr(self, a)
        if not name:
            name = repr(self.__class__)
        return name

    def __repr__(self):
        return '<%s[%s]>' % (self.__class__.__name__, self.pk)

    class Meta:
        abstract = True
class OpenGraphShare(BaseModel):
    '''
    Object for tracking all shares to Facebook
    Used for statistics and evaluating how things are going
    I recommend running this in a task

    **Example usage**::

        from user.models import OpenGraphShare
        user = UserObject
        url = 'http://www.fashiolista.com/'
        kwargs = dict(list=url)
        share = OpenGraphShare.objects.create(
            user = user,
            action_domain='fashiolista:create',
            content_object=self,
        )
        share.set_share_dict(kwargs)
        share.save()
        result = share.send()

    **Advanced usage**::

        share.send()
        share.update(message='Hello world')
        share.remove()
        share.retry()

    Using this model has the advantage that it allows us to
    - remove open graph shares (since we store the Facebook id)
    - retry open graph shares, which is handy in case of
    - updated access tokens (retry all shares from this user in the last facebook_settings.FACEBOOK_OG_SHARE_RETRY_DAYS)
    - Facebook outages (Facebook often has minor interruptions, retry in 15m, for max facebook_settings.FACEBOOK_OG_SHARE_RETRIES)
    '''
    objects = model_managers.OpenGraphShareManager()

    user = models.ForeignKey(get_user_model_setting())
    # domain stores
    action_domain = models.CharField(max_length=255)
    facebook_user_id = models.BigIntegerField()
    # what we are sharing, dict and object
    share_dict = models.TextField(blank=True, null=True)
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.PositiveIntegerField(blank=True, null=True)
    content_object = GenericForeignKey('content_type', 'object_id')
    # completion data
    error_message = models.TextField(blank=True, null=True)
    last_attempt = models.DateTimeField(
        blank=True, null=True, auto_now_add=True)
    retry_count = models.IntegerField(blank=True, null=True)
    # only written if we actually succeed
    share_id = models.CharField(blank=True, null=True, max_length=255)
    completed_at = models.DateTimeField(blank=True, null=True)
    # tracking removals
    removed_at = models.DateTimeField(blank=True, null=True)
    # updated at and created at, last one needs an index
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)

    class Meta:
        db_table = facebook_settings.FACEBOOK_OG_SHARE_DB_TABLE

    def save(self, *args, **kwargs):
        '''
        Backfill facebook_user_id from the user's profile before saving
        '''
        if self.user and not self.facebook_user_id:
            profile = self.user.facebookprofile
            self.facebook_user_id = get_user_attribute(
                self.user, profile, 'facebook_id')
        return BaseModel.save(self, *args, **kwargs)

    def send(self, graph=None, shared_explicitly=False):
        '''
        Post this share to Facebook's open graph.

        :param graph: optional graph client; defaults to the user's
            offline graph
        :param shared_explicitly: bypass the user's open-graph opt-in check
        :returns: the Facebook API response, or None on failure
        '''
        result = None
        # update the last attempt
        self.last_attempt = datetime.now()
        self.save()

        # see if the graph is enabled
        profile = try_get_profile(self.user)
        user_or_profile = get_instance_for_attribute(
            self.user, profile, 'access_token')
        graph = graph or user_or_profile.get_offline_graph()
        user_enabled = shared_explicitly or \
            (user_or_profile.facebook_open_graph and self.facebook_user_id)

        # start sharing
        if graph and user_enabled:
            graph_location = '%s/%s' % (
                self.facebook_user_id, self.action_domain)
            share_dict = self.get_share_dict()
            from open_facebook.exceptions import OpenFacebookException
            try:
                result = graph.set(graph_location, **share_dict)
                share_id = result.get('id')
                if not share_id:
                    # treat a response without an id as a failure
                    error_message = 'No id in Facebook response, found %s for url %s with data %s' % (
                        result, graph_location, share_dict)
                    logger.error(error_message)
                    raise OpenFacebookException(error_message)
                self.share_id = share_id
                self.error_message = None
                self.completed_at = datetime.now()
                self.save()
            except OpenFacebookException as e:
                logger.warn(
                    'Open graph share failed, writing message %s' % str(e))
                self.error_message = repr(e)
                self.save()

                # maybe we need a new access token
                new_token_required = self.exception_requires_new_token(
                    e, graph)
                # verify that the token didnt change in the mean time
                user_or_profile = user_or_profile.__class__.objects.get(
                    id=user_or_profile.id)
                token_changed = graph.access_token != user_or_profile.access_token
                logger.info('new token required is %s and token_changed is %s',
                            new_token_required, token_changed)
                if new_token_required and not token_changed:
                    logger.info(
                        'a new token is required, setting the flag on the user or profile')
                    # time to ask the user for a new token
                    update_user_attributes(self.user, profile, dict(
                        new_token_required=True), save=True)
        elif not graph:
            self.error_message = 'no graph available'
            self.save()
        elif not user_enabled:
            self.error_message = 'user not enabled'
            self.save()
        return result

    def exception_requires_new_token(self, e, graph):
        '''
        Determines if the exceptions is something which requires us to
        ask for a new token. Examples are:

        Error validating access token: Session has expired at unix time
        1350669826. The current unix time is 1369657666.

        (#200) Requires extended permission: publish_actions (error code 200)
        '''
        new_token = False
        if isinstance(e, OAuthException):
            new_token = True
            # if we have publish actions than our token is ok
            # we get in this flow if Facebook mistakenly marks exceptions
            # as oAuthExceptions
            publish_actions = graph.has_permissions(['publish_actions'])
            if publish_actions:
                new_token = False
        return new_token

    def update(self, data, graph=None):
        '''
        Update the share with the given data
        '''
        result = None
        profile = self.user.facebookprofile
        graph = graph or profile.get_offline_graph()
        # update the share dict so a retry will do the right thing
        # just in case we fail the first time
        shared = self.update_share_dict(data)
        self.save()
        # broadcast the change to facebook
        if self.share_id:
            result = graph.set(self.share_id, **shared)
        return result

    def remove(self, graph=None):
        '''
        Delete the share from Facebook and record the removal time

        :raises ValueError: when the share was never completed (no share_id)
        '''
        if not self.share_id:
            raise ValueError('Can only delete shares which have an id')
        # see if the graph is enabled
        profile = self.user.facebookprofile
        graph = graph or profile.get_offline_graph()
        response = None
        if graph:
            response = graph.delete(self.share_id)
            self.removed_at = datetime.now()
            self.save()
        return response

    def retry(self, graph=None, reset_retries=False):
        '''
        Re-send a failed share, bumping (or resetting) retry_count
        '''
        if self.completed_at:
            raise ValueError('You can\'t retry completed shares')
        if reset_retries:
            self.retry_count = 0
        # handle the case where self.retry_count = None
        self.retry_count = self.retry_count + 1 if self.retry_count else 1
        # actually retry now
        result = self.send(graph=graph)
        return result

    def set_share_dict(self, share_dict):
        '''
        Serialize the share kwargs to JSON for storage
        '''
        share_dict_string = json.dumps(share_dict)
        self.share_dict = share_dict_string

    def get_share_dict(self):
        '''
        Deserialize the stored share kwargs
        '''
        share_dict_string = self.share_dict
        share_dict = json.loads(share_dict_string)
        return share_dict

    def update_share_dict(self, share_dict):
        '''
        Merge ``share_dict`` into the stored kwargs and return the result
        '''
        old_share_dict = self.get_share_dict()
        old_share_dict.update(share_dict)
        self.set_share_dict(old_share_dict)
        return old_share_dict
|
rafaelgontijo/Django-facebook-fork
|
django_facebook/models.py
|
Python
|
bsd-3-clause
| 23,266
|
[
"VisIt"
] |
51ae3e8c048516a18264630b88b66f9c0883c837a8f20e8ce83987209ccf1b48
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
# Jeremy Kolb <jkolb@brandeis.edu>
import gl_XML, glX_XML, glX_proto_common, license
import sys, getopt, copy, string
def convertStringForXCB(str):
    """Convert a CamelCase GL token name to the xcb naming convention.

    Each uppercase letter becomes '_' + its lowercase form; the special
    substring "ARB" is consumed as one unit (e.g. "VertexARB" ->
    "_vertex_arb").

    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    compatibility with existing callers.
    """
    tmp = ""
    special = [ "ARB" ]
    i = 0
    while i < len(str):
        if str[i:i+3] in special:
            # BUGFIX/modernization: string.lower() was removed in Python 3;
            # the equivalent str.lower() method works on Python 2 as well.
            tmp = '%s_%s' % (tmp, str[i:i+3].lower())
            # skip the rest of the special token (the final i += 1 below
            # consumes the third character)
            i = i + 2
        elif str[i].isupper():
            tmp = '%s_%s' % (tmp, str[i].lower())
        else:
            tmp = '%s%s' % (tmp, str[i])
        i += 1
    return tmp
def hash_pixel_function(func):
    """Generate a 'unique' key for a pixel function.  The key is based on
    the parameters written in the command packet.  This includes any
    padding that might be added for the original function and the 'NULL
    image' flag.

    Returns [hash, stub_name] where stub_name is the shared "__glx_..."
    utility function name for the paired dimensions.
    """
    h = ""
    hash_pre = ""
    hash_suf = ""
    for param in func.parameterIterateGlxSend():
        if param.is_image():
            # only the dimension count matters; round up to pair 1D/2D
            # (and 3D/4D) functions onto the same stub
            [dim, junk, junk, junk, junk] = param.get_dimensions()
            d = (dim + 1) & ~1
            hash_pre = "%uD%uD_" % (d - 1, d)
            if param.img_null_flag:
                hash_suf = "_NF"
        h += "%u" % (param.size())
        if func.pad_after(param):
            h += "4"
    # NOTE(review): ``dim`` and ``d`` leak out of the loop above — this
    # assumes every pixel function has at least one image parameter.
    n = func.name.replace("%uD" % (dim), "")
    n = "__glx_%s_%uD%uD" % (n, d - 1, d)
    h = hash_pre + h + hash_suf
    return [h, n]
class glx_pixel_function_stub(glX_XML.glx_function):
    """Dummy class used to generate pixel "utility" functions that are
    shared by multiple dimension image functions.  For example, these
    objects are used to generate shared functions used to send GLX
    protocol for TexImage1D and TexImage2D, TexSubImage1D and
    TexSubImage2D, etc."""

    def __init__(self, func, name):
        # The parameters to the utility function are the same as the
        # parameters to the real function except for the added "pad"
        # parameters.
        self.name = name
        self.images = []
        self.parameters = []
        self.parameters_by_name = {}
        for _p in func.parameterIterator():
            # copy so the per-stub tweaks below don't mutate the original
            p = copy.copy(_p)
            self.parameters.append(p)
            self.parameters_by_name[ p.name ] = p

            if p.is_image():
                self.images.append(p)
                p.height = "height"

                # generalize missing offsets so the stub covers the
                # higher-dimensional variants
                if p.img_yoff == None:
                    p.img_yoff = "yoffset"

                if p.depth:
                    if p.extent == None:
                        p.extent = "extent"

                    if p.img_woff == None:
                        p.img_woff = "woffset"

            # insert an explicit pad parameter where the real function
            # had implicit padding
            pad_name = func.pad_after(p)
            if pad_name:
                pad = copy.copy(p)
                pad.name = pad_name
                self.parameters.append(pad)
                self.parameters_by_name[ pad.name ] = pad

        self.return_type = func.return_type

        # protocol bookkeeping copied (or neutralized) from the original
        self.glx_rop = ~0
        self.glx_sop = 0
        self.glx_vendorpriv = 0

        self.glx_doubles_in_order = func.glx_doubles_in_order

        self.vectorequiv = None
        self.output = None
        self.can_be_large = func.can_be_large
        self.reply_always_array = func.reply_always_array
        self.dimensions_in_reply = func.dimensions_in_reply
        self.img_reset = None

        self.server_handcode = 0
        self.client_handcode = 0
        self.ignore = 0

        self.count_parameter_list = func.count_parameter_list
        self.counter_list = func.counter_list
        self.offsets_calculated = 0
        return
class PrintGlxProtoStubs(glX_proto_common.glx_print_proto):
    """Generator for the client-side GLX protocol marshalling code
    (indirect.c).  All output is C source emitted to stdout."""

    def __init__(self):
        glX_proto_common.glx_print_proto.__init__(self)
        self.name = "glX_proto_send.py (from Mesa)"
        self.license = license.bsd_license_template % ( "(C) Copyright IBM Corporation 2004, 2005", "IBM")
        self.last_category = ""
        # Fixed command sizes for which shared generic_N_byte emitters exist.
        self.generic_sizes = [3, 4, 6, 8, 12, 16, 24, 32]
        # Maps a real function name (e.g. TexImage2D) to the shared pixel
        # stub that implements it; filled in by printBody.
        self.pixel_stubs = {}
        self.debug = 0
        return

    def printRealHeader(self):
        # Emit the fixed C preamble: includes, helper macros, and the
        # hand-written reply/request helper functions.
        print ''
        print '#include <GL/gl.h>'
        print '#include "indirect.h"'
        print '#include "glxclient.h"'
        print '#include "indirect_size.h"'
        print '#include "glapi.h"'
        print '#include "glthread.h"'
        print '#include <GL/glxproto.h>'
        print '#ifdef USE_XCB'
        print '#include <X11/Xlib-xcb.h>'
        print '#include <xcb/xcb.h>'
        print '#include <xcb/glx.h>'
        print '#endif /* USE_XCB */'

        print ''
        print '#define __GLX_PAD(n) (((n) + 3) & ~3)'
        print ''
        self.printFastcall()
        self.printNoinline()
        print ''
        print '#ifndef __GNUC__'
        print '# define __builtin_expect(x, y) x'
        print '#endif'
        print ''
        print '/* If the size and opcode values are known at compile-time, this will, on'
        print ' * x86 at least, emit them with a single instruction.'
        print ' */'
        print '#define emit_header(dest, op, size)            \\'
        print '    do { union { short s[2]; int i; } temp;    \\'
        print '         temp.s[0] = (size); temp.s[1] = (op); \\'
        print '         *((int *)(dest)) = temp.i; } while(0)'
        print ''
        print """NOINLINE CARD32
__glXReadReply( Display *dpy, size_t size, void * dest, GLboolean reply_is_always_array )
{
    xGLXSingleReply reply;

    (void) _XReply(dpy, (xReply *) & reply, 0, False);
    if (size != 0) {
        if ((reply.length > 0) || reply_is_always_array) {
            const GLint bytes = (reply_is_always_array)
              ? (4 * reply.length) : (reply.size * size);
            const GLint extra = 4 - (bytes & 3);

            _XRead(dpy, dest, bytes);
            if ( extra < 4 ) {
                _XEatData(dpy, extra);
            }
        }
        else {
            (void) memcpy( dest, &(reply.pad3), size);
        }
    }

    return reply.retval;
}

NOINLINE void
__glXReadPixelReply( Display *dpy, struct glx_context * gc, unsigned max_dim,
    GLint width, GLint height, GLint depth, GLenum format, GLenum type,
    void * dest, GLboolean dimensions_in_reply )
{
    xGLXSingleReply reply;
    GLint size;

    (void) _XReply(dpy, (xReply *) & reply, 0, False);

    if ( dimensions_in_reply ) {
        width  = reply.pad3;
        height = reply.pad4;
        depth  = reply.pad5;

        if ((height == 0) || (max_dim < 2)) { height = 1; }
        if ((depth  == 0) || (max_dim < 3)) { depth  = 1; }
    }

    size = reply.length * 4;
    if (size != 0) {
        void * buf = Xmalloc( size );

        if ( buf == NULL ) {
            _XEatData(dpy, size);
            __glXSetError(gc, GL_OUT_OF_MEMORY);
        }
        else {
            const GLint extra = 4 - (size & 3);

            _XRead(dpy, buf, size);
            if ( extra < 4 ) {
                _XEatData(dpy, extra);
            }

            __glEmptyImage(gc, 3, width, height, depth, format, type,
                           buf, dest);
            Xfree(buf);
        }
    }
}

#define X_GLXSingle 0

NOINLINE FASTCALL GLubyte *
__glXSetupSingleRequest( struct glx_context * gc, GLint sop, GLint cmdlen )
{
    xGLXSingleReq * req;
    Display * const dpy = gc->currentDpy;

    (void) __glXFlushRenderBuffer(gc, gc->pc);
    LockDisplay(dpy);
    GetReqExtra(GLXSingle, cmdlen, req);
    req->reqType = gc->majorOpcode;
    req->contextTag = gc->currentContextTag;
    req->glxCode = sop;
    return (GLubyte *)(req) + sz_xGLXSingleReq;
}

NOINLINE FASTCALL GLubyte *
__glXSetupVendorRequest( struct glx_context * gc, GLint code, GLint vop, GLint cmdlen )
{
    xGLXVendorPrivateReq * req;
    Display * const dpy = gc->currentDpy;

    (void) __glXFlushRenderBuffer(gc, gc->pc);
    LockDisplay(dpy);
    GetReqExtra(GLXVendorPrivate, cmdlen, req);
    req->reqType = gc->majorOpcode;
    req->glxCode = code;
    req->vendorCode = vop;
    req->contextTag = gc->currentContextTag;
    return (GLubyte *)(req) + sz_xGLXVendorPrivateReq;
}

const GLuint __glXDefaultPixelStore[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };

#define zero                        (__glXDefaultPixelStore+0)
#define one                         (__glXDefaultPixelStore+8)
#define default_pixel_store_1D      (__glXDefaultPixelStore+4)
#define default_pixel_store_1D_size 20
#define default_pixel_store_2D      (__glXDefaultPixelStore+4)
#define default_pixel_store_2D_size 20
#define default_pixel_store_3D      (__glXDefaultPixelStore+0)
#define default_pixel_store_3D_size 36
#define default_pixel_store_4D      (__glXDefaultPixelStore+0)
#define default_pixel_store_4D_size 36
"""

        for size in self.generic_sizes:
            self.print_generic_function(size)
        return

    def printBody(self, api):
        """Emit one marshalling function per GLX-capable GL entry point,
        plus shared pixel stubs and the GetProcAddress table."""
        self.pixel_stubs = {}
        generated_stubs = []

        for func in api.functionIterateGlx():
            if func.client_handcode: continue

            # If the function is a pixel function with a certain
            # GLX protocol signature, create a fake stub function
            # for it.  For example, create a single stub function
            # that is used to implement both glTexImage1D and
            # glTexImage2D.

            if func.glx_rop != 0:
                do_it = 0
                for image in func.get_images():
                    if image.img_pad_dimensions:
                        do_it = 1
                        break

                if do_it:
                    [h, n] = hash_pixel_function(func)

                    self.pixel_stubs[ func.name ] = n
                    # Only emit each shared stub once, keyed by its
                    # protocol-signature hash.
                    if h not in generated_stubs:
                        generated_stubs.append(h)

                        fake_func = glx_pixel_function_stub( func, n )
                        self.printFunction(fake_func, fake_func.name)

            self.printFunction(func, func.name)
            # Emit a second body for the vendor-private alias when the
            # function has both a single and a vendor-private opcode.
            if func.glx_sop and func.glx_vendorpriv:
                self.printFunction(func, func.glx_vendorpriv_names[0])

        self.printGetProcAddress(api)
        return

    def printGetProcAddress(self, api):
        """Emit the sorted name->proc table and bsearch-based lookup used
        when GLX_SHARED_GLAPI is defined."""
        procs = {}
        for func in api.functionIterateGlx():
            for n in func.entry_points:
                if func.has_different_protocol(n):
                    procs[n] = func.static_glx_name(n)

        print """
#ifdef GLX_SHARED_GLAPI

static const struct proc_pair
{
   const char *name;
   _glapi_proc proc;
} proc_pairs[%d] = {""" % len(procs)
        # Sort so the C bsearch() over proc_pairs is valid.
        names = procs.keys()
        names.sort()
        for i in xrange(len(names)):
            comma = ',' if i < len(names) - 1 else ''
            print '   { "%s", (_glapi_proc) gl%s }%s' % (names[i], procs[names[i]], comma)
        print """};

static int
__indirect_get_proc_compare(const void *key, const void *memb)
{
   const struct proc_pair *pair = (const struct proc_pair *) memb;
   return strcmp((const char *) key, pair->name);
}

_glapi_proc
__indirect_get_proc_address(const char *name)
{
   const struct proc_pair *pair;

   /* skip "gl" */
   name += 2;

   pair = (const struct proc_pair *) bsearch((const void *) name,
      (const void *) proc_pairs, ARRAY_SIZE(proc_pairs), sizeof(proc_pairs[0]),
      __indirect_get_proc_compare);

   return (pair) ? pair->proc : NULL;
}

#endif /* GLX_SHARED_GLAPI */
"""
        return

    def printFunction(self, func, name):
        """Emit the C function header/footer for one entry point and
        dispatch to the render/pixel/single body emitters."""
        footer = '}\n'
        # glx_rop == ~0 marks a shared pixel stub (see
        # glx_pixel_function_stub): opcode and dim become parameters.
        if func.glx_rop == ~0:
            print 'static %s' % (func.return_type)
            print '%s( unsigned opcode, unsigned dim, %s )' % (func.name, func.get_parameter_string())
            print '{'
        else:
            if func.has_different_protocol(name):
                if func.return_type == "void":
                    ret_string = ''
                else:
                    ret_string = "return "

                func_name = func.static_glx_name(name)
                print '#define %s %d' % (func.opcode_vendor_name(name), func.glx_vendorpriv)
                print '%s gl%s(%s)' % (func.return_type, func_name, func.get_parameter_string())
                print '{'
                print '    struct glx_context * const gc = __glXGetCurrentContext();'
                print ''
                print '#if defined(GLX_DIRECT_RENDERING) && !defined(GLX_USE_APPLEGL)'
                print '    if (gc->isDirect) {'
                print '        %sGET_DISPATCH()->%s(%s);' % (ret_string, func.name, func.get_called_parameter_string())
                print '    } else'
                print '#endif'
                print '    {'

                footer = '}\n}\n'
            else:
                print '#define %s %d' % (func.opcode_name(), func.opcode_value())

                print '%s __indirect_gl%s(%s)' % (func.return_type, name, func.get_parameter_string())
                print '{'

        if func.glx_rop != 0 or func.vectorequiv != None:
            if len(func.images):
                self.printPixelFunction(func)
            else:
                self.printRenderFunction(func)
        elif func.glx_sop != 0 or func.glx_vendorpriv != 0:
            self.printSingleFunction(func, name)
            pass
        else:
            print "/* Missing GLX protocol for %s. */" % (name)

        print footer
        return

    def print_generic_function(self, n):
        """Emit the shared generic_N_byte helper for fixed-size render
        commands of payload size n (rounded up to a multiple of 4)."""
        size = (n + 3) & ~3
        print """static FASTCALL NOINLINE void
generic_%u_byte( GLint rop, const void * ptr )
{
    struct glx_context * const gc = __glXGetCurrentContext();
    const GLuint cmdlen = %u;

    emit_header(gc->pc, rop, cmdlen);
    (void) memcpy((void *)(gc->pc + 4), ptr, %u);
    gc->pc += cmdlen;
    if (__builtin_expect(gc->pc > gc->limit, 0)) { (void) __glXFlushRenderBuffer(gc, gc->pc); }
}
""" % (n, size + 4, size)
        return

    def common_emit_one_arg(self, p, pc, adjust, extra_offset):
        """Emit the memcpy/memset that packs one parameter into the
        command buffer at its protocol offset (+adjust header bytes)."""
        if p.is_array():
            src_ptr = p.name
        else:
            src_ptr = "&" + p.name

        if p.is_padding:
            print '(void) memset((void *)(%s + %u), 0, %s);' \
                % (pc, p.offset + adjust, p.size_string() )
        elif not extra_offset:
            print '(void) memcpy((void *)(%s + %u), (void *)(%s), %s);' \
                % (pc, p.offset + adjust, src_ptr, p.size_string() )
        else:
            print '(void) memcpy((void *)(%s + %u + %s), (void *)(%s), %s);' \
                % (pc, p.offset + adjust, extra_offset, src_ptr, p.size_string() )

    def common_emit_args(self, f, pc, adjust, skip_vla):
        """Emit packing code for every sendable parameter; accumulates a
        run-time offset expression once a variable-length arg is seen."""
        extra_offset = None

        for p in f.parameterIterateGlxSend( not skip_vla ):
            if p.name != f.img_reset:
                self.common_emit_one_arg(p, pc, adjust, extra_offset)

                if p.is_variable_length():
                    temp = p.size_string()
                    if extra_offset:
                        extra_offset += " + %s" % (temp)
                    else:
                        extra_offset = temp
        return

    def pixel_emit_args(self, f, pc, large):
        """Emit the arguments for a pixel function.  This differs from
        common_emit_args in that pixel functions may require padding
        be inserted (i.e., for the missing width field for
        TexImage1D), and they may also require a 'NULL image' flag
        be inserted before the image data."""

        if large:
            adjust = 8          # large commands carry an 8-byte header
        else:
            adjust = 4

        for param in f.parameterIterateGlxSend():
            if not param.is_image():
                self.common_emit_one_arg(param, pc, adjust, None)

                if f.pad_after(param):
                    print '(void) memcpy((void *)(%s + %u), zero, 4);' % (pc, (param.offset + param.size()) + adjust)

            else:
                [dim, width, height, depth, extent] = param.get_dimensions()
                if f.glx_rop == ~0:
                    dim_str = "dim"     # shared stub: dimension is a parameter
                else:
                    dim_str = str(dim)

                if param.is_padding:
                    print '(void) memset((void *)(%s + %u), 0, %s);' \
                        % (pc, (param.offset - 4) + adjust, param.size_string() )

                if param.img_null_flag:
                    if large:
                        print '(void) memcpy((void *)(%s + %u), zero, 4);' % (pc, (param.offset - 4) + adjust)
                    else:
                        print '(void) memcpy((void *)(%s + %u), (void *)((%s == NULL) ? one : zero), 4);' % (pc, (param.offset - 4) + adjust, param.name)

                pixHeaderPtr = "%s + %u" % (pc, adjust)
                pcPtr = "%s + %u" % (pc, param.offset + adjust)

                if not large:
                    if param.img_send_null:
                        condition = '(compsize > 0) && (%s != NULL)' % (param.name)
                    else:
                        condition = 'compsize > 0'

                    print 'if (%s) {' % (condition)
                    print '    (*gc->fillImage)(gc, %s, %s, %s, %s, %s, %s, %s, %s, %s);' % (dim_str, width, height, depth, param.img_format, param.img_type, param.name, pcPtr, pixHeaderPtr)
                    print '} else {'
                    print '    (void) memcpy( %s, default_pixel_store_%uD, default_pixel_store_%uD_size );' % (pixHeaderPtr, dim, dim)
                    print '}'
                else:
                    print '__glXSendLargeImage(gc, compsize, %s, %s, %s, %s, %s, %s, %s, %s, %s);' % (dim_str, width, height, depth, param.img_format, param.img_type, param.name, pcPtr, pixHeaderPtr)

        return

    def large_emit_begin(self, f, op_name = None):
        """Emit the prologue for a 'large' render command: flush the
        buffer and write the extended length + opcode header."""
        if not op_name:
            op_name = f.opcode_real_name()

        print 'const GLint op = %s;' % (op_name)
        print 'const GLuint cmdlenLarge = cmdlen + 4;'
        print 'GLubyte * const pc = __glXFlushRenderBuffer(gc, gc->pc);'
        print '(void) memcpy((void *)(pc + 0), (void *)(&cmdlenLarge), 4);'
        print '(void) memcpy((void *)(pc + 4), (void *)(&op), 4);'
        return

    def common_func_print_just_start(self, f, name):
        """Emit shared function prologue (gc/dpy/retval declarations,
        cmdlen calculation, counter validation).  Returns 1 if a guard
        'if' was opened (caller must emit the closing brace)."""
        print '    struct glx_context * const gc = __glXGetCurrentContext();'

        # The only reason that single and vendor private commands need
        # a variable called 'dpy' is becuase they use the SyncHandle
        # macro.  For whatever brain-dead reason, that macro is hard-
        # coded to use a variable called 'dpy' instead of taking a
        # parameter.

        # FIXME Simplify the logic related to skip_condition and
        # FIXME condition_list in this function.  Basically, remove
        # FIXME skip_condition, and just append the "dpy != NULL" type
        # FIXME condition to condition_list from the start.  The only
        # FIXME reason it's done in this confusing way now is to
        # FIXME minimize the diffs in the generated code.

        if not f.glx_rop:
            for p in f.parameterIterateOutputs():
                if p.is_image() and (p.img_format != "GL_COLOR_INDEX" or p.img_type != "GL_BITMAP"):
                    print '    const __GLXattribute * const state = gc->client_state_private;'
                    break

            print '    Display * const dpy = gc->currentDpy;'
            skip_condition = "dpy != NULL"
        elif f.can_be_large:
            skip_condition = "gc->currentDpy != NULL"
        else:
            skip_condition = None

        if f.return_type != 'void':
            print '    %s retval = (%s) 0;' % (f.return_type, f.return_type)

        # XCB builds compute the packet size themselves, so the classic
        # cmdlen calculation is compiled out for non-vendor-priv names.
        if name != None and name not in f.glx_vendorpriv_names:
            print '#ifndef USE_XCB'
        self.emit_packet_size_calculation(f, 0)
        if name != None and name not in f.glx_vendorpriv_names:
            print '#endif'

        condition_list = []
        for p in f.parameterIterateCounters():
            condition_list.append( "%s >= 0" % (p.name) )
            # 'counter' parameters cannot be negative
            print "    if (%s < 0) {" % p.name
            print "        __glXSetError(gc, GL_INVALID_VALUE);"
            if f.return_type != 'void':
                print "        return 0;"
            else:
                print "        return;"
            print "    }"

        if skip_condition:
            condition_list.append( skip_condition )

        if len( condition_list ) > 0:
            if len( condition_list ) > 1:
                skip_condition = "(%s)" % (string.join( condition_list, ") && (" ))
            else:
                skip_condition = "%s" % (condition_list.pop(0))

            print '    if (__builtin_expect(%s, 1)) {' % (skip_condition)
            return 1
        else:
            return 0

    def printSingleFunction(self, f, name):
        """Emit the body of a GLXSingle / vendor-private command,
        including the XCB path and reply handling."""
        self.common_func_print_just_start(f, name)

        if self.debug:
            print '        printf( "Enter %%s...\\n", "gl%s" );' % (f.name)

        if name not in f.glx_vendorpriv_names:

            # XCB specific:
            print '#ifdef USE_XCB'
            if self.debug:
                print '        printf("\\tUsing XCB.\\n");'
            print '        xcb_connection_t *c = XGetXCBConnection(dpy);'
            print '        (void) __glXFlushRenderBuffer(gc, gc->pc);'
            xcb_name = 'xcb_glx%s' % convertStringForXCB(name)

            iparams=[]
            extra_iparams = []
            output = None
            for p in f.parameterIterator():
                if p.is_output:
                    output = p

                    if p.is_image():
                        if p.img_format != "GL_COLOR_INDEX" or p.img_type != "GL_BITMAP":
                            extra_iparams.append("state->storePack.swapEndian")
                        else:
                            extra_iparams.append("0")

                        # Hardcode this in.  lsb_first param (apparently always GL_FALSE)
                        # also present in GetPolygonStipple, but taken care of above.
                        if xcb_name == "xcb_glx_read_pixels":
                            extra_iparams.append("0")
                else:
                    iparams.append(p.name)

            xcb_request = '%s(%s)' % (xcb_name, ", ".join(["c", "gc->currentContextTag"] + iparams + extra_iparams))

            if f.needs_reply():
                print '        %s_reply_t *reply = %s_reply(c, %s, NULL);' % (xcb_name, xcb_name, xcb_request)
                if output and f.reply_always_array:
                    print '        (void)memcpy(%s, %s_data(reply), %s_data_length(reply) * sizeof(%s));' % (output.name, xcb_name, xcb_name, output.get_base_type_string())
                elif output and not f.reply_always_array:
                    if not output.is_image():
                        print '        if (%s_data_length(reply) == 0)' % (xcb_name)
                        print '            (void)memcpy(%s, &reply->datum, sizeof(reply->datum));' % (output.name)
                        print '        else'
                        print '            (void)memcpy(%s, %s_data(reply), %s_data_length(reply) * sizeof(%s));' % (output.name, xcb_name, xcb_name, output.get_base_type_string())

                if f.return_type != 'void':
                    print '        retval = reply->ret_val;'
                print '        free(reply);'
            else:
                print '        ' + xcb_request + ';'
            print '#else'
            # End of XCB specific.

        if f.parameters != []:
            pc_decl = "GLubyte const * pc ="
        else:
            pc_decl = "(void)"

        if name in f.glx_vendorpriv_names:
            print '        %s __glXSetupVendorRequest(gc, %s, %s, cmdlen);' % (pc_decl, f.opcode_real_name(), f.opcode_vendor_name(name))
        else:
            print '        %s __glXSetupSingleRequest(gc, %s, cmdlen);' % (pc_decl, f.opcode_name())

        self.common_emit_args(f, "pc", 0, 0)

        images = f.get_images()

        for img in images:
            if img.is_output:
                o = f.command_fixed_length() - 4
                print '        *(int32_t *)(pc + %u) = 0;' % (o)
                if img.img_format != "GL_COLOR_INDEX" or img.img_type != "GL_BITMAP":
                    print '        * (int8_t *)(pc + %u) = state->storePack.swapEndian;' % (o)

                if f.img_reset:
                    print '        * (int8_t *)(pc + %u) = %s;' % (o + 1, f.img_reset)

        return_name = ''
        if f.needs_reply():
            if f.return_type != 'void':
                return_name = " retval"
                return_str = " retval = (%s)" % (f.return_type)
            else:
                return_str = " (void)"

            got_reply = 0

            for p in f.parameterIterateOutputs():
                if p.is_image():
                    [dim, w, h, d, junk] = p.get_dimensions()
                    if f.dimensions_in_reply:
                        print "        __glXReadPixelReply(dpy, gc, %u, 0, 0, 0, %s, %s, %s, GL_TRUE);" % (dim, p.img_format, p.img_type, p.name)
                    else:
                        print "        __glXReadPixelReply(dpy, gc, %u, %s, %s, %s, %s, %s, %s, GL_FALSE);" % (dim, w, h, d, p.img_format, p.img_type, p.name)

                    got_reply = 1
                else:
                    if f.reply_always_array:
                        aa = "GL_TRUE"
                    else:
                        aa = "GL_FALSE"

                    # gl_parameter.size() returns the size
                    # of the entire data item.  If the
                    # item is a fixed-size array, this is
                    # the size of the whole array.  This
                    # is not what __glXReadReply wants.  It
                    # wants the size of a single data
                    # element in the reply packet.
                    # Dividing by the array size (1 for
                    # non-arrays) gives us this.

                    s = p.size() / p.get_element_count()
                    print "       %s __glXReadReply(dpy, %s, %s, %s);" % (return_str, s, p.name, aa)
                    got_reply = 1

            # If a reply wasn't read to fill an output parameter,
            # read a NULL reply to get the return value.

            if not got_reply:
                print "       %s __glXReadReply(dpy, 0, NULL, GL_FALSE);" % (return_str)

        elif self.debug:
            # Only emit the extra glFinish call for functions
            # that don't already require a reply from the server.
            print '        __indirect_glFinish();'

        if self.debug:
            print '        printf( "Exit %%s.\\n", "gl%s" );' % (name)

        print '        UnlockDisplay(dpy); SyncHandle();'

        if name not in f.glx_vendorpriv_names:
            print '#endif /* USE_XCB */'

        print '    }'
        print '    return%s;' % (return_name)
        return

    def printPixelFunction(self, f):
        """Emit the body of a pixel (image-carrying) render command,
        either as a call to a shared stub or inline."""
        if self.pixel_stubs.has_key( f.name ):
            # Normally gl_function::get_parameter_string could be
            # used.  However, this call needs to have the missing
            # dimensions (e.g., a fake height value for
            # glTexImage1D) added in.

            p_string = ""
            for param in f.parameterIterateGlxSend():
                if param.is_padding:
                    continue

                p_string += ", " + param.name

                if param.is_image():
                    [dim, junk, junk, junk, junk] = param.get_dimensions()

                if f.pad_after(param):
                    p_string += ", 1"

            print '    %s(%s, %u%s );' % (self.pixel_stubs[f.name] , f.opcode_name(), dim, p_string)
            return


        if self.common_func_print_just_start(f, None):
            trailer = "    }"
        else:
            trailer = None

        if f.can_be_large:
            print 'if (cmdlen <= gc->maxSmallRenderCommandSize) {'
            print '    if ( (gc->pc + cmdlen) > gc->bufEnd ) {'
            print '        (void) __glXFlushRenderBuffer(gc, gc->pc);'
            print '    }'

        if f.glx_rop == ~0:
            opcode = "opcode"   # shared stub: opcode arrives as a parameter
        else:
            opcode = f.opcode_real_name()

        print 'emit_header(gc->pc, %s, cmdlen);' % (opcode)

        self.pixel_emit_args( f, "gc->pc", 0 )
        print 'gc->pc += cmdlen;'
        print 'if (gc->pc > gc->limit) { (void) __glXFlushRenderBuffer(gc, gc->pc); }'

        if f.can_be_large:
            print '}'
            print 'else {'

            self.large_emit_begin(f, opcode)
            self.pixel_emit_args(f, "pc", 1)

            print '}'

        if trailer: print trailer
        return

    def printRenderFunction(self, f):
        """Emit the body of an ordinary (non-pixel) render command."""
        # There is a class of GL functions that take a single pointer
        # as a parameter.  This pointer points to a fixed-size chunk
        # of data, and the protocol for this functions is very
        # regular.  Since they are so regular and there are so many
        # of them, special case them with generic functions.  On
        # x86, this saves about 26KB in the libGL.so binary.

        if f.variable_length_parameter() == None and len(f.parameters) == 1:
            p = f.parameters[0]
            if p.is_pointer():
                cmdlen = f.command_fixed_length()
                if cmdlen in self.generic_sizes:
                    print '    generic_%u_byte( %s, %s );' % (cmdlen, f.opcode_real_name(), p.name)
                    return

        if self.common_func_print_just_start(f, None):
            trailer = "    }"
        else:
            trailer = None

        if self.debug:
            print 'printf( "Enter %%s...\\n", "gl%s" );' % (f.name)

        if f.can_be_large:
            print 'if (cmdlen <= gc->maxSmallRenderCommandSize) {'
            print '    if ( (gc->pc + cmdlen) > gc->bufEnd ) {'
            print '        (void) __glXFlushRenderBuffer(gc, gc->pc);'
            print '    }'

        print 'emit_header(gc->pc, %s, cmdlen);' % (f.opcode_real_name())

        self.common_emit_args(f, "gc->pc", 4, 0)
        print 'gc->pc += cmdlen;'
        print 'if (__builtin_expect(gc->pc > gc->limit, 0)) { (void) __glXFlushRenderBuffer(gc, gc->pc); }'

        if f.can_be_large:
            print '}'
            print 'else {'

            self.large_emit_begin(f)
            self.common_emit_args(f, "pc", 8, 1)

            p = f.variable_length_parameter()
            print '    __glXSendLargeCommand(gc, pc, %u, %s, %s);' % (p.offset + 8, p.name, p.size_string())
            print '}'

        if self.debug:
            print '__indirect_glFinish();'
            print 'printf( "Exit %%s.\\n", "gl%s" );' % (f.name)

        if trailer: print trailer
        return
class PrintGlxProtoInit_c(gl_XML.gl_print_base):
    """Generator for indirect_init.c: builds the dispatch table that maps
    every GL entry point to its __indirect_gl* implementation."""

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)

        self.name = "glX_proto_send.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright 1998-1999  Precision Insight, Inc., Cedar Park, Texas.
(C) Copyright IBM Corporation 2004""", "PRECISION INSIGHT, IBM")
        return

    def printRealHeader(self):
        # Fixed C prologue: NoOp placeholder plus table allocation; the
        # per-function assignments follow from printBody.
        print """/**
 * \\file indirect_init.c
 * Initialize indirect rendering dispatch table.
 *
 * \\author Kevin E. Martin <kevin@precisioninsight.com>
 * \\author Brian Paul <brian@precisioninsight.com>
 * \\author Ian Romanick <idr@us.ibm.com>
 */

#include "indirect_init.h"
#include "indirect.h"
#include "glapi.h"


/**
 * No-op function used to initialize functions that have no GLX protocol
 * support.
 */
static int NoOp(void)
{
    return 0;
}

/**
 * Create and initialize a new GL dispatch table.  The table is
 * initialized with GLX indirect rendering protocol functions.
 */
struct _glapi_table * __glXNewIndirectAPI( void )
{
    struct _glapi_table *glAPI;
    GLuint entries;

    entries = _glapi_get_dispatch_table_size();
    glAPI = (struct _glapi_table *) Xmalloc(entries * sizeof(void *));

    /* first, set all entries to point to no-op functions */
    {
       int i;
       void **dispatch = (void **) glAPI;
       for (i = 0; i < entries; i++) {
          dispatch[i] = (void *) NoOp;
       }
    }

    /* now, initialize the entries we understand */"""

    def printRealFooter(self):
        print """
    return glAPI;
}
"""
        return

    def printBody(self, api):
        """Emit one dispatch-table assignment per indirect-capable
        function, grouped under a comment naming its category."""
        for [name, number] in api.categoryIterate():
            if number != None:
                preamble = '\n    /* %3u. %s */\n\n' % (int(number), name)
            else:
                preamble = '\n    /* %s */\n\n' % (name)

            for func in api.functionIterateByCategory(name):
                if func.client_supported_for_indirect():
                    # preamble is printed only before the first function
                    # of the category, then cleared.
                    print '%s    glAPI->%s = __indirect_gl%s;' % (preamble, func.name, func.name)
                    preamble = ''

        return
class PrintGlxProtoInit_h(gl_XML.gl_print_base):
    """Generator for indirect.h: prototypes for all __indirect_gl*
    functions and the shared protocol helper routines."""

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)

        self.name = "glX_proto_send.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright 1998-1999  Precision Insight, Inc., Cedar Park, Texas.
(C) Copyright IBM Corporation 2004""", "PRECISION INSIGHT, IBM")
        self.header_tag = "_INDIRECT_H_"
        self.last_category = ""
        return

    def printRealHeader(self):
        print """/**
 * \\file
 * Prototypes for indirect rendering functions.
 *
 * \\author Kevin E. Martin <kevin@precisioninsight.com>
 * \\author Ian Romanick <idr@us.ibm.com>
 */
"""
        self.printVisibility( "HIDDEN", "hidden" )
        self.printFastcall()
        self.printNoinline()

        print """
#include "glxclient.h"

extern HIDDEN NOINLINE CARD32 __glXReadReply( Display *dpy, size_t size,
    void * dest, GLboolean reply_is_always_array );

extern HIDDEN NOINLINE void __glXReadPixelReply( Display *dpy,
    struct glx_context * gc, unsigned max_dim, GLint width, GLint height,
    GLint depth, GLenum format, GLenum type, void * dest,
    GLboolean dimensions_in_reply );

extern HIDDEN NOINLINE FASTCALL GLubyte * __glXSetupSingleRequest(
    struct glx_context * gc, GLint sop, GLint cmdlen );

extern HIDDEN NOINLINE FASTCALL GLubyte * __glXSetupVendorRequest(
    struct glx_context * gc, GLint code, GLint vop, GLint cmdlen );
"""

    def printBody(self, api):
        """Emit a prototype per GLX function, plus an extra gl* alias
        prototype for names whose protocol differs per entry point."""
        for func in api.functionIterateGlx():
            params = func.get_parameter_string()

            print 'extern HIDDEN %s __indirect_gl%s(%s);' % (func.return_type, func.name, params)

            for n in func.entry_points:
                if func.has_different_protocol(n):
                    asdf = func.static_glx_name(n)
                    if asdf not in func.static_entry_points:
                        print 'extern HIDDEN %s gl%s(%s);' % (func.return_type, asdf, params)
                        # give it a easy-to-remember name
                        if func.client_handcode:
                            print '#define gl_dispatch_stub_%s gl%s' % (n, asdf)
                    else:
                        print 'GLAPI %s GLAPIENTRY gl%s(%s);' % (func.return_type, asdf, params)

                    break

        print ''
        print '#ifdef GLX_SHARED_GLAPI'
        print 'extern HIDDEN void (*__indirect_get_proc_address(const char *name))(void);'
        print '#endif'
def show_usage():
print "Usage: %s [-f input_file_name] [-m output_mode] [-d]" % sys.argv[0]
print " -m output_mode Output mode can be one of 'proto', 'init_c' or 'init_h'."
print " -d Enable extra debug information in the generated code."
sys.exit(1)
if __name__ == '__main__':
    # Default API description file; overridden with -f.
    file_name = "gl_API.xml"

    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:m:d")
    except Exception,e:
        # Bad command line -> usage message and exit(1).
        show_usage()

    debug = 0
    mode = "proto"

    for (arg,val) in args:
        if arg == "-f":
            file_name = val
        elif arg == "-m":
            mode = val
        elif arg == "-d":
            debug = 1

    # Select the generator class for the requested output mode.
    if mode == "proto":
        printer = PrintGlxProtoStubs()
    elif mode == "init_c":
        printer = PrintGlxProtoInit_c()
    elif mode == "init_h":
        printer = PrintGlxProtoInit_h()
    else:
        show_usage()

    printer.debug = debug
    api = gl_XML.parse_GL_API( file_name, glX_XML.glx_item_factory() )

    printer.Print( api )
|
ayoubg/gem5-graphics
|
Mesa-7.11.2_GPGPU-Sim/src/mapi/glapi/gen/glX_proto_send.py
|
Python
|
bsd-3-clause
| 32,013
|
[
"Brian"
] |
c507198c59dde04f4f860693d7bd4b2627dd00a38802aec339967dc8a0c6535a
|
import os
import shutil
import unittest
from unittest import TestCase
from pymatgen.io.qchem.inputs import QCInput
from custodian.qchem.handlers import QChemErrorHandler
__author__ = "Samuel Blau, Brandon Woods, Shyam Dwaraknath, Ryan Kingsbury"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Alpha"
__date__ = "3/26/18"
__credits__ = "Xiaohui Qu"

# Directory holding the static QChem input/output fixture files used by
# these tests.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files", "qchem", "new_test_files")
# Scratch directory created by setUp() and used as the working directory
# for each test.
scr_dir = os.path.join(test_dir, "scr")
# Working directory at import time (presumably restored by a tearDown
# outside this view -- TODO confirm).
cwd = os.getcwd()
class QChemErrorHandlerTest(TestCase):
def setUp(self):
os.makedirs(scr_dir)
os.chdir(scr_dir)
def _check_equivalent_inputs(self, input1, input2):
self.assertEqual(QCInput.from_file(input1).molecule, QCInput.from_file(input2).molecule)
self.assertEqual(QCInput.from_file(input1).rem, QCInput.from_file(input2).rem)
def test_unable_to_determine_lamda(self):
for ii in range(2):
shutil.copyfile(
os.path.join(test_dir, "unable_to_determine_lamda.qin." + str(ii)),
os.path.join(scr_dir, "unable_to_determine_lamda.qin." + str(ii)),
)
shutil.copyfile(
os.path.join(test_dir, "unable_to_determine_lamda.qout." + str(ii)),
os.path.join(scr_dir, "unable_to_determine_lamda.qout." + str(ii)),
)
h = QChemErrorHandler(
input_file="unable_to_determine_lamda.qin.0",
output_file="unable_to_determine_lamda.qout.0",
)
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["unable_to_determine_lamda"])
self.assertEqual(d["actions"], [{"molecule": "molecule_from_last_geometry"}])
self._check_equivalent_inputs("unable_to_determine_lamda.qin.0", "unable_to_determine_lamda.qin.1")
def test_linear_dependent_basis_and_FileMan(self):
for ii in range(1, 3):
shutil.copyfile(
os.path.join(test_dir, "unable_to_determine_lamda.qin." + str(ii)),
os.path.join(scr_dir, "unable_to_determine_lamda.qin." + str(ii)),
)
shutil.copyfile(
os.path.join(test_dir, "unable_to_determine_lamda.qout." + str(ii)),
os.path.join(scr_dir, "unable_to_determine_lamda.qout." + str(ii)),
)
h = QChemErrorHandler(
input_file="unable_to_determine_lamda.qin.1",
output_file="unable_to_determine_lamda.qout.1",
)
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["premature_end_FileMan_error"])
self.assertEqual(d["warnings"]["linear_dependence"], True)
self.assertEqual(d["actions"], [{"thresh": "14"}])
def test_failed_to_transform(self):
for ii in range(2):
shutil.copyfile(
os.path.join(test_dir, "qunino_vinyl.qin." + str(ii)),
os.path.join(scr_dir, "qunino_vinyl.qin." + str(ii)),
)
shutil.copyfile(
os.path.join(test_dir, "qunino_vinyl.qout." + str(ii)),
os.path.join(scr_dir, "qunino_vinyl.qout." + str(ii)),
)
h = QChemErrorHandler(input_file="qunino_vinyl.qin.0", output_file="qunino_vinyl.qout.0")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["failed_to_transform_coords"])
self.assertEqual(d["actions"], [{"sym_ignore": True}, {"symmetry": False}])
self._check_equivalent_inputs("qunino_vinyl.qin.0", "qunino_vinyl.qin.1")
h = QChemErrorHandler(input_file="qunino_vinyl.qin.1", output_file="qunino_vinyl.qout.1")
self.assertEqual(h.check(), False)
def test_scf_failed_to_converge(self):
for ii in range(3):
shutil.copyfile(
os.path.join(test_dir, "crowd_gradient.qin." + str(ii)),
os.path.join(scr_dir, "crowd_gradient.qin." + str(ii)),
)
shutil.copyfile(
os.path.join(test_dir, "crowd_gradient.qout." + str(ii)),
os.path.join(scr_dir, "crowd_gradient.qout." + str(ii)),
)
h = QChemErrorHandler(input_file="crowd_gradient.qin.0", output_file="crowd_gradient.qout.0")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["SCF_failed_to_converge"])
self.assertEqual(d["actions"], [{"max_scf_cycles": 200}])
self._check_equivalent_inputs("crowd_gradient.qin.0", "crowd_gradient.qin.1")
h = QChemErrorHandler(input_file="crowd_gradient.qin.1", output_file="crowd_gradient.qout.1")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["SCF_failed_to_converge"])
self.assertEqual(d["actions"], [{"thresh": "14"}])
def test_out_of_opt_cycles(self):
shutil.copyfile(
os.path.join(test_dir, "crowd_gradient.qin.2"),
os.path.join(scr_dir, "crowd_gradient.qin.2"),
)
shutil.copyfile(
os.path.join(test_dir, "crowd_gradient.qout.2"),
os.path.join(scr_dir, "crowd_gradient.qout.2"),
)
shutil.copyfile(
os.path.join(test_dir, "crowd_gradient.qin.3"),
os.path.join(scr_dir, "crowd_gradient.qin.3"),
)
h = QChemErrorHandler(input_file="crowd_gradient.qin.2", output_file="crowd_gradient.qout.2")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["out_of_opt_cycles"])
self.assertEqual(
d["actions"],
[{"geom_max_cycles:": 200}, {"molecule": "molecule_from_last_geometry"}],
)
self._check_equivalent_inputs("crowd_gradient.qin.2", "crowd_gradient.qin.3")
def test_advanced_out_of_opt_cycles(self):
shutil.copyfile(
os.path.join(test_dir, "2564_complete/error1/mol.qin"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "2564_complete/error1/mol.qout"),
os.path.join(scr_dir, "mol.qout"),
)
shutil.copyfile(
os.path.join(test_dir, "2564_complete/mol.qin.opt_0"),
os.path.join(scr_dir, "mol.qin.opt_0"),
)
h = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["out_of_opt_cycles"])
self.assertEqual(d["actions"], [{"molecule": "molecule_from_last_geometry"}])
self._check_equivalent_inputs("mol.qin.opt_0", "mol.qin")
self.assertEqual(h.opt_error_history[0], "more_bonds")
shutil.copyfile(
os.path.join(test_dir, "2564_complete/mol.qin.opt_0"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "2564_complete/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout"),
)
h.check()
self.assertEqual(h.opt_error_history, [])
def test_advanced_out_of_opt_cycles1(self):
shutil.copyfile(
os.path.join(test_dir, "2620_complete/mol.qin.opt_0"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "2620_complete/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout"),
)
h = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout")
self.assertEqual(h.check(), False)
def test_failed_to_read_input(self):
shutil.copyfile(
os.path.join(test_dir, "unable_lamda_weird.qin"),
os.path.join(scr_dir, "unable_lamda_weird.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "unable_lamda_weird.qout"),
os.path.join(scr_dir, "unable_lamda_weird.qout"),
)
h = QChemErrorHandler(input_file="unable_lamda_weird.qin", output_file="unable_lamda_weird.qout")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["failed_to_read_input"])
self.assertEqual(d["actions"], [{"rerun_job_no_changes": True}])
self._check_equivalent_inputs("unable_lamda_weird.qin.last", "unable_lamda_weird.qin")
def test_input_file_error(self):
shutil.copyfile(
os.path.join(test_dir, "bad_input.qin"),
os.path.join(scr_dir, "bad_input.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "bad_input.qout"),
os.path.join(scr_dir, "bad_input.qout"),
)
h = QChemErrorHandler(input_file="bad_input.qin", output_file="bad_input.qout")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["input_file_error"])
self.assertEqual(d["actions"], None)
def test_basis_not_supported(self):
    """An unsupported basis set is reported with no corrective actions."""
    shutil.copyfile(
        os.path.join(test_dir, "basis_not_supported.qin"),
        os.path.join(scr_dir, "basis_not_supported.qin"),
    )
    shutil.copyfile(
        os.path.join(test_dir, "basis_not_supported.qout"),
        os.path.join(scr_dir, "basis_not_supported.qout"),
    )
    h = QChemErrorHandler(input_file="basis_not_supported.qin", output_file="basis_not_supported.qout")
    h.check()
    d = h.correct()
    self.assertEqual(d["errors"], ["basis_not_supported"])
    self.assertEqual(d["actions"], None)
def test_NLebdevPts(self):
    """An NLebdevPts error is corrected by lowering esp_surface_density to 250."""
    shutil.copyfile(
        os.path.join(test_dir, "lebdevpts.qin"),
        os.path.join(scr_dir, "lebdevpts.qin"),
    )
    shutil.copyfile(
        os.path.join(test_dir, "lebdevpts.qout"),
        os.path.join(scr_dir, "lebdevpts.qout"),
    )
    h = QChemErrorHandler(input_file="lebdevpts.qin", output_file="lebdevpts.qout")
    h.check()
    d = h.correct()
    self.assertEqual(d["errors"], ["NLebdevPts"])
    self.assertEqual(d["actions"], [{"esp_surface_density": "250"}])
def test_read_error(self):
    """A molecule read error should trigger an unchanged rerun of the job."""
    shutil.copyfile(
        os.path.join(test_dir, "molecule_read_error/mol.qin"),
        os.path.join(scr_dir, "mol.qin"),
    )
    shutil.copyfile(
        os.path.join(test_dir, "molecule_read_error/mol.qout"),
        os.path.join(scr_dir, "mol.qout"),
    )
    h = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout")
    h.check()
    d = h.correct()
    self.assertEqual(d["errors"], ["read_molecule_error"])
    self.assertEqual(d["actions"], [{"rerun_job_no_changes": True}])
    # Input backed up as .last and left byte-equivalent.
    self._check_equivalent_inputs("mol.qin.last", "mol.qin")
def test_never_called_qchem_error(self):
    """A failure before Q-Chem ever ran (MPI error) should trigger an unchanged rerun."""
    shutil.copyfile(
        os.path.join(test_dir, "mpi_error/mol.qin"),
        os.path.join(scr_dir, "mol.qin"),
    )
    shutil.copyfile(
        os.path.join(test_dir, "mpi_error/mol.qout"),
        os.path.join(scr_dir, "mol.qout"),
    )
    h = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout")
    h.check()
    d = h.correct()
    self.assertEqual(d["errors"], ["never_called_qchem"])
    self.assertEqual(d["actions"], [{"rerun_job_no_changes": True}])
    self._check_equivalent_inputs("mol.qin.last", "mol.qin")
def test_OOS_read_hess(self):
    """Out-of-opt-cycles with a read-in Hessian: retry from the last geometry
    with the geom_opt_hessian setting removed."""
    shutil.copyfile(
        os.path.join(test_dir, "OOS_read_hess.qin"),
        os.path.join(scr_dir, "mol.qin"),
    )
    shutil.copyfile(
        os.path.join(test_dir, "OOS_read_hess.qout"),
        os.path.join(scr_dir, "mol.qout"),
    )
    h = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout")
    h.check()
    d = h.correct()
    self.assertEqual(d["errors"], ["out_of_opt_cycles"])
    self.assertEqual(
        d["actions"],
        [
            {"molecule": "molecule_from_last_geometry"},
            {"geom_opt_hessian": "deleted"},
        ],
    )
    # The rewritten input must match the reference next-step input.
    self._check_equivalent_inputs(os.path.join(test_dir, "OOS_read_hess_next.qin"), "mol.qin")
def tearDown(self):
    # Restore the original working directory and remove the per-test scratch area.
    # NOTE(review): cwd/scr_dir are module-level names set up outside this view.
    os.chdir(cwd)
    shutil.rmtree(scr_dir)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
materialsproject/custodian
|
custodian/qchem/tests/test_handlers.py
|
Python
|
mit
| 12,265
|
[
"pymatgen"
] |
050a262f80402f11fc10b0f315d2d896ecc5dc61faa102695cf2ad1caf4b1e87
|
"""Shared functionality useful across multiple structural variant callers.
Handles exclusion regions and preparing discordant regions.
"""
import collections
import os
import numpy
import pybedtools
import pysam
import toolz as tz
import yaml
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.bam import callable
from bcbio.ngsalign import postalign
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import shared, config_utils
from bcbio.provenance import do
from bcbio.structural import regions
from bcbio.variation import bedutils, population
# ## Case/control
def find_case_control(items):
    """Split a population of samples into cases and controls.

    Samples whose affected status is 1 are routed to controls; everything
    else is treated as a case. Returns (cases, controls).
    """
    cases, controls = [], []
    for sample in items:
        bucket = controls if population.get_affected_status(sample) == 1 else cases
        bucket.append(sample)
    return cases, controls
# ## Prepare exclusion regions (repeats, telomeres, centromeres)
def _get_sv_exclude_file(items):
    """Return the configured SV repeat-exclusion BED if it exists on disk, else None."""
    sv_bed = utils.get_in(items[0], ("genome_resources", "variation", "sv_repeat"))
    if not sv_bed or not os.path.exists(sv_bed):
        return None
    return sv_bed
def _get_variant_regions(items):
    """Retrieve variant regions defined in any of the input items.

    Skips samples with genome-wide coverage and samples without configured
    regions. Returns a concrete list (not a lazy ``filter`` object) so that
    callers such as ``has_variant_regions`` can call ``len()`` on the result
    under Python 3.
    """
    out = []
    for data in items:
        if tz.get_in(["config", "algorithm", "coverage_interval"], data) != "genome":
            vr = tz.get_in(("config", "algorithm", "variant_regions"), data)
            if vr is not None:
                out.append(vr)
    return out
def has_variant_regions(items, base_file, chrom=None):
    """Determine if we should process this chromosome: needs variant regions defined.

    Returns True when no chromosome is given or when no variant-region BED
    files are configured at all.
    """
    if chrom:
        all_vrs = _get_variant_regions(items)
        if len(all_vrs) > 0:
            test = shared.subset_variant_regions(tz.first(all_vrs), chrom, base_file, items)
            # NOTE(review): subset_variant_regions returning the bare chromosome
            # name is treated here as "no variant regions on this chromosome";
            # confirm against shared.subset_variant_regions semantics.
            if test == chrom:
                return False
    return True
def remove_exclude_regions(orig_bed, base_file, items, remove_entire_feature=False):
    """Remove centromere and short end regions from an existing BED file of regions to target.

    orig_bed -- BED file of candidate target regions.
    base_file -- path used to anchor the output file name.
    items -- sample dictionaries used to build the exclusion BED.
    remove_entire_feature -- when True, drop a whole feature on any overlap
        (bedtools subtract -A) instead of trimming just the overlapping part.

    Returns the filtered BED, falling back to the original when no output
    file was produced (e.g. everything was excluded).
    """
    out_bed = os.path.join("%s-noexclude.bed" % (utils.splitext_plus(base_file)[0]))
    exclude_bed = prepare_exclude_file(items, base_file)
    with file_transaction(items[0], out_bed) as tx_out_bed:
        pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(exclude_bed),
                                              A=remove_entire_feature, nonamecheck=True).saveas(tx_out_bed)
    if utils.file_exists(out_bed):
        return out_bed
    else:
        return orig_bed
def get_base_cnv_regions(data, work_dir):
    """Retrieve set of target regions for CNV analysis.

    Subsets to extended transcript regions for WGS experiments to avoid
    long runtimes. Returns a cleaned BED file path.
    """
    cov_interval = dd.get_coverage_interval(data)
    base_regions = regions.get_sv_bed(data)
    # if we don't have a configured BED or regions to use for SV calling
    if not base_regions:
        # For genome calls, subset to regions within 10kb of genes
        if cov_interval == "genome":
            base_regions = regions.get_sv_bed(data, "transcripts1e4", work_dir)
            if base_regions:
                base_regions = remove_exclude_regions(base_regions, base_regions, [data])
        # Finally, default to the defined variant regions
        if not base_regions:
            base_regions = dd.get_variant_regions(data)
    return bedutils.clean_file(base_regions, data)
def prepare_exclude_file(items, base_file, chrom=None):
    """Prepare a BED file of regions to exclude from structural variant calling.

    Excludes high depth and centromere regions which contribute to long run times and
    false positive structural variant calls. The output is the complement of the
    wanted (callable) regions, optionally restricted to a single chromosome.
    """
    out_file = "%s-exclude%s.bed" % (utils.splitext_plus(base_file)[0], "-%s" % chrom if chrom else "")
    # Re-use an existing plain or bgzipped output from a previous run.
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with shared.bedtools_tmpdir(items[0]):
            # Get a bedtool for the full region if no variant regions
            want_bedtool = callable.get_ref_bedtool(tz.get_in(["reference", "fasta", "base"], items[0]),
                                                    items[0]["config"], chrom)
            if chrom:
                want_bedtool = pybedtools.BedTool(shared.subset_bed_by_chrom(want_bedtool.saveas().fn,
                                                                             chrom, items[0]))
            # Drop configured repeat regions from the wanted set.
            sv_exclude_bed = _get_sv_exclude_file(items)
            if sv_exclude_bed and len(want_bedtool) > 0:
                want_bedtool = want_bedtool.subtract(sv_exclude_bed, nonamecheck=True).saveas()
            # For whole-genome samples, also drop high-depth problem regions.
            if any(dd.get_coverage_interval(d) == "genome" for d in items):
                want_bedtool = pybedtools.BedTool(shared.remove_highdepth_regions(want_bedtool.saveas().fn, items))
            with file_transaction(items[0], out_file) as tx_out_file:
                # The exclusion set is the reference complement of the wanted regions.
                full_bedtool = callable.get_ref_bedtool(tz.get_in(["reference", "fasta", "base"], items[0]),
                                                        items[0]["config"])
                if len(want_bedtool) > 0:
                    full_bedtool.subtract(want_bedtool, nonamecheck=True).saveas(tx_out_file)
                else:
                    full_bedtool.saveas(tx_out_file)
    return out_file
def exclude_by_ends(in_file, exclude_file, data, in_params=None):
    """Exclude calls based on overlap of the ends with exclusion regions.

    Removes structural variants with either end being in a repeat: a large
    source of false positives.
    Parameters tuned based on removal of LCR overlapping false positives in DREAM
    synthetic 3 data.

    Returns (filtered BED path, number of removed records).
    """
    # Tunable thresholds; callers may override any of them via in_params.
    params = {"end_buffer": 50,
              "rpt_pct": 0.9,
              "total_rpt_pct": 0.2,
              "sv_pct": 0.5}
    if in_params:
        params.update(in_params)
    assert in_file.endswith(".bed")
    out_file = "%s-norepeats%s" % utils.splitext_plus(in_file)
    to_filter = collections.defaultdict(list)
    removed = 0
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            with shared.bedtools_tmpdir(data):
                # Collect repeat overlaps around both breakpoint ends.
                for coord, end_name in [(1, "end1"), (2, "end2")]:
                    base, ext = utils.splitext_plus(tx_out_file)
                    end_file = _create_end_file(in_file, coord, params, "%s-%s%s" % (base, end_name, ext))
                    to_filter = _find_to_filter(end_file, exclude_file, params, to_filter)
                with open(tx_out_file, "w") as out_handle:
                    with open(in_file) as in_handle:
                        for line in in_handle:
                            # Key format matches the name written by _create_end_file.
                            key = "%s:%s-%s" % tuple(line.strip().split("\t")[:3])
                            total_rpt_size = sum(to_filter.get(key, [0]))
                            if total_rpt_size <= (params["total_rpt_pct"] * params["end_buffer"]):
                                out_handle.write(line)
                            else:
                                removed += 1
    # NOTE(review): when the output is already up to date, `removed` is reported
    # as 0 regardless of what the earlier run filtered -- confirm callers only
    # use the count informationally.
    return out_file, removed
def _find_to_filter(in_file, exclude_file, params, to_exclude):
    """Identify regions in the end file that overlap the exclusion file.

    We look for ends with a large percentage in a repeat or where the end contains
    an entire repeat. Appends overlap sizes to `to_exclude`, keyed by the
    region's name field, and returns the updated mapping.
    """
    # -wao reports every input feature alongside its overlap size (0 when none).
    for feat in pybedtools.BedTool(in_file).intersect(pybedtools.BedTool(exclude_file), wao=True, nonamecheck=True):
        us_chrom, us_start, us_end, name, other_chrom, other_start, other_end, overlap = feat.fields
        if float(overlap) > 0:
            other_size = float(other_end) - float(other_start)
            # Fraction of the repeat covered, and fraction of our end covered.
            other_pct = float(overlap) / other_size
            us_pct = float(overlap) / (float(us_end) - float(us_start))
            if us_pct > params["sv_pct"] or (other_pct > params["rpt_pct"]):
                to_exclude[name].append(float(overlap))
    return to_exclude
def _create_end_file(in_file, coord, params, out_file):
with open(in_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
parts = line.strip().split("\t")
name = "%s:%s-%s" % tuple(parts[:3])
curpos = int(parts[coord])
if coord == 1:
start, end = curpos, curpos + params["end_buffer"]
else:
start, end = curpos - params["end_buffer"], curpos
if start > 0:
out_handle.write("\t".join([parts[0], str(start),
str(end), name])
+ "\n")
return out_file
def get_sv_chroms(items, exclude_file):
    """Retrieve chromosomes to process on, avoiding extra skipped chromosomes.

    A chromosome is skipped when an exclusion region anchored at position 0
    spans its entire BAM-reported length.
    """
    # Map chrom -> end coordinate of an exclusion region starting at 0.
    exclude_regions = {}
    for region in pybedtools.BedTool(exclude_file):
        if int(region.start) == 0:
            exclude_regions[region.chrom] = int(region.end)
    out = []
    with pysam.Samfile(items[0]["work_bam"], "rb") as pysam_work_bam:
        for chrom, length in zip(pysam_work_bam.references, pysam_work_bam.lengths):
            exclude_length = exclude_regions.get(chrom, 0)
            if exclude_length < length:
                out.append(chrom)
    return out
# ## Read preparation
def _extract_split_and_discordants(in_bam, work_dir, data):
    """Retrieve split-read and discordant alignments from input BAM file.

    Runs the external `extract-sv-reads` tool, producing <name>-sr.bam and
    <name>-disc.bam in work_dir, then indexes both. Returns the two paths.
    """
    sr_file = os.path.join(work_dir, "%s-sr.bam" % os.path.splitext(os.path.basename(in_bam))[0])
    disc_file = os.path.join(work_dir, "%s-disc.bam" % os.path.splitext(os.path.basename(in_bam))[0])
    if not utils.file_exists(sr_file) or not utils.file_exists(disc_file):
        with file_transaction(data, sr_file) as tx_sr_file:
            with file_transaction(data, disc_file) as tx_disc_file:
                cores = dd.get_num_cores(data)
                ref_file = dd.get_ref_file(data)
                cmd = ("extract-sv-reads -e --input-threads {cores} -T {ref_file} "
                       "-i {in_bam} -s {tx_sr_file} -d {tx_disc_file}")
                do.run(cmd.format(**locals()), "extract split and discordant reads", data)
    # Ensure both outputs are indexed, including the re-use case.
    for fname in [sr_file, disc_file]:
        bam.index(fname, data["config"])
    return sr_file, disc_file
def _find_existing_inputs(data):
    """Check for pre-calculated split reads and discordants done as part of alignment streaming.

    Looks first for <align>-sr.bam / <align>-disc.bam next to the aligned BAM,
    then for paths recorded in the sample data. Returns (sr, disc) paths or
    (None, None) when neither pair exists.
    """
    in_bam = dd.get_align_bam(data)
    base = os.path.splitext(in_bam)[0]
    sr_file = "%s-sr.bam" % base
    disc_file = "%s-disc.bam" % base
    if utils.file_exists(sr_file) and utils.file_exists(disc_file):
        return sr_file, disc_file
    sr_file = dd.get_sr_bam(data)
    disc_file = dd.get_disc_bam(data)
    if sr_file and utils.file_exists(sr_file) and disc_file and utils.file_exists(disc_file):
        return sr_file, disc_file
    return None, None
def get_split_discordants(data, work_dir):
    """Retrieve split and discordant reads, potentially calculating with extract_sv_reads as needed.

    Prefers files produced during alignment streaming; otherwise extracts them,
    writing next to the aligned BAM when that directory is writable.
    """
    align_bam = dd.get_align_bam(data)
    sr_bam, disc_bam = _find_existing_inputs(data)
    if sr_bam:
        return sr_bam, disc_bam
    align_dir = os.path.dirname(align_bam)
    if os.access(align_dir, os.W_OK | os.X_OK):
        work_dir = align_dir
    return _extract_split_and_discordants(align_bam, work_dir, data)
def get_cur_batch(items):
    """Retrieve name of the batch shared between all items in a group.

    Returns the single common batch name, None when the items share no batch,
    and raises ValueError when they overlap in more than one batch.
    """
    batches = []
    for data in items:
        batch = tz.get_in(["metadata", "batch"], data, [])
        batches.append(set(batch) if isinstance(batch, (list, tuple)) else set([batch]))
    # `reduce` is not a builtin on Python 3; set.intersection performs the same
    # n-way intersection. Guard the empty-input case, where the original
    # reduce() call raised a TypeError.
    combo_batches = set.intersection(*batches) if batches else set()
    if len(combo_batches) == 1:
        return combo_batches.pop()
    elif len(combo_batches) == 0:
        return None
    else:
        raise ValueError("Found multiple overlapping batches: %s -- %s" % (combo_batches, batches))
def outname_from_inputs(in_files):
    """Derive a shared output prefix from input file names.

    Takes the common string prefix, drops a trailing "chr" fragment, then
    trims any trailing separator characters (-, _, .).
    """
    prefix = os.path.commonprefix(in_files)
    if prefix.endswith("chr"):
        prefix = prefix[:-3]
    return prefix.rstrip("-_.")
# -- Insert size calculation
def insert_size_stats(dists):
    """Calculates mean/median and MAD from distances, avoiding outliers.

    MAD is the Median Absolute Deviation: http://en.wikipedia.org/wiki/Median_absolute_deviation
    Distances greater than 11x the raw median are treated as outliers and dropped.
    """
    med = numpy.median(dists)
    # Materialize as a list: on Python 3 `filter` returns a one-shot iterator,
    # which the repeated numpy calls below would silently exhaust.
    filter_dists = [x for x in dists if x < med + 10 * med]
    median = numpy.median(filter_dists)
    return {"mean": float(numpy.mean(filter_dists)), "std": float(numpy.std(filter_dists)),
            "median": float(median),
            "mad": float(numpy.median([abs(x - median) for x in filter_dists]))}
def calc_paired_insert_stats(in_bam, nsample=1000000):
    """Retrieve statistics for paired end read insert distances.

    Samples up to nsample properly-paired first-of-pair reads from the BAM
    and summarizes their absolute insert sizes via insert_size_stats.
    """
    dists = []
    with pysam.Samfile(in_bam, "rb") as in_pysam:
        for read in in_pysam:
            if read.is_proper_pair and read.is_read1:
                dists.append(abs(read.isize))
                if len(dists) >= nsample:
                    break
    return insert_size_stats(dists)
def calc_paired_insert_stats_save(in_bam, stat_file, nsample=1000000):
    """Calculate paired stats, saving to a YAML file for re-runs.

    Returns cached statistics when stat_file already exists; otherwise
    computes them, persists to stat_file, and returns the fresh values.
    """
    if utils.file_exists(stat_file):
        with open(stat_file) as handle:
            return yaml.safe_load(handle)
    stats = calc_paired_insert_stats(in_bam, nsample)
    with open(stat_file, "w") as handle:
        yaml.safe_dump(stats, handle, default_flow_style=False, allow_unicode=False)
    return stats
|
brainstorm/bcbio-nextgen
|
bcbio/structural/shared.py
|
Python
|
mit
| 13,904
|
[
"pysam"
] |
5502ed98c6cabfcfcd1596536091f54c46e5220c6c83fbf928c39293d8203ca7
|
from PyQt4.QtCore import pyqtSlot, pyqtSignal, pyqtProperty
from PyQt4.QtCore import QAbstractListModel, QModelIndex
from PyQt4.QtCore import Qt, QObject, QUrl, QStringList
from PyQt4.QtGui import QApplication, QWidget, QColor, qRgb
from PyQt4.QtDeclarative import QDeclarativeView
import os
import numpy as np
from ase.data import covalent_radii, vdw_radii, colors, atomic_numbers
from multiasecalc.lammps.typing import TypeResolver
from multiasecalc.lammps.bonds import Bonds
from multiasecalc.lammps import charmmtypes, compasstypes
from multiasecalc import utils
# Path to the QML UI definition, resolved relative to the configured install dir.
qml_sourcepath = os.path.join(utils.csmm_config['CSMM_INSTALL_DIR'], 'atomsview/atomsview.qml')
def view(atoms, typedata=None):
    """Open a Qt window visualizing `atoms`.

    When `typedata` is given, atom types are resolved and attached to the
    atoms object before display. Blocks in the Qt event loop until the
    window is closed.
    """
    if typedata:
        resolver = TypeResolver(typedata)
        resolver.resolve_atoms(atoms)
    app = QApplication([])
    view = AtomsView(atoms)
    view.show()
    app.exec_()
class AtomsView(QDeclarativeView):
    """QML-backed top-level window displaying an atoms object."""

    def __init__(self, atoms):
        # NOTE(review): calls QWidget.__init__ directly rather than
        # QDeclarativeView.__init__/super() -- confirm this is intentional.
        QWidget.__init__(self)
        # Shared view state plus the two list models exposed to the QML scene.
        self.view_state = ViewState(atoms)
        self.bonds_model = BondsModel(self.view_state)
        self.atoms_model = AtomsModel(self.view_state)
        self.rootContext().setContextProperty('viewState', self.view_state)
        self.rootContext().setContextProperty('atomsModel', self.atoms_model)
        self.rootContext().setContextProperty('bondsModel', self.bonds_model)
        self.setSource(QUrl(qml_sourcepath))
        self.setResizeMode(QDeclarativeView.SizeRootObjectToView)
        self.setWindowTitle('AtomsView')
        self.resize(800, 800)
class ViewState(QObject):
    """Holds the displayed atoms and the current view transform (translation + rotation).

    Emits `updated` whenever the transformed coordinates change so the
    attached models can refresh.
    """

    updated = pyqtSignal()

    def __init__(self, atoms):
        QObject.__init__(self)
        self.atoms = atoms
        # Cached transformed coordinates; raw positions stay on `atoms`.
        self.atom_coordinates = np.array(atoms.positions)
        self.translation = np.zeros(3)
        self.rotation = np.diag((1,1,1))
        self.centerAtoms()

    def update_coordinates(self):
        # Apply translation first, then rotation (row vectors times R^T).
        coords = np.dot(self.atoms.positions+self.translation, self.rotation.T)
        self.atom_coordinates = coords
        self.updated.emit()

    def centerAtoms(self):
        # Center the view on the mean atom position.
        self.translation = -np.mean(self.atoms.positions, axis=0)
        self.update_coordinates()

    @pyqtSlot(float, float)
    def rotate(self, x_radians, y_radians):
        # Horizontal drag (x_radians) spins about the y axis and vice versa.
        x_rot = x_rotation(y_radians)
        y_rot = y_rotation(x_radians)
        self.rotation = np.dot(np.dot(x_rot,y_rot), self.rotation)
        self.update_coordinates()

    @pyqtSlot(str, result='QVariantList')
    def jmolColor(self, element):
        # Jmol display color for an element symbol, as plain floats for QML.
        number = atomic_numbers[str(element)]
        color = colors.jmol_colors[number]
        return [float(c) for c in color]
def x_rotation(th):
    """Return the 3x3 rotation matrix for a rotation of `th` radians about the x axis."""
    cos_th = np.cos(th)
    sin_th = np.sin(th)
    return np.array([
        [1, 0, 0],
        [0, cos_th, -sin_th],
        [0, sin_th, cos_th],
    ])
def y_rotation(th):
    """Return the 3x3 rotation matrix for a rotation of `th` radians about the y axis."""
    cos_th = np.cos(th)
    sin_th = np.sin(th)
    return np.array([
        [cos_th, 0, sin_th],
        [0, 1, 0],
        [-sin_th, 0, cos_th],
    ])
class AtomsModel(QAbstractListModel):
    """List model exposing per-atom data (position, element, typing info) to QML."""

    # Custom roles start after Qt.UserRole. The x/y/z roles must remain
    # consecutive: data() indexes coordinate columns via `role - self.xrole`.
    xrole, yrole, zrole, elementrole, typerole, descriptionrole, covradiusrole = range(Qt.UserRole+1, Qt.UserRole+1+7)

    def __init__(self, view_state):
        QAbstractListModel.__init__(self)
        self.view_state = view_state
        # Refresh all rows whenever the view transform changes.
        view_state.updated.connect(self.changed)
        role_names = {
            self.xrole: 'atomx',
            self.yrole: 'atomy',
            self.zrole: 'atomz',
            self.elementrole: 'element',
            self.covradiusrole: 'covalentRadius',
            self.typerole: 'type',
            self.descriptionrole: 'description'
        }
        self.setRoleNames(role_names)

    def rowCount(self, index = QModelIndex()):
        # One row per atom.
        return len(self.view_state.atoms)

    def data(self, index, role = Qt.DisplayRole):
        atoms = self.view_state.atoms
        row = index.row()
        if role in (self.xrole, self.yrole, self.zrole):
            # Transformed coordinate component for this atom.
            return float(self.view_state.atom_coordinates[row, role - self.xrole])
        else:
            # Type/description fall back to False (falsy in QML) when absent.
            has_types = 'atom_types' in self.view_state.atoms.info
            has_doc = 'descriptions' in self.view_state.atoms.info
            return {
                self.elementrole: atoms.get_chemical_symbols()[row],
                self.typerole: atoms.info['atom_types'][row] if has_types else False,
                self.descriptionrole: atoms.info['descriptions'][row] if has_doc else False,
                self.covradiusrole: float(covalent_radii[atoms.numbers[row]])
            }[role]

    @pyqtSlot()
    def changed(self):
        # Invalidate every row so QML re-reads all roles.
        self.dataChanged.emit(self.index(0), self.index(self.rowCount()-1))
class BondsModel(QAbstractListModel):
    """List model exposing bond endpoint coordinates and elements to QML."""

    # Endpoint coordinate roles; x1..z1 and x2..z2 must each stay consecutive
    # because data() indexes columns via `role - self.x1role` / `- self.x2role`.
    x1role, y1role, z1role, x2role, y2role, z2role, element1role, element2role = range(Qt.UserRole+1, Qt.UserRole+1+8)

    def __init__(self, view_state):
        QAbstractListModel.__init__(self)
        self.view_state = view_state
        # Atom index pairs snapshotted at construction time.
        # NOTE(review): rowCount() re-reads atoms.info['bonds']; if bonds change
        # after construction the two can disagree -- confirm bonds are static.
        self.pairs = list(view_state.atoms.info['bonds'])
        view_state.updated.connect(self.changed)
        role_names = {
            self.x1role: 'x1',
            self.y1role: 'y1',
            self.z1role: 'z1',
            self.x2role: 'x2',
            self.y2role: 'y2',
            self.z2role: 'z2',
            self.element1role: 'element1',
            self.element2role: 'element2',
        }
        self.setRoleNames(role_names)

    def rowCount(self, index = QModelIndex()):
        # One row per bond.
        return len(self.view_state.atoms.info['bonds'])

    def data(self, index, role = Qt.DisplayRole):
        atoms = self.view_state.atoms
        row = index.row()
        i, j = self.pairs[row]
        if role in (self.x1role, self.y1role, self.z1role):
            # First endpoint coordinate component.
            return float(self.view_state.atom_coordinates[i, role - self.x1role])
        if role in (self.x2role, self.y2role, self.z2role):
            # Second endpoint coordinate component.
            return float(self.view_state.atom_coordinates[j, role - self.x2role])
        else:
            return {
                self.element1role: atoms.get_chemical_symbols()[i],
                self.element2role: atoms.get_chemical_symbols()[j],
            }[role]

    @pyqtSlot()
    def changed(self):
        # Invalidate every row so QML re-reads the endpoint positions.
        self.dataChanged.emit(self.index(0), self.index(self.rowCount()-1))
if __name__ == '__main__':
    # Demo entry point: view an S22 benchmark system with resolved atom types.
    # Usage: atomsview.py [system_name] [charmm|compass]
    import sys
    from ase.data import s22

    # Fall back to a default system when argv[1] is missing or unrecognized.
    try:
        atoms = s22.create_s22_system(sys.argv[1])
    except Exception:
        atoms = s22.create_s22_system('Adenine-thymine_complex_stack')

    # Default to CHARMM typing. The original code only assigned `typedata`
    # inside the try block, so an argv[2] that was present but neither
    # 'charmm' nor 'compass' left it unbound and crashed with a NameError
    # at the TypeResolver call below.
    typedata = charmmtypes.data
    try:
        if sys.argv[2] == 'compass':
            typedata = compasstypes.data
    except IndexError:
        pass

    atoms.info['bonds'] = Bonds(atoms, autodetect=True)
    type_resolver = TypeResolver(typedata)
    matches = [type_resolver.resolve(atom) for atom in atoms]
    atoms.info['atom_types'] = [m.type for m in matches]
    atoms.info['descriptions'] = [m.docstring for m in matches]
    view(atoms)
|
csmm/multiase
|
atomsview/atomsview.py
|
Python
|
gpl-2.0
| 5,997
|
[
"ASE",
"CHARMM",
"LAMMPS"
] |
a703fa3fd5905c39f221f53a7d2f764107b69595be7786b687d80ed68806747d
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Accfft(CMakePackage, CudaPackage):
    """AccFFT extends existing FFT libraries for CUDA-enabled
    Graphics Processing Units (GPUs) to distributed memory clusters
    """

    homepage = "http://accfft.org"
    git = "https://github.com/amirgholami/accfft.git"

    version('develop', branch='master')

    variant('pnetcdf', default=True, description='Add support for parallel NetCDF')
    variant('shared', default=True, description='Enables the build of shared libraries')

    # See: http://accfft.org/articles/install/#installing-dependencies
    depends_on('fftw precision=float,double ~mpi+openmp')
    depends_on('parallel-netcdf', when='+pnetcdf')

    # fix error [-Wc++11-narrowing]
    patch('fix_narrowing_error.patch')

    # Disable parallel make (serial build).
    parallel = False

    def cmake_args(self):
        """Assemble CMake flags from the spec: FFTW location plus GPU/shared toggles."""
        spec = self.spec
        args = [
            '-DFFTW_ROOT={0}'.format(spec['fftw'].prefix),
            '-DFFTW_USE_STATIC_LIBS=false',
            '-DBUILD_GPU={0}'.format('true' if '+cuda' in spec else 'false'),
            '-DBUILD_SHARED={0}'.format(
                'true' if '+shared' in spec else 'false'
            ),
        ]
        if '+cuda' in spec:
            # Forward any requested CUDA architectures as nvcc flags.
            cuda_arch = [x for x in spec.variants['cuda_arch'].value if x]
            if cuda_arch:
                args.append('-DCUDA_NVCC_FLAGS={0}'.format(
                    ' '.join(self.cuda_flags(cuda_arch))
                ))
        return args
|
iulian787/spack
|
var/spack/repos/builtin/packages/accfft/package.py
|
Python
|
lgpl-2.1
| 1,649
|
[
"NetCDF"
] |
ddc37a5a4b2218533087970461e8e0f6e05027d123dce557826ea11fde305d63
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is the module's setup script. To install this module, run:
#
# python setup.py install
#
""" Concurrent logging handler (drop-in replacement for RotatingFileHandler)
Overview
========
This module provides an additional log handler for Python's standard logging
package (PEP 282). This handler will write log events to log file which is
rotated when the log file reaches a certain size. Multiple processes can
safely write to the same log file concurrently.
Details
=======
.. _portalocker: http://code.activestate.com/recipes/65203/
The ``ConcurrentRotatingFileHandler`` class is a drop-in replacement for
Python's standard log handler ``RotatingFileHandler``. This module uses file
locking so that multiple processes can concurrently log to a single file without
dropping or clobbering log events. This module provides a file rotation scheme
like with ``RotatingFileHandler``. Extra care is taken to ensure that logs
can be safely rotated before the rotation process is started. (This module works
around the file rename issue with ``RotatingFileHandler`` on Windows, where a
rotation failure means that all subsequent log events are dropped).
This module attempts to preserve log records at all cost. This means that log
files will grow larger than the specified maximum (rotation) size. So if disk
space is tight, you may want to stick with ``RotatingFileHandler``, which will
strictly adhere to the maximum file size.
If you have multiple instances of a script (or multiple scripts) all running at
the same time and writing to the same log file, then *all* of the scripts should
be using ``ConcurrentRotatingFileHandler``. You should not attempt to mix
and match ``RotatingFileHandler`` and ``ConcurrentRotatingFileHandler``.
This package bundles `portalocker`_ to deal with file locking. Please be aware
that portalocker only supports Unix (posix) and NT platforms at this time, and
therefore this package only supports those platforms as well.
Installation
============
Use the following command to install this package::
pip install ConcurrentLogHandler
If you are installing from source, you can use::
python setup.py install
Examples
========
Simple Example
--------------
Here is an example demonstrating how to use this module directly (from within
Python code)::
from logging import getLogger, INFO
from cloghandler import ConcurrentRotatingFileHandler
import os
log = getLogger()
# Use an absolute path to prevent file rotation trouble.
logfile = os.path.abspath("mylogfile.log")
# Rotate log after reaching 512K, keep 5 old copies.
rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 512*1024, 5)
log.addHandler(rotateHandler)
log.setLevel(INFO)
log.info("Here is a very exciting log message, just for you")
Automatic fallback example
--------------------------
If you are distributing your code and you are unsure if the
`ConcurrentLogHandler` package has been installed everywhere your code will run,
Python makes it easy to gracefully fallback to the built in
`RotatingFileHandler`, here is an example::
try:
from cloghandler import ConcurrentRotatingFileHandler as RFHandler
except ImportError:
# Next 2 lines are optional: issue a warning to the user
from warnings import warn
warn("ConcurrentLogHandler package not installed. Using builtin log handler")
from logging.handlers import RotatingFileHandler as RFHandler
log = getLogger()
rotateHandler = RFHandler("/path/to/mylogfile.log", "a", 1048576, 15)
log.addHandler(rotateHandler)
Config file example
-------------------
This example shows you how to use this log handler with the logging config file
parser. This allows you to keep your logging configuration code separate from
your application code.
Example config file: ``logging.ini``::
[loggers]
keys=root
[handlers]
keys=hand01
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=handlers.ConcurrentRotatingFileHandler
level=NOTSET
formatter=form01
args=("rotating.log", "a", 512*1024, 5)
[formatter_form01]
format=%(asctime)s %(levelname)s %(message)s
Example Python code: ``app.py``::
import logging, logging.config
import cloghandler
logging.config.fileConfig("logging.ini")
log = logging.getLogger()
log.info("Here is a very exciting log message, just for you")
Change Log
==========
.. _Red Hat Bug #858912: https://bugzilla.redhat.com/show_bug.cgi?id=858912
.. _Python Bug #15960: http://bugs.python.org/issue15960
.. _LP Bug 1199332: https://bugs.launchpad.net/python-concurrent-log-handler/+bug/1199332
.. _LP Bug 1199333: https://bugs.launchpad.net/python-concurrent-log-handler/+bug/1199333
- 0.8.7: Bug fixes - `LP Bug 1199332`_ and `LP Bug 1199333`_.
* More gracefully handle out of disk space scenarios. Prevent release() from
throwing an exception.
* Handle logging.shutdown() in Python 2.7+. Close the lock file stream via
close().
* Big thanks to Dan Callaghan for forwarding these issues and patches.
- 0.8.6: Fixed packaging bug with test script
* Fix a small packaging bug from the 0.8.5 release. (Thanks to Björn Häuser
for bringing this to my attention.)
* Updated stresstest.py to always use the correct python version when
launching sub-processes instead of the system's default "python".
- 0.8.5: Fixed ValueError: I/O operation on closed file
* Thanks to Vince Carney, Arif Kasim, Matt Drew, Nick Coghlan, and
Dan Callaghan for bug reports. Bugs can now be filed here:
https://bugs.launchpad.net/python-concurrent-log-handler. Bugs resolved
`Red Hat Bug #858912`_ and `Python Bug #15960`_
* Updated ez_setup.py to 0.7.7
* Updated portalocker to 0.3 (now maintained by Rick van Hattem)
* Initial Python 3 support (needs more testing)
* Fixed minor spelling mistakes
- 0.8.4: Fixed lock-file naming issue
* Resolved a minor issue where lock-files would be improperly named if the
log file contained ".log" in the middle of the log name. For example, if
you log file was "/var/log/mycompany.logging.mysource.log", the lock file
would be named "/var/log/mycompany.ging.mysource.lock", which is not correct.
Thanks to Dirk Rothe for pointing this out. Since this introduces a slight
lock-file behavior difference, make sure all concurrent writers are updated
to 0.8.4 at the same time if this issue affects you.
* Updated ez_setup.py to 0.6c11
- 0.8.3: Fixed a log file rotation bug and updated docs
* Fixed a bug that happens after log rotation when multiple processes are
writing to the same log file. Each process ends up writing to their own
log file ("log.1" or "log.2" instead of "log"). The fix is simply to reopen
the log file and check the size again. I do not believe this bug results in
data loss; however, this certainly was not the desired behavior. (A big
thanks goes to Oliver Tonnhofer for finding, documenting, and providing a
patch for this bug.)
* Cleanup the docs. (aka "the page you are reading right now") I fixed some
silly mistakes and typos... who writes this stuff?
- 0.8.2: Minor bug fix release (again)
* Found and resolved another issue with older logging packages that do not
support encoding.
- 0.8.1: Minor bug fix release
* Now importing "codecs" directly; I found some slight differences in the
logging module in different Python 2.4.x releases that caused the module to
fail to load.
- 0.8.0: Minor feature release
* Add better support for using ``logging.config.fileConfig()``. This class
is now available using ``class=handlers.ConcurrentRotatingFileHandler``.
* Minor changes in how the ``filename`` parameter is handled when given a
relative path.
- 0.7.4: Minor bug fix
* Fixed a typo in the package description (incorrect class name)
* Added a change log; which you are reading now.
* Fixed the ``close()`` method to no longer assume that stream is still
open.
To-do
=====
* This module has had minimal testing in a multi-threaded process. I see no
reason why this should be an issue, but no stress-testing has been done in a
threaded situation. If this is important to you, you could always add
threading support to the ``stresstest.py`` script and send me the patch.
"""
import sys

# Extra keyword arguments for setup(), filled in per-interpreter below.
extra = {}
if sys.version_info >= (3, 0):
    # Convert the 2.x sources with 2to3 at build time when installing on Python 3.
    extra.update(use_2to3=True)

# Bootstrap setuptools if it is not already installed (bundled ez_setup.py).
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup

VERSION = "0.8.7"

# Trove classifiers, one per line.
classifiers = """\
Development Status :: 5 - Production/Stable
Topic :: System :: Logging
Operating System :: POSIX
Operating System :: Microsoft :: Windows
Programming Language :: Python
Programming Language :: Python :: 2.4
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Topic :: Software Development :: Libraries :: Python Modules
License :: OSI Approved :: Apache Software License
"""

# The module docstring doubles as package metadata: the first line becomes
# the short description, the remainder the long description.
doc = __doc__.splitlines()
setup(name='ConcurrentLogHandler',
      version=VERSION,
      author="Lowell Alleman",
      author_email="lowell87@gmail.com",
      py_modules=[
          "cloghandler",
          "portalocker",
      ],
      package_dir={ '' : 'src', },
      data_files=[
          ('tests', ["stresstest.py"]),
          ('docs', [
              'README',
              'LICENSE',
          ]),
      ],
      url="http://launchpad.net/python-concurrent-log-handler",
      license = "http://www.apache.org/licenses/LICENSE-2.0",
      description=doc.pop(0),
      long_description="\n".join(doc),
      platforms = [ "nt", "posix" ],
      keywords = "logging, windows, linux, unix, rotate, portalocker",
      classifiers=classifiers.splitlines(),
      zip_safe=True,
      #test_suite=unittest.TestSuite,
      **extra
      )
|
dsuch/ConcurrentLogHandler
|
setup.py
|
Python
|
apache-2.0
| 10,099
|
[
"exciting"
] |
1f15f9cdbc3cc1a6e6b66844f4eaba3825b2201ce39e6c9e39f8d95a1540815c
|
#
# Copyright (C) 2018 Susan H. Leung
# All Rights Reserved
#
from rdkit import RDConfig
import os
import sys
import math
from datetime import datetime, timedelta
import unittest
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Geometry import rdGeometry as geom
from rdkit.Chem.rdchem import Atom
from rdkit.Chem.MolStandardize import rdMolStandardize
class TestCase(unittest.TestCase):
def setUp(self):
    # No shared fixtures; each test builds its own molecules from SMILES.
    pass
def test1Cleanup(self):
    """Cleanup splits the sodium carboxylate into separated charged fragments."""
    mol = Chem.MolFromSmiles("CCC(=O)O[Na]")
    nmol = rdMolStandardize.Cleanup(mol)
    self.assertEqual(Chem.MolToSmiles(nmol), "CCC(=O)[O-].[Na+]")
def test2StandardizeSmiles(self):
    """One-shot SMILES-in/SMILES-out standardization convenience call."""
    standardized = rdMolStandardize.StandardizeSmiles("CCC(=O)O[Na]")
    self.assertEqual(standardized, "CCC(=O)[O-].[Na+]")
def test3Parents(self):
    """Each *Parent function strips one layer of variation: fragments,
    charges, tautomers, stereo, isotopes; SuperParent strips all of them."""
    parent = rdMolStandardize.FragmentParent(Chem.MolFromSmiles("[Na]OC(=O)c1ccccc1"))
    self.assertEqual(Chem.MolToSmiles(parent), "O=C([O-])c1ccccc1")
    parent = rdMolStandardize.ChargeParent(Chem.MolFromSmiles("C[NH+](C)(C).[Cl-]"))
    self.assertEqual(Chem.MolToSmiles(parent), "CN(C)C")
    enolate = Chem.MolFromSmiles("[O-]CCCC=CO.[Na+]")
    parent = rdMolStandardize.TautomerParent(enolate)
    self.assertEqual(Chem.MolToSmiles(parent), "O=CCCCC[O-].[Na+]")
    # same answer because of the standardization at the end
    parent = rdMolStandardize.TautomerParent(enolate, skipStandardize=True)
    self.assertEqual(Chem.MolToSmiles(parent), "O=CCCCC[O-].[Na+]")
    parent = rdMolStandardize.StereoParent(Chem.MolFromSmiles("C[C@](F)(Cl)C/C=C/[C@H](F)Cl"))
    self.assertEqual(Chem.MolToSmiles(parent), "CC(F)(Cl)CC=CC(F)Cl")
    parent = rdMolStandardize.IsotopeParent(Chem.MolFromSmiles("[12CH3][13CH3]"))
    self.assertEqual(Chem.MolToSmiles(parent), "CC")
    super_smi = "[Na]Oc1c([12C@H](F)Cl)c(O[2H])c(C(=O)O)cc1CC=CO"
    parent = rdMolStandardize.SuperParent(Chem.MolFromSmiles(super_smi))
    self.assertEqual(Chem.MolToSmiles(parent), "O=CCCc1cc(C(=O)O)c(O)c(C(F)Cl)c1O")
    # skipping the final standardization keeps the salt form
    parent = rdMolStandardize.SuperParent(Chem.MolFromSmiles(super_smi), skipStandardize=True)
    self.assertEqual(Chem.MolToSmiles(parent), "O=CCCc1cc(C(=O)[O-])c(O)c(C(F)Cl)c1O.[Na+]")
def test4Normalize(self):
    """Normalize() collapses the charge-separated enolate to a neutral enal."""
    zwitterion = Chem.MolFromSmiles(r"C[N+](C)=C\C=C\[O-]")
    normalized = rdMolStandardize.Normalize(zwitterion)
    self.assertEqual(Chem.MolToSmiles(normalized), "CN(C)C=CC=O")
def test4Reionize(self):
    """Reionize() moves the charge to the strongest acid (the sulfonic acid)."""
    disulfonate = Chem.MolFromSmiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
    reionized = rdMolStandardize.Reionize(disulfonate)
    self.assertEqual(Chem.MolToSmiles(reionized), "O=S(O)c1ccc(S(=O)(=O)[O-])cc1")
def test5Metal(self):
    """MetalDisconnector splits covalent metal-carbon/halogen bonds; a custom
    metal~[N,O,F] SMARTS controls which metal-heteroatom bonds are cut."""
    disconnector = rdMolStandardize.MetalDisconnector()
    organozinc = Chem.MolFromSmiles("C1(CCCCC1)[Zn]Br")
    split = disconnector.Disconnect(organozinc)
    self.assertEqual(Chem.MolToSmiles(split), "[Br-].[CH-]1CCCCC1.[Zn+2]")
    # test user defined metal_nof
    disconnector.SetMetalNof(
      Chem.MolFromSmarts(
        "[Li,K,Rb,Cs,Fr,Be,Mg,Ca,Sr,Ba,Ra,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,Al,Ga,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,In,Sn,Hf,Ta,W,Re,Os,Ir,Pt,Au,Hg,Tl,Pb,Bi]~[N,O,F]"
      ))
    # with Zn~O now disconnectable, the Na carboxylate is left alone here
    carboxylate = Chem.MolFromSmiles("CCC(=O)O[Na]")
    untouched = disconnector.Disconnect(carboxylate)
    self.assertEqual(Chem.MolToSmiles(untouched), "CCC(=O)O[Na]")
def test6Charge(self):
    """Reionizer with the default and a custom acid/base-pair table, plus
    Uncharger with and without canonical atom ordering."""
    disulfonate = Chem.MolFromSmiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
    # instantiate with default acid base pair library
    default_reionizer = rdMolStandardize.Reionizer()
    self.assertEqual(Chem.MolToSmiles(default_reionizer.reionize(disulfonate)),
                     "O=S(O)c1ccc(S(=O)(=O)[O-])cc1")
    # try reionize with another acid base pair library without the right
    # pairs
    pairs_path = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'MolStandardize',
                              'test_data', 'acid_base_pairs2.txt')
    custom_reionizer = rdMolStandardize.Reionizer(pairs_path)
    self.assertEqual(Chem.MolToSmiles(custom_reionizer.reionize(disulfonate)),
                     "O=S([O-])c1ccc(S(=O)(=O)O)cc1")
    # test Uncharger
    benzoate = Chem.MolFromSmiles("O=C([O-])c1ccccc1")
    self.assertEqual(Chem.MolToSmiles(rdMolStandardize.Uncharger().uncharge(benzoate)),
                     "O=C(O)c1ccccc1")
    # test canonical Uncharger: the ordering decides which acid keeps the proton
    betaine = Chem.MolFromSmiles("C[N+](C)(C)CC(C(=O)[O-])CC(=O)[O-]")
    uncharged = rdMolStandardize.Uncharger(canonicalOrder=False).uncharge(betaine)
    self.assertEqual(Chem.MolToSmiles(uncharged), "C[N+](C)(C)CC(CC(=O)[O-])C(=O)O")
    uncharged = rdMolStandardize.Uncharger(canonicalOrder=True).uncharge(betaine)
    self.assertEqual(Chem.MolToSmiles(uncharged), "C[N+](C)(C)CC(CC(=O)O)C(=O)[O-]")
def test7Fragment(self):
    """FragmentRemover (default and skip_if_all_match) and the
    LargestFragmentChooser under every CleanupParameters combination."""
    remover = rdMolStandardize.FragmentRemover()
    salt = Chem.MolFromSmiles("CN(C)C.Cl.Cl.Br")
    self.assertEqual(Chem.MolToSmiles(remover.remove(salt)), "CN(C)C")
    chooser = rdMolStandardize.LargestFragmentChooser()
    nitrate = Chem.MolFromSmiles("[N+](=O)([O-])[O-].[CH3+]")
    self.assertEqual(Chem.MolToSmiles(chooser.choose(nitrate)), "O=[N+]([O-])[O-]")
    organic_chooser = rdMolStandardize.LargestFragmentChooser(preferOrganic=True)
    self.assertEqual(Chem.MolToSmiles(organic_chooser.choose(nitrate)), "[CH3+]")
    # skip_if_all_match leaves the molecule intact when every fragment matches
    remover = rdMolStandardize.FragmentRemover(skip_if_all_match=True)
    all_salts = Chem.MolFromSmiles("[Na+].Cl.Cl.Br")
    self.assertEqual(remover.remove(all_salts).GetNumAtoms(), all_salts.GetNumAtoms())

    def pick_with_params(smiles, options):
        # Build a chooser from a fresh CleanupParameters with the given
        # attributes set, and return the SMILES of the chosen fragment.
        params = rdMolStandardize.CleanupParameters()
        for attr, value in options.items():
            setattr(params, attr, value)
        chooser = rdMolStandardize.LargestFragmentChooser(params)
        return Chem.MolToSmiles(chooser.choose(Chem.MolFromSmiles(smiles)))

    smi3 = "CNC[C@@H]([C@H]([C@@H]([C@@H](CO)O)O)O)O.c1cc2c(cc1C(=O)O)oc(n2)c3cc(cc(c3)Cl)Cl"
    # default: atom count (including Hs) -> the sugar-like fragment wins
    self.assertEqual(pick_with_params(smi3, {}),
                     "CNC[C@H](O)[C@@H](O)[C@H](O)[C@H](O)CO")
    # heavy atoms only, or molecular weight, favour the benzoxazole fragment
    self.assertEqual(pick_with_params(smi3, {"largestFragmentChooserCountHeavyAtomsOnly": True}),
                     "O=C(O)c1ccc2nc(-c3cc(Cl)cc(Cl)c3)oc2c1")
    self.assertEqual(pick_with_params(smi3, {"largestFragmentChooserUseAtomCount": False}),
                     "O=C(O)c1ccc2nc(-c3cc(Cl)cc(Cl)c3)oc2c1")
    smi4 = "CC.O=[Pb]=O"
    self.assertEqual(pick_with_params(smi4, {}), "CC")
    self.assertEqual(pick_with_params(smi4, {"largestFragmentChooserCountHeavyAtomsOnly": True}),
                     "O=[Pb]=O")
    self.assertEqual(pick_with_params(smi4, {"largestFragmentChooserUseAtomCount": False}),
                     "O=[Pb]=O")
    # preferOrganic overrides the size criterion in favour of the carbon fragment
    self.assertEqual(
        pick_with_params(smi4, {"largestFragmentChooserCountHeavyAtomsOnly": True,
                                "preferOrganic": True}), "CC")
    self.assertEqual(
        pick_with_params(smi4, {"largestFragmentChooserUseAtomCount": False,
                                "preferOrganic": True}), "CC")
def test8Normalize(self):
    """Normalizer converts the N-oxide charge-separated form to the neutral one."""
    pyridinone = Chem.MolFromSmiles("C[n+]1ccccc1[O-]")
    normalized = rdMolStandardize.Normalizer().normalize(pyridinone)
    self.assertEqual(Chem.MolToSmiles(normalized), "Cn1ccccc1=O")
def test9Validate(self):
    """Validation pipelines report the expected INFO messages.

    Fixes two defects in the original:
    * ``self.assertEqual`` was written as a bare attribute followed by a
      parenthesized tuple on the next line, so none of the message
      comparisons ever executed;
    * ``msg5`` was produced with ``vm4`` (AllowedAtomsValidation) instead of
      the freshly constructed ``vm5`` (DisallowedAtomsValidation).
    """
    vm = rdMolStandardize.RDKitValidation()
    # pentavalent-looking O; sanitize=False so the bad valence survives parsing
    mol = Chem.MolFromSmiles("CO(C)C", sanitize=False)
    msg = vm.validate(mol)
    self.assertEqual(len(msg), 1)
    self.assertEqual(
      """INFO: [ValenceValidation] Explicit valence for atom # 1 O, 3, is greater than permitted""",
      msg[0])
    vm2 = rdMolStandardize.MolVSValidation([rdMolStandardize.FragmentValidation()])
    # with no argument it also works
    # vm2 = rdMolStandardize.MolVSValidation()
    mol2 = Chem.MolFromSmiles("COc1cccc(C=N[N-]C(N)=O)c1[O-].O.O.O.O=[U+2]=O")
    msg2 = vm2.validate(mol2)
    self.assertEqual(len(msg2), 1)
    self.assertEqual(
      """INFO: [FragmentValidation] water/hydroxide is present""", msg2[0])
    vm3 = rdMolStandardize.MolVSValidation()
    mol3 = Chem.MolFromSmiles("C1COCCO1.O=C(NO)NO")
    msg3 = vm3.validate(mol3)
    self.assertEqual(len(msg3), 2)
    self.assertEqual(
      """INFO: [FragmentValidation] 1,2-dimethoxyethane is present""", msg3[0])
    self.assertEqual(
      """INFO: [FragmentValidation] 1,4-dioxane is present""", msg3[1])
    atomic_no = [6, 7, 8]
    allowed_atoms = [Atom(i) for i in atomic_no]
    vm4 = rdMolStandardize.AllowedAtomsValidation(allowed_atoms)
    mol4 = Chem.MolFromSmiles("CC(=O)CF")
    msg4 = vm4.validate(mol4)
    self.assertEqual(len(msg4), 1)
    self.assertEqual(
      """INFO: [AllowedAtomsValidation] Atom F is not in allowedAtoms list""", msg4[0])
    atomic_no = [9, 17, 35]
    disallowed_atoms = [Atom(i) for i in atomic_no]
    vm5 = rdMolStandardize.DisallowedAtomsValidation(disallowed_atoms)
    mol5 = Chem.MolFromSmiles("CC(=O)CF")
    msg5 = vm5.validate(mol5)  # was vm4 (wrong validator) in the original
    self.assertEqual(len(msg5), 1)
    self.assertEqual(
      """INFO: [DisallowedAtomsValidation] Atom F is in disallowedAtoms list""", msg5[0])
    msg6 = rdMolStandardize.ValidateSmiles("ClCCCl.c1ccccc1O")
    self.assertEqual(len(msg6), 1)
    self.assertEqual(
      """INFO: [FragmentValidation] 1,2-dichloroethane is present""", msg6[0])
def test10NormalizeFromData(self):
    """NormalizerFromData builds a normalizer from an in-memory transform
    table; the commented-out azide transform must not be applied."""
    data = """// Name SMIRKS
Nitro to N+(O-)=O [N,P,As,Sb;X3:1](=[O,S,Se,Te:2])=[O,S,Se,Te:3]>>[*+1:1]([*-1:2])=[*:3]
Sulfone to S(=O)(=O) [S+2:1]([O-:2])([O-:3])>>[S+0:1](=[O-0:2])(=[O-0:3])
Pyridine oxide to n+O- [n:1]=[O:2]>>[n+:1][O-:2]
// Azide to N=N+=N- [*,H:1][N:2]=[N:3]#[N:4]>>[*,H:1][N:2]=[N+:3]=[N-:4]
"""
    default_normalizer = rdMolStandardize.Normalizer()
    custom_normalizer = rdMolStandardize.NormalizerFromData(
        data, rdMolStandardize.CleanupParameters())
    nitro_azide = Chem.MolFromSmiles("O=N(=O)CCN=N#N", sanitize=False)
    from_default = default_normalizer.normalize(nitro_azide)
    from_custom = custom_normalizer.normalize(nitro_azide)
    self.assertEqual(Chem.MolToSmiles(nitro_azide), "N#N=NCCN(=O)=O")
    # the built-in table fixes both the nitro and the azide group ...
    self.assertEqual(Chem.MolToSmiles(from_default), "[N-]=[N+]=NCC[N+](=O)[O-]")
    # ... the custom table lacks the azide transform, so only nitro changes
    self.assertEqual(Chem.MolToSmiles(from_custom), "N#N=NCC[N+](=O)[O-]")
def test11FragmentParams(self):
    """FragmentRemoverFromData reads the salt catalog from a string."""
    data = """// Name SMARTS
fluorine [F]
chlorine [Cl]
"""
    remover = rdMolStandardize.FragmentRemoverFromData(data)
    stripped = remover.remove(Chem.MolFromSmiles("CN(C)C.Cl.Cl.Br"))
    # bromide is not in the custom catalog, so it survives
    self.assertEqual(Chem.MolToSmiles(stripped), "Br.CN(C)C")
def test12ChargeParams(self):
    """ReionizerFromData builds a Reionizer from an in-memory acid/base
    table; with only sulfonic/sulfinic pairs the charge lands on -SO2H.

    Removed a leftover debug ``print("done")`` from the original.
    """
    params = """// The default list of AcidBasePairs, sorted from strongest to weakest.
// This list is derived from the Food and Drug: Administration Substance
// Registration System Standard Operating Procedure guide.
//
// Name Acid Base
-SO2H [!O][SD3](=O)[OH] [!O][SD3](=O)[O-]
-SO3H [!O]S(=O)(=O)[OH] [!O]S(=O)(=O)[O-]
"""
    mol = Chem.MolFromSmiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
    # instantiate with the in-memory acid base pair library above
    reionizer = rdMolStandardize.ReionizerFromData(params, [])
    nm = reionizer.reionize(mol)
    self.assertEqual(Chem.MolToSmiles(nm), "O=S([O-])c1ccc(S(=O)(=O)O)cc1")
def test13Tautomers(self):
    """TautomerEnumerator: canonicalization, enumeration, custom scoring
    functions (named and lambda), and both PickCanonical overloads.

    Removed a verbatim copy-paste duplicate of the scoring-function section
    (a second identical ``scorefunc2`` definition plus a repeated run of the
    same Canonicalize/ScoreTautomer asserts) present in the original.
    """
    enumerator = rdMolStandardize.TautomerEnumerator()
    m = Chem.MolFromSmiles("C1(=CCCCC1)O")
    ctaut = enumerator.Canonicalize(m)
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    params = rdMolStandardize.CleanupParameters()
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    m = Chem.MolFromSmiles("C1(=CCCCC1)O")
    ctaut = enumerator.Canonicalize(m)
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    taut_res = enumerator.Enumerate(m)
    self.assertEqual(len(taut_res), 2)
    ctauts = list(sorted(Chem.MolToSmiles(x) for x in taut_res))
    self.assertEqual(ctauts, ['O=C1CCCCC1', 'OC1=CCCCC1'])
    self.assertEqual(list(taut_res.smiles), ['O=C1CCCCC1', 'OC1=CCCCC1'])
    # this tests the non-templated overload
    self.assertEqual(Chem.MolToSmiles(enumerator.PickCanonical(taut_res)), "O=C1CCCCC1")
    # this tests the templated overload
    self.assertEqual(Chem.MolToSmiles(enumerator.PickCanonical(set(taut_res()))), "O=C1CCCCC1")
    with self.assertRaises(TypeError):
        enumerator.PickCanonical(1)
    with self.assertRaises(TypeError):
        enumerator.PickCanonical([0, 1])
    self.assertEqual(
        Chem.MolToSmiles(
            enumerator.PickCanonical(Chem.MolFromSmiles(x) for x in ['O=C1CCCCC1', 'OC1=CCCCC1'])),
        "O=C1CCCCC1")

    def scorefunc1(mol):
        ' stupid tautomer scoring function '
        p = Chem.MolFromSmarts('[OH]')
        return len(mol.GetSubstructMatches(p))

    def scorefunc2(mol):
        ' stupid tautomer scoring function '
        p = Chem.MolFromSmarts('O=C')
        return len(mol.GetSubstructMatches(p))

    m = Chem.MolFromSmiles("C1(=CCCCC1)O")
    ctaut = enumerator.Canonicalize(m, scorefunc1)
    self.assertEqual(Chem.MolToSmiles(ctaut), "OC1=CCCCC1")
    ctaut = enumerator.Canonicalize(m, scorefunc2)
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    # make sure lambdas work
    ctaut = enumerator.Canonicalize(
        m, lambda x: len(x.GetSubstructMatches(Chem.MolFromSmarts('C=O'))))
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    # make sure we behave if we return something bogus from the scoring function
    with self.assertRaises(TypeError):
        ctaut = enumerator.Canonicalize(m, lambda x: 'fail')
    self.assertEqual(enumerator.ScoreTautomer(Chem.MolFromSmiles('N=c1[nH]cccc1')), 99)
    self.assertEqual(enumerator.ScoreTautomer(Chem.MolFromSmiles('Nc1ncccc1')), 100)
    res = enumerator.Enumerate(m)
    # this tests the specialized overload
    ctaut = enumerator.PickCanonical(res, scorefunc1)
    self.assertEqual(Chem.MolToSmiles(ctaut), "OC1=CCCCC1")
    ctaut = enumerator.PickCanonical(res, scorefunc2)
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    # make sure lambdas work
    ctaut = enumerator.PickCanonical(
        res, lambda x: len(x.GetSubstructMatches(Chem.MolFromSmarts('C=O'))))
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    # make sure we behave if we return something bogus from the scoring function
    with self.assertRaises(TypeError):
        ctaut = enumerator.PickCanonical(res, lambda x: 'fail')
    # this tests the non-specialized overload
    ctaut = enumerator.PickCanonical(set(res()), scorefunc1)
    self.assertEqual(Chem.MolToSmiles(ctaut), "OC1=CCCCC1")
    ctaut = enumerator.PickCanonical(set(res()), scorefunc2)
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    # make sure lambdas work
    ctaut = enumerator.PickCanonical(
        set(res()), lambda x: len(x.GetSubstructMatches(Chem.MolFromSmarts('C=O'))))
    self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
    # make sure we behave if we return something bogus from the scoring function
    with self.assertRaises(TypeError):
        ctaut = enumerator.PickCanonical(set(res()), lambda x: 'fail')
def test14TautomerDetails(self):
    """Enumerate reports which atoms/bonds tautomerism touched; repeated
    calls must return the same answer."""
    enumerator = rdMolStandardize.TautomerEnumerator()
    amidine = Chem.MolFromSmiles("c1ccccc1CN=c1[nH]cccc1")
    expected_bonds = (7, 8, 9, 10, 11, 12, 14)
    details = enumerator.Enumerate(amidine)
    self.assertEqual(len(details.tautomers), 2)
    self.assertEqual(details.modifiedAtoms, (7, 9))
    self.assertEqual(len(details.modifiedBonds), 7)
    self.assertEqual(details.modifiedBonds, expected_bonds)
    # a second call gives the same atoms
    details = enumerator.Enumerate(amidine)
    self.assertEqual(len(details.tautomers), 2)
    self.assertEqual(details.modifiedAtoms, (7, 9))
    # ... and a third the same bonds
    details = enumerator.Enumerate(amidine)
    self.assertEqual(len(details.tautomers), 2)
    self.assertEqual(len(details.modifiedBonds), 7)
    self.assertEqual(details.modifiedBonds, expected_bonds)
def test15EnumeratorParams(self):
    """CleanupParameters knobs for tautomer enumeration: maxTautomers /
    maxTransforms limits, sp3-stereo removal, double-bond stereo removal,
    isotopic-H handling, and the container API of
    TautomerEnumeratorResult (indexing, .smiles, .smilesTautomerMap)."""
    # Test a structure with hundreds of tautomers.
    smi68 = "[H][C](CO)(NC(=O)C1=C(O)C(O)=CC=C1)C(O)=O"
    m68 = Chem.MolFromSmiles(smi68)
    enumerator = rdMolStandardize.TautomerEnumerator()
    res68 = enumerator.Enumerate(m68)
    # the default transform budget truncates the enumeration at 252 tautomers
    self.assertEqual(len(res68), 252)
    self.assertEqual(len(res68.tautomers), len(res68))
    self.assertEqual(res68.status, rdMolStandardize.TautomerEnumeratorStatus.MaxTransformsReached)
    # the legacy (v1) transform set yields a different truncated count
    enumerator = rdMolStandardize.GetV1TautomerEnumerator()
    res68 = enumerator.Enumerate(m68)
    self.assertEqual(len(res68), 292)
    self.assertEqual(len(res68.tautomers), len(res68))
    self.assertEqual(res68.status, rdMolStandardize.TautomerEnumeratorStatus.MaxTransformsReached)
    # an explicit maxTautomers cap stops the enumeration earlier
    params = rdMolStandardize.CleanupParameters()
    params.maxTautomers = 50
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res68 = enumerator.Enumerate(m68)
    self.assertEqual(len(res68), 50)
    self.assertEqual(res68.status, rdMolStandardize.TautomerEnumeratorStatus.MaxTautomersReached)
    sAlaSmi = "C[C@H](N)C(=O)O"
    sAla = Chem.MolFromSmiles(sAlaSmi)
    # test remove (S)-Ala stereochemistry
    self.assertEqual(sAla.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CCW)
    self.assertEqual(sAla.GetAtomWithIdx(1).GetProp("_CIPCode"), "S")
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = True
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(sAla)
    # iterating the result directly and via .tautomers must agree
    for taut in res:
        self.assertEqual(taut.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
        self.assertFalse(taut.GetAtomWithIdx(1).HasProp("_CIPCode"))
    for taut in res.tautomers:
        self.assertEqual(taut.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
        self.assertFalse(taut.GetAtomWithIdx(1).HasProp("_CIPCode"))
    for i, taut in enumerate(res):
        self.assertEqual(Chem.MolToSmiles(taut), Chem.MolToSmiles(res.tautomers[i]))
    # all views of the result expose the same length
    self.assertEqual(len(res), len(res.smiles))
    self.assertEqual(len(res), len(res.tautomers))
    self.assertEqual(len(res), len(res()))
    self.assertEqual(len(res), len(res.smilesTautomerMap))
    # NOTE: smilesTautomerMap.keys()/.values() support positional indexing
    # (a C++-backed proxy, not a Python dict view)
    for i, taut in enumerate(res.tautomers):
        self.assertEqual(Chem.MolToSmiles(taut), Chem.MolToSmiles(res[i]))
        self.assertEqual(Chem.MolToSmiles(taut), res.smiles[i])
        self.assertEqual(Chem.MolToSmiles(taut),
                         Chem.MolToSmiles(res.smilesTautomerMap.values()[i].tautomer))
    for i, k in enumerate(res.smilesTautomerMap.keys()):
        self.assertEqual(k, res.smiles[i])
    for i, v in enumerate(res.smilesTautomerMap.values()):
        self.assertEqual(Chem.MolToSmiles(v.tautomer), Chem.MolToSmiles(res[i]))
    for i, (k, v) in enumerate(res.smilesTautomerMap.items()):
        self.assertEqual(k, res.smiles[i])
        self.assertEqual(Chem.MolToSmiles(v.tautomer), Chem.MolToSmiles(res[i]))
    for i, smiles in enumerate(res.smiles):
        self.assertEqual(smiles, Chem.MolToSmiles(res[i]))
        self.assertEqual(smiles, res.smilesTautomerMap.keys()[i])
    # negative indexing and out-of-range behaviour
    self.assertEqual(Chem.MolToSmiles(res.tautomers[-1]), Chem.MolToSmiles(res[-1]))
    self.assertEqual(Chem.MolToSmiles(res[-1]), Chem.MolToSmiles(res[len(res) - 1]))
    self.assertEqual(Chem.MolToSmiles(res.tautomers[-1]),
                     Chem.MolToSmiles(res.tautomers[len(res) - 1]))
    with self.assertRaises(IndexError):
        res[len(res)]
    with self.assertRaises(IndexError):
        res[-len(res) - 1]
    with self.assertRaises(IndexError):
        res.tautomers[len(res)]
    with self.assertRaises(IndexError):
        res.tautomers[-len(res.tautomers) - 1]
    # test retain (S)-Ala stereochemistry
    self.assertEqual(sAla.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CCW)
    self.assertEqual(sAla.GetAtomWithIdx(1).GetProp("_CIPCode"), "S")
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(sAla)
    for taut in res:
        tautAtom = taut.GetAtomWithIdx(1)
        # stereo is kept only while the centre stays sp3
        if (tautAtom.GetHybridization() == Chem.HybridizationType.SP3):
            self.assertEqual(tautAtom.GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CCW)
            self.assertTrue(tautAtom.HasProp("_CIPCode"))
            self.assertEqual(tautAtom.GetProp("_CIPCode"), "S")
        else:
            self.assertFalse(tautAtom.HasProp("_CIPCode"))
            self.assertEqual(tautAtom.GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
    eEnolSmi = "C/C=C/O"
    eEnol = Chem.MolFromSmiles(eEnolSmi)
    self.assertEqual(eEnol.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOE)
    # test remove enol E stereochemistry
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveBondStereo = True
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(eEnol)
    for taut in res.tautomers:
        self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREONONE)
    # test retain enol E stereochemistry
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveBondStereo = False
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(eEnol)
    for taut in res.tautomers:
        if (taut.GetBondWithIdx(1).GetBondType() == Chem.BondType.DOUBLE):
            self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOE)
    zEnolSmi = "C/C=C\\O"
    zEnol = Chem.MolFromSmiles(zEnolSmi)
    self.assertEqual(zEnol.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOZ)
    # test remove enol Z stereochemistry
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveBondStereo = True
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(zEnol)
    for taut in res:
        self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREONONE)
    # test retain enol Z stereochemistry
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveBondStereo = False
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(zEnol)
    for taut in res:
        if (taut.GetBondWithIdx(1).GetBondType() == Chem.BondType.DOUBLE):
            self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOZ)
    # deuterated ubiquinone-like structure (CHEMBL2024142) used to test
    # isotopic-H tracking through tautomer transforms
    chembl2024142Smi = "[2H]C1=C(C(=C2C(=C1[2H])C(=O)C(=C(C2=O)C([2H])([2H])[2H])C/C=C(\\C)/CC([2H])([2H])/C=C(/CC/C=C(\\C)/CCC=C(C)C)\\C([2H])([2H])[2H])[2H])[2H]"
    chembl2024142 = Chem.MolFromSmiles(chembl2024142Smi)
    params = Chem.RemoveHsParameters()
    params.removeAndTrackIsotopes = True
    chembl2024142 = Chem.RemoveHs(chembl2024142, params)
    self.assertTrue(chembl2024142.GetAtomWithIdx(12).HasProp("_isotopicHs"))
    # test remove isotopic Hs involved in tautomerism
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveIsotopicHs = True
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(chembl2024142)
    for taut in res:
        self.assertFalse(taut.GetAtomWithIdx(12).HasProp("_isotopicHs"))
    # test retain isotopic Hs involved in tautomerism
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveIsotopicHs = False
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    res = enumerator.Enumerate(chembl2024142)
    for taut in res:
        self.assertTrue(taut.GetAtomWithIdx(12).HasProp("_isotopicHs"))
def test16EnumeratorCallback(self):
    """TautomerEnumeratorCallback subclassing: a timeout-based callback can
    cancel enumeration, broken subclasses (missing or non-callable
    __call__) are rejected, and copying an enumerator preserves its
    callback (GitHub #4736).

    NOTE(review): the two wall-clock timeouts make parts of this test
    timing-dependent; the XOR assertions accept either outcome but require
    exactly one of them."""

    class MyTautomerEnumeratorCallback(rdMolStandardize.TautomerEnumeratorCallback):

        def __init__(self, parent, timeout_ms):
            super().__init__()
            self._parent = parent
            self._timeout = timedelta(milliseconds=timeout_ms)
            self._start_time = datetime.now()

        def __call__(self, mol, res):
            # invoked by the C++ enumerator; returning False cancels the run
            self._parent.assertTrue(isinstance(mol, Chem.Mol))
            self._parent.assertTrue(isinstance(res, rdMolStandardize.TautomerEnumeratorResult))
            return (datetime.now() - self._start_time < self._timeout)

    class MyBrokenCallback(rdMolStandardize.TautomerEnumeratorCallback):
        # no __call__ at all -> SetCallback must raise AttributeError
        pass

    class MyBrokenCallback2(rdMolStandardize.TautomerEnumeratorCallback):
        # __call__ exists but is not callable -> also rejected
        __call__ = 1

    # Test a structure with hundreds of tautomers.
    smi68 = "[H][C](CO)(NC(=O)C1=C(O)C(O)=CC=C1)C(O)=O"
    m68 = Chem.MolFromSmiles(smi68)
    params = rdMolStandardize.CleanupParameters()
    params.maxTransforms = 10000
    params.maxTautomers = 10000
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    enumerator.SetCallback(MyTautomerEnumeratorCallback(self, 50.0))
    res68 = enumerator.Enumerate(m68)
    # either the enumeration was canceled due to timeout
    # or it has completed very quickly
    hasReachedTimeout = (len(res68.tautomers) < 375
                         and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Canceled)
    hasCompleted = (len(res68.tautomers) == 375
                    and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Completed)
    if hasReachedTimeout:
        print("Enumeration was canceled due to timeout (50 ms)", file=sys.stderr)
    if hasCompleted:
        print("Enumeration has completed", file=sys.stderr)
    # exactly one of the two outcomes must have happened
    self.assertTrue(hasReachedTimeout or hasCompleted)
    self.assertTrue(hasReachedTimeout ^ hasCompleted)
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    enumerator.SetCallback(MyTautomerEnumeratorCallback(self, 10000.0))
    res68 = enumerator.Enumerate(m68)
    # either the enumeration completed
    # or it ran very slowly and was canceled due to timeout
    hasReachedTimeout = (len(res68.tautomers) < 295
                         and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Canceled)
    hasCompleted = (len(res68.tautomers) == 295
                    and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Completed)
    if hasReachedTimeout:
        print("Enumeration was canceled due to timeout (10 s)", file=sys.stderr)
    if hasCompleted:
        print("Enumeration has completed", file=sys.stderr)
    self.assertTrue(hasReachedTimeout or hasCompleted)
    self.assertTrue(hasReachedTimeout ^ hasCompleted)
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    with self.assertRaises(AttributeError):
        enumerator.SetCallback(MyBrokenCallback())
    with self.assertRaises(AttributeError):
        enumerator.SetCallback(MyBrokenCallback2())
    # GitHub #4736: the copy constructor must carry the callback along
    enumerator = rdMolStandardize.TautomerEnumerator(params)
    enumerator.SetCallback(MyTautomerEnumeratorCallback(self, 50.0))
    enumerator_copy = rdMolStandardize.TautomerEnumerator(enumerator)
    res68 = enumerator.Enumerate(m68)
    res68_copy = enumerator_copy.Enumerate(m68)
    self.assertTrue(res68.status == res68_copy.status)
def test17PickCanonicalCIPChangeOnChiralCenter(self):
    """Interaction of tautomerRemoveSp3Stereo and tautomerReassignStereo:
    a chiral centre that participates in tautomerism loses its stereo by
    default; with removal disabled the tag is kept, and whether the CIP
    code is recomputed (S, correct) or stale (R, incorrect) depends on
    tautomerReassignStereo.  A second molecule whose centre is NOT part of
    the tautomer system keeps its stereo in every configuration."""

    def get_canonical_taut(res):
        # re-implements PickCanonical by hand: highest-scoring tautomer wins
        best_idx = max([(rdMolStandardize.TautomerEnumerator.ScoreTautomer(t), i)
                        for i, t in enumerate(res.tautomers)])[1]
        return res.tautomers[best_idx]

    # chiral centre (atom 5) is alpha to the enol, i.e. part of the tautomer zone
    smi = "CC\\C=C(/O)[C@@H](C)C(C)=O"
    mol = Chem.MolFromSmiles(smi)
    self.assertIsNotNone(mol)
    self.assertEqual(mol.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(mol.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
    # here the chirality disappears as the chiral center is itself involved in tautomerism
    te = rdMolStandardize.TautomerEnumerator()
    can_taut = te.Canonicalize(mol)
    self.assertIsNotNone(can_taut)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
    self.assertFalse(can_taut.GetAtomWithIdx(5).HasProp("_CIPCode"))
    self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)C(C)C(C)=O")
    # here the chirality stays even if the chiral center is itself involved in tautomerism
    # because of the tautomerRemoveSp3Stereo parameter being set to false
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    can_taut = te.Canonicalize(mol)
    self.assertIsNotNone(can_taut)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
    self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)[C@@H](C)C(C)=O")
    # here the chirality disappears as the chiral center is itself involved in tautomerism
    # the reassignStereo setting has no influence
    te = rdMolStandardize.TautomerEnumerator()
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 8)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
    self.assertFalse(best_taut.GetAtomWithIdx(5).HasProp("_CIPCode"))
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)C(C)C(C)=O")
    # here the chirality disappears as the chiral center is itself involved in tautomerism
    # the reassignStereo setting has no influence
    params = rdMolStandardize.CleanupParameters()
    params.tautomerReassignStereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 8)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
    self.assertFalse(best_taut.GetAtomWithIdx(5).HasProp("_CIPCode"))
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)C(C)C(C)=O")
    # here the chirality stays even if the chiral center is itself involved in tautomerism
    # because of the tautomerRemoveSp3Stereo parameter being set to false
    # as reassignStereo by default is true, the CIP code has been recomputed
    # and therefore it is now S (correct)
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 8)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@@H](C)C(C)=O")
    # here the chirality stays even if the chiral center is itself involved in tautomerism
    # because of the tautomerRemoveSp3Stereo parameter being set to false
    # as reassignStereo is false, the CIP code has not been recomputed
    # and therefore it is still R (incorrect)
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    params.tautomerReassignStereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 8)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@@H](C)C(C)=O")
    # quaternary variant: atom 5 has no H, so it is NOT part of the tautomer zone
    smi = "CC\\C=C(/O)[C@@](CC)(C)C(C)=O"
    mol = Chem.MolFromSmiles(smi)
    self.assertIsNotNone(mol)
    self.assertEqual(mol.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
    self.assertEqual(mol.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    # here the chirality stays no matter how tautomerRemoveSp3Stereo
    # is set as the chiral center is not involved in tautomerism
    te = rdMolStandardize.TautomerEnumerator()
    can_taut = te.Canonicalize(mol)
    self.assertIsNotNone(can_taut)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
    self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    can_taut = te.Canonicalize(mol)
    self.assertIsNotNone(can_taut)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(can_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
    self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
    # as reassignStereo by default is true, the CIP code has been recomputed
    # and therefore it is now R (correct)
    te = rdMolStandardize.TautomerEnumerator()
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 4)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
    # as reassignStereo is false, the CIP code has not been recomputed
    # and therefore it is still S (incorrect)
    params = rdMolStandardize.CleanupParameters()
    params.tautomerReassignStereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 4)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
    # as reassignStereo by default is true, the CIP code has been recomputed
    # and therefore it is now R (correct)
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 4)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
    # here the chirality stays even if the tautomerRemoveSp3Stereo parameter
    # is set to false as the chiral center is not involved in tautomerism
    # as reassignStereo is false, the CIP code has not been recomputed
    # and therefore it is still S (incorrect)
    params = rdMolStandardize.CleanupParameters()
    params.tautomerRemoveSp3Stereo = False
    params.tautomerReassignStereo = False
    te = rdMolStandardize.TautomerEnumerator(params)
    res = te.Enumerate(mol)
    self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
    self.assertEqual(len(res.tautomers), 4)
    best_taut = get_canonical_taut(res)
    self.assertIsNotNone(best_taut)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
    self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
    self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
def test18TautomerEnumeratorResultIter(self):
smi = "Cc1nnc(NC(=O)N2CCN(Cc3ccc(F)cc3)C(=O)C2)s1"
mol = Chem.MolFromSmiles(smi)
self.assertIsNotNone(mol)
te = rdMolStandardize.TautomerEnumerator()
res = te.Enumerate(mol)
res_it = iter(res)
i = 0
while 1:
try:
t = next(res_it)
except StopIteration:
break
self.assertEqual(Chem.MolToSmiles(t), Chem.MolToSmiles(res[i]))
i += 1
self.assertEqual(i, len(res))
res_it = iter(res)
i = -len(res)
while 1:
try:
t = next(res_it)
except StopIteration:
break
self.assertEqual(Chem.MolToSmiles(t), Chem.MolToSmiles(res[i]))
i += 1
self.assertEqual(i, 0)
def test19NormalizeFromParams(self):
params = rdMolStandardize.CleanupParameters()
params.normalizationsFile = "ThisFileDoesNotExist.txt"
with self.assertRaises(OSError):
rdMolStandardize.NormalizerFromParams(params)
def test20NoneHandling(self):
with self.assertRaises(ValueError):
rdMolStandardize.ChargeParent(None)
with self.assertRaises(ValueError):
rdMolStandardize.Cleanup(None)
with self.assertRaises(ValueError):
rdMolStandardize.FragmentParent(None)
with self.assertRaises(ValueError):
rdMolStandardize.Normalize(None)
with self.assertRaises(ValueError):
rdMolStandardize.Reionize(None)
    def test21UpdateFromJSON(self):
        """Exercise rdMolStandardize.UpdateParamsFromJSON: the customized
        parameters must be honoured by the tautomer enumerator, normalizer,
        reionizer and fragment remover, and differ from the defaults."""
        params = rdMolStandardize.CleanupParameters()
        # note: these actual parameters aren't useful... they are for testing
        rdMolStandardize.UpdateParamsFromJSON(
            params, """{
            "normalizationData":[
                {"name":"silly 1","smarts":"[Cl:1]>>[F:1]"},
                {"name":"silly 2","smarts":"[Br:1]>>[F:1]"}
            ],
            "acidbaseData":[
                {"name":"-CO2H","acid":"C(=O)[OH]","base":"C(=O)[O-]"},
                {"name":"phenol","acid":"c[OH]","base":"c[O-]"}
            ],
            "fragmentData":[
                {"name":"hydrogen", "smarts":"[H]"},
                {"name":"fluorine", "smarts":"[F]"},
                {"name":"chlorine", "smarts":"[Cl]"}
            ],
            "tautomerTransformData":[
                {"name":"1,3 (thio)keto/enol f","smarts":"[CX4!H0]-[C]=[O,S,Se,Te;X1]","bonds":"","charges":""},
                {"name":"1,3 (thio)keto/enol r","smarts":"[O,S,Se,Te;X2!H0]-[C]=[C]"}
            ]}""")
        # tautomer enumeration: custom transforms behave like the defaults here
        m = Chem.MolFromSmiles("CCC=O")
        te = rdMolStandardize.TautomerEnumerator(params)
        tauts = [Chem.MolToSmiles(x) for x in te.Enumerate(m)]
        self.assertEqual(tauts, ["CC=CO", "CCC=O"])
        self.assertEqual(Chem.MolToSmiles(rdMolStandardize.CanonicalTautomer(m, params)), "CCC=O")
        # now with defaults
        te = rdMolStandardize.TautomerEnumerator()
        tauts = [Chem.MolToSmiles(x) for x in te.Enumerate(m)]
        self.assertEqual(tauts, ["CC=CO", "CCC=O"])
        self.assertEqual(Chem.MolToSmiles(rdMolStandardize.CanonicalTautomer(m)), "CCC=O")
        # the "silly" normalizations turn Cl and Br into F
        m = Chem.MolFromSmiles('ClCCCBr')
        nm = rdMolStandardize.Normalize(m, params)
        self.assertEqual(Chem.MolToSmiles(nm), "FCCCF")
        # now with defaults
        nm = rdMolStandardize.Normalize(m)
        self.assertEqual(Chem.MolToSmiles(nm), "ClCCCBr")
        # reionization with the custom (reduced) acid/base table
        m = Chem.MolFromSmiles('c1cc([O-])cc(C(=O)O)c1')
        nm = rdMolStandardize.Reionize(m, params)
        self.assertEqual(Chem.MolToSmiles(nm), "O=C([O-])c1cccc(O)c1")
        # now with defaults
        nm = rdMolStandardize.Reionize(m)
        self.assertEqual(Chem.MolToSmiles(nm), "O=C([O-])c1cccc(O)c1")
        # the custom table ranks the sulfur acids differently from the defaults
        m = Chem.MolFromSmiles('C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O')
        nm = rdMolStandardize.Reionize(m, params)
        self.assertEqual(Chem.MolToSmiles(nm), "O=S([O-])c1ccc(S(=O)(=O)O)cc1")
        # now with defaults
        nm = rdMolStandardize.Reionize(m)
        self.assertEqual(Chem.MolToSmiles(nm), "O=S(O)c1ccc(S(=O)(=O)[O-])cc1")
        # custom fragment list does not include Br-, so it is kept
        m = Chem.MolFromSmiles('[F-].[Cl-].[Br-].CC')
        nm = rdMolStandardize.RemoveFragments(m, params)
        self.assertEqual(Chem.MolToSmiles(nm), "CC.[Br-]")
        # now with defaults
        nm = rdMolStandardize.RemoveFragments(m)
        self.assertEqual(Chem.MolToSmiles(nm), "CC")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
rdkit/rdkit
|
Code/GraphMol/MolStandardize/Wrap/testMolStandardize.py
|
Python
|
bsd-3-clause
| 42,058
|
[
"RDKit"
] |
bd327a8adb4c23d624441b269d2ab227aceec639bfc633c62af454dedf56eff9
|
import ast
from .base import (
BaseAnalyzer, Result, AttributeVisitor, ModuleVisitor, set_lineno,
)
class CallVisitor(ast.NodeVisitor):
    """
    Collects all usable attributes and names inside function call.
    """

    def __init__(self):
        self.names = set()

    def visit_Attribute(self, node):
        # Delegate to AttributeVisitor; only record attributes it deems usable.
        sub = AttributeVisitor()
        sub.visit(node)
        if not sub.is_usable:
            return
        self.names.add(sub.get_name())

    def visit_Name(self, node):
        # Plain identifiers are always usable.
        self.names.add(node.id)
class RenderToResponseVisitor(ModuleVisitor):
    # Import paths this visitor tracks: render_to_response calls that are
    # passed a RequestContext can be replaced by django.shortcuts.render.
    interesting = {
        'django.shortcuts': ['render_to_response'],
        'django.shortcuts.render_to_response': None,
        'django.template': ['RequestContext'],
        'django.template.RequestContext': None,
    }

    @set_lineno
    def visit_Call(self, node):
        """Record render_to_response(...) calls whose arguments reference
        django.template.RequestContext."""
        # Check if calling attribute is usable...
        visitor = AttributeVisitor()
        visitor.visit(node.func)
        if not visitor.is_usable:
            return

        # ...and if interesting
        name = visitor.get_name()
        if name not in self.names:
            return

        # ... and also if it is actually `render_to_response` call.
        if self.names[name] != 'django.shortcuts.render_to_response':
            return

        # Check if it contains `RequestContext`. If so, add to `found`.
        visitor = CallVisitor()
        visitor.visit(node)
        for subname in visitor.names:
            if subname not in self.names:
                continue
            if self.names[subname] == 'django.template.RequestContext':
                self.add_found(name, node)
class RenderToResponseAnalyzer(BaseAnalyzer):
    """Yields a Result for each render_to_response(...RequestContext...)
    call that could be rewritten with django.shortcuts.render."""

    def analyze_file(self, filepath, code):
        if not isinstance(code, ast.AST):
            return
        finder = RenderToResponseVisitor()
        finder.visit(code)
        for name, node, start, stop in finder.get_found():
            description = (
                "this %r usage case can be replaced with 'render' "
                "function from 'django.shortcuts' package." % name
            )
            result = Result(description=description, path=filepath, line=start)
            # Attach the offending source lines for display.
            for lineno, important, text in self.get_file_lines(filepath, start, stop):
                result.source.add_line(lineno, text, important)
            yield result
|
alfredhq/djlint
|
djlint/analyzers/render_to_response.py
|
Python
|
isc
| 2,433
|
[
"VisIt"
] |
0b96366cecec748522c7ab24cc50cfd3efed19496b273101404774b184a187d2
|
#!/usr/bin/env python
"""
Get the given file replica metadata from the File Catalog
"""
import os
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    """Print migration/cache/size metadata for replicas of the given LFNs on
    a storage element (dirac-dms-replica-metadata)."""
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(("LocalFile: Path to local file containing LFNs", "LFN: Logical File Names"))
    Script.registerArgument(" SE: Storage element")
    Script.parseCommandLine()

    from DIRAC.DataManagementSystem.Client.DataManager import DataManager

    # parseCommandLine show help when mandatory arguments are not specified or incorrect argument
    inputFileName, storageElement = Script.getPositionalArgs(group=True)

    if os.path.exists(inputFileName):
        # FIX: use a context manager so the file is closed even if the read
        # raises (the original left the handle open on error).
        with open(inputFileName, "r") as inputFile:
            lfns = [lfn.strip() for lfn in inputFile.read().splitlines()]
    else:
        # argument is a single LFN, not a list file
        lfns = [inputFileName]

    res = DataManager().getReplicaMetadata(lfns, storageElement)
    if not res["OK"]:
        print("Error:", res["Message"])
        DIRACExit(1)

    print("%s %s %s %s" % ("File".ljust(100), "Migrated".ljust(8), "Cached".ljust(8), "Size (bytes)".ljust(10)))
    for lfn, metadata in res["Value"]["Successful"].items():
        print(
            "%s %s %s %s"
            % (
                lfn.ljust(100),
                str(metadata["Migrated"]).ljust(8),
                # some SE plugins report "Accessible" instead of "Cached"
                str(metadata.get("Cached", metadata["Accessible"])).ljust(8),
                str(metadata["Size"]).ljust(10),
            )
        )
    for lfn, reason in res["Value"]["Failed"].items():
        print("%s %s" % (lfn.ljust(100), reason.ljust(8)))


if __name__ == "__main__":
    main()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_replica_metadata.py
|
Python
|
gpl-3.0
| 1,780
|
[
"DIRAC"
] |
8e9aa41623ff193777d8579aa5ca25b5f3945c973fda1ea8833ba5b41d25c914
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-install-agent
# Author : Ricardo Graciani
########################################################################
"""
Do the initial installation and configuration of a DIRAC agent
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Utilities import InstallTools
from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
#
from DIRAC import gConfig, S_OK, S_ERROR
from DIRAC import exit as DIRACexit
InstallTools.exitOnError = True
#
from DIRAC.Core.Base import Script
overwrite = False
def setOverwrite( opVal ):
  # Switch callback for -w/--overwrite: allow replacing existing CS entries.
  # opVal is supplied by the switch machinery and is unused.
  global overwrite
  overwrite = True
  return S_OK()
module = ''
specialOptions = {}
def setModule( optVal ):
  # Switch callback for -m/--module: record the Python module implementing
  # the agent, both as a special CS option and for the install call below.
  global specialOptions, module
  specialOptions['Module'] = optVal
  module = optVal
  return S_OK()
def setSpecialOption( optVal ):
  # Switch callback for -p/--parameter: record a Name=Value agent option.
  # FIX: split on the first '=' only, so option values may themselves
  # contain '=' characters (the original unpacking raised ValueError).
  global specialOptions
  option, value = optVal.split( '=', 1 )
  specialOptions[option] = value
  return S_OK()
# Switch and usage registration must happen before parseCommandLine().
Script.registerSwitch( "w", "overwrite", "Overwrite the configuration in the global CS", setOverwrite )
Script.registerSwitch( "m:", "module=", "Python module name for the agent code", setModule )
Script.registerSwitch( "p:", "parameter=", "Special agent option ", setSpecialOption )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... System Agent|System/Agent' % Script.scriptName,
                                     'Arguments:',
                                     '  System:  Name of the DIRAC system (ie: WorkloadManagement)',
                                     '  Agent:   Name of the DIRAC agent (ie: JobCleaningAgent)'] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()

# Accept either "System Agent" or the single "System/Agent" form.
if len( args ) == 1:
  args = args[0].split( '/' )

if len( args ) != 2:
  Script.showHelp()
  DIRACexit( -1 )
#
system = args[0]
agent = args[1]

if module:
  # A custom module was given: add CS defaults for the module first, then
  # for the agent instance itself (without re-adding the module defaults).
  result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, module,
                                               getCSExtensions(),
                                               overwrite = overwrite )
  result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, agent,
                                               getCSExtensions(),
                                               specialOptions=specialOptions,
                                               overwrite = overwrite,
                                               addDefaultOptions = False )
else:
  result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, agent,
                                               getCSExtensions(),
                                               specialOptions=specialOptions,
                                               overwrite = overwrite )
if not result['OK']:
  print "ERROR:", result['Message']
else:
  # CS defaults in place: install the component, then set it up (start it).
  result = InstallTools.installComponent( 'agent', system, agent, getCSExtensions(), module )
  if not result['OK']:
    print "ERROR:", result['Message']
    DIRACexit( 1 )
  else:
    print "Successfully installed agent %s in %s system, now setting it up" % ( agent, system )
    result = InstallTools.setupComponent( 'agent', system, agent, getCSExtensions(), module )
    if not result['OK']:
      print "ERROR:", result['Message']
      DIRACexit( 1 )
    else:
      print "Successfully completed the installation of agent %s in %s system" % ( agent, system )
      DIRACexit()
|
rajanandakumar/DIRAC
|
Core/scripts/dirac-install-agent.py
|
Python
|
gpl-3.0
| 3,506
|
[
"DIRAC"
] |
8cb9ccf5b1178e0ef935f4ecab5ff4a946d5a93a72f15278350668693dfa224a
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Image resolution and iteration budget for the Mandelbrot source.
RANGE = 150
MAX_ITERATIONS_1 = RANGE
MAX_ITERATIONS_2 = RANGE
XRAD = 200
YRAD = 200

mandelbrot1 = vtk.vtkImageMandelbrotSource()
# FIX (consistency): use the named constant instead of repeating the
# literal 150 that MAX_ITERATIONS_1 was defined for.
mandelbrot1.SetMaximumNumberOfIterations(MAX_ITERATIONS_1)
mandelbrot1.SetWholeExtent(0, XRAD - 1, 0, YRAD - 1, 0, 0)
mandelbrot1.SetSampleCX(1.3 / XRAD, 1.3 / XRAD, 1.3 / XRAD, 1.3 / XRAD)
mandelbrot1.SetOriginCX(-0.72, 0.22, 0.0, 0.0)
mandelbrot1.SetProjectionAxes(0, 1, 2)

# Map iteration counts to display intensities via window/level.
mapToWL = vtk.vtkImageMapToWindowLevelColors()
mapToWL.SetInputConnection(mandelbrot1.GetOutputPort())
mapToWL.SetWindow(RANGE)
mapToWL.SetLevel(RANGE / 3.0)

# set the window/level to 255.0/127.5 to view full range
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(mapToWL.GetOutputPort())
viewer.SetColorWindow(255.0)
viewer.SetColorLevel(127.5)

viewer.Render()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Imaging/Core/Testing/Python/TestMapToWindowLevelColors.py
|
Python
|
bsd-3-clause
| 933
|
[
"VTK"
] |
308b65b1b3c3c05287b7c5e806184c7481c2c052d9d055485d7c3854ac63f6e0
|
import moose
import rdesigneur as rd
# Minimal rdesigneur model: a single passive soma driven by a square
# current pulse (2e-8 A injected between t=0.1 and t=0.2), plotting Vm.
rdes = rd.rdesigneur(
    stimList = [['soma', '1', '.', 'inject', '(t>0.1 && t<0.2) * 2e-8' ]],
    plotList = [['soma', '1', '.', 'Vm', 'Soma membrane potential']]
)

rdes.buildModel()     # instantiate the model in the MOOSE tree
moose.reinit()        # initialize solver state before running
moose.start( 0.3 )    # simulate 0.3 seconds
rdes.display()        # show the recorded plot(s)
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex2.0_currentPulse.py
|
Python
|
gpl-2.0
| 272
|
[
"MOOSE"
] |
594d0c3a81bc2c4d25e1529d7f47b7fc172affbe8f84d486746c3265ef4e4d0c
|
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
from __future__ import absolute_import
from builtins import object
import collections
from normalize.coll import Collection
import normalize.exc as exc
from normalize.record import Record
from normalize.selector import FieldSelector
from normalize.selector import MultiFieldSelector
class Visitor(object):
    """The Visitor object represents a single recursive visit in progress.  You
    hopefully shouldn't have to sub-class this class for most use cases; just
    VisitorPattern.
    """
    def __init__(self, unpack_func, apply_func, collect_func, reduce_func,
                 apply_empty_slots=False, extraneous=False,
                 ignore_empty_string=False, ignore_none=True,
                 visit_filter=None, filter=None):
        """Create a new Visitor object.  Generally called by a front-end class
        method of :py:class:`VisitorPattern`

        There are four positional arguments, which specify the particular
        functions to be used during the visit.  The important options from a
        user of a visitor are the keyword arguments:

        ``apply_empty_slots=``\ *bool*
            If set, then your ``apply`` method (or ``reverse``, etc) will
            be called even if there is no corresponding value in the input.
            Your method will receive the Exception as if it were the value.

        ``extraneous=``\ *bool*
            Also call the apply method on properties marked *extraneous*.
            False by default.

        ``ignore_empty_string=``\ *bool*
            If the 'apply' function returns the empty string, treat it as
            if the slot or object did not exist.  ``False`` by default.

        ``ignore_none=``\ *bool*
            If the 'apply' function returns ``None``, treat it as if the
            slot or object did not exist.  ``True`` by default.

        ``visit_filter=``\ *MultiFieldSelector*
            This supplies an instance of
            :py:class:`normalize.selector.MultiFieldSelector`, and
            restricts the operation to the matched object fields.  Can also
            be specified as just ``filter=``
        """
        # the four visit callbacks (unpack/apply/collect/reduce stages)
        self.unpack = unpack_func
        self.apply = apply_func
        self.collect = collect_func
        self.reduce = reduce_func
        self.apply_empty_slots = apply_empty_slots
        self.extraneous = extraneous
        self.ignore_empty_string = ignore_empty_string
        self.ignore_none = ignore_none
        # ``filter=`` is an alias for ``visit_filter=``
        if visit_filter is None:
            visit_filter = filter
        # accept either a ready-made MultiFieldSelector (or None), or any
        # iterable of selector specs to construct one from
        if isinstance(visit_filter, (MultiFieldSelector, type(None))):
            self.visit_filter = visit_filter
        else:
            self.visit_filter = MultiFieldSelector(*visit_filter)
        self.seen = set()  # TODO
        # ``cue`` is the current traversal path (property names / coll keys)
        self.cue = list()

    def is_filtered(self, prop):
        # a property is skipped when it is extraneous (and extraneous props
        # are not being visited) or excluded by the field filter at the
        # current traversal position
        return (not self.extraneous and prop.extraneous) or (
            self.visit_filter and not self.visit_filter[self.cue + [prop.name]]
        )

    @property
    def field_selector(self):
        # current traversal position as a FieldSelector
        return FieldSelector(self.cue)

    def push(self, what):
        # descend into a property name or collection key
        self.cue.append(what)

    def pop(self, what=None):
        # ascend one level; optionally assert what we expect to leave
        if what is not None:
            assert(self.cue[-1] == what)
        return self.cue.pop()

    def copy(self):
        """Be sure to implement this method when sub-classing, otherwise you
        will lose any specialization context."""
        doppel = type(self)(
            self.unpack, self.apply, self.collect, self.reduce,
            apply_empty_slots=self.apply_empty_slots,
            extraneous=self.extraneous,
            ignore_empty_string=self.ignore_empty_string,
            ignore_none=self.ignore_none,
            visit_filter=self.visit_filter,
        )
        # replicate the traversal position; ``seen`` is shared, not copied
        for x in self.cue:
            doppel.push(x)
        doppel.seen = self.seen
        return doppel
class VisitorPattern(object):
"""Base Class for writing Record visitor pattern classes. These classes
are not instantiated, and consist purely of class methods.
There are three visitors supplied by default, which correspond to typical
use for IO (:py:meth:`normalize.visitor.VisitorPattern.visit` for output,
and :py:meth:`normalize.visitor.VisitorPattern.cast` for input), and for
providing a centralized type catalogue
(:py:meth:`normalize.visitor.VisitorPattern.reflect`).
============= =========== ============= ===================================
``visit`` ``cast`` ``reflect`` Description
============= =========== ============= ===================================
``unpack`` ``grok`` ``scantypes`` Defines how to get a property value
from the thing being walked, and a
generator for the collection.
``apply`` ``reverse`` ``propinfo`` Conversion for individual values
``aggregate`` ``collect`` ``itemtypes`` Combine collection results
``reduce`` ``produce`` ``typeinfo`` Combine apply results
============= =========== ============= ===================================
To customize what is emitted, sub-class ``VisitorPattern`` and override the
class methods of the conversion you are interested in. For many simple IO
use cases, you might need only to override are ``apply`` and ``reverse``,
if that.
The versions for ``visit`` are documented the most thoroughly, as these are
the easiest to understand and the ones most users will be customizing. The
documentation for the other methods describes the differences between them
and their ``visit`` counterpart.
"""
Visitor = Visitor
    @classmethod
    def visit(cls, value, value_type=None, **kwargs):
        """A value visitor, which visits instances (typically), applies
        :py:meth:`normalize.visitor.VisitorPattern.apply` to every attribute
        slot, and returns the reduced result.

        Like :py:func:`normalize.diff.diff`, this function accepts a series of
        keyword arguments, which are passed through to
        :py:class:`normalize.visitor.Visitor`.

        This function also takes positional arguments:

        ``value=``\ *object*
            The value to visit.  Normally (but not always) a
            :py:class:`normalize.record.Record` instance.

        ``value_type=``\ *RecordType*
            This is the ``Record`` subclass to interpret ``value`` as.  The
            default is ``type(value)``.  If you specify this, then the type
            information on ``value`` is essentially ignored (with the
            caveat mentioned below on :py:meth:`Visitor.map_prop`), and may
            be a ``dict``, ``list``, etc.

        ``**kwargs``
            Visitor options accepted by
            :py:meth:`normalize.visitor.Visitor.__init__`.
        """
        # wire the four 'visit' stage callbacks into a Visitor context
        visitor = cls.Visitor(
            cls.unpack, cls.apply, cls.aggregate, cls.reduce,
            **kwargs)
        if not value_type:
            value_type = type(value)
        if not issubclass(value_type, Record):
            raise TypeError(
                "Cannot visit %s instance" % value_type.__name__
            )
        return cls.map(visitor, value, value_type)
    @classmethod
    def unpack(cls, value, value_type, visitor):
        """Unpack a value during a 'visit'

        args:

        ``value=``\ *object*
            The instance being visited

        ``value_type=``\ *RecordType*
            The expected type of the instance

        ``visitor=``\ *Visitor*
            The context/options

        returns a tuple with two items:

        ``get_prop=``\ *function*
            This function should take a
            :py:class:`normalize.property.Property` instance, and return
            the slot from the value, or raise ``AttributeError`` or
            ``KeyError`` if the slot is empty.  Returning nothing means
            that the item has no properties to unpack; ie, it's an opaque
            type.

        ``get_item=``\ *generator*
            This generator should return the tuple protocol used by
            :py:class:`normalize.coll.Collection`: (K, V) where K can be an
            ascending integer (for sequences), V (for sets), or something
            hashable like a string (for dictionaries/maps)
        """
        if issubclass(value_type, Collection):
            try:
                # EAFP: prefer the value's own itertuples() if it has one
                generator = value.itertuples()
            except AttributeError:
                # fall back to converting a plain python collection
                if isinstance(value, value_type.colltype):
                    generator = value_type.coll_to_tuples(value)
                else:
                    raise exc.VisitorUnpackError(
                        passed=value,
                        colltype=value_type.colltype.__name__,
                        context=visitor,
                    )
        else:
            generator = None

        if issubclass(value_type, Record):
            # slot access goes through the Property descriptor
            def propget(prop):
                return prop.__get__(value)
        else:
            propget = None

        return propget, generator
    @classmethod
    def apply(cls, value, prop, visitor):
        """'apply' is a general place to put a function which is called on
        every extant record slot.  This is usually the most important function
        to implement when sub-classing.

        The default implementation passes through the slot value as-is, but
        expected exceptions are converted to ``None``.

        args:

        ``value=``\ *value*\ \|\ *AttributeError*\ \|\ *KeyError*
            This is the value currently in the slot, or the Record itself
            with the ``apply_records`` visitor option.  *AttributeError*
            will only be received if you passed ``apply_empty_slots``, and
            *KeyError* will be passed if ``parent_obj`` is a ``dict`` (see
            :py:meth:`Visitor.map_prop` for details about when this might
            happen)

        ``prop=``\ *Property*\ \|\ ``None``
            This is the :py:class:`normalize.Property` instance which
            represents the field being traversed.

            This can be ``None`` when being applied over Collection
            instances, where the type of the contents is not a Record.

        ``visitor=``\ *Visitor*
            This object can be used to inspect parameters of the current
            run, such as options which control which kinds of values are
            visited, which fields are being visited and where the function
            is in relation to the starting point.
        """
        # pass through, squashing empty-slot exceptions to None
        return (
            None if isinstance(value, (AttributeError, KeyError)) else
            value
        )
@classmethod
def aggregate(self, mapped_coll_generator, coll_type, visitor):
"""Hook called for each normalize.coll.Collection, after mapping over
each of the items in the collection.
The default implementation calls
:py:meth:`normalize.coll.Collection.tuples_to_coll` with
``coerce=False``, which just re-assembles the collection into a native
python collection type of the same type of the input collection.
args:
``result_coll_generator=`` *generator func*
Generator which returns (key, value) pairs (like
:py:meth:`normalize.coll.Collection.itertuples`)
``coll_type=``\ *CollectionType*
This is the :py:class:`normalize.coll.Collection`-derived
*class* which is currently being reduced.
``visitor=``\ *Visitor*
Context/options object
"""
return coll_type.tuples_to_coll(mapped_coll_generator, coerce=False)
@classmethod
def reduce(self, mapped_props, aggregated, value_type, visitor):
"""This reduction is called to combine the mapped slot and collection
item values into a single value for return.
The default implementation tries to behave naturally; you'll almost
always get a dict back when mapping over a record, and list or some
other collection when mapping over collections.
If the collection has additional properties which are not ignored (eg,
not extraneous, not filtered), then the result will be a dictionary
with the results of mapping the properties, and a 'values' key will be
added with the result of mapping the items in the collection.
args:
``mapped_props=``\ *generator*
Iterating over this generator will yield K, V pairs, where K is
**the Property object** and V is the mapped value.
``aggregated=``\ *object*
This contains whatever ``aggregate`` returned, normally a list.
``value_type=``\ *RecordType*
This is the type which is currently being reduced.
A :py:class:`normalize.record.Record` subclass
``visitor=``\ *Visitor*
Contenxt/options object.
"""
reduced = None
if mapped_props:
reduced = dict((k.name, v) for k, v in mapped_props)
if issubclass(value_type, Collection) and aggregated is not None:
if all(visitor.is_filtered(prop) for prop in
list(value_type.properties.values())):
reduced = aggregated
else:
if reduced.get("values", False):
raise exc.VisitorTooSimple(
fs=visitor.field_selector,
value_type_name=value_type.__name__,
visitor=type(self).__name__,
)
else:
reduced['values'] = aggregated
return reduced
    # CAST versions
    @classmethod
    def cast(cls, value_type, value, visitor=None, **kwargs):
        """Cast is for visitors where you are visiting some random data
        structure (perhaps returned by a previous ``VisitorPattern.visit()``
        operation), and you want to convert back to the value type.

        This function also takes positional arguments:

        ``value_type=``\ *RecordType*
            The type to cast to.

        ``value=``\ *object*
            The value being cast (eg, a dict/list structure).

        ``visitor=``\ *Visitor.Options*
            Specifies the visitor options, which customizes the descent
            and reduction.
        """
        if visitor is None:
            # wire the four 'cast' stage callbacks into a Visitor context
            visitor = cls.Visitor(
                cls.grok, cls.reverse, cls.collect, cls.produce,
                **kwargs)
        return cls.map(visitor, value, value_type)
# hooks for types which define what is considered acceptable input for
# given contexts during 'cast'
#
# note: Collection.coll_to_tuples will generally allow you to pass
# collections as a list or a dict with the *values* being the members of
# the set, so this code allows this.
grok_mapping_types = collections.Mapping
grok_coll_types = (collections.Sequence, collections.Mapping)
    @classmethod
    def grok(cls, value, value_type, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.unpack` but called
        for ``cast`` operations.  Expects to work with dictionaries and lists
        instead of Record objects.

        Reverses the transform performed in
        :py:meth:`normalize.visitor.VisitorPattern.reduce` for collections with
        properties.

        If you pass tuples to ``isa`` of your Properties, then you might need
        to override this function and throw ``TypeError`` if the passed
        ``value_type`` is not appropriate for ``value``.
        """
        is_coll = issubclass(value_type, Collection)
        # only count as a 'record' if at least one property survives filtering
        is_record = issubclass(value_type, Record) and any(
            not visitor.is_filtered(prop) for prop in
            list(value_type.properties.values())
        )
        if is_record and not isinstance(value, cls.grok_mapping_types):
            raise exc.VisitorGrokRecordError(
                val=repr(value),
                record_type=value_type,
                record_type_name=value_type.__name__,
                field_selector=visitor.field_selector,
            )

        values = value
        if is_coll and is_record:
            # collection-with-properties was reduced to {props..., 'values': [...]};
            # pull the item list back out of the 'values' slot
            try:
                if "values" in value:
                    values = value['values']
            except TypeError:
                pass

        generator = None
        if is_coll:
            if not isinstance(values, cls.grok_coll_types):
                raise exc.VisitorGrokCollectionError(
                    val=repr(values),
                    record_type=value_type,
                    record_type_name=value_type.__name__,
                    field_selector=visitor.field_selector,
                )
            generator = value_type.coll_to_tuples(values)

        propget = None
        if is_record:
            # slots come from dict keys rather than Property descriptors
            def propget(prop):
                return value[prop.name]

        return propget, generator
    @classmethod
    def reverse(cls, value, prop, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
        for ``cast`` operations.  The default implementation passes through but
        squashes exceptions, just like apply.
        """
        return (
            None if isinstance(value, (AttributeError, KeyError)) else
            value
        )

    @classmethod
    def collect(cls, mapped_coll_generator, coll_type, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.aggregate`, but
        coerces the mapped values to the collection item type on the way
        through.
        """
        # default coerce=True converts items to coll_type.itemtype
        return coll_type.tuples_to_coll(mapped_coll_generator)

    @classmethod
    def produce(cls, mapped_props, aggregated, value_type, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.reduce`, but
        constructs instances rather than returning plain dicts.
        """
        kwargs = {} if not mapped_props else dict(
            (k.name, v) for k, v in mapped_props
        )
        if issubclass(value_type, Collection):
            # collection items are passed via the 'values' constructor arg
            kwargs['values'] = aggregated
        return value_type(**kwargs)
    # versions which walk type objects
    @classmethod
    def reflect(cls, X, **kwargs):
        """Reflect is for visitors where you are exposing some information
        about the types reachable from a starting type to an external system.
        For example, a front-end, a REST URL router and documentation
        framework, an avro schema definition, etc.

        X can be a type or an instance.

        This API should be considered **experimental**
        """
        # accept either a Record subclass or an instance of one
        if isinstance(X, type):
            value = None
            value_type = X
        else:
            value = X
            value_type = type(X)

        if not issubclass(value_type, Record):
            raise TypeError("Cannot reflect on %s" % value_type.__name__)

        # wire the four 'reflect' stage callbacks into a Visitor context
        visitor = cls.Visitor(
            cls.scantypes, cls.propinfo, cls.itemtypes,
            cls.typeinfo,
            **kwargs)

        return cls.map(visitor, value, value_type)
    @classmethod
    def scantypes(cls, value, value_type, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.unpack`, but
        returns a getter which just returns the property, and a collection
        getter which returns a set with a single item in it.
        """
        item_type_generator = None
        if issubclass(value_type, Collection):
            def get_item_types():
                if isinstance(value_type.itemtype, tuple):
                    # not actually supported by Collection yet, but whatever
                    for vt in value_type.itemtype:
                        yield (vt, vt)
                else:
                    yield value_type.itemtype, value_type.itemtype
            item_type_generator = get_item_types()

        propget = None
        if issubclass(value_type, Record):
            # 'slot value' during reflection is the Property object itself
            def propget(prop):
                return prop

        return propget, item_type_generator

    @classmethod
    def propinfo(cls, value, prop, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.apply`, but takes a
        property and returns a dict with some basic info.  The default
        implementation returns just the name of the property and the type in
        here.
        """
        if not prop:
            # opaque item type (not a Record): report its class name only
            return {"name": value.__name__}
        rv = {"name": prop.name}
        if prop.valuetype:
            # type unions are reported as a list of type names
            if isinstance(prop.valuetype, tuple):
                rv['type'] = [typ.__name__ for typ in prop.valuetype]
            else:
                rv['type'] = prop.valuetype.__name__
        return rv

    @classmethod
    def itemtypes(cls, mapped_types, coll_type, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.aggregate`, but
        returns the single mapped item type (or the list, for unions).  This
        will normally only get called with a single type.
        """
        rv = list(v for k, v in mapped_types)
        return rv[0] if len(rv) == 1 else rv

    @classmethod
    def typeinfo(cls, propinfo, type_parameters, value_type, visitor):
        """Like :py:meth:`normalize.visitor.VisitorPattern.reduce`, but returns
        the final dictionary to correspond to a type definition.  The default
        implementation returns just the type name, the list of properties, and
        the item type for collections.
        """
        propspec = dict((prop.name, info) for prop, info in propinfo)
        ts = {'name': value_type.__name__}
        if propspec:
            ts['properties'] = propspec
        if type_parameters:
            ts['itemtype'] = type_parameters
        return ts
    # sentinel iteration stopper
    class StopVisiting(object):
        """This sentinel value may be returned by a custom implementation of
        ``unpack`` (or ``grok``, or ``scantypes``) to indicate that the descent
        should be stopped immediately, instead of proceeding to descend into
        sub-properties.  It can be passed a literal value to use as the mapped
        value as a single constructor argument, or the class itself returned to
        indicate no mapped value."""
        # class-level default so returning the bare class maps to None
        return_value = None

        def __init__(self, return_value):
            self.return_value = return_value
    # methods-in-common
    @classmethod
    def map(cls, visitor, value, value_type):
        """The common visitor API used by all three visitor implementations.

        args:

        ``visitor=``\ *Visitor*
            Visitor options instance: contains the callbacks to use to
            implement the visiting, as well as traversal & filtering
            options.

        ``value=``\ *Object*
            Object being visited

        ``value_type=``\ *RecordType*
            The type object controlling the visiting.
        """
        unpacked = visitor.unpack(value, value_type, visitor)

        # either the StopVisiting class itself or an instance aborts descent
        if unpacked == cls.StopVisiting or isinstance(
            unpacked, cls.StopVisiting
        ):
            return unpacked.return_value

        if isinstance(unpacked, tuple):
            props, coll = unpacked
        else:
            props, coll = unpacked, None

        # recurse into values for collections
        if coll:
            coll_map_generator = cls.map_collection(
                visitor, coll, value_type,
            )
            mapped_coll = visitor.collect(
                coll_map_generator, value_type, visitor,
            )
        else:
            mapped_coll = None

        # recurse into regular properties
        mapped_props = None
        if props:
            mapped_props = cls.map_record(visitor, props, value_type)
        elif mapped_coll is None:
            # opaque value: no props, no collection items; apply directly
            return visitor.apply(value, None, visitor)

        return visitor.reduce(
            mapped_props, mapped_coll, value_type, visitor,
        )
    @classmethod
    def map_record(cls, visitor, get_value, record_type):
        """Generator mapping ``apply`` over the unfiltered properties of a
        record; yields (Property, mapped_value) pairs."""
        rv = visitor.copy()  # expensive?
        for name, prop in record_type.properties.items():
            if rv.is_filtered(prop):
                continue
            rv.push(name)
            try:
                value = get_value(prop)
            except AttributeError as ae:
                # empty slot: pass the exception through as the 'value'
                value = ae
            except KeyError as ke:
                value = ke
            except Exception as e:
                # unexpected failure: unwind the cue and re-raise with context
                rv.pop(name)
                raise exc.VisitorPropError(
                    exception=e,
                    prop=prop,
                    prop_name=name,
                    record_type_name=record_type.__name__,
                    fs=rv.field_selector,
                )
            # empty slots are skipped unless apply_empty_slots was requested
            if visitor.apply_empty_slots or not isinstance(
                value, (KeyError, AttributeError),
            ):
                mapped = cls.map_prop(rv, value, prop)
                # honour ignore_none / ignore_empty_string suppression
                if mapped is None and rv.ignore_none:
                    pass
                elif mapped == "" and rv.ignore_empty_string:
                    pass
                else:
                    yield prop, mapped
            rv.pop(name)
@classmethod
def map_collection(cls, visitor, coll_generator, coll_type):
    """Generate ``(key, mapped_item)`` pairs for a collection's items,
    recursing into each item via ``map``.
    """
    item_visitor = visitor.copy()
    for key, item in coll_generator:
        item_visitor.push(key)
        mapped = cls.map(item_visitor, item, coll_type.itemtype)
        item_visitor.pop(key)
        # NOTE(review): flags are read from ``visitor`` here but from the
        # copy in map_record — presumably copy() preserves them; confirm.
        suppressed = (
            (mapped is None and visitor.ignore_none) or
            (mapped == "" and visitor.ignore_empty_string)
        )
        if not suppressed:
            yield key, mapped
@classmethod
def map_prop(cls, visitor, value, prop):
    """Map a single property value, recursing when it is a record type
    or a union of types.
    """
    # XXX - falling back to type(value) is type-unsafe, and exists only
    # for those who don't declare their isa= for complex object types.
    value_type = prop.valuetype or type(value)
    if isinstance(value_type, tuple):
        return cls.map_type_union(visitor, value, value_type, prop)
    if issubclass(value_type, Record):
        return cls.map(visitor, value, value_type)
    return visitor.apply(value, prop, visitor)
@classmethod
def map_type_union(cls, visitor, value, type_tuple, prop):
    """Visit a value whose ``Property.valuetype`` is a type union.

    This corner case has the same problem that record_id does: it cannot
    know which member of the union the value actually is.  The approach
    taken is to try each candidate ``Record`` type until one returns a
    logically true result.  Handlers (ie, unpack/grok) can also protest
    by raising TypeError, and the next one will be tried.
    """
    record_types = []
    matching_record_types = []
    for candidate in type_tuple:
        if issubclass(candidate, Record):
            record_types.append(candidate)
            # XXX - this test here should probably be a per-visitor
            # hook, as it only really applies to 'visit', not 'grok'
            if isinstance(value, candidate):
                matching_record_types.append(candidate)

    mapped = None
    # prefer types the value is actually an instance of; otherwise fall
    # back to trying every Record member of the union.
    # XXX - the fallback could also be the wrong thing when mapping
    # over types.
    for candidate in (matching_record_types or record_types):
        try:
            mapped = cls.map(visitor, value, candidate)
        except TypeError:
            pass
        else:
            if mapped:
                break

    if not mapped:
        mapped = visitor.apply(value, prop, visitor)
    return mapped
|
hearsaycorp/normalize
|
normalize/visitor.py
|
Python
|
mit
| 28,877
|
[
"VisIt"
] |
0c7770956b31a9ae31831898b06b22973853c25fadadeb0a077a2a87bdb85488
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.