Dataset schema (one record per source file; records follow below):

  column     type          length / values
  ---------  ------------  ------------------------
  text       string        12 to 1.05M characters
  repo_name  string        5 to 86 characters
  path       string        4 to 191 characters
  language   string class  1 distinct value
  license    string class  15 distinct values
  size       int32         12 to 1.05M
  keyword    list          1 to 23 items
  text_hash  string        64 characters
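Read as a data structure, each record pairs a source file (text) with repository metadata and a content hash. As a rough illustration of how such a dump might be consumed — assuming a Hugging Face-style dataset with the columns above; the dataset name below is hypothetical — it could be loaded and filtered like this:

    # Minimal sketch, assuming a Hugging Face-style dataset build;
    # "example-org/python-keyword-corpus" is a hypothetical name.
    from datasets import load_dataset

    ds = load_dataset("example-org/python-keyword-corpus", split="train")

    # Keep MIT-licensed files tagged with a given library keyword.
    subset = ds.filter(lambda row: row["license"] == "mit" and "NetCDF" in row["keyword"])
    for row in subset.select(range(min(3, len(subset)))):
        print(row["repo_name"], row["path"], row["size"])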
# coding: utf-8
# (c) 2015-05-02 Teruhisa Okada

"""
2015/05/02 okada create this file.
2015/06/30 okada update.
# (c) 2015-11-24 Teruhisa Okada
"""

import netCDF4
import numpy as np
from numpy import dtype
from datetime import datetime
import pandas as pd
import romspy

def make_obs_file(ncfile, csvfile, stafile=None, dates=None, varids=None, flag=1, e=None):
    print 'ncfile:', ncfile
    print 'obsfile:', csvfile
    print 'stafile:', stafile
    print 'numpy version:', np.__version__, '(> 1.9.0)'
    state_variable = 19

    df = pd.read_csv(csvfile, parse_dates=['date'], index_col='date')
    df = df.sort()
    print df
    if dates is not None:
        df = df[df.index >= dates[0]]
        df = df[df.index <= dates[1]]
    if varids is not None:
        if len(varids) == 2:
            df = df[(df.type==varids[0]) | (df.type==varids[1])]
    print df.head()
    print df.tail()

    """ You need to change each error """
    df['error'] = 0.0
    if flag == 1:  # OBWQ13
        df.depth = -df.depth
        if e is None:
            df.loc[df.type==6, "error"] = 0.5 + 0.005    # temp
            df.loc[df.type==7, "error"] = 0.5 + 0.005    # salt
            df.loc[df.type==10, "error"] = 1.0 + 0.05    # chlo
            df.loc[df.type==15, "error"] = 10.0 + 0.05   # oxygen
        else:
            df.loc[df.type==6, "error"] = e[0]   # temp
            df.loc[df.type==7, "error"] = e[1]   # salt
            df.loc[df.type==10, "error"] = e[2]  # chlo
            df.loc[df.type==15, "error"] = e[3]  # oxygen
        df['xgrid'] = 0
        df['ygrid'] = 0
        df['lon'] = 0.0
        df['lat'] = 0.0
        sta = pd.read_csv(stafile, index_col='station')
        for i in xrange(1, 14):
            df.loc[df.station==i, "xgrid"] = sta.xgrid[i]
            df.loc[df.station==i, "ygrid"] = sta.ygrid[i]
            df.loc[df.station==i, "lat"] = sta.lat[i]
            df.loc[df.station==i, "lon"] = sta.lon[i]
    elif flag == 2:  # OB_RADAR
        df.loc[df.type==4, "error"] = 0.05  # u
        df.loc[df.type==5, "error"] = 0.05  # v
        df['depth'] = -0.5
        df['station'] = 0
        df['layer'] = 0.5

    if type(df.index.values[0]) == str:
        time = datetime.strptime(df.index.values, '')
    elif type(df.index.values[0]) == np.datetime64:
        time = [dt64.astype('M8[s]').astype('O') for dt64 in df.index.values]
    else:
        time = [ts.to_datetime() for ts in df.index.values]
    print type(df.index.values[0]), df.index.values[0]

    time_out = netCDF4.date2num(time, romspy.JST_days)
    survey_out, nobs_out = np.unique(time_out, return_counts=True)
    station_out = df.station.values
    layer_out = df.layer.values
    depth_out = df.depth.values
    type_out = df.type.values
    value_out = df.value.values
    xgrid_out = df.xgrid.values
    ygrid_out = df.ygrid.values
    lon_out = df.lon.values
    lat_out = df.lat.values
    error_out = df.error.values

    """ write netcdf """
    nc = netCDF4.Dataset(ncfile, 'w', format='NETCDF3_CLASSIC')
    now = datetime.now()
    nc.history = now.strftime('%Y-%m-%d %H:%M:%S')
    nc.author = 'OKADA Teruhisa'
    nc.createDimension('survey', len(survey_out))
    nc.createDimension('state_variable', state_variable)
    nc.createDimension('datum', sum(nobs_out))
    for name in nc.dimensions.keys():
        print nc.dimensions[name]

    spherical = nc.createVariable('spherical', dtype('int32').char)
    spherical.long_name = 'grid type logical switch'
    spherical.flag_values = [0, 1]
    spherical.flag_meanings = 'Cartesian Spherical'

    Nobs = nc.createVariable('Nobs', dtype('int32').char, ('survey',))
    Nobs.long_name = 'number of observations with the same survey time'

    survey_time = nc.createVariable('survey_time', dtype('double').char, ('survey',))
    survey_time.long_name = 'survey time'
    survey_time.units = romspy.GMT_days
    survey_time.calendar = 'gregorian'

    obs_variance = nc.createVariable('obs_variance', dtype('double').char, ('state_variable',))
    obs_variance.long_name = 'global time and space observation variance'

    obs_type = nc.createVariable('obs_type', dtype('int32').char, ('datum',))
    obs_type.long_name = 'model state variable associated with observation'
    obs_type.flag_values = [i + 1 for i in range(16)]
    obs_type.flag_meanings = '1:zeta 2:ubar 3:vbar 4:u 5:v 6:temperature 7:salinity 8:NH4 9:NO3 10:chlorophyll 11:phytoplankton 12:zooplankton 13:LdetritusN 14:SdetritusN 15:oxygen 16:PO4 17:LdetritusP 18:SdetritusP 19:H2S'

    obs_provenance = nc.createVariable('obs_provenance', dtype('int32').char, ('datum',))
    obs_provenance.long_name = 'observation origin'
    obs_provenance.flag_values = [1, 2]
    obs_provenance.flag_meanings = '1:OBWQ13 2:OB_RADAR'

    obs_station = nc.createVariable('obs_station', dtype('int32').char, ('datum',))
    obs_station.long_name = 'observation station number'
    obs_station.flag_values = [i+1 for i in range(13)]
    obs_station.flag_meanings = 'akashi sumoto kanku kobe yodo hannan sakai rokko hamadera awaji suma osaka kishiwada'

    obs_time = nc.createVariable('obs_time', dtype('double').char, ('datum',))
    obs_layer = nc.createVariable('obs_layer', dtype('double').char, ('datum',))
    obs_depth = nc.createVariable('obs_depth', dtype('double').char, ('datum',))
    obs_Xgrid = nc.createVariable('obs_Xgrid', dtype('double').char, ('datum',))
    obs_Ygrid = nc.createVariable('obs_Ygrid', dtype('double').char, ('datum',))
    obs_Zgrid = nc.createVariable('obs_Zgrid', dtype('double').char, ('datum',))
    obs_lon = nc.createVariable('obs_lon', dtype('double').char, ('datum',))
    obs_lat = nc.createVariable('obs_lat', dtype('double').char, ('datum',))
    obs_error = nc.createVariable('obs_error', dtype('double').char, ('datum',))
    obs_value = nc.createVariable('obs_value', dtype('double').char, ('datum',))

    obs_time.long_name = 'time of observation'
    obs_time.units = romspy.GMT_days
    obs_time.calendar = 'gregorian'
    obs_layer.long_name = 'layer of observation'
    obs_layer.units = 'nondimensional'
    obs_depth.long_name = 'depth of observation'
    obs_depth.units = 'meter'
    obs_depth.negative = 'downwards'
    obs_Xgrid.long_name = 'observation fractional x-grid location'
    obs_Ygrid.long_name = 'observation fractional y-grid location'
    obs_Zgrid.long_name = 'observation fractional z-grid location'
    obs_lon.long_name = 'observation longitude'
    obs_lat.long_name = 'observation latitude'
    obs_error.long_name = 'observation error covariance'
    obs_value.long_name = 'observation value'

    spherical[:] = 1
    Nobs[:] = nobs_out
    survey_time[:] = survey_out
    obs_variance[:] = 0
    obs_provenance[:] = flag
    obs_time[:] = time_out
    obs_type[:] = type_out
    obs_layer[:] = layer_out
    obs_depth[:] = depth_out
    obs_Xgrid[:] = xgrid_out
    obs_Ygrid[:] = ygrid_out
    obs_Zgrid[:] = 0
    obs_station[:] = station_out
    obs_lon[:] = lon_out
    obs_lat[:] = lat_out
    obs_error[:] = error_out
    obs_value[:] = value_out

    for name in nc.variables.keys():
        print name, nc.variables[name][:]
    nc.close()
    print 'Finish!'

if __name__ == '__main__':
    dates = [datetime(2012,1,1,0), datetime(2013,1,1,0)]
    varids = None
    stafile = '/home/okada/Dropbox/Data/stations13.csv'
    flag = 1
    errors = None
    outfile_tmp = '/home/okada/Data/ob500_obs_2012_{}.nc'
    name = 'mp-3_clean'

    if name == 'obweb-5':
        outfile = 'F:/okada/Dropbox/Data/ob500_obs_2012_obweb-5.nc'
        inpfile = 'Z:/Data/obweb/converted_db_oxygen3.csv'
        stafile = 'Z:/Data/stations13.csv'
    elif name == 'mp-1':
        outfile = outfile_tmp.format(name)
        inpfile = '/home/okada/Data/mp/converted_mp.csv'
    elif name == 'mp-1_ts':
        outfile = outfile_tmp.format(name)
        inpfile = '/home/okada/Data/mp/converted_mp.csv'
        varids = [6, 7]
    elif name == 'mp-1_bio':
        outfile = outfile_tmp.format(name)
        inpfile = '/home/okada/Data/mp/converted_mp.csv'
        varids = [10, 15]
    elif name == 'mp-2':
        outfile = outfile_tmp.format(name)
        inpfile = '/home/okada/Data/mp/converted_mp-2.csv'
    elif name == 'radar-1':
        flag = 2
        inpfile = 'Z:/Data/radar/converted_radar_20120201_20120301.csv'
        outfile = 'Z:/Data/ob500_obs_2012_{}.nc'.format(name)
        varids = [4, 5]
        stafile = None
    elif name == 'mp-3':
        outfile = outfile_tmp.format(name)
        inpfile = '/home/okada/Data/mp/converted_mp-2.csv'
        errors = [0.34, 0.21, 7.53, 14.19]  # Sakai, 2011
        errors[2] = 11.47  # obs by ship, 2015
    elif name == 'mp-3_clean':
        outfile = outfile_tmp.format(name)
        inpfile = '/home/okada/Data/mp/converted_mp-2_clean.csv'
        errors = [0.34, 0.21, 7.53, 14.19]  # Sakai, 2011
        errors[2] = 11.47  # obs by ship, 2015

    make_obs_file(outfile, inpfile, stafile, dates, varids=varids, flag=flag, e=errors)
repo_name:  okadate/romspy
path:       romspy/make/make_obs_file.py
language:   Python
license:    mit
size:       9,106
keyword:    [ "NetCDF" ]
text_hash:  9e88546b34ef7ae755afbbcc5317cfff11bbd56f71156233088a0e162aab5420
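The 64-character text_hash values have the shape of SHA-256 hex digests of the text column; a minimal sketch under that unverified assumption (the exact recipe, e.g. encoding or whitespace normalization, is not documented here):

    import hashlib

    def record_hash(text):
        # Assumption: text_hash is the SHA-256 hex digest of the raw text
        # field; the 64-char lowercase-hex format matches, but this is a guess.
        return hashlib.sha256(text.encode("utf-8")).hexdigest()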
from __future__ import print_function, division, absolute_import

from george import kernels, GP
import numpy as np
from kglib import fitters
from scipy.integrate import quad
from scipy.optimize import minimize

class HistFitter(fitters.Bayesian_LS):
    def __init__(self, mcmc_samples, bin_edges):
        """
        Histogram Inference a la Dan Foreman-Mackey

        Parameters:
        ===========
        - mcmc_samples: numpy array of shape (Nobs, Nsamples)
                        MCMC samples for the thing you want to histogram

        - bin_edges:    numpy.ndarray array
                        The edges of the histogram bins to use.
        """
        self.mcmc_samples = mcmc_samples
        self.bin_edges = bin_edges
        self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2
        self.bin_widths = np.diff(self.bin_edges)
        self.Nbins = self.bin_widths.size
        self.Nobs = self.mcmc_samples.shape[0]

        # Find which bin each q falls in
        self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1

        # Determine the censoring function for each bin (used in the integral)
        self.censor_integrals = np.array([quad(func=self.censoring_fcn, a=left, b=right)[0]
                                          for (left, right) in
                                          zip(self.bin_edges[:-1], self.bin_edges[1:])])

        # Set values needed for multinest fitting
        self.n_params = self.Nbins
        self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)]

    def lnlike(self, pars):
        # Pull theta out of pars
        theta = pars[:self.Nbins]

        # Generate the inner summation
        gamma = np.ones_like(self.bin_idx) * np.nan
        good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0)  # nans in q get put in nonexistent bins
        gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]]
        summation = np.nanmean(gamma, axis=1)

        # Calculate the integral
        I = self._integral_fcn(theta)

        # Generate the log-likelihood
        ll = -I + np.nansum(np.log(summation))
        return ll

    def lnprior(self, pars):
        """ Override this if you want to set a better prior on the bin heights. """
        if all([p > 0 and p < 10 for p in pars]):
            return 0
        return -np.inf

    def lnprob(self, pars):
        lp = self.lnprior(pars)
        return lp + self.lnlike(pars) if np.isfinite(lp) else -np.inf

    def _integral_fcn(self, theta):
        return np.sum(theta * self.censor_integrals) * self.Nobs

    def censoring_fcn(self, value):
        """
        Censoring function. This should return the completeness of your survey to the given value.
        """
        return 1.0

    def guess_fit(self):
        def errfcn(pars):
            ll = self.lnprob(pars)
            return -ll

        initial_guess = np.ones_like(self.bin_centers)
        bounds = [[1e-3, None] for p in initial_guess]
        out = minimize(errfcn, initial_guess, bounds=bounds)
        return out.x

    def mnest_prior(self, cube, ndim, nparams):
        # All bins are in the range (0, 10)
        for i in range(self.Nbins):
            cube[i] *= 10
        return

class CensoredHistFitter(HistFitter):
    """ Inherits from HistFitter, but actually defines the censoring function """

    def censoring_fcn(self, val, alpha=40, beta=0.25):
        # sigmoid censoring function. Change this for the real deal!
        return 1.0 / (1.0 + np.exp(-alpha * (val - beta)))

class SmoothHistFitter(CensoredHistFitter):
    """ A subclass of HistogramFitter that puts a gaussian process smoothing prior on the bin heights """

    def __init__(self, *args, **kwargs):
        super(SmoothHistFitter, self).__init__(*args, **kwargs)
        self.smoothing = self.mcmc_samples.shape[0] / self.Nbins
        self.n_params = self.Nbins + 4
        self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)]
        self.param_names.extend(('lna', 'lntau', 'lnerr', 'mean'))

    def lnprior(self, pars):
        """
        Smoothing prior using gaussian process.
        We will learn the hyperparameters and marginalize over them.
        """
        theta = pars[:self.Nbins]
        if np.any(theta < 0):
            return -np.inf
        a, tau, err = np.exp(pars[self.Nbins:-1])
        mean = pars[-1]
        kernel = a * kernels.ExpSquaredKernel(tau)
        gp = GP(kernel, mean=mean)
        gp.compute(self.bin_centers, yerr=err)
        return gp.lnlikelihood(theta) / self.smoothing

    def guess_fit(self):
        """ This doesn't work too great, but the full MCMC fit looks good. """
        def errfcn(pars):
            ll = self.lnprob(pars)
            return -ll

        # Set up initial guesses
        initial_guess = np.ones(self.bin_centers.size + 4)
        initial_guess[-4] = 0.0
        initial_guess[-3] = -0.25
        initial_guess[-2] = -1.0
        initial_guess[-1] = -1.0

        # Set up bounds
        bounds = [[1e-3, None] for p in self.bin_centers]
        bounds.append([-10, 20])
        bounds.append([-10, 10])
        bounds.append((-1, 5))
        bounds.append((-10, 10))

        # Minimize
        out = minimize(errfcn, initial_guess, bounds=bounds)
        return out.x

    def _lnlike(self, pars):
        return self.lnprob(pars)

    def mnest_prior(self, cube, ndim, nparams):
        for i in range(self.Nbins):
            cube[i] *= 10
        cube[self.Nbins] = cube[self.Nbins] * 30 - 10
        cube[self.Nbins + 1] = cube[self.Nbins + 1] * 20 - 10
        cube[self.Nbins + 2] = cube[self.Nbins + 2] * 7 - 2
        cube[self.Nbins + 3] = cube[self.Nbins + 3] * 20 - 10
        return
repo_name:  kgullikson88/gullikson-scripts
path:       kglib/fitters/histogram.py
language:   Python
license:    mit
size:       5,838
keyword:    [ "Gaussian" ]
text_hash:  a21cbe6b41efef8700f8be48c915e48abfabc27858fe35f2484dc46e4a89941c
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --

from molmod.units import ps, amu, A, atm, deg
from molmod.io.common import slice_match

import numpy

__all__ = ["Error", "HistoryReader", "OutputReader"]

class Error(Exception):
    pass

class HistoryReader(object):
    def __init__(self, filename, sub=slice(None), pos_unit=A, vel_unit=A/ps,
                 frc_unit=amu*A/ps**2, time_unit=ps, mass_unit=amu):
        self._f = file(filename)
        self._sub = sub
        self.pos_unit = pos_unit
        self.vel_unit = vel_unit
        self.frc_unit = frc_unit
        self.time_unit = time_unit
        self.mass_unit = mass_unit
        try:
            self.header = self._f.next()[:-1]
            integers = tuple(int(word) for word in self._f.next().split())
            if len(integers) != 3:
                raise Error("Second line must contain three integers.")
            self.keytrj, self.imcon, self.num_atoms = integers
        except StopIteration:
            raise Error("File is too short. Could not read header.")
        except ValueError:
            raise Error("Second line must contain three integers.")
        self._counter = 1
        self._frame_size = 4 + self.num_atoms*(self.keytrj+2)

    def __del__(self):
        self._f.close()

    def __iter__(self):
        return self

    def next(self):
        # auxiliary read function
        def read_three(msg):
            # read three words as floating point numbers
            line = self._f.next()
            try:
                return [float(line[:12]), float(line[12:24]), float(line[24:])]
            except ValueError:
                raise Error(msg)

        # skip frames as requested
        while not slice_match(self._sub, self._counter):
            for i in xrange(self._frame_size):
                self._f.next()
            self._counter += 1

        frame = {}
        # read the frame header line
        words = self._f.next().split()
        if len(words) != 6:
            raise Error("The first line of each time frame must contain 6 words. (%i'th frame)" % self._counter)
        if words[0] != "timestep":
            raise Error("The first word of the first line of each time frame must be 'timestep'. (%i'th frame)" % self._counter)
        try:
            step = int(words[1])
            frame["step"] = step
            if int(words[2]) != self.num_atoms:
                raise Error("The number of atoms has changed. (%i'th frame, %i'th step)" % (self._counter, step))
            if int(words[3]) != self.keytrj:
                raise Error("keytrj has changed. (%i'th frame, %i'th step)" % (self._counter, step))
            if int(words[4]) != self.imcon:
                raise Error("imcon has changed. (%i'th frame, %i'th step)" % (self._counter, step))
            frame["timestep"] = float(words[5])*self.time_unit
            frame["time"] = frame["timestep"]*step  # this is ugly, or wait ... dlpoly is a bit ugly. we are not to blame!
        except ValueError:
            raise Error("Could not convert all numbers on the first line of the current time frame. (%i'th frame)" % self._counter)
        # the three cell lines
        cell = numpy.zeros((3,3), float)
        frame["cell"] = cell
        cell_msg = "The cell lines must consist of three floating point values. (%i'th frame, %i'th step)" % (self._counter, step)
        for i in xrange(3):
            cell[:,i] = read_three(cell_msg)
        cell *= self.pos_unit
        # the atoms
        symbols = []
        frame["symbols"] = symbols
        masses = numpy.zeros(self.num_atoms, float)
        frame["masses"] = masses
        charges = numpy.zeros(self.num_atoms, float)
        frame["charges"] = charges
        pos = numpy.zeros((self.num_atoms,3), float)
        frame["pos"] = pos
        if self.keytrj > 0:
            vel = numpy.zeros((self.num_atoms,3), float)
            frame["vel"] = vel
        if self.keytrj > 1:
            frc = numpy.zeros((self.num_atoms,3), float)
            frame["frc"] = frc
        for i in xrange(self.num_atoms):
            # the atom header line
            words = self._f.next().split()
            if len(words) != 4:
                raise Error("The atom header line must contain 4 words. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1))
            symbols.append(words[0])
            try:
                masses[i] = float(words[2])*self.mass_unit
                charges[i] = float(words[3])
            except ValueError:
                raise Error("The numbers in the atom header line could not be interpreted.")
            # the pos line
            pos_msg = "The position lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
            pos[i] = read_three(pos_msg)
            if self.keytrj > 0:
                vel_msg = "The velocity lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
                vel[i] = read_three(vel_msg)
            if self.keytrj > 1:
                frc_msg = "The force lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
                frc[i] = read_three(frc_msg)
        pos *= self.pos_unit  # convert to au
        if self.keytrj > 0:
            vel *= self.vel_unit  # convert to au
        if self.keytrj > 1:
            frc *= self.frc_unit  # convert to au
        # done
        self._counter += 1
        return frame

class OutputReader(object):
    _marker = " " + "-"*130

    def __init__(self, filename, sub=slice(None), skip_equi_period=True,
                 pos_unit=A, time_unit=ps, angle_unit=deg, e_unit=amu/(A/ps)**2):
        self._f = file(filename)
        self._sub = sub
        self.skip_equi_period = skip_equi_period
        self._counter = 1
        self._conv = [
            1, e_unit, 1, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit,
            time_unit, e_unit, 1, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit,
            e_unit, 1, pos_unit**3, 1, e_unit, e_unit, angle_unit, angle_unit,
            angle_unit, e_unit, 1000*atm,
        ]
        self.last_step = None
        # find the line that gives the number of equilibration steps:
        try:
            while True:
                line = self._f.next()
                if line.startswith(" equilibration period"):
                    self.equi_period = int(line[30:])
                    break
        except StopIteration:
            raise Error("DL_POLY OUTPUT file is too short. Could not find line with the number of equilibration steps.")
        except ValueError:
            raise Error("Could not read the number of equilibration steps. (expecting an integer)")

    def __del__(self):
        self._f.close()

    def __iter__(self):
        return self

    def next(self):
        def goto_next_frame():
            marked = False
            while True:
                line = self._f.next()[:-1]
                if marked and len(line) > 0 and not line.startswith(" --------"):
                    try:
                        step = int(line[:10])
                        return step, line
                    except ValueError:
                        pass
                marked = (len(line) == 131 and line == self._marker)

        while True:
            step, line = goto_next_frame()
            if (not self.skip_equi_period or step >= self.equi_period) and \
               step != self.last_step:
                break

        # skip frames as requested
        while not slice_match(self._sub, self._counter):
            step, line = goto_next_frame()
            self._counter += 1

        # now really read these three lines
        try:
            row = [step]
            for i in xrange(9):
                row.append(float(line[10+i*12:10+(i+1)*12]))
            line = self._f.next()[:-1]
            row.append(float(line[:10]))
            for i in xrange(9):
                row.append(float(line[10+i*12:10+(i+1)*12]))
            line = self._f.next()[:-1]
            row.append(float(line[:10]))
            for i in xrange(9):
                row.append(float(line[10+i*12:10+(i+1)*12]))
        except ValueError:
            raise Error("Some numbers in the output file could not be read. (expecting floating point numbers)")
        # convert all the numbers to atomic units
        for i in xrange(30):
            row[i] *= self._conv[i]
        # done
        self.last_step = step
        return row
repo_name:  woutersmet/Molmodsummer
path:       lib/molmod/io/dlpoly.py
language:   Python
license:    gpl-3.0
size:       9,363
keyword:    [ "DL_POLY" ]
text_hash:  85cbfa274690f56fa53644ef60e69cd996e6d09c706c39e2fcff9bdc4a3ff56e
#!/usr/bin/env python3
#
# Script to convert DFTB+ MD output to ASE ext-xyz trajectory
# by Patrick Melix
# 2018/02/13
#
# You can import the module and then call .main() or use it as a script

import os
from ase import io
from ase.io.dftb import read_dftb_lattice

def main():
    if not os.path.isfile('geo_end.xyz'):
        raise ValueError('File geo_end.xyz does not exist')
    if not os.path.isfile('md.out'):
        raise ValueError('File md.out does not exist')

    # if output exists mv to .bak
    outFile = 'traj.xyz'
    if os.path.isfile(outFile):
        print('ATTENTION: {:} exists, moving to *.bak'.format(outFile))
        os.rename(outFile, outFile+'.bak')

    mol = io.read('geo_end.xyz', index=slice(0, None))
    read_dftb_lattice(images=mol)
    for frame in mol:
        frame.wrap()
        frame.write(outFile, append=True)

if __name__ == "__main__":
    main()
repo_name:  patrickmelix/Python4ChemistryTools
path:       dftb+2traj.py
language:   Python
license:    mit
size:       893
keyword:    [ "ASE" ]
text_hash:  f2e7d3118e3a5fd65ab8d645b773662ca48c7146e5bcb14586e02b7cfeb82fcf
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the Smiles file handling stuff
"""
import unittest, sys, os
from rdkit import RDConfig
from rdkit import Chem
from rdkit.six import next

class TestCase(unittest.TestCase):
    def setUp(self):
        self.smis = ['CC','CCC','CCCCC','CCCCCC','CCCCCCC','CC','CCCCOC']

    def test1LazyReader(self):
        " tests lazy reads "
        supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis),',',0,-1,0)
        for i in range(4):
            m = next(supp)
            assert m,'read %d failed'%i
            assert m.GetNumAtoms(),'no atoms in mol %d'%i
        i = len(supp)-1
        m = supp[i]
        assert m,'read %d failed'%i
        assert m.GetNumAtoms(),'no atoms in mol %d'%i

        ms = [x for x in supp]
        for i in range(len(supp)):
            m = ms[i]
            if m:
                ms[i] = Chem.MolToSmiles(m)
        l = len(supp)
        assert l == len(self.smis),'bad supplier length: %d'%(l)

        i = len(self.smis)-3
        m = supp[i-1]
        assert m,'back index %d failed'%i
        assert m.GetNumAtoms(),'no atoms in mol %d'%i

        try:
            m = supp[len(self.smis)]
        except:
            fail = 1
        else:
            fail = 0
        assert fail,'out of bound read did not fail'

    def test2LazyIter(self):
        " tests lazy reads using the iterator interface "
        supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis),',',0,-1,0)
        nDone = 0
        for mol in supp:
            assert mol,'read %d failed'%i
            assert mol.GetNumAtoms(),'no atoms in mol %d'%i
            nDone += 1
        assert nDone==len(self.smis),'bad number of molecules'

        l = len(supp)
        assert l == len(self.smis),'bad supplier length: %d'%(l)

        i = len(self.smis)-3
        m = supp[i-1]
        assert m,'back index %d failed'%i
        assert m.GetNumAtoms(),'no atoms in mol %d'%i

        try:
            m = supp[len(self.smis)]
        except:
            fail = 1
        else:
            fail = 0
        assert fail,'out of bound read did not fail'

    def test3BoundaryConditions(self):
        smis = ['CC','CCOC','fail','CCO']
        supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
        assert len(supp)==4
        assert supp[2] is None
        assert supp[3]

        supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
        assert supp[2] is None
        assert supp[3]
        assert len(supp)==4
        try:
            supp[4]
        except:
            ok=1
        else:
            ok=0
        assert ok

        supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
        assert len(supp)==4
        assert supp[3]
        try:
            supp[4]
        except:
            ok=1
        else:
            ok=0
        assert ok

        supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
        try:
            supp[4]
        except:
            ok=1
        else:
            ok=0
        assert ok
        assert len(supp)==4
        assert supp[3]

if __name__ == '__main__':
    unittest.main()
repo_name:  soerendip42/rdkit
path:       rdkit/Chem/Suppliers/UnitTestSmilesMolSupplier.py
language:   Python
license:    bsd-3-clause
size:       3,046
keyword:    [ "RDKit" ]
text_hash:  4067bb3f0008c794ae24d2c1213e0e15a75f947aa2beec8aab48ae4e63947be8
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs Mozilla's Kraken JavaScript benchmark."""

import os

from metrics import power
from telemetry import benchmark
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar

DESCRIPTIONS = {
    'ai-astar':
        'This benchmark uses the [A* search algorithm]'
        '(http://en.wikipedia.org/wiki/A*_search_algorithm) to automatically '
        'plot an efficient path between two points, in the presence of '
        'obstacles. Adapted from code by [Brian Gringstead]'
        '(http://www.briangrinstead.com/blog/astar-search-algorithm-in-'
        'javascript).',
    'audio-beat-detection':
        'This benchmark performs [beat detection]'
        '(http://en.wikipedia.org/wiki/Beat_detection) on an Audio sample '
        'using [code](http://beatdetektor.svn.sourceforge.net/viewvc'
        '/beatdetektor/trunk/core/js/beatdetektor.js?revision=18&view=markup) '
        'from [BeatDetektor](http://www.cubicproductions.com/index.php'
        '?option=com_content&view=article&id=67&Itemid=82) and '
        '[DSP.js](http://github.com/corbanbrook/dsp.js/).',
    'audio-dft':
        'This benchmark performs a [Discrete Fourier Transform]'
        '(http://en.wikipedia.org/wiki/Discrete_Fourier_transform) on an '
        'Audio sample using code from [DSP.js]'
        '(http://github.com/corbanbrook/dsp.js).',
    'audio-fft':
        'This benchmark performs a [Fast Fourier Transform]'
        '(http://en.wikipedia.org/wiki/Fast_Fourier_transform) on an Audio '
        'sample using code from [DSP.js]'
        '(http://github.com/corbanbrook/dsp.js/).',
    'audio-oscillator':
        'This benchmark generates a soundwave using code from [DSP.js]'
        '(http://github.com/corbanbrook/dsp.js/).',
    'imaging-darkroom':
        'This benchmark performs a variety of photo manipulations such as '
        'Fill, Brightness, Contrast, Saturation, and Temperature.',
    'imaging-desaturate':
        'This benchmark [desaturates]'
        '(http://en.wikipedia.org/wiki/Colorfulness) a photo using code from '
        '[Pixastic](http://www.pixastic.com/).',
    'imaging-gaussian-blur':
        'This benchmark performs a [Gaussian blur]'
        '(http://en.wikipedia.org/wiki/Gaussian_blur) on a photo.',
    'json-parse-financial':
        'This benchmark parses [JSON](http://www.json.org) records.',
    'json-stringify-tinderbox':
        'This benchmark serializes [Tinderbox]'
        '(http://tests.themasta.com/tinderboxpushlog/?tree=Firefox) build '
        'data to [JSON](http://www.json.org).',
}

def _Mean(l):
    return float(sum(l)) / len(l) if len(l) > 0 else 0.0

class _KrakenMeasurement(page_test.PageTest):
    def __init__(self):
        super(_KrakenMeasurement, self).__init__()
        self._power_metric = None

    def CustomizeBrowserOptions(self, options):
        power.PowerMetric.CustomizeBrowserOptions(options)

    def WillStartBrowser(self, browser):
        self._power_metric = power.PowerMetric(browser)

    def DidNavigateToPage(self, page, tab):
        self._power_metric.Start(page, tab)

    def ValidateAndMeasurePage(self, page, tab, results):
        tab.WaitForJavaScriptExpression(
            'document.title.indexOf("Results") != -1', 700)
        tab.WaitForDocumentReadyStateToBeComplete()
        self._power_metric.Stop(page, tab)
        self._power_metric.AddResults(tab, results)

        js_get_results = """
            var formElement = document.getElementsByTagName("input")[0];
            decodeURIComponent(formElement.value.split("?")[1]);
            """
        result_dict = eval(tab.EvaluateJavaScript(js_get_results))
        total = 0
        for key in result_dict:
            if key == 'v':
                continue
            results.AddValue(list_of_scalar_values.ListOfScalarValues(
                results.current_page, key, 'ms', result_dict[key],
                important=False, description=DESCRIPTIONS.get(key)))
            total += _Mean(result_dict[key])

        # TODO(tonyg/nednguyen): This measurement shouldn't calculate Total. The
        # results system should do that for us.
        results.AddValue(scalar.ScalarValue(
            results.current_page, 'Total', 'ms', total,
            description='Total of the means of the results for each type '
                        'of benchmark in [Mozilla\'s Kraken JavaScript benchmark]'
                        '(http://krakenbenchmark.mozilla.org/)'))

class Kraken(benchmark.Benchmark):
    """Mozilla's Kraken JavaScript benchmark."""
    test = _KrakenMeasurement

    def CreatePageSet(self, options):
        ps = page_set.PageSet(
            archive_data_file='../page_sets/data/kraken.json',
            file_path=os.path.abspath(__file__))
        ps.AddPageWithDefaultRunNavigate(
            'http://krakenbenchmark.mozilla.org/kraken-1.1/driver.html')
        return ps
repo_name:  sencha/chromium-spacewalk
path:       tools/perf/benchmarks/kraken.py
language:   Python
license:    bsd-3-clause
size:       4,952
keyword:    [ "Brian", "Gaussian" ]
text_hash:  08a53af7a7a6ff10f011384a04b8b2840d3ccd59a5335f18e30d3445500edffb
#!/usr/bin/env python

CopyRight = '''
/**************************************************************************
 *
 * Copyright 2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * SRGB translation.
 *
 * @author Brian Paul <brianp@vmware.com>
 * @author Michal Krol <michal@vmware.com>
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
'''

import math
import struct

def srgb_to_linear(x):
    if x <= 0.04045:
        return x / 12.92
    else:
        return math.pow((x + 0.055) / 1.055, 2.4)

def linear_to_srgb(x):
    if x >= 0.0031308:
        return 1.055 * math.pow(x, 0.41666666) - 0.055
    else:
        return 12.92 * x

def generate_srgb_tables():
    print 'const float'
    print 'util_format_srgb_8unorm_to_linear_float_table[256] = {'
    for j in range(0, 256, 4):
        print ' ',
        for i in range(j, j + 4):
            print '%.7e,' % (srgb_to_linear(i / 255.0),),
        print
    print '};'
    print
    print 'const uint8_t'
    print 'util_format_srgb_to_linear_8unorm_table[256] = {'
    for j in range(0, 256, 16):
        print ' ',
        for i in range(j, j + 16):
            print '%3u,' % (int(srgb_to_linear(i / 255.0) * 255.0 + 0.5),),
        print
    print '};'
    print
    print 'const uint8_t'
    print 'util_format_linear_to_srgb_8unorm_table[256] = {'
    for j in range(0, 256, 16):
        print ' ',
        for i in range(j, j + 16):
            print '%3u,' % (int(linear_to_srgb(i / 255.0) * 255.0 + 0.5),),
        print
    print '};'
    print

    # calculate the table interpolation values used in float linear to unorm8 srgb
    numexp = 13
    mantissa_msb = 3
    # stepshift is just used to only use every x-th float to make things faster,
    # 5 is largest value which still gives exact same table as 0
    stepshift = 5
    nbuckets = numexp << mantissa_msb
    bucketsize = (1 << (23 - mantissa_msb)) >> stepshift
    mantshift = 12
    valtable = []
    sum_aa = float(bucketsize)
    sum_ab = 0.0
    sum_bb = 0.0
    for i in range(0, bucketsize):
        j = (i << stepshift) >> mantshift
        sum_ab += j
        sum_bb += j*j
    inv_det = 1.0 / (sum_aa * sum_bb - sum_ab * sum_ab)
    for bucket in range(0, nbuckets):
        start = ((127 - numexp) << 23) + bucket*(bucketsize << stepshift)
        sum_a = 0.0
        sum_b = 0.0
        for i in range(0, bucketsize):
            j = (i << stepshift) >> mantshift
            fint = start + (i << stepshift)
            ffloat = struct.unpack('f', struct.pack('I', fint))[0]
            val = linear_to_srgb(ffloat) * 255.0 + 0.5
            sum_a += val
            sum_b += j*val
        solved_a = inv_det * (sum_bb*sum_a - sum_ab*sum_b)
        solved_b = inv_det * (sum_aa*sum_b - sum_ab*sum_a)
        scaled_a = solved_a * 65536.0 / 512.0
        scaled_b = solved_b * 65536.0
        int_a = int(scaled_a + 0.5)
        int_b = int(scaled_b + 0.5)
        valtable.append((int_a << 16) + int_b)

    print 'const unsigned'
    print 'util_format_linear_to_srgb_helper_table[104] = {'
    for j in range(0, nbuckets, 4):
        print ' ',
        for i in range(j, j + 4):
            print '0x%08x,' % (valtable[i],),
        print
    print '};'
    print

def main():
    print '/* This file is autogenerated by u_format_srgb.py. Do not edit directly. */'
    print
    # This will print the copyright message on the top of this file
    print CopyRight.strip()
    print
    print '#include "format_srgb.h"'
    print
    generate_srgb_tables()

if __name__ == '__main__':
    main()
repo_name:  execunix/vinos
path:       xsrc/external/mit/MesaLib/dist/src/util/format_srgb.py
language:   Python
license:    apache-2.0
size:       4,744
keyword:    [ "Brian" ]
text_hash:  0bb51dfa1d126b930ff6f9a8683cd82c0bbd14e9b47baeb21d1789f5a469d35f
#%%
from SimPEG import np, Mesh
import SimPEG as simpeg
import time as tm
import vtk, vtk.util.numpy_support as npsup
import re, sys, os

def read_GOCAD_ts(tsfile):
    """Read GOCAD triangulated surface (*.ts) file

    INPUT:
    tsfile: Triangulated surface

    OUTPUT:
    vrts : Array of vertices in XYZ coordinates [n x 3]
    trgl : Array of index for triangles [m x 3]. The order of the vertices
           is important and describes the normal
           n = cross( (P2 - P1 ) , (P3 - P1) )

    Created on Jan 13th, 2016

    Author: @fourndo
    """
    fid = open(tsfile, 'r')
    line = fid.readline()

    # Skip all the lines until the vertices
    while re.match('TFACE', line) == None:
        line = fid.readline()
    line = fid.readline()

    vrtx = []
    # Run down all the vertices and save in array
    while re.match('VRTX', line):
        l_input = re.split('[\s*]', line)
        temp = np.array(l_input[2:5])
        vrtx.append(temp.astype(np.float))
        # Read next line
        line = fid.readline()
    vrtx = np.asarray(vrtx)

    # Skip lines to the triangles
    while re.match('TRGL', line) == None:
        line = fid.readline()

    # Run down the list of triangles
    trgl = []
    # Run down all the vertices and save in array
    while re.match('TRGL', line):
        l_input = re.split('[\s*]', line)
        temp = np.array(l_input[1:4])
        trgl.append(temp.astype(np.int))
        # Read next line
        line = fid.readline()
    trgl = np.asarray(trgl)

    return vrtx, trgl

def gocad2vtp(gcFile):
    """Read a GOCAD polystructure file and make a VTK PolyData object (vtp).

    Input:
        gcFile: GOCAD file with polystructure
    """
    print "Reading GOCAD ts file..."
    vrtx, trgl = read_GOCAD_ts(gcFile)
    # Adjust the index
    trgl = trgl - 1

    # Make vtk pts
    ptsvtk = vtk.vtkPoints()
    ptsvtk.SetData(npsup.numpy_to_vtk(vrtx, deep=1))

    # Make the polygon connection
    polys = vtk.vtkCellArray()
    for face in trgl:
        poly = vtk.vtkPolygon()
        poly.GetPointIds().SetNumberOfIds(len(face))
        for nrv, vert in enumerate(face):
            poly.GetPointIds().SetId(nrv, vert)
        polys.InsertNextCell(poly)

    # Make the polydata, structure of connections and vrtx
    polyData = vtk.vtkPolyData()
    polyData.SetPoints(ptsvtk)
    polyData.SetPolys(polys)

    return polyData

def gocad2simpegMeshIndex(gcFile, mesh, extractBoundaryCells=True, extractInside=True):
    """Read a GOCAD polystructure file and output the indexes of mesh cells
    that lie within the structure.
    """
    # Make the polydata
    polyData = gocad2vtp(gcFile)

    # Make implicit func
    ImpDistFunc = vtk.vtkImplicitPolyDataDistance()
    ImpDistFunc.SetInput(polyData)

    # Convert the mesh
    vtkMesh = vtk.vtkRectilinearGrid()
    vtkMesh.SetDimensions(mesh.nNx, mesh.nNy, mesh.nNz)
    vtkMesh.SetXCoordinates(npsup.numpy_to_vtk(mesh.vectorNx, deep=1))
    vtkMesh.SetYCoordinates(npsup.numpy_to_vtk(mesh.vectorNy, deep=1))
    vtkMesh.SetZCoordinates(npsup.numpy_to_vtk(mesh.vectorNz, deep=1))

    # Add indexes cell data to the object
    vtkInd = npsup.numpy_to_vtk(np.arange(mesh.nC), deep=1)
    vtkInd.SetName('Index')
    vtkMesh.GetCellData().AddArray(vtkInd)

    # Define the extractGeometry
    extractImpDistRectGridFilt = vtk.vtkExtractGeometry()  # Object constructor
    extractImpDistRectGridFilt.SetImplicitFunction(ImpDistFunc)
    extractImpDistRectGridFilt.SetInputData(vtkMesh)

    # Set extraction type
    if extractBoundaryCells is True:
        extractImpDistRectGridFilt.ExtractBoundaryCellsOn()
    else:
        extractImpDistRectGridFilt.ExtractBoundaryCellsOff()
    if extractInside is True:
        extractImpDistRectGridFilt.ExtractInsideOn()
    else:
        extractImpDistRectGridFilt.ExtractInsideOff()

    print "Extracting indices from grid..."
    # Executing the pipe
    extractImpDistRectGridFilt.Update()

    # Get index inside
    insideGrid = extractImpDistRectGridFilt.GetOutput()
    insideGrid = npsup.vtk_to_numpy(insideGrid.GetCellData().GetArray('Index'))

    # Return the indexes inside
    return insideGrid

def makeVTPFiles():
    # Read in topo surface
    geosurf = ['CDED_Lake_Coarse.ts','Till.ts','XVK.ts','PK1.ts','PK1_extended.ts','PK2.ts','PK3.ts','HK1.ts','VK.ts']

    # Then make the polydata
    polyDict = {}
    for fileName in geosurf:
        tin = tm.time()
        name = fileName.split('.')[0]
        print "Computing indices with VTK: " + fileName
        polyDict[name] = gocad2vtp(fileName)
        print "VTK operation completed in " + str(tm.time() - tin) + " sec"

    # Write the files
    sys.path.append('/home/gudni/gitCodes/python/telluricpy')
    import telluricpy
    for k, vtpObj in polyDict.iteritems():
        telluricpy.vtkTools.io.writeVTPFile(k+'.vtp', vtpObj)

def makeModelFile():
    """
    Loads in a triangulated surface from Gocad (*.ts) and uses VTK
    to transfer it onto a 3D mesh.

    New scripts to be added to basecode
    """
    #%%
    work_dir = ''
    mshfile = 'MEsh_TEst.msh'

    # Load mesh file
    mesh = Mesh.TensorMesh.readUBC(work_dir+mshfile)

    # Load in observation file
    #[B,M,dobs] = PF.BaseMag.readUBCmagObs(obsfile)

    # Read in topo surface
    topsurf = work_dir+'CDED_Lake_Coarse.ts'
    geosurf = [[work_dir+'Till.ts',True,True],
               [work_dir+'XVK.ts',True,True],
               [work_dir+'PK1.ts',True,True],
               [work_dir+'PK2.ts',True,True],
               [work_dir+'PK3.ts',True,True],
               [work_dir+'HK1.ts',True,True],
               [work_dir+'VK.ts',True,True]]

    # Background density
    bkgr = 1e-4
    airc = 1e-8

    # Units
    vals = np.asarray([1e-2,3e-2,5e-2,2e-2,2e-2,1e-3,5e-3])

    #%% Script starts here
    # Create a grid of observations and offset the z from topo
    model = np.ones(mesh.nC) * bkgr

    # Load GOCAD surf
    #[vrtx, trgl] = PF.BaseMag.read_GOCAD_ts(tsfile)
    # Find active cells from surface
    for ii in range(len(geosurf)):
        tin = tm.time()
        print "Computing indices with VTK: " + geosurf[ii][0]
        indx = gocad2simpegMeshIndex(geosurf[ii][0], mesh)
        print "VTK operation completed in " + str(tm.time() - tin) + " sec"
        model[indx] = vals[ii]

    indx = gocad2simpegMeshIndex(topsurf, mesh)
    actv = np.zeros(mesh.nC)
    actv[indx] = 1
    model[actv == 0] = airc

    Mesh.TensorMesh.writeModelUBC(mesh, 'VTKout.dat', model)
repo_name:  simpeg/presentations
path:       SciPy2016/Geological_model/GKR_gocadUtils.py
language:   Python
license:    mit
size:       6,519
keyword:    [ "VTK" ]
text_hash:  2d6b967432d0f3c6536b432cf3724072eeae72c86dcfead69fe40974c15b6d4c
# coding: utf-8 from sqlalchemy import ( BINARY, DECIMAL, TIMESTAMP, Column, Computed, Date, DateTime, Enum, Float, ForeignKey, Index, LargeBinary, String, Table, Text, Time, text, ) from sqlalchemy.dialects.mysql import ( BIGINT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMINT, MEDIUMTEXT, SMALLINT, TINYINT, TINYTEXT, VARCHAR, ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() metadata = Base.metadata class Additive(Base): __tablename__ = "Additive" additiveId = Column(INTEGER(11), primary_key=True) name = Column(String(45)) additiveType = Column(String(45)) comments = Column(String(512)) class AdminActivity(Base): __tablename__ = "AdminActivity" adminActivityId = Column(INTEGER(11), primary_key=True) username = Column( String(45), nullable=False, unique=True, server_default=text("''") ) action = Column(String(45), index=True) comments = Column(String(100)) dateTime = Column(DateTime) class AdminVar(Base): __tablename__ = "AdminVar" __table_args__ = {"comment": "ISPyB administration values"} varId = Column(INTEGER(11), primary_key=True) name = Column(String(32), index=True) value = Column(String(1024), index=True) class Aperture(Base): __tablename__ = "Aperture" apertureId = Column(INTEGER(10), primary_key=True) sizeX = Column(Float) class AutoProc(Base): __tablename__ = "AutoProc" __table_args__ = ( Index( "AutoProc_refined_unit_cell", "refinedCell_a", "refinedCell_b", "refinedCell_c", "refinedCell_alpha", "refinedCell_beta", "refinedCell_gamma", "spaceGroup", ), ) autoProcId = Column( INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)" ) autoProcProgramId = Column(INTEGER(10), index=True, comment="Related program item") spaceGroup = Column(String(45), comment="Space group") refinedCell_a = Column(Float, comment="Refined cell") refinedCell_b = Column(Float, comment="Refined cell") refinedCell_c = Column(Float, comment="Refined cell") refinedCell_alpha = Column(Float, comment="Refined cell") refinedCell_beta = Column(Float, comment="Refined cell") refinedCell_gamma = Column(Float, comment="Refined cell") recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") class BFAutomationError(Base): __tablename__ = "BF_automationError" automationErrorId = Column(INTEGER(10), primary_key=True) errorType = Column(String(40), nullable=False) solution = Column(Text) class BFSystem(Base): __tablename__ = "BF_system" systemId = Column(INTEGER(10), primary_key=True) name = Column(String(100)) description = Column(String(200)) class BLSampleImageAutoScoreSchema(Base): __tablename__ = "BLSampleImageAutoScoreSchema" __table_args__ = {"comment": "Scoring schema name and whether it is enabled"} blSampleImageAutoScoreSchemaId = Column(TINYINT(3), primary_key=True) schemaName = Column( String(25), nullable=False, comment="Name of the schema e.g. 
Hampton, MARCO" ) enabled = Column( TINYINT(1), server_default=text("1"), comment="Whether this schema is enabled (could be configurable in the UI)", ) class BLSampleImageScore(Base): __tablename__ = "BLSampleImageScore" blSampleImageScoreId = Column(INTEGER(11), primary_key=True) name = Column(String(45)) score = Column(Float) colour = Column(String(15)) class BLSampleType(Base): __tablename__ = "BLSampleType" blSampleTypeId = Column(INTEGER(10), primary_key=True) name = Column(String(100)) proposalType = Column(String(10)) active = Column( TINYINT(1), server_default=text("1"), comment="1=active, 0=inactive" ) class BeamCalendar(Base): __tablename__ = "BeamCalendar" beamCalendarId = Column(INTEGER(10), primary_key=True) run = Column(String(7), nullable=False) beamStatus = Column(String(24), nullable=False) startDate = Column(DateTime, nullable=False) endDate = Column(DateTime, nullable=False) class BeamlineStats(Base): __tablename__ = "BeamlineStats" beamlineStatsId = Column(INTEGER(11), primary_key=True) beamline = Column(String(10)) recordTimeStamp = Column(DateTime) ringCurrent = Column(Float) energy = Column(Float) gony = Column(Float) beamW = Column(Float) beamH = Column(Float) flux = Column(Float(asdecimal=True)) scanFileW = Column(String(255)) scanFileH = Column(String(255)) class CalendarHash(Base): __tablename__ = "CalendarHash" __table_args__ = { "comment": "Lets people get to their calendars without logging in using a private (hash) url" } calendarHashId = Column(INTEGER(10), primary_key=True) ckey = Column(String(50)) hash = Column(String(128)) beamline = Column(TINYINT(1)) class ComponentSubType(Base): __tablename__ = "ComponentSubType" componentSubTypeId = Column(INTEGER(11), primary_key=True) name = Column(String(31), nullable=False) hasPh = Column(TINYINT(1), server_default=text("0")) proposalType = Column(String(10)) active = Column( TINYINT(1), server_default=text("1"), comment="1=active, 0=inactive" ) class ComponentType(Base): __tablename__ = "ComponentType" componentTypeId = Column(INTEGER(11), primary_key=True) name = Column(String(31), nullable=False) class ConcentrationType(Base): __tablename__ = "ConcentrationType" concentrationTypeId = Column(INTEGER(11), primary_key=True) name = Column(String(31), nullable=False) symbol = Column(String(8), nullable=False) proposalType = Column(String(10)) active = Column( TINYINT(1), server_default=text("1"), comment="1=active, 0=inactive" ) class ContainerRegistry(Base): __tablename__ = "ContainerRegistry" containerRegistryId = Column(INTEGER(11), primary_key=True) barcode = Column(String(20)) comments = Column(String(255)) recordTimestamp = Column(DateTime, server_default=text("current_timestamp()")) class ContainerType(Base): __tablename__ = "ContainerType" __table_args__ = {"comment": "A lookup table for different types of containers"} containerTypeId = Column(INTEGER(10), primary_key=True) name = Column(String(100)) proposalType = Column(String(10)) active = Column( TINYINT(1), server_default=text("1"), comment="1=active, 0=inactive" ) capacity = Column(INTEGER(11)) wellPerRow = Column(SMALLINT(6)) dropPerWellX = Column(SMALLINT(6)) dropPerWellY = Column(SMALLINT(6)) dropHeight = Column(Float) dropWidth = Column(Float) dropOffsetX = Column(Float) dropOffsetY = Column(Float) wellDrop = Column(SMALLINT(6)) class CryoemInitialModel(Base): __tablename__ = "CryoemInitialModel" __table_args__ = {"comment": "Initial cryo-EM model generation results"} cryoemInitialModelId = Column(INTEGER(10), primary_key=True) resolution = 
Column(Float, comment="Unit: Angstroms") numberOfParticles = Column(INTEGER(10)) ParticleClassification = relationship( "ParticleClassification", secondary="ParticleClassification_has_CryoemInitialModel", ) class DataAcquisition(Base): __tablename__ = "DataAcquisition" dataAcquisitionId = Column(INTEGER(10), primary_key=True) sampleCellId = Column(INTEGER(10), nullable=False) framesCount = Column(String(45)) energy = Column(String(45)) waitTime = Column(String(45)) detectorDistance = Column(String(45)) class DataReductionStatus(Base): __tablename__ = "DataReductionStatus" dataReductionStatusId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column(INTEGER(11), nullable=False) status = Column(String(15)) filename = Column(String(255)) message = Column(String(255)) class Detector(Base): __tablename__ = "Detector" __table_args__ = ( Index( "Detector_FKIndex1", "detectorType", "detectorManufacturer", "detectorModel", "detectorPixelSizeHorizontal", "detectorPixelSizeVertical", ), {"comment": "Detector table is linked to a dataCollection"}, ) detectorId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) detectorType = Column(String(255)) detectorManufacturer = Column(String(255)) detectorModel = Column(String(255)) detectorPixelSizeHorizontal = Column(Float) detectorPixelSizeVertical = Column(Float) DETECTORMAXRESOLUTION = Column(Float) DETECTORMINRESOLUTION = Column(Float) detectorSerialNumber = Column(String(30), unique=True) detectorDistanceMin = Column(Float(asdecimal=True)) detectorDistanceMax = Column(Float(asdecimal=True)) trustedPixelValueRangeLower = Column(Float(asdecimal=True)) trustedPixelValueRangeUpper = Column(Float(asdecimal=True)) sensorThickness = Column(Float) overload = Column(Float) XGeoCorr = Column(String(255)) YGeoCorr = Column(String(255)) detectorMode = Column(String(255)) density = Column(Float) composition = Column(String(16)) numberOfPixelsX = Column(MEDIUMINT(9), comment="Detector number of pixels in x") numberOfPixelsY = Column(MEDIUMINT(9), comment="Detector number of pixels in y") detectorRollMin = Column(Float(asdecimal=True), comment="unit: degrees") detectorRollMax = Column(Float(asdecimal=True), comment="unit: degrees") localName = Column(String(40), comment="Colloquial name for the detector") class DewarLocation(Base): __tablename__ = "DewarLocation" __table_args__ = {"comment": "ISPyB Dewar location table"} eventId = Column(INTEGER(10), primary_key=True) dewarNumber = Column(String(128), nullable=False, comment="Dewar number") userId = Column(String(128), comment="User who locates the dewar") dateTime = Column(DateTime, comment="Date and time of locatization") locationName = Column(String(128), comment="Location of the dewar") courierName = Column( String(128), comment="Carrier name who's shipping back the dewar" ) courierTrackingNumber = Column( String(128), comment="Tracking number of the shippment" ) class DewarLocationList(Base): __tablename__ = "DewarLocationList" __table_args__ = {"comment": "List of locations for dewars"} locationId = Column(INTEGER(10), primary_key=True) locationName = Column( String(128), nullable=False, server_default=text("''"), comment="Location" ) class EMMicroscope(Base): __tablename__ = "EMMicroscope" emMicroscopeId = Column(INTEGER(11), primary_key=True) instrumentName = Column(String(100), nullable=False) voltage = Column(Float) CS = Column(Float) detectorPixelSize = Column(Float) C2aperture = Column(Float) ObjAperture = Column(Float) C2lens = Column(Float) class Experiment(Base): 
__tablename__ = "Experiment" experimentId = Column(INTEGER(11), primary_key=True) proposalId = Column(INTEGER(10), nullable=False) name = Column(String(255)) creationDate = Column(DateTime) comments = Column(String(512)) experimentType = Column(String(128)) sourceFilePath = Column(String(256)) dataAcquisitionFilePath = Column( String(256), comment="The file path pointing to the data acquisition. Eventually it may be a compressed file with all the files or just the folder", ) status = Column(String(45)) sessionId = Column(INTEGER(10)) class ExperimentType(Base): __tablename__ = "ExperimentType" __table_args__ = {"comment": "A lookup table for different types of experients"} experimentTypeId = Column(INTEGER(10), primary_key=True) name = Column(String(100)) proposalType = Column(String(10)) active = Column( TINYINT(1), server_default=text("1"), comment="1=active, 0=inactive" ) class Frame(Base): __tablename__ = "Frame" frameId = Column(INTEGER(10), primary_key=True) FRAMESETID = Column(INTEGER(11)) filePath = Column(String(255)) comments = Column(String(45)) class FrameList(Base): __tablename__ = "FrameList" frameListId = Column(INTEGER(10), primary_key=True) comments = Column(INTEGER(10)) class GeometryClassname(Base): __tablename__ = "GeometryClassname" geometryClassnameId = Column(INTEGER(11), primary_key=True) geometryClassname = Column(String(45)) geometryOrder = Column(INTEGER(2), nullable=False) class ImageQualityIndicators(Base): __tablename__ = "ImageQualityIndicators" dataCollectionId = Column(INTEGER(11), primary_key=True, nullable=False) imageNumber = Column(MEDIUMINT(8), primary_key=True, nullable=False) imageId = Column(INTEGER(12)) autoProcProgramId = Column( INTEGER(10), comment="Foreign key to the AutoProcProgram table" ) spotTotal = Column(INTEGER(10), comment="Total number of spots") inResTotal = Column( INTEGER(10), comment="Total number of spots in resolution range" ) goodBraggCandidates = Column( INTEGER(10), comment="Total number of Bragg diffraction spots" ) iceRings = Column(INTEGER(10), comment="Number of ice rings identified") method1Res = Column(Float, comment="Resolution estimate 1 (see publication)") method2Res = Column(Float, comment="Resolution estimate 2 (see publication)") maxUnitCell = Column( Float, comment="Estimation of the largest possible unit cell edge" ) pctSaturationTop50Peaks = Column( Float, comment="The fraction of the dynamic range being used" ) inResolutionOvrlSpots = Column(INTEGER(10), comment="Number of spots overloaded") binPopCutOffMethod2Res = Column( Float, comment="Cut off used in resolution limit calculation" ) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") totalIntegratedSignal = Column(Float(asdecimal=True)) dozor_score = Column(Float(asdecimal=True), comment="dozor_score") driftFactor = Column(Float, comment="EM movie drift factor") class Imager(Base): __tablename__ = "Imager" imagerId = Column(INTEGER(11), primary_key=True) name = Column(String(45), nullable=False) temperature = Column(Float) serial = Column(String(45)) capacity = Column(SMALLINT(6)) class InspectionType(Base): __tablename__ = "InspectionType" inspectionTypeId = Column(INTEGER(11), primary_key=True) name = Column(String(45)) class InstructionSet(Base): __tablename__ = "InstructionSet" instructionSetId = Column(INTEGER(10), primary_key=True) type = Column(String(50)) class IspybCrystalClass(Base): __tablename__ = "IspybCrystalClass" __table_args__ = {"comment": "ISPyB crystal class values"} crystalClassId = Column(INTEGER(11), 
primary_key=True) crystalClass_code = Column(String(20), nullable=False) crystalClass_name = Column(String(255), nullable=False) class IspybReference(Base): __tablename__ = "IspybReference" referenceId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) referenceName = Column(String(255), comment="reference name") referenceUrl = Column(String(1024), comment="url of the reference") referenceBibtext = Column(LargeBinary, comment="bibtext value of the reference") beamline = Column( Enum("All", "ID14-4", "ID23-1", "ID23-2", "ID29", "XRF", "AllXRF", "Mesh"), comment="beamline involved", ) class Laboratory(Base): __tablename__ = "Laboratory" laboratoryId = Column(INTEGER(10), primary_key=True) laboratoryUUID = Column(String(45)) name = Column(String(45)) address = Column(String(255)) city = Column(String(45)) country = Column(String(45)) url = Column(String(255)) organization = Column(String(45)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) laboratoryPk = Column(INTEGER(10)) postcode = Column(String(15)) class Log4Stat(Base): __tablename__ = "Log4Stat" id = Column(INTEGER(11), primary_key=True) priority = Column(String(15)) LOG4JTIMESTAMP = Column(DateTime) msg = Column(String(255)) detail = Column(String(255)) value = Column(String(255)) timestamp = Column(DateTime) class MeasurementUnit(Base): __tablename__ = "MeasurementUnit" measurementUnitId = Column(INTEGER(10), primary_key=True) name = Column(String(45)) unitType = Column(String(45)) class Model(Base): __tablename__ = "Model" modelId = Column(INTEGER(10), primary_key=True) name = Column(String(45)) pdbFile = Column(String(255)) fitFile = Column(String(255)) firFile = Column(String(255)) logFile = Column(String(255)) rFactor = Column(String(45)) chiSqrt = Column(String(45)) volume = Column(String(45)) rg = Column(String(45)) dMax = Column(String(45)) class ModelList(Base): __tablename__ = "ModelList" modelListId = Column(INTEGER(10), primary_key=True) nsdFilePath = Column(String(255)) chi2RgFilePath = Column(String(255)) class MotorPosition(Base): __tablename__ = "MotorPosition" motorPositionId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phiX = Column(Float(asdecimal=True)) phiY = Column(Float(asdecimal=True)) phiZ = Column(Float(asdecimal=True)) sampX = Column(Float(asdecimal=True)) sampY = Column(Float(asdecimal=True)) omega = Column(Float(asdecimal=True)) kappa = Column(Float(asdecimal=True)) phi = Column(Float(asdecimal=True)) chi = Column(Float(asdecimal=True)) gridIndexY = Column(INTEGER(11)) gridIndexZ = Column(INTEGER(11)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) class PDB(Base): __tablename__ = "PDB" pdbId = Column(INTEGER(11), primary_key=True) name = Column(String(255)) contents = Column(MEDIUMTEXT) code = Column(String(4)) source = Column(String(30), comment="Could be e.g. 
AlphaFold or RoseTTAFold") class PHPSession(Base): __tablename__ = "PHPSession" id = Column(String(50), primary_key=True) accessDate = Column(DateTime) data = Column(String(4000)) class Permission(Base): __tablename__ = "Permission" permissionId = Column(INTEGER(11), primary_key=True) type = Column(String(15), nullable=False) description = Column(String(100)) UserGroup = relationship("UserGroup", secondary="UserGroup_has_Permission") class PhasingAnalysis(Base): __tablename__ = "PhasingAnalysis" phasingAnalysisId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") class PhasingProgramRun(Base): __tablename__ = "PhasingProgramRun" phasingProgramRunId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingCommandLine = Column(String(255), comment="Command line for phasing") phasingPrograms = Column(String(255), comment="Phasing programs (comma separated)") phasingStatus = Column(TINYINT(1), comment="success (1) / fail (0)") phasingMessage = Column(String(255), comment="warning, error,...") phasingStartTime = Column(DateTime, comment="Processing start time") phasingEndTime = Column(DateTime, comment="Processing end time") phasingEnvironment = Column(String(255), comment="Cpus, Nodes,...") recordTimeStamp = Column(DateTime, server_default=text("current_timestamp()")) class PlateGroup(Base): __tablename__ = "PlateGroup" plateGroupId = Column(INTEGER(10), primary_key=True) name = Column(String(255)) storageTemperature = Column(String(45)) class PlateType(Base): __tablename__ = "PlateType" PlateTypeId = Column(INTEGER(10), primary_key=True) name = Column(String(45)) description = Column(String(45)) shape = Column(String(45)) rowCount = Column(INTEGER(11)) columnCount = Column(INTEGER(11)) experimentId = Column(INTEGER(10), index=True) class Position(Base): __tablename__ = "Position" positionId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) relativePositionId = Column( ForeignKey("Position.positionId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="relative position, null otherwise", ) posX = Column(Float(asdecimal=True)) posY = Column(Float(asdecimal=True)) posZ = Column(Float(asdecimal=True)) scale = Column(Float(asdecimal=True)) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") X = Column(Float(asdecimal=True), Computed("(`posX`)", persisted=False)) Y = Column(Float(asdecimal=True), Computed("(`posY`)", persisted=False)) Z = Column(Float(asdecimal=True), Computed("(`posZ`)", persisted=False)) parent = relationship("Position", remote_side=[positionId]) class Positioner(Base): __tablename__ = "Positioner" __table_args__ = { "comment": "An arbitrary positioner and its value, could be e.g. a motor. 
class Positioner(Base):
    __tablename__ = "Positioner"
    __table_args__ = {
        "comment": "An arbitrary positioner and its value, could be e.g. a motor. "
        "Allows for instance to store some positions with a sample or subsample"
    }

    positionerId = Column(INTEGER(10), primary_key=True)
    positioner = Column(String(50), nullable=False)
    value = Column(Float, nullable=False)


class ProcessingPipelineCategory(Base):
    __tablename__ = "ProcessingPipelineCategory"
    __table_args__ = {
        "comment": "A lookup table for the category of processing pipeline"
    }

    processingPipelineCategoryId = Column(INTEGER(11), primary_key=True)
    name = Column(String(20), nullable=False)


class PurificationColumn(Base):
    __tablename__ = "PurificationColumn"
    __table_args__ = {
        "comment": "Size exclusion chromatography (SEC) lookup table for BioSAXS"
    }

    purificationColumnId = Column(INTEGER(10), primary_key=True)
    name = Column(String(100))
    active = Column(
        TINYINT(1), server_default=text("1"), comment="1=active, 0=inactive"
    )


class Run(Base):
    __tablename__ = "Run"

    runId = Column(INTEGER(10), primary_key=True)
    timePerFrame = Column(String(45))
    timeStart = Column(String(45))
    timeEnd = Column(String(45))
    storageTemperature = Column(String(45))
    exposureTemperature = Column(String(45))
    spectrophotometer = Column(String(45))
    energy = Column(String(45))
    creationDate = Column(DateTime)
    frameAverage = Column(String(45))
    frameCount = Column(String(45))
    transmission = Column(String(45))
    beamCenterX = Column(String(45))
    beamCenterY = Column(String(45))
    pixelSizeX = Column(String(45))
    pixelSizeY = Column(String(45))
    radiationRelative = Column(String(45))
    radiationAbsolute = Column(String(45))
    normalization = Column(String(45))


t_SAFETYREQUEST = Table(
    "SAFETYREQUEST",
    metadata,
    Column("SAFETYREQUESTID", DECIMAL(10, 0)),
    Column("XMLDOCUMENTID", DECIMAL(10, 0)),
    Column("PROTEINID", DECIMAL(10, 0)),
    Column("PROJECTCODE", String(45)),
    Column("SUBMISSIONDATE", DateTime),
    Column("RESPONSE", DECIMAL(3, 0)),
    Column("REPONSEDATE", DateTime),
    Column("RESPONSEDETAILS", String(255)),
)


class SAMPLECELL(Base):
    __tablename__ = "SAMPLECELL"

    SAMPLECELLID = Column(INTEGER(11), primary_key=True)
    SAMPLEEXPOSUREUNITID = Column(INTEGER(11))
    ID = Column(String(45))
    NAME = Column(String(45))
    DIAMETER = Column(String(45))
    MATERIAL = Column(String(45))


class SAMPLEEXPOSUREUNIT(Base):
    __tablename__ = "SAMPLEEXPOSUREUNIT"

    SAMPLEEXPOSUREUNITID = Column(INTEGER(11), primary_key=True)
    ID = Column(String(45))
    PATHLENGTH = Column(String(45))
    VOLUME = Column(String(45))


class SAXSDATACOLLECTIONGROUP(Base):
    __tablename__ = "SAXSDATACOLLECTIONGROUP"

    DATACOLLECTIONGROUPID = Column(INTEGER(11), primary_key=True)
    DEFAULTDATAACQUISITIONID = Column(INTEGER(11))
    SAXSDATACOLLECTIONARRAYID = Column(INTEGER(11))


class SafetyLevel(Base):
    __tablename__ = "SafetyLevel"

    safetyLevelId = Column(INTEGER(10), primary_key=True)
    code = Column(String(45))
    description = Column(String(45))


class ScanParametersService(Base):
    __tablename__ = "ScanParametersService"

    scanParametersServiceId = Column(INTEGER(10), primary_key=True)
    name = Column(String(45))
    description = Column(String(45))


class Schedule(Base):
    __tablename__ = "Schedule"

    scheduleId = Column(INTEGER(11), primary_key=True)
    name = Column(String(45))


class SchemaStatus(Base):
    __tablename__ = "SchemaStatus"

    schemaStatusId = Column(INTEGER(11), primary_key=True)
    scriptName = Column(String(100), nullable=False, unique=True)
    schemaStatus = Column(String(10))
    recordTimeStamp = Column(
        TIMESTAMP, nullable=False, server_default=text("current_timestamp()")
    )


class ScreeningRankSet(Base):
    __tablename__ = "ScreeningRankSet"

    screeningRankSetId = Column(INTEGER(10), primary_key=True)
    rankEngine =
Column(String(255)) rankingProjectFileName = Column(String(255)) rankingSummaryFileName = Column(String(255)) class Sleeve(Base): __tablename__ = "Sleeve" __table_args__ = { "comment": "Registry of ice-filled sleeves used to cool plates whilst on the goniometer" } sleeveId = Column( TINYINT(3), primary_key=True, comment="The unique sleeve id 1...255 which also identifies its home location in the freezer", ) location = Column( TINYINT(3), comment="NULL == freezer, 1...255 for local storage locations" ) lastMovedToFreezer = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) lastMovedFromFreezer = Column(TIMESTAMP, server_default=text("current_timestamp()")) class UserGroup(Base): __tablename__ = "UserGroup" userGroupId = Column(INTEGER(11), primary_key=True) name = Column(String(31), nullable=False, unique=True) class Workflow(Base): __tablename__ = "Workflow" workflowId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) workflowTitle = Column(String(255)) workflowType = Column( Enum( "Undefined", "BioSAXS Post Processing", "EnhancedCharacterisation", "LineScan", "MeshScan", "Dehydration", "KappaReorientation", "BurnStrategy", "XrayCentering", "DiffractionTomography", "TroubleShooting", "VisualReorientation", "HelicalCharacterisation", "GroupedProcessing", "MXPressE", "MXPressO", "MXPressL", "MXScore", "MXPressI", "MXPressM", "MXPressA", ) ) workflowTypeId = Column(INTEGER(11)) comments = Column(String(1024)) status = Column(String(255)) resultFilePath = Column(String(255)) logFilePath = Column(String(255)) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") workflowDescriptionFullPath = Column( String(255), comment="Full file path to a json description of the workflow" ) class WorkflowType(Base): __tablename__ = "WorkflowType" workflowTypeId = Column(INTEGER(11), primary_key=True) workflowTypeName = Column(String(45)) comments = Column(String(2048)) recordTimeStamp = Column(TIMESTAMP) t_v_Log4Stat = Table( "v_Log4Stat", metadata, Column("id", INTEGER(11), server_default=text("'0'")), Column("priority", String(15)), Column("timestamp", DateTime), Column("msg", String(255)), Column("detail", String(255)), Column("value", String(255)), ) t_v_dewar = Table( "v_dewar", metadata, Column("proposalId", INTEGER(10), server_default=text("'0'")), Column("shippingId", INTEGER(10), server_default=text("'0'")), Column("shippingName", String(45)), Column("dewarId", INTEGER(10), server_default=text("'0'")), Column("dewarName", String(45)), Column("dewarStatus", String(45)), Column("proposalCode", String(45)), Column("proposalNumber", String(45)), Column("creationDate", DateTime), Column("shippingType", String(45)), Column("barCode", String(45)), Column("shippingStatus", String(45)), Column("beamLineName", String(45)), Column("nbEvents", BIGINT(21), server_default=text("'0'")), Column("storesin", BIGINT(21), server_default=text("'0'")), Column("nbSamples", BIGINT(21), server_default=text("'0'")), ) t_v_dewarBeamline = Table( "v_dewarBeamline", metadata, Column("beamLineName", String(45)), Column("COUNT(*)", BIGINT(21), server_default=text("'0'")), ) t_v_dewarBeamlineByWeek = Table( "v_dewarBeamlineByWeek", metadata, Column("Week", String(23)), Column("ID14", BIGINT(21), server_default=text("'0'")), Column("ID23", BIGINT(21), server_default=text("'0'")), Column("ID29", BIGINT(21), server_default=text("'0'")), Column("BM14", BIGINT(21), server_default=text("'0'")), ) t_v_dewarByWeek = Table( "v_dewarByWeek", metadata, 
Column("Week", String(23)), Column("Dewars Tracked", BIGINT(21), server_default=text("'0'")), Column("Dewars Non-Tracked", BIGINT(21), server_default=text("'0'")), ) t_v_dewarByWeekTotal = Table( "v_dewarByWeekTotal", metadata, Column("Week", String(23)), Column("Dewars Tracked", BIGINT(21), server_default=text("'0'")), Column("Dewars Non-Tracked", BIGINT(21), server_default=text("'0'")), Column("Total", BIGINT(21), server_default=text("'0'")), ) t_v_dewarList = Table( "v_dewarList", metadata, Column("proposal", String(90)), Column("shippingName", String(45)), Column("dewarName", String(45)), Column("barCode", String(45)), Column("creationDate", String(10)), Column("shippingType", String(45)), Column("nbEvents", BIGINT(21), server_default=text("'0'")), Column("dewarStatus", String(45)), Column("shippingStatus", String(45)), Column("nbSamples", BIGINT(21), server_default=text("'0'")), ) t_v_dewarProposalCode = Table( "v_dewarProposalCode", metadata, Column("proposalCode", String(45)), Column("COUNT(*)", BIGINT(21), server_default=text("'0'")), ) t_v_dewarProposalCodeByWeek = Table( "v_dewarProposalCodeByWeek", metadata, Column("Week", String(23)), Column("MX", BIGINT(21), server_default=text("'0'")), Column("FX", BIGINT(21), server_default=text("'0'")), Column("BM14U", BIGINT(21), server_default=text("'0'")), Column("BM161", BIGINT(21), server_default=text("'0'")), Column("BM162", BIGINT(21), server_default=text("'0'")), Column("Others", BIGINT(21), server_default=text("'0'")), ) t_v_hour = Table("v_hour", metadata, Column("num", String(18))) t_v_logonByHour = Table( "v_logonByHour", metadata, Column("Hour", String(7)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByHour2 = Table( "v_logonByHour2", metadata, Column("Hour", String(7)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByMonthDay = Table( "v_logonByMonthDay", metadata, Column("Day", String(5)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByMonthDay2 = Table( "v_logonByMonthDay2", metadata, Column("Day", String(5)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByWeek = Table( "v_logonByWeek", metadata, Column("Week", String(23)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByWeek2 = Table( "v_logonByWeek2", metadata, Column("Week", String(23)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByWeekDay = Table( "v_logonByWeekDay", metadata, Column("Day", String(64)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_logonByWeekDay2 = Table( "v_logonByWeekDay2", metadata, Column("Day", String(64)), Column("Distinct logins", BIGINT(21), server_default=text("'0'")), Column("Total logins", BIGINT(22), server_default=text("'0'")), ) t_v_monthDay = Table("v_monthDay", metadata, Column("num", String(10))) class VRun(Base): __tablename__ = "v_run" __table_args__ = (Index("v_run_idx1", "startDate", "endDate"),) runId = Column(INTEGER(11), primary_key=True) run = Column(String(7), 
nullable=False, server_default=text("''")) startDate = Column(DateTime) endDate = Column(DateTime) t_v_sample = Table( "v_sample", metadata, Column("proposalId", INTEGER(10), server_default=text("'0'")), Column("shippingId", INTEGER(10), server_default=text("'0'")), Column("dewarId", INTEGER(10), server_default=text("'0'")), Column("containerId", INTEGER(10), server_default=text("'0'")), Column("blSampleId", INTEGER(10), server_default=text("'0'")), Column("proposalCode", String(45)), Column("proposalNumber", String(45)), Column("creationDate", DateTime), Column("shippingType", String(45)), Column("barCode", String(45)), Column("shippingStatus", String(45)), ) t_v_sampleByWeek = Table( "v_sampleByWeek", metadata, Column("Week", String(23)), Column("Samples", BIGINT(21)), ) t_v_week = Table("v_week", metadata, Column("num", String(7))) t_v_weekDay = Table("v_weekDay", metadata, Column("day", String(10))) class AbInitioModel(Base): __tablename__ = "AbInitioModel" abInitioModelId = Column(INTEGER(10), primary_key=True) modelListId = Column( ForeignKey("ModelList.modelListId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) averagedModelId = Column( ForeignKey("Model.modelId", ondelete="CASCADE", onupdate="CASCADE"), index=True ) rapidShapeDeterminationModelId = Column( ForeignKey("Model.modelId", ondelete="CASCADE", onupdate="CASCADE"), index=True ) shapeDeterminationModelId = Column( ForeignKey("Model.modelId", ondelete="CASCADE", onupdate="CASCADE"), index=True ) comments = Column(String(512)) creationTime = Column(DateTime) Model = relationship( "Model", primaryjoin="AbInitioModel.averagedModelId == Model.modelId" ) ModelList = relationship("ModelList") Model1 = relationship( "Model", primaryjoin="AbInitioModel.rapidShapeDeterminationModelId == Model.modelId", ) Model2 = relationship( "Model", primaryjoin="AbInitioModel.shapeDeterminationModelId == Model.modelId" ) class AutoProcScaling(Base): __tablename__ = "AutoProcScaling" __table_args__ = (Index("AutoProcScalingIdx1", "autoProcScalingId", "autoProcId"),) autoProcScalingId = Column( INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)" ) autoProcId = Column( ForeignKey("AutoProc.autoProcId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="Related autoProc item (used by foreign key)", ) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") AutoProc = relationship("AutoProc") class BFComponent(Base): __tablename__ = "BF_component" componentId = Column(INTEGER(10), primary_key=True) systemId = Column(ForeignKey("BF_system.systemId"), index=True) name = Column(String(100)) description = Column(String(200)) BF_system = relationship("BFSystem") class BFSystemBeamline(Base): __tablename__ = "BF_system_beamline" system_beamlineId = Column(INTEGER(10), primary_key=True) systemId = Column(ForeignKey("BF_system.systemId"), index=True) beamlineName = Column(String(20)) BF_system = relationship("BFSystem") class BLSampleImageAutoScoreClass(Base): __tablename__ = "BLSampleImageAutoScoreClass" __table_args__ = { "comment": "The automated scoring classes - the thing being scored" } blSampleImageAutoScoreClassId = Column(TINYINT(3), primary_key=True) blSampleImageAutoScoreSchemaId = Column( ForeignKey( "BLSampleImageAutoScoreSchema.blSampleImageAutoScoreSchemaId", onupdate="CASCADE", ), index=True, ) scoreClass = Column( String(15), nullable=False, comment="Thing being scored e.g. 
crystal, precipitant", ) BLSampleImageAutoScoreSchema = relationship("BLSampleImageAutoScoreSchema") class BeamApertures(Base): __tablename__ = "BeamApertures" beamAperturesid = Column(INTEGER(11), primary_key=True) beamlineStatsId = Column( ForeignKey("BeamlineStats.beamlineStatsId", ondelete="CASCADE"), index=True ) flux = Column(Float(asdecimal=True)) x = Column(Float) y = Column(Float) apertureSize = Column(SMALLINT(5)) BeamlineStats = relationship("BeamlineStats") class BeamCentres(Base): __tablename__ = "BeamCentres" beamCentresid = Column(INTEGER(11), primary_key=True) beamlineStatsId = Column( ForeignKey("BeamlineStats.beamlineStatsId", ondelete="CASCADE"), index=True ) x = Column(Float) y = Column(Float) zoom = Column(TINYINT(3)) BeamlineStats = relationship("BeamlineStats") class BeamLineSetup(Base): __tablename__ = "BeamLineSetup" beamLineSetupId = Column(INTEGER(10), primary_key=True) detectorId = Column(ForeignKey("Detector.detectorId"), index=True) synchrotronMode = Column(String(255)) undulatorType1 = Column(String(45)) undulatorType2 = Column(String(45)) undulatorType3 = Column(String(45)) focalSpotSizeAtSample = Column(Float) focusingOptic = Column(String(255)) beamDivergenceHorizontal = Column(Float) beamDivergenceVertical = Column(Float) polarisation = Column(Float) monochromatorType = Column(String(255)) setupDate = Column(DateTime) synchrotronName = Column(String(255)) maxExpTimePerDataCollection = Column(Float(asdecimal=True)) maxExposureTimePerImage = Column(Float, comment="unit: seconds") minExposureTimePerImage = Column(Float(asdecimal=True)) goniostatMaxOscillationSpeed = Column(Float(asdecimal=True)) goniostatMaxOscillationWidth = Column( Float(asdecimal=True), comment="unit: degrees" ) goniostatMinOscillationWidth = Column(Float(asdecimal=True)) maxTransmission = Column(Float(asdecimal=True), comment="unit: percentage") minTransmission = Column(Float(asdecimal=True)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) CS = Column(Float, comment="Spherical Aberration, Units: mm?") beamlineName = Column(String(50), comment="Beamline that this setup relates to") beamSizeXMin = Column(Float, comment="unit: um") beamSizeXMax = Column(Float, comment="unit: um") beamSizeYMin = Column(Float, comment="unit: um") beamSizeYMax = Column(Float, comment="unit: um") energyMin = Column(Float, comment="unit: eV") energyMax = Column(Float, comment="unit: eV") omegaMin = Column(Float, comment="unit: degrees") omegaMax = Column(Float, comment="unit: degrees") kappaMin = Column(Float, comment="unit: degrees") kappaMax = Column(Float, comment="unit: degrees") phiMin = Column(Float, comment="unit: degrees") phiMax = Column(Float, comment="unit: degrees") active = Column(TINYINT(1), nullable=False, server_default=text("0")) numberOfImagesMax = Column(MEDIUMINT(8)) numberOfImagesMin = Column(MEDIUMINT(8)) boxSizeXMin = Column(Float(asdecimal=True), comment="For gridscans, unit: um") boxSizeXMax = Column(Float(asdecimal=True), comment="For gridscans, unit: um") boxSizeYMin = Column(Float(asdecimal=True), comment="For gridscans, unit: um") boxSizeYMax = Column(Float(asdecimal=True), comment="For gridscans, unit: um") monoBandwidthMin = Column(Float(asdecimal=True), comment="unit: percentage") monoBandwidthMax = Column(Float(asdecimal=True), comment="unit: percentage") preferredDataCentre = Column( String(30), comment="Relevant datacentre to use to process data from this beamline", ) Detector = 
relationship("Detector") class Buffer(Base): __tablename__ = "Buffer" bufferId = Column(INTEGER(10), primary_key=True) BLSESSIONID = Column(INTEGER(11)) safetyLevelId = Column( ForeignKey("SafetyLevel.safetyLevelId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) name = Column(String(45)) acronym = Column(String(45)) pH = Column(String(45)) composition = Column(String(45)) comments = Column(String(512)) proposalId = Column(INTEGER(10), nullable=False, server_default=text("-1")) SafetyLevel = relationship("SafetyLevel") class FrameSet(Base): __tablename__ = "FrameSet" frameSetId = Column(INTEGER(10), primary_key=True) runId = Column( ForeignKey("Run.runId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) FILEPATH = Column(String(255)) INTERNALPATH = Column(String(255)) frameListId = Column( ForeignKey("FrameList.frameListId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) detectorId = Column(INTEGER(10)) detectorDistance = Column(String(45)) FrameList = relationship("FrameList") Run = relationship("Run") class FrameToList(Base): __tablename__ = "FrameToList" frameToListId = Column(INTEGER(10), primary_key=True) frameListId = Column( ForeignKey("FrameList.frameListId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) frameId = Column( ForeignKey("Frame.frameId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) Frame = relationship("Frame") FrameList = relationship("FrameList") class Instruction(Base): __tablename__ = "Instruction" instructionId = Column(INTEGER(10), primary_key=True) instructionSetId = Column( ForeignKey( "InstructionSet.instructionSetId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) INSTRUCTIONORDER = Column(INTEGER(11)) comments = Column(String(255)) order = Column(INTEGER(11), nullable=False) InstructionSet = relationship("InstructionSet") class Macromolecule(Base): __tablename__ = "Macromolecule" macromoleculeId = Column(INTEGER(11), primary_key=True) proposalId = Column(INTEGER(10)) safetyLevelId = Column( ForeignKey("SafetyLevel.safetyLevelId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) name = Column(String(45)) acronym = Column(String(45)) molecularMass = Column(String(45)) extintionCoefficient = Column(String(45)) sequence = Column(String(1000)) creationDate = Column(DateTime) comments = Column(String(1024)) SafetyLevel = relationship("SafetyLevel") class ModelToList(Base): __tablename__ = "ModelToList" modelToListId = Column(INTEGER(10), primary_key=True) modelId = Column( ForeignKey("Model.modelId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) modelListId = Column( ForeignKey("ModelList.modelListId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) Model = relationship("Model") ModelList = relationship("ModelList") class Person(Base): __tablename__ = "Person" personId = Column(INTEGER(10), primary_key=True) laboratoryId = Column(ForeignKey("Laboratory.laboratoryId"), index=True) siteId = Column(INTEGER(11), index=True) personUUID = Column(String(45)) familyName = Column(String(100), index=True) givenName = Column(String(45)) title = Column(String(45)) emailAddress = Column(String(60)) phoneNumber = Column(String(45)) login = Column(String(45), unique=True) faxNumber = Column(String(45)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) cache = Column(Text) externalId = Column(BINARY(16)) Laboratory = 
relationship("Laboratory") Project = relationship("Project", secondary="Project_has_Person") UserGroup = relationship("UserGroup", secondary="UserGroup_has_Person") class PhasingProgramAttachment(Base): __tablename__ = "PhasingProgramAttachment" phasingProgramAttachmentId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingProgramRunId = Column( ForeignKey( "PhasingProgramRun.phasingProgramRunId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="Related program item", ) fileType = Column( Enum("Map", "Logfile", "PDB", "CSV", "INS", "RES", "TXT"), comment="file type" ) fileName = Column(String(45), comment="file name") filePath = Column(String(255), comment="file path") recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") PhasingProgramRun = relationship("PhasingProgramRun") class ProcessingPipeline(Base): __tablename__ = "ProcessingPipeline" __table_args__ = { "comment": "A lookup table for different processing pipelines and their categories" } processingPipelineId = Column(INTEGER(11), primary_key=True) processingPipelineCategoryId = Column( ForeignKey("ProcessingPipelineCategory.processingPipelineCategoryId"), index=True, ) name = Column(String(20), nullable=False) discipline = Column(String(10), nullable=False) pipelineStatus = Column( Enum("automatic", "optional", "deprecated"), comment="Is the pipeline in operation or available", ) reprocessing = Column( TINYINT(1), server_default=text("1"), comment="Pipeline is available for re-processing", ) ProcessingPipelineCategory = relationship("ProcessingPipelineCategory") class SamplePlate(Base): __tablename__ = "SamplePlate" samplePlateId = Column(INTEGER(10), primary_key=True) BLSESSIONID = Column(INTEGER(11)) plateGroupId = Column( ForeignKey("PlateGroup.plateGroupId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) plateTypeId = Column( ForeignKey("PlateType.PlateTypeId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) instructionSetId = Column( ForeignKey( "InstructionSet.instructionSetId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) boxId = Column(INTEGER(10)) name = Column(String(45)) slotPositionRow = Column(String(45)) slotPositionColumn = Column(String(45)) storageTemperature = Column(String(45)) experimentId = Column( ForeignKey("Experiment.experimentId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) Experiment = relationship("Experiment") InstructionSet = relationship("InstructionSet") PlateGroup = relationship("PlateGroup") PlateType = relationship("PlateType") class SaxsDataCollection(Base): __tablename__ = "SaxsDataCollection" dataCollectionId = Column(INTEGER(10), primary_key=True) BLSESSIONID = Column(INTEGER(11)) experimentId = Column( ForeignKey("Experiment.experimentId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) comments = Column(String(5120)) Experiment = relationship("Experiment") class ScheduleComponent(Base): __tablename__ = "ScheduleComponent" scheduleComponentId = Column(INTEGER(11), primary_key=True) scheduleId = Column( ForeignKey("Schedule.scheduleId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) offset_hours = Column(INTEGER(11)) inspectionTypeId = Column( ForeignKey("InspectionType.inspectionTypeId", ondelete="CASCADE"), index=True ) InspectionType = relationship("InspectionType") Schedule = relationship("Schedule") class SpaceGroup(Base): __tablename__ = "SpaceGroup" spaceGroupId = Column(INTEGER(10), 
primary_key=True, comment="Primary key") spaceGroupNumber = Column(INTEGER(10), comment="ccp4 number pr IUCR") spaceGroupShortName = Column( String(45), index=True, comment="short name without blank" ) spaceGroupName = Column(String(45), comment="verbose name") bravaisLattice = Column(String(45), comment="short name") bravaisLatticeName = Column(String(45), comment="verbose name") pointGroup = Column(String(45), comment="point group") geometryClassnameId = Column( ForeignKey( "GeometryClassname.geometryClassnameId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, ) MX_used = Column( TINYINT(1), nullable=False, server_default=text("0"), comment="1 if used in the crystal form", ) GeometryClassname = relationship("GeometryClassname") t_UserGroup_has_Permission = Table( "UserGroup_has_Permission", metadata, Column( "userGroupId", ForeignKey("UserGroup.userGroupId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, ), Column( "permissionId", ForeignKey("Permission.permissionId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, index=True, ), ) class WorkflowStep(Base): __tablename__ = "WorkflowStep" workflowStepId = Column(INTEGER(11), primary_key=True) workflowId = Column(ForeignKey("Workflow.workflowId"), nullable=False, index=True) type = Column(String(45)) status = Column(String(45)) folderPath = Column(String(1024)) imageResultFilePath = Column(String(1024)) htmlResultFilePath = Column(String(1024)) resultFilePath = Column(String(1024)) comments = Column(String(2048)) crystalSizeX = Column(String(45)) crystalSizeY = Column(String(45)) crystalSizeZ = Column(String(45)) maxDozorScore = Column(String(45)) recordTimeStamp = Column(TIMESTAMP) Workflow = relationship("Workflow") class Assembly(Base): __tablename__ = "Assembly" assemblyId = Column(INTEGER(10), primary_key=True) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) creationDate = Column(DateTime) comments = Column(String(255)) Macromolecule = relationship("Macromolecule") class AutoProcScalingStatistics(Base): __tablename__ = "AutoProcScalingStatistics" __table_args__ = ( Index( "AutoProcScalingStatistics_scalingId_statisticsType", "autoProcScalingId", "scalingStatisticsType", ), ) autoProcScalingStatisticsId = Column( INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)" ) autoProcScalingId = Column( ForeignKey( "AutoProcScaling.autoProcScalingId", ondelete="CASCADE", onupdate="CASCADE" ), comment="Related autoProcScaling item (used by foreign key)", ) scalingStatisticsType = Column( Enum("overall", "innerShell", "outerShell"), nullable=False, index=True, server_default=text("'overall'"), comment="Scaling statistics type", ) comments = Column(String(255), comment="Comments...") resolutionLimitLow = Column(Float, comment="Low resolution limit") resolutionLimitHigh = Column(Float, comment="High resolution limit") rMerge = Column(Float, comment="Rmerge") rMeasWithinIPlusIMinus = Column(Float, comment="Rmeas (within I+/I-)") rMeasAllIPlusIMinus = Column(Float, comment="Rmeas (all I+ & I-)") rPimWithinIPlusIMinus = Column(Float, comment="Rpim (within I+/I-) ") rPimAllIPlusIMinus = Column(Float, comment="Rpim (all I+ & I-)") fractionalPartialBias = Column(Float, comment="Fractional partial bias") nTotalObservations = Column(INTEGER(10), comment="Total number of observations") nTotalUniqueObservations = Column(INTEGER(10), comment="Total number unique") meanIOverSigI = 
Column(Float, comment="Mean((I)/sd(I))") completeness = Column(Float, comment="Completeness") multiplicity = Column(Float, comment="Multiplicity") anomalousCompleteness = Column(Float, comment="Anomalous completeness") anomalousMultiplicity = Column(Float, comment="Anomalous multiplicity") recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") anomalous = Column( TINYINT(1), server_default=text("0"), comment="boolean type:0 noanoum - 1 anoum" ) ccHalf = Column(Float, comment="information from XDS") ccAnomalous = Column(Float) resIOverSigI2 = Column(Float, comment="Resolution where I/Sigma(I) equals 2") AutoProcScaling = relationship("AutoProcScaling") class BFComponentBeamline(Base): __tablename__ = "BF_component_beamline" component_beamlineId = Column(INTEGER(10), primary_key=True) componentId = Column(ForeignKey("BF_component.componentId"), index=True) beamlinename = Column(String(20)) BF_component = relationship("BFComponent") class BFSubcomponent(Base): __tablename__ = "BF_subcomponent" subcomponentId = Column(INTEGER(10), primary_key=True) componentId = Column(ForeignKey("BF_component.componentId"), index=True) name = Column(String(100)) description = Column(String(200)) BF_component = relationship("BFComponent") class BufferHasAdditive(Base): __tablename__ = "BufferHasAdditive" bufferHasAdditiveId = Column(INTEGER(10), primary_key=True) bufferId = Column( ForeignKey("Buffer.bufferId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) additiveId = Column( ForeignKey("Additive.additiveId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) measurementUnitId = Column( ForeignKey( "MeasurementUnit.measurementUnitId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) quantity = Column(String(45)) Additive = relationship("Additive") Buffer = relationship("Buffer") MeasurementUnit = relationship("MeasurementUnit") class ContainerReport(Base): __tablename__ = "ContainerReport" containerReportId = Column(INTEGER(11), primary_key=True) containerRegistryId = Column( ForeignKey("ContainerRegistry.containerRegistryId"), index=True ) personId = Column( ForeignKey("Person.personId"), index=True, comment="Person making report" ) report = Column(Text) attachmentFilePath = Column(String(255)) recordTimestamp = Column(DateTime) ContainerRegistry = relationship("ContainerRegistry") Person = relationship("Person") class MacromoleculeRegion(Base): __tablename__ = "MacromoleculeRegion" macromoleculeRegionId = Column(INTEGER(10), primary_key=True) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) regionType = Column(String(45)) id = Column(String(45)) count = Column(String(45)) sequence = Column(String(45)) Macromolecule = relationship("Macromolecule") class ModelBuilding(Base): __tablename__ = "ModelBuilding" modelBuildingId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingAnalysisId = Column( ForeignKey( "PhasingAnalysis.phasingAnalysisId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, comment="Related phasing analysis item", ) phasingProgramRunId = Column( ForeignKey( "PhasingProgramRun.phasingProgramRunId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="Related program item", ) spaceGroupId = Column( ForeignKey("SpaceGroup.spaceGroupId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="Related spaceGroup", ) lowRes = 
Column(Float(asdecimal=True)) highRes = Column(Float(asdecimal=True)) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") PhasingAnalysis = relationship("PhasingAnalysis") PhasingProgramRun = relationship("PhasingProgramRun") SpaceGroup = relationship("SpaceGroup") class Phasing(Base): __tablename__ = "Phasing" phasingId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingAnalysisId = Column( ForeignKey( "PhasingAnalysis.phasingAnalysisId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, comment="Related phasing analysis item", ) phasingProgramRunId = Column( ForeignKey( "PhasingProgramRun.phasingProgramRunId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="Related program item", ) spaceGroupId = Column( ForeignKey("SpaceGroup.spaceGroupId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="Related spaceGroup", ) method = Column( Enum("solvent flattening", "solvent flipping", "e", "SAD", "shelxe"), comment="phasing method", ) solventContent = Column(Float(asdecimal=True)) enantiomorph = Column(TINYINT(1), comment="0 or 1") lowRes = Column(Float(asdecimal=True)) highRes = Column(Float(asdecimal=True)) recordTimeStamp = Column(DateTime, server_default=text("current_timestamp()")) PhasingAnalysis = relationship("PhasingAnalysis") PhasingProgramRun = relationship("PhasingProgramRun") SpaceGroup = relationship("SpaceGroup") class PhasingStep(Base): __tablename__ = "PhasingStep" phasingStepId = Column(INTEGER(10), primary_key=True) previousPhasingStepId = Column(INTEGER(10)) programRunId = Column( ForeignKey("PhasingProgramRun.phasingProgramRunId"), index=True ) spaceGroupId = Column(ForeignKey("SpaceGroup.spaceGroupId"), index=True) autoProcScalingId = Column( ForeignKey("AutoProcScaling.autoProcScalingId"), index=True ) phasingAnalysisId = Column(INTEGER(10), index=True) phasingStepType = Column( Enum("PREPARE", "SUBSTRUCTUREDETERMINATION", "PHASING", "MODELBUILDING") ) method = Column(String(45)) solventContent = Column(String(45)) enantiomorph = Column(String(45)) lowRes = Column(String(45)) highRes = Column(String(45)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) AutoProcScaling = relationship("AutoProcScaling") PhasingProgramRun = relationship("PhasingProgramRun") SpaceGroup = relationship("SpaceGroup") class PhasingHasScaling(Base): __tablename__ = "Phasing_has_Scaling" phasingHasScalingId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingAnalysisId = Column( ForeignKey( "PhasingAnalysis.phasingAnalysisId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, comment="Related phasing analysis item", ) autoProcScalingId = Column( ForeignKey( "AutoProcScaling.autoProcScalingId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, comment="Related autoProcScaling item", ) datasetNumber = Column( INTEGER(11), comment="serial number of the dataset and always reserve 0 for the reference", ) recordTimeStamp = Column(DateTime, server_default=text("current_timestamp()")) AutoProcScaling = relationship("AutoProcScaling") PhasingAnalysis = relationship("PhasingAnalysis") class PreparePhasingData(Base): __tablename__ = "PreparePhasingData" preparePhasingDataId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingAnalysisId = Column( ForeignKey( "PhasingAnalysis.phasingAnalysisId", ondelete="CASCADE", 
onupdate="CASCADE" ), nullable=False, index=True, comment="Related phasing analysis item", ) phasingProgramRunId = Column( ForeignKey( "PhasingProgramRun.phasingProgramRunId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="Related program item", ) spaceGroupId = Column( ForeignKey("SpaceGroup.spaceGroupId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="Related spaceGroup", ) lowRes = Column(Float(asdecimal=True)) highRes = Column(Float(asdecimal=True)) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") PhasingAnalysis = relationship("PhasingAnalysis") PhasingProgramRun = relationship("PhasingProgramRun") SpaceGroup = relationship("SpaceGroup") class Project(Base): __tablename__ = "Project" projectId = Column(INTEGER(11), primary_key=True) personId = Column(ForeignKey("Person.personId"), index=True) title = Column(String(200)) acronym = Column(String(100)) owner = Column(String(50)) Person = relationship("Person") Protein = relationship("Protein", secondary="Project_has_Protein") BLSession = relationship("BLSession", secondary="Project_has_Session") Shipping = relationship("Shipping", secondary="Project_has_Shipping") XFEFluorescenceSpectrum = relationship( "XFEFluorescenceSpectrum", secondary="Project_has_XFEFSpectrum" ) class Proposal(Base): __tablename__ = "Proposal" __table_args__ = ( Index( "Proposal_FKIndexCodeNumber", "proposalCode", "proposalNumber", unique=True ), ) proposalId = Column(INTEGER(10), primary_key=True) personId = Column( ForeignKey("Person.personId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) title = Column(String(200)) proposalCode = Column(String(45)) proposalNumber = Column(String(45)) bltimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) proposalType = Column(String(2), comment="Proposal type: MX, BX") externalId = Column(BINARY(16)) state = Column(Enum("Open", "Closed", "Cancelled"), server_default=text("'Open'")) Person = relationship("Person") class SamplePlatePosition(Base): __tablename__ = "SamplePlatePosition" samplePlatePositionId = Column(INTEGER(10), primary_key=True) samplePlateId = Column( ForeignKey("SamplePlate.samplePlateId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) rowNumber = Column(INTEGER(11)) columnNumber = Column(INTEGER(11)) volume = Column(String(45)) SamplePlate = relationship("SamplePlate") class StockSolution(Base): __tablename__ = "StockSolution" stockSolutionId = Column(INTEGER(10), primary_key=True) BLSESSIONID = Column(INTEGER(11)) bufferId = Column( ForeignKey("Buffer.bufferId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) instructionSetId = Column( ForeignKey( "InstructionSet.instructionSetId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) boxId = Column(INTEGER(10)) name = Column(String(45)) storageTemperature = Column(String(55)) volume = Column(String(55)) concentration = Column(String(55)) comments = Column(String(255)) proposalId = Column(INTEGER(10), nullable=False, server_default=text("-1")) Buffer = relationship("Buffer") InstructionSet = relationship("InstructionSet") Macromolecule = relationship("Macromolecule") class Stoichiometry(Base): __tablename__ = "Stoichiometry" stoichiometryId = Column(INTEGER(10), primary_key=True) hostMacromoleculeId = Column( ForeignKey( 
"Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) ratio = Column(String(45)) Macromolecule = relationship( "Macromolecule", primaryjoin="Stoichiometry.hostMacromoleculeId == Macromolecule.macromoleculeId", ) Macromolecule1 = relationship( "Macromolecule", primaryjoin="Stoichiometry.macromoleculeId == Macromolecule.macromoleculeId", ) class Structure(Base): __tablename__ = "Structure" structureId = Column(INTEGER(10), primary_key=True) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) PDB = Column(String(45)) structureType = Column(String(45)) fromResiduesBases = Column(String(45)) toResiduesBases = Column(String(45)) sequence = Column(String(45)) Macromolecule = relationship("Macromolecule") class SubstructureDetermination(Base): __tablename__ = "SubstructureDetermination" substructureDeterminationId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingAnalysisId = Column( ForeignKey( "PhasingAnalysis.phasingAnalysisId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, comment="Related phasing analysis item", ) phasingProgramRunId = Column( ForeignKey( "PhasingProgramRun.phasingProgramRunId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="Related program item", ) spaceGroupId = Column( ForeignKey("SpaceGroup.spaceGroupId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="Related spaceGroup", ) method = Column( Enum("SAD", "MAD", "SIR", "SIRAS", "MR", "MIR", "MIRAS", "RIP", "RIPAS"), comment="phasing method", ) lowRes = Column(Float(asdecimal=True)) highRes = Column(Float(asdecimal=True)) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") PhasingAnalysis = relationship("PhasingAnalysis") PhasingProgramRun = relationship("PhasingProgramRun") SpaceGroup = relationship("SpaceGroup") class Subtraction(Base): __tablename__ = "Subtraction" subtractionId = Column(INTEGER(10), primary_key=True) dataCollectionId = Column( ForeignKey( "SaxsDataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, ) rg = Column(String(45)) rgStdev = Column(String(45)) I0 = Column(String(45)) I0Stdev = Column(String(45)) firstPointUsed = Column(String(45)) lastPointUsed = Column(String(45)) quality = Column(String(45)) isagregated = Column(String(45)) concentration = Column(String(45)) gnomFilePath = Column(String(255)) rgGuinier = Column(String(45)) rgGnom = Column(String(45)) dmax = Column(String(45)) total = Column(String(45)) volume = Column(String(45)) creationTime = Column(DateTime) kratkyFilePath = Column(String(255)) scatteringFilePath = Column(String(255)) guinierFilePath = Column(String(255)) SUBTRACTEDFILEPATH = Column(String(255)) gnomFilePathOutput = Column(String(255)) substractedFilePath = Column(String(255)) SaxsDataCollection = relationship("SaxsDataCollection") t_UserGroup_has_Person = Table( "UserGroup_has_Person", metadata, Column( "userGroupId", ForeignKey("UserGroup.userGroupId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, ), Column( "personId", ForeignKey("Person.personId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, index=True, ), ) class AssemblyHasMacromolecule(Base): 
__tablename__ = "AssemblyHasMacromolecule" AssemblyHasMacromoleculeId = Column(INTEGER(10), primary_key=True) assemblyId = Column( ForeignKey("Assembly.assemblyId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) Assembly = relationship("Assembly") Macromolecule = relationship("Macromolecule") class BFSubcomponentBeamline(Base): __tablename__ = "BF_subcomponent_beamline" subcomponent_beamlineId = Column(INTEGER(10), primary_key=True) subcomponentId = Column(ForeignKey("BF_subcomponent.subcomponentId"), index=True) beamlinename = Column(String(20)) BF_subcomponent = relationship("BFSubcomponent") class BLSampleGroup(Base): __tablename__ = "BLSampleGroup" blSampleGroupId = Column(INTEGER(11), primary_key=True) name = Column(String(100), comment="Human-readable name") proposalId = Column( ForeignKey("Proposal.proposalId", ondelete="SET NULL", onupdate="CASCADE"), index=True, ) Proposal = relationship("Proposal") class BLSession(Base): __tablename__ = "BLSession" __table_args__ = (Index("proposalId", "proposalId", "visit_number", unique=True),) sessionId = Column(INTEGER(10), primary_key=True) beamLineSetupId = Column( ForeignKey( "BeamLineSetup.beamLineSetupId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) proposalId = Column( ForeignKey("Proposal.proposalId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, server_default=text("0"), ) beamCalendarId = Column(ForeignKey("BeamCalendar.beamCalendarId"), index=True) projectCode = Column(String(45)) startDate = Column(DateTime, index=True) endDate = Column(DateTime, index=True) beamLineName = Column(String(45), index=True) scheduled = Column(TINYINT(1)) nbShifts = Column(INTEGER(10)) comments = Column(String(2000)) beamLineOperator = Column(String(45)) bltimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) visit_number = Column(INTEGER(10), server_default=text("0")) usedFlag = Column( TINYINT(1), comment="indicates if session has Datacollections or XFE or EnergyScans attached", ) sessionTitle = Column(String(255), comment="fx accounts only") structureDeterminations = Column(Float) dewarTransport = Column(Float) databackupFrance = Column(Float, comment="data backup and express delivery France") databackupEurope = Column(Float, comment="data backup and express delivery Europe") expSessionPk = Column(INTEGER(11), comment="smis session Pk ") operatorSiteNumber = Column(String(10), index=True, comment="matricule site") lastUpdate = Column( TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'"), comment="last update timestamp: by default the end of the session, the last collect...", ) protectedData = Column( String(1024), comment="indicates if the data are protected or not" ) externalId = Column(BINARY(16)) archived = Column( TINYINT(1), server_default=text("0"), comment="The data for the session is archived and no longer available on disk", ) BeamCalendar = relationship("BeamCalendar") BeamLineSetup = relationship("BeamLineSetup") Proposal = relationship("Proposal") Shipping = relationship("Shipping", secondary="ShippingHasSession") class ContainerRegistryHasProposal(Base): __tablename__ = "ContainerRegistry_has_Proposal" __table_args__ = ( Index("containerRegistryId", "containerRegistryId", "proposalId", unique=True), ) containerRegistryHasProposalId = Column(INTEGER(11), primary_key=True) containerRegistryId = 
Column(ForeignKey("ContainerRegistry.containerRegistryId")) proposalId = Column(ForeignKey("Proposal.proposalId"), index=True) personId = Column( ForeignKey("Person.personId"), index=True, comment="Person registering the container", ) recordTimestamp = Column(DateTime, server_default=text("current_timestamp()")) ContainerRegistry = relationship("ContainerRegistry") Person = relationship("Person") Proposal = relationship("Proposal") class DiffractionPlan(Base): __tablename__ = "DiffractionPlan" diffractionPlanId = Column(INTEGER(10), primary_key=True) name = Column(String(20)) experimentKind = Column( Enum( "Default", "MXPressE", "MXPressO", "MXPressE_SAD", "MXScore", "MXPressM", "MAD", "SAD", "Fixed", "Ligand binding", "Refinement", "OSC", "MAD - Inverse Beam", "SAD - Inverse Beam", "MESH", "XFE", "Stepped transmission", "XChem High Symmetry", "XChem Low Symmetry", "Commissioning", ) ) observedResolution = Column(Float) minimalResolution = Column(Float) exposureTime = Column(Float) oscillationRange = Column(Float) maximalResolution = Column(Float) screeningResolution = Column(Float) radiationSensitivity = Column(Float) anomalousScatterer = Column(String(255)) preferredBeamSizeX = Column(Float) preferredBeamSizeY = Column(Float) preferredBeamDiameter = Column(Float) comments = Column(String(1024)) DIFFRACTIONPLANUUID = Column(String(1000)) aimedCompleteness = Column(Float(asdecimal=True)) aimedIOverSigmaAtHighestRes = Column(Float(asdecimal=True)) aimedMultiplicity = Column(Float(asdecimal=True)) aimedResolution = Column(Float(asdecimal=True)) anomalousData = Column(TINYINT(1), server_default=text("0")) complexity = Column(String(45)) estimateRadiationDamage = Column(TINYINT(1), server_default=text("0")) forcedSpaceGroup = Column(String(45)) requiredCompleteness = Column(Float(asdecimal=True)) requiredMultiplicity = Column(Float(asdecimal=True)) requiredResolution = Column(Float(asdecimal=True)) strategyOption = Column(VARCHAR(200)) kappaStrategyOption = Column(String(45)) numberOfPositions = Column(INTEGER(11)) minDimAccrossSpindleAxis = Column( Float(asdecimal=True), comment="minimum dimension accross the spindle axis" ) maxDimAccrossSpindleAxis = Column( Float(asdecimal=True), comment="maximum dimension accross the spindle axis" ) radiationSensitivityBeta = Column(Float(asdecimal=True)) radiationSensitivityGamma = Column(Float(asdecimal=True)) minOscWidth = Column(Float) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) monochromator = Column(String(8), comment="DMM or DCM") energy = Column(Float, comment="eV") transmission = Column(Float, comment="Decimal fraction in range [0,1]") boxSizeX = Column(Float, comment="microns") boxSizeY = Column(Float, comment="microns") kappaStart = Column(Float, comment="degrees") axisStart = Column(Float, comment="degrees") axisRange = Column(Float, comment="degrees") numberOfImages = Column(MEDIUMINT(9), comment="The number of images requested") presetForProposalId = Column( ForeignKey("Proposal.proposalId"), index=True, comment="Indicates this plan is available to all sessions on given proposal", ) beamLineName = Column( String(45), comment="Indicates this plan is available to all sessions on given beamline", ) detectorId = Column( ForeignKey("Detector.detectorId", onupdate="CASCADE"), index=True ) distance = Column(Float(asdecimal=True)) orientation = Column(Float(asdecimal=True)) monoBandwidth = Column(Float(asdecimal=True)) centringMethod = 
Column(Enum("xray", "loop", "diffraction", "optical")) userPath = Column( String(100), comment='User-specified relative "root" path inside the session directory to be used for holding collected data', ) robotPlateTemperature = Column(Float, comment="units: kelvin") exposureTemperature = Column(Float, comment="units: kelvin") experimentTypeId = Column(ForeignKey("ExperimentType.experimentTypeId"), index=True) purificationColumnId = Column( ForeignKey("PurificationColumn.purificationColumnId"), index=True ) collectionMode = Column( Enum("auto", "manual"), comment="The requested collection mode, possible values are auto, manual", ) priority = Column( INTEGER(4), comment="The priority of this sample relative to others in the shipment", ) qMin = Column(Float, comment="minimum in qRange, unit: nm^-1, needed for SAXS") qMax = Column(Float, comment="maximum in qRange, unit: nm^-1, needed for SAXS") reductionParametersAveraging = Column( Enum("All", "Fastest Dimension", "1D"), comment="Post processing params for SAXS", ) Detector = relationship("Detector") ExperimentType = relationship("ExperimentType") Proposal = relationship("Proposal") PurificationColumn = relationship("PurificationColumn") class LabContact(Base): __tablename__ = "LabContact" __table_args__ = ( Index("cardNameAndProposal", "cardName", "proposalId", unique=True), Index("personAndProposal", "personId", "proposalId", unique=True), ) labContactId = Column(INTEGER(10), primary_key=True) personId = Column( ForeignKey("Person.personId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, ) cardName = Column(String(40), nullable=False) proposalId = Column( ForeignKey("Proposal.proposalId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) defaultCourrierCompany = Column(String(45)) courierAccount = Column(String(45)) billingReference = Column(String(45)) dewarAvgCustomsValue = Column(INTEGER(10), nullable=False, server_default=text("0")) dewarAvgTransportValue = Column( INTEGER(10), nullable=False, server_default=text("0") ) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) Person = relationship("Person") Proposal = relationship("Proposal") class PhasingStatistics(Base): __tablename__ = "PhasingStatistics" phasingStatisticsId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) phasingHasScalingId1 = Column( ForeignKey( "Phasing_has_Scaling.phasingHasScalingId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="the dataset in question", ) phasingHasScalingId2 = Column( ForeignKey( "Phasing_has_Scaling.phasingHasScalingId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, comment="if this is MIT or MAD, which scaling are being compared, null otherwise", ) phasingStepId = Column(ForeignKey("PhasingStep.phasingStepId"), index=True) numberOfBins = Column(INTEGER(11), comment="the total number of bins") binNumber = Column(INTEGER(11), comment="binNumber, 999 for overall") lowRes = Column( Float(asdecimal=True), comment="low resolution cutoff of this binfloat" ) highRes = Column( Float(asdecimal=True), comment="high resolution cutoff of this binfloat" ) metric = Column( Enum( "Rcullis", "Average Fragment Length", "Chain Count", "Residues Count", "CC", "PhasingPower", "FOM", '<d"/sig>', "Best CC", "CC(1/2)", "Weak CC", "CFOM", "Pseudo_free_CC", "CC of partial model", ), comment="metric", ) statisticsValue = Column(Float(asdecimal=True), comment="the statistics 
value") nReflections = Column(INTEGER(11)) recordTimeStamp = Column(DateTime, server_default=text("current_timestamp()")) Phasing_has_Scaling = relationship( "PhasingHasScaling", primaryjoin="PhasingStatistics.phasingHasScalingId1 == PhasingHasScaling.phasingHasScalingId", ) Phasing_has_Scaling1 = relationship( "PhasingHasScaling", primaryjoin="PhasingStatistics.phasingHasScalingId2 == PhasingHasScaling.phasingHasScalingId", ) PhasingStep = relationship("PhasingStep") t_Project_has_Person = Table( "Project_has_Person", metadata, Column( "projectId", ForeignKey("Project.projectId", ondelete="CASCADE"), primary_key=True, nullable=False, ), Column( "personId", ForeignKey("Person.personId", ondelete="CASCADE"), primary_key=True, nullable=False, index=True, ), ) class ProjectHasUser(Base): __tablename__ = "Project_has_User" projecthasuserid = Column(INTEGER(11), primary_key=True) projectid = Column(ForeignKey("Project.projectId"), nullable=False, index=True) username = Column(String(15)) Project = relationship("Project") class ProposalHasPerson(Base): __tablename__ = "ProposalHasPerson" proposalHasPersonId = Column(INTEGER(10), primary_key=True) proposalId = Column(ForeignKey("Proposal.proposalId"), nullable=False, index=True) personId = Column(ForeignKey("Person.personId"), nullable=False, index=True) role = Column( Enum( "Co-Investigator", "Principal Investigator", "Alternate Contact", "ERA Admin", "Associate", ) ) Person = relationship("Person") Proposal = relationship("Proposal") class Protein(Base): __tablename__ = "Protein" __table_args__ = (Index("ProteinAcronym_Index", "proposalId", "acronym"),) proteinId = Column(INTEGER(10), primary_key=True) proposalId = Column( ForeignKey("Proposal.proposalId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, server_default=text("0"), ) name = Column(String(255)) acronym = Column(String(45), index=True) description = Column( Text, comment="A description/summary using words and sentences" ) hazardGroup = Column( TINYINT(3), nullable=False, server_default=text("1"), comment="A.k.a. risk group", ) containmentLevel = Column( TINYINT(3), nullable=False, server_default=text("1"), comment="A.k.a. 
biosafety level, which indicates the level of containment required", ) safetyLevel = Column(Enum("GREEN", "YELLOW", "RED")) molecularMass = Column(Float(asdecimal=True)) proteinType = Column(String(45)) personId = Column(INTEGER(10), index=True) bltimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) isCreatedBySampleSheet = Column(TINYINT(1), server_default=text("0")) sequence = Column(Text) MOD_ID = Column(String(20)) componentTypeId = Column( ForeignKey( "ComponentType.componentTypeId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) concentrationTypeId = Column( ForeignKey( "ConcentrationType.concentrationTypeId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, ) _global = Column("global", TINYINT(1), server_default=text("0")) externalId = Column(BINARY(16)) density = Column(Float) abundance = Column(Float, comment="Deprecated") isotropy = Column(Enum("isotropic", "anisotropic")) ComponentType = relationship("ComponentType") ConcentrationType = relationship("ConcentrationType") Proposal = relationship("Proposal") ComponentSubType = relationship( "ComponentSubType", secondary="Component_has_SubType" ) class SWOnceToken(Base): __tablename__ = "SW_onceToken" __table_args__ = { "comment": "One-time use tokens needed for token auth in order to grant access to file downloads and webcams (and some images)" } onceTokenId = Column(INTEGER(11), primary_key=True) token = Column(String(128)) personId = Column(ForeignKey("Person.personId"), index=True) proposalId = Column(ForeignKey("Proposal.proposalId"), index=True) validity = Column(String(200)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) Person = relationship("Person") Proposal = relationship("Proposal") class Screen(Base): __tablename__ = "Screen" screenId = Column(INTEGER(11), primary_key=True) name = Column(String(45)) proposalId = Column(ForeignKey("Proposal.proposalId"), index=True) _global = Column("global", TINYINT(1)) Proposal = relationship("Proposal") class Specimen(Base): __tablename__ = "Specimen" specimenId = Column(INTEGER(10), primary_key=True) BLSESSIONID = Column(INTEGER(11)) bufferId = Column( ForeignKey("Buffer.bufferId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) macromoleculeId = Column( ForeignKey( "Macromolecule.macromoleculeId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) samplePlatePositionId = Column( ForeignKey( "SamplePlatePosition.samplePlatePositionId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, ) safetyLevelId = Column( ForeignKey("SafetyLevel.safetyLevelId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) stockSolutionId = Column( ForeignKey( "StockSolution.stockSolutionId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) code = Column(String(255)) concentration = Column(String(45)) volume = Column(String(45)) experimentId = Column( ForeignKey("Experiment.experimentId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) comments = Column(String(5120)) Buffer = relationship("Buffer") Experiment = relationship("Experiment") Macromolecule = relationship("Macromolecule") SafetyLevel = relationship("SafetyLevel") SamplePlatePosition = relationship("SamplePlatePosition") StockSolution = relationship("StockSolution") class SubtractionToAbInitioModel(Base): __tablename__ = "SubtractionToAbInitioModel" subtractionToAbInitioModelId = Column(INTEGER(10), primary_key=True) abInitioId = Column( ForeignKey( "AbInitioModel.abInitioModelId", 
ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) subtractionId = Column( ForeignKey("Subtraction.subtractionId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) AbInitioModel = relationship("AbInitioModel") Subtraction = relationship("Subtraction") class AssemblyRegion(Base): __tablename__ = "AssemblyRegion" assemblyRegionId = Column(INTEGER(10), primary_key=True) assemblyHasMacromoleculeId = Column( ForeignKey( "AssemblyHasMacromolecule.AssemblyHasMacromoleculeId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, ) assemblyRegionType = Column(String(45)) name = Column(String(45)) fromResiduesBases = Column(String(45)) toResiduesBases = Column(String(45)) AssemblyHasMacromolecule = relationship("AssemblyHasMacromolecule") class BFFault(Base): __tablename__ = "BF_fault" faultId = Column(INTEGER(10), primary_key=True) sessionId = Column(ForeignKey("BLSession.sessionId"), nullable=False, index=True) owner = Column(String(50)) subcomponentId = Column(ForeignKey("BF_subcomponent.subcomponentId"), index=True) starttime = Column(DateTime) endtime = Column(DateTime) beamtimelost = Column(TINYINT(1)) beamtimelost_starttime = Column(DateTime) beamtimelost_endtime = Column(DateTime) title = Column(String(200)) description = Column(Text) resolved = Column(TINYINT(1)) resolution = Column(Text) attachment = Column(String(200)) eLogId = Column(INTEGER(11)) assignee = Column(String(50)) personId = Column(ForeignKey("Person.personId"), index=True) assigneeId = Column(ForeignKey("Person.personId"), index=True) Person = relationship("Person", primaryjoin="BFFault.assigneeId == Person.personId") Person1 = relationship("Person", primaryjoin="BFFault.personId == Person.personId") BLSession = relationship("BLSession") BF_subcomponent = relationship("BFSubcomponent") class BLSessionHasSCPosition(Base): __tablename__ = "BLSession_has_SCPosition" blsessionhasscpositionid = Column(INTEGER(11), primary_key=True) blsessionid = Column( ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) scContainer = Column( SMALLINT(5), comment="Position of container within sample changer" ) containerPosition = Column( SMALLINT(5), comment="Position of sample within container" ) BLSession = relationship("BLSession") class BeamlineAction(Base): __tablename__ = "BeamlineAction" beamlineActionId = Column(INTEGER(11), primary_key=True) sessionId = Column(ForeignKey("BLSession.sessionId"), index=True) startTimestamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp() ON UPDATE current_timestamp()"), ) endTimestamp = Column( TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'") ) message = Column(String(255)) parameter = Column(String(50)) value = Column(String(30)) loglevel = Column(Enum("DEBUG", "CRITICAL", "INFO")) status = Column( Enum("PAUSED", "RUNNING", "TERMINATED", "COMPLETE", "ERROR", "EPICSFAIL") ) BLSession = relationship("BLSession") class ComponentLattice(Base): __tablename__ = "ComponentLattice" componentLatticeId = Column(INTEGER(11), primary_key=True) componentId = Column(ForeignKey("Protein.proteinId"), index=True) spaceGroup = Column(String(20)) cell_a = Column(Float(asdecimal=True)) cell_b = Column(Float(asdecimal=True)) cell_c = Column(Float(asdecimal=True)) cell_alpha = Column(Float(asdecimal=True)) cell_beta = Column(Float(asdecimal=True)) cell_gamma = Column(Float(asdecimal=True)) Protein = relationship("Protein") t_Component_has_SubType = Table( "Component_has_SubType", metadata, 
Column( "componentId", ForeignKey("Protein.proteinId", ondelete="CASCADE"), primary_key=True, nullable=False, ), Column( "componentSubTypeId", ForeignKey( "ComponentSubType.componentSubTypeId", ondelete="CASCADE", onupdate="CASCADE", ), primary_key=True, nullable=False, index=True, ), ) class Crystal(Base): __tablename__ = "Crystal" crystalId = Column(INTEGER(10), primary_key=True) diffractionPlanId = Column( ForeignKey( "DiffractionPlan.diffractionPlanId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) proteinId = Column( ForeignKey("Protein.proteinId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) crystalUUID = Column(String(45)) name = Column(String(255)) spaceGroup = Column(String(20)) morphology = Column(String(255)) color = Column(String(45)) size_X = Column(Float(asdecimal=True)) size_Y = Column(Float(asdecimal=True)) size_Z = Column(Float(asdecimal=True)) cell_a = Column(Float(asdecimal=True)) cell_b = Column(Float(asdecimal=True)) cell_c = Column(Float(asdecimal=True)) cell_alpha = Column(Float(asdecimal=True)) cell_beta = Column(Float(asdecimal=True)) cell_gamma = Column(Float(asdecimal=True)) comments = Column(String(255)) pdbFileName = Column(String(255), comment="pdb file name") pdbFilePath = Column(String(1024), comment="pdb file path") recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) abundance = Column(Float) theoreticalDensity = Column(Float) DiffractionPlan = relationship("DiffractionPlan") Protein = relationship("Protein") class DataCollectionPlanHasDetector(Base): __tablename__ = "DataCollectionPlan_has_Detector" __table_args__ = ( Index( "dataCollectionPlanId", "dataCollectionPlanId", "detectorId", unique=True ), ) dataCollectionPlanHasDetectorId = Column(INTEGER(11), primary_key=True) dataCollectionPlanId = Column( ForeignKey("DiffractionPlan.diffractionPlanId"), nullable=False ) detectorId = Column(ForeignKey("Detector.detectorId"), nullable=False, index=True) exposureTime = Column(Float(asdecimal=True)) distance = Column(Float(asdecimal=True)) roll = Column(Float(asdecimal=True)) DiffractionPlan = relationship("DiffractionPlan") Detector = relationship("Detector") class DewarRegistry(Base): __tablename__ = "DewarRegistry" dewarRegistryId = Column(INTEGER(11), primary_key=True) facilityCode = Column(String(20), nullable=False, unique=True) proposalId = Column( ForeignKey("Proposal.proposalId", onupdate="CASCADE"), index=True ) labContactId = Column( ForeignKey("LabContact.labContactId", ondelete="SET NULL", onupdate="CASCADE"), index=True, ) purchaseDate = Column(DateTime) bltimestamp = Column( DateTime, nullable=False, server_default=text("current_timestamp()") ) LabContact = relationship("LabContact") Proposal = relationship("Proposal") class ExperimentKindDetails(Base): __tablename__ = "ExperimentKindDetails" experimentKindId = Column(INTEGER(10), primary_key=True) diffractionPlanId = Column( ForeignKey( "DiffractionPlan.diffractionPlanId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) exposureIndex = Column(INTEGER(10)) dataCollectionType = Column(String(45)) dataCollectionKind = Column(String(45)) wedgeValue = Column(Float) DiffractionPlan = relationship("DiffractionPlan") class Measurement(Base): __tablename__ = "Measurement" specimenId = Column( ForeignKey("Specimen.specimenId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) runId = Column( 
ForeignKey("Run.runId", ondelete="CASCADE", onupdate="CASCADE"), index=True ) code = Column(String(100)) priorityLevelId = Column(INTEGER(10)) exposureTemperature = Column(String(45)) viscosity = Column(String(45)) flow = Column(TINYINT(1)) extraFlowTime = Column(String(45)) volumeToLoad = Column(String(45)) waitTime = Column(String(45)) transmission = Column(String(45)) comments = Column(String(512)) measurementId = Column(INTEGER(10), primary_key=True) Run = relationship("Run") Specimen = relationship("Specimen") t_Project_has_Protein = Table( "Project_has_Protein", metadata, Column( "projectId", ForeignKey("Project.projectId", ondelete="CASCADE"), primary_key=True, nullable=False, ), Column( "proteinId", ForeignKey("Protein.proteinId", ondelete="CASCADE"), primary_key=True, nullable=False, index=True, ), ) t_Project_has_Session = Table( "Project_has_Session", metadata, Column( "projectId", ForeignKey("Project.projectId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, ), Column( "sessionId", ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, index=True, ), ) class ProteinHasPDB(Base): __tablename__ = "Protein_has_PDB" proteinhaspdbid = Column(INTEGER(11), primary_key=True) proteinid = Column(ForeignKey("Protein.proteinId"), nullable=False, index=True) pdbid = Column(ForeignKey("PDB.pdbId"), nullable=False, index=True) PDB = relationship("PDB") Protein = relationship("Protein") class ScanParametersModel(Base): __tablename__ = "ScanParametersModel" scanParametersModelId = Column(INTEGER(11), primary_key=True) scanParametersServiceId = Column( ForeignKey("ScanParametersService.scanParametersServiceId", onupdate="CASCADE"), index=True, ) dataCollectionPlanId = Column( ForeignKey("DiffractionPlan.diffractionPlanId", onupdate="CASCADE"), index=True ) sequenceNumber = Column(TINYINT(3)) start = Column(Float(asdecimal=True)) stop = Column(Float(asdecimal=True)) step = Column(Float(asdecimal=True)) array = Column(Text) duration = Column(MEDIUMINT(8), comment="Duration for parameter change in seconds") DiffractionPlan = relationship("DiffractionPlan") ScanParametersService = relationship("ScanParametersService") class ScreenComponentGroup(Base): __tablename__ = "ScreenComponentGroup" screenComponentGroupId = Column(INTEGER(11), primary_key=True) screenId = Column(ForeignKey("Screen.screenId"), nullable=False, index=True) position = Column(SMALLINT(6)) Screen = relationship("Screen") class SessionType(Base): __tablename__ = "SessionType" sessionTypeId = Column(INTEGER(10), primary_key=True) sessionId = Column( ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) typeName = Column(String(31), nullable=False) BLSession = relationship("BLSession") class SessionHasPerson(Base): __tablename__ = "Session_has_Person" sessionId = Column( ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, server_default=text("0"), ) personId = Column( ForeignKey("Person.personId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, index=True, server_default=text("0"), ) role = Column( Enum( "Local Contact", "Local Contact 2", "Staff", "Team Leader", "Co-Investigator", "Principal Investigator", "Alternate Contact", "Data Access", "Team Member", "ERA Admin", "Associate", ) ) remote = Column(TINYINT(1), server_default=text("0")) Person = relationship("Person") BLSession = relationship("BLSession") class 
class Shipping(Base):
    __tablename__ = "Shipping"

    shippingId = Column(INTEGER(10), primary_key=True)
    proposalId = Column(
        ForeignKey("Proposal.proposalId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
        index=True,
        server_default=text("0"),
    )
    shippingName = Column(String(45), index=True)
    deliveryAgent_agentName = Column(String(45))
    deliveryAgent_shippingDate = Column(Date)
    deliveryAgent_deliveryDate = Column(Date)
    deliveryAgent_agentCode = Column(String(45))
    deliveryAgent_flightCode = Column(String(45))
    shippingStatus = Column(String(45), index=True)
    bltimeStamp = Column(DateTime)
    laboratoryId = Column(INTEGER(10), index=True)
    isStorageShipping = Column(TINYINT(1), server_default=text("0"))
    creationDate = Column(DateTime, index=True)
    comments = Column(String(1000))
    sendingLabContactId = Column(
        ForeignKey("LabContact.labContactId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    returnLabContactId = Column(
        ForeignKey("LabContact.labContactId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    returnCourier = Column(String(45))
    dateOfShippingToUser = Column(DateTime)
    shippingType = Column(String(45))
    SAFETYLEVEL = Column(String(8))
    deliveryAgent_flightCodeTimestamp = Column(
        TIMESTAMP, comment="Date flight code created, if automatic"
    )
    deliveryAgent_label = Column(Text, comment="Base64 encoded pdf of airway label")
    readyByTime = Column(Time, comment="Time shipment will be ready")
    closeTime = Column(Time, comment="Time after which shipment cannot be picked up")
    physicalLocation = Column(
        String(50), comment="Where shipment can be picked up from: i.e. Stores"
    )
    deliveryAgent_pickupConfirmationTimestamp = Column(
        TIMESTAMP, comment="Date picked confirmed"
    )
    deliveryAgent_pickupConfirmation = Column(
        String(10), comment="Confirmation number of requested pickup"
    )
    deliveryAgent_readyByTime = Column(Time, comment="Confirmed ready-by time")
    deliveryAgent_callinTime = Column(Time, comment="Confirmed courier call-in time")
    deliveryAgent_productcode = Column(
        String(10), comment="A code that identifies which shipment service was used"
    )
    deliveryAgent_flightCodePersonId = Column(
        ForeignKey("Person.personId"),
        index=True,
        comment="The person who created the AWB (for auditing)",
    )

    Person = relationship("Person")
    Proposal = relationship("Proposal")
    LabContact = relationship(
        "LabContact",
        primaryjoin="Shipping.returnLabContactId == LabContact.labContactId",
    )
    LabContact1 = relationship(
        "LabContact",
        primaryjoin="Shipping.sendingLabContactId == LabContact.labContactId",
    )


class BLSampleTypeHasComponent(Base):
    __tablename__ = "BLSampleType_has_Component"

    blSampleTypeId = Column(
        ForeignKey("Crystal.crystalId", ondelete="CASCADE", onupdate="CASCADE"),
        primary_key=True,
        nullable=False,
    )
    componentId = Column(
        ForeignKey("Protein.proteinId", ondelete="CASCADE", onupdate="CASCADE"),
        primary_key=True,
        nullable=False,
        index=True,
    )
    abundance = Column(Float)

    Crystal = relationship("Crystal")
    Protein = relationship("Protein")
class CourierTermsAccepted(Base):
    __tablename__ = "CourierTermsAccepted"
    __table_args__ = {"comment": "Records acceptances of the courier T and C"}

    courierTermsAcceptedId = Column(INTEGER(10), primary_key=True)
    proposalId = Column(ForeignKey("Proposal.proposalId"), nullable=False, index=True)
    personId = Column(ForeignKey("Person.personId"), nullable=False, index=True)
    shippingName = Column(String(100))
    timestamp = Column(DateTime, server_default=text("current_timestamp()"))
    shippingId = Column(
        ForeignKey("Shipping.shippingId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )

    Person = relationship("Person")
    Proposal = relationship("Proposal")
    Shipping = relationship("Shipping")


class CrystalHasUUID(Base):
    __tablename__ = "Crystal_has_UUID"

    crystal_has_UUID_Id = Column(INTEGER(10), primary_key=True)
    crystalId = Column(
        ForeignKey("Crystal.crystalId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
        index=True,
    )
    UUID = Column(String(45), index=True)
    imageURL = Column(String(255))

    Crystal = relationship("Crystal")


class Dewar(Base):
    __tablename__ = "Dewar"

    dewarId = Column(INTEGER(10), primary_key=True)
    shippingId = Column(
        ForeignKey("Shipping.shippingId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    code = Column(String(45), index=True)
    comments = Column(TINYTEXT)
    storageLocation = Column(String(45))
    dewarStatus = Column(String(45), index=True)
    bltimeStamp = Column(DateTime)
    isStorageDewar = Column(TINYINT(1), server_default=text("0"))
    barCode = Column(String(45), unique=True)
    firstExperimentId = Column(
        ForeignKey("BLSession.sessionId", ondelete="SET NULL", onupdate="CASCADE"),
        index=True,
    )
    customsValue = Column(INTEGER(11))
    transportValue = Column(INTEGER(11))
    trackingNumberToSynchrotron = Column(String(30))
    trackingNumberFromSynchrotron = Column(String(30))
    type = Column(
        Enum("Dewar", "Toolbox", "Parcel"),
        nullable=False,
        server_default=text("'Dewar'"),
    )
    facilityCode = Column(String(20))
    weight = Column(Float, comment="dewar weight in kg")
    deliveryAgent_barcode = Column(
        String(30), comment="Courier piece barcode (not the airway bill)"
    )

    BLSession = relationship("BLSession")
    Shipping = relationship("Shipping")


class DewarRegistryHasProposal(Base):
    __tablename__ = "DewarRegistry_has_Proposal"
    __table_args__ = (
        Index("dewarRegistryId", "dewarRegistryId", "proposalId", unique=True),
    )

    dewarRegistryHasProposalId = Column(INTEGER(11), primary_key=True)
    dewarRegistryId = Column(ForeignKey("DewarRegistry.dewarRegistryId"))
    proposalId = Column(ForeignKey("Proposal.proposalId"), index=True)
    personId = Column(
        ForeignKey("Person.personId"),
        index=True,
        comment="Person registering the dewar",
    )
    recordTimestamp = Column(DateTime, server_default=text("current_timestamp()"))
    labContactId = Column(
        ForeignKey("LabContact.labContactId", onupdate="CASCADE"),
        index=True,
        comment="Owner of the dewar",
    )

    DewarRegistry = relationship("DewarRegistry")
    LabContact = relationship("LabContact")
    Person = relationship("Person")
    Proposal = relationship("Proposal")


class DewarReport(Base):
    __tablename__ = "DewarReport"

    dewarReportId = Column(INTEGER(11), primary_key=True)
    facilityCode = Column(
        ForeignKey("DewarRegistry.facilityCode", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    report = Column(Text)
    attachment = Column(String(255))
    bltimestamp = Column(
        DateTime, nullable=False, server_default=text("current_timestamp()")
    )

    DewarRegistry = relationship("DewarRegistry")


class MeasurementToDataCollection(Base):
    __tablename__ = "MeasurementToDataCollection"

    measurementToDataCollectionId = Column(INTEGER(10), primary_key=True)
    dataCollectionId = Column(
        ForeignKey(
            "SaxsDataCollection.dataCollectionId",
            ondelete="CASCADE",
            onupdate="CASCADE",
        ),
        index=True,
    )
    measurementId = Column(
        ForeignKey("Measurement.measurementId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    dataCollectionOrder = Column(INTEGER(10))

    SaxsDataCollection = relationship("SaxsDataCollection")
    Measurement = relationship("Measurement")
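# --- Illustrative usage sketch (editorial addition, not part of the generated
# schema). The generated models carry many-to-one relationships only, so going
# from a Shipping row to its Dewars is written as an explicit filter on the
# foreign key. Assumes an open Session; the helper name is hypothetical.
def _example_dewars_in_shipment(session, shipping_id):
    """Minimal sketch: list the Dewar rows belonging to one Shipping row."""
    return (
        session.query(Dewar)
        .filter(Dewar.shippingId == shipping_id)
        .order_by(Dewar.barCode)
        .all()
    )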
ForeignKey("Measurement.measurementId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) frameListId = Column( ForeignKey("FrameList.frameListId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) discardedFrameNameList = Column(String(1024)) averageFilePath = Column(String(255)) framesCount = Column(String(45)) framesMerge = Column(String(45)) FrameList = relationship("FrameList") Measurement = relationship("Measurement") t_Project_has_Shipping = Table( "Project_has_Shipping", metadata, Column( "projectId", ForeignKey("Project.projectId", ondelete="CASCADE"), primary_key=True, nullable=False, ), Column( "shippingId", ForeignKey("Shipping.shippingId", ondelete="CASCADE"), primary_key=True, nullable=False, index=True, ), ) class ScreenComponent(Base): __tablename__ = "ScreenComponent" screenComponentId = Column(INTEGER(11), primary_key=True) screenComponentGroupId = Column( ForeignKey("ScreenComponentGroup.screenComponentGroupId"), nullable=False, index=True, ) componentId = Column(ForeignKey("Protein.proteinId"), index=True) concentration = Column(Float) pH = Column(Float) Protein = relationship("Protein") ScreenComponentGroup = relationship("ScreenComponentGroup") t_ShippingHasSession = Table( "ShippingHasSession", metadata, Column( "shippingId", ForeignKey("Shipping.shippingId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, ), Column( "sessionId", ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, index=True, ), ) class Container(Base): __tablename__ = "Container" containerId = Column(INTEGER(10), primary_key=True) dewarId = Column( ForeignKey("Dewar.dewarId", ondelete="CASCADE", onupdate="CASCADE"), index=True ) code = Column(String(45)) containerType = Column(String(20)) capacity = Column(INTEGER(10)) sampleChangerLocation = Column(String(20)) containerStatus = Column(String(45), index=True) bltimeStamp = Column(DateTime) beamlineLocation = Column(String(20), index=True) screenId = Column(ForeignKey("Screen.screenId"), index=True) scheduleId = Column(ForeignKey("Schedule.scheduleId"), index=True) barcode = Column(String(45), unique=True) imagerId = Column(ForeignKey("Imager.imagerId"), index=True) sessionId = Column( ForeignKey("BLSession.sessionId", ondelete="SET NULL", onupdate="CASCADE"), index=True, ) ownerId = Column(ForeignKey("Person.personId"), index=True) requestedImagerId = Column(ForeignKey("Imager.imagerId"), index=True) requestedReturn = Column( TINYINT(1), server_default=text("0"), comment="True for requesting return, False means container will be disposed", ) comments = Column(String(255)) experimentType = Column(String(20)) storageTemperature = Column(Float, comment="NULL=ambient") containerRegistryId = Column( ForeignKey("ContainerRegistry.containerRegistryId"), index=True ) scLocationUpdated = Column(DateTime) priorityPipelineId = Column( ForeignKey("ProcessingPipeline.processingPipelineId"), index=True ) experimentTypeId = Column(ForeignKey("ExperimentType.experimentTypeId"), index=True) containerTypeId = Column(ForeignKey("ContainerType.containerTypeId"), index=True) ContainerRegistry = relationship("ContainerRegistry") ContainerType = relationship("ContainerType") Dewar = relationship("Dewar") ExperimentType = relationship("ExperimentType") Imager = relationship("Imager", primaryjoin="Container.imagerId == Imager.imagerId") Person = relationship("Person") ProcessingPipeline = relationship("ProcessingPipeline") Imager1 = relationship( "Imager", 
primaryjoin="Container.requestedImagerId == Imager.imagerId" ) Schedule = relationship("Schedule") Screen = relationship("Screen") BLSession = relationship("BLSession") class DewarTransportHistory(Base): __tablename__ = "DewarTransportHistory" DewarTransportHistoryId = Column(INTEGER(10), primary_key=True) dewarId = Column( ForeignKey("Dewar.dewarId", ondelete="CASCADE", onupdate="CASCADE"), index=True ) dewarStatus = Column(String(45), nullable=False) storageLocation = Column(String(45), nullable=False) arrivalDate = Column(DateTime, nullable=False) Dewar = relationship("Dewar") class BFAutomationFault(Base): __tablename__ = "BF_automationFault" automationFaultId = Column(INTEGER(10), primary_key=True) automationErrorId = Column( ForeignKey("BF_automationError.automationErrorId"), index=True ) containerId = Column(ForeignKey("Container.containerId"), index=True) severity = Column(Enum("1", "2", "3")) stacktrace = Column(Text) resolved = Column(TINYINT(1)) faultTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) BF_automationError = relationship("BFAutomationError") Container = relationship("Container") class BLSample(Base): __tablename__ = "BLSample" __table_args__ = (Index("crystalId", "crystalId", "containerId"),) blSampleId = Column(INTEGER(10), primary_key=True) diffractionPlanId = Column( ForeignKey( "DiffractionPlan.diffractionPlanId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) crystalId = Column( ForeignKey("Crystal.crystalId", ondelete="CASCADE", onupdate="CASCADE") ) containerId = Column( ForeignKey("Container.containerId", ondelete="CASCADE", onupdate="CASCADE"), index=True, ) name = Column(String(45), index=True) code = Column(String(45)) location = Column(String(45)) holderLength = Column(Float(asdecimal=True)) loopLength = Column(Float(asdecimal=True)) loopType = Column(String(45)) wireWidth = Column(Float(asdecimal=True)) comments = Column(String(1024)) completionStage = Column(String(45)) structureStage = Column(String(45)) publicationStage = Column(String(45)) publicationComments = Column(String(255)) blSampleStatus = Column(String(20), index=True) isInSampleChanger = Column(TINYINT(1)) lastKnownCenteringPosition = Column(String(255)) POSITIONID = Column(INTEGER(11)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) SMILES = Column( String(400), comment="the symbolic description of the structure of a chemical compound", ) blSubSampleId = Column(INTEGER(11), index=True) lastImageURL = Column(String(255)) screenComponentGroupId = Column( ForeignKey("ScreenComponentGroup.screenComponentGroupId"), index=True ) volume = Column(Float) dimension1 = Column(Float(asdecimal=True)) dimension2 = Column(Float(asdecimal=True)) dimension3 = Column(Float(asdecimal=True)) shape = Column(String(15)) packingFraction = Column(Float) preparationTemeprature = Column( MEDIUMINT(9), comment="Sample preparation temperature, Units: kelvin" ) preparationHumidity = Column(Float, comment="Sample preparation humidity, Units: %") blottingTime = Column(INTEGER(11), comment="Blotting time, Units: sec") blottingForce = Column(Float, comment="Force used when blotting sample, Units: N?") blottingDrainTime = Column( INTEGER(11), comment="Time sample left to drain after blotting, Units: sec" ) support = Column(String(50), comment="Sample support material") subLocation = Column( SMALLINT(5), comment="Indicates the sample's location on a multi-sample pin, where 1 is 
class BLSample(Base):
    __tablename__ = "BLSample"
    __table_args__ = (Index("crystalId", "crystalId", "containerId"),)

    blSampleId = Column(INTEGER(10), primary_key=True)
    diffractionPlanId = Column(
        ForeignKey(
            "DiffractionPlan.diffractionPlanId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        index=True,
    )
    crystalId = Column(
        ForeignKey("Crystal.crystalId", ondelete="CASCADE", onupdate="CASCADE")
    )
    containerId = Column(
        ForeignKey("Container.containerId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    name = Column(String(45), index=True)
    code = Column(String(45))
    location = Column(String(45))
    holderLength = Column(Float(asdecimal=True))
    loopLength = Column(Float(asdecimal=True))
    loopType = Column(String(45))
    wireWidth = Column(Float(asdecimal=True))
    comments = Column(String(1024))
    completionStage = Column(String(45))
    structureStage = Column(String(45))
    publicationStage = Column(String(45))
    publicationComments = Column(String(255))
    blSampleStatus = Column(String(20), index=True)
    isInSampleChanger = Column(TINYINT(1))
    lastKnownCenteringPosition = Column(String(255))
    POSITIONID = Column(INTEGER(11))
    recordTimeStamp = Column(
        TIMESTAMP,
        nullable=False,
        server_default=text("current_timestamp()"),
        comment="Creation or last update date/time",
    )
    SMILES = Column(
        String(400),
        comment="the symbolic description of the structure of a chemical compound",
    )
    blSubSampleId = Column(INTEGER(11), index=True)
    lastImageURL = Column(String(255))
    screenComponentGroupId = Column(
        ForeignKey("ScreenComponentGroup.screenComponentGroupId"), index=True
    )
    volume = Column(Float)
    dimension1 = Column(Float(asdecimal=True))
    dimension2 = Column(Float(asdecimal=True))
    dimension3 = Column(Float(asdecimal=True))
    shape = Column(String(15))
    packingFraction = Column(Float)
    preparationTemeprature = Column(
        MEDIUMINT(9), comment="Sample preparation temperature, Units: kelvin"
    )
    preparationHumidity = Column(Float, comment="Sample preparation humidity, Units: %")
    blottingTime = Column(INTEGER(11), comment="Blotting time, Units: sec")
    blottingForce = Column(Float, comment="Force used when blotting sample, Units: N?")
    blottingDrainTime = Column(
        INTEGER(11), comment="Time sample left to drain after blotting, Units: sec"
    )
    support = Column(String(50), comment="Sample support material")
    subLocation = Column(
        SMALLINT(5),
        comment="Indicates the sample's location on a multi-sample pin, where 1 is closest to the pin base",
    )
    staffComments = Column(String(255), comment="Any staff comments on the sample")

    Container = relationship("Container")
    Crystal = relationship("Crystal")
    DiffractionPlan = relationship("DiffractionPlan")
    ScreenComponentGroup = relationship("ScreenComponentGroup")
    Project = relationship("Project", secondary="Project_has_BLSample")


class ContainerHistory(Base):
    __tablename__ = "ContainerHistory"

    containerHistoryId = Column(INTEGER(11), primary_key=True)
    containerId = Column(
        ForeignKey("Container.containerId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    location = Column(String(45))
    blTimeStamp = Column(
        TIMESTAMP, nullable=False, server_default=text("current_timestamp()")
    )
    status = Column(String(45))
    beamlineName = Column(String(20))

    Container = relationship("Container")


class ContainerInspection(Base):
    __tablename__ = "ContainerInspection"
    __table_args__ = (
        Index(
            "ContainerInspection_idx4",
            "containerId",
            "scheduleComponentid",
            "state",
            "manual",
        ),
    )

    containerInspectionId = Column(INTEGER(11), primary_key=True)
    containerId = Column(
        ForeignKey("Container.containerId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
    )
    inspectionTypeId = Column(
        ForeignKey("InspectionType.inspectionTypeId"), nullable=False, index=True
    )
    imagerId = Column(ForeignKey("Imager.imagerId"), index=True)
    temperature = Column(Float)
    blTimeStamp = Column(DateTime)
    scheduleComponentid = Column(
        ForeignKey("ScheduleComponent.scheduleComponentId"), index=True
    )
    state = Column(String(20))
    priority = Column(SMALLINT(6))
    manual = Column(TINYINT(1))
    scheduledTimeStamp = Column(DateTime)
    completedTimeStamp = Column(DateTime)

    Container = relationship("Container")
    Imager = relationship("Imager")
    InspectionType = relationship("InspectionType")
    ScheduleComponent = relationship("ScheduleComponent")


class ContainerQueue(Base):
    __tablename__ = "ContainerQueue"

    containerQueueId = Column(INTEGER(11), primary_key=True)
    containerId = Column(
        ForeignKey("Container.containerId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    personId = Column(ForeignKey("Person.personId", onupdate="CASCADE"), index=True)
    createdTimeStamp = Column(
        TIMESTAMP, nullable=False, server_default=text("current_timestamp()")
    )
    completedTimeStamp = Column(TIMESTAMP)

    Container = relationship("Container")
    Person = relationship("Person")


class BLSampleGroupHasBLSample(Base):
    __tablename__ = "BLSampleGroup_has_BLSample"

    blSampleGroupId = Column(
        ForeignKey("BLSampleGroup.blSampleGroupId"), primary_key=True, nullable=False
    )
    blSampleId = Column(
        ForeignKey("BLSample.blSampleId"), primary_key=True, nullable=False, index=True
    )
    groupOrder = Column(MEDIUMINT(9))
    type = Column(Enum("background", "container", "sample", "calibrant", "capillary"))
    blSampleTypeId = Column(ForeignKey("BLSampleType.blSampleTypeId"), index=True)

    BLSampleGroup = relationship("BLSampleGroup")
    BLSample = relationship("BLSample")
    BLSampleType = relationship("BLSampleType")
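# --- Illustrative usage sketch (editorial addition, not part of the generated
# schema). BLSample reaches Protein only through Crystal, so the lookup is a
# two-step join. Assumes an open Session; the helper name is hypothetical.
def _example_samples_for_protein(session, protein_id):
    """Minimal sketch: list the BLSample rows grown from one Protein."""
    return (
        session.query(BLSample)
        .join(Crystal, BLSample.crystalId == Crystal.crystalId)
        .filter(Crystal.proteinId == protein_id)
        .all()
    )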
ForeignKey("ContainerInspection.containerInspectionId"), index=True ) modifiedTimeStamp = Column(DateTime) offsetX = Column( INTEGER(11), nullable=False, server_default=text("0"), comment="The x offset of the image relative to the canvas", ) offsetY = Column( INTEGER(11), nullable=False, server_default=text("0"), comment="The y offset of the image relative to the canvas", ) BLSample = relationship("BLSample") BLSampleImageScore = relationship("BLSampleImageScore") ContainerInspection = relationship("ContainerInspection") class BLSampleHasDataCollectionPlan(Base): __tablename__ = "BLSample_has_DataCollectionPlan" blSampleId = Column( ForeignKey("BLSample.blSampleId"), primary_key=True, nullable=False ) dataCollectionPlanId = Column( ForeignKey("DiffractionPlan.diffractionPlanId"), primary_key=True, nullable=False, index=True, ) planOrder = Column(TINYINT(3)) BLSample = relationship("BLSample") DiffractionPlan = relationship("DiffractionPlan") class BLSampleHasPositioner(Base): __tablename__ = "BLSample_has_Positioner" blSampleHasPositioner = Column(INTEGER(10), primary_key=True) blSampleId = Column(ForeignKey("BLSample.blSampleId"), nullable=False, index=True) positionerId = Column( ForeignKey("Positioner.positionerId"), nullable=False, index=True ) BLSample = relationship("BLSample") Positioner = relationship("Positioner") class DataCollectionGroup(Base): __tablename__ = "DataCollectionGroup" __table_args__ = { "comment": "a dataCollectionGroup is a group of dataCollection for a spe" } dataCollectionGroupId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) sessionId = Column( ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, comment="references Session table", ) comments = Column(String(1024), comment="comments") blSampleId = Column( ForeignKey("BLSample.blSampleId", ondelete="CASCADE", onupdate="CASCADE"), index=True, comment="references BLSample table", ) experimentType = Column( Enum( "SAD", "SAD - Inverse Beam", "OSC", "Collect - Multiwedge", "MAD", "Helical", "Multi-positional", "Mesh", "Burn", "MAD - Inverse Beam", "Characterization", "Dehydration", "tomo", "experiment", "EM", "PDF", "PDF+Bragg", "Bragg", "single particle", "Serial Fixed", "Serial Jet", "Standard", "Time Resolved", "Diamond Anvil High Pressure", "Custom", "XRF map", "Energy scan", "XRF spectrum", "XRF map xas", "Mesh3D", "Screening", ), comment="Standard: Routine structure determination experiment. Time Resolved: Investigate the change of a system over time. 
class DataCollectionGroup(Base):
    __tablename__ = "DataCollectionGroup"
    __table_args__ = {
        "comment": "a dataCollectionGroup is a group of dataCollection for a spe"
    }

    dataCollectionGroupId = Column(
        INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)"
    )
    sessionId = Column(
        ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
        index=True,
        comment="references Session table",
    )
    comments = Column(String(1024), comment="comments")
    blSampleId = Column(
        ForeignKey("BLSample.blSampleId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
        comment="references BLSample table",
    )
    experimentType = Column(
        Enum(
            "SAD",
            "SAD - Inverse Beam",
            "OSC",
            "Collect - Multiwedge",
            "MAD",
            "Helical",
            "Multi-positional",
            "Mesh",
            "Burn",
            "MAD - Inverse Beam",
            "Characterization",
            "Dehydration",
            "tomo",
            "experiment",
            "EM",
            "PDF",
            "PDF+Bragg",
            "Bragg",
            "single particle",
            "Serial Fixed",
            "Serial Jet",
            "Standard",
            "Time Resolved",
            "Diamond Anvil High Pressure",
            "Custom",
            "XRF map",
            "Energy scan",
            "XRF spectrum",
            "XRF map xas",
            "Mesh3D",
            "Screening",
        ),
        comment="Standard: Routine structure determination experiment. Time Resolved: Investigate the change of a system over time. Custom: Special or non-standard data collection.",
    )
    startTime = Column(DateTime, comment="Start time of the dataCollectionGroup")
    endTime = Column(DateTime, comment="end time of the dataCollectionGroup")
    crystalClass = Column(String(20), comment="Crystal Class for industrials users")
    detectorMode = Column(String(255), comment="Detector mode")
    actualSampleBarcode = Column(String(45), comment="Actual sample barcode")
    actualSampleSlotInContainer = Column(
        INTEGER(10), comment="Actual sample slot number in container"
    )
    actualContainerBarcode = Column(String(45), comment="Actual container barcode")
    actualContainerSlotInSC = Column(
        INTEGER(10), comment="Actual container slot number in sample changer"
    )
    workflowId = Column(
        ForeignKey("Workflow.workflowId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    xtalSnapshotFullPath = Column(String(255))
    scanParameters = Column(LONGTEXT)
    experimentTypeId = Column(ForeignKey("ExperimentType.experimentTypeId"), index=True)

    BLSample = relationship("BLSample")
    ExperimentType = relationship("ExperimentType")
    BLSession = relationship("BLSession")
    Workflow = relationship("Workflow")
    Project = relationship("Project", secondary="Project_has_DCGroup")


t_Project_has_BLSample = Table(
    "Project_has_BLSample",
    metadata,
    Column(
        "projectId",
        ForeignKey("Project.projectId", ondelete="CASCADE", onupdate="CASCADE"),
        primary_key=True,
        nullable=False,
    ),
    Column(
        "blSampleId",
        ForeignKey("BLSample.blSampleId", ondelete="CASCADE", onupdate="CASCADE"),
        primary_key=True,
        nullable=False,
        index=True,
    ),
)


class RobotAction(Base):
    __tablename__ = "RobotAction"
    __table_args__ = {"comment": "Robot actions as reported by GDA"}

    robotActionId = Column(INTEGER(11), primary_key=True)
    blsessionId = Column(ForeignKey("BLSession.sessionId"), nullable=False, index=True)
    blsampleId = Column(ForeignKey("BLSample.blSampleId"), index=True)
    actionType = Column(
        Enum("LOAD", "UNLOAD", "DISPOSE", "STORE", "WASH", "ANNEAL", "MOSAIC")
    )
    startTimestamp = Column(
        TIMESTAMP,
        nullable=False,
        server_default=text("current_timestamp() ON UPDATE current_timestamp()"),
    )
    endTimestamp = Column(
        TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'")
    )
    status = Column(
        Enum("SUCCESS", "ERROR", "CRITICAL", "WARNING", "EPICSFAIL", "COMMANDNOTSENT")
    )
    message = Column(String(255))
    containerLocation = Column(SMALLINT(6))
    dewarLocation = Column(SMALLINT(6))
    sampleBarcode = Column(String(45))
    xtalSnapshotBefore = Column(String(255))
    xtalSnapshotAfter = Column(String(255))

    BLSample = relationship("BLSample")
    BLSession = relationship("BLSession")
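# --- Illustrative usage sketch (editorial addition, not part of the generated
# schema). Shows how DataCollectionGroup rows for one session might be listed
# with their experiment type. Assumes an open Session; the helper name is
# hypothetical.
def _example_groups_for_session(session, bl_session_id):
    """Minimal sketch: (id, experimentType) per group of a session, newest first."""
    groups = (
        session.query(DataCollectionGroup)
        .filter(DataCollectionGroup.sessionId == bl_session_id)
        .order_by(DataCollectionGroup.startTime.desc())
        .all()
    )
    return [(g.dataCollectionGroupId, g.experimentType) for g in groups]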
class XRFFluorescenceMappingROI(Base):
    __tablename__ = "XRFFluorescenceMappingROI"

    xrfFluorescenceMappingROIId = Column(INTEGER(11), primary_key=True)
    startEnergy = Column(Float, nullable=False)
    endEnergy = Column(Float, nullable=False)
    element = Column(String(2))
    edge = Column(
        String(15),
        comment="Edge type i.e. Ka1, could be a custom edge in case of overlap Ka1-noCa",
    )
    r = Column(TINYINT(3), comment="R colour component")
    g = Column(TINYINT(3), comment="G colour component")
    b = Column(TINYINT(3), comment="B colour component")
    blSampleId = Column(
        ForeignKey("BLSample.blSampleId"),
        index=True,
        comment="ROIs can be created within the context of a sample",
    )
    scalar = Column(
        String(50),
        comment="For ROIs that are not an element, i.e. could be a scan counter instead",
    )

    BLSample = relationship("BLSample")


class BLSampleImageAnalysis(Base):
    __tablename__ = "BLSampleImageAnalysis"

    blSampleImageAnalysisId = Column(INTEGER(11), primary_key=True)
    blSampleImageId = Column(ForeignKey("BLSampleImage.blSampleImageId"), index=True)
    oavSnapshotBefore = Column(String(255))
    oavSnapshotAfter = Column(String(255))
    deltaX = Column(INTEGER(11))
    deltaY = Column(INTEGER(11))
    goodnessOfFit = Column(Float)
    scaleFactor = Column(Float)
    resultCode = Column(String(15))
    matchStartTimeStamp = Column(TIMESTAMP, server_default=text("current_timestamp()"))
    matchEndTimeStamp = Column(TIMESTAMP)

    BLSampleImage = relationship("BLSampleImage")


class BLSampleImageHasAutoScoreClass(Base):
    __tablename__ = "BLSampleImage_has_AutoScoreClass"
    __table_args__ = {
        "comment": "Many-to-many relationship between drop images and thing being scored, as well as the actual probability (score) that the drop image contains that thing"
    }

    blSampleImageId = Column(
        ForeignKey(
            "BLSampleImage.blSampleImageId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        primary_key=True,
        nullable=False,
    )
    blSampleImageAutoScoreClassId = Column(
        ForeignKey(
            "BLSampleImageAutoScoreClass.blSampleImageAutoScoreClassId",
            ondelete="CASCADE",
            onupdate="CASCADE",
        ),
        primary_key=True,
        nullable=False,
        index=True,
    )
    probability = Column(Float)

    BLSampleImageAutoScoreClass = relationship("BLSampleImageAutoScoreClass")
    BLSampleImage = relationship("BLSampleImage")
class BLSubSample(Base):
    __tablename__ = "BLSubSample"

    blSubSampleId = Column(
        INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)"
    )
    blSampleId = Column(
        ForeignKey("BLSample.blSampleId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
        index=True,
        comment="sample",
    )
    diffractionPlanId = Column(
        ForeignKey(
            "DiffractionPlan.diffractionPlanId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        index=True,
        comment="eventually diffractionPlan",
    )
    blSampleImageId = Column(ForeignKey("BLSampleImage.blSampleImageId"), index=True)
    positionId = Column(
        ForeignKey("Position.positionId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
        comment="position of the subsample",
    )
    position2Id = Column(
        ForeignKey("Position.positionId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    motorPositionId = Column(
        ForeignKey(
            "MotorPosition.motorPositionId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        index=True,
        comment="motor position",
    )
    blSubSampleUUID = Column(String(45), comment="uuid of the blsubsample")
    imgFileName = Column(String(255), comment="image filename")
    imgFilePath = Column(String(1024), comment="url image")
    comments = Column(String(1024), comment="comments")
    recordTimeStamp = Column(
        TIMESTAMP,
        nullable=False,
        server_default=text("current_timestamp()"),
        comment="Creation or last update date/time",
    )
    source = Column(Enum("manual", "auto"), server_default=text("'manual'"))
    type = Column(
        String(10),
        comment="The type of subsample, i.e. roi (region), poi (point), loi (line)",
    )

    BLSample = relationship("BLSample")
    BLSampleImage = relationship("BLSampleImage")
    DiffractionPlan = relationship("DiffractionPlan")
    MotorPosition = relationship("MotorPosition")
    Position = relationship(
        "Position", primaryjoin="BLSubSample.position2Id == Position.positionId"
    )
    Position1 = relationship(
        "Position", primaryjoin="BLSubSample.positionId == Position.positionId"
    )


t_Project_has_DCGroup = Table(
    "Project_has_DCGroup",
    metadata,
    Column(
        "projectId",
        ForeignKey("Project.projectId", ondelete="CASCADE", onupdate="CASCADE"),
        primary_key=True,
        nullable=False,
    ),
    Column(
        "dataCollectionGroupId",
        ForeignKey(
            "DataCollectionGroup.dataCollectionGroupId",
            ondelete="CASCADE",
            onupdate="CASCADE",
        ),
        primary_key=True,
        nullable=False,
        index=True,
    ),
)


class BLSampleImageMeasurement(Base):
    __tablename__ = "BLSampleImageMeasurement"
    __table_args__ = {"comment": "For measuring crystal growth over time"}

    blSampleImageMeasurementId = Column(INTEGER(11), primary_key=True)
    blSampleImageId = Column(
        ForeignKey(
            "BLSampleImage.blSampleImageId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        nullable=False,
        index=True,
    )
    blSubSampleId = Column(ForeignKey("BLSubSample.blSubSampleId"), index=True)
    startPosX = Column(Float(asdecimal=True))
    startPosY = Column(Float(asdecimal=True))
    endPosX = Column(Float(asdecimal=True))
    endPosY = Column(Float(asdecimal=True))
    blTimeStamp = Column(DateTime)

    BLSampleImage = relationship("BLSampleImage")
    BLSubSample = relationship("BLSubSample")


class BLSubSampleHasPositioner(Base):
    __tablename__ = "BLSubSample_has_Positioner"

    blSubSampleHasPositioner = Column(INTEGER(10), primary_key=True)
    blSubSampleId = Column(
        ForeignKey("BLSubSample.blSubSampleId"), nullable=False, index=True
    )
    positionerId = Column(
        ForeignKey("Positioner.positionerId"), nullable=False, index=True
    )

    BLSubSample = relationship("BLSubSample")
    Positioner = relationship("Positioner")
class ContainerQueueSample(Base):
    __tablename__ = "ContainerQueueSample"

    containerQueueSampleId = Column(INTEGER(11), primary_key=True)
    containerQueueId = Column(
        ForeignKey(
            "ContainerQueue.containerQueueId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        index=True,
    )
    blSubSampleId = Column(
        ForeignKey("BLSubSample.blSubSampleId", ondelete="CASCADE", onupdate="CASCADE"),
        index=True,
    )
    status = Column(
        String(20),
        comment="The status of the queued item, i.e. skipped, reinspect. Completed / failed should be inferred from related DataCollection",
    )
    startTime = Column(DateTime, comment="Start time of processing the queue item")
    endTime = Column(DateTime, comment="End time of processing the queue item")
    dataCollectionPlanId = Column(
        ForeignKey("DiffractionPlan.diffractionPlanId"), index=True
    )
    blSampleId = Column(ForeignKey("BLSample.blSampleId"), index=True)

    BLSample = relationship("BLSample")
    BLSubSample = relationship("BLSubSample")
    ContainerQueue = relationship("ContainerQueue")
    DiffractionPlan = relationship("DiffractionPlan")
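# --- Illustrative usage sketch (editorial addition, not part of the generated
# schema). Per the status column comment above, completion of a queued item is
# inferred from related DataCollection rows rather than stored here; this
# sketch merely lists the recorded statuses. Assumes an open Session; the
# helper name is hypothetical.
def _example_queue_item_statuses(session, container_queue_id):
    """Minimal sketch: (id, status) for every item of one ContainerQueue."""
    items = (
        session.query(ContainerQueueSample)
        .filter(ContainerQueueSample.containerQueueId == container_queue_id)
        .all()
    )
    return [(i.containerQueueSampleId, i.status) for i in items]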
class DataCollection(Base):
    __tablename__ = "DataCollection"
    __table_args__ = (
        Index(
            "DataCollection_dataCollectionGroupId_startTime",
            "dataCollectionGroupId",
            "startTime",
        ),
    )

    dataCollectionId = Column(
        INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)"
    )
    BLSAMPLEID = Column(INTEGER(11), index=True)
    SESSIONID = Column(INTEGER(11), index=True, server_default=text("0"))
    experimenttype = Column(String(24))
    dataCollectionNumber = Column(INTEGER(10), index=True)
    startTime = Column(DateTime, index=True, comment="Start time of the dataCollection")
    endTime = Column(DateTime, comment="end time of the dataCollection")
    runStatus = Column(String(45))
    axisStart = Column(Float)
    axisEnd = Column(Float)
    axisRange = Column(Float)
    overlap = Column(Float)
    numberOfImages = Column(INTEGER(10))
    startImageNumber = Column(INTEGER(10))
    numberOfPasses = Column(INTEGER(10))
    exposureTime = Column(Float)
    imageDirectory = Column(
        String(255),
        index=True,
        comment="The directory where files reside - should end with a slash",
    )
    imagePrefix = Column(String(45), index=True)
    imageSuffix = Column(String(45))
    imageContainerSubPath = Column(
        String(255),
        comment="Internal path of a HDF5 file pointing to the data for this data collection",
    )
    fileTemplate = Column(String(255))
    wavelength = Column(Float)
    resolution = Column(Float)
    detectorDistance = Column(Float)
    xBeam = Column(Float)
    yBeam = Column(Float)
    comments = Column(String(1024))
    printableForReport = Column(TINYINT(1), server_default=text("1"))
    CRYSTALCLASS = Column(String(20))
    slitGapVertical = Column(Float)
    slitGapHorizontal = Column(Float)
    transmission = Column(Float)
    synchrotronMode = Column(String(20))
    xtalSnapshotFullPath1 = Column(String(255))
    xtalSnapshotFullPath2 = Column(String(255))
    xtalSnapshotFullPath3 = Column(String(255))
    xtalSnapshotFullPath4 = Column(String(255))
    rotationAxis = Column(Enum("Omega", "Kappa", "Phi"))
    phiStart = Column(Float)
    kappaStart = Column(Float)
    omegaStart = Column(Float)
    chiStart = Column(Float)
    resolutionAtCorner = Column(Float)
    detector2Theta = Column(Float)
    DETECTORMODE = Column(String(255))
    undulatorGap1 = Column(Float)
    undulatorGap2 = Column(Float)
    undulatorGap3 = Column(Float)
    beamSizeAtSampleX = Column(Float)
    beamSizeAtSampleY = Column(Float)
    centeringMethod = Column(String(255))
    averageTemperature = Column(Float)
    ACTUALSAMPLEBARCODE = Column(String(45))
    ACTUALSAMPLESLOTINCONTAINER = Column(INTEGER(11))
    ACTUALCONTAINERBARCODE = Column(String(45))
    ACTUALCONTAINERSLOTINSC = Column(INTEGER(11))
    actualCenteringPosition = Column(String(255))
    beamShape = Column(String(45))
    dataCollectionGroupId = Column(
        ForeignKey("DataCollectionGroup.dataCollectionGroupId"),
        nullable=False,
        index=True,
        comment="references DataCollectionGroup table",
    )
    POSITIONID = Column(INTEGER(11))
    detectorId = Column(
        ForeignKey("Detector.detectorId"),
        index=True,
        comment="references Detector table",
    )
    FOCALSPOTSIZEATSAMPLEX = Column(Float)
    POLARISATION = Column(Float)
    FOCALSPOTSIZEATSAMPLEY = Column(Float)
    APERTUREID = Column(INTEGER(11))
    screeningOrigId = Column(INTEGER(11))
    startPositionId = Column(ForeignKey("MotorPosition.motorPositionId"), index=True)
    endPositionId = Column(ForeignKey("MotorPosition.motorPositionId"), index=True)
    flux = Column(Float(asdecimal=True))
    strategySubWedgeOrigId = Column(
        INTEGER(10), index=True, comment="references ScreeningStrategySubWedge table"
    )
    blSubSampleId = Column(ForeignKey("BLSubSample.blSubSampleId"), index=True)
    flux_end = Column(Float(asdecimal=True), comment="flux measured after the collect")
    bestWilsonPlotPath = Column(String(255))
    processedDataFile = Column(String(255))
    datFullPath = Column(String(255))
    magnification = Column(
        Float, comment="Calibrated magnification, Units: dimensionless"
    )
    totalAbsorbedDose = Column(Float, comment="Unit: e-/A^2 for EM")
    binning = Column(
        TINYINT(1),
        server_default=text("1"),
        comment="1 or 2. Number of pixels to process as 1. (Use mean value.)",
    )
    particleDiameter = Column(Float, comment="Unit: nm")
    boxSize_CTF = Column(Float, comment="Unit: pixels")
    minResolution = Column(Float, comment="Unit: A")
    minDefocus = Column(Float, comment="Unit: A")
    maxDefocus = Column(Float, comment="Unit: A")
    defocusStepSize = Column(Float, comment="Unit: A")
    amountAstigmatism = Column(Float, comment="Unit: A")
    extractSize = Column(Float, comment="Unit: pixels")
    bgRadius = Column(Float, comment="Unit: nm")
    voltage = Column(Float, comment="Unit: kV")
    objAperture = Column(Float, comment="Unit: um")
    c1aperture = Column(Float, comment="Unit: um")
    c2aperture = Column(Float, comment="Unit: um")
    c3aperture = Column(Float, comment="Unit: um")
    c1lens = Column(Float, comment="Unit: %")
    c2lens = Column(Float, comment="Unit: %")
    c3lens = Column(Float, comment="Unit: %")
    totalExposedDose = Column(Float, comment="Units: e-/A^2")
    nominalMagnification = Column(
        Float, comment="Nominal magnification: Units: dimensionless"
    )
    nominalDefocus = Column(Float, comment="Nominal defocus, Units: A")
    imageSizeX = Column(
        MEDIUMINT(8),
        comment="Image size in x, incase crop has been used, Units: pixels",
    )
    imageSizeY = Column(MEDIUMINT(8), comment="Image size in y, Units: pixels")
    pixelSizeOnImage = Column(
        Float,
        comment="Pixel size on image, calculated from magnification, duplicate? Units: um?",
    )
    phasePlate = Column(TINYINT(1), comment="Whether the phase plate was used")
    dataCollectionPlanId = Column(
        ForeignKey("DiffractionPlan.diffractionPlanId"), index=True
    )

    BLSubSample = relationship("BLSubSample")
    DataCollectionGroup = relationship("DataCollectionGroup")
    DiffractionPlan = relationship("DiffractionPlan")
    Detector = relationship("Detector")
    MotorPosition = relationship(
        "MotorPosition",
        primaryjoin="DataCollection.endPositionId == MotorPosition.motorPositionId",
    )
    MotorPosition1 = relationship(
        "MotorPosition",
        primaryjoin="DataCollection.startPositionId == MotorPosition.motorPositionId",
    )


class EnergyScan(Base):
    __tablename__ = "EnergyScan"

    energyScanId = Column(INTEGER(10), primary_key=True)
    sessionId = Column(
        ForeignKey("BLSession.sessionId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
        index=True,
    )
    blSampleId = Column(ForeignKey("BLSample.blSampleId"), index=True)
    fluorescenceDetector = Column(String(255))
    scanFileFullPath = Column(String(255))
    jpegChoochFileFullPath = Column(String(255))
    element = Column(String(45))
    startEnergy = Column(Float)
    endEnergy = Column(Float)
    transmissionFactor = Column(Float)
    exposureTime = Column(Float)
    axisPosition = Column(Float)
    synchrotronCurrent = Column(Float)
    temperature = Column(Float)
    peakEnergy = Column(Float)
    peakFPrime = Column(Float)
    peakFDoublePrime = Column(Float)
    inflectionEnergy = Column(Float)
    inflectionFPrime = Column(Float)
    inflectionFDoublePrime = Column(Float)
    xrayDose = Column(Float)
    startTime = Column(DateTime)
    endTime = Column(DateTime)
    edgeEnergy = Column(String(255))
    filename = Column(String(255))
    beamSizeVertical = Column(Float)
    beamSizeHorizontal = Column(Float)
    choochFileFullPath = Column(String(255))
    crystalClass = Column(String(20))
    comments = Column(String(1024))
    flux = Column(Float(asdecimal=True), comment="flux measured before the energyScan")
    flux_end = Column(
        Float(asdecimal=True), comment="flux measured after the energyScan"
    )
    workingDirectory = Column(String(45))
    blSubSampleId = Column(ForeignKey("BLSubSample.blSubSampleId"), index=True)

    BLSample = relationship("BLSample")
    BLSubSample = relationship("BLSubSample")
    BLSession = relationship("BLSession")
    Project = relationship("Project", secondary="Project_has_EnergyScan")
relationship("BLSession") class BLSampleHasEnergyScan(Base): __tablename__ = "BLSample_has_EnergyScan" blSampleId = Column( ForeignKey("BLSample.blSampleId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) energyScanId = Column( ForeignKey("EnergyScan.energyScanId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) blSampleHasEnergyScanId = Column(INTEGER(10), primary_key=True) BLSample = relationship("BLSample") EnergyScan = relationship("EnergyScan") class DataCollectionComment(Base): __tablename__ = "DataCollectionComment" dataCollectionCommentId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column( ForeignKey( "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) personId = Column( ForeignKey("Person.personId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) comments = Column(String(4000)) createTime = Column( DateTime, nullable=False, server_default=text("current_timestamp()") ) modTime = Column(Date) DataCollection = relationship("DataCollection") Person = relationship("Person") class DataCollectionFileAttachment(Base): __tablename__ = "DataCollectionFileAttachment" dataCollectionFileAttachmentId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column( ForeignKey( "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) fileFullPath = Column(String(255), nullable=False) fileType = Column( Enum("snapshot", "log", "xy", "recip", "pia", "warning", "params") ) createTime = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) DataCollection = relationship("DataCollection") class GridImageMap(Base): __tablename__ = "GridImageMap" gridImageMapId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column(ForeignKey("DataCollection.dataCollectionId"), index=True) imageNumber = Column( INTEGER(11), comment="Movie number, sequential 1-n in time order" ) outputFileId = Column(String(80), comment="File number, file 1 may not be movie 1") positionX = Column(Float, comment="X position of stage, Units: um") positionY = Column(Float, comment="Y position of stage, Units: um") DataCollection = relationship("DataCollection") class Image(Base): __tablename__ = "Image" __table_args__ = (Index("Image_Index3", "fileLocation", "fileName"),) imageId = Column(INTEGER(12), primary_key=True) dataCollectionId = Column( ForeignKey( "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, server_default=text("0"), ) imageNumber = Column(INTEGER(10), index=True) fileName = Column(String(255)) fileLocation = Column(String(255)) measuredIntensity = Column(Float) jpegFileFullPath = Column(String(255)) jpegThumbnailFileFullPath = Column(String(255)) temperature = Column(Float) cumulativeIntensity = Column(Float) synchrotronCurrent = Column(Float) comments = Column(String(1024)) machineMessage = Column(String(1024)) BLTIMESTAMP = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) motorPositionId = Column( ForeignKey( "MotorPosition.motorPositionId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) DataCollection = relationship("DataCollection") MotorPosition = relationship("MotorPosition") class Movie(Base): __tablename__ = 
"Movie" movieId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column(ForeignKey("DataCollection.dataCollectionId"), index=True) movieNumber = Column(MEDIUMINT(8)) movieFullPath = Column(String(255)) createdTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp() ON UPDATE current_timestamp()"), ) positionX = Column(Float) positionY = Column(Float) nominalDefocus = Column(Float, comment="Nominal defocus, Units: A") DataCollection = relationship("DataCollection") class Particle(Base): __tablename__ = "Particle" particleId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column( ForeignKey( "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, ) x = Column(Float) y = Column(Float) DataCollection = relationship("DataCollection") class ProcessingJob(Base): __tablename__ = "ProcessingJob" __table_args__ = {"comment": "From this we get both job times and lag times"} processingJobId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column(ForeignKey("DataCollection.dataCollectionId"), index=True) displayName = Column(String(80), comment="xia2, fast_dp, dimple, etc") comments = Column( String(255), comment="For users to annotate the job and see the motivation for the job", ) recordTimestamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="When job was submitted", ) recipe = Column(String(50), comment="What we want to run (xia, dimple, etc).") automatic = Column( TINYINT(1), comment="Whether this processing job was triggered automatically or not", ) DataCollection = relationship("DataCollection") t_Project_has_EnergyScan = Table( "Project_has_EnergyScan", metadata, Column( "projectId", ForeignKey("Project.projectId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, ), Column( "energyScanId", ForeignKey("EnergyScan.energyScanId", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, nullable=False, index=True, ), ) t_Project_has_XFEFSpectrum = Table( "Project_has_XFEFSpectrum", metadata, Column( "projectId", ForeignKey("Project.projectId", ondelete="CASCADE"), primary_key=True, nullable=False, ), Column( "xfeFluorescenceSpectrumId", ForeignKey( "XFEFluorescenceSpectrum.xfeFluorescenceSpectrumId", ondelete="CASCADE" ), primary_key=True, nullable=False, index=True, ), ) class AutoProcProgram(Base): __tablename__ = "AutoProcProgram" autoProcProgramId = Column( INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)" ) processingCommandLine = Column( String(255), comment="Command line for running the automatic processing" ) processingPrograms = Column( String(255), comment="Processing programs (comma separated)" ) processingStatus = Column(TINYINT(1), comment="success (1) / fail (0)") processingMessage = Column(String(255), comment="warning, error,...") processingStartTime = Column(DateTime, comment="Processing start time") processingEndTime = Column(DateTime, comment="Processing end time") processingEnvironment = Column(String(255), comment="Cpus, Nodes,...") recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") processingJobId = Column(ForeignKey("ProcessingJob.processingJobId"), index=True) ProcessingJob = relationship("ProcessingJob") class ProcessingJobImageSweep(Base): __tablename__ = "ProcessingJobImageSweep" __table_args__ = { "comment": "This allows multiple sweeps per processing job for multi-xia2" } processingJobImageSweepId = Column(INTEGER(11), primary_key=True) 
class ProcessingJobImageSweep(Base):
    __tablename__ = "ProcessingJobImageSweep"
    __table_args__ = {
        "comment": "This allows multiple sweeps per processing job for multi-xia2"
    }

    processingJobImageSweepId = Column(INTEGER(11), primary_key=True)
    processingJobId = Column(ForeignKey("ProcessingJob.processingJobId"), index=True)
    dataCollectionId = Column(ForeignKey("DataCollection.dataCollectionId"), index=True)
    startImage = Column(MEDIUMINT(8))
    endImage = Column(MEDIUMINT(8))

    DataCollection = relationship("DataCollection")
    ProcessingJob = relationship("ProcessingJob")


class ProcessingJobParameter(Base):
    __tablename__ = "ProcessingJobParameter"

    processingJobParameterId = Column(INTEGER(11), primary_key=True)
    processingJobId = Column(ForeignKey("ProcessingJob.processingJobId"), index=True)
    parameterKey = Column(String(80), comment="E.g. resolution, spacegroup, pipeline")
    parameterValue = Column(String(1024))

    ProcessingJob = relationship("ProcessingJob")


class WorkflowMesh(Base):
    __tablename__ = "WorkflowMesh"

    workflowMeshId = Column(
        INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)"
    )
    workflowId = Column(
        ForeignKey("Workflow.workflowId", ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False,
        index=True,
        comment="Related workflow",
    )
    bestPositionId = Column(
        ForeignKey(
            "MotorPosition.motorPositionId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        index=True,
    )
    bestImageId = Column(
        ForeignKey("Image.imageId", ondelete="CASCADE", onupdate="CASCADE"), index=True
    )
    value1 = Column(Float(asdecimal=True))
    value2 = Column(Float(asdecimal=True))
    value3 = Column(Float(asdecimal=True), comment="N value")
    value4 = Column(Float(asdecimal=True))
    cartographyPath = Column(String(255))
    recordTimeStamp = Column(
        TIMESTAMP,
        nullable=False,
        server_default=text("current_timestamp()"),
        comment="Creation or last update date/time",
    )

    Image = relationship("Image")
    MotorPosition = relationship("MotorPosition")
    Workflow = relationship("Workflow")


class AutoProcIntegration(Base):
    __tablename__ = "AutoProcIntegration"

    autoProcIntegrationId = Column(
        INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)"
    )
    dataCollectionId = Column(
        ForeignKey(
            "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        nullable=False,
        index=True,
        comment="DataCollection item",
    )
    autoProcProgramId = Column(
        ForeignKey(
            "AutoProcProgram.autoProcProgramId", ondelete="CASCADE", onupdate="CASCADE"
        ),
        index=True,
        comment="Related program item",
    )
    startImageNumber = Column(INTEGER(10), comment="start image number")
    endImageNumber = Column(INTEGER(10), comment="end image number")
    refinedDetectorDistance = Column(
        Float, comment="Refined DataCollection.detectorDistance"
    )
    refinedXBeam = Column(Float, comment="Refined DataCollection.xBeam")
    refinedYBeam = Column(Float, comment="Refined DataCollection.yBeam")
    rotationAxisX = Column(Float, comment="Rotation axis")
    rotationAxisY = Column(Float, comment="Rotation axis")
    rotationAxisZ = Column(Float, comment="Rotation axis")
    beamVectorX = Column(Float, comment="Beam vector")
    beamVectorY = Column(Float, comment="Beam vector")
    beamVectorZ = Column(Float, comment="Beam vector")
    cell_a = Column(Float, comment="Unit cell")
    cell_b = Column(Float, comment="Unit cell")
    cell_c = Column(Float, comment="Unit cell")
    cell_alpha = Column(Float, comment="Unit cell")
    cell_beta = Column(Float, comment="Unit cell")
    cell_gamma = Column(Float, comment="Unit cell")
    recordTimeStamp = Column(DateTime, comment="Creation or last update date/time")
    anomalous = Column(
        TINYINT(1), server_default=text("0"), comment="boolean type:0 noanoum - 1 anoum"
    )

    AutoProcProgram = relationship("AutoProcProgram")
    DataCollection = relationship("DataCollection")
"AutoProcProgramAttachment" autoProcProgramAttachmentId = Column( INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)" ) autoProcProgramId = Column( ForeignKey( "AutoProcProgram.autoProcProgramId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, comment="Related autoProcProgram item", ) fileType = Column( Enum("Log", "Result", "Graph", "Debug", "Input"), comment="Type of file Attachment", ) fileName = Column(String(255), comment="Attachment filename") filePath = Column(String(255), comment="Attachment filepath to disk storage") recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") importanceRank = Column( TINYINT(3), comment="For the particular autoProcProgramId and fileType, indicate the importance of the attachment. Higher numbers are more important", ) AutoProcProgram = relationship("AutoProcProgram") class AutoProcProgramMessage(Base): __tablename__ = "AutoProcProgramMessage" autoProcProgramMessageId = Column(INTEGER(10), primary_key=True) autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId"), index=True ) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()") ) severity = Column(Enum("ERROR", "WARNING", "INFO")) message = Column(String(200)) description = Column(Text) AutoProcProgram = relationship("AutoProcProgram") class GridInfo(Base): __tablename__ = "GridInfo" gridInfoId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) xOffset = Column(Float(asdecimal=True)) yOffset = Column(Float(asdecimal=True)) dx_mm = Column(Float(asdecimal=True)) dy_mm = Column(Float(asdecimal=True)) steps_x = Column(Float(asdecimal=True)) steps_y = Column(Float(asdecimal=True)) meshAngle = Column(Float(asdecimal=True)) recordTimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp()"), comment="Creation or last update date/time", ) workflowMeshId = Column( ForeignKey( "WorkflowMesh.workflowMeshId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) orientation = Column( Enum("vertical", "horizontal"), server_default=text("'horizontal'") ) dataCollectionGroupId = Column( ForeignKey("DataCollectionGroup.dataCollectionGroupId"), index=True ) pixelsPerMicronX = Column(Float) pixelsPerMicronY = Column(Float) snapshot_offsetXPixel = Column(Float) snapshot_offsetYPixel = Column(Float) snaked = Column( TINYINT(1), server_default=text("0"), comment="True: The images associated with the DCG were collected in a snaked pattern", ) dataCollectionId = Column( ForeignKey( "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) DataCollectionGroup = relationship("DataCollectionGroup") DataCollection = relationship("DataCollection") WorkflowMesh = relationship("WorkflowMesh") class MXMRRun(Base): __tablename__ = "MXMRRun" mxMRRunId = Column(INTEGER(11), primary_key=True) autoProcScalingId = Column( ForeignKey("AutoProcScaling.autoProcScalingId"), nullable=False, index=True ) rValueStart = Column(Float) rValueEnd = Column(Float) rFreeValueStart = Column(Float) rFreeValueEnd = Column(Float) LLG = Column(Float, comment="Log Likelihood Gain") TFZ = Column(Float, comment="Translation Function Z-score") spaceGroup = Column(String(45), comment="Space group of the MR solution") autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId"), index=True ) AutoProcProgram = relationship("AutoProcProgram") AutoProcScaling = relationship("AutoProcScaling") class MotionCorrection(Base): 
__tablename__ = "MotionCorrection" motionCorrectionId = Column(INTEGER(11), primary_key=True) dataCollectionId = Column(ForeignKey("DataCollection.dataCollectionId"), index=True) autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId"), index=True ) imageNumber = Column(SMALLINT(5), comment="Movie number, sequential in time 1-n") firstFrame = Column(SMALLINT(5), comment="First frame of movie used") lastFrame = Column(SMALLINT(5), comment="Last frame of movie used") dosePerFrame = Column(Float, comment="Dose per frame, Units: e-/A^2") doseWeight = Column(Float, comment="Dose weight, Units: dimensionless") totalMotion = Column(Float, comment="Total motion, Units: A") averageMotionPerFrame = Column(Float, comment="Average motion per frame, Units: A") driftPlotFullPath = Column(String(255), comment="Full path to the drift plot") micrographFullPath = Column(String(255), comment="Full path to the micrograph") micrographSnapshotFullPath = Column( String(255), comment="Full path to a snapshot (jpg) of the micrograph" ) patchesUsedX = Column( MEDIUMINT(8), comment="Number of patches used in x (for motioncor2)" ) patchesUsedY = Column( MEDIUMINT(8), comment="Number of patches used in y (for motioncor2)" ) fftFullPath = Column( String(255), comment="Full path to the jpg image of the raw micrograph FFT" ) fftCorrectedFullPath = Column( String(255), comment="Full path to the jpg image of the drift corrected micrograph FFT", ) comments = Column(String(255)) movieId = Column(ForeignKey("Movie.movieId"), index=True) AutoProcProgram = relationship("AutoProcProgram") DataCollection = relationship("DataCollection") Movie = relationship("Movie") class PDBEntry(Base): __tablename__ = "PDBEntry" pdbEntryId = Column(INTEGER(11), primary_key=True) autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId", ondelete="CASCADE"), nullable=False, index=True, ) code = Column(String(4)) cell_a = Column(Float) cell_b = Column(Float) cell_c = Column(Float) cell_alpha = Column(Float) cell_beta = Column(Float) cell_gamma = Column(Float) resolution = Column(Float) pdbTitle = Column(String(255)) pdbAuthors = Column(String(600)) pdbDate = Column(DateTime) pdbBeamlineName = Column(String(50)) beamlines = Column(String(100)) distance = Column(Float) autoProcCount = Column(SMALLINT(6)) dataCollectionCount = Column(SMALLINT(6)) beamlineMatch = Column(TINYINT(1)) authorMatch = Column(TINYINT(1)) AutoProcProgram = relationship("AutoProcProgram") class Screening(Base): __tablename__ = "Screening" screeningId = Column(INTEGER(10), primary_key=True) dataCollectionId = Column( ForeignKey( "DataCollection.dataCollectionId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) bltimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp() ON UPDATE current_timestamp()"), ) programVersion = Column(String(45)) comments = Column(String(255)) shortComments = Column(String(20)) diffractionPlanId = Column( INTEGER(10), index=True, comment="references DiffractionPlan" ) dataCollectionGroupId = Column( ForeignKey("DataCollectionGroup.dataCollectionGroupId"), index=True ) xmlSampleInformation = Column(LONGBLOB) autoProcProgramId = Column( ForeignKey( "AutoProcProgram.autoProcProgramId", ondelete="SET NULL", onupdate="CASCADE" ), index=True, ) AutoProcProgram = relationship("AutoProcProgram") DataCollectionGroup = relationship("DataCollectionGroup") DataCollection = relationship("DataCollection") class ZcZocaloBuffer(Base): __tablename__ = "zc_ZocaloBuffer" AutoProcProgramID = 
Column( ForeignKey( "AutoProcProgram.autoProcProgramId", ondelete="CASCADE", onupdate="CASCADE" ), primary_key=True, nullable=False, comment="Reference to an existing AutoProcProgram", ) UUID = Column( INTEGER(10), primary_key=True, nullable=False, comment="AutoProcProgram-specific unique identifier", ) Reference = Column( INTEGER(10), comment="Context-dependent reference to primary key IDs in other ISPyB tables", ) AutoProcProgram = relationship("AutoProcProgram") class AutoProcScalingHasInt(Base): __tablename__ = "AutoProcScaling_has_Int" __table_args__ = ( Index( "AutoProcScalingHasInt_FKIndex3", "autoProcScalingId", "autoProcIntegrationId", ), ) autoProcScaling_has_IntId = Column( INTEGER(10), primary_key=True, comment="Primary key (auto-incremented)" ) autoProcScalingId = Column( ForeignKey( "AutoProcScaling.autoProcScalingId", ondelete="CASCADE", onupdate="CASCADE" ), comment="AutoProcScaling item", ) autoProcIntegrationId = Column( ForeignKey( "AutoProcIntegration.autoProcIntegrationId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, comment="AutoProcIntegration item", ) recordTimeStamp = Column(DateTime, comment="Creation or last update date/time") AutoProcIntegration = relationship("AutoProcIntegration") AutoProcScaling = relationship("AutoProcScaling") class AutoProcStatus(Base): __tablename__ = "AutoProcStatus" __table_args__ = { "comment": "AutoProcStatus table is linked to AutoProcIntegration" } autoProcStatusId = Column( INTEGER(11), primary_key=True, comment="Primary key (auto-incremented)" ) autoProcIntegrationId = Column( ForeignKey( "AutoProcIntegration.autoProcIntegrationId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, ) step = Column( Enum("Indexing", "Integration", "Correction", "Scaling", "Importing"), nullable=False, comment="autoprocessing step", ) status = Column( Enum("Launched", "Successful", "Failed"), nullable=False, comment="autoprocessing status", ) comments = Column(String(1024), comment="comments") bltimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp() ON UPDATE current_timestamp()"), ) AutoProcIntegration = relationship("AutoProcIntegration") class CTF(Base): __tablename__ = "CTF" ctfId = Column(INTEGER(11), primary_key=True) motionCorrectionId = Column( ForeignKey("MotionCorrection.motionCorrectionId"), index=True ) autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId"), index=True ) boxSizeX = Column(Float, comment="Box size in x, Units: pixels") boxSizeY = Column(Float, comment="Box size in y, Units: pixels") minResolution = Column(Float, comment="Minimum resolution for CTF, Units: A") maxResolution = Column(Float, comment="Units: A") minDefocus = Column(Float, comment="Units: A") maxDefocus = Column(Float, comment="Units: A") defocusStepSize = Column(Float, comment="Units: A") astigmatism = Column(Float, comment="Units: A") astigmatismAngle = Column(Float, comment="Units: deg?") estimatedResolution = Column(Float, comment="Units: A") estimatedDefocus = Column(Float, comment="Units: A") amplitudeContrast = Column(Float, comment="Units: %?") ccValue = Column(Float, comment="Correlation value") fftTheoreticalFullPath = Column( String(255), comment="Full path to the jpg image of the simulated FFT" ) comments = Column(String(255)) AutoProcProgram = relationship("AutoProcProgram") MotionCorrection = relationship("MotionCorrection") class MXMRRunBlob(Base): __tablename__ = "MXMRRunBlob" mxMRRunBlobId = Column(INTEGER(11), primary_key=True) mxMRRunId = 
Column(ForeignKey("MXMRRun.mxMRRunId"), nullable=False, index=True) view1 = Column(String(255)) view2 = Column(String(255)) view3 = Column(String(255)) filePath = Column( String(255), comment="File path corresponding to the filenames in the view* columns", ) x = Column(Float, comment="Fractional x coordinate of blob in range [-1, 1]") y = Column(Float, comment="Fractional y coordinate of blob in range [-1, 1]") z = Column(Float, comment="Fractional z coordinate of blob in range [-1, 1]") height = Column(Float, comment="Blob height (sigmas)") occupancy = Column(Float, comment="Site occupancy factor in range [0, 1]") nearestAtomName = Column(String(4), comment="Name of nearest atom") nearestAtomChainId = Column(String(2), comment="Chain identifier of nearest atom") nearestAtomResName = Column(String(4), comment="Residue name of nearest atom") nearestAtomResSeq = Column( MEDIUMINT(8), comment="Residue sequence number of nearest atom" ) nearestAtomDistance = Column(Float, comment="Distance in Angstrom to nearest atom") mapType = Column( Enum("anomalous", "difference"), comment="Type of electron density map corresponding to this blob", ) MXMRRun = relationship("MXMRRun") class MotionCorrectionDrift(Base): __tablename__ = "MotionCorrectionDrift" motionCorrectionDriftId = Column(INTEGER(11), primary_key=True) motionCorrectionId = Column( ForeignKey("MotionCorrection.motionCorrectionId"), index=True ) frameNumber = Column( SMALLINT(5), comment="Frame number of the movie these drift values relate to" ) deltaX = Column(Float, comment="Drift in x, Units: A") deltaY = Column(Float, comment="Drift in y, Units: A") MotionCorrection = relationship("MotionCorrection") class PDBEntryHasAutoProcProgram(Base): __tablename__ = "PDBEntry_has_AutoProcProgram" pdbEntryHasAutoProcId = Column(INTEGER(11), primary_key=True) pdbEntryId = Column( ForeignKey("PDBEntry.pdbEntryId", ondelete="CASCADE"), nullable=False, index=True, ) autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId", ondelete="CASCADE"), nullable=False, index=True, ) distance = Column(Float) AutoProcProgram = relationship("AutoProcProgram") PDBEntry = relationship("PDBEntry") class ParticlePicker(Base): __tablename__ = "ParticlePicker" __table_args__ = { "comment": "An instance of a particle picker program that was run" } particlePickerId = Column(INTEGER(10), primary_key=True) programId = Column( ForeignKey("AutoProcProgram.autoProcProgramId", onupdate="CASCADE"), index=True ) firstMotionCorrectionId = Column( ForeignKey("MotionCorrection.motionCorrectionId", onupdate="CASCADE"), index=True, ) particlePickingTemplate = Column(String(255), comment="Cryolo model") particleDiameter = Column(Float, comment="Unit: nm") numberOfParticles = Column(INTEGER(10)) summaryImageFullPath = Column( String(255), comment="Generated summary micrograph image with highlighted particles", ) MotionCorrection = relationship("MotionCorrection") AutoProcProgram = relationship("AutoProcProgram") class RelativeIceThickness(Base): __tablename__ = "RelativeIceThickness" relativeIceThicknessId = Column(INTEGER(11), primary_key=True) motionCorrectionId = Column( ForeignKey("MotionCorrection.motionCorrectionId", onupdate="CASCADE"), index=True, ) autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId", onupdate="CASCADE"), index=True ) minimum = Column(Float, comment="Minimum relative ice thickness, Unitless") q1 = Column(Float, comment="Quartile 1, unitless") median = Column(Float, comment="Median relative ice thickness, Unitless") q3 = 
Column(Float, comment="Quartile 3, unitless") maximum = Column(Float, comment="Minimum relative ice thickness, Unitless") AutoProcProgram = relationship("AutoProcProgram") MotionCorrection = relationship("MotionCorrection") class ScreeningInput(Base): __tablename__ = "ScreeningInput" screeningInputId = Column(INTEGER(10), primary_key=True) screeningId = Column( ForeignKey("Screening.screeningId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) beamX = Column(Float) beamY = Column(Float) rmsErrorLimits = Column(Float) minimumFractionIndexed = Column(Float) maximumFractionRejected = Column(Float) minimumSignalToNoise = Column(Float) diffractionPlanId = Column(INTEGER(10), comment="references DiffractionPlan table") xmlSampleInformation = Column(LONGBLOB) Screening = relationship("Screening") class ScreeningOutput(Base): __tablename__ = "ScreeningOutput" screeningOutputId = Column(INTEGER(10), primary_key=True) screeningId = Column( ForeignKey("Screening.screeningId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) statusDescription = Column(String(1024)) rejectedReflections = Column(INTEGER(10)) resolutionObtained = Column(Float) spotDeviationR = Column(Float) spotDeviationTheta = Column(Float) beamShiftX = Column(Float) beamShiftY = Column(Float) numSpotsFound = Column(INTEGER(10)) numSpotsUsed = Column(INTEGER(10)) numSpotsRejected = Column(INTEGER(10)) mosaicity = Column(Float) iOverSigma = Column(Float) diffractionRings = Column(TINYINT(1)) SCREENINGSUCCESS = Column( TINYINT(1), server_default=text("0"), comment="Column to be deleted" ) mosaicityEstimated = Column(TINYINT(1), nullable=False, server_default=text("0")) rankingResolution = Column(Float(asdecimal=True)) program = Column(String(45)) doseTotal = Column(Float(asdecimal=True)) totalExposureTime = Column(Float(asdecimal=True)) totalRotationRange = Column(Float(asdecimal=True)) totalNumberOfImages = Column(INTEGER(11)) rFriedel = Column(Float(asdecimal=True)) indexingSuccess = Column(TINYINT(1), nullable=False, server_default=text("0")) strategySuccess = Column(TINYINT(1), nullable=False, server_default=text("0")) alignmentSuccess = Column(TINYINT(1), nullable=False, server_default=text("0")) Screening = relationship("Screening") class ScreeningRank(Base): __tablename__ = "ScreeningRank" screeningRankId = Column(INTEGER(10), primary_key=True) screeningRankSetId = Column( ForeignKey( "ScreeningRankSet.screeningRankSetId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, server_default=text("0"), ) screeningId = Column( ForeignKey("Screening.screeningId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, server_default=text("0"), ) rankValue = Column(Float) rankInformation = Column(String(1024)) Screening = relationship("Screening") ScreeningRankSet = relationship("ScreeningRankSet") class XRFFluorescenceMapping(Base): __tablename__ = "XRFFluorescenceMapping" __table_args__ = { "comment": "An XRF map generated from an XRF Mapping ROI based on data from a gridscan of a sample" } xrfFluorescenceMappingId = Column(INTEGER(11), primary_key=True) xrfFluorescenceMappingROIId = Column( ForeignKey( "XRFFluorescenceMappingROI.xrfFluorescenceMappingROIId", ondelete="CASCADE", onupdate="CASCADE", ), nullable=False, index=True, ) gridInfoId = Column(ForeignKey("GridInfo.gridInfoId"), nullable=False, index=True) dataFormat = Column( String(15), nullable=False, comment="Description of format and any 
compression, i.e. json+gzip for gzipped json", ) data = Column(LONGBLOB, nullable=False, comment="The actual data") points = Column( INTEGER(11), comment="The number of points available, for realtime feedback" ) opacity = Column( Float, nullable=False, server_default=text("1"), comment="Display opacity" ) colourMap = Column(String(20), comment="Colour map for displaying the data") min = Column(INTEGER(3), comment="Min value in the data for histogramming") max = Column(INTEGER(3), comment="Max value in the data for histogramming") autoProcProgramId = Column( ForeignKey("AutoProcProgram.autoProcProgramId"), index=True, comment="Related autoproc programid", ) AutoProcProgram = relationship("AutoProcProgram") GridInfo = relationship("GridInfo") XRFFluorescenceMappingROI = relationship("XRFFluorescenceMappingROI") class XrayCentringResult(Base): __tablename__ = "XrayCentringResult" xrayCentringResultId = Column(INTEGER(11), primary_key=True) gridInfoId = Column( ForeignKey("GridInfo.gridInfoId", ondelete="CASCADE", onupdate="CASCADE"), nullable=False, index=True, ) method = Column(String(15), comment="Type of X-ray centering calculation") status = Column( Enum("success", "failure", "pending"), nullable=False, server_default=text("'pending'"), ) x = Column( Float, comment="position in number of boxes in direction of the fast scan within GridInfo grid", ) y = Column( Float, comment="position in number of boxes in direction of the slow scan within GridInfo grid", ) GridInfo = relationship("GridInfo") class ParticleClassificationGroup(Base): __tablename__ = "ParticleClassificationGroup" particleClassificationGroupId = Column(INTEGER(10), primary_key=True) particlePickerId = Column( ForeignKey( "ParticlePicker.particlePickerId", ondelete="CASCADE", onupdate="CASCADE" ), index=True, ) programId = Column( ForeignKey("AutoProcProgram.autoProcProgramId", onupdate="CASCADE"), index=True ) type = Column( Enum("2D", "3D"), comment="Indicates the type of particle classification" ) batchNumber = Column(INTEGER(10), comment="Corresponding to batch number") numberOfParticlesPerBatch = Column( INTEGER(10), comment="total number of particles per batch (a large integer)" ) numberOfClassesPerBatch = Column(INTEGER(10)) symmetry = Column(String(20)) ParticlePicker = relationship("ParticlePicker") AutoProcProgram = relationship("AutoProcProgram") class ScreeningOutputLattice(Base): __tablename__ = "ScreeningOutputLattice" screeningOutputLatticeId = Column(INTEGER(10), primary_key=True) screeningOutputId = Column( ForeignKey( "ScreeningOutput.screeningOutputId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, server_default=text("0"), ) spaceGroup = Column(String(45)) pointGroup = Column(String(45)) bravaisLattice = Column(String(45)) rawOrientationMatrix_a_x = Column(Float) rawOrientationMatrix_a_y = Column(Float) rawOrientationMatrix_a_z = Column(Float) rawOrientationMatrix_b_x = Column(Float) rawOrientationMatrix_b_y = Column(Float) rawOrientationMatrix_b_z = Column(Float) rawOrientationMatrix_c_x = Column(Float) rawOrientationMatrix_c_y = Column(Float) rawOrientationMatrix_c_z = Column(Float) unitCell_a = Column(Float) unitCell_b = Column(Float) unitCell_c = Column(Float) unitCell_alpha = Column(Float) unitCell_beta = Column(Float) unitCell_gamma = Column(Float) bltimeStamp = Column( TIMESTAMP, nullable=False, server_default=text("current_timestamp() ON UPDATE current_timestamp()"), ) labelitIndexing = Column(TINYINT(1), server_default=text("0")) ScreeningOutput = 
relationship("ScreeningOutput") class ScreeningStrategy(Base): __tablename__ = "ScreeningStrategy" screeningStrategyId = Column(INTEGER(10), primary_key=True) screeningOutputId = Column( ForeignKey( "ScreeningOutput.screeningOutputId", ondelete="CASCADE", onupdate="CASCADE" ), nullable=False, index=True, server_default=text("0"), ) phiStart = Column(Float) phiEnd = Column(Float) rotation = Column(Float) exposureTime = Column(Float) resolution = Column(Float) completeness = Column(Float) multiplicity = Column(Float) anomalous = Column(TINYINT(1), nullable=False, server_default=text("0")) program = Column(String(45)) rankingResolution = Column(Float) transmission = Column( Float, comment="Transmission for the strategy as given by the strategy program." ) ScreeningOutput = relationship("ScreeningOutput") class XFEFluorescenceComposite(Base): __tablename__ = "XFEFluorescenceComposite" __table_args__ = { "comment": "A composite XRF map composed of three XRFFluorescenceMapping entries creating r, g, b layers" } xfeFluorescenceCompositeId = Column(INTEGER(10), primary_key=True) r = Column( ForeignKey("XRFFluorescenceMapping.xrfFluorescenceMappingId"), nullable=False, index=True, comment="Red layer", ) g = Column( ForeignKey("XRFFluorescenceMapping.xrfFluorescenceMappingId"), nullable=False, index=True, comment="Green layer", ) b = Column( ForeignKey("XRFFluorescenceMapping.xrfFluorescenceMappingId"), nullable=False, index=True, comment="Blue layer", ) rOpacity = Column( Float, nullable=False, server_default=text("1"), comment="Red layer opacity" ) bOpacity = Column( Float, nullable=False, server_default=text("1"), comment="Red layer opacity" ) gOpacity = Column( Float, nullable=False, server_default=text("1"), comment="Red layer opacity" ) opacity = Column( Float, nullable=False, server_default=text("1"), comment="Total map opacity" ) XRFFluorescenceMapping = relationship( "XRFFluorescenceMapping", primaryjoin="XFEFluorescenceComposite.b == XRFFluorescenceMapping.xrfFluorescenceMappingId", ) XRFFluorescenceMapping1 = relationship( "XRFFluorescenceMapping", primaryjoin="XFEFluorescenceComposite.g == XRFFluorescenceMapping.xrfFluorescenceMappingId", ) XRFFluorescenceMapping2 = relationship( "XRFFluorescenceMapping", primaryjoin="XFEFluorescenceComposite.r == XRFFluorescenceMapping.xrfFluorescenceMappingId", ) class ParticleClassification(Base): __tablename__ = "ParticleClassification" __table_args__ = {"comment": "Results of 2D or 2D classification"} particleClassificationId = Column(INTEGER(10), primary_key=True) classNumber = Column( INTEGER(10), comment="Identified of the class. 
A unique ID given by Relion" ) classImageFullPath = Column(String(255), comment="The PNG of the class") particlesPerClass = Column( INTEGER(10), comment="Number of particles within the selected class, can then be used together with the total number above to calculate the percentage", ) rotationAccuracy = Column(Float, comment="???") translationAccuracy = Column(Float, comment="Unit: Angstroms") estimatedResolution = Column(Float, comment="???, Unit: Angstroms") overallFourierCompleteness = Column(Float) particleClassificationGroupId = Column( ForeignKey( "ParticleClassificationGroup.particleClassificationGroupId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, ) classDistribution = Column( Float, comment="Provides a figure of merit for the class, higher number is better", ) ParticleClassificationGroup = relationship("ParticleClassificationGroup") class ScreeningStrategyWedge(Base): __tablename__ = "ScreeningStrategyWedge" screeningStrategyWedgeId = Column( INTEGER(10), primary_key=True, comment="Primary key" ) screeningStrategyId = Column( ForeignKey( "ScreeningStrategy.screeningStrategyId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, comment="Foreign key to parent table", ) wedgeNumber = Column( INTEGER(10), comment="The number of this wedge within the strategy" ) resolution = Column(Float) completeness = Column(Float) multiplicity = Column(Float) doseTotal = Column(Float, comment="Total dose for this wedge") numberOfImages = Column(INTEGER(10), comment="Number of images for this wedge") phi = Column(Float) kappa = Column(Float) chi = Column(Float) comments = Column(String(255)) wavelength = Column(Float(asdecimal=True)) ScreeningStrategy = relationship("ScreeningStrategy") t_ParticleClassification_has_CryoemInitialModel = Table( "ParticleClassification_has_CryoemInitialModel", metadata, Column( "particleClassificationId", ForeignKey( "ParticleClassification.particleClassificationId", ondelete="CASCADE", onupdate="CASCADE", ), primary_key=True, nullable=False, ), Column( "cryoemInitialModelId", ForeignKey( "CryoemInitialModel.cryoemInitialModelId", ondelete="CASCADE", onupdate="CASCADE", ), primary_key=True, nullable=False, index=True, ), ) class ScreeningStrategySubWedge(Base): __tablename__ = "ScreeningStrategySubWedge" screeningStrategySubWedgeId = Column( INTEGER(10), primary_key=True, comment="Primary key" ) screeningStrategyWedgeId = Column( ForeignKey( "ScreeningStrategyWedge.screeningStrategyWedgeId", ondelete="CASCADE", onupdate="CASCADE", ), index=True, comment="Foreign key to parent table", ) subWedgeNumber = Column( INTEGER(10), comment="The number of this subwedge within the wedge" ) rotationAxis = Column(String(45)) axisStart = Column(Float, comment="Angle where subwedge starts") axisEnd = Column(Float, comment="Angle where subwedge ends") exposureTime = Column(Float, comment="Exposure time for subwedge") transmission = Column(Float, comment="Transmission for subwedge") oscillationRange = Column(Float) completeness = Column(Float) multiplicity = Column(Float) RESOLUTION = Column(Float) doseTotal = Column(Float, comment="Total dose for this subwedge") numberOfImages = Column(INTEGER(10), comment="Number of images for this subwedge") comments = Column(String(255)) ScreeningStrategyWedge = relationship("ScreeningStrategyWedge")
DiamondLightSource/ispyb-api
src/ispyb/sqlalchemy/_auto_db_schema.py
Python
apache-2.0
191,002
[ "CRYSTAL" ]
2b20e027a77a13bb6c7d759603ab2d6f1318308e4d13fd418a301f1f7892db9b
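The declarative models in the file above wire the ISPyB autoprocessing tables together through plain SQLAlchemy relationships (e.g. AutoProcProgram.ProcessingJob), so results can be walked with ordinary ORM queries. A minimal sketch, assuming SQLAlchemy 1.4+ and a reachable ISPyB database; the DSN, credentials, and driver are placeholders, and only the model, column, and relationship names come from the schema above:

from sqlalchemy import create_engine
from sqlalchemy.orm import Session

from ispyb.sqlalchemy._auto_db_schema import AutoProcProgram

# placeholder DSN - substitute real credentials, host, and database name
engine = create_engine("mysql+mysqlconnector://user:password@localhost/ispyb")

with Session(engine) as session:
    # ten most recent failed runs; processingStatus is "success (1) / fail (0)"
    failed = (
        session.query(AutoProcProgram)
        .filter(AutoProcProgram.processingStatus == 0)
        .order_by(AutoProcProgram.processingStartTime.desc())
        .limit(10)
    )
    for program in failed:
        job = program.ProcessingJob  # may be None if no parent ProcessingJob
        print(program.processingPrograms, program.processingMessage,
              job.recipe if job is not None else None)

Note that the generated relationships are one-way (child to parent, with no backrefs), so going the other direction, for example from a program to its AutoProcProgramAttachment rows, is a query filtered on autoProcProgramId rather than an attribute access.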
# -*- coding: utf-8 -*- import datetime import re from PyQt5.QtCore import pyqtSignal, QThread, QMutex from .xn_page_cache import XNovaPageCache from .xn_page_dnl import XNovaPageDownload from .xn_data import XNAccountInfo, XNCoords, XNFlight, XNPlanet, XNPlanetBuildingItem from .xn_techtree import XNTechTree_instance from .xn_parser_overview import OverviewParser from .xn_parser_userinfo import UserInfoParser from .xn_parser_curplanet import CurPlanetParser from .xn_parser_imperium import ImperiumParser from .xn_parser_galaxy import GalaxyParser from .xn_parser_planet_buildings import PlanetBuildingsAvailParser, PlanetBuildingsProgressParser from .xn_parser_planet_energy import PlanetEnergyResParser from .xn_parser_shipyard import ShipyardShipsAvailParser, ShipyardBuildsInProgressParser from .xn_parser_research import ResearchAvailParser from .xn_parser_techtree import TechtreeParser from .xn_parser_fleet import FleetsMaxParser from . import xn_logger logger = xn_logger.get(__name__, debug=True) # created by main window to keep info about world updated class XNovaWorld(QThread): SIGNAL_QUIT = 0 SIGNAL_RELOAD_PAGE = 1 # args: page_name SIGNAL_RENAME_PLANET = 2 # args: planet_id, new_name SIGNAL_RELOAD_PLANET = 3 # args: planet_id SIGNAL_BUILD_ITEM = 4 # args: bitem: XNPlanetBuildingItem, quantity, planet_id SIGNAL_BUILD_CANCEL = 5 # args: bitem: XNPlanetBuildingItem, planet_id SIGNAL_BUILD_DISMANTLE = 6 # args: bitem: XNPlanetBuildingItem, planet_id SIGNAL_GET_URL = 7 # args: url, referer: optional # testing signals ... ? SIGNAL_TEST_PARSE_GALAXY = 100 # args: galaxy, system # signal is emitted to report full world refresh progress # str is a comment on what is loading now, int is a progress percent [0..100] world_load_progress = pyqtSignal(str, int) # signal to be emitted when initial world loading is complete world_load_complete = pyqtSignal() # emitted when fleet has arrived at its destination flight_arrived = pyqtSignal(XNFlight) # emitted when building has completed on planet build_complete = pyqtSignal(XNPlanet, XNPlanetBuildingItem) # emitted when overview was reloaded (but not during world refresh) loaded_overview = pyqtSignal() # emitted when imperium was reloaded (but not during world refresh) loaded_imperium = pyqtSignal() # emitted when full planet was refreshed (but not during world refresh) loaded_planet = pyqtSignal(int) # planet_id # emitted when any network request has started (but not during world refresh) net_request_started = pyqtSignal() # emitted when network request has finished (but not during world refresh) net_request_finished = pyqtSignal() def __init__(self, parent=None): super(XNovaWorld, self).__init__(parent) self._world_is_loading = False # true if _full_refresh() is running (world is loading) # helpers self._page_dnl_times = dict() # last time when a page was downloaded self._page_cache = XNovaPageCache() self._page_cache.save_load_encoding = 'UTF-8' self._page_downloader = XNovaPageDownload() # parsers self._parser_overview = OverviewParser() self._parser_userinfo = UserInfoParser() self._parser_curplanet = CurPlanetParser() self._parser_imperium = ImperiumParser() self._parser_planet_buildings_avail = PlanetBuildingsAvailParser() self._parser_planet_buildings_progress = PlanetBuildingsProgressParser() self._parser_planet_energy = PlanetEnergyResParser() self._parser_shipyard_ships_avail = ShipyardShipsAvailParser() self._parser_shipyard_progress = ShipyardBuildsInProgressParser() self._parser_researches_avail = ResearchAvailParser() self._parser_techtree = 
TechtreeParser() self._parser_fleetmax = FleetsMaxParser() # world/user info self._server_time = datetime.datetime.today() # server time at last overview update # all we need to calc server time is actually time diff with our time: self._diff_with_server_time_secs = 0 # calculated as: our_time - server_time self._vacation_mode = False self._account = XNAccountInfo() self._flights = [] self._cur_planet_id = 0 self._cur_planet_name = '' self._cur_planet_coords = XNCoords(0, 0, 0) self._planets = [] # list of XNPlanet self._techtree = XNTechTree_instance() self._new_messages_count = 0 self._get_bonus_url = None self._server_online_players = 0 self._max_fleets_count = 0 self._cur_fleets_count = 0 # internal need self._net_errors_count = 0 self._net_errors_list = [] self._NET_ERRORS_MAX = 100 self._mutex = QMutex(QMutex.Recursive) self._signal_kwargs = dict() # thread identifiers, collected here mainly for debugging purposes # those are actually: # - DWORD GetCurrentThreadId() in Windows # - pthread_t pthread_self() in Linux self._maintid = 0 self._worldtid = 0 # settings self._overview_update_interval = 120 # seconds self._galaxy_cache_lifetime = 60 # seconds self._planet_buildings_cache_lifetime = 60 # seconds self._planet_shipyard_cache_lifetime = 60 # seconds self._planet_research_cache_lifetime = 60 # seconds def initialize(self, cookies_dict: dict): """ Called from main window just before thread starts :param cookies_dict: dictionary with cookies for networking :return: None """ # load cached pages self._page_cache.load_from_disk_cache(clean=True) # init network session with cookies for authorization self._page_downloader.set_cookies_from_dict(cookies_dict, do_save=True) # misc self._maintid = self._gettid() logger.debug('initialized from tid={0}'.format(self._maintid)) def reload_config(self): """ Reloads network config in the page downloader; if network settings have changed, creates a new HTTP session, which in effect opens a new connection. 
:return: """ prev_proxy = self._page_downloader.proxy self._page_downloader.read_config() new_proxy = self._page_downloader.proxy # reconstruct session only if network settings were changed if new_proxy != prev_proxy: logger.debug('reload_config(): net proxy changed, recreating ' 'HTTP session ({0} != {1})'.format(prev_proxy, new_proxy)) self._page_downloader.construct_session() self._page_downloader.apply_useragent() def lock(self, timeout_ms=None, raise_on_timeout=False): """ Locks thread mutex to protect thread state vars :param timeout_ms: timeout ms to wait, default infinite :param raise_on_timeout: if true, OSError/TimeoutError will be thrown on timeout :return: True if all OK, False if not locked """ if timeout_ms is None: self._mutex.lock() return True ret = self._mutex.tryLock(timeout_ms) if ret: return True else: if raise_on_timeout: # python >= 3.3 has TimeoutError, others only have OSError # raise TimeoutError('XNovaWorld: failed to get mutex lock, timeout was {0} ms.'.format(timeout_ms)) raise OSError('XNovaWorld: failed to get mutex lock, timeout was {0} ms.'.format(timeout_ms)) return False def unlock(self): self._mutex.unlock() def signal_quit(self): self.quit() def signal(self, signal_code=0, **kwargs): # logger.debug('signal: kwargs = {0}'.format(str(kwargs))) # ^^: args = (), kwargs = {'page': 'overview'} self.lock() if kwargs is not None: self._signal_kwargs = kwargs self.unlock() self.exit(signal_code) # QEventLoop.exit(code) makes thread's # event loop to exit with code ################################### # Getters ################################### def get_account_info(self) -> XNAccountInfo: self.lock() ret = self._account self.unlock() return ret def set_login_email(self, email: str): self.lock() self._account.email = email self.unlock() def get_flights(self) -> list: ret = [] # try to lock with 0ms wait, if fails, return empty list if self.lock(0): ret = self._flights self.unlock() return ret def get_flight_remaining_time_secs(self, fl: XNFlight) -> int: """ Calculates flight remaining time, adjusting time by difference between our time and server :param fl: flight :return: remaining time in seconds, or -1 on error """ self.lock() dsts = self._diff_with_server_time_secs self.unlock() # secs_left = -1 if fl.seconds_left != -1: secs_left = fl.seconds_left + dsts # return secs_left def get_current_server_time(self) -> datetime.datetime: """ Calculates current server time (at this moment), using previously calculated time diff (checked at last update) :return: """ self.lock() dt_now = datetime.datetime.today() dt_delta = datetime.timedelta(seconds=self._diff_with_server_time_secs) dt_server = dt_now - dt_delta self.unlock() return dt_server def get_planets(self, timeout_ms=None) -> list: """ Gets list of planets (XNPlanet) :param timeout_ms: milliseconds to wait the lock, default infinite :return list of [XPlanet()] """ ret = [] # try to lock with 0ms wait, if fails, return empty list if self.lock(timeout_ms): ret = self._planets self.unlock() return ret def get_planet(self, planet_id) -> XNPlanet: pls = self.get_planets() for pl in pls: if pl.planet_id == planet_id: return pl logger.warn('Could not find planet with id: {0}'.format(planet_id)) return None def get_new_messages_count(self) -> int: self.lock() ret = self._new_messages_count self.unlock() return ret def get_online_players(self): self.lock() ret = self._server_online_players self.unlock() return ret def get_fleets_count(self) -> list: """ Get current/maximum fleets count :return: list[0] = cur, list[1] = 
max """ ret = [0, 0] self.lock() ret[0] = self._cur_fleets_count ret[1] = self._max_fleets_count self.unlock() return ret def get_bonus_url(self) -> str: ret = '' if self.lock(0): ret = self._get_bonus_url self.unlock() return ret def clear_bonus_url(self): self.lock() self._get_bonus_url = None self.unlock() ################################################################################ # this should re-calculate all user's object statuses # like fleets in flight, buildings in construction, # reserches in progress, etc, ... def world_tick(self): # This is called from GUI thread =( self.lock() self._world_tick_flights() self._world_tick_planets() self._maybe_refresh_overview() self.unlock() # just counts remaining time for flights, # removes finished flights and emits signal # 'flight_arrived' for every finished flight def _world_tick_flights(self): # logger.debug('tick: server time diff: {0}'.format(self._diff_with_server_time_secs)) # 0:00:16.390197 # iterate finished_flights_count = 0 for fl in self._flights: if fl.seconds_left == -1: raise ValueError('Flight seconds left is None: {0}'.format(str(fl))) fl.seconds_left -= 1 if fl.seconds_left <= 0: fl.seconds_left = 0 logger.debug('==== Flight considered complete: {0}'.format(str(fl))) # logger.debug('==== additional debug info:') # logger.debug('==== - diff with server time: {0}'.format(self._diff_with_server_time_secs)) # logger.debug('==== - current time: {0}'.format(datetime.datetime.today())) # logger.debug('==== - current server time: {0}'.format(self.get_current_server_time())) finished_flights_count += 1 if finished_flights_count > 0: # logger.debug('==== Removing total {0} arrived flights'.format(finished_flights_count)) for irow in range(finished_flights_count): try: # finished_flight = self._flights[irow] # item-to-delete from python list will always have index 0? # because we need to delete the first item every time finished_flight = self._flights[0] # del self._flights[0] self._flights.remove(finished_flight) # emit signal self.flight_arrived.emit(finished_flight) except IndexError: # should never happen logger.error('IndexError while clearing finished flights: ') logger.error(' deleting index {0}, while total list len: {1}'.format( 0, len(self._flights))) # end _world_tick_flights() def _world_tick_planets(self): """ This should do the following: - increase planet resources every second - move planet buildings progress :return: None """ for planet in self._planets: # tick resources # calc resource speed per second mps = planet.res_per_hour.met / 3600 cps = planet.res_per_hour.cry / 3600 dps = planet.res_per_hour.deit / 3600 # add resource per second planet.res_current.met += mps planet.res_current.cry += cps planet.res_current.deit += dps # tick buildings in progress num_completed = 0 for bitem in planet.buildings_items: if bitem.is_in_progress(): bitem.seconds_left -= 1 if bitem.seconds_left <= 0: bitem.seconds_left = -1 bitem.dt_end = None # mark as stopped bitem.level += 1 # level increased num_completed += 1 self.build_complete.emit(planet, bitem) # tick shipyard builds in progress todel_list = [] for bitem in planet.shipyard_progress_items: bitem.seconds_left -= 1 if bitem.seconds_left <= 0: todel_list.append(bitem) break # only one (first) item is IN PROGRESS, others WAIT !!! 
if len(todel_list) > 0: for bitem in todel_list: planet.shipyard_progress_items.remove(bitem) self.build_complete.emit(planet, bitem) # tick planet researches num_completed = 0 for bitem in planet.research_items: if bitem.is_in_progress(): bitem.seconds_left -= 1 if bitem.seconds_left <= 0: bitem.seconds_left = -1 bitem.dt_end = None # mark as stopped bitem.level += 1 num_completed += 1 self.build_complete.emit(planet, bitem) # tick planet research_fleet num_completed = 0 for bitem in planet.researchfleet_items: if bitem.is_in_progress(): bitem.seconds_left -= 1 if bitem.seconds_left <= 0: bitem.seconds_left = -1 bitem.dt_end = None # mark as stopped bitem.level += 1 num_completed += 1 self.build_complete.emit(planet, bitem) # end _world_tick_planets() def _maybe_refresh_overview(self): """ Can trigger signal to refresh overview page every 'self._overview_update_interval' seconds. Called from self.world_tick(), which holds the lock already """ if 'overview' in self._page_dnl_times: dt_last = self._page_dnl_times['overview'] dt_now = datetime.datetime.today() dt_diff = dt_now - dt_last secs_ago = int(dt_diff.total_seconds()) if secs_ago >= self._overview_update_interval: logger.debug('_maybe_refresh_overview() trigger update: ' '{0} secs ago.'.format(secs_ago)) self.signal(self.SIGNAL_RELOAD_PAGE, page_name='overview') ################################################################################ def on_page_downloaded(self, page_name: str): logger.debug('on_page_downloaded( "{0}" ) tid={1}'.format(page_name, self._gettid_s())) # cache has the page inside before the signal was emitted! # we can get page content from cache page_content = self._page_cache.get_page(page_name) if page_content is None: raise ValueError('This should never happen!') # get current date/time dt_now = datetime.datetime.today() self._page_dnl_times[page_name] = dt_now # save last download time for page # dispatch parser and merge data if page_name == 'overview': self._parser_overview.clear() self._parser_overview.account = self._account # store previous info self._parser_overview.parse_page_content(page_content) self._account = self._parser_overview.account # get new info self._flights = self._parser_overview.flights # get server time and also calculate the time diff self._server_time = self._parser_overview.server_time dt_diff = dt_now - self._server_time self._diff_with_server_time_secs = int(dt_diff.total_seconds()) self._new_messages_count = self._parser_overview.new_messages_count self._vacation_mode = self._parser_overview.in_RO self._server_online_players = self._parser_overview.online_players self._get_bonus_url = self._parser_overview.bonus_url # run also cur planet parser on the same content self._parser_curplanet.parse_page_content(page_content) self._cur_planet_id = self._parser_curplanet.cur_planet_id self._cur_planet_name = self._parser_curplanet.cur_planet_name self._cur_planet_coords = self._parser_curplanet.cur_planet_coords self._internal_set_current_planet() # it may have changed # emit signal that we've loaded overview, but not during world update if not self._world_is_loading: self.loaded_overview.emit() elif page_name == 'self_user_info': self._parser_userinfo.parse_page_content(page_content) self._account.scores.buildings = self._parser_userinfo.buildings self._account.scores.buildings_rank = self._parser_userinfo.buildings_rank self._account.scores.fleet = self._parser_userinfo.fleet self._account.scores.fleet_rank = self._parser_userinfo.fleet_rank self._account.scores.defense = 
self._parser_userinfo.defense self._account.scores.defense_rank = self._parser_userinfo.defense_rank self._account.scores.science = self._parser_userinfo.science self._account.scores.science_rank = self._parser_userinfo.science_rank self._account.scores.total = self._parser_userinfo.total self._account.scores.rank = self._parser_userinfo.rank self._account.main_planet_name = self._parser_userinfo.main_planet_name self._account.main_planet_coords = self._parser_userinfo.main_planet_coords self._account.alliance_name = self._parser_userinfo.alliance_name elif page_name == 'imperium': self._parser_imperium.clear() self._parser_imperium.parse_page_content(page_content) self._planets = self._parser_imperium.planets # since we've overwritten the whole planets array, we need to # write current planet into it again self._internal_set_current_planet() # emit signal that we've loaded imperium, but not during world update if not self._world_is_loading: self.loaded_imperium.emit() elif page_name == 'techtree': self._parser_techtree.clear() self._parser_techtree.parse_page_content(page_content) # store techtree, if anything was parsed successfully if len(self._parser_techtree.techtree) > 0: self._techtree.init_techtree(self._parser_techtree.techtree) elif page_name == 'fleet': self._parser_fleetmax.clear() self._parser_fleetmax.parse_page_content(page_content) self._cur_fleets_count = self._parser_fleetmax.fleets_cur self._max_fleets_count = self._parser_fleetmax.fleets_max elif page_name.startswith('buildings_'): try: m = re.match(r'buildings_(\d+)', page_name) planet_id = int(m.group(1)) planet = self.get_planet(planet_id) # get available buildings to build self._parser_planet_buildings_avail.clear() self._parser_planet_buildings_avail.parse_page_content(page_content) # get buildings in progress on the same page self._parser_planet_buildings_progress.clear() self._parser_planet_buildings_progress.parse_page_content(page_content) # get planet energy info, res cur/max/prod self._parser_planet_energy.clear() self._parser_planet_energy.parse_page_content(page_content) if planet is not None: planet.buildings_items = self._parser_planet_buildings_avail.builds_avail num_added = len(self._parser_planet_buildings_progress.builds_in_progress) if num_added > 0: for bip in self._parser_planet_buildings_progress.builds_in_progress: planet.add_build_in_progress(bip) logger.debug('Buildings queue for planet {0}: added {1}'.format(planet.name, num_added)) # save planet energy info, do not overwrite with zeros if self._parser_planet_energy.energy_left > 0: planet.energy.energy_left = self._parser_planet_energy.energy_left if self._parser_planet_energy.energy_total > 0: planet.energy.energy_total = self._parser_planet_energy.energy_total # save planet resource info if len(self._parser_planet_energy.res_current) > 0: planet.res_current = self._parser_planet_energy.res_current if len(self._parser_planet_energy.res_max_silos) > 0: planet.res_max_silos = self._parser_planet_energy.res_max_silos if len(self._parser_planet_energy.res_per_hour) > 0: planet.res_per_hour = self._parser_planet_energy.res_per_hour except ValueError: # failed to convert to int logger.exception('Failed to convert planet_id to int, page_name=[{0}]'.format(page_name)) except AttributeError: # no match logger.exception('Invalid format for page_name=[{0}], expected buildings_123456'.format(page_name)) elif page_name.startswith('shipyard_'): try: m = re.match(r'shipyard_(\d+)', page_name) planet_id = int(m.group(1)) planet = 
self.get_planet(planet_id) # go parse self._parser_shipyard_ships_avail.clear() self._parser_shipyard_ships_avail.parse_page_content(page_content) self._parser_shipyard_progress.clear() self._parser_shipyard_progress.server_time = self._server_time self._parser_shipyard_progress.parse_page_content(page_content) # get planet energy info self._parser_planet_energy.clear() self._parser_planet_energy.parse_page_content(page_content) if planet is not None: planet.shipyard_tems = self._parser_shipyard_ships_avail.ships_avail planet.shipyard_progress_items = self._parser_shipyard_progress.shipyard_progress_items if len(self._parser_shipyard_progress.shipyard_progress_items) > 0: logger.debug('planet [{0}] has {1} items in shipyard queue'.format( planet.name, len(self._parser_shipyard_progress.shipyard_progress_items))) # save planet energy info, but do not overwrite with zeros # if there is no shipyard @ planet, no energy info will be on the page =( if self._parser_planet_energy.energy_left > 0: planet.energy.energy_left = self._parser_planet_energy.energy_left if self._parser_planet_energy.energy_total > 0: planet.energy.energy_total = self._parser_planet_energy.energy_total # save planet resource info if len(self._parser_planet_energy.res_current) > 0: planet.res_current = self._parser_planet_energy.res_current if len(self._parser_planet_energy.res_max_silos) > 0: planet.res_max_silos = self._parser_planet_energy.res_max_silos if len(self._parser_planet_energy.res_per_hour) > 0: planet.res_per_hour = self._parser_planet_energy.res_per_hour except AttributeError: # no match logger.exception('Invalid format for page_name=[{0}], expected shipyard_123456'.format(page_name)) except ValueError: # failed to convert to int logger.exception('Failed to convert planet_id to int, page_name=[{0}]'.format(page_name)) elif page_name.startswith('defense_'): try: m = re.match(r'defense_(\d+)', page_name) planet_id = int(m.group(1)) planet = self.get_planet(planet_id) # go parse self._parser_shipyard_ships_avail.clear() self._parser_shipyard_ships_avail.parse_page_content(page_content) self._parser_shipyard_progress.clear() self._parser_shipyard_progress.server_time = self._server_time self._parser_shipyard_progress.parse_page_content(page_content) # get planet energy info self._parser_planet_energy.clear() self._parser_planet_energy.parse_page_content(page_content) if planet is not None: # shipyard parser ships_avail can also parse planet defenses avail planet.defense_items = self._parser_shipyard_ships_avail.ships_avail # even in defense page, ships build queue is the same as in shipyard page planet.shipyard_progress_items = self._parser_shipyard_progress.shipyard_progress_items if len(self._parser_shipyard_progress.shipyard_progress_items) > 0: logger.debug('planet [{0}] has {1} items in shipyard queue'.format( planet.name, len(self._parser_shipyard_progress.shipyard_progress_items))) # save planet energy info, but do not overwrite with zeros # if there is no shipyard @ planet, no energy info will be on the page =( if self._parser_planet_energy.energy_left > 0: planet.energy.energy_left = self._parser_planet_energy.energy_left if self._parser_planet_energy.energy_total > 0: planet.energy.energy_total = self._parser_planet_energy.energy_total # save planet resource info if len(self._parser_planet_energy.res_current) > 0: planet.res_current = self._parser_planet_energy.res_current if len(self._parser_planet_energy.res_max_silos) > 0: planet.res_max_silos = self._parser_planet_energy.res_max_silos if 
len(self._parser_planet_energy.res_per_hour) > 0: planet.res_per_hour = self._parser_planet_energy.res_per_hour except AttributeError: # no match logger.exception('Invalid format for page_name=[{0}], expected defense_123456'.format(page_name)) except ValueError: # failed to convert to int logger.exception('Failed to convert planet_id to int, page_name=[{0}]'.format(page_name)) elif page_name.startswith('research_'): try: m = re.match(r'research_(\d+)', page_name) planet_id = int(m.group(1)) planet = self.get_planet(planet_id) # go parse self._parser_researches_avail.clear() self._parser_researches_avail.server_time = self._server_time self._parser_researches_avail.set_parsing_research_fleet(False) self._parser_researches_avail.parse_page_content(page_content) # get planet energy info self._parser_planet_energy.clear() self._parser_planet_energy.parse_page_content(page_content) if planet is not None: planet.research_items = self._parser_researches_avail.researches_avail if len(self._parser_researches_avail.researches_avail) > 0: logger.info('Planet {0} has {1} researches avail'.format( planet.name, len(self._parser_researches_avail.researches_avail))) # save planet energy info, but do not overwrite with zeros # if there is no lab @ planet, no energy info will be on the page =( if self._parser_planet_energy.energy_left > 0: planet.energy.energy_left = self._parser_planet_energy.energy_left if self._parser_planet_energy.energy_total > 0: planet.energy.energy_total = self._parser_planet_energy.energy_total # save planet resource info if len(self._parser_planet_energy.res_current) > 0: planet.res_current = self._parser_planet_energy.res_current if len(self._parser_planet_energy.res_max_silos) > 0: planet.res_max_silos = self._parser_planet_energy.res_max_silos if len(self._parser_planet_energy.res_per_hour) > 0: planet.res_per_hour = self._parser_planet_energy.res_per_hour except AttributeError: # no match logger.exception('Invalid format for page_name=[{0}], ' 'expected research_123456'.format(page_name)) except ValueError: # failed to convert to int logger.exception('Failed to convert planet_id to int, ' 'page_name=[{0}]'.format(page_name)) elif page_name.startswith('researchfleet_'): try: m = re.match(r'researchfleet_(\d+)', page_name) planet_id = int(m.group(1)) planet = self.get_planet(planet_id) # go parse self._parser_researches_avail.clear() self._parser_researches_avail.server_time = self._server_time self._parser_researches_avail.set_parsing_research_fleet(True) self._parser_researches_avail.parse_page_content(page_content) # get planet energy info self._parser_planet_energy.clear() self._parser_planet_energy.parse_page_content(page_content) if planet is not None: planet.researchfleet_items = self._parser_researches_avail.researches_avail if len(self._parser_researches_avail.researches_avail) > 0: logger.info('Planet {0} has {1} fleet researches avail'.format( planet.name, len(self._parser_researches_avail.researches_avail))) # save planet energy info, but do not overwrite with zeros # if there is no lab @ planet, no energy info will be on the page =( if self._parser_planet_energy.energy_left > 0: planet.energy.energy_left = self._parser_planet_energy.energy_left if self._parser_planet_energy.energy_total > 0: planet.energy.energy_total = self._parser_planet_energy.energy_total # save planet resource info if len(self._parser_planet_energy.res_current) > 0: planet.res_current = self._parser_planet_energy.res_current if len(self._parser_planet_energy.res_max_silos) > 0: 
planet.res_max_silos = self._parser_planet_energy.res_max_silos if len(self._parser_planet_energy.res_per_hour) > 0: planet.res_per_hour = self._parser_planet_energy.res_per_hour except AttributeError: # no match logger.exception('Invalid format for page_name=[{0}], ' 'expected researchfleet_123456'.format(page_name)) except ValueError: # failed to convert to int logger.exception('Failed to convert planet_id to int, ' 'page_name=[{0}]'.format(page_name)) else: logger.warn('on_page_downloaded(): Unhandled page name [{0}]. ' 'This may not be a problem, but...'.format(page_name)) def on_signal_reload_page(self): if 'page_name' in self._signal_kwargs: page_name = self._signal_kwargs['page_name'] logger.debug('on_signal_reload_page(): reloading {0}'.format(page_name)) self.lock() self._get_page(page_name, max_cache_lifetime=1, force_download=True) self.unlock() def on_signal_rename_planet(self): if ('planet_id' in self._signal_kwargs) and ('new_name' in self._signal_kwargs): planet_id = int(self._signal_kwargs['planet_id']) new_name = self._signal_kwargs['new_name'] # go go go logger.debug('renaming planet #{0} to [{1}]'.format(planet_id, new_name)) self.lock() # first need to ensure that this planet is current self._download_planet_overview(planet_id, force_download=True) # then trigger a rename operation self._request_rename_planet(planet_id, new_name) # force imperium update to read new planet name self._get_page('imperium', 1, force_download=True) self.unlock() def on_signal_reload_planet(self): if 'planet_id' in self._signal_kwargs: planet_id = int(self._signal_kwargs['planet_id']) logger.debug('reloading planet #{0}'.format(planet_id)) self.lock() self._download_planet(planet_id, delays_msec=250, force_download=True) self.unlock() logger.debug('reload planet #{0} done'.format(planet_id)) def on_signal_get_url(self): if 'url' in self._signal_kwargs: url = self._signal_kwargs['url'] referer = None if 'referer' in self._signal_kwargs: referer = self._signal_kwargs['referer'] logger.debug('Got signal to load url: [{0}], referer=[{1}]'.format( url, referer)) self.lock() self._get_page_url(None, url, max_cache_lifetime=0, force_download=True, referer=referer) self.unlock() def on_signal_test_parse_galaxy(self): if ('galaxy' in self._signal_kwargs) and ('system' in self._signal_kwargs): gal_no = self._signal_kwargs['galaxy'] sys_no = self._signal_kwargs['system'] logger.debug('downloading galaxy page {0},{1}'.format(gal_no, sys_no)) page_content = self._download_galaxy_page(gal_no, sys_no, force_download=True) if page_content is not None: gp = GalaxyParser() gp.clear() gp.parse_page_content(page_content) logger.debug(gp.galaxy_rows) def on_signal_build_item(self): if ('bitem' in self._signal_kwargs) and ('planet_id' in self._signal_kwargs) \ and ('quantity' in self._signal_kwargs): bitem = self._signal_kwargs['bitem'] planet_id = int(self._signal_kwargs['planet_id']) quantity = int(self._signal_kwargs['quantity']) self.lock() # check that current planet is the same as requested to build on # (it should be the same) if self._cur_planet_id != planet_id: logger.debug('Current planet ({}) is not {}, force ' 'change current planet'.format( self._cur_planet_id, planet_id)) self._download_planet_overview(planet_id, force_download=True) self._request_build_item(planet_id, bitem, quantity) self.unlock() def on_signal_build_cancel(self): if ('bitem' in self._signal_kwargs) and ('planet_id' in self._signal_kwargs): bitem = self._signal_kwargs['bitem'] planet_id = int(self._signal_kwargs['planet_id']) self.lock() 
# check that current planet is the same as requested to build on # (it should be the same) if self._cur_planet_id != planet_id: logger.debug('Current planet ({}) is not {}, force ' 'change current planet'.format( self._cur_planet_id, planet_id)) self._download_planet_overview(planet_id) self._request_build_cancel(planet_id, bitem) self.unlock() def on_signal_build_dismantle(self): if ('bitem' in self._signal_kwargs) and ('planet_id' in self._signal_kwargs): bitem = self._signal_kwargs['bitem'] planet_id = int(self._signal_kwargs['planet_id']) self.lock() # check that current planet is the same as requested to build on # (it should be the same) if self._cur_planet_id != planet_id: logger.debug('Current planet ({}) is not {}, force ' 'change current planet'.format( self._cur_planet_id, planet_id)) self._download_planet_overview(planet_id) self._request_build_dismantle(planet_id, bitem) self.unlock() def _internal_set_current_planet(self): """ Just updates internal planets array with information about which of them is current one :return: None """ for pl in self._planets: if pl.planet_id == self._cur_planet_id: pl.is_current = True else: pl.is_current = False def _inc_network_errors(self): """ Error handler, called when network error has occured, when page could not be downloaded. Raises RuntimeError when too many errors happened. :return: """ # increase errors count self._net_errors_count += 1 # store error text if self._page_downloader.error_str != '': self._net_errors_list.append(self._page_downloader.error_str) logger.error('net error happened: [{0}], total errors count: {1}'.format( self._page_downloader.error_str, self._net_errors_count)) if self._net_errors_count > self._NET_ERRORS_MAX: raise RuntimeError('Too many network errors: {0}!'.format(self._net_errors_count)) # internal helper, converts page identifier to url path def _page_name_to_url_path(self, page_name: str): urls_dict = dict() urls_dict['overview'] = '?set=overview' urls_dict['imperium'] = '?set=imperium' urls_dict['techtree'] = '?set=techtree' urls_dict['fleet'] = '?set=fleet' sub_url = None if page_name in urls_dict: return urls_dict[page_name] elif page_name == 'self_user_info': # special page case, dynamic URL, depends on user id # http://uni4.xnova.su/?set=players&id=71995 if self._account.id == 0: logger.warn('requested account info page, but account id is 0!') return None sub_url = '?set=players&id={0}'.format(self._account.id) else: logger.warn('unknown page name requested: {0}'.format(page_name)) return sub_url def _get_page(self, page_name, max_cache_lifetime=None, force_download=False): """ Gets page from cache or from server only by page name. Converts page_name to page URL, using _page_name_to_url_path(). First tries to get cached page from cache using page_name as key. If there is no cached page there, or it is expired, downloads from network. Then calls self.on_page_downloaded() to automatically parse requested page. :param page_name: 'name' used as key in pages cache :param max_cache_lifetime: cache timeout :param force_download: :return: page contents as str, or None on error """ page_url = self._page_name_to_url_path(page_name) if not page_url: logger.error('Failed to convert page_name=[{0}] to url!'.format(page_name)) return None return self._get_page_url(page_name, page_url, max_cache_lifetime, force_download) def _get_page_url(self, page_name, page_url, max_cache_lifetime=None, force_download=False, referer=None): """ For internal needs, downloads url from server using HTTP GET. 
First tries to get cached page from cache using page_name as key. If there is no cached page there, or it is expired, downloads from network. Then calls self.on_page_downloaded() to automatically parse requested page. If force_download is True, max_cache_lifetime is ignored. (This method's return value is ignored for now) :param page_name: 'name' of page to use as key when stored to cache, if None - cache disabled :param page_url: URL to download in HTTP GET request :param max_cache_lifetime: :param force_download: :param referer: set this to str value to force Referer header before request :return: page contents (str) or None on error """ page_content = None if not force_download: # try to get cached page (default) page_content = self._page_cache.get_page(page_name, max_cache_lifetime) if page_content is not None: logger.debug('... got page "{0}" from cache! (lifetime < {1})'.format( page_name, max_cache_lifetime)) if page_content is None: # signal that we are starting network request if not self._world_is_loading: self.net_request_started.emit() # set referer, if set if referer is not None: self._page_downloader.set_referer(referer) # try to download page_content = self._page_downloader.download_url_path(page_url) # signal that we have finished network request if not self._world_is_loading: self.net_request_finished.emit() # save in cache, only if content anf page_name is present if (page_content is not None) and (page_name is not None): self._page_cache.set_page(page_name, page_content) # check for download error if page_content is None: # download error happened self._inc_network_errors() # parse page content independently if it was read from cache or by network from server if (page_content is not None) and (page_name is not None): self.on_page_downloaded(page_name) # process downloaded page return page_content def _post_page_url(self, page_url: str, post_data: dict=None, referer: str=None): """ For internal needs, sends a POST request, and handles possible error returns :param page_url: URL to send HTTP POST to :param post_data: dict with post data key-value pairs :param referer: if set, use this as value for HTTP Referer header :return: response content, or None on error """ # signal that we are starting network request if not self._world_is_loading: self.net_request_started.emit() page_content = self._page_downloader.post(page_url, post_data=post_data, referer=referer) # signal that we have finished network request if not self._world_is_loading: self.net_request_finished.emit() # handle errors if page_content is None: self._inc_network_errors() return page_content def _download_galaxy_page(self, galaxy_no, sys_no, force_download=False): # 'http://uni4.xnova.su/?set=galaxy&r=3&galaxy=3&system=130' page_url = '?set=galaxy&r=3&galaxy={0}&system={1}'.format(galaxy_no, sys_no) page_name = 'galaxy_{0}_{1}'.format(galaxy_no, sys_no) # if force_download is True, cache_lifetime is ignored return self._get_page_url(page_name, page_url, self._galaxy_cache_lifetime, force_download) def _download_image(self, img_path: str): img_bytes = self._page_downloader.download_url_path(img_path, return_binary=True) if img_bytes is None: logger.error('image dnl failed: [{0}]'.format(img_path)) self._inc_network_errors() return self._page_cache.save_image(img_path, img_bytes) def _download_planet_overview(self, planet_id: int, force_download=False): # url to change current planet is: # http://uni4.xnova.su/?set=overview&cp=60668&re=0 page_url = '?set=overview&cp={0}&re=0'.format(planet_id) page_name = 
'overview' return self._get_page_url(page_name, page_url, 1, force_download) def _download_planet_buildings(self, planet_id: int, force_download=False): page_url = '?set=buildings&cp={0}&re=0'.format(planet_id) page_name = 'buildings_{0}'.format(planet_id) return self._get_page_url(page_name, page_url, self._planet_buildings_cache_lifetime, force_download) def _download_planet_shipyard(self, planet_id: int, force_download=False): # url to change current planet is: # http://uni4.xnova.su/?set=buildings&mode=fleet&cp=60668&re=0 page_url = '?set=buildings&mode=fleet&cp={0}&re=0'.format(planet_id) page_name = 'shipyard_{0}'.format(planet_id) return self._get_page_url(page_name, page_url, self._planet_shipyard_cache_lifetime, force_download) def _download_planet_defense(self, planet_id: int, force_download=False): # url to change current planet is: # http://uni4.xnova.su/?set=buildings&mode=defense&cp=60668&re=0 page_url = '?set=buildings&mode=defense&cp={0}&re=0'.format(planet_id) page_name = 'defense_{0}'.format(planet_id) return self._get_page_url(page_name, page_url, self._planet_shipyard_cache_lifetime, force_download) def _download_planet_researches(self, planet_id: int, force_download=False): # url: http://uni4.xnova.su/?set=buildings&mode=research&cp=57064&re=0 page_url = '?set=buildings&mode=research&cp={0}&re=0'.format(planet_id) page_name = 'research_{0}'.format(planet_id) return self._get_page_url(page_name, page_url, self._planet_research_cache_lifetime, force_download) def _download_planet_researches_fleet(self, planet_id: int, force_download=False): # url: http://uni4.xnova.su/?set=buildings&mode=research_fleet&cp=57064&re=0 page_url = '?set=buildings&mode=research_fleet&cp={0}&re=0'.format(planet_id) page_name = 'researchfleet_{0}'.format(planet_id) return self._get_page_url(page_name, page_url, self._planet_research_cache_lifetime, force_download) def _download_planet(self, planet_id: int, delays_msec: int=None, force_download: bool=False): # planet buildings in progress self._download_planet_buildings(planet_id, force_download) if delays_msec is not None: self.msleep(delays_msec) # planet researches and in progress self._download_planet_researches(planet_id, force_download) if delays_msec is not None: self.msleep(delays_msec) # planet factory researches and in progress self._download_planet_researches_fleet(planet_id, force_download) if delays_msec is not None: self.msleep(delays_msec) # planet shipyard/defense builds in progress self._download_planet_shipyard(planet_id, force_download) if delays_msec is not None: self.msleep(delays_msec) self._download_planet_defense(planet_id, force_download) if delays_msec is not None: self.msleep(delays_msec) if not self._world_is_loading: self.loaded_planet.emit(planet_id) def _request_rename_planet(self, planet_id: int, new_name: str): post_url = '?set=overview&mode=renameplanet&pl={0}'.format(planet_id) post_data = dict() post_data['action'] = 'Сменить название' post_data['newname'] = new_name referer = 'http://{0}/?set=overview&mode=renameplanet'.format(self._page_downloader.xnova_url) self._post_page_url(post_url, post_data, referer) logger.debug('Rename planet to [{0}] complete'.format(new_name)) def _request_build_item(self, planet_id: int, bitem: XNPlanetBuildingItem, quantity: int): logger.debug('Request to build: {0} lv {1} x {2} on planet {3}, build_link = [{4}]'.format( bitem.name, bitem.level+1, quantity, planet_id, bitem.build_link)) if bitem.is_building_item or bitem.is_research_item or bitem.is_researchfleet_item: if 
bitem.build_link is None or (bitem.build_link == ''): logger.warn('bitem build_link is empty, cannot build!') return # construct page name and referer # successful request to build item redirects to buildings page page_name = None referer = '' if bitem.is_building_item: page_name = 'buildings_{0}'.format(planet_id) referer = 'http://{0}/?set=buildings'.format( self._page_downloader.xnova_url) elif bitem.is_research_item: page_name = 'research_{0}'.format(planet_id) referer = 'http://{0}/?set=buildings&mode=research'.format( self._page_downloader.xnova_url) elif bitem.is_researchfleet_item: referer = 'http://{0}/?set=buildings&mode=research_fleet'.format( self._page_downloader.xnova_url) page_name = 'researchfleet_{0}'.format(planet_id) # send request self._get_page_url(page_name, bitem.build_link, max_cache_lifetime=0, force_download=True, referer=referer) elif bitem.is_shipyard_item: logger.debug('Build shipyard item {0} x {1}, gid={2}'.format( bitem.name, bitem.quantity, bitem.gid)) if bitem.gid <= 0: logger.warn('Invalid bitem gid: {0}! Skippping!'.format(bitem.gid)) return if quantity <= 0: logger.warn('Invalid quantity: {0}! Skippping!'.format(quantity)) return post_url = '?set=buildings&mode=fleet' post_data = dict() param_name = 'fmenge[{0}]'.format(bitem.gid) post_data[param_name] = quantity referer = 'http://{0}/?set=buildings&mode=fleet'.format( self._page_downloader.xnova_url) self._post_page_url(post_url, post_data, referer) # automatically download planet shipyard after this self._download_planet_shipyard(planet_id, force_download=True) def _request_build_cancel(self, planet_id: int, bitem: XNPlanetBuildingItem): logger.debug('Request to cancel build: {0} on planet {1}, remove_link = [{2}]'.format( bitem.name, planet_id, bitem.remove_link)) if bitem.is_building_item or bitem.is_research_item or bitem.is_researchfleet_item: if bitem.remove_link is None or (bitem.remove_link == ''): logger.warn('bitem remove_link is empty, cannot cancel build!') return # construct page name and referer # successful request to cancel build item redirects to buildings page page_name = None referer = '' if bitem.is_building_item: page_name = 'buildings_{0}'.format(planet_id) referer = '?set=buildings' elif bitem.is_research_item: page_name = 'research_{0}'.format(planet_id) referer = '?set=buildings&mode=research' elif bitem.is_researchfleet_item: page_name = 'researchfleet_{0}'.format(planet_id) referer = '?set=buildings&mode=research_fleet' # send request self._get_page_url(page_name, bitem.remove_link, max_cache_lifetime=0, force_download=True, referer=referer) else: logger.warn('Cannot cancel shipyard item: {0}'.format(bitem)) def _request_build_dismantle(self, planet_id: int, bitem: XNPlanetBuildingItem): logger.debug('Request to downgrade building: {0} on planet {1}, dismantle_link = [{2}]'.format( bitem.name, planet_id, bitem.dismantle_link)) if bitem.is_building_item: if bitem.dismantle_link is None or (bitem.dismantle_link == ''): logger.warn('bitem dismantle_link is empty, cannot dismantle build!') return # construct page name and referer # successful request to cancel build item redirects to buildings page page_name = 'buildings_{0}'.format(planet_id) referer = '?set=buildings' # send request self._get_page_url(page_name, bitem.dismantle_link, max_cache_lifetime=0, force_download=True, referer=referer) else: logger.warn('Can only dismantle buildings items! 
bitem={0}'.format(bitem)) # internal, called from thread on first load def _full_refresh(self): logger.info('thread: starting full world update') # full refresh always downloads all pages, ignoring cache self.lock() self._world_is_loading = True # load all pages that contain useful information load_progress_percent = 0 load_progress_step = 5 pages_list = ['techtree', 'overview', 'imperium', 'fleet'] pages_maxtime = [3600, 60, 60, 60] # pages' expiration time in cache for i in range(0, len(pages_list)): page_name = pages_list[i] page_time = pages_maxtime[i] self.world_load_progress.emit(page_name, load_progress_percent) self._get_page(page_name, max_cache_lifetime=page_time, force_download=False) self.msleep(100) # delay before requesting next page load_progress_percent += load_progress_step # # additionally request user info page, constructed as: # http://uni4.xnova.su/?set=players&id=71995 # This need overview parser to parse and fetch account id self.world_load_progress.emit('self_user_info', load_progress_percent) self._get_page('self_user_info', max_cache_lifetime=60, force_download=False) load_progress_percent += load_progress_step # # download all planets info load_progress_left = 100 - load_progress_percent if len(self._planets) > 0: load_progress_step = load_progress_left // len(self._planets) else: load_progress_step = 1 for pl in self._planets: self.world_load_progress.emit(self.tr('Planet') + ' ' + pl.name, load_progress_percent) load_progress_percent += load_progress_step # planet image self._download_image(pl.pic_url) self.msleep(100) # wait 100 ms # all other planet items self._download_planet(pl.planet_id, delays_msec=100, force_download=True) self.msleep(100) # restore original current planet that was before full world refresh # because world refresh changes it by loading every planet logger.info('Restoring current planet to #{0} ({1})'.format(self._cur_planet_id, self._cur_planet_name)) self._download_planet_overview(self._cur_planet_id, force_download=True) self._world_is_loading = False self.unlock() # unlock before emitting any signal, just for a case... 
        #
        # signal main window that we finished initial loading
        self.world_load_complete.emit()

    @staticmethod
    def _gettid():
        sip_voidptr = QThread.currentThreadId()
        return int(sip_voidptr)

    def _gettid_s(self):
        """
        Get thread ID as descriptive string
        :return: 'gui' if called from main GUI thread, 'network' if from net bg thread
        """
        tid = self._gettid()
        if tid == self._maintid:
            return 'gui'
        if tid == self._worldtid:
            return 'network'
        return 'unknown_' + str(tid)

    def run(self):
        """
        Main thread function, lives in Qt event loop to receive/send Qt events
        :return: cannot return any value, including None
        """
        self._worldtid = self._gettid()
        # start new life from full downloading of current server state
        self._full_refresh()
        ret = -1
        while ret != self.SIGNAL_QUIT:
            # logger.debug('thread: entering Qt event loop, tid={0}'.format(self._worldtid))
            ret = self.exec()  # enter Qt event loop to receive events
            # logger.debug('thread: Qt event loop ended with code {0}'.format(ret))
            # parse event loop's return value
            if ret == self.SIGNAL_QUIT:
                break
            if ret == self.SIGNAL_RELOAD_PAGE:
                self.on_signal_reload_page()
            elif ret == self.SIGNAL_RENAME_PLANET:
                self.on_signal_rename_planet()
            elif ret == self.SIGNAL_RELOAD_PLANET:
                self.on_signal_reload_planet()
            elif ret == self.SIGNAL_BUILD_ITEM:
                self.on_signal_build_item()
            elif ret == self.SIGNAL_BUILD_CANCEL:
                self.on_signal_build_cancel()
            elif ret == self.SIGNAL_BUILD_DISMANTLE:
                self.on_signal_build_dismantle()
            elif ret == self.SIGNAL_GET_URL:
                self.on_signal_get_url()
            elif ret == self.SIGNAL_TEST_PARSE_GALAXY:
                self.on_signal_test_parse_galaxy()
            #
            # clear signal arguments after handler
            self._signal_kwargs = dict()
        logger.debug('thread: exiting.')


# only one instance of XNovaWorld should be!
# well, there may be others, but for coordination it should be one
_singleton_XNovaWorld = None


# Factory
# Serves as singleton entry-point to get world class instance
def XNovaWorld_instance() -> XNovaWorld:
    global _singleton_XNovaWorld
    if not _singleton_XNovaWorld:
        _singleton_XNovaWorld = XNovaWorld()
    return _singleton_XNovaWorld
minlexx/xnovacmd
ui/xnova/xn_world.py
Python
gpl-2.0
61,963
[ "Galaxy" ]
3602cfcbdb74608d26ff465350429bb26ae1abc17b783faeec3e0ed2a026fa0c
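The xn_world.py record above ends with a module-level singleton factory (XNovaWorld_instance). Below is a minimal self-contained sketch of that pattern; the World class and its attribute are hypothetical stand-ins for the real XNovaWorld, which needs a full Qt environment to construct.

_singleton_instance = None


class World:
    """Hypothetical stand-in for XNovaWorld (a QThread subclass in the record)."""
    def __init__(self):
        self.planets = []


def world_instance() -> World:
    """Create the World on first call; every later call returns the same object."""
    global _singleton_instance
    if not _singleton_instance:
        _singleton_instance = World()
    return _singleton_instance


a = world_instance()
b = world_instance()
assert a is b  # all callers share one coordinating instance

Keeping the instance behind a factory, instead of importing a bare global, keeps construction lazy and routes all coordination through one object, which matters here because the world owns a single background network thread.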
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from .errors import GitQLError


class NodeVisitor(object):
    @staticmethod
    def get_method_name(node):
        name = type(node).__name__.lower()  # lower() makes PEP happy.
        if name.endswith('node'):
            name = name[:-4]
        return 'visit_' + name

    def visit(self, node):
        name = self.get_method_name(node)
        visit_fn = getattr(self, name, self.visit_unknown)
        return visit_fn(node)

    def visit_unknown(self, node):
        name = self.get_method_name(node)
        raise GitQLError('No {} method.'.format(name))
mackong/gitql
gitql/visitor.py
Python
mit
612
[ "VisIt" ]
2c4ff6ea6bb04e8a31613cb697b57eacb2807e78e9575cde2e77100386da975e
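The NodeVisitor above dispatches on the node's class name: lowercase it, strip a trailing 'node', and look up a visit_<name> method. A runnable toy interpreter using the same rule; the node classes here are invented for illustration.

class NumberNode:
    def __init__(self, value):
        self.value = value


class AddNode:
    def __init__(self, left, right):
        self.left, self.right = left, right


class Evaluator:
    def visit(self, node):
        # same dispatch rule as NodeVisitor: lowercase the class name,
        # strip a trailing 'node', prepend 'visit_'
        name = type(node).__name__.lower()
        if name.endswith('node'):
            name = name[:-4]
        handler = getattr(self, 'visit_' + name, self.visit_unknown)
        return handler(node)

    def visit_number(self, node):
        return node.value

    def visit_add(self, node):
        return self.visit(node.left) + self.visit(node.right)

    def visit_unknown(self, node):
        raise TypeError('No handler for {}'.format(type(node).__name__))


print(Evaluator().visit(AddNode(NumberNode(2), NumberNode(3))))  # prints 5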
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from unittest import TestCase
from os.path import join, dirname, abspath
import logging

import pandas.testing as pdt
import numpy.testing as npt

import calour as ca


class Tests(TestCase):
    def setUp(self):
        # disable logging; otherwise, the tests will print all the
        # logging in the functions
        logging.disable(logging.CRITICAL)
        test_data_dir = join(dirname(abspath(__file__)), 'tests', 'data')
        self.test_data_dir = test_data_dir
        # a simple artificial biom table 21 sample x 12 feature
        self.test1_biom = join(test_data_dir, 'test1.biom')
        self.test1_samp = join(test_data_dir, 'test1.sample')
        self.test1_feat = join(test_data_dir, 'test1.feature')
        # a simpler artificial data set 9 sample x 8 feature
        self.test2_biom = join(test_data_dir, 'test2.biom')
        self.test2_samp = join(test_data_dir, 'test2.sample')
        self.test2_feat = join(test_data_dir, 'test2.feature')
        # a dense timeseries (real data)
        self.timeseries_biom = join(test_data_dir, 'timeseries.biom')
        self.timeseries_samp = join(test_data_dir, 'timeseries.sample')
        # a simple openms bucket table csv file
        self.openms_csv = join(test_data_dir, 'openms_bucket_table.csv')
        # a simple mzmine2 output table
        self.mzmine2_csv = join(test_data_dir, 'mzmine2_table.csv')
        # a simple mzmine2 output table with sampleids containing additional info separated by '_'
        self.mzmine2_with_idstr_csv = join(test_data_dir, 'mzmine2_table_with_idstr.csv')
        # a simple openms bucket table csv file with samples as rows
        self.openms_samples_rows_csv = join(test_data_dir, 'openms_bucket_table_samples_rows.csv')
        # a simple gnps data file for ms1 test data
        self.ms1_gnps = join(test_data_dir, 'ms1.gnps.txt')
        # the gnps exported data table
        self.gnps_table = join(test_data_dir, 'gnps_table.txt')
        # a metabolomics biom table with MZ_RT in feature id,
        # linked to same gnps_clusterinfo file as the gnps_table
        self.ms_biom_table = join(test_data_dir, 'ms_biom_table.txt')
        # the gnps exported mapping file
        self.gnps_map = join(test_data_dir, 'gnps_map.txt')
        # the gnps per-metabolite info table
        # (from clusterinfosummarygroup_attributes_withIDs_arbitraryattributes/XXX.tsv)
        self.gnps_cluster_info = join(test_data_dir, 'gnps_clusterinfosummarygroup.txt')
        # a fasta file for testing the AmpliconExperiment
        self.seqs1_fasta = join(test_data_dir, 'seqs1.fasta')
        # a qiime2 non-hashed biom table artifact
        self.qiime2table = join(test_data_dir, 'feature-table.qza')
        # a qiime2 dataset with hashed biom table, rep-seqs and taxonomy
        self.q2_cfs_table = join(test_data_dir, 'cfs-table.qza')
        self.q2_cfs_map = join(test_data_dir, 'cfs-map.txt')
        self.q2_cfs_repseqs = join(test_data_dir, 'cfs-rep-seqs.qza')
        self.q2_cfs_taxonomy = join(test_data_dir, 'cfs-taxonomy.qza')
        # An experiment used to create the ratio experiment using from_exp()
        self.rat_pre_biom = join(test_data_dir, 'ratio_exp_pre_table.biom')
        self.rat_pre_samp = join(test_data_dir, 'ratio_exp_pre_sample_metadata.txt')
        # A ratio experiment table created from the ratio_pre experiment
        self.rat_biom = join(test_data_dir, 'ratio-exp.biom')
        self.rat_samp = join(test_data_dir, 'ratio-exp_sample_metadata.txt')

    def assert_experiment_equal(self, exp1, exp2, check_history=False, almost_equal=True,
                                ignore_md_fields=('_calour_original_abundance',)):
        '''Test if two experiments are equal

        Parameters
        ----------
        exp1 : Experiment
        exp2 : Experiment
        check_history : bool, optional
            False (default) to skip testing the command history,
            True to compare also the command history
        almost_equal : bool, optional
            True (default) to test for almost identical,
            False to test the data matrix for exact identity
        ignore_md_fields : tuple of str or None
            list of metadata fields to ignore in the comparison.
            Default is ignoring the original read count (when sample loaded)
        '''
        self.assertIsInstance(exp1, ca.Experiment, 'exp1 not a calour Experiment class')
        self.assertIsInstance(exp2, ca.Experiment, 'exp2 not a calour Experiment class')
        # test the metadata; take the union of columns over both experiments
        sample_columns = exp1.sample_metadata.columns.union(exp2.sample_metadata.columns)
        feature_columns = exp1.feature_metadata.columns.union(exp2.feature_metadata.columns)
        if ignore_md_fields is not None:
            for cignore in ignore_md_fields:
                if cignore in sample_columns:
                    sample_columns = sample_columns.delete(sample_columns.get_loc(cignore))
                if cignore in feature_columns:
                    feature_columns = feature_columns.delete(feature_columns.get_loc(cignore))
        self.assertEqual(len(sample_columns.difference(exp1.sample_metadata.columns)), 0)
        self.assertEqual(len(sample_columns.difference(exp2.sample_metadata.columns)), 0)
        self.assertEqual(len(feature_columns.difference(exp1.feature_metadata.columns)), 0)
        self.assertEqual(len(feature_columns.difference(exp2.feature_metadata.columns)), 0)
        pdt.assert_frame_equal(exp1.feature_metadata[feature_columns],
                               exp2.feature_metadata[feature_columns])
        pdt.assert_frame_equal(exp1.sample_metadata[sample_columns],
                               exp2.sample_metadata[sample_columns])
        # test the data
        if almost_equal:
            dat1 = exp1.get_data(sparse=False, copy=True)
            dat2 = exp2.get_data(sparse=False, copy=True)
            npt.assert_array_almost_equal(dat1, dat2)
        else:
            npt.assert_array_equal(exp1.data, exp2.data)
        if check_history:
            if not exp1._call_history == exp2._call_history:
                raise AssertionError('histories are different between exp1 and exp2')
RNAer/Calour
calour/_testing.py
Python
bsd-3-clause
6,427
[ "OpenMS" ]
80e312bcdf3e5fc51764e2df426efa2d352460c0d9bc789da5b3dfd774f56f6d
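assert_experiment_equal() switches between numpy's almost-equal and exact array comparison via the almost_equal flag. A tiny demonstration of why that matters for float data matrices:

import numpy as np
import numpy.testing as npt

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = a + 1e-9  # numerically negligible rounding difference

npt.assert_array_almost_equal(a, b)   # passes (default tolerance: 6 decimals)
try:
    npt.assert_array_equal(a, b)      # exact identity fails
except AssertionError:
    print('exact comparison rejects the rounding difference')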
""" Neuron Models """ import numpy as np def HodgekinHuxley(x, param): """ Computes the differential of the HH states given the current states and the model parameters :param x: Current state [v(t); m(t); n(t); h(t)] :type x: np.array :param param: Hodgkin-Huxley model parameters [I(t), gNa, gK, gL, ENa, EK, EL, C] :type param: np.array :return: dx/dt (differential of state) [(v_dt(t),m_dt(t),n_dt(t),h_dt(t))] :rtype: np.array param[]: | I(t): Total membane current per unit area | gNa: Sodium conductance per unit area | gK: Potassium conductance per unit area | gL: Leakage conductance per unit area | ENa: Sodium reversal potential | EK: Potassium reversal potential | EL: Leakage reversal potential | C: Membrane capacitance per unit area """ v, m, n, h = x I, gNa, gK, gL, ENa, EK, EL, C = param alpha_m = (2.5 - 0.1 * v) / (np.exp(2.5 - 0.1 * v) - 1.0) alpha_n = (0.1 - 0.01 * v) / (np.exp(1.0 - 0.1 * v) - 1.0) alpha_h = 0.07 * np.exp(-v / 20.0) beta_m = 4.0 * np.exp(-v / 18.0) beta_n = 0.125 * np.exp(-v / 80.0) beta_h = 1.0 / (np.exp(3.0 - 0.1 * v) + 1.0) mdot = (alpha_m * (1 - m) - beta_m * m) ndot = (alpha_n * (1 - n) - beta_n * n) hdot = (alpha_h * (1 - h) - beta_h * h) sigmaIk = gNa * (m ** 3) * h * (v - ENa) + gK * (n ** 4) * (v - EK) + gL * (v - EL) vdot = (-sigmaIk + I) / C return np.array([vdot, mdot, ndot, hdot])
cpsnowden/ComputationalNeurodynamics
HH_Braitenberg/NeuronModels.py
Python
gpl-3.0
1,536
[ "NEURON" ]
882cbea445d9a89fe5f73b10c5a314178323fd519da6e9488ae40131e6322b64
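HodgekinHuxley() only returns dx/dt; producing a voltage trace still requires an integrator. Below is a minimal forward-Euler sketch that such a derivative function can be plugged into; the commented-out HH call and its parameter values are illustrative assumptions, not taken from the record.

import numpy as np


def euler(f, x0, param, dt, steps):
    """Fixed-step forward Euler: x_{k+1} = x_k + dt * f(x_k, param)."""
    xs = [np.asarray(x0, dtype=float)]
    for _ in range(steps):
        xs.append(xs[-1] + dt * f(xs[-1], param))
    return np.array(xs)


# Smoke test on dx/dt = -x, which decays toward 0. HodgekinHuxley from the
# record plugs in the same way, e.g. (illustrative parameter values):
#   trace = euler(HodgekinHuxley, [0.0, 0.05, 0.32, 0.6],
#                 [10.0, 120.0, 36.0, 0.3, 115.0, -12.0, 10.6, 1.0],
#                 dt=0.01, steps=5000)
#   # trace[:, 0] is the membrane potential over time
decay = euler(lambda x, p: -x, [1.0], None, dt=0.1, steps=10)
print(decay[-1])  # ~0.35, close to exp(-1)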
""" ComponentMonitoring class is a front-end to the Component monitoring Database """ import random from DIRAC import gConfig, S_OK, S_ERROR from DIRAC.Core.Base.DB import DB from DIRAC.Core.Utilities import Time, List, Network __RCSID__ = "$Id$" class ComponentMonitoringDB(DB): def __init__(self, requireVoms=False, useMyProxy=False): """ c'tor Initialize the DB """ DB.__init__(self, 'ComponentMonitoringDB', 'Framework/ComponentMonitoringDB') random.seed() retVal = self.__initializeDB() if not retVal['OK']: raise Exception("Can't create tables: %s" % retVal['Message']) self.__optionalFields = ('startTime', 'cycles', 'version', 'queries', 'DIRACVersion', 'description', 'platform') self.__mainFields = ("Id", "Setup", "Type", "ComponentName", "Host", "Port", "StartTime", "LastHeartbeat", "cycles", "queries", "LoggingState") self.__versionFields = ('VersionTimestamp', 'Version', 'DIRACVersion', 'Platform', 'Description') def getOptionalFields(self): return self.__optionalFields def __getTableName(self, name): return "compmon_%s" % name def __initializeDB(self): """ Create the tables """ retVal = self._query("show tables") if not retVal['OK']: return retVal tablesInDB = [t[0] for t in retVal['Value']] tablesD = {} tN = self.__getTableName("Components") if tN not in tablesInDB: tablesD[tN] = {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL', 'ComponentName': 'VARCHAR(255) NOT NULL', 'Setup': 'VARCHAR(255) NOT NULL', 'Type': 'ENUM ( "service", "agent" ) NOT NULL', 'Host': 'VARCHAR(255) NOT NULL', 'Port': 'INTEGER DEFAULT 0', 'LastHeartbeat': 'DATETIME NOT NULL', 'StartTime': 'DATETIME NOT NULL', 'LoggingState': 'VARCHAR(64) DEFAULT "unknown"', 'Cycles': 'INTEGER', 'Queries': 'INTEGER' }, 'PrimaryKey': 'Id', 'Indexes': {'ComponentIndex': ['ComponentName', 'Setup', 'Host', 'Port'], 'TypeIndex': ['Type'], } } tN = self.__getTableName("VersionHistory") if tN not in tablesInDB: tablesD[tN] = {'Fields': {'CompId': 'INTEGER NOT NULL', 'VersionTimestamp': 'DATETIME NOT NULL', 'Version': 'VARCHAR(255)', 'DIRACVersion': 'VARCHAR(255) NOT NULL', 'Platform': 'VARCHAR(255) NOT NULL', 'Description': 'BLOB', }, 'Indexes': {'Component': ['CompId']} } return self._createTables(tablesD) def __datetime2str(self, dt): if isinstance(dt, basestring): return dt return "%s-%s-%s %s:%s:%s" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def __registerIfNotThere(self, compDict): """ Register the component if it's not there """ sqlCond = [] sqlInsertFields = [] sqlInsertValues = [] tableName = self.__getTableName("Components") for field in ('componentName', 'setup', 'type', 'host', 'port'): if field not in compDict: if field == 'port': continue return S_ERROR("Missing %s field in the component dict" % field) value = compDict[field] field = field.capitalize() sqlInsertFields.append(field) sqlInsertValues.append("'%s'" % value) sqlCond.append("%s = '%s'" % (field, value)) compLogName = ":".join(sqlInsertValues).replace("'", "") self.log.info("Trying to register %s" % compLogName) result = self._query("SELECT id FROM `%s` WHERE %s" % (tableName, " AND ".join(sqlCond))) if not result['OK']: self.log.error("Cannot register component", "%s: %s" % (compLogName, result['Message'])) return result if len(result['Value']): compId = result['Value'][0][0] self.log.info("%s has compId %s" % (compLogName, compId)) return S_OK(compId) # It's not there, we just need to insert it sqlInsertFields.append("LastHeartbeat") sqlInsertValues.append("UTC_TIMESTAMP()") if 'startTime' in compDict: sqlInsertFields.append("StartTime") 
val = compDict['startTime'] if type(val) in Time._allDateTypes: val = self.__datetime2str(val) sqlInsertValues.append("'%s'" % val) for field in ('cycles', 'queries'): if field not in compDict: compDict[field] = 0 value = compDict[field] field = field.capitalize() sqlInsertFields.append(field) sqlInsertValues.append(str(value)) self.log.info("Registering component %s" % compLogName) result = self._update("INSERT INTO `%s` ( %s ) VALUES ( %s )" % (tableName, ", ".join(sqlInsertFields), ", ".join(sqlInsertValues))) if not result['OK']: return result compId = result['lastRowId'] self.log.info("%s has compId %s" % (compLogName, compId)) return S_OK(compId) def __updateVersionHistoryIfNeeded(self, compId, compDict): """ Register the component if it's not there """ sqlCond = ["CompId=%s" % compId] sqlInsertFields = [] sqlInsertValues = [] tableName = self.__getTableName("VersionHistory") for field in ('version', 'DIRACVersion', 'platform'): if field not in compDict: return S_ERROR("Missing %s field in the component dict" % field) value = compDict[field] field = field.capitalize() sqlInsertFields.append(field) sqlInsertValues.append("'%s'" % value) sqlCond.append("%s = '%s'" % (field, value)) result = self._query("SELECT CompId FROM `%s` WHERE %s" % (tableName, " AND ".join(sqlCond))) if not result['OK']: return result if len(result['Value']): return S_OK(compId) # It's not there, we just need to insert it sqlInsertFields.append('CompId') sqlInsertValues.append(str(compId)) sqlInsertFields.append('VersionTimestamp') sqlInsertValues.append('UTC_TIMESTAMP()') if 'description' in compDict: sqlInsertFields.append("Description") result = self._escapeString(compDict['description']) if not result['OK']: return result sqlInsertValues.append(result['Value']) result = self._update("INSERT INTO `%s` ( %s ) VALUES ( %s )" % (tableName, ", ".join(sqlInsertFields), ", ".join(sqlInsertValues))) if not result['OK']: return result return S_OK(compId) def registerComponent(self, compDict, shallow=False): """ Register a new component in the DB. 
If it's already registered return id """ result = self.__registerIfNotThere(compDict) if not result['OK']: return result compId = result['Value'] if shallow: return S_OK(compId) # Check if something has changed in the version history result = self.__updateVersionHistoryIfNeeded(compId, compDict) if not result['OK']: return result return S_OK(compId) def heartbeat(self, compDict): """ Update heartbeat """ if 'compId' not in compDict: result = self.__registerIfNotThere(compDict) if not result['OK']: return result compId = result['Value'] compDict['compId'] = compId sqlUpdateFields = ['LastHeartbeat=UTC_TIMESTAMP()'] for field in ('cycles', 'queries'): value = 0 if field in compDict: value = compDict[field] sqlUpdateFields.append("%s=%s" % (field.capitalize(), value)) if 'startTime' in compDict: sqlUpdateFields.append("StartTime='%s'" % self.__datetime2str(compDict['startTime'])) return self._update("UPDATE `%s` SET %s WHERE Id=%s" % (self.__getTableName("Components"), ", ".join(sqlUpdateFields), compDict['compId'])) def __getComponents(self, condDict): """ Load the components in the DB """ compTable = self.__getTableName("Components") mainFields = ", ".join(self.__mainFields) versionTable = self.__getTableName("VersionHistory") versionFields = ", ".join(self.__versionFields) sqlWhere = [] for field in condDict: val = condDict[field] if isinstance(val, basestring): sqlWhere.append("%s='%s'" % (field, val)) elif isinstance(val, (int, long, float)): sqlWhere.append("%s='%s'" % (field, val)) else: sqlWhere.append("( %s )" % " OR ".join(["%s='%s'" % (field, v) for v in val])) if sqlWhere: sqlWhere = "WHERE %s" % " AND ".join(sqlWhere) else: sqlWhere = "" result = self._query("SELECT %s FROM `%s` %s" % (mainFields, compTable, sqlWhere)) if not result['OK']: return result records = [] dbData = result['Value'] for record in dbData: rD = {} for i in range(len(self.__mainFields)): rD[self.__mainFields[i]] = record[i] result = self._query( "SELECT %s FROM `%s` WHERE CompId=%s ORDER BY VersionTimestamp DESC LIMIT 1" % (versionFields, versionTable, rD['Id'])) if not result['OK']: return result if len(result['Value']) > 0: versionRec = result['Value'][0] for i in range(len(self.__versionFields)): rD[self.__versionFields[i]] = versionRec[i] del(rD['Id']) records.append(rD) return S_OK(StatusSet(records)) def __checkCondition(self, condDict, field, value): if field not in condDict: return True condVal = condDict[field] if isinstance(condVal, (list, tuple)): return value in condVal return value == condVal def __getComponentDefinitionFromCS(self, system, setup, instance, cType, component): componentName = "%s/%s" % (system, component) compDict = {'ComponentName': componentName, 'Type': cType, 'Setup': setup } componentSection = "/Systems/%s/%s/%s/%s" % (system, instance, "%ss" % cType.capitalize(), component) compStatus = gConfig.getValue("%s/Status" % componentSection, 'Active') if compStatus.lower() in ("inactive", ): compDict['Status'] = compStatus.lower().capitalize() if cType == 'service': result = gConfig.getOption("%s/Port" % componentSection) if not result['OK']: compDict['Status'] = 'Error' compDict['Message'] = "Component seems to be defined wrong in CS: %s" % result['Message'] return compDict try: compDict['Port'] = int(result['Value']) except BaseException: compDict['Status'] = 'Error' compDict['Message'] = "Port for component doesn't seem to be a number" return compDict return compDict def __componentMatchesCondition(self, compDict, requiredComponents, conditionDict={}): for key in compDict: if 
not self.__checkCondition(conditionDict, key, compDict[key]): return False return True def getComponentsStatus(self, conditionDict={}): """ Get the status of the defined components in the CS compared to the ones that are known in the DB """ result = self.__getComponents(conditionDict) if not result['OK']: return result statusSet = result['Value'] requiredComponents = {} result = gConfig.getSections("/DIRAC/Setups") if not result['OK']: return result for setup in result['Value']: if not self.__checkCondition(conditionDict, "Setup", setup): continue # Iterate through systems result = gConfig.getOptionsDict("/DIRAC/Setups/%s" % setup) if not result['OK']: return result systems = result['Value'] for system in systems: instance = systems[system] # Check defined agents and serviecs for cType in ('agent', 'service'): # Get entries for the instance of a system result = gConfig.getSections("/Systems/%s/%s/%s" % (system, instance, "%ss" % cType.capitalize())) if not result['OK']: self.log.warn( "Opps, sytem seems to be defined wrong\n", "System %s at %s: %s" % (system, instance, result['Message'])) continue components = result['Value'] for component in components: componentName = "%s/%s" % (system, component) compDict = self.__getComponentDefinitionFromCS(system, setup, instance, cType, component) if self.__componentMatchesCondition(compDict, requiredComponents, conditionDict): statusSet.addUniqueToSet(requiredComponents, compDict) # Walk the URLs result = gConfig.getOptionsDict("/Systems/%s/%s/URLs" % (system, instance)) if not result['OK']: self.log.warn("There doesn't to be defined the URLs section for %s in %s instance" % (system, instance)) else: serviceURLs = result['Value'] for service in serviceURLs: for url in List.fromChar(serviceURLs[service]): loc = url[url.find("://") + 3:] iS = loc.find("/") componentName = loc[iS + 1:] loc = loc[:iS] hostname, port = loc.split(":") compDict = {'ComponentName': componentName, 'Type': 'service', 'Setup': setup, 'Host': hostname, 'Port': int(port) } if self.__componentMatchesCondition(compDict, requiredComponents, conditionDict): statusSet.addUniqueToSet(requiredComponents, compDict) # WALK THE DICT statusSet.setComponentsAsRequired(requiredComponents) return S_OK((statusSet.getRequiredComponents(), self.__mainFields[1:] + self.__versionFields + ('Status', 'Message'))) class StatusSet: def __init__(self, dbRecordsList=[]): self.__requiredSet = {} self.__requiredFields = ('Setup', 'Type', 'ComponentName') self.__maxSecsSinceHeartbeat = 600 self.setDBRecords(dbRecordsList) def setDBRecords(self, recordsList): self.__dbSet = {} for record in recordsList: cD = self.walkSet(self.__dbSet, record) cD.append(record) return S_OK() def addUniqueToSet(self, setDict, compDict): rC = self.walkSet(setDict, compDict) if compDict not in rC: rC.append(compDict) inactive = False for cD in rC: if 'Status' in cD and cD['Status'] == 'Inactive': inactive = True break if inactive: for cD in rC: cD['Status'] = 'Inactive' def walkSet(self, setDict, compDict, createMissing=True): sD = setDict for field in self.__requiredFields: val = compDict[field] if val not in sD: if not createMissing: return None if field == self.__requiredFields[-1]: sD[val] = [] else: sD[val] = {} sD = sD[val] return sD def __reduceComponentList(self, componentList): """ Only keep the most restrictive components """ for i in range(len(componentList)): component = componentList[i] for j in range(len(componentList)): if i == j or componentList[j] is False: continue potentiallyMoreRestrictiveComponent = 
componentList[j] match = True for key in component: if key not in potentiallyMoreRestrictiveComponent: match = False break if key == 'Host': result = Network.checkHostsMatch(component[key], potentiallyMoreRestrictiveComponent[key]) if not result['OK'] or not result['Value']: match = False break else: if component[key] != potentiallyMoreRestrictiveComponent[key]: match = False break if match: componentList[i] = False break return [comp for comp in componentList if comp] def setComponentsAsRequired(self, requiredSet): for setup in requiredSet: for cType in requiredSet[setup]: for name in requiredSet[setup][cType]: # Need to narrow down required cDL = requiredSet[setup][cType][name] cDL = self.__reduceComponentList(cDL) self.__setComponentListAsRequired(cDL) def __setComponentListAsRequired(self, compDictList): dbD = self.walkSet(self.__dbSet, compDictList[0], createMissing=False) if not dbD: self.__addMissingDefinedComponents(compDictList) return S_OK() self.__addFoundDefinedComponent(compDictList) return S_OK() def __addMissingDefinedComponents(self, compDictList): cD = self.walkSet(self.__requiredSet, compDictList[0]) for compDict in compDictList: compDict = self.__setStatus(compDict, 'Error', "Component is not up or hasn't connected to register yet") cD.append(compDict) def __setStatus(self, compDict, status, message=False): if 'Status' in compDict: return compDict compDict['Status'] = status if message: compDict['Message'] = message return compDict def __addFoundDefinedComponent(self, compDictList): cD = self.walkSet(self.__requiredSet, compDictList[0]) dbD = self.walkSet(self.__dbSet, compDictList[0]) now = Time.dateTime() unmatched = compDictList for dbComp in dbD: if 'Status' not in dbComp: self.__setStatus(dbComp, 'OK') if dbComp['Type'] == "service": if 'Port' not in dbComp: self.__setStatus(dbComp, 'Error', "Port is not defined") elif dbComp['Port'] not in [compDict['Port'] for compDict in compDictList if 'Port' in compDict]: self.__setStatus(compDict, 'Error', "Port (%s) is different that specified in the CS" % dbComp['Port']) elapsed = now - dbComp['LastHeartbeat'] elapsed = elapsed.days * 86400 + elapsed.seconds if elapsed > self.__maxSecsSinceHeartbeat: self.__setStatus(dbComp, "Error", "Last heartbeat was received at %s (%s secs ago)" % (dbComp['LastHeartbeat'], elapsed)) cD.append(dbComp) # See if we have a perfect match newUnmatched = [] for unmatchedComp in unmatched: perfectMatch = True for field in unmatchedComp: if field in ('Status', 'Message'): continue if field not in dbComp: perfectMatch = False continue if field == 'Host': result = Network.checkHostsMatch(unmatchedComp[field], dbComp[field]) if not result['OK'] or not result['Value']: perfectMatch = False else: if unmatchedComp[field] != dbComp[field]: perfectMatch = False if not perfectMatch: newUnmatched.append(unmatchedComp) unmatched = newUnmatched for unmatchedComp in unmatched: self.__setStatus(unmatchedComp, "Error", "There is no component up with this properties") cD.append(unmatchedComp) def getRequiredComponents(self): return self.__requiredSet
fstagni/DIRAC
FrameworkSystem/DB/ComponentMonitoringDB.py
Python
gpl-3.0
20,017
[ "DIRAC" ]
37d61e09ac239022d15124271270bc5a2e95be579348c5de69206d83b99aa4fb
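__registerIfNotThere() in the record implements a common select-then-insert ("register if absent") pattern against MySQL. A generic sketch of the same idea using sqlite3 with bound parameters; the table and column names here are invented, and the real class builds its SQL strings by hand instead.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE components ('
             'id INTEGER PRIMARY KEY AUTOINCREMENT, '
             'name TEXT, setup TEXT, host TEXT, port INTEGER, '
             'UNIQUE(name, setup, host, port))')


def register_if_not_there(name, setup, host, port):
    # first look the component up by its identifying tuple
    row = conn.execute(
        'SELECT id FROM components WHERE name=? AND setup=? AND host=? AND port=?',
        (name, setup, host, port)).fetchone()
    if row:
        return row[0]  # already registered, reuse its id
    # not there: insert and return the new row id
    cur = conn.execute(
        'INSERT INTO components (name, setup, host, port) VALUES (?, ?, ?, ?)',
        (name, setup, host, port))
    return cur.lastrowid


first = register_if_not_there('Framework/Monitoring', 'Prod', 'host1', 9130)
again = register_if_not_there('Framework/Monitoring', 'Prod', 'host1', 9130)
assert first == again  # second registration is a no-op returning the same id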
#
# ast_body.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.

from pynestml.meta_model.ast_node import ASTNode


class ASTBody(ASTNode):
    """
    This class is used to store the body of a neuron, an object containing all the definitions.
    ASTBody The body of the neuron, e.g. internal, state, parameter...
    Grammar:
        body : BLOCK_OPEN
               (NEWLINE | blockWithVariables | updateBlock | equationsBlock | inputBlock | outputBlock | function)*
               BLOCK_CLOSE;
    Attributes:
        bodyElements = None
    """

    def __init__(self, body_elements, source_position):
        """
        Standard constructor.
        :param body_elements: a list of elements, e.g. variable blocks.
        :type body_elements: list()
        :param source_position: the position of the element in the source model
        :rtype source_location: ASTSourceLocation
        """
        super(ASTBody, self).__init__(source_position)
        self.body_elements = body_elements

    def get_body_elements(self):
        """
        Returns the list of body elements.
        :return: a list of body elements.
        :rtype: list()
        """
        return self.body_elements

    def equals(self, other):
        """
        The equals method.
        :param other: a different object.
        :type other: object
        :return: True if equal, otherwise False.
        :rtype: bool
        """
        if not isinstance(other, ASTBody):
            return False
        if len(self.get_body_elements()) != len(other.get_body_elements()):
            return False
        my_body_elements = self.get_body_elements()
        your_body_elements = other.get_body_elements()
        for i in range(0, len(my_body_elements)):
            if not my_body_elements[i].equals(your_body_elements[i]):
                return False
        return True
kperun/nestml
pynestml/meta_model/ast_body.py
Python
gpl-2.0
2,508
[ "NEURON" ]
6ce59b41d8658a7827d49988344123a2a500993fb74610d4358971b89dc090a0
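ASTBody.equals() compares lengths first, then delegates to an element-wise equals() on each child node. A self-contained sketch of that contract, with a toy Element class standing in for real AST blocks:

class Element:
    def __init__(self, name):
        self.name = name

    def equals(self, other):
        return isinstance(other, Element) and self.name == other.name


class Body:
    def __init__(self, elements):
        self.elements = elements

    def equals(self, other):
        if not isinstance(other, Body):
            return False
        if len(self.elements) != len(other.elements):
            return False
        # pairwise structural comparison, as in ASTBody.equals()
        return all(a.equals(b) for a, b in zip(self.elements, other.elements))


a = Body([Element('state'), Element('update')])
b = Body([Element('state'), Element('update')])
print(a.equals(b))                         # True
print(a.equals(Body([Element('state')])))  # False: different lengths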
# First-Visit MC Policy Improvement with Exploring Starts
# Author: Yiheng Zhu
# Date: 13th/Dec/2017

import numpy as np
import Gridworld

########################
# Deterministic Policy #
########################

# hyperparameters
GAMMA = 0.9  # discount rate
NUM_EPISODES = 100  # number of episodes
ACTIONS = ('U', 'D', 'L', 'R')  # all possible actions


# input the gridworld environment and current policy
# output state, action and returns
def play_one_episode(gridworld, policy):
    # randomly select one starting state and one action
    valid_states = list(gridworld.actions.keys())
    # print(valid_states)
    start_state_index = np.random.choice(len(valid_states))
    # print(start_state_index)
    # print(valid_states[start_state_index])
    start_state = valid_states[start_state_index]
    gridworld.set_state(start_state)
    s = gridworld.get_state()
    a = np.random.choice(gridworld.actions[s])
    print("Starting state: ", s)  # print starting state
    print("Starting action: ", a)  # print starting action

    states_actions_rewards = [(s, a, 0)]  # a list to store s, a and r
    visited_states = set()  # prevent visiting same states
    # proceed until the episode ends
    while True:
        # old_s = gridworld.get_state()
        r = gridworld.step(a)
        s = gridworld.get_state()
        if s in visited_states:
            # prevent infinite loop/multiple visits to same state
            states_actions_rewards.append((s, None, -100))
            break
        elif gridworld.is_terminal():
            # terminal states
            states_actions_rewards.append((s, None, r))
            break
        else:
            a = policy[s]  # next action
            states_actions_rewards.append((s, a, r))
        visited_states.add(s)

    # calculate the return
    G = 0  # initialize the return, G, as 0
    states_actions_returns = []  # an empty list to store s, a and G
    first_step = True  # first step is already in the list [(s, a, 0)]
    # calculate and store returns from terminal state to start state
    for s, a, r in reversed(states_actions_rewards):
        if first_step:  # do nothing for the first step
            first_step = False
        else:  # append state and return to the list
            states_actions_returns.append((s, a, G))
        G = r + GAMMA*G  # update return
    # adjust the order of the list
    # (terminal -> start) -> (start -> terminal)
    states_actions_returns.reverse()
    return states_actions_returns


# find the key and value of the maximum value in a dictionary
def max_dict(dict_tmp):
    max_key = None
    max_val = float('-inf')
    for key, val in dict_tmp.items():
        if val > max_val:
            max_val = val
            max_key = key
    return max_key, max_val


if __name__ == '__main__':
    # create a default gridworld
    # oo -- traversable state
    # xx -- untraversable state
    # ss -- start state for agent
    # +1 -- winning state with reward +1
    # -1 -- losing state with reward -1
    #      x0   x1   x2   x3
    #    ---------------------
    # y0 | oo | oo | oo | +1 |
    #    ---------------------
    # y1 | oo | xx | oo | -1 |
    #    ---------------------
    # y2 | ss | oo | oo | oo |
    #    ---------------------
    gridworld = Gridworld.default_gridworld()

    # all traversable states
    states = gridworld.all_states()

    # draw the reward function
    print("Reward function:")
    Gridworld.draw_value_function(gridworld.rewards, gridworld)

    # initialize a random policy as a dictionary
    # for all traversable non-terminal states
    # policy = {}
    # for s in gridworld.actions.keys():
    #     policy[s] = np.random.choice(ACTIONS)

    # initial policy
    # use the same initial condition
    # to compare with value iteration
    # -----------------
    # | D | L | D | T |
    # -----------------
    # | D | X | R | T |
    # -----------------
    # | U | L | L | U |
    # -----------------
    # X -- untraversable
    # T -- terminal
    policy = {
        (0, 0): 'D',
        (0, 1): 'L',
        (0, 2): 'D',
        (1, 0): 'D',
        (1, 2): 'R',
        (2, 0): 'U',
        (2, 1): 'L',
        (2, 2): 'L',
        (2, 3): 'U',
    }

    # initialize Q(s, a) and returns
    Q = {}  # value function
    returns = {}  # returns given state and action
    for s in states:
        if s in gridworld.actions.keys():
            Q[s] = {}
            for a in ACTIONS:
                Q[s][a] = 0
                returns[(s, a)] = []
        else:
            # non-traversable states and terminal states
            pass

    # repeat until convergence
    deltas = []
    for t in range(NUM_EPISODES):
        if t % 100 == 0:
            print(t)
        # generate an episode using policy pi
        delta = 0  # biggest change
        states_actions_returns = play_one_episode(gridworld, policy)
        visited_state_action_pairs = set()
        for s, a, G in states_actions_returns:
            # first-visit MC policy evaluation
            sa = (s, a)
            if sa not in visited_state_action_pairs:
                old_q = Q[s][a]
                returns[sa].append(G)
                Q[s][a] = np.mean(returns[sa])
                delta = max(delta, np.abs(old_q - Q[s][a]))
                visited_state_action_pairs.add(sa)
        deltas.append(delta)

        # update policy
        for s in policy.keys():
            policy[s] = max_dict(Q[s])[0]  # only need the key

    # draw the final policy
    print("Final policy:")
    Gridworld.draw_policy(policy, gridworld)

    # calculate and draw the final value function
    V = {}
    for s, Qs in Q.items():
        V[s] = max_dict(Q[s])[1]
    print("Final value function:")
    Gridworld.draw_value_function(V, gridworld)
GitYiheng/reinforcement_learning_test
test03_monte_carlo/policy_improvement_exploring_starts.py
Python
mit
5,056
[ "VisIt" ]
f60be862e40ac2adb65703a448ae2ec23558e1096363a352bb8c2d92c4de55c3
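play_one_episode() computes returns by folding rewards backward with G = r + GAMMA*G, seeding G from the terminal reward. The standalone snippet below replays that loop on a hand-written episode; the states and rewards are made up for illustration.

GAMMA = 0.9

# (state, action, reward-received-on-entering-state), from start to terminal
episode = [((2, 0), 'U', 0), ((1, 0), 'U', 0), ((0, 0), 'R', 0), ((0, 3), None, 1)]

G = 0.0
returns = []
first_step = True
for s, a, r in reversed(episode):
    if first_step:
        first_step = False  # terminal entry only seeds G with its reward
    else:
        returns.append((s, a, G))
    G = r + GAMMA * G
returns.reverse()
print(returns)
# [((2, 0), 'U', 0.81), ((1, 0), 'U', 0.9), ((0, 0), 'R', 1.0)]
# the return decays by a factor of GAMMA per step away from the +1 terminal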
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize the Poiseuille flow in a lattice-Boltzmann fluid with an
external force applied.
"""

from espressomd import System, lb, shapes, lbboundaries
import numpy as np
import espressomd.visualization_opengl

required_features = ["LB_BOUNDARIES", "EXTERNAL_FORCES"]
espressomd.assert_features(required_features)

# System setup
box_l = 16
system = System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)

system.time_step = 0.01
system.cell_system.skin = 0.2

visualizer = espressomd.visualization_opengl.openGLLive(
    system,
    LB_draw_boundaries=True,
    LB_draw_velocity_plane=True,
    LB_plane_dist=8,
    LB_plane_axis=1,
    LB_vel_scale=1e2,
    LB_plane_ngrid=15,
    camera_position=[8, 16, 50],
    velocity_arrows=True,
    velocity_arrows_type_scale=[20.],
    velocity_arrows_type_radii=[0.1],
    velocity_arrows_type_colors=[[0, 1, 0]])

lbf = lb.LBFluid(kT=0, agrid=1.0, dens=1.0, visc=1.0, tau=0.1,
                 ext_force_density=[0, 0.003, 0])
system.actors.add(lbf)
system.thermostat.set_lb(LB_fluid=lbf, gamma=1.5)

# Setup boundaries
walls = [lbboundaries.LBBoundary() for k in range(2)]
walls[0].set_params(shape=shapes.Wall(normal=[1, 0, 0], dist=1.5))
walls[1].set_params(shape=shapes.Wall(normal=[-1, 0, 0], dist=-14.5))

for i in range(100):
    system.part.add(pos=np.random.random(3) * system.box_l)

for wall in walls:
    system.lbboundaries.add(wall)

visualizer.run(1)
psci2195/espresso-ffans
samples/visualization_poiseuille.py
Python
gpl-3.0
2,161
[ "ESPResSo" ]
74e2f0d3960540f482f751ac89f71436abcd5168f64d801d615aa560230caee4
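The sample drives flow along y between two walls at x = 1.5 and x = 14.5, so at steady state the profile should approach the planar Poiseuille parabola u(x) = f/(2*eta) * ((w/2)^2 - (x - c)^2), with channel width w and center c. A quick numpy sketch of that analytic reference, assuming the standard no-slip solution and the sample's parameters:

import numpy as np

force_density = 0.003   # ext_force_density[1] from the sample
viscosity = 1.0         # dynamic viscosity = visc * dens in the sample
wall_lo, wall_hi = 1.5, 14.5
center = 0.5 * (wall_lo + wall_hi)
half_width = 0.5 * (wall_hi - wall_lo)

x = np.linspace(wall_lo, wall_hi, 14)
u = force_density / (2.0 * viscosity) * (half_width**2 - (x - center)**2)
print(u.max())  # peak velocity at mid-channel, ~0.063 in LB units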
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Bio.Wise contains modules for running and processing the output of
# some of the models in the Wise2 package by Ewan Birney available from:
# ftp://ftp.ebi.ac.uk/pub/software/unix/wise2/
# http://www.ebi.ac.uk/Wise2/
#
# Bio.Wise.psw is for protein Smith-Waterman alignments
# Bio.Wise.dnal is for Smith-Waterman DNA alignments

__version__ = "$Revision: 1.16 $"

import os
import sys
import tempfile

from Bio import SeqIO


def _build_align_cmdline(cmdline, pair, output_filename, kbyte=None,
                         force_type=None, quiet=False):
    """
    >>> os.environ["WISE_KBYTE"]="300000"
    >>> _build_align_cmdline(["dnal"], ("seq1.fna", "seq2.fna"), "/tmp/output", kbyte=100000)
    'dnal -kbyte 100000 seq1.fna seq2.fna > /tmp/output'
    >>> _build_align_cmdline(["psw"], ("seq1.faa", "seq2.faa"), "/tmp/output_aa")
    'psw -kbyte 300000 seq1.faa seq2.faa > /tmp/output_aa'
    """
    cmdline = cmdline[:]

    ### XXX: force_type ignored

    if kbyte is None:
        try:
            cmdline.extend(("-kbyte", os.environ["WISE_KBYTE"]))
        except KeyError:
            pass
    else:
        cmdline.extend(("-kbyte", str(kbyte)))

    if not os.isatty(sys.stderr.fileno()):
        cmdline.append("-quiet")

    cmdline.extend(pair)
    cmdline.extend((">", output_filename))
    if quiet:
        cmdline.extend(("2>", "/dev/null"))
    cmdline_str = ' '.join(cmdline)

    return cmdline_str


def align(cmdline, pair, kbyte=None, force_type=None, dry_run=False,
          quiet=False, debug=False):
    """
    Returns a filehandle
    """
    assert len(pair) == 2

    output_file = tempfile.NamedTemporaryFile(mode='r')
    input_files = tempfile.NamedTemporaryFile(mode="w"), tempfile.NamedTemporaryFile(mode="w")

    if dry_run:
        print _build_align_cmdline(cmdline, pair, output_file.name,
                                   kbyte, force_type, quiet)
        return

    for filename, input_file in zip(pair, input_files):
        # Pipe the file through Biopython's Fasta parser/writer
        # to make sure it conforms to the Fasta standard (in particular,
        # Wise2 may choke on long lines in the Fasta file)
        records = SeqIO.parse(open(filename), 'fasta')
        SeqIO.write(records, input_file, 'fasta')
        input_file.flush()

    input_file_names = [input_file.name for input_file in input_files]
    cmdline_str = _build_align_cmdline(cmdline, input_file_names,
                                       output_file.name, kbyte, force_type,
                                       quiet)
    if debug:
        print >>sys.stderr, cmdline_str

    status = os.system(cmdline_str) >> 8
    if status > 1:
        if kbyte != 0:  # possible memory problem; could be None
            print >>sys.stderr, "INFO trying again with the linear model"
            return align(cmdline, pair, 0, force_type, dry_run, quiet, debug)
        else:
            raise OSError, "%s returned %s" % (" ".join(cmdline), status)

    return output_file


def all_pairs(singles):
    """
    Generate pairs list for all-against-all alignments

    >>> all_pairs(range(4))
    [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
    """
    pairs = []

    singles = list(singles)
    while singles:
        suitor = singles.pop(0)  # if sorted, stay sorted
        pairs.extend([(suitor, single) for single in singles])

    return pairs


def main():
    pass


def _test(*args, **keywds):
    import doctest, sys
    doctest.testmod(sys.modules[__name__], *args, **keywds)


if __name__ == "__main__":
    if __debug__:
        _test()
    main()
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/Wise/__init__.py
Python
apache-2.0
3,993
[ "Biopython" ]
4251a78b07fc6a67f1daf725837410e569609ba64d2ea360f503fa8a94108854
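all_pairs() is the driver for all-against-all runs: n inputs yield n*(n-1)/2 unordered pairs, each passed once to align(). A quick demonstration, with the function body copied from the record and placeholder filenames:

def all_pairs(singles):
    """Generate pairs list for all-against-all alignments."""
    pairs = []
    singles = list(singles)
    while singles:
        suitor = singles.pop(0)  # if sorted, stay sorted
        pairs.extend([(suitor, single) for single in singles])
    return pairs


seqs = ['a.fna', 'b.fna', 'c.fna']
print(all_pairs(seqs))
# [('a.fna', 'b.fna'), ('a.fna', 'c.fna'), ('b.fna', 'c.fna')]
# 3 files -> 3 pairwise alignments; 10 files would give 45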
# -*- coding: utf-8 -*-
#The following code assumes that Biopython is installed
from Bio.Seq import Seq
from Bio import motifs
from Bio.Alphabet import IUPAC
#imports the MG_synth helper library (motif I/O, sequence generation and scoring)
import MG_synth_lib as MGlib
#random number generation
import random
#mean and std
from numpy import mean
from numpy import std
import numpy as np
#normal dist
from scipy.stats import norm
from scipy.stats import geom
#system calls
import sys

#import math # Using numpy instead for vectorized operations

###########################################################################################

def create_COG(mode, mot):
    # generate random sequence set1 (100 seqs of length 300 bp)
    num_seqs_in_set = 100
    len_seq = 300
    geom_rvs = geom.rvs(0.75, size=num_seqs_in_set, loc=-1)  #sym2=0.75, sym3=.7. Originally 0.5
    set1 = MGlib.random_DNA(len_seq, {'A': 0.3, 'C': 0.2, 'G': 0.2, 'T': 0.3}, num_seqs_in_set)
    # sample large number of sites from motif
    pmot1 = MGlib.sample_motif(mot, num_seqs_in_set)

    if mode == "positive":
        #insert sites in sequences
        e = 0
        while (e < len(set1)):
            # edit sequence to include random site(s)
            # determine number of sites per geometric distribution
            num_sites = geom_rvs[e]
            new_sites = ""
            for j in range(0, num_sites):
                new_sites += random.choice(pmot1)
            if len(new_sites) > len_seq:
                new_sites = new_sites[:len_seq]
            set1[e] = new_sites + set1[e][len(new_sites):]
            e = e + 1

    set2 = set1
    return set2


def get_cog_type(which_one, ncutoff):
    ctype = "negative"
    if which_one >= ncutoff:
        ctype = "positive"
    return ctype


def compute_log_l(score_set, the_n_g, the_n_m, alpha):
    sumll = 0.0
    for score_list in score_set:  # for each of the 100 sequences
        #compute the sum of log likelihood of each score array
        lpd_fs = np.log(alpha*the_n_m.pdf(score_list) + (1-alpha)*the_n_g.pdf(score_list))
        sumll += sum(lpd_fs)
    return sumll


def sample(n, xs, replace=True):
    """Samples n objects from the list xs."""
    if replace:
        return [random.choice(xs) for i in range(n)]
    else:
        ys = list(xs[:])
        samp = []
        for i in range(n):
            y = random.choice(ys)
            samp.append(y)
            ys.remove(y)
        return samp


def permute_pssm(the_pssm):
    arr = the_pssm.pssm
    arr2 = the_pssm.pssm
    nums = [i for i in range(len(arr[0]))]
    pnums = sample(len(nums), nums, replace=False)
    for n in nums:
        for r in range(0, 4):
            arr2[r][n] = arr[r][pnums[n]]
    return arr2


def permute_sites(sites):
    """Permutes columns of the binding motif."""
    sites = sites[:]
    l = len(sites[0])
    p = sample(l, range(l), replace=False)
    for i, site in enumerate(sites):
        sites[i] = "".join(site[p[j]] for j in p)
    return sites


def permute_motif(cur_motif_sites):
    new_sites = permute_sites(cur_motif_sites)
    new_motif_sites = []
    for new_site in new_sites:
        new_motif_sites.append(Seq(new_site, IUPAC.unambiguous_dna))
    new_motif = motifs.create(new_motif_sites)
    new_motif.pseudocounts = 1
    new_motif.background = None
    return new_motif


def sym_permute_sites(sites):
    sites = sites[:]
    # We know CsoR is symmetrical around pos 8, so permute 0..7
    msize = len(sites[0]) - 1  # Want 16 here
    l = 8
    p_half = sample(l, range(l), replace=False)
    p = []
    for num in p_half:
        p.append(num)  # save first half
    p.append(l)
    for num in reversed(p_half):
        p.append(msize - num)  # save 2nd half
    for i, site in enumerate(sites):
        sites[i] = "".join(site[p[j]] for j in p)
    return sites


def sym_permute_motif(cur_motif_sites):
    new_sites = sym_permute_sites(cur_motif_sites)
    new_motif_sites = []
    for new_site in new_sites:
        new_motif_sites.append(Seq(new_site, IUPAC.unambiguous_dna))
    new_motif = motifs.create(new_motif_sites)
    new_motif.pseudocounts = 1
    new_motif.background = None
    return new_motif


def compute_p_val(ll_list, true_ll):
    # Sort the ll_list
    # See where the true_ll falls
    ll_list_sorted = sorted(ll_list, reverse=True)
    pos = len(ll_list) + 1
    for i in range(0, len(ll_list_sorted)):
        # Go from largest value to smallest
        val = ll_list_sorted[i]
        if val <= true_ll:
            # True ll would go here in the list
            pos = i + 1  # Since i starts counting with 0
            break
    pval = float(pos) / float((len(ll_list) + 1))
    #print "PVAL", pval, true_ll, pos, ll_list_sorted
    return pval


def main():
    ###############################################################################
    #set default parameters
    motif_filename = "CsoR.txt"      #input file
    out_filename = "cog_exp_sym2_c"  #prefix for output
    verbose = 0                      #verbose mode
    alpha = 1.0/300.0                #mixing ratio for regulated model
    rproms = 3.0                     #number of regulated promoters [prior]
    tproms = 1811.0                  #total number of promoters in genome [prior]

    # control number of cogs and number of permutations
    num_cogs = 10000
    neg_cutoff = 9900  # Cog #'s less than this are negative
    num_perms = 100
    cog_sample_size = 1000

    #verbose
    if verbose: print "Using: ", motif_filename, " as input"
    if verbose: print "Writing to (suffix): ", "[void]" if out_filename == "" else out_filename

    #open file for output
    try:
        out_file = open(out_filename + str(num_cogs) + "_s" + str(cog_sample_size) + "_p" + str(num_perms) + ".csv", "w")
    except (IOError, OSError) as file_open_exception:
        print "*** Something went wrong while opening the output file"
        print "*** Error: ", file_open_exception.errno, " - ", file_open_exception.strerror
        sys.exit()

    #compute priors
    PR = rproms/tproms  #prior probability of regulation
    PB = 1.0 - PR       #prior probability of non-regulation
    PPR = PB/PR         #prior probability ratio

    # read motif and assign 0.25 pseudocounts to PSWM
    # also assign background uniform distribution for the PSSM (default)
    mot = MGlib.read_motif(motif_filename)
    mot.pseudocounts = 1
    mot.background = None

    # save the pssm for the motif and the reverse complement
    #(so that they are not recalculated every time we invoke motif.pssm)
    pssm = mot.pssm
    rpssm = pssm.reverse_complement()

    # Save the motif itself as a list of strings for later permuting
    motif_sites = []
    num_motif_sites = len(mot.instances)
    for i in range(num_motif_sites):
        motif_sites.append(str(mot.instances[i]))

    random.seed(None)

    # Create the COGS
    all_cogs = []
    the_neg_seqs = []
    neg_cog_nums = [i for i in range(0, neg_cutoff)]
    ran_neg_cog_nums = sample(cog_sample_size, neg_cog_nums, replace=False)
    cog_file = open("seqs_sym2_" + str(num_cogs) + "_s" + str(cog_sample_size) + "_p" + str(num_perms) + ".csv", "w")
    for i in range(0, num_cogs):
        label = get_cog_type(i, neg_cutoff)
        #print "Create cog #", i, label
        cur_cog = create_COG(label, mot)
        all_cogs.append(cur_cog)
        if i in ran_neg_cog_nums:
            # A negatively regulated cog
            for s in cur_cog:
                the_neg_seqs.append(s)
                cog_file.write("%d,%s\n" % (i, s))
        else:
            for s in cur_cog:
                cog_file.write("%d,%s\n" % (i, s))
    cog_file.close()

    # compute softmax scores for sampled background sequences
    gscr = MGlib.esfmax_score_seqs(the_neg_seqs, pssm, rpssm)
    # compute softmax scores for motif sequences
    mscr = MGlib.esfmax_score_seqs(mot.instances, pssm, rpssm)

    # get normal distributions for background and motif
    mean_gscr = mean(gscr)
    std_gscr = std(gscr)
    n_g = norm(mean_gscr, std_gscr)
    mean_mscr = mean(mscr)
    std_mscr = std(mscr)
    n_m = norm(mean_mscr, std_mscr)

    smeans_file = open("smeans_stds_sym2_" + str(num_cogs) + "_s" + str(cog_sample_size) + "_p" + str(num_perms) + ".csv", "w")
    smeans_file.write("PSSM n_g,%13.10f,%13.10f\n" % (mean_gscr, std_gscr))
    smeans_file.write("PSSM n_m,%13.10f,%13.10f\n" % (mean_mscr, std_mscr))

    # Create the permuted pssm and n_m and n_g for the permutation tests
    new_pssm_list = []
    rnew_pssm_list = []
    n_m_perms = []
    n_g_perms = []
    for j in range(0, num_perms):
        #print "\n***************** Create permutation #", j
        # permute the columns of the motif
        new_mot = sym_permute_motif(motif_sites)
        new_pssm = new_mot.pssm
        rnew_pssm = new_pssm.reverse_complement()
        new_pssm_list.append(new_pssm)
        rnew_pssm_list.append(rnew_pssm)
        # compute score for the negative sequences
        gscr = MGlib.esfmax_score_seqs(the_neg_seqs, new_pssm, rnew_pssm)
        mean_gscr = mean(gscr)
        std_gscr = std(gscr)
        # compute softmax scores for new motif sequences
        mscr = MGlib.esfmax_score_seqs(new_mot.instances, new_pssm, rnew_pssm)
        mean_mscr = mean(mscr)
        std_mscr = std(mscr)
        smeans_file.write("PermPSSM n_g,%13.10f,%13.10f\n" % (mean_gscr, std_gscr))
        smeans_file.write("PermPSSM n_m,%13.10f,%13.10f\n" % (mean_mscr, std_mscr))
        # get normal distributions for background and motif
        n_g_temp = norm(mean_gscr, std_gscr)
        n_g_perms.append(n_g_temp)
        n_m_temp = norm(mean_mscr, std_mscr)
        n_m_perms.append(n_m_temp)
    smeans_file.close()

    # write csv header
    out_file.write('COG Num,Pos/Neg Regulated,Posterior,LogLikelihood,True Model LL,LL Pval\n')

    # For each cog, do the posterior calculation and the permutation tests
    for i in range(0, num_cogs):
        label = get_cog_type(i, neg_cutoff)
        #print "Test Cog:", i, label

        # The original posterior computation
        #compute softmax scores for sequences in dataset
        scrs = MGlib.esfmax_score_seqs(all_cogs[i], pssm, rpssm)
        #print np.min(scrs[0]), np.max(scrs[0])

        # Compute posterior
        # get log-likelihoods for sequences in dataset
        llrs = MGlib.ll_ratios(scrs, n_g, n_m, alpha)
        # get per-sequence posterior for the sequences in dataset
        fpost = MGlib.PostP(llrs, PPR, 0)

        true_model_ll = compute_log_l(scrs, n_g, n_m, alpha)

        #####################################
        # Permutation test
        log_ls = []
        for j in range(0, num_perms):
            #print "  ... perm test", j
            # Compute score and log likelihood for each permutation.
            scrs = MGlib.esfmax_score_seqs(all_cogs[i], new_pssm_list[j], rnew_pssm_list[j])
            log_l = compute_log_l(scrs, n_g_perms[j], n_m_perms[j], alpha)
            log_ls.append(log_l)

        rev_pval = compute_p_val(log_ls, true_model_ll)
        pval = 1.0 - rev_pval
        out_file.write("%d,%s,%10.7f,%10.7f,%10.7f,%10.7f\n" % (i, label, fpost, rev_pval, true_model_ll, pval))

    out_file.close()


#if __name__ == "__main__":
main()
ErillLab/CogsNormalizedPosteriorProbabilityThetas
MGtest/permutation_test.py
Python
gpl-3.0
11,301
[ "BLAST", "Biopython" ]
f995e5e042a60740363053388d53539c374c64c138eaf32d31763e88219defcc
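The core quantity in compute_log_l above is a two-component Gaussian mixture log-likelihood: each softmax score is assumed to come from the motif distribution with probability alpha and from the background distribution otherwise. A minimal, self-contained sketch of that computation with hypothetical distribution parameters (in the script itself, the means and standard deviations are fit to scored sequences):

import numpy as np
from scipy.stats import norm

# Hypothetical score distributions standing in for the fitted n_g / n_m.
n_g = norm(0.0, 1.0)   # background scores
n_m = norm(5.0, 1.5)   # motif (binding site) scores
alpha = 1.0 / 300.0    # mixing ratio for the regulated model

scores = np.array([-0.3, 0.8, 4.9])
# Sum of per-score log-likelihoods under the mixture model.
log_l = np.sum(np.log(alpha * n_m.pdf(scores) + (1 - alpha) * n_g.pdf(scores)))
print(log_l)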
#!/usr/bin/python2

import optparse
import subprocess
import sys

from builds import GporcaBuild, GpcodegenBuild, GporcacodegenBuild


def make(num_cpus):
    return subprocess.call("make -j %d" % (num_cpus), cwd="gpdb_src", shell=True)


def install(output_dir):
    subprocess.call("make install", cwd="gpdb_src", shell=True)
    subprocess.call("mkdir -p " + output_dir, shell=True)
    return subprocess.call("cp -r /usr/local/gpdb/* " + output_dir, shell=True)


def unittest():
    return subprocess.call("make -s unittest-check", cwd="gpdb_src/src/backend", shell=True)


def main():
    parser = optparse.OptionParser()
    parser.add_option("--build_type", dest="build_type", default="RELEASE")
    parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen'], default="orca_codegen")
    parser.add_option("--compiler", dest="compiler")
    parser.add_option("--cxxflags", dest="cxxflags")
    parser.add_option("--output_dir", dest="output_dir", default="install")
    (options, args) = parser.parse_args()

    if options.mode == 'orca':
        ciCommon = GporcaBuild()
    elif options.mode == 'codegen':
        ciCommon = GpcodegenBuild()
    elif options.mode == 'orca_codegen':
        ciCommon = GporcacodegenBuild()

    status = ciCommon.install_system_deps()
    if status:
        return status

    for dependency in args:
        status = ciCommon.install_dependency(dependency)
        if status:
            return status

    status = ciCommon.configure()
    if status:
        return status

    status = make(ciCommon.num_cpus())
    if status:
        return status

    status = unittest()
    if status:
        return status

    status = install(options.output_dir)
    if status:
        return status

    return 0


if __name__ == "__main__":
    sys.exit(main())
CraigHarris/gpdb
concourse/scripts/build_gpdb.py
Python
apache-2.0
1,806
[ "ORCA" ]
468ef80559a964e56df575ea7f7bb4e509d8a701a4d71dc43d76945246e645ee
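The build script above follows a fail-fast convention: every step returns a shell exit status, and main() stops at the first nonzero one. A compact sketch of the same pattern, using placeholder POSIX commands (true/false) in place of the real configure/make/unittest/install steps:

import subprocess

def run_steps(commands):
    # Run each command; return the first nonzero exit status, else 0.
    for cmd in commands:
        status = subprocess.call(cmd, shell=True)
        if status:
            return status
    return 0

# Placeholder commands; "false" fails, so the third step never runs.
print(run_steps(["true", "false", "true"]))  # -> 1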
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""check for signs of poor design"""

import re
from collections import defaultdict

from astroid import If, BoolOp

from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages

# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*')

MSGS = {
    'R0901': ('Too many ancestors (%s/%s)',
              'too-many-ancestors',
              'Used when class has too many parent classes, try to reduce '
              'this to get a simpler (and so easier to use) class.'),
    'R0902': ('Too many instance attributes (%s/%s)',
              'too-many-instance-attributes',
              'Used when class has too many instance attributes, try to reduce '
              'this to get a simpler (and so easier to use) class.'),
    'R0903': ('Too few public methods (%s/%s)',
              'too-few-public-methods',
              'Used when class has too few public methods, so be sure it\'s '
              'really worth it.'),
    'R0904': ('Too many public methods (%s/%s)',
              'too-many-public-methods',
              'Used when class has too many public methods, try to reduce '
              'this to get a simpler (and so easier to use) class.'),
    'R0911': ('Too many return statements (%s/%s)',
              'too-many-return-statements',
              'Used when a function or method has too many return statements, '
              'making it hard to follow.'),
    'R0912': ('Too many branches (%s/%s)',
              'too-many-branches',
              'Used when a function or method has too many branches, '
              'making it hard to follow.'),
    'R0913': ('Too many arguments (%s/%s)',
              'too-many-arguments',
              'Used when a function or method takes too many arguments.'),
    'R0914': ('Too many local variables (%s/%s)',
              'too-many-locals',
              'Used when a function or method has too many local variables.'),
    'R0915': ('Too many statements (%s/%s)',
              'too-many-statements',
              'Used when a function or method has too many statements. You '
              'should then split it in smaller functions / methods.'),
    'R0916': ('Too many boolean expressions in if statement (%s/%s)',
              'too-many-boolean-expressions',
              'Used when an if statement contains too many boolean '
              'expressions'),
}


def _count_boolean_expressions(bool_op):
    """Counts the number of boolean expressions in BoolOp `bool_op` (recursive)

    example: a and (b or c or (d and e)) ==> 5 boolean expressions
    """
    nb_bool_expr = 0
    for bool_expr in bool_op.get_children():
        if isinstance(bool_expr, BoolOp):
            nb_bool_expr += _count_boolean_expressions(bool_expr)
        else:
            nb_bool_expr += 1
    return nb_bool_expr


class MisdesignChecker(BaseChecker):
    """checks for signs of poor/misdesign:

    * number of methods, attributes, local variables...
    * size, complexity of functions, methods
    """

    __implements__ = (IAstroidChecker,)

    # configuration section name
    name = 'design'
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = (('max-args',
                {'default': 5, 'type': 'int', 'metavar': '<int>',
                 'help': 'Maximum number of arguments for function / method'}
               ),
               ('ignored-argument-names',
                {'default': IGNORED_ARGUMENT_NAMES,
                 'type': 'regexp', 'metavar': '<regexp>',
                 'help': 'Argument names that match this expression will be '
                         'ignored. Default to name with leading underscore'}
               ),
               ('max-locals',
                {'default': 15, 'type': 'int', 'metavar': '<int>',
                 'help': 'Maximum number of locals for function / method body'}
               ),
               ('max-returns',
                {'default': 6, 'type': 'int', 'metavar': '<int>',
                 'help': 'Maximum number of return / yield for function / '
                         'method body'}
               ),
               ('max-branches',
                {'default': 12, 'type': 'int', 'metavar': '<int>',
                 'help': 'Maximum number of branches for function / method body'}
               ),
               ('max-statements',
                {'default': 50, 'type': 'int', 'metavar': '<int>',
                 'help': 'Maximum number of statements in function / method '
                         'body'}
               ),
               ('max-parents',
                {'default': 7, 'type': 'int', 'metavar': '<num>',
                 'help': 'Maximum number of parents for a class (see R0901).'}
               ),
               ('max-attributes',
                {'default': 7, 'type': 'int', 'metavar': '<num>',
                 'help': 'Maximum number of attributes for a class '
                         '(see R0902).'}
               ),
               ('min-public-methods',
                {'default': 2, 'type': 'int', 'metavar': '<num>',
                 'help': 'Minimum number of public methods for a class '
                         '(see R0903).'}
               ),
               ('max-public-methods',
                {'default': 20, 'type': 'int', 'metavar': '<num>',
                 'help': 'Maximum number of public methods for a class '
                         '(see R0904).'}
               ),
               ('max-bool-expr',
                {'default': 5, 'type': 'int', 'metavar': '<num>',
                 'help': 'Maximum number of boolean expressions in an if '
                         'statement'}
               ),
              )

    def __init__(self, linter=None):
        BaseChecker.__init__(self, linter)
        self.stats = None
        self._returns = None
        self._branches = None
        self._stmts = 0

    def open(self):
        """initialize visit variables"""
        self.stats = self.linter.add_stats()
        self._returns = []
        self._branches = defaultdict(int)

    @check_messages('too-many-ancestors', 'too-many-instance-attributes',
                    'too-few-public-methods', 'too-many-public-methods')
    def visit_classdef(self, node):
        """check size of inheritance hierarchy and number of instance attributes
        """
        # Is the total inheritance hierarchy 7 or less?
        nb_parents = len(list(node.ancestors()))
        if nb_parents > self.config.max_parents:
            self.add_message('too-many-ancestors', node=node,
                             args=(nb_parents, self.config.max_parents))
        # Does the class contain less than 20 attributes for
        # non-GUI classes (40 for GUI)?
        # FIXME detect gui classes
        if len(node.instance_attrs) > self.config.max_attributes:
            self.add_message('too-many-instance-attributes', node=node,
                             args=(len(node.instance_attrs),
                                   self.config.max_attributes))

    @check_messages('too-few-public-methods', 'too-many-public-methods')
    def leave_classdef(self, node):
        """check number of public methods"""
        my_methods = sum(1 for method in node.mymethods()
                         if not method.name.startswith('_'))
        all_methods = sum(1 for method in node.methods()
                          if not method.name.startswith('_'))

        # Does the class contain less than n public methods ?
        # This checks only the methods defined in the current class,
        # since the user might not have control over the classes
        # from the ancestors. It avoids some false positives
        # for classes such as unittest.TestCase, which provides
        # a lot of assert methods. It doesn't make sense to warn
        # when the user subclasses TestCase to add his own tests.
        if my_methods > self.config.max_public_methods:
            self.add_message('too-many-public-methods', node=node,
                             args=(my_methods,
                                   self.config.max_public_methods))
        # stop here for exception, metaclass and interface classes
        if node.type != 'class':
            return

        # Does the class contain more than n public methods ?
        # This checks all the methods defined by ancestors and
        # by the current class.
        if all_methods < self.config.min_public_methods:
            self.add_message('too-few-public-methods', node=node,
                             args=(all_methods,
                                   self.config.min_public_methods))

    @check_messages('too-many-return-statements', 'too-many-branches',
                    'too-many-arguments', 'too-many-locals',
                    'too-many-statements')
    def visit_functiondef(self, node):
        """check function name, docstring, arguments, redefinition,
        variable names, max locals
        """
        # init branch and returns counters
        self._returns.append(0)
        # check number of arguments
        args = node.args.args
        if args is not None:
            ignored_args_num = len(
                [arg for arg in args
                 if self.config.ignored_argument_names.match(arg.name)])
            argnum = len(args) - ignored_args_num
            if argnum > self.config.max_args:
                self.add_message('too-many-arguments', node=node,
                                 args=(len(args), self.config.max_args))
        else:
            ignored_args_num = 0
        # check number of local variables
        locnum = len(node.locals) - ignored_args_num
        if locnum > self.config.max_locals:
            self.add_message('too-many-locals', node=node,
                             args=(locnum, self.config.max_locals))
        # init statements counter
        self._stmts = 1

    visit_asyncfunctiondef = visit_functiondef

    @check_messages('too-many-return-statements', 'too-many-branches',
                    'too-many-arguments', 'too-many-locals',
                    'too-many-statements')
    def leave_functiondef(self, node):
        """most of the work is done here on close:
        checks for max returns, branch, return in __init__
        """
        returns = self._returns.pop()
        if returns > self.config.max_returns:
            self.add_message('too-many-return-statements', node=node,
                             args=(returns, self.config.max_returns))
        branches = self._branches[node]
        if branches > self.config.max_branches:
            self.add_message('too-many-branches', node=node,
                             args=(branches, self.config.max_branches))
        # check number of statements
        if self._stmts > self.config.max_statements:
            self.add_message('too-many-statements', node=node,
                             args=(self._stmts, self.config.max_statements))

    leave_asyncfunctiondef = leave_functiondef

    def visit_return(self, _):
        """count number of returns"""
        if not self._returns:
            return  # return outside function, reported by the base checker
        self._returns[-1] += 1

    def visit_default(self, node):
        """default visit method -> increments the statements counter if
        necessary
        """
        if node.is_statement:
            self._stmts += 1

    def visit_tryexcept(self, node):
        """increments the branches counter"""
        branches = len(node.handlers)
        if node.orelse:
            branches += 1
        self._inc_branch(node, branches)
        self._stmts += branches

    def visit_tryfinally(self, node):
        """increments the branches counter"""
        self._inc_branch(node, 2)
        self._stmts += 2

    @check_messages('too-many-boolean-expressions')
    def visit_if(self, node):
        """increments the branches counter and checks boolean expressions"""
        self._check_boolean_expressions(node)
        branches = 1
        # don't double count If nodes coming from some 'elif'
        if node.orelse and (len(node.orelse) > 1 or
                            not isinstance(node.orelse[0], If)):
            branches += 1
        self._inc_branch(node, branches)
        self._stmts += branches

    def _check_boolean_expressions(self, node):
        """Go through "if" node `node` and count its boolean expressions

        if the "if" node test is a BoolOp node
        """
        condition = node.test
        if not isinstance(condition, BoolOp):
            return
        nb_bool_expr = _count_boolean_expressions(condition)
        if nb_bool_expr > self.config.max_bool_expr:
            self.add_message('too-many-boolean-expressions', node=condition,
                             args=(nb_bool_expr, self.config.max_bool_expr))

    def visit_while(self, node):
        """increments the branches counter"""
        branches = 1
        if node.orelse:
            branches += 1
        self._inc_branch(node, branches)

    visit_for = visit_while

    def _inc_branch(self, node, branchesnum=1):
        """increments the branches counter"""
        self._branches[node.scope()] += branchesnum


def register(linter):
    """required method to auto register this checker"""
    linter.register_checker(MisdesignChecker(linter))
si618/pi-time
node_modules/grunt-pylint/tasks/lib/pylint/checkers/design_analysis.py
Python
gpl-3.0
14,322
[ "VisIt" ]
895737fcd4c0e97c1f94fc74a8ac5a07f9086fe14ff7be5b78303c8bd307dd68
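_count_boolean_expressions in the checker above counts the leaves of a nested BoolOp tree, so `a and (b or c or (d and e))` yields 5. The checker operates on astroid nodes; a minimal equivalent using only the stdlib ast module, for illustration:

import ast

def count_boolean_expressions(bool_op):
    # Count leaf operands of a (possibly nested) boolean expression.
    count = 0
    for value in bool_op.values:
        if isinstance(value, ast.BoolOp):
            count += count_boolean_expressions(value)
        else:
            count += 1
    return count

tree = ast.parse("a and (b or c or (d and e))", mode="eval")
print(count_boolean_expressions(tree.body))  # -> 5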
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#

"""Extension to format and index PSI variables."""

#Sphinx.add_object_type(psivar, rolename, indextemplate='', parse_node=None,
#                       ref_nodeclass=None, objname='', doc_field_types=[])


def setup(app):
    app.add_object_type('psivar', 'psivar', indextemplate='single: %s')
kannon92/psi4
doc/sphinxman/source/psi4doc/ext/psidomain.py
Python
gpl-2.0
1,221
[ "Psi4" ]
8fa3d94ff0fe31434539907be3948d69f2d306ad203b14e944d9f73e8d5316bb
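In use, Sphinx imports this module via the project's extensions list and calls setup(app), after which the documentation sources gain a `psivar` directive and a matching `:psivar:` cross-reference role. A hypothetical conf.py fragment; the module path is an assumption based on the file's location in the repository:

# Hypothetical Sphinx conf.py fragment (module path assumed).
extensions = [
    'psi4doc.ext.psidomain',
]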
#!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** ## \package pts.magic.sources.extractor Contains the SourceExtractor class. # ----------------------------------------------------------------- # Ensure Python 3 compatibility from __future__ import absolute_import, division, print_function # Import standard modules import math import numpy as np # Import the relevant PTS classes and modules from ..basics.mask import Mask from ..region.ellipse import PixelEllipseRegion from ..basics.coordinate import PixelCoordinate from ..core.detection import Detection from ...core.basics.log import log from ...core.basics.configurable import Configurable from ..tools import masks from ...core.basics.animation import Animation from ..region.list import PixelRegionList from ..core.image import Image from ..core.frame import Frame from ...core.tools import filesystem as fs from ..region.list import load_as_pixel_region_list from ..region.point import PixelPointRegion from pts.core.tools.utils import lazyproperty from ..core.mask import Mask as newMask # ----------------------------------------------------------------- class SourceExtractor(Configurable): """ This class ... """ def __init__(self, *args, **kwargs): """ The constructor ... :param interactive: :return: """ # Call the constructor of the base class super(SourceExtractor, self).__init__(*args, **kwargs) # -- Attributes -- # The image frame self.frame = None # The original minimum and maximum value self.minimum_value = None self.maximum_value = None # The mask of nans self.nan_mask = None # Regions self.galaxy_region = None self.star_region = None self.saturation_region = None self.other_region = None # The animation self.animation = None # Special mask self.special_mask = None # Segmentation maps self.galaxy_segments = None self.star_segments = None self.other_segments = None # The total mask of removed sources self.mask = None # The list of sources self.sources = [] self.labels = [] # STATISTICS self.ngalaxy_sources = 0 self.nstar_sources = 0 self.nother_sources = 0 self.nforeground = 0 self.nfailed = 0 self.nsuccess = 0 self.nwith_saturation = 0 # ----------------------------------------------------------------- def _run(self, **kwargs): """ This function ... :return: """ # 2. Load the sources self.load_sources() # 3. Create the mask of all sources to be removed self.create_mask() # 4. For each source, check the pixels in the background that belong to an other source self.set_cross_contamination() # 5. Remove the sources self.remove_sources() # 6. Fix extreme values that showed up during the interpolation steps self.fix_extreme_values() # 7. Set nans back into the frame self.set_nans() # 8. Writing if self.config.output is not None and self.config.write: self.write() # ----------------------------------------------------------------- def setup(self, **kwargs): """ This function ... 
:param kwargs: :return: """ # Call the setup function of the base class super(SourceExtractor, self).setup(**kwargs) # Set the image frame if "frame" in kwargs: self.frame = kwargs.pop("frame") else: self.load_frame() # Load the region lists self.load_regions(**kwargs) # Load the segmentation maps self.load_segments(**kwargs) # Initialize the mask self.mask = Mask.empty_like(self.frame) # Remember the minimum and maximum value self.minimum_value = np.nanmin(self.frame) self.maximum_value = np.nanmax(self.frame) # Create a mask of the pixels that are NaNs self.nan_mask = Mask.is_nan(self.frame) self.frame[self.nan_mask] = 0.0 # Make a reference to the animation self.animation = kwargs.pop("animation", None) # Create mask from special region if "special_region" in kwargs: special_region = kwargs.pop("special_region") self.special_mask = Mask.from_region(special_region, self.frame.xsize, self.frame.ysize) if special_region is not None else None # If making animation is enabled if self.config.animation: self.animation = Animation() self.animation.fps = 1 # ----------------------------------------------------------------- def load_frame(self): """ This function ... :return: """ # Inform the user log.info("Loading the image frame ...") # load self.frame = Frame.from_file(self.config.image) # ----------------------------------------------------------------- @property def frame_name(self): """ This function ... :return: """ return self.frame.name # ----------------------------------------------------------------- @property def filter(self): """ This function ... :return: """ return self.frame.filter # ----------------------------------------------------------------- @property def filter_name(self): """ This function ... :return: """ return self.frame.filter_name # ----------------------------------------------------------------- def load_regions(self, **kwargs): """ This function ... 
:param kwargs: :return: """ # Inform the user log.info("Loading the regions ...") # Load the galaxy region if "galaxy_region" in kwargs: self.galaxy_region = kwargs.pop("galaxy_region") else: if "name" in kwargs: galaxy_region_path = self.input_path_file("galaxies_" + kwargs["name"] + ".reg") else: galaxy_region_path = self.input_path_file("galaxies.reg") # Check if not fs.is_file(galaxy_region_path): if self.filter_name is not None: galaxy_region_path = self.input_path_file("galaxies_" + self.filter_name + ".reg") if not fs.is_file(galaxy_region_path): log.warning("No galaxy regions file could be found") galaxy_region_path = None elif self.frame_name is not None: galaxy_region_path = self.input_path_file("galaxies_" + self.frame_name + ".reg") if not fs.is_file(galaxy_region_path): log.warning("No galaxy regions file could be found") galaxy_region_path = None else: log.warning("No galaxy regions file could be found") galaxy_region_path = None # Load the galaxy regions if galaxy_region_path is not None: self.galaxy_region = load_as_pixel_region_list(galaxy_region_path, self.frame.wcs) # Load the star region if "star_region" in kwargs: self.star_region = kwargs.pop("star_region") else: if "name" in kwargs: star_region_path = self.input_path_file("stars_" + kwargs["name"] + ".reg") else: star_region_path = self.input_path_file("stars.reg") # Check if not fs.is_file(star_region_path): if self.filter_name is not None: star_region_path = self.input_path_file("stars_" + self.filter_name + ".reg") if not fs.is_file(star_region_path): log.warning("No star regions file could be found") star_region_path = None elif self.frame_name is not None: star_region_path = self.input_path_file("stars_" + self.frame_name + ".reg") if not fs.is_file(star_region_path): log.warning("No star regions file could be found") star_region_path = None else: log.warning("No star regions file could be found") star_region_path = None # Load the star regions if star_region_path is not None: self.star_region = load_as_pixel_region_list(star_region_path, self.frame.wcs) # Load the saturation region if "saturation_region" in kwargs: self.saturation_region = kwargs.pop("saturation_region") else: if "name" in kwargs: saturation_region_path = self.input_path_file("saturation_" + kwargs["name"] + ".reg") else: saturation_region_path = self.input_path_file("saturation.reg") # Check if not fs.is_file(saturation_region_path): if self.filter_name is not None: saturation_region_path = self.input_path_file("saturation_" + self.filter_name + ".reg") if not fs.is_file(saturation_region_path): log.warning("No saturation regions file could be found") saturation_region_path = None elif self.frame_name is not None: saturation_region_path = self.input_path_file("saturation_" + self.frame_name + ".reg") if not fs.is_file(saturation_region_path): log.warning("No saturation regions file could be found") saturation_region_path = None else: log.warning("No saturation regions file could be found") saturation_region_path = None # Load the saturation regions if saturation_region_path is not None: self.saturation_region = load_as_pixel_region_list(saturation_region_path, self.frame.wcs) # Load the region of other sources if "other_region" in kwargs: self.other_region = kwargs.pop("other_region") else: if "name" in kwargs: other_region_path = self.input_path_file("other_sources_" + kwargs["name"] + ".reg") else: other_region_path = self.input_path_file("other_sources.reg") # Check if not fs.is_file(other_region_path): if self.filter_name is not None: 
other_region_path = self.input_path_file("other_sources_" + self.filter_name + ".reg") if not fs.is_file(other_region_path): log.warning("No other regions file could be found") other_region_path = None elif self.frame_name is not None: other_region_path = self.input_path_file("other_sources_" + self.frame_name + ".reg") if not fs.is_file(other_region_path): log.warning("No other regions file could be found") other_region_path = None else: log.warning("No other regions file could be found") other_region_path = None # Load the other regions if other_region_path is not None: self.other_region = load_as_pixel_region_list(other_region_path, self.frame.wcs) # Debugging if self.galaxy_region is not None: log.debug("Galaxy regions: PRESENT") else: log.debug("Galaxy regions: NOT PRESENT") if self.star_region is not None: log.debug("Star regions: PRESENT") else: log.debug("Star regions: NOT PRESENT") if self.saturation_region is not None: log.debug("Saturation regions: PRESENT") else: log.debug("Saturation regions: NOT PRESENT") if self.other_region is not None: log.debug("Other regions: PRESENT") else: log.debug("Other regions: NOT PRESENT") # ----------------------------------------------------------------- def load_segments(self, **kwargs): """ This function ... :param kwargs: :return: """ # Inform the user log.info("Loading the segmentation maps ...") # Load the image with segmentation maps segments = None if "segments" in kwargs: segments = kwargs.pop("segments") else: if "name" in kwargs: segments_path = self.input_path_file("segments_" + kwargs["name"] + ".fits") else: segments_path = self.input_path_file("segments.fits") if not fs.is_file(segments_path): if self.filter_name is not None: segments_path = self.input_path_file("segments_" + self.filter_name + ".fits") if not fs.is_file(segments_path): log.warning("No segmentation maps found, will be using regions to define the to be extracted patches") segments_path = None elif self.frame_name is not None: segments_path = self.input_path_file("segments_" + self.frame_name + ".fits") if not fs.is_file(segments_path): log.warning("No segmentation maps found, will be using regions to define the to be extracted patches") segments_path = None else: log.warning("No segmentation maps found, will be using regions to define the to be extracted patches") segments_path = None # Load the segments if segments_path is not None: segments = Image.from_file(segments_path, no_filter=True) # If segments is not None if segments is not None: #print(segments.frames.keys()) # Get the segmentation maps self.galaxy_segments = segments.frames["extended"] if "extended" in segments.frames else None self.star_segments = segments.frames["point"] if "point" in segments.frames else None self.other_segments = segments.frames["other"] if "other" in segments.frames else None # Debugging if self.galaxy_segments is not None: log.debug("Galaxy segments: PRESENT") else: log.debug("Galaxy segments: NOT PRESENT") if self.star_segments is not None: log.debug("Star segments: PRESENT") else: log.debug("Star segments: NOT PRESENT") if self.other_segments is not None: log.debug("Other segments: PRESENT") else: log.debug("Other segments: NOT PRESENT") # ----------------------------------------------------------------- def load_sources(self): """ This function ... 
:return: """ # Inform the user log.info("Loading the sources ...") # Load the galaxy sources self.load_galaxy_sources() # Load the star sources if self.star_region is not None: self.load_star_sources() # Load the other sources if self.other_region is not None: self.load_other_sources() # ----------------------------------------------------------------- def load_galaxy_sources(self): """ This function ... :return: """ # Inform the user log.info("Loading the galaxy sources ...") # Loop over the shapes in the galaxy region for shape in self.galaxy_region: #print(shape.label) #print(shape.meta) # Shapes without text are in this case just coordinates #if "text" not in shape.meta: continue if shape.label is None: continue # Debugging log.debug("Adding galaxy '" + shape.label + "' ...") # Segments are passed if self.galaxy_segments is not None: # Get the coordinate of the center for this galaxy center = shape.center #print("here") # Check the label of the corresponding segment label = self.galaxy_segments[int(center.y), int(center.x)] if label == 3 or (label == 2 and self.config.remove_companions): # Create a source source = Detection.from_shape(self.frame, shape, self.config.source_outer_factor) # Check whether it is a 'special' source source.special = self.special_mask.masks(center) if self.special_mask is not None else False self.ngalaxy_sources += 1 # Add the source to the list self.sources.append(source) self.labels.append(label) elif "principal" not in shape.label: # Create a source source = Detection.from_shape(self.frame, shape, self.config.source_outer_factor) # Check whether it is a special source source.special = self.special_mask.masks(shape.center) if self.special_mask is not None else False self.ngalaxy_sources += 1 # Add the source to the list self.sources.append(source) self.labels.append(shape.label) # ----------------------------------------------------------------- def load_star_sources(self): """ This function ... :return: """ # Inform the user log.info("Loading the star sources ...") # Loop over all stars in the region for shape in self.star_region: #print(shape.label) # Ignore shapes without text, these should be just the positions of the peaks #if "text" not in shape.meta: continue if shape.label is None: continue # Ignore shapes with color red (stars without source) if shape.appearance["color"] == "red": continue # Get the star index index = int(shape.label) # Debugging log.debug("Adding star " + str(index) + " ...") # Get the saturation source saturation_source = self.find_saturation_source(index) # Check whether the star is a 'special' region special = self.special_mask.masks(shape.center) if self.special_mask is not None else False # Saturation source was found if saturation_source is not None: self.nwith_saturation += 1 ## DILATION if self.config.dilate_saturation: self.dilate_saturation_source(saturation_source) # Set the source to be the saturation source source = saturation_source # Create a new source from the shape else: source = Detection.from_shape(self.frame, shape, self.config.source_outer_factor) # Set special flag source.special = special # Increment self.nstar_sources += 1 # Add it to the list self.sources.append(source) # self.labels.append(index) # ----------------------------------------------------------------- def find_saturation_source(self, index): """ This function ... 
:param index: :return: """ # Deubgging log.debug("Finding a saturation source for star " + str(index) + " ...") # Look whether a saturation source is present saturation_source = None # Check whether the star is a foreground star #if self.principal_mask.masks(shape.center): foreground = True # If there is a saturation region if self.saturation_region is not None: # Add the saturation sources # Loop over the shapes in the saturation region for j in range(len(self.saturation_region)): saturation_shape = self.saturation_region[j] #if "text" not in saturation_shape.meta: continue if saturation_shape.label is None: continue saturation_index = int(saturation_shape.label) if index != saturation_index: continue else: # Remove the saturation shape from the region saturation_shape = self.saturation_region.pop(j) # Create saturation source saturation_source = Detection.from_shape(self.frame, saturation_shape, self.config.source_outer_factor) # Replace the saturation mask segments_cutout = self.star_segments[saturation_source.y_slice, saturation_source.x_slice] saturation_mask = Mask(segments_cutout == index) saturation_source.mask = saturation_mask.fill_holes() # Break the loop break # Return the saturation source return saturation_source # ----------------------------------------------------------------- def dilate_saturation_source(self, saturation_source): """ This function ... :param saturation_source: :return: """ # factor = saturation_dilation_factor dilation_factor = self.config.saturation_dilation_factor saturation_source = saturation_source.zoom_out(dilation_factor, self.frame, keep_original_mask=True) mask_area = np.sum(saturation_source.mask) area_dilation_factor = dilation_factor ** 2. new_area = mask_area * area_dilation_factor ## Circular mask approximation # ellipse = find_contour(source.mask.astype(float), source.mask) # radius = ellipse.radius.norm mask_radius = math.sqrt(mask_area / math.pi) new_radius = math.sqrt(new_area / math.pi) kernel_radius = new_radius - mask_radius # Replace mask saturation_source.mask = saturation_source.mask.disk_dilation(radius=kernel_radius) # ----------------------------------------------------------------- def load_other_sources(self): """ This function ... :return: """ # Inform the user log.info("Loading the other sources ...") # Loop over the shapes in the other sources region for shape in self.other_region: # This is a source found by SourceFinder if shape.label is not None: # Debugging log.debug("Adding other source '" + shape.label + "' ...") # Get integer label label = int(shape.label) # Create a source source = Detection.from_shape(self.frame, shape, self.config.source_outer_factor) # Replace the source mask segments_cutout = self.other_segments[source.y_slice, source.x_slice] source.mask = Mask(segments_cutout == label).fill_holes() ## DILATION if self.config.dilate_other: # DILATE SOURCE # factor = other_dilation_factor dilation_factor = self.config.other_dilation_factor ## CODE FOR DILATION (FROM SOURCES MODULE) source = source.zoom_out(dilation_factor, self.frame, keep_original_mask=True) mask_area = np.sum(source.mask) area_dilation_factor = dilation_factor ** 2. 
new_area = mask_area * area_dilation_factor ## Circular mask approximation # ellipse = find_contour(source.mask.astype(float), source.mask) # radius = ellipse.radius.norm mask_radius = math.sqrt(mask_area / math.pi) new_radius = math.sqrt(new_area / math.pi) kernel_radius = new_radius - mask_radius # Replace mask source.mask = source.mask.disk_dilation(radius=kernel_radius) ## END DILATION CODE # This is a shape drawn by the user and added to the other sources region # # Create a source else: source = Detection.from_shape(self.frame, shape, self.config.source_outer_factor) # Check whether source is 'special' source.special = self.special_mask.masks(shape.center) if self.special_mask is not None else False # Increment self.nother_sources += 1 # Add the source to the list self.sources.append(source) self.labels.append(label) # ----------------------------------------------------------------- def create_mask(self): """ This function ... :return: """ # Inform the user log.info("Creating the mask of all sources to be removed ...") # Loop over all sources #for source in self.sources: index = 0 while index < len(self.sources): # Get the current source source = self.sources[index] # If these pixels are already masked by an overlapping source (e.g. saturation), remove this source, # otherwise the area will be messed up current_mask_cutout = self.mask[source.y_slice, source.x_slice] if current_mask_cutout.covers(source.mask): self.sources.pop(index) continue # Adapt the mask self.mask[source.y_slice, source.x_slice] += source.mask # Increment the index index += 1 # ----------------------------------------------------------------- def set_cross_contamination(self): """ This function ... :return: """ # Inform the user log.info("For each source, checking which pixels in the neighborhood are contaminated by other sources ...") # Loop over all sources for source in self.sources: # Create the contamination mask for this source other_sources_mask = Mask.empty_like(source.cutout) other_sources_mask[source.background_mask] = self.mask[source.y_slice, source.x_slice][source.background_mask] source.contamination = other_sources_mask # ----------------------------------------------------------------- @property def nsources(self): """ This function ... :return: """ return len(self.sources) # ----------------------------------------------------------------- def remove_sources(self): """ This function ... 
:return: """ # Inform the user log.info("Interpolating the frame over the masked pixels ...") nsources = len(self.sources) count = 0 # Set principal ellipse for the source extraction animation if self.animation is not None: self.animation.principal_shape = self.principal_shape # Loop over all sources and remove them from the frame for label, source in zip(self.labels, self.sources): # Debugging log.debug("Estimating background and replacing the frame pixels of source " + str(count+1) + " of " + str(nsources) + " ...") # Check whether the source is in front of the principal galaxy #foreground = self.principal_mask.masks(source.center) if self.principal_mask is not None: foreground = masks.overlap(self.principal_mask[source.y_slice, source.x_slice], source.mask) else: foreground = False if foreground: self.nforeground += 1 # SKip foreground if requested if self.config.only_foreground and not foreground: continue # Disable sigma-clipping for estimating background when the source is foreground to the principal galaxy (to avoid clipping the galaxy's gradient) sigma_clip = self.config.sigma_clip if not foreground else False # Debugging log.debug("Sigma-clipping enabled for estimating background gradient for this source" if sigma_clip else "Sigma-clipping disabled for estimating background gradient for this source") # If these pixels are already replaced by an overlapping source (e.g. saturation), skip this source, # otherwise the area will be messed up #current_mask_cutout = self.mask[source.y_slice, source.x_slice] #if current_mask_cutout.covers(source.mask): # count += 1 # continue ## ==> this is now also done in create_mask # Estimate the background try: source.estimate_background(self.config.interpolation_method, sigma_clip=sigma_clip) except ValueError: # ValueError: zero-size array to reduction operation minimum which has no identity # in: limits = (np.min(known_points), np.max(known_points)) [inpaint_biharmonic] self.nfailed += 1 count += 1 continue # Adapt the mask #self.mask[source.y_slice, source.x_slice] += source.mask # this is now done beforehand, in the create_mask function # Add frame to the animation if self.animation is not None and (self.principal_mask is None or self.principal_mask.masks(source.center)) and self.animation.nframes <= 20: self.animation.add_source(source) # Replace the pixels by the background source.background.replace(self.frame, where=source.mask) # Increment self.nsuccess += 1 #if not sigma_clip: # # source.plot() # plotting.plot_removal(source.cutout, source.mask, source.background, # self.frame[source.y_slice, source.x_slice]) count += 1 # ----------------------------------------------------------------- def fix_extreme_values(self): """ This function ... :return: """ # Inform the user log.info("Fixing extreme values that were introduced during the interpolation steps ...") self.frame[self.frame < self.minimum_value] = self.minimum_value self.frame[self.frame > self.maximum_value] = self.maximum_value # ----------------------------------------------------------------- def set_nans(self): """ This function ... :return: """ # Inform the user log.info("Setting original NaN-pixels back to NaN ...") # Set the NaN pixels to zero in the frame self.frame[self.nan_mask] = float("nan") # ----------------------------------------------------------------- def write(self): """ THis function ... 
:return: """ # Inform the suer log.info("Writing ...") # Write the animation if self.animation is not None: self.write_animation() # Write the resulting frame self.write_frame() # Write the mask self.write_mask() # ----------------------------------------------------------------- def write_animation(self): """ This function ... :return: """ # Inform the user log.info("Writing the animation ...") # Save the animation path = self.output_path_file("animation.gif") self.animation.saveto(path) # ----------------------------------------------------------------- def write_frame(self): """ This function ... :return: """ # Inform the user log.info("Writing the result ...") # Determine the path to the resulting FITS file path = self.output_path_file("extracted.fits") # Save the resulting image as a FITS file self.frame.saveto(path) # ----------------------------------------------------------------- def write_mask(self): """ This function ... :return: """ # Inform the user log.info("Writing the mask ...") # Determine the path to the mask path = self.output_path_file("mask.fits") # Save the total mask as a FITS file Frame(self.mask.astype(float)).saveto(path) # ----------------------------------------------------------------- @lazyproperty def principal_shape(self): """ This function ... :return: """ if self.galaxy_region is None: return None largest_shape = None # Loop over all the shapes in the galaxy region for shape in self.galaxy_region: # Skip single coordinates if isinstance(shape, PixelCoordinate): continue if shape.label is not None and "principal" in shape.label: return shape if "text" in shape.meta and "principal" in shape.meta["text"]: return shape if not isinstance(shape, PixelEllipseRegion) and not isinstance(shape, PixelPointRegion): return shape semimajor_axis_length = shape.semimajor if largest_shape is None or semimajor_axis_length > largest_shape.semimajor: largest_shape = shape # Return the largest shape return largest_shape # ----------------------------------------------------------------- @lazyproperty def principal_mask(self): """ This function ... :return: """ if self.galaxy_segments is not None: return newMask.where(self.galaxy_segments, 1) elif self.principal_shape is not None: return self.principal_shape.to_mask(self.frame.xsize, self.frame.ysize) else: return None # -----------------------------------------------------------------
SKIRT/PTS
magic/sources/extractor.py
Python
agpl-3.0
34,804
[ "Galaxy" ]
931ee6f9934fd33c79a85cf318f329cab7ca5b9443f141ad02a7e816fce51772
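Both dilate_saturation_source and the dilation branch in load_other_sources above derive a dilation kernel radius by approximating the source mask as a circle: scaling the linear size by a factor scales the area by the factor squared, and the kernel radius is the difference between the new and old equivalent-circle radii. A worked numeric sketch of that calculation:

import math

def dilation_kernel_radius(mask_area, dilation_factor):
    # Equivalent circle radius: A = pi * r**2  =>  r = sqrt(A / pi).
    new_area = mask_area * dilation_factor ** 2.0
    mask_radius = math.sqrt(mask_area / math.pi)
    new_radius = math.sqrt(new_area / math.pi)
    return new_radius - mask_radius

# A 100-pixel mask dilated by a factor of 2 needs a kernel of ~5.64 pixels.
print(dilation_kernel_radius(100.0, 2.0))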
# -*- coding: utf-8 -*- """ End-to-end tests for the Account Settings page. """ from unittest import skip from nose.plugins.attrib import attr from bok_choy.web_app_test import WebAppTest from ...pages.lms.account_settings import AccountSettingsPage from ...pages.lms.auto_auth import AutoAuthPage from ...pages.lms.dashboard import DashboardPage from ..helpers import EventsTestMixin @attr('shard_5') class AccountSettingsTestMixin(EventsTestMixin, WebAppTest): """ Mixin with helper methods to test the account settings page. """ CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated" USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed' ACCOUNT_SETTINGS_REFERER = u"/account/settings" def visit_account_settings_page(self): """ Visit the account settings page for the current user, and store the page instance as self.account_settings_page. """ # pylint: disable=attribute-defined-outside-init self.account_settings_page = AccountSettingsPage(self.browser) self.account_settings_page.visit() self.account_settings_page.wait_for_ajax() def log_in_as_unique_user(self, email=None): """ Create a unique user and return the account's username and id. """ username = "test_{uuid}".format(uuid=self.unique_id[0:6]) auto_auth_page = AutoAuthPage(self.browser, username=username, email=email).visit() user_id = auto_auth_page.get_user_id() return username, user_id def settings_changed_event_filter(self, event): """Filter out any events that are not "settings changed" events.""" return event['event_type'] == self.USER_SETTINGS_CHANGED_EVENT_NAME def expected_settings_changed_event(self, setting, old, new, table=None): """A dictionary representing the expected fields in a "settings changed" event.""" return { 'username': self.username, 'referer': self.get_settings_page_url(), 'event': { 'user_id': self.user_id, 'setting': setting, 'old': old, 'new': new, 'truncated': [], 'table': table or 'auth_userprofile' } } def settings_change_initiated_event_filter(self, event): """Filter out any events that are not "settings change initiated" events.""" return event['event_type'] == self.CHANGE_INITIATED_EVENT_NAME def expected_settings_change_initiated_event(self, setting, old, new, username=None, user_id=None): """A dictionary representing the expected fields in a "settings change initiated" event.""" return { 'username': username or self.username, 'referer': self.get_settings_page_url(), 'event': { 'user_id': user_id or self.user_id, 'setting': setting, 'old': old, 'new': new, } } def get_settings_page_url(self): """The absolute URL of the account settings page given the test context.""" return self.relative_path_to_absolute_uri(self.ACCOUNT_SETTINGS_REFERER) def assert_no_setting_changed_event(self): """Assert no setting changed event has been emitted thus far.""" self.assert_no_matching_events_were_emitted({'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME}) @attr('shard_5') class DashboardMenuTest(AccountSettingsTestMixin, WebAppTest): """ Tests that the dashboard menu works correctly with the account settings page. """ def test_link_on_dashboard_works(self): """ Scenario: Verify that the "Account" link works from the dashboard. 
Given that I am a registered user And I visit my dashboard And I click on "Account" in the top drop down Then I should see my account settings page """ self.log_in_as_unique_user() dashboard_page = DashboardPage(self.browser) dashboard_page.visit() dashboard_page.click_username_dropdown() self.assertIn('Account', dashboard_page.username_dropdown_link_text) dashboard_page.click_account_settings_link() @attr('shard_5') class AccountSettingsPageTest(AccountSettingsTestMixin, WebAppTest): """ Tests that verify behaviour of the Account Settings page. """ SUCCESS_MESSAGE = 'Your changes have been saved.' def setUp(self): """ Initialize account and pages. """ super(AccountSettingsPageTest, self).setUp() self.username, self.user_id = self.log_in_as_unique_user() self.visit_account_settings_page() def test_page_view_event(self): """ Scenario: An event should be recorded when the "Account Settings" page is viewed. Given that I am a registered user And I visit my account settings page Then a page view analytics event should be recorded """ actual_events = self.wait_for_events( event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1) self.assert_events_match( [ { 'event': { 'user_id': self.user_id, 'page': 'account', 'visibility': None } } ], actual_events ) def test_all_sections_and_fields_are_present(self): """ Scenario: Verify that all sections and fields are present on the page. """ expected_sections_structure = [ { 'title': 'Basic Account Information (required)', 'fields': [ 'Username', 'Full Name', 'Email Address', 'Password', 'Language', 'Country or Region' ] }, { 'title': 'Additional Information (optional)', 'fields': [ 'Education Completed', 'Gender', 'Year of Birth', 'Preferred Language', ] }, { 'title': 'Connected Accounts', 'fields': [ 'Dummy', 'Facebook', 'Google', ] } ] self.assertEqual(self.account_settings_page.sections_structure(), expected_sections_structure) def _test_readonly_field(self, field_id, title, value): """ Test behavior of a readonly field. """ self.assertEqual(self.account_settings_page.title_for_field(field_id), title) self.assertEqual(self.account_settings_page.value_for_readonly_field(field_id), value) def _test_text_field( self, field_id, title, initial_value, new_invalid_value, new_valid_values, success_message=SUCCESS_MESSAGE, assert_after_reload=True ): """ Test behaviour of a text field. """ self.assertEqual(self.account_settings_page.title_for_field(field_id), title) self.assertEqual(self.account_settings_page.value_for_text_field(field_id), initial_value) self.assertEqual( self.account_settings_page.value_for_text_field(field_id, new_invalid_value), new_invalid_value ) self.account_settings_page.wait_for_indicator(field_id, 'validation-error') self.browser.refresh() self.assertNotEqual(self.account_settings_page.value_for_text_field(field_id), new_invalid_value) for new_value in new_valid_values: self.assertEqual(self.account_settings_page.value_for_text_field(field_id, new_value), new_value) self.account_settings_page.wait_for_message(field_id, success_message) if assert_after_reload: self.browser.refresh() self.assertEqual(self.account_settings_page.value_for_text_field(field_id), new_value) def _test_dropdown_field( self, field_id, title, initial_value, new_values, success_message=SUCCESS_MESSAGE, reloads_on_save=False ): """ Test behaviour of a dropdown field. 
""" self.assertEqual(self.account_settings_page.title_for_field(field_id), title) self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id), initial_value) for new_value in new_values: self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, new_value), new_value) self.account_settings_page.wait_for_message(field_id, success_message) if reloads_on_save: self.account_settings_page.wait_for_loading_indicator() else: self.browser.refresh() self.account_settings_page.wait_for_page() self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id), new_value) def _test_link_field(self, field_id, title, link_title, success_message): """ Test behaviour a link field. """ self.assertEqual(self.account_settings_page.title_for_field(field_id), title) self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title) self.account_settings_page.click_on_link_in_link_field(field_id) self.account_settings_page.wait_for_message(field_id, success_message) def test_username_field(self): """ Test behaviour of "Username" field. """ self._test_readonly_field('username', 'Username', self.username) def test_full_name_field(self): """ Test behaviour of "Full Name" field. """ self._test_text_field( u'name', u'Full Name', self.username, u'@', [u'another name', self.username], ) actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2) self.assert_events_match( [ self.expected_settings_changed_event('name', self.username, 'another name'), self.expected_settings_changed_event('name', 'another name', self.username), ], actual_events ) def test_email_field(self): """ Test behaviour of "Email" field. """ email = u"test@example.com" username, user_id = self.log_in_as_unique_user(email=email) self.visit_account_settings_page() self._test_text_field( u'email', u'Email Address', email, u'@', [u'me@here.com', u'you@there.com'], success_message='Click the link in the message to update your email address.', assert_after_reload=False ) actual_events = self.wait_for_events( event_filter=self.settings_change_initiated_event_filter, number_of_matches=2) self.assert_events_match( [ self.expected_settings_change_initiated_event( 'email', email, 'me@here.com', username=username, user_id=user_id), # NOTE the first email change was never confirmed, so old has not changed. self.expected_settings_change_initiated_event( 'email', email, 'you@there.com', username=username, user_id=user_id), ], actual_events ) # Email is not saved until user confirms, so no events should have been # emitted. self.assert_no_setting_changed_event() def test_password_field(self): """ Test behaviour of "Password" field. """ self._test_link_field( u'password', u'Password', u'Reset Password', success_message='Click the link in the message to reset your password.', ) event_filter = self.expected_settings_change_initiated_event('password', None, None) self.wait_for_events(event_filter=event_filter, number_of_matches=1) # Like email, since the user has not confirmed their password change, # the field has not yet changed, so no events will have been emitted. self.assert_no_setting_changed_event() @skip( 'On bokchoy test servers, language changes take a few reloads to fully realize ' 'which means we can no longer reliably match the strings in the html in other tests.' ) def test_language_field(self): """ Test behaviour of "Language" field. 
""" self._test_dropdown_field( u'pref-lang', u'Language', u'English', [u'Dummy Language (Esperanto)', u'English'], reloads_on_save=True, ) def test_education_completed_field(self): """ Test behaviour of "Education Completed" field. """ self._test_dropdown_field( u'level_of_education', u'Education Completed', u'', [u'Bachelor\'s degree', u''], ) actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2) self.assert_events_match( [ self.expected_settings_changed_event('level_of_education', None, 'b'), self.expected_settings_changed_event('level_of_education', 'b', None), ], actual_events ) def test_gender_field(self): """ Test behaviour of "Gender" field. """ self._test_dropdown_field( u'gender', u'Gender', u'', [u'Female', u''], ) actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2) self.assert_events_match( [ self.expected_settings_changed_event('gender', None, 'f'), self.expected_settings_changed_event('gender', 'f', None), ], actual_events ) def test_year_of_birth_field(self): """ Test behaviour of "Year of Birth" field. """ # Note that when we clear the year_of_birth here we're firing an event. self.assertEqual(self.account_settings_page.value_for_dropdown_field('year_of_birth', ''), '') expected_events = [ self.expected_settings_changed_event('year_of_birth', None, 1980), self.expected_settings_changed_event('year_of_birth', 1980, None), ] with self.assert_events_match_during(self.settings_changed_event_filter, expected_events): self._test_dropdown_field( u'year_of_birth', u'Year of Birth', u'', [u'1980', u''], ) def test_country_field(self): """ Test behaviour of "Country or Region" field. """ self._test_dropdown_field( u'country', u'Country or Region', u'', [u'Pakistan', u'Palau'], ) def test_preferred_language_field(self): """ Test behaviour of "Preferred Language" field. """ self._test_dropdown_field( u'language_proficiencies', u'Preferred Language', u'', [u'Pushto', u''], ) actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2) self.assert_events_match( [ self.expected_settings_changed_event( 'language_proficiencies', [], [{'code': 'ps'}], table='student_languageproficiency'), self.expected_settings_changed_event( 'language_proficiencies', [{'code': 'ps'}], [], table='student_languageproficiency'), ], actual_events ) def test_connected_accounts(self): """ Test that fields for third party auth providers exist. Currently there is no way to test the whole authentication process because that would require accounts with the providers. """ providers = ( ['auth-oa2-facebook', 'Facebook', 'Link'], ['auth-oa2-google-oauth2', 'Google', 'Link'], ) for field_id, title, link_title in providers: self.assertEqual(self.account_settings_page.title_for_field(field_id), title) self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title) @attr('a11y') class AccountSettingsA11yTest(AccountSettingsTestMixin, WebAppTest): """ Class to test account settings accessibility. """ def test_account_settings_a11y(self): """ Test the accessibility of the account settings page. """ self.log_in_as_unique_user() self.visit_account_settings_page() self.account_settings_page.a11y_audit.check_for_accessibility_errors()
xingyepei/edx-platform
common/test/acceptance/tests/lms/test_account_settings.py
Python
agpl-3.0
17,287
[ "VisIt" ]
91a1ab8bf59abcfb5ebdfdeb428d51a9ad5c576dfdf362ee58f252f46e706b81
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

r"""
This module contains classes useful for analyzing ferroelectric candidates.
The Polarization class can recover the spontaneous polarization using
multiple calculations along a nonpolar to polar ferroelectric distortion.
The EnergyTrend class is useful for assessing the trend in energy across
the distortion.

See Nicola Spaldin's "A beginner's guide to the modern theory of polarization"
(https://arxiv.org/abs/1202.1831) for an introduction to crystal polarization.

VASP reports dipole moment values (used to derive polarization) along Cartesian
directions (see pead.F around line 970 in the VASP source to confirm this).
However, it is most convenient to perform the adjustments necessary to recover
a same branch polarization by expressing the polarization along lattice directions.
For this reason, calc_ionic calculates ionic contributions to the polarization
along lattice directions. We provide the means to convert Cartesian direction
polarizations to lattice direction polarizations in the Polarization class.

We recommend using our calc_ionic function for calculating the ionic
polarization rather than the values from OUTCAR. We find that the ionic dipole
moment reported in OUTCAR differs from the naive calculation of
\\sum_i Z_i r_i where i is the index of the atom, Z_i is the ZVAL from the
pseudopotential file, and r is the distance in Angstroms along the lattice vectors.
Note, this difference is not simply due to VASP using Cartesian directions and
calc_ionic using lattice directions but rather how the ionic polarization is
computed. Compare calc_ionic to VASP SUBROUTINE POINT_CHARGE_DIPOL in dipol.F in
the VASP source to see the differences. We are able to recover a smooth same
branch polarization more frequently using the naive calculation in calc_ionic
than using the ionic dipole moment reported in the OUTCAR.

Some definitions of terms used in the comments below:

A polar structure belongs to a polar space group. A polar space group has
one of the 10 polar point groups:
        (1, 2, m, mm2, 4, 4mm, 3, 3m, 6, 6mm)

Being nonpolar is not equivalent to being centrosymmetric (having inversion
symmetry). For example, any space group with point group 222 is nonpolar but
not centrosymmetric.

By symmetry the polarization of a nonpolar material modulo the quantum
of polarization can only be zero or 1/2. We use a nonpolar structure to help
determine the spontaneous polarization because it serves as a reference point.
"""

import numpy as np

from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

__author__ = "Tess Smidt"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "1.0"
__email__ = "tsmidt@berkeley.edu"
__status__ = "Development"
__date__ = "April 15, 2017"


def zval_dict_from_potcar(potcar):
    """
    Creates zval_dictionary for calculating the ionic polarization from
    Potcar object

    potcar: Potcar object
    """
    zval_dict = {}
    for p in potcar:
        zval_dict.update({p.element: p.ZVAL})
    return zval_dict


def calc_ionic(site, structure, zval):
    """
    Calculate the ionic dipole moment using ZVAL from pseudopotential

    site: PeriodicSite
    structure: Structure
    zval: Charge value for ion (ZVAL for VASP pseudopotential)

    Returns polarization in electron Angstroms.
    """
    norms = structure.lattice.lengths
    return np.multiply(norms, -site.frac_coords * zval)
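
# Example of calc_ionic (a minimal sketch with made-up numbers; the ZVAL here
# is an assumed value, not one read from a real POTCAR):
#
#     >>> s = Structure(Lattice.cubic(4.0), ["Na"], [[0.1, 0.0, 0.0]])
#     >>> calc_ionic(s[0], s, zval=7.0)   # lengths (4, 4, 4) * -(0.1 * 7, 0, 0)
#     array([-2.8, -0. , -0. ])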
def get_total_ionic_dipole(structure, zval_dict):
    """
    Get the total ionic dipole moment for a structure.

    structure: pymatgen Structure
    zval_dict: specie, zval dictionary pairs
    """
    tot_ionic = []
    for site in structure:
        zval = zval_dict[str(site.specie)]
        tot_ionic.append(calc_ionic(site, structure, zval))
    return np.sum(tot_ionic, axis=0)


class PolarizationLattice(Structure):
    """
    Why is a Lattice inheriting a structure? This is ridiculous.
    """

    def get_nearest_site(self, coords, site, r=None):
        """
        Given coords and a site, find closest site to coords.

        Args:
            coords (3x1 array): Cartesian coords of center of sphere
            site: site to find closest to coords
            r: radius of sphere. Defaults to diagonal of unit cell

        Returns:
            Closest site and distance.
        """
        index = self.index(site)
        if r is None:
            r = np.linalg.norm(np.sum(self.lattice.matrix, axis=0))
        ns = self.get_sites_in_sphere(coords, r, include_index=True)
        # Get sites with identical index to site
        ns = [n for n in ns if n[2] == index]
        # Sort by distance to coords
        ns.sort(key=lambda x: x[1])
        # Return PeriodicSite and distance of closest image
        return ns[0][0:2]


class Polarization:
    """
    Class for recovering the same branch polarization for a set of
    polarization calculations along the nonpolar - polar distortion
    path of a ferroelectric.

    p_elecs, p_ions, and structures lists should be given in order
    of nonpolar to polar! For example, the structures returned from:
        nonpolar.interpolate(polar, interpolate_lattices=True)
    if nonpolar is the nonpolar Structure and polar is the polar structure.

    It is assumed that the electronic and ionic dipole moment values
    are given in electron Angstroms along the three lattice directions
    (a, b, c).
    """

    def __init__(
        self,
        p_elecs,
        p_ions,
        structures,
        p_elecs_in_cartesian=True,
        p_ions_in_cartesian=False,
    ):
        """
        p_elecs: np.array of electronic contribution to the polarization with shape [N, 3]
        p_ions: np.array of ionic contribution to the polarization with shape [N, 3]
        p_elecs_in_cartesian: whether p_elecs is along Cartesian directions
            (rather than lattice directions). Default is True because that is
            the convention for VASP.
        p_ions_in_cartesian: whether p_ions is along Cartesian directions
            (rather than lattice directions). Default is False because
            calc_ionic (which we recommend using for calculating the ionic
            contribution to the polarization) uses lattice directions.
        """
        if len(p_elecs) != len(p_ions) or len(p_elecs) != len(structures):
            raise ValueError("The number of electronic polarization and ionic polarization values must be equal.")
        if p_elecs_in_cartesian:
            p_elecs = np.array(
                [struct.lattice.get_vector_along_lattice_directions(p_elecs[i]) for i, struct in enumerate(structures)]
            )
        if p_ions_in_cartesian:
            p_ions = np.array(
                [struct.lattice.get_vector_along_lattice_directions(p_ions[i]) for i, struct in enumerate(structures)]
            )
        self.p_elecs = np.array(p_elecs)
        self.p_ions = np.array(p_ions)
        self.structures = structures

    @classmethod
    def from_outcars_and_structures(cls, outcars, structures, calc_ionic_from_zval=False):
        """
        Create Polarization object from list of Outcars and Structures in order
        of nonpolar to polar.

        Note, we recommend calculating the ionic dipole moment using calc_ionic
        rather than using the values in Outcar (see module comments).
To do this set calc_ionic_from_zval = True """ p_elecs = [] p_ions = [] for i, o in enumerate(outcars): p_elecs.append(o.p_elec) if calc_ionic_from_zval: p_ions.append(get_total_ionic_dipole(structures[i], o.zval_dict)) else: p_ions.append(o.p_ion) return cls(p_elecs, p_ions, structures) def get_pelecs_and_pions(self, convert_to_muC_per_cm2=False): """ Get the electronic and ionic dipole moments / polarizations. convert_to_muC_per_cm2: Convert from electron * Angstroms to microCoulomb per centimeter**2 """ if not convert_to_muC_per_cm2: return self.p_elecs, self.p_ions if convert_to_muC_per_cm2: p_elecs = self.p_elecs.T p_ions = self.p_ions.T volumes = [s.lattice.volume for s in self.structures] e_to_muC = -1.6021766e-13 cm2_to_A2 = 1e16 units = 1.0 / np.array(volumes) units *= e_to_muC * cm2_to_A2 p_elecs = np.matmul(units, p_elecs) p_ions = np.matmul(units, p_ions) p_elecs, p_ions = p_elecs.T, p_ions.T return p_elecs, p_ions return None def get_same_branch_polarization_data(self, convert_to_muC_per_cm2=True, all_in_polar=True): r""" Get same branch dipole moment (convert_to_muC_per_cm2=False) or polarization for given polarization data (convert_to_muC_per_cm2=True). Polarization is a lattice vector, meaning it is only defined modulo the quantum of polarization: P = P_0 + \\sum_i \\frac{n_i e R_i}{\\Omega} where n_i is an integer, e is the charge of the electron in microCoulombs, R_i is a lattice vector, and \\Omega is the unit cell volume in cm**3 (giving polarization units of microCoulomb per centimeter**2). The quantum of the dipole moment in electron Angstroms (as given by VASP) is: \\sum_i n_i e R_i where e, the electron charge, is 1 and R_i is a lattice vector, and n_i is an integer. Given N polarization calculations in order from nonpolar to polar, this algorithm minimizes the distance between adjacent polarization images. To do this, it constructs a polarization lattice for each polarization calculation using the pymatgen.core.structure class and calls the get_nearest_site method to find the image of a given polarization lattice vector that is closest to the previous polarization lattice vector image. Note, using convert_to_muC_per_cm2=True and all_in_polar=True calculates the "proper polarization" (meaning the change in polarization does not depend on the choice of polarization branch) while convert_to_muC_per_cm2=True and all_in_polar=False calculates the "improper polarization" (meaning the change in polarization does depend on the choice of branch). As one might guess from the names. We recommend calculating the "proper polarization". 
convert_to_muC_per_cm2: convert polarization from electron * Angstroms to microCoulomb per centimeter**2 all_in_polar: convert polarization to be in polar (final structure) polarization lattice """ p_elec, p_ion = self.get_pelecs_and_pions() p_tot = p_elec + p_ion p_tot = np.array(p_tot) lattices = [s.lattice for s in self.structures] volumes = np.array([s.lattice.volume for s in self.structures]) L = len(p_elec) e_to_muC = -1.6021766e-13 cm2_to_A2 = 1e16 units = 1.0 / np.array(volumes) units *= e_to_muC * cm2_to_A2 # convert polarizations and lattice lengths prior to adjustment if convert_to_muC_per_cm2 and not all_in_polar: # Convert the total polarization p_tot = np.multiply(units.T[:, np.newaxis], p_tot) # adjust lattices for i in range(L): lattice = lattices[i] l = lattice.lengths a = lattice.angles lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[i]), *a) # convert polarizations to polar lattice elif convert_to_muC_per_cm2 and all_in_polar: abc = [lattice.abc for lattice in lattices] abc = np.array(abc) # [N, 3] p_tot /= abc # e * Angstroms to e p_tot *= abc[-1] / volumes[-1] * e_to_muC * cm2_to_A2 # to muC / cm^2 for i in range(L): lattice = lattices[-1] # Use polar lattice l = lattice.lengths a = lattice.angles # Use polar units (volume) lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[-1]), *a) d_structs = [] sites = [] for i in range(L): l = lattices[i] frac_coord = np.divide(np.array([p_tot[i]]), np.array([l.a, l.b, l.c])) d = PolarizationLattice(l, ["C"], [np.array(frac_coord).ravel()]) d_structs.append(d) site = d[0] if i == 0: # Adjust nonpolar polarization to be closest to zero. # This is compatible with both a polarization of zero or a half quantum. prev_site = [0, 0, 0] else: prev_site = sites[-1].coords new_site = d.get_nearest_site(prev_site, site) sites.append(new_site[0]) adjust_pol = [] for s, d in zip(sites, d_structs): l = d.lattice adjust_pol.append(np.multiply(s.frac_coords, np.array([l.a, l.b, l.c])).ravel()) adjust_pol = np.array(adjust_pol) return adjust_pol def get_lattice_quanta(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Returns the dipole / polarization quanta along a, b, and c for all structures. """ lattices = [s.lattice for s in self.structures] volumes = np.array([s.lattice.volume for s in self.structures]) L = len(self.structures) e_to_muC = -1.6021766e-13 cm2_to_A2 = 1e16 units = 1.0 / np.array(volumes) units *= e_to_muC * cm2_to_A2 # convert polarizations and lattice lengths prior to adjustment if convert_to_muC_per_cm2 and not all_in_polar: # adjust lattices for i in range(L): lattice = lattices[i] l = lattice.lengths a = lattice.angles lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[i]), *a) elif convert_to_muC_per_cm2 and all_in_polar: for i in range(L): lattice = lattices[-1] l = lattice.lengths a = lattice.angles lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[-1]), *a) quanta = np.array([np.array(l.lengths) for l in lattices]) return quanta def get_polarization_change(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Get difference between nonpolar and polar same branch polarization. 
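
        Example (a sketch; assumes outcars and structures are ordered nonpolar
        to polar, as described in the class docstring):

            pol = Polarization.from_outcars_and_structures(outcars, structures)
            dP = pol.get_polarization_change()  # shape [1, 3], muC / cm**2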
""" tot = self.get_same_branch_polarization_data( convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar ) # reshape to preserve backwards compatibility due to changes # when switching from np.matrix to np.array return (tot[-1] - tot[0]).reshape((1, 3)) def get_polarization_change_norm(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Get magnitude of difference between nonpolar and polar same branch polarization. """ polar = self.structures[-1] a, b, c = polar.lattice.matrix a, b, c = a / np.linalg.norm(a), b / np.linalg.norm(b), c / np.linalg.norm(c) P = self.get_polarization_change( convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar ).ravel() P_norm = np.linalg.norm(a * P[0] + b * P[1] + c * P[2]) return P_norm def same_branch_splines(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Fit splines to same branch polarization. This is used to assess any jumps in the same branch polarizaiton. """ from scipy.interpolate import UnivariateSpline tot = self.get_same_branch_polarization_data( convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar ) L = tot.shape[0] try: sp_a = UnivariateSpline(range(L), tot[:, 0].ravel()) except Exception: sp_a = None try: sp_b = UnivariateSpline(range(L), tot[:, 1].ravel()) except Exception: sp_b = None try: sp_c = UnivariateSpline(range(L), tot[:, 2].ravel()) except Exception: sp_c = None return sp_a, sp_b, sp_c def max_spline_jumps(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Get maximum difference between spline and same branch polarization data. """ tot = self.get_same_branch_polarization_data( convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar ) sps = self.same_branch_splines(convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar) max_jumps = [None, None, None] for i, sp in enumerate(sps): if sp is not None: max_jumps[i] = max(tot[:, i].ravel() - sp(range(len(tot[:, i].ravel())))) return max_jumps def smoothness(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Get rms average difference between spline and same branch polarization data. """ tot = self.get_same_branch_polarization_data( convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar ) L = tot.shape[0] try: sp = self.same_branch_splines(convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar) except Exception: print("Something went wrong.") return None sp_latt = [sp[i](range(L)) for i in range(3)] diff = [sp_latt[i] - tot[:, i].ravel() for i in range(3)] rms = [np.sqrt(np.sum(np.square(diff[i])) / L) for i in range(3)] return rms class EnergyTrend: """ Class for fitting trends to energies. """ def __init__(self, energies): """ :param energies: Energies """ self.energies = energies def spline(self): """ Fit spline to energy trend data. """ from scipy.interpolate import UnivariateSpline sp = UnivariateSpline(range(len(self.energies)), self.energies, k=4) return sp def smoothness(self): """ Get rms average difference between spline and energy trend. """ energies = self.energies try: sp = self.spline() except Exception: print("Energy spline failed.") return None spline_energies = sp(range(len(energies))) diff = spline_energies - energies rms = np.sqrt(np.sum(np.square(diff)) / len(energies)) return rms def max_spline_jump(self): """ Get maximum difference between spline and energy trend. 
""" sp = self.spline() return max(self.energies - sp(range(len(self.energies)))) def endpoints_minima(self, slope_cutoff=5e-3): """ Test if spline endpoints are at minima for a given slope cutoff. """ energies = self.energies try: sp = self.spline() except Exception: print("Energy spline failed.") return None der = sp.derivative() der_energies = der(range(len(energies))) return { "polar": abs(der_energies[-1]) <= slope_cutoff, "nonpolar": abs(der_energies[0]) <= slope_cutoff, }
gmatteo/pymatgen
pymatgen/analysis/ferroelectricity/polarization.py
Python
mit
19,826
[ "CRYSTAL", "VASP", "pymatgen" ]
1d30bb5d26ea77e67c47e3a3b79b38767dd8ac53f00289dcdc4d997606c9400d
#!C:\Python33\python.exe -u
# -*- coding: UTF-8 -*-

# enable debugging
import cgi
import cgitb
cgitb.enable()
import struct
import array
import uuid
import mysql.connector
from mysql.connector import errorcode
import configparser
import re
import http.cookies
import os


def getCharsAndDeaths():
    chars = []
    form = cgi.FieldStorage()
    if "dsSaveFile" not in form:
        print("No save file input. You can either <a href='/index.html'>submit a save file</a> or <a href='stats.py'>view other players' stats.</a><br>")
    else:
        saveFile = form["dsSaveFile"]
        if saveFile.file and saveFile.done != -1 and len(saveFile.value) == 4330480:
            saveFile.file.seek(0x2c0, 0)
            for slot in range(0, 10):
                saveFile.file.seek(0x100, 1)
                name = saveFile.file.read(32)
                # name is bytes in Python 3, so compare against a bytes literal
                if name[:1] != b'\x00':
                    saveFile.file.seek(-0x120, 1)
                    saveFile.file.seek(0x1f128, 1)
                    deaths = saveFile.file.read(4)
                    saveFile.file.seek(-0x04, 1)
                    saveFile.file.seek(-0x1f128, 1)
                    charName = name.decode('utf-16').split('\00')[0]
                    charDeaths = struct.unpack('i', deaths)[0]
                    if charName != "":
                        chars.append([charName, charDeaths])
                else:
                    saveFile.file.seek(-0x120, 1)
                saveFile.file.seek(0x60190, 1)
        else:
            print("Did not input a valid file.")
    return chars

# get DB connection info
config = configparser.ConfigParser()
config.read('../conf/settings.ini')
dbInfo = config['db']

# connect to DB
playerIDCharacterList = []
try:
    dbConn = mysql.connector.connect(user=dbInfo['user'], password=dbInfo['password'], host=dbInfo['host'], database=dbInfo['database'])
    cursor = dbConn.cursor()
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        print("Error: could not connect to the database")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        print("Error: Database does not exist")
    else:
        print("Error: " + str(err))
else:
    # get list of characters for this player
    cookieEnvVar = 'HTTP_COOKIE'
    if cookieEnvVar in os.environ:
        cookieString = os.environ.get(cookieEnvVar)
        c = http.cookies.SimpleCookie(cookieString)
        utmaName = '__utma'
        if utmaName in c:
            utma = c[utmaName].value
            # just get the first 3 parts of utma: domain hash, unique ID, initial visit
            match = re.match(r"^\d+\.\d+\.\d+", utma)
            if match:
                playerId = match.string[match.start():match.end()]
                # use a parameterized query to avoid SQL injection
                doesCharacterExistInDbQuery = "select charactername from characters where playerid = %s"
                cursor.execute(doesCharacterExistInDbQuery, (playerId,))
                for character in cursor:
                    playerIDCharacterList.append(character[0])
    dbConn.close()

print("Content-Type: text/html")
print("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
    <noscript>
        <p>Sorry, we can't analyze your stats without scripting enabled.</p>
    </noscript>
    <title>Dark Souls death counter - Results</title>
    <link rel="stylesheet" type="text/css" href="../styles.css" />
    <style type="text/css">
        html {
            background-image:url(../images/BG_torchbastards_fade.jpg);
        }
    </style>
</head>
<body>
    <div class="header">
    </div>
    <div class="wrapper">
        <div class="container">
            <div class="navbar">
                <ul>
                    <li><a href="../index.html">Home</a></li>
                    <li><a href="stats.py">Stats</a></li>
                    <li><a href="../about.html">About</a></li>
                </ul>
            </div>
            <div class="content">
                <p>Here are your results!
Click the link next to one of your characters to refine your stats.</p> <table id="table-results">""") charList = getCharsAndDeaths() for x in range(0, len(charList)): name = charList[x][0] buttonText = "Stats for this character" if name in playerIDCharacterList: buttonText = "Update this character" print(""" <tr id="char{x}" class="tr-char" style='display: block;'> <td>Character: <span id="span-char{x}">{name}</span></td> <td>Total deaths: <span id="span-deaths{x}">{deaths}</span></td> <td><form id="saveFile" method="POST" action="submit.py" target="_blank" > <input type="hidden" name="name" value="{name}" /> <input type="hidden" name="deaths" value="{deaths}" /> <input type="submit" id="submit" value="{buttonText}" /> </form> </tr>""".format(x=x, name=name, deaths=charList[x][1], buttonText=buttonText)) print(""" </table> </div> </div> </div> </body> <script type="text/javascript"> var _gaq = _gaq || []; _gaq.push(['_setAccount', 'UA-47846181-1']); _gaq.push(['_trackPageview']); (function() { var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true; ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s); })(); </script> </html> """)
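
# Example of the 4-byte death-counter decode used in getCharsAndDeaths()
# above (a sketch; the byte string is made up, and struct's 'i' uses native
# byte order, which is little-endian on the x86 hosts this CGI targets):
#
#     >>> struct.unpack('i', b'\x2a\x00\x00\x00')[0]
#     42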
RKYates/Dark-Souls-Death-Count-cgi-page
cgi-bin/results.py
Python
gpl-3.0
4,756
[ "VisIt" ]
6d211f65f090ce252fa4964c71da4a4322166810295c7657d70d80944217d659
""" Mask R-CNN The main Mask R-CNN model implementation. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import os import random import datetime import re import math import logging from collections import OrderedDict import multiprocessing import numpy as np import tensorflow as tf #import tensorflow.keras as keras #import tensorflow.keras.backend as K #import tensorflow.keras.layers as KL #import tensorflow.python.keras.engine as KE #import tensorflow.keras.models as KM import keras import keras.backend as K import keras.layers as KL import keras.engine as KE import keras.models as KM from ..mrcnn import utils # Requires TensorFlow 1.3+ and Keras 2.0.8+. from distutils.version import LooseVersion assert LooseVersion(tf.__version__) >= LooseVersion("1.3") assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8') ############################################################ # Utility Functions ############################################################ def log(text, array=None): """Prints a text message. And, optionally, if a Numpy array is provided it prints it's shape, min, and max values. """ if array is not None: text = text.ljust(25) text += ("shape: {:20} ".format(str(array.shape))) if array.size: text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max())) else: text += ("min: {:10} max: {:10}".format("","")) text += " {}".format(array.dtype) print(text) class BatchNorm(KL.BatchNormalization): """Extends the Keras BatchNormalization class to allow a central place to make changes if needed. Batch normalization has a negative effect on training if batches are small so this layer is often frozen (via setting in Config class) and functions as linear layer. """ def call(self, inputs, training=None): """ Note about training values: None: Train BN layers. This is the normal mode False: Freeze BN layers. Good when batch size is small True: (don't use). Set layer in training mode even when making inferences """ return super(self.__class__, self).call(inputs, training=training) def compute_backbone_shapes(config, image_shape): """Computes the width and height of each stage of the backbone network. Returns: [N, (height, width)]. Where N is the number of stages """ if callable(config.BACKBONE): return config.COMPUTE_BACKBONE_SHAPE(image_shape) # Currently supports ResNet only assert config.BACKBONE in ["resnet50", "resnet101"] return np.array( [[int(math.ceil(image_shape[0] / stride)), int(math.ceil(image_shape[1] / stride))] for stride in config.BACKBONE_STRIDES]) ############################################################ # Resnet Graph ############################################################ # Code adopted from: # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py def identity_block(input_tensor, kernel_size, filters, stage, block, use_bias=True, train_bn=True): """The identity_block is the block that has no conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names use_bias: Boolean. To use or not use a bias in conv layers. train_bn: Boolean. 
Train or freeze Batch Norm layers """ nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', use_bias=use_bias)(input_tensor) x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn) x = KL.Add()([x, input_tensor]) x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x) return x def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), use_bias=True, train_bn=True): """conv_block is the block that has a conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names use_bias: Boolean. To use or not use a bias in conv layers. train_bn: Boolean. Train or freeze Batch Norm layers Note that from stage 3, the first conv layer at main path is with subsample=(2,2) And the shortcut should have subsample=(2,2) as well """ nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = KL.Conv2D(nb_filter1, (1, 1), strides=strides, name=conv_name_base + '2a', use_bias=use_bias)(input_tensor) x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn) shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides, name=conv_name_base + '1', use_bias=use_bias)(input_tensor) shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn) x = KL.Add()([x, shortcut]) x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x) return x def resnet_graph(input_image, architecture, stage5=False, train_bn=True): """Build a ResNet graph. architecture: Can be resnet50 or resnet101 stage5: Boolean. If False, stage5 of the network is not created train_bn: Boolean. 
Train or freeze Batch Norm layers """ assert architecture in ["resnet50", "resnet101"] # Stage 1 x = KL.ZeroPadding2D((3, 3))(input_image) x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x) x = BatchNorm(name='bn_conv1')(x, training=train_bn) x = KL.Activation('relu')(x) C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x) # Stage 2 x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn) C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn) # Stage 3 x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn) x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn) x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn) C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn) # Stage 4 x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn) block_count = {"resnet50": 5, "resnet101": 22}[architecture] for i in range(block_count): x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn) C4 = x # Stage 5 if stage5: x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn) x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn) C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn) else: C5 = None return [C1, C2, C3, C4, C5] ############################################################ # Proposal Layer ############################################################ def apply_box_deltas_graph(boxes, deltas): """Applies the given deltas to the given boxes. boxes: [N, (y1, x1, y2, x2)] boxes to update deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply """ # Convert to y, x, h, w height = boxes[:, 2] - boxes[:, 0] width = boxes[:, 3] - boxes[:, 1] center_y = boxes[:, 0] + 0.5 * height center_x = boxes[:, 1] + 0.5 * width # Apply deltas center_y += deltas[:, 0] * height center_x += deltas[:, 1] * width height *= tf.exp(deltas[:, 2]) width *= tf.exp(deltas[:, 3]) # Convert back to y1, x1, y2, x2 y1 = center_y - 0.5 * height x1 = center_x - 0.5 * width y2 = y1 + height x2 = x1 + width result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out") return result def clip_boxes_graph(boxes, window): """ boxes: [N, (y1, x1, y2, x2)] window: [4] in the form y1, x1, y2, x2 """ # Split wy1, wx1, wy2, wx2 = tf.split(window, 4) y1, x1, y2, x2 = tf.split(boxes, 4, axis=1) # Clip y1 = tf.maximum(tf.minimum(y1, wy2), wy1) x1 = tf.maximum(tf.minimum(x1, wx2), wx1) y2 = tf.maximum(tf.minimum(y2, wy2), wy1) x2 = tf.maximum(tf.minimum(x2, wx2), wx1) clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes") clipped.set_shape((clipped.shape[0], 4)) return clipped class ProposalLayer(KE.Layer): """Receives anchor scores and selects a subset to pass as proposals to the second stage. Filtering is done based on anchor scores and non-max suppression to remove overlaps. It also applies bounding box refinement deltas to anchors. 
    Inputs:
        rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates

    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """

    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold

    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]

        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                            self.config.IMAGES_PER_GPU,
                                            names=["pre_nms_anchors"])

        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])

        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])

        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.

        # Non-max suppression
        def nms(boxes, scores):
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad if needed
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals

    def compute_output_shape(self, input_shape):
        return (None, self.proposal_count, 4)


############################################################
#  ROIAlign Layer
############################################################

def log2_graph(x):
    """Implementation of Log2. TF doesn't have a native implementation."""
    return tf.log(x) / tf.log(2.0)


class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - feature_maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in the pool_shape in the layer
    constructor.
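
    Example of the level assignment done in call() (a sketch): with a
    1024x1024 input image, a 112x112 ROI gives
    roi_level = log2((112/1024) / (224/1024)) = -1, so the ROI is pooled
    from pyramid level 4 + (-1) = 3, i.e. feature map P3.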
""" def __init__(self, pool_shape, **kwargs): super(PyramidROIAlign, self).__init__(**kwargs) self.pool_shape = tuple(pool_shape) def call(self, inputs): # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords boxes = inputs[0] # Image meta # Holds details about the image. See compose_image_meta() image_meta = inputs[1] # Feature Maps. List of feature maps from different level of the # feature pyramid. Each is [batch, height, width, channels] feature_maps = inputs[2:] # Assign each ROI to a level in the pyramid based on the ROI area. y1, x1, y2, x2 = tf.split(boxes, 4, axis=2) h = y2 - y1 w = x2 - x1 # Use shape of first image. Images in a batch must have the same size. image_shape = parse_image_meta_graph(image_meta)['image_shape'][0] # Equation 1 in the Feature Pyramid Networks paper. Account for # the fact that our coordinates are normalized here. # e.g. a 224x224 ROI (in pixels) maps to P4 image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32) roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area))) roi_level = tf.minimum(5, tf.maximum( 2, 4 + tf.cast(tf.round(roi_level), tf.int32))) roi_level = tf.squeeze(roi_level, 2) # Loop through levels and apply ROI pooling to each. P2 to P5. pooled = [] box_to_level = [] for i, level in enumerate(range(2, 6)): ix = tf.where(tf.equal(roi_level, level)) level_boxes = tf.gather_nd(boxes, ix) # Box indices for crop_and_resize. box_indices = tf.cast(ix[:, 0], tf.int32) # Keep track of which box is mapped to which level box_to_level.append(ix) # Stop gradient propogation to ROI proposals level_boxes = tf.stop_gradient(level_boxes) box_indices = tf.stop_gradient(box_indices) # Crop and Resize # From Mask R-CNN paper: "We sample four regular locations, so # that we can evaluate either max or average pooling. In fact, # interpolating only a single value at each bin center (without # pooling) is nearly as effective." # # Here we use the simplified approach of a single value per bin, # which is how it's done in tf.crop_and_resize() # Result: [batch * num_boxes, pool_height, pool_width, channels] pooled.append(tf.image.crop_and_resize( feature_maps[i], level_boxes, box_indices, self.pool_shape, method="bilinear")) # Pack pooled features into one tensor pooled = tf.concat(pooled, axis=0) # Pack box_to_level mapping into one array and add another # column representing the order of pooled boxes box_to_level = tf.concat(box_to_level, axis=0) box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1) box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range], axis=1) # Rearrange pooled features to match the order of the original boxes # Sort box_to_level by batch then box index # TF doesn't have a way to sort by two columns, so merge them and sort. sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1] ix = tf.nn.top_k(sorting_tensor, k=tf.shape( box_to_level)[0]).indices[::-1] ix = tf.gather(box_to_level[:, 2], ix) pooled = tf.gather(pooled, ix) # Re-add the batch dimension shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0) pooled = tf.reshape(pooled, shape) return pooled def compute_output_shape(self, input_shape): return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], ) ############################################################ # Detection Target Layer ############################################################ def overlaps_graph(boxes1, boxes2): """Computes IoU overlaps between two sets of boxes. boxes1, boxes2: [N, (y1, x1, y2, x2)]. """ # 1. 
Tile boxes2 and repeat boxes1. This allows us to compare # every boxes1 against every boxes2 without loops. # TF doesn't have an equivalent to np.repeat() so simulate it # using tf.tile() and tf.reshape. b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1), [1, 1, tf.shape(boxes2)[0]]), [-1, 4]) b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1]) # 2. Compute intersections b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1) b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1) y1 = tf.maximum(b1_y1, b2_y1) x1 = tf.maximum(b1_x1, b2_x1) y2 = tf.minimum(b1_y2, b2_y2) x2 = tf.minimum(b1_x2, b2_x2) intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0) # 3. Compute unions b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) union = b1_area + b2_area - intersection # 4. Compute IoU and reshape to [boxes1, boxes2] iou = intersection / union overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]]) return overlaps def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config): """Generates detection targets for one image. Subsamples proposals and generates target class IDs, bounding box deltas, and masks for each. Inputs: proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might be zero padded if there are not enough proposals. gt_class_ids: [MAX_GT_INSTANCES] int class IDs gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates. gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type. Returns: Target ROIs and corresponding class IDs, bounding box shifts, and masks. rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded. deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))] masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox boundaries and resized to neural network output size. Note: Returned arrays might be zero padded if not enough target ROIs. """ # Assertions asserts = [ tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals], name="roi_assertion"), ] with tf.control_dependencies(asserts): proposals = tf.identity(proposals) # Remove zero padding proposals, _ = trim_zeros_graph(proposals, name="trim_proposals") gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes") gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros, name="trim_gt_class_ids") gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2, name="trim_gt_masks") # Handle COCO crowds # A crowd box in COCO is a bounding box around several instances. Exclude # them from training. A crowd box is given a negative class ID. crowd_ix = tf.where(gt_class_ids < 0)[:, 0] non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0] crowd_boxes = tf.gather(gt_boxes, crowd_ix) gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix) gt_boxes = tf.gather(gt_boxes, non_crowd_ix) gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2) # Compute overlaps matrix [proposals, gt_boxes] overlaps = overlaps_graph(proposals, gt_boxes) # Compute overlaps with crowd boxes [proposals, crowd_boxes] crowd_overlaps = overlaps_graph(proposals, crowd_boxes) crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1) no_crowd_bool = (crowd_iou_max < 0.001) # Determine positive and negative ROIs roi_iou_max = tf.reduce_max(overlaps, axis=1) # 1. Positive ROIs are those with >= 0.5 IoU with a GT box positive_roi_bool = (roi_iou_max >= 0.5) positive_indices = tf.where(positive_roi_bool)[:, 0] # 2. 
Negative ROIs are those with < 0.5 with every GT box. Skip crowds. negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0] # Subsample ROIs. Aim for 33% positive # Positive ROIs positive_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO) positive_indices = tf.random_shuffle(positive_indices)[:positive_count] positive_count = tf.shape(positive_indices)[0] # Negative ROIs. Add enough to maintain positive:negative ratio. r = 1.0 / config.ROI_POSITIVE_RATIO negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count negative_indices = tf.random_shuffle(negative_indices)[:negative_count] # Gather selected ROIs positive_rois = tf.gather(proposals, positive_indices) negative_rois = tf.gather(proposals, negative_indices) # Assign positive ROIs to GT boxes. positive_overlaps = tf.gather(overlaps, positive_indices) roi_gt_box_assignment = tf.cond( tf.greater(tf.shape(positive_overlaps)[1], 0), true_fn = lambda: tf.argmax(positive_overlaps, axis=1), false_fn = lambda: tf.cast(tf.constant([]),tf.int64) ) roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment) roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment) # Compute bbox refinement for positive ROIs deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes) deltas /= config.BBOX_STD_DEV # Assign positive ROIs to GT masks # Permute masks to [N, height, width, 1] transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1) # Pick the right mask for each ROI roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment) # Compute mask targets boxes = positive_rois if config.USE_MINI_MASK: # Transform ROI coordinates from normalized image space # to normalized mini-mask space. y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1) gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1) gt_h = gt_y2 - gt_y1 gt_w = gt_x2 - gt_x1 y1 = (y1 - gt_y1) / gt_h x1 = (x1 - gt_x1) / gt_w y2 = (y2 - gt_y1) / gt_h x2 = (x2 - gt_x1) / gt_w boxes = tf.concat([y1, x1, y2, x2], 1) box_ids = tf.range(0, tf.shape(roi_masks)[0]) masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes, box_ids, config.MASK_SHAPE) # Remove the extra dimension from masks. masks = tf.squeeze(masks, axis=3) # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with # binary cross entropy loss. masks = tf.round(masks) # Append negative ROIs and pad bbox deltas and masks that # are not used for negative ROIs with zeros. rois = tf.concat([positive_rois, negative_rois], axis=0) N = tf.shape(negative_rois)[0] P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0) rois = tf.pad(rois, [(0, P), (0, 0)]) roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)]) roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)]) deltas = tf.pad(deltas, [(0, N + P), (0, 0)]) masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)]) return rois, roi_gt_class_ids, deltas, masks class DetectionTargetLayer(KE.Layer): """Subsamples proposals and generates target box refinement, class_ids, and masks for each. Inputs: proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might be zero padded if there are not enough proposals. gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs. gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates. gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type Returns: Target ROIs and corresponding class IDs, bounding box shifts, and masks. 
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        proposals = inputs[0]
        gt_class_ids = inputs[1]
        gt_boxes = inputs[2]
        gt_masks = inputs[3]

        # Slice the batch and run a graph for each slice
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        outputs = utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda w, x, y, z: detection_targets_graph(
                w, x, y, z, self.config),
            self.config.IMAGES_PER_GPU, names=names)
        return outputs

    def compute_output_shape(self, input_shape):
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, self.config.TRAIN_ROIS_PER_IMAGE),  # class_ids
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
             self.config.MASK_SHAPE[1])  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        return [None, None, None, None]


############################################################
#  Detection Layer
############################################################

def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2) in normalized coordinates. The part of the
            image that contains the actual image, excluding the padding.

    Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)]
        where coordinates are normalized.
    """
    # Class IDs per ROI
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)

    # TODO: Filter out boxes with zero area

    # Filter out background boxes
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]

    # Apply per-class NMS
    # 1.
Prepare variables pre_nms_class_ids = tf.gather(class_ids, keep) pre_nms_scores = tf.gather(class_scores, keep) pre_nms_rois = tf.gather(refined_rois, keep) unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0] def nms_keep_map(class_id): """Apply Non-Maximum Suppression on ROIs of the given class.""" # Indices of ROIs of the given class ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0] # Apply NMS class_keep = tf.image.non_max_suppression( tf.gather(pre_nms_rois, ixs), tf.gather(pre_nms_scores, ixs), max_output_size=config.DETECTION_MAX_INSTANCES, iou_threshold=config.DETECTION_NMS_THRESHOLD) # Map indices class_keep = tf.gather(keep, tf.gather(ixs, class_keep)) # Pad with -1 so returned tensors have the same shape gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0] class_keep = tf.pad(class_keep, [(0, gap)], mode='CONSTANT', constant_values=-1) # Set shape so map_fn() can infer result shape class_keep.set_shape([config.DETECTION_MAX_INSTANCES]) return class_keep # 2. Map over class IDs nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids, dtype=tf.int64) # 3. Merge results into one list, and remove -1 padding nms_keep = tf.reshape(nms_keep, [-1]) nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0]) # 4. Compute intersection between keep and nms_keep keep = tf.sets.set_intersection(tf.expand_dims(keep, 0), tf.expand_dims(nms_keep, 0)) keep = tf.sparse_tensor_to_dense(keep)[0] # Keep top detections roi_count = config.DETECTION_MAX_INSTANCES class_scores_keep = tf.gather(class_scores, keep) num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count) top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1] keep = tf.gather(keep, top_ids) # Arrange output as [N, (y1, x1, y2, x2, class_id, score)] # Coordinates are normalized. detections = tf.concat([ tf.gather(refined_rois, keep), tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis], tf.gather(class_scores, keep)[..., tf.newaxis] ], axis=1) # Pad with zeros if detections < DETECTION_MAX_INSTANCES gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0] detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT") return detections class DetectionLayer(KE.Layer): """Takes classified proposal boxes and their bounding box deltas and returns the final detection boxes. Returns: [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where coordinates are normalized. """ def __init__(self, config=None, **kwargs): super(DetectionLayer, self).__init__(**kwargs) self.config = config def call(self, inputs): rois = inputs[0] mrcnn_class = inputs[1] mrcnn_bbox = inputs[2] image_meta = inputs[3] # Get windows of images in normalized coordinates. Windows are the area # in the image that excludes the padding. # Use the shape of the first image in the batch to normalize the window # because we know that all images get resized to the same size. 
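        # (Sketch) For example, with a padded 1024x1024 input whose real
        # content occupies pixels 128..896 in each dimension, the window is
        # (128, 128, 896, 896) in pixels, i.e. approximately
        # (0.125, 0.125, 0.875, 0.875) once normalized below.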
m = parse_image_meta_graph(image_meta) image_shape = m['image_shape'][0] window = norm_boxes_graph(m['window'], image_shape[:2]) # Run detection refinement graph on each item in the batch detections_batch = utils.batch_slice( [rois, mrcnn_class, mrcnn_bbox, window], lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config), self.config.IMAGES_PER_GPU) # Reshape output # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in # normalized coordinates return tf.reshape( detections_batch, [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6]) def compute_output_shape(self, input_shape): return (None, self.config.DETECTION_MAX_INSTANCES, 6) ############################################################ # Region Proposal Network (RPN) ############################################################ def rpn_graph(feature_map, anchors_per_location, anchor_stride): """Builds the computation graph of Region Proposal Network. feature_map: backbone features [batch, height, width, depth] anchors_per_location: number of anchors per pixel in the feature map anchor_stride: Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2 (every other pixel). Returns: rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax) rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities. rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors. """ # TODO: check if stride of 2 causes alignment issues if the feature map # is not even. # Shared convolutional base of the RPN shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu', strides=anchor_stride, name='rpn_conv_shared')(feature_map) # Anchor Score. [batch, height, width, anchors per location * 2]. x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid', activation='linear', name='rpn_class_raw')(shared) # Reshape to [batch, anchors, 2] rpn_class_logits = KL.Lambda( lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x) # Softmax on last dimension of BG/FG. rpn_probs = KL.Activation( "softmax", name="rpn_class_xxx")(rpn_class_logits) # Bounding box refinement. [batch, H, W, anchors per location * depth] # where depth is [x, y, log(w), log(h)] x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid", activation='linear', name='rpn_bbox_pred')(shared) # Reshape to [batch, anchors, 4] rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x) return [rpn_class_logits, rpn_probs, rpn_bbox] def build_rpn_model(anchor_stride, anchors_per_location, depth): """Builds a Keras model of the Region Proposal Network. It wraps the RPN graph so it can be used multiple times with shared weights. anchors_per_location: number of anchors per pixel in the feature map anchor_stride: Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2 (every other pixel). depth: Depth of the backbone feature map. Returns a Keras Model object. The model outputs, when called, are: rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax) rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities. rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors. 
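
    Example (a sketch; the argument values mirror common config defaults but
    are assumptions here):

        rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
        # feature_map: [batch, H, W, 256]
        rpn_class_logits, rpn_probs, rpn_bbox = rpn([feature_map])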
""" input_feature_map = KL.Input(shape=[None, None, depth], name="input_rpn_feature_map") outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride) return KM.Model([input_feature_map], outputs, name="rpn_model") ############################################################ # Feature Pyramid Network Heads ############################################################ def fpn_classifier_graph(rois, feature_maps, image_meta, pool_size, num_classes, train_bn=True, fc_layers_size=1024): """Builds the computation graph of the feature pyramid network classifier and regressor heads. rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized coordinates. feature_maps: List of feature maps from different layers of the pyramid, [P2, P3, P4, P5]. Each has a different resolution. image_meta: [batch, (meta data)] Image details. See compose_image_meta() pool_size: The width of the square feature map generated from ROI Pooling. num_classes: number of classes, which determines the depth of the results train_bn: Boolean. Train or freeze Batch Norm layers fc_layers_size: Size of the 2 FC layers Returns: logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax) probs: [batch, num_rois, NUM_CLASSES] classifier probabilities bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to proposal boxes """ # ROI Pooling # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels] x = PyramidROIAlign([pool_size, pool_size], name="roi_align_classifier")([rois, image_meta] + feature_maps) # Two 1024 FC layers (implemented with Conv2D for consistency) x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"), name="mrcnn_class_conv1")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)), name="mrcnn_class_conv2")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn) x = KL.Activation('relu')(x) shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2), name="pool_squeeze")(x) # Classifier head mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes), name='mrcnn_class_logits')(shared) mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"), name="mrcnn_class")(mrcnn_class_logits) # BBox head # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))] x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'), name='mrcnn_bbox_fc')(shared) # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] s = K.int_shape(x) mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x) return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox def build_fpn_mask_graph(rois, feature_maps, image_meta, pool_size, num_classes, train_bn=True): """Builds the computation graph of the mask head of Feature Pyramid Network. rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized coordinates. feature_maps: List of feature maps from different layers of the pyramid, [P2, P3, P4, P5]. Each has a different resolution. image_meta: [batch, (meta data)] Image details. See compose_image_meta() pool_size: The width of the square feature map generated from ROI Pooling. num_classes: number of classes, which determines the depth of the results train_bn: Boolean. 
Train or freeze Batch Norm layers

    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
    """
    # ROI Pooling
    # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask")([rois, image_meta] + feature_maps)

    # Conv layers
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv3")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn3')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv4")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn4')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x


############################################################
#  Loss Functions
############################################################

def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.
    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    diff = K.abs(y_true - y_pred)
    less_than_one = K.cast(K.less(diff, 1.0), "float32")
    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
    return loss


def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
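    # For example (shapes illustrative, not from any config): with a batch of
    # two images that have 3 and 1 positive anchors respectively,
    # batch_counts below is [3, 1] and batch_pack_graph() concatenates
    # target_bbox[0, :3] and target_bbox[1, :1] into a [4, 4] tensor, matching
    # the 4 rows gathered into rpn_bbox above.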
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    loss = smooth_l1_loss(target_bbox, rpn_bbox)

    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    #       images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Compute the loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss


def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss


def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
                with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
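    # For example (shapes illustrative): target_masks [2, 100, 28, 28] becomes
    # [200, 28, 28] and pred_masks [2, 100, 28, 28, 81] becomes
    # [200, 28, 28, 81] before the transpose below.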
target_class_ids = K.reshape(target_class_ids, (-1,)) mask_shape = tf.shape(target_masks) target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3])) pred_shape = tf.shape(pred_masks) pred_masks = K.reshape(pred_masks, (-1, pred_shape[2], pred_shape[3], pred_shape[4])) # Permute predicted masks to [N, num_classes, height, width] pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2]) # Only positive ROIs contribute to the loss. And only # the class specific mask of each ROI. positive_ix = tf.where(target_class_ids > 0)[:, 0] positive_class_ids = tf.cast( tf.gather(target_class_ids, positive_ix), tf.int64) indices = tf.stack([positive_ix, positive_class_ids], axis=1) # Gather the masks (predicted and true) that contribute to loss y_true = tf.gather(target_masks, positive_ix) y_pred = tf.gather_nd(pred_masks, indices) # Compute binary cross entropy. If no positive ROIs, then return 0. # shape: [batch, roi, num_classes] loss = K.switch(tf.size(y_true) > 0, K.binary_crossentropy(target=y_true, output=y_pred), tf.constant(0.0)) loss = K.mean(loss) return loss ############################################################ # Data Generator ############################################################ def load_image_gt(dataset, config, image_id, augment=False, augmentation=None, use_mini_mask=False): """Load and return ground truth data for an image (image, mask, bounding boxes). augment: (deprecated. Use augmentation instead). If true, apply random image augmentation. Currently, only horizontal flipping is offered. augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. For example, passing imgaug.augmenters.Fliplr(0.5) flips images right/left 50% of the time. use_mini_mask: If False, returns full-size masks that are the same height and width as the original image. These can be big, for example 1024x1024x100 (for 100 instances). Mini masks are smaller, typically, 224x224 and are generated by extracting the bounding box of the object and resizing it to MINI_MASK_SHAPE. Returns: image: [height, width, 3] shape: the original shape of the image before resizing and cropping. class_ids: [instance_count] Integer class IDs bbox: [instance_count, (y1, x1, y2, x2)] mask: [height, width, instance_count]. The height and width are those of the image unless use_mini_mask is True, in which case they are defined in MINI_MASK_SHAPE. """ # Load image and mask image = dataset.load_image(image_id) mask, class_ids = dataset.load_mask(image_id) original_shape = image.shape image, window, scale, padding, crop = utils.resize_image( image, min_dim=config.IMAGE_MIN_DIM, min_scale=config.IMAGE_MIN_SCALE, max_dim=config.IMAGE_MAX_DIM, mode=config.IMAGE_RESIZE_MODE) mask = utils.resize_mask(mask, scale, padding, crop) # Random horizontal flips. # TODO: will be removed in a future update in favor of augmentation if augment: logging.warning("'augment' is deprecated. 
Use 'augmentation' instead.") if random.randint(0, 1): image = np.fliplr(image) mask = np.fliplr(mask) # Augmentation # This requires the imgaug lib (https://github.com/aleju/imgaug) if augmentation: import imgaug # Augmenters that are safe to apply to masks # Some, such as Affine, have settings that make them unsafe, so always # test your augmentation on masks MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud", "CropAndPad", "Affine", "PiecewiseAffine"] def hook(images, augmenter, parents, default): """Determines which augmenters to apply to masks.""" return augmenter.__class__.__name__ in MASK_AUGMENTERS # Store shapes before augmentation to compare image_shape = image.shape mask_shape = mask.shape # Make augmenters deterministic to apply similarly to images and masks det = augmentation.to_deterministic() image = det.augment_image(image) # Change mask to np.uint8 because imgaug doesn't support np.bool mask = det.augment_image(mask.astype(np.uint8), hooks=imgaug.HooksImages(activator=hook)) # Verify that shapes didn't change assert image.shape == image_shape, "Augmentation shouldn't change image size" assert mask.shape == mask_shape, "Augmentation shouldn't change mask size" # Change mask back to bool mask = mask.astype(np.bool) # Note that some boxes might be all zeros if the corresponding mask got cropped out. # and here is to filter them out _idx = np.sum(mask, axis=(0, 1)) > 0 mask = mask[:, :, _idx] class_ids = class_ids[_idx] # Bounding boxes. Note that some boxes might be all zeros # if the corresponding mask got cropped out. # bbox: [num_instances, (y1, x1, y2, x2)] bbox = utils.extract_bboxes(mask) # Active classes # Different datasets have different classes, so track the # classes supported in the dataset of this image. active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32) source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]] active_class_ids[source_class_ids] = 1 # Resize masks to smaller size to reduce memory usage if use_mini_mask: mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE) # Image meta data image_meta = compose_image_meta(image_id, original_shape, image.shape, window, scale, active_class_ids) return image, image_meta, class_ids, bbox, mask def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config): """Generate targets for training Stage 2 classifier and mask heads. This is not used in normal training. It's useful for debugging or to train the Mask RCNN heads without using the RPN head. Inputs: rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes. gt_class_ids: [instance count] Integer class IDs gt_boxes: [instance count, (y1, x1, y2, x2)] gt_masks: [height, width, instance count] Ground truth masks. Can be full size or mini-masks. Returns: rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific bbox refinements. masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped to bbox boundaries and resized to neural network output size. 
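    Example (a sketch; the inputs are assumed to come from load_image_gt()
    and generate_random_rois() in this module):

        rpn_rois = generate_random_rois(image.shape, 256, gt_class_ids, gt_boxes)
        rois, roi_class_ids, bboxes, masks = build_detection_targets(
            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)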
""" assert rpn_rois.shape[0] > 0 assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format( gt_class_ids.dtype) assert gt_boxes.dtype == np.int32, "Expected int but got {}".format( gt_boxes.dtype) assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format( gt_masks.dtype) # It's common to add GT Boxes to ROIs but we don't do that here because # according to XinLei Chen's paper, it doesn't help. # Trim empty padding in gt_boxes and gt_masks parts instance_ids = np.where(gt_class_ids > 0)[0] assert instance_ids.shape[0] > 0, "Image must contain instances." gt_class_ids = gt_class_ids[instance_ids] gt_boxes = gt_boxes[instance_ids] gt_masks = gt_masks[:, :, instance_ids] # Compute areas of ROIs and ground truth boxes. rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \ (rpn_rois[:, 3] - rpn_rois[:, 1]) gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \ (gt_boxes[:, 3] - gt_boxes[:, 1]) # Compute overlaps [rpn_rois, gt_boxes] overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0])) for i in range(overlaps.shape[1]): gt = gt_boxes[i] overlaps[:, i] = utils.compute_iou( gt, rpn_rois, gt_box_area[i], rpn_roi_area) # Assign ROIs to GT boxes rpn_roi_iou_argmax = np.argmax(overlaps, axis=1) rpn_roi_iou_max = overlaps[np.arange( overlaps.shape[0]), rpn_roi_iou_argmax] # GT box assigned to each ROI rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax] rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax] # Positive ROIs are those with >= 0.5 IoU with a GT box. fg_ids = np.where(rpn_roi_iou_max > 0.5)[0] # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining) # TODO: To hard example mine or not to hard example mine, that's the question # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0] bg_ids = np.where(rpn_roi_iou_max < 0.5)[0] # Subsample ROIs. Aim for 33% foreground. # FG fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO) if fg_ids.shape[0] > fg_roi_count: keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False) else: keep_fg_ids = fg_ids # BG remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0] if bg_ids.shape[0] > remaining: keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False) else: keep_bg_ids = bg_ids # Combine indices of ROIs to keep keep = np.concatenate([keep_fg_ids, keep_bg_ids]) # Need more? remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0] if remaining > 0: # Looks like we don't have enough samples to maintain the desired # balance. Reduce requirements and fill in the rest. This is # likely different from the Mask RCNN paper. # There is a small chance we have neither fg nor bg samples. if keep.shape[0] == 0: # Pick bg regions with easier IoU threshold bg_ids = np.where(rpn_roi_iou_max < 0.5)[0] assert bg_ids.shape[0] >= remaining keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False) assert keep_bg_ids.shape[0] == remaining keep = np.concatenate([keep, keep_bg_ids]) else: # Fill the rest with repeated bg rois. keep_extra_ids = np.random.choice( keep_bg_ids, remaining, replace=True) keep = np.concatenate([keep, keep_extra_ids]) assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \ "keep doesn't match ROI batch size {}, {}".format( keep.shape[0], config.TRAIN_ROIS_PER_IMAGE) # Reset the gt boxes assigned to BG ROIs. rpn_roi_gt_boxes[keep_bg_ids, :] = 0 rpn_roi_gt_class_ids[keep_bg_ids] = 0 # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement. 
rois = rpn_rois[keep] roi_gt_boxes = rpn_roi_gt_boxes[keep] roi_gt_class_ids = rpn_roi_gt_class_ids[keep] roi_gt_assignment = rpn_roi_iou_argmax[keep] # Class-aware bbox deltas. [y, x, log(h), log(w)] bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.NUM_CLASSES, 4), dtype=np.float32) pos_ids = np.where(roi_gt_class_ids > 0)[0] bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement( rois[pos_ids], roi_gt_boxes[pos_ids, :4]) # Normalize bbox refinements bboxes /= config.BBOX_STD_DEV # Generate class-specific target masks masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES), dtype=np.float32) for i in pos_ids: class_id = roi_gt_class_ids[i] assert class_id > 0, "class id must be greater than 0" gt_id = roi_gt_assignment[i] class_mask = gt_masks[:, :, gt_id] if config.USE_MINI_MASK: # Create a mask placeholder, the size of the image placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool) # GT box gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id] gt_w = gt_x2 - gt_x1 gt_h = gt_y2 - gt_y1 # Resize mini mask to size of GT box placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \ np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool) # Place the mini batch in the placeholder class_mask = placeholder # Pick part of the mask and resize it y1, x1, y2, x2 = rois[i].astype(np.int32) m = class_mask[y1:y2, x1:x2] mask = utils.resize(m, config.MASK_SHAPE) masks[i, :, :, class_id] = mask return rois, roi_gt_class_ids, bboxes, masks def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config): """Given the anchors and GT boxes, compute overlaps and identify positive anchors and deltas to refine them to match their corresponding GT boxes. anchors: [num_anchors, (y1, x1, y2, x2)] gt_class_ids: [num_gt_boxes] Integer class IDs. gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)] Returns: rpn_match: [N] (int32) matches between anchors and GT boxes. 1 = positive anchor, -1 = negative anchor, 0 = neutral rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. """ # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32) # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))] rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4)) # Handle COCO crowds # A crowd box in COCO is a bounding box around several instances. Exclude # them from training. A crowd box is given a negative class ID. crowd_ix = np.where(gt_class_ids < 0)[0] if crowd_ix.shape[0] > 0: # Filter out crowds from ground truth class IDs and boxes non_crowd_ix = np.where(gt_class_ids > 0)[0] crowd_boxes = gt_boxes[crowd_ix] gt_class_ids = gt_class_ids[non_crowd_ix] gt_boxes = gt_boxes[non_crowd_ix] # Compute overlaps with crowd boxes [anchors, crowds] crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes) crowd_iou_max = np.amax(crowd_overlaps, axis=1) no_crowd_bool = (crowd_iou_max < 0.001) else: # All anchors don't intersect a crowd no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool) # Compute overlaps [num_anchors, num_gt_boxes] overlaps = utils.compute_overlaps(anchors, gt_boxes) # Match anchors to GT Boxes # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive. # If an anchor overlaps a GT box with IoU < 0.3 then it's negative. # Neutral anchors are those that don't match the conditions above, # and they don't influence the loss function. # However, don't keep any GT box unmatched (rare, but happens). 
Instead, # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU, match all of them.
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox


def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # Generate random ROIs around GT boxes (90% of count)
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # random boundaries
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])

        # To avoid generating boxes with zero area, we generate double what
        # we need and filter out the extra. If we get fewer valid boxes
        # than we need, we loop and try again.
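        # For example (numbers illustrative): with rois_per_box = 10 the loop
        # below draws 20 candidate (y1, y2) and (x1, x2) pairs, drops any pair
        # less than 1 pixel apart, and keeps the first 10 of each.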
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break

        # Sort on axis 1 to ensure y1 <= y2 and x1 <= x2, then stack
        # into y1, x1, y2, x2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure y1 <= y2 and x1 <= x2, then stack
    # into y1, x1, y2, x2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois


def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False,
                   no_augmentation_sources=None):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
        augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
        flips images right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
        network classifier and mask heads. Useful if training the Mask RCNN
        part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
    no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
        defined in the Dataset class.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width are those of the image unless use_mini_mask is True, in which case they are defined in MINI_MASK_SHAPE. outputs list: Usually empty in regular training. But if detection_targets is True then the outputs list contains target class_ids, bbox deltas, and masks. """ b = 0 # batch item index image_index = -1 image_ids = np.copy(dataset.image_ids) error_count = 0 no_augmentation_sources = no_augmentation_sources or [] # Anchors # [anchor_count, (y1, x1, y2, x2)] backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE) anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES, config.RPN_ANCHOR_RATIOS, backbone_shapes, config.BACKBONE_STRIDES, config.RPN_ANCHOR_STRIDE) # Keras requires a generator to run indefinitely. while True: try: # Increment index to pick next image. Shuffle if at the start of an epoch. image_index = (image_index + 1) % len(image_ids) if shuffle and image_index == 0: np.random.shuffle(image_ids) # Get GT bounding boxes and masks for image. image_id = image_ids[image_index] # If the image source is not to be augmented pass None as augmentation if dataset.image_info[image_id]['source'] in no_augmentation_sources: image, image_meta, gt_class_ids, gt_boxes, gt_masks = \ load_image_gt(dataset, config, image_id, augment=augment, augmentation=None, use_mini_mask=config.USE_MINI_MASK) else: image, image_meta, gt_class_ids, gt_boxes, gt_masks = \ load_image_gt(dataset, config, image_id, augment=augment, augmentation=augmentation, use_mini_mask=config.USE_MINI_MASK) # Skip images that have no instances. This can happen in cases # where we train on a subset of classes and the image doesn't # have any of the classes we care about. if not np.any(gt_class_ids > 0): continue # RPN Targets rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors, gt_class_ids, gt_boxes, config) # Mask R-CNN Targets if random_rois: rpn_rois = generate_random_rois( image.shape, random_rois, gt_class_ids, gt_boxes) if detection_targets: rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\ build_detection_targets( rpn_rois, gt_class_ids, gt_boxes, gt_masks, config) # Init batch arrays if b == 0: batch_image_meta = np.zeros( (batch_size,) + image_meta.shape, dtype=image_meta.dtype) batch_rpn_match = np.zeros( [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype) batch_rpn_bbox = np.zeros( [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype) batch_images = np.zeros( (batch_size,) + image.shape, dtype=np.float32) batch_gt_class_ids = np.zeros( (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32) batch_gt_boxes = np.zeros( (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32) batch_gt_masks = np.zeros( (batch_size, gt_masks.shape[0], gt_masks.shape[1], config.MAX_GT_INSTANCES), dtype=gt_masks.dtype) if random_rois: batch_rpn_rois = np.zeros( (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype) if detection_targets: batch_rois = np.zeros( (batch_size,) + rois.shape, dtype=rois.dtype) batch_mrcnn_class_ids = np.zeros( (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype) batch_mrcnn_bbox = np.zeros( (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype) batch_mrcnn_mask = np.zeros( (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype) # If more instances than fits in the array, sub-sample from them. 
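            # For example (numbers illustrative): with
            # config.MAX_GT_INSTANCES = 100 and an image containing 140
            # instances, 100 of them are sampled without replacement below.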
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match,
                          batch_rpn_bbox, batch_gt_class_ids, batch_gt_boxes,
                          batch_gt_masks]
                outputs = []

                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

                yield inputs, outputs

                # start a new batch
                b = 0

        except (GeneratorExit, KeyboardInterrupt):
            raise
        except:
            # Log it and skip the image
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise


############################################################
#  MaskRCNN Class
############################################################

class MaskRCNN():
    """Encapsulates the Mask RCNN model functionality.

    The actual Keras model is in the keras_model property.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)

    def build(self, mode, config):
        """Build Mask R-CNN architecture.
            input_shape: The shape of the input image.
            mode: Either "training" or "inference". The inputs and
                outputs of the model differ accordingly.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")

        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the head (stage 5), so we pick the 4th item in the list.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g.
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]] output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"] outputs = list(zip(*layer_outputs)) outputs = [KL.Concatenate(axis=1, name=n)(list(o)) for o, n in zip(outputs, output_names)] rpn_class_logits, rpn_class, rpn_bbox = outputs # Generate proposals # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates # and zero padded. proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\ else config.POST_NMS_ROIS_INFERENCE rpn_rois = ProposalLayer( proposal_count=proposal_count, nms_threshold=config.RPN_NMS_THRESHOLD, name="ROI", config=config)([rpn_class, rpn_bbox, anchors]) if mode == "training": # Class ID mask to mark class IDs supported by the dataset the image # came from. active_class_ids = KL.Lambda( lambda x: parse_image_meta_graph(x)["active_class_ids"] )(input_image_meta) if not config.USE_RPN_ROIS: # Ignore predicted ROIs and use ROIs provided as an input. input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4], name="input_roi", dtype=np.int32) # Normalize coordinates target_rois = KL.Lambda(lambda x: norm_boxes_graph( x, K.shape(input_image)[1:3]))(input_rois) else: target_rois = rpn_rois # Generate detection targets # Subsamples proposals and generates target outputs for training # Note that proposal class IDs, gt_boxes, and gt_masks are zero # padded. Equally, returned rois and targets are zero padded. rois, target_class_ids, target_bbox, target_mask =\ DetectionTargetLayer(config, name="proposal_targets")([ target_rois, input_gt_class_ids, gt_boxes, input_gt_masks]) # Network Heads # TODO: verify that this handles zero padded ROIs mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\ fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta, config.POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN, fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE) mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps, input_image_meta, config.MASK_POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN) # TODO: clean up (use tf.identify if necessary) output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois) # Losses rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")( [input_rpn_match, rpn_class_logits]) rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")( [input_rpn_bbox, input_rpn_match, rpn_bbox]) class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")( [target_class_ids, mrcnn_class_logits, active_class_ids]) bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")( [target_bbox, target_class_ids, mrcnn_bbox]) mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")( [target_mask, target_class_ids, mrcnn_mask]) # Model inputs = [input_image, input_image_meta, input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks] if not config.USE_RPN_ROIS: inputs.append(input_rois) outputs = [rpn_class_logits, rpn_class, rpn_bbox, mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, output_rois, rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss] model = KM.Model(inputs, outputs, name='mask_rcnn') else: # Network Heads # Proposal classifier and BBox regressor heads mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\ fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta, config.POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN, 
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE) # Detections # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in # normalized coordinates detections = DetectionLayer(config, name="mrcnn_detection")( [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta]) # Create masks for detections detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections) mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps, input_image_meta, config.MASK_POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN) model = KM.Model([input_image, input_image_meta, input_anchors], [detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, rpn_class, rpn_bbox], name='mask_rcnn') # Add multi-GPU support. if config.GPU_COUNT > 1: from mrcnn.parallel_model import ParallelModel model = ParallelModel(model, config.GPU_COUNT) return model def find_last(self): """Finds the last checkpoint file of the last trained model in the model directory. Returns: The path of the last checkpoint file """ # Get directory names. Each directory corresponds to a model dir_names = next(os.walk(self.model_dir))[1] key = self.config.NAME.lower() dir_names = filter(lambda f: f.startswith(key), dir_names) dir_names = sorted(dir_names) if not dir_names: import errno raise FileNotFoundError( errno.ENOENT, "Could not find model directory under {}".format(self.model_dir)) # Pick last directory dir_name = os.path.join(self.model_dir, dir_names[-1]) # Find the last checkpoint checkpoints = next(os.walk(dir_name))[2] checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints) checkpoints = sorted(checkpoints) if not checkpoints: import errno raise FileNotFoundError( errno.ENOENT, "Could not find weight files in {}".format(dir_name)) checkpoint = os.path.join(dir_name, checkpoints[-1]) return checkpoint def load_weights(self, filepath, by_name=False, exclude=None): """Modified version of the corresponding Keras function with the addition of multi-GPU support and the ability to exclude some layers from loading. exclude: list of layer names to exclude """ import h5py # Conditional import to support versions of Keras before 2.2 # TODO: remove in about 6 months (end of 2018) try: from keras.engine import saving except ImportError: # Keras before 2.2 used the 'topology' namespace. from keras.engine import topology as saving if exclude: by_name = True if h5py is None: raise ImportError('`load_weights` requires h5py.') f = h5py.File(filepath, mode='r') if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] # In multi-GPU training, we wrap the model. Get layers # of the inner model because they have the weights. keras_model = self.keras_model layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\ else keras_model.layers # Exclude some layers if exclude: layers = filter(lambda l: l.name not in exclude, layers) if by_name: saving.load_weights_from_hdf5_group_by_name(f, layers) else: saving.load_weights_from_hdf5_group(f, layers) if hasattr(f, 'close'): f.close() # Update the log directory self.set_log_dir(filepath) def get_imagenet_weights(self): """Downloads ImageNet trained weights from Keras. Returns path to weights file. 
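        Example (a sketch; assumes a freshly built model):

            model.load_weights(model.get_imagenet_weights(), by_name=True)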
""" from keras.utils.data_utils import get_file TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\ 'releases/download/v0.2/'\ 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models', md5_hash='a268eb855778b3df3c7506639542a6af') return weights_path def compile(self, learning_rate, momentum): """Gets the model ready for training. Adds losses, regularization, and metrics. Then calls the Keras compile() function. """ # Optimizer object optimizer = keras.optimizers.SGD( lr=learning_rate, momentum=momentum, clipnorm=self.config.GRADIENT_CLIP_NORM) # Add Losses # First, clear previously set losses to avoid duplication self.keras_model._losses = [] self.keras_model._per_input_losses = {} loss_names = [ "rpn_class_loss", "rpn_bbox_loss", "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"] for name in loss_names: layer = self.keras_model.get_layer(name) if layer.output in self.keras_model.losses: continue loss = ( tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.)) self.keras_model.add_loss(loss) # Add L2 Regularization # Skip gamma and beta weights of batch normalization layers. reg_losses = [ keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32) for w in self.keras_model.trainable_weights if 'gamma' not in w.name and 'beta' not in w.name] self.keras_model.add_loss(tf.add_n(reg_losses)) # Compile self.keras_model.compile( optimizer=optimizer, loss=[None] * len(self.keras_model.outputs)) # Add metrics for losses for name in loss_names: if name in self.keras_model.metrics_names: continue layer = self.keras_model.get_layer(name) self.keras_model.metrics_names.append(name) loss = ( tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.)) self.keras_model.metrics_tensors.append(loss) def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1): """Sets model layers as trainable if their names match the given regular expression. """ # Print message on the first call (but not on recursive calls) if verbose > 0 and keras_model is None: log("Selecting layers to train") keras_model = keras_model or self.keras_model # In multi-GPU training, we wrap the model. Get layers # of the inner model because they have the weights. layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\ else keras_model.layers for layer in layers: # Is the layer a model? if layer.__class__.__name__ == 'Model': print("In model: ", layer.name) self.set_trainable( layer_regex, keras_model=layer, indent=indent + 4) continue if not layer.weights: continue # Is it trainable? trainable = bool(re.fullmatch(layer_regex, layer.name)) # Update layer. If layer is a container, update inner layer. if layer.__class__.__name__ == 'TimeDistributed': layer.layer.trainable = trainable else: layer.trainable = trainable # Print trainable layer names if trainable and verbose > 0: log("{}{:20} ({})".format(" " * indent, layer.name, layer.__class__.__name__)) def set_log_dir(self, model_path=None): """Sets the model log directory and epoch counter. model_path: If None, or a format different from what this code uses then set a new log directory and start epochs from 0. Otherwise, extract the log directory and the epoch counter from the file name. 
""" # Set date and epoch counter as if starting a new model self.epoch = 0 now = datetime.datetime.now() # If we have a model path with date and epochs use them if model_path: # Continue from we left of. Get epoch and date from the file name # A sample model path might look like: # \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows) # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux) regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5" m = re.match(regex, model_path) if m: now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5))) # Epoch number in file is 1-based, and in Keras code it's 0-based. # So, adjust for that then increment by one to start from the next epoch self.epoch = int(m.group(6)) - 1 + 1 print('Re-starting from epoch %d' % self.epoch) # Directory for training logs self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format( self.config.NAME.lower(), now)) # Path to save after each epoch. Include placeholders that get filled by Keras. self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format( self.config.NAME.lower())) self.checkpoint_path = self.checkpoint_path.replace( "*epoch*", "{epoch:04d}") def train(self, train_dataset, val_dataset, learning_rate, epochs, layers, augmentation=None, custom_callbacks=None, no_augmentation_sources=None): """Train the model. train_dataset, val_dataset: Training and validation Dataset objects. learning_rate: The learning rate to train with epochs: Number of training epochs. Note that previous training epochs are considered to be done alreay, so this actually determines the epochs to train in total rather than in this particaular call. layers: Allows selecting wich layers to train. It can be: - A regular expression to match layer names to train - One of these predefined values: heads: The RPN, classifier and mask heads of the network all: All the layers 3+: Train Resnet stage 3 and up 4+: Train Resnet stage 4 and up 5+: Train Resnet stage 5 and up augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. For example, passing imgaug.augmenters.Fliplr(0.5) flips images right/left 50% of the time. You can pass complex augmentations as well. This augmentation applies 50% of the time, and when it does it flips images right/left half the time and adds a Gaussian blur with a random sigma in range 0 to 5. augmentation = imgaug.augmenters.Sometimes(0.5, [ imgaug.augmenters.Fliplr(0.5), imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0)) ]) custom_callbacks: Optional. Add custom callbacks to be called with the keras fit_generator method. Must be list of type keras.callbacks. no_augmentation_sources: Optional. List of sources to exclude for augmentation. A source is string that identifies a dataset and is defined in the Dataset class. """ assert self.mode == "training", "Create model in training mode." 
        # Pre-defined layer regular expressions
        layer_regex = {
            # all layers but the backbone
            "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # From a specific Resnet stage and up
            "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "1channel": r"(res1.*)|(bn1.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "rpn": r"(rpn\_.*)|(fpn\_.*)",
            "mrcnn": r"(mrcnn\_.*)",
            # All layers
            "all": ".*",
        }
        if layers in layer_regex.keys():
            layers = layer_regex[layers]

        # Data generators
        train_generator = data_generator(train_dataset, self.config, shuffle=True,
                                         augmentation=augmentation,
                                         batch_size=self.config.BATCH_SIZE,
                                         no_augmentation_sources=no_augmentation_sources)
        val_generator = data_generator(val_dataset, self.config, shuffle=True,
                                       batch_size=self.config.BATCH_SIZE)

        # Create log_dir if it does not exist
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # Callbacks
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0, write_graph=True, write_images=False),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            verbose=0, save_weights_only=True),
        ]

        # Add custom callbacks to the list
        if custom_callbacks:
            callbacks += custom_callbacks

        # Train
        log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
        log("Checkpoint Path: {}".format(self.checkpoint_path))
        self.set_trainable(layers)
        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)

        # Work-around for Windows: Keras fails on Windows when using
        # multiprocessing workers. See discussion here:
        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
            workers = 0
        else:
            workers = multiprocessing.cpu_count()

        self.keras_model.fit_generator(
            train_generator,
            initial_epoch=self.epoch,
            epochs=epochs,
            steps_per_epoch=self.config.STEPS_PER_EPOCH,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.VALIDATION_STEPS,
            max_queue_size=100,
            workers=workers,
            use_multiprocessing=True,
        )
        self.epoch = max(self.epoch, epochs)

    def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
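        Example (a sketch; image1 and image2 are assumed to be HxWx3 uint8
        arrays of possibly different sizes):

            molded_images, image_metas, windows = model.mold_inputs([image1, image2])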
""" molded_images = [] image_metas = [] windows = [] for image in images: # Resize image # TODO: move resizing to mold_image() molded_image, window, scale, padding, crop = utils.resize_image( image, min_dim=self.config.IMAGE_MIN_DIM, min_scale=self.config.IMAGE_MIN_SCALE, max_dim=self.config.IMAGE_MAX_DIM, mode=self.config.IMAGE_RESIZE_MODE) molded_image = mold_image(molded_image, self.config) # Build image_meta image_meta = compose_image_meta( 0, image.shape, molded_image.shape, window, scale, np.zeros([self.config.NUM_CLASSES], dtype=np.int32)) # Append molded_images.append(molded_image) windows.append(window) image_metas.append(image_meta) # Pack into arrays molded_images = np.stack(molded_images) image_metas = np.stack(image_metas) windows = np.stack(windows) return molded_images, image_metas, windows def unmold_detections(self, detections, mrcnn_mask, original_image_shape, image_shape, window): """Reformats the detections of one image from the format of the neural network output to a format suitable for use in the rest of the application. detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates mrcnn_mask: [N, height, width, num_classes] original_image_shape: [H, W, C] Original image shape before resizing image_shape: [H, W, C] Shape of the image after resizing and padding window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real image is excluding the padding. Returns: boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels class_ids: [N] Integer class IDs for each bounding box scores: [N] Float probability scores of the class_id masks: [height, width, num_instances] Instance masks """ # How many detections do we have? # Detections array is padded with zeros. Find the first class_id == 0. zero_ix = np.where(detections[:, 4] == 0)[0] N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0] # Extract boxes, class_ids, scores, and class-specific masks boxes = detections[:N, :4] class_ids = detections[:N, 4].astype(np.int32) scores = detections[:N, 5] masks = mrcnn_mask[np.arange(N), :, :, class_ids] # Translate normalized coordinates in the resized image to pixel # coordinates in the original image before resizing window = utils.norm_boxes(window, image_shape[:2]) wy1, wx1, wy2, wx2 = window shift = np.array([wy1, wx1, wy1, wx1]) wh = wy2 - wy1 # window height ww = wx2 - wx1 # window width scale = np.array([wh, ww, wh, ww]) # Convert boxes to normalized coordinates on the window boxes = np.divide(boxes - shift, scale) # Convert boxes to pixel coordinates on the original image boxes = utils.denorm_boxes(boxes, original_image_shape[:2]) # Filter out detections with zero area. Happens in early training when # network weights are still random exclude_ix = np.where( (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0] if exclude_ix.shape[0] > 0: boxes = np.delete(boxes, exclude_ix, axis=0) class_ids = np.delete(class_ids, exclude_ix, axis=0) scores = np.delete(scores, exclude_ix, axis=0) masks = np.delete(masks, exclude_ix, axis=0) N = class_ids.shape[0] # Resize masks to original image size and set boundary threshold. full_masks = [] for i in range(N): # Convert neural network mask to full size mask full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape) full_masks.append(full_mask) full_masks = np.stack(full_masks, axis=-1)\ if full_masks else np.empty(original_image_shape[:2] + (0,)) return boxes, class_ids, scores, full_masks def detect(self, images, verbose=0): """Runs the detection pipeline. 
images: List of images, potentially of different sizes. Returns a list of dicts, one dict per image. The dict contains: rois: [N, (y1, x1, y2, x2)] detection bounding boxes class_ids: [N] int class IDs scores: [N] float probability scores for the class IDs masks: [H, W, N] instance binary masks """ assert self.mode == "inference", "Create model in inference mode." assert len( images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE" if verbose: log("Processing {} images".format(len(images))) for image in images: log("image", image) # Mold inputs to format expected by the neural network molded_images, image_metas, windows = self.mold_inputs(images) # Validate image sizes # All images in a batch MUST be of the same size image_shape = molded_images[0].shape for g in molded_images[1:]: assert g.shape == image_shape,\ "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes." # Anchors anchors = self.get_anchors(image_shape) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) if verbose: log("molded_images", molded_images) log("image_metas", image_metas) log("anchors", anchors) # Run object detection detections, _, _, mrcnn_mask, _, _, _ =\ self.keras_model.predict([molded_images, image_metas, anchors], verbose=0) # Process detections results = [] for i, image in enumerate(images): final_rois, final_class_ids, final_scores, final_masks =\ self.unmold_detections(detections[i], mrcnn_mask[i], image.shape, molded_images[i].shape, windows[i]) results.append({ "rois": final_rois, "class_ids": final_class_ids, "scores": final_scores, "masks": final_masks, }) return results def detect_molded(self, molded_images, image_metas, verbose=0): """Runs the detection pipeline, but expect inputs that are molded already. Used mostly for debugging and inspecting the model. molded_images: List of images loaded using load_image_gt() image_metas: image meta data, also returned by load_image_gt() Returns a list of dicts, one dict per image. The dict contains: rois: [N, (y1, x1, y2, x2)] detection bounding boxes class_ids: [N] int class IDs scores: [N] float probability scores for the class IDs masks: [H, W, N] instance binary masks """ assert self.mode == "inference", "Create model in inference mode." assert len(molded_images) == self.config.BATCH_SIZE,\ "Number of images must be equal to BATCH_SIZE" if verbose: log("Processing {} images".format(len(molded_images))) for image in molded_images: log("image", image) # Validate image sizes # All images in a batch MUST be of the same size image_shape = molded_images[0].shape for g in molded_images[1:]: assert g.shape == image_shape, "Images must have the same size" # Anchors anchors = self.get_anchors(image_shape) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? 
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) if verbose: log("molded_images", molded_images) log("image_metas", image_metas) log("anchors", anchors) # Run object detection detections, _, _, mrcnn_mask, _, _, _ =\ self.keras_model.predict([molded_images, image_metas, anchors], verbose=0) # Process detections results = [] for i, image in enumerate(molded_images): window = [0, 0, image.shape[0], image.shape[1]] final_rois, final_class_ids, final_scores, final_masks =\ self.unmold_detections(detections[i], mrcnn_mask[i], image.shape, molded_images[i].shape, window) results.append({ "rois": final_rois, "class_ids": final_class_ids, "scores": final_scores, "masks": final_masks, }) return results def get_anchors(self, image_shape): """Returns anchor pyramid for the given image size.""" backbone_shapes = compute_backbone_shapes(self.config, image_shape) # Cache anchors and reuse if image shape is the same if not hasattr(self, "_anchor_cache"): self._anchor_cache = {} if not tuple(image_shape) in self._anchor_cache: # Generate Anchors a = utils.generate_pyramid_anchors( self.config.RPN_ANCHOR_SCALES, self.config.RPN_ANCHOR_RATIOS, backbone_shapes, self.config.BACKBONE_STRIDES, self.config.RPN_ANCHOR_STRIDE) # Keep a copy of the latest anchors in pixel coordinates because # it's used in inspect_model notebooks. # TODO: Remove this after the notebook are refactored to not use it self.anchors = a # Normalize coordinates self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2]) return self._anchor_cache[tuple(image_shape)] def ancestor(self, tensor, name, checked=None): """Finds the ancestor of a TF tensor in the computation graph. tensor: TensorFlow symbolic tensor. name: Name of ancestor tensor to find checked: For internal use. A list of tensors that were already searched to avoid loops in traversing the graph. """ checked = checked if checked is not None else [] # Put a limit on how deep we go to avoid very long loops if len(checked) > 500: return None # Convert name to a regex and allow matching a number prefix # because Keras adds them automatically if isinstance(name, str): name = re.compile(name.replace("/", r"(\_\d+)*/")) parents = tensor.op.inputs for p in parents: if p in checked: continue if bool(re.fullmatch(name, p.name)): return p checked.append(p) a = self.ancestor(p, name, checked) if a is not None: return a return None def find_trainable_layer(self, layer): """If a layer is encapsulated by another layer, this function digs through the encapsulation and returns the layer that holds the weights. """ if layer.__class__.__name__ == 'TimeDistributed': return self.find_trainable_layer(layer.layer) return layer def get_trainable_layers(self): """Returns a list of layers that have weights.""" layers = [] # Loop through all layers for l in self.keras_model.layers: # If layer is a wrapper, find inner trainable layer l = self.find_trainable_layer(l) # Include layer if it has weights if l.get_weights(): layers.append(l) return layers def run_graph(self, images, outputs, image_metas=None): """Runs a sub-set of the computation graph that computes the given outputs. image_metas: If provided, the images are assumed to be already molded (i.e. resized, padded, and normalized) outputs: List of tuples (name, tensor) to compute. The tensors are symbolic TensorFlow tensors and the names are for easy tracking. Returns an ordered dict of results. Keys are the names received in the input and values are Numpy arrays. 
""" model = self.keras_model # Organize desired outputs into an ordered dict outputs = OrderedDict(outputs) for o in outputs.values(): assert o is not None # Build a Keras function to run parts of the computation graph inputs = model.inputs if model.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] kf = K.function(model.inputs, list(outputs.values())) # Prepare inputs if image_metas is None: molded_images, image_metas, _ = self.mold_inputs(images) else: molded_images = images image_shape = molded_images[0].shape # Anchors anchors = self.get_anchors(image_shape) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) model_in = [molded_images, image_metas, anchors] # Run inference if model.uses_learning_phase and not isinstance(K.learning_phase(), int): model_in.append(0.) outputs_np = kf(model_in) # Pack the generated Numpy arrays into a a dict and log the results. outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)]) for k, v in outputs_np.items(): log(k, v) return outputs_np ############################################################ # Data Formatting ############################################################ def compose_image_meta(image_id, original_image_shape, image_shape, window, scale, active_class_ids): """Takes attributes of an image and puts them in one 1D array. image_id: An int ID of the image. Useful for debugging. original_image_shape: [H, W, C] before resizing or padding. image_shape: [H, W, C] after resizing and padding window: (y1, x1, y2, x2) in pixels. The area of the image where the real image is (excluding the padding) scale: The scaling factor applied to the original image (float32) active_class_ids: List of class_ids available in the dataset from which the image came. Useful if training on images from multiple datasets where not all classes are present in all datasets. """ meta = np.array( [image_id] + # size=1 list(original_image_shape) + # size=3 list(image_shape) + # size=3 list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates [scale] + # size=1 list(active_class_ids) # size=num_classes ) return meta def parse_image_meta(meta): """Parses an array that contains image attributes to its components. See compose_image_meta() for more details. meta: [batch, meta length] where meta length depends on NUM_CLASSES Returns a dict of the parsed values. """ image_id = meta[:, 0] original_image_shape = meta[:, 1:4] image_shape = meta[:, 4:7] window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels scale = meta[:, 11] active_class_ids = meta[:, 12:] return { "image_id": image_id.astype(np.int32), "original_image_shape": original_image_shape.astype(np.int32), "image_shape": image_shape.astype(np.int32), "window": window.astype(np.int32), "scale": scale.astype(np.float32), "active_class_ids": active_class_ids.astype(np.int32), } def parse_image_meta_graph(meta): """Parses a tensor that contains image attributes to its components. See compose_image_meta() for more details. meta: [batch, meta length] where meta length depends on NUM_CLASSES Returns a dict of the parsed tensors. 
""" image_id = meta[:, 0] original_image_shape = meta[:, 1:4] image_shape = meta[:, 4:7] window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels scale = meta[:, 11] active_class_ids = meta[:, 12:] return { "image_id": image_id, "original_image_shape": original_image_shape, "image_shape": image_shape, "window": window, "scale": scale, "active_class_ids": active_class_ids, } def mold_image(images, config): """Expects an RGB image (or array of images) and subtracts the mean pixel and converts it to float. Expects image colors in RGB order. """ return images.astype(np.float32) - config.MEAN_PIXEL def unmold_image(normalized_images, config): """Takes a image normalized with mold() and returns the original.""" return (normalized_images + config.MEAN_PIXEL).astype(np.uint8) ############################################################ # Miscellenous Graph Functions ############################################################ def trim_zeros_graph(boxes, name='trim_zeros'): """Often boxes are represented with matrices of shape [N, 4] and are padded with zeros. This removes zero boxes. boxes: [N, 4] matrix of boxes. non_zeros: [N] a 1D boolean mask identifying the rows to keep """ non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool) boxes = tf.boolean_mask(boxes, non_zeros, name=name) return boxes, non_zeros def batch_pack_graph(x, counts, num_rows): """Picks different number of values from each row in x depending on the values in counts. """ outputs = [] for i in range(num_rows): outputs.append(x[i, :counts[i]]) return tf.concat(outputs, axis=0) def norm_boxes_graph(boxes, shape): """Converts boxes from pixel coordinates to normalized coordinates. boxes: [..., (y1, x1, y2, x2)] in pixel coordinates shape: [..., (height, width)] in pixels Note: In pixel coordinates (y2, x2) is outside the box. But in normalized coordinates it's inside the box. Returns: [..., (y1, x1, y2, x2)] in normalized coordinates """ h, w = tf.split(tf.cast(shape, tf.float32), 2) scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0) shift = tf.constant([0., 0., 1., 1.]) return tf.divide(boxes - shift, scale) def denorm_boxes_graph(boxes, shape): """Converts boxes from normalized coordinates to pixel coordinates. boxes: [..., (y1, x1, y2, x2)] in normalized coordinates shape: [..., (height, width)] in pixels Note: In pixel coordinates (y2, x2) is outside the box. But in normalized coordinates it's inside the box. Returns: [..., (y1, x1, y2, x2)] in pixel coordinates """ h, w = tf.split(tf.cast(shape, tf.float32), 2) scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0) shift = tf.constant([0., 0., 1., 1.]) return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
agiovann/Constrained_NMF
caiman/source_extraction/volpy/mrcnn/model.py
Python
gpl-2.0
127,322
[ "Gaussian" ]
553a3c9517649d25aaa5434bd4c17bdc02cb18eb31b5214d61706d375f6451f0
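A minimal inference sketch for the detect() pipeline above. This is illustrative only: it assumes the matterport-style package layout (mrcnn.config.Config, mrcnn.model.MaskRCNN), a pre-trained weights file, and a local test image; the file names, class count, and log directory are assumptions, not part of the original module.

import skimage.io
from mrcnn.config import Config          # assumed package layout
from mrcnn import model as modellib      # the module shown above

class InferenceConfig(Config):
    NAME = "demo"
    NUM_CLASSES = 1 + 80                 # background + 80 classes (assumption)
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1                   # so BATCH_SIZE == 1

config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", config=config, model_dir="./logs")
model.load_weights("mask_rcnn_coco.h5", by_name=True)  # hypothetical weights file

image = skimage.io.imread("example.jpg")               # hypothetical image
results = model.detect([image], verbose=1)             # len(images) must equal BATCH_SIZE
r = results[0]
print(r["rois"].shape, r["class_ids"], r["scores"], r["masks"].shape)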
#!/usr/bin/env python

'''
Use the geomeTRIC library to optimize the molecular geometry.
'''

from pyscf import gto, scf
from pyscf.geomopt.geometric_solver import optimize

mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='ccpvdz')
mf = scf.RHF(mol)

#
# Geometry optimization for HF. There are two ways to invoke the geomeTRIC
# optimization.
#
# method 1: import the optimize function from pyscf.geomopt.geometric_solver
mol_eq = optimize(mf)
print(mol_eq.atom_coords())

# method 2: create the optimizer from the Gradients class
mol_eq = mf.Gradients().optimizer(solver='geomeTRIC').kernel()

#
# Geometry optimization for CASSCF
#
from pyscf import mcscf
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 4, 4)

conv_params = {
    'convergence_energy': 1e-4,  # Eh
    'convergence_grms': 3e-3,    # Eh/Bohr
    'convergence_gmax': 4.5e-3,  # Eh/Bohr
    'convergence_drms': 1.2e-2,  # Angstrom
    'convergence_dmax': 1.8e-2,  # Angstrom
}

# method 1
mol_eq = optimize(mc, **conv_params)

# method 2
mol_eq = mc.Gradients().optimizer(solver='geomeTRIC').kernel(conv_params)
gkc1000/pyscf
examples/geomopt/01-geomeTRIC.py
Python
apache-2.0
1,061
[ "PySCF" ]
c98144eb9d3c6ac75ecc6b1f49dc64421afdae442e8637ff01163aeb62248bb4
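As a quick sanity check on the geomeTRIC example above, the optimized geometry can be inspected directly. A sketch under stated assumptions (atom_coords() returns Bohr, the PySCF default; the expected bond length of roughly 1.08 Angstrom for RHF/cc-pVDZ is an illustrative ballpark):

import numpy as np
from pyscf import gto, scf
from pyscf.geomopt.geometric_solver import optimize

mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='ccpvdz')
mol_eq = optimize(scf.RHF(mol))
coords = mol_eq.atom_coords()                                 # Bohr by default
r_nn = np.linalg.norm(coords[1] - coords[0]) * 0.52917721092  # Bohr -> Angstrom
print("optimized N-N distance: %.4f Angstrom" % r_nn)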
from __future__ import print_function import unittest import sys import os import numpy as np import macrodensity as md import pkg_resources from os.path import join as path_join try: import pandas has_pandas = True except ImportError: has_pandas = False test_dir = os.path.abspath(os.path.dirname(__file__)) class TestDensityReadingFunctions(unittest.TestCase): ''' Test the code for reading in charge and density files''' def test_read_vasp(self): '''Test the function for reading CHGCAR/LOCPOT''' chgcar = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) charge, ngx, ngy, ngz, lattice = md.read_vasp_density(chgcar, quiet=True) for v, t in ((charge, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(charge[0], -.76010173913E+01) self.assertEqual(charge[56 * 56 * 56 -1], -4.4496715627) self.assertEqual(lattice[0, 0], 2.7150000) self.assertEqual(ngx, 56) def test_read_vasp_parchg(self): '''Test the function for reading CHGCAR/LOCPOT''' parchg = pkg_resources.resource_filename( __name__, path_join('..', 'PARCHG.test')) spin, ngx, ngy, ngz, lattice = md.read_vasp_parchg(parchg, quiet=True) for v, t in ((spin, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(spin[0], 1.0) self.assertEqual(lattice[0, 0], 11.721852) spin, ngx, ngy, ngz, lattice = md.read_vasp_parchg(parchg, spin=True, quiet=True) for v, t in ((spin[0], np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) for v, t in ((spin[1], np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(spin[1][0], 0.0) def test_read_gulp(self): '''Test the function for reading GULP output''' gulpcar = pkg_resources.resource_filename( __name__, path_join('../examples', 'gulp.out')) potential, ngx, ngy, ngz, lattice = md.read_gulp_potential(gulpcar) for v, t in ((potential, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(potential[0], 8.732207) self.assertEqual(potential[10 * 10 * 20 -1], 8.732207) self.assertEqual(lattice[0, 0], 11.996500) self.assertEqual(ngx, 10) def test_density_2_grid(self): '''Test the function for projecting the potential onto a grid''' chgcar = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) charge, ngx, ngy, ngz, lattice = md.read_vasp_density(chgcar, quiet=True) grid_pot, electrons = md.density_2_grid(charge, ngx, ngy, ngz) self.assertAlmostEqual(grid_pot[0, 0, 0], - .76010173913E+01) self.assertAlmostEqual(grid_pot[55, 55, 55], -4.4496715627) self.assertAlmostEqual(electrons, 8.00000, places=4) @unittest.skipIf(not has_pandas, "Already using pandas-free reader") class TestDensityReadingFunctionsNoPandas(TestDensityReadingFunctions): """Disable Pandas and test code for reading charge and density files""" def setUp(self): self._pandas = sys.modules['pandas'] sys.modules['pandas'] = None def tearDown(self): sys.modules['pandas'] = self._pandas class TestOtherReadingFunctions(unittest.TestCase): def test_read_vasp_classic(self): '''Test the function for reading CHGCAR/LOCPOT''' chgcar = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) (charge, ngx, ngy, ngz, lattice) = md.read_vasp_density_classic(chgcar) for v, t in ((charge, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) 
self.assertEqual(charge[0], -.76010173913E+01) self.assertEqual(charge[56 * 56 * 56 -1], -4.4496715627) self.assertEqual(lattice[0, 0], 2.7150000) self.assertEqual(ngx, 56) def test_matrix_2_abc(self): '''Test conversion of lattice to abc, alpha, beta, gamma format''' lattice = np.asarray([[2.715, 2.715, 0.], [0., 2.715, 2.715], [2.715, 0., 2.715]]) a, b, c, a_vec, b_vec, c_vec = md.matrix_2_abc(lattice) self.assertAlmostEqual(a, 3.8395898218429529) self.assertAlmostEqual(b, 3.8395898218429529) self.assertAlmostEqual(c, 3.8395898218429529) class TestAveragingFunctions(unittest.TestCase): '''Test various functions for manipulating and measuring the density''' def test_planar_average(self): ''' Test the code for averaging the density''' test_grid = np.zeros(shape=(3, 3, 3)) for i in range(3): test_grid[i, :, 0] = float(i) planar = md.planar_average(test_grid, 3, 3, 3) self.assertAlmostEqual(planar[0], 1.0) planar = md.planar_average(test_grid, 3, 3, 3, axis='x') self.assertAlmostEqual(planar[2], 0.66666667) def test_volume_average(self): '''Test the volume_average function''' test_grid = np.zeros(shape=(5, 5, 5)) for i in range(5): for j in range(5): for k in range(5): test_grid[i, j, k] = float(i * j * k) potential, variance = md.volume_average([0, 0, 0], [2, 2, 2], test_grid, 5, 5, 5) self.assertAlmostEqual(potential, 0.125) self.assertAlmostEqual(variance, 0.109375) potential, variance = md.volume_average([1, 1, 1], [2, 2, 2], test_grid, 5, 5, 5) potential, variance = md.volume_average([1, 1, 1], [3, 3, 3], test_grid, 5, 5, 5) self.assertAlmostEqual(potential, 1.0) self.assertAlmostEqual(variance, 3.6296296296296298) def test_ipr(self): '''Test the ipr function''' parchg = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) dens, ngx, ngy, ngz, lattice = md.read_vasp_density(parchg, quiet=True) self.assertAlmostEqual(md.inverse_participation_ratio(dens), 1.407e-5) class TestGeometryFunctions(unittest.TestCase): '''Test the functions that do geometry and trig''' def test_gradient_magnitude(self): '''Test the function for returning the magnitude of gradient at a voxel''' grid = np.zeros(shape=(3, 3, 3)) for i in range(3): for j in range(3): for k in range(3): grid[i, j, k] = i * j * k gx, gy, gz = np.gradient(grid) magnitudes = md.gradient_magnitude(gx, gy, gz) self.assertEqual(magnitudes[1, 1, 1], 1.7320508075688772) self.assertEqual(magnitudes[2, 2, 2], 6.9282032302755088) def test_macroscopic_average(self): '''Test the macroscopic averaging function''' f = 2. fs = 100 x = np.arange(fs) potential = [np.sin(2 * np.pi * f * (i/float(fs))) for i in np.arange(fs)] macro = md.macroscopic_average(potential, 50, 1) self.assertAlmostEqual(macro[20], 0.) def test_vector_2_abscissa(self): ''' Test the vector_2_abscissa function''' abscissa = md.vector_2_abscissa([5, 6, 7], 10, 0.2, 0.2, 0.2) self.assertEqual(abscissa[5], 10.488088481701517) def test_number_in_field(self): '''Test the number_in_field function''' test_field = np.zeros(shape=(5, 5, 5)) test_field[0, 0, 0] = 1. test_field[4, 4, 4] = 1. 
test_field[2, 3, 2] = 0.5 test_field[1, 4, 2] = 0.3 self.assertEqual(md.number_in_field(test_field, 0.3), 4) self.assertEqual(md.number_in_field(test_field, 0.5), 3) self.assertEqual(md.number_in_field(test_field, 1.0), 2) self.assertEqual(md.number_in_field(test_field, 1.1), 0) def test_element_vol(self): '''Test the element_vol function''' self.assertEqual(md.element_vol(3000.,10, 20, 30), 0.5) def test_get_volume(self): '''Test the get_volume function''' a = [5.43 * 0.5, 0., 5.43 * 0.5] b = [5.43 * 0.5, 5.43 * 0.5, 0.] c = [0., 5.43 * 0.5, 5.43 * 0.5] self.assertAlmostEqual(md.get_volume(a, b, c), 40.03, places=2) def test_numbers_2_grid(self): '''Tests the numbers_2_grid function''' a = md.numbers_2_grid([0.5, 0.5, 0.5], 10, 10, 10) b = [5, 5, 5] self.assertSequenceEqual(a.tolist(), b) def test_GCD(self): '''Test the GCD function''' self.assertEqual(md.GCD(100,12), 4) def test_GCD_List(self): '''Tests the GCD_List function''' self.assertEqual(md.GCD_List([15,100,45]), 5) if __name__ == '__main__': unittest.main()
WMD-group/MacroDensity
tests/unit_tests.py
Python
mit
9,944
[ "GULP" ]
066afdc4bd5e22819f88da3dc278f188388865f48b23a216163644356d684ab4
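The tests above exercise macrodensity's reading and averaging functions separately; a typical end-to-end workflow chains them. A sketch under stated assumptions (a VASP LOCPOT file in the working directory; the averaging window of one cell length is illustrative, and the (potential, period, resolution) argument order follows the test usage above):

import numpy as np
import macrodensity as md

pot, ngx, ngy, ngz, lattice = md.read_vasp_density('LOCPOT', quiet=True)
grid, _ = md.density_2_grid(pot, ngx, ngy, ngz)
planar = md.planar_average(grid, ngx, ngy, ngz)    # average over x-y planes along z
a, b, c, _, _, _ = md.matrix_2_abc(lattice)
macro = md.macroscopic_average(planar, c, c / ngz)  # window = one cell length (assumption)
print("max planar-averaged potential:", planar.max())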
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 13:22:26 2015

@author: agiovann
"""
#%%
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage import label
import pylab as pl
import numpy as np
#%%


def extractROIsFromPCAICA(spcomps, numSTD=4, gaussiansigmax=2, gaussiansigmay=2, thresh=None):
    """
    Given the spatial components output of the IPCA_stICA function, extract
    possible regions of interest.

    The algorithm estimates the significance of a component by thresholding
    the component after gaussian smoothing.

    Parameters
    -----------
    spcomps: 3d array containing the spatial components

    numSTD: number of robust standard deviations above the median of a
    spatial component for a pixel to be considered significant
    """
    numcomps, width, height = spcomps.shape
    rowcols = int(np.ceil(np.sqrt(numcomps)))
    #%
    allMasks = []
    maskgrouped = []
    for k in xrange(0, numcomps):
        comp = spcomps[k]
#        plt.subplot(rowcols, rowcols, k+1)
        comp = gaussian_filter(comp, [gaussiansigmay, gaussiansigmax])

        maxc = np.percentile(comp, 99)
        minc = np.percentile(comp, 1)
#        comp = np.sign(maxc - np.abs(minc)) * comp
        q75, q25 = np.percentile(comp, [75, 25])
        iqr = q75 - q25
        minCompValuePos = np.median(comp) + numSTD * iqr / 1.35
        minCompValueNeg = np.median(comp) - numSTD * iqr / 1.35

        # keep both positive and negative large-magnitude pixels
        if thresh is None:
            compabspos = comp * (comp > minCompValuePos) - comp * (comp < minCompValueNeg)
        else:
            compabspos = comp * (comp > thresh) - comp * (comp < -thresh)

        #height, width = compabs.shape
        labeledpos, n = label(compabspos > 0, np.ones((3, 3)))
        maskgrouped.append(labeledpos)
        for jj in range(1, n + 1):
            tmp_mask = np.asarray(labeledpos == jj)
            allMasks.append(tmp_mask)
#        labeledneg, n = label(compabsneg > 0, np.ones((3, 3)))
#        maskgrouped.append(labeledneg)
#        for jj in range(n):
#            tmp_mask = np.asarray(labeledneg == jj)
#            allMasks.append(tmp_mask)
#        plt.imshow(labeled)
#        plt.axis('off')
    return allMasks, maskgrouped
agiovann/CalBlitz
calblitz/rois.py
Python
gpl-3.0
2,284
[ "Gaussian" ]
8af0c1812bf205500827cdee1cad39ee1710d0030b119c72dc6b21a81d6bfac9
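A self-contained smoke test for extractROIsFromPCAICA above (synthetic data; the shapes and blob placement are illustrative). Note the module is Python 2-era code (xrange), so the sketch targets the same interpreter. Two well-separated blobs, one positive-going and one negative-going, should each yield a binary mask:

import numpy as np

comps = np.zeros((1, 64, 64))
comps[0, 10:15, 10:15] = 1.0    # positive-going component
comps[0, 40:48, 40:48] = -1.0   # negative-going component (also detected)

masks, grouped = extractROIsFromPCAICA(comps, numSTD=4)
print(len(masks))               # expected: 2
print(grouped[0].max())         # number of labeled regions in the first component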
#!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** ## \package pts.eagle.extractor Extracting data for a given EAGLE galaxy from the simulation snapshot. # # The EAGLE simulation output is stored in a (large) set of data files in the HDF5 format, documented at the # <a href="http://www.hdfgroup.org/HDF5/">HFD5 home page</a>. The output is organized in \em snapshots, where # each snapshot represents the state of the universe at a particular time (or equivalently, redshift). # # The function in this module allows extracting information relevant for SKIRT from the EAGLE output. # The function converts physical quantities from EAGLE snapshot units (documented through hdf5 attributes # in the snapshot files) to SKIRT import units (documented in the SKIRT SPH classes). # The following table lists some of the units in each system. # #<TABLE> #<TR><TD><B>Physical Quantity</B></TD> <TD><B>EAGLE snapshot</B></TD> # <TD><B>SKIRT import</B></TD></TR> #<TR><TD>position, size</TD> <TD>\f$\textrm{Mpc}\,a\,h^{-1}\f$</TD> # <TD>\f$\textrm{pc}\f$</TD></TR> #<TR><TD>mass</TD> <TD>\f$10^{10}\,\textrm{M}_\odot\,h^{-1}\f$</TD> # <TD>\f$\textrm{M}_\odot\f$</TD></TR> #<TR><TD>velocity</TD> <TD>\f$\textrm{km/s}\,a^{1/2}\f$</TD> # <TD>--</TD></TR> #<TR><TD>time, age</TD> <TD>--</TD> # <TD>year</TD></TR> #<TR><TD>temperature</TD> <TD>K</TD> # <TD>--</TD></TR> #</TABLE> # # Note the corrections for cosmological scale factor \f$a\f$ and hubble parameter \f$h\f$ in the EAGLE snapshot units. # ----------------------------------------------------------------- import os.path import numpy as np import h5py import read_eagle # EAGLE-specific package by must be seperately installed from ..core.tools.geometry import Transform from . import config as config from .skirtrun import SkirtRun # ----------------------------------------------------------------- ## This function extracts information relevant for SKIRT from the EAGLE output for the galaxy described # by the specified SKIRT-runs database record. It places the resulting files in the "in" folder of the # appropriate SkirtRun data structure. The function uses the following fields in the specified record: # runid, eaglesim, snaptag, galaxyid, groupnr, subgroupnr, copx, copy, copz. # # The exported files are named "SIM_GID_stars.dat", "SIM_GID_hii.dat", and "SIM_GID_gas.dat", where # SIM and GID are replaced respectively by the name of the simulation in which the galaxy resides and by the # identifier of the galaxy in the public EAGLE database. The file format is as described for SKIRT SPH import. # In addition, the function creates a text file named "SIM_GID_info.txt", which contains relevant statistics # including particle numbers and various total masses. The contents is documented in the file. 
def extract(record): # ---- get the particle data # initialise star and gas dictionaries sdat = {} gdat = {} yngstars = {} hiiregions = {} # open snapshot and read relevant field attributes sfn = snapfilename(record["eaglesim"], record["snaptag"]) snapshot = read_eagle.EagleSnapshot(sfn) params = fieldAttrs(sfn, "Header") params.update(fieldAttrs(sfn, "Constants")) params.update(fieldAttrs(sfn, "RuntimePars")) hubbleparam = params["HubbleParam"] expansionfactor = params["ExpansionFactor"] schmidtparams = schmidtParameters(params) # convert center of potential to snapshot units copx = record["copx"] * hubbleparam copy = record["copy"] * hubbleparam copz = record["copz"] * hubbleparam # specify (2*250kpc)^3 physical volume about galaxy centre delta = 0.25 * hubbleparam / expansionfactor snapshot.select_region(copx-delta, copx+delta, copy-delta, copy+delta, copz-delta, copz+delta) # read star particle informaton insubhalo = (snapshot.read_dataset(4, "GroupNumber") == record["groupnr"]) & \ (snapshot.read_dataset(4, "SubGroupNumber") == record["subgroupnr"]) sdat['r'] = snapshot.read_dataset(4, "Coordinates") [insubhalo] sdat['h'] = snapshot.read_dataset(4, "SmoothingLength") [insubhalo] sdat['im'] = snapshot.read_dataset(4, "InitialMass") [insubhalo] sdat['m'] = snapshot.read_dataset(4, "Mass") [insubhalo] sdat['v'] = snapshot.read_dataset(4, "Velocity") [insubhalo] sdat['Z'] = snapshot.read_dataset(4, "SmoothedMetallicity") [insubhalo] sdat['born'] = snapshot.read_dataset(4, "StellarFormationTime") [insubhalo] sdat['rho_born'] = snapshot.read_dataset(4, "BirthDensity") [insubhalo] # read gas particle informaton insubhalo = (snapshot.read_dataset(0, "GroupNumber") == record["groupnr"]) & \ (snapshot.read_dataset(0, "SubGroupNumber") == record["subgroupnr"]) gdat['r'] = snapshot.read_dataset(0, "Coordinates") [insubhalo] gdat['h'] = snapshot.read_dataset(0, "SmoothingLength") [insubhalo] gdat['m'] = snapshot.read_dataset(0, "Mass") [insubhalo] gdat['v'] = snapshot.read_dataset(0, "Velocity") [insubhalo] gdat['Z'] = snapshot.read_dataset(0, "SmoothedMetallicity") [insubhalo] gdat['T'] = snapshot.read_dataset(0, "Temperature") [insubhalo] gdat['rho'] = snapshot.read_dataset(0, "Density") [insubhalo] gdat['sfr'] = snapshot.read_dataset(0, "StarFormationRate") [insubhalo] # convert units sdat['r'] = periodicCorrec(sdat['r'], params["BoxSize"]) sdat['r'] = toparsec(sdat['r'], hubbleparam, expansionfactor) sdat['h'] = toparsec(sdat['h'], hubbleparam, expansionfactor) sdat['im'] = tosolar(sdat['im'], hubbleparam) sdat['m'] = tosolar(sdat['m'], hubbleparam) sdat['t'] = age(sdat['born']) - age(expansionfactor) sdat['rho_born'] *= 6.7699e-31 gdat['r'] = periodicCorrec(gdat['r'], params["BoxSize"]) gdat['r'] = toparsec(gdat['r'], hubbleparam, expansionfactor) gdat['h'] = toparsec(gdat['h'], hubbleparam, expansionfactor) gdat['m'] = tosolar(gdat['m'], hubbleparam) gdat['rho'] = togcm3(gdat['rho'], hubbleparam, expansionfactor) # remember density conversion from g cm^-3 to M_sun Mpc^-3 densconv = ((params['CM_PER_MPC']/1.e6)**3) / params['SOLAR_MASS'] # calculate the ISM pressure sdat['P'] = getPtot(sdat['rho_born'], schmidtparams) gdat['P'] = getPtot(gdat['rho'], schmidtparams) # calculate stellar center of mass and translational velocity using shrinking aperture technique com, v_bar = shrinkingCentroid(sdat['r'], sdat['m'], sdat['v']) # find unit rotation axis vector, using only stellar information and an aperture of 30 kpc n_rot = rotAxis(sdat['r'], sdat['v'], sdat['m'], com, v_bar, apt=30e3, 
aptfrac=0.08) # translate to center of mass and line up with angular momentum vector transf = Transform() transf.translate(-com[0], -com[1], -com[2]) a, b, c = n_rot v = np.sqrt(b*b+c*c) if v > 0.3: transf.rotateX(c/v, -b/v) transf.rotateY(v, -a) else: v = np.sqrt(a*a+c*c) transf.rotateY(c/v, -a/v) transf.rotateX(v, -b) sdat['r'],w = transf.transform_vec(sdat['r'][:,0],sdat['r'][:,1],sdat['r'][:,2], np.ones(sdat['r'].shape[0])) gdat['r'],w = transf.transform_vec(gdat['r'][:,0],gdat['r'][:,1],gdat['r'][:,2], np.ones(gdat['r'].shape[0])) # apply 30kpc aperture (i.e. remove all particles outside the aperture) applyAperture(sdat, 30e3) applyAperture(gdat, 30e3) # ---- gather statistics about the data as read from the snapshot # information identifying the SKIRT-run record and the galaxy info = { } info["skirt_run_id"] = record["runid"] info["galaxy_id"] = record["galaxyid"] # information about the particles info["original_particles_stars"] = len(sdat['m']) info["original_initial_mass_stars"] = sdat['im'].sum() info["original_mass_stars"] = sdat['m'].sum() info["original_particles_gas"] = len(gdat['m']) info["original_mass_gas"] = gdat['m'].sum() info["original_mass_baryons"] = info["original_mass_stars"] + info["original_mass_gas"] # information about the direction of the stellar angular momentum axis info["original_rotation_axis_x"] = n_rot[0] info["original_rotation_axis_y"] = n_rot[1] info["original_rotation_axis_z"] = n_rot[2] # ---- initialize statistics about the exported data info["exported_particles_old_stars"] = 0 info["exported_initial_mass_old_stars"] = 0 info["exported_mass_old_stars"] = 0 info["exported_particles_non_star_forming_gas"] = 0 info["exported_mass_non_star_forming_gas"] = 0 info["exported_particles_young_stars_from_stars"] = 0 info["exported_initial_mass_young_stars_from_stars"] = 0 info["exported_mass_young_stars_from_stars"] = 0 info["exported_particles_hii_regions_from_stars"] = 0 info["exported_initial_mass_hii_regions_from_stars"] = 0 info["exported_mass_hii_regions_from_stars"] = 0 info["exported_particles_unspent_gas_from_stars"] = 0 info["exported_mass_unspent_gas_from_stars"] = 0 info["exported_particles_young_stars_from_gas"] = 0 info["exported_initial_mass_young_stars_from_gas"] = 0 info["exported_mass_young_stars_from_gas"] = 0 info["exported_particles_hii_regions_from_gas"] = 0 info["exported_initial_mass_hii_regions_from_gas"] = 0 info["exported_mass_hii_regions_from_gas"] = 0 info["exported_particles_negative_gas_from_stars"] = 0 info["exported_particles_negative_gas_from_gas"] = 0 info["exported_mass_negative_gas_from_stars"] = 0 info["exported_mass_negative_gas_from_gas"] = 0 info["exported_particles_unspent_gas_from_gas"] = 0 info["exported_mass_unspent_gas_from_gas"] = 0 # ---- resample star forming regions # set the "standard" constant covering fraction (see Camps+ 2016) f_PDR = 0.1 # seed the random generator so that a consistent pseudo-random sequence is used for each particular galaxy np.random.seed(int(record["galaxyid"])) # define HII region age constants (in years) young_age = 1e8 # 100 Myr --> particles below this age are resampled infant_age = 1e7 # 10 Myr --> resampled particles below this age are converted to HII regions # resampled particles above this age are converted young stars # <==> lifetime of an HII region # set up GALAXEV array bcstars = np.column_stack([[],[],[],[],[],[],[]]) # set up MAPPINGS-III array mapstars = np.column_stack([[],[],[],[],[],[],[],[],[]]) # set up dust array dust = np.column_stack([[],[],[],[],[],[],[]]) # 
index for particles to resample issf = gdat['sfr'] > 0. isyoung = sdat['t'] < young_age # append older stars to GALAXEV array if (~isyoung).any(): bcstars = np.concatenate((bcstars, np.column_stack([sdat['r'], sdat['h'], sdat['im'], sdat['Z'], sdat['t']])[~isyoung]), axis=0) info["exported_particles_old_stars"] = np.count_nonzero(~isyoung) info["exported_initial_mass_old_stars"] = sdat['im'][~isyoung].sum() info["exported_mass_old_stars"] = sdat['m'][~isyoung].sum() # append non-SF gas data to dust array if (~issf).any(): dust = np.concatenate((dust, np.column_stack([gdat['r'], gdat['h'], gdat['m'], gdat['Z'], gdat['T']])[~issf].copy()), axis=0) info["exported_particles_non_star_forming_gas"] = np.count_nonzero(~issf) info["exported_mass_non_star_forming_gas"] = gdat['m'][~issf].sum() # resample stars if isyoung.any(): for k in sdat.keys(): sdat[k] = sdat[k][isyoung].copy() # calculate SFR at birth of young star particles in M_sun / yr sdat['sfr'] = getSFR(sdat['rho_born'], sdat['im'], schmidtparams) ms, ts, idxs, mdiffs = stochResamp(sdat['sfr'], sdat['im']) isinfant = ts < infant_age if (~isinfant).any(): yngstars['r'] = sdat['r'][idxs][~isinfant] yngstars['h'] = sdat['h'][idxs][~isinfant] yngstars['im'] = ms[~isinfant] yngstars['Z'] = sdat['Z'][idxs][~isinfant] yngstars['t'] = ts[~isinfant] bcstars = np.concatenate((bcstars, np.column_stack([yngstars['r'], yngstars['h'], yngstars['im'], yngstars['Z'], yngstars['t']])), axis=0) info["exported_particles_young_stars_from_stars"] = np.count_nonzero(~isinfant) info["exported_initial_mass_young_stars_from_stars"] = ms[~isinfant].sum() info["exported_mass_young_stars_from_stars"] = info["exported_initial_mass_young_stars_from_stars"] if (isinfant).any(): hiiregions['r'] = sdat['r'][idxs][isinfant] hiiregions['h'] = sdat['h'][idxs][isinfant] hiiregions['SFR'] = ms[isinfant] / infant_age # Assume constant SFR over HII region lifetime hiiregions['Z'] = sdat['Z'][idxs][isinfant] hiiregions['P'] = sdat['P'][idxs][isinfant] * 0.1 # Convert to Pa for output hiiregions['logC'] = 0.6*np.log10(ms[isinfant]) + 0.4*np.log10(hiiregions['P']) - 0.4*np.log10(params['BOLTZMANN']) + 0.4 hiiregions['fPDR'] = np.zeros_like(ts[isinfant]) + f_PDR # Covering fraction is set to constant value # calculate the HII region smoothing length from the mass of the surrounding PDR region, # estimated to be 10 times as massive (see Jonsson et al. 2010, MNRAS 403, 17-44), # using SKIRT's standard smoothing kernel mass/size normalization: rho = 8/pi * M/h^3; # and randomly shift the positions of the HII regions within a similarly enlarged range hiiregions['h_mapp'] = (10*ms[isinfant] / (np.pi/8 * sdat['rho_born'][idxs][isinfant] * densconv))**(1/3.) 
stochShiftPos(hiiregions['r'], hiiregions['h'], hiiregions['h_mapp']) # append to MAPPINGSIII array mapstars = np.concatenate((mapstars, np.column_stack([hiiregions['r'], hiiregions['h_mapp'], hiiregions['SFR'], hiiregions['Z'], hiiregions['logC'], hiiregions['P'], hiiregions['fPDR']])), axis=0) info["exported_particles_hii_regions_from_stars"] = np.count_nonzero(isinfant) info["exported_initial_mass_hii_regions_from_stars"] = ms[isinfant].sum() info["exported_mass_hii_regions_from_stars"] = info["exported_initial_mass_hii_regions_from_stars"] # append to dust array with negative mass to compensate for the mass of the surrounding PDR region, # considered to be 10 times as massive; use zero temperature as T is unavailable for resampled star particles dust = np.concatenate((dust, np.column_stack([hiiregions['r'], hiiregions['h_mapp']*3., -10*ms[isinfant], hiiregions['Z'], np.zeros(hiiregions['Z'].shape[0])]).copy()), axis=0) info["exported_particles_negative_gas_from_stars"] = np.count_nonzero(isinfant) info["exported_mass_negative_gas_from_stars"] = 10*ms[isinfant].sum() # add unspent young star particle material to dust array # use zero temperature as T is unavailable for resampled star particles mass = sdat['im'] - mdiffs dust = np.concatenate((dust, np.column_stack([sdat['r'], sdat['h'], mass, sdat['Z'], np.zeros(sdat['Z'].shape[0])]).copy()), axis=0) info["exported_particles_unspent_gas_from_stars"] = len(mass) info["exported_mass_unspent_gas_from_stars"] = mass.sum() # resample gas if issf.any(): for k in gdat.keys(): gdat[k] = gdat[k][issf].copy() ms, ts, idxs, mdiffs = stochResamp(gdat['sfr'], gdat['m']) isinfant = ts < infant_age if (~isinfant).any(): yngstars['r'] = gdat['r'][idxs][~isinfant] yngstars['h'] = gdat['h'][idxs][~isinfant] yngstars['im'] = ms[~isinfant] yngstars['Z'] = gdat['Z'][idxs][~isinfant] yngstars['t'] = ts[~isinfant] bcstars = np.concatenate((bcstars, np.column_stack([yngstars['r'], yngstars['h'], yngstars['im'], yngstars['Z'], yngstars['t']])), axis=0) info["exported_particles_young_stars_from_gas"] = np.count_nonzero(~isinfant) info["exported_initial_mass_young_stars_from_gas"] = ms[~isinfant].sum() info["exported_mass_young_stars_from_gas"] = info["exported_initial_mass_young_stars_from_gas"] if (isinfant).any(): hiiregions['r'] = gdat['r'][idxs][isinfant] hiiregions['h'] = gdat['h'][idxs][isinfant] hiiregions['SFR'] = ms[isinfant] / infant_age # Assume constant SFR over HII region lifetime hiiregions['Z'] = gdat['Z'][idxs][isinfant] hiiregions['P'] = gdat['P'][idxs][isinfant] * 0.1 # convert to Pa hiiregions['logC'] = 0.6*np.log10(ms[isinfant]) + 0.4*np.log10(hiiregions['P']) - 0.4*np.log10(params['BOLTZMANN']) + 0.4 hiiregions['fPDR'] = np.zeros_like(ts[isinfant]) + f_PDR # Covering fraction is set to constant value # calculate the HII region smoothing length from the mass of the surrounding PDR region, # estimated to be 10 times as massive (see Jonsson et al. 2010, MNRAS 403, 17-44), # using SKIRT's standard smoothing kernel mass/size normalization: rho = 8/pi * M/h^3; # and randomly shift the positions of the HII regions within a similarly enlarged range hiiregions['h_mapp'] = (10*ms[isinfant] / (np.pi/8 * gdat['rho'][idxs][isinfant] * densconv))**(1/3.) 
stochShiftPos(hiiregions['r'], hiiregions['h'], hiiregions['h_mapp']) # append to MAPPINGSIII array mapstars = np.concatenate((mapstars, np.column_stack([hiiregions['r'], hiiregions['h_mapp'], hiiregions['SFR'], hiiregions['Z'], hiiregions['logC'], hiiregions['P'], hiiregions['fPDR']])), axis=0) info["exported_particles_hii_regions_from_gas"] = np.count_nonzero(isinfant) info["exported_initial_mass_hii_regions_from_gas"] = ms[isinfant].sum() info["exported_mass_hii_regions_from_gas"] = info["exported_initial_mass_hii_regions_from_gas"] # append to dust array with negative mass to compensate for the mass of the surrounding PDR region, # considered to be 10 times as massive; use negative temperature to indicate that it is not a physical value dust = np.concatenate((dust, np.column_stack([hiiregions['r'], hiiregions['h_mapp']*3, -10*ms[isinfant], hiiregions['Z'], -gdat['T'][idxs][isinfant]]).copy()), axis=0) info["exported_particles_negative_gas_from_gas"] = np.count_nonzero(isinfant) info["exported_mass_negative_gas_from_gas"] = 10*ms[isinfant].sum() # add unspent SF gas material to dust array; use negative temperature to indicate that it is not a physical value mass = gdat['m'] - mdiffs dust = np.concatenate((dust, np.column_stack([gdat['r'], gdat['h'], mass, gdat['Z'], -gdat['T']]).copy()), axis=0) info["exported_particles_unspent_gas_from_gas"] = len(mass) info["exported_mass_unspent_gas_from_gas"] = mass.sum() # ---- make some sums and write the statistics and output files info["exported_particles_young_stars"] = info["exported_particles_young_stars_from_stars"] + info["exported_particles_young_stars_from_gas"] info["exported_initial_mass_young_stars"] = info["exported_initial_mass_young_stars_from_stars"] + info["exported_initial_mass_young_stars_from_gas"] info["exported_mass_young_stars"] = info["exported_mass_young_stars_from_stars"] + info["exported_mass_young_stars_from_gas"] info["exported_particles_stars"] = info["exported_particles_old_stars"] + info["exported_particles_young_stars"] info["exported_initial_mass_stars"] = info["exported_initial_mass_old_stars"] + info["exported_initial_mass_young_stars"] info["exported_mass_stars"] = info["exported_mass_old_stars"] + info["exported_mass_young_stars"] info["exported_particles_hii_regions"] = info["exported_particles_hii_regions_from_stars"] + info["exported_particles_hii_regions_from_gas"] info["exported_initial_mass_hii_regions"] = info["exported_initial_mass_hii_regions_from_stars"] + info["exported_initial_mass_hii_regions_from_gas"] info["exported_mass_hii_regions"] = info["exported_mass_hii_regions_from_stars"] + info["exported_mass_hii_regions_from_gas"] info["exported_particles_unspent_gas"] = info["exported_particles_unspent_gas_from_stars"] + info["exported_particles_unspent_gas_from_gas"] info["exported_mass_unspent_gas"] = info["exported_mass_unspent_gas_from_stars"] + info["exported_mass_unspent_gas_from_gas"] info["exported_particles_negative_gas"] = info["exported_particles_negative_gas_from_stars"] + info["exported_particles_negative_gas_from_gas"] info["exported_mass_negative_gas"] = info["exported_mass_negative_gas_from_stars"] + info["exported_mass_negative_gas_from_gas"] info["exported_particles_gas"] = info["exported_particles_non_star_forming_gas"] + info["exported_particles_unspent_gas"] + info["exported_particles_negative_gas"] info["exported_mass_gas"] = info["exported_mass_non_star_forming_gas"] + info["exported_mass_unspent_gas"] # - info["exported_mass_negative_gas"] info["exported_mass_baryons"] = 
info["exported_mass_stars"] + info["exported_mass_hii_regions"] + info["exported_mass_gas"] # create the appropriate SKIRT-run directories skirtrun = SkirtRun(record["runid"], create=True) filepathprefix = os.path.join(skirtrun.inpath(), "{}_{}_".format(record["eaglesim"], record["galaxyid"])) # write the statistics file infofile = open(filepathprefix + "info.txt", 'w') infofile.write('# Statistics for SPH particles extracted from EAGLE HDF5 snapshot to SKIRT6 format\n') infofile.write('# Masses are expressed in solar mass units\n') maxkeylen = max(map(len,info.keys())) for key in sorted(info.keys()): valueformat = "d" if "_particles_" in key or "_id" in key else ".9e" infofile.write( ("{0:"+str(maxkeylen)+"} = {1:15"+valueformat+"}\n").format(key, info[key]) ) infofile.close() # ---- write output files # open output files starsfile = open(filepathprefix + "stars.dat", 'w') starsfile.write('# SPH Star Particles\n') starsfile.write('# Extracted from EAGLE HDF5 snapshot to SKIRT6 format\n') starsfile.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n') gasfile = open(filepathprefix + "gas.dat", 'w') gasfile.write('# SPH Gas Particles\n') gasfile.write('# Extracted from EAGLE HDF5 snapshot to SKIRT6 format\n') gasfile.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) T(K)\n') hiifile = open(filepathprefix + "hii.dat", 'w') hiifile.write('# SPH Hii Particles\n') hiifile.write('# Extracted from EAGLE HDF5 snapshot to SKIRT6 format\n') hiifile.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) SFR(Msun/yr) Z(0-1) logC P(Pa) f_PDR\n') # save particle data np.savetxt(starsfile, bcstars, fmt=['%f']*7) np.savetxt(gasfile, dust, fmt=['%f']*7) np.savetxt(hiifile, mapstars, fmt=['%f']*7+['%e','%f']) # close output files starsfile.close() gasfile.close() hiifile.close() # ----------------------------------------------------------------- ## This private helper function returns the absolute path to the first EAGLE snapshot file # corresponding to the given EAGLE simulation name and snapshot tag def snapfilename(eaglesim, snaptag): # snapshot filename segment corresponding to the snapshot tag snapname = { 0 : "000_z020p000", 1 : "001_z015p132", 2 : "002_z009p993", 3 : "003_z008p988", 4 : "004_z008p075", 5 : "005_z007p050", 6 : "006_z005p971", 7 : "007_z005p487", 8 : "008_z005p037", 9 : "009_z004p485", 10 : "010_z003p984", 11 : "011_z003p528", 12 : "012_z003p017", 13 : "013_z002p478", 14 : "014_z002p237", 15 : "015_z002p012", 16 : "016_z001p737", 17 : "017_z001p487", 18 : "018_z001p259", 19 : "019_z001p004", 20 : "020_z000p865", 21 : "021_z000p736", 22 : "022_z000p615", 23 : "023_z000p503", 24 : "024_z000p366", 25 : "025_z000p271", 26 : "026_z000p183", 27 : "027_z000p101", 28 : "028_z000p000" } [snaptag] return os.path.join(config.eagledata_path[eaglesim], "particledata_{0}/eagle_subfind_particles_{0}.0.hdf5".format(snapname)) # ----------------------------------------------------------------- ## This private helper function reads a hdf5 file field's attributes into a python dictionary. 
def fieldAttrs(filename, fieldname): fileobj = h5py.File(filename, 'r') fieldobj = fileobj[fieldname] fieldkeys = list(fieldobj.attrs) result = { } for key in fieldkeys: result[key] = fieldobj.attrs[str(key)] return result # ----------------------------------------------------------------- ## This private helper function converts a length/distance/size value from EAGLE snapshot units to parsec def toparsec(eaglevalue, hubbleparam, expansionfactor): return eaglevalue * (1e6 * expansionfactor / hubbleparam) ## This private helper function converts a mass value from EAGLE snapshot units to solar masses def tosolar(eaglevalue, hubbleparam): return eaglevalue * (1e10 / hubbleparam) ## This private helper function converts a velocity value from EAGLE snapshot units to km/s def tokms(eaglevalue, expansionfactor): return eaglevalue * np.sqrt(expansionfactor) ## This private helper function converts a current density value from EAGLE snapshot units to g m^-3 def togcm3(eaglevalue, hubbleparam, expansionfactor): return eaglevalue * (6.7699e-31 * (expansionfactor**-3) * (hubbleparam**2)) ## This private helper function returns the age of a star (in yr) given the universe expansion factor # when the star was born (in range 0-1) def age(R): H0 = 2.3e-18 OmegaM0 = 0.27 yr = 365.25 * 24 * 3600 T0 = 13.7e9 return T0 - (2./3./H0/np.sqrt(1-OmegaM0)) * np.arcsinh(np.sqrt( (1/OmegaM0-1)*R**3 )) / yr # ----------------------------------------------------------------- ## This private helper function returns the periodicity corrected coordinates input as a (N,3) # numpy array, and takes the box size (in units of crds) and a test length in units of box size def periodicCorrec(crds, boxsize, testfact = 0.5): if len(crds)>0: for i in range(3): crd = crds[:,i] booldx = np.abs(crd - crd.min()) > boxsize * testfact if booldx.any(): crd[booldx] = crd[booldx] - boxsize return crds # ----------------------------------------------------------------- ## This private helper function returns the centre of mass or the centre of mass and mean velocity # from input particle data, in the units of crds and vels respectively def centroid(crds, masses, vels): moments = (crds * np.column_stack([masses]*3)).sum(axis = 0) M = masses.sum() if vels.any(): momenta = (vels * np.column_stack([masses]*3)).sum(axis = 0) return moments/M, momenta/M else: return moments/M # ----------------------------------------------------------------- ## This private helper function returns the periodicity corrected coordinates input as a (N,3) # numpy array, and takes the box size (in units of crds) and a test length in units of box size. # Finds the centroid of a given set of particles, each time reducing the # maximum distance between the previously found centroid to particles used # to calculate the next centroid. 
def shrinkingCentroid(crds, masses, vels, thresh=200, shrinkfactor=1.2): N = np.inf # N set high initially to consider all particles while N >= thresh: C, C_vel = centroid(crds, masses, vels) # define new aperture size as eps eps = ((((crds-C)**2).sum(axis = -1))**0.5).max()/float(shrinkfactor) # index for particles within new aperture size shiftcrds = crds - C boolidx = ((shiftcrds**2).sum(axis = -1))**0.5 < eps N = boolidx.sum() crds = crds[boolidx].copy() masses = masses[boolidx].copy() vels = vels[boolidx].copy() return C, C_vel # ----------------------------------------------------------------- ## This private helper function returns the unit vector pointing in the direction of the rotation # axis for input particle data, input CoM and input mean velocity. apt specifies an aperture to # consider particles within in the units of pos, and aptfrac defines and inner radius within which to # exclude particles in units of apt. def rotAxis(crds, vels, mass, com, v_bar, apt = 3e4, aptfrac = 0.08): # put in centre of mass and rest frame pos = crds - com v_rel = vels - v_bar # calculate apertures disp = (pos**2).sum(axis=-1) outapt = disp < apt ** 2 inapt = disp > (aptfrac * apt) ** 2 totapt = inapt * outapt # calculate J vectors in arbitrary units Js = np.cross(pos[totapt], v_rel[totapt]) * np.column_stack([mass[totapt]]*3) # calculate net J vector and normalise to unit vector J = Js.sum(axis = 0) norm2 = np.dot(J, J).sum() if norm2 > 0: return J * norm2 ** -0.5 else: return np.array((0,0,1)) # ----------------------------------------------------------------- ## This private helper function applies a spherical aperture to a dictionary of particle data, i.e. it # adjusts the dictionary so that the particles outside the aperture are removed from each array. def applyAperture(data, radius): x,y,z = data['r'].T inside = (x*x+y*y+z*z) <= (radius*radius) if inside.any(): for key in data: data[key] = data[key][inside] else: for key in data: shape = list(data[key].shape) shape[0] = 0 data[key] = np.zeros(shape) # ----------------------------------------------------------------- ## This private helper function reads the Schmidt parameters into a python structure. 
def schmidtParameters(params): # extract relevent unit conversions CM_PER_MPC = params['CM_PER_MPC'] GAMMA = params['GAMMA'] GRAV = params['GRAVITY'] K_B = params['BOLTZMANN'] M_PROTON = params['PROTONMASS'] M_SUN = params['SOLAR_MASS'] SEC_PER_YEAR = params['SEC_PER_YEAR'] # extract relevent runtime parameters used to create EAGLE snapshot GammaEff = params['EOS_Jeans_GammaEffective'] InitH = params['InitAbundance_Hydrogen'] RhoHi = params['SF_SchmidtLawHighDensThresh_HpCM3'] RhoNorm = params['EOS_NormPhysDens_HpCM3'] SchmidtCoeff = params['SF_SchmidtLawCoeff_MSUNpYRpKPC2'] SchmidtExp = params['SF_SchmidtLawExponent'] SchmidtExpHi = params['SF_SchmidtLawHighDensExponent'] T_JeansNorm = params['EOS_Jeans_TempNorm_K'] # Normalisation in cgs units Norm_cgs = SchmidtCoeff * pow(pow(CM_PER_MPC / 1.e6, 2) / M_SUN , SchmidtExp - 1) / (1.e6 * SEC_PER_YEAR) # High density Threshold RhoHi_cgs = RhoHi * M_PROTON / InitH # Density normalisation in cgs RhoNorm_cgs = RhoNorm * M_PROTON / InitH # Min total Pressure P_totc = RhoNorm * T_JeansNorm * K_B / (InitH * 1.22) # Pressure at high density Schmidt law break PBreak_cgs = P_totc * (RhoHi/RhoNorm) ** GammaEff # Assume f_g = 1 NormHi_cgs = Norm_cgs * (GAMMA * PBreak_cgs / GRAV) ** ((SchmidtExp - SchmidtExpHi) * 0.5) # tuple of universal SF parameters sfparams = RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff # tuples of high and low pressure SF parameters sf_lo = Norm_cgs, GAMMA/GRAV, SchmidtExp sf_hi = NormHi_cgs, GAMMA/GRAV, SchmidtExpHi return sfparams, sf_lo, sf_hi # ----------------------------------------------------------------- ## This private helper function obtains the SFR of gas from which star particles formed. # # Inputs: # - rho_form: gas density at formation of star particle # - mass: mass of star particle # - schmidtpars: parameters for implementing Schmidt law from schmidtParameters() # # Outputs: # - SFR = Star formation rate for gas particle in input mass units per year # def getSFR(rho_form, mass, schmidtpars): # unpack universal SF law parameters RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff = schmidtpars[0] # Pressure at star formation P_form = P_totc * (rho_form / RhoNorm_cgs) ** GammaEff # unpack high and low pressure SF law parameters sf_lo, sf_hi = schmidtpars[1:] # calculate SFR if type(rho_form) == np.ndarray: hidx = rho_form > RhoHi_cgs SFR = np.zeros(rho_form.size) if np.any(hidx): SFR[hidx] = mass[hidx] * sf_hi[0] * (sf_hi[1] * P_form[hidx]) ** ((sf_hi[2] - 1) * 0.5) if np.any(-hidx): SFR[-hidx] = mass[-hidx] * sf_lo[0] * (sf_lo[1] * P_form[-hidx]) ** ((sf_lo[2] - 1) * 0.5) else: if rho_form > RhoHi_cgs: SFR = mass * sf_hi[0] * (sf_hi[1] * P_form) ** ((sf_hi[2] - 1) * 0.5) else: SFR = mass * sf_lo[0] * (sf_lo[1] * P_form) ** ((sf_lo[2] - 1) * 0.5) # return SFR converted to input mass units per year from per second return np.array(SFR) * 3.15569e7 # ----------------------------------------------------------------- ## This private helper function obtains the ambient pressure of gas from which star particles formed. 
# # Inputs: # - rho: gas density of star forming particle # - schmidtpars: parameters for implementing Schmidt law from schmidtParameters() # # Outputs: # - P_tot: Ambient pressure from polytropic effective EoS (Schaye & Dalla Vecchia (2004)) # def getPtot(rho, schmidtpars): RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff = schmidtpars[0] P_form = P_totc * (rho / RhoNorm_cgs) ** GammaEff return P_form # ----------------------------------------------------------------- ## This private helper function samples star forming gas particles into a number of sub-particles. # # Inputs: # - sfr: star formation rate in solar masses per yr # - m_gas: particle mass in solar masses # # Outputs: # - nested arrays with a list of subparticles for each parent input particle: # - ms: sub-particle stellar masses in solar masses # - ts: lookback times of sub-particle formation # - idxs: index of the sub-particle's parent particle in input array # - mdiffs: mass of parent particles locked up in new stars; this can be subtracted from the parent gas # particles for mass conservation # def stochResamp(sfr, m_gas): # mass resampling parameters (see Kennicutt & Evans 2012 section 2.5) m_min = 700 # minimum mass of sub-particle in M_solar m_max = 1e6 # maximum mass of sub-particle in M_solar alpha = 1.8 # exponent of power-law mass function alpha1 = 1. - alpha # age resampling parameters thresh_age = 1e8 # period over which to resample in yr (100 Myr) # initialise lists for output ms = [[]] ts = [[]] idxs = [[]] mdiffs = [] # for each parent particle, determine the star-forming sub-particles for i in range(sfr.size): sfri = sfr[i] mi = m_gas[i] # determine the maximum number of sub-particles based on the minimum sub-particle mass N = int(max(1,np.ceil(mi/m_min))) # generate random sub-particle masses from a power-law distribution between min and max values X = np.random.random(N) m = (m_min**alpha1 + X*(m_max**alpha1-m_min**alpha1))**(1./alpha1) # limit and normalize the list of sub-particles to the total mass of the parent mlim = m[np.cumsum(m)<=mi] if len(mlim)<1: mlim = m[:1] m = mi/mlim.sum() * mlim N = len(m) # generate random decay lookback time for each sub-particle X = np.random.random(N) # X in range (0,1] t = thresh_age + mi/sfri * np.log(1-X) # determine mask for sub-particles that form stars by present day issf = t > 0. # add star-forming sub-particles to the output lists ms.append(m[issf]) ts.append(t[issf]) idxs.append([i]*np.count_nonzero(issf)) mdiffs.append(m[issf].sum()) # convert sub-particle lists into numpy arrays ms = np.hstack(ms) ts = np.hstack(ts) idxs = np.hstack(idxs).astype(int) mdiffs = np.array(mdiffs) return ms, ts, idxs, mdiffs # ----------------------------------------------------------------- ## This private helper function randomly shifts the positions of HII region sub-particles # within the smoothing sphere of their parent. # # Arguments: # - r: parent positions; updated by this function to the shifted positions # - h: the smoothing lengths of the parents # - h_mapp: the smoothing lengths of the sub-particles # def stochShiftPos(r, h, h_mapp): # the offset sampling smoothing length is determined so that in the limit of infinite particles, # the light distribution is the same as the parent particle kernel; # assuming Gaussian kernels this means h_sampling**2 + h_mapp**2 = h**2. 
h_sampling = np.sqrt(np.maximum(0,h*h - h_mapp*h_mapp)) # sample the offset from a scaled gaussian that resembles a cubic spline kernel # (see the documentation of the SPHDustDistribution class in SKIRT) r[:,0] += h_sampling * np.random.normal(scale=0.29, size=h_sampling.shape) r[:,1] += h_sampling * np.random.normal(scale=0.29, size=h_sampling.shape) r[:,2] += h_sampling * np.random.normal(scale=0.29, size=h_sampling.shape) # -----------------------------------------------------------------
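
## Minimal usage sketch for stochResamp(), added for illustration only; the
#  input values below are hypothetical, and only the call signature and the
#  return shapes come from the function above. Assumes numpy is imported as
#  np, as elsewhere in this module.
#
if __name__ == '__main__':
    np.random.seed(0)                        # make the demo reproducible
    sfr = np.array([0.5, 2.0])               # assumed SFRs in M_sun/yr
    m_gas = np.array([1.8e6, 1.8e6])         # assumed parent gas masses in M_sun
    ms, ts, idxs, mdiffs = stochResamp(sfr, m_gas)
    print('sub-particles per parent:', np.bincount(idxs, minlength=sfr.size))
    print('parent mass locked into new stars:', mdiffs)

# -----------------------------------------------------------------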
SKIRT/PTS
eagle/extractor.py
Python
agpl-3.0
39,415
[ "Galaxy", "Gaussian" ]
8c5840088af1baec10333e3d1d9603ba77fee319c2a5369e4a9a149c0b5ef15f
# Orca
#
# Copyright 2006-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.

"""Exposes a dictionary, pronunciation_dict, that maps words to what
they sound like."""

__id__        = "$Id$"
__version__   = "$Revision$"
__date__      = "$Date$"
__copyright__ = "Copyright (c) 2006-2008 Sun Microsystems Inc."
__license__   = "LGPL"

def getPronunciation(word, pronunciations=None):
    """Given a word, return a string that represents what this word
    sounds like.

    Note: This code does not handle the pronunciation of character names.
    If you want a character name to be spoken, treat it as a punctuation
    character at LEVEL_NONE in punctuation_settings.py.  See, for example,
    the left_arrow and right_arrow characters.

    Arguments:
    - word: the word to get the "sounds like" representation for.
    - pronunciations: an optional dictionary used to get the pronunciation
      from.

    Returns a string that represents what this word sounds like, or
    the word if there is no representation.
    """

    lowerWord = word.lower()
    dictionary = pronunciations or pronunciation_dict
    entry = dictionary.get(lowerWord, [word, word])
    return entry[1]

def setPronunciation(word, replacementString, pronunciations=None):
    """Given an actual word, and a replacement string, set a key/value
    pair in a pronunciation dictionary.

    Arguments:
    - word: the word to be pronounced.
    - replacementString: the replacement string to use instead.
    - pronunciations: an optional dictionary used to set the pronunciation
      into.
    """

    key = word.lower()
    if pronunciations is not None:
        pronunciations[key] = [word, replacementString]
    else:
        pronunciation_dict[key] = [word, replacementString]

# pronunciation_dict is a dictionary where the keys are words and the
# values are [word, replacement] pairs; the replacement represents the
# pronunciation of that word (in other words, what the word sounds like).
#
pronunciation_dict = {}
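
# Usage sketch (illustration only; not part of the original module). The
# word and replacement below are made up; only getPronunciation and
# setPronunciation come from this module. A private dictionary is used so
# that the module-level pronunciation_dict is left untouched.
if __name__ == "__main__":
    myPronunciations = {}
    setPronunciation("LOL", "laughing out loud", myPronunciations)
    print(getPronunciation("LoL", myPronunciations))   # -> "laughing out loud"
    print(getPronunciation("orca", myPronunciations))  # -> "orca" (no entry)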
GNOME/orca
src/orca/pronunciation_dict.py
Python
lgpl-2.1
2,668
[ "ORCA" ]
c14fd857d8dce249859fdad59ace432101896efbc4b0927bf0e44f78cb6d87bc
""" Module to set up run time parameters for Clawpack -- classic code. The values set in the function setrun are then written out to data files that will be read in by the Fortran code. """ import os import numpy as np #------------------------------ def setrun(claw_pkg='classic'): #------------------------------ """ Define the parameters used for running Clawpack. INPUT: claw_pkg expected to be "classic" for this setrun. OUTPUT: rundata - object of class ClawRunData """ from clawpack.clawutil import data assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'" num_dim = 1 rundata = data.ClawRunData(claw_pkg, num_dim) #------------------------------------------------------------------ # Problem-specific parameters to be written to setprob.data: #------------------------------------------------------------------ probdata = rundata.new_UserData(name='probdata',fname='setprob.data') probdata.add_param('u', 1.0, 'advection velocity') probdata.add_param('beta', 100., 'for width of Gaussian data') probdata.add_param('freq', 80., 'frequency in wave packet') #------------------------------------------------------------------ # Standard Clawpack parameters to be written to claw.data: #------------------------------------------------------------------ clawdata = rundata.clawdata # initialized when rundata instantiated # --------------- # Spatial domain: # --------------- # Number of space dimensions: clawdata.num_dim = num_dim # Lower and upper edge of computational domain: clawdata.lower[0] = 0.000000e+00 # xlower clawdata.upper[0] = 1.000000e+00 # xupper # Number of grid cells: clawdata.num_cells[0] = 200 # mx # --------------- # Size of system: # --------------- # Number of equations in the system: clawdata.num_eqn = 1 # Number of auxiliary variables in the aux array (initialized in setaux) clawdata.num_aux = 0 # Index of aux array corresponding to capacity function, if there is one: clawdata.capa_index = 0 # ------------- # Initial time: # ------------- clawdata.t0 = 0.000000 # Restart from checkpoint file of a previous run? # Note: If restarting, you must also change the Makefile to set: # RESTART = True # If restarting, t0 above should be from original run, and the # restart_file 'fort.qNNNN' specified below should be in # the OUTDIR indicated in Makefile. clawdata.restart = False # True to restart from prior results clawdata.restart_file = 'fort.q0006' # File to use for restart data # ------------- # Output times: #-------------- # Specify at what times the results should be written to fort.q files. # Note that the time integration stops after the final output time. clawdata.output_style = 1 if clawdata.output_style==1: # Output ntimes frames at equally spaced times up to tfinal: # Can specify num_output_times = 0 for no output clawdata.num_output_times = 10 clawdata.tfinal = 1.000000 clawdata.output_t0 = True # output at initial (or restart) time? elif clawdata.output_style == 2: # Specify a list or numpy array of output times: # Include t0 if you want output at the initial time. clawdata.output_times = [0., 0.1] elif clawdata.output_style == 3: # Output every step_interval timesteps over total_steps timesteps: clawdata.output_step_interval = 2 clawdata.total_steps = 4 clawdata.output_t0 = True # output at initial (or restart) time? 
clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf' clawdata.output_q_components = 'all' # could be list such as [True,True] clawdata.output_aux_components = 'none' # could be list clawdata.output_aux_onlyonce = True # output aux arrays only at t0 # --------------------------------------------------- # Verbosity of messages to screen during integration: # --------------------------------------------------- # The current t, dt, and cfl will be printed every time step # at AMR levels <= verbosity. Set verbosity = 0 for no printing. # (E.g. verbosity == 2 means print only on levels 1 and 2.) clawdata.verbosity = 0 # -------------- # Time stepping: # -------------- # if dt_variable==True: variable time steps used based on cfl_desired, # if dt_variable==False: fixed time steps dt = dt_initial always used. clawdata.dt_variable = True # Initial time step for variable dt. # (If dt_variable==0 then dt=dt_initial for all steps) clawdata.dt_initial = 1.000000e-01 # Max time step to be allowed if variable dt used: clawdata.dt_max = 1.000000e+99 # Desired Courant number if variable dt used clawdata.cfl_desired = 0.800000 # max Courant number to allow without retaking step with a smaller dt: clawdata.cfl_max = 1.000000 # Maximum number of time steps to allow between output times: clawdata.steps_max = 5000 # ------------------ # Method to be used: # ------------------ # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters clawdata.order = 2 # Number of waves in the Riemann solution: clawdata.num_waves = 1 # List of limiters to use for each wave family: # Required: len(limiter) == num_waves # Some options: # 0 or 'none' ==> no limiter (Lax-Wendroff) # 1 or 'minmod' ==> minmod # 2 or 'superbee' ==> superbee # 3 or 'vanleer' ==> van Leer # 4 or 'mc' ==> MC limiter clawdata.limiter = ['none'] clawdata.use_fwaves = False # True ==> use f-wave version of algorithms # Source terms splitting: # src_split == 0 or 'none' ==> no source term (src routine never called) # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used, # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended. clawdata.source_split = 0 # -------------------- # Boundary conditions: # -------------------- # Number of ghost cells (usually 2) clawdata.num_ghost = 2 # Choice of BCs at xlower and xupper: # 0 or 'user' => user specified (must modify bcNamr.f to use this option) # 1 or 'extrap' => extrapolation (non-reflecting outflow) # 2 or 'periodic' => periodic (must specify this at both boundaries) # 3 or 'wall' => solid wall for systems where q(2) is normal velocity clawdata.bc_lower[0] = 'periodic' # at xlower clawdata.bc_upper[0] = 'periodic' # at xupper return rundata # end of function setrun # ---------------------- if __name__ == '__main__': # Set up run-time parameters and write all data files. import sys rundata = setrun(*sys.argv[1:]) rundata.write()
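
# Illustrative note (an assumption, not part of the original setrun.py): the
# probdata values above suggest that the Fortran qinit routine initializes a
# Gaussian-modulated wave packet, which is not shown here. A rough Python
# sketch of that assumed profile, using beta=100 and freq=80 from above:
#
#   import numpy as np
#   x = np.linspace(0., 1., 200)
#   q0 = np.exp(-100.*(x - 0.5)**2) * np.sin(80.*x)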
amath574w2015/am574-class
labs/lab4/chap6/wavepacket/setrun.py
Python
bsd-3-clause
7,233
[ "Gaussian", "NetCDF" ]
0b532fc19c6475fbde34b5309217c284c40fb71f85ac294e9cda6279208ec32f
#!/usr/bin/env python3 # Copyright 2020 Efabless Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #-------------------------------------------------------- # Padframe Editor and Core Floorplanner # #-------------------------------------------------------- # Written by Tim Edwards # efabless, inc. # April 24, 2019 # Version 0.5 # Based on https://github.com/YosysHQ/padring (requirement) # Update: May 9, 2019 to add console message window # Update: May 10, 2019 to incorporate core floorplanning # Update: Jan 31, 2020 to allow batch operation #-------------------------------------------------------- import os import re import sys import glob import json import math import shutil import signal import select import subprocess import faulthandler import tkinter from tkinter import ttk from tkinter import filedialog import tksimpledialog from consoletext import ConsoleText # User preferences file (if it exists) prefsfile = '~/design/.profile/prefs.json' #------------------------------------------------------ # Dialog for entering a pad #------------------------------------------------------ class PadNameDialog(tksimpledialog.Dialog): def body(self, master, warning=None, seed=None): if warning: ttk.Label(master, text=warning).grid(row = 0, columnspan = 2, sticky = 'wns') ttk.Label(master, text="Enter new group name:").grid(row = 1, column = 0, sticky = 'wns') self.nentry = ttk.Entry(master) self.nentry.grid(row = 1, column = 1, sticky = 'ewns') if seed: self.nentry.insert(0, seed) return self.nentry # initial focus def apply(self): return self.nentry.get() #------------------------------------------------------ # Dialog for entering core dimensions #------------------------------------------------------ class CoreSizeDialog(tksimpledialog.Dialog): def body(self, master, warning="Chip core dimensions", seed=None): if warning: ttk.Label(master, text=warning).grid(row = 0, columnspan = 2, sticky = 'wns') ttk.Label(master, text="Enter core width x height (microns):").grid(row = 1, column = 0, sticky = 'wns') self.nentry = ttk.Entry(master) self.nentry.grid(row = 1, column = 1, sticky = 'ewns') if seed: self.nentry.insert(0, seed) return self.nentry # initial focus def apply(self): return self.nentry.get() #------------------------------------------------ # SoC Floorplanner and Padframe Generator GUI #------------------------------------------------ class SoCFloorplanner(ttk.Frame): """Open Galaxy Pad Frame Generator.""" def __init__(self, parent = None, *args, **kwargs): '''See the __init__ for Tkinter.Toplevel.''' ttk.Frame.__init__(self, parent, *args[1:], **kwargs) self.root = parent self.init_data() if args[0] == True: self.do_gui = True self.init_gui() else: self.do_gui = False self.use_console = False def on_quit(self): """Exits program.""" quit() def init_gui(self): """Builds GUI.""" global prefsfile message = [] fontsize = 11 # Read user preferences file, get default font size from it. 
prefspath = os.path.expanduser(prefsfile) if os.path.exists(prefspath): with open(prefspath, 'r') as f: self.prefs = json.load(f) if 'fontsize' in self.prefs: fontsize = self.prefs['fontsize'] else: self.prefs = {} s = ttk.Style() available_themes = s.theme_names() s.theme_use(available_themes[0]) s.configure('normal.TButton', font=('Helvetica', fontsize), border = 3, relief = 'raised') s.configure('title.TLabel', font=('Helvetica', fontsize, 'bold italic'), foreground = 'brown', anchor = 'center') s.configure('blue.TLabel', font=('Helvetica', fontsize), foreground = 'blue') s.configure('normal.TLabel', font=('Helvetica', fontsize)) s.configure('normal.TCheckbutton', font=('Helvetica', fontsize)) s.configure('normal.TMenubutton', font=('Helvetica', fontsize)) s.configure('normal.TEntry', font=('Helvetica', fontsize), background='white') s.configure('pad.TLabel', font=('Helvetica', fontsize), foreground = 'blue', relief = 'flat') s.configure('select.TLabel', font=('Helvetica', fontsize, 'bold'), foreground = 'white', background = 'blue', relief = 'flat') # parent.withdraw() self.root.title('Padframe Generator and Core Floorplanner') self.root.option_add('*tearOff', 'FALSE') self.pack(side = 'top', fill = 'both', expand = 'true') self.root.protocol("WM_DELETE_WINDOW", self.on_quit) pane = tkinter.PanedWindow(self, orient = 'vertical', sashrelief = 'groove', sashwidth = 6) pane.pack(side = 'top', fill = 'both', expand = 'true') self.toppane = ttk.Frame(pane) self.botpane = ttk.Frame(pane) self.toppane.columnconfigure(0, weight = 1) self.toppane.rowconfigure(0, weight = 1) self.botpane.columnconfigure(0, weight = 1) self.botpane.rowconfigure(0, weight = 1) # Scrolled frame using canvas widget self.pframe = tkinter.Frame(self.toppane) self.pframe.grid(row = 0, column = 0, sticky = 'news') self.pframe.rowconfigure(0, weight = 1) self.pframe.columnconfigure(0, weight = 1) # Add column on the left, listing all groups and the pads they belong to. # This starts as just a frame to be filled. Use a canvas to create a # scrolled frame. # The primary frame holds the canvas self.canvas = tkinter.Canvas(self.pframe, background = "white") self.canvas.grid(row = 0, column = 0, sticky = 'news') # Add Y scrollbar to pad list window xscrollbar = ttk.Scrollbar(self.pframe, orient = 'horizontal') xscrollbar.grid(row = 1, column = 0, sticky = 'news') yscrollbar = ttk.Scrollbar(self.pframe, orient = 'vertical') yscrollbar.grid(row = 0, column = 1, sticky = 'news') self.canvas.config(xscrollcommand = xscrollbar.set) xscrollbar.config(command = self.canvas.xview) self.canvas.config(yscrollcommand = yscrollbar.set) yscrollbar.config(command = self.canvas.yview) self.canvas.bind("<Button-4>", self.on_scrollwheel) self.canvas.bind("<Button-5>", self.on_scrollwheel) # Configure callback self.canvas.bind("<Configure>", self.frame_configure) # Add a text window to capture output. Redirect print statements to it. 
self.console = ttk.Frame(self.botpane) self.console.grid(column = 0, row = 0, sticky = "news") self.text_box = ConsoleText(self.console, wrap='word', height = 4) self.text_box.pack(side='left', fill='both', expand='true') console_scrollbar = ttk.Scrollbar(self.console) console_scrollbar.pack(side='right', fill='y') # Attach console to scrollbar self.text_box.config(yscrollcommand = console_scrollbar.set) console_scrollbar.config(command = self.text_box.yview) # Add the bottom bar with buttons self.bbar = ttk.Frame(self.botpane) self.bbar.grid(column = 0, row = 1, sticky = "news") self.bbar.import_button = ttk.Button(self.bbar, text='Import', command=self.vlogimport, style='normal.TButton') self.bbar.import_button.grid(column=0, row=0, padx = 5) self.bbar.generate_button = ttk.Button(self.bbar, text='Generate', command=self.generate, style='normal.TButton') self.bbar.generate_button.grid(column=1, row=0, padx = 5) self.bbar.save_button = ttk.Button(self.bbar, text='Save', command=self.save, style='normal.TButton') self.bbar.save_button.grid(column=2, row=0, padx = 5) self.bbar.cancel_button = ttk.Button(self.bbar, text='Quit', command=self.on_quit, style='normal.TButton') self.bbar.cancel_button.grid(column=3, row=0, padx = 5) pane.add(self.toppane) pane.add(self.botpane) pane.paneconfig(self.toppane, stretch='first') def init_data(self): self.vlogpads = [] self.corecells = [] self.Npads = [] self.Spads = [] self.Epads = [] self.Wpads = [] self.NEpad = [] self.NWpad = [] self.SEpad = [] self.SWpad = [] self.coregroup = [] self.celldefs = [] self.coredefs = [] self.selected = [] self.ioleflibs = [] self.llx = 0 self.lly = 0 self.urx = 0 self.ury = 0 self.event_data = {} self.event_data['x0'] = 0 self.event_data['y0'] = 0 self.event_data['x'] = 0 self.event_data['y'] = 0 self.event_data['tag'] = None self.scale = 1.0 self.margin = 100 self.pad_rotation = 0 self.init_messages = [] self.stdout = None self.stderr = None self.keep_cfg = False self.ef_format = False self.use_console = False def init_padframe(self): self.set_project() self.vlogimport() self.readplacement(precheck=True) self.resolve() self.generate(0) # Local routines for handling printing to the text console def print(self, message, file=None, end='\n', flush=True): if not file: if not self.use_console: file = sys.stdout else: file = ConsoleText.StdoutRedirector(self.text_box) if self.stdout: print(message, file=file, end=end) if flush: self.stdout.flush() self.update_idletasks() else: self.init_messages.append(message) def text_to_console(self): # Redirect stdout and stderr to the console as the last thing to do. . . # Otherwise errors in the GUI get sucked into the void. self.stdout = sys.stdout self.stderr = sys.stderr if self.use_console: sys.stdout = ConsoleText.StdoutRedirector(self.text_box) sys.stderr = ConsoleText.StderrRedirector(self.text_box) if len(self.init_messages) > 0: for message in self.init_messages: self.print(message) self.init_messages = [] # Set the project name(s). This is the name of the top-level verilog. # The standard protocol is that the project directory contains a file # project.json that defines a name 'ip-name' that is the same as the # layout name, the verilog module name, etc. 
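    # For reference, a minimal project.json of the form parsed below; the
    # ip-name value is hypothetical:
    #
    #     {
    #         "data-sheet": {
    #             "ip-name": "my_chip"
    #         }
    #     }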
def set_project(self): # Check pwd pwdname = self.projectpath if self.projectpath else os.getcwd() subdir = os.path.split(pwdname)[1] if subdir == 'mag' or subdir == 'verilog': projectpath = os.path.split(pwdname)[0] else: projectpath = pwdname projectroot = os.path.split(projectpath)[0] projectdirname = os.path.split(projectpath)[1] # Check for project.json jsonname = None if os.path.isfile(projectpath + '/project.json'): jsonname = projectpath + '/project.json' elif os.path.isfile(projectroot + '/' + projectdirname + '.json'): jsonname = projectroot + '/' + projectdirname + '.json' if os.path.isfile(projectroot + '/project.json'): # Just in case this was started from some other subdirectory projectpath = projectroot jsonname = projectroot + '/project.json' if jsonname: self.print('Reading project JSON file ' + jsonname) with open(jsonname, 'r') as ifile: topdata = json.load(ifile) if 'data-sheet' in topdata: dsheet = topdata['data-sheet'] if 'ip-name' in dsheet: self.project = dsheet['ip-name'] self.projectpath = projectpath else: self.print('No project JSON file; using directory name as the project name.') self.project = os.path.split(projectpath)[1] self.projectpath = projectpath self.print('Project name is ' + self.project + ' (' + self.projectpath + ')') # Functions for drag-and-drop capability def add_draggable(self, tag): self.canvas.tag_bind(tag, '<ButtonPress-1>', self.on_button_press) self.canvas.tag_bind(tag, '<ButtonRelease-1>', self.on_button_release) self.canvas.tag_bind(tag, '<B1-Motion>', self.on_button_motion) self.canvas.tag_bind(tag, '<ButtonPress-2>', self.on_button2_press) self.canvas.tag_bind(tag, '<ButtonPress-3>', self.on_button3_press) def on_button_press(self, event): '''Begining drag of an object''' # Find the closest item, then record its tag. locx = event.x + self.canvas.canvasx(0) locy = event.y + self.canvas.canvasy(0) item = self.canvas.find_closest(locx, locy)[0] self.event_data['tag'] = self.canvas.gettags(item)[0] self.event_data['x0'] = event.x self.event_data['y0'] = event.y self.event_data['x'] = event.x self.event_data['y'] = event.y def on_button2_press(self, event): '''Flip an object (excluding corners)''' locx = event.x + self.canvas.canvasx(0) locy = event.y + self.canvas.canvasy(0) item = self.canvas.find_closest(locx, locy)[0] tag = self.canvas.gettags(item)[0] try: corecell = next(item for item in self.coregroup if item['name'] == tag) except: try: pad = next(item for item in self.Npads if item['name'] == tag) except: pad = None if not pad: try: pad = next(item for item in self.Epads if item['name'] == tag) except: pad = None if not pad: try: pad = next(item for item in self.Spads if item['name'] == tag) except: pad = None if not pad: try: pad = next(item for item in self.Wpads if item['name'] == tag) except: pad = None if not pad: self.print('Error: Object cannot be flipped.') else: # Flip the pad (in the only way meaningful for the pad). orient = pad['o'] if orient == 'N': pad['o'] = 'FN' elif orient == 'E': pad['o'] = 'FW' elif orient == 'S': pad['o'] = 'FS' elif orient == 'W': pad['o'] = 'FE' elif orient == 'FN': pad['o'] = 'N' elif orient == 'FE': pad['o'] = 'W' elif orient == 'FS': pad['o'] = 'S' elif orient == 'FW': pad['o'] = 'E' else: # Flip the cell. Use the DEF meaning of flip, which is to # add or subtract 'F' from the orientation. 
orient = corecell['o'] if not 'F' in orient: corecell['o'] = 'F' + orient else: corecell['o'] = orient[1:] # Redraw self.populate(0) def on_button3_press(self, event): '''Rotate a core object (no pads) ''' locx = event.x + self.canvas.canvasx(0) locy = event.y + self.canvas.canvasy(0) item = self.canvas.find_closest(locx, locy)[0] tag = self.canvas.gettags(item)[0] try: corecell = next(item for item in self.coregroup if item['name'] == tag) except: self.print('Error: Object cannot be rotated.') else: # Modify its orientation orient = corecell['o'] if orient == 'N': corecell['o'] = 'E' elif orient == 'E': corecell['o'] = 'S' elif orient == 'S': corecell['o'] = 'W' elif orient == 'W': corecell['o'] = 'N' elif orient == 'FN': corecell['o'] = 'FW' elif orient == 'FW': corecell['o'] = 'FS' elif orient == 'FS': corecell['o'] = 'FE' elif orient == 'FE': corecell['o'] = 'FN' # rewrite the core DEF file self.write_core_def() # Redraw self.populate(0) def on_button_motion(self, event): '''Handle dragging of an object''' # compute how much the mouse has moved delta_x = event.x - self.event_data['x'] delta_y = event.y - self.event_data['y'] # move the object the appropriate amount self.canvas.move(self.event_data['tag'], delta_x, delta_y) # record the new position self.event_data['x'] = event.x self.event_data['y'] = event.y def on_button_release(self, event): '''End drag of an object''' # Find the pad associated with the tag and update its position information tag = self.event_data['tag'] # Collect pads in clockwise order. Note that E and S rows are not clockwise allpads = [] allpads.extend(self.Npads) allpads.extend(self.NEpad) allpads.extend(reversed(self.Epads)) allpads.extend(self.SEpad) allpads.extend(reversed(self.Spads)) allpads.extend(self.SWpad) allpads.extend(self.Wpads) allpads.extend(self.NWpad) # Create a list of row references (also in clockwise order, but no reversing) padrows = [self.Npads, self.NEpad, self.Epads, self.SEpad, self.Spads, self.SWpad, self.Wpads, self.NWpad] # Record the row or corner where this pad was located before the move for row in padrows: try: pad = next(item for item in row if item['name'] == tag) except: pass else: padrow = row break # Currently there is no procedure to move a pad out of the corner # position; corners are fixed by definition. if padrow == self.NEpad or padrow == self.SEpad or padrow == self.SWpad or padrow == self.NWpad: # Easier to run generate() than to put the pad back. . . self.generate(0) return # Find the original center point of the pad being moved padllx = pad['x'] padlly = pad['y'] if pad['o'] == 'N' or pad['o'] == 'S': padurx = padllx + pad['width'] padury = padlly + pad['height'] else: padurx = padllx + pad['height'] padury = padlly + pad['width'] padcx = (padllx + padurx) / 2 padcy = (padlly + padury) / 2 # Add distance from drag information (note that drag position in y # is negative relative to the chip dimensions) padcx += (self.event_data['x'] - self.event_data['x0']) / self.scale padcy -= (self.event_data['y'] - self.event_data['y0']) / self.scale # reset the drag information self.event_data['tag'] = None self.event_data['x'] = 0 self.event_data['y'] = 0 self.event_data['x0'] = 0 self.event_data['y0'] = 0 # Find the distance from the pad to all other pads, and get the two # closest entries. 
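        # (The pad will then be spliced back into the ring between those two
        # nearest neighbours, so a drag anywhere near a row re-inserts the
        # pad into that row.)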
        wwidth = self.urx - self.llx
        dist0 = wwidth
        dist1 = wwidth
        pad0 = None
        pad1 = None
        for npad in allpads:
            if npad == pad:
                continue
            npadllx = npad['x']
            npadlly = npad['y']
            if npad['o'] == 'N' or npad['o'] == 'S':
                npadurx = npadllx + npad['width']
                npadury = npadlly + npad['height']
            else:
                npadurx = npadllx + npad['height']
                npadury = npadlly + npad['width']
            npadcx = (npadllx + npadurx) / 2
            npadcy = (npadlly + npadury) / 2

            deltx = npadcx - padcx
            delty = npadcy - padcy
            pdist = math.sqrt(deltx * deltx + delty * delty)

            if pdist < dist0:
                dist1 = dist0
                pad1 = pad0
                dist0 = pdist
                pad0 = npad
            elif pdist < dist1:
                dist1 = pdist
                pad1 = npad

        # Diagnostic
        # self.print('Two closest pads to pad ' + pad['name'] + ' (' + pad['cell'] + '): ')
        # self.print(pad0['name'] + ' (' + pad0['cell'] + ') dist = ' + str(dist0))
        # self.print(pad1['name'] + ' (' + pad1['cell'] + ') dist = ' + str(dist1))

        # Record the row or corner where these pads are
        for row in padrows:
            try:
                testpad = next(item for item in row if item['name'] == pad0['name'])
            except:
                pass
            else:
                padrow0 = row
                break

        for row in padrows:
            try:
                testpad = next(item for item in row if item['name'] == pad1['name'])
            except:
                pass
            else:
                padrow1 = row
                break

        # Remove pad from its own row
        padrow.remove(pad)

        # Insert pad into new row.  Watch for wraparound from the last entry to the first
        padidx0 = allpads.index(pad0)
        padidx1 = allpads.index(pad1)
        if padidx0 == 0 and padidx1 > 2:
            padidx1 = -1
        if padidx1 > padidx0:
            padafter = pad1
            rowafter = padrow1
            padbefore = pad0
            rowbefore = padrow0
        else:
            padafter = pad0
            rowafter = padrow0
            padbefore = pad1
            rowbefore = padrow1

        # Do not replace corner positions (? may be necessary ?)
        if rowafter == self.NWpad:
            self.Wpads.append(pad)
        elif rowafter == self.NEpad:
            self.Npads.append(pad)
        elif rowafter == self.SEpad:
            self.Epads.insert(0, pad)
        elif rowafter == self.SWpad:
            self.Spads.insert(0, pad)
        elif rowafter == self.Wpads or rowafter == self.Npads:
            idx = rowafter.index(padafter)
            rowafter.insert(idx, pad)
        elif rowbefore == self.NEpad:
            self.Epads.append(pad)
        elif rowbefore == self.SEpad:
            self.Spads.append(pad)
        else:
            # rows E and S are ordered counterclockwise
            idx = rowbefore.index(padbefore)
            rowbefore.insert(idx, pad)

        # Re-run padring
        self.generate(0)

    def on_scrollwheel(self, event):
        if event.num == 4:
            zoomval = 1.1
        elif event.num == 5:
            zoomval = 0.9
        else:
            zoomval = 1.0

        self.scale *= zoomval
        self.canvas.scale('all', -15 * zoomval, -15 * zoomval, zoomval, zoomval)
        self.event_data['x'] *= zoomval
        self.event_data['y'] *= zoomval
        self.event_data['x0'] *= zoomval
        self.event_data['y0'] *= zoomval
        self.frame_configure(event)

    # Callback functions similar to the pad event callbacks above, but for
    # core cells.  Unlike pad cells, core cells can be rotated and flipped
    # arbitrarily, and they do not force a recomputation of the padframe
    # unless their position forces the padframe to expand.

    def add_core_draggable(self, tag):
        self.canvas.tag_bind(tag, '<ButtonPress-1>', self.on_button_press)
        self.canvas.tag_bind(tag, '<ButtonRelease-1>', self.core_on_button_release)
        self.canvas.tag_bind(tag, '<B1-Motion>', self.on_button_motion)
        self.canvas.tag_bind(tag, '<ButtonPress-2>', self.on_button2_press)
        self.canvas.tag_bind(tag, '<ButtonPress-3>', self.on_button3_press)

    def core_on_button_release(self, event):
        '''End drag of a core cell'''

        # Find the cell associated with the tag and update its position information
        tag = self.event_data['tag']
        try:
            corecell = next(item for item in self.coregroup if item['name'] == tag)
        except:
            self.print('Error: cell ' + tag + ' is not in coregroup!')
        else:
            # Modify its position values
            corex = corecell['x']
            corey = corecell['y']

            # Add distance from drag information (note that drag position in y
            # is negative relative to the chip dimensions)
            deltax = (self.event_data['x'] - self.event_data['x0']) / self.scale
            deltay = (self.event_data['y'] - self.event_data['y0']) / self.scale
            corecell['x'] = corex + deltax
            corecell['y'] = corey - deltay

            # rewrite the core DEF file
            self.write_core_def()

        # reset the drag information
        self.event_data['tag'] = None
        self.event_data['x'] = 0
        self.event_data['y'] = 0
        self.event_data['x0'] = 0
        self.event_data['y0'] = 0

    # Critically needed or else frame does not resize to scrollbars!
    def grid_configure(self, padx, pady):
        pass

    # Redraw the chip frame view in response to changes in the pad list
    def redraw_frame(self):
        self.canvas.coords('boundary', self.llx, self.lly, self.urx, self.ury)

    # Update the canvas scrollregion to incorporate all the interior windows
    def frame_configure(self, event):
        if self.do_gui == False:
            return
        self.update_idletasks()
        bbox = self.canvas.bbox("all")
        try:
            newbbox = (-15, -15, bbox[2] + 15, bbox[3] + 15)
        except:
            pass
        else:
            self.canvas.configure(scrollregion = newbbox)

    # Fill the GUI entries with resident data
    def populate(self, level):
        if self.do_gui == False:
            return
        if level > 1:
            self.print('Recursion error:  Returning now.')
            return

        self.print('Populating floorplan view.')

        # Remove all entries from the canvas
        self.canvas.delete('all')

        allpads = self.Npads + self.NEpad + self.Epads + self.SEpad + self.Spads + self.SWpad + self.Wpads + self.NWpad
        notfoundlist = []

        for pad in allpads:
            if 'x' not in pad:
                self.print('Error:  Pad ' + pad['name'] + ' has no placement information.')
                continue
            llx = int(pad['x'])
            lly = int(pad['y'])
            pado = pad['o']
            try:
                padcell = next(item for item in self.celldefs if item['name'] == pad['cell'])
            except:
                # This should not happen (failsafe)
                if pad['cell'] not in notfoundlist:
                    self.print('Warning: there is no cell named ' + pad['cell'] + ' in the libraries.')
                    notfoundlist.append(pad['cell'])
                continue
            padw = padcell['width']
            padh = padcell['height']
            if 'N' in pado or 'S' in pado:
                urx = int(llx + padw)
                ury = int(lly + padh)
            else:
                urx = int(llx + padh)
                ury = int(lly + padw)
            pad['llx'] = llx
            pad['lly'] = lly
            pad['urx'] = urx
            pad['ury'] = ury

        # Note that the DEF coordinate system is reversed in Y from the canvas. . .
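        # (a DEF y-value maps to canvas y as  scale * (height - y),  which is
        # the transform applied below)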
height = self.ury - self.lly for pad in allpads: llx = pad['llx'] lly = height - pad['lly'] urx = pad['urx'] ury = height - pad['ury'] tag_id = pad['name'] if 'subclass' in pad: if pad['subclass'] == 'POWER': pad_color = 'orange2' elif pad['subclass'] == 'INOUT': pad_color = 'yellow' elif pad['subclass'] == 'OUTPUT': pad_color = 'powder blue' elif pad['subclass'] == 'INPUT': pad_color = 'goldenrod1' elif pad['subclass'] == 'SPACER': pad_color = 'green yellow' elif pad['class'] == 'ENDCAP': pad_color = 'green yellow' elif pad['subclass'] == '' or pad['class'] == ';': pad_color = 'khaki1' else: self.print('Unhandled pad class ' + pad['class']) pad_color = 'gray' else: pad_color = 'gray' sllx = self.scale * llx slly = self.scale * lly surx = self.scale * urx sury = self.scale * ury self.canvas.create_rectangle((sllx, slly), (surx, sury), fill=pad_color, tags=[tag_id]) cx = (sllx + surx) / 2 cy = (slly + sury) / 2 s = 10 if pad['width'] >= 10 else pad['width'] if pad['height'] < s: s = pad['height'] # Create an indicator line at the bottom left corner of the cell if pad['o'] == 'N': allx = sllx ally = slly - s aurx = sllx + s aury = slly elif pad['o'] == 'E': allx = sllx ally = sury + s aurx = sllx + s aury = sury elif pad['o'] == 'S': allx = surx ally = sury + s aurx = surx - s aury = sury elif pad['o'] == 'W': allx = surx ally = slly - s aurx = surx - s aury = slly elif pad['o'] == 'FN': allx = surx ally = slly - s aurx = surx - s aury = slly elif pad['o'] == 'FE': allx = surx ally = sury + s aurx = surx - s aury = sury elif pad['o'] == 'FS': allx = sllx ally = sury + s aurx = sllx + s aury = sury elif pad['o'] == 'FW': allx = sllx ally = slly - s aurx = sllx + s aury = slly self.canvas.create_line((allx, ally), (aurx, aury), tags=[tag_id]) # Rotate text on top and bottom rows if the tkinter version allows it. if tkinter.TclVersion >= 8.6: if pad['o'] == 'N' or pad['o'] == 'S': angle = 90 else: angle = 0 self.canvas.create_text((cx, cy), text=pad['name'], angle=angle, tags=[tag_id]) else: self.canvas.create_text((cx, cy), text=pad['name'], tags=[tag_id]) # Make the pad draggable self.add_draggable(tag_id) # Now add the core cells for cell in self.coregroup: if 'x' not in cell: self.print('Error: Core cell ' + cell['name'] + ' has no placement information.') continue # else: # self.print('Diagnostic: Creating object for core cell ' + cell['name']) llx = int(cell['x']) lly = int(cell['y']) cello = cell['o'] try: corecell = next(item for item in self.coredefs if item['name'] == cell['cell']) except: # This should not happen (failsafe) if cell['cell'] not in notfoundlist: self.print('Warning: there is no cell named ' + cell['cell'] + ' in the libraries.') notfoundlist.append(cell['cell']) continue cellw = corecell['width'] cellh = corecell['height'] if 'N' in cello or 'S' in cello: urx = int(llx + cellw) ury = int(lly + cellh) else: urx = int(llx + cellh) ury = int(lly + cellw) print('NOTE: cell ' + corecell['name'] + ' is rotated, w = ' + str(urx - llx) + '; h = ' + str(ury - lly)) cell['llx'] = llx cell['lly'] = lly cell['urx'] = urx cell['ury'] = ury # Watch for out-of-window position in core cells. 
corellx = self.llx corelly = self.lly coreurx = self.urx coreury = self.ury for cell in self.coregroup: if 'llx' not in cell: # Error message for this was handled above continue llx = cell['llx'] lly = height - cell['lly'] urx = cell['urx'] ury = height - cell['ury'] # Check for out-of-window cell if llx < corellx: corellx = llx if lly < corelly: corelly = lly if urx > coreurx: coreurx = urx if ury > coreury: coreury = ury tag_id = cell['name'] cell_color = 'gray40' sllx = self.scale * llx slly = self.scale * lly surx = self.scale * urx sury = self.scale * ury self.canvas.create_rectangle((sllx, slly), (surx, sury), fill=cell_color, tags=[tag_id]) cx = (sllx + surx) / 2 cy = (slly + sury) / 2 s = 10 if cell['width'] >= 10 else cell['width'] if cell['height'] < s: s = cell['height'] # Create an indicator line at the bottom left corner of the cell if cell['o'] == 'N': allx = sllx ally = slly - s aurx = sllx + s aury = slly elif cell['o'] == 'E': allx = sllx ally = sury + s aurx = sllx + s aury = sury elif cell['o'] == 'S': allx = surx ally = sury + s aurx = surx - s aury = sury elif cell['o'] == 'W': allx = surx ally = slly - s aurx = surx - s aury = slly elif cell['o'] == 'FN': allx = surx ally = slly - s aurx = surx - s aury = slly elif cell['o'] == 'FE': allx = surx ally = sury + s aurx = surx - s aury = sury elif cell['o'] == 'FS': allx = sllx ally = sury + s aurx = sllx + s aury = sury elif cell['o'] == 'FW': allx = sllx ally = slly - s aurx = sllx + s aury = slly self.canvas.create_line((allx, ally), (aurx, aury), tags=[tag_id]) # self.print('Created entry for cell ' + cell['name'] + ' at {0:g}, {1:g}'.format(cx, cy)) # Rotate text on top and bottom rows if the tkinter version allows it. if tkinter.TclVersion >= 8.6: if 'N' in cell['o'] or 'S' in cell['o']: angle = 90 else: angle = 0 self.canvas.create_text((cx, cy), text=cell['name'], angle=angle, tags=[tag_id]) else: self.canvas.create_text((cx, cy), text=cell['name'], tags=[tag_id]) # Make the core cell draggable self.add_core_draggable(tag_id) # Is there a boundary size defined? if self.urx > self.llx and self.ury > self.lly: self.create_boundary() # Did the core extend into negative X or Y? If so, adjust all canvas # coordinates to fit in the window, or else objects cannot be reached # even by zooming out (since zooming is pinned on the top corner). offsetx = 0 offsety = 0 # NOTE: Probably want to check if the core exceeds the inner # dimension of the pad ring, not the outer (to check and to do). if corellx < self.llx: offsetx = self.llx - corellx if corelly < self.lly: offsety = self.lly - corelly if offsetx > 0 or offsety > 0: self.canvas.move("all", offsetx, offsety) # An offset implies that the chip is core limited, and the # padframe requires additional space. This can be accomplished # simply by running "Generate". NOTE: Since generate() calls # populate(), be VERY SURE that this does not infinitely recurse! self.generate(level) # Generate a DEF file of the core area def write_core_def(self): self.print('Writing core placementment information in DEF file "core.def".') mag_path = self.projectpath + '/mag' # The core cells must always clear the I/O pads on the left and # bottom (with the ad-hoc margin of self.margin). If core cells have # been moved to the left or down past the padframe edge, then the # entire core needs to be repositioned. # To be done: draw a boundary around the core, let the edges of that # boundary be draggable, and let the difference between the boundary # and the core area define the margin. 
if self.SWpad != []: corellx = self.SWpad[0]['x'] + self.SWpad[0]['width'] + self.margin corelly = self.SWpad[0]['y'] + self.SWpad[0]['height'] + self.margin else: corellx = self.Wpads[0]['x'] + self.Wpads[0]['height'] + self.margin corelly = self.Spads[0]['x'] + self.Spads[0]['height'] + self.margin offsetx = 0 offsety = 0 for corecell in self.coregroup: if corecell['x'] < corellx: if corellx - corecell['x'] > offsetx: offsetx = corellx - corecell['x'] if corecell['y'] < corelly: if corelly - corecell['y'] > offsety: offsety = corelly - corecell['y'] if offsetx > 0 or offsety > 0: for corecell in self.coregroup: corecell['x'] += offsetx corecell['y'] += offsety # Now write the core DEF file with open(mag_path + '/core.def', 'w') as ofile: print('DESIGN CORE ;', file=ofile) print('UNITS DISTANCE MICRONS 1000 ;', file=ofile) print('COMPONENTS {0:d} ;'.format(len(self.coregroup)), file=ofile) for corecell in self.coregroup: print(' - ' + corecell['name'] + ' ' + corecell['cell'], file=ofile, end='') print(' + PLACED ( {0:d} {1:d} ) {2:s} ;'.format(int(corecell['x'] * 1000), int(corecell['y'] * 1000), corecell['o']), file=ofile) print ('END COMPONENTS', file=ofile) print ('END DESIGN', file=ofile) # Create the chip boundary area def create_boundary(self): scale = self.scale llx = (self.llx - 10) * scale lly = (self.lly - 10) * scale urx = (self.urx + 10) * scale ury = (self.ury + 10) * scale pad_color = 'plum1' tag_id = 'boundary' self.canvas.create_rectangle((llx, lly), (urx, ury), outline=pad_color, width=2, tags=[tag_id]) # Add text to the middle representing the chip and core areas cx = ((self.llx + self.urx) / 2) * scale cy = ((self.lly + self.ury) / 2) * scale width = self.urx - self.llx height = self.ury - self.lly areatext = 'Chip dimensions (um): {0:g} x {1:g}'.format(width, height) tag_id = 'chiparea' self.canvas.create_text((cx, cy), text=areatext, tags=[tag_id]) # Rotate orientation according to self.pad_rotation. def rotate_orientation(self, orient_in): orient_v = ['N', 'E', 'S', 'W', 'N', 'E', 'S', 'W'] idxadd = int(self.pad_rotation / 90) idx = orient_v.index(orient_in) return orient_v[idx + idxadd] # Read a list of cell macros (name, size, class) from a LEF library def read_lef_macros(self, libpath, libname = None, libtype = 'iolib'): if libtype == 'iolib': libtext = 'I/O ' elif libtype == 'celllib': libtext = 'core ' else: libtext = '' macros = [] if libname: if os.path.splitext(libname)[1] == '': libname += '.lef' leffiles = glob.glob(libpath + '/' + libname) else: leffiles = glob.glob(libpath + '/*.lef') if leffiles == []: if libname: self.print('WARNING: No file ' + libpath + '/' + libname + '.lef') else: self.print('WARNING: No files ' + libpath + '/*.lef') for leffile in leffiles: libpath = os.path.split(leffile)[0] libname = os.path.split(libpath)[1] self.print('Reading LEF ' + libtext + 'library ' + leffile) with open(leffile, 'r') as ifile: ilines = ifile.read().splitlines() in_macro = False for iline in ilines: iparse = iline.split() if iparse == []: continue elif iparse[0] == 'MACRO': in_macro = True newmacro = {} newmacro['name'] = iparse[1] newmacro[libtype] = leffile macros.append(newmacro) elif in_macro: if iparse[0] == 'END': if len(iparse) > 1 and iparse[1] == newmacro['name']: in_macro = False elif iparse[0] == 'CLASS': newmacro['class'] = iparse[1] if len(iparse) > 2: newmacro['subclass'] = iparse[2] # Use the 'ENDCAP' class to identify pad rotations # other than BOTTOMLEFT. 
This is somewhat ad-hoc # depending on the foundry; may not be generally # applicable. if newmacro['class'] == 'ENDCAP': if newmacro['subclass'] == 'TOPLEFT': self.pad_rotation = 90 elif newmacro['subclass'] == 'TOPRIGHT': self.pad_rotation = 180 elif newmacro['subclass'] == 'BOTTOMRIGHT': self.pad_rotation = 270 else: newmacro['subclass'] = None elif iparse[0] == 'SIZE': newmacro['width'] = float(iparse[1]) newmacro['height'] = float(iparse[3]) elif iparse[0] == 'ORIGIN': newmacro['x'] = float(iparse[1]) newmacro['y'] = float(iparse[2]) return macros # Read a list of cell names from a verilog file # If filename is relative, then check in the same directory as the verilog # top-level netlist (vlogpath) and in the subdirectory 'source/' of the top- # level directory. Also check in the ~/design/ip/ directory. These are # common include paths for the simulation. def read_verilog_lib(self, incpath, vlogpath): iocells = [] if not os.path.isfile(incpath) and incpath[0] != '/': locincpath = vlogpath + '/' + incpath if not os.path.isfile(locincpath): locincpath = vlogpath + '/source/' + incpath if not os.path.isfile(locincpath): projectpath = os.path.split(vlogpath)[0] designpath = os.path.split(projectpath)[0] locincpath = designpath + '/ip/' + incpath else: locincpath = incpath if not os.path.isfile(locincpath): self.print('File ' + incpath + ' not found (at ' + locincpath + ')!') else: self.print('Reading verilog library ' + locincpath) with open(locincpath, 'r') as ifile: ilines = ifile.read().splitlines() for iline in ilines: iparse = re.split('[\t ()]', iline) while '' in iparse: iparse.remove('') if iparse == []: continue elif iparse[0] == 'module': iocells.append(iparse[1]) return iocells # Generate a LEF abstract view from a magic layout. If "outpath" is not # "None", then write output to outputpath (this is required if the input # file is in a read-only directory). def write_lef_file(self, magfile, outpath=None): mag_path = os.path.split(magfile)[0] magfullname = os.path.split(magfile)[1] module = os.path.splitext(magfullname)[0] if outpath: write_path = outpath else: write_path = mag_path self.print('Generating LEF view from layout for module ' + module) with open(write_path + '/pfg_write_lef.tcl', 'w') as ofile: print('drc off', file=ofile) print('box 0 0 0 0', file=ofile) # NOTE: Using "-force" option in case an IP with a different but # compatible tech is used (e.g., EFHX035A IP inside EFXH035C). This # is not checked for legality! if outpath: print('load ' + magfile + ' -force', file=ofile) else: print('load ' + module + ' -force', file=ofile) print('lef write', file=ofile) print('quit', file=ofile) magicexec = self.magic_path if self.magic_path else 'magic' mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole', 'pfg_write_lef.tcl'], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = write_path, universal_newlines = True) self.watch(mproc) os.remove(write_path + '/pfg_write_lef.tcl') # Watch a running process, polling for output and updating the GUI message # window as output arrives. Return only when the process has exited. # Note that this process cannot handle stdin(), so any input to the process # must be passed from a file. 
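    # Example invocation, mirroring the pattern used by write_lef_file()
    # above (the script name here is hypothetical):
    #
    #     mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole', 'script.tcl'],
    #                              stdout = subprocess.PIPE,
    #                              stderr = subprocess.PIPE,
    #                              universal_newlines = True)
    #     self.watch(mproc)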
def watch(self, process): if process == None: return while True: status = process.poll() if status != None: try: outputpair = process.communicate(timeout=1) except ValueError: self.print("Process forced stop, status " + str(status)) else: for line in outputpair[0].splitlines(): self.print(line) for line in outputpair[1].splitlines(): self.print(line, file=sys.stderr) break else: sresult = select.select([process.stdout, process.stderr], [], [], 0)[0] if process.stdout in sresult: outputline = process.stdout.readline().strip() self.print(outputline) elif process.stderr in sresult: outputline = process.stderr.readline().strip() self.print(outputline, file=sys.stderr) else: self.update_idletasks() # Reimport the pad list by reading the top-level verilog netlist. Determine # what pads are listed in the file, and check against the existing pad list. # The verilog/ directory should have a .v file containing a module of the # same name as self.project (ip-name). The .v filename should have the # same name as well (but not necessarily). To do: Handle import of # projects having a top-level schematic instead of a verilog netlist. def vlogimport(self): self.print('Importing verilog sources.') # First find the process PDK name for this project. Read the nodeinfo.json # file and find the list of I/O cell libraries. techpath = self.techpath if self.techpath else self.projectpath if os.path.exists(techpath + '/.config'): config_dir = '/.config' else: config_dir = '/.ef-config' if os.path.exists(techpath + config_dir): self.ef_format = True pdkpath = self.techpath if self.techpath else os.path.realpath(self.projectpath + config_dir + '/techdir') nodeinfopath = pdkpath + config_dir + '/nodeinfo.json' ioleflist = [] if os.path.exists(nodeinfopath): self.print('Reading known I/O cell libraries from ' + nodeinfopath) with open(nodeinfopath, 'r') as ifile: itop = json.load(ifile) if 'iocells' in itop: ioleflist = [] for iolib in itop['iocells']: if '/' in iolib: # Entries <lib>/<cell> refer to specific files libcell = iolib.split('/') if self.ef_format: iolibpath = pdkpath + '/libs.ref/lef/' + libcell[0] else: iolibpath = pdkpath + '/libs.ref/' + libcell[0] + '/lef/' ioleflist.extend(glob.glob(iolibpath + '/' + libcell[1] + '.lef')) else: # All other entries refer to everything in the directory. if self.ef_format: iolibpath = pdkpath + '/libs.ref/lef/' + iolib else: iolibpath = pdkpath + '/libs.ref/' + iolib + '/lef/' print(iolibpath) ioleflist.extend(glob.glob(iolibpath + '/*.lef')) else: # Diagnostic print('Cannot read PDK information file ' + nodeinfopath) # Fallback behavior: List everything in libs.ref/lef/ beginning with "IO" if len(ioleflist) == 0: if self.ef_format: ioleflist = glob.glob(pdkpath + '/libs.ref/lef/IO*/*.lef') else: ioleflist = glob.glob(pdkpath + '/libs.ref/IO*/lef/*.lef') if len(ioleflist) == 0: self.print('Cannot find any I/O cell libraries for this technology') return # Read the LEF libraries to get a list of all available cells. Keep # this list of cells in "celldefs". 
celldefs = [] ioliblist = [] ioleflibs = [] for iolib in ioleflist: iolibpath = os.path.split(iolib)[0] iolibfile = os.path.split(iolib)[1] ioliblist.append(os.path.split(iolibpath)[1]) celldefs.extend(self.read_lef_macros(iolibpath, iolibfile, 'iolib')) verilogcells = [] newpadlist = [] coredefs = [] corecells = [] corecelllist = [] lefprocessed = [] busrex = re.compile('.*\[[ \t]*([0-9]+)[ \t]*:[ \t]*([0-9]+)[ \t]*\]') vlogpath = self.projectpath + '/verilog' vlogfile = vlogpath + '/' + self.project + '.v' if os.path.isfile(vlogfile): with open(vlogfile, 'r') as ifile: vloglines = ifile.read().splitlines() for vlogline in vloglines: vlogparse = re.split('[\t ()]', vlogline) while '' in vlogparse: vlogparse.remove('') if vlogparse == []: continue elif vlogparse[0] == '//': continue elif vlogparse[0] == '`include': incpath = vlogparse[1].strip('"') libpath = os.path.split(incpath)[0] libname = os.path.split(libpath)[1] libfile = os.path.split(incpath)[1] # Read the verilog library for module names to match # against macro names in celldefs. modulelist = self.read_verilog_lib(incpath, vlogpath) matching = list(item for item in celldefs if item['name'] in modulelist) for imatch in matching: verilogcells.append(imatch['name']) leffile = imatch['iolib'] if leffile not in ioleflibs: ioleflibs.append(leffile) # Read a corresponding LEF file entry for non-I/O macros, if one # can be found (this handles files in the PDK). if len(matching) == 0: if libname != '': # (NOTE: Assumes full path starting with '/') lefpath = libpath.replace('verilog', 'lef') lefname = libfile.replace('.v', '.lef') if not os.path.exists(lefpath + '/' + lefname): leffiles = glob.glob(lefpath + '/*.lef') else: leffiles = [lefpath + '/' + lefname] for leffile in leffiles: if leffile in ioleflibs: continue elif leffile in lefprocessed: continue else: lefprocessed.append(leffile) lefname = os.path.split(leffile)[1] newcoredefs = self.read_lef_macros(lefpath, lefname, 'celllib') coredefs.extend(newcoredefs) corecells.extend(list(item['name'] for item in newcoredefs)) if leffiles == []: maglefname = libfile.replace('.v', '.mag') # Handle PDK files with a maglef/ view but no LEF file. maglefpath = libpath.replace('verilog', 'maglef') if not os.path.exists(maglefpath + '/' + maglefname): magleffiles = glob.glob(maglefpath + '/*.mag') else: magleffiles = [maglefpath + '/' + maglefname] if magleffiles == []: # Handle user ip/ files with a maglef/ view but # no LEF file. maglefpath = libpath.replace('verilog', 'maglef') designpath = os.path.split(self.projectpath)[0] maglefpath = designpath + '/ip/' + maglefpath if not os.path.exists(maglefpath + '/' + maglefname): magleffiles = glob.glob(maglefpath + '/*.mag') else: magleffiles = [maglefpath + '/' + maglefname] for magleffile in magleffiles: # Generate LEF file. Since PDK and ip/ entries # are not writeable, write into the project mag/ # directory. magpath = self.projectpath + '/mag' magname = os.path.split(magleffile)[1] magroot = os.path.splitext(magname)[0] leffile = magpath + '/' + magroot + '.lef' if not os.path.isfile(leffile): self.write_lef_file(magleffile, magpath) if leffile in ioleflibs: continue elif leffile in lefprocessed: continue else: lefprocessed.append(leffile) lefname = os.path.split(leffile)[1] newcoredefs = self.read_lef_macros(magpath, lefname, 'celllib') coredefs.extend(newcoredefs) corecells.extend(list(item['name'] for item in newcoredefs)) # LEF files generated on-the-fly are not needed # after they have been parsed. 
# os.remove(leffile) # Check if all modules in modulelist are represented by # corresponding LEF macros. If not, then go looking for a LEF # file in the mag/ or maglef/ directory. Then, go looking for # a .mag file in the mag/ or maglef/ directory, and build a # LEF macro from it. matching = list(item['name'] for item in coredefs if item['name'] in modulelist) for module in modulelist: if module not in matching: lefpath = self.projectpath + '/lef' magpath = self.projectpath + '/mag' maglefpath = self.projectpath + '/mag' lefname = libfile.replace('.v', '.lef') # If the verilog file root name is not the same as # the module name, then make a quick check for a # LEF file with the same root name as the verilog. # That indicates that the module does not exist in # the LEF file, probably because it is a primary # module that does not correspond to any layout. leffile = lefpath + '/' + lefname if os.path.exists(leffile): self.print('Diagnostic: module ' + module + ' is not in ' + leffile + ' (probably a primary module)') continue leffile = magpath + '/' + lefname istemp = False if not os.path.exists(leffile): magname = libfile.replace('.v', '.mag') magfile = magpath + '/' + magname if os.path.exists(magfile): self.print('Diagnostic: Found a .mag file for ' + module + ' in ' + magfile) self.write_lef_file(magfile) istemp = True else: magleffile = maglefpath + '/' + lefname if not os.path.exists(magleffile): self.print('Diagnostic: (module ' + module + ') has no LEF file ' + leffile + ' or ' + magleffile) magleffile = maglefpath + '/' + magname if os.path.exists(magleffile): self.print('Diagnostic: Found a .mag file for ' + module + ' in ' + magleffile) if os.access(maglefpath, os.W_OK): self.write_lef_file(magleffile) leffile = magleffile istemp = True else: self.write_lef_file(magleffile, magpath) else: self.print('Did not find a file ' + magfile) # self.print('Warning: module ' + module + ' has no LEF or .mag views') pass else: self.print('Diagnostic: Found a LEF file for ' + module + ' in ' + magleffile) leffile = magleffile else: self.print('Diagnostic: Found a LEF file for ' + module + ' in ' + leffile) if os.path.exists(leffile): if leffile in lefprocessed: continue else: lefprocessed.append(leffile) newcoredefs = self.read_lef_macros(magpath, lefname, 'celllib') # The LEF file generated on-the-fly is not needed # any more after parsing the macro(s). 
# if istemp: # os.remove(leffile) coredefs.extend(newcoredefs) corecells.extend(list(item['name'] for item in newcoredefs)) else: # self.print('Failed to find a LEF view for module ' + module) pass elif vlogparse[0] in verilogcells: # Check for array of pads bushigh = buslow = -1 if len(vlogparse) >= 3: bmatch = busrex.match(vlogline) if bmatch: bushigh = int(bmatch.group(1)) buslow = int(bmatch.group(2)) for i in range(buslow, bushigh + 1): newpad = {} if i >= 0: newpad['name'] = vlogparse[1] + '[' + str(i) + ']' else: newpad['name'] = vlogparse[1] # hack newpad['name'] = newpad['name'].replace("\\", "") newpad['cell'] = vlogparse[0] padcell = next(item for item in celldefs if item['name'] == vlogparse[0]) newpad['iolib'] = padcell['iolib'] newpad['class'] = padcell['class'] newpad['subclass'] = padcell['subclass'] newpad['width'] = padcell['width'] newpad['height'] = padcell['height'] newpadlist.append(newpad) elif vlogparse[0] in corecells: # Check for array of cells bushigh = buslow = -1 if len(vlogparse) >= 3: bmatch = busrex.match(vlogline) if bmatch: bushigh = int(bmatch.group(1)) buslow = int(bmatch.group(2)) for i in range(buslow, bushigh + 1): newcorecell = {} if i >= 0: newcorecell['name'] = vlogparse[1] + '[' + str(i) + ']' else: newcorecell['name'] = vlogparse[1] newcorecell['cell'] = vlogparse[0] corecell = next(item for item in coredefs if item['name'] == vlogparse[0]) newcorecell['celllib'] = corecell['celllib'] newcorecell['class'] = corecell['class'] newcorecell['subclass'] = corecell['subclass'] newcorecell['width'] = corecell['width'] newcorecell['height'] = corecell['height'] corecelllist.append(newcorecell) self.print('') self.print('Source file information:') self.print('Source filename: ' + vlogfile) self.print('Number of I/O libraries is ' + str(len(ioleflibs))) self.print('Number of library cells in I/O libraries used: ' + str(len(verilogcells))) self.print('Number of core celldefs is ' + str(len(coredefs))) self.print('') self.print('Number of I/O cells in design: ' + str(len(newpadlist))) self.print('Number of core cells in design: ' + str(len(corecelllist))) self.print('') # Save the results self.celldefs = celldefs self.coredefs = coredefs self.vlogpads = newpadlist self.corecells = corecelllist self.ioleflibs = ioleflibs # Check self.vlogpads, which was created during import (above) against # self.(N,S,W,E)pads, which was read from the DEF file (if there was one) # Also check self.corecells, which was created during import against # self.coregroup, which was read from the DEF file. 
def resolve(self): self.print('Resolve differences in verilog and LEF views.') samepads = [] addedpads = [] removedpads = [] # (1) Create list of entries that are in both self.vlogpads and self.()pads # (2) Create list of entries that are in self.vlogpads but not in self.()pads allpads = self.Npads + self.NEpad + self.Epads + self.SEpad + self.Spads + self.SWpad + self.Wpads + self.NWpad for pad in self.vlogpads: newpadname = pad['name'] try: lpad = next(item for item in allpads if item['name'] == newpadname) except: addedpads.append(pad) else: samepads.append(lpad) # (3) Create list of entries that are in allpads but not in self.vlogpads for pad in allpads: newpadname = pad['name'] try: lpad = next(item for item in self.vlogpads if item['name'] == newpadname) except: removedpads.append(pad) # Print results if len(addedpads) > 0: self.print('Added pads:') for pad in addedpads: self.print(pad['name'] + ' (' + pad['cell'] + ')') if len(removedpads) > 0: plist = [] nspacers = 0 for pad in removedpads: if 'subclass' in pad: if pad['subclass'] != 'SPACER': plist.append(pad) else: nspacers += 1 if nspacers > 0: self.print(str(nspacers) + ' spacer cells ignored.') if len(plist) > 0: self.print('Removed pads:') for pad in removedpads: self.print(pad['name'] + ' (' + pad['cell'] + ')') if len(addedpads) + len(removedpads) == 0: self.print('Pad list has not changed.') # Remove all cells from the "removed" list, with comment allpads = [self.Npads, self.NEpad, self.Epads, self.SEpad, self.Spads, self.SWpad, self.Wpads, self.NWpad] for pad in removedpads: rname = pad['name'] for row in allpads: try: rpad = next(item for item in row if item['name'] == rname) except: rpad = None else: row.remove(rpad) # Now the verilog file has no placement information, so the old padlist # entries (if they exist) are preferred. Add to these the new padlist # entries # First pass for unassigned pads: Use of "CLASS ENDCAP" is preferred # for identifying corner pads. Otherwise, if 'CORNER' or 'corner' is # in the pad name, then make sure there is one per row in the first # position. This is not foolproof and depends on the cell library # using the text 'corner' in the name of the corner cell. However, # if the ad hoc methods fail, the user can still manually move the # corner cells to the right place (to be done: Identify if library # uses ENDCAP designation for corner cells up front; don't go # looking for 'corner' text if the cells are easily identifiable by # LEF class). for pad in addedpads[:]: iscorner = False if 'class' in pad and pad['class'] == 'ENDCAP': iscorner = True elif 'CORNER' in pad['cell'].upper(): iscorner = True if iscorner: if self.NWpad == []: self.NWpad.append(pad) pad['o'] = 'E' addedpads.remove(pad) elif self.NEpad == []: self.NEpad.append(pad) pad['o'] = 'S' addedpads.remove(pad) elif self.SEpad == []: self.SEpad.append(pad) pad['o'] = 'W' addedpads.remove(pad) elif self.SWpad == []: self.SWpad.append(pad) pad['o'] = 'N' addedpads.remove(pad) numN = len(self.Npads) numS = len(self.Spads) numE = len(self.Epads) numW = len(self.Wpads) minnum = min(numN, numS, numE, numW) minnum = max(minnum, int(len(addedpads) / 4)) # Add pads in clockwise order. 
Note that S and E pads are defined counterclockwise for pad in addedpads: if numN < minnum: self.Npads.append(pad) numN += 1 pad['o'] = 'S' self.print("Adding pad " + pad['name'] + " to Npads") elif numE < minnum: self.Epads.insert(0, pad) numE += 1 pad['o'] = 'W' self.print("Adding pad " + pad['name'] + " to Epads") elif numS < minnum: self.Spads.insert(0, pad) numS += 1 pad['o'] = 'N' self.print("Adding pad " + pad['name'] + " to Spads") # elif numW < minnum: else: self.Wpads.append(pad) numW += 1 pad['o'] = 'E' self.print("Adding pad " + pad['name'] + " to Wpads") minnum = min(numN, numS, numE, numW) minnum = max(minnum, int(len(addedpads) / 4)) # Make sure all pads have included information from the cell definition allpads = self.Npads + self.NEpad + self.Epads + self.SEpad + self.Spads + self.SWpad + self.Wpads + self.NWpad for pad in allpads: if 'width' not in pad: try: celldef = next(item for item in self.celldefs if item['name'] == pad['cell']) except: self.print('Cell ' + pad['cell'] + ' not found!') else: pad['width'] = celldef['width'] pad['height'] = celldef['height'] pad['class'] = celldef['class'] pad['subclass'] = celldef['subclass'] # Now treat the core cells in the same way (resolve list parsed from verilog # against the list parsed from DEF) # self.print('Diagnostic: ') # self.print('self.corecells = ' + str(self.corecells)) # self.print('self.coregroup = ' + str(self.coregroup)) samecore = [] addedcore = [] removedcore = [] # (1) Create list of entries that are in both self.corecells and self.coregroup # (2) Create list of entries that are in self.corecells but not in self.coregroup for cell in self.corecells: newcellname = cell['name'] try: lcore = next(item for item in self.coregroup if item['name'] == newcellname) except: addedcore.append(cell) else: samecore.append(lcore) # (3) Create list of entries that are in self.coregroup but not in self.corecells for cell in self.coregroup: newcellname = cell['name'] try: lcore = next(item for item in self.corecells if item['name'] == newcellname) except: removedcore.append(cell) # Print results if len(addedcore) > 0: self.print('Added core cells:') for cell in addedcore: self.print(cell['name'] + ' (' + cell['cell'] + ')') if len(removedcore) > 0: clist = [] for cell in removedcore: clist.append(cell) if len(clist) > 0: self.print('Removed core cells:') for cell in removedcore: self.print(cell['name'] + ' (' + cell['cell'] + ')') if len(addedcore) + len(removedcore) == 0: self.print('Core cell list has not changed.') # Remove all cells from the "removed" list coregroup = self.coregroup for cell in removedcore: rname = cell['name'] try: rcell = next(item for item in coregroup if item['name'] == rname) except: rcell = None else: coregroup.remove(rcell) # Add all cells from the "added" list to coregroup for cell in addedcore: rname = cell['name'] try: rcell = next(item for item in coregroup if item['name'] == rname) except: coregroup.append(cell) if not 'o' in cell: cell['o'] = 'N' if not 'x' in cell: if len(self.Wpads) > 0: pad = self.Wpads[0] padx = pad['x'] if 'x' in pad else 0 cell['x'] = padx + pad['height'] + self.margin else: cell['x'] = self.margin if not 'y' in cell: if len(self.Spads) > 0: pad = self.Spads[0] pady = pad['y'] if 'y' in pad else 0 cell['y'] = pady + pad['height'] + self.margin else: cell['y'] = self.margin else: rcell = None # Make sure all core cells have included information from the cell definition for cell in coregroup: if 'width' not in cell: try: coredef = next(item for item in self.coredefs if item['name']
== cell['cell']) except: self.print('Cell ' + cell['cell'] + ' not found!') else: cell['width'] = coredef['width'] cell['height'] = coredef['height'] cell['class'] = coredef['class'] cell['subclass'] = coredef['subclass'] # Generate a new padframe by writing the configuration file, running # padring, reading back the DEF file, and (re)populating the workspace def generate(self, level): self.print('Generate legal padframe using padring') # Write out the configuration file self.writeconfig() # Run the padring app self.runpadring() # Rotate pads in the output if pad orientations are different from # padring's expectations self.rotate_pads_in_def() # Read the placement information back from the generated DEF file self.readplacement() # Resolve differences (e.g., remove spacers) self.resolve() # Recreate and draw the padframe view on the canvas self.populate(level + 1) self.frame_configure(None) # Write a new configuration file def writeconfig(self): mag_path = self.projectpath + '/mag' self.print('Writing padring configuration file.') # Determine cell width and height from pad sizes. # NOTE: This compresses the chip to the minimum dimensions # allowed by the arrangement of pads. Use a "core" block to # force the area larger than minimum (not yet implemented) topwidth = 0 for pad in self.Npads: if 'width' not in pad: self.print('No width: pad = ' + str(pad)) topwidth += pad['width'] # Add in the corner cells if self.NWpad != []: topwidth += self.NWpad[0]['height'] if self.NEpad != []: topwidth += self.NEpad[0]['width'] botwidth = 0 for pad in self.Spads: botwidth += pad['width'] # Add in the corner cells if self.SWpad != []: botwidth += self.SWpad[0]['width'] if self.SEpad != []: botwidth += self.SEpad[0]['height'] width = max(botwidth, topwidth) # if width < self.urx - self.llx: # width = self.urx - self.llx leftheight = 0 for pad in self.Wpads: leftheight += pad['width'] # Add in the corner cells if self.NWpad != []: leftheight += self.NWpad[0]['height'] if self.SWpad != []: leftheight += self.SWpad[0]['width'] rightheight = 0 for pad in self.Epads: rightheight += pad['width'] # Add in the corner cells if self.NEpad != []: rightheight += self.NEpad[0]['width'] if self.SEpad != []: rightheight += self.SEpad[0]['height'] height = max(leftheight, rightheight) # Check the dimensions of the core cells. If they exceed the available # padframe area, then expand the padframe to accommodate the core. corellx = coreurx = (self.llx + self.urx) / 2 corelly = coreury = (self.lly + self.ury) / 2 for corecell in self.coregroup: corient = corecell['o'] if 'S' in corient or 'N' in corient: cwidth = corecell['width'] cheight = corecell['height'] else: cwidth = corecell['height'] cheight = corecell['width'] if corecell['x'] < corellx: corellx = corecell['x'] if corecell['x'] + cwidth > coreurx: coreurx = corecell['x'] + cwidth if corecell['y'] < corelly: corelly = corecell['y'] if corecell['y'] + cheight > coreury: coreury = corecell['y'] + cheight coreheight = coreury - corelly corewidth = coreurx - corellx # Ignoring the possibility of overlaps with nonstandard-sized pads, # assuming that the user has visually separated them. Only check # the core bounds against the standard padframe inside edge.
if self.SWpad != [] and self.SEpad != []: if corewidth > width - self.SWpad[0]['width'] - self.SEpad[0]['width']: width = corewidth + self.SWpad[0]['width'] + self.SEpad[0]['width'] if self.NWpad != [] and self.SWpad != []: if coreheight > height - self.NWpad[0]['height'] - self.SWpad[0]['height']: height = coreheight + self.NWpad[0]['height'] + self.SWpad[0]['height'] # Core cells are given a margin of self.margin from the pad inside edge, so the # core area passed to the padring app is 2 * self.margin larger than the # measured size of the core area. width += 2 * self.margin height += 2 * self.margin # SCALE UP # width *= 1.4 # height *= 1.4 if self.keep_cfg == False or not os.path.exists(mag_path + '/padframe.cfg'): if os.path.exists(mag_path + '/padframe.cfg'): # Copy the previous padframe.cfg file to a backup. In case something # goes badly wrong, this should be the only file overwritten, and can # be recovered from the backup. shutil.copy(mag_path + '/padframe.cfg', mag_path + '/padframe.cfg.bak') with open(mag_path + '/padframe.cfg', 'w') as ofile: print('AREA ' + str(int(width)) + ' ' + str(int(height)) + ' ;', file=ofile) print('', file=ofile) for pad in self.NEpad: print('CORNER ' + pad['name'] + ' SW ' + pad['cell'] + ' ;', file=ofile) for pad in self.SEpad: print('CORNER ' + pad['name'] + ' NW ' + pad['cell'] + ' ;', file=ofile) for pad in self.SWpad: print('CORNER ' + pad['name'] + ' NE ' + pad['cell'] + ' ;', file=ofile) for pad in self.NWpad: print('CORNER ' + pad['name'] + ' SE ' + pad['cell'] + ' ;', file=ofile) for pad in self.Npads: flip = 'F ' if 'F' in pad['o'] else '' print('PAD ' + pad['name'] + ' N ' + flip + pad['cell'] + ' ;', file=ofile) for pad in self.Epads: flip = 'F ' if 'F' in pad['o'] else '' print('PAD ' + pad['name'] + ' E ' + flip + pad['cell'] + ' ;', file=ofile) for pad in self.Spads: flip = 'F ' if 'F' in pad['o'] else '' print('PAD ' + pad['name'] + ' S ' + flip + pad['cell'] + ' ;', file=ofile) for pad in self.Wpads: flip = 'F ' if 'F' in pad['o'] else '' print('PAD ' + pad['name'] + ' W ' + flip + pad['cell'] + ' ;', file=ofile) # Run the padring app. def runpadring(self): self.print('Running padring') mag_path = self.projectpath + '/mag' if self.padring_path: padringopts = [self.padring_path] else: padringopts = ['padring'] # Diagnostic # self.print('Used libraries (self.ioleflibs) = ' + str(self.ioleflibs)) for iolib in self.ioleflibs: padringopts.append('-L') padringopts.append(iolib) padringopts.append('--def') padringopts.append('padframe.def') padringopts.append('padframe.cfg') self.print('Running ' + str(padringopts)) p = subprocess.Popen(padringopts, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = mag_path) self.watch(p) # Read placement information from the DEF file generated by padring. def readplacement(self, precheck=False): self.print('Reading placement information from DEF file') mag_path = self.projectpath + '/mag' if not os.path.isfile(mag_path + '/padframe.def'): if not precheck: self.print('No file padframe.def: pad frame was not generated.') return False # Very simple DEF file parsing. The placement DEF only contains a # COMPONENTS section. Certain assumptions are made about the syntax # that depends on the way 'padring' writes its output. This is not # a rigorous DEF parser! 
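# For orientation, the COMPONENTS section this parser expects looks roughly
# like the following (shape inferred from the parsing code below, not quoted
# from real padring output):
#
#   COMPONENTS 42 ;
#     - pad_clk_0 IOPAD
#       + FIXED ( 0 185000 ) N ;
#     ...
#   END COMPONENTS
#
# '-' lines carry the instance and cell names (lparse[1], lparse[2]);
# '+ FIXED' lines carry the placement in DEF database units (lparse[3],
# lparse[4]) and the orientation letter (lparse[6]).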
units = 1000 in_components = False Npadlist = [] Spadlist = [] Epadlist = [] Wpadlist = [] NEpad = [] NWpad = [] SEpad = [] SWpad = [] coregroup = [] # Reset bounds self.llx = self.lly = self.urx = self.ury = 0 corners = 0 with open(mag_path + '/padframe.def', 'r') as ifile: deflines = ifile.read().splitlines() for line in deflines: if 'UNITS DISTANCE MICRONS' in line: units = line.split()[3] elif in_components: lparse = line.split() if lparse[0] == '-': instname = lparse[1] cellname = lparse[2] elif lparse[0] == '+': if lparse[1] == 'FIXED': placex = lparse[3] placey = lparse[4] placeo = lparse[6] newpad = {} newpad['name'] = instname newpad['cell'] = cellname try: celldef = next(item for item in self.celldefs if item['name'] == cellname) except: celldef = None else: newpad['iolib'] = celldef['iolib'] newpad['width'] = celldef['width'] newpad['height'] = celldef['height'] newpad['class'] = celldef['class'] newpad['subclass'] = celldef['subclass'] newpad['x'] = float(placex) / float(units) newpad['y'] = float(placey) / float(units) newpad['o'] = placeo # Adjust bounds if celldef: if newpad['x'] < self.llx: self.llx = newpad['x'] if newpad['y'] < self.lly: self.lly = newpad['y'] if newpad['o'] == 'N' or newpad['o'] == 'S': padurx = newpad['x'] + celldef['width'] padury = newpad['y'] + celldef['height'] else: padurx = newpad['x'] + celldef['height'] padury = newpad['y'] + celldef['width'] if padurx > self.urx: self.urx = padurx if padury > self.ury: self.ury = padury # First four entries in the DEF file are corners # padring puts the lower left corner at zero, so # use the zero coordinates to determine which pads # are which. Note that padring assumes the corner # pad is drawn in the SW corner position! if corners < 4: if newpad['x'] == 0 and newpad['y'] == 0: SWpad.append(newpad) elif newpad['x'] == 0: NWpad.append(newpad) elif newpad['y'] == 0: SEpad.append(newpad) else: NEpad.append(newpad) corners += 1 else: # Place according to orientation. If orientation # is not standard, be sure to make it standard! placeo = self.rotate_orientation(placeo) if placeo == 'N': Spadlist.append(newpad) elif placeo == 'E': Wpadlist.append(newpad) elif placeo == 'S': Npadlist.append(newpad) else: Epadlist.append(newpad) elif 'END COMPONENTS' in line: in_components = False elif 'COMPONENTS' in line: in_components = True self.Npads = Npadlist self.Wpads = Wpadlist self.Spads = Spadlist self.Epads = Epadlist self.NWpad = NWpad self.NEpad = NEpad self.SWpad = SWpad self.SEpad = SEpad # The padframe has its own DEF file from the padring app, but the core # does not. The core needs to be floorplanned in a very similar manner. # This will be done by searching for a DEF file of the project top-level # layout. If none exists, it is created by generating it from the layout. # If the top-level layout does not exist, then all core cells are placed # at the origin, and the origin placed at the padframe inside corner. 
mag_path = self.projectpath + '/mag' if not os.path.isfile(mag_path + '/' + self.project + '.def'): if os.path.isfile(mag_path + '/' + self.project + '.mag'): # Create a DEF file from the layout with open(mag_path + '/pfg_write_def.tcl', 'w') as ofile: print('drc off', file=ofile) print('box 0 0 0 0', file=ofile) print('load ' + self.project, file=ofile) print('def write', file=ofile) print('quit', file=ofile) magicexec = self.magic_path if self.magic_path else 'magic' mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole', 'pfg_write_def.tcl'], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = mag_path, universal_newlines = True) self.watch(mproc) os.remove(mag_path + '/pfg_write_def.tcl') elif not os.path.isfile(mag_path + '/core.def'): # With no other information available, copy the corecells # (from the verilog file) into the coregroup list. # Position all core cells starting at the padframe top left # inside corner, and arranging in rows without overlapping. # Note that no attempt is made to organize the cells or # otherwise produce an efficient layout. Any dimension larger # than the current padframe overruns to the right or bottom. if self.SWpad != []: corellx = SWpad[0]['x'] + SWpad[0]['width'] + self.margin corelly = SWpad[0]['y'] + SWpad[0]['height'] + self.margin else: corellx = Wpadlist[0]['x'] + Wpadlist[0]['height'] + self.margin corelly = Spadlist[0]['y'] + Spadlist[0]['height'] + self.margin if self.NEpad != []: coreurx = NEpad[0]['x'] - self.margin coreury = NEpad[0]['y'] - self.margin else: coreurx = Epadlist[0]['x'] - self.margin coreury = Npadlist[0]['y'] - self.margin locllx = corellx testllx = corellx loclly = corelly testlly = corelly nextlly = corelly for cell in self.corecells: testllx = locllx + cell['width'] if testllx > coreurx: locllx = corellx corelly = nextlly loclly = nextlly newcore = cell newcore['x'] = locllx newcore['y'] = loclly newcore['o'] = 'N' locllx += cell['width'] + self.margin testlly = corelly + cell['height'] + self.margin if testlly > nextlly: nextlly = testlly coregroup.append(newcore) self.coregroup = coregroup if os.path.isfile(mag_path + '/' + self.project + '.def'): # Read the top-level DEF, and use it to position the core cells. self.print('Reading the top-level cell DEF for core cell placement.') units = 1000 in_components = False with open(mag_path + '/' + self.project + '.def', 'r') as ifile: deflines = ifile.read().splitlines() for line in deflines: if 'UNITS DISTANCE MICRONS' in line: units = line.split()[3] elif in_components: lparse = line.split() if lparse[0] == '-': instname = lparse[1] # NOTE: Magic should not drop the entire path to the # cell for the cellname; this needs to be fixed! To # work around it, remove any path components.
cellpath = lparse[2] cellname = os.path.split(cellpath)[1] elif lparse[0] == '+': if lparse[1] == 'PLACED': placex = lparse[3] placey = lparse[4] placeo = lparse[6] newcore = {} newcore['name'] = instname newcore['cell'] = cellname try: celldef = next(item for item in self.coredefs if item['name'] == cellname) except: celldef = None else: newcore['celllib'] = celldef['celllib'] newcore['width'] = celldef['width'] newcore['height'] = celldef['height'] newcore['class'] = celldef['class'] newcore['subclass'] = celldef['subclass'] newcore['x'] = float(placex) / float(units) newcore['y'] = float(placey) / float(units) newcore['o'] = placeo coregroup.append(newcore) elif 'END COMPONENTS' in line: in_components = False elif 'COMPONENTS' in line: in_components = True self.coregroup = coregroup elif os.path.isfile(mag_path + '/core.def'): # No DEF or .mag file, so fallback position is the last core.def # file generated by this script. self.read_core_def(precheck=precheck) return True # Read placement information from the "padframe.def" file and rotate # all cells according to self.pad_rotation. This accounts for the # problem that the default orientation of pads is arbitrarily defined # by the foundry, while padring assumes that the corner pad is drawn # in the lower-left position and other pads are drawn with the pad at # the bottom and the buses at the top. def rotate_pads_in_def(self): if self.pad_rotation == 0: return self.print('Rotating pads in padframe DEF file.') mag_path = self.projectpath + '/mag' if not os.path.isfile(mag_path + '/padframe.def'): self.print('No file padframe.def: Cannot modify pad rotations.') return deflines = [] with open(mag_path + '/padframe.def', 'r') as ifile: deflines = ifile.read().splitlines() outlines = [] in_components = False for line in deflines: if in_components: lparse = line.split() if lparse[0] == '+': if lparse[1] == 'PLACED': lparse[1] = 'FIXED' neworient = self.rotate_orientation(lparse[6]) lparse[6] = neworient line = ' '.join(lparse) elif 'END COMPONENTS' in line: in_components = False elif 'COMPONENTS' in line: in_components = True outlines.append(line) with open(mag_path + '/padframe.def', 'w') as ofile: for line in outlines: print(line, file=ofile) # Read placement information from the DEF file for the core (created by # a previous run of this script) def read_core_def(self, precheck=False): self.print('Reading placement information from core DEF file.') mag_path = self.projectpath + '/mag' if not os.path.isfile(mag_path + '/core.def'): if not precheck: self.print('No file core.def: core placement was not generated.') return False # Very simple DEF file parsing, similar to the padframe.def reading # routine above.
units = 1000 in_components = False coregroup = [] with open(mag_path + '/core.def', 'r') as ifile: deflines = ifile.read().splitlines() for line in deflines: if 'UNITS DISTANCE MICRONS' in line: units = line.split()[3] elif in_components: lparse = line.split() if lparse[0] == '-': instname = lparse[1] cellname = lparse[2] elif lparse[0] == '+': if lparse[1] == 'PLACED': placex = lparse[3] placey = lparse[4] placeo = lparse[6] newcore = {} newcore['name'] = instname newcore['cell'] = cellname try: celldef = next(item for item in self.coredefs if item['name'] == cellname) except: celldef = None else: newcore['celllib'] = celldef['celllib'] newcore['width'] = celldef['width'] newcore['height'] = celldef['height'] newcore['class'] = celldef['class'] newcore['subclass'] = celldef['subclass'] newcore['x'] = float(placex) / float(units) newcore['y'] = float(placey) / float(units) newcore['o'] = placeo coregroup.append(newcore) elif 'END COMPONENTS' in line: in_components = False elif 'COMPONENTS' in line: in_components = True self.coregroup = coregroup return True # Save the layout to a Magic database file (to be completed) def save(self): self.print('Saving results in a magic layout database.') # Generate a list of (unique) LEF libraries for all padframe and core cells leflist = [] for pad in self.celldefs: if pad['iolib'] not in leflist: leflist.append(pad['iolib']) for core in self.coredefs: if core['celllib'] not in leflist: leflist.append(core['celllib']) # Run magic, and generate the padframe with a series of commands mag_path = self.projectpath + '/mag' with open(mag_path + '/pfg_write_mag.tcl', 'w') as ofile: print('drc off', file=ofile) print('box 0 0 0 0', file=ofile) for leffile in leflist: print('lef read ' + leffile, file=ofile) print('def read padframe', file=ofile) print('select top cell', file=ofile) print('select area', file=ofile) print('select save padframe', file=ofile) print('delete', file=ofile) print('def read core', file=ofile) print('getcell padframe', file=ofile) print('save ' + self.project, file=ofile) print('writeall force ' + self.project, file=ofile) print('quit', file=ofile) magicexec = self.magic_path if self.magic_path else 'magic' mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole', 'pfg_write_mag.tcl'], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = mag_path, universal_newlines = True) self.watch(mproc) os.remove(mag_path + '/pfg_write_mag.tcl') self.print('Done writing layout ' + self.project + '.mag') # Write the core DEF file if it does not exist yet. 
if not os.path.isfile(mag_path + '/core.def'): self.write_core_def() if __name__ == '__main__': faulthandler.register(signal.SIGUSR2) options = [] arguments = [] for item in sys.argv[1:]: if item.find('-', 0) == 0: options.append(item) else: arguments.append(item) if '-help' in options: print(sys.argv[0] + ' [options]') print('') print('options:') print(' -noc Print output to terminal, not the gui window') print(' -nog No graphics, run in batch mode') print(' -cfg Use existing padframe.cfg, do not regenerate') print(' -padring-path=<path> path to padring executable') print(' -magic-path=<path> path to magic executable') print(' -tech-path=<path> path to tech root folder') print(' -project-path=<path> path to project root folder') print(' -help Print this usage information') print('') sys.exit(0) root = tkinter.Tk() do_gui = False if ('-nog' in options or '-nogui' in options) else True app = SoCFloorplanner(root, do_gui) # Allow option -noc to bypass the text-to-console redirection, so crash # information doesn't disappear with the app. app.use_console = False if ('-noc' in options or '-noconsole' in options) else True if do_gui == False: app.use_console = False # efabless format can be specified on the command line, but note that it # is otherwise auto-detected by checking for .config vs. .ef-config in # the project space. app.ef_format = True if '-ef_format' in options else False app.keep_cfg = True if '-cfg' in options else False app.padring_path = None app.magic_path = None app.techpath = None app.projectpath = None for option in options: if option.split('=')[0] == '-padring-path': app.padring_path = option.split('=')[1] elif option.split('=')[0] == '-magic-path': app.magic_path = option.split('=')[1] elif option.split('=')[0] == '-tech-path': app.techpath = option.split('=')[1] elif option.split('=')[0] == '-project-path': app.projectpath = option.split('=')[1] app.projectpath = app.projectpath[:-1] if app.projectpath[-1] == '/' else app.projectpath app.text_to_console() app.init_padframe() if app.do_gui: root.mainloop() else: # Run 'save' in non-GUI mode app.save() sys.exit(0)
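# Hypothetical batch-mode invocation of this script (the paths below are
# placeholders, not taken from any documentation; the option names match the
# help text above):
#
#   python3 pfg.py -nog -noc \
#       -padring-path=/usr/local/bin/padring \
#       -magic-path=/usr/local/bin/magic \
#       -project-path=/home/user/my_chip
#
# With -nog the GUI event loop is skipped and the run ends in app.save(),
# which assembles the padframe and writes <project>.mag via magic.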
efabless/openlane
scripts/pfg.py
Python
apache-2.0
106,511
[ "Galaxy" ]
924c61ae0e275292cd21a8c2cd2366101090671dbd2e736da8a2cacfa1cd0dde
import numpy as np from unittest import TestCase from os import sep, remove, rmdir from tempfile import mkdtemp import tarfile from glob import glob from exatomic.base import resource from exatomic.va import VA, get_data, gen_delta from exatomic.gaussian import Fchk, Output as gOutput from exatomic.nwchem import Output TMPDIR = mkdtemp() h2o2_freq = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk')) methyloxirane_freq = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk')) tar = tarfile.open(resource('va-vroa-h2o2.tar.bz'), mode='r') tar.extractall(TMPDIR) tar.close() tar = tarfile.open(resource('va-vroa-methyloxirane.tar.bz'), mode='r') tar.extractall(TMPDIR) tar.close() nitro_freq = gOutput(resource('g09-nitromalonamide-6-31++g-freq.out')) tar = tarfile.open(resource('va-zpvc-nitro_nmr.tar.bz'), mode='r') tar.extractall(TMPDIR) tar.close() class TestGetData(TestCase): def test_getter_small(self): path = sep.join([TMPDIR, 'h2o2', '*']) df = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-', f_end='.out') self.assertEqual(df.shape[0], 130) df = get_data(path=path, attr='gradient', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-', f_end='.out') self.assertEqual(df.shape[0], 52) def test_getter_large(self): path = sep.join([TMPDIR, 'methyloxirane', '*']) df = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-methyloxirane-def2tzvp-488.9-', f_end='.out') self.assertEqual(df.shape[0], 160) df = get_data(path=path, attr='gradient', soft=Output, f_start='va-roa-methyloxirane-def2tzvp-488.9-', f_end='.out') self.assertEqual(df.shape[0], 160) class TestVROA(TestCase): def test_vroa(self): h2o2_freq.parse_frequency() h2o2_freq.parse_frequency_ext() delta = gen_delta(delta_type=2, freq=h2o2_freq.frequency.copy()) va_corr = VA() path = sep.join([TMPDIR, 'h2o2', '*']) va_corr.roa = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-', f_end='.out') va_corr.roa['exc_freq'] = np.tile(514.5, len(va_corr.roa)) va_corr.gradient = get_data(path=path, attr='gradient', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-', f_end='.out') va_corr.gradient['exc_freq'] = np.tile(514.5, len(va_corr.gradient)) va_corr.vroa(uni=h2o2_freq, delta=delta['delta'].values) scatter_data = np.array([[ 3.47311779e+02, 0.00000000e+00, -3.27390198e+02, -8.44921542e+01, -4.22102267e-02, -3.41332079e-02, -3.91676006e-03, 5.14500000e+02], [ 8.60534577e+02, 1.00000000e+00, 1.75268228e+02, -8.09603043e+00, -8.26286589e+00, 1.65666769e-02, -3.01543530e-03, 5.14500000e+02], [ 1.24319010e+03, 2.00000000e+00, -3.35605422e+02, -7.26516978e+01, 1.06370293e-06, -3.45429748e-02, -4.20725882e-03, 5.14500000e+02], [ 1.37182002e+03, 3.00000000e+00, 2.20210484e+02, 5.78999940e+01, -4.46257676e+00, 2.29930063e-02, -6.16087427e-04, 5.14500000e+02], [ 3.59750268e+03, 4.00000000e+00, -3.50819253e+03, -3.90826518e+02, 5.90325028e-02, -3.49292932e-01, -4.98353528e-02, 5.14500000e+02], [ 3.59821746e+03, 5.00000000e+00, 5.05236006e+03, 4.00023286e+02, 6.60389814e+00, 4.97827311e-01, 7.91921951e-02, 5.14500000e+02]]) raman_data = np.array([[3.47311779e+02, 0.00000000e+00, 3.42307581e-05, 6.38955258e-01, 2.04527298e+01, 5.14500000e+02], [8.60534577e+02, 1.00000000e+00, 1.90190339e-01, 1.07090867e+00, 6.85033386e+01, 5.14500000e+02], [1.24319010e+03, 2.00000000e+00, 1.27606294e-08, 3.46909710e-01, 1.11011130e+01, 5.14500000e+02], [1.37182002e+03, 3.00000000e+00, 3.94139588e-02, 1.19286864e+00, 4.52663091e+01, 5.14500000e+02], [3.59750268e+03, 4.00000000e+00, 1.52822910e-02, 
6.21166773e+00, 2.01524180e+02, 5.14500000e+02], [3.59821746e+03, 5.00000000e+00, 1.60412161e+00, 5.19841596e+00, 4.55091201e+02, 5.14500000e+02]]) scatter_data = scatter_data.T.copy() raman_data = raman_data.T.copy() # test all columns of the respective dataframe to get a better sense of what is broken self.assertTrue(np.allclose(va_corr.scatter['freq'].values, scatter_data[0], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['freqdx'].values, scatter_data[1], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['beta_g*1e6'].values, scatter_data[2], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['beta_A*1e6'].values, scatter_data[3], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['alpha_g*1e6'].values, scatter_data[4], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['backscatter'].values, scatter_data[5], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['forwardscatter'].values, scatter_data[6], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['exc_freq'].values, scatter_data[7], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['freq'].values, raman_data[0], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['freqdx'].values, raman_data[1], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['alpha_squared'].values, raman_data[2], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['beta_alpha'].values, raman_data[3], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['raman_int'].values, raman_data[4], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['exc_freq'].values, raman_data[5], rtol=5e-4)) def test_select_freq(self): methyloxirane_freq.parse_frequency() methyloxirane_freq.parse_frequency_ext() delta = gen_delta(delta_type=2, freq=methyloxirane_freq.frequency.copy()) va_corr = VA() path = sep.join([TMPDIR, 'methyloxirane', '*']) va_corr.roa = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-methyloxirane-def2tzvp-488.9-', f_end='.out') va_corr.roa['exc_freq'] = np.tile(488.9, len(va_corr.roa)) va_corr.gradient = get_data(path=path, attr='gradient', soft=Output, f_start='va-roa-methyloxirane-def2tzvp-488.9-', f_end='.out') va_corr.gradient['exc_freq'] = np.tile(488.9, len(va_corr.gradient)) va_corr.vroa(uni=methyloxirane_freq, delta=delta['delta'].values) scatter_data = np.array([[ 1.12639199e+03, 1.00000000e+01, -6.15736884e+01, -1.53103521e+01, -1.68892383e-01, -6.40100535e-03, -8.61815897e-04, 4.88900000e+02], [ 1.15100631e+03, 1.10000000e+01, -1.06898371e+02, -2.76794343e+01, 3.53857297e+00, -1.11479855e-02, 1.28026956e-03, 4.88900000e+02], [ 1.24937064e+03, 1.20000000e+01, 5.23431984e+01, 5.17874012e+00, -8.24615516e+00, 5.19066673e-03, -5.18260038e-03, 4.88900000e+02], [ 1.37094149e+03, 1.30000000e+01, 3.49746537e+01, -9.43653998e+00, -8.72689935e-02, 3.05559747e-03, 6.47745423e-04, 4.88900000e+02], [ 1.39064221e+03, 1.40000000e+01, -5.31532967e+01, -5.65151249e+00, -6.11336634e+00, -5.28356488e-03, -5.16165231e-03, 4.88900000e+02], [ 1.44754882e+03, 1.50000000e+01, -1.25064010e+02, -2.49033712e+01, -8.12931706e-02, -1.28030529e-02, -1.66110131e-03, 4.88900000e+02]]) raman_data = np.array([[1.12639199e+03, 1.00000000e+01, 3.61528920e-04, 5.94192417e-01, 1.90792325e+01, 4.88900000e+02], [1.15100631e+03, 1.10000000e+01, 3.36862711e-02, 1.62723253e-01, 1.12706729e+01, 4.88900000e+02], [1.24937064e+03, 1.20000000e+01, 3.10356963e-01, 1.61670230e+00, 1.07598727e+02, 4.88900000e+02], [1.37094149e+03, 1.30000000e+01, 6.63217766e-03, 2.37302109e-01, 8.78745947e+00, 4.88900000e+02], [1.39064221e+03, 
1.40000000e+01, 6.30361373e-02, 8.04145907e-01, 3.70791737e+01, 4.88900000e+02], [1.44754882e+03, 1.50000000e+01, 5.36564516e-05, 1.07944901e+00, 3.45520265e+01, 4.88900000e+02]]) scatter_data = scatter_data.T.copy() raman_data = raman_data.T.copy() # test all columns of the respective dataframe to get a better sense of what is broken self.assertTrue(np.allclose(va_corr.scatter['freq'].values, scatter_data[0], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['freqdx'].values, scatter_data[1], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['beta_g*1e6'].values, scatter_data[2], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['beta_A*1e6'].values, scatter_data[3], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['alpha_g*1e6'].values, scatter_data[4], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['backscatter'].values, scatter_data[5], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['forwardscatter'].values, scatter_data[6], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.scatter['exc_freq'].values, scatter_data[7], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['freq'].values, raman_data[0], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['freqdx'].values, raman_data[1], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['alpha_squared'].values, raman_data[2], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['beta_alpha'].values, raman_data[3], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['raman_int'].values, raman_data[4], rtol=5e-4)) self.assertTrue(np.allclose(va_corr.raman['exc_freq'].values, raman_data[5], rtol=5e-4)) #class TestZPVC(TestCase): def test_zpvc(self): nitro_freq.parse_frequency() nitro_freq.parse_frequency_ext() path = sep.join([TMPDIR, 'nitromalonamide_nmr', '*']) va_corr = VA() va_corr.gradient = get_data(path=path, attr='gradient', soft=gOutput, f_start='nitromal_grad_', f_end='.out') va_corr.property = get_data(path=path, attr='nmr_shielding', soft=gOutput, f_start='nitromal_prop', f_end='.out').groupby( 'atom').get_group(0)[['isotropic', 'file']].reset_index(drop=True) delta = gen_delta(delta_type=2, freq=nitro_freq.frequency.copy()) va_corr.zpvc(uni=nitro_freq, delta=delta['delta'].values, temperature=[0, 200]) zpvc_results = np.array([[ 13.9329 , -1.80706136, 12.12583864, -2.65173195, 0.84467059, 0. ], [ 13.9329 , -1.48913965, 12.44376035, -2.39264653, 0.90350687, 200. ]]) eff_coord = np.array([[ 1. , 0.43933078, -4.1685104 , 0. ], [ 1. , -5.89314484, -2.18542914, 0. ], [ 1. , -4.92050271, 1.02704248, 0. ], [ 1. , 6.27207499, -0.61188352, 0. ], [ 1. , 4.51177692, 2.23712277, 0. ], [ 6. , 2.50712078, -1.03455595, 0. ], [ 8. , 2.68478934, -3.43995936, 0. ], [ 7. , 4.63712862, 0.33562593, 0. ], [ 6. , -0.00921467, 0.10958486, 0. ], [ 6. , -2.14821065, -1.60648521, 0. ], [ 8. , -1.72316811, -4.00875859, 0. ], [ 7. , -0.356593 , 2.76254918, 0. ], [ 8. , 1.51924782, 4.1859399 , 0. ], [ 8. , -2.53953864, 3.65511737, 0. ], [ 7. , -4.55751742, -0.8483932 , 0. ], [ 1. , 0.42984441, -4.17018127, 0. ], [ 1. , -5.88480085, -2.18035498, 0. ], [ 1. , -4.91515264, 1.02280195, 0. ], [ 1. , 6.26195233, -0.60839022, 0. ], [ 1. , 4.50575843, 2.22964105, 0. ], [ 6. , 2.50646903, -1.03456634, 0. ], [ 8. , 2.68667123, -3.43354071, 0. ], [ 7. , 4.62935451, 0.33263776, 0. ], [ 6. , -0.00919927, 0.1084463 , 0. ], [ 6. , -2.14622625, -1.60477806, 0. ], [ 8. , -1.72498822, -4.00524579, 0. ], [ 7. , -0.35655368, 2.76183552, 0. ], [ 8. , 1.50986822, 4.18058144, 0. ], [ 8. , -2.52931977, 3.65383296, 0. ], [ 7. 
, -4.55110836, -0.84857983, 0. ]]) vib_average = np.array([[ 8.50080000e+01, 0.00000000e+00, -0.00000000e+00, 7.20197387e-03, 7.20197387e-03, 0.00000000e+00], [ 8.92980000e+01, 1.00000000e+00, -0.00000000e+00, -1.92131709e-03, -1.92131709e-03, 0.00000000e+00], [ 1.46093800e+02, 2.00000000e+00, -0.00000000e+00, 2.20218342e-02, 2.20218342e-02, 0.00000000e+00], [ 2.17704400e+02, 3.00000000e+00, -0.00000000e+00, 9.06126467e-04, 9.06126467e-04, 0.00000000e+00], [ 3.21218700e+02, 4.00000000e+00, -1.04570441e+00, 7.43587697e-02, -9.71345643e-01, 0.00000000e+00], [ 3.54578000e+02, 5.00000000e+00, -1.69193968e-01, 1.02299102e-02, -1.58964058e-01, 0.00000000e+00], [ 4.01846100e+02, 6.00000000e+00, -9.95359531e-04, 3.02869005e-03, 2.03333052e-03, 0.00000000e+00], [ 4.18516500e+02, 7.00000000e+00, -0.00000000e+00, -1.59091073e-02, -1.59091073e-02, 0.00000000e+00], [ 4.25136100e+02, 8.00000000e+00, 9.96865969e-02, 9.60161049e-03, 1.09288207e-01, 0.00000000e+00], [ 4.33864900e+02, 9.00000000e+00, -0.00000000e+00, -2.24181573e-02, -2.24181573e-02, 0.00000000e+00], [ 4.61383400e+02, 1.00000000e+01, 3.02755804e-01, 7.35128407e-02, 3.76268645e-01, 0.00000000e+00], [ 4.85255900e+02, 1.10000000e+01, 5.73683252e-04, 3.43416159e-03, 4.00784484e-03, 0.00000000e+00], [ 6.09549200e+02, 1.20000000e+01, 8.95579596e-02, 9.59664081e-03, 9.91546004e-02, 0.00000000e+00], [ 6.66622400e+02, 1.30000000e+01, -0.00000000e+00, -2.76336851e-03, -2.76336851e-03, 0.00000000e+00], [ 6.85145800e+02, 1.40000000e+01, -0.00000000e+00, -7.69008204e-03, -7.69008204e-03, 0.00000000e+00], [ 7.03986700e+02, 1.50000000e+01, -3.59227410e-01, 2.76777550e-02, -3.31549655e-01, 0.00000000e+00], [ 7.14892300e+02, 1.60000000e+01, -0.00000000e+00, 2.59325707e-03, 2.59325707e-03, 0.00000000e+00], [ 7.25846000e+02, 1.70000000e+01, -0.00000000e+00, -6.41058056e-03, -6.41058056e-03, 0.00000000e+00], [ 7.62762300e+02, 1.80000000e+01, -0.00000000e+00, -5.35324542e-03, -5.35324542e-03, 0.00000000e+00], [ 8.46200900e+02, 1.90000000e+01, -3.55846141e-03, -8.75803015e-04, -4.43426443e-03, 0.00000000e+00], [ 1.07527990e+03, 2.00000000e+01, -1.84207554e-02, 4.72086557e-03, -1.36998899e-02, 0.00000000e+00], [ 1.09465730e+03, 2.10000000e+01, -3.15295434e-02, 5.46069862e-03, -2.60688448e-02, 0.00000000e+00], [ 1.10619190e+03, 2.20000000e+01, -0.00000000e+00, 4.49794220e-02, 4.49794220e-02, 0.00000000e+00], [ 1.16155690e+03, 2.30000000e+01, -1.07394061e-02, 2.19152295e-03, -8.54788314e-03, 0.00000000e+00], [ 1.17408590e+03, 2.40000000e+01, 2.00299542e-02, 9.48213114e-03, 2.95120853e-02, 0.00000000e+00], [ 1.26700700e+03, 2.50000000e+01, -5.20609316e-01, 1.81355739e-01, -3.39253577e-01, 0.00000000e+00], [ 1.31668580e+03, 2.60000000e+01, -3.86074967e-03, 2.99473191e-03, -8.66017757e-04, 0.00000000e+00], [ 1.39527270e+03, 2.70000000e+01, -1.48603969e-03, 3.70156572e-03, 2.21552603e-03, 0.00000000e+00], [ 1.45205880e+03, 2.80000000e+01, -2.50446596e-03, -1.17089006e-03, -3.67535602e-03, 0.00000000e+00], [ 1.55570980e+03, 2.90000000e+01, -6.91414901e-04, -9.62715516e-04, -1.65413042e-03, 0.00000000e+00], [ 1.57585090e+03, 3.00000000e+01, 4.04075730e-03, -1.72319285e-03, 2.31756445e-03, 0.00000000e+00], [ 1.59816940e+03, 3.10000000e+01, -3.02221180e-03, -1.26461431e-02, -1.56683549e-02, 0.00000000e+00], [ 1.63134120e+03, 3.20000000e+01, -3.85968122e-02, 1.18178497e-02, -2.67789625e-02, 0.00000000e+00], [ 1.71103720e+03, 3.30000000e+01, -5.30081127e-02, 9.07966487e-03, -4.39284478e-02, 0.00000000e+00], [ 2.26025580e+03, 3.40000000e+01, -9.00414250e-01, 4.04124232e-01, 
-4.96290017e-01, 0.00000000e+00], [ 3.52007010e+03, 3.50000000e+01, -3.79625326e-04, 7.67211701e-04, 3.87586375e-04, 0.00000000e+00], [ 3.54188550e+03, 3.60000000e+01, -2.40654106e-03, -3.52958626e-04, -2.75949969e-03, 0.00000000e+00], [ 3.68688910e+03, 3.70000000e+01, -1.40898432e-03, 3.54638132e-04, -1.05434619e-03, 0.00000000e+00], [ 3.69638850e+03, 3.80000000e+01, -6.18866036e-04, -3.25687908e-04, -9.44553944e-04, 0.00000000e+00], [ 8.50080000e+01, 0.00000000e+00, -0.00000000e+00, 2.42846696e-02, 2.42846696e-02, 2.00000000e+02], [ 8.92980000e+01, 1.00000000e+00, -0.00000000e+00, -6.18637794e-03, -6.18637794e-03, 2.00000000e+02], [ 1.46093800e+02, 2.00000000e+00, -0.00000000e+00, 4.56979114e-02, 4.56979114e-02, 2.00000000e+02], [ 2.17704400e+02, 3.00000000e+00, -0.00000000e+00, 1.38459170e-03, 1.38459170e-03, 2.00000000e+02], [ 3.21218700e+02, 4.00000000e+00, -1.01880224e+00, 9.07354499e-02, -9.28066794e-01, 2.00000000e+02], [ 3.54578000e+02, 5.00000000e+00, -1.47884125e-01, 1.19615757e-02, -1.35922549e-01, 2.00000000e+02], [ 4.01846100e+02, 6.00000000e+00, -1.06537672e-03, 3.38490386e-03, 2.31952714e-03, 2.00000000e+02], [ 4.18516500e+02, 7.00000000e+00, -0.00000000e+00, -1.75578234e-02, -1.75578234e-02, 2.00000000e+02], [ 4.25136100e+02, 8.00000000e+00, 1.13889092e-01, 1.05481068e-02, 1.24437199e-01, 2.00000000e+02], [ 4.33864900e+02, 9.00000000e+00, -0.00000000e+00, -2.44873679e-02, -2.44873679e-02, 2.00000000e+02], [ 4.61383400e+02, 1.00000000e+01, 3.50247546e-01, 7.90337919e-02, 4.29281338e-01, 2.00000000e+02], [ 4.85255900e+02, 1.10000000e+01, 1.71633606e-04, 3.65009840e-03, 3.82173201e-03, 2.00000000e+02], [ 6.09549200e+02, 1.20000000e+01, 7.81791456e-02, 9.83892770e-03, 8.80180732e-02, 2.00000000e+02], [ 6.66622400e+02, 1.30000000e+01, -0.00000000e+00, -2.80944797e-03, -2.80944797e-03, 2.00000000e+02], [ 6.85145800e+02, 1.40000000e+01, -0.00000000e+00, -7.80220122e-03, -7.80220122e-03, 2.00000000e+02], [ 7.03986700e+02, 1.50000000e+01, -2.69187645e-01, 2.80298166e-02, -2.41157829e-01, 2.00000000e+02], [ 7.14892300e+02, 1.60000000e+01, -0.00000000e+00, 2.62373990e-03, 2.62373990e-03, 2.00000000e+02], [ 7.25846000e+02, 1.70000000e+01, -0.00000000e+00, -6.48019409e-03, -6.48019409e-03, 2.00000000e+02], [ 7.62762300e+02, 1.80000000e+01, -0.00000000e+00, -5.39776338e-03, -5.39776338e-03, 2.00000000e+02], [ 8.46200900e+02, 1.90000000e+01, 2.39979858e-03, -8.79791824e-04, 1.52000675e-03, 2.00000000e+02], [ 1.07527990e+03, 2.00000000e+01, -1.08417544e-02, 4.72499608e-03, -6.11675836e-03, 2.00000000e+02], [ 1.09465730e+03, 2.10000000e+01, -2.64528256e-03, 5.46485456e-03, 2.81957200e-03, 2.00000000e+02], [ 1.10619190e+03, 2.20000000e+01, -0.00000000e+00, 4.50109276e-02, 4.50109276e-02, 2.00000000e+02], [ 1.16155690e+03, 2.30000000e+01, -6.76838012e-03, 2.19255359e-03, -4.57582653e-03, 2.00000000e+02], [ 1.17408590e+03, 2.40000000e+01, 9.29716984e-03, 9.48620604e-03, 1.87833759e-02, 2.00000000e+02], [ 1.26700700e+03, 2.50000000e+01, -5.14657884e-01, 1.81395679e-01, -3.33262205e-01, 2.00000000e+02], [ 1.31668580e+03, 2.60000000e+01, -2.24279961e-02, 2.99519325e-03, -1.94328029e-02, 2.00000000e+02], [ 1.39527270e+03, 2.70000000e+01, -9.34932877e-04, 3.70188971e-03, 2.76695683e-03, 2.00000000e+02], [ 1.45205880e+03, 2.80000000e+01, 3.53202807e-03, -1.17095817e-03, 2.36106990e-03, 2.00000000e+02], [ 1.55570980e+03, 2.90000000e+01, -2.49842213e-04, -9.62742087e-04, -1.21258430e-03, 2.00000000e+02], [ 1.57585090e+03, 3.00000000e+01, 2.39535538e-03, -1.72323400e-03, 6.72121388e-04, 
2.00000000e+02], [ 1.59816940e+03, 3.10000000e+01, 4.90707509e-03, -1.26464003e-02, -7.73932517e-03, 2.00000000e+02], [ 1.63134120e+03, 3.20000000e+01, -3.00698127e-02, 1.18180390e-02, -1.82517738e-02, 2.00000000e+02], [ 1.71103720e+03, 3.30000000e+01, -3.95916986e-02, 9.07974685e-03, -3.05119518e-02, 2.00000000e+02], [ 2.26025580e+03, 3.40000000e+01, -8.86090407e-01, 4.04124303e-01, -4.81966104e-01, 2.00000000e+02], [ 3.52007010e+03, 3.50000000e+01, -5.63472032e-04, 7.67211701e-04, 2.03739669e-04, 2.00000000e+02], [ 3.54188550e+03, 3.60000000e+01, -3.39925478e-03, -3.52958626e-04, -3.75221341e-03, 2.00000000e+02], [ 3.68688910e+03, 3.70000000e+01, -1.73391855e-03, 3.54638132e-04, -1.37928042e-03, 2.00000000e+02], [ 3.69638850e+03, 3.80000000e+01, -7.51344750e-04, -3.25687908e-04, -1.07703266e-03, 2.00000000e+02]]) cols = ['property', 'zpvc', 'zpva', 'tot_anharm', 'tot_curva', 'temp'] self.assertTrue(np.allclose(va_corr.zpvc_results[cols].values, zpvc_results, rtol=5e-4)) va_corr.eff_coord['Z'] = va_corr.eff_coord['Z'].astype(int) self.assertTrue(np.allclose(va_corr.eff_coord[['Z','x','y','z']].values, eff_coord, atol=5e-5)) cols = ['freq', 'freqdx', 'anharm', 'curva', 'sum', 'temp'] self.assertTrue(np.allclose(va_corr.vib_average[cols].values, vib_average, rtol=5e-4))
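# The repeated column-by-column np.allclose checks above could be factored
# into a small helper; a hypothetical sketch, not part of the exatomic test
# suite (it reuses numpy, already imported in this file):
def assert_columns_close(test, df, expected_rows, cols, rtol=5e-4):
    """Check each named dataframe column against the matching expected row."""
    for col, row in zip(cols, expected_rows):
        test.assertTrue(np.allclose(df[col].values, row, rtol=rtol),
                        msg='column {!r} does not match'.format(col))
# e.g. inside test_vroa:
#   assert_columns_close(self, va_corr.raman, raman_data,
#                        ['freq', 'freqdx', 'alpha_squared', 'beta_alpha',
#                         'raman_int', 'exc_freq'])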
exa-analytics/atomic
exatomic/va/tests/test_va.py
Python
apache-2.0
24,233
[ "Gaussian", "NWChem" ]
6b1e6f0c6a58301eb9eb0ad159381afdca0de58d64f81dd0d3375fada98bf516
import string import numpy from os import path from orbkit.units import u_to_me nist_mass = None # Standard atomic masses as "Linearized ASCII Output", see http://physics.nist.gov nist_file = path.join(path.dirname(path.realpath(__file__)), 'supporting_data/Atomic_Weights_NIST.html') # see http://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=&all=all&ascii=ascii2&isotype=some def read_nist(): '''Reads and converts the atomic masses from the "Linearized ASCII Output", see http://physics.nist.gov. ''' global nist_mass f = open(nist_file,'r') flines = f.readlines() f.close() nist_mass = [] index = None new = True def rm_brackets(text,rm=['(',')','[',']']): for i in rm: text = text.replace(i,'') return text for line in flines: thisline = line.split() if 'Atomic Number =' in line: i = int(thisline[-1]) - 1 new = (i != index) if new: nist_mass.append(['',0]) index = i elif 'Atomic Symbol =' in line and new: nist_mass[index][0] = thisline[-1] elif 'Standard Atomic Weight =' in line and new: nist_mass[index][1] = float(rm_brackets(thisline[-1])) def standard_mass(atom): '''Returns the standard atomic mass of a given atom. **Parameters:** atom : int or str Contains the name or atomic number of the atom. **Returns:** mass : float Contains the atomic mass in atomic units. ''' if nist_mass is None: read_nist() try: atom = int(atom) - 1 return nist_mass[atom][1] * u_to_me except ValueError: return dict(nist_mass)[atom.title()] * u_to_me def get_atom_symbol(atom): '''Returns the atomic symbol of a given atom. **Parameters:** atom : int or str Contains the atomic number of the atom. **Returns:** symbol : str Contains the atomic symbol. ''' if nist_mass is None: read_nist() try: atom = int(atom) - 1 return nist_mass[atom][0] except ValueError: return atom.title() # Assign the quantum number l to every AO symbol (s,p,d,etc.) orbit = 'spd' + string.ascii_lowercase[5:].replace('s','').replace('p','') lquant = dict([(j, i) for i,j in enumerate(orbit)]) def l_deg(l=0,ao=None,cartesian_basis=True): '''Calculates the degeneracy of a given atomic orbital. **Options:** Works with the molpro output nomenclature for Cartesian Harmonics: s->'s', p->['x','y','z'], d-> ['xx','yy', etc.], etc. e.g., l_deg(ao='xxy') Works with quantum number l for the Cartesian Harmonic: e.g., l_deg(l=1) Works with name of the Cartesian Harmonic: e.g., l_deg(l='p') ''' if ao != None: if ao == 's': return 1 else: l = len(ao) elif not isinstance(l,int): l = lquant[l] return int((l+1)*(l+2)/2) if cartesian_basis else int(2*l+1) # l_deg # Molden AO order exp = [] exp.append([(0,0,0)]) # s orbitals exp.append([(1,0,0), (0,1,0), (0,0,1)]) # p orbitals exp.append([(2,0,0), (0,2,0), (0,0,2), (1,1,0), (1,0,1), (0,1,1)]) # d orbitals exp.append([(3,0,0), (0,3,0), (0,0,3), (1,2,0), (2,1,0), (2,0,1), (1,0,2), (0,1,2), (0,2,1), (1,1,1)]) # f orbitals exp.append([(4,0,0), (0,4,0), (0,0,4), (3,1,0), (3,0,1), (1,3,0), (0,3,1), (1,0,3), (0,1,3), (2,2,0), (2,0,2), (0,2,2), (2,1,1), (1,2,1), (1,1,2)]) # g orbitals # wfn order of exponents exp_wfn = exp[:3] # s,p,d orbitals exp_wfn.append([(3,0,0), (0,3,0), (0,0,3), (2,1,0), (2,0,1),(0,2,1), (1,2,0), (1,0,2), (0,1,2), (1,1,1)]) # f orbitals exp_wfn.append(exp[4]) # g orbitals ''' Transformation Between Cartesian and (Real) Pure Spherical Harmonic Gaussians adapted from H.B. Schlegel and M.J. Frisch International Journal of Quantum Chemistry, Vol. 54, 83-87 (1995).
''' sqrt = numpy.sqrt cart2sph = [ #: Transformation Between Cartesian and (Real) Pure Spherical Harmonic Gaussians [ [[(0,0,0)], [1.], 1.] ], # s orbitals [ [[(0,1,0)], [1.], 1.], [[(0,0,1)], [1.], 1.], [[(1,0,0)], [1.], 1.], ], # p orbitals [ [[(1,1,0)], [1.], 1.], [[(0,1,1)], [1.], 1.], [[(0,0,2),(2,0,0),(0,2,0)], [1., -1/2., -1/2.], 1.], [[(1,0,1)], [1.], 1.], [[(2,0,0),(0,2,0)], [1.,-1.], sqrt(3)/2.], ], # d orbitals [ [[(0,3,0),(2,1,0)], [-sqrt(5), 3.], 1/(2.*sqrt(2))], [[(1,1,1)], [1.], 1.], [[(0,1,2),(0,3,0),(2,1,0)], [sqrt(3/5.), -sqrt(3)/4., -sqrt(3)/(4.*sqrt(5))], sqrt(2)] , [[(0,0,3),(2,0,1),(0,2,1)], [1.,-3/(2*sqrt(5)),-3/(2*sqrt(5))], 1.], [[(1,0,2),(3,0,0),(1,2,0)], [sqrt(3/5.), -sqrt(3)/4., -sqrt(3)/(4.*sqrt(5))], sqrt(2)], [[(2,0,1),(0,2,1)], [1.,-1.], sqrt(3)/2.], [[(3,0,0),(1,2,0)], [sqrt(5), -3.], 1/(2.*sqrt(2))], ], # f orbitals [ [[(3,1,0), (1,3,0)], [1.,-1.], sqrt(2) * sqrt(5/8.)], [[(0,3,1), (2,1,1)], [-sqrt(5)/4.,3/4.], sqrt(2)], [[(1,1,2), (3,1,0), (1,3,0)], [3/sqrt(14), -sqrt(5)/(2*sqrt(14)), -sqrt(5)/(2*sqrt(14))], sqrt(2)], [[(0,1,3), (0,3,1), (2,1,1)], [sqrt(5/7.), -3*sqrt(5)/(4.*sqrt(7)), -3/(4.*sqrt(7))], sqrt(2)], [[(0,0,4), (4,0,0), (0,4,0), (2,0,2), (0,2,2), (2,2,0)], [1., 3/8., 3/8., -3*sqrt(3)/sqrt(35), -3*sqrt(3)/sqrt(35), -1/4.], sqrt(2)], [[(1,0,3), (3,0,1), (1,2,1)], [sqrt(5/7.), -3*sqrt(5)/(4.*sqrt(7)), -3/(4.*sqrt(7))], sqrt(2)], [[(2,0,2), (0,2,2), (4,0,0), (0,4,0)], [3*sqrt(3)/(2.*sqrt(14)), -3*sqrt(3)/(2.*sqrt(14)), -sqrt(5)/(4.*sqrt(2)), sqrt(5)/(4.*sqrt(2))], sqrt(2)], [[(3,0,1), (1,2,1)], [sqrt(5)/4., -3/4.], sqrt(2)], [[(4,0,0), (0,4,0), (2,2,0)], [sqrt(35)/(8.*sqrt(2)), sqrt(35)/(8.*sqrt(2)), -3*sqrt(3)/(4.*sqrt(2))], sqrt(2)], ], # g orbitals ] def get_cart2sph(l,m): '''Returns the linear combination required for the transformation between the Cartesian and (Real) Pure Spherical Harmonic Gaussian basis. Adapted from H.B. Schlegel and M.J. Frisch, International Journal of Quantum Chemistry, Vol. 54, 83-87 (1995). **Parameters:** l : int Angular momentum quantum number. m : int Magnetic quantum number. **Returns:** cart2sph[l][l+m] : list Contains the conversion instructions with three elements 1. Exponents of Cartesian basis functions (cf. `core.exp`): list of tuples 2. The corresponding expansion coefficients: list of floats 3. Global factor .. hint:: The conversion is currently only supported up to g atomic orbitals. ''' return cart2sph[l][l+m] def remove_from_list(inlist, value): return [item for item in inlist if item != value] def validate_drv(drv): if drv is None or drv == 'None' or drv == '': return 0 elif drv == 'x': return 1 elif drv == 'y': return 2 elif drv == 'z': return 3 elif drv == 'xx' or drv == 'x2': return 4 elif drv == 'yy' or drv == 'y2': return 5 elif drv == 'zz' or drv == 'z2': return 6 elif drv == 'xy' or drv == 'yx': return 7 elif drv == 'xz' or drv == 'zx': return 8 elif drv == 'yz' or drv == 'zy': return 9 elif not (isinstance(drv,int) and 0 <= drv <= 9): raise ValueError("The selection `drv=%s` is not valid!"
% drv) else: return drv def each_ao_is_normalized(ao_spec): is_normalized = [] for sel_ao in range(len(ao_spec)): is_normalized.append((ao_spec[sel_ao]['pnum'] < 0)) if all(is_normalized) != any(is_normalized): raise ValueError('Either all or none of the atomic orbitals have to be normalized!') return all(is_normalized) def prepare_ao_calc(ao_spec): pnum_list = [] atom_indices = [] ao_coeffs = numpy.zeros((0,2)) for sel_ao in range(len(ao_spec)): atom_indices.append(ao_spec[sel_ao]['atom']) c = ao_spec[sel_ao]['coeffs'] ao_coeffs = numpy.append(ao_coeffs,c,axis=0) pnum_list.append(len(c)) pnum_list = require(pnum_list, dtype='i') atom_indices = require(atom_indices, dtype='i') ao_coeffs = require(ao_coeffs, dtype='f') return ao_coeffs,pnum_list,atom_indices def is_mo_spec(mo): '''Checks if :literal:`mo` is of :literal:`mo_spec` type. (See :ref:`Central Variables` for details.)''' #Avoids circular imports: from orbkit.orbitals import MOClass if not isinstance(mo, MOClass): return False return_val = True for i in mo: try: return_val = return_val and 'coeffs' in i.keys() except: return_val = False return return_val # Compatibility function for old numpy versions def moveaxis(array, source, target): transpose = array.transpose order = [n for n in range(array.ndim) if n not in source] for dest, src in sorted(zip(target, source)): order.insert(dest, src) result = transpose(order) return result def require(data,dtype='f',requirements='CA'): if dtype == 'f': dtype = numpy.float64 elif dtype == 'i': dtype = numpy.intc return numpy.require(data, dtype=dtype, requirements=requirements) def convert(data,was_vector,N): data = numpy.array(data,order='C') if not was_vector: data = data.reshape(data.shape[:-1] + N,order='C') return data def zeros(shape,name,hdf5_file=None,chunks=True): if hdf5_file is None: return numpy.zeros(shape) else: return hdf5_file.create_dataset(name,shape,dtype=numpy.float64,chunks=chunks) def reshape(data,shape,save_hdf5): if not save_hdf5: return data.reshape(shape) else: data.attrs['shape'] = shape return data[...].reshape(shape) def print2D(x,format='%+.2f ',start='\t',end=''): '''Prints a 2D matrix. **Parameters:** x : numpy.ndarray, shape = (n,m) Contains a 2D matrix. format : str Specifies the output format. ''' shape = numpy.shape(x) for i in range(shape[0]): s = start for j in range(shape[1]): s += format % x[i,j] print(s + end) def pmat(matrix,vmax=lambda x: numpy.max(numpy.abs(x))): import matplotlib.pyplot as plt fig = plt.figure() if matrix.dtype == complex: print('plotting real part of matrix') matrix = matrix.real vm = vmax(numpy.abs(matrix)) if callable(vmax) else vmax plt.imshow(matrix,interpolation=None,vmin=-vm,vmax=vm,cmap='seismic_r') plt.colorbar() return fig
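# A short, illustrative self-test of the helpers above (an assumption of this
# edit, not shipped with orbkit); the expected values follow directly from the
# formulas in l_deg and the table layout of cart2sph:
if __name__ == '__main__':
    assert l_deg(l=2) == 6                          # Cartesian d shell: (l+1)*(l+2)/2
    assert l_deg(l=2, cartesian_basis=False) == 5   # pure spherical d shell: 2*l+1
    assert l_deg(ao='xxy') == 10                    # label length gives l=3 (f shell)
    assert get_atom_symbol(6) == 'C'                # needs the bundled NIST data file
    assert len(get_cart2sph(1, 0)) == 3             # (exponents, coefficients, factor)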
orbkit/orbkit
orbkit/tools.py
Python
lgpl-3.0
10,554
[ "Gaussian", "Molpro" ]
e769b3781adae15d69e5e0c2751aea0f0194030d22e386c7646b74e981a272e8
from __future__ import division, print_function, absolute_import import numpy as np import warnings from scipy._lib._util import check_random_state def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None): """ Generate random samples from a probability density function using the ratio-of-uniforms method. Parameters ---------- pdf : callable A function with signature `pdf(x)` that is the probability density function of the distribution. umax : float The upper bound of the bounding rectangle in the u-direction. vmin : float The lower bound of the bounding rectangle in the v-direction. vmax : float The upper bound of the bounding rectangle in the v-direction. size : int or tuple of ints, optional Defining number of random variates (default is 1). c : float, optional. Shift parameter of ratio-of-uniforms method, see Notes. Default is 0. random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional If `random_state` is `None` the `~np.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with random_state. If `random_state` is already a ``RandomState`` or ``Generator`` instance, then that object is used. Default is None. Returns ------- rvs : ndarray The random variates distributed according to the probability distribution defined by the pdf. Notes ----- Given a univariate probability density function `pdf` and a constant `c`, define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``. If `(U, V)` is a random vector uniformly distributed over `A`, then `V/U + c` follows a distribution according to `pdf`. The above result (see [1]_, [2]_) can be used to sample random variables using only the pdf, i.e. no inversion of the cdf is required. Typical choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of the rectangle ``R = [0, umax] x [vmin, vmax]`` where - ``umax = sup sqrt(pdf(x))`` - ``vmin = inf (x - c) sqrt(pdf(x))`` - ``vmax = sup (x - c) sqrt(pdf(x))`` In particular, these values are finite if `pdf` is bounded and ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails). One can generate `(U, V)` uniformly on `R` and return `V/U + c` if `(U, V)` are also in `A` which can be directly verified. Intuitively, the method works well if `A` fills up most of the enclosing rectangle such that the probability is high that `(U, V)` lies in `A` whenever it lies in `R` as the number of required iterations becomes too large otherwise. To be more precise, note that the expected number of iterations to draw `(U, V)` uniformly distributed on `R` such that `(U, V)` is also in `A` is given by the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin)``, using the fact that the area of `A` is equal to 1/2 (Theorem 7.1 in [1]_). A warning is displayed if this ratio is larger than 20. Moreover, if the sampling fails to generate a single random variate after 50000 iterations (i.e. not a single draw is in `A`), an exception is raised. If the bounding rectangle is not correctly specified (i.e. if it does not contain `A`), the algorithm samples from a distribution different from the one given by `pdf`. It is therefore recommended to perform a test such as `~scipy.stats.kstest` as a check. References ---------- .. [1] L. Devroye, "Non-Uniform Random Variate Generation", Springer-Verlag, 1986. .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian random variates", Statistics and Computing, 24(4), p. 547--557, 2014. .. [3] A.J. Kinderman and J.F. 
Monahan, "Computer Generation of Random Variables Using the Ratio of Uniform Deviates", ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977. Examples -------- >>> from scipy import stats Simulate normally distributed random variables. It is easy to compute the bounding rectangle explicitly in that case. >>> f = stats.norm.pdf >>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) >>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound >>> np.random.seed(12345) >>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500) The K-S test confirms that the random variates are indeed normally distributed (normality is not rejected at 5% significance level): >>> stats.kstest(rvs, 'norm')[1] 0.3420173467307603 The exponential distribution provides another example where the bounding rectangle can be determined explicitly. >>> np.random.seed(12345) >>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1, ... vmin=0, vmax=2*np.exp(-1), size=1000) >>> stats.kstest(rvs, 'expon')[1] 0.928454552559516 Sometimes it can be helpful to use a non-zero shift parameter `c`, see e.g. [2]_ above in the case of the generalized inverse Gaussian distribution. """ if vmin >= vmax: raise ValueError("vmin must be smaller than vmax.") if umax <= 0: raise ValueError("umax must be positive.") exp_iter = 2 * (vmax - vmin) * umax # rejection constant (see [1]) if exp_iter > 20: msg = ("The expected number of iterations to generate a single random " "number from the desired distribution is larger than {}, " "potentially causing bad performance.".format(int(exp_iter))) warnings.warn(msg, RuntimeWarning) size1d = tuple(np.atleast_1d(size)) N = np.prod(size1d) # number of rvs needed, reshape upon return # start sampling using ratio of uniforms method rng = check_random_state(random_state) x = np.zeros(N) simulated, i = 0, 1 # loop until N rvs have been generated: expected runtime is finite # to avoid infinite loop, raise exception if not a single rv has been # generated after 50000 tries. even if exp_iter = 1000, probability of # this event is (1-1/1000)**50000 which is of order 10e-22 while simulated < N: k = N - simulated # simulate uniform rvs on [0, umax] and [vmin, vmax] u1 = umax * rng.uniform(size=k) v1 = rng.uniform(vmin, vmax, size=k) # apply rejection method rvs = v1 / u1 + c accept = (u1**2 <= pdf(rvs)) num_accept = np.sum(accept) if num_accept > 0: x[simulated:(simulated + num_accept)] = rvs[accept] simulated += num_accept if (simulated == 0) and (i*N >= 50000): msg = ("Not a single random variate could be generated in {} " "attempts. The ratio of uniforms method does not appear " "to work for the provided parameters. Please check the " "pdf and the bounds.".format(i*N)) raise RuntimeError(msg) i += 1 return np.reshape(x, size1d)
arokem/scipy
scipy/stats/_rvs_sampling.py
Python
bsd-3-clause
7,146
[ "Gaussian" ]
9b8b1bb0416299aab11c034e82df903c7db127661c9aa4785d7f357444d23303
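The Notes in the docstring above fully specify the sampler; stripped of the chunked bookkeeping, the accept-reject loop fits in a few lines. A minimal sketch for the standard normal, assuming only numpy and scipy (the bounds follow the `umax`/`vmin`/`vmax` formulas given in the docstring):

    import numpy as np
    from scipy import stats

    f = stats.norm.pdf
    umax = np.sqrt(f(0))                        # sup sqrt(pdf(x)) sits at the mode
    vmax = np.sqrt(2) * np.sqrt(f(np.sqrt(2)))  # sup x*sqrt(pdf(x)) sits at x = sqrt(2)

    rng = np.random.default_rng(0)
    samples = []
    while len(samples) < 1000:
        u = umax * rng.uniform()
        v = rng.uniform(-vmax, vmax)
        # Accept iff (u, v) lies in A = {(u, v) : 0 < u <= sqrt(pdf(v/u))}
        if u > 0.0 and u * u <= f(v / u):
            samples.append(v / u)

A kstest of `samples` against 'norm' should not reject, mirroring the docstring example.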
import hashlib from tango.ast import * from tango.builtin import Int, Double, String from tango.types import FunctionType, NominalType, TypeUnion def transpile(module, header_stream, source_stream): transpiler = Transpiler(header_stream, source_stream) transpiler.visit(module) def compatibilize(name): result = str(str(name.encode())[2:-1]).replace('\\', '') for punct in '. ()[]<>-:': result = result.replace(punct, '') if result[0].isdigit(): result = '_' + result return result operator_translations = { '+': '__add__', '-': '__sub__', '*': '__mul__', '/': '__div__', } class Functor(object): def __init__(self, function_type): self.function_type = function_type @property def type_signature(self): # FIXME This discriminator isn't good enough, as different signatures # may have the same string representation, since their `__str__` # implementation doesn't use full names. discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:] return compatibilize('Sig' + str(self.function_type) + discriminator) class Transpiler(Visitor): def __init__(self, header_stream, source_stream): self.header_stream = header_stream self.source_stream = source_stream self.indent = 0 self.containers = {} self.functions = {} self.functors = {} self.types = {} def write_header(self, data, end='\n'): print(' ' * self.indent + data, file=self.header_stream, end=end) def write_source(self, data, end='\n'): print(' ' * self.indent + data, file=self.source_stream, end=end) def visit_ModuleDecl(self, node): self.write_source('#include "tango.hh"') self.write_source('') self.write_source('int main(int argc, char* argv[]) {') self.indent += 4 self.generic_visit(node) self.write_source('return 0;') self.indent -= 4 self.write_source('}') def visit_ContainerDecl(self, node): # Write a new variable declaration. var_type = self.translate_type(node.__info__['type']) var_name = compatibilize(node.__info__['scope'].name + '_' + node.name) declaration = var_type + ' ' + var_name # If the container has an initial value, write it as well. if node.initial_value: declaration += ' = ' + self.translate_expr(node.initial_value) self.write_source(declaration + ';') def visit_Call(self, node): self.write_source(self.translate_expr(node) + ';') def visit_If(self, node): assert not node.pattern.parameters, 'TODO pattern matching in if expressions' condition = self.translate_expr(node.pattern.expression) self.write_source('if (' + condition + ') {') self.indent += 4 self.visit(node.body) self.indent -= 4 self.write_source('}') if isinstance(node.else_clause, Block): self.write_source('else {') self.indent += 4 self.visit(node.else_clause) self.indent -= 4 self.write_source('}') elif isinstance(node.else_clause, If): self.write_source('else') self.visit(node.else_clause) def translate_type(self, type_instance): if isinstance(type_instance, NominalType): return compatibilize(type_instance.scope.name + '_' + type_instance.name) if isinstance(type_instance, FunctionType): # Register a new functor for the parsed function type. functor = self.functors.get(type_instance) if functor is None: functor = Functor(type_instance) self.functors[type_instance] = functor return 'std::shared_ptr<' + functor.type_signature + '>' assert False, 'cannot translate {}'.format(type_instance) def translate_expr(self, node): if isinstance(node, Literal): if node.__info__['type'] == String: return '"' + node.value + '"' return node.value if isinstance(node, Identifier): # If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']: return node.name # If the identifier isn't a keyword, first, we retrieve the entity # the identifier is denoting. decls = node.__info__['scope'][node.name] # If the identifier denotes a simple container, we return its full # name (i.e. scope + name). if isinstance(decls[0], ContainerDecl): return compatibilize(node.__info__['scope'].name + '_' + node.name) # If the identifier denotes a function declaration, we have to # know which overload and/or specialization it refers to, so as to # create a different full name for each case. if isinstance(decls[0], FunctionDecl): # If the identifier has a single, non-generic type, we can # use it as is to discriminate the identifier. node_type = node.__info__['type'] if not isinstance(node_type, TypeUnion) and not node_type.is_generic: discriminating_type = node_type # If the identifier was used as the callee of a function call, # we can expect the type solver to add a `specialized_type` # key in the node's metadata. elif 'specialized_type' in node.__info__: discriminating_type = node.__info__['specialized_type'] # It should be illegal to use an overloaded or generic # identifier outside of a function call. else: assert False, ( "ambiguous use of '{}' wasn't handled by the type disambiguator" .format(node)) # FIXME This discriminator isn't good enough, as different # signatures may have the same string representation, since # their `__str__` implementation doesn't use full names. discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:] return compatibilize(node.__info__['scope'].name + '_' + node.name + discriminator) if isinstance(node, PrefixedExpression): return '{}.{}({})'.format( self.translate_type(node.operand.__info__['type']), operator_translations[node.operator], self.translate_expr(node.operand)) if isinstance(node, BinaryExpression): return '{}.{}({}, {})'.format( self.translate_type(node.left.__info__['type']), operator_translations[node.operator], self.translate_expr(node.left), self.translate_expr(node.right)) if isinstance(node, Call): callee_name = self.translate_expr(node.callee) return '(*({}))({})'.format( callee_name, ', '.join(map(self.translate_expr, node.arguments))) if isinstance(node, CallArgument): return self.translate_expr(node.value) assert False, 'cannot translate {}'.format(node) def find_function_implementation(node): scope = node.callee.__info__['scope'] while scope is not None: for decl in node.callee.__info__['scope'][node.callee.name]: # When the object denoted by the identifier is a declaration, it # means we have to instantiate that declaration. if isinstance(decl, FunctionDecl): function_type = decl.__info__['type'] # We select the first non-generic function declaration # that matches the signature candidate of the call node. if function_type == node.__info__['signature_candidate']: return decl assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type) # When the object denoted by the identifier is a type, it means # it's been declared in another module. Hence, we should refer to # the symbol of this other module. else: assert False, 'TODO: {} is declared in another module'.format(node.callee) # Move to the enclosing scope if we couldn't find any match. scope = scope.parent # We should always find at least one valid implementation, unless # something went wrong with the type solver. assert False, 'could not find the implementation of {}'.format(node.callee)
kyouko-taiga/tango
tango/transpilers/cpp.py
Python
apache-2.0
8,789
[ "VisIt" ]
7068fa2b5c19e2d9e6720e3e789bef7c32ee591782622a4835166b69a7cca1d5
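The transpiler's overload handling hinges on one trick: mangle the scoped name plus a short SHA-1 of the stringified type, so each overload or specialization lands on a distinct C++ identifier. A self-contained sketch of that scheme (the example signature string is made up; the real `compatibilize` also round-trips through `name.encode()` first):

    import hashlib

    def compatibilize(name):
        # Strip characters that are illegal in C++ identifiers.
        for punct in '. ()[]<>-:':
            name = name.replace(punct, '')
        if name[0].isdigit():
            name = '_' + name
        return name

    def mangle(scope, name, type_repr):
        # 8 hex digits of SHA-1 keep same-named overloads apart.
        discriminator = hashlib.sha1(type_repr.encode()).hexdigest()[-8:]
        return compatibilize(scope + '_' + name + discriminator)

    print(mangle('main', 'add', '(Int, Int) -> Int'))

As the FIXME comments note, hashing `str(type)` can collide when two distinct types print identically; hashing fully qualified names would close that hole.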
#!/usr/bin/env python import argparse from dataclasses import dataclass import os from pathlib import Path import shutil import sys import time import traceback import orjson import numpy as np import psutil from pysisyphus.calculators import XTB from pysisyphus.constants import AU2KJPERMOL from pysisyphus.cos.GrowingNT import GrowingNT from pysisyphus.helpers import geom_loader, do_final_hessian, check_for_end_sign from pysisyphus.helpers_pure import highlight_text, touch from pysisyphus.irc import EulerPC from pysisyphus.optimizers.PreconLBFGS import PreconLBFGS from pysisyphus.tsoptimizers.RSPRFOptimizer import RSPRFOptimizer def dump_trj(geoms, fn): trj = "\n".join([geom.as_xyz() for geom in geoms]) with open(fn, "w") as handle: handle.write(trj) @dataclass class CGNTResult: atoms: tuple init_energy: float gnt_last_image_c3d: np.ndarray gnt_ts_c3d: np.ndarray = None ts_c3d: np.ndarray = None ts_opt_converged: bool = False final_energy: float = None barrier: float = None def run_gnt(id_, geom, bonds, flag="CONVERGED", opt_ts=False, irc=False, force=False): work_dir = Path(id_) def get_fn(fn): return work_dir / fn # Return quick flag_fn = get_fn(flag) if not force and flag_fn.exists(): print(f"@@@ {id_} is already converged!") return True # Else, start over try: shutil.rmtree(work_dir) except FileNotFoundError: pass os.mkdir(work_dir) calc = XTB( pal=psutil.cpu_count(logical=False), quiet=True, gfn=2, out_dir=work_dir, retry_etemp=1000.0, ) geom.set_calculator(calc) # Calculate initial energy and dump initial geometry init_energy = geom.energy with open(get_fn("input.xyz"), "w") as handle: handle.write(geom.as_xyz()) # # Run Growing Newton trajectory # gnt_kwargs = { "step_len": 0.1, "bonds": bonds, "stop_after_ts": True, "rms_thresh": 0.003, } gnt = GrowingNT(geom, **gnt_kwargs) opt_kwargs = { "max_cycles": 1_000, "dump": True, "max_step": 0.1, "out_dir": work_dir, "line_search": "armijo_fg", } print(highlight_text(f"{id_} GNT Optimization")) opt = PreconLBFGS(gnt, **opt_kwargs) try: opt.run() except Exception: exc_info = sys.exc_info() traceback.print_exception(*exc_info) print() # Pick best geometry from GNT. Check if we found a TS (guess). If not # use the latest converged geometry instead. try: last_gnt_geom = gnt.ts_images[0] except IndexError: last_gnt_geom = gnt.images[-1] last_gnt_geom.set_calculator(calc) do_final_hessian( last_gnt_geom, write_imag_modes=True, prefix=f"{id_}_last_gnt_geom", out_dir=work_dir, ) gnt_succeeded = opt.is_converged # Dump all GNT images and all obtained stationary points dump_trj(gnt.images, get_fn(f"{id_}_gnt.trj")) dump_trj(gnt.sp_images, get_fn(f"{id_}_gnt_sps.trj")) # Run actual TS optimization if explicitly requested, or the # previous GNT run failed. But don't run TS optimization if # the GNT never completed its first step. 
ts_opt = None if opt_ts or (not gnt_succeeded and (len(gnt.images) > 1)): print(highlight_text(f"{id_} (Recovery) TS optimization")) bonds_ = np.array(bonds, dtype=int).reshape(-1, 3) typed_prims = [["BOND", from_, to_] for from_, to_, _ in bonds_] # Start from latest converged point on Newton trajectory ts_guess_geom = last_gnt_geom.copy( coord_type="redund", # coord_kwargs={ # "define_prims": typed_prims, # }, ) ts_guess_geom.set_calculator(calc) ts_opt_kwargs = { "hessian_recalc": 10, "trust_max": 0.5, "overachieve_factor": 3, "prefix": f"{id_}_ts", "out_dir": work_dir, } ts_opt = RSPRFOptimizer(ts_guess_geom, **ts_opt_kwargs) ts_opt.run() ts_geom = ts_opt.geometry # Overwrite GNT optimizer, so we can just return 'opt.is_converged' as below opt = ts_opt elif not gnt_succeeded and (len(gnt.images) == 1): raise Exception("GNT failed in first step!") else: ts_geom = last_gnt_geom # Below this point we expect a 'ts_geom' to be present assert "ts_geom" in locals() ts_geom.set_calculator(geom.calculator) do_final_hessian(ts_geom, write_imag_modes=True, prefix=id_, out_dir=work_dir) final_energy = ts_geom.energy barrier = final_energy - init_energy barrier_j = barrier * AU2KJPERMOL print(f"@@@ {id_}: barrier={barrier_j:.2f} kJ mol⁻¹") if opt.is_converged: # Touch flag touch(flag_fn) try: gnt_ts_c3d = gnt.ts_images[-1].coords3d except IndexError: gnt_ts_c3d = None cgntres_kwargs = { "atoms": geom.atoms, "init_energy": init_energy, "gnt_last_image_c3d": gnt.images[-1].coords3d, "gnt_ts_c3d": gnt_ts_c3d, "final_energy": final_energy, "barrier": barrier, } if ts_opt is not None: cgntres_kwargs.update( ts_c3d=ts_geom.coords3d, ts_opt_converged=ts_opt.is_converged, ) if ts_opt and ts_opt.is_converged and irc: irc_geom = ts_geom.copy(coord_type="cart") irc_geom.set_calculator(calc) irc = EulerPC( irc_geom, out_dir=work_dir, hessian_recalc=10, prefix=id_, hard_rms_grad_thresh=5e-4, ) irc.run() print(f"@@@ {id_}: IRC converged? {irc.converged}") cgntres = CGNTResult(**cgntres_kwargs) with open(get_fn(f"{id_}_dump.json"), "wb") as handle: handle.write( orjson.dumps( cgntres, option=orjson.OPT_SERIALIZE_DATACLASS | orjson.OPT_SERIALIZE_NUMPY, ) ) return opt.is_converged def parse_args(args): parser = argparse.ArgumentParser() parser.add_argument("name") parser.add_argument("geoms") parser.add_argument("bonds", nargs="+", type=int) parser.add_argument("--force", action="store_true") parser.add_argument("--optts", dest="opt_ts", action="store_true") parser.add_argument("--irc", action="store_true") parser.add_argument("--first", default=None, type=int) return parser.parse_args(args) def run(): start = time.time() args = parse_args(sys.argv[1:]) geoms = geom_loader(args.geoms) print(f"@@@ Loaded {len(geoms)} geometries from '{args.geoms}'") if first := args.first: geoms = geoms[:first] print(f"@@@ Using only first {first} geometries.") bonds = np.array(args.bonds, dtype=int).reshape(-1, 3).tolist() conv_num = 0 for i, geom in enumerate(geoms): id_ = f"{i:04d}_{args.name}" try: converged = run_gnt( id_, geom, bonds, opt_ts=args.opt_ts, force=args.force, irc=args.irc ) except Exception: exc_info = sys.exc_info() traceback.print_exception(*exc_info) converged = "error" print(f"@@@ {id_}: converged? {converged}") print("@@@") conv_num += 1 if (converged is True) else 0 if check_for_end_sign(): break print() print(f"@@@ converged: {conv_num}/{len(geoms)}") dur = time.time() - start print(f"@@@ crestnt.py run took {dur/60:.2f} min") if __name__ == "__main__": run()
eljost/pysisyphus
scripts/crest_gnts.py
Python
gpl-3.0
7,690
[ "xTB" ]
db46fa656b4ddbbb084669ac0cb5c39841c0579ded86a73a3aded8158034eec6
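The result-dumping step relies on orjson serializing dataclasses natively and ndarrays via OPT_SERIALIZE_NUMPY. A minimal round-trip sketch, assuming orjson is installed (`Result` is a stand-in for the script's CGNTResult, not its actual definition):

    from dataclasses import dataclass
    import numpy as np
    import orjson

    @dataclass
    class Result:
        atoms: tuple
        barrier: float
        coords: np.ndarray

    res = Result(atoms=('H', 'H'), barrier=0.012, coords=np.zeros((2, 3)))
    # ndarrays become nested lists; dataclass fields become object keys.
    blob = orjson.dumps(res, option=orjson.OPT_SERIALIZE_NUMPY)
    print(orjson.loads(blob)['barrier'])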
# -*- coding: utf-8 -*- """ Acceptance tests for CMS Video Module. """ import os from mock import patch from nose.plugins.attrib import attr from unittest import skipIf from ...pages.studio.auto_auth import AutoAuthPage from ...pages.studio.overview import CourseOutlinePage from ...pages.studio.video.video import VideoComponentPage from ...fixtures.course import CourseFixture, XBlockFixtureDesc from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig @skipIf(is_youtube_available() is False, 'YouTube is not available!') class CMSVideoBaseTest(UniqueCourseTest): """ CMS Video Module Base Test Class """ def setUp(self): """ Initialization of pages and course fixture for tests """ super(CMSVideoBaseTest, self).setUp() self.video = VideoComponentPage(self.browser) # This will be initialized later self.unit_page = None self.outline = CourseOutlinePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self.course_fixture = CourseFixture( self.course_info['org'], self.course_info['number'], self.course_info['run'], self.course_info['display_name'] ) self.assets = [] self.addCleanup(YouTubeStubConfig.reset) def _create_course_unit(self, youtube_stub_config=None, subtitles=False): """ Create a Studio Video Course Unit and Navigate to it. Arguments: youtube_stub_config (dict) subtitles (bool) """ if youtube_stub_config: YouTubeStubConfig.configure(youtube_stub_config) if subtitles: self.assets.append('subs_3_yD_cEKoCk.srt.sjson') self.navigate_to_course_unit() def _create_video(self): """ Create Xblock Video Component. """ self.video.create_video() video_xblocks = self.video.xblocks() # Total video xblock components count should be equals to 2 # Why 2? One video component is created by default for each test. Please see # test_studio_video_module.py:CMSVideoTest._create_course_unit # And we are creating second video component here. self.assertTrue(video_xblocks == 2) def _install_course_fixture(self): """ Prepare for tests by creating a course with a section, subsection, and unit. 
Performs the following: Create a course with a section, subsection, and unit Create a user and make that user a course author Log the user into studio """ if self.assets: self.course_fixture.add_asset(self.assets) # Create course with Video component self.course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('video', 'Video') ) ) ) ).install() # Auto login and register the course AutoAuthPage( self.browser, staff=False, username=self.course_fixture.user.get('username'), email=self.course_fixture.user.get('email'), password=self.course_fixture.user.get('password') ).visit() def _navigate_to_course_unit_page(self): """ Open the course from the dashboard and expand the section and subsection and click on the Unit link The end result is the page where the user is editing the newly created unit """ # Visit Course Outline page self.outline.visit() # Visit Unit page self.unit_page = self.outline.section('Test Section').subsection('Test Subsection').expand_subsection().unit( 'Test Unit').go_to() self.video.wait_for_video_component_render() def navigate_to_course_unit(self): """ Install the course with required components and navigate to course unit page """ self._install_course_fixture() self._navigate_to_course_unit_page() def edit_component(self, xblock_index=1): """ Open component Edit Dialog for first component on page. Arguments: xblock_index: number starting from 1 (0th entry is the unit page itself) """ self.unit_page.xblocks[xblock_index].edit() def open_advanced_tab(self): """ Open components advanced tab. """ # The 0th entry is the unit page itself. self.unit_page.xblocks[1].open_advanced_tab() def open_basic_tab(self): """ Open components basic tab. """ # The 0th entry is the unit page itself. self.unit_page.xblocks[1].open_basic_tab() def save_unit_settings(self): """ Save component settings. """ # The 0th entry is the unit page itself. 
self.unit_page.xblocks[1].save_settings() @attr('shard_4') class CMSVideoTest(CMSVideoBaseTest): """ CMS Video Test Class """ def test_youtube_stub_proxy(self): """ Scenario: YouTube stub server proxies YouTube API correctly Given youtube stub server proxies YouTube API And I have created a Video component Then I can see video button "play" And I click video button "play" Then I can see video button "pause" """ self._create_course_unit(youtube_stub_config={'youtube_api_blocked': False}) self.assertTrue(self.video.is_button_shown('play')) self.video.click_player_button('play') self.video.wait_for_state('playing') self.assertTrue(self.video.is_button_shown('pause')) def test_youtube_stub_blocks_youtube_api(self): """ Scenario: YouTube stub server can block YouTube API Given youtube stub server blocks YouTube API And I have created a Video component Then I do not see video button "play" """ self._create_course_unit(youtube_stub_config={'youtube_api_blocked': True}) self.assertFalse(self.video.is_button_shown('play')) def test_autoplay_is_disabled(self): """ Scenario: Autoplay is disabled in Studio Given I have created a Video component Then when I view the video it does not have autoplay enabled """ self._create_course_unit() self.assertFalse(self.video.is_autoplay_enabled) def test_video_creation_takes_single_click(self): """ Scenario: Creating a video takes a single click And creating a video takes a single click """ self._create_course_unit() # This will create a video by doing a single click and then ensure that video is created self._create_video() def test_captions_hidden_correctly(self): """ Scenario: Captions are hidden correctly Given I have created a Video component with subtitles And I have hidden captions Then when I view the video it does not show the captions """ self._create_course_unit(subtitles=True) self.video.hide_captions() self.assertFalse(self.video.is_captions_visible()) def test_video_controls_shown_correctly(self): """ Scenario: Video controls for all videos show correctly Given I have created two Video components And first is private video When I reload the page Then video controls for all videos are visible """ self._create_course_unit(youtube_stub_config={'youtube_api_private_video': True}) self.video.create_video() # change id of first default video self.edit_component(1) self.open_advanced_tab() self.video.set_field_value('YouTube ID', 'sampleid123') self.save_unit_settings() # again open unit page and check that video controls show for both videos self._navigate_to_course_unit_page() self.assertTrue(self.video.is_controls_visible()) def test_captions_shown_correctly(self): """ Scenario: Captions are shown correctly Given I have created a Video component with subtitles Then when I view the video it does show the captions """ self._create_course_unit(subtitles=True) self.assertTrue(self.video.is_captions_visible()) def test_captions_toggling(self): """ Scenario: Captions are toggled correctly Given I have created a Video component with subtitles And I have toggled captions Then when I view the video it does show the captions """ self._create_course_unit(subtitles=True) self.video.click_player_button('CC') self.assertFalse(self.video.is_captions_visible()) self.video.click_player_button('CC') self.assertTrue(self.video.is_captions_visible()) def test_caption_line_focus(self): """ Scenario: When enter key is pressed on a caption, an outline shows around it Given I have created a Video component with subtitles And Make sure captions are opened Then I focus on first 
caption line And I see first caption line has focused """ self._create_course_unit(subtitles=True) self.video.show_captions() self.video.focus_caption_line(2) self.assertTrue(self.video.is_caption_line_focused(2)) def test_slider_range_works(self): """ Scenario: When start and end times are specified, a range on slider is shown Given I have created a Video component with subtitles And Make sure captions are closed And I edit the component And I open tab "Advanced" And I set value "00:00:12" to the field "Video Start Time" And I set value "00:00:24" to the field "Video Stop Time" And I save changes And I click video button "play" Then I see a range on slider """ self._create_course_unit(subtitles=True) self.video.hide_captions() self.edit_component() self.open_advanced_tab() self.video.set_field_value('Video Start Time', '00:00:12') self.video.set_field_value('Video Stop Time', '00:00:24') self.save_unit_settings() self.video.click_player_button('play') @attr('a11y') class CMSVideoA11yTest(CMSVideoBaseTest): """ CMS Video Accessibility Test Class """ def setUp(self): browser = os.environ.get('SELENIUM_BROWSER', 'firefox') # the a11y tests run in CI under phantomjs which doesn't # support html5 video or flash player, so the video tests # don't work in it. We still want to be able to run these # tests in CI, so override the browser setting if it is # phantomjs. if browser == 'phantomjs': browser = 'firefox' with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}): super(CMSVideoA11yTest, self).setUp() def test_video_player_a11y(self): # Limit the scope of the audit to the video player only. self.outline.a11y_audit.config.set_scope(include=["div.video"]) self._create_course_unit() self.outline.a11y_audit.check_for_accessibility_errors()
adoosii/edx-platform
common/test/acceptance/tests/video/test_studio_video_module.py
Python
agpl-3.0
11,604
[ "VisIt" ]
9d1597fd77a90b0a27240e696796b46cefae770b3689a12ce5163f83b8e4b071
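These tests lean on the page-object pattern: every `self.video.…` call hides raw selector work behind an intention-revealing method, so the test bodies read like the Gherkin scenarios in their docstrings. A stripped-down sketch of the idea, independent of bok-choy (the browser interface and selectors here are hypothetical):

    class VideoPage(object):
        """Hypothetical page object wrapping a low-level browser driver."""
        def __init__(self, browser):
            self.browser = browser

        def is_button_shown(self, name):
            return self.browser.is_visible('.video-controls .%s' % name)

        def click_player_button(self, name):
            self.browser.click('.video-controls .%s' % name)

    # A scenario then reads like the docstrings above:
    #     page.click_player_button('play')
    #     assert page.is_button_shown('pause')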
""" """ import numpy import theano import theano.tensor as T from pythonDnn.layers.logistic_sgd import LogisticRegression from pythonDnn.layers.mlp import HiddenLayer from pythonDnn.layers.rbm import RBM, GBRBM from pythonDnn.models import nnet class DBN(nnet): """Deep Belief Network A deep belief network is obtained by stacking several RBMs on top of each other. The hidden layer of the RBM at layer `i` becomes the input of the RBM at layer `i+1`. The first layer RBM gets as input the input of the network, and the hidden layer of the last RBM represents the output. When used for classification, the DBN is treated as a MLP, by adding a logistic regression layer on top. """ def __init__(self, numpy_rng, theano_rng=None, n_ins=784, hidden_layers_sizes=[500, 500], n_outs=10, first_layer_gb = True,pretrainedLayers=None,activation=T.nnet.sigmoid): """This class is made to support a variable number of layers. :type numpy_rng: numpy.random.RandomState :param numpy_rng: numpy random number generator used to draw initial weights :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams :param theano_rng: Theano random generator; if None is given one is generated based on a seed drawn from `rng` :type n_ins: int :param n_ins: dimension of the input to the DBN :type hidden_layers_sizes: list of ints :param hidden_layers_sizes: intermediate layers size, must contain at least one value :type n_outs: int :param n_outs: dimension of the output of the network :type first_layer_gb: bool :param first_layer_gb: wether first layer is gausian-bernolli or bernolli-bernolli """ super(DBN, self).__init__() self.layers = [] self.rbm_layers = [] self.n_layers = len(hidden_layers_sizes) if pretrainedLayers == None: self.nPreTrainLayers = n_layers else : self.nPreTrainLayers = pretrainedLayers assert self.n_layers > 0 if not theano_rng: theano_rng = RandomStreams(numpy_rng.randint(2 ** 30)) # allocate symbolic variables for the data self.x = T.matrix('x') # the data is presented as rasterized images self.y = T.ivector('y') # the labels are presented as 1D vector # of [int] labels # The DBN is an MLP, for which all weights of intermediate # layers are shared with a different RBM. We will first # construct the DBN as a deep multilayer perceptron, and when # constructing each sigmoidal layer we also construct an RBM # that shares weights with that layer. During pretraining we # will train these RBMs (which will lead to chainging the # weights of the MLP as well) During finetuning we will finish # training the DBN by doing stochastic gradient descent on the # MLP. for i in xrange(self.n_layers): # construct the sigmoidal layer # the size of the input is either the number of hidden # units of the layer below or the input size if we are on # the first layer # the input to this layer is either the activation of the # hidden layer below or the input of the DBN if you are on # the first layer if i == 0: input_size = n_ins layer_input = self.x else: input_size = hidden_layers_sizes[i - 1] layer_input = self.layers[-1].output sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input, n_in=input_size, n_out=hidden_layers_sizes[i], activation=activation) # add the layer to our list of layers self.layers.append(sigmoid_layer) # the parameters of the sigmoid_layers are parameters of the DBN. # The visible biases in the RBM are parameters of those RBMs, # but not of the DBN. 
self.params.extend(sigmoid_layer.params) self.delta_params.extend(sigmoid_layer.delta_params) # Construct an RBM that shared weights with this layer # the first layer could be Gaussian-Bernoulli RBM # other layers are Bernoulli-Bernoulli RBMs if i == 0 and first_layer_gb: rbm_layer = GBRBM(numpy_rng=numpy_rng, theano_rng=theano_rng, input=layer_input, n_visible=input_size, n_hidden=hidden_layers_sizes[i], W=sigmoid_layer.W, hbias=sigmoid_layer.b, activation=activation) else: rbm_layer = RBM(numpy_rng=numpy_rng, theano_rng=theano_rng, input=layer_input, n_visible=input_size, n_hidden=hidden_layers_sizes[i], W=sigmoid_layer.W, hbias=sigmoid_layer.b, activation=activation) self.rbm_layers.append(rbm_layer) # We now need to add a logistic layer on top of the MLP self.logLayer = LogisticRegression( input=self.layers[-1].output, n_in=hidden_layers_sizes[-1], n_out=n_outs) self.layers.append(self.logLayer) self.params.extend(self.logLayer.params) self.delta_params.extend(self.logLayer.delta_params) # compute the cost for second phase of training, defined as the # negative log likelihood of the logistic regression (output) layer self.finetune_cost = self.logLayer.negative_log_likelihood(self.y) # compute the gradients with respect to the model parameters # symbolic variable that points to the number of errors made on the # minibatch given by self.x and self.y self.errors = self.logLayer.errors(self.y) self.output = self.logLayer.prediction(); self.features = self.layers[-2].output; self.features_dim = self.layers[-2].n_out def pretraining_functions(self, train_set_x, batch_size, weight_cost): '''Generates a list of functions, for performing one step of gradient descent at a given layer. The function will require as input the minibatch index, and to train an RBM you just need to iterate, calling the corresponding function on all minibatch indexes. :type train_set_x: theano.tensor.TensorType :param train_set_x: Shared var. that contains all datapoints used for training the RBM :type batch_size: int :param batch_size: size of a [mini]batch :param weight_cost: weigth cost ''' # index to a [mini]batch index = T.lscalar('index') # index to a minibatch momentum = T.scalar('momentum') learning_rate = T.scalar('lr') # learning rate to use # number of batches n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size # begining of a batch, given `index` batch_begin = index * batch_size # ending of a batch given `index` batch_end = batch_begin + batch_size pretrain_fns = [] for rbm in self.rbm_layers: # get the cost and the updates list # using CD-k here (persisent=None,k=1) for training each RBM. r_cost, fe_cost, updates = rbm.get_cost_updates(batch_size, learning_rate, momentum, weight_cost) # compile the theano function fn = theano.function(inputs=[index, theano.Param(learning_rate, default=0.0001), theano.Param(momentum, default=0.5)], outputs= [r_cost, fe_cost], updates=updates, givens={self.x: train_set_x[batch_begin:batch_end]}) # append function to the list of functions pretrain_fns.append(fn) return pretrain_fns
IITM-DONLAB/python-dnn
src/pythonDnn/models/dbn.py
Python
apache-2.0
8,684
[ "Gaussian" ]
a56e200639f49f3b92d841d01f862ca13e2b81ffb161539c59a5f28f223ed231
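Each `rbm.get_cost_updates` call above compiles one contrastive-divergence step; the symbolic Theano graph obscures how little arithmetic that is. A numpy-only sketch of CD-1 for a Bernoulli-Bernoulli RBM with sigmoid units (toy sizes; momentum, biases, and the weight-cost term omitted):

    import numpy as np

    rng = np.random.default_rng(0)
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

    n_vis, n_hid, lr = 6, 4, 0.1
    W = 0.01 * rng.standard_normal((n_vis, n_hid))
    v0 = rng.integers(0, 2, size=(1, n_vis)).astype(float)

    # Positive phase: hidden activations driven by the data.
    h0_prob = sigmoid(v0 @ W)
    h0 = (rng.random(h0_prob.shape) < h0_prob).astype(float)
    # Negative phase: one Gibbs step back to visibles, then hiddens.
    v1_prob = sigmoid(h0 @ W.T)
    h1_prob = sigmoid(v1_prob @ W)
    # CD-1 update: data correlations minus model correlations.
    W += lr * (v0.T @ h0_prob - v1_prob.T @ h1_prob)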
#!/usr/bin/env python #Schedule is Call, Long, Short, MVSC, Vacation import numpy as np import itertools import random import sys import math import logging def gen_permutations(n, mvsc_list): #Generate all possible combinations for a week's schedule p = itertools.permutations(list(range(1,n+1))) temp = np.array([list(w) for w in p if w[3] in mvsc_list]) return temp def cost(schedule_cand): tot_cost = 0 for i in range(1, len(schedule_cand)): if (np.all(schedule_cand[i])): if (schedule_cand[i][3] == schedule_cand[i-1][3]): #Cost to be at MVSC consecutive weeks tot_cost += 7 if (schedule_cand[i][1] == schedule_cand[i-1][1]): #Cost to be long consecutive weeks tot_cost += 4 if (i > 1 and schedule_cand[i][0] == schedule_cand[i-2][0]): #Cost to be on call 2 times in 3 weeks tot_cost += 8 if (schedule_cand[i][0] == schedule_cand[i-1][3]): #Cost to be call following MVSC tot_cost += 1 if (schedule_cand[i][4] == schedule_cand[i-1][0] or schedule_cand[i][4] == schedule_cand[i-1][2]): #Cost reduction to be on call or early prior to vacation tot_cost -= 1 tot_cost += 1 return tot_cost def is_schedule_valid(schedule_cand, target_num): #Check if valid schedule (approx equal number of MVSC, call, and short weeks) div_factor = target_num / 53. for i in range(1,6): if (schedule_cand[1:target_num,0]==i).sum() > math.ceil(12 * div_factor): #Equal number of call weeks return False if (schedule_cand[1:target_num,2]==i).sum() > math.ceil(12 * div_factor): #Equal number of early weeks return False if (schedule_cand[1:target_num,3]==i).sum() > math.ceil(18 * div_factor): #Equal number of MVSC weeks for Brian, Jason, Dick return False return True def are_shifts_equal(schedule_cand, target_num): sum_shifts = np.zeros((3,5)) for i in range(1,6): sum_shifts[0,i-1] = (schedule_cand[1:target_num,0]==i).sum() sum_shifts[1,i-1] = (schedule_cand[1:target_num,2]==i).sum() sum_shifts[2,i-1] = (schedule_cand[1:target_num,3]==i).sum() for i in range(3): b = np.unique(sum_shifts[i,:]) if (i == 0 and b[-1] - b[0] > 1): #equal call (within 1) #print 'failed for equal call' return False if (i == 1 and b[-1] - b[0] > 2): #equal early (within 2) #print 'failed for equal early' return False if (i == 2 and b[-1] - b[1] > 1): #equal MVSC (within 1) #print 'failed for equal mvsc' return False return True def get_cands(week, schedule_cand): cands = list() filled = list(itertools.chain.from_iterable(np.nonzero(master_cand[week]))) for perm in perm_list: truth_array = (master_cand[week]==perm) if (np.array([truth_array[f] for f in filled]).all()): if (schedule_cand[week-1][4] != perm[0] and schedule_cand[week-1][0] != perm[0]): #No call following vacation or back to back call cands.append(perm) return cands def no_prune(schedule_cand, week_num): if (best_cost < 9999): cs = cost(schedule_cand) if (cs > best_cost or cs > overall_best_cost): #print 'failed for cost of %d, week %d' % (cs, week_num) return False if (week_num > 10): for i in range(1,6): if (schedule_cand[week_num-8:week_num,0]==i).sum() < 1: #at least 1 call every 8 weeks #print 'failed call' return False if ((schedule_cand[week_num-8:week_num,2]==i).sum()) < 1: #at least 1 early every 8 #print 'failed regina/early' return False if i < 3: if (schedule_cand[week_num-6:week_num,3]==i).sum() < 1: #at least 1 mvsc every 5 weeks #print 'failed mvsc' return False #print 'passed prune week %d' % week_num return True def gen_schedules(week_num, schedule_cand, target_num): #Generate schedule candidates using DFS global schedules global best_cost global overall_best_cost if (week_num == 
target_num): if (are_shifts_equal(schedule_cand, target_num)): cs = cost(schedule_cand) if (cs < best_cost and cs < overall_best_cost): print 'week_num %d, cost %d' % (week_num, cs) if (target_num == 53): schedules.append(np.array(schedule_cand)) overall_best_cost = cs best_cost = cs print schedule_cand logging.info('candidate found, cost %d', overall_best_cost) else: temp = cs best_cost = temp + (2 * target_num) logging.info('week %d, cost %d', week_num, cs) print schedule_cand gen_schedules(target_num, schedule_cand, target_num + 13) best_cost = temp return weeks_cand = get_cands(week_num, schedule_cand) temp = np.array(schedule_cand[week_num]) for cand in random.sample(weeks_cand, len(weeks_cand)): schedule_cand[week_num] = cand if (is_schedule_valid(schedule_cand, target_num) and no_prune(schedule_cand, week_num)): gen_schedules(week_num + 1, schedule_cand, target_num) schedule_cand[week_num] = temp return def read_input(filename): schedule_cand = np.genfromtxt(filename, delimiter=',') assert schedule_cand.shape==(53,5) return schedule_cand def save_result(filename): np.savetxt('schedules/' + filename, schedules[-1], delimiter=',') logging.info('saved schedule to schedules/%s', filename) def run_schedule(s_filename, savename): global schedules, perm_list, best_cost, overall_best_cost, master_cand logging.basicConfig(filename='log/' + savename + '.log', format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO, filemode='w') sys.setrecursionlimit(1000) overall_best_cost = 9999 schedules = list() perm_list = gen_permutations(5, [1, 2, 3]) best_cost = 25 schedule_cand = read_input(s_filename) master_cand = schedule_cand.copy() print master_cand logging.info('beginning schedule generation') gen_schedules(1, schedule_cand, 14) logging.info('finished schedule generation, best cost %d', cost(schedules[-1])) save_result(savename) if __name__ == "__main__": try: s_filename = sys.argv[1] savename = sys.argv[2] except: print 'Please supply the name of the input file and output file' sys.exit() person_dict = {1:'Jason', 2:'Brian', 3:'Dick', 4:'Mark', 5:'Tim'} position_dict = {0:'Call', 1:'Long', 2:'Short', 3:'MVSC', 4:'Vacation'} run_schedule(s_filename, savename)
jbolinge/scheduler
scheduler.py
Python
lgpl-3.0
6,098
[ "Brian" ]
58adb3d69f0f12d22ffa7993cd331d04f0af75beb56153a5b4f26c66d6153546
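gen_schedules above is a depth-first search with branch-and-bound pruning: a partial schedule is extended only while it stays valid and its cost stays under the best complete schedule seen so far. The same skeleton, stripped of the scheduling specifics (all names here are generic placeholders):

    def dfs(partial, candidates_for, cost, is_valid, target, best):
        # `best` is a one-element list so the bound is shared across
        # recursive calls, like the module-level globals above.
        if len(partial) == target:
            best[0] = min(best[0], cost(partial))
            return
        for cand in candidates_for(len(partial), partial):
            partial.append(cand)
            # Prune: an invalid or over-budget prefix cannot improve.
            if is_valid(partial) and cost(partial) <= best[0]:
                dfs(partial, candidates_for, cost, is_valid, target, best)
            partial.pop()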
#!/usr/bin/env python3 """ Smooth one or more labels within an integer atlas volume Usage ---- smooth_labels.py -i <input label image> -o <output label image> [label numbers] smooth_labels.py -h Example ---- >>> smooth_labels.py -i atlas.nii.gz -o atlas_smooth_5.nii.gz 5 10 11 Authors ---- Mike Tyszka, Caltech Brain Imaging Center Dates ---- 2015-04-07 JMT From scratch 2015-12-08 JMT Update command line arguments and port to python 3 License ---- This file is part of atlaskit. atlaskit is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. atlaskit is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with atlaskit. If not, see <http://www.gnu.org/licenses/>. Copyright ---- 2015 California Institute of Technology. """ __version__ = '0.1.0' import sys import argparse from scipy.ndimage.filters import gaussian_filter import nibabel as nib def main(): # Parse command line arguments parser = argparse.ArgumentParser(description='Smooth one or more atlas labels') parser.add_argument('-i','--in_file', help="source atlas labels filename") parser.add_argument('-o','--out_file', help="smoothed atlas labels filename") parser.add_argument('labels', metavar='label', type=int, nargs='+', help='label numbers to smooth') args = parser.parse_args() in_file = args.in_file out_file = args.out_file labels = args.labels # Load the source atlas image print('Opening %s' % in_file) in_nii = nib.load(in_file) # Load label image print('Loading labels') src_labels = in_nii.get_data() # Duplicate into output image print('Creating new label image') out_labels = src_labels.copy() for label in labels: print(' Smoothing label %d' % label) # Extract target label as a boolean mask print(' Identifying target label region %d' % label) label_mask = (src_labels == label) # Smooth target label region print(' Gaussian smoothing original target label') label_mask_smooth = gaussian_filter(label_mask.astype(float), sigma=1.0) # Normalize smoothed intensities label_mask_smooth = label_mask_smooth / label_mask_smooth.max() # Threshold smoothed mask at 0.5 to create new boolean mask print(' Thresholding smoothed label') label_mask_smooth = label_mask_smooth > 0.5 # Replace unsmoothed with smoothed label, overwriting other labels print(' Inserting smoothed target label') out_labels[label_mask] = 0 out_labels[label_mask_smooth] = label # Save smoothed labels image print('Saving smoothed labels to %s' % out_file) out_nii = nib.Nifti1Image(out_labels, in_nii.affine) out_nii.to_filename(out_file) print('Done') # Clean exit sys.exit(0) # This is the standard boilerplate that calls the main() function. if __name__ == '__main__': main()
jmtyszka/atlaskit
smooth_labels.py
Python
gpl-3.0
3,435
[ "Gaussian" ]
e9c654ec9c7b2bc6248f3be708a62af0b32bba9f46a7b8e72e086d0d5b9c366e
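The smoothing recipe in the script is mask, blur, renormalize, re-threshold, repaint. The core of that pipeline as a standalone function, using the same scipy call (with the modern import path scipy.ndimage rather than scipy.ndimage.filters):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def smooth_label(labels, label, sigma=1.0):
        # Extract the target label as a boolean mask and blur it.
        mask = labels == label
        blurred = gaussian_filter(mask.astype(float), sigma=sigma)
        blurred /= blurred.max()       # normalize peak intensity to 1
        smooth_mask = blurred > 0.5    # re-binarize at half height
        out = labels.copy()
        out[mask] = 0                  # clear the original extent
        out[smooth_mask] = label       # paint the smoothed extent
        return out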
import requests from lxml import html import datetime import inflect from docx import Document from docx.shared import Inches from PyQt4 import QtGui, uic from PyQt4.QtGui import QTableWidgetItem, QApplication, QColor, QIcon, QPushButton, QHBoxLayout, QAction from PyQt4.QtCore import QDate, QTimer, QSize, Qt import os import sys # Start a new session. session_requests = requests.session() # Create some global URL variables. login_url = "https://mindovermathtutoring.teachworks.com/accounts/login" dashboard_url = "https://mindovermathtutoring.teachworks.com/dashboard" participants_url = "https://mindovermathtutoring.teachworks.com/participants" # Find the main UI elements. if hasattr(sys, '_MEIPASS'): ui_login_path = os.path.join(sys._MEIPASS, "ui/login_window.ui") ui_main_path = os.path.join(sys._MEIPASS, "ui/main_window.ui") else: ui_login_path = "ui/login_window.ui" ui_main_path = "ui/main_window.ui" # Load the main UI elements. Ui_LoginWindow, QLoginWindow = uic.loadUiType(ui_login_path) Ui_MainWindow, QMainWindow = uic.loadUiType(ui_main_path) class Lesson(object): def __init__(self, lesson_url, date, notes, topics): # Initialize the lesson with certain attributes. self.lesson_url = lesson_url self.date = date.strftime("%D") self.notes = notes self.topics = topics class Student(object): def __init__(self, name, subject, tutor): # Initialize the student with certain attributes. self.name = name self.first_name = str.split(name)[0] self.last_name = str.split(name)[-1] self.subject = subject self.tutor = tutor self.lessons = [] self.notes = [] self.topics = [] def __str__(self): # Format student's info as a string. st = "---------------\n" st += self.name + "\n" st += self.subject + "\n" st += "---------------\n" for i in range(len(self.lessons)): lesson = self.lessons[i] st += "Lesson " + lesson.lesson_url + "\n" st += lesson.date + "\n" st += lesson.notes + "\n" st += "Topics: " for topic in self.topics: st += topic + "; " st += "\n\n" # Return the string. return st def add_lesson(self, lesson): # Add a lesson to the student's current list of lessons. self.lessons.append(lesson) def get_topics(self): # Grab all of the topics from the student's lessons. topics = [] for lesson in self.lessons: topics.extend(lesson.topics) # Return a list of topics return topics class RemovalButtonWidget(QtGui.QWidget): def __init__(self, parent, row): # Initialize the button with certain attributes. self.parent = parent self.row = row super(RemovalButtonWidget, self).__init__(parent) # Create the button and its icon. self.button = QtGui.QPushButton() self.button.setIcon(QIcon("gfx/x.png")) self.button.clicked.connect(self.remove_row) # Make sure the button is centered. layout = QtGui.QHBoxLayout() layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(0) layout.addWidget(self.button) self.setLayout(layout) def set_row(self, row): # Set the number of a row. self.row = row def remove_row(self): # Delete a row from the table. row = self.row self.parent.removeRow(row) self.update_rows() def update_rows(self): # Re-number all of the rows after a deletion takes place. nrows = self.parent.rowCount() for i in range(nrows): self.parent.cellWidget(i, 0).set_row(i) self.parent.setItem(i, 1, QtGui.QTableWidgetItem(str(i+1))) class LoginWindow(QLoginWindow, Ui_LoginWindow): def __init__(self, ): # Initialize the login window. super(LoginWindow, self).__init__() self.setupUi(self) self.setWindowTitle("Mind Over Math Login") # Save email and password for later?
self.email_field.setText("") self.password_field.setText("") # Activate upon login. self.login_button.clicked.connect(self.login_button_clicked) def login_button_clicked(self): # Get login information from user. email = self.email_field.text() password = self.password_field.text() # Open the login page. response = session_requests.get(login_url) # Grab hidden inputs. tree = html.fromstring(response.text) hidden_inputs = tree.xpath(r"//form//input[@type='hidden']") form = {x.attrib["name"]: x.attrib["value"] for x in hidden_inputs} # Enter username and password. form["user[email]"] = email form["user[password]"] = password # POST the login request. response = session_requests.post(login_url, data=form) if response.url == dashboard_url: self.error_label.setText("Login successful!") self.main_window = MainWindow() self.main_window.show() self.close() else: self.error_label.setText("Invalid email/password.") class MainWindow(QMainWindow, Ui_MainWindow): def __init__(self, ): # Initialize the main window. super(MainWindow, self).__init__() self.setupUi(self) self.setWindowTitle("Saga") # Set the column widths. self.student_table.setColumnWidth(0, 25) self.student_table.setColumnWidth(1, 25) self.student_table.setColumnWidth(2, 200) self.student_table.setColumnWidth(3, 200) self.student_table.setColumnWidth(4, 75) # Determine an appropriate date range for the progress reports. today = datetime.date.today() if today.day < 7: first = today.replace(day=1) last_month = first - datetime.timedelta(days=1) self.start_date_edit.setDate(QDate(last_month.year, last_month.month, 1)) self.end_date_edit.setDate(QDate(last_month.year, last_month.month, last_month.day)) self.names_ak_radio_button.toggle() elif today.day > 24: first = today.replace(day=1) next_month = first.replace(day=28) + datetime.timedelta(days=4) last_day = next_month - datetime.timedelta(days=next_month.day) self.start_date_edit.setDate(QDate(first.year, first.month, 1)) self.end_date_edit.setDate(QDate(last_day.year, last_day.month, last_day.day)) self.names_ak_radio_button.toggle() else: first = today.replace(day=1) last_month = first - datetime.timedelta(days=1) self.start_date_edit.setDate(QDate(last_month.year, last_month.month, 15)) self.end_date_edit.setDate(QDate(today.year, today.month, 14)) self.names_lz_radio_button.toggle() # Activate if something in the File menu was clicked. self.actionNew.triggered.connect(self.file_new) self.actionOpen.triggered.connect(self.file_open) self.actionSave.triggered.connect(self.file_save) # Activate if one of the GUI buttons was clicked. self.scrape_info_button.clicked.connect(self.scrape_info_button_clicked) self.generate_reports_button.clicked.connect(self.generate_reports_button_clicked) def file_new(self): # Remove all students from the table. self.student_table.setRowCount(0) def file_open(self): # Open a .saga file. print("Open triggered.") def file_save(self): # Save the table as a .saga file. print("Save triggered.") def scrape_info_button_clicked(self): # Reset the main GUI elements. self.progress_bar.setTextVisible(True) self.scrape_info_button.setEnabled(False) self.generate_reports_button.setEnabled(False) self.student_table.setRowCount(0) # Grab the user-selected ranges for dates and last names. 
start_date = self.start_date_edit.date().toPyDate() end_date = self.end_date_edit.date().toPyDate() valid_names = "" try: valid_names = self.valid_names_button_group.checkedButton().text() except AttributeError: self.error_label.setText("Please specify last names.") if valid_names != "": # Count the number of lessons and initiate the progress bar. num_lessons = self.count_lessons(start_date, end_date, valid_names) self.progress_bar.setMinimum(1) self.progress_bar.setMaximum(num_lessons) # Scrape the requested student info, and return a list of students. students = self.scrape_info(start_date, end_date, valid_names) for i in range(len(students)): # Grab each student. s = students[i] # Order the topics chronologically. topics = s.get_topics() topics.reverse() # Insert a new row in the table for each student. row_position = self.student_table.rowCount() self.student_table.insertRow(row_position) # Populate the row with relevant student info. self.student_table.setCellWidget(i, 0, RemovalButtonWidget(self.student_table, i)) self.student_table.setItem(i, 1, QtGui.QTableWidgetItem(str(i+1))) self.student_table.setItem(i, 2, QtGui.QTableWidgetItem(s.name)) self.student_table.setItem(i, 3, QtGui.QTableWidgetItem(s.subject)) self.student_table.setItem(i, 4, QtGui.QTableWidgetItem(str(len(s.lessons)))) self.student_table.setItem(i, 5, QtGui.QTableWidgetItem("\n".join(topics))) # Make sure the row fits inside the table. self.student_table.resizeRowToContents(row_position) # Reactivate the main GUI buttons. self.scrape_info_button.setEnabled(True) self.generate_reports_button.setEnabled(True) def count_lessons(self, start_date, end_date, valid_names): num_lessons = 0 page = 0 cont = True while cont: # Visit the next page of lessons. page += 1 response = session_requests.get(participants_url + "?page=" + str(page)) tree = html.fromstring(response.content) # If the last page is reached, break out of the loop. if tree.xpath("//div[@id='participants']/div/table/tbody/tr[1]/td[1]/text()")[0] == "No results found.": break else: # Scrape relevant information. dates = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[2]/text()") names = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[6]/text()") statuses = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[8]/text()") for j in range(len(dates)): date = datetime.date(int(dates[j].split("/")[2]), int(dates[j].split("/")[0]), int(dates[j].split("/")[1])) name = names[j] status = statuses[j] # Skip this entry if the date is outside of the acceptable range of values. if date < start_date: cont = False break if date > end_date: continue # Skip this entry if the last name is outside of the acceptable range of values. letter = str.upper(name.split()[-1][0]) if (valid_names == "A-K" and letter > "K") or (valid_names == "L-Z" and letter < "L"): continue # Add one lesson to the running total. if status == "Attended": num_lessons += 1 # Return the total number of lessons. return num_lessons def scrape_info(self, start_date, end_date, valid_names): students = [] student_names = [] num_lessons = 0 page = 0 cont = True while cont: page += 1 # Visit the next page of lessons. response = session_requests.get(participants_url + "?page=" + str(page)) tree = html.fromstring(response.content) # If the last page is reached, break out of the loop. if tree.xpath("//div[@id='participants']/div/table/tbody/tr[1]/td[1]/text()")[0] == "No results found.": break else: # Scrape relevant information. 
dates = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[2]/text()") lessons = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[4]/a/@href") subjects = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[4]/a/text()") tutors = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[5]/text()") names = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[6]/text()") statuses = tree.xpath("//div[@id='participants']/div/table/tbody/tr/td[8]/text()") for j in range(len(lessons)): lesson_url = str.split(lessons[j], "/")[-1] subject = str.split(subjects[j], "-")[-1].strip() tutor = tutors[j] name = names[j] status = statuses[j] # Skip this entry if the date is outside of the acceptable range of values. date = datetime.date(int(dates[j].split("/")[2]), int(dates[j].split("/")[0]), int(dates[j].split("/")[1])) if date < start_date: cont = False break if date > end_date: continue # Skip this entry if the last name is outside of the acceptable range of values. letter = str.upper(name.split()[-1][0]) if (valid_names == "A-K" and letter > "K") or (valid_names == "L-Z" and letter < "L"): continue # Either create a new student or add a lesson to an existing student. if status == "Attended": num_lessons += 1 student = None if name not in student_names: student = Student(name, subject, tutor) student_names.append(name) students.append(student) else: for s in students: if s.name == name: student = s break # Visit the lesson page and scrape the internal notes. response = session_requests.get(participants_url + "/" + lesson_url) tree = html.fromstring(response.content) notes_array = tree.xpath("//div[@class='row participant-notes']/div[2]/span/text()") notes = "\n".join(notes_array) # If there are topics listed, parse them out. notes_split = notes.split("Topics:") topics = [] if len(notes_split) > 1: topics = notes_split[-1].split(";") topics = [topic.strip() for topic in topics] # Add the new lesson for this student. lesson = Lesson(lesson_url, date, notes, topics) student.add_lesson(lesson) # Update the progress bar. self.progress_bar.setValue(num_lessons) QApplication.processEvents() # Reset the progress bar. self.progress_bar.reset() self.progress_bar.setTextVisible(False) # Sort the list of students by last name. students.sort(key=lambda x: x.last_name) # Return the list of students. return students def generate_reports_button_clicked(self): # Create a new Word document. document = Document() # Initialize the progress bar. nrows = self.student_table.rowCount() self.progress_bar.setMinimum(1) self.progress_bar.setMaximum(nrows) self.progress_bar.setTextVisible(True) for row in range(nrows): # Grab relevant information from each row in the table. full_name = self.student_table.item(row, 2).text() subject = self.student_table.item(row, 3).text() num = self.student_table.item(row, 4).text() topics = self.student_table.item(row, 5).text().splitlines() # Parse some additional relevant information. #################### ### UPDATE THIS! ### #################### first_name = full_name.split(" ")[0] tutor = "Troy Kling" start_date = self.start_date_edit.date() end_date = self.end_date_edit.date() valid_names = "" try: valid_names = self.valid_names_button_group.checkedButton().text() except AttributeError: self.error_label.setText("Please specify last names.") # Write the intro paragraph for each student. 
p = document.add_paragraph("") p.add_run(full_name + " - " + subject).underline = True p.add_run("\n") p.add_run( "From " + start_date.toString("MMMM d") + " to " + end_date.toString("MMMM d") + ", " + first_name + " attended " + inflect.engine().number_to_words(num) + " tutoring sessions for " + subject + ". " + tutor.split()[0].strip() + " and " + first_name + " were able to cover the following topics:") # Write the bullet list of topics for each student. for topic in topics: document.add_paragraph(topic, style="List Bullet").paragraph_format.left_indent = Inches(.5) # Write a filler paragraph at the end (to be manually completed by the tutor). document.add_paragraph( "Paragraph detailing: (1) improvements, breakthroughs; (2) struggles, solutions; (3) test grades, " + "positive/negative, plan of action; (4) concerns about student; (5) goals for future sessions. " + "Please let us know if you have any questions about " + first_name + "'s progress.\n") # Update the progress bar. self.progress_bar.setValue(row) QApplication.processEvents() # Add a page break every couple students. if row % 2 == 1: document.add_page_break() # Save the progress reports as a Word document. document.save(start_date.toString("MMMM yyyy") + " (" + valid_names + ")" + " Progress Reports " + tutor.split()[0].strip()[0] + " " + tutor.split()[-1].strip() + ".docx") # Reset the progress bar. self.progress_bar.reset() self.progress_bar.setTextVisible(False) def closeEvent(self, event): # Close the application nicely. self.deleteLater() if __name__ == "__main__": # Start the application. app = QtGui.QApplication(sys.argv) # Set icons of various sizes. app_icon = QtGui.QIcon() app_icon.addFile("gfx/saga16.png", QSize(16, 16)) app_icon.addFile("gfx/saga24.png", QSize(24, 24)) app_icon.addFile("gfx/saga32.png", QSize(32, 32)) app_icon.addFile("gfx/saga64.png", QSize(64, 64)) app_icon.addFile("gfx/saga256.png", QSize(256, 256)) app.setWindowIcon(app_icon) # Open the login window. login_window = LoginWindow() login_window.show() # Terminate when finished. sys.exit(app.exec_())
CoronalRain/Saga
saga.py
Python
gpl-3.0
20,417
[ "VisIt" ]
f1346c8eeb2694d9a3cb28ffd90d3cc9bb1722250df290e57a0efad008925346
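The login flow above is the standard requests-session pattern: GET the form, carry its hidden inputs (CSRF token included) into the POST, and reuse the session so cookies persist for later scraping. A condensed sketch with placeholder URL and credentials:

    import requests
    from lxml import html

    session = requests.session()
    login_url = 'https://example.invalid/accounts/login'

    page = session.get(login_url)
    tree = html.fromstring(page.text)
    hidden = tree.xpath("//form//input[@type='hidden']")
    form = {el.attrib['name']: el.attrib['value'] for el in hidden}
    form['user[email]'] = 'me@example.com'
    form['user[password]'] = 'secret'

    # Cookies live on the session, so later GETs stay authenticated.
    response = session.post(login_url, data=form)
    print(response.url)  # landing away from the login page means success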
# -*- coding: utf-8 -*- """ General class for parsing different DEMs Created on Mon Sep 10 14:43:26 2012 @author: mrayson """ import numpy as np from netCDF4 import Dataset from scipy import spatial import matplotlib.pyplot as plt from interpXYZ import tile_vector import time import shutil import gdal from gdalconst import * import pdb class DEM(object): """ General DEM class """ W = 1.0 # Weight maxdist = 250.0 def __init__(self,infile,**kwargs): self.infile=infile self.__dict__.update(kwargs) if self.infile[-3:]=='.nc': xgrd,ygrd,self.Z = self.loadnc() elif self.infile[-3:] in ['dem','asc']: xgrd,ygrd,self.Z = self.readraster() # Generate the grid self.x0 = xgrd.min() self.y0 = ygrd.min() self.x1 = xgrd.max() self.y1 = ygrd.max() self.dx= xgrd[1]-xgrd[0] self.dy= ygrd[1]-ygrd[0] # self.nx = len(xgrd) self.ny = len(ygrd) self.npts = self.nx*self.ny # self.X,self.Y = np.meshgrid(xgrd,ygrd) def loadnc(self): """ Load the DEM data from a netcdf file""" nc = Dataset(self.infile, 'r') #print nc.variables.keys() try: X = nc.variables['X'][:] Y = nc.variables['Y'][:] Z = nc.variables['topo'][:] except: try: X = nc.variables['x'][:] Y = nc.variables['y'][:] Z = nc.variables['z'][:] except: X = nc.variables['lon'][:] Y = nc.variables['lat'][:] Z = nc.variables['topo'][:] nc.close() return X,Y,Z def readraster(self): """ Loads the data from a DEM raster file""" # register all of the drivers gdal.AllRegister() # open the image ds = gdal.Open(self.infile, GA_ReadOnly) # Read the x and y coordinates cols = ds.RasterXSize rows = ds.RasterYSize bands = ds.RasterCount geotransform = ds.GetGeoTransform() originX = geotransform[0] originY = geotransform[3] pixelWidth = geotransform[1] pixelHeight = geotransform[5] x = originX + np.linspace(0,cols-1,cols)*pixelWidth y = originY + np.linspace(0,rows-1,rows)*pixelHeight # Read the actual data data = ds.ReadAsArray(0,0,cols,rows) # Remove missing points data[data==-32767]=np.nan return x, y, data def ravel(self): """ Returns the grid coordinates as a vector""" return np.concatenate( (np.reshape(np.ravel(self.X),(self.npts,1)),\ np.reshape(np.ravel(self.Y),(self.npts,1))),axis=1) def nanxy(self): """ Returns the x,y locations of the nan points """ ind = np.isnan(self.Z) nc = np.sum(ind) xy = np.zeros((nc,2)) n = -1 for jj in range(0,self.ny): for ii in range(0,self.nx): if ind[jj,ii]: n+=1 xy[n,0]=self.X[jj,ii] xy[n,1]=self.Y[jj,ii] return xy def nonnanxy(self): """ Returns the x,y locations of the non-nan points """ ind = np.isnan(self.Z) ind = ind==False nc = np.sum(ind) xy = np.zeros((nc,2)) n = -1 for jj in range(0,self.ny): for ii in range(0,self.nx): if ind[jj,ii]: n+=1 xy[n,0]=self.X[jj,ii] xy[n,1]=self.Y[jj,ii] return xy def returnij(self,x,y): """ Returns the grid cell indices that points x,y reside inside of.
""" I = np.ceil( (x-self.x0)/self.dx) J =np.ceil( (y-self.y0)/self.dy) J = np.array(J,dtype=int) I = np.array(I,dtype=int) # blank out bad cells J[J<0]=-1 J[J>self.ny-1]=-1 I[I<0]=-1 I[I>self.nx-1]=-1 return J,I def calcWeight(self): """ Calculate the weight at each point """ MAXPOINTS=20e6 weight = np.zeros((self.ny,self.nx)) # Calculate the distance from each point to a nan point xy = self.nonnanxy() xynan = self.nanxy() # Compute the spatial tree kd = spatial.cKDTree(xynan) nxy = len(xy) if nxy <= MAXPOINTS: # Perform query on all of the points in the grid dist,ind=kd.query(xy) # Compute the actual weight w = dist/self.maxdist w[dist>self.maxdist]=1.0 w=self.W*w # Map onto the grid J,I=self.returnij(xy[:,0],xy[:,1]) weight[J,I]=w else: print 'Dataset too large - calculating weights for chunks...' nchunks = np.ceil(len(xy)/MAXPOINTS) pt1,pt2=tile_vector(len(xy),int(nchunks)) for p1,p2 in zip(pt1,pt2): print 'Calculating points %d to %d of %d...'%(p1,p2,nxy) dist,ind=kd.query(xy[p1:p2,:]) # Compute the actual weight w = dist/self.maxdist w[dist>self.maxdist]=1.0 w=self.W*w # Map onto the grid J,I=self.returnij(xy[p1:p2,0],xy[p1:p2,1]) weight[J,I]=w return weight def contourf(self,Z,vv=range(-10,0),**kwargs): fig= plt.figure(figsize=(9,8)) plt.contourf(self.X,self.Y,Z,vv,**kwargs) plt.colorbar() plt.hold(True) plt.contour(self.X,self.Y,Z,[0.0,0.0],colors='k',linewidths=0.02) plt.axis('equal') return fig def contour(self,Z,vv=range(-10,0),**kwargs): fig= plt.figure(figsize=(9,8)) C = plt.contour(self.X,self.Y,Z,vv,colors='k',linestyles='-') plt.axis('equal') return fig,C def plot(self,Z,**kwargs): h= plt.figure(figsize=(9,8)) #h.imshow(np.flipud(self.Z),extent=[bbox[0],bbox[1],bbox[3],bbox[2]]) plt.imshow(Z,extent=[self.x0,self.x1,self.y0,self.y1],**kwargs) plt.colorbar() return h def savenc(self,outfile='DEM.nc'): """ Saves the DEM to a netcdf file""" # Create the global attributes globalatts = {'title':'DEM model',\ 'history':'Created on '+time.ctime(),\ 'Input dataset':self.infile} nc = Dataset(outfile, 'w', format='NETCDF4') # Write the global attributes for gg in globalatts.keys(): nc.setncattr(gg,globalatts[gg]) # Create the dimensions dimnamex = 'nx' dimlength = self.nx nc.createDimension(dimnamex,dimlength) dimnamey = 'ny' dimlength = self.ny nc.createDimension(dimnamey,dimlength) # Create the lat lon variables tmpvarx=nc.createVariable('X','f8',(dimnamex,)) tmpvary=nc.createVariable('Y','f8',(dimnamey,)) tmpvarx[:] = self.X[0,:] tmpvary[:] = self.Y[:,0] # Create the attributes tmpvarx.setncattr('long_name','Easting') tmpvarx.setncattr('units','metres') tmpvary.setncattr('long_name','Northing') tmpvary.setncattr('units','metres') # Write the topo data tmpvarz=nc.createVariable('topo','f8',(dimnamey,dimnamex),zlib=True,least_significant_digit=1) tmpvarz[:] = self.Z tmpvarz.setncattr('long_name','Topographic elevation') tmpvarz.setncattr('units','metres') tmpvarz.setncattr('coordinates','X, Y') tmpvarz.setncattr('positive','up') nc.close() print 'DEM saved to %s.'%outfile def tile_vector(count,chunks): rem = np.remainder(count,chunks) cnt2 = count-rem dx = cnt2/chunks if count != cnt2: pt1 = range(0,cnt2,dx) pt2 = range(dx,cnt2,dx) + [count] else: pt1 = range(0,count-dx,dx) pt2 = range(dx,count,dx) return pt1,pt2 def blendDEMs(ncfile,outfile,W,maxdist): ### Combine multiple files ### #Calculate the weights for each file nfiles = len(ncfile) ii=-1 for nc in ncfile: ii+=1 d = DEM(infile=nc,W=W[ii],maxdist=maxdist[ii]) print 'Calculating weights for %s...'%nc print 'Weight = %6.3f,
maxdist = %f'%(W[ii],maxdist[ii]) w=d.calcWeight() ny = d.ny nx = d.nx # if ii == 1: # f=d.contourf(w,vv=np.linspace(0,W[ii],10)) # f.savefig('%s_Weights.pdf'%outfile[:-2]) # del f del d if ii == 0: Wall = np.zeros((ny,nx,nfiles)) Wall[:,:,ii]=w del w # Normalise the weights print 'Normalising the weights...' Wsum = np.sum(Wall,axis=2) for ii in range(0,nfiles): Wall[:,:,ii] = np.squeeze(Wall[:,:,ii]) / Wsum # Re-load in the depths from each file and sum print 'Writing to an output file...' Zout = np.zeros((ny,nx)) filestr = '' ii=-1 for infile in ncfile: ii+=1 nc = Dataset(infile, 'r') Zin = nc.variables['topo'][:] nc.close() Zin[np.isnan(Zin)]=0.0 Zout += np.squeeze(Wall[:,:,ii]) * Zin filestr +='%s, '%infile # Copy the data to a new netcdf file shutil.copyfile(ncfile[-1],outfile) nc = Dataset(outfile, 'r+') nc.variables['topo'][:]=Zout globalatts = {'title':'DEM model',\ 'history':'Created on '+time.ctime(),\ 'Input datasets':filestr} # Write the global attributes for gg in globalatts.keys(): nc.setncattr(gg,globalatts[gg]) nc.close() print 'Completed write to %s.'%outfile #ncfile = [\ #'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/USACELIDAR_dx25_blockavg.nc',\ #'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/NOAADEM_dx25_IDW_dist100_NNEar3.nc',\ #'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/NOAASoundingsDEM_dx25_KRIG_dist500_Nnear3_range200.nc',\ #'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/TNRIS_dx25_GridData.nc'\ #] #W = [50.0,1.0,10.0,0.1] #maxdist = [100.0,100.,1500.,1000.0] #outfile = 'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/Blended/NOAA_Blended_All.nc' # #blendDEMs(ncfile,outfile,W,maxdist) #d = DEM(infile=outfile) #f=d.contourf(d.Z,vv=range(-15,3),vmin=-15,vmax=4,cmap=plt.cm.gist_earth) #f.savefig(outfile[:-2]+'pdf') #plt.show() ## Load in other formats #infile = 'C:/Projects/GOMGalveston/DATA/Bathymetry/NGDCCoastalRelief/galveston_tx.asc' #print 'Loading %s...'%infile #d = DEM(infile=infile) # #print 'Saving to an image...' ##f=d.contourf(d.Z,vv=range(-15,3),vmin=-15,vmax=4,cmap=plt.cm.gist_earth) #f=d.plot(d.Z,vmin=-20,vmax=5,cmap=plt.cm.gist_earth) #f.savefig(infile[:-3]+'png',dpi=1200) #d.savenc(outfile='C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/NOAA_10m_DEM.nc') #infile='C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/NOAA_10m_DEM.nc' #print 'Loading %s...'%infile #d = DEM(infile=infile) # #print 'Saving to an image...' #f=d.contourf(d.Z+0.14,vv=range(-20,3),vmin=-20,vmax=4,cmap=plt.cm.gist_earth) #f.savefig(infile[:-3]+'_MSL.'+'pdf')
UT-CWE/Hyospy
Hyospy_ensemble/lib/SUNTANS/GIS/dem.py
Python
mit
12,866
[ "NetCDF" ]
9f39541c1688548e0b5899aa29b06abe14a3a0a068e9fabe1b50fca359a3903c
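The weight-blending step in blendDEMs above normalises the per-dataset weights so they sum to one at every grid cell before summing the weighted depths. A minimal numpy sketch of that step with made-up 2 x 2 arrays (all names here are illustrative, not part of dem.py):

import numpy as np

w1 = np.array([[1.0, 0.5], [0.0, 2.0]])   # weight field, dataset 1
w2 = np.array([[1.0, 0.5], [1.0, 0.0]])   # weight field, dataset 2
z1 = np.full((2, 2), -5.0)                # depths, dataset 1
z2 = np.full((2, 2), -8.0)                # depths, dataset 2

wsum = w1 + w2                            # same role as np.sum(Wall, axis=2)
z = (w1 / wsum) * z1 + (w2 / wsum) * z2   # normalised weighted blend
print(z)                                  # every cell lies between -8 and -5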
#!/usr/bin/env python # # This file is part of OpenDrift. # # OpenDrift is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 2 # # OpenDrift is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OpenDrift. If not, see <http://www.gnu.org/licenses/>. # # Copyright 2015, Knut-Frode Dagestad, MET Norway # Utility script to display contents of a netCDF CF-compliant # file or URL containing driver data suitable for opendrift # # Knut-Frode Dagestad, 19 Feb 2015 import sys import argparse from datetime import datetime import numpy as np try: from opendrift.readers import reader_netCDF_CF_generic from opendrift.readers import reader_ROMS_native try: from opendrift.readers import reader_grib readers = [reader_netCDF_CF_generic, reader_ROMS_native, reader_grib] except: readers = [reader_netCDF_CF_generic, reader_ROMS_native] except ImportError: # development sys.exit('Please add opendrift folder to your PYTHONPATH.') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('filename', help='<URL or netCDF filename>') parser.add_argument('-p', dest='variable', default='noplot', nargs='?', help='Plot domain (or variable if given)') parser.add_argument('-vmin', dest='vmin', default=None, nargs='?', help='Minimum value for colorbar') parser.add_argument('-vmax', dest='vmax', default=None, nargs='?', help='Maximum value for colorbar') parser.add_argument('-e', action='store_true', help='Report errors on failure.') parser.add_argument('-lon', dest='lon', default=None, help='Report data from position.') parser.add_argument('-lat', dest='lat', default=None, help='Report data from position.') parser.add_argument('-time', dest='time', default=None, help='Report data from position at time [YYYYmmddHHMM].') args = parser.parse_args() for reader in readers: try: print('Testing %s...' % reader.__file__) r = reader.Reader(args.filename) print(r) break except Exception as me: if args.e is True: print(me) import traceback print(traceback.format_exc()) print('---------------------------------------') print('...not applicable.') if not 'r' in locals(): sys.exit('No readers applicable for ' + args.filename) if args.lon is not None: if args.lat is None or args.time is None: raise ValueError('Both lon, lat and time must be given') lon = np.atleast_1d(float(args.lon)) lat = np.atleast_1d(float(args.lat)) x,y = r.lonlat2xy(lon, lat) time = datetime.strptime(args.time, '%Y%m%d%H%M') r.buffer=3 i=3; j=3 # center of block variables = [var for var in r.variables if var not in ('time') and 'time' not in var] data = r.get_variables(variables, time, x, y, z=0, block=True) for var in variables: print('%s : %s' % (var, data[var][i,j])) if 'x_wind' in variables and 'y_wind' in variables: print('windspeed : %s' % np.sqrt( data['x_wind'][i,j]**2 + data['y_wind'][i,j]**2)) if args.variable != 'noplot': if args.variable is None: r.plot() else: if args.vmin is None: vmin = None else: vmin = np.float(args.vmin) if args.vmax is None: vmax = None else: vmax = np.float(args.vmax) r.plot(args.variable, vmin=vmin, vmax=vmax)
knutfrode/opendrift
opendrift/scripts/readerinfo.py
Python
gpl-2.0
4,321
[ "NetCDF" ]
b358595a4b7ec6e64dc957793637252c3a559253d19a37e6e36fc3a17ab24337
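The reader-selection loop in readerinfo.py above is a generic "first reader that accepts the file wins" pattern. A compact sketch of the same idea with the reader list passed in (the function name is mine, not an OpenDrift API):

def open_with_first_reader(filename, readers):
    """Return the first Reader instance that accepts the file."""
    for reader in readers:
        try:
            return reader.Reader(filename)
        except Exception:
            continue  # this reader cannot parse the file, try the next one
    raise IOError('No readers applicable for ' + filename)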
#!/bin/env python """ Tests for Bdii2CSAgent module """ import unittest from mock import MagicMock as Mock, patch from DIRAC import S_OK, S_ERROR from DIRAC.ConfigurationSystem.Agent import Bdii2CSAgent MODNAME = "DIRAC.ConfigurationSystem.Agent.Bdii2CSAgent" MAINBDII = { 'site1': {'CEs': { 'ce1': { 'Queues': { 'queue1': "SomeValues" }}}}, 'site2': {'CEs': { 'ce2': { 'Queues': { 'queue2': "SomeValues" }}}} } ALTBDII = { 'site2': {'CEs': { 'ce2': { 'Queues': { 'queue2': "SomeOtherValues" }}, 'ce2b': { 'Queues': { 'queue2b': "SomeOtherValues" }}}}, 'site3': {'CEs': { 'ce3': { 'Queues': { 'queue3': "SomeValues" }}}} } class Bdii2CSTests( unittest.TestCase ): def setUp( self ): with patch( "DIRAC.ConfigurationSystem.Agent.Bdii2CSAgent.AgentModule.__init__", new=Mock() ): self.agent = Bdii2CSAgent.Bdii2CSAgent( agentName="Configuration/testing", loadName="Configuration/testing" ) ## as we ignore the init from the baseclass some agent variables might not be present so we set them here ## in any case with this we can check that log is called with proper error messages self.agent.log = Mock() def tearDown( self ): pass def test__getBdiiCEInfo_success( self ): expectedResult = {} expectedResult.update( ALTBDII ) expectedResult.update( MAINBDII ) self.agent.alternativeBDIIs = [ "server2" ] with patch( MODNAME+".getBdiiCEInfo", new=Mock( side_effect=[ S_OK( MAINBDII ), S_OK( ALTBDII ), ] ) ) as infoMock: ret = self.agent._Bdii2CSAgent__getBdiiCEInfo( "vo" ) #pylint: disable=no-member infoMock.assert_any_call("vo", host=self.agent.host, glue2=self.agent.glue2Only) infoMock.assert_any_call("vo", host="server2", glue2=self.agent.glue2Only) self.assertTrue( ret['OK'] ) self.assertEqual( expectedResult, ret['Value'] ) self.assertEqual( ret['Value']['site2']['CEs']['ce2']['Queues']['queue2'], "SomeValues" ) self.assertNotIn( 'ce2b', ret['Value']['site2']['CEs'] ) def test__getBdiiCEInfo_fail_10( self ): self.agent.alternativeBDIIs = [ "server2" ] with patch( MODNAME+".getBdiiCEInfo", new=Mock( side_effect=[ S_ERROR( "error" ), S_OK( ALTBDII ), ] ) ) as infoMock: ret = self.agent._Bdii2CSAgent__getBdiiCEInfo( "vo" ) #pylint: disable=no-member infoMock.assert_any_call("vo", host=self.agent.host, glue2=self.agent.glue2Only) infoMock.assert_any_call("vo", host="server2", glue2=self.agent.glue2Only) self.assertTrue( any ( "Failed getting information from default" in str(args) \ for args in self.agent.log.error.call_args_list ), self.agent.log.error.call_args_list ) self.assertTrue( ret['OK'] ) self.assertEqual( ALTBDII, ret['Value'] ) def test__getBdiiCEInfo_fail_01( self ): self.agent.alternativeBDIIs = [ "server2" ] with patch( MODNAME+".getBdiiCEInfo", new=Mock( side_effect=[ S_OK( MAINBDII ), S_ERROR( "error" ), ] ) ) as infoMock: ret = self.agent._Bdii2CSAgent__getBdiiCEInfo( "vo" ) #pylint: disable=no-member infoMock.assert_any_call("vo", host=self.agent.host, glue2=False) infoMock.assert_any_call("vo", host="server2", glue2=False) self.assertTrue( any ( "Failed getting information from server2" in str(args) \ for args in self.agent.log.error.call_args_list ), self.agent.log.error.call_args_list ) self.assertTrue( ret['OK'] ) self.assertEqual( MAINBDII, ret['Value'] ) def test__getBdiiCEInfo_fail_11( self ): self.agent.alternativeBDIIs = [ "server2" ] with patch( MODNAME+".getBdiiCEInfo", new=Mock( side_effect=[ S_ERROR( "error1" ), S_ERROR( "error2" ), ] ) ) as infoMock: ret = self.agent._Bdii2CSAgent__getBdiiCEInfo( "vo" ) #pylint: disable=no-member infoMock.assert_any_call("vo", host=self.agent.host, 
glue2=False) infoMock.assert_any_call("vo", host="server2", glue2=False) self.assertTrue( any ( "Failed getting information from server2" in str(args) \ for args in self.agent.log.error.call_args_list ), self.agent.log.error.call_args_list ) self.assertTrue( any ( "Failed getting information from default" in str(args) \ for args in self.agent.log.error.call_args_list ), self.agent.log.error.call_args_list ) self.assertFalse( ret['OK'] ) self.assertIn( "error1\nerror2", ret['Message'] ) if __name__ == '__main__': SUITE = unittest.defaultTestLoader.loadTestsFromTestCase( Bdii2CSTests ) unittest.TextTestRunner( verbosity = 2 ).run( SUITE )
fstagni/DIRAC
ConfigurationSystem/Agent/test/Test_Bdii2CS.py
Python
gpl-3.0
5,058
[ "DIRAC" ]
fac53fa97ad6119df5057a6636ae3c5fcaeda0fae71c0a16a7b5f71f7ac63a5a
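test__getBdiiCEInfo_success above depends on dict.update ordering: the alternative BDII result is applied first and the default host's result last, so on a clashing top-level key ('site2') the default wins wholesale, which is also why 'ce2b' disappears from the merged value. A toy illustration, independent of DIRAC:

alt = {'site2': 'SomeOtherValues', 'site3': 'SomeValues'}
main = {'site1': 'SomeValues', 'site2': 'SomeValues'}

merged = {}
merged.update(alt)    # alternative BDII first
merged.update(main)   # default BDII last, so it wins on clashing keys
assert merged == {'site1': 'SomeValues', 'site2': 'SomeValues', 'site3': 'SomeValues'}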
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import itertools import os import re from tests.unit import test class Variants(object): def __init__(self, variants, print_prefix="mock_"): self.variants = variants self.print_prefix = print_prefix def __repr__(self): variants = self.variants if len(variants) > 3: variants = variants[:3] variants = [repr(self.print_prefix + var) for var in variants] return "{" + ", ".join(variants) + ( ", ...}" if len(self.variants) > 3 else "}") def __eq__(self, val): return getattr(val, "variants", val) == self.variants def __ne__(self, other): return not self.__eq__(other) def __contains__(self, val): return val in self.variants def pairwise_isinstance(*args): return all(itertools.starmap(isinstance, args)) class FuncMockArgsDecoratorsChecker(ast.NodeVisitor): """Recursively visit an AST looking for misusage of mocks in tests. The misusage being tested by this particular class is unmatched mocked object name against the argument names. The following is the correct usages:: @mock.patch("module.abc") # or # or @mock.patch(MODULE + ".abc") # or @mock.patch("%s.abc" % MODULE) where MODULE="module" def test_foobar(self, mock_module_abc): # or `mock_abc' ... @mock.patch("pkg.ClassName.abc") # or # or @mock.patch(CLASSNAME + ".abc") # or @mock.patch("%s.abc" % CLASSNAME) where CLASSNAME="pkg.ClassName" def test_foobar(self, mock_class_name_abc): ... class FooClassNameTestCase(...): @mock.patch("pkg.FooClassName.abc") def test_foobar(self, mock_abc): # Iff the mocked object is inside the tested class then # the class name in mock argname is optional. ... While these are not:: @mock.patch("module.abc") def test_foobar(self, m_abc): # must be prefixed with `mock_' @mock.patch("module.abc") def test_foobar(self, mock_cba): # must contain mocked object name (`mock_abc') @mock.patch("module.abc") def test_foobar(self, mock_modulewrong_abc): # must match the module `mock_module_abc' @mock.patch("ClassName.abc") def test_foobar(self, mock_class_abc): # must match the python-styled class name + method name """ # NOTE(amaretskiy): Disable check if shortest variant is too long # because long name is not convenient and could # even be blocked by PEP8 SHORTEST_VARIANT_LEN_LIMIT = 25 def __init__(self): self.errors = [] self.globals_ = {} @classmethod def _get_name(cls, node): if isinstance(node, ast.Name): return node.id if isinstance(node, ast.Attribute): return cls._get_name(node.value) + "." + node.attr return "" def _get_value(self, node): """Get mock.patch string argument regexp. 
It is either a string (if we are lucky), string-format of ("%s.something" % GVAL) or (GVAL + ".something") """ val = None if isinstance(node, ast.Str): val = node.s elif isinstance(node, ast.BinOp): if pairwise_isinstance( (node.op, ast.Mod), (node.left, ast.Str), (node.right, ast.Name)): val = node.left.s % self.globals_[node.right.id] elif pairwise_isinstance( (node.op, ast.Add), (node.left, ast.Name), (node.right, ast.Str)): val = self.globals_[node.left.id] + node.right.s elif isinstance(node, ast.Name): val = self.globals_[node.id] if val is None: raise ValueError( "Unable to find value in %s, only the following are parsed: " "GLOBAL, 'pkg.foobar', '%%s.foobar' %% GLOBAL or 'GLOBAL + " "'.foobar'" % ast.dump(node)) return val CAMELCASE_SPLIT_ANY_AND_CAPITAL = re.compile("(.)([A-Z][a-z]+)") CAMELCASE_SPLIT_LOWER_AND_CAPITAL = re.compile("([a-z0-9])([A-Z])") CAMELCASE_SPLIT_REPL = r"\1_\2" @classmethod def _camelcase_to_python(cls, name): for regexp in (cls.CAMELCASE_SPLIT_ANY_AND_CAPITAL, cls.CAMELCASE_SPLIT_LOWER_AND_CAPITAL): name = regexp.sub(cls.CAMELCASE_SPLIT_REPL, name) return name.lower() def _get_mocked_class_value_variants(self, class_name, mocked_name): class_name = self._camelcase_to_python(class_name) mocked_name = self._camelcase_to_python(mocked_name) if class_name == self.classname_python: # Optional, since class name of the mocked package is the same as # class name of the *TestCase return [mocked_name, class_name + "_" + mocked_name] # Full class name is required otherwise return [class_name + "_" + mocked_name] def _add_pkg_optional_prefixes(self, tokens, variants): prefixed_variants = list(variants) for token in map(self._camelcase_to_python, reversed(tokens)): prefixed_variants.append(token + "_" + prefixed_variants[-1]) return prefixed_variants def _get_mocked_name_variants(self, name): tokens = name.split(".") variants = [self._camelcase_to_python(tokens.pop())] if tokens: if tokens[-1][0].isupper(): # Mocked something inside a class, check if we should require # the class name to be present in mock argument variants = self._get_mocked_class_value_variants( class_name=tokens.pop(), mocked_name=variants[0]) variants = self._add_pkg_optional_prefixes(tokens, variants) return Variants(variants) def _get_mock_decorators_variants(self, funccall): """Return all the mock.patch{,.object} decorated for function.""" mock_decorators = [] for decorator in reversed(funccall.decorator_list): if not isinstance(decorator, ast.Call): continue funcname = self._get_name(decorator.func) if funcname == "mock.patch": decname = self._get_value(decorator.args[0]) elif funcname == "mock.patch.object": decname = (self._get_name(decorator.args[0]) + "." 
+ self._get_value(decorator.args[1])) else: continue mock_decorators.append( self._get_mocked_name_variants(decname) ) return mock_decorators @staticmethod def _get_mock_args(node): """Return all the mock arguments.""" args = [] PREFIX_LENGTH = len("mock_") for arg in node.args.args: name = getattr(arg, "id", getattr(arg, "arg", None)) if not name.startswith("mock_"): continue args.append(name[PREFIX_LENGTH:]) return args def visit_Assign(self, node): """Catch all the globals.""" self.generic_visit(node) if node.col_offset == 0: mnode = ast.parse("") mnode.body = [node] mnode = ast.fix_missing_locations(mnode) code = compile(mnode, "<ast>", "exec") try: exec(code, self.globals_) except Exception: pass self.globals_.pop("__builtins__", None) self.globals_.pop("builtins", None) def visit_ClassDef(self, node): classname_camel = node.name if node.name.endswith("TestCase"): classname_camel = node.name[:-len("TestCase")] self.classname_python = self._camelcase_to_python(classname_camel) self.generic_visit(node) def check_name(self, arg, dec_vars): return dec_vars is not None and arg in dec_vars def visit_FunctionDef(self, node): self.generic_visit(node) mock_decs = self._get_mock_decorators_variants(node) if not mock_decs: return mock_args = self._get_mock_args(node) error_msgs = [] mismatched = False for arg, dec_vars in itertools.zip_longest(mock_args, mock_decs): if not self.check_name(arg, dec_vars): if arg and dec_vars: sorted_by_len = sorted( dec_vars.variants, key=lambda i: len(i), reverse=True) shortest_name = sorted_by_len.pop() if len(shortest_name) <= self.SHORTEST_VARIANT_LEN_LIMIT: error_msgs.append( (f"Argument 'mock_{arg}' misnamed; should be " f"either of {dec_vars} that is derived from the " f"mock decorator args.\n") ) elif not arg: error_msgs.append( f"Missing or malformed argument for {dec_vars} " f"decorator.") mismatched = True elif not dec_vars: error_msgs.append( f"Missing or malformed decorator for 'mock_{arg}' " f"argument.") mismatched = True if error_msgs: if mismatched: self.errors.append({ "lineno": node.lineno, "args": mock_args, "decs": mock_decs, "messages": error_msgs }) else: self.errors.append({ "lineno": node.lineno, "mismatch_pairs": list(zip(mock_args, mock_decs)), "messages": error_msgs }) class MockUsageCheckerTestCase(test.TestCase): tests_path = os.path.join(os.path.dirname(__file__)) def test_mock_decorators_and_args(self): """Ensure that mocked objects are called correctly in the arguments. See `FuncMockArgsDecoratorsChecker' docstring for details. """ errors = [] for dirname, dirnames, filenames in os.walk(self.tests_path): for filename in filenames: if (not filename.startswith("test_") or not filename.endswith(".py")): continue filename = os.path.relpath(os.path.join(dirname, filename)) with open(filename, "rb") as fh: tree = ast.parse(fh.read(), filename) visitor = FuncMockArgsDecoratorsChecker() visitor.visit(tree) errors.extend( dict(filename=filename, **error) for error in visitor.errors) if errors: print(FuncMockArgsDecoratorsChecker.__doc__) print( "\n\n" "The following errors were found during the described check:") for error in errors: print("\n\n" "Errors at file %(filename)s line %(lineno)d:\n\n" "%(message)s" % { "message": "\n".join(error["messages"]), "filename": error["filename"], "lineno": error["lineno"]}) # NOTE(pboldin): When the STDOUT is shuted the below is the last # resort to know what is wrong with the mock names. 
for error in errors: error["messages"] = [ message.rstrip().replace("\n", " ").replace("\t", "") for message in error["messages"] ] self.assertEqual([], errors)
openstack/rally
tests/unit/test_mock.py
Python
apache-2.0
12,518
[ "VisIt" ]
d43c80152a03699bfb04e979e326d94dfa990efef9f240a13e6b5e7fb97670ae
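The naming rules enforced by FuncMockArgsDecoratorsChecker hinge on its two-regex CamelCase-to-snake_case conversion. A self-contained sketch of the same conversion (the regexes are copied from the class; the helper name is mine):

import re

ANY_AND_CAPITAL = re.compile("(.)([A-Z][a-z]+)")
LOWER_AND_CAPITAL = re.compile("([a-z0-9])([A-Z])")

def camel_to_snake(name):
    # First pass splits e.g. "FooClassName" -> "Foo_ClassName";
    # second pass catches the remaining lower/upper boundaries.
    for regexp in (ANY_AND_CAPITAL, LOWER_AND_CAPITAL):
        name = regexp.sub(r"\1_\2", name)
    return name.lower()

assert camel_to_snake("FooClassName") == "foo_class_name"
assert camel_to_snake("HTTPServer") == "http_server"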
# -*- coding: utf-8 -*- # # glif_psc_neuron.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Current-based generalized leaky integrate and fire (GLIF) neuron example ------------------------------------------------------------------------- Simple example of how to use the ``glif_psc`` neuron model for five different levels of GLIF neurons. Four stimulation paradigms are illustrated for the GLIF model with externally applied current and spikes impinging. Voltage traces, current traces, threshold traces, and spikes are shown. KEYWORDS: glif_psc """ ############################################################################## # First, we import all necessary modules to simulate, analyze and plot this # example. import nest import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec ############################################################################## # We initialize NEST and set the simulation resolution. nest.ResetKernel() resolution = 0.05 nest.SetKernelStatus({"resolution": resolution}) ############################################################################## # We also pre-define the synapse time constant array, [2.0, 1.0] ms for # the two desired synaptic ports of the GLIF neurons. Note that the default # synapse time constant is [2.0] ms, which is for a neuron with one port. syn_tau = [2.0, 1.0] ############################################################################### # We create the five levels of GLIF model to be tested, i.e., # ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``. # For each level of GLIF model, we create a ``glif_psc`` node. The node is # created by setting the relevant model mechanism parameters and the time constant # of the 2 synaptic ports as mentioned above. Other neuron parameters are set # as default. The five ``glif_psc`` node handles are combined into a list. 
n_lif = nest.Create("glif_psc", params={"spike_dependent_threshold": False, "after_spike_currents": False, "adapting_threshold": False, "tau_syn": syn_tau}) n_lif_r = nest.Create("glif_psc", params={"spike_dependent_threshold": True, "after_spike_currents": False, "adapting_threshold": False, "tau_syn": syn_tau}) n_lif_asc = nest.Create("glif_psc", params={"spike_dependent_threshold": False, "after_spike_currents": True, "adapting_threshold": False, "tau_syn": syn_tau}) n_lif_r_asc = nest.Create("glif_psc", params={"spike_dependent_threshold": True, "after_spike_currents": True, "adapting_threshold": False, "tau_syn": syn_tau}) n_lif_r_asc_a = nest.Create("glif_psc", params={"spike_dependent_threshold": True, "after_spike_currents": True, "adapting_threshold": True, "tau_syn": syn_tau}) neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a ############################################################################### # For the stimulation input to the glif_psc neurons, we create one excitation # spike generator and one inhibition spike generator, each of which generates # three spikes; we also create one step current generator and a Poisson # generator, a parrot neuron (to be paired with the Poisson generator). # The three different injections are spread to three different time periods, # i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms. # Each of the excitation and inhibition spike generators generates three spikes # at different time points. Configuration of the current generator includes the # definition of the start and stop times and the amplitude of the injected # current. Configuration of the Poisson generator includes the definition of # the start and stop times and the rate of the injected spike train. espikes = nest.Create("spike_generator", params={"spike_times": [10., 100., 150.], "spike_weights": [20.]*3}) ispikes = nest.Create("spike_generator", params={"spike_times": [15., 99., 150.], "spike_weights": [-20.]*3}) cg = nest.Create("step_current_generator", params={"amplitude_values": [400., ], "amplitude_times": [200., ], "start": 200., "stop": 500.}) pg = nest.Create("poisson_generator", params={"rate": 150000., "start": 600., "stop": 900.}) pn = nest.Create("parrot_neuron") ############################################################################### # The generators are then connected to the neurons. Specification of # the ``receptor_type`` uniquely defines the target receptor. # We connect current generator, the spike generators, Poisson generator (via # parrot neuron) to receptor 0, 1, and 2 of the GLIF neurons, respectively. # Note that Poisson generator is connected to parrot neuron to transit the # spikes to the glif_psc neuron. nest.Connect(cg, neurons, syn_spec={"delay": resolution}) nest.Connect(espikes, neurons, syn_spec={"delay": resolution, "receptor_type": 1}) nest.Connect(ispikes, neurons, syn_spec={"delay": resolution, "receptor_type": 1}) nest.Connect(pg, pn, syn_spec={"delay": resolution}) nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 2}) ############################################################################### # A ``multimeter`` is created and connected to the neurons. The parameters # specified for the multimeter include the list of quantities that should be # recorded and the time interval at which quantities are measured. 
mm = nest.Create("multimeter", params={"interval": resolution, "record_from": ["V_m", "I", "I_syn", "threshold", "threshold_spike", "threshold_voltage", "ASCurrents_sum"]}) nest.Connect(mm, neurons) ############################################################################### # A ``spike_detector`` is created and connected to the neurons to record the # spikes generated by the glif_psc neurons. sd = nest.Create("spike_detector") nest.Connect(neurons, sd) ############################################################################### # Run the simulation for 1000 ms and retrieve recorded data from # the multimeter and spike detector. nest.Simulate(1000.) data = nest.GetStatus(mm)[0]["events"] senders = data['senders'] spike_data = nest.GetStatus(sd)[0]["events"] spike_senders = spike_data["senders"] spikes = spike_data["times"] ############################################################################### # We plot the time traces of the membrane potential (in blue) and # the overall threshold (in green), and the spikes (as red dots) in one panel; # the spike component of threshold (in yellow) and the voltage component of # threshold (in black) in another panel; the injected currents (in strong blue), # the sum of after spike currents (in cyan), and the synaptic currents (in # magenta) in response to the spike inputs to the neurons in the third panel. # We plot all these three panels for each level of GLIF model in a separate # figure. glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"] for i in range(len(glif_models)): glif_model = glif_models[i] plt.figure(glif_model) gs = gridspec.GridSpec(3, 1, height_ratios=[2, 1, 1]) t = data["times"][senders == 1] ax1 = plt.subplot(gs[0]) plt.plot(t, data["V_m"][senders == neurons[i]], "b") plt.plot(t, data["threshold"][senders == neurons[i]], "g--") plt.plot(spikes[spike_senders == neurons[i]], [max(data["threshold"][senders == neurons[i]]) * 0.95] * len(spikes[spike_senders == neurons[i]]), "r.") plt.legend(["V_m", "threshold", "spike"]) plt.ylabel("V (mV)") plt.title("Simulation of glif_psc neuron of " + glif_model) ax2 = plt.subplot(gs[1]) plt.plot(t, data["threshold_spike"][senders == neurons[i]], "y") plt.plot(t, data["threshold_voltage"][senders == neurons[i]], "k--") plt.legend(["threshold_spike", "threshold_voltage"]) plt.ylabel("V (mV)") ax3 = plt.subplot(gs[2]) plt.plot(t, data["I"][senders == neurons[i]], "--") plt.plot(t, data["ASCurrents_sum"][senders == neurons[i]], "c-.") plt.plot(t, data["I_syn"][senders == neurons[i]], "m") plt.legend(["I_e", "ASCurrents_sum", "I_syn"]) plt.ylabel("I (pA)") plt.xlabel("t (ms)") plt.show()
hakonsbm/nest-simulator
pynest/examples/glif_psc_neuron.py
Python
gpl-2.0
9,614
[ "NEURON" ]
affc5edaa644144f9ffd12953f5b9d88b46318a05070a32198bfae0ba3aebeae
#!/usr/bin/python3 # -*- coding: utf-8 -*- import os, sys, time from PyQt5.QtCore import Qt, QThread, QObject, pyqtSignal, pyqtSlot, QObjectCleanupHandler from PyQt5.QtWidgets import QApplication, QWidget, QGridLayout, QStackedLayout, QLayout, QTextEdit from PyQt5.QtGui import QPixmap, QPalette, QBrush from fsSocket import * from fsGauge import * from fsLayout import * from fsDebug import * class Main(QWidget): def __init__(self): QWidget.__init__(self) self.magneto = magnetoGauge() self.magneto.initialize({'mag': {'value':0}}) self.switch = switchPanel() self.light = lightPanel() self.warn = warnPanel() self.radio = radioPanel() self.radio.initialize({}) self.airspeed = airspeedGauge() self.accelerometer = accelerometerGauge() self.accelerometer.initialize({'load': {'value':0}}) self.attitude = attitudeGauge() self.attitude.initialize({'pitch': {'value':0}, 'roll': {'value':0}}) self.altitude = altitudeGauge() self.altitude.initialize({'alt': {'value':0}, 'baro': {'value':0}}) self.turnslip = turnslipGauge() self.turnslip.initialize({'turn': {'value':0}, 'slip': {'value':0}}) self.dg = dgGauge() self.dg.initialize({'cap': {'value':0}}) self.vario = varioGauge() self.vario.initialize({'vvi': {'value':0}}) self.fuel = fuelGauge() self.fuel.initialize({'fuel': {'value':0}}) self.manifold = manifoldGauge() self.manifold.initialize({'man': {'value':0}, 'flow': {'value':0}}) self.vacuum = vacuumGauge() self.vacuum.initialize({'vacuum': {'value':0}}) self.oil = oilGauge() self.oil.initialize({'heat': {'value':0, 'max': 250}, 'psi': {'value':0, 'max': 25}}) self.vor = vorGauge() self.vor.initialize({'obs': {'value':0}, 'tofr': {'value':1}, 'dme': {'value':0}, 'hdef': {'value':0}, 'vdef': {'value':0}}) self.adf = adfGauge() self.adf.initialize({'frq': {'value':0}, 'card': {'value':0}, 'brg': {'value':0}}) self.engine = engineGauge() self.engine.initialize({'rpm': {'value':0}}) self.trim = trimGauge() self.trim.initialize({'pitch': {'value':0}}) self.ident = identPanel() self.debug = fsDebug(self) self.debug.keyPressEvent = self.keyPressEvent self.debug.appendText('debug initiated', 'info') self.debug.appendText('error test', 'error') self.stack = QStackedLayout(self) #print (self.stack.parent.vor.param) for i in range(NUM_LAYOUT): page = QWidget() layout = QGridLayout() page.setLayout(layout) self.stack.addWidget(page) self.setFSLayout(DEFAULT_LAYOUT) self.b = 0 def keyPressEvent(self, event): key = event.key() if (key == Qt.Key_1): self.setFSLayout(1) elif (key == Qt.Key_2): self.setFSLayout(2) elif (key == Qt.Key_3): self.setFSLayout(3) elif (key == Qt.Key_4): self.setFSLayout(4) elif (key == Qt.Key_5): self.b = (self.b + 1) % 2 self.switch.setValue({'power':self.b}) self.light.setValue({'power':self.b}) #self.warn.setValue({'power':self.b,'gene':1,'oil':1,'fuel':1,'gear':1}) elif (key == Qt.Key_D): self.setFSLayout(0) elif (key == Qt.Key_Space): self.setWindowState(self.windowState() ^ Qt.WindowFullScreen) elif (key == Qt.Key_Q): self.close() elif (key == Qt.Key_7): self.setPanel(-1) elif (key == Qt.Key_9): self.setPanel(1) elif (key == Qt.Key_B): self.airspeed.setup({'unit':'kt'}) elif (key == Qt.Key_N): self.airspeed.setup({'unit':'kmh'}) elif (key == Qt.Key_C): self.light.setValue({'strobe':1}) elif (key == Qt.Key_U): self.socket.send("com", "outer", -1) elif (key == Qt.Key_I): self.socket.send("com", "outer", 1) elif (key == Qt.Key_O): self.socket.send("com", "inner", -1) elif (key == Qt.Key_P): self.socket.send("com", "inner", 1) elif (key == Qt.Key_K): self.socket.send("com", "button", 1) 
elif (key == Qt.Key_L): self.socket.send("nav", "coder", -1) elif (key == Qt.Key_M): self.socket.send("nav", "coder", 1) def setBackground(self, pic): palette = QPalette() pixmap = QPixmap(pic) brush = QBrush(pixmap) palette.setBrush(QPalette.Background, brush) self.setPalette(palette) def setPanel(self, direction): index = (self.activeLayout + direction) % NUM_LAYOUT if (index == 0): index = (index + direction) % NUM_LAYOUT self.setFSLayout(index) def setFSLayout(self, index): self.activeLayout = index #self.stack.setCurrentIndex(index) layout = self.stack.widget(index).layout() populateLayout(self, layout, index) self.stack.setCurrentIndex(index) def setUDPSocket(self, socket): self.socket = socket socket.switch.connect(self.switch.setValue) socket.light.connect(self.light.setValue) socket.warn.connect(self.warn.setValue) socket.radio.connect(self.radio.setValue) socket.airspeed.connect(self.airspeed.setValue) socket.load.connect(self.accelerometer.setValue) socket.attitude.connect(self.attitude.setValue) socket.altitude.connect(self.altitude.setValue) socket.turnslip.connect(self.turnslip.setValue) socket.dg.connect(self.dg.setValue) socket.vario.connect(self.vario.setValue) socket.vacuum.connect(self.vacuum.setValue) socket.flow.connect(self.manifold.setValue) socket.fuel.connect(self.fuel.setValue) socket.oil.connect(self.oil.setValue) socket.vor.connect(self.vor.setValue) socket.adf.connect(self.adf.setValue) socket.engine.connect(self.engine.setValue) socket.trim.connect(self.trim.setValue) socket.magneto.connect(self.magneto.setValue) socket.panel.connect(self.setPanel) socket.debug.connect(self.debug.appendText) def main(): app = QApplication(sys.argv) app.setOverrideCursor(Qt.BlankCursor) thread = QThread() socket = fsSocket() socket.moveToThread(thread) socket.finished.connect(thread.quit) thread.started.connect(socket.run) fs = Main() fs.setUDPSocket(socket) fs.setWindowTitle("FS Panel") fs.resize(1280, 1024) fs.setGeometry(0,0,1280,1024) fs.move(0,0) #fs.show() fs.showFullScreen() ''' th = QThread() fsw = fsWorker() fsw.moveToThread(th) fsw.finished.connect(th.quit) th.started.connect(fsw.run) fsw.layout.connect(fs.setFSLayout) th.start() ''' thread.start() app.exec_() socket.stop() thread.quit() sys.exit(0) if __name__ == "__main__": main()
neksysinfo/fspanel
fsPanel.py
Python
gpl-3.0
7,462
[ "ADF" ]
1cdf3fe9b0c7f8fd0b550f4cf46db2bba586beb070829f51e59255cd042eec59
#!/usr/bin/env python # -*- coding: utf-8 -*- """ sessions2trash.py Run this script in a web2py environment shell e.g. python web2py.py -S app If models are loaded (-M option) auth.settings.expiration is assumed for sessions without an expiration. If models are not loaded, sessions older than 60 minutes are removed. Use the --expiration option to override these values. Typical usage: # Delete expired sessions every 5 minutes nohup python web2py.py -S app -M -R scripts/sessions2trash.py & # Delete sessions older than 60 minutes regardless of expiration, # with verbose output, then exit. python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 3600 -f -v # Delete all sessions regardless of expiry and exit. python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 0 """ from __future__ import with_statement from gluon.storage import Storage from optparse import OptionParser import cPickle import datetime import os import stat import time EXPIRATION_MINUTES = 60 SLEEP_MINUTES = 5 VERSION = 0.3 class SessionSet(object): """Class representing a set of sessions""" def __init__(self, expiration, force, verbose): self.expiration = expiration self.force = force self.verbose = verbose def get(self): """Get session files/records.""" raise NotImplementedError def trash(self): """Trash expired sessions.""" now = datetime.datetime.now() for item in self.get(): status = 'OK' last_visit = item.last_visit_default() try: session = item.get() if session.auth: if session.auth.expiration and not self.force: self.expiration = session.auth.expiration if session.auth.last_visit: last_visit = session.auth.last_visit except: pass age = 0 if last_visit: age = total_seconds(now - last_visit) if age > self.expiration or not self.expiration: item.delete() status = 'trashed' if self.verbose > 1: print 'key: %s' % str(item) print 'expiration: %s seconds' % self.expiration print 'last visit: %s' % str(last_visit) print 'age: %s seconds' % age print 'status: %s' % status print '' elif self.verbose > 0: print('%s %s' % (str(item), status)) class SessionSetDb(SessionSet): """Class representing a set of sessions stored in database""" def __init__(self, expiration, force, verbose): SessionSet.__init__(self, expiration, force, verbose) def get(self): """Return list of SessionDb instances for existing sessions.""" sessions = [] tablename = 'web2py_session' if request.application: tablename = 'web2py_session_' + request.application if tablename in db: for row in db(db[tablename].id > 0).select(): sessions.append(SessionDb(row)) return sessions class SessionSetFiles(SessionSet): """Class representing a set of sessions stored in flat files""" def __init__(self, expiration, force, verbose): SessionSet.__init__(self, expiration, force, verbose) def get(self): """Return list of SessionFile instances for existing sessions.""" path = os.path.join(request.folder, 'sessions') return [SessionFile(os.path.join(path, x)) for x in os.listdir(path)] class SessionDb(object): """Class representing a single session stored in database""" def __init__(self, row): self.row = row def delete(self): self.row.delete_record() db.commit() def get(self): session = Storage() session.update(cPickle.loads(self.row.session_data)) return session def last_visit_default(self): return self.row.modified_datetime def __str__(self): return self.row.unique_key class SessionFile(object): """Class representing a single session stored as a flat file""" def __init__(self, filename): self.filename = filename def delete(self): os.unlink(self.filename) def 
get(self): session = Storage() with open(self.filename, 'rb+') as f: session.update(cPickle.load(f)) return session def last_visit_default(self): return datetime.datetime.fromtimestamp( os.stat(self.filename)[stat.ST_MTIME]) def __str__(self): return self.filename def total_seconds(delta): """ Adapted from Python 2.7's timedelta.total_seconds() method. Args: delta: datetime.timedelta instance. """ return (delta.microseconds + (delta.seconds + (delta.days * 24 * 3600)) * \ 10 ** 6) / 10 ** 6 def main(): """Main processing.""" usage = '%prog [options]' + '\nVersion: %s' % VERSION parser = OptionParser(usage=usage) parser.add_option('-f', '--force', action='store_true', dest='force', default=False, help=('Ignore session expiration. ' 'Force expiry based on -x option or auth.settings.expiration.') ) parser.add_option('-o', '--once', action='store_true', dest='once', default=False, help='Delete sessions, then exit.', ) parser.add_option('-s', '--sleep', dest='sleep', default=SLEEP_MINUTES * 60, type="int", help='Number of seconds to sleep between executions. Default 300.', ) parser.add_option('-v', '--verbose', default=0, action='count', help="print verbose output, a second -v increases verbosity") parser.add_option('-x', '--expiration', dest='expiration', default=None, type="int", help='Expiration value for sessions without expiration (in seconds)', ) (options, unused_args) = parser.parse_args() expiration = options.expiration if expiration is None: try: expiration = auth.settings.expiration except: expiration = EXPIRATION_MINUTES * 60 set_db = SessionSetDb(expiration, options.force, options.verbose) set_files = SessionSetFiles(expiration, options.force, options.verbose) while True: set_db.trash() set_files.trash() if options.once: break else: if options.verbose: print 'Sleeping %s seconds' % (options.sleep) time.sleep(options.sleep) main()
andersonsilvade/python_C
Python32/web2py/scripts/sessions2trash.py
Python
mit
6,674
[ "VisIt" ]
3d152439ac0b36b665316a9c11007f7775c80aa7250e60a987a9cf66cd71f690
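total_seconds above backports datetime.timedelta.total_seconds() for Python versions before 2.7. A quick check of the arithmetic (note that under Python 2 the final integer division floors the fractional part, which the 2.7 built-in keeps):

import datetime

delta = datetime.timedelta(days=1, seconds=30, microseconds=500000)
manual = (delta.microseconds +
          (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
print(manual)   # 86430 under Python 2; the 2.7+ built-in returns 86430.5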
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # this script tests vtkImageSlab with various axes permutations, # in order to cover a nasty set of "if" statements that check # the intersections of the raster lines with the input bounding box. # Image pipeline reader = vtk.vtkImageReader() reader.ReleaseDataFlagOff() reader.SetDataByteOrderToLittleEndian() reader.SetDataExtent(0,63,0,63,1,93) reader.SetDataSpacing(3.2,3.2,1.5) reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter") reader.SetDataMask(0x7fff) slab1 = vtk.vtkImageSlab() slab1.SetInputConnection(reader.GetOutputPort()) slab1.SetOperationToMean() slab1.TrapezoidIntegrationOn() slab1.SetOrientationToZ() slab2 = vtk.vtkImageSlab() slab2.SetInputConnection(reader.GetOutputPort()) slab2.SetOperationToMax() slab2.MultiSliceOutputOff() slab2.SetOutputScalarTypeToInputScalarType() slab3 = vtk.vtkImageSlab() slab3.SetInputConnection(reader.GetOutputPort()) slab3.SetOperationToSum() slab3.SetOrientationToX() slab3.MultiSliceOutputOn() slab3.SetOutputScalarTypeToDouble() reslice3 = vtk.vtkImageReslice() reslice3.SetInputConnection(slab3.GetOutputPort()) reslice3.SetResliceAxesDirectionCosines([0,1,0,0,0,-1,1,0,0]) reslice3.SetOutputSpacing(3.2,3.2,3.2) reslice3.SetOutputExtent(0,74,0,74,0,0) slab4 = vtk.vtkImageSlab() slab4.SetInputConnection(reader.GetOutputPort()) slab4.SetOperationToMax() slab4.SetOrientation(0) slab4.MultiSliceOutputOn() slab4.SetOutputScalarTypeToFloat() reslice4 = vtk.vtkImageReslice() reslice4.SetInputConnection(slab4.GetOutputPort()) reslice4.SetResliceAxesDirectionCosines([0,1,0,0,0,-1,1,0,0]) reslice4.SetOutputSpacing(3.2,3.2,3.2) reslice4.SetOutputExtent(0,74,0,74,0,0) slab5 = vtk.vtkImageSlab() slab5.SetInputConnection(reader.GetOutputPort()) slab5.SetOperationToMean() slab5.SetOrientationToY() slab5.MultiSliceOutputOn() reslice5 = vtk.vtkImageReslice() reslice5.SetInputConnection(slab5.GetOutputPort()) reslice5.SetResliceAxesDirectionCosines([1,0,0,0,0,-1,0,1,0]) reslice5.SetOutputSpacing(3.2,3.2,3.2) reslice5.SetOutputExtent(0,74,0,74,0,0) slab6 = vtk.vtkImageSlab() slab6.SetInputConnection(reader.GetOutputPort()) slab6.SetOperationToMax() slab6.SetOrientation(1) slab6.MultiSliceOutputOn() reslice6 = vtk.vtkImageReslice() reslice6.SetInputConnection(slab6.GetOutputPort()) reslice6.SetResliceAxesDirectionCosines([1,0,0,0,0,-1,0,1,0]) reslice6.SetOutputSpacing(3.2,3.2,3.2) reslice6.SetOutputExtent(0,74,0,74,0,0) mapper1 = vtk.vtkImageMapper() mapper1.SetInputConnection(slab1.GetOutputPort()) mapper1.SetColorWindow(2000) mapper1.SetColorLevel(1000) mapper1.SetZSlice(0) mapper2 = vtk.vtkImageMapper() mapper2.SetInputConnection(slab2.GetOutputPort()) mapper2.SetColorWindow(2000) mapper2.SetColorLevel(1000) mapper2.SetZSlice(0) mapper3 = vtk.vtkImageMapper() mapper3.SetInputConnection(reslice3.GetOutputPort()) mapper3.SetColorWindow(128000) mapper3.SetColorLevel(64000) mapper3.SetZSlice(0) mapper4 = vtk.vtkImageMapper() mapper4.SetInputConnection(reslice4.GetOutputPort()) mapper4.SetColorWindow(2000) mapper4.SetColorLevel(1000) mapper4.SetZSlice(0) mapper5 = vtk.vtkImageMapper() mapper5.SetInputConnection(reslice5.GetOutputPort()) mapper5.SetColorWindow(2000) mapper5.SetColorLevel(1000) mapper5.SetZSlice(0) mapper6 = vtk.vtkImageMapper() mapper6.SetInputConnection(reslice6.GetOutputPort()) mapper6.SetColorWindow(2000) mapper6.SetColorLevel(1000) mapper6.SetZSlice(0) actor1 = vtk.vtkActor2D() 
actor1.SetMapper(mapper1) actor2 = vtk.vtkActor2D() actor2.SetMapper(mapper2) actor3 = vtk.vtkActor2D() actor3.SetMapper(mapper3) actor4 = vtk.vtkActor2D() actor4.SetMapper(mapper4) actor5 = vtk.vtkActor2D() actor5.SetMapper(mapper5) actor6 = vtk.vtkActor2D() actor6.SetMapper(mapper6) imager1 = vtk.vtkRenderer() imager1.AddActor2D(actor1) imager1.SetViewport(0.0,0.0,0.3333,0.5) imager2 = vtk.vtkRenderer() imager2.AddActor2D(actor2) imager2.SetViewport(0.0,0.5,0.3333,1.0) imager3 = vtk.vtkRenderer() imager3.AddActor2D(actor3) imager3.SetViewport(0.3333,0.0,0.6667,0.5) imager4 = vtk.vtkRenderer() imager4.AddActor2D(actor4) imager4.SetViewport(0.3333,0.5,0.6667,1.0) imager5 = vtk.vtkRenderer() imager5.AddActor2D(actor5) imager5.SetViewport(0.6667,0.0,1.0,0.5) imager6 = vtk.vtkRenderer() imager6.AddActor2D(actor6) imager6.SetViewport(0.6667,0.5,1.0,1.0) imgWin = vtk.vtkRenderWindow() imgWin.AddRenderer(imager1) imgWin.AddRenderer(imager2) imgWin.AddRenderer(imager3) imgWin.AddRenderer(imager4) imgWin.AddRenderer(imager5) imgWin.AddRenderer(imager6) imgWin.SetSize(225,150) imgWin.Render() # --- end of script --
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestImageProjection.py
Python
gpl-3.0
4,660
[ "VTK" ]
65232acfcd64a0bcfbf07c6e2a5013981a7231c85d0a5025850ee31d3fcdc61d
#BEGIN_HEADER import sys import os import glob import json #sys.path.insert(0, '/kb/dev_container/modules/genome_util/lib/biokbase/genome_util') import script_util #from biokbase.workspace.client import Workspace #from workspace.client import Workspace #END_HEADER class KBaseGenomeUtil: ''' Module Name: KBaseGenomeUtil Module Description: ''' ######## WARNING FOR GEVENT USERS ####### # Since asynchronous IO can lead to methods - even the same method - # interrupting each other, you must be *very* careful when using global # state. A method could easily clobber the state set by another while # the latter method is running. ######################################### #BEGIN_CLASS_HEADER #END_CLASS_HEADER # config contains contents of config file in a hash or None if it couldn't # be found def __init__(self, config): #BEGIN_CONSTRUCTOR #END_CONSTRUCTOR pass def blast_against_genome(self, ctx, params): # ctx is the context object # return variables are: returnVal #BEGIN blast_against_genome print "start" if len(params['query']) > 5: sequence=params['query'] else: #error message: your sequence are too short print "error" #else: #sequence=script_util.get_seq(params['gene_id']) #sequence=(params['gene_id']) genome_id='Bifidobacterium_animalis_subsp._lactis_AD011' workspaceid='plane83:1436884411390' #print "generate input file for query sequence\n" target=open('tmp_seq','w') target.write(">") target.write("input_seq\n") target.write(sequence) target.close() #print "downloading genome object from workspace\n" #genome=script_util.get_genome('genome_id','workspaceid',ctx['token']) #print "finished downloading\n"; if os.path.exists('blast_db'): files=glob.glob('blast_db/*') for f in files: os.remove(f) if not os.path.exists('blast_db'): os.makedirs('blast_db') #with open('tmp_data','w') as outfile: # json.dump(genome, outfile) if(params['blast_program'] == 'blastp'): formatdb_type='T' #extract protein sequences from the genome object res1=open('tmp_data').read() res=json.loads(res1) target=open('blast_db/tmp_genome_fasta','w') for gene in res['data']['features']: if 'protein_translation' in gene.keys(): target.write(">" + gene['id'] + "\n" + gene['protein_translation'] + "\n") target.close() if(params['blast_program'] == 'blastn'): formatdb_type='F' #extract dna sequence from the genome object res1=open('tmp_data').read() res=json.loads(res1) target=open('blast_db/tmp_genome_fasta','w') for gene in res['data']['features']: if 'dna_sequence' in gene.keys(): target.write(">" + gene['id'] + "\n" + gene['dna_sequence'] + "\n") target.close() #os.remove('tmp_data') #print "formatdb..\n" #format database for blast cmdstring="formatdb -i blast_db/tmp_genome_fasta -p %s" %(formatdb_type) os.system(cmdstring) #blast search cmdstring="blastall -p %s -i tmp_seq -m 9 -o tmp_out -d blast_db/tmp_genome_fasta -e %s" % (params['blast_program'], params['e-value']) os.system(cmdstring) os.remove('tmp_seq') #extract the blast output res=script_util.extract_blast_output('tmp_out') os.remove('tmp_out') res1=json.loads(res) print "finished" returnVal = res1 #END blast_against_genome # At some point might do deeper type checking... if not isinstance(returnVal, list): raise ValueError('Method blast_against_genome return value ' + 'returnVal is not type list as required.') # return the results return [returnVal] def compare_genome_groups(self, ctx, input): # ctx is the context object # return variables are: returnVal #BEGIN compare_genome_groups #END compare_genome_groups # At some point might do deeper type checking... 
if not isinstance(returnVal, basestring): raise ValueError('Method compare_genome_groups return value ' + 'returnVal is not type basestring as required.') # return the results return [returnVal]
pranjan77/genome_util
lib/biokbase/genome_util/KBaseGenomeUtilImpl.py
Python
mit
4,192
[ "BLAST" ]
c5a3f3bb7b8979be04367044b2a196bac2cd1a7050828b86be3937fe71ccedcf
from flask import Blueprint, make_response, render_template from octopus.core import app blueprint = Blueprint('configjs', __name__) # this allows us to serve our standard javascript config @blueprint.route("/javascript/config.js") def javascript_config(): configs = {} for key, val in app.config.iteritems(): if key.startswith("CLIENTJS_"): nk = key[9:].lower() configs[nk] = val resp = make_response(render_template("js/config.js.jinja", configs=configs)) resp.mimetype = "application/javascript" return resp
JiscPER/magnificent-octopus
octopus/modules/clientjs/configjs.py
Python
apache-2.0
566
[ "Octopus" ]
f3eb6d8998e71179d76750bd2279acbcf374baf43e53feb0232f418b48c929e9
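The view above ships to the browser only the settings whose keys carry the CLIENTJS_ prefix, with the prefix stripped and the remainder lowercased. The key filtering itself is plain dict work; a standalone sketch with invented sample keys:

config = {
    "SECRET_KEY": "xyz",              # stays server-side
    "CLIENTJS_API_BASE": "/api/v1",   # exposed to the client
    "CLIENTJS_PAGE_SIZE": 25,
}

configs = {}
for key, val in config.items():
    if key.startswith("CLIENTJS_"):
        configs[key[9:].lower()] = val   # len("CLIENTJS_") == 9

print(configs)   # {'api_base': '/api/v1', 'page_size': 25}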
#!/usr/bin/env python # -*- coding: utf-8 -*- __all__ = [] from molecules import Molecule import utilz # needed for the center_and_xz / E_at_sphere / ut2s / b_at_sphere helpers used below def plot_maya( mol, key = lambda x: (x[0].r, x[1].r, x[2].r), copy = True ): """Wrapper function to plot quivers of beta. For a Residue: at1 is the first atom to center around; at2 is the atom which will lie on the z axis together with at1; at3 will be projected to lie in the zx-plane """ assert isinstance( mol, Molecule ) from mayavi import mlab if copy: copy = mol.copy() else: copy = mol copy.populate_bonds() p1, p2, p3 = key( copy ) v, t1, t2, t3 = utilz.center_and_xz( p1, p2, p3 ) # plotting section copy.translate_by_r( v ) copy.inv_rotate( t1, t2, t3 ) x, y, z = utilz.E_at_sphere(r_points=5) bv = utilz.b_at_sphere( utilz.ut2s(copy.p.b) , x, y, z ) mlab.figure(figure=None, bgcolor=(1,1,1), fgcolor=None, engine=None, size=(400, 350)) mlab.quiver3d( x, y, z, bv[...,0], bv[...,1], bv[...,2], colormap = 'BrBG' ) #Plot bonds for each in copy.bond_dict: for key in copy.bond_dict[ each ]: mlab.plot3d( [key.x, each.x], [key.y, each.y], [key.z, each.z], color = (0,0,0,) ) # color_dict / scale_factor_dict are expected to map element symbols to plot colors and marker sizes (defined elsewhere in this module) for i in copy: mlab.points3d([i.x], [i.y], [i.z], color = color_dict[ i.element ], resolution = 50, scale_factor = scale_factor_dict[ i.element] )
fishstamp82/moltools
moltools/plot_functions.py
Python
mit
1,470
[ "Mayavi" ]
f2d60d61eabd55c5274c1b3e0670b0a95f96d0770486c63bbedaf4d94dbb4a99
#!/usr/bin/python # # CCLib_proxy Utilities # Copyright (c) 2014 Ioannis Charalampidis # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import print_function from cclib import hexdump, getOptions, openCCDebugger import sys # Get serial port either form environment or from arguments opts = getOptions("Generic CCDebugger CPU Resume Tool") # Open debugger try: dbg = openCCDebugger(opts['port'], enterDebug=opts['enter']) except Exception as e: print("ERROR: %s" % str(e)) sys.exit(1) # Check if we are already outside the debug mode if (dbg.debugStatus & 0x20) == 0: print("CPU Already running") sys.exit(0) # Exit debug mode & resume CPU print("Exiting DEBUG mode...") dbg.exit() if (dbg.debugStatus & 0x20) == 0: print("CPU is now running") else: print("ERROR: Could not exit from debug mode") # Done print("")
wavesoft/CCLib
Python/cc_resume.py
Python
gpl-3.0
1,430
[ "cclib" ]
5ad00fe5793757b0f02bebd6713e94891657ac200b2547e31610c011ca391441
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2019 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This file is part of Psi4. # # Psi4 is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, version 3. # # Psi4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with Psi4; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # import pickle from . import dependency_check from qcelemental import constants from psi4.driver import psifiles as psif from psi4.driver.molutil import * from psi4.driver.inputparser import process_input from psi4.driver.p4util.util import * from psi4.driver.p4util.testing import * from psi4.driver.p4util.fcidump import * from psi4.driver.p4util.text import * from psi4.driver.qmmm import QMMM from psi4.driver.plugin import * from psi4.driver import gaussian_n from psi4.driver import aliases from psi4.driver import diatomic from psi4.driver import wrapper_database from psi4.driver import wrapper_autofrag from psi4.driver import schema_wrapper from psi4.driver import schema_wrapper as json_wrapper # Deprecate in 1.4 from psi4.driver import frac from psi4.driver.driver import * # Single functions from psi4.driver.driver_cbs import cbs from psi4.driver.p4util.python_helpers import set_options, set_module_options, pcm_helper, basis_helper
CDSherrill/psi4
psi4/driver/__init__.py
Python
lgpl-3.0
1,868
[ "Psi4" ]
1b838261ad5dd47fafbc3fc989db8332443e6c206470590595ec14e3f38ba0af
# coding: utf-8
#
# Copyright 2014 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN.  If not, see <http://www.gnu.org/licenses/>.

import base64
import string

from StringIO import StringIO

import numpy
import nltk

from wordcloud import make_wordcloud

from pypln.backend.celery_task import PyPLNTask


def filter_stopwords(fdist, lang):
    long_name = {'en': 'english', 'pt': 'portuguese'}
    stopwords = list(string.punctuation)
    if lang in long_name:
        stopwords += nltk.corpus.stopwords.words(long_name[lang])
    return filter(lambda pair: pair[0].lower() not in stopwords, fdist)


class WordCloud(PyPLNTask):
    def process(self, document):
        fdist = filter_stopwords(document['freqdist'], document['language'])
        words = numpy.array([t[0] for t in fdist])
        counts = numpy.array([t[1] for t in fdist])
        wordcloud_img = make_wordcloud(words, counts)
        fd = StringIO()
        wordcloud_img.save(fd, format="PNG")
        fd.seek(0)
        result = {'wordcloud': base64.b64encode(fd.read())}
        fd.close()
        return result
NAMD/pypln.backend
pypln/backend/workers/word_cloud.py
Python
gpl-3.0
1,702
[ "NAMD" ]
212d243eb11c69b7f9a3aad04ba63ab47a2e4be0d0edb6855268c5fac7be904c
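The worker above hinges on `filter_stopwords`: it drops punctuation and language-specific stopwords from a `(token, count)` frequency distribution, comparing lowercased tokens, before the cloud is rendered. A minimal sketch of the same predicate on toy data; the `stopwords` set here is a stand-in for `string.punctuation` plus the NLTK stopword list, so no corpus download is needed.

freqdist = [("The", 42), ("cloud", 7), (",", 30), ("word", 9)]

# Stand-in for string.punctuation plus nltk.corpus.stopwords.words('english').
stopwords = {",", "the"}

# Same predicate as filter_stopwords: compare lowercased tokens.
kept = [pair for pair in freqdist if pair[0].lower() not in stopwords]
print(kept)  # [('cloud', 7), ('word', 9)]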
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

import unittest

from pymatgen.analysis.hhi import HHIModel


class HHIModelTest(unittest.TestCase):
    def test_hhi(self):
        hhi = HHIModel()
        self.assertEqual(hhi.get_hhi("He"), (3200, 3900))
        self.assertEqual(hhi.get_hhi_production("He"), 3200)
        self.assertEqual(hhi.get_hhi_reserve("He"), 3900)

        self.assertAlmostEqual(hhi.get_hhi_production("Li2O"), 1614.96, 1)
        self.assertAlmostEqual(hhi.get_hhi_reserve("Li2O"), 2218.90, 1)

        self.assertEqual(hhi.get_hhi_designation(1400), "low")
        self.assertEqual(hhi.get_hhi_designation(1800), "medium")
        self.assertEqual(hhi.get_hhi_designation(3000), "high")
        self.assertEqual(hhi.get_hhi_designation(None), None)


if __name__ == "__main__":
    unittest.main()
vorwerkc/pymatgen
pymatgen/analysis/tests/test_hhi.py
Python
mit
874
[ "pymatgen" ]
5245d3d0cddcd9dd6e74a893342ea5f33a5a8ca214fe9696abac3d85034666f3
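The test above doubles as a compact API reference: `get_hhi` returns a `(production, reserve)` pair of Herfindahl-Hirschman indices for a composition, and `get_hhi_designation` buckets a raw value into "low" / "medium" / "high". A usage sketch built only from the calls the test exercises:

from pymatgen.analysis.hhi import HHIModel

hhi = HHIModel()

production, reserve = hhi.get_hhi("He")  # (3200, 3900) per the test above

# The assertions imply the bucketing: 1400 -> "low", 1800 -> "medium",
# 3000 -> "high".
for value in (production, reserve):
    print(value, hhi.get_hhi_designation(value))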
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function

import numpy as np
from six import StringIO

from skbio.stats.distance import DistanceMatrix
from skbio.tree import TreeNode


def nj(dm, disallow_negative_branch_length=True, result_constructor=None):
    """ Apply neighbor joining for phylogenetic reconstruction.

    Parameters
    ----------
    dm : skbio.DistanceMatrix
        Input distance matrix containing distances between OTUs.
    disallow_negative_branch_length : bool, optional
        Neighbor joining can result in negative branch lengths, which don't
        make sense in an evolutionary context. If `True`, negative branch
        lengths will be returned as zero, a common strategy for handling this
        issue that was proposed by the original developers of the algorithm.
    result_constructor : function, optional
        Function to apply to construct the result object. This must take a
        newick-formatted string as input. The result of applying this function
        to a newick-formatted string will be returned from this function. This
        defaults to ``lambda x: TreeNode.read(StringIO(x), format='newick')``.

    Returns
    -------
    TreeNode
        By default, the result object is a `TreeNode`, though this can be
        overridden by passing `result_constructor`.

    See Also
    --------
    TreeNode.root_at_midpoint

    Notes
    -----
    Neighbor joining was initially described in Saitou and Nei (1987) [1]_.
    The example presented here is derived from the Wikipedia page on neighbor
    joining [2]_. The Phylip manual also describes the method [3]_ and Phylip
    itself provides an implementation which is useful for comparison.

    Neighbor joining, by definition, creates unrooted trees. One strategy for
    rooting the resulting trees is midpoint rooting, which is accessible as
    ``TreeNode.root_at_midpoint``.

    References
    ----------
    .. [1] Saitou N, and Nei M. (1987) "The neighbor-joining method: a new
       method for reconstructing phylogenetic trees." Molecular Biology and
       Evolution. PMID: 3447015.
    .. [2] http://en.wikipedia.org/wiki/Neighbour_joining
    .. [3] http://evolution.genetics.washington.edu/phylip/doc/neighbor.html

    Examples
    --------
    Define a new distance matrix object describing the distances between five
    OTUs: a, b, c, d, and e.

    >>> from skbio import DistanceMatrix
    >>> from skbio.tree import nj

    >>> data = [[0,  5,  9,  9,  8],
    ...         [5,  0, 10, 10,  9],
    ...         [9, 10,  0,  8,  7],
    ...         [9, 10,  8,  0,  3],
    ...         [8,  9,  7,  3,  0]]
    >>> ids = list('abcde')
    >>> dm = DistanceMatrix(data, ids)

    Construct the neighbor joining tree representing the relationship between
    those OTUs. This is returned as a TreeNode object.

    >>> tree = nj(dm)
    >>> print(tree.ascii_art())
              /-d
             |
             |          /-c
             |---------|
    ---------|         |          /-b
             |          \--------|
             |                    \-a
             |
              \-e

    Again, construct the neighbor joining tree, but instead return the newick
    string representing the tree, rather than the TreeNode object. (Note that
    in this example the string output is truncated when printed to facilitate
    rendering.)

    >>> newick_str = nj(dm, result_constructor=str)
    >>> print(newick_str[:55], "...")
    (d:2.000000, (c:4.000000, (b:3.000000, a:2.000000):3.00 ...

    """
    if dm.shape[0] < 3:
        raise ValueError(
            "Distance matrix must be at least 3x3 to "
            "generate a neighbor joining tree.")

    if result_constructor is None:
        def result_constructor(x):
            return TreeNode.read(StringIO(x), format='newick')

    # initialize variables
    node_definition = None

    # while there are still more than three distances in the distance matrix,
    # join neighboring nodes.
    while dm.shape[0] > 3:
        # compute the Q matrix
        q = _compute_q(dm)

        # identify the pair of nodes that have the lowest Q value. if multiple
        # pairs have equally low Q values, the first pair identified (closest
        # to the top-left of the matrix) will be chosen. these will be joined
        # in the current node.
        idx1, idx2 = _lowest_index(q)
        pair_member_1 = dm.ids[idx1]
        pair_member_2 = dm.ids[idx2]
        # determine the distance of each node to the new node connecting them.
        pair_member_1_len, pair_member_2_len = _pair_members_to_new_node(
            dm, idx1, idx2, disallow_negative_branch_length)
        # define the new node in newick style
        node_definition = "(%s:%f, %s:%f)" % (pair_member_1,
                                              pair_member_1_len,
                                              pair_member_2,
                                              pair_member_2_len)
        # compute the new distance matrix, which will contain distances of all
        # other nodes to this new node
        dm = _compute_collapsed_dm(
            dm, pair_member_1, pair_member_2,
            disallow_negative_branch_length=disallow_negative_branch_length,
            new_node_id=node_definition)

    # When there are three distances left in the distance matrix, we have a
    # fully defined tree. The last node is internal, and its distances are
    # defined by these last three values.
    # First determine the distance between the last two nodes to be joined in
    # a pair...
    pair_member_1 = dm.ids[1]
    pair_member_2 = dm.ids[2]
    pair_member_1_len, pair_member_2_len = \
        _pair_members_to_new_node(dm, pair_member_1, pair_member_2,
                                  disallow_negative_branch_length)
    # ...then determine their distance to the other remaining node, but first
    # handle the trivial case where the input dm was only 3 x 3
    node_definition = node_definition or dm.ids[0]
    internal_len = _otu_to_new_node(
        dm, pair_member_1, pair_member_2, node_definition,
        disallow_negative_branch_length=disallow_negative_branch_length)
    # ...and finally create the newick string describing the whole tree.
    newick = "(%s:%f, %s:%f, %s:%f);" % (pair_member_1, pair_member_1_len,
                                         node_definition, internal_len,
                                         pair_member_2, pair_member_2_len)

    # package the result as requested by the user and return it.
    return result_constructor(newick)


def _compute_q(dm):
    """Compute Q matrix, used to identify the next pair of nodes to join.

    """
    q = np.zeros(dm.shape)
    n = dm.shape[0]
    for i in range(n):
        for j in range(i):
            q[i, j] = q[j, i] = \
                ((n - 2) * dm[i, j]) - dm[i].sum() - dm[j].sum()
    return DistanceMatrix(q, dm.ids)


def _compute_collapsed_dm(dm, i, j, disallow_negative_branch_length,
                          new_node_id):
    """Return the distance matrix resulting from joining ids i and j in a node.

    If the input distance matrix has shape ``(n, n)``, the result will have
    shape ``(n-1, n-1)`` as the ids `i` and `j` are collapsed to a single new
    id.

    """
    in_n = dm.shape[0]
    out_n = in_n - 1
    out_ids = [new_node_id]
    out_ids.extend([e for e in dm.ids if e not in (i, j)])
    result = np.zeros((out_n, out_n))
    for idx1, out_id1 in enumerate(out_ids[1:]):
        result[0, idx1 + 1] = result[idx1 + 1, 0] = _otu_to_new_node(
            dm, i, j, out_id1, disallow_negative_branch_length)
        for idx2, out_id2 in enumerate(out_ids[1:idx1 + 1]):
            result[idx1 + 1, idx2 + 1] = result[idx2 + 1, idx1 + 1] = \
                dm[out_id1, out_id2]
    return DistanceMatrix(result, out_ids)


def _lowest_index(dm):
    """Return the index of the lowest value in the input distance matrix.

    If there are ties for the lowest value, the index of the top-left-most
    occurrence of that value will be returned.

    This should ultimately be replaced with a new DistanceMatrix object
    method (#228).

    """
    lowest_value = np.inf
    for i in range(dm.shape[0]):
        for j in range(i):
            curr_index = i, j
            curr_value = dm[curr_index]
            if curr_value < lowest_value:
                lowest_value = curr_value
                result = curr_index
    return result


def _otu_to_new_node(dm, i, j, k, disallow_negative_branch_length):
    """Return the distance between a new node and some other node.

    Parameters
    ----------
    dm : skbio.DistanceMatrix
        The input distance matrix.
    i, j : str
        Identifiers of entries in the distance matrix to be collapsed. These
        get collapsed to a new node, internally represented as `u`.
    k : str
        Identifier of the entry in the distance matrix for which distance to
        `u` will be computed.
    disallow_negative_branch_length : bool
        Neighbor joining can result in negative branch lengths, which don't
        make sense in an evolutionary context. If `True`, negative branch
        lengths will be returned as zero, a common strategy for handling this
        issue that was proposed by the original developers of the algorithm.

    """
    k_to_u = 0.5 * (dm[i, k] + dm[j, k] - dm[i, j])

    if disallow_negative_branch_length and k_to_u < 0:
        k_to_u = 0

    return k_to_u


def _pair_members_to_new_node(dm, i, j, disallow_negative_branch_length):
    """Return the distance between a new node and descendants of that new node.

    Parameters
    ----------
    dm : skbio.DistanceMatrix
        The input distance matrix.
    i, j : str
        Identifiers of entries in the distance matrix to be collapsed (i.e.,
        the descendants of the new node, which is internally represented as
        `u`).
    disallow_negative_branch_length : bool
        Neighbor joining can result in negative branch lengths, which don't
        make sense in an evolutionary context. If `True`, negative branch
        lengths will be returned as zero, a common strategy for handling this
        issue that was proposed by the original developers of the algorithm.

    """
    n = dm.shape[0]
    i_to_j = dm[i, j]
    i_to_u = (0.5 * i_to_j) + ((dm[i].sum() - dm[j].sum()) / (2 * (n - 2)))

    if disallow_negative_branch_length and i_to_u < 0:
        i_to_u = 0

    j_to_u = i_to_j - i_to_u

    if disallow_negative_branch_length and j_to_u < 0:
        j_to_u = 0

    return i_to_u, j_to_u
jensreeder/scikit-bio
skbio/tree/_nj.py
Python
bsd-3-clause
10,946
[ "scikit-bio" ]
7fa8f2485f6767dd5ac0171b9542e9a8635989c3c9255b575df4f10d824cf5b9
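The inner double loop in `_compute_q` implements Q(i, j) = (n - 2) * d(i, j) - sum_k d(i, k) - sum_k d(j, k). A sketch of the same computation vectorized with NumPy (my own illustration, not scikit-bio code; the diagonal is zeroed explicitly to match the loop, which never writes i == j):

import numpy as np

def compute_q_vectorized(d):
    """Vectorized Q matrix for neighbor joining.

    `d` is a square, symmetric distance matrix as a NumPy array.
    """
    n = d.shape[0]
    row_sums = d.sum(axis=1)
    # Broadcast the row/column sums: q[i, j] = (n - 2) * d[i, j] - s[i] - s[j]
    q = (n - 2) * d - row_sums[:, None] - row_sums[None, :]
    np.fill_diagonal(q, 0)  # the loop version leaves the diagonal at zero
    return q

# The distance matrix from the nj docstring example; q[0, 1] comes out to -50.
d = np.array([[0, 5, 9, 9, 8],
              [5, 0, 10, 10, 9],
              [9, 10, 0, 8, 7],
              [9, 10, 8, 0, 3],
              [8, 9, 7, 3, 0]], dtype=float)
print(compute_q_vectorized(d))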
# Copyright (C) 2009, Thomas Leonard # See the README file for details, or visit http://0install.net. import gtk, pango from zeroinstall import _, translation from zeroinstall.support import tasks, pretty_size from zeroinstall.injector import model, reader, download from zeroinstall.gui import properties from zeroinstall.gtkui.icon import load_icon from logging import warning, info from zeroinstall.gui import utils from zeroinstall.gui.gui import gobject ngettext = translation.ngettext ICON_SIZE = 20.0 CELL_TEXT_INDENT = int(ICON_SIZE) + 4 def get_tooltip_text(mainwindow, details, main_feed, model_column): interface = details['interface'] if model_column == InterfaceBrowser.INTERFACE_NAME: return _("Full name: %s") % interface elif model_column == InterfaceBrowser.SUMMARY: if main_feed is None or not main_feed.description: return _("(no description available)") first_para = main_feed.description.split('\n\n', 1)[0] return first_para.replace('\n', ' ') elif model_column is None: return _("Click here for more options...") version = details.get('version', None) if version is None: return _("No suitable version was found. Double-click " "here to find out why.") if model_column == InterfaceBrowser.VERSION: return details['version-tip'] assert model_column == InterfaceBrowser.DOWNLOAD_SIZE return details["fetch-tip"] import math angle_right = math.pi / 2 class MenuIconRenderer(gtk.GenericCellRenderer): def __init__(self): gtk.GenericCellRenderer.__init__(self) self.set_property('mode', gtk.CELL_RENDERER_MODE_ACTIVATABLE) def do_set_property(self, prop, value): setattr(self, prop.name, value) def do_get_size(self, widget, cell_area, layout = None): return (0, 0, 20, 20) on_get_size = do_get_size # GTK 2 if gtk.pygtk_version >= (2, 90): # note: if you get "TypeError: Couldn't find conversion for foreign struct 'cairo.Context'", you need "python3-gi-cairo" def do_render(self, cr, widget, background_area, cell_area, flags): # GTK 3 context = widget.get_style_context() gtk.render_arrow(context, cr, angle_right, cell_area.x + 5, cell_area.y + 5, max(cell_area.width, cell_area.height) - 10) else: def on_render(self, window, widget, background_area, cell_area, expose_area, flags): # GTK 2 if flags & gtk.CELL_RENDERER_PRELIT: state = gtk.STATE_PRELIGHT else: state = gtk.STATE_NORMAL widget.style.paint_box(window, state, gtk.SHADOW_OUT, expose_area, widget, None, cell_area.x, cell_area.y, cell_area.width, cell_area.height) widget.style.paint_arrow(window, state, gtk.SHADOW_NONE, expose_area, widget, None, gtk.ARROW_RIGHT, True, cell_area.x + 5, cell_area.y + 5, cell_area.width - 10, cell_area.height - 10) class IconAndTextRenderer(gtk.GenericCellRenderer): __gproperties__ = { "image": (gobject.TYPE_PYOBJECT, "Image", "Image", gobject.PARAM_READWRITE), "text": (gobject.TYPE_STRING, "Text", "Text", "-", gobject.PARAM_READWRITE), } def do_set_property(self, prop, value): setattr(self, prop.name, value) def do_get_size(self, widget, cell_area, layout = None): if not layout: layout = widget.create_pango_layout(self.text) a, rect = layout.get_pixel_extents() if self.image: pixmap_height = self.image.get_height() else: pixmap_height = 32 if not isinstance(rect, tuple): rect = (rect.x, rect.y, rect.width, rect.height) # GTK 3 both_height = max(rect[1] + rect[3], pixmap_height) return (0, 0, rect[0] + rect[2] + CELL_TEXT_INDENT, both_height) on_get_size = do_get_size # GTK 2 if gtk.pygtk_version >= (2, 90): def do_render(self, cr, widget, background_area, cell_area, flags): # GTK 3 if self.image is None: return 
layout = widget.create_pango_layout(self.text) a, rect = layout.get_pixel_extents() context = widget.get_style_context() image_y = int(0.5 * (cell_area.height - self.image.get_height())) gtk.render_icon(context, cr, self.image, cell_area.x, cell_area.y + image_y) text_y = int(0.5 * (cell_area.height - (rect.y + rect.height))) gtk.render_layout(context, cr, cell_area.x + CELL_TEXT_INDENT, cell_area.y + text_y, layout) else: def on_render(self, window, widget, background_area, cell_area, expose_area, flags): # GTK 2 layout = widget.create_pango_layout(self.text) a, rect = layout.get_pixel_extents() if flags & gtk.CELL_RENDERER_SELECTED: state = gtk.STATE_SELECTED elif flags & gtk.CELL_RENDERER_PRELIT: state = gtk.STATE_PRELIGHT else: state = gtk.STATE_NORMAL image_y = int(0.5 * (cell_area.height - self.image.get_height())) window.draw_pixbuf(widget.style.white_gc, self.image, 0, 0, cell_area.x, cell_area.y + image_y) text_y = int(0.5 * (cell_area.height - (rect[1] + rect[3]))) widget.style.paint_layout(window, state, True, expose_area, widget, "cellrenderertext", cell_area.x + CELL_TEXT_INDENT, cell_area.y + text_y, layout) if gtk.pygtk_version < (2, 8, 0): # Note sure exactly which versions need this. # 2.8.0 gives a warning if you include it, though. gobject.type_register(IconAndTextRenderer) gobject.type_register(MenuIconRenderer) def walk(model, it): while it: yield it for x in walk(model, model.iter_children(it)): yield x it = model.iter_next(it) class InterfaceBrowser(object): model = None root = None cached_icon = None driver = None config = None update_icons = False implementations = None # Interface URI -> Implementation DETAILS = 0 INTERFACE_NAME = 1 VERSION = 2 SUMMARY = 3 DOWNLOAD_SIZE = 4 ICON = 5 BACKGROUND = 6 PROBLEM = 7 columns = [(_('Component'), INTERFACE_NAME), (_('Version'), VERSION), (_('Fetch'), DOWNLOAD_SIZE), (_('Description'), SUMMARY), ('', None)] def __init__(self, driver, widgets): self.driver = driver self.config = driver.config self.implementations = {} tree_view = widgets.get_widget('components') tree_view.set_property('has-tooltip', True) def callback(widget, x, y, keyboard_mode, tooltip): x, y = tree_view.convert_widget_to_bin_window_coords(x, y) pos = tree_view.get_path_at_pos(x, y) if pos: tree_view.set_tooltip_cell(tooltip, pos[0], pos[1], None) path = pos[0] try: col_index = column_objects.index(pos[1]) except ValueError: return False else: col = self.columns[col_index][1] row = self.model[path] details = row[InterfaceBrowser.DETAILS] iface = details['interface'] main_feed = self.config.iface_cache.get_feed(iface) tooltip.set_text(get_tooltip_text(self, details, main_feed, col)) return True else: return False tree_view.connect('query-tooltip', callback) self.cached_icon = {} # URI -> GdkPixbuf self.default_icon = tree_view.get_style().lookup_icon_set(gtk.STOCK_EXECUTE).render_icon(tree_view.get_style(), gtk.TEXT_DIR_NONE, gtk.STATE_NORMAL, gtk.ICON_SIZE_SMALL_TOOLBAR, tree_view, None) self.model = gtk.TreeStore(object, str, str, str, str, gobject.TYPE_PYOBJECT, str, bool) self.tree_view = tree_view tree_view.set_model(self.model) column_objects = [] text = gtk.CellRendererText() coloured_text = gtk.CellRendererText() for name, model_column in self.columns: if model_column == InterfaceBrowser.INTERFACE_NAME: column = gtk.TreeViewColumn(name, IconAndTextRenderer(), text = model_column, image = InterfaceBrowser.ICON) elif model_column == None: menu_column = column = gtk.TreeViewColumn('', MenuIconRenderer()) else: if model_column == 
InterfaceBrowser.SUMMARY: text_ellip = gtk.CellRendererText() try: text_ellip.set_property('ellipsize', pango.ELLIPSIZE_END) except: pass column = gtk.TreeViewColumn(name, text_ellip, text = model_column) column.set_expand(True) elif model_column == InterfaceBrowser.VERSION: column = gtk.TreeViewColumn(name, coloured_text, text = model_column, background = InterfaceBrowser.BACKGROUND) else: column = gtk.TreeViewColumn(name, text, text = model_column) tree_view.append_column(column) column_objects.append(column) tree_view.set_enable_search(True) selection = tree_view.get_selection() def button_press(tree_view, bev): pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y)) if not pos: return False path, col, x, y = pos if (bev.button == 3 or (bev.button < 4 and col is menu_column)) \ and bev.type == gtk.gdk.BUTTON_PRESS: selection.select_path(path) iface = self.model[path][InterfaceBrowser.DETAILS]['interface'] self.show_popup_menu(iface, bev) return True if bev.button != 1 or bev.type != gtk.gdk._2BUTTON_PRESS: return False details = self.model[path][InterfaceBrowser.DETAILS] iface_uri = details['interface'] iface = self.config.iface_cache.get_interface(iface_uri) properties.edit(driver, iface, self.compile, show_versions = True) tree_view.connect('button-press-event', button_press) tree_view.connect('destroy', lambda s: driver.watchers.remove(self.build_tree)) driver.watchers.append(self.build_tree) def set_root(self, root): assert isinstance(root, model.Interface) self.root = root def set_update_icons(self, update_icons): if update_icons: # Clear icons cache to make sure they're really updated self.cached_icon = {} self.update_icons = update_icons def get_icon(self, iface_uri): """Get an icon for this interface. If the icon is in the cache, use that. If not, start a download. If we already started a download (successful or not) do nothing. Returns None if no icon is currently available.""" try: # Try the in-memory cache return self.cached_icon[iface_uri] except KeyError: # Try the on-disk cache iface = self.config.iface_cache.get_interface(iface_uri) iconpath = self.config.iface_cache.get_icon_path(iface) if iconpath: icon = load_icon(iconpath, ICON_SIZE, ICON_SIZE) # (if icon is None, cache the fact that we can't load it) self.cached_icon[iface.uri] = icon else: icon = None # Download a new icon if we don't have one, or if the # user did a 'Refresh' if iconpath is None or self.update_icons: if self.config.network_use == model.network_offline: fetcher = None else: fetcher = self.config.fetcher.download_icon(iface) if fetcher: if iface.uri not in self.cached_icon: self.cached_icon[iface.uri] = None # Only try once @tasks.async def update_display(): yield fetcher try: tasks.check(fetcher) # Try to insert new icon into the cache # If it fails, we'll be left with None in the cached_icon so # we don't try again. 
iconpath = self.config.iface_cache.get_icon_path(iface) if iconpath: self.cached_icon[iface.uri] = load_icon(iconpath, ICON_SIZE, ICON_SIZE) self.build_tree() else: warning("Failed to download icon for '%s'", iface) except download.DownloadAborted as ex: info("Icon download aborted: %s", ex) # Don't report further; the user knows they cancelled except download.DownloadError as ex: warning("Icon download failed: %s", ex) # Not worth showing a dialog box for this except Exception as ex: import traceback traceback.print_exc() self.config.handler.report_error(ex) update_display() # elif fetcher is None: don't store anything in cached_icon # Note: if no icon is available for downloading, # more attempts are made later. # It can happen that no icon is yet available because # the interface was not downloaded yet, in which case # it's desireable to try again once the interface is available return icon return None def build_tree(self): self.implementations = {} self.model.clear() def add_node(parent, details): iter = self.model.append(parent) iface = details['interface'] self.model[iter][InterfaceBrowser.DETAILS] = details self.model[iter][InterfaceBrowser.INTERFACE_NAME] = details["name"] self.model[iter][InterfaceBrowser.SUMMARY] = details["summary"] self.model[iter][InterfaceBrowser.ICON] = self.get_icon(iface) or self.default_icon problem = details["type"] == "problem" self.model[iter][InterfaceBrowser.PROBLEM] = problem if problem: self.model[iter][InterfaceBrowser.VERSION] = '(problem)' self.model[iter][InterfaceBrowser.DOWNLOAD_SIZE] = '' else: impl = utils.get_impl(self.config, details) if impl: self.implementations[iface] = impl if details["type"] == "selected": self.model[iter][InterfaceBrowser.VERSION] = details["version"] self.model[iter][InterfaceBrowser.DOWNLOAD_SIZE] = details["fetch"] for child in details["children"]: add_node(iter, child) else: self.model[iter][InterfaceBrowser.VERSION] = _('(problem)') if details["type"] == "problem" else _('(none)') try: add_node(None, self.driver.tree) self.tree_view.expand_all() except Exception as ex: warning("Failed to build tree: %s", ex, exc_info = ex) raise def show_popup_menu(self, iface_uri, bev): from zeroinstall.gui import bugs iface = self.config.iface_cache.get_interface(iface_uri) have_source = properties.have_source_for(self.config, iface) global menu # Fix GC problem in PyGObject menu = gtk.Menu() for label, cb in [(_('Show Feeds'), lambda: properties.edit(self.driver, iface, self.compile)), (_('Show Versions'), lambda: properties.edit(self.driver, iface, self.compile, show_versions = True)), (_('Report a Bug...'), lambda: bugs.report_bug(self.driver, iface))]: item = gtk.MenuItem() item.set_label(label) if cb: item.connect('activate', lambda item, cb=cb: cb()) else: item.set_sensitive(False) item.show() menu.append(item) item = gtk.MenuItem() item.set_label(_('Compile')) item.show() menu.append(item) if have_source: compile_menu = gtk.Menu() item.set_submenu(compile_menu) item = gtk.MenuItem() item.set_label(_('Automatic')) item.connect('activate', lambda item: self.compile(iface, autocompile = True)) item.show() compile_menu.append(item) item = gtk.MenuItem() item.set_label(_('Manual...')) item.connect('activate', lambda item: self.compile(iface, autocompile = False)) item.show() compile_menu.append(item) else: item.set_sensitive(False) if gtk.pygtk_version >= (2, 90): menu.popup(None, None, None, None, bev.button, bev.time) else: menu.popup(None, None, None, bev.button, bev.time) def compile(self, interface, autocompile = 
True): from zeroinstall.gui import compile def on_success(): # A new local feed may have been registered, so reload it from the disk cache info(_("0compile command completed successfully. Reloading interface details.")) reader.update_from_cache(interface) for feed in interface.extra_feeds: self.config.iface_cache.get_feed(feed.uri, force = True) from zeroinstall.gui import main main.recalculate() compile.compile(on_success, interface.uri, autocompile = autocompile) def update_download_status(self, only_update_visible = False): """Called at regular intervals while there are downloads in progress, and once at the end. Also called when things are added to the store. Update the TreeView with the interfaces.""" # A download may be for a feed, an interface or an implementation. # Create the reverse mapping (item -> download) hints = {} for dl in self.config.handler.monitored_downloads: if dl.hint: if dl.hint not in hints: hints[dl.hint] = [] hints[dl.hint].append(dl) # Only update currently visible rows if only_update_visible and self.tree_view.get_visible_range() != None: firstVisiblePath, lastVisiblePath = self.tree_view.get_visible_range() firstVisibleIter = self.model.get_iter(firstVisiblePath) else: # (or should we just wait until the TreeView has settled enough to tell # us what is visible?) firstVisibleIter = self.model.get_iter_root() lastVisiblePath = None iface_cache = self.config.iface_cache for it in walk(self.model, firstVisibleIter): row = self.model[it] iface = iface_cache.get_interface(row[InterfaceBrowser.DETAILS]['interface']) # Is this interface the download's hint? downloads = hints.get(iface, []) # The interface itself downloads += hints.get(iface.uri, []) # The main feed for feed in iface_cache.get_feed_imports(iface): downloads += hints.get(feed.uri, []) # Other feeds sel = self.implementations.get(iface.uri, None) if sel: downloads += hints.get(sel, []) # The chosen implementation if downloads: so_far = 0 expected = None for dl in downloads: if dl.expected_size: expected = (expected or 0) + dl.expected_size so_far += dl.get_bytes_downloaded_so_far() if expected: summary = ngettext("(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%])", "(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%] in %(number)d downloads)", downloads) values_dict = {'downloaded': pretty_size(so_far), 'expected': pretty_size(expected), 'percentage': 100 * so_far / float(expected), 'number': len(downloads)} else: summary = ngettext("(downloading %(downloaded)s/unknown)", "(downloading %(downloaded)s/unknown in %(number)d downloads)", downloads) values_dict = {'downloaded': pretty_size(so_far), 'number': len(downloads)} row[InterfaceBrowser.SUMMARY] = summary % values_dict else: feed = iface_cache.get_feed(iface.uri) details = row[InterfaceBrowser.DETAILS] if 'id' in details: impl = utils.get_impl(self.config, details) else: impl = None row[InterfaceBrowser.DOWNLOAD_SIZE] = utils.get_fetch_info(self.config, impl) row[InterfaceBrowser.SUMMARY] = feed.summary if feed else "-" if self.model.get_path(it) == lastVisiblePath: break def highlight_problems(self): """Called when the solve finishes. Highlight any missing implementations.""" for it in walk(self.model, self.model.get_iter_root()): row = self.model[it] if row[InterfaceBrowser.PROBLEM]: row[InterfaceBrowser.BACKGROUND] = '#f88'
linuxmidhun/0install
zeroinstall/gui/iface_browser.py
Python
lgpl-2.1
18,312
[ "VisIt" ]
a5bd1f345269e071bdff139efba1119e7a98a1a478d0e5870adbeac87e02d093
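iface_browser.py drives its download-status updates with the small `walk` generator, which yields every iter in a gtk.TreeModel depth-first: the node itself, then its children recursively, then its next sibling. A library-agnostic sketch of the same pattern (my own illustration; `Model` and `Node` are stand-ins exposing only the `iter_children`/`iter_next` calls the GTK code relies on):

def walk(model, it):
    """Depth-first traversal, same shape as the generator in iface_browser."""
    while it is not None:
        yield it
        for child in walk(model, model.iter_children(it)):
            yield child
        it = model.iter_next(it)

class Node:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

class Model:
    """Minimal stand-in exposing the two calls walk() relies on."""
    def __init__(self, roots):
        self.roots = roots
        self._parent = {}
        for r in roots:
            self._index(r)
    def _index(self, node):
        for c in node.children:
            self._parent[id(c)] = node
            self._index(c)
    def iter_children(self, node):
        return node.children[0] if node.children else None
    def iter_next(self, node):
        parent = self._parent.get(id(node))
        siblings = parent.children if parent else self.roots
        i = siblings.index(node)
        return siblings[i + 1] if i + 1 < len(siblings) else None

tree = Node("root", [Node("a", [Node("a1")]), Node("b")])
print([n.name for n in walk(Model([tree]), tree)])  # ['root', 'a', 'a1', 'b']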
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test gluon.probability with HybridBlock.hybrid_forward api """ import mxnet as mx import numpy as _np from mxnet import np, npx, autograd from mxnet import gluon import mxnet.gluon.probability as mgp from mxnet.gluon.probability import StochasticBlock, StochasticSequential from mxnet.gluon import HybridBlock from mxnet.test_utils import use_np, assert_almost_equal from numpy.testing import assert_array_equal import pytest import scipy.stats as ss import scipy.special as scipy_special import itertools from numbers import Number def prob_to_logit(prob): return np.log(prob) - np.log1p(-prob) def _distribution_method_invoker(dist, func, *args): """Wrapper for invoking different types of class methods with one unified interface. Parameters ---------- dist : Distribution func : method """ if (len(args) == 0): out = getattr(dist, func) if callable(out): return out() else: return out return getattr(dist, func)(*args) def test_mgp_getF_v1(): # Test getF getF = mgp.utils.getF nd = mx.nd sym = mx.sym assert getF(nd.ones((2, 2)), nd.ones((2, 2))) == nd assert getF(sym.ones((2, 2)), sym.ones((2, 2))) == sym assert getF(1.0, 2.0) == nd # Test exception with pytest.raises(TypeError): getF(nd.ones((2, 2)), sym.ones((2, 2))) getF(sym.ones((2, 2)), nd.ones((2, 2))) @use_np def test_gluon_uniform_v1(): class TestUniform(HybridBlock): def __init__(self, func): super(TestUniform, self).__init__() self._func = func def hybrid_forward(self, F, low, high, *args): uniform = mgp.Uniform(low, high, validate_args=True) return _distribution_method_invoker(uniform, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): low = np.random.uniform(-1, 1, shape) high = low + np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(low, high) net = TestUniform("log_prob") if hybridize: net.hybridize() for i in range(2): mx_out = net(low, high, samples).asnumpy() np_out = ss.uniform(low.asnumpy(), (high - low).asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): low = np.random.uniform(-1, 1, shape) high = low + np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(low, high) net = TestUniform("cdf") if hybridize: net.hybridize() mx_out = net(low, high, samples).asnumpy() np_out = ss.uniform(low.asnumpy(), (high - low).asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): low = np.random.uniform(-1, 1, shape) high = low + np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) 
net = TestUniform("icdf") if hybridize: net.hybridize() mx_out = net(low, high, samples).asnumpy() np_out = ss.uniform(low.asnumpy(), (high - low).asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): low = np.random.uniform(-1, 1, shape) high = low + np.random.uniform(0.5, 1.5, shape) net = TestUniform("entropy") if hybridize: net.hybridize() mx_out = net(low, high).asnumpy() np_out = ss.uniform(low.asnumpy(), (high - low).asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_normal_v1(): class TestNormal(HybridBlock): def __init__(self, func): super(TestNormal, self).__init__() self._func = func def hybrid_forward(self, F, loc, scale, *args): normal = mgp.Normal(loc, scale, validate_args=True) return _distribution_method_invoker(normal, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestNormal("log_prob") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.norm(loc.asnumpy(), scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestNormal("cdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.norm(loc.asnumpy(), scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestNormal("icdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.norm(loc.asnumpy(), scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) net = TestNormal("entropy") if hybridize: net.hybridize() mx_out = net(loc, scale).asnumpy() np_out = ss.norm(loc.asnumpy(), scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_laplace_v1(): class TestLaplace(HybridBlock): def __init__(self, func): super(TestLaplace, self).__init__() self._func = func def hybrid_forward(self, F, loc, scale, *args): laplace = mgp.Laplace(loc, scale, validate_args=True) return _distribution_method_invoker(laplace, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.laplace(size=shape) net = TestLaplace("log_prob") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.laplace(loc.asnumpy(), scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, 
use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.laplace(size=shape) net = TestLaplace("cdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.laplace(loc.asnumpy(), scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestLaplace("icdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.laplace(loc.asnumpy(), scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) net = TestLaplace("entropy") if hybridize: net.hybridize() mx_out = net(loc, scale).asnumpy() np_out = ss.laplace(loc.asnumpy(), scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_cauchy_v1(): class TestCauchy(HybridBlock): def __init__(self, func): self._func = func super(TestCauchy, self).__init__() def hybrid_forward(self, F, loc, scale, *args): cauchy = mgp.Cauchy(loc, scale, F, validate_args=True) return _distribution_method_invoker(cauchy, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test sampling for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestCauchy("sample") if hybridize: net.hybridize() mx_out = net(loc, scale) desired_shape = (shape,) if isinstance(shape, Number) else shape assert mx_out.shape == desired_shape # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestCauchy("log_prob") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.cauchy(loc.asnumpy(), scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestCauchy("cdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.cauchy(loc.asnumpy(), scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape, low=1e-4, high=1.0-1e-4) net = TestCauchy("icdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.cauchy(loc.asnumpy(), scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) 
scale = np.random.uniform(0.5, 1.5, shape) net = TestCauchy("entropy") if hybridize: net.hybridize() mx_out = net(loc, scale).asnumpy() np_out = ss.cauchy(loc.asnumpy(), scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_half_cauchy_v1(): class TestHalfCauchy(HybridBlock): def __init__(self, func): super(TestHalfCauchy, self).__init__() self._func = func def hybrid_forward(self, F, scale, *args): half_normal = mgp.HalfCauchy(scale, F, validate_args=True) return getattr(half_normal, self._func)(*args) shapes = [(), (1,), (2, 3), 6] # Test sampling for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) net = TestHalfCauchy("sample") if hybridize: net.hybridize() mx_out = net(scale).asnumpy() if isinstance(shape, Number): shape = (shape,) assert mx_out.shape == shape # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.abs(np.random.normal(size=shape)) net = TestHalfCauchy("log_prob") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.halfcauchy(0, scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.abs(np.random.normal(size=shape)) net = TestHalfCauchy("cdf") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.halfcauchy(0, scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape, high=1.0-1e-4) net = TestHalfCauchy("icdf") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.halfcauchy(0, scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_poisson_v1(): class TestPoisson(HybridBlock): def __init__(self, func): self._func = func super(TestPoisson, self).__init__() def hybrid_forward(self, F, rate, *args): poisson = mgp.Poisson(rate, F, validate_args=True) return _distribution_method_invoker(poisson, self._func, *args) shapes = [(1,), (2, 3), 6] # Test sampling for shape, hybridize in itertools.product(shapes, [False]): rate = np.random.uniform(0.5, 1.5, shape) net = TestPoisson("sample") if hybridize: net.hybridize() mx_out = net(rate).asnumpy() assert mx_out.shape == rate.shape # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): rate = np.random.uniform(0.5, 1.5, shape) samples = np.random.randint(0, 5, shape).astype('float') net = TestPoisson("log_prob") if hybridize: net.hybridize() mx_out = net(rate, samples).asnumpy() np_out = ss.poisson(mu=rate.asnumpy()).logpmf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_geometric_v1(): class TestGeometric(HybridBlock): def __init__(self, func, is_logit=False): super(TestGeometric, self).__init__() self._is_logit = is_logit self._func = func def hybrid_forward(self, F, params, *args): dist = mgp.Geometric(logit=params, validate_args=True) if self._is_logit else \ mgp.Geometric(prob=params, validate_args=True) return 
_distribution_method_invoker(dist, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = np.random.uniform(size=shape) sample = np.random.randint(0, 10, size=shape).astype('float32') param = prob if use_logit: param = prob_to_logit(param) net = TestGeometric("log_prob", use_logit) if hybridize: net.hybridize() mx_out = net(param, sample).asnumpy() np_out = ss.geom.logpmf(sample.asnumpy() + 1, prob.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test variance for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = np.random.uniform(size=shape) param = prob if use_logit: param = prob_to_logit(param) net = TestGeometric("variance", use_logit) if hybridize: net.hybridize() mx_out = net(param).asnumpy() np_out = ss.geom(prob.asnumpy()).var() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): # Add lower bound constraint, otherwise scipy would raise warning. prob = np.random.uniform(low=0.1, size=shape) param = prob if use_logit: param = prob_to_logit(param) net = TestGeometric("entropy", use_logit) if hybridize: net.hybridize() mx_out = net(param).asnumpy() np_out = ss.geom(prob.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_negative_binomial_v1(): class TestNegativeBinomial(HybridBlock): def __init__(self, func, is_logit=False): super(TestNegativeBinomial, self).__init__() self._is_logit = is_logit self._func = func def hybrid_forward(self, F, n, params, *args): dist = mgp.NegativeBinomial(n=n, logit=params, validate_args=True) if self._is_logit else \ mgp.NegativeBinomial(n=n, prob=params, validate_args=True) return _distribution_method_invoker(dist, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): n = np.random.randint(1, 10, size=shape).astype('float32') prob = np.random.uniform(low=0.2, high=0.6, size=shape).astype('float32') sample = np.random.randint(0, 10, size=shape).astype('float32') param = prob if use_logit: param = prob_to_logit(param) net = TestNegativeBinomial("log_prob", use_logit) if hybridize: net.hybridize() mx_out = net(n, param, sample).asnumpy() np_out = ss.nbinom(n=n.asnumpy(), p=prob.asnumpy() ).logpmf(sample.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test mean and variance for shape, hybridize in itertools.product(shapes, [True, False]): for func in ['mean', 'variance']: for use_logit in [True, False]: n = np.random.randint(1, 10, size=shape).astype('float32') prob = np.random.uniform(low=0.2, high=0.6, size=shape).astype('float32') net = TestNegativeBinomial(func, use_logit) param = prob if use_logit: param = prob_to_logit(param) if hybridize: net.hybridize() mx_out = net(n, param).asnumpy() ss_nbinom = ss.nbinom(n=n.asnumpy(), p=1 - prob.asnumpy()) if func == 'mean': np_out = ss_nbinom.mean() else: np_out = ss_nbinom.var() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_exponential_v1(): class TestExponential(HybridBlock): def __init__(self, func): self._func = func super(TestExponential, self).__init__() def hybrid_forward(self, F, scale, *args): exponential 
= mgp.Exponential(scale, F, validate_args=True) return _distribution_method_invoker(exponential, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(0.2, 1.2, size=shape) net = TestExponential("log_prob") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.expon(scale=scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(0.2, 1.2, size=shape) net = TestExponential("cdf") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.expon(scale=scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(0.0, 1.0, size=shape) net = TestExponential("icdf") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.expon(scale=scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) net = TestExponential("entropy") if hybridize: net.hybridize() mx_out = net(scale).asnumpy() np_out = ss.expon(scale=scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_weibull_v1(): class TestWeibull(HybridBlock): def __init__(self, func): super(TestWeibull, self).__init__() self._func = func def hybrid_forward(self, F, concentration, scale, *args): weibull = mgp.Weibull(concentration, scale, F, validate_args=True) return _distribution_method_invoker(weibull, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): concentration = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) samples = np.random.uniform(size=shape) net = TestWeibull("log_prob") if hybridize: net.hybridize() mx_out = net(concentration, scale, samples).asnumpy() np_out = ss.weibull_min(c=concentration.asnumpy( ), scale=scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): concentration = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) samples = np.random.uniform(size=shape) net = TestWeibull("cdf") if hybridize: net.hybridize() mx_out = net(concentration, scale, samples).asnumpy() np_out = ss.weibull_min(c=concentration.asnumpy( ), scale=scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): concentration = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) samples = np.random.uniform(size=shape) net = TestWeibull("icdf") if hybridize: net.hybridize() mx_out = net(concentration, scale, samples).asnumpy() np_out = ss.weibull_min(c=concentration.asnumpy( ), scale=scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, 
rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): concentration = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) net = TestWeibull("entropy") if hybridize: net.hybridize() mx_out = net(concentration, scale).asnumpy() np_out = ss.weibull_min(c=concentration.asnumpy(), scale=scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_pareto_v1(): class TestPareto(HybridBlock): def __init__(self, func): super(TestPareto, self).__init__() self._func = func def hybrid_forward(self, F, alpha, scale, *args): pareto = mgp.Pareto(alpha, scale, F, validate_args=True) return _distribution_method_invoker(pareto, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): alpha = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) samples = np.random.uniform(1, 2, size=shape) net = TestPareto("log_prob") if hybridize: net.hybridize() mx_out = net(alpha, scale, samples).asnumpy() np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).logpdf( samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): alpha = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) samples = np.random.uniform(1.0, 2.0, size=shape) net = TestPareto("cdf") if hybridize: net.hybridize() mx_out = net(alpha, scale, samples).asnumpy() np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).cdf( samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): alpha = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) samples = np.random.uniform(size=shape) net = TestPareto("icdf") if hybridize: net.hybridize() mx_out = net(alpha, scale, samples).asnumpy() np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).ppf( samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): alpha = np.random.uniform(size=shape) scale = np.random.uniform(size=shape) net = TestPareto("entropy") if hybridize: net.hybridize() mx_out = net(alpha, scale).asnumpy() np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_gamma_v1(): class TestGamma(HybridBlock): def __init__(self, func): super(TestGamma, self).__init__() self._func = func def hybrid_forward(self, F, shape, scale, *args): gamma = mgp.Gamma(shape, scale, F, validate_args=True) return _distribution_method_invoker(gamma, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): alpha = np.random.uniform(0.5, 1.5, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestGamma("log_prob") if hybridize: net.hybridize() mx_out = net(alpha, scale, samples).asnumpy() np_out = ss.gamma(a=alpha.asnumpy(), loc=0, scale=scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test `mean`, `var` and `entropy` for shape, hybridize in itertools.product(shapes, [True, False]): for func in 
['mean', 'variance', 'entropy']: alpha = np.random.uniform(0.5, 1.5, shape) scale = np.random.uniform(0.5, 1.5, shape) net = TestGamma(func) if hybridize: net.hybridize() mx_out = net(alpha, scale).asnumpy() ss_gamma = ss.gamma(a=alpha.asnumpy(), loc=0, scale=scale.asnumpy()) if func == 'mean': np_out = ss_gamma.mean() elif func == 'variance': np_out = ss_gamma.var() else: np_out = ss_gamma.entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_dirichlet_v1(): class TestDirichlet(HybridBlock): def __init__(self, func): super(TestDirichlet, self).__init__() self._func = func def hybrid_forward(self, F, alpha, *args): dirichlet = mgp.Dirichlet(alpha, F, validate_args=True) return _distribution_method_invoker(dirichlet, self._func, *args) event_shapes = [2, 4, 6] batch_shapes = [None, (2, 3)] # Test sampling for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for hybridize in [True, False]: desired_shape = ( batch_shape if batch_shape is not None else ()) + (event_shape,) alpha = np.random.uniform(1.0, 5.0, size=desired_shape) net = TestDirichlet("sample") if hybridize: net.hybridize() mx_out = net(alpha).asnumpy() # Check shape assert mx_out.shape == desired_shape # Check simplex assert_almost_equal(mx_out.sum(-1), _np.ones_like(mx_out.sum(-1)), atol=1e-4, rtol=1e-3, use_broadcast=False) # Test log_prob # Scipy does not support batch `alpha`, thus we skip multi-dimensional batch_shape case. for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes[:1]): for hybridize in [True, False]: desired_shape = ( batch_shape if batch_shape is not None else ()) + (event_shape,) alpha = np.random.uniform(1.0, 5.0, desired_shape) np_samples = _np.random.dirichlet( [10.0 / event_shape] * event_shape, size=batch_shape) net = TestDirichlet("log_prob") if hybridize: net.hybridize() mx_out = net(alpha, np.array(np_samples)).asnumpy() np_out = ss.dirichlet(alpha=alpha.asnumpy()).logpdf(np_samples) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test `mean`, `var` and `entropy` for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes[:1]): for hybridize in [False]: for func in ['mean', 'variance', 'entropy']: desired_shape = ( batch_shape if batch_shape is not None else ()) + (event_shape,) alpha = np.random.uniform(1.0, 5.0, desired_shape) net = TestDirichlet(func) if hybridize: net.hybridize() mx_out = net(alpha).asnumpy() ss_dir = ss.dirichlet(alpha=alpha.asnumpy()) if func == 'mean': np_out = ss_dir.mean() elif func == 'variance': np_out = ss_dir.var() else: np_out = ss_dir.entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_beta_v1(): class TestBeta(HybridBlock): def __init__(self, func): super(TestBeta, self).__init__() self._func = func def hybrid_forward(self, F, alpha, beta, *args): beta_dist = mgp.Beta(alpha, beta, F, validate_args=True) return _distribution_method_invoker(beta_dist, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): alpha = np.random.uniform(0.5, 1.5, shape) beta = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestBeta("log_prob") if hybridize: net.hybridize() mx_out = net(alpha, beta, samples).asnumpy() np_out = ss.beta(alpha.asnumpy(), beta.asnumpy() ).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # 
Test `mean`, `var` and `entropy` for shape, hybridize in itertools.product(shapes, [True, False]): for func in ['mean', 'variance', 'entropy']: alpha = np.random.uniform(0.5, 1.5, shape) beta = np.random.uniform(0.5, 1.5, shape) net = TestBeta(func) if hybridize: net.hybridize() mx_out = net(alpha, beta).asnumpy() ss_beta = ss.beta(alpha.asnumpy(), beta.asnumpy()) if func == 'mean': np_out = ss_beta.mean() elif func == 'variance': np_out = ss_beta.var() else: np_out = ss_beta.entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_fisher_snedecor_v1(): class TestFisherSnedecor(HybridBlock): def __init__(self, func): super(TestFisherSnedecor, self).__init__() self._func = func def hybrid_forward(self, F, df1, df2, *args): beta_dist = mgp.FisherSnedecor(df1, df2, F, validate_args=True) return _distribution_method_invoker(beta_dist, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): df1 = np.random.uniform(0.5, 1.5, shape) df2 = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestFisherSnedecor("log_prob") if hybridize: net.hybridize() mx_out = net(df1, df2, samples).asnumpy() np_out = ss.f(dfn=df1.asnumpy(), dfd=df2.asnumpy() ).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test `mean` and `var` for shape, hybridize in itertools.product(shapes, [True, False]): for func in ['mean', 'variance']: df1 = np.random.uniform(0.5, 1.5, shape) df2 = np.random.uniform(4.0, 6.0, shape) net = TestFisherSnedecor(func) if hybridize: net.hybridize() mx_out = net(df1, df2).asnumpy() ss_f = ss.f(dfn=df1.asnumpy(), dfd=df2.asnumpy()) if func == 'mean': np_out = ss_f.mean() else: np_out = ss_f.var() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_student_t_v1(): class TestT(HybridBlock): def __init__(self, func): super(TestT, self).__init__() self._func = func def hybrid_forward(self, F, df, loc, scale, *args): t_dist = mgp.StudentT(df, loc, scale, F, validate_args=True) return _distribution_method_invoker(t_dist, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.zeros(shape) scale = np.random.uniform(0.5, 1.5, shape) df = np.random.uniform(2, 4, shape) samples = np.random.uniform(0, 4, size=shape) net = TestT("log_prob") if hybridize: net.hybridize() mx_out = net(df, loc, scale, samples).asnumpy() np_out = ss.t(loc=0, scale=scale.asnumpy(), df=df.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test `mean`, `var` and `entropy` for shape, hybridize in itertools.product(shapes, [False, True]): for func in ['mean', 'variance', 'entropy']: loc = np.zeros(shape) scale = np.random.uniform(0.5, 1.5, shape) df = np.random.uniform(3, 4, shape) net = TestT(func) if hybridize: net.hybridize() mx_out = net(df, loc, scale).asnumpy() ss_f = ss.t(loc=0, scale=scale.asnumpy(), df=df.asnumpy()) if func == 'mean': np_out = ss_f.mean() elif func == 'variance': np_out = ss_f.var() else: np_out = ss_f.entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_gumbel_v1(): class TestGumbel(HybridBlock): def __init__(self, func): super(TestGumbel, self).__init__() self._func = func def hybrid_forward(self, F, loc, scale, *args): normal = 
mgp.Gumbel(loc, scale, F, validate_args=True) return getattr(normal, self._func)(*args) shapes = [(), (1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestGumbel("log_prob") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.gumbel_r(loc=loc.asnumpy(), scale=scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.normal(size=shape) net = TestGumbel("cdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.gumbel_r(loc.asnumpy(), scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestGumbel("icdf") if hybridize: net.hybridize() mx_out = net(loc, scale, samples).asnumpy() np_out = ss.gumbel_r(loc.asnumpy(), scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) scale = np.random.uniform(0.5, 1.5, shape) net = TestGumbel("entropy") if hybridize: net.hybridize() mx_out = net(loc, scale).asnumpy() np_out = ss.gumbel_r(loc.asnumpy(), scale.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_multinomial_v1(): class TestMultinomial(HybridBlock): def __init__(self, func, num_events, total_count, is_logit, batch_shape=None, sample_shape=None): super(TestMultinomial, self).__init__() self._num_events = num_events self._total_count = total_count self._is_logit = is_logit self._func = func self._batch_shape = batch_shape self._sample_shape = sample_shape def hybrid_forward(self, F, params, *args): multinomial = ( mgp.Multinomial(self._num_events, logit=params, total_count=self._total_count, validate_args=True) if self._is_logit else mgp.Multinomial(self._num_events, prob=params, total_count=self._total_count, validate_args=True) ) if self._func == 'sample': return multinomial.sample(self._batch_shape) if self._func == 'sample_n': return multinomial.sample_n(self._sample_shape) return _distribution_method_invoker(multinomial, self._func, *args) def one_hot(a, num_classes): return np.identity(num_classes)[a] event_shapes = [2, 5, 10] batch_shapes = [None, (2, 3)] # , (4, 0, 5)] sample_shapes = [None, (2,), (3, 4)] # Test sampling for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob if use_logit: param = np.log(param) net = TestMultinomial("sample", event_shape, _np.random.randint(1, 5), use_logit, batch_shape) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = batch_shape if batch_shape is not None else () assert mx_out.shape == desired_shape + (event_shape,) # Test sample_n for 
event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob if use_logit: param = np.log(param) net = TestMultinomial("sample_n", event_shape, _np.random.randint(1, 5), use_logit, batch_shape, sample_shape) if hybridize: net.hybridize() mx_out = net(param).asnumpy() sample_shape = () if sample_shape is None else sample_shape desired_shape = sample_shape + \ (batch_shape if batch_shape is not None else ()) assert mx_out.shape == desired_shape + (event_shape,) # Test log_prob for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes): for use_logit, hybridize in itertools.product([True, False], [False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) eps = _np.finfo('float32').eps prob = np.clip(prob, eps, 1 - eps) param = prob sample_shape = () if sample_shape is None else sample_shape desired_shape = sample_shape + \ (batch_shape if batch_shape is not None else ()) samples = np.random.choice(event_shape, size=desired_shape) samples = one_hot(samples, event_shape) if use_logit: param = np.log(param) net = TestMultinomial("log_prob", event_shape, _np.random.randint(1, 5), use_logit) if hybridize: net.hybridize() mx_out = net(param, samples).asnumpy() # Check shape assert mx_out.shape == desired_shape @use_np def test_gluon_binomial_v1(): class TestBinomial(HybridBlock): def __init__(self, func, is_logit=False, n=1): super(TestBinomial, self).__init__() self._is_logit = is_logit self._func = func self._n = n def hybrid_forward(self, F, params, *args): dist = mgp.Binomial(n=self._n, logit=params, validate_args=True) \ if self._is_logit else \ mgp.Binomial(n=self._n, prob=params, validate_args=True) return _distribution_method_invoker(dist, self._func, *args) shapes = [(), (1,), (2, 3), 6] # Test sampling for shape, hybridize in itertools.product(shapes, [True, False]): for use_logit in [True, False]: n = _np.random.randint(5, 10) prob = np.random.uniform(low=0.1, size=shape) net = TestBinomial('sample', use_logit, n=float(n)) param = prob if use_logit: param = prob_to_logit(param) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = (shape,) if isinstance(shape, int) else shape assert mx_out.shape == desired_shape # Test sample_n prefix_shape = (2, 3) for shape in shapes: n = _np.random.randint(5, 10) prob = np.random.uniform(low=0.1, size=shape) dist = mgp.Binomial(n=n, prob=prob) samples = dist.sample_n(prefix_shape) assert samples.shape == (prefix_shape + prob.shape) # Test log_prob for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): n = _np.random.randint(5, 10) prob = np.random.uniform(low=0.1, size=shape) sample = np.random.randint(0, n, size=shape).astype('float32') param = prob if use_logit: param = prob_to_logit(param) net = TestBinomial("log_prob", use_logit, n=float(n)) if hybridize: net.hybridize() mx_out = net(param, sample).asnumpy() np_out = ss.binom(n=n, p=prob.asnumpy()).logpmf(sample.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test mean and variance for shape, hybridize in itertools.product(shapes, [True, False]): for func in ['mean', 'variance']: for use_logit in [True, False]: n = _np.random.randint(5, 10) prob = np.random.uniform(low=0.1, size=shape) net 
= TestBinomial(func, use_logit, n=float(n)) param = prob if use_logit: param = prob_to_logit(param) if hybridize: net.hybridize() mx_out = net(param).asnumpy() ss_binom = ss.binom(n=n, p=prob.asnumpy()) if func == 'mean': np_out = ss_binom.mean() else: np_out = ss_binom.var() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np @pytest.mark.flaky def test_gluon_bernoulli_v1(): class TestBernoulli(HybridBlock): def __init__(self, func, is_logit=False): super(TestBernoulli, self).__init__() self._is_logit = is_logit self._func = func def hybrid_forward(self, F, params, *args): bernoulli = mgp.Bernoulli(logit=params, validate_args=True) if self._is_logit else \ mgp.Bernoulli(prob=params, validate_args=True) return _distribution_method_invoker(bernoulli, self._func, *args) # Test log_prob shapes = [(), (1,), (2, 3), 6] for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = np.random.uniform(size=shape) sample = npx.random.bernoulli(prob=0.5, size=shape) param = prob if use_logit: param = prob_to_logit(param) net = TestBernoulli("log_prob", use_logit) if hybridize: net.hybridize() mx_out = net(param, sample).asnumpy() np_out = _np.log(ss.bernoulli.pmf(sample.asnumpy(), prob.asnumpy())) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test variance for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = np.random.uniform(size=shape) sample = npx.random.bernoulli(prob=0.5, size=shape) param = prob if use_logit: param = prob_to_logit(param) net = TestBernoulli("variance", use_logit) if hybridize: net.hybridize() mx_out = net(param).asnumpy() np_out = ss.bernoulli(prob.asnumpy()).var() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = np.random.uniform(size=shape) sample = npx.random.bernoulli(prob=0.5, size=shape) param = prob if use_logit: param = prob_to_logit(param) net = TestBernoulli("entropy", use_logit) if hybridize: net.hybridize() mx_out = net(param).asnumpy() np_out = ss.bernoulli(prob.asnumpy()).entropy() assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_relaxed_bernoulli_v1(): class TestRelaxedBernoulli(HybridBlock): def __init__(self, func, is_logit=False): super(TestRelaxedBernoulli, self).__init__() self._is_logit = is_logit self._func = func def hybrid_forward(self, F, params, *args): relaxed_bernoulli = mgp.RelaxedBernoulli(T=1.0, logit=params, validate_args=True)\ if self._is_logit else \ mgp.RelaxedBernoulli(T=1.0, prob=params, validate_args=True) if self._func == "sample": return relaxed_bernoulli.sample() return _distribution_method_invoker(relaxed_bernoulli, self._func, *args) def prob_to_logit(prob): return np.log(prob) - np.log1p(-prob) shapes = [(), (1,), (2, 3), 6] # Test sampling for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = np.random.uniform(size=shape) param = prob if use_logit: param = prob_to_logit(param) param.attach_grad() net = TestRelaxedBernoulli("sample", use_logit) if hybridize: net.hybridize() with autograd.record(): mx_out = net(param) mx_out.backward() desired_shape = (shape,) if isinstance(shape, int) else shape assert param.grad.shape == desired_shape for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]): prob = 
np.random.uniform(size=shape) sample = np.random.uniform(0.1, 0.9, size=shape) param = prob if use_logit: param = prob_to_logit(param) net = TestRelaxedBernoulli("log_prob", use_logit) if hybridize: net.hybridize() mx_out = net(param, sample).asnumpy() desired_shape = (shape,) if isinstance(shape, int) else shape assert mx_out.shape == desired_shape @use_np def test_gluon_categorical_v1(): class TestCategorical(HybridBlock): def __init__(self, func, is_logit=False, batch_shape=None, num_events=None, sample_shape=None): super(TestCategorical, self).__init__() self._is_logit = is_logit self._func = func self._batch_shape = batch_shape self._num_events = num_events self._sample_shape = sample_shape def hybrid_forward(self, F, params, *args): categorical = mgp.Categorical(self._num_events, logit=params, validate_args=True)\ if self._is_logit else \ mgp.Categorical(self._num_events, prob=params, validate_args=True) if self._func == "sample": return categorical.sample(self._batch_shape) if self._func == "sample_n": return categorical.sample_n(self._sample_shape) return _distribution_method_invoker(categorical, self._func, *args) event_shapes = [2, 5, 10] batch_shapes = [None, (2, 3)] # , (4, 0, 5)] sample_shapes = [(), (2,), (3, 4)] # Test sampling for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob.astype('float32') if use_logit: param = np.log(param) net = TestCategorical("sample", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = batch_shape if batch_shape is not None else () assert mx_out.shape == desired_shape # Test sample_n for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob.astype('float32') if use_logit: param = np.log(param) net = TestCategorical("sample_n", is_logit=use_logit, batch_shape=batch_shape, num_events=event_shape, sample_shape=sample_shape ) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = sample_shape + \ (batch_shape if batch_shape is not None else ()) assert mx_out.shape == desired_shape # Test log_prob for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) eps = _np.finfo('float32').eps prob = np.clip(prob, eps, 1 - eps) param = prob.astype('float32') desired_shape = sample_shape + \ (batch_shape if batch_shape is not None else ()) samples = np.random.choice(event_shape, size=desired_shape) if use_logit: param = np.log(param) net = TestCategorical("log_prob", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param, samples) # Check shape assert mx_out.shape == desired_shape # Check value log_pmf, indices = np.broadcast_arrays( np.log(prob), np.expand_dims(samples, -1)) if indices.ndim >= 1: indices = indices[..., :1] expect_log_prob = _np.take_along_axis( log_pmf, indices.astype('int'), axis=-1).asnumpy() assert_almost_equal(mx_out.asnumpy(), expect_log_prob.squeeze(), atol=1e-4, rtol=1e-3, 
use_broadcast=False) # Test enumerate_support for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob.astype('float32') if use_logit: param = np.log(param) net = TestCategorical("enumerate_support", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = (event_shape,) + \ (batch_shape if batch_shape is not None else ()) assert mx_out.shape == desired_shape @use_np def test_gluon_one_hot_categorical_v1(): def one_hot(a, num_classes): return np.identity(num_classes)[a] class TestOneHotCategorical(HybridBlock): def __init__(self, func, is_logit=False, batch_shape=None, num_events=None): super(TestOneHotCategorical, self).__init__() self._is_logit = is_logit self._func = func self._batch_shape = batch_shape self._num_events = num_events def hybrid_forward(self, F, params, *args): categorical = mgp.OneHotCategorical(num_events=self._num_events, logit=params) \ if self._is_logit else \ mgp.OneHotCategorical(num_events=self._num_events, prob=params) if self._func == "sample": return categorical.sample(self._batch_shape) return _distribution_method_invoker(categorical, self._func, *args) event_shapes = [2, 5, 10] batch_shapes = [None, (2, 3)] # , (4, 0, 5)] sample_shapes = [(), (2,), (3, 4)] # Test sampling for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob if use_logit: param = np.log(param) net = TestOneHotCategorical( "sample", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = batch_shape if batch_shape is not None else () assert mx_out.shape == desired_shape + (event_shape,) # Test log_prob for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) eps = _np.finfo('float32').eps prob = np.clip(prob, eps, 1 - eps) param = prob desired_shape = sample_shape + \ (batch_shape if batch_shape is not None else ()) samples = np.random.choice(event_shape, size=desired_shape) samples = one_hot(samples, event_shape) if use_logit: param = np.log(param) net = TestOneHotCategorical( "log_prob", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param, samples) # Check shape assert mx_out.shape == desired_shape # Test enumerate support for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) param = prob if use_logit: param = np.log(param) net = TestOneHotCategorical( "enumerate_support", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param).asnumpy() desired_shape = batch_shape if batch_shape is not None else () assert mx_out.shape == (event_shape,) + \ desired_shape + (event_shape,) @use_np def test_relaxed_one_hot_categorical_v1(): class TestRelaxedOneHotCategorical(HybridBlock): def __init__(self, func, is_logit=False, 
batch_shape=None, num_events=None): super(TestRelaxedOneHotCategorical, self).__init__() self._is_logit = is_logit self._func = func self._batch_shape = batch_shape self._num_events = num_events def hybrid_forward(self, F, params, *args): categorical = mgp.RelaxedOneHotCategorical(T=1.0, num_events=self._num_events, logit=params) \ if self._is_logit else \ mgp.RelaxedOneHotCategorical( T=1.0, num_events=self._num_events, prob=params) if self._func == "sample": return categorical.sample(self._batch_shape) return _distribution_method_invoker(categorical, self._func, *args) event_shapes = [2, 5, 10] batch_shapes = [None, (2, 3)] # , (4, 0, 5)] sample_shapes = [(), (2,), (3, 4)] # Test sampling for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): for use_logit, hybridize in itertools.product([True, False], [True, False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) prob = prob.astype('float32') param = prob if use_logit: param = np.log(param) param.attach_grad() net = TestRelaxedOneHotCategorical( "sample", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() with autograd.record(): mx_out = net(param) mx_out.backward() desired_shape = batch_shape if batch_shape is not None else () assert mx_out.shape == desired_shape + (event_shape,) assert param.grad.shape == param.shape # Test log_prob for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes): for use_logit, hybridize in itertools.product([True, False], [False]): prob = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=batch_shape)) eps = _np.finfo('float32').eps prob = np.clip(prob, eps, 1 - eps) param = prob desired_shape = sample_shape + \ (batch_shape if batch_shape is not None else ()) # Samples from a Relaxed One-hot Categorical lie on a simplex. samples = np.array(_np.random.dirichlet( [1 / event_shape] * event_shape, size=desired_shape)) if use_logit: param = np.log(param) net = TestRelaxedOneHotCategorical( "log_prob", use_logit, batch_shape, event_shape) if hybridize: net.hybridize() mx_out = net(param, samples) # Check shape assert mx_out.shape == desired_shape @use_np def test_gluon_mvn_v1(): class TestMVN(HybridBlock): def __init__(self, func, param_type): super(TestMVN, self).__init__() self._func = func # cov, precision or scale_tril self._param_type = param_type def hybrid_forward(self, F, loc, cov, *args): mvn = mgp.MultivariateNormal(loc=loc, **{self._param_type: cov}, validate_args=True) return _distribution_method_invoker(mvn, self._func, *args) def _stable_inv(cov): """ Force the precision matrix to be symmetric. 
""" precision = np.linalg.inv(cov) precision_t = np.swapaxes(precision, -1, -2) return (precision + precision_t) / 2 event_shapes = [3, 5] loc_shapes = [(), (2,), (4, 2)] cov_shapes = [(), (2,), (4, 2)] cov_func = { 'cov': lambda s: s, 'precision': lambda s: _stable_inv(s), 'scale_tril': lambda s: np.linalg.cholesky(s) } # Test sampling for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes): for cov_type in cov_func.keys(): for hybridize in [False]: loc = np.random.randn(*(loc_shape + (event_shape,))) _s = np.random.randn(*(cov_shape + (event_shape, event_shape))) loc.attach_grad() _s.attach_grad() # Full covariance matrix sigma = np.matmul(_s, np.swapaxes( _s, -1, -2)) + np.eye(event_shape) cov_param = cov_func[cov_type](sigma) net = TestMVN('sample', cov_type) if hybridize: net.hybridize() with autograd.record(): mx_out = net(loc, cov_param) desired_shape = (loc + sigma[..., 0]).shape assert mx_out.shape == desired_shape mx_out.backward() assert loc.grad.shape == loc.shape assert _s.grad.shape == _s.shape # Test log_prob for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes): for cov_type in cov_func.keys(): for hybridize in [True, False]: loc = np.random.randn(*(loc_shape + (event_shape,))) _s = np.random.randn(*(cov_shape + (event_shape, event_shape))) samples = np.random.normal( np.zeros_like(loc), np.ones_like(_s[..., 0])) loc.attach_grad() _s.attach_grad() # Full covariance matrix sigma = np.matmul(_s, np.swapaxes( _s, -1, -2)) + np.eye(event_shape) cov_param = cov_func[cov_type](sigma) net = TestMVN('log_prob', cov_type) if hybridize: net.hybridize() mx_out = net(loc, cov_param, samples) assert mx_out.shape == samples.shape[:-1] if mx_out.shape == (): mx_out_t = mx_out.asnumpy() else: mx_out_t = mx_out.flatten()[0].asnumpy() samples_t = samples.reshape(-1, event_shape).asnumpy()[0] # Select the first element in the batch, because scipy does not support batching. loc_t = loc.reshape(-1, event_shape)[0].asnumpy() sigma_t = sigma.reshape(-1, event_shape, event_shape)[0].asnumpy() scipy_mvn = ss.multivariate_normal(loc_t, sigma_t) ss_out = scipy_mvn.logpdf(samples_t) assert_almost_equal(mx_out_t, ss_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test entropy for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes): for cov_type in cov_func.keys(): for hybridize in [True, False]: loc = np.random.randn(*(loc_shape + (event_shape,))) _s = np.random.randn(*(cov_shape + (event_shape, event_shape))) loc.attach_grad() _s.attach_grad() # Full covariance matrix sigma = np.matmul(_s, np.swapaxes( _s, -1, -2)) + np.eye(event_shape) cov_param = cov_func[cov_type](sigma) net = TestMVN('entropy', cov_type) if hybridize: net.hybridize() mx_out = net(loc, cov_param) assert mx_out.shape == sigma.shape[:-2] if mx_out.shape == (): mx_out_t = mx_out.asnumpy() else: mx_out_t = mx_out.flatten()[0].asnumpy() # Select the first element in the batch, because scipy does not support batching. 
loc_t = loc.reshape(-1, event_shape)[0].asnumpy() sigma_t = sigma.reshape(-1, event_shape, event_shape)[0].asnumpy() scipy_mvn = ss.multivariate_normal(loc_t, sigma_t) ss_out = scipy_mvn.entropy() assert_almost_equal(mx_out_t, ss_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_gluon_half_normal_v1(): class TestHalfNormal(HybridBlock): def __init__(self, func): super(TestHalfNormal, self).__init__() self._func = func def hybrid_forward(self, F, scale, *args): half_normal = mgp.HalfNormal(scale, F, validate_args=True) return getattr(half_normal, self._func)(*args) shapes = [(), (1,), (2, 3), 6] # Test sampling for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) net = TestHalfNormal("sample") if hybridize: net.hybridize() mx_out = net(scale).asnumpy() if isinstance(shape, Number): shape = (shape,) assert mx_out.shape == shape # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.abs(np.random.normal(size=shape)) net = TestHalfNormal("log_prob") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.halfnorm(0, scale.asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test cdf for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.abs(np.random.normal(size=shape)) net = TestHalfNormal("cdf") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.halfnorm(0, scale.asnumpy()).cdf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test icdf for shape, hybridize in itertools.product(shapes, [True, False]): scale = np.random.uniform(0.5, 1.5, shape) samples = np.random.uniform(size=shape) net = TestHalfNormal("icdf") if hybridize: net.hybridize() mx_out = net(scale, samples).asnumpy() np_out = ss.halfnorm(0, scale.asnumpy()).ppf(samples.asnumpy()) assert_almost_equal(mx_out, np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_affine_transform_v1(): r""" Test the correctness of affine transformation by performing it on a standard normal, since N(\mu, \sigma^2) = \mu + \sigma * N(0, 1) """ class TestAffineTransform(HybridBlock): def __init__(self, func): super(TestAffineTransform, self).__init__() self._func = func def hybrid_forward(self, F, loc, scale, *args): std_normal = mgp.Normal(F.np.zeros_like(loc), F.np.ones_like(scale), F) transforms = [mgp.AffineTransform(loc=0, scale=scale), mgp.AffineTransform(loc=loc, scale=1)] transformed_normal = mgp.TransformedDistribution( std_normal, transforms) if (len(args) == 0): return getattr(transformed_normal, self._func) return getattr(transformed_normal, self._func)(*args) shapes = [(1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) loc.attach_grad() scale = np.random.uniform(0.5, 1.5, shape) scale.attach_grad() samples = np.random.normal(size=shape) net = TestAffineTransform('log_prob') if hybridize: net.hybridize() with autograd.record(): mx_out = net(loc, scale, samples) np_out = _np.log(ss.norm(loc.asnumpy(), scale.asnumpy()).pdf(samples.asnumpy())) assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) mx_out.backward() loc_expected_grad = ((samples - loc) / scale ** 2).asnumpy() scale_expected_grad = (samples - loc) ** 2 * \ 
np.power(scale, -3) - (1 / scale) assert_almost_equal(loc.grad.asnumpy(), loc_expected_grad, atol=1e-4, rtol=1e-3, use_broadcast=False) assert_almost_equal(scale.grad.asnumpy(), scale_expected_grad, atol=1e-4, rtol=1e-3, use_broadcast=False) # Test sampling for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) loc.attach_grad() scale = np.random.uniform(0.5, 1.5, shape) scale.attach_grad() if not isinstance(shape, tuple): shape = (shape,) expected_shape = (4, 5) + shape net = TestAffineTransform('sample') mx_out = net(loc, scale, expected_shape).asnumpy() assert mx_out.shape == expected_shape @use_np def test_compose_transform_v1(): class TestComposeTransform(HybridBlock): def __init__(self, func): super(TestComposeTransform, self).__init__() self._func = func def hybrid_forward(self, F, loc, scale, *args): # Generate a log_normal distribution. std_normal = mgp.Normal(F.np.zeros_like(loc), F.np.ones_like(scale), F) transforms = mgp.ComposeTransform([ mgp.AffineTransform(loc=0, scale=scale), mgp.AffineTransform(loc=loc, scale=1), mgp.ExpTransform() ]) transformed_normal = mgp.TransformedDistribution( std_normal, transforms) if (len(args) == 0): return getattr(transformed_normal, self._func) return getattr(transformed_normal, self._func)(*args) shapes = [(1,), (2, 3), 6] # Test log_prob for shape, hybridize in itertools.product(shapes, [True, False]): loc = np.random.uniform(-1, 1, shape) loc.attach_grad() scale = np.random.uniform(0.5, 1.5, shape) scale.attach_grad() samples = np.random.uniform(1, 2, size=shape) net = TestComposeTransform('log_prob') if hybridize: net.hybridize() with autograd.record(): mx_out = net(loc, scale, samples) np_out = ss.lognorm(s=scale.asnumpy(), scale=np.exp( loc).asnumpy()).logpdf(samples.asnumpy()) assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-4, rtol=1e-3, use_broadcast=False) @use_np def test_cached_property_v1(): x = np.random.normal() x.attach_grad() scale = 0.1 class Dummy(object): def __init__(self, x): super(Dummy, self).__init__() self.x = x @mgp.cached_property def y(self): return scale * self.x + 1 with autograd.record(): obj = Dummy(x) obj.y.backward() assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,))) class DummyBlock(HybridBlock): def hybrid_forward(self, F, x): obj = Dummy(x) return obj.y x = np.random.normal() x.attach_grad() net = DummyBlock() with autograd.record(): y = net(x) y.backward() assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,))) x = np.random.normal() x.attach_grad() net.hybridize() with autograd.record(): y = net(x) y.backward() assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,))) @use_np def test_independent_v1(): class TestIndependent(HybridBlock): def __init__(self, event_dim, func): super(TestIndependent, self).__init__() self._event_dim = event_dim self._func = func def hybrid_forward(self, F, logit, *args): base_dist = mgp.Bernoulli(logit=logit) reshaped_dist = mgp.Independent(base_dist, self._event_dim) return getattr(reshaped_dist, self._func)(*args) event_shapes = [(1,), (4,), (2, 2)] batch_shapes = [(2, 3), (2,)] for (batch_shape, event_shape) in itertools.product(batch_shapes, event_shapes): for hybridize in [False, True]: for func in ['log_prob']: full_shape = batch_shape + event_shape logit = np.random.normal(0, 2, size=full_shape) samples = np.round(np.random.uniform(size=full_shape)) net = TestIndependent(len(event_shape), func) if hybridize: net.hybridize() mx_out = net(logit, samples) assert mx_out.shape == batch_shape @use_np 
def test_gluon_kl_v1(): def _test_zero_kl(p, shape): """Check if KL(p || p) = 0 Parameters ---------- p : Distribution """ mx_out = mgp.kl_divergence(p, p).asnumpy() np_out = _np.zeros(shape) assert_almost_equal(mx_out, np_out, atol=1e-3, rtol=1e-2, use_broadcast=False) def _test_monte_carlo(p, q, M=50000): r"""Check if KL(p || q) is approximately equal to 1/M * \Sum_{i=1}^{M} log(p(x_i) / q(x_i)), x_i ~ p(x) """ kl = mgp.kl_divergence(p, q) mc_approx = mgp.empirical_kl(p, q, M) assert_almost_equal(mc_approx.asnumpy(), kl.asnumpy(), atol=1e-1, rtol=1e-1, use_broadcast=False) def _dist_factory(dist, *param_funcs): """Generate a distribution object with parameters of random value. Parameters ---------- dist : Type A type of distribution. param_funcs : List A list of functions that generate valid parameters for `dist` """ params = [f() if callable(f) else f for f in param_funcs] return dist(*params) # could cause longer runtime and potential flaky tests monte_carlo_test = False repeated_times = 50000 shapes = [(), (1,), (2, 3), 6] # Test kl between same distributions # uniform for shape in shapes: dist = mgp.Uniform def low(): return np.random.uniform(0, 1, shape) def high(): return np.random.uniform(1, 2, shape) _test_zero_kl(_dist_factory(dist, low, high), shape) # normal, laplace, cauchy, gumbel for dist in [mgp.Normal, mgp.Laplace, mgp.Cauchy, mgp.Gumbel]: for shape in shapes: def loc(): return np.random.uniform(-1, 1, shape) def scale(): return np.random.uniform(0.5, 1.5, shape) _test_zero_kl(_dist_factory(dist, loc, scale), shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, loc, scale), _dist_factory(dist, loc, scale), repeated_times) # poisson for shape in shapes[1:]: dist = mgp.Poisson def rate(): return np.random.uniform(0.5, 1.5, shape) _test_zero_kl(_dist_factory(dist, rate), shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, rate), _dist_factory(dist, rate), repeated_times) # exponential, geometric for dist in [mgp.Exponential, mgp.Geometric]: for shape in shapes: def s(): return np.random.uniform(size=shape, low=1e-3) _test_zero_kl(_dist_factory(dist, s), shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, s), _dist_factory(dist, s), repeated_times) # pareto for shape in shapes: dist = mgp.Pareto def alpha(): return np.random.uniform(size=shape) def scale(): return np.random.uniform(size=shape) _test_zero_kl(_dist_factory(dist, alpha, scale), shape) for shape in shapes: dist = mgp.HalfNormal def scale(): return np.random.uniform(0.5, 1.5, shape) _test_zero_kl(_dist_factory(dist, scale), shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, scale), _dist_factory(dist, scale), repeated_times) # gamma, beta for dist in [mgp.Gamma, mgp.Beta]: for shape in shapes: def param1(): return np.random.uniform(0.5, 1.5, shape) def param2(): return np.random.uniform(0.5, 1.5, shape) _test_zero_kl(_dist_factory(dist, param1, param2), shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, param1, param2), _dist_factory(dist, param1, param2), 50000) # binomial for shape in shapes: n = _np.random.randint(5, 10) prob = np.random.uniform(low=0.1, size=shape) dist = mgp.Binomial(n=n, prob=prob) _test_zero_kl(dist, shape) # bernoulli for shape in shapes: prob = np.random.uniform(size=shape) dist = mgp.Bernoulli(prob=prob) _test_zero_kl(dist, shape) event_shapes = [3, 5, 10] loc_shapes = [(), (2,), (4, 2)] cov_shapes = [(), (2,), (4, 2)] for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes): 
loc = np.random.randn(*(loc_shape + (event_shape,))) _s = np.random.randn(*(cov_shape + (event_shape, event_shape))) sigma = np.matmul(_s, np.swapaxes(_s, -1, -2)) + np.eye(event_shape) dist = mgp.MultivariateNormal(loc, cov=sigma) desired_shape = (loc + sigma[..., 0]).shape[:-1] _test_zero_kl(dist, desired_shape) batch_shapes = loc_shapes # dirichlet for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): desired_shape = (batch_shape if batch_shape is not None else ()) dist = mgp.Dirichlet def alpha(): return np.random.uniform( 0.5, 1.5, size=(desired_shape + (event_shape,))) _test_zero_kl(_dist_factory(dist, alpha), desired_shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, alpha), _dist_factory(dist, alpha), 50000) # categorical, One-hot categorical for dist in [mgp.Categorical, mgp.OneHotCategorical]: for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes): prob = (lambda: np.array(_np.random.dirichlet([1 / event_shape] * event_shape, size=batch_shape))) _test_zero_kl(_dist_factory(dist, event_shape, prob), batch_shape) if monte_carlo_test: _test_monte_carlo(_dist_factory(dist, event_shape, prob), _dist_factory(dist, event_shape, prob), repeated_times) # Test kl between different distributions # KL(Uniform || ...) for shape in shapes: rhs_dists = [ mgp.Normal(np.random.uniform(-1, 1, shape), np.random.uniform(0.5, 1.5, shape)), mgp.Gumbel(np.random.uniform(-1, 1, shape), np.random.uniform(0.5, 1.5, shape)), ] for rhs_dist in rhs_dists: low = np.random.uniform(-1, 1, shape) high = low + np.random.uniform(0.5, 1.5, shape) lhs_dist = mgp.Uniform(low, high) kl = mgp.kl_divergence(lhs_dist, rhs_dist) assert kl.shape == low.shape if monte_carlo_test: _test_monte_carlo(lhs_dist, rhs_dist, repeated_times) # KL(Exponential || ...) 
for shape in shapes: rhs_dists = [ mgp.Normal(np.random.uniform(-1, 1, shape), np.random.uniform(0.5, 1.5, shape)), mgp.Gumbel(np.random.uniform(-1, 1, shape), np.random.uniform(0.5, 1.5, shape)), mgp.Gamma(np.random.uniform(0.5, 1.5, shape), np.random.uniform(0.5, 1.5, shape)) ] for rhs_dist in rhs_dists: s = np.random.uniform(size=shape) lhs_dist = mgp.Exponential(s) kl = mgp.kl_divergence(lhs_dist, rhs_dist) assert kl.shape == s.shape if monte_carlo_test: _test_monte_carlo(lhs_dist, rhs_dist, repeated_times) @pytest.mark.garbage_expected @use_np def test_gluon_stochastic_block_v1(): class dummyBlock(StochasticBlock): """In this test case, we generate samples from a Gaussian parameterized by `loc` and `scale` and accumulate the KL-divergence between it and its prior and the l2 norm of `loc` into the block's loss storage.""" @StochasticBlock.collectLoss def hybrid_forward(self, F, loc, scale): qz = mgp.Normal(loc, scale) # prior pz = mgp.Normal(F.np.zeros_like(loc), F.np.ones_like(scale)) self.add_loss(mgp.kl_divergence(qz, pz)) self.add_loss((loc ** 2).sum(1)) return qz.sample() shape = (4, 4) for hybridize in [True, False]: net = dummyBlock() if hybridize: net.hybridize() loc = np.random.randn(*shape) scale = np.random.rand(*shape) mx_out = net(loc, scale).asnumpy() kl = net.losses[0].asnumpy() l2_norm = net.losses[1].asnumpy() assert mx_out.shape == loc.shape assert kl.shape == loc.shape assert l2_norm.shape == shape[:-1] @use_np def test_gluon_stochastic_block_exception_v1(): class problemBlock(StochasticBlock): def hybrid_forward(self, F, loc, scale): qz = mgp.Normal(loc, scale) # prior pz = mgp.Normal(F.np.zeros_like(loc), F.np.ones_like(scale)) self.add_loss(mgp.kl_divergence(qz, pz)) self.add_loss((loc ** 2).sum(1)) return qz.sample() shape = (4, 4) for hybridize in [True, False]: net = problemBlock() if hybridize: net.hybridize() loc = np.random.randn(*shape) scale = np.random.rand(*shape) with pytest.raises(ValueError): mx_out = net(loc, scale).asnumpy() @pytest.mark.garbage_expected @use_np def test_gluon_stochastic_sequential_v1(): class normalBlock(HybridBlock): def hybrid_forward(self, F, x): return (x + 1) class stochasticBlock(StochasticBlock): @StochasticBlock.collectLoss def hybrid_forward(self, F, x): self.add_loss(x ** 2) self.add_loss(x - 1) return (x + 1) class problemBlock(StochasticBlock): def hybrid_forward(self, F, x): self.add_loss(x ** 2) self.add_loss(x - 1) return (x + 1) shape = (4, 4) for hybridize in [True, False]: initial_value = np.ones(shape) net = StochasticSequential() net.add(stochasticBlock()) net.add(normalBlock()) net.add(stochasticBlock()) net.add(normalBlock()) if hybridize: net.hybridize() mx_out = net(initial_value).asnumpy() assert_almost_equal(mx_out, _np.ones(shape) * 5) accumulated_loss = net.losses assert len(accumulated_loss) == 2 assert_almost_equal(accumulated_loss[0][0].asnumpy(), _np.ones(shape)) assert_almost_equal( accumulated_loss[0][1].asnumpy(), _np.ones(shape) - 1) assert_almost_equal( accumulated_loss[1][0].asnumpy(), _np.ones(shape) * 9) assert_almost_equal( accumulated_loss[1][1].asnumpy(), _np.ones(shape) + 1) for hybridize in [True, False]: initial_value = np.ones(shape) net = StochasticSequential() net.add(stochasticBlock()) net.add(normalBlock()) net.add(problemBlock()) net.add(normalBlock()) if hybridize: net.hybridize() with pytest.raises(ValueError): mx_out = net(initial_value).asnumpy() @use_np def test_gluon_constraint_v1(): class TestConstraint(HybridBlock): def __init__(self, constraint_type): 
super(TestConstraint, self).__init__() self._constraint_type = getattr(mgp.constraint, constraint_type) def hybrid_forward(self, F, *params): value = params[0] constraint_param = params[1:] if len(constraint_param) == 0: constraint = self._constraint_type() else: constraint = self._constraint_type(*constraint_param) return constraint.check(value) _s = np.random.randn(5, 10, 10) psd_matrix = np.matmul(_s, np.swapaxes(_s, -1, -2)) + np.eye(_s.shape[-1]) constraints_zoo = [ # (constraint_type, constraint_param, test_samples) ('Real', (), [np.random.randn(2, 2)]), ('Boolean', (), [np.random.randint(0, 20, size=(2, 2)) % 2 == 0]), ('Interval', [np.zeros((2, 2)), np.ones( (2, 2))], [np.random.rand(2, 2)]), ('OpenInterval', [np.zeros((2, 2)), np.ones( (2, 2))], [np.random.rand(2, 2)]), ('HalfOpenInterval', [np.zeros((2, 2)), np.ones((2, 2))], [np.random.rand(2, 2)]), ('IntegerInterval', [np.zeros((2, 2)), np.ones((2, 2)) * 10], [np.random.randint(0, 10, size=(2, 2)).astype('float32')]), ('IntegerOpenInterval', [np.zeros((2, 2)), np.ones((2, 2)) * 10], [np.random.randint(1, 9, size=(2, 2)).astype('float32')]), ('IntegerHalfOpenInterval', [np.zeros((2, 2)), np.ones((2, 2)) * 10], [np.random.randint(1, 9, size=(2, 2)).astype('float32')]), ('GreaterThan', [np.zeros((2, 2))], [np.random.rand(2, 2)]), ('GreaterThanEq', [np.zeros((2, 2))], [np.random.rand(2, 2)]), ('LessThan', [np.ones((2, 2))], [np.random.rand(2, 2)]), ('LessThanEq', [np.ones((2, 2))], [np.random.rand(2, 2)]), ('IntegerGreaterThan', [np.zeros((2, 2))], [np.random.randint(1, 10, size=(2, 2)).astype('float32')]), ('IntegerGreaterThanEq', [np.zeros((2, 2))], [np.random.randint(0, 10, size=(2, 2)).astype('float32')]), ('IntegerLessThan', [np.ones((2, 2)) * 10], [np.random.randint(0, 9, size=(2, 2)).astype('float32')]), ('IntegerLessThanEq', [np.ones((2, 2)) * 10], [np.random.randint(0, 10, size=(2, 2)).astype('float32')]), ('Positive', (), [np.random.rand(2, 2)]), ('NonNegative', (), [np.random.rand(2, 2)]), ('PositiveInteger', (), [np.random.randint( 1, 5, size=(2, 2)).astype('float32')]), ('NonNegativeInteger', (), [np.random.randint( 0, 5, size=(2, 2)).astype('float32')]), ('Simplex', (), [npx.softmax(np.random.randn(4, 4), axis=-1)]), ('LowerTriangular', (), [np.tril(np.random.randn(5, 3, 3))]), ('LowerCholesky', (), [np.linalg.cholesky(psd_matrix)]), ('PositiveDefinite', (), [psd_matrix]), ] for (constraint_type, constraint_arg, test_samples) in constraints_zoo: for hybridize in [True, False]: net = TestConstraint(constraint_type) if hybridize: net.hybridize() for test_sample in test_samples: mx_out = net(test_sample, *constraint_arg).asnumpy() assert_almost_equal(mx_out, test_sample.asnumpy()) @use_np def test_gluon_domain_map_v1(): class TestDomainMap(HybridBlock): def __init__(self, constraint_type, bijective): super(TestDomainMap, self).__init__() self._constraint_type = getattr(mgp.constraint, constraint_type) def hybrid_forward(self, F, *params): value = params[0] constraint_param = params[1:] if len(constraint_param) == 0: constraint = self._constraint_type() else: constraint = self._constraint_type(*constraint_param) if bijective: bijector = mgp.biject_to(constraint) bijector.F = F value = bijector(value) else: transformation = mgp.transform_to(constraint) transformation.F = F value = transformation(value) return (value, constraint.check(value)) constraints_zoo = [ # (constraint_type, constraint_param) ('Positive', ()), ('GreaterThan', [np.random.randn(2, 2)]), ('GreaterThanEq', [np.random.randn(2, 2)]), ('LessThan', 
[np.random.randn(2, 2)]), ('Interval', [np.random.uniform(0, 1, (2, 2)), np.random.uniform(2, 3, (2, 2))]), ('HalfOpenInterval', [np.random.uniform( 0, 1, (2, 2)), np.random.uniform(2, 3, (2, 2))]) ] test_sample = np.random.randn(2, 2) for (constraint_type, constraint_arg) in constraints_zoo: for bijective in [True, False]: for hybridize in [True, False]: net = TestDomainMap(constraint_type, bijective) if hybridize: net.hybridize() constrained_out, constraint_status = net( test_sample, *constraint_arg) assert_almost_equal(constrained_out.asnumpy(), constraint_status.asnumpy())
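# ---------------------------------------------------------------------------
# A condensed sketch (my addition, not one of the tests above) of the pattern
# this whole suite follows: parameterize a Gluon distribution, evaluate one of
# its methods, and compare against the scipy.stats reference. It only uses
# names already imported in this file (mgp, np, ss, use_np, assert_almost_equal);
# the leading underscore keeps pytest from collecting it as a test.
@use_np
def _normal_log_prob_sketch():
    loc = np.random.uniform(-1, 1, (2, 3))
    scale = np.random.uniform(0.5, 1.5, (2, 3))
    samples = np.random.normal(size=(2, 3))
    dist = mgp.Normal(loc, scale)
    mx_out = dist.log_prob(samples).asnumpy()
    np_out = ss.norm(loc.asnumpy(), scale.asnumpy()).logpdf(samples.asnumpy())
    assert_almost_equal(mx_out, np_out, atol=1e-4,
                        rtol=1e-3, use_broadcast=False)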
leezu/mxnet
tests/python/unittest/test_gluon_probability_v1.py
Python
apache-2.0
97,927
[ "Gaussian" ]
4599ce21c2a4170558d3008816222ce754ba0474bf060cd19fa45f5e988fd957
#Purpose: To implement a suite of 3D shape statistics and to use them for point #cloud classification #TODO: Fill in all of this code for group assignment 2 import sys sys.path.append("S3DGLPy") from Primitives3D import * from PolyMesh import * import numpy as np import matplotlib.pyplot as plt POINTCLOUD_CLASSES = ['biplane', 'desk_chair', 'dining_chair', 'fighter_jet', 'fish', 'flying_bird', 'guitar', 'handgun', 'head', 'helicopter', 'human', 'human_arms_out', 'potted_plant', 'race_car', 'sedan', 'shelves', 'ship', 'sword', 'table', 'vase'] NUM_PER_CLASS = 10 ######################################################### ## UTILITY FUNCTIONS ## ######################################################### #Purpose: Export a sampled point cloud into the JS interactive point cloud viewer #Inputs: Ps (3 x N array of points), Ns (3 x N array of estimated normals), #filename: Output filename def exportPointCloud(Ps, Ns, filename): N = Ps.shape[1] fout = open(filename, "w") fmtstr = "%g" + " %g"*5 + "\n" for i in range(N): fields = np.zeros(6) fields[0:3] = Ps[:, i] fields[3:] = Ns[:, i] fout.write(fmtstr%tuple(fields.flatten().tolist())) fout.close() #Purpose: To sample a point cloud, center it on its centroid, and #then scale all of the points so that the RMS distance to the origin is 1 def samplePointCloud(mesh, N): (Ps, Ns) = mesh.randomlySamplePoints(N) ##TODO: Center the point cloud on its centroid and normalize #by its root mean square distance to the origin. Note that this #does not change the normals at all, only the points, since it's a #uniform scale centroid = np.mean(Ps, 1)[:, None] #return 3 by 1 Ps -= centroid; scale = np.sqrt(np.sum(np.square(Ps))/N) Ps /= scale; return (Ps, Ns) #Purpose: To sample the unit sphere as evenly as possible. The higher #res is, the more samples are taken on the sphere (in an exponential #relationship with res). 
#By default, samples 66 points
def getSphereSamples(res = 2):
    m = getSphereMesh(1, res)
    return m.VPos.T

#Purpose: To compute PCA on a point cloud
#Inputs: X (3 x N array representing a point cloud)
def doPCA(X):
    #np.linalg.eigh returns eigenvalues in ascending order
    return np.linalg.eigh(X.dot(X.T))

#########################################################
##                SHAPE DESCRIPTORS                    ##
#########################################################

#Purpose: To compute a shape histogram, counting points
#distributed in concentric spherical shells centered at the origin
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency),
#NShells (number of shells), RMax (maximum radius)
#Returns: hist (histogram of length NShells)
def getShapeHistogram(Ps, Ns, NShells, RMax):
    #Count points at radius >= each shell's inner boundary, then take
    #adjacent differences to get per-shell counts; points beyond RMax are
    #subtracted from the outermost shell
    H = np.sqrt(np.sum(Ps**2, 0))[None, :] - np.linspace(0, RMax, NShells, False)[:, None]
    S = np.sum((H >= 0).reshape(NShells, Ps.shape[1]), 1)
    N = np.resize(S[1:], NShells)
    N[-1] = np.sum(np.sqrt(np.sum(Ps**2, 0)) > RMax)
    return S - N

#Purpose: To create shape histogram with concentric spherical shells and
#sectors within each shell, sorted in decreasing order of number of points
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), NShells (number of shells),
#RMax (maximum radius), SPoints: A 3 x S array of points sampled evenly on
#the unit sphere (get these with the function "getSphereSamples")
def getShapeShellHistogram(Ps, Ns, NShells, RMax, SPoints):
    NSectors = SPoints.shape[1] #A number of sectors equal to the number of
    #points sampled on the sphere
    #Create a 2D histogram that is NShells x NSectors
    hist = np.zeros((NShells, NSectors))
    bins = np.linspace(0, RMax, NShells, False)
    #Subtract 1 so that shell i covers bins[i] <= r < bins[i+1]; np.digitize
    #returns indices starting at 1 for r >= bins[0], so without the -1 the
    #first row stays empty and the outermost shell is never visited
    indx = np.digitize(np.sqrt(np.sum(np.square(Ps), axis=0)), bins) - 1
    for i in range(NShells):
        subList = Ps[:, indx == i]
        dirList = np.argmax(subList.T.dot(SPoints), axis=1) #nearest sector for each point in the shell
        count = np.bincount(dirList)
        hist[i, :count.shape[0]] = np.sort(count)[::-1] #double slicing reverses the sort order
    return hist.flatten() #Flatten the 2D histogram to a 1D array
    #alternative approach
    # raw = SPoints.T.dot(Ps)
    # sector = np.argmax(raw, axis=0)
    # dist = np.sqrt(np.sum(np.square(Ps)))
    # combined = zip(sector, dist) #combine two lists into tuples
    # sorted(combined, key=lambda x: x[0]) #sort the list according to dist value

#Purpose: To create shape histogram with concentric spherical shells and to
#compute the PCA eigenvalues in each shell
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), NShells (number of shells),
#RMax (maximum radius)
def getShapeHistogramPCA(Ps, Ns, NShells, RMax):
    #Create a 2D histogram, with 3 eigenvalues for each shell
    hist = np.zeros((NShells, 3))
    bins = np.linspace(0, RMax, NShells, False)
    indx = np.digitize(np.sqrt(np.sum(np.square(Ps), axis=0)), bins) - 1
    for i in range(NShells):
        sub = Ps[:, indx == i]
        (eigVs, eigVecs) = doPCA(sub)
        hist[i, :] = np.sort(eigVs)[::-1] #largest eigenvalue first
    return hist.flatten() #Flatten the 2D histogram to a 1D array
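#A tiny sanity check (my addition, not part of the assignment) for the basic
#shell histogram: with RMax = 3 and three shells, points at radii 0.1, 1.1
#and 2.9 should land in the first, second and third shells respectively.
#Only names defined above are used; it is not called anywhere by default.
def _shellHistogramSanityCheck():
    Ps = np.array([[0.1, 0.0, 0.0], [0.0, 1.1, 0.0], [0.0, 0.0, 2.9]]).T
    hist = getShapeHistogram(Ps, None, 3, 3.0)
    assert np.allclose(hist, [1, 1, 1])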
#Purpose: To create shape histogram of the pairwise Euclidean distances between
#randomly sampled points in the point cloud
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), DMax (Maximum distance to consider),
#NBins (number of histogram bins), NSamples (number of pairs of points sampled
#to compute distances)
def getD2Histogram(Ps, Ns, DMax, NBins, NSamples):
    N = Ps.shape[1]
    #np.random.randint (exclusive upper bound) replaces the removed
    #np.random.random_integers (inclusive upper bound)
    S1 = Ps[:, np.random.randint(0, N, NSamples)]
    S2 = Ps[:, np.random.randint(0, N, NSamples)]
    D2 = np.sqrt(np.sum((S1 - S2)**2, 0))
    hist, be = np.histogram(D2, NBins, (0, DMax))
    return hist

#Purpose: To create shape histogram of the angles between randomly sampled
#triples of points
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), NBins (number of histogram bins),
#NSamples (number of triples of points sampled to compute angles)
def getA3Histogram(Ps, Ns, NBins, NSamples):
    N = Ps.shape[1]
    S1 = Ps[:, np.random.randint(0, N, NSamples)]
    S2 = Ps[:, np.random.randint(0, N, NSamples)]
    S3 = Ps[:, np.random.randint(0, N, NSamples)]
    V1 = S1 - S2
    L1 = np.sqrt(np.sum(V1**2, 0))
    V2 = S1 - S3
    L2 = np.sqrt(np.sum(V2**2, 0))
    #Discard degenerate triples where two sampled points coincide
    valid = (L1 > 0) * (L2 > 0)
    V1 = V1[:, valid] / L1[valid]
    V2 = V2[:, valid] / L2[valid]
    C = np.sum(V1 * V2, 0)
    D2S = np.sum((V1 - V2)**2, 0)
    C[D2S == 0] = 1
    C = np.clip(C, -1, 1) #guard arccos against rounding error outside [-1, 1]
    A3 = np.arccos(C)
    hist, be = np.histogram(A3, NBins, (0, np.pi))
    return hist

#Purpose: To create the Extended Gaussian Image by binning normals to
#sphere directions after rotating the point cloud to align with its principal axes
#Inputs: Ps (3 x N point cloud) (use to compute PCA), Ns (3 x N array of normals),
#SPoints: A 3 x S array of points sampled evenly on the unit sphere used to
#bin the normals
def getEGIHistogram(Ps, Ns, SPoints):
    S = SPoints.shape[1]
    hist = np.zeros(S)
    ##TODO: Finish this; fill in hist
    return hist

#Purpose: To create an image which stores the amalgamation of rotating
#a bunch of planes around the largest principal axis of a point cloud and
#projecting the points on the minor axes onto the image.
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals, not needed here),
#NAngles: The number of angles between 0 and 2*pi through which to rotate
#the plane, Extent: The extent of each axis, Dim: The number of pixels along
#each minor axis
def getSpinImage(Ps, Ns, NAngles, Extent, Dim):
    #Project onto the two minor principal axes (eigh returns eigenvalues in
    #ascending order, so the first two columns of V span the minor axes)
    eigs, V = doPCA(Ps)
    P = V[:, :2].T.dot(Ps)
    As = np.linspace(0, 2*np.pi, NAngles, False)
    C, S = np.cos(As), np.sin(As)
    A = np.zeros((NAngles, 2, 2)) #a stack of 2D rotation matrices
    A[:, 0, 0], A[:, 0, 1], A[:, 1, 0], A[:, 1, 1] = C, -S, S, C
    P = A.dot(P)
    x = P[:, 0, :].flatten()
    y = P[:, 1, :].flatten()
    hist, xe, ye = np.histogram2d(x, y, Dim, [[-Extent, Extent], [-Extent, Extent]])
    return hist.flatten()

#Purpose: To create a histogram of spherical harmonic magnitudes in concentric
#spheres after rasterizing the point cloud to a voxel grid
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals, not used here),
#VoxelRes: The number of voxels along each axis (for instance, if 30, then rasterize
#to 30x30x30 voxels), Extent: The number of units along each axis (if 2, then
#rasterize in the box [-1, 1] x [-1, 1] x [-1, 1]), NHarmonics: The number of spherical
#harmonics, NSpheres, the number of concentric spheres to take
def getSphericalHarmonicMagnitudes(Ps, Ns, VoxelRes, Extent, NHarmonics, NSpheres):
    hist = np.zeros((NSpheres, NHarmonics))
    #TODO: Finish this
    return hist.flatten()
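#The stub above is left as a TODO; the following is a rough sketch (my
#addition, under stated assumptions) of one way to get rotation-invariant
#spherical harmonic magnitudes. It works on the sampled points directly
#rather than the rasterized voxel grid described above, and it assumes
#scipy.special.sph_harm(m, l, azimuthal, polar) is available.
from scipy.special import sph_harm
def getSphericalHarmonicMagnitudesSketch(Ps, Ns, NHarmonics, NSpheres, RMax=2.0):
    hist = np.zeros((NSpheres, NHarmonics))
    R = np.sqrt(np.sum(Ps**2, 0))
    azim = np.arctan2(Ps[1, :], Ps[0, :]) #azimuthal angle in [-pi, pi]
    polar = np.arccos(np.clip(Ps[2, :]/(R + 1e-12), -1, 1)) #polar angle in [0, pi]
    shell = np.minimum((R/RMax*NSpheres).astype(int), NSpheres - 1)
    for s in range(NSpheres):
        mask = (shell == s)
        for l in range(NHarmonics):
            #The total energy in each degree l is invariant to rotations
            #of the point cloud
            energy = 0.0
            for m in range(-l, l + 1):
                energy += np.abs(np.sum(sph_harm(m, l, azim[mask], polar[mask])))**2
            hist[s, l] = np.sqrt(energy)
    return hist.flatten()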
#Inputs: PointClouds (a python list of N point clouds), Normals (a python
#list of the N corresponding normals), histFunction (a function
#handle for one of the above functions), *args (additional arguments
#that the descriptor function needs)
#Returns: AllHists (A KxN matrix of all descriptors, where K is the length
#of each descriptor)
def makeAllHistograms(PointClouds, Normals, histFunction, *args):
    N = len(PointClouds)
    #Call on first point cloud to figure out the dimensions of the histogram
    h0 = histFunction(PointClouds[0], Normals[0], *args)
    K = h0.size
    AllHists = np.zeros((K, N))
    AllHists[:, 0] = h0
    for i in range(1, N):
        print("Computing histogram %i of %i..."%(i+1, N))
        AllHists[:, i] = histFunction(PointClouds[i], Normals[i], *args)
    return AllHists

#########################################################
##            HISTOGRAM COMPARISONS                    ##
#########################################################

#Purpose: To compute the Euclidean distance between a set
#of histograms
#Inputs: AllHists (K x N matrix of histograms, where K is the length
#of each histogram and N is the number of point clouds)
#Returns: D (An N x N matrix, where the ij entry is the Euclidean
#distance between the histogram for point cloud i and point cloud j)
def compareHistsEuclidean(AllHists):
    N = AllHists.shape[1]
    NormHists = AllHists / (AllHists.sum(axis=0)*1.0) #normalize each histogram to unit mass
    HistsSquare = np.sum(NormHists*NormHists, 0)
    #||hi - hj||^2 = ||hi||^2 + ||hj||^2 - 2<hi, hj>
    P = HistsSquare[:, None] + HistsSquare[None, :]
    Q = np.dot(NormHists.T, NormHists)
    D = P - 2*Q
    D[D < 0] = 0 #clamp small negative values caused by rounding
    return np.sqrt(D)

#Purpose: To compute the cosine distance between a set
#of histograms
#Inputs: AllHists (K x N matrix of histograms, where K is the length
#of each histogram and N is the number of point clouds)
#Returns: D (An N x N matrix, where the ij entry is the cosine
#distance between the histogram for point cloud i and point cloud j)
def compareHistsCosine(AllHists):
    N = AllHists.shape[1]
    NormHists = AllHists / (AllHists.sum(axis=0)*1.0)
    D = np.zeros((N, N))
    for i in range(N):
        for j in range(i+1):
            D[i, j] = np.dot(NormHists[:, i], NormHists[:, j]) / \
                (np.linalg.norm(NormHists[:, i])*np.linalg.norm(NormHists[:, j]))
            D[j, i] = D[i, j]
    return 1 - D

#Purpose: To compute the chi squared distance between a set
#of histograms
#Inputs: AllHists (K x N matrix of histograms, where K is the length
#of each histogram and N is the number of point clouds)
#Returns: D (An N x N matrix, where the ij entry is the chi squared
#distance between the histogram for point cloud i and point cloud j)
def compareHistsChiSquared(AllHists):
    N = AllHists.shape[1]
    NormHists = AllHists / (AllHists.sum(axis=0)*1.0)
    D = np.zeros((N, N))
    for i in range(N):
        for j in range(i):
            num = (NormHists[:, i] - NormHists[:, j])**2
            den = NormHists[:, i] + NormHists[:, j]
            valid = den > 0 #skip bins that are empty in both histograms (0/0)
            D[i, j] = 0.5*np.sum(num[valid]/den[valid])
            D[j, i] = D[i, j]
    return D

#Purpose: To compute the 1D Earth mover's distance between a set
#of histograms (note that this only makes sense for 1D histograms)
#Inputs: AllHists (K x N matrix of histograms, where K is the length
#of each histogram and N is the number of point clouds)
#Returns: D (An N x N matrix, where the ij entry is the earth mover's
#distance between the histogram for point cloud i and point cloud j)
def compareHistsEMD1D(AllHists):
    N = AllHists.shape[1]
    NormHists = AllHists / (AllHists.sum(axis=0)*1.0)
    #For equal-mass 1D histograms the earth mover's distance is the L1
    #distance between the cumulative histograms, not between the raw bins
    CDFs = np.cumsum(NormHists, axis=0)
    D = np.zeros((N, N))
    for i in range(N):
        for j in range(i):
            D[i, j] = np.sum(np.abs(CDFs[:, i] - CDFs[:, j]))
            D[j, i] = D[i, j]
    return D
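#A quick demo (my addition, not part of the assignment) contrasting the
#corrected EMD with the Euclidean distance: shifting a spike by one bin moves
#very little mass, which EMD reflects but bin-wise Euclidean distance cannot.
#Only names defined above are used; it is not called anywhere by default.
def _emdVersusEuclideanDemo():
    toy = np.zeros((10, 3))
    toy[2, 0] = 1.0 #a spike in bin 2
    toy[3, 1] = 1.0 #the same spike shifted by one bin
    toy[9, 2] = 1.0 #a spike far away
    print(compareHistsEuclidean(toy)) #both shifts look equally far (sqrt(2))
    print(compareHistsEMD1D(toy)) #EMD: one bin of shift vs. seven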
#########################################################
##              CLASSIFICATION CONTEST                 ##
#########################################################

#Purpose: To implement your own custom distance matrix between all point
#clouds for the point cloud classification contest
#Inputs: PointClouds, an array of point cloud matrices, Normals: an array
#of normal matrices
#Returns: D: A N x N matrix of distances between point clouds based
#on your metric, where Dij is the distance between point cloud i and point cloud j
def getMyShapeDistances(PointClouds, Normals):
    #TODO: Finish this
    #This is just an example, but you should experiment to find which features
    #work the best, and possibly come up with a weighted combination of
    #different features (a concrete blending sketch appears after the main
    #tests below)
    HistsD2 = makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000)
    DEuc = compareHistsEuclidean(HistsD2)
    return DEuc

#########################################################
##                     EVALUATION                      ##
#########################################################

#Purpose: To return an average precision recall graph for a collection of
#shapes given the similarity scores of all pairs of histograms.
#Inputs: D (An N x N matrix, where the ij entry is the earth mover's distance
#between the histogram for point cloud i and point cloud j). It is assumed
#that the point clouds are presented in contiguous chunks of classes, and that
#there are "NPerClass" point clouds per each class (for the dataset provided
#there are 10 per class so that's the default argument). So the program should
#return a precision recall graph that has 9 elements
#Returns PR, an (NPerClass-1) length array of average precision values for all
#recalls
def getPrecisionRecall(D, NPerClass = 10):
    N = D.shape[0]
    #W[i] is the class index of row i; subtracting it from the neighbors'
    #class indices marks same-class entries with zero
    W = np.floor(np.linspace(0, N/NPerClass, N, False))
    sortIdx = np.floor(np.argsort(D, 1) / NPerClass - W[:, None])
    B = np.zeros(N)[:, None] + np.arange(N)[None, :]
    under = B[sortIdx == 0].reshape(N, NPerClass)[:, 1:]
    up = np.arange(NPerClass-1)+1
    return np.mean(up/under, 0)

#########################################################
##                    MAIN TESTS                       ##
#########################################################

if __name__ == '__main__':
    NRandSamples = 10000 #You can tweak this number
    np.random.seed(100) #For repeatable results randomly sampling
    #Load in and sample all meshes
    PointClouds = []
    Normals = []
    for i in range(len(POINTCLOUD_CLASSES)):
        print "LOADING CLASS %i of %i..."%(i, len(POINTCLOUD_CLASSES))
        PCClass = []
        for j in range(NUM_PER_CLASS):
            m = PolyMesh()
            filename = "models_off/%s%i.off"%(POINTCLOUD_CLASSES[i], j)
            print "Loading ", filename
            m.loadOffFileExternal(filename)
            (Ps, Ns) = samplePointCloud(m, NRandSamples)
            PointClouds.append(Ps)
            Normals.append(Ns)

    recalls = np.linspace(1.0/9.0, 1.0, 9)
    plt.hold(True)
    # Compare All Features
    SPoints = getSphereSamples(2)
    HistsSH = makeAllHistograms(PointClouds, Normals, getShapeHistogram, 30, 3)
    HistsSSH = makeAllHistograms(PointClouds, Normals, getShapeShellHistogram, 30, 3, SPoints)
    HistsSHPCA = makeAllHistograms(PointClouds, Normals, getShapeHistogramPCA, 30, 3)
    HistsSpin = makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 40)
    # HistsEGI = makeAllHistograms(PointClouds, Normals, getEGIHistogram, SPoints)
    HistsA3 = makeAllHistograms(PointClouds, Normals, getA3Histogram, 30, 100000)
    HistsD2 = makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000)
    DSH = compareHistsEuclidean(HistsSH)
    DSSH = compareHistsEuclidean(HistsSSH)
    DSSHPCA = compareHistsEuclidean(HistsSHPCA)
    DSpin = compareHistsEuclidean(HistsSpin)
    # DEGI =
compareHistsEuclidean(HistsEGI) DA3 = compareHistsEuclidean(HistsA3) DD2 = compareHistsEuclidean(HistsD2) PRSH = getPrecisionRecall(DSH) PRSSH = getPrecisionRecall(DSSH) PRSHPCA = getPrecisionRecall(DSSHPCA) PRSpin = getPrecisionRecall(DSpin) # PREGI = getPrecisionRecall(DEGI) PRA3 = getPrecisionRecall(DA3) PRD2 = getPrecisionRecall(DD2) plt.plot(recalls, PRSH, 'g', label='Basic Shell') plt.plot(recalls, PRSSH, 'y', label='Sorted Sectors') plt.plot(recalls, PRSHPCA, 'm', label='Shell PCA') plt.plot(recalls, PRSpin, 'b', label='Spin') # plt.plot(recalls, PREGI, 'c', label='EGI') plt.plot(recalls, PRA3, 'k', label='A3') plt.plot(recalls, PRD2, 'r', label='D2') # # Random Histogram # PRrand = getPrecisionRecall(compareHistsEuclidean(np.random.randn(30, NRandSamples))) # plt.plot(recalls, PRrand, 'r', label='Random Histogram') # # Basic Shell Histograms with different values of NShells # PRSH1 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 10, 3))) # PRSH2 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 20, 3))) # PRSH3 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 30, 3))) # PRSH4 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 40, 3))) # PRSH8 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 80, 3))) # plt.plot(recalls, PRSH1, 'y', label='10 Shells') # plt.plot(recalls, PRSH2, 'b', label='20 Shells') # plt.plot(recalls, PRSH3, 'c', label='30 Shells') # plt.plot(recalls, PRSH4, 'k', label='40 Shells') # plt.plot(recalls, PRSH8, 'r', label='80 Shells') # # D2 Histograms with different values of NSamples # PRD2a = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 25000))) # PRD2b = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 50000))) # PRD2c = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000))) # PRD2d = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 200000))) # PRD2e = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 400000))) # plt.plot(recalls, PRD2a, 'y', label='25000 Points') # plt.plot(recalls, PRD2b, 'b', label='50000 Points') # plt.plot(recalls, PRD2c, 'c', label='100000 Points') # plt.plot(recalls, PRD2d, 'k', label='200000 Points') # plt.plot(recalls, PRD2e, 'r', label='400000 Points') # # # Compare All Distance Metrics on Basic Shell Histograms (Chi2 doesn't work) # # PReuc = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 30, 3))) # # PRcos = getPrecisionRecall(compareHistsCosine(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 30, 3))) # # PRchi = getPrecisionRecall(compareHistsChiSquared(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 30, 3))) # # PRemd = getPrecisionRecall(compareHistsEMD1D(makeAllHistograms(PointClouds, Normals, getShapeHistogram, 30, 3))) # # # Compare All Distance Metrics on D2 Histograms # # PReuc = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000))) # # PRcos = getPrecisionRecall(compareHistsCosine(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 
100000))) # # PRchi = getPrecisionRecall(compareHistsChiSquared(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000))) # # PRemd = getPrecisionRecall(compareHistsEMD1D(makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000))) # # # Compare All Distance Metrics on A3 Histograms # # PReuc = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getA3Histogram, 30, 100000))) # # PRcos = getPrecisionRecall(compareHistsCosine(makeAllHistograms(PointClouds, Normals, getA3Histogram, 30, 100000))) # # PRchi = getPrecisionRecall(compareHistsChiSquared(makeAllHistograms(PointClouds, Normals, getA3Histogram, 30, 100000))) # # PRemd = getPrecisionRecall(compareHistsEMD1D(makeAllHistograms(PointClouds, Normals, getA3Histogram, 30, 100000))) # plt.plot(recalls, PReuc, 'b', label='Euclidean') # plt.plot(recalls, PRcos, 'c', label='Cosine') # plt.plot(recalls, PRchi, 'k', label='ChiSquared') # plt.plot(recalls, PRemd, 'r', label='EMD1D') # # Spin Images with different values of Dim # PRSpin1 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 20))) # PRSpin2 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 40))) # PRSpin3 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 80))) # PRSpin4 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 160))) # PRSpin5 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 320))) # plt.plot(recalls, PRSpin1, 'y', label='20 Pixels') # plt.plot(recalls, PRSpin2, 'b', label='40 Pixels') # plt.plot(recalls, PRSpin3, 'c', label='80 Pixels') # plt.plot(recalls, PRSpin4, 'k', label='160 Pixels') # plt.plot(recalls, PRSpin5, 'r', label='320 Pixels') # # Spin Images with different values of NAngles (little difference) # PRSpin1 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 25, 2, 40))) # PRSpin2 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 50, 2, 40))) # PRSpin3 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 100, 2, 40))) # PRSpin4 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 200, 2, 40))) # PRSpin5 = getPrecisionRecall(compareHistsEuclidean(makeAllHistograms(PointClouds, Normals, getSpinImage, 400, 2, 40))) # plt.plot(recalls, PRSpin1, 'y', label='25 Angles') # plt.plot(recalls, PRSpin2, 'b', label='50 Angles') # plt.plot(recalls, PRSpin3, 'c', label='100 Angles') # plt.plot(recalls, PRSpin4, 'k', label='200 Angles') # plt.plot(recalls, PRSpin5, 'r', label='400 Angles') plt.xlabel('Recall') plt.ylabel('Precision') plt.legend() plt.show()
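#Following the hint in getMyShapeDistances above, one concrete (untuned)
#way to blend several features is to normalize each distance matrix and
#take a weighted sum; the 0.6/0.4 weights below are placeholders to
#experiment with, not values validated against the contest data.
def getCombinedShapeDistances(PointClouds, Normals):
    HistsD2 = makeAllHistograms(PointClouds, Normals, getD2Histogram, 3.0, 30, 100000)
    HistsA3 = makeAllHistograms(PointClouds, Normals, getA3Histogram, 30, 100000)
    DD2 = compareHistsEuclidean(HistsD2)
    DA3 = compareHistsEuclidean(HistsA3)
    #Scale each matrix to [0, 1] so neither feature dominates the blend
    DD2 = DD2 / np.max(DD2)
    DA3 = DA3 / np.max(DA3)
    return 0.6*DD2 + 0.4*DA3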
stevenyy/ShapeGoogle
ShapeStatistics.py
Python
mit
23,901
[ "Gaussian" ]
894a80902a064d334cfdb64a362a714b1be9e1117ac9522d2ed2d91041566ed6
#============================================================================
#
#  Copyright (c) Kitware Inc.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0.txt
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
#============================================================================

import imp, sys, os, unittest
from __main__ import vtk, qt, ctk, slicer

class AbstractInteractiveSegmentTubes():
    def __init__(self):
        self._Observations = []

    def removeObservers(self, method, objectType = ''):
        # Iterate over a copy: removing entries from the list that is being
        # iterated over would silently skip the following entry.
        for o, e, m, g, t in list(self._Observations):
            if objectType != '' and o.GetClassName() != objectType:
                continue
            if method == m:
                o.RemoveObserver(t)
                self._Observations.remove([o, e, m, g, t])

    def addObserver(self, object, event, method, group = 'none'):
        if self.hasObserver(object, event, method):
            print('already has observer')
            return
        tag = object.AddObserver(event, method)
        self._Observations.append([object, event, method, group, tag])

    def hasObserver(self, object, event, method):
        for o, e, m, g, t in self._Observations:
            if o == object and e == event and m == method:
                return True
        return False

    def observer(self, event, method):
        for o, e, m, g, t in self._Observations:
            if e == event and m == method:
                return o
        return None

    def removeAllObservers(self):
        # Detach every observer first, then drop the bookkeeping list in one
        # go (removing while iterating would leave half the entries behind).
        for o, e, m, g, t in self._Observations:
            o.RemoveObserver(t)
        self._Observations = []

#
# Segment Tubes logic
#

class SegmentTubesLogic(AbstractInteractiveSegmentTubes):
    '''Logic used to drive the Segment Tubes cli from python.'''
    def __init__(self):
        AbstractInteractiveSegmentTubes.__init__(self)
        self.currentOutputNode = None

    def addDisplayNodes( self, node ):
        '''Add the necessary display nodes for a spatial object.
        '''
        if node:
            spatialObjectLogic = slicer.modules.spatialobjects.logic()
            spatialObjectLogic.AddDisplayNodes(node)

    def getFilenameFromNode(self, node):
        '''Returns a filename for a spatial object.
        If no storage node exists, one is created and the filename will
        point to a file in the temp directory.
        '''
        if not node:
            return ''
        storageNode = node.GetNthStorageNode(0)
        if not storageNode or not storageNode.GetFileName():
            # Save it in temp dir
            tempPath = slicer.app.temporaryPath
            nodeName = os.path.join(tempPath, node.GetName() + '.tre')
            if os.path.isfile(nodeName):
                os.remove(nodeName)
            spatialObjectLogic = slicer.modules.spatialobjects.logic()
            spatialObjectLogic.SaveSpatialObject(nodeName, node)
            return nodeName
        return storageNode.GetFileName()

    def getCLINode(self, module, nodeName = None):
        '''Return the cli node corresponding to the given module if the
        module isn't a scripted module. If a name is given, this module
        will be renamed accordingly.
        '''
        if not nodeName:
            nodeName = module.title
        cliNode = slicer.mrmlScene.GetFirstNodeByName(nodeName)
        # Also check path to make sure the CLI isn't a scripted module
        if (cliNode == None) and ('qt-scripted-modules' not in module.path):
            cliNode = slicer.cli.createNode(module)
            cliNode.SetName(nodeName)
        return cliNode

    def run( self, run, parameters, callback ):
        '''Run the segment tubes cli with the given parameters.
        If run is false, the cli is cancelled.
        If a parameter with the name 'OutputNode' exists, this node will be
        updated to the output spatial object automatically after a
        successful execution.
        The callback function can be given by the user to receive feedback
        on the segment tube cli execution. It is up to the user to stop the
        callback from listening to the cli.
        '''
        if run:
            cliNode = self.getCLINode(slicer.modules.segmenttubes)
            if callback:
                self.observeCLINode(cliNode, callback)
            self.observeCLINode(cliNode, self.loadTREFileCallback)
            try:
                self.currentOutputNode = parameters['OutputNode']
            except KeyError:
                self.currentOutputNode = None
            cliNode = slicer.cli.run(slicer.modules.segmenttubes, cliNode, parameters, wait_for_completion = False)
        else:
            cliNode = self.getCLINode(slicer.modules.segmenttubes)
            if cliNode:
                cliNode.Cancel()

    def loadTREFileCallback( self, cliNode, *unused ):
        ''' ** Protected **
        Callback to automatically reload the output spatial object.
        '''
        if cliNode.GetStatusString() == 'Completed' and self.currentOutputNode:
            spatialObjectLogic = slicer.modules.spatialobjects.logic()
            spatialObjectLogic.SetSpatialObject(self.currentOutputNode, cliNode.GetParameterAsString('outputTubeFile'))

    def observeCLINode(self, cliNode, onCLINodeModified = None):
        '''Utility function to observe the given cli StatusModifiedEvent.
        '''
        if cliNode != None and onCLINodeModified != None:
            self.addObserver(cliNode, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, onCLINodeModified)
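# A hypothetical usage sketch of the observer bookkeeping above (the node
# name and callback are illustrative; vtk.vtkCommand.ModifiedEvent is a
# standard VTK event), kept as a comment so it does not run at import time:
#
#   logic = SegmentTubesLogic()
#   volumeNode = slicer.mrmlScene.GetFirstNodeByName('MyVolume')
#
#   def onNodeModified(caller, event):
#       print('node was modified')
#
#   # register once; addObserver refuses duplicate registrations
#   logic.addObserver(volumeNode, vtk.vtkCommand.ModifiedEvent, onNodeModified)
#   # ... later, detach the callback again
#   logic.removeObservers(onNodeModified)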
agirault/VesselView
Modules/Scripted/InteractiveSegmentTubes/InteractiveSegmentTubesLogic/SegmentTubesLogic.py
Python
apache-2.0
5,566
[ "VTK" ]
7171d756667e0c390af0fee0d5e83f6d5c9a54f7a0a15fc1a784f10481c9afb0
#!/usr/bin/env python """Multithreaded interactive interpreter with GTK and Matplotlib support. Usage: pyint-gtk.py -> starts shell with gtk thread running separately pyint-gtk.py -pylab [filename] -> initializes matplotlib, optionally running the named file. The shell starts after the file is executed. Threading code taken from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65109, by Brian McErlean and John Finlay. Matplotlib support taken from interactive.py in the matplotlib distribution. Also borrows liberally from code.py in the Python standard library.""" __author__ = "Fernando Perez <Fernando.Perez@colorado.edu>" import sys import code import threading import gobject import gtk try: import readline except ImportError: has_readline = False else: has_readline = True class MTConsole(code.InteractiveConsole): """Simple multi-threaded shell""" def __init__(self,on_kill=None,*args,**kw): code.InteractiveConsole.__init__(self,*args,**kw) self.code_to_run = None self.ready = threading.Condition() self._kill = False if on_kill is None: on_kill = [] # Check that all things to kill are callable: for _ in on_kill: if not callable(_): raise TypeError,'on_kill must be a list of callables' self.on_kill = on_kill # Set up tab-completer if has_readline: import rlcompleter try: # this form only works with python 2.3 self.completer = rlcompleter.Completer(self.locals) except: # simpler for py2.2 self.completer = rlcompleter.Completer() readline.set_completer(self.completer.complete) # Use tab for completions readline.parse_and_bind('tab: complete') # This forces readline to automatically print the above list when tab # completion is set to 'complete'. readline.parse_and_bind('set show-all-if-ambiguous on') # Bindings for incremental searches in the history. These searches # use the string typed so far on the command line and search # anything in the previous input history containing them. readline.parse_and_bind('"\C-r": reverse-search-history') readline.parse_and_bind('"\C-s": forward-search-history') def runsource(self, source, filename="<input>", symbol="single"): """Compile and run some source in the interpreter. Arguments are as for compile_command(). One several things can happen: 1) The input is incorrect; compile_command() raised an exception (SyntaxError or OverflowError). A syntax traceback will be printed by calling the showsyntaxerror() method. 2) The input is incomplete, and more input is required; compile_command() returned None. Nothing happens. 3) The input is complete; compile_command() returned a code object. The code is executed by calling self.runcode() (which also handles run-time exceptions, except for SystemExit). The return value is True in case 2, False in the other cases (unless an exception is raised). The return value can be used to decide whether to use sys.ps1 or sys.ps2 to prompt the next line. """ try: code = self.compile(source, filename, symbol) except (OverflowError, SyntaxError, ValueError): # Case 1 self.showsyntaxerror(filename) return False if code is None: # Case 2 return True # Case 3 # Store code in self, so the execution thread can handle it self.ready.acquire() self.code_to_run = code self.ready.wait() # Wait until processed in timeout interval self.ready.release() return False def runcode(self): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback.""" self.ready.acquire() if self._kill: print 'Closing threads...', sys.stdout.flush() for tokill in self.on_kill: tokill() print 'Done.' 
if self.code_to_run is not None: self.ready.notify() code.InteractiveConsole.runcode(self,self.code_to_run) self.code_to_run = None self.ready.release() return True def kill (self): """Kill the thread, returning when it has been shut down.""" self.ready.acquire() self._kill = True self.ready.release() class GTKInterpreter(threading.Thread): """Run gtk.main in the main thread and a python interpreter in a separate thread. Python commands can be passed to the thread where they will be executed. This is implemented by periodically checking for passed code using a GTK timeout callback. """ TIMEOUT = 100 # Millisecond interval between timeouts. def __init__(self,banner=None): threading.Thread.__init__(self) self.banner = banner self.shell = MTConsole(on_kill=[gtk.main_quit]) def run(self): self.pre_interact() self.shell.interact(self.banner) self.shell.kill() def mainloop(self): self.start() gobject.timeout_add(self.TIMEOUT, self.shell.runcode) try: if gtk.gtk_version[0] >= 2: gtk.gdk.threads_init() except AttributeError: pass gtk.main() self.join() def pre_interact(self): """This method should be overridden by subclasses. It gets called right before interact(), but after the thread starts. Typically used to push initialization code into the interpreter""" pass class MatplotLibInterpreter(GTKInterpreter): """Threaded interpreter with matplotlib support. Note that this explicitly sets GTKAgg as the backend, since it has specific GTK hooks in it.""" def __init__(self,banner=None): banner = """\nWelcome to matplotlib, a matlab-like python environment. help(matlab) -> help on matlab compatible commands from matplotlib. help(plotting) -> help on plotting commands. """ GTKInterpreter.__init__(self,banner) def pre_interact(self): """Initialize matplotlib before user interaction begins""" push = self.shell.push # Code to execute in user's namespace lines = ["import matplotlib", "matplotlib.use('GTKAgg')", "matplotlib.interactive(1)", "import matplotlib.pylab as pylab", "from matplotlib.pylab import *\n"] map(push,lines) # Execute file if given. if len(sys.argv)>1: import matplotlib matplotlib.interactive(0) # turn off interaction fname = sys.argv[1] try: inFile = file(fname, 'r') except IOError: print '*** ERROR *** Could not read file <%s>' % fname else: print '*** Executing file <%s>:' % fname for line in inFile: if line.lstrip().find('show()')==0: continue print '>>', line, push(line) inFile.close() matplotlib.interactive(1) # turn on interaction if __name__ == '__main__': # Quick sys.argv hack to extract the option and leave filenames in sys.argv. # For real option handling, use optparse or getopt. if len(sys.argv) > 1 and sys.argv[1]=='-pylab': sys.argv = [sys.argv[0]]+sys.argv[2:] MatplotLibInterpreter().mainloop() else: GTKInterpreter().mainloop()
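# The runsource/runcode pair in MTConsole above implements a condition-
# variable handoff between the interpreter thread and the GTK main loop.
# Stripped of the interpreter details, the pattern looks like this (a
# sketch; class and method names are illustrative):
import threading

class CodeHandoff:
    def __init__(self):
        self.ready = threading.Condition()
        self.job = None

    def submit(self, fn):
        # Called from the worker thread; blocks until the main loop ran fn
        self.ready.acquire()
        self.job = fn
        while self.job is not None: # guard against spurious wakeups
            self.ready.wait()
        self.ready.release()

    def poll(self):
        # Called periodically from the main loop (cf. gobject.timeout_add)
        self.ready.acquire()
        if self.job is not None:
            self.job()
            self.job = None
            self.ready.notify()
        self.ready.release()
        return True # keep the timeout callback installed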
sniemi/SamPy
sandbox/src1/examples/interactive.py
Python
bsd-2-clause
7,867
[ "Brian" ]
0680ba0cc9602edaa9e79e436bd8099ad23fe29e43c0bd70982f1f4b40d6de91
######################################################### # # DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. # # PLEASE LOOK INTO THE README FOR MORE INFORMATION. # # ######################################################### # coding: utf-8 # # Caffe2 Basic Concepts - Operators & Nets # # In this tutorial we will go through a set of Caffe2 basics: the basic concepts including how operators and nets are being written. # # First, let's import Caffe2. `core` and `workspace` are usually the two that you need most. If you want to manipulate protocol buffers generated by Caffe2, you probably also want to import `caffe2_pb2` from `caffe2.proto`. # In[1]: from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals # We'll also import a few standard python libraries from matplotlib import pyplot import numpy as np import time # These are the droids you are looking for. from caffe2.python import core, workspace from caffe2.proto import caffe2_pb2 # Let's show all plots inline. # You might see a warning saying that caffe2 does not have GPU support. That means you are running a CPU-only build. Don't be alarmed - anything CPU is still runnable without a problem. # ## Workspaces # # Let's cover workspaces first, where all the data resides. # # Similar to Matlab, the Caffe2 workspace consists of blobs you create and store in memory. For now, consider a blob to be a N-dimensional Tensor similar to numpy's ndarray, but contiguous. Down the road, we will show you that a blob is actually a typed pointer that can store any type of C++ objects, but Tensor is the most common type stored in a blob. Let's show what the interface looks like. # # `Blobs()` prints out all existing blobs in the workspace. # `HasBlob()` queries if a blob exists in the workspace. As of now, we don't have any. # In[2]: print("Current blobs in the workspace: {}".format(workspace.Blobs())) print("Workspace has blob 'X'? {}".format(workspace.HasBlob("X"))) # We can feed blobs into the workspace using `FeedBlob()`. # In[3]: X = np.random.randn(2, 3).astype(np.float32) print("Generated X from numpy:\n{}".format(X)) workspace.FeedBlob("X", X) # Now, let's take a look at what blobs are in the workspace. # In[4]: print("Current blobs in the workspace: {}".format(workspace.Blobs())) print("Workspace has blob 'X'? {}".format(workspace.HasBlob("X"))) print("Fetched X:\n{}".format(workspace.FetchBlob("X"))) # Let's verify that the arrays are equal. # In[5]: np.testing.assert_array_equal(X, workspace.FetchBlob("X")) # Note that if you try to access a blob that does not exist, an error will be thrown: # In[6]: try: workspace.FetchBlob("invincible_pink_unicorn") except RuntimeError as err: print(err) # One thing that you might not use immediately: you can have multiple workspaces in Python using different names, and switch between them. Blobs in different workspaces are separate from each other. You can query the current workspace using `CurrentWorkspace`. Let's try switching the workspace by name (gutentag) and creating a new one if it doesn't exist. # In[7]: print("Current workspace: {}".format(workspace.CurrentWorkspace())) print("Current blobs in the workspace: {}".format(workspace.Blobs())) # Switch the workspace. The second argument "True" means creating # the workspace if it is missing. workspace.SwitchWorkspace("gutentag", True) # Let's print the current workspace. Note that there is nothing in the # workspace yet. 
print("Current workspace: {}".format(workspace.CurrentWorkspace())) print("Current blobs in the workspace: {}".format(workspace.Blobs())) # Let's switch back to the default workspace. # In[8]: workspace.SwitchWorkspace("default") print("Current workspace: {}".format(workspace.CurrentWorkspace())) print("Current blobs in the workspace: {}".format(workspace.Blobs())) # Finally, `ResetWorkspace()` clears anything that is in the current workspace. # In[9]: workspace.ResetWorkspace() print("Current blobs in the workspace after reset: {}".format(workspace.Blobs())) # ## Operators # # Operators in Caffe2 are kind of like functions. From the C++ side, they all derive from a common interface, and are registered by type, so that we can call different operators during runtime. The interface of operators is defined in `caffe2/proto/caffe2.proto`. Basically, it takes in a bunch of inputs, and produces a bunch of outputs. # # Remember, when we say "create an operator" in Caffe2 Python, nothing gets run yet. All it does is create the protocol buffer that specifies what the operator should be. At a later time it will be sent to the C++ backend for execution. If you are not familiar with protobuf, it is a json-like serialization tool for structured data. Find more about protocol buffers [here](https://developers.google.com/protocol-buffers/). # # Let's see an actual example. # In[10]: # Create an operator. op = core.CreateOperator( "Relu", # The type of operator that we want to run ["X"], # A list of input blobs by their names ["Y"], # A list of output blobs by their names ) # and we are done! # As we mentioned, the created op is actually a protobuf object. Let's show the content. # In[11]: print("Type of the created op is: {}".format(type(op))) print("Content:\n") print(str(op)) # Ok, let's run the operator. We first feed the input X to the workspace. # Then the simplest way to run an operator is to do `workspace.RunOperatorOnce(operator)` # In[12]: workspace.FeedBlob("X", np.random.randn(2, 3).astype(np.float32)) workspace.RunOperatorOnce(op) # After execution, let's see if the operator is doing the right thing. # # In this case, the operator is a common activation function used in neural networks, called [ReLU](https://en.wikipedia.org/wiki/Rectifier_(neural_networks), or Rectified Linear Unit activation. ReLU activation helps to add necessary non-linear characteristics to the neural network classifier, and is defined as: # # $$ReLU(x) = max(0, x)$$ # In[13]: print("Current blobs in the workspace: {}\n".format(workspace.Blobs())) print("X:\n{}\n".format(workspace.FetchBlob("X"))) print("Y:\n{}\n".format(workspace.FetchBlob("Y"))) print("Expected:\n{}\n".format(np.maximum(workspace.FetchBlob("X"), 0))) # This is working if your Expected output matches your Y output in this example. # # Operators also take optional arguments if needed. They are specified as key-value pairs. Let's take a look at one simple example, which takes a tensor and fills it with Gaussian random variables. # In[14]: op = core.CreateOperator( "GaussianFill", [], # GaussianFill does not need any parameters. ["Z"], shape=[100, 100], # shape argument as a list of ints. mean=1.0, # mean as a single float std=1.0, # std as a single float ) print("Content of op:\n") print(str(op)) # Let's run it and see if things are as intended. # In[15]: workspace.RunOperatorOnce(op) temp = workspace.FetchBlob("Z") pyplot.hist(temp.flatten(), bins=50) pyplot.title("Distribution of Z") # If you see a bell shaped curve then it worked! 
# ## Nets # # Nets are essentially computation graphs. We keep the name `Net` for backward consistency (and also to pay tribute to neural nets). A Net is composed of multiple operators just like a program written as a sequence of commands. Let's take a look. # # When we talk about nets, we will also talk about BlobReference, which is an object that wraps around a string so we can do easy chaining of operators. # # Let's create a network that is essentially the equivalent of the following python math: # ``` # X = np.random.randn(2, 3) # W = np.random.randn(5, 3) # b = np.ones(5) # Y = X * W^T + b # ``` # We'll show the progress step by step. Caffe2's `core.Net` is a wrapper class around a NetDef protocol buffer. # When creating a network, its underlying protocol buffer is essentially empty other than the network name. Let's create the net and then show the proto content. # In[16]: net = core.Net("my_first_net") print("Current network proto:\n\n{}".format(net.Proto())) # Let's create a blob called X, and use GaussianFill to fill it with some random data. # In[17]: X = net.GaussianFill([], ["X"], mean=0.0, std=1.0, shape=[2, 3], run_once=0) print("New network proto:\n\n{}".format(net.Proto())) # You might have observed a few differences from the earlier `core.CreateOperator` call. Basically, when using a net, you can directly create an operator *and* add it to the net at the same time by calling `net.SomeOp` where SomeOp is a registered type string of an operator. This gets translated to # ``` # op = core.CreateOperator("SomeOp", ...) # net.Proto().op.append(op) # ``` # # Also, you might be wondering what X is. X is a `BlobReference` which records two things: # # - The blob's name, which is accessed with `str(X)` # # - The net it got created from, which is recorded by the internal variable `_from_net` # # Let's verify it. Also, remember, we are not actually running anything yet, so X contains nothing but a symbol. Don't expect to get any numerical values out of it right now :) # In[18]: print("Type of X is: {}".format(type(X))) print("The blob name is: {}".format(str(X))) # Let's continue to create W and b. # In[19]: W = net.GaussianFill([], ["W"], mean=0.0, std=1.0, shape=[5, 3], run_once=0) b = net.ConstantFill([], ["b"], shape=[5,], value=1.0, run_once=0) # Now, one simple code sugar: since the BlobReference objects know what net it is generated from, in addition to creating operators from net, you can also create operators from BlobReferences. Let's create the FC operator in this way. # In[20]: Y = X.FC([W, b], ["Y"]) # Under the hood, `X.FC(...)` simply delegates to `net.FC` by inserting `X` as the first input of the corresponding operator, so what we did above is equivalent to # ``` # Y = net.FC([X, W, b], ["Y"]) # ``` # # Let's take a look at the current network. # In[21]: print("Current network proto:\n\n{}".format(net.Proto())) # Too verbose huh? Let's try to visualize it as a graph. Caffe2 ships with a very minimal graph visualization tool for this purpose. # In[22]: from caffe2.python import net_drawer from IPython import display graph = net_drawer.GetPydotGraph(net, rankdir="LR") display.Image(graph.create_png(), width=800) # So we have defined a Net, but nothing has been executed yet. Remember that the net above is essentially a protobuf that holds the definition of the network. 
When we actually run the network, what happens under the hood is:
# - A C++ net object is instantiated from the protobuf
# - The instantiated net's Run() function is called
#
# Before we do anything, we should clear any earlier workspace variables with `ResetWorkspace()`.
#
# Then there are two ways to run a net from Python. We will do the first option in the example below.
#
# 1. Call `workspace.RunNetOnce()`, which instantiates, runs and immediately destructs the network
# 2. Call `workspace.CreateNet()` to create the C++ net object owned by the workspace, then call `workspace.RunNet()`, passing the name of the network to it

# In[23]:

workspace.ResetWorkspace()
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.RunNetOnce(net)
print("Blobs in the workspace after execution: {}".format(workspace.Blobs()))
# Let's dump the contents of the blobs
for name in workspace.Blobs():
    print("{}:\n{}".format(name, workspace.FetchBlob(name)))

# Now let's try the second way to create the net, and run it. First, clear the variables with `ResetWorkspace()`. Then create the net with the workspace's `net` object that we created earlier using `CreateNet(net_object)`. Finally, run the net with `RunNet(net_name)`.

# In[24]:

workspace.ResetWorkspace()
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.CreateNet(net)
workspace.RunNet(net.Proto().name)
print("Blobs in the workspace after execution: {}".format(workspace.Blobs()))
for name in workspace.Blobs():
    print("{}:\n{}".format(name, workspace.FetchBlob(name)))

# There are a few differences between `RunNetOnce` and `RunNet`, but the main difference is the computational overhead. Since `RunNetOnce` involves serializing the protobuf to pass between Python and C and instantiating the network, it may take longer to run. Let's run a test and see what the time overhead is.

# In[25]:

# It seems that %timeit magic does not work well with
# C++ extensions so we'll basically do for loops
start = time.time()
for i in range(1000):
    workspace.RunNetOnce(net)
end = time.time()
print('Run time per RunNetOnce: {}'.format((end - start) / 1000))

start = time.time()
for i in range(1000):
    workspace.RunNet(net.Proto().name)
end = time.time()
print('Run time per RunNet: {}'.format((end - start) / 1000))

# Congratulations, you now know many of the key components of the Caffe2 Python API! Ready for more Caffe2? Check out the rest of the tutorials for a variety of interesting use-cases!
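# One last sanity check worth running after either execution path (a sketch): the net above computes `Y = X * W^T + b`, so the fetched blobs should satisfy that identity up to float32 tolerance.

X_v = workspace.FetchBlob("X")
W_v = workspace.FetchBlob("W")
b_v = workspace.FetchBlob("b")
Y_v = workspace.FetchBlob("Y")
np.testing.assert_allclose(Y_v, X_v.dot(W_v.T) + b_v, rtol=1e-4, atol=1e-5)
print("FC output matches X.dot(W.T) + b")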
Yangqing/caffe2
caffe2/python/tutorials/py_gen/Basics.py
Python
apache-2.0
13,139
[ "Gaussian" ]
c362eaaa45b0b7bf8adfc0bdde88f9e06cc29d105f4673f347ec1f7e19f7a7c7
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.3.3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Basic (binary) GP classification model # # # This notebook shows how to build a GP classification model using variational inference. # Here we consider binary (two-class, 0 vs. 1) classification only (there is a separate notebook on [multiclass classification](../advanced/multiclass_classification.ipynb)). # We first look at a one-dimensional example, and then show how you can adapt this when the input space is two-dimensional. # %% import numpy as np import gpflow import tensorflow as tf import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams["figure.figsize"] = (8, 4) # %% [markdown] # ## One-dimensional example # # First of all, let's have a look at the data. `X` and `Y` denote the input and output values. # **NOTE:** `X` and `Y` must be two-dimensional NumPy arrays, $N \times 1$ or $N \times D$, where $D$ is the number of input dimensions/features, with the same number of rows as $N$ (one for each data point): # %% X = np.genfromtxt("data/classif_1D_X.csv").reshape(-1, 1) Y = np.genfromtxt("data/classif_1D_Y.csv").reshape(-1, 1) plt.figure(figsize=(10, 6)) _ = plt.plot(X, Y, "C3x", ms=8, mew=2) # %% [markdown] # ### Reminders on GP classification # # For a binary classification model using GPs, we can simply use a `Bernoulli` likelihood. The details of the generative model are as follows: # # __1. Define the latent GP:__ we start from a Gaussian process $f \sim \mathcal{GP}(0, k(\cdot, \cdot'))$: # %% # build the kernel and covariance matrix k = gpflow.kernels.Matern52(variance=20.0) x_grid = np.linspace(0, 6, 200).reshape(-1, 1) K = k(x_grid) # sample from a multivariate normal rng = np.random.RandomState(6) L = np.linalg.cholesky(K) f_grid = np.dot(L, rng.randn(200, 5)) plt.plot(x_grid, f_grid, "C0", linewidth=1) _ = plt.plot(x_grid, f_grid[:, 1], "C0", linewidth=2) # %% [markdown] # __2. Squash them to $[0, 1]$:__ the samples of the GP are mapped to $[0, 1]$. # By default, GPflow uses the standard normal cumulative distribution function (inverse probit function): $p(x) = \Phi(f(x)) = \frac{1}{2} (1 + \operatorname{erf}(x / \sqrt{2}))$. # (This choice has the advantage that predictive mean, variance and density can be computed analytically, but any choice of invlink is possible, e.g. the logit $p(x) = \frac{\exp(f(x))}{1 + \exp(f(x))}$. Simply pass another function as the `invlink` argument to the `Bernoulli` likelihood class.) # %% def invlink(f): return gpflow.likelihoods.Bernoulli().invlink(f).numpy() p_grid = invlink(f_grid) plt.plot(x_grid, p_grid, "C1", linewidth=1) _ = plt.plot(x_grid, p_grid[:, 1], "C1", linewidth=2) # %% [markdown] # __3. Sample from a Bernoulli:__ for each observation point $X_i$, the class label $Y_i \in \{0, 1\}$ is generated by sampling from a Bernoulli distribution $Y_i \sim \mathcal{B}(g(X_i))$. 
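# %% [markdown]
# As a quick cross-check (a sketch): the default `invlink` is essentially the standard normal CDF, so it should agree with `scipy.stats.norm.cdf`. GPflow clips the probit slightly away from 0 and 1 for numerical stability, hence the loose tolerance below.

# %%
from scipy.stats import norm

f_test = np.linspace(-3.0, 3.0, 7).reshape(-1, 1)
np.testing.assert_allclose(invlink(f_test), norm.cdf(f_test), atol=3e-3)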
# %% # Select some input locations ind = rng.randint(0, 200, (30,)) X_gen = x_grid[ind] # evaluate probability and get Bernoulli draws p = p_grid[ind, 1:2] Y_gen = rng.binomial(1, p) # plot plt.plot(x_grid, p_grid[:, 1], "C1", linewidth=2) plt.plot(X_gen, p, "C1o", ms=6) _ = plt.plot(X_gen, Y_gen, "C3x", ms=8, mew=2) # %% [markdown] # ### Implementation with GPflow # # For the model described above, the posterior $f(x)|Y$ (say $p$) is not Gaussian any more and does not have a closed-form expression. # A common approach is then to look for the best approximation of this posterior by a tractable distribution (say $q$) such as a Gaussian distribution. # In variational inference, the quality of an approximation is measured by the Kullback-Leibler divergence $\mathrm{KL}[q \| p]$. # For more details on this model, see Nickisch and Rasmussen (2008). # # The inference problem is thus turned into an optimization problem: finding the best parameters for $q$. # In our case, we introduce $U \sim \mathcal{N}(q_\mu, q_\Sigma)$, and we choose $q$ to have the same distribution as $f | f(X) = U$. # The parameters $q_\mu$ and $q_\Sigma$ can be seen as parameters of $q$, which can be optimized in order to minimise $\mathrm{KL}[q \| p]$. # # This variational inference model is called `VGP` in GPflow: # %% m = gpflow.models.VGP( (X, Y), likelihood=gpflow.likelihoods.Bernoulli(), kernel=gpflow.kernels.Matern52() ) opt = gpflow.optimizers.Scipy() opt.minimize(m.training_loss, variables=m.trainable_variables) # %% [markdown] # We can now inspect the result of the optimization with `gpflow.utilities.print_summary(m)`: # %% gpflow.utilities.print_summary(m, fmt="notebook") # %% [markdown] # In this table, the first two lines are associated with the kernel parameters, and the last two correspond to the variational parameters. # **NOTE:** In practice, $q_\Sigma$ is actually parameterized by its lower-triangular square root $q_\Sigma = q_\text{sqrt} q_\text{sqrt}^T$ in order to ensure its positive-definiteness. # # For more details on how to handle models in GPflow (getting and setting parameters, fixing some of them during optimization, using priors, and so on), see [Manipulating GPflow models](../understanding/models.ipynb). # %% [markdown] # ### Predictions # # Finally, we will see how to use model predictions to plot the resulting model. # We will replicate the figures of the generative model above, but using the approximate posterior distribution given by the model. 
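# %% [markdown]
# Before plotting, it can be instructive to look at the variational parameters themselves (a quick sketch; attribute names and shapes as in GPflow 2): `q_mu` holds one variational mean per data point, and `q_sqrt` stores the lower-triangular square root $q_\text{sqrt}$ of $q_\Sigma$.

# %%
print(m.q_mu.numpy().shape)  # (N, 1): one variational mean per data point
print(m.q_sqrt.numpy().shape)  # (1, N, N): lower-triangular square root of q_Sigma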
# %% plt.figure(figsize=(12, 8)) # bubble fill the predictions mu, var = m.predict_f(x_grid) plt.fill_between( x_grid.flatten(), np.ravel(mu + 2 * np.sqrt(var)), np.ravel(mu - 2 * np.sqrt(var)), alpha=0.3, color="C0", ) # plot samples tf.random.set_seed(6) samples = m.predict_f_samples(x_grid, 10).numpy().squeeze().T plt.plot(x_grid, samples, "C0", lw=1) # plot p-samples p = invlink(samples) plt.plot(x_grid, p, "C1", lw=1) # plot data plt.plot(X, Y, "C3x", ms=8, mew=2) plt.ylim((-3, 3)) # %% [markdown] # ## Two-dimensional example # # In this section we will use the following data: # %% X = np.loadtxt("data/banana_X_train", delimiter=",") Y = np.loadtxt("data/banana_Y_train", delimiter=",").reshape(-1, 1) mask = Y[:, 0] == 1 plt.figure(figsize=(6, 6)) plt.plot(X[mask, 0], X[mask, 1], "oC0", mew=0, alpha=0.5) _ = plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], "oC1", mew=0, alpha=0.5) # %% [markdown] # The model definition is the same as above; the only important difference is that we now specify that the kernel operates over a two-dimensional input space: # %% m = gpflow.models.VGP( (X, Y), kernel=gpflow.kernels.SquaredExponential(), likelihood=gpflow.likelihoods.Bernoulli() ) opt = gpflow.optimizers.Scipy() opt.minimize( m.training_loss, variables=m.trainable_variables, options=dict(maxiter=25), method="L-BFGS-B" ) # in practice, the optimization needs around 250 iterations to converge # %% [markdown] # We can now plot the predicted decision boundary between the two classes. # To do so, we can equivalently plot the contour lines $E[f(x)|Y]=0$, or $E[g(f(x))|Y]=0.5$. # We will do the latter, because it allows us to introduce the `predict_y` function, which returns the mean and variance at test points: # %% x_grid = np.linspace(-3, 3, 40) xx, yy = np.meshgrid(x_grid, x_grid) Xplot = np.vstack((xx.flatten(), yy.flatten())).T p, _ = m.predict_y(Xplot) # here we only care about the mean plt.figure(figsize=(7, 7)) plt.plot(X[mask, 0], X[mask, 1], "oC0", mew=0, alpha=0.5) plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], "oC1", mew=0, alpha=0.5) _ = plt.contour( xx, yy, p.numpy().reshape(*xx.shape), [0.5], # plot the p=0.5 contour line only colors="k", linewidths=1.8, zorder=100, ) # %% [markdown] # ## Further reading # # There are dedicated notebooks giving more details on how to manipulate [models](../understanding/models.ipynb) and [kernels](../advanced/kernels.ipynb). # # This notebook covers only very basic classification models. You might also be interested in: # * [Multiclass classification](../advanced/multiclass_classification.ipynb) if you have more than two classes. # * [Sparse models](../advanced/gps_for_big_data.ipynb). The models above have one inducing variable $U_i$ per observation point $X_i$, which does not scale to large datasets. Sparse Variational GP (SVGP) is an efficient alternative where the variables $U_i$ are defined at some inducing input locations $Z_i$ that can also be optimized. # * [Exact inference](../advanced/mcmc.ipynb). We have seen that variational inference provides an approximation to the posterior. GPflow also supports exact inference using Markov Chain Monte Carlo (MCMC) methods, and the kernel parameters can also be assigned prior distributions in order to avoid point estimates. # # ## References # # Hannes Nickisch and Carl Edward Rasmussen. 'Approximations for binary Gaussian process classification'. *Journal of Machine Learning Research* 9(Oct):2035--2078, 2008.
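# %% [markdown]
# As a final hands-on check on the two-dimensional model (a sketch reusing `m`, `X`, and `Y` from the banana example above): thresholding the predicted probabilities at 0.5 gives hard labels and a training accuracy.

# %%
p_train, _ = m.predict_y(X)
hard_labels = (p_train.numpy() > 0.5).astype(float)
print("training accuracy: {:.1%}".format(np.mean(hard_labels == Y)))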
GPflow/GPflow
doc/source/notebooks/basics/classification.pct.py
Python
apache-2.0
9,219
[ "Gaussian" ]
4c4ecbbae563e85407600e6d82b532ba967134e9b10f850e7db28ec651d88146
## # Copyright 2009-2021 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for building and installing CP2K, implemented as an easyblock @author: Stijn De Weirdt (Ghent University) @author: Dries Verdegem (Ghent University) @author: Kenneth Hoste (Ghent University) @author: Pieter De Baets (Ghent University) @author: Jens Timmerman (Ghent University) @author: Ward Poelmans (Ghent University) @author: Luca Marsella (CSCS) @author: Damian Alvarez (Forschungszentrum Juelich GmbH) @author: Alan O'Cais (Forschungszentrum Juelich GmbH) @author: Balazs Hajgato (Free University Brussels (VUB)) """ import fileinput import glob import re import os import sys from distutils.version import LooseVersion import easybuild.tools.toolchain as toolchain from easybuild.framework.easyblock import EasyBlock from easybuild.framework.easyconfig import CUSTOM from easybuild.tools.build_log import EasyBuildError from easybuild.tools.environment import setvar from easybuild.tools.filetools import change_dir, copy_dir, copy_file, mkdir, write_file from easybuild.tools.config import build_option from easybuild.tools.modules import get_software_root, get_software_version from easybuild.tools.run import run_cmd from easybuild.tools.systemtools import get_avail_core_count class EB_CP2K(EasyBlock): """ Support for building CP2K - prepare module include files if required - generate custom config file in 'arch' directory - build CP2K - run regression test if desired - install by copying binary executables """ def __init__(self, *args, **kwargs): super(EB_CP2K, self).__init__(*args, **kwargs) self.typearch = None # this should be set to False for old versions of GCC (e.g. 
v4.1) self.compilerISO_C_BINDING = True # compiler options that need to be set in Makefile self.debug = '' self.fpic = '' # used for both libsmm and libxsmm self.libsmm = '' self.modincpath = '' self.openmp = '' self.make_instructions = '' @staticmethod def extra_options(): extra_vars = { 'extracflags': ['', "Extra CFLAGS to be added", CUSTOM], 'extradflags': ['', "Extra DFLAGS to be added", CUSTOM], 'ignore_regtest_fails': [False, ("Ignore failures in regression test " "(should be used with care)"), CUSTOM], 'library': [False, "Also build CP2K as a library", CUSTOM], 'maxtasks': [4, ("Maximum number of CP2K instances run at " "the same time during testing"), CUSTOM], 'modinc': [[], ("List of modinc's to use (*.f90], or 'True' to use " "all found at given prefix"), CUSTOM], 'modincprefix': ['', "Intel MKL prefix for modinc include dir", CUSTOM], 'runtest': [True, "Build and run CP2K tests", CUSTOM], 'omp_num_threads': [None, "Value to set $OMP_NUM_THREADS to during testing", CUSTOM], 'plumed': [None, "Enable PLUMED support", CUSTOM], 'type': ['popt', "Type of build ('popt' or 'psmp')", CUSTOM], 'typeopt': [True, "Enable optimization", CUSTOM], } return EasyBlock.extra_options(extra_vars) def _generate_makefile(self, options): """Generate Makefile based on options dictionary and optional make instructions""" text = "# Makefile generated by CP2K easyblock in EasyBuild\n" for key, value in sorted(options.items()): text += "%s = %s\n" % (key, value) return text + self.make_instructions def configure_step(self): """Configure build - build Libint wrapper - generate Makefile """ known_types = ['popt', 'psmp'] if self.cfg['type'] not in known_types: raise EasyBuildError("Unknown build type specified: '%s', known types are %s", self.cfg['type'], known_types) # correct start dir, if needed # recent CP2K versions have a 'cp2k' dir in the unpacked 'cp2k' dir cp2k_path = os.path.join(self.cfg['start_dir'], 'cp2k') if os.path.exists(cp2k_path): self.cfg['start_dir'] = cp2k_path self.log.info("Corrected start_dir to %s" % self.cfg['start_dir']) # set compilers options according to toolchain config # full debug: -g -traceback -check all -fp-stack-check # -g links to mpi debug libs if self.toolchain.options['debug']: self.debug = '-g' self.log.info("Debug build") if self.toolchain.options['pic']: self.fpic = "-fPIC" self.log.info("Using fPIC") # report on extra flags being used if self.cfg['extracflags']: self.log.info("Using extra CFLAGS: %s" % self.cfg['extracflags']) if self.cfg['extradflags']: self.log.info("Using extra DFLAGS: %s" % self.cfg['extradflags']) # lib(x)smm support libsmm = get_software_root('libsmm') libxsmm = get_software_root('libxsmm') if libxsmm: self.cfg.update('extradflags', '-D__LIBXSMM') self.libsmm = '-lxsmm -lxsmmf' self.log.debug('Using libxsmm %s' % libxsmm) elif libsmm: libsmms = glob.glob(os.path.join(libsmm, 'lib', 'libsmm_*nn.a')) dfs = [os.path.basename(os.path.splitext(x)[0]).replace('lib', '-D__HAS_') for x in libsmms] moredflags = ' ' + ' '.join(dfs) self.cfg.update('extradflags', moredflags) self.libsmm = ' '.join(libsmms) self.log.debug('Using libsmm %s (extradflags %s)' % (self.libsmm, moredflags)) # obtain list of modinc's to use if self.cfg["modinc"]: self.modincpath = self.prepmodinc() # set typearch self.typearch = "Linux-x86-64-%s" % self.toolchain.name # extra make instructions self.make_instructions = '' # "graphcon.o: graphcon.F\n\t$(FC) -c $(FCFLAGS2) $<\n" # compiler toolchain specific configuration comp_fam = self.toolchain.comp_family() if comp_fam == 
toolchain.INTELCOMP: options = self.configure_intel_based() elif comp_fam == toolchain.GCC: options = self.configure_GCC_based() else: raise EasyBuildError("Don't know how to tweak configuration for compiler family %s" % comp_fam) # BLAS/LAPACK/FFTW if get_software_root('imkl'): options = self.configure_MKL(options) else: # BLAS if get_software_root('ACML'): options = self.configure_ACML(options) else: options = self.configure_BLAS_lib(options) # FFTW (no MKL involved) if 'fftw3' in os.getenv('LIBFFT', ''): options = self.configure_FFTW3(options) # LAPACK if os.getenv('LIBLAPACK_MT', None) is not None: options = self.configure_LAPACK(options) if os.getenv('LIBSCALAPACK', None) is not None: options = self.configure_ScaLAPACK(options) # PLUMED plumed = get_software_root('PLUMED') if self.cfg['plumed'] and not plumed: raise EasyBuildError("The PLUMED module needs to be loaded to build CP2K with PLUMED support") # enable PLUMED support if PLUMED is listed as a dependency # and PLUMED support is either explicitly enabled (plumed = True) or unspecified ('plumed' not defined) if plumed and (self.cfg['plumed'] or self.cfg['plumed'] is None): options['LIBS'] += ' -lplumed' options['DFLAGS'] += ' -D__PLUMED2' # ELPA elpa = get_software_root('ELPA') if elpa: options['LIBS'] += ' -lelpa' elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'modules') options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir if LooseVersion(self.version) >= LooseVersion('6.1'): elpa_ver = ''.join(get_software_version('ELPA').split('.')[:2]) options['DFLAGS'] += ' -D__ELPA=%s' % elpa_ver elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'elpa') options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir else: options['DFLAGS'] += ' -D__ELPA3' # CUDA cuda = get_software_root('CUDA') if cuda: options['DFLAGS'] += ' -D__ACC -D__DBCSR_ACC' options['LIBS'] += ' -lcudart -lcublas -lcufft -lrt' options['NVCC'] = ' nvcc' # avoid group nesting options['LIBS'] = options['LIBS'].replace('-Wl,--start-group', '').replace('-Wl,--end-group', '') options['LIBS'] = "-Wl,--start-group %s -Wl,--end-group" % options['LIBS'] # specify correct location for 'data' directory in final installation options['DATA_DIR'] = os.path.join(self.installdir, 'data') # create arch file using options set archfile = os.path.join(self.cfg['start_dir'], 'arch', '%s.%s' % (self.typearch, self.cfg['type'])) txt = self._generate_makefile(options) write_file(archfile, txt) self.log.info("Content of makefile (%s):\n%s" % (archfile, txt)) def prepmodinc(self): """Prepare list of module files""" self.log.debug("Preparing module files") imkl = get_software_root('imkl') if imkl: # prepare modinc target path modincpath = os.path.join(os.path.dirname(os.path.normpath(self.cfg['start_dir'])), 'modinc') self.log.debug("Preparing module files in %s" % modincpath) mkdir(modincpath, parents=True) # get list of modinc source files modincdir = os.path.join(imkl, self.cfg["modincprefix"], 'include') if isinstance(self.cfg["modinc"], list): modfiles = [os.path.join(modincdir, x) for x in self.cfg["modinc"]] elif isinstance(self.cfg["modinc"], bool) and self.cfg["modinc"]: modfiles = glob.glob(os.path.join(modincdir, '*.f90')) else: raise EasyBuildError("prepmodinc: Please specify either a boolean value or a list of files in modinc " "(found: %s).", self.cfg["modinc"]) f77 = os.getenv('F77') if not f77: raise EasyBuildError("F77 environment variable not set, can't continue.") # create modinc files for f in modfiles: if 
f77.endswith('ifort'): cmd = "%s -module %s -c %s" % (f77, modincpath, f) elif f77 in ['gfortran', 'mpif77']: cmd = "%s -J%s -c %s" % (f77, modincpath, f) else: raise EasyBuildError("prepmodinc: Unknown value specified for F77 (%s)", f77) run_cmd(cmd, log_all=True, simple=True) return modincpath else: raise EasyBuildError("Don't know how to prepare modinc, imkl not found") def configure_common(self): """Common configuration for all toolchains""" # openmp introduces 2 major differences # -automatic is default: -noautomatic -auto-scalar # some mem-bandwidth optimisation if self.cfg['type'] == 'psmp': self.openmp = self.toolchain.get_flag('openmp') # determine which opt flags to use if self.cfg['typeopt']: optflags = 'OPT' regflags = 'OPT2' else: optflags = 'NOOPT' regflags = 'NOOPT' # make sure a MPI-2 able MPI lib is used mpi2 = False if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None: known_mpi2_fams = [toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2, toolchain.OPENMPI, toolchain.INTELMPI] mpi_fam = self.toolchain.mpi_family() if mpi_fam in known_mpi2_fams: mpi2 = True self.log.debug("Determined MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam) else: self.log.debug("Cannot determine MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam) else: # can't use toolchain.mpi_family, because of system toolchain mpi2libs = ['impi', 'MVAPICH2', 'OpenMPI', 'MPICH2', 'MPICH'] for mpi2lib in mpi2libs: if get_software_root(mpi2lib): mpi2 = True self.log.debug("Determined MPI2 compatibility based on loaded MPI module: %s") else: self.log.debug("MPI-2 supporting MPI library %s not loaded.") if not mpi2: raise EasyBuildError("CP2K needs MPI-2, no known MPI-2 supporting library loaded?") cppflags = os.getenv('CPPFLAGS') ldflags = os.getenv('LDFLAGS') cflags = os.getenv('CFLAGS') fflags = os.getenv('FFLAGS') fflags_lowopt = re.sub('-O[0-9]', '-O1', fflags) options = { 'CC': os.getenv('MPICC'), 'CPP': '', 'FC': '%s %s' % (os.getenv('MPIF90'), self.openmp), 'LD': '%s %s' % (os.getenv('MPIF90'), self.openmp), 'AR': 'ar -r', 'CPPFLAGS': '', 'FPIC': self.fpic, 'DEBUG': self.debug, 'FCFLAGS': '$(FCFLAGS%s)' % optflags, 'FCFLAGS2': '$(FCFLAGS%s)' % regflags, 'CFLAGS': ' %s %s %s $(FPIC) $(DEBUG) %s ' % (cflags, cppflags, ldflags, self.cfg['extracflags']), 'DFLAGS': ' -D__parallel -D__BLACS -D__SCALAPACK -D__FFTSG %s' % self.cfg['extradflags'], 'LIBS': os.getenv('LIBS', ''), 'FCFLAGSNOOPT': '$(DFLAGS) $(CFLAGS) -O0 $(FREE) $(FPIC) $(DEBUG)', 'FCFLAGSOPT': '%s $(FREE) $(SAFE) $(FPIC) $(DEBUG)' % fflags, 'FCFLAGSOPT2': '%s $(FREE) $(SAFE) $(FPIC) $(DEBUG)' % fflags_lowopt, } libint = get_software_root('LibInt') if libint: options['DFLAGS'] += ' -D__LIBINT' libintcompiler = "%s %s" % (os.getenv('CC'), os.getenv('CFLAGS')) # Build libint-wrapper, if required libint_wrapper = '' # required for old versions of GCC if not self.compilerISO_C_BINDING: options['DFLAGS'] += ' -D__HAS_NO_ISO_C_BINDING' # determine path for libint_tools dir libinttools_paths = ['libint_tools', 'tools/hfx_tools/libint_tools'] libinttools_path = None for path in libinttools_paths: path = os.path.join(self.cfg['start_dir'], path) if os.path.isdir(path): libinttools_path = path change_dir(libinttools_path) if not libinttools_path: raise EasyBuildError("No libinttools dir found") # build libint wrapper cmd = "%s -c libint_cpp_wrapper.cpp -I%s/include" % (libintcompiler, libint) if not run_cmd(cmd, log_all=True, simple=True): raise EasyBuildError("Building the libint wrapper failed") 
libint_wrapper = '%s/libint_cpp_wrapper.o' % libinttools_path # determine Libint libraries based on major version number libint_maj_ver = get_software_version('Libint').split('.')[0] if libint_maj_ver == '1': libint_libs = "$(LIBINTLIB)/libderiv.a $(LIBINTLIB)/libint.a $(LIBINTLIB)/libr12.a" elif libint_maj_ver == '2': libint_libs = "$(LIBINTLIB)/libint2.a" else: raise EasyBuildError("Don't know how to handle libint version %s", libint_maj_ver) self.log.info("Using Libint version %s" % (libint_maj_ver)) options['LIBINTLIB'] = '%s/lib' % libint options['LIBS'] += ' %s -lstdc++ %s' % (libint_libs, libint_wrapper) # add Libint include dir to $FCFLAGS options['FCFLAGS'] += ' -I' + os.path.join(libint, 'include') else: # throw a warning, since CP2K without Libint doesn't make much sense self.log.warning("Libint module not loaded, so building without Libint support") libxc = get_software_root('libxc') if libxc: cur_libxc_version = get_software_version('libxc') if LooseVersion(self.version) >= LooseVersion('6.1'): libxc_min_version = '4.0.3' options['DFLAGS'] += ' -D__LIBXC' else: libxc_min_version = '2.0.1' options['DFLAGS'] += ' -D__LIBXC2' if LooseVersion(cur_libxc_version) < LooseVersion(libxc_min_version): raise EasyBuildError("This version of CP2K is not compatible with libxc < %s" % libxc_min_version) if LooseVersion(cur_libxc_version) >= LooseVersion('4.0.3'): # cfr. https://www.cp2k.org/howto:compile#k_libxc_optional_wider_choice_of_xc_functionals options['LIBS'] += ' -L%s/lib -lxcf03 -lxc' % libxc elif LooseVersion(cur_libxc_version) >= LooseVersion('2.2'): options['LIBS'] += ' -L%s/lib -lxcf90 -lxc' % libxc else: options['LIBS'] += ' -L%s/lib -lxc' % libxc self.log.info("Using Libxc-%s" % cur_libxc_version) else: self.log.info("libxc module not loaded, so building without libxc support") return options def configure_intel_based(self): """Configure for Intel based toolchains""" # based on guidelines available at # http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/ intelurl = ''.join(["http://software.intel.com/en-us/articles/", "build-cp2k-using-intel-fortran-compiler-professional-edition/"]) options = self.configure_common() extrainc = '' if self.modincpath: extrainc = '-I%s' % self.modincpath options.update({ # -Vaxlib : older options 'FREE': '-fpp -free', # SAFE = -assume protect_parens -fp-model precise -ftz # causes problems, so don't use this 'SAFE': '-assume protect_parens -no-unroll-aggressive', 'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc, 'LDFLAGS': '$(INCFLAGS) ', 'OBJECTS_ARCHITECTURE': 'machine_intel.o', }) options['DFLAGS'] += ' -D__INTEL' options['FCFLAGSOPT'] += ' $(INCFLAGS) -heap-arrays 64' options['FCFLAGSOPT2'] += ' $(INCFLAGS) -heap-arrays 64' ifortver = LooseVersion(get_software_version('ifort')) # Required due to memory leak that occurs if high optimizations are used (from CP2K 7.1 intel-popt-makefile) if ifortver >= LooseVersion("2018.5"): self.make_instructions += "mp2_optimize_ri_basis.o: mp2_optimize_ri_basis.F\n" \ "\t$(FC) -c $(subst O2,O0,$(FCFLAGSOPT)) $<\n" self.log.info("Optimization level of mp2_optimize_ri_basis.F was decreased to '-O0'") # RHEL8 intel/2020a lots of CPASSERT failed (due to high optimization in cholesky decomposition) if ifortver >= LooseVersion("2019"): self.make_instructions += "cp_fm_cholesky.o: cp_fm_cholesky.F\n\t$(FC) -c $(FCFLAGS2) $<\n" self.log.info("Optimization flags for cp_fm_cholesky.F is set to '%s'", options['FCFLAGSOPT2']) # -i-static has been 
deprecated prior to 2013, but was still usable. From 2015 it is not. if ifortver < LooseVersion("2013"): options['LDFLAGS'] += ' -i-static ' else: options['LDFLAGS'] += ' -static-intel ' # Otherwise it fails on linking, since there are 2 definitions of main if LooseVersion(self.version) >= LooseVersion('4.1'): options['LDFLAGS'] += ' -nofor-main ' failmsg = "CP2K won't build correctly with the Intel %%s compilers prior to %%s, see %s" % intelurl if ifortver >= LooseVersion("2011") and ifortver < LooseVersion("2012"): # don't allow using Intel compiler 2011 prior to release 8, because of known issue (see Intel URL) if ifortver >= LooseVersion("2011.8"): # add additional make instructions to Makefile self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n" self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n" else: raise EasyBuildError(failmsg, "v12", "v2011.8") elif ifortver >= LooseVersion("11"): if LooseVersion(get_software_version('ifort')) >= LooseVersion("11.1.072"): self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n" else: raise EasyBuildError(failmsg, "v11", "v11.1.072") else: raise EasyBuildError("Intel compilers version %s not supported yet.", ifortver) return options def configure_GCC_based(self): """Configure for GCC based toolchains""" options = self.configure_common() options.update({ # need this to prevent "Unterminated character constant beginning" errors 'FREE': '-ffree-form -ffree-line-length-none', 'LDFLAGS': '$(FCFLAGS)', 'OBJECTS_ARCHITECTURE': 'machine_gfortran.o', }) options['DFLAGS'] += ' -D__GFORTRAN' options['FCFLAGSOPT'] += ' $(DFLAGS) $(CFLAGS) -fmax-stack-var-size=32768' options['FCFLAGSOPT2'] += ' $(DFLAGS) $(CFLAGS)' gcc_version = get_software_version('GCCcore') or get_software_version('GCC') if LooseVersion(gcc_version) >= LooseVersion('10.0') and LooseVersion(self.version) <= LooseVersion('7.1'): # -fallow-argument-mismatch is required for CP2K 7.1 (and older) when compiling with GCC 10.x & more recent, # see https://github.com/cp2k/cp2k/issues/1157, https://github.com/cp2k/dbcsr/issues/351, # https://github.com/cp2k/dbcsr/commit/58ee9709545deda8524cab804bf1f88a61a864ac and # https://gcc.gnu.org/legacy-ml/gcc-patches/2019-10/msg01861.html options['FCFLAGSOPT'] += ' -fallow-argument-mismatch' options['FCFLAGSOPT2'] += ' -fallow-argument-mismatch' return options def configure_ACML(self, options): """Configure for AMD Math Core Library (ACML)""" openmp_suffix = '' if self.openmp: openmp_suffix = '_mp' options['ACML_INC'] = '%s/gfortran64%s/include' % (get_software_root('ACML'), openmp_suffix) options['CFLAGS'] += ' -I$(ACML_INC) -I$(FFTW_INC)' options['DFLAGS'] += ' -D__FFTACML' blas = os.getenv('LIBBLAS', '') blas = blas.replace('gfortran64', 'gfortran64%s' % openmp_suffix) options['LIBS'] += ' %s %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', ''), blas) return options def configure_BLAS_lib(self, options): """Configure for BLAS library.""" options['LIBS'] += ' %s %s' % (self.libsmm, os.getenv('LIBBLAS', '')) return options def configure_MKL(self, options): """Configure for Intel Math Kernel Library (MKL)""" options['INTEL_INC'] = '$(MKLROOT)/include' options['DFLAGS'] += ' -D__FFTW3' extra = '' if self.modincpath: extra = '-I%s' % self.modincpath options['CFLAGS'] += ' -I$(INTEL_INC) %s $(FPIC) $(DEBUG)' % extra options['LIBS'] += ' %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', '')) fftw_root = get_software_root('FFTW') if fftw_root: libfft = '-lfftw3' if 
self.cfg['type'] == 'psmp': libfft += ' -lfftw3_omp' options['CFLAGS'] += ' -I$(INTEL_INCF)' options['INTEL_INCF'] = os.path.join(fftw_root, 'include') options['LIBS'] += ' -L%s %s' % (os.path.join(fftw_root, 'lib'), libfft) else: # only use Intel FFTW wrappers if FFTW is not loaded options['CFLAGS'] += ' -I$(INTEL_INCF)' options['DFLAGS'] += ' -D__FFTMKL' options['INTEL_INCF'] = '$(INTEL_INC)/fftw' options['LIBS'] = '%s %s' % (os.getenv('LIBFFT', ''), options['LIBS']) return options def configure_FFTW3(self, options): """Configure for FFTW3""" options.update({ 'FFTW_INC': os.getenv('FFT_INC_DIR', ''), # GCC 'FFTW3INC': os.getenv('FFT_INC_DIR', ''), # Intel 'FFTW3LIB': os.getenv('FFT_LIB_DIR', ''), # Intel }) options['DFLAGS'] += ' -D__FFTW3' if self.cfg['type'] == 'psmp': libfft = os.getenv('LIBFFT_MT', '') else: libfft = os.getenv('LIBFFT', '') options['LIBS'] += ' -L%s %s' % (os.getenv('FFT_LIB_DIR', '.'), libfft) return options def configure_LAPACK(self, options): """Configure for LAPACK library""" options['LIBS'] += ' %s' % os.getenv('LIBLAPACK_MT', '') return options def configure_ScaLAPACK(self, options): """Configure for ScaLAPACK library""" options['LIBS'] += ' %s' % os.getenv('LIBSCALAPACK', '') return options def build_step(self): """Start the actual build - go into makefiles dir - patch Makefile -build_and_install """ if LooseVersion(self.version) < LooseVersion('7.0'): makefiles = os.path.join(self.cfg['start_dir'], 'makefiles') change_dir(makefiles) # modify makefile for parallel build parallel = self.cfg['parallel'] if parallel: try: for line in fileinput.input('Makefile', inplace=1, backup='.orig.patchictce'): line = re.sub(r"^PMAKE\s*=.*$", "PMAKE\t= $(SMAKE) -j %s" % parallel, line) sys.stdout.write(line) except IOError as err: raise EasyBuildError("Can't modify/write Makefile in %s: %s", makefiles, err) # update make options with MAKE self.cfg.update('buildopts', 'MAKE="make -j %s"' % self.cfg['parallel']) # update make options with ARCH and VERSION self.cfg.update('buildopts', 'ARCH=%s VERSION=%s' % (self.typearch, self.cfg['type'])) cmd = "make %s" % self.cfg['buildopts'] # clean first run_cmd(cmd + " clean", log_all=True, simple=True, log_output=True) # build and install if self.cfg['library']: cmd += ' libcp2k' run_cmd(cmd + " all", log_all=True, simple=True, log_output=True) def test_step(self): """Run regression test.""" if self.cfg['runtest']: # we need to specify location of 'data' directory in *build* dir, # since we've configured CP2K to look into the installation directory # (where 'data' will be copied to in install step) setvar('CP2K_DATA_DIR', os.path.join(self.cfg['start_dir'], 'data')) if not build_option('mpi_tests'): self.log.info("Skipping testing of CP2K since MPI testing is disabled") return if self.cfg['omp_num_threads']: setvar('OMP_NUM_THREADS', self.cfg['omp_num_threads']) # change to root of build dir change_dir(self.builddir) # use regression test reference output if available # try and find an unpacked directory that starts with 'LAST-' regtest_refdir = None for d in os.listdir(self.builddir): if d.startswith("LAST-"): regtest_refdir = d break # location of do_regtest script cfg_fn = 'cp2k_regtest.cfg' regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'regtesting', 'do_regtest') regtest_cmd = [regtest_script, '-nobuild', '-config', cfg_fn] if LooseVersion(self.version) < LooseVersion('7.1'): # -nosvn option was removed in CP2K 7.1 regtest_cmd.insert(1, '-nosvn') # older version of CP2K if not os.path.exists(regtest_script): 
regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'do_regtest') regtest_cmd = [regtest_script, '-nocvs', '-quick', '-nocompile', '-config', cfg_fn] regtest_cmd = ' '.join(regtest_cmd) # patch do_regtest so that reference output is used if regtest_refdir: self.log.info("Using reference output available in %s" % regtest_refdir) try: for line in fileinput.input(regtest_script, inplace=1, backup='.orig.refout'): line = re.sub(r"^(dir_last\s*=\${dir_base})/.*$", r"\1/%s" % regtest_refdir, line) sys.stdout.write(line) except IOError as err: raise EasyBuildError("Failed to modify '%s': %s", regtest_script, err) else: self.log.info("No reference output found for regression test, just continuing without it...") # prefer using 4 cores, since some tests require/prefer square (n^2) numbers or powers of 2 (2^n) test_core_cnt = min(self.cfg['parallel'], 4) if get_avail_core_count() < test_core_cnt: raise EasyBuildError("Cannot run MPI tests as not enough cores (< %s) are available", test_core_cnt) else: self.log.info("Using %s cores for the MPI tests" % test_core_cnt) # configure regression test cfg_txt = '\n'.join([ 'FORT_C_NAME="%(f90)s"', 'dir_base=%(base)s', 'cp2k_version=%(cp2k_version)s', 'dir_triplet=%(triplet)s', 'export ARCH=${dir_triplet}', 'cp2k_dir=%(cp2k_dir)s', 'leakcheck="YES"', 'maxtasks=%(maxtasks)s', 'cp2k_run_prefix="%(mpicmd_prefix)s"', ]) % { 'f90': os.getenv('F90'), 'base': os.path.dirname(os.path.normpath(self.cfg['start_dir'])), 'cp2k_version': self.cfg['type'], 'triplet': self.typearch, 'cp2k_dir': os.path.basename(os.path.normpath(self.cfg['start_dir'])), 'maxtasks': self.cfg['maxtasks'], 'mpicmd_prefix': self.toolchain.mpi_cmd_for('', test_core_cnt), } write_file(cfg_fn, cfg_txt) self.log.debug("Contents of %s: %s" % (cfg_fn, cfg_txt)) # run regression test (regtest_output, ec) = run_cmd(regtest_cmd, log_all=True, simple=False, log_output=True) if ec == 0: self.log.info("Regression test output:\n%s" % regtest_output) else: raise EasyBuildError("Regression test failed (non-zero exit code): %s", regtest_output) # pattern to search for regression test summary re_pattern = r"number\s+of\s+%s\s+tests\s+(?P<cnt>[0-9]+)" # find total number of tests regexp = re.compile(re_pattern % "", re.M | re.I) res = regexp.search(regtest_output) tot_cnt = None if res: tot_cnt = int(res.group('cnt')) else: raise EasyBuildError("Finding total number of tests in regression test summary failed") # function to report on regtest results def test_report(test_result): """Report on tests with given result.""" postmsg = '' test_result = test_result.upper() regexp = re.compile(re_pattern % test_result, re.M | re.I) cnt = None res = regexp.search(regtest_output) if not res: raise EasyBuildError("Finding number of %s tests in regression test summary failed", test_result.lower()) else: cnt = int(res.group('cnt')) logmsg = "Regression test reported %s / %s %s tests" logmsg_values = (cnt, tot_cnt, test_result.lower()) # failed tests indicate problem with installation # wrong tests are only an issue when there are excessively many if (test_result == "FAILED" and cnt > 0) or (test_result == "WRONG" and (cnt / tot_cnt) > 0.1): if self.cfg['ignore_regtest_fails']: self.log.warning(logmsg, *logmsg_values) self.log.info("Ignoring failures in regression test, as requested.") else: raise EasyBuildError(logmsg, *logmsg_values) elif test_result == "CORRECT" or cnt == 0: self.log.info(logmsg, *logmsg_values) else: self.log.warning(logmsg, *logmsg_values) return postmsg # number of failed/wrong tests, will 
report error if count is positive self.postmsg += test_report("FAILED") self.postmsg += test_report("WRONG") # there are no more 'new' tests from CP2K 8.1 onwards if LooseVersion(self.version) < LooseVersion('8.0'): # number of new tests, will be high if a non-suitable regtest reference was used # will report error if count is positive (is that what we want?) self.postmsg += test_report("NEW") # number of correct tests: just report test_report("CORRECT") def install_step(self): """Install built CP2K - copy from exe to bin - copy data dir (if exists) - copy tests """ # copy executables exedir = os.path.join(self.cfg['start_dir'], 'exe', self.typearch) targetdir = os.path.join(self.installdir, 'bin') copy_dir(exedir, targetdir) # copy libraries and include files, not sure what is strictly required so we take everything if self.cfg['library']: libdir = os.path.join(self.cfg['start_dir'], 'lib', self.typearch, self.cfg['type']) targetdir = os.path.join(self.installdir, 'lib') copy_dir(libdir, targetdir) # Also need to populate the include directory targetdir = os.path.join(self.installdir, 'include') libcp2k_header = os.path.join(self.cfg['start_dir'], 'src', 'start', 'libcp2k.h') target_header = os.path.join(targetdir, os.path.basename(libcp2k_header)) copy_file(libcp2k_header, target_header) # include all .mod files for fortran users (don't know the exact list so take everything) mod_path = os.path.join(self.cfg['start_dir'], 'obj', self.typearch, self.cfg['type']) for mod_file in glob.glob(os.path.join(mod_path, '*.mod')): target_mod = os.path.join(targetdir, os.path.basename(mod_file)) copy_file(mod_file, target_mod) # copy data dir datadir = os.path.join(self.cfg['start_dir'], 'data') targetdir = os.path.join(self.installdir, 'data') if os.path.exists(targetdir): self.log.info("Won't copy data dir. Destination directory %s already exists" % targetdir) elif os.path.exists(datadir): copy_dir(datadir, targetdir) else: self.log.info("Won't copy data dir. Source directory %s does not exist" % datadir) # copy tests srctests = os.path.join(self.cfg['start_dir'], 'tests') targetdir = os.path.join(self.installdir, 'tests') if os.path.exists(targetdir): self.log.info("Won't copy tests. 
Destination directory %s already exists" % targetdir) else: copy_dir(srctests, targetdir) # copy regression test results if self.cfg['runtest']: try: testdir = os.path.dirname(os.path.normpath(self.cfg['start_dir'])) for d in os.listdir(testdir): if d.startswith('TEST-%s-%s' % (self.typearch, self.cfg['type'])): path = os.path.join(testdir, d) target = os.path.join(self.installdir, d) copy_dir(path, target) self.log.info("Regression test results dir %s copied to %s" % (d, self.installdir)) break except (OSError, IOError) as err: raise EasyBuildError("Failed to copy regression test results dir: %s", err) def sanity_check_step(self): """Custom sanity check for CP2K""" cp2k_type = self.cfg['type'] custom_paths = { 'files': ["bin/%s.%s" % (x, cp2k_type) for x in ["cp2k", "cp2k_shell"]], 'dirs': ["tests"] } if self.cfg['library']: custom_paths['files'].append(os.path.join('lib', 'libcp2k.a')) custom_paths['files'].append(os.path.join('include', 'libcp2k.h')) custom_paths['files'].append(os.path.join('include', 'libcp2k.mod')) super(EB_CP2K, self).sanity_check_step(custom_paths=custom_paths) def make_module_extra(self): """Set up a CP2K_DATA_DIR environment variable to find CP2K provided basis sets""" txt = super(EB_CP2K, self).make_module_extra() # also define $CP2K_DATA_DIR in module, # even though CP2K was already configured to pick up 'data' from install dir # this could be useful for users to access the 'data' dir in a documented way (and it doesn't hurt) datadir = os.path.join(self.installdir, 'data') if os.path.exists(datadir): txt += self.module_generator.set_environment('CP2K_DATA_DIR', datadir) return txt
hpcugent/easybuild-easyblocks
easybuild/easyblocks/c/cp2k.py
Python
gpl-2.0
39,559
[ "CP2K" ]
e6880856a8e2e7d51010a63ce8ad1f992bb4bd1ff66526626f5be5d8625a1a79
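The summary parsing in test_step() above is worth seeing in isolation: do_regtest prints one counter line per result category, and the easyblock extracts each count with a single regex template where an empty category matches the total line. Below is a minimal, self-contained sketch of that approach; the sample regtest output is invented for illustration, and the 10% threshold for WRONG tests mirrors the check in test_report().

import re

# invented sample of a do_regtest summary; the real output is captured via run_cmd
regtest_output = """
number of FAILED  tests 0
number of WRONG   tests 3
number of CORRECT tests 97
number of         tests 100
"""

# same template as in test_step(): an empty category matches the total line
re_pattern = r"number\s+of\s+%s\s+tests\s+(?P<cnt>[0-9]+)"

def count_tests(category):
    """Return the reported count for a category ('' gives the total)."""
    match = re.compile(re_pattern % category, re.M | re.I).search(regtest_output)
    return int(match.group('cnt')) if match else None

tot_cnt = count_tests("")
failed = count_tests("FAILED")
wrong = count_tests("WRONG")

# failed tests always indicate a problem; wrong tests only when excessively many
# (the float() cast matters: a plain wrong / tot_cnt is integer division on Python 2)
if failed > 0 or float(wrong) / tot_cnt > 0.1:
    print("regression test unacceptable: %s failed, %s wrong of %s" % (failed, wrong, tot_cnt))
else:
    print("regression test OK: %s of %s correct" % (count_tests("CORRECT"), tot_cnt))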
#!/usr/bin/env python # # This file is part of cclib (http://cclib.sf.net), a library for parsing # and interpreting the results of computational chemistry packages. # # Copyright (C) 2006-2013, the cclib development team # # The library is free software, distributed under the terms of # the GNU Lesser General Public License version 2.1 or later. You should have # received a copy of the license along with cclib. You can also access # the full license online at http://www.gnu.org/copyleft/lgpl.html. """cclib: parsers and algorithms for computational chemistry cclib is a Python library that provides parsers for computational chemistry log files. It also provides a platform to implement algorithms in a package-independent manner. """ __revision__ = "$Revision$" doclines = __doc__.split("\n") # Chosen from http://www.python.org/pypi?:action=list_classifiers classifiers = """\ Development Status :: 5 - Production/Stable Environment :: Console Intended Audience :: Science/Research Intended Audience :: Developers License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) Natural Language :: English Operating System :: OS Independent Programming Language :: Python Topic :: Scientific/Engineering :: Chemistry Topic :: Software Development :: Libraries :: Python Modules """ programs = ['ADF', 'GAMESS', 'GAMESS-UK', 'Gaussian', 'Jaguar', 'Molpro', 'ORCA'] def setup_cclib(): import os import sys # Import from setuptools only if requested. if 'egg' in sys.argv: sys.argv.pop(sys.argv.index('egg')) from setuptools import setup else: from distutils.core import setup # Setup the list of packages. cclib_packages = ['cclib', 'cclib.parser', 'cclib.progress', 'cclib.method', 'cclib.bridge', 'cclib.test'] # Setup the list of data files. cclib_prefix = 'lib/python%i.%i/site-packages/cclib' %(sys.version_info[0], sys.version_info[1]) test_prefix = cclib_prefix + '/test' data_prefix = cclib_prefix + '/data' cclib_datafiles = [ (cclib_prefix, ['ANNOUNCE', 'CHANGELOG', 'INSTALL', 'LICENSE', 'README', 'THANKS']), (test_prefix, ['test/testdata']), (data_prefix, ['data/regressionfiles.txt', 'data/wget.sh'])] for program in programs: data_dirs = os.listdir('data/%s' %program) for data_dir in data_dirs: if data_dir[:5] == 'basic': dest = '%s/%s/%s' %(data_prefix, program, data_dir) path = 'data/%s/%s' %(program, data_dir) newfiles = ['%s/%s' %(path,fname) for fname in os.listdir(path) if fname[0] != '.'] cclib_datafiles.append((dest, newfiles)) setup( name = "cclib", version = "1.1", url = "http://cclib.sf.net", author = "cclib development team", author_email = "cclib-users@lists.sourceforge.net", maintainer = "cclib development team", maintainer_email = "cclib-users@lists.sourceforge.net", license = "LGPL", description = doclines[0], long_description = "\n".join(doclines[2:]), classifiers = filter(None, classifiers.split("\n")), platforms = ["Any."], scripts = ["src/scripts/ccget", "src/scripts/cda"], package_dir = {'cclib':'src/cclib', 'cclib.test':'test'}, packages = cclib_packages, data_files = cclib_datafiles ) if __name__ == '__main__': setup_cclib()
Clyde-fare/cclib_bak
setup.py
Python
lgpl-2.1
3,486
[ "ADF", "GAMESS", "Gaussian", "Jaguar", "Molpro", "ORCA", "cclib" ]
0a5382f48b3224d4ac37611aafe11a859975556326ee9c42e23f765f90df8136
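setup_cclib() above builds its data_files argument by scanning data/<program> for directories whose names start with 'basic'; each one becomes a (destination, [files]) tuple that distutils installs verbatim. Here is a minimal standalone sketch of that collection step; the program subset and destination prefix are invented for illustration, and an isdir() guard is added so the sketch runs even without the data tree present.

import os

# subset of parsers and a destination prefix, both invented for illustration
programs = ['ADF', 'Gaussian']
data_prefix = 'lib/python2.7/site-packages/cclib/data'

datafiles = []
for program in programs:
    src_root = os.path.join('data', program)
    if not os.path.isdir(src_root):
        continue  # guard not present in setup_cclib(): skip parsers without a data dir
    for data_dir in os.listdir(src_root):
        if data_dir[:5] != 'basic':
            continue  # only the basic* logfile sets are shipped
        path = os.path.join(src_root, data_dir)
        dest = '%s/%s/%s' % (data_prefix, program, data_dir)
        # hidden files (leading '.') are excluded, as in setup_cclib()
        files = ['%s/%s' % (path, fname)
                 for fname in os.listdir(path) if fname[0] != '.']
        datafiles.append((dest, files))

for dest, files in datafiles:
    print('%s <- %d file(s)' % (dest, len(files)))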
# -*- coding: utf-8 -*- # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """This module contains tests for bodhi.server.services.updates.""" import copy import textwrap import time import mock import urlparse from mock import ANY from nose.tools import eq_ from datetime import datetime, timedelta from webtest import TestApp import bodhi.tests.server.functional.base from bodhi.server import main from bodhi.server.config import config from bodhi.server.models import ( Build, Group, Package, Update, User, UpdateStatus, UpdateRequest, Release, ReleaseState, BuildrootOverride, UpdateStatus, UpdateType, ) YEAR = time.localtime().tm_year mock_valid_requirements = { 'target': 'bodhi.server.validators._get_valid_requirements', 'return_value': ['rpmlint', 'upgradepath'], } mock_uuid4_version1 = { 'target': 'uuid.uuid4', 'return_value': 'this is a consistent string', } mock_uuid4_version2 = { 'target': 'uuid.uuid4', 'return_value': 'this is another consistent string', } mock_taskotron_results = { 'target': 'bodhi.server.util.taskotron_results', 'return_value': [{ "outcome": "PASSED", "result_data": {}, "testcase": { "name": "rpmlint", } }], } mock_failed_taskotron_results = { 'target': 'bodhi.server.util.taskotron_results', 'return_value': [{ "outcome": "FAILED", "result_data": {}, "testcase": { "name": "rpmlint", } }], } mock_absent_taskotron_results = { 'target': 'bodhi.server.util.taskotron_results', 'return_value': [], } class TestNewUpdate(bodhi.tests.server.functional.base.BaseWSGICase): """ This class contains tests for the new_update() function. """ @mock.patch(**mock_valid_requirements) def test_invalid_build_name(self, *args): res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17,invalidbuild-1.0'), status=400) assert 'Build not in name-version-release format' in res, res @mock.patch(**mock_valid_requirements) def test_empty_build_name(self, *args): res = self.app.post_json('/updates/', self.get_update([u'']), status=400) self.assertEquals(res.json_body['errors'][0]['name'], 'builds.0') self.assertEquals(res.json_body['errors'][0]['description'], 'Required') @mock.patch(**mock_valid_requirements) def test_fail_on_edit_with_empty_build_list(self, *args): update = self.get_update() update['edited'] = update['builds'] # the update title.. 
update['builds'] = [] res = self.app.post_json('/updates/', update, status=400) self.assertEquals(len(res.json_body['errors']), 2) self.assertEquals(res.json_body['errors'][0]['name'], 'builds') self.assertEquals( res.json_body['errors'][0]['description'], 'You may not specify an empty list of builds.') self.assertEquals(res.json_body['errors'][1]['name'], 'builds') self.assertEquals( res.json_body['errors'][1]['description'], 'ACL validation mechanism was unable to determine ACLs.') @mock.patch(**mock_taskotron_results) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_unicode_description(self, publish, *args): update = self.get_update('bodhi-2.0.0-2.fc17') update['notes'] = u'This is wünderfül' r = self.app.post_json('/updates/', update) up = r.json_body self.assertEquals(up['title'], u'bodhi-2.0.0-2.fc17') self.assertEquals(up['notes'], u'This is wünderfül') self.assertIsNotNone(up['date_submitted']) publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_valid_requirements) def test_duplicate_build(self, *args): res = self.app.post_json('/updates/', self.get_update([u'bodhi-2.0-2.fc17', u'bodhi-2.0-2.fc17']), status=400) assert 'Duplicate builds' in res, res @mock.patch(**mock_valid_requirements) def test_multiple_builds_of_same_package(self, *args): res = self.app.post_json('/updates/', self.get_update([u'bodhi-2.0-2.fc17', u'bodhi-2.0-3.fc17']), status=400) assert 'Multiple bodhi builds specified' in res, res @mock.patch(**mock_valid_requirements) def test_invalid_autokarma(self, *args): res = self.app.post_json('/updates/', self.get_update(stable_karma=-1), status=400) assert '-1 is less than minimum value 1' in res, res res = self.app.post_json('/updates/', self.get_update(unstable_karma=1), status=400) assert '1 is greater than maximum value -1' in res, res @mock.patch(**mock_valid_requirements) def test_duplicate_update(self, *args): res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17'), status=400) assert 'Update for bodhi-2.0-1.fc17 already exists' in res, res @mock.patch(**mock_valid_requirements) def test_invalid_requirements(self, *args): update = self.get_update() update['requirements'] = 'rpmlint silly-dilly' res = self.app.post_json('/updates/', update, status=400) assert 'Invalid requirement' in res, res @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_no_privs(self, publish, *args): user = User(name=u'bodhi') self.db.add(user) self.db.flush() app = TestApp(main({}, testing=u'bodhi', session=self.db, **self.app_settings)) res = app.post_json('/updates/', self.get_update(u'bodhi-2.1-1.fc17'), status=400) assert 'bodhi does not have commit access to bodhi' in res, res self.assertEquals(publish.call_args_list, []) @mock.patch(**mock_taskotron_results) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_provenpackager_privs(self, publish, *args): "Ensure provenpackagers can push updates for any package" user = User(name=u'bodhi') self.db.add(user) self.db.flush() group = self.db.query(Group).filter_by(name=u'provenpackager').one() user.groups.append(group) app = TestApp(main({}, testing=u'bodhi', session=self.db, **self.app_settings)) update = self.get_update(u'bodhi-2.1-1.fc17') update['csrf_token'] = app.get('/csrf').json_body['csrf_token'] res = app.post_json('/updates/', update) assert 'bodhi does not have commit access to bodhi' not in res, res build = 
self.db.query(Build).filter_by(nvr=u'bodhi-2.1-1.fc17').one() assert build.update is not None publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_valid_requirements) def test_pkgdb_outage(self, *args): "Test the case where our call to the pkgdb throws an exception" settings = self.app_settings.copy() settings['acl_system'] = 'pkgdb' settings['pkgdb_url'] = 'invalidurl' app = TestApp(main({}, testing=u'guest', session=self.db, **settings)) update = self.get_update(u'bodhi-2.0-2.fc17') update['csrf_token'] = app.get('/csrf').json_body['csrf_token'] res = app.post_json('/updates/', update, status=400) assert "Unable to access the Package Database" in res, res @mock.patch(**mock_valid_requirements) def test_invalid_acl_system(self, *args): settings = self.app_settings.copy() settings['acl_system'] = 'null' app = TestApp(main({}, testing=u'guest', session=self.db, **settings)) res = app.post_json('/updates/', self.get_update(u'bodhi-2.0-2.fc17'), status=400) assert "guest does not have commit access to bodhi" in res, res def test_put_json_update(self): self.app.put_json('/updates/', self.get_update(), status=405) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_post_json_update(self, publish, *args): self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-1.fc17')) publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_uuid4_version1) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_new_update(self, publish, *args): r = self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-2.fc17')) up = r.json_body self.assertEquals(up['title'], u'bodhi-2.0.0-2.fc17') self.assertEquals(up['status'], u'pending') self.assertEquals(up['request'], u'testing') self.assertEquals(up['user']['name'], u'guest') self.assertEquals(up['release']['name'], u'F17') self.assertEquals(up['type'], u'bugfix') self.assertEquals(up['severity'], u'unspecified') self.assertEquals(up['suggest'], u'unspecified') self.assertEquals(up['close_bugs'], True) self.assertEquals(up['notes'], u'this is a test update') self.assertIsNotNone(up['date_submitted']) self.assertEquals(up['date_modified'], None) self.assertEquals(up['date_approved'], None) self.assertEquals(up['date_pushed'], None) self.assertEquals(up['locked'], False) self.assertEquals(up['alias'], u'FEDORA-%s-033713b73b' % YEAR) self.assertEquals(up['karma'], 0) self.assertEquals(up['requirements'], 'rpmlint') publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_new_update_with_multiple_bugs(self, publish, *args): update = self.get_update('bodhi-2.0.0-2.fc17') update['bugs'] = ['1234', '5678'] r = self.app.post_json('/updates/', update) up = r.json_body self.assertEquals(len(up['bugs']), 2) self.assertEquals(up['bugs'][0]['bug_id'], 1234) self.assertEquals(up['bugs'][1]['bug_id'], 5678) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_new_update_with_multiple_bugs_as_str(self, publish, *args): update = self.get_update('bodhi-2.0.0-2.fc17') update['bugs'] = '1234, 5678' r = self.app.post_json('/updates/', update) up = r.json_body self.assertEquals(len(up['bugs']), 2) self.assertEquals(up['bugs'][0]['bug_id'], 1234) self.assertEquals(up['bugs'][1]['bug_id'], 5678) @mock.patch(**mock_valid_requirements) 
@mock.patch('bodhi.server.notifications.publish') def test_new_update_with_invalid_bugs_as_str(self, publish, *args): update = self.get_update('bodhi-2.0.0-2.fc17') update['bugs'] = '1234, blargh' r = self.app.post_json('/updates/', update, status=400) up = r.json_body self.assertEquals(up['status'], 'error') self.assertEquals(up['errors'][0]['description'], "Invalid bug ID specified: [u'1234', u'blargh']") @mock.patch(**mock_valid_requirements) def test_new_update_with_existing_build(self, *args): """Test submitting a new update with a build already in the database""" package = Package.get('bodhi', self.db) self.db.add(Build(nvr=u'bodhi-2.0.0-3.fc17', package=package)) self.db.flush() args = self.get_update(u'bodhi-2.0.0-3.fc17') resp = self.app.post_json('/updates/', args) eq_(resp.json['title'], 'bodhi-2.0.0-3.fc17') @mock.patch(**mock_valid_requirements) def test_new_update_with_existing_package(self, *args): """Test submitting a new update with a package that is already in the database.""" package = Package(name='existing-package') self.db.add(package) self.db.flush() args = self.get_update(u'existing-package-2.4.1-5.fc17') resp = self.app.post_json('/updates/', args) eq_(resp.json['title'], 'existing-package-2.4.1-5.fc17') package = self.db.query(Package).filter_by(name=u'existing-package').one() self.assertEqual(package.name, 'existing-package') @mock.patch(**mock_valid_requirements) def test_new_update_with_missing_package(self, *args): """Test submitting a new update with a package that is not already in the database.""" args = self.get_update(u'missing-package-2.4.1-5.fc17') resp = self.app.post_json('/updates/', args) eq_(resp.json['title'], 'missing-package-2.4.1-5.fc17') package = self.db.query(Package).filter_by(name=u'missing-package').one() self.assertEqual(package.name, 'missing-package') @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_cascade_package_requirements_to_update(self, publish, *args): package = self.db.query(Package).filter_by(name=u'bodhi').one() package.requirements = u'upgradepath rpmlint' self.db.flush() args = self.get_update(u'bodhi-2.0.0-3.fc17') # Don't specify any requirements so that they cascade from the package del args['requirements'] r = self.app.post_json('/updates/', args) up = r.json_body self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17') self.assertEquals(up['requirements'], 'upgradepath rpmlint') publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_push_untested_critpath_to_release(self, publish, *args): """ Ensure that we cannot push an untested critpath update directly to stable. 
""" args = self.get_update('kernel-3.11.5-300.fc17') args['request'] = 'stable' up = self.app.post_json('/updates/', args).json_body self.assertTrue(up['critpath']) self.assertEquals(up['request'], 'testing') publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_obsoletion(self, publish, *args): nvr = 'bodhi-2.0.0-2.fc17' args = self.get_update(nvr) with mock.patch(**mock_uuid4_version1): self.app.post_json('/updates/', args) publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) publish.call_args_list = [] up = self.db.query(Update).filter_by(title=nvr).one() up.status = UpdateStatus.testing up.request = None args = self.get_update('bodhi-2.0.0-3.fc17') with mock.patch(**mock_uuid4_version2): r = self.app.post_json('/updates/', args).json_body self.assertEquals(r['request'], 'testing') # Since we're obsoleting something owned by someone else. self.assertEquals(r['caveats'][0]['description'], 'This update has obsoleted bodhi-2.0.0-2.fc17, ' 'and has inherited its bugs and notes.') # Check for the comment multiple ways # Note that caveats above don't support markdown, but comments do. expected_comment = ( u'This update has obsoleted [bodhi-2.0.0-2.fc17]({}), ' u'and has inherited its bugs and notes.') expected_comment = expected_comment.format( urlparse.urljoin(config['base_address'], '/updates/FEDORA-2016-033713b73b')) self.assertEquals(r['comments'][-1]['text'], expected_comment) publish.assert_called_with( topic='update.request.testing', msg=mock.ANY) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEquals(up.status, UpdateStatus.obsolete) expected_comment = u'This update has been obsoleted by [bodhi-2.0.0-3.fc17]({}).' expected_comment = expected_comment.format( urlparse.urljoin(config['base_address'], '/updates/FEDORA-2016-53345602d5')) self.assertEquals(up.comments[-1].text, expected_comment) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') @mock.patch('bodhi.server.services.updates.Update.new', side_effect=IOError('oops!')) def test_unexpected_exception(self, publish, *args): """Ensure that an unexpected Exception is handled by new_update().""" update = self.get_update('bodhi-2.3.2-1.fc17') r = self.app.post_json('/updates/', update, status=400) self.assertEquals(r.json_body['status'], 'error') self.assertEquals(r.json_body['errors'][0]['description'], "Unable to create update. oops!") # Despite the Exception, the Build should still exist in the database build = self.db.query(Build).filter(Build.nvr == u'bodhi-2.3.2-1.fc17').one() self.assertEqual(build.package.name, 'bodhi') @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.services.updates.Update.obsolete_older_updates', side_effect=RuntimeError("bet you didn't see this coming!")) def test_obsoletion_with_exception(self, *args): """ Assert that an exception during obsoletion is properly handled. """ nvr = 'bodhi-2.0.0-2.fc17' args = self.get_update(nvr) with mock.patch(**mock_uuid4_version1): self.app.post_json('/updates/', args) up = self.db.query(Update).filter_by(title=nvr).one() up.status = UpdateStatus.testing up.request = None args = self.get_update('bodhi-2.0.0-3.fc17') with mock.patch(**mock_uuid4_version2): r = self.app.post_json('/updates/', args).json_body self.assertEquals(r['request'], 'testing') # The exception handler should have put an error message in the caveats. 
self.assertEquals(r['caveats'][0]['description'], "Problem obsoleting older updates: bet you didn't see this coming!") # Check for the comment multiple ways. The comment will be about the update being submitted # for testing instead of being about the obsoletion, since the obsoletion failed. # Note that caveats above don't support markdown, but comments do. expected_comment = 'This update has been submitted for testing by guest. ' expected_comment = expected_comment.format( urlparse.urljoin(config['base_address'], '/updates/FEDORA-2016-033713b73b')) self.assertEquals(r['comments'][-1]['text'], expected_comment) up = self.db.query(Update).filter_by(title=nvr).one() # The old update failed to get obsoleted. self.assertEquals(up.status, UpdateStatus.testing) expected_comment = u'This update has been submitted for testing by guest. ' self.assertEquals(up.comments[-1].text, expected_comment) class TestUpdatesService(bodhi.tests.server.functional.base.BaseWSGICase): def test_home_html(self): resp = self.app.get('/', headers={'Accept': 'text/html'}) self.assertIn('Fedora Updates System', resp) self.assertIn('&copy;', resp) # FIXME: make it easy to tweak the tag of an update in our buildsys during unit tests #def test_invalid_tag(self): # map(self.db.delete, self.db.query(Update).all()) # map(self.db.delete, self.db.query(Build).all()) # num = self.db.query(Update).count() # assert num == 0, num # res = self.app.post_json('/updates/', self.get_update(u'bodhi-1.0-1.fc17'), # status=400) # assert 'Invalid tag' in res, res @mock.patch(**mock_taskotron_results) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_provenpackager_edit_anything(self, publish, *args): "Ensure provenpackagers can edit updates for any package" nvr = u'bodhi-2.1-1.fc17' user = User(name=u'lloyd') self.db.add(user) self.db.add(User(name=u'ralph')) # Add a non proventester self.db.flush() group = self.db.query(Group).filter_by(name=u'provenpackager').one() user.groups.append(group) app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings)) up_data = self.get_update(nvr) up_data['csrf_token'] = app.get('/csrf').json_body['csrf_token'] res = app.post_json('/updates/', up_data) assert 'does not have commit access to bodhi' not in res, res publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) app = TestApp(main({}, testing=u'lloyd', session=self.db, **self.app_settings)) update = self.get_update(nvr) update['csrf_token'] = app.get('/csrf').json_body['csrf_token'] update['notes'] = u'testing!!!' 
update['edited'] = nvr res = app.post_json('/updates/', update) assert 'bodhi does not have commit access to bodhi' not in res, res build = self.db.query(Build).filter_by(nvr=nvr).one() assert build.update is not None self.assertEquals(build.update.notes, u'testing!!!') #publish.assert_called_once_with( # topic='update.request.testing', msg=mock.ANY) @mock.patch(**mock_taskotron_results) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_provenpackager_request_privs(self, publish, *args): "Ensure provenpackagers can change the request for any update" nvr = u'bodhi-2.1-1.fc17' user = User(name=u'bob') self.db.add(user) self.db.add(User(name=u'ralph')) # Add a non proventester self.db.add(User(name=u'someuser')) # An unrelated user with no privs self.db.flush() group = self.db.query(Group).filter_by(name=u'provenpackager').one() user.groups.append(group) app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings)) up_data = self.get_update(nvr) up_data['csrf_token'] = app.get('/csrf').json_body['csrf_token'] res = app.post_json('/updates/', up_data) assert 'does not have commit access to bodhi' not in res, res publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) build = self.db.query(Build).filter_by(nvr=nvr).one() eq_(build.update.request, UpdateRequest.testing) # Try and submit the update to stable as a non-provenpackager app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings)) post_data = dict(update=nvr, request='stable', csrf_token=app.get('/csrf').json_body['csrf_token']) res = app.post_json('/updates/%s/request' % str(nvr), post_data, status=400) # Ensure we can't push it until it meets the requirements eq_(res.json_body['status'], 'error') eq_(res.json_body['errors'][0]['description'], config.get('not_yet_tested_msg')) update = self.db.query(Update).filter_by(title=nvr).one() eq_(update.stable_karma, 3) eq_(update.locked, False) eq_(update.request, UpdateRequest.testing) # Pretend it was pushed to testing update.request = None update.status = UpdateStatus.testing update.pushed = True self.db.flush() eq_(update.karma, 0) update.comment(self.db, u"foo", 1, u'foo') update = self.db.query(Update).filter_by(title=nvr).one() eq_(update.karma, 1) eq_(update.request, None) update.comment(self.db, u"foo", 1, u'bar') update = self.db.query(Update).filter_by(title=nvr).one() eq_(update.karma, 2) eq_(update.request, None) update.comment(self.db, u"foo", 1, u'biz') update = self.db.query(Update).filter_by(title=nvr).one() eq_(update.karma, 3) eq_(update.request, UpdateRequest.stable) # Set it back to testing update.request = UpdateRequest.testing # Try and submit the update to stable as a proventester app = TestApp(main({}, testing=u'bob', session=self.db, **self.app_settings)) res = app.post_json('/updates/%s/request' % str(nvr), dict(update=nvr, request='stable', csrf_token=app.get('/csrf').json_body['csrf_token']), status=200) eq_(res.json_body['update']['request'], 'stable') app = TestApp(main({}, testing=u'bob', session=self.db, **self.app_settings)) res = app.post_json('/updates/%s/request' % str(nvr), dict(update=nvr, request='obsolete', csrf_token=app.get('/csrf').json_body['csrf_token']), status=200) eq_(res.json_body['update']['request'], None) eq_(update.request, None) eq_(update.status, UpdateStatus.obsolete) # Test that bob has can_edit True, provenpackager app = TestApp(main({}, testing=u'bob', session=self.db, **self.app_settings)) res = app.get('/updates/%s' % 
str(nvr), status=200) eq_(res.json_body['can_edit'], True) # Test that ralph has can_edit True, they submitted it. app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings)) res = app.get('/updates/%s' % str(nvr), status=200) eq_(res.json_body['can_edit'], True) # Test that someuser has can_edit False, they are unrelated # This check *failed* with the old acls code. app = TestApp(main({}, testing=u'someuser', session=self.db, **self.app_settings)) res = app.get('/updates/%s' % str(nvr), status=200) eq_(res.json_body['can_edit'], False) # Test that an anonymous user has can_edit False, obv. # This check *crashed* with the code on 2015-09-24. anonymous_settings = copy.copy(self.app_settings) anonymous_settings.update({ 'authtkt.secret': 'whatever', 'authtkt.secure': True, }) app = TestApp(main({}, session=self.db, **anonymous_settings)) res = app.get('/updates/%s' % str(nvr), status=200) eq_(res.json_body['can_edit'], False) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_old_bodhi1_redirect(self, publish, *args): # Create it title = 'bodhi-2.0.0-1.fc17' self.app.post_json('/updates/', self.get_update(title)) publish.assert_called_once_with( topic='update.request.testing', msg=mock.ANY) # Get it once with just the title url = '/updates/%s' % title res = self.app.get(url) update = res.json_body['update'] # Now try the old bodhi1 url. Redirect should take place. url = '/updates/%s/%s' % (update['alias'], update['title']) res = self.app.get(url, status=302) target = 'http://localhost/updates/%s' % update['alias'] self.assertEquals(res.headers['Location'], target) def test_404(self): self.app.get('/a', status=404) def test_get_single_update(self): res = self.app.get('/updates/bodhi-2.0-1.fc17') self.assertEquals(res.json_body['update']['title'], 'bodhi-2.0-1.fc17') self.assertIn('application/json', res.headers['Content-Type']) def test_get_single_update_jsonp(self): res = self.app.get('/updates/bodhi-2.0-1.fc17', {'callback': 'callback'}, headers={'Accept': 'application/javascript'}) self.assertIn('application/javascript', res.headers['Content-Type']) self.assertIn('callback', res) self.assertIn('bodhi-2.0-1.fc17', res) def test_get_single_update_rss(self): self.app.get('/updates/bodhi-2.0-1.fc17', headers={'Accept': 'application/atom+xml'}, status=406) def test_get_single_update_html(self): id = 'bodhi-2.0-1.fc17' resp = self.app.get('/updates/%s' % id, headers={'Accept': 'text/html'}) self.assertIn('text/html', resp.headers['Content-Type']) self.assertIn(id, resp) self.assertIn('&copy;', resp) def test_list_updates(self): res = self.app.get('/updates/') body = res.json_body self.assertEquals(len(body['updates']), 1) alias = u'FEDORA-%s-a3bbe1a8f2' % YEAR up = body['updates'][0] self.assertEquals(up['title'], u'bodhi-2.0-1.fc17') self.assertEquals(up['status'], u'pending') self.assertEquals(up['request'], u'testing') self.assertEquals(up['user']['name'], u'guest') self.assertEquals(up['submitter'], u'guest') self.assertEquals(up['release']['name'], u'F17') self.assertEquals(up['type'], u'bugfix') self.assertEquals(up['severity'], u'unspecified') self.assertEquals(up['suggest'], u'unspecified') self.assertEquals(up['close_bugs'], True) self.assertEquals(up['notes'], u'Useful details!') self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00') self.assertEquals(up['date_modified'], None) self.assertEquals(up['date_pushed'], None) self.assertEquals(up['locked'], True) self.assertEquals(up['alias'], alias) 
self.assertEquals(up['karma'], 1) self.assertEquals(up['url'], (urlparse.urljoin(config['base_address'], '/updates/%s' % alias))) def test_list_updates_jsonp(self): res = self.app.get('/updates/', {'callback': 'callback'}, headers={'Accept': 'application/javascript'}) self.assertIn('application/javascript', res.headers['Content-Type']) self.assertIn('callback', res) self.assertIn('bodhi-2.0-1.fc17', res) def test_list_updates_rss(self): res = self.app.get('/rss/updates/', headers={'Accept': 'application/atom+xml'}) self.assertIn('application/rss+xml', res.headers['Content-Type']) self.assertIn('bodhi-2.0-1.fc17', res) def test_list_updates_html(self): res = self.app.get('/updates/', headers={'Accept': 'text/html'}) self.assertIn('text/html', res.headers['Content-Type']) self.assertIn('bodhi-2.0-1.fc17', res) self.assertIn('&copy;', res) def test_search_updates(self): res = self.app.get('/updates/', {'like': 'odh'}) body = res.json_body self.assertEquals(len(body['updates']), 1) up = body['updates'][0] self.assertEquals(up['title'], u'bodhi-2.0-1.fc17') res = self.app.get('/updates/', {'like': 'wat'}) body = res.json_body self.assertEquals(len(body['updates']), 0) @mock.patch(**mock_valid_requirements) def test_list_updates_pagination(self, *args): # First, stuff a second update in there self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-2.fc17')) # Then, test pagination res = self.app.get('/updates/', {"rows_per_page": 1}) body = res.json_body self.assertEquals(len(body['updates']), 1) update1 = body['updates'][0] res = self.app.get('/updates/', {"rows_per_page": 1, "page": 2}) body = res.json_body self.assertEquals(len(body['updates']), 1) update2 = body['updates'][0] self.assertNotEquals(update1, update2) def test_list_updates_by_approved_since(self): now = datetime.utcnow() # Try with no approved updates first res = self.app.get('/updates/', {"approved_since": now.strftime("%Y-%m-%d")}) body = res.json_body self.assertEquals(len(body['updates']), 0) # Now approve one self.db.query(Update).first().date_approved = now self.db.flush() # And try again res = self.app.get('/updates/', {"approved_since": now.strftime("%Y-%m-%d")}) body = res.json_body self.assertEquals(len(body['updates']), 1) up = body['updates'][0] self.assertEquals(up['title'], u'bodhi-2.0-1.fc17') self.assertEquals(up['status'], u'pending') self.assertEquals(up['request'], u'testing') self.assertEquals(up['user']['name'], u'guest') self.assertEquals(up['release']['name'], u'F17') self.assertEquals(up['type'], u'bugfix') self.assertEquals(up['severity'], u'unspecified') self.assertEquals(up['suggest'], u'unspecified') self.assertEquals(up['close_bugs'], True) self.assertEquals(up['notes'], u'Useful details!') self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00') self.assertEquals(up['date_approved'], now.strftime("%Y-%m-%d %H:%M:%S")) self.assertEquals(up['date_pushed'], None) self.assertEquals(up['locked'], True) self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR) self.assertEquals(up['karma'], 1) self.assertEquals(len(up['bugs']), 1) self.assertEquals(up['bugs'][0]['bug_id'], 12345) # https://github.com/fedora-infra/bodhi/issues/270 self.assertEquals(len(up['test_cases']), 1) self.assertEquals(up['test_cases'][0]['name'], u'Wat') def test_list_updates_by_invalid_approved_since(self): res = self.app.get('/updates/', {"approved_since": "forever"}, status=400) body = res.json_body self.assertEquals(len(body.get('updates', [])), 0) self.assertEquals(res.json_body['errors'][0]['name'], 
                          'approved_since')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid date')

    def test_list_updates_by_approved_before(self):
        # Approve an update
        now = datetime.utcnow()
        self.db.query(Update).first().date_approved = now
        self.db.flush()

        # First check we get no result for an old date
        res = self.app.get('/updates/', {"approved_before": "1984-11-01"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

        # Now check we get the update if we use tomorrow
        tomorrow = datetime.utcnow() + timedelta(days=1)
        tomorrow = tomorrow.strftime("%Y-%m-%d")

        res = self.app.get('/updates/', {"approved_before": tomorrow})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_approved'], now.strftime("%Y-%m-%d %H:%M:%S"))
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(len(up['bugs']), 1)
        self.assertEquals(up['bugs'][0]['bug_id'], 12345)

    def test_list_updates_by_invalid_approved_before(self):
        res = self.app.get('/updates/', {"approved_before": "forever"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'approved_before')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid date')

    def test_list_updates_by_bugs(self):
        res = self.app.get('/updates/', {"bugs": '12345'})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(len(up['bugs']), 1)
        self.assertEquals(up['bugs'][0]['bug_id'], 12345)

    def test_list_updates_by_invalid_bug(self):
        res = self.app.get('/updates/', {"bugs": "cockroaches"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'bugs')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          "Invalid bug ID specified: [u'cockroaches']")

    def test_list_updates_by_unexisting_bug(self):
        res = self.app.get('/updates/', {"bugs": "19850110"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

    def test_list_updates_by_critpath(self):
        res = self.app.get('/updates/', {"critpath": "false"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_invalid_critpath(self):
        res = self.app.get('/updates/', {"critpath": "lalala"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'critpath')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"lalala" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')

    def test_list_updates_by_cves(self):
        res = self.app.get("/updates/", {"cves": "CVE-1985-0110"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        #self.assertEquals(up['cves'][0]['cve_id'], "CVE-1985-0110")

    def test_list_updates_by_unexisting_cve(self):
        res = self.app.get('/updates/', {"cves": "CVE-2013-1015"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

    def test_list_updates_by_invalid_cve(self):
        res = self.app.get('/updates/', {"cves": "WTF-ZOMG-BBQ"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'cves.0')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"WTF-ZOMG-BBQ" is not a valid CVE id')

    def test_list_updates_by_date_submitted_invalid_date(self):
        """test filtering by submitted date with an invalid date"""
        res = self.app.get('/updates/', {"submitted_since": "11-01-1984"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(body['errors'][0]['name'], 'submitted_since')
        self.assertEquals(body['errors'][0]['description'], 'Invalid date')

    def test_list_updates_by_date_submitted_future_date(self):
        """test filtering by submitted date with future date"""
        tomorrow = datetime.utcnow() + timedelta(days=1)
        tomorrow = tomorrow.strftime("%Y-%m-%d")
        res = self.app.get('/updates/', {"submitted_since": tomorrow})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

    def test_list_updates_by_date_submitted_valid(self):
        """test filtering by submitted date with valid data"""
        res = self.app.get('/updates/', {"submitted_since": "1984-11-01"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_date_submitted_before_invalid_date(self):
        """test filtering by submitted before date with an invalid date"""
        res = self.app.get('/updates/', {"submitted_before": "11-01-1984"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(body['errors'][0]['name'], 'submitted_before')
        self.assertEquals(body['errors'][0]['description'], 'Invalid date')

    def test_list_updates_by_date_submitted_before_old_date(self):
        """test filtering by submitted before date with old date"""
        res = self.app.get('/updates/', {"submitted_before": "1975-01-01"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

    def test_list_updates_by_date_submitted_before_valid(self):
        """test filtering by submitted before date with valid date"""
        today = datetime.utcnow().strftime("%Y-%m-%d")
        res = self.app.get('/updates/', {"submitted_before": today})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_locked(self):
        res = self.app.get('/updates/', {"locked": "true"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
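        # NOTE: the field-by-field serialization checks above and below recur
        # in nearly every listing test in this class. A shared helper could
        # DRY them up; a minimal sketch (hypothetical, not part of the
        # original suite) might look like:
        #
        #     def assert_fixture_update(self, up):
        #         self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        #         self.assertEquals(up['user']['name'], u'guest')
        #         self.assertEquals(up['release']['name'], u'F17')
        #
        # The explicit assertions are kept as-is to match the file's style.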
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_invalid_locked(self):
        res = self.app.get('/updates/', {"locked": "maybe"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'locked')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"maybe" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')

    def test_list_updates_by_modified_since(self):
        now = datetime.utcnow()

        # Try with no modified updates first
        res = self.app.get('/updates/', {"modified_since": now.strftime("%Y-%m-%d")})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

        # Now approve one
        self.db.query(Update).first().date_modified = now
        self.db.flush()

        # And try again
        res = self.app.get('/updates/', {"modified_since": now.strftime("%Y-%m-%d")})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], now.strftime("%Y-%m-%d %H:%M:%S"))
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(len(up['bugs']), 1)
        self.assertEquals(up['bugs'][0]['bug_id'], 12345)

    def test_list_updates_by_invalid_modified_since(self):
        res = self.app.get('/updates/', {"modified_since": "the dawn of time"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'modified_since')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid date')

    def test_list_updates_by_modified_before(self):
        now = datetime.utcnow()
        tomorrow = now + timedelta(days=1)
        tomorrow = tomorrow.strftime("%Y-%m-%d")

        # Try with no modified updates first
        res = self.app.get('/updates/', {"modified_before": now.strftime("%Y-%m-%d")})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

        # Now approve one
        self.db.query(Update).first().date_modified = now
        self.db.flush()

        # And try again
        res = self.app.get('/updates/', {"modified_before": tomorrow})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], now.strftime("%Y-%m-%d %H:%M:%S"))
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(len(up['bugs']), 1)
        self.assertEquals(up['bugs'][0]['bug_id'], 12345)

    def test_list_updates_by_invalid_modified_before(self):
        res = self.app.get('/updates/', {"modified_before": "the dawn of time"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'modified_before')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid date')

    def test_list_updates_by_package(self):
        res = self.app.get('/updates/', {"packages": "bodhi"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_builds(self):
        res = self.app.get('/updates/', {"builds": "bodhi-3.0-1.fc17"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

        res = self.app.get('/updates/', {"builds": "bodhi-2.0-1.fc17"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_package(self):
        res = self.app.get('/updates/', {"packages": "flash-player"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

    def test_list_updates_by_pushed(self):
        res = self.app.get('/updates/', {"pushed": "false"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(up['pushed'], False)

    def test_list_updates_by_invalid_pushed(self):
        res = self.app.get('/updates/', {"pushed": "who knows?"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'pushed')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"who knows?" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')

    def test_list_updates_by_pushed_since(self):
        now = datetime.utcnow()

        # Try with no pushed updates first
        res = self.app.get('/updates/', {"pushed_since": now.strftime("%Y-%m-%d")})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

        # Now approve one
        self.db.query(Update).first().date_pushed = now
        self.db.flush()

        # And try again
        res = self.app.get('/updates/', {"pushed_since": now.strftime("%Y-%m-%d")})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], now.strftime("%Y-%m-%d %H:%M:%S"))
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(len(up['bugs']), 1)
        self.assertEquals(up['bugs'][0]['bug_id'], 12345)

    def test_list_updates_by_invalid_pushed_since(self):
        res = self.app.get('/updates/', {"pushed_since": "a while ago"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'pushed_since')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid date')

    def test_list_updates_by_pushed_before(self):
        now = datetime.utcnow()
        tomorrow = now + timedelta(days=1)
        tomorrow = tomorrow.strftime("%Y-%m-%d")

        # Try with no pushed updates first
        res = self.app.get('/updates/', {"pushed_before": now.strftime("%Y-%m-%d")})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

        # Now approve one
        self.db.query(Update).first().date_pushed = now
        self.db.flush()

        # And try again
        res = self.app.get('/updates/', {"pushed_before": tomorrow})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], now.strftime("%Y-%m-%d %H:%M:%S"))
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)
        self.assertEquals(len(up['bugs']), 1)
        self.assertEquals(up['bugs'][0]['bug_id'], 12345)

    def test_list_updates_by_invalid_pushed_before(self):
        res = self.app.get('/updates/', {"pushed_before": "a while ago"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'pushed_before')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid date')

    def test_list_updates_by_release_name(self):
        res = self.app.get('/updates/', {"releases": "F17"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_release_version(self):
        res = self.app.get('/updates/', {"releases": "17"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_release(self):
        res = self.app.get('/updates/', {"releases": "WinXP"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'releases')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          'Invalid releases specified: WinXP')

    def test_list_updates_by_request(self):
        res = self.app.get('/updates/', {'request': "testing"})
        body = res.json_body
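        # The lone fixture update was created with request='testing', so
        # filtering on that request value should match exactly one result.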
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_request(self):
        res = self.app.get('/updates/', {"request": "impossible"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'request')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"impossible" is not one of unpush, testing, revoke,'
                          ' obsolete, stable')

    def test_list_updates_by_severity(self):
        res = self.app.get('/updates/', {"severity": "unspecified"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_severity(self):
        res = self.app.get('/updates/', {"severity": "schoolmaster"},
                           status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'severity')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"schoolmaster" is not one of high, urgent, medium, low, unspecified')

    def test_list_updates_by_status(self):
        res = self.app.get('/updates/', {"status": "pending"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
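        # The alias embeds the year the fixture was created, hence the YEAR
        # interpolation in the expected value below.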
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_status(self):
        res = self.app.get('/updates/', {"status": "single"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'status')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"single" is not one of testing, processing, obsolete, stable, unpushed, pending')

    def test_list_updates_by_suggest(self):
        res = self.app.get('/updates/', {"suggest": "unspecified"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_suggest(self):
        res = self.app.get('/updates/', {"suggest": "no idea"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'suggest')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"no idea" is not one of logout, reboot, unspecified')

    def test_list_updates_by_type(self):
        res = self.app.get('/updates/', {"type": "bugfix"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_type(self):
        res = self.app.get('/updates/', {"type": "not_my"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'type')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          '"not_my" is not one of newpackage, bugfix, security, enhancement')

    def test_list_updates_by_username(self):
        res = self.app.get('/updates/', {"user": "guest"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'Useful details!')
        self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
        self.assertEquals(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], True)
        self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
        self.assertEquals(up['karma'], 1)

    def test_list_updates_by_unexisting_username(self):
        res = self.app.get('/updates/', {"user": "santa"}, status=400)
        body = res.json_body
        self.assertEquals(len(body.get('updates', [])), 0)
        self.assertEquals(res.json_body['errors'][0]['name'], 'user')
        self.assertEquals(res.json_body['errors'][0]['description'],
                          "Invalid user specified: santa")

    @mock.patch(**mock_uuid4_version1)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_update(self, publish, *args):
        args = self.get_update('bodhi-2.0.0-2.fc17')
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        args['edited'] = args['builds']
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        args['requirements'] = 'upgradepath'
        r = self.app.post_json('/updates/', args)
        up = r.json_body
        self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['user']['name'], u'guest')
        self.assertEquals(up['release']['name'], u'F17')
        self.assertEquals(up['type'], u'bugfix')
        self.assertEquals(up['severity'], u'unspecified')
        self.assertEquals(up['suggest'], u'unspecified')
        self.assertEquals(up['close_bugs'], True)
        self.assertEquals(up['notes'], u'this is a test update')
        self.assertIsNotNone(up['date_submitted'])
        self.assertIsNotNone(up['date_modified'], None)
        self.assertEquals(up['date_approved'], None)
        self.assertEquals(up['date_pushed'], None)
        self.assertEquals(up['locked'], False)
        self.assertEquals(up['alias'], u'FEDORA-%s-033713b73b' % YEAR)
        self.assertEquals(up['karma'], 0)
        self.assertEquals(up['requirements'], 'upgradepath')
        comment = textwrap.dedent("""
        guest edited this update.

        New build(s):

        - bodhi-2.0.0-3.fc17

        Removed build(s):

        - bodhi-2.0.0-2.fc17
        """).strip()
        self.assertMultiLineEqual(up['comments'][-1]['text'], comment)
        self.assertEquals(len(up['builds']), 1)
        self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
        self.assertEquals(self.db.query(Build).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(), None)
        self.assertEquals(len(publish.call_args_list), 2)
        publish.assert_called_with(topic='update.edit', msg=ANY)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_testing_update_with_new_builds(self, publish, *args):
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        # Mark it as testing
        upd = Update.get(nvr, self.db)
        upd.status = UpdateStatus.testing
        upd.request = None
        self.db.flush()

        args['edited'] = args['builds']
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        r = self.app.post_json('/updates/', args)
        up = r.json_body
        self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        #assert False, '\n'.join([c['text'] for c in up['comments']])
        self.assertEquals(up['comments'][-1]['text'],
                          u'This update has been submitted for testing by guest. ')
        comment = textwrap.dedent("""
        guest edited this update.

        New build(s):

        - bodhi-2.0.0-3.fc17

        Removed build(s):

        - bodhi-2.0.0-2.fc17
        """).strip()
        self.assertMultiLineEqual(up['comments'][-2]['text'], comment)
        self.assertEquals(up['comments'][-3]['text'],
                          u'This update has been submitted for testing by guest. ')
        self.assertEquals(len(up['builds']), 1)
        self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
        self.assertEquals(self.db.query(Build).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(), None)
        self.assertEquals(len(publish.call_args_list), 3)
        publish.assert_called_with(topic='update.edit', msg=ANY)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_testing_update_with_new_builds_with_stable_request(self, publish, *args):
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        # Mark it as testing
        upd = Update.get(nvr, self.db)
        upd.status = UpdateStatus.testing
        upd.request = UpdateRequest.stable
        self.db.flush()

        args['edited'] = args['builds']
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        r = self.app.post_json('/updates/', args)
        up = r.json_body
        self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')
        self.assertEquals(up['comments'][-1]['text'],
                          u'This update has been submitted for testing by guest. ')
        comment = textwrap.dedent("""
        guest edited this update.

        New build(s):

        - bodhi-2.0.0-3.fc17

        Removed build(s):

        - bodhi-2.0.0-2.fc17
        """).strip()
        self.assertMultiLineEqual(up['comments'][-2]['text'], comment)
        self.assertEquals(up['comments'][-3]['text'],
                          u'This update has been submitted for testing by guest. ')
        self.assertEquals(len(up['builds']), 1)
        self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
        self.assertEquals(self.db.query(Build).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(), None)
        self.assertEquals(len(publish.call_args_list), 3)
        publish.assert_called_with(topic='update.edit', msg=ANY)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_update_with_different_release(self, publish, *args):
        """Test editing an update for one release with builds from another."""
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update('bodhi-2.0.0-2.fc17')
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        # Add another release and package
        Release._tag_cache = None
        release = Release(
            name=u'F18', long_name=u'Fedora 18',
            id_prefix=u'FEDORA', version=u'18',
            dist_tag=u'f18', stable_tag=u'f18-updates',
            testing_tag=u'f18-updates-testing',
            candidate_tag=u'f18-updates-candidate',
            pending_signing_tag=u'f18-updates-testing-signing',
            pending_testing_tag=u'f18-updates-testing-pending',
            pending_stable_tag=u'f18-updates-pending',
            override_tag=u'f18-override',
            branch=u'f18')
        self.db.add(release)
        pkg = Package(name=u'nethack')
        self.db.add(pkg)

        args = self.get_update('bodhi-2.0.0-2.fc17,nethack-4.0.0-1.fc18')
        args['edited'] = nvr
        r = self.app.post_json('/updates/', args, status=400)
        up = r.json_body
        self.assertEquals(up['status'], 'error')
        self.assertEquals(up['errors'][0]['description'],
                          'Cannot add a F18 build to an F17 update')

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_stable_update(self, publish, *args):
        """Make sure we can't edit stable updates"""
        self.assertEquals(publish.call_args_list, [])

        # First, create a testing update
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        r = self.app.post_json('/updates/', args, status=200)
        publish.assert_called_once_with(
            topic='update.request.testing', msg=mock.ANY)

        # Then, switch it to stable behind the scenes
        up = self.db.query(Update).filter_by(title=nvr).one()
        up.status = UpdateStatus.stable

        # Then, try to edit it through the api again
        args['edited'] = args['builds']
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        r = self.app.post_json('/updates/', args, status=400)
        up = r.json_body
        self.assertEquals(up['status'], 'error')
        self.assertEquals(up['errors'][0]['description'], "Cannot edit stable updates")
        self.assertEquals(len(publish.call_args_list), 1)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_locked_update(self, publish, *args):
        """Make sure some changes are prevented"""
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        r = self.app.post_json('/updates/', args, status=200)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        up = self.db.query(Update).filter_by(title=nvr).one()
        up.locked = True
        up.status = UpdateStatus.testing
        up.request = None
        up_id = up.id
        build = self.db.query(Build).filter_by(nvr=nvr).one()

        # Changing the notes should work
        args['edited'] = args['builds']
        args['notes'] = 'Some new notes'
        up = self.app.post_json('/updates/', args, status=200).json_body
        self.assertEquals(up['notes'], 'Some new notes')

        # Changing the builds should fail
        args['notes'] = 'And yet some other notes'
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        r = self.app.post_json('/updates/', args, status=400).json_body
        self.assertEquals(r['status'], 'error')
        self.assertIn('errors', r)
        self.assertIn({u'description': u"Can't add builds to a locked update",
                       u'location': u'body', u'name': u'builds'},
                      r['errors'])
        up = self.db.query(Update).get(up_id)
        self.assertEquals(up.notes, 'Some new notes')
        self.assertEquals(up.builds, [build])

        # Changing the request should fail
        args['notes'] = 'Still new notes'
        args['builds'] = args['edited']
        args['request'] = 'stable'
        r = self.app.post_json('/updates/', args, status=400).json_body
        self.assertEquals(r['status'], 'error')
        self.assertIn('errors', r)
        self.assertIn({u'description': u"Can't change the request on a "
                                       "locked update",
                       u'location': u'body', u'name': u'builds'},
                      r['errors'])
        up = self.db.query(Update).get(up_id)
        self.assertEquals(up.notes, 'Some new notes')
        self.assertEquals(up.builds, [build])
        self.assertEquals(up.request, None)

        # At the end of the day, two fedmsg messages should have gone out.
        self.assertEquals(len(publish.call_args_list), 2)
        publish.assert_called_with(topic='update.edit', msg=ANY)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsoletion_locked_with_open_request(self, publish, *args):
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        self.app.post_json('/updates/', args)
        up = self.db.query(Update).filter_by(title=nvr).one()
        up.locked = True
        self.db.flush()

        args = self.get_update('bodhi-2.0.0-3.fc17')
        r = self.app.post_json('/updates/', args).json_body
        self.assertEquals(r['request'], 'testing')

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.status, UpdateStatus.pending)
        self.assertEquals(up.request, UpdateRequest.testing)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsoletion_unlocked_with_open_request(self, publish, *args):
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        self.app.post_json('/updates/', args)

        args = self.get_update('bodhi-2.0.0-3.fc17')
        r = self.app.post_json('/updates/', args).json_body
        self.assertEquals(r['request'], 'testing')

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.status, UpdateStatus.obsolete)
        self.assertEquals(up.request, None)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsoletion_unlocked_with_open_stable_request(self, publish, *args):
        """ Ensure that we don't obsolete updates that have a stable request """
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        self.app.post_json('/updates/', args)
        up = self.db.query(Update).filter_by(title=nvr).one()
        up.request = UpdateRequest.stable
        self.db.flush()

        args = self.get_update('bodhi-2.0.0-3.fc17')
        r = self.app.post_json('/updates/', args).json_body
        self.assertEquals(r['request'], 'testing')

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.status, UpdateStatus.pending)
        self.assertEquals(up.request, UpdateRequest.stable)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_push_to_stable_for_obsolete_update(self, publish, *args):
        """
        Obsolete update should not be submitted to testing
        Test Push to Stable option for obsolete update
        """
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        with mock.patch(**mock_uuid4_version1):
            self.app.post_json('/updates/', args)
        publish.assert_called_once_with(
            topic='update.request.testing', msg=mock.ANY)
        publish.call_args_list = []

        up = self.db.query(Update).filter_by(title=nvr).one()
        up.status = UpdateStatus.testing
        up.request = None

        new_nvr = 'bodhi-2.0.0-3.fc17'
        args = self.get_update(new_nvr)
        with mock.patch(**mock_uuid4_version2):
            r = self.app.post_json('/updates/', args).json_body
        self.assertEquals(r['request'], 'testing')
        publish.assert_called_with(
            topic='update.request.testing', msg=mock.ANY)

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.status, UpdateStatus.obsolete)
        expected_comment = u'This update has been obsoleted by [bodhi-2.0.0-3.fc17]({}).'
        expected_comment = expected_comment.format(
            urlparse.urljoin(config['base_address'], '/updates/FEDORA-2016-53345602d5'))
        self.assertEquals(up.comments[-1].text, expected_comment)

        # Check Push to Stable button for obsolete update
        id = 'bodhi-2.0.0-2.fc17'
        resp = self.app.get('/updates/%s' % id,
                            headers={'Accept': 'text/html'})
        self.assertIn('text/html', resp.headers['Content-Type'])
        self.assertIn(id, resp)
        self.assertNotIn('Push to Stable', resp)

    @mock.patch(**mock_valid_requirements)
    def test_enabled_button_for_autopush(self, *args):
        """Test Enabled button on Update page when autopush is True"""
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = True
        resp = self.app.post_json('/updates/', args)
        resp = self.app.get('/updates/%s' % nvr,
                            headers={'Accept': 'text/html'})
        self.assertIn('text/html', resp.headers['Content-Type'])
        self.assertIn(nvr, resp)
        self.assertIn('Enabled', resp)

    @mock.patch(**mock_valid_requirements)
    def test_disabled_button_for_autopush(self, *args):
        """Test Disabled button on Update page when autopush is False"""
        nvr = 'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = False
        resp = self.app.post_json('/updates/', args)
        resp = self.app.get('/updates/%s' % nvr,
                            headers={'Accept': 'text/html'})
        self.assertIn('text/html', resp.headers['Content-Type'])
        self.assertIn(nvr, resp)
        self.assertIn('Disabled', resp)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    def test_invalid_request(self, *args):
        """Test submitting an invalid request"""
        args = self.get_update()
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'foo', 'csrf_token': self.get_csrf_token()},
            status=400)
        resp = resp.json_body
        eq_(resp['status'], 'error')
        eq_(resp['errors'][0]['description'],
            u'"foo" is not one of unpush, testing, revoke, obsolete, stable')

        # Now try with None
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': None, 'csrf_token': self.get_csrf_token()},
            status=400)
        resp = resp.json_body
        eq_(resp['status'], 'error')
        eq_(resp['errors'][0]['name'], 'request')
        eq_(resp['errors'][0]['description'], 'Required')

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_testing_request(self, publish, *args):
        """Test submitting a valid testing request"""
        Update.get(u'bodhi-2.0-1.fc17', self.db).locked = False

        args = self.get_update()
        args['request'] = None
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'testing', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['request'], 'testing')
        self.assertEquals(publish.call_args_list, [])

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_revoke_action_for_stable_request(self, publish, *args):
        """
        Test revoke action for stable request on testing update
        and check status after revoking the request
        """
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.testing
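        # Give the testing update a pending stable request so the 'revoke'
        # action below has something to cancel.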
        up.request = UpdateRequest.stable
        self.db.flush()

        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'revoke', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['request'], None)
        eq_(resp.json['update']['status'], 'testing')
        publish.assert_called_with(
            topic='update.request.revoke', msg=mock.ANY)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_revoke_action_for_testing_request(self, publish, *args):
        """
        Test revoke action for testing request on pending update
        and check status after revoking the request
        """
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.pending
        up.request = UpdateRequest.testing
        self.db.flush()

        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'revoke', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['request'], None)
        eq_(resp.json['update']['status'], 'unpushed')
        publish.assert_called_with(
            topic='update.request.revoke', msg=mock.ANY)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsolete_if_unstable_with_autopush_enabled_when_pending(self, publish, *args):
        """
        Send update to obsolete state if it reaches unstable karma on
        pending state where request is testing when Autopush is enabled.
        Make sure that it does not go to update-testing state.
        """
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = True
        args['stable_karma'] = 1
        args['unstable_karma'] = -1

        resp = self.app.post_json('/updates/', args)
        up = Update.get(nvr, self.db)
        up.status = UpdateStatus.pending
        up.request = UpdateRequest.testing
        up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
        self.db.flush()

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.karma, -1)
        self.assertEquals(up.status, UpdateStatus.obsolete)
        self.assertEquals(up.request, None)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsolete_if_unstable_with_autopush_disabled_when_pending(self, publish, *args):
        """
        Don't automatically send update to obsolete state if it reaches
        unstable karma on pending state when Autopush is disabled.
        """
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = False
        args['stable_karma'] = 1
        args['unstable_karma'] = -1

        resp = self.app.post_json('/updates/', args)
        up = Update.get(nvr, self.db)
        up.status = UpdateStatus.pending
        up.request = UpdateRequest.testing
        up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
        self.db.flush()

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.karma, -1)
        self.assertEquals(up.status, UpdateStatus.pending)
        self.assertEquals(up.request, UpdateRequest.testing)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsolete_if_unstable_karma_not_reached_with_autopush_enabled_when_pending(self, publish, *args):
        """
        Don't send update to obsolete state if it does not reach unstable
        karma threshold on pending state when Autopush is enabled.
        """
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = True
        args['stable_karma'] = 2
        args['unstable_karma'] = -2

        resp = self.app.post_json('/updates/', args)
        up = Update.get(nvr, self.db)
        up.status = UpdateStatus.pending
        up.request = UpdateRequest.testing
        up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
        self.db.flush()

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.karma, -1)
        self.assertEquals(up.status, UpdateStatus.pending)
        self.assertEquals(up.request, UpdateRequest.testing)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_obsolete_if_unstable_with_autopush_enabled_when_testing(self, publish, *args):
        """
        Send update to obsolete state if it reaches unstable karma threshold
        on testing state where request is stable when Autopush is enabled.
        Make sure that it does not go to stable state.
        """
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = True
        args['stable_karma'] = 1
        args['unstable_karma'] = -1

        resp = self.app.post_json('/updates/', args)
        up = Update.get(nvr, self.db)
        up.status = UpdateStatus.testing
        up.request = UpdateRequest.stable
        up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
        self.db.flush()

        up = self.db.query(Update).filter_by(title=nvr).one()
        self.assertEquals(up.karma, -1)
        self.assertEquals(up.status, UpdateStatus.obsolete)
        self.assertEquals(up.request, None)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_request_after_unpush(self, publish, *args):
        """Test request of this update after unpushing"""
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.testing
        up.request = UpdateRequest.stable
        self.db.flush()

        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'unpush', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['request'], None)
        eq_(resp.json['update']['status'], 'unpushed')
        publish.assert_called_with(
            topic='update.request.unpush', msg=mock.ANY)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    def test_invalid_stable_request(self, *args):
        """Test submitting a stable request for an update that has yet to meet the stable requirements"""
        Update.get(u'bodhi-2.0-1.fc17', self.db).locked = False

        args = self.get_update()
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()},
            status=400)
        eq_(resp.json['status'], 'error')
        eq_(resp.json['errors'][0]['description'],
            config.get('not_yet_tested_msg'))

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    def test_request_to_stable_based_on_stable_karma(self, *args):
        """
        Test request to stable before an update reaches stable karma
        and after it reaches stable karma when autokarma is disabled
        """
        user = User(name=u'bob')
        self.db.add(user)
        self.db.flush()

        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        args['autokarma'] = False
        args['stable_karma'] = 1
        resp = self.app.post_json('/updates/', args)

        up = Update.get(nvr, self.db)
        up.status = UpdateStatus.testing
        up.request = None
        self.db.flush()

        # Checks failure for requesting to stable push before the update reaches stable karma
        up.comment(self.db, u'Not working', author=u'ralph', karma=0)
        up = Update.get(nvr, self.db)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()},
            status=400)
        self.assertEquals(up.request, None)
        self.assertEquals(up.status, UpdateStatus.testing)

        # Checks Success for requesting to stable push after the update reaches stable karma
        up.comment(self.db, u'LGTM', author=u'ralph', karma=1)
        up = Update.get(nvr, self.db)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()},
            status=200)
        self.assertEquals(up.request, UpdateRequest.stable)
        self.assertEquals(up.status, UpdateStatus.testing)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_stable_request_after_testing(self, publish, *args):
        """Test submitting a stable request to an update that has met the minimum amount of time in testing"""
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.testing
        up.request = None
        up.comment(self.db, 'This update has been pushed to testing', author='bodhi')
        up.date_testing = up.comments[-1].timestamp - timedelta(days=7)
        self.db.flush()

        eq_(up.days_in_testing, 7)
        eq_(up.meets_testing_requirements, True)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['request'], 'stable')
        publish.assert_called_with(
            topic='update.request.stable', msg=mock.ANY)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_request_to_archived_release(self, publish, *args):
        """Test submitting a stable request to an update for an archived/EOL release.

        https://github.com/fedora-infra/bodhi/issues/725
        """
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.pending
        up.request = None
        up.release.state = ReleaseState.archived
        self.db.flush()

        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'testing', 'csrf_token': self.get_csrf_token()},
            status=400)
        eq_(resp.json['status'], 'error')
        eq_(resp.json['errors'][0]['description'],
            "Can't change request for an archived release")

    @mock.patch(**mock_failed_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_stable_request_failed_taskotron_results(self, publish, *args):
        """Test submitting a stable request, but with bad taskotron results"""
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.testing
        up.request = None
        up.comment(self.db, 'This update has been pushed to testing', author='bodhi')
        up.date_testing = up.comments[-1].timestamp - timedelta(days=7)
        self.db.flush()

        eq_(up.days_in_testing, 7)
        eq_(up.meets_testing_requirements, True)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()},
            status=400)
        self.assertIn('errors', resp)
        self.assertIn('Required task', resp)

    @mock.patch(**mock_absent_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_stable_request_absent_taskotron_results(self, publish, *args):
        """Test submitting a stable request, but with absent task results"""
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.testing
        up.request = None
        up.comment(self.db, 'This update has been pushed to testing', author='bodhi')
        up.date_testing = up.comments[-1].timestamp - timedelta(days=7)
        self.db.flush()

        eq_(up.days_in_testing, 7)
        eq_(up.meets_testing_requirements, True)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()},
            status=400)
        self.assertIn('errors', resp)
        self.assertIn('No result found for', resp)

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_stable_request_when_stable(self, publish, *args):
        """Test submitting a stable request to an update that already been pushed to stable"""
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.stable
        up.request = None
        up.comment(self.db, 'This update has been pushed to testing', author='bodhi')
        up.date_testing = up.comments[-1].timestamp - timedelta(days=14)
        up.comment(self.db, 'This update has been pushed to stable', author='bodhi')
        self.db.flush()

        eq_(up.days_in_testing, 14)
        eq_(up.meets_testing_requirements, True)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['status'], 'stable')
        eq_(resp.json['update']['request'], None)
        try:
            publish.assert_called_with(
                topic='update.request.stable', msg=mock.ANY)
            assert False, "request.stable fedmsg shouldn't have fired"
        except AssertionError:
            pass

    @mock.patch(**mock_taskotron_results)
    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_testing_request_when_testing(self, publish, *args):
        """Test submitting a testing request to an update that already been pushed to testing"""
        args = self.get_update('bodhi-2.0.0-3.fc17')
        resp = self.app.post_json('/updates/', args)

        up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        up.status = UpdateStatus.testing
        up.request = None
        up.comment(self.db, 'This update has been pushed to testing', author='bodhi')
        up.date_testing = up.comments[-1].timestamp - timedelta(days=14)
        self.db.flush()

        eq_(up.days_in_testing, 14)
        eq_(up.meets_testing_requirements, True)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'testing', 'csrf_token': self.get_csrf_token()})
        eq_(resp.json['update']['status'], 'testing')
        eq_(resp.json['update']['request'], None)
        try:
            publish.assert_called_with(
                topic='update.request.testing', msg=mock.ANY)
            assert False, "request.testing fedmsg shouldn't have fired"
        except AssertionError:
            pass

    @mock.patch(**mock_valid_requirements)
    def test_update_with_older_build_in_testing_from_diff_user(self, r):
        """
        Test submitting an update for a package that has an older build
        within a multi-build update currently in testing submitted by a
        different maintainer.

        https://github.com/fedora-infra/bodhi/issues/78
        """
        title = u'bodhi-2.0-2.fc17 python-3.0-1.fc17'
        args = self.get_update(title)
        resp = self.app.post_json('/updates/', args)
        newuser = User(name=u'bob')
        self.db.add(newuser)
        up = self.db.query(Update).filter_by(title=title).one()
        up.status = UpdateStatus.testing
        up.request = None
        up.user = newuser
        self.db.flush()

        newtitle = u'bodhi-2.0-3.fc17'
        args = self.get_update(newtitle)
        resp = self.app.post_json('/updates/', args)

        # Note that this does **not** obsolete the other update
        self.assertEquals(len(resp.json_body['caveats']), 1)
        self.assertEquals(resp.json_body['caveats'][0]['description'],
                          "Please be aware that there is another update in "
                          "flight owned by bob, containing "
                          "bodhi-2.0-2.fc17. Are you coordinating with "
                          "them?")

        # Ensure the second update was created successfully
        self.db.query(Update).filter_by(title=newtitle).one()

    @mock.patch(**mock_valid_requirements)
    def test_updateid_alias(self, *args):
        res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0.0-3.fc17'))
        json = res.json_body
        self.assertEquals(json['alias'], json['updateid'])

    def test_list_updates_by_lowercase_release_name(self):
        res = self.app.get('/updates/', {"releases": "f17"})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')

    def test_redirect_to_package(self):
        "When you visit /updates/package, redirect to /updates/?packages=..."
        res = self.app.get('/updates/bodhi', status=302)
        target = 'http://localhost/updates/?packages=bodhi'
        self.assertEquals(res.headers['Location'], target)

        # But be sure that we don't redirect if the package doesn't exist
        res = self.app.get('/updates/non-existant', status=404)

    def test_list_updates_by_alias_and_updateid(self):
        upd = self.db.query(Update).filter(Update.alias != None).first()
        res = self.app.get('/updates/', {"alias": upd.alias})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], upd.title)
        self.assertEquals(up['alias'], upd.alias)

        res = self.app.get('/updates/', {"updateid": upd.alias})
        body = res.json_body
        self.assertEquals(len(body['updates']), 1)
        up = body['updates'][0]
        self.assertEquals(up['title'], upd.title)

        res = self.app.get('/updates/', {"updateid": 'BLARG'})
        body = res.json_body
        self.assertEquals(len(body['updates']), 0)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_submitting_multi_release_updates(self, publish, *args):
        """ https://github.com/fedora-infra/bodhi/issues/219 """
        # Add another release and package
        Release._tag_cache = None
        release = Release(
            name=u'F18', long_name=u'Fedora 18',
            id_prefix=u'FEDORA', version=u'18',
            dist_tag=u'f18', stable_tag=u'f18-updates',
            testing_tag=u'f18-updates-testing',
            candidate_tag=u'f18-updates-candidate',
            pending_signing_tag=u'f18-updates-testing-signing',
            pending_testing_tag=u'f18-updates-testing-pending',
            pending_stable_tag=u'f18-updates-pending',
            override_tag=u'f18-override',
            branch=u'f18')
        self.db.add(release)
        pkg = Package(name=u'nethack')
        self.db.add(pkg)

        # A multi-release submission!!! This should create *two* updates
        args = self.get_update('bodhi-2.0.0-2.fc17,bodhi-2.0.0-2.fc18')
        r = self.app.post_json('/updates/', args)
        data = r.json_body

        self.assertIn('caveats', data)
        self.assertEquals(len(data['caveats']), 1)
        self.assertEquals(data['caveats'][0]['description'],
                          "Your update is being split into 2, one for each release.")

        self.assertIn('updates', data)
        self.assertEquals(len(data['updates']), 2)

        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        # Make sure two fedmsg messages were published
        self.assertEquals(len(publish.call_args_list), 2)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_update_bugs(self, publish, *args):
        build = 'bodhi-2.0.0-2.fc17'
        args = self.get_update('bodhi-2.0.0-2.fc17')
        args['bugs'] = '56789'
        r = self.app.post_json('/updates/', args)
        self.assertEquals(len(r.json['bugs']), 1)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        # Pretend it was pushed to testing
        update = self.db.query(Update).filter_by(title=build).one()
        update.request = None
        update.status = UpdateStatus.testing
        update.pushed = True
        self.db.flush()

        # Mark it as testing
        args['edited'] = args['builds']
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        args['bugs'] = '56789,98765'
        r = self.app.post_json('/updates/', args)
        up = r.json_body

        self.assertEquals(len(up['bugs']), 2)
        bug_ids = [bug['bug_id'] for bug in up['bugs']]
        self.assertIn(56789, bug_ids)
        self.assertIn(98765, bug_ids)
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')

        # now remove a bug
        args['edited'] = args['builds']
        args['builds'] = 'bodhi-2.0.0-3.fc17'
        args['bugs'] = '98765'
        r = self.app.post_json('/updates/', args)
        up = r.json_body

        self.assertEquals(len(up['bugs']), 1)
        bug_ids = [bug['bug_id'] for bug in up['bugs']]
        self.assertIn(98765, bug_ids)
        self.assertEquals(up['status'], u'pending')
        self.assertEquals(up['request'], u'testing')

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_missing_update(self, publish, *args):
        """ Attempt to edit an update that doesn't exist """
        build = 'bodhi-2.0.0-2.fc17'
        edited = 'bodhi-1.0-1.fc17'
        args = self.get_update(build)
        args['edited'] = edited
        r = self.app.post_json('/updates/', args, status=400).json_body
        self.assertEquals(r['status'], 'error')
        self.assertEquals(r['errors'][0]['description'],
                          'Cannot find update to edit: %s' % edited)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_update_and_disable_features(self, publish, *args):
        build = 'bodhi-2.0.0-2.fc17'
        args = self.get_update('bodhi-2.0.0-2.fc17')
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        up = r.json_body
        self.assertEquals(up['require_testcases'], True)
        self.assertEquals(up['require_bugs'], False)
        self.assertEquals(up['stable_karma'], 3)
        self.assertEquals(up['unstable_karma'], -3)

        # Pretend it was pushed to testing
        update = self.db.query(Update).filter_by(title=build).one()
        update.request = None
        update.status = UpdateStatus.testing
        update.pushed = True
        self.db.flush()

        # Mark it as testing
        args['edited'] = args['builds']

        # Toggle a bunch of the booleans
        args['autokarma'] = False
        args['require_testcases'] = False
        args['require_bugs'] = True

        r = self.app.post_json('/updates/', args)
        up = r.json_body
        self.assertEquals(up['status'], u'testing')
        self.assertEquals(up['request'], None)

        self.assertEquals(up['require_bugs'], True)
        self.assertEquals(up['require_testcases'], False)
        self.assertEquals(up['stable_karma'], None)
        self.assertEquals(up['unstable_karma'], None)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_update_change_type(self, publish, *args):
        build = 'bodhi-2.0.0-2.fc17'
        args = self.get_update('bodhi-2.0.0-2.fc17')
        args['type'] = 'newpackage'
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)
        up = r.json_body
        self.assertEquals(up['type'], u'newpackage')

        # Pretend it was pushed to testing
        update = self.db.query(Update).filter_by(title=build).one()
        update.request = None
        update.status = UpdateStatus.testing
        update.pushed = True
        self.db.flush()

        # Mark it as testing
        args['edited'] = args['builds']
        args['type'] = 'bugfix'
        r = self.app.post_json('/updates/', args)
        up = r.json_body
        self.assertEquals(up['status'], u'testing')
        self.assertEquals(up['request'], None)
        self.assertEquals(up['type'], u'bugfix')

    def test_update_meeting_requirements_present(self):
        """ Check that the requirements boolean is present in our JSON """
        res = self.app.get('/updates/bodhi-2.0-1.fc17')
        actual = res.json_body['update']['meets_testing_requirements']
        expected = False
        self.assertEquals(actual, expected)

    @mock.patch(**mock_valid_requirements)
    @mock.patch('bodhi.server.notifications.publish')
    def test_edit_testing_update_reset_karma(self, publish, *args):
        nvr = u'bodhi-2.0.0-2.fc17'
        args = self.get_update(nvr)
        r = self.app.post_json('/updates/', args)
        publish.assert_called_with(topic='update.request.testing', msg=ANY)

        # Mark it as testing and give it 2 karma
        upd = Update.get(nvr, self.db)
        upd.status = UpdateStatus.testing
        upd.request = None
        upd.comment(self.db, u'LGTM', author=u'bob', karma=1)
        upd.comment(self.db, u'LGTM2ME2', author=u'other_bob', karma=1)
        self.db.flush()
self.assertEqual(upd.karma, 2) # Then.. edit it and change the builds! args['edited'] = args['builds'] args['builds'] = 'bodhi-2.0.0-3.fc17' r = self.app.post_json('/updates/', args) up = r.json_body self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17') # This is what we really want to test here. self.assertEquals(up['karma'], 0) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_edit_testing_update_reset_karma_with_same_tester(self, publish, *args): """ Ensure that someone who gave an update karma can do it again after a reset. https://github.com/fedora-infra/bodhi/issues/659 """ user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'bodhi-2.0.0-2.fc17' args = self.get_update(nvr) r = self.app.post_json('/updates/', args) publish.assert_called_with(topic='update.request.testing', msg=ANY) # Mark it as testing upd = Update.get(nvr, self.db) upd.status = UpdateStatus.testing upd.request = None self.db.flush() # Have bob +1 it upd.comment(self.db, u'LGTM', author=u'bob', karma=1) upd = Update.get(nvr, self.db) self.assertEquals(upd.karma, 1) # Then.. edit it and change the builds! new_nvr = u'bodhi-2.0.0-3.fc17' args['edited'] = args['builds'] args['builds'] = new_nvr r = self.app.post_json('/updates/', args) up = r.json_body self.assertEquals(up['title'], new_nvr) # This is what we really want to test here. self.assertEquals(up['karma'], 0) # Have bob +1 it again upd = Update.get(new_nvr, self.db) upd.comment(self.db, u'Ship it!', author=u'bob', karma=1) # Bob should be able to give karma again since the reset self.assertEquals(upd.karma, 1) # Then.. edit it and change the builds! newer_nvr = u'bodhi-2.0.0-4.fc17' args['edited'] = args['builds'] args['builds'] = newer_nvr r = self.app.post_json('/updates/', args) up = r.json_body self.assertEquals(up['title'], newer_nvr) # This is what we really want to test here. self.assertEquals(up['karma'], 0) # Have bob +1 it again upd = Update.get(newer_nvr, self.db) upd.comment(self.db, u'Ship it!', author=u'bob', karma=1) # Bob should be able to give karma again since the reset self.assertEquals(upd.karma, 1) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test__composite_karma_with_one_negative(self, publish, *args): """The test asserts that _composite_karma returns (0, -1) when an update receives one negative karma""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = 'bodhi-2.1-1.fc17' args = self.get_update(nvr) resp = self.app.post_json('/updates/', args).json_body publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=nvr).one() up.request = None up.status = UpdateStatus.testing self.db.flush() # The user gives negative karma first up.comment(self.db, u'Failed to work', author=u'luke', karma=-1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (0, -1)) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test__composite_karma_with_changed_karma(self, publish, *args): """ This test asserts that _composite_karma returns (1, 0) when a user posts negative karma and then later posts positive karma. 
""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = 'bodhi-2.1-1.fc17' args = self.get_update(nvr) resp = self.app.post_json('/updates/', args).json_body publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=nvr).one() up.request = None up.status = UpdateStatus.testing self.db.flush() # The user gives negative karma first up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (0, -1)) # The same user gives positive karma later up.comment(self.db, u'wfm', author=u'ralph', karma=1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (1, 0)) self.assertEquals(up.karma, 1) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test__composite_karma_with_positive_karma_first(self, publish, *args): """ This test asserts that _composite_karma returns (1, -1) when one user posts positive karma and then another user posts negative karma. """ user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'bodhi-2.1-1.fc17' args = self.get_update(nvr) resp = self.app.post_json('/updates/', args).json_body publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=nvr).one() up.request = None up.status = UpdateStatus.testing self.db.flush() # user gives positive karma first up.comment(self.db, u'Works for me', author=u'ralph', karma=1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (1, 0)) # Another user gives negative karma later up.comment(self.db, u'Failed to work', author=u'bowlofeggs', karma=-1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (1, -1)) self.assertEquals(up.karma, 0) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test__composite_karma_with_no_negative_karma(self, publish, *args): """The test asserts that _composite_karma returns (*, 0) when there is no negative karma.""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = 'bodhi-2.1-1.fc17' args = self.get_update(nvr) resp = self.app.post_json('/updates/', args).json_body publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=nvr).one() up.request = None up.status = UpdateStatus.testing self.db.flush() up.comment(self.db, u'LGTM', author=u'mac', karma=1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (1, 0)) # Karma with no comment up.comment(self.db, u' ', author=u'bowlofeggs', karma=1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (2, 0)) self.assertEquals(up.karma, 2) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test__composite_karma_with_anonymous_comment(self, publish, *args): """ The test asserts that _composite_karma returns (0, 0) when an anonymous user gives negative karma to an update. 
""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = 'bodhi-2.1-1.fc17' args = self.get_update(nvr) resp = self.app.post_json('/updates/', args).json_body publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=nvr).one() up.request = None up.status = UpdateStatus.testing self.db.flush() up.comment(self.db, u'Not working', author='me', anonymous=True, karma=-1) up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (0, 0)) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test__composite_karma_with_no_feedback(self, publish, *args): """This test asserts that _composite_karma returns (0, 0) when an update has no feedback.""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = 'bodhi-2.1-1.fc17' args = self.get_update(nvr) resp = self.app.post_json('/updates/', args).json_body publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=nvr).one() up.request = None up.status = UpdateStatus.testing self.db.flush() up = self.db.query(Update).filter_by(title=nvr).one() self.assertEqual(up._composite_karma, (0, 0)) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_disable_autopush_for_critical_updates(self, publish, *args): """Make sure that autopush is disabled if a critical update receives any negative karma""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'kernel-3.11.5-300.fc17' args = self.get_update(nvr) args['autokarma'] = True resp = self.app.post_json('/updates/', args) self.assertTrue(resp.json['critpath']) self.assertEquals(resp.json['request'], 'testing') publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.status = UpdateStatus.testing up.request = None self.db.flush() # A user gives negative karma first up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() # Another user gives positive karma up.comment(self.db, u'wfm', author=u'bowlofeggs', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() self.assertEquals(up.karma, 0) self.assertEquals(up.status, UpdateStatus.testing) self.assertEquals(up.request, None) # Autopush gets disabled since there is a negative karma from ralph self.assertEquals(up.autokarma, False) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_autopush_critical_update_with_no_negative_karma(self, publish, *args): """Autopush critical update when it has no negative karma""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'kernel-3.11.5-300.fc17' args = self.get_update(nvr) args['autokarma'] = True args['stable_karma'] = 2 args['unstable_karma'] = -2 resp = self.app.post_json('/updates/', args) self.assertTrue(resp.json['critpath']) self.assertEquals(resp.json['request'], 'testing') publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.status = UpdateStatus.testing self.db.flush() up.comment(self.db, u'LGTM', author=u'ralph', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM', author=u'bowlofeggs', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() 
self.assertEquals(up.karma, 2) # No negative karma: Update gets automatically marked as stable self.assertEquals(up.autokarma, True) up = self.db.query(Update).filter_by(title=resp.json['title']).one() self.assertEquals(up.request, UpdateRequest.stable) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_manually_push_critical_update_with_negative_karma(self, publish, *args): """ Manually push critical update when it has negative karma Autopush gets disabled after it receives negative karma A user gives negative karma, but another 3 users give positive karma The critical update should be manually pushed because of the negative karma """ user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'kernel-3.11.5-300.fc17' args = self.get_update(nvr) args['autokarma'] = True args['stable_karma'] = 3 args['unstable_karma'] = -3 resp = self.app.post_json('/updates/', args) self.assertTrue(resp.json['critpath']) self.assertEquals(resp.json['request'], 'testing') publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.status = UpdateStatus.testing self.db.flush() up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM', author=u'bowlofeggs', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'wfm', author=u'luke', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM', author=u'puiterwijk', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM', author=u'trishnag', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() self.assertEquals(up.karma, 3) self.assertEquals(up.autokarma, False) # The request should still be at testing. This assertion tests for # https://github.com/fedora-infra/bodhi/issues/989 where karma comments were resetting the # request to None. 
self.assertEquals(up.request, UpdateRequest.testing) self.assertEquals(up.status, UpdateStatus.testing) id = 'kernel-3.11.5-300.fc17' resp = self.app.get('/updates/%s' % id, headers={'Accept': 'text/html'}) self.assertIn('text/html', resp.headers['Content-Type']) self.assertIn(id, resp) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_manually_push_critical_update_with_autopush_turned_off(self, publish, *args): """ Manually push critical update when it has Autopush turned off and make sure the update doesn't get Autopushed """ user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'kernel-3.11.5-300.fc17' args = self.get_update(nvr) args['autokarma'] = False args['stable_karma'] = 3 args['unstable_karma'] = -3 resp = self.app.post_json('/updates/', args) self.assertTrue(resp.json['critpath']) self.assertEquals(resp.json['request'], 'testing') publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.status = UpdateStatus.testing self.db.flush() up.comment(self.db, u'LGTM Now', author=u'ralph', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'wfm', author=u'luke', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM', author=u'puiterwijk', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() self.assertEquals(up.karma, 3) self.assertEquals(up.autokarma, False) # The request should still be at testing. This assertion tests for # https://github.com/fedora-infra/bodhi/issues/989 where karma comments were resetting the # request to None. self.assertEquals(up.request, UpdateRequest.testing) self.assertEquals(up.status, UpdateStatus.testing) id = 'kernel-3.11.5-300.fc17' resp = self.app.get('/updates/%s' % id, headers={'Accept': 'text/html'}) self.assertIn('text/html', resp.headers['Content-Type']) self.assertIn(id, resp) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_autopush_non_critical_update_with_negative_karma(self, publish, *args): """Autopush Non Critical update even though it receives negative karma""" user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'bodhi-2.0.0-2.fc17' args = self.get_update(nvr) args['autokarma'] = True args['stable_karma'] = 3 args['unstable_karma'] = -3 resp = self.app.post_json('/updates/', args) self.assertEquals(resp.json['request'], 'testing') publish.assert_called_with(topic='update.request.testing', msg=ANY) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.status = UpdateStatus.testing self.db.flush() up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM Now', author=u'ralph', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'wfm', author=u'luke', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() up.comment(self.db, u'LGTM', author=u'puiterwijk', karma=1) up = self.db.query(Update).filter_by(title=resp.json['title']).one() self.assertEquals(up.karma, 3) self.assertEquals(up.autokarma, True) up = self.db.query(Update).filter_by(title=resp.json['title']).one() self.assertEquals(up.request, UpdateRequest.stable) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def 
test_manually_push_to_stable_based_on_karma(self, publish, *args): """ Test manually push to stable when autokarma is disabled and karma threshold is reached """ user = User(name=u'bob') self.db.add(user) self.db.flush() # Makes autokarma disabled # Sets stable karma to 1 nvr = u'bodhi-2.0.0-2.fc17' args = self.get_update(nvr) args['autokarma'] = False args['stable_karma'] = 1 resp = self.app.post_json('/updates/', args) publish.assert_called_with(topic='update.request.testing', msg=ANY) # Marks it as testing upd = Update.get(nvr, self.db) upd.status = UpdateStatus.testing upd.request = None upd.date_testing = datetime.now() - timedelta(days=1) self.db.flush() # Checks karma threshold is reached # Makes sure stable karma is not None # Ensures Request doesn't get set to stable automatically since autokarma is disabled upd.comment(self.db, u'LGTM', author=u'ralph', karma=1) upd = Update.get(nvr, self.db) self.assertEquals(upd.karma, 1) self.assertEquals(upd.stable_karma, 1) self.assertEquals(upd.status, UpdateStatus.testing) self.assertEquals(upd.request, None) text = config.get('testing_approval_msg_based_on_karma') upd.comment(self.db, text, author=u'bodhi') # Checks Push to Stable text in the html page for this update id = 'bodhi-2.0.0-2.fc17' resp = self.app.get('/updates/%s' % id, headers={'Accept': 'text/html'}) self.assertIn('text/html', resp.headers['Content-Type']) self.assertIn(id, resp) self.assertIn('Push to Stable', resp) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_edit_update_with_expired_override(self, publish, *args): """ """ user = User(name=u'bob') self.db.add(user) self.db.flush() nvr = u'bodhi-2.0.0-2.fc17' args = self.get_update(nvr) r = self.app.post_json('/updates/', args) publish.assert_called_with(topic='update.request.testing', msg=ANY) # Create a new expired override upd = Update.get(nvr, self.db) override = BuildrootOverride( build=upd.builds[0], submitter=user, notes=u'testing', expiration_date=datetime.utcnow(), expired_date=datetime.utcnow()) self.db.add(override) self.db.flush() # Edit it and change the builds new_nvr = u'bodhi-2.0.0-3.fc17' args['edited'] = args['builds'] args['builds'] = new_nvr r = self.app.post_json('/updates/', args) up = r.json_body self.assertEquals(up['title'], new_nvr) # Change it back to ensure we can still reference the older build args['edited'] = args['builds'] args['builds'] = nvr r = self.app.post_json('/updates/', args) up = r.json_body self.assertEquals(up['title'], nvr) @mock.patch(**mock_taskotron_results) @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_submit_older_build_to_stable(self, publish, *args): """ Ensure we cannot submit an older build to stable when a newer one already exists there. 
""" update = self.db.query(Update).one() update.status = UpdateStatus.stable update.request = None self.db.flush() oldbuild = 'bodhi-1.0-1.fc17' # Create a newer build build = Build(nvr=oldbuild, package=update.builds[0].package) self.db.add(build) update = Update(title=oldbuild, builds=[build], type=UpdateType.bugfix, request=UpdateRequest.testing, notes=u'second update', user=update.user, release=update.release) update.comment(self.db, u"foo1", 1, u'foo1') update.comment(self.db, u"foo2", 1, u'foo2') update.comment(self.db, u"foo3", 1, u'foo3') self.db.add(update) self.db.flush() # Try and submit an older build to stable resp = self.app.post_json('/updates/%s/request' % oldbuild, {'request': 'stable', 'csrf_token': self.get_csrf_token()}, status=400) eq_(resp.json['status'], 'error') eq_(resp.json['errors'][0]['description'], "Cannot submit bodhi ('0', '1.0', '1.fc17') to stable since it is older than ('0', '2.0', '1.fc17')") @mock.patch(**mock_valid_requirements) @mock.patch('bodhi.server.notifications.publish') def test_edit_testing_update_with_build_from_different_update(self, publish, *args): """ https://github.com/fedora-infra/bodhi/issues/803 """ # Create an update with a build that we will try and add to another update nvr1 = u'bodhi-2.0.0-2.fc17' args = self.get_update(nvr1) r = self.app.post_json('/updates/', args) publish.assert_called_with(topic='update.request.testing', msg=ANY) # Mark it as testing upd = Update.get(nvr1, self.db) upd.status = UpdateStatus.testing upd.request = None self.db.flush() # Create an update for a different build nvr2 = u'koji-2.0.0-1.fc17' args = self.get_update(nvr2) r = self.app.post_json('/updates/', args) publish.assert_called_with(topic='update.request.testing', msg=ANY) # Mark it as testing upd = Update.get(nvr2, self.db) upd.status = UpdateStatus.testing upd.request = None self.db.flush() # Edit the nvr2 update and add nvr1 args['edited'] = args['builds'] args['builds'] = '%s,%s' % (nvr1, nvr2) r = self.app.post_json('/updates/', args, status=400) up = r.json_body self.assertEquals(up['status'], 'error') self.assertEquals(up['errors'][0]['description'], 'Update for bodhi-2.0.0-2.fc17 already exists') up = Update.get(nvr2, self.db) self.assertEquals(up.title, nvr2) # nvr1 shouldn't be able to be added self.assertEquals(up.status, UpdateStatus.testing) self.assertEquals(len(up.builds), 1) self.assertEquals(up.builds[0].nvr, nvr2) # nvr1 update should remain intact up = Update.get(nvr1, self.db) self.assertEquals(up.title, nvr1) self.assertEquals(up.status, UpdateStatus.testing) self.assertEquals(len(up.builds), 1) self.assertEquals(up.builds[0].nvr, nvr1)
AdamWill/bodhi
bodhi/tests/server/services/test_updates.py
Python
gpl-2.0
144,771
[ "VisIt" ]
ef1d62949eb76a1f1cc3f09910aff4793015c9815a78db76b34fbc191386663b
from itertools import chain
import json
import os
import sys

from core import fasta

json_format_version = 'mdrscan/json/0.4.0'


def get_program(blast_output):
    for line in blast_output:
        words = line.split()
        if words:
            return words[0]


def get_database(blast_output):
    for line in blast_output:
        words = line.split()
        if words and words[0] == "Database:":
            return words[1]


def post_process(queries):
    for query in queries:
        for hit in query['hits']:
            hit['alignments'] = ''.join(hit['alignments'])
            for i, region in enumerate(hit['regions']):
                start, stop = region
                hit['regions'][i] = dict(start=start, length=stop - start + 1)


def parse_blast(blast_output, query_file):
    if isinstance(blast_output, basestring):
        blast_output = blast_output.splitlines(True)
    blast_output = iter(blast_output)
    query_lengths = dict((q.id, len(q)) for q in fasta.entries(query_file))
    results = dict(program=get_program(blast_output),
                   database=get_database(blast_output))
    queries = []
    results['queries'] = queries
    query = None
    hit = None
    region = None
    parsing_alignments = False
    for line in blast_output:
        if line.startswith('Query='):
            words = line.split(None, 2)
            id = words[1]
            description = words[2] if len(words) > 2 else ''
            query = dict(id=id, description=description,
                         length=query_lengths[id], hits=[])
            queries.append(query)
        elif line.startswith('>'):
            parsing_alignments = True
            words = line.split(None, 1)
            id = words[0].split('|')[-1]
            description = None
            if len(words) == 2:
                description = words[1]
            hit = dict(id=id, evalue=None, description=description,
                       regions=[], alignments=[line])
            query['hits'].append(hit)
        elif parsing_alignments:
            if line.startswith('Lambda'):
                parsing_alignments = False
                continue
            hit['alignments'].append(line)
            if line.startswith(' Score'):
                if hit['evalue'] is None:
                    words = line.split()
                    i = (i for i, w in enumerate(words)
                         if w.startswith('Expect')).next()
                    hit['evalue'] = float(words[i + 2].rstrip(','))
                region = [sys.maxint, 0]
                hit['regions'].append(region)
            elif line.startswith('Query'):
                words = line.split()
                start = int(words[1])
                stop = int(words[-1])
                if start > stop:
                    start, stop = stop, start
                region[0] = min(start, region[0])
                region[1] = max(stop, region[1])
    post_process(queries)
    return results


def get_hit_ids(results):
    ids = []
    for q in results['queries']:
        for hit in q['hits']:
            id = hit['id']
            if id not in ids:
                ids.append(id)
    return ids


def filter_sequences(db, ids):
    for entry in fasta.iter_entries(db):
        if entry.id in ids:
            yield entry
            ids.remove(entry.id)
            if not ids:
                return
    if ids:
        raise RuntimeError('not all sequences were found: ' + str(list(ids)))


def save_hit_fasta(hit_file, db, results):
    ids = get_hit_ids(results)
    entries = fasta.FastaDict()
    for entry in filter_sequences(db, set(ids)):
        entries[entry.id] = entry
    for id in ids:
        print >> hit_file, entries[id]


def get_dbfiles(dbdir, alias_file):
    for line in open(os.path.join(dbdir, alias_file)):
        if line.startswith('DBLIST'):
            return [os.path.join(dbdir, f) for f in line.split()[1:]]


if __name__ == '__main__':
    db_file = sys.argv[1]
    query_file = sys.argv[2] if len(sys.argv) > 2 else 'query.fasta'
    blast_file = sys.argv[3] if len(sys.argv) > 3 else 'results.blast'
    results_file = sys.argv[4] if len(sys.argv) > 4 else 'noduleblast.json'
    hit_file = sys.argv[5] if len(sys.argv) > 5 else 'hits.fa'
    results = parse_blast(open(blast_file), open(query_file))
    info = dict(format=json_format_version, results=results)
    json.dump(info, open(results_file, 'w'))
    dbdir, dbfile = os.path.split(db_file)
    if dbfile == 'all':
        db = chain(*[open(f) for f in get_dbfiles(dbdir, dbfile + '.nal')])
    else:
        db = open(db_file)
    save_hit_fasta(open(hit_file, 'w'), db, results)
BILS/agda
agda/datisca/parse_blast.py
Python
mit
4,756
[ "BLAST" ]
2a1ca59d0838e7b23578b5cb8abcbac677307679dd08d40db99f980452f1102b
"""Multiple views testing.""" # pylint: disable=unused-argument,redefined-outer-name,invalid-name from time import sleep from numpy import random as rng import pandas as pd import pytest from bowtie import App, View from bowtie.control import Nouislider, Button from bowtie.visual import Table from bowtie.tests.utils import reset_uuid, server_check reset_uuid() table = Table() ctrl = Nouislider() ctrl2 = Button() def callback(*args): """dummy function""" df = pd.DataFrame(rng.randn(10, 10)) table.do_data(df) @pytest.fixture def multiple_views(build_reset, monkeypatch): """Create multiple views app.""" app = App(__name__, sidebar=True) view1 = View() # pylint: disable=unused-variable assert view1._uuid == 2 # pylint: disable=protected-access view2 = View() view2.add(table) app.add_route(view2, 'view2') app.add(table) app.add_sidebar(ctrl) app.add_sidebar(ctrl2) app.subscribe(ctrl.on_change)(app.subscribe(ctrl2.on_click)(callback)) app._build() # pylint: disable=protected-access with server_check(app) as server: yield server # pylint: disable=redefined-outer-name,unused-argument def test_multiple(multiple_views, chrome_driver): """Test multiple views app.""" chrome_driver.get('http://localhost:9991') chrome_driver.implicitly_wait(5) assert chrome_driver.title == 'Bowtie App' button = chrome_driver.find_element_by_class_name('ant-btn') data = chrome_driver.find_element_by_class_name('ant-table-body').text assert len(data.split('\n')) == 1 button.click() sleep(2) data = chrome_driver.find_element_by_class_name('ant-table-body').text assert len(data.split('\n')) == 20 logs = chrome_driver.get_log('browser') for log in logs: if log['level'] == 'SEVERE': raise Exception(log['message']) chrome_driver.get('http://localhost:9991/view2') data = chrome_driver.find_element_by_class_name('ant-table-body').text assert len(data.split('\n')) == 20 chrome_driver.implicitly_wait(5) logs = chrome_driver.get_log('browser') for log in logs: if log['level'] == 'SEVERE': raise Exception(log['message']) chrome_driver.get('http://localhost:9991/view1') assert chrome_driver.title == '404 Not Found'
jwkvam/bowtie
bowtie/tests/test_multiple.py
Python
mit
2,332
[ "Bowtie" ]
3bf70eaf81eb0785b7f2228c2f213e2089e2bacd9718102d68937b5a3e5ff6f0
# -*- coding: utf-8 -*-

import pytest

from ..helpers.movie_data import Movie,Actor,Director

from .fixtures import backend

from blitzdb.backends.sql.relations import ManyToManyProxy


def prepare_data(backend):

    backend.init_schema()
    backend.create_schema()

    francis_coppola = Director({'name' : 'Francis Coppola'})
    stanley_kubrick = Director({'name' : 'Stanley Kubrick'})
    robert_de_niro = Actor({'name' : 'Robert de Niro','movies' : []})
    harrison_ford = Actor({'name' : 'Harrison Ford'})
    andreas_dewes = Actor({'name' : 'Andreas Dewes'})
    brian_de_palma = Director({'name' : 'Brian de Palma'})
    al_pacino = Actor({'name' : 'Al Pacino','movies' : [],'salary' : {'amount' : 100000000,'currency' : u'€'}})

    scarface = Movie({'title' : 'Scarface','director' : brian_de_palma})
    the_godfather = Movie({'title' : 'The Godfather', 'director' : francis_coppola})
    space_odyssey = Movie({'title' : '2001 - A space odyssey', 'director' : stanley_kubrick})
    clockwork_orange = Movie({'title' : 'A Clockwork Orange', 'director' : stanley_kubrick})

    robert_de_niro.movies.append(the_godfather)
    al_pacino.movies.append(the_godfather)
    al_pacino.movies.append(scarface)

    apocalypse_now = Movie({'title' : 'Apocalypse Now'})
    star_wars_v = Movie({'title' : 'Star Wars V: The Empire Strikes Back'})
    harrison_ford.movies = [star_wars_v]

    backend.save(robert_de_niro)
    backend.save(al_pacino)
    backend.save(francis_coppola)
    backend.save(andreas_dewes)
    backend.save(stanley_kubrick)
    backend.save(brian_de_palma)
    backend.save(harrison_ford)

    backend.update(the_godfather,{'best_actor' : al_pacino})
    backend.update(scarface,{'best_actor' : al_pacino})
    backend.update(stanley_kubrick,{'favorite_actor' : al_pacino})
    backend.update(francis_coppola,{'favorite_actor' : robert_de_niro})

    backend.save(the_godfather)
    backend.save(clockwork_orange)
    backend.save(space_odyssey)
    backend.save(scarface)
    backend.commit()


def test_one_to_many_include(backend):

    if str(backend.engine.url).startswith('sqlite://'):
        import sqlite3
        version = [int(s) for s in sqlite3.sqlite_version.split('.')]
        if version[0] < 3 or (version[0] == 3 and version[1] < 8):
            print("No support for common table expression in your SQLite version, skipping this test...")
            return

    prepare_data(backend)

    al_pacino = backend.get(Actor,{'name' : 'Al Pacino'},include = ('best_movies',))
    scarface = backend.get(Movie,{'title' : 'Scarface'})
    the_godfather = backend.get(Movie,{'title' : 'The Godfather'})

    results = backend.filter(Movie,{'best_actor' : al_pacino})
    assert len(results) == 2
    assert len(results.filter({'title' : 'Scarface'})) == 1
M4rtinK/tsubame
core/bundle/blitzdb/tests/sql/test_intersect.py
Python
gpl-3.0
2,887
[ "Brian" ]
c191c96e922266fb2481f9dffbb6b20dba8481fc7367448b482866190cf20507
import logging
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from ipywidgets import fixed, interact

from ._utils import which_epi_janus_resonance
from .meta import get_all_resonances
from .ringcube import RingCube

logger = logging.getLogger(__name__)

resonance_table = get_all_resonances()

interpolators = [
    "none",
    "nearest",
    "bilinear",
    "bicubic",
    "spline16",
    "spline36",
    "hanning",
    "hamming",
    "hermite",
    "kaiser",
    "quadric",
    "catrom",
    "gaussian",
    "bessel",
    "mitchell",
    "sinc",
    "lanczos",
]


def lookup_rcparam(rcParams, pattern):
    """Look up a pattern in the matplotlib rcParams dict.

    Just a little helper to get to the right MPL settings faster.
    """
    return [i for i in rcParams.keys() if pattern in i]


def myimshow(img, vmin, vmax, i, cmap="gray"):
    _, ax = plt.subplots(nrows=2, figsize=(10, 10))
    ax, ax2 = ax
    ax.imshow(
        img,
        vmin=vmin,
        vmax=vmax,
        aspect="auto",
        interpolation=interpolators[i],
        cmap=cmap,
    )
    ax.set_title(
        "vmin: {:.2f}, vmax: {:.2f}, interpolator:{}".format(
            vmin, vmax, interpolators[i]
        )
    )
    tohist = img[~np.isnan(img)]
    p1, p99 = np.percentile(tohist, (0.5, 99.5))
    ax2.hist(img[~np.isnan(img)], 100, range=(p1, p99))
    plt.show()


def myinteract(img):
    min_ = round(np.nanmin(img), 4)
    max_ = round(np.nanmax(img), 4)
    p30, p70 = np.percentile(img[~np.isnan(img)], (30, 70))
    delta = round((p30 - min_) / 50, 5)
    interact(
        myimshow,
        img=fixed(img),
        vmin=(min_, p30, delta),
        vmax=(p70, max_, delta),
        i=(0, len(interpolators) - 1),
    )


def imshowlowhigh(data, low=10, high=90):
    fig, ax = plt.subplots()
    plow, phigh = np.percentile(data[~np.isnan(data)], (low, high))
    ax.imshow(data, vmin=plow, vmax=phigh, cmap="gray", interpolation="sinc")
    return fig


def add_ticks_to_x(ax, newticks, newnames):
    """Add new ticks to an axis.

    I use this for the right-hand plotting of resonance names in my plots.
    """
    ticks = list(ax.get_xticks())
    ticks.extend(newticks)
    ax.set_xticks(ticks)
    names = list(ax.get_xticklabels())
    names.extend(newnames)
    ax.set_xticklabels(names)


def get_res_radius_from_res_name(res_name, cube):
    moon, resonance = res_name.split()
    moon = which_epi_janus_resonance(moon, cube.imagetime)
    row = resonance_table.query("moon==@moon and reson==@resonance")
    return row.squeeze()["radius"] * u.km


def soliton_plot(
    cube,
    solitons,
    ax=None,
    solitoncolor="red",
    resonances=None,
    draw_prediction=True,
    soliton_controls_radius=False,
    saveroot=None,
    ifmin=None,
    ifmax=None,
    rmin=None,
    rmax=None,
):
    if ax is None:
        # fig, ax = plt.subplots(figsize=(12, 9), nrows=2)
        fig, ax = plt.subplots(nrows=2)
    else:
        fig = ax.get_figure()

    # set resonances to True to get all (warning: in A ring too many to be useful)
    if resonances is None:
        # setting some reasonable defaults here:
        resonances = ["janus", "prometheus", "epimetheus"]

    cube.imshow(show_resonances=resonances, ax=ax[0], fig=fig, set_extent=True)

    ticks = []
    names = []
    if draw_prediction:
        for k, v in solitons.items():
            ax[0].axhline(
                y=v.to("Mm").value,
                alpha=1,
                color=solitoncolor,
                linestyle="dashdot",
                lw=3,
                xmin=0.0,
                xmax=0.25,
            )
            # the following is only really required if i want to show more
            # than one prediction line
            # ticks.append(v.to('Mm').value)
            # names.append(k)

    # soliton name and value, only using first found soliton
    # TODO: create function that deals with more than one soliton
    res_name, soliton_radius = next(iter(solitons.items()))
    res_radius = get_res_radius_from_res_name(res_name, cube)
    ax[0].axhline(
        y=res_radius.to("Mm").value,
        alpha=0.5,
        color="cyan",
        linestyle="dotted",
        lw=3,
        xmin=0.75,
        xmax=1.0,
    )

    soliton_ax = None
    # soliton_ax = ax[0].twinx()
    # soliton_ax.ticklabel_format(useOffset=False)
    # soliton_ax.set_yticks(np.array(ticks))
    # soliton_ax.set_yticklabels(names)
    # soliton_ax.axhline(y=res_radius.to('Mm').value, alpha=0.5,
    #                    color='cyan', linestyle='dotted', lw=3,
    #                    xmin=0.7, xmax=1.0)

    if soliton_controls_radius:
        radius_low = (res_radius - 20 * u.km).to(u.Mm)
        radius_high = radius_low + 200 * u.km
        for tempax in [ax[0], cube.resonance_axis]:
            tempax.set_ybound(radius_low.value, radius_high.value)
        if soliton_ax:
            soliton_ax.set_ybound(radius_low.value, radius_high.value)
    elif any([rmin is not None, rmax is not None]):
        for tempax in [ax[0], cube.resonance_axis]:
            tempax.set_ybound(rmin, rmax)
    else:
        # the min/max image radii otherwise control the plot in cube.imshow()
        # so set the soliton display axis to the same values
        soliton_ax.set_ybound(cube.minrad.value, cube.maxrad.value)

    ax[1].plot(
        np.linspace(*cube.extent[2:], cube.img.shape[0]),
        np.nanmedian(cube.img, axis=1),
        color="white",
        lw=1,
    )
    if any([ifmin is not None, ifmax is not None]):
        ax[1].set_ylim(ifmin, ifmax)

    ticks = []
    names = []
    if draw_prediction:
        for k, v in solitons.items():
            ax[1].axvline(
                x=v.to("Mm").value,
                alpha=1,
                color=solitoncolor,
                linestyle="dashdot",
                lw=4,
            )
            ticks.append(v.to("Mm").value)
            names.append(k)
    ax[1].axvline(
        x=res_radius.to("Mm").value, alpha=0.5, color="cyan", linestyle="dotted", lw=3
    )
    ax[1].set_axis_bgcolor("black")
    ax[1].set_title("Longitude-median profile over radius")
    ax[1].set_xlabel("Radius [Mm]")
    ax[1].set_ylabel("I/F")
    if soliton_controls_radius:
        ax[1].set_xlim(radius_low.value, radius_high.value)
    elif any([rmin is not None, rmax is not None]):
        ax[1].set_xlim(rmin, rmax)
    else:
        ax[1].set_xlim(cube.minrad.value, cube.maxrad.value)

    savepath = "{}_{}.png".format(cube.pm.img_id, "_".join(res_name.split()))
    if saveroot is not None:
        root = Path(saveroot)
        root.mkdir(exist_ok=True)
        savepath = root / savepath
    fig.savefig(str(savepath), dpi=100)


def resonance_plot(
    img_id,
    ax=None,
    cube=None,
    saveroot=None,
    ifmin=None,
    ifmax=None,
    rmin=None,
    rmax=None,
):
    if cube is None:
        cube = RingCube(img_id)
    if ax is None:
        fig, ax = plt.subplots(nrows=2)
    else:
        fig = ax[0].get_figure()
        for axes in fig.axes:
            if axes not in ax:
                axes.remove()

    cube.imshow(show_resonances=["janus"], ax=ax[0], set_extent=True)

    # soliton name and value, only using first found soliton
    # TODO: create function that deals with more than one soliton
    row_filter = cube.inside_resonances.moon == cube.janus_swap_phase
    if any(row_filter):
        cols = ["radius", "reson"]
        res_radius, res_name = cube.inside_resonances.loc[row_filter, cols].squeeze()
        res_radius *= u.km
        ax[0].axhline(
            y=res_radius.to("Mm").value,
            alpha=0.5,
            color="cyan",
            linestyle="dotted",
            lw=3,
            xmin=0.75,
            xmax=1.0,
        )
        if any([rmin is not None, rmax is not None]):
            radius_low = rmin
            radius_high = rmax
        else:
            radius_low = (res_radius - 20 * u.km).to(u.Mm)
            radius_high = radius_low + 200 * u.km
        for tempax in [ax[0], cube.resonance_axis]:
            tempax.set_ybound(radius_low.value, radius_high.value)
    else:
        res_name = "no_janus_res"

    ifs = np.nan_to_num(cube.median_profile)
    ifs[ifs < 0] = 0
    ax[1].plot(
        np.linspace(*cube.extent[2:], cube.img.shape[0]), ifs, color="white", lw=1
    )
    if any([ifmin is not None, ifmax is not None]):
        iflow = ifmin
        ifhigh = ifmax
    else:
        iflow, ifhigh = np.percentile(ifs[~np.isnan(ifs)], (0.5, 99.5))
    ax[1].set_ylim(iflow / 1.1, ifhigh * 1.1)
    if any(row_filter):
        ax[1].axvline(
            x=res_radius.to("Mm").value,
            alpha=0.5,
            color="cyan",
            linestyle="dotted",
            lw=3,
        )
        ax[1].set_xlim(radius_low.value, radius_high.value)
    ax[1].set_facecolor("black")
    ax[1].set_title("Longitude-median profile over radius")
    ax[1].set_xlabel("Radius [Mm]")
    ax[1].set_ylabel("I/F")

    if saveroot is not None:
        savepath = f"{cube.pm.img_id}_{res_name.replace(':', '_')}.png"
        root = Path(saveroot)
        root.mkdir(exist_ok=True)
        savepath = root / savepath
        print(savepath)
        logger.info("Saving file at %s", str(savepath))
        fig.savefig(str(savepath), dpi=200)
    return fig, cube.resonance_axis
michaelaye/pyciss
pyciss/plotting.py
Python
isc
9,363
[ "Gaussian" ]
455a071ad88f3e8131edcc08d7da56653a8b73a0345d841aca36855c563704e3
from __future__ import print_function import os import sys from six import StringIO import unittest2 as unittest from nose.plugins.attrib import attr from contextlib import contextmanager from jnpr.junos import Device from jnpr.junos.exception import RpcError, SwRollbackError, RpcTimeoutError from jnpr.junos.utils.sw import SW from jnpr.junos.facts.swver import version_info from ncclient.manager import Manager, make_device_handler from ncclient.transport import SSHSession from lxml import etree from mock import patch, MagicMock, call, mock_open if sys.version < '3': builtin_string = '__builtin__' else: builtin_string = 'builtins' __author__ = "Nitin Kumar, Rick Sherman" __credits__ = "Jeremy Schulman" facts = {'domain': None, 'hostname': 'firefly', 'ifd_style': 'CLASSIC', 'version_info': version_info('12.1X46-D15.3'), '2RE': True, 'serialnumber': 'aaf5fe5f9b88', 'fqdn': 'firefly', 'virtual': True, 'switch_style': 'NONE', 'version': '12.1X46-D15.3', 'HOME': '/cf/var/home/rick', 'srx_cluster': False, 'version_RE0': '16.1-20160925.0', 'version_RE1': '16.1-20160925.0', 'model': 'FIREFLY-PERIMETER', 'junos_info': {'re0': {'text': '16.1-20160925.0'}, 're1': {'text': '16.1-20160925.0'}}, 'RE0': {'status': 'Testing', 'last_reboot_reason': 'Router rebooted after a ' 'normal shutdown.', 'model': 'FIREFLY-PERIMETER RE', 'up_time': '6 hours, 29 minutes, 30 seconds'}, 'current_re': ['re0', 'master'], 'vc_capable': False, 'personality': 'SRX_BRANCH'} @attr('unit') class TestSW(unittest.TestCase): @patch('ncclient.manager.connect') def setUp(self, mock_connect): mock_connect.side_effect = self._mock_manager self.dev = Device(host='1.1.1.1', user='rick', password='password123', gather_facts=False) self.dev.open() self.dev.facts = facts self.sw = self.get_sw() @patch('jnpr.junos.Device.execute') def get_sw(self, mock_execute): mock_execute.side_effect = self._mock_manager return SW(self.dev) @patch('ncclient.operations.session.CloseSession.request') def tearDown(self, mock_session): self.dev.close() def test_sw_hashfile(self): with patch(builtin_string + '.open', mock_open(), create=True): import jnpr.junos.utils.sw with open('foo') as h: h.read.side_effect = ('abc', 'a', '') jnpr.junos.utils.sw._hashfile(h, MagicMock()) self.assertEqual(h.read.call_count, 3) @patch('jnpr.junos.Device.execute') def test_sw_constructor_multi_re(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw = SW(self.dev) self.assertTrue(self.sw._multi_RE) @patch('jnpr.junos.Device.execute') def test_sw_constructor_multi_vc(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw = SW(self.dev) self.assertFalse(self.sw._multi_VC) @patch(builtin_string + '.open') def test_sw_local_sha256(self, mock_built_open): package = 'test.tgz' self.assertEqual(SW.local_checksum(package, algorithm='sha256'), 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934' 'ca495991b7852b855') @patch(builtin_string + '.open') def test_sw_local_md5(self, mock_built_open): package = 'test.tgz' self.assertEqual(self.sw.local_checksum(package, algorithm='md5'), 'd41d8cd98f00b204e9800998ecf8427e') @patch(builtin_string + '.open') def test_sw_local_sha1(self, mock_built_open): package = 'test.tgz' self.assertEqual(SW.local_checksum(package, algorithm='sha1'), 'da39a3ee5e6b4b0d3255bfef95601890afd80709') def test_sw_local_checksum_unknown_alg(self): self.assertRaises(ValueError, SW.local_checksum, 'foo.tgz', algorithm='foo') def test_sw_progress(self): with self.capture(SW.progress, self.dev, 'running') as output: 
self.assertEqual('1.1.1.1: running\n', output) @patch('jnpr.junos.Device.execute') @patch('paramiko.SSHClient') @patch('scp.SCPClient.put') def test_sw_progress_true(self, scp_put, mock_paramiko, mock_execute): mock_execute.side_effect = self._mock_manager with self.capture(SW.progress, self.dev, 'testing') as output: self.sw.install('test.tgz', progress=True, checksum=345, cleanfs=False) self.assertEqual('1.1.1.1: testing\n', output) @patch('paramiko.SSHClient') @patch('scp.SCPClient.put') def test_sw_put(self, mock_scp_put, mock_scp): package = 'test.tgz' self.sw.put(package) self.assertTrue( call( 'test.tgz', '/var/tmp') in mock_scp_put.mock_calls) @patch('jnpr.junos.utils.sw.FTP') def test_sw_put_ftp(self, mock_ftp_put): dev = Device(host='1.1.1.1', user='rick', password='password123', mode='telnet', port=23, gather_facts=False) dev.facts = facts sw = SW(dev) sw.put(package='test.tgz') self.assertTrue( call( 'test.tgz', '/var/tmp') in mock_ftp_put.mock_calls) @patch('jnpr.junos.utils.scp.SCP.__exit__') @patch('jnpr.junos.utils.scp.SCP.__init__') @patch('jnpr.junos.utils.scp.SCP.__enter__') def test_sw_put_progress(self, mock_enter, mock_scp, mock_exit): package = 'test.tgz' mock_scp.side_effect = self._fake_scp with self.capture(self.sw.put, package, progress=self._my_scp_progress) as output: self.assertEqual('test.tgz 100 50\n', output) def _fake_scp(self, *args, **kwargs): progress = kwargs['progress'] progress('test.tgz', 100, 50) @patch('jnpr.junos.Device.execute') def test_sw_pkgadd(self, mock_execute): mock_execute.side_effect = self._mock_manager package = 'test.tgz' self.assertTrue(self.sw.pkgadd(package)) @patch('jnpr.junos.Device.execute') @patch('jnpr.junos.utils.sw.SW.local_md5') def test_sw_install_url_in_pkg_set(self, mock_md5, mock_execute): mock_md5.return_value = '96a35ab371e1ca10408c3caecdbd8a67' mock_execute.side_effect = self._mock_manager self.sw.put = MagicMock() self.sw._mixed_VC = True self.assertTrue(self.sw.install( pkg_set=['safecopy.tgz', 'safecopy.tgz', 'ftp://server/path/test.tgz'])) @patch('jnpr.junos.Device.execute') def test_sw_install_via_url(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertTrue(self.sw.install(package='ftp://server/path/test.tgz')) @patch('jnpr.junos.Device.execute') def test_sw_install_single_re_on_multi_re(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw._multi_RE = True self.assertTrue(self.sw.install('test.tgz', all_re=False, no_copy=True)) @patch('jnpr.junos.Device.execute') def test_sw_install_single_re(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw._multi_RE = False self.assertTrue(self.sw.install('test.tgz', no_copy=True)) @patch('jnpr.junos.Device.execute') def test_sw_install_srx_branch_cluster(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw._multi_RE = False self.sw._dev.facts['personality'] = 'SRX_BRANCH' self.sw._dev.facts['srx_cluster'] = True self.assertTrue(self.sw.install('test.tgz', no_copy=True)) @patch('jnpr.junos.Device.execute') def test_sw_install_no_package_result(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw._multi_RE = False self.assertTrue(self.sw.install('test_no_result.tgz', no_copy=True)) @patch('jnpr.junos.Device.execute') def test_sw_install_issu(self, mock_execute): mock_execute.side_effect = self._mock_manager package = 'test.tgz' self.assertTrue(self.sw.install(package, issu=True, no_copy=True)) @patch('jnpr.junos.Device.execute') def test_sw_install_nssu(self, 
mock_execute): mock_execute.side_effect = self._mock_manager package = 'test.tgz' self.assertTrue(self.sw.install(package, nssu=True, no_copy=True)) @patch('jnpr.junos.Device.execute') def test_sw_install_issu_nssu_both_error(self, mock_execute): mock_execute.side_effect = self._mock_manager try: self.sw.install('test.tgz', issu=True, nssu=True) except TypeError as ex: self.assertEqual( str(ex), 'install function can either take issu or nssu not both') @patch('jnpr.junos.Device.execute') def test_sw_install_issu_single_re_error(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw._multi_RE = False try: self.sw.install('test.tgz', issu=True) except TypeError as ex: self.assertEqual(str(ex), 'ISSU/NSSU requires Multi RE setup') @patch('jnpr.junos.Device.execute') def test_sw_install_issu_nssu_single_re_error(self, mock_execute): mock_execute.side_effect = self._mock_manager package = 'test.tgz' self.sw._multi_RE = False self.assertRaises(TypeError, self.sw.install, package, nssu=True, issu=True) @patch('jnpr.junos.Device.execute') def test_sw_pkgaddISSU(self, mock_execute): mock_execute.side_effect = self._mock_manager package = 'test.tgz' self.assertTrue(self.sw.pkgaddISSU(package)) @patch('jnpr.junos.Device.execute') def test_sw_pkgaddNSSU(self, mock_execute): mock_execute.side_effect = self._mock_manager package = 'test.tgz' self.assertTrue(self.sw.pkgaddNSSU(package)) @patch('jnpr.junos.Device.execute') def test_sw_pkgadd_pkg_set(self, mock_execute): mock_execute.side_effect = self._mock_manager pkg_set = ['abc.tgz', 'pqr.tgz'] self.sw._mixed_VC = True self.sw.pkgadd(pkg_set) self.assertEqual([i.text for i in mock_execute.call_args[0][0].findall('set')], pkg_set) @patch('jnpr.junos.Device.execute') def test_sw_validate(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertTrue(self.sw.validate('package.tgz')) @patch('jnpr.junos.Device.execute') def test_sw_validate_nssu(self, mock_execute): mock_execute.side_effect = self._mock_manager self.sw.log = MagicMock() # get_config returns false self.assertFalse(self.sw.validate('package.tgz', nssu=True)) self.sw.log.assert_called_with( 'Requirement FAILED: GRES is not Enabled in configuration') @patch('jnpr.junos.Device.execute') def test_sw_validate_issu(self, mock_execute): mock_execute.side_effect = self._mock_manager self.dev.rpc.get_config = MagicMock() self.assertTrue(self.sw.validate('package.tgz', issu=True)) @patch('jnpr.junos.Device.execute') def test_sw_val_issu_request_shell_execute_gres_on(self, mock_execute): mock_execute.side_effect = self._mock_manager self.dev.rpc.get_config = MagicMock() self.dev.rpc.request_shell_execute = MagicMock() self.dev.rpc.request_shell_execute.return_value = etree.fromstring( """<rpc-reply> <output>Graceful switchover: On</output> </rpc-reply>""") self.assertTrue(self.sw.validate('package.tgz', issu=True)) @patch('jnpr.junos.Device.execute') def test_sw_validate_issu_2re_false(self, mock_execute): mock_execute.side_effect = self._mock_manager self.dev.facts['2RE'] = False self.assertFalse(self.sw.validate('package.tgz', issu=True)) self.dev.facts['2RE'] = True @patch('paramiko.SSHClient') @patch('jnpr.junos.utils.start_shell.StartShell.wait_for') def test_sw_validate_issu_request_shell_execute(self, mock_ss, mock_ssh): self._issu_test_helper() with patch('jnpr.junos.utils.start_shell.StartShell.run') as ss: ss.return_value = (True, 'Graceful switchover: On') self.assertTrue(self.sw.validate('package.tgz', issu=True)) @patch('paramiko.SSHClient') 
    @patch('jnpr.junos.utils.start_shell.StartShell.wait_for')
    def test_sw_validate_issu_ss_login_other_re_fail(self, mock_ss, mock_ssh):
        self._issu_test_helper()
        with patch('jnpr.junos.utils.start_shell.StartShell.run') as ss:
            ss.return_value = (False, 'Graceful switchover: On')
            self.assertFalse(self.sw.validate('package.tgz', issu=True))
        self.sw.log.assert_called_with(
            'Requirement FAILED: Not able run "show system switchover"')

    @patch('paramiko.SSHClient')
    @patch('jnpr.junos.utils.start_shell.StartShell.wait_for')
    def test_sw_validate_issu_ss_graceful_off(self, mock_ss, mock_ssh):
        self._issu_test_helper()
        with patch('jnpr.junos.utils.start_shell.StartShell.run') as ss:
            ss.return_value = (True, 'Graceful switchover: Off')
            self.assertFalse(self.sw.validate('package.tgz', issu=True))
        self.sw.log.assert_called_with(
            'Requirement FAILED: Graceful switchover status is not On')

    def _issu_test_helper(self):
        self.sw.log = MagicMock()
        self.dev.rpc.request_shell_execute = MagicMock()
        self.dev.rpc = MagicMock()
        self.dev.rpc.get_routing_task_replication_state.return_value = \
            self._read_file('get-routing-task-replication-state.xml')
        self.dev.rpc.check_in_service_upgrade.return_value = \
            self._read_file('check-in-service-upgrade.xml')
        self.dev.rpc.request_shell_execute.side_effect = \
            RpcError(rsp='not ok')

    @patch('jnpr.junos.Device.execute')
    def test_sw_validate_issu_stateful_replication_off(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.dev.rpc.get_config = MagicMock()
        self.dev.rpc.get_routing_task_replication_state = MagicMock()
        self.sw.log = MagicMock()
        self.assertFalse(self.sw.validate('package.tgz', issu=True))
        self.sw.log.assert_called_with(
            'Requirement FAILED: Either Stateful Replication is not Enabled '
            'or RE mode\nis not Master')

    @patch('jnpr.junos.Device.execute')
    def test_sw_validate_issu_commit_sync_off(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.dev.rpc.get_config = MagicMock()
        self.dev.rpc.get_config.return_value = etree.fromstring("""
            <configuration>
                <chassis>
                    <redundancy>
                        <graceful-switchover>
                        </graceful-switchover>
                    </redundancy>
                </chassis>
            </configuration>""")
        self.sw.log = MagicMock()
        self.assertFalse(self.sw.validate('package.tgz', issu=True))
        self.sw.log.assert_called_with(
            'Requirement FAILED: commit synchronize is not Enabled '
            'in configuration')

    @patch('jnpr.junos.Device.execute')
    def test_sw_validate_issu_nonstop_routing_off(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.dev.rpc.get_config = MagicMock()
        self.dev.rpc.get_config.side_effect = iter([etree.fromstring("""
            <configuration>
                <chassis>
                    <redundancy>
                        <graceful-switchover>
                        </graceful-switchover>
                    </redundancy>
                </chassis>
            </configuration>"""), etree.fromstring("""
            <configuration>
                <system>
                    <commit>
                        <synchronize/>
                    </commit>
                </system>
            </configuration>"""), etree.fromstring("""<configuration>
                <routing-options></routing-options>
            </configuration>""")])
        self.sw.log = MagicMock()
        self.assertFalse(self.sw.validate('package.tgz', issu=True))
        self.sw.log.assert_called_with(
            'Requirement FAILED: NSR is not Enabled in configuration')

    @patch('jnpr.junos.Device.execute')
    def test_sw_validate_issu_validation_succeeded(self, mock_execute):
        rpc_reply = """<rpc-reply><output>mgd: commit complete
Validation succeeded
</output>
<package-result>1</package-result>
</rpc-reply>"""
        mock_execute.side_effect = etree.fromstring(rpc_reply)
        package = 'package.tgz'
        self.assertFalse(self.sw.validate(package, issu=True))

    @patch('jnpr.junos.Device.execute')
    def test_sw_remote_checksum_not_found(self, mock_execute):
        xml = '''<rpc-error>
        <error-severity>error</error-severity>
        <error-message>
        md5: /var/tmp/123: No such file or directory
        </error-message>
        </rpc-error>'''
        mock_execute.side_effect = RpcError(rsp=etree.fromstring(xml))
        package = 'test.tgz'
        self.assertEqual(self.sw.remote_checksum(package), None)

    @patch('jnpr.junos.Device.execute')
    def test_sw_remote_checksum_not_rpc_error(self, mock_execute):
        xml = '''<rpc-error>
        <error-severity>error</error-severity>
        <error-message>
        something else!
        </error-message>
        </rpc-error>'''
        mock_execute.side_effect = RpcError(rsp=etree.fromstring(xml))
        package = 'test.tgz'
        with self.assertRaises(RpcError):
            self.sw.remote_checksum(package)

    @patch('jnpr.junos.Device.execute')
    def test_sw_remote_checksum_md5(self, mock_execute):
        xml = '''<rpc-reply>
        <checksum-information>
            <file-checksum>
                <computation-method>MD5</computation-method>
                <input-file>/var/tmp/foo.tgz</input-file>
                <checksum>8a04cfc475e21507be5145bc0e82ce09</checksum>
            </file-checksum>
        </checksum-information>
        </rpc-reply>'''
        mock_execute.side_effect = etree.fromstring(xml)
        package = 'foo.tgz'
        self.assertEqual(self.sw.remote_checksum(package),
                         '8a04cfc475e21507be5145bc0e82ce09')

    @patch('jnpr.junos.Device.execute')
    def test_sw_remote_checksum_sha1(self, mock_execute):
        xml = '''
        <rpc-reply>
        <checksum-information>
            <file-checksum>
                <computation-method>SHA1</computation-method>
                <input-file>/var/tmp/foo.tgz</input-file>
                <checksum>33c12913e81599452270ee849511e2e7578db00c</checksum>
            </file-checksum>
        </checksum-information>
        </rpc-reply>'''
        mock_execute.side_effect = etree.fromstring(xml)
        package = 'foo.tgz'
        self.assertEqual(self.sw.remote_checksum(package, algorithm='sha1'),
                         '33c12913e81599452270ee849511e2e7578db00c')

    @patch('jnpr.junos.Device.execute')
    def test_sw_remote_checksum_sha256(self, mock_execute):
        xml = '''
        <rpc-reply>
        <checksum-information>
            <file-checksum>
                <computation-method>SHA256</computation-method>
                <input-file>/var/tmp/foo.tgz</input-file>
                <checksum>27bccf64babe4ea6687d3461e6d724d165aa140933e77b582af615dad4f02170</checksum>
            </file-checksum>
        </checksum-information>
        </rpc-reply>'''
        mock_execute.side_effect = etree.fromstring(xml)
        package = 'foo.tgz'
        self.assertEqual(
            self.sw.remote_checksum(package, algorithm='sha256'),
            '27bccf64babe4ea6687d3461e6d724d165aa140933e77b582af615dad4f02170')

    def test_sw_remote_checksum_unknown_alg(self):
        self.assertRaises(ValueError,
                          self.sw.remote_checksum,
                          'foo.tgz',
                          algorithm='foo')

    @patch('jnpr.junos.Device.execute')
    def test_sw_safe_copy(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        package = 'safecopy.tgz'
        self.sw.put = MagicMock()
        with patch('jnpr.junos.utils.sw.SW.local_md5'):
            self.assertTrue(
                self.sw.safe_copy(package, progress=self._myprogress,
                                  cleanfs=True,
                                  checksum='96a35ab371e1ca10408c3caecdbd8a67'))

    @patch('jnpr.junos.Device.execute')
    @patch('jnpr.junos.utils.sw.SW.local_checksum')
    def test_sw_safe_copy_missing_local_file(self, mock_checksum,
                                             mock_execute):
        mock_execute.side_effect = self._mock_manager
        mock_checksum.side_effect = IOError()
        package = 'foo.tgz'
        self.assertFalse(self.sw.safe_copy(package,
                                           progress=self._myprogress,
                                           cleanfs=True))

    @patch('jnpr.junos.Device.execute')
    def test_sw_safe_copy_cleanfs_fail(self, mock_execute):
        mock_execute.side_effect = RpcError()
        package = 'foo.tgz'
        self.assertFalse(self.sw.safe_copy(
            package, progress=self._myprogress, cleanfs=True,
            checksum='96a35ab371e1ca10408c3caecdbd8a67'))

    @patch('jnpr.junos.Device.execute')
    def test_sw_safe_copy_return_false(self, mock_execute):
        # not passing checksum value, will get random from magicmock
        mock_execute.side_effect = self._mock_manager
        package = 'safecopy.tgz'
        self.sw.put = MagicMock()
        with patch('jnpr.junos.utils.sw.SW.local_md5'):
            self.assertFalse(self.sw.safe_copy(package,
                                               progress=self._myprogress,
                                               cleanfs=True))

    @patch('jnpr.junos.Device.execute')
    def test_sw_safe_copy_checksum_none(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        package = 'safecopy.tgz'
        self.sw.put = MagicMock()
        with patch('jnpr.junos.utils.sw.SW.local_md5',
                   MagicMock(return_value='96a35ab371e1ca10408c3caecdbd8a67')):
            self.assertTrue(self.sw.safe_copy(package,
                                              progress=self._myprogress,
                                              cleanfs=True))

    @patch('jnpr.junos.Device.execute')
    def test_sw_safe_install(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        package = 'install.tgz'
        self.sw.put = MagicMock()
        with patch('jnpr.junos.utils.sw.SW.local_md5',
                   MagicMock(return_value='96a35ab371e1ca10408c3caecdbd8a67')):
            self.assertTrue(
                self.sw.install(
                    package, progress=self._myprogress, cleanfs=True))

    @patch('jnpr.junos.utils.sw.SW.safe_copy')
    def test_sw_safe_install_copy_fail(self, mock_copy):
        mock_copy.return_value = False
        self.assertFalse(self.sw.install('file'))

    @patch('jnpr.junos.utils.sw.SW.validate')
    def test_sw_install_validate(self, mock_validate):
        mock_validate.return_value = False
        self.assertFalse(self.sw.install('file', validate=True,
                                         no_copy=True))

    @patch(builtin_string + '.print')
    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_multi_mx(self, mock_pkgadd, mock_print):
        mock_pkgadd.return_value = True
        self.sw._multi_RE = True
        self.sw._multi_MX = True
        self.assertTrue(self.sw.install('file', no_copy=True, progress=True))

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_multi_vc(self, mock_pkgadd):
        mock_pkgadd.return_value = True
        self.sw._multi_RE = True
        self.sw._multi_VC = True
        self.sw._RE_list = ('version_RE0', 'version_RE1')
        self.assertTrue(self.sw.install('file', no_copy=True))

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_mixed_vc(self, mock_pkgadd):
        mock_pkgadd.return_value = True
        self.sw._mixed_VC = True
        self.sw._RE_list = ('version_RE0', 'version_RE1')
        self.assertTrue(self.sw.install(pkg_set=['abc.tgz', 'pqr.tgz'],
                                        no_copy=True))

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_multi_vc_mode_disabled(self, mock_pkgadd):
        mock_pkgadd.return_value = True
        self.dev._facts = {
            '2RE': True, 'domain': None,
            'RE1': {'status': 'OK', 'model': 'RE-EX8208',
                    'mastership_state': 'backup'},
            'ifd_style': 'SWITCH', 'version_RE1': '12.3R7.7',
            'version_RE0': '12.3', 'serialnumber': 'XXXXXX',
            'fqdn': 'XXXXXX',
            'RE0': {'status': 'OK', 'model': 'RE-EX8208',
                    'mastership_state': 'master'},
            'switch_style': 'VLAN', 'version': '12.3R5-S3.1',
            'master': 'RE0', 'hostname': 'XXXXXX',
            'HOME': '/var/home/sn', 'vc_mode': 'Disabled',
            'model': 'EX8208', 'vc_capable': True,
            'personality': 'SWITCH'}
        sw = self.get_sw()
        sw.install(package='abc.tgz', no_copy=True)
        self.assertFalse(sw._multi_VC)
        calls = [
            call('/var/tmp/abc.tgz', dev_timeout=1800, vmhost=False,
                 re0=True),
            call('/var/tmp/abc.tgz', dev_timeout=1800, re1=True,
                 vmhost=False)]
        mock_pkgadd.assert_has_calls(calls)

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_mixed_vc_with_copy(self, mock_pkgadd):
        mock_pkgadd.return_value = True
        self.sw._mixed_VC = True
        self.sw.put = MagicMock()
        self.sw.remote_checksum = MagicMock(
            return_value='d41d8cd98f00b204e9800998ecf8427e')
        self.sw._RE_list = ('version_RE0', 'version_RE1')
        with patch('jnpr.junos.utils.sw.SW.local_md5',
                   MagicMock(return_value='d41d8cd98f00b204e9800998ecf8427e')):
            self.assertTrue(
                self.sw.install(
                    pkg_set=['install.tgz', 'install.tgz'], cleanfs=False))

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_mixed_vc_safe_copy_false(self, mock_pkgadd):
        mock_pkgadd.return_value = True
        self.sw._mixed_VC = True
        self.sw.safe_copy = MagicMock(return_value=False)
        self.sw.remote_checksum = MagicMock(
            return_value='d41d8cd98f00b204e9800998ecf8427e')
        self.sw._RE_list = ('version_RE0', 'version_RE1')
        with patch('jnpr.junos.utils.sw.SW.local_md5',
                   MagicMock(return_value='d41d8cd98f00b204e9800998ecf8427e')):
            self.assertFalse(
                self.sw.install(
                    pkg_set=['install.tgz', 'install.tgz'], cleanfs=False))

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_mixed_vc_ValueError(self, mock_pkgadd):
        mock_pkgadd.return_value = True
        self.sw._mixed_VC = True
        self.sw.remote_checksum = MagicMock(
            return_value='d41d8cd98f00b204e9800998ecf8427e')
        self.sw._RE_list = ('version_RE0', 'version_RE1')
        with patch('jnpr.junos.utils.sw.SW.local_md5',
                   MagicMock(return_value='d41d8cd98f00b204e9800998ecf8427e')):
            self.assertRaises(
                ValueError,
                self.sw.install,
                pkg_set='install.tgz',
                cleanfs=False)

    @patch('jnpr.junos.utils.sw.SW.pkgadd')
    def test_sw_install_mixed_vc_TypeError(self, mock_pkgadd):
        self.assertRaises(TypeError, self.sw.install, cleanfs=False)

    @patch('jnpr.junos.Device.execute')
    def test_sw_install_vmhost(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        package = 'test.tgz'
        self.assertTrue(self.sw.install(package, no_copy=True, vmhost=True))

    @patch('jnpr.junos.Device.execute')
    def test_sw_install_kwargs_force_host(self, mock_execute):
        self.sw.install('file', no_copy=True, force_host=True)
        rpc = [
            '<request-package-add><force-host/><no-validate/><re1/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><no-validate/><force-host/><re1/></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><no-validate/><re1/><force-host/></request-package-add>',
            '<request-package-add><force-host/><no-validate/><package-name>/var/tmp/file</package-name><re1/></request-package-add>',
            '<request-package-add><force-host/><re1/><no-validate/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><no-validate/><re1/><package-name>/var/tmp/file</package-name><force-host/></request-package-add>',
            '<request-package-add><no-validate/><package-name>/var/tmp/file</package-name><force-host/><re1/></request-package-add>',
            '<request-package-add><force-host/><package-name>/var/tmp/file</package-name><no-validate/><re1/></request-package-add>',
            '<request-package-add><re1/><no-validate/><package-name>/var/tmp/file</package-name><force-host/></request-package-add>',
            '<request-package-add><re1/><force-host/><package-name>/var/tmp/file</package-name><no-validate/></request-package-add>',
            '<request-package-add><re1/><package-name>/var/tmp/file</package-name><force-host/><no-validate/></request-package-add>',
            '<request-package-add><re1/><force-host/><no-validate/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><no-validate/><force-host/><re1/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><force-host/><no-validate/><re1/></request-package-add>',
            '<request-package-add><no-validate/><re1/><force-host/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><force-host/><re1/><no-validate/></request-package-add>',
            '<request-package-add><no-validate/><force-host/><package-name>/var/tmp/file</package-name><re1/></request-package-add>',
            '<request-package-add><force-host/><no-validate/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><force-host/><package-name>/var/tmp/file</package-name><no-validate/></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><no-validate/><force-host/></request-package-add>',
            '<request-package-add><no-validate/><force-host/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><no-validate/><package-name>/var/tmp/file</package-name><force-host/></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><force-host/><no-validate/></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><re1/><no-validate/><force-host/></request-package-add>',
            '<request-package-add><package-name>/var/tmp/file</package-name><re1/><force-host/><no-validate/></request-package-add>',
            '<request-package-add><force-host/><package-name>/var/tmp/file</package-name><re1/><no-validate/></request-package-add>',
            '<request-package-add><re1/><package-name>/var/tmp/file</package-name><no-validate/><force-host/></request-package-add>',
            '<request-package-add><no-validate/><package-name>/var/tmp/file</package-name><re1/><force-host/></request-package-add>',
            '<request-package-add><re1/><no-validate/><force-host/><package-name>/var/tmp/file</package-name></request-package-add>',
            '<request-package-add><force-host/><re1/><package-name>/var/tmp/file</package-name><no-validate/></request-package-add>']
        self.assertTrue(etree.tostring(
            mock_execute.call_args[0][0]).decode('utf-8') in rpc)

    @patch('jnpr.junos.Device.execute')
    def test_sw_rollback(self, mock_execute):
        rsp = '<rpc-reply><output>junos-vsrx-12.1X46-D30.2-domestic will ' \
              'become active at next reboot</output></rpc-reply>'
        mock_execute.side_effect = etree.XML(rsp)
        msg = 'junos-vsrx-12.1X46-D30.2-domestic will become active ' \
              'at next reboot'
        self.assertEqual(self.sw.rollback(), msg)

    @patch('jnpr.junos.Device.execute')
    def test_sw_rollback_multi(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        msg = {'fpc1': "Junos version 'D10.2' will become active at next reboot",
               'fpc0': 'JUNOS version "D10.2" will become active at next reboot'}
        self.assertEqual(eval(self.sw.rollback()), msg)

    @patch('jnpr.junos.Device.execute')
    def test_sw_rollback_multi_exception(self, mock_execute):
        fname = 'request-package-rollback-multi-error.xml'
        mock_execute.side_effect = self._read_file(fname)
        self.assertRaises(SwRollbackError, self.sw.rollback)

    @patch('jnpr.junos.Device.execute')
    def test_sw_rollback_exception(self, mock_execute):
        rsp = '<rpc-reply><output>WARNING: Cannot rollback, ' \
              '/packages/junos.old is not valid</output></rpc-reply>'
        mock_execute.side_effect = etree.XML(rsp)
        self.assertRaises(SwRollbackError, self.sw.rollback)

    def test_sw_inventory(self):
        self.sw.dev.rpc.file_list = \
            MagicMock(side_effect=self._mock_manager)
        self.assertEqual(self.sw.inventory,
                         {'current': None, 'rollback': None})

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.sw._multi_MX = True
        self.assertTrue('Shutdown NOW' in self.sw.reboot())

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot_at(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.assertTrue('Shutdown at' in self.sw.reboot(at='201407091815'))

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot_multi_re_vc(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.sw._multi_RE = True
        self.sw._multi_VC = False
        self.assertTrue('Shutdown NOW' in self.sw.reboot())

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot_mixed_vc(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.sw._mixed_VC = True
        self.sw._multi_VC = True
        self.sw.reboot()
        self.assertTrue('all-members' in (etree.tostring(
            mock_execute.call_args[0][0]).decode('utf-8')))

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot_mixed_vc_all_re_false(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.sw._mixed_VC = True
        self.sw._multi_VC = True
        self.sw.reboot(all_re=False)
        self.assertTrue('all-members' not in (etree.tostring(
            mock_execute.call_args[0][0]).decode('utf-8')))

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot_exception(self, mock_execute):
        rsp = etree.XML('<rpc-reply><a>test</a></rpc-reply>')
        mock_execute.side_effect = RpcError(rsp=rsp)
        self.assertRaises(Exception, self.sw.reboot)

    @patch('jnpr.junos.Device.execute')
    def test_sw_reboot_exception_RpcTimeoutError(self, mock_execute):
        rsp = (self.dev, 'request-reboot', 60)
        mock_execute.side_effect = RpcTimeoutError(*rsp)
        self.assertRaises(Exception, self.sw.reboot)

    @patch('jnpr.junos.Device.execute')
    def test_sw_poweroff(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.sw._multi_MX = True
        self.assertTrue('Shutdown NOW' in self.sw.poweroff())

    @patch('jnpr.junos.Device.execute')
    def test_sw_poweroff_exception(self, mock_execute):
        rsp = etree.XML('<rpc-reply><a>test</a></rpc-reply>')
        mock_execute.side_effect = RpcError(rsp=rsp)
        self.assertRaises(Exception, self.sw.poweroff)

    @patch('jnpr.junos.Device.execute')
    def test_sw_poweroff_multi_re_vc(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.sw._multi_RE = True
        self.sw._multi_VC = False
        self.assertTrue('Shutdown NOW' in self.sw.poweroff())

    def _myprogress(self, dev, report):
        pass

    def _my_scp_progress(self, _path, _total, _xfrd):
        print(_path, _total, _xfrd)

    @contextmanager
    def capture(self, command, *args, **kwargs):
        out, sys.stdout = sys.stdout, StringIO()
        command(*args, **kwargs)
        sys.stdout.seek(0)
        yield sys.stdout.read()
        sys.stdout = out

    def _read_file(self, fname):
        from ncclient.xml_ import NCElement
        fpath = os.path.join(os.path.dirname(__file__),
                             'rpc-reply', fname)
        foo = open(fpath).read()
        rpc_reply = NCElement(
            foo, self.dev._conn._device_handler.transform_reply()
        )._NCElement__doc[0]
        return rpc_reply

    def _mock_manager(self, *args, **kwargs):
        if kwargs:
            # Little hack for mocked execute
            if 'dev_timeout' in kwargs:
                if (args and args[0].findtext('package-name') ==
                        '/var/tmp/test_no_result.tgz'):
                    return self._read_file(args[0].tag + '.no_result.xml')
                else:
                    return self._read_file(args[0].tag + '.xml')
            if 'path' in kwargs:
                if kwargs['path'] == '/packages':
                    return self._read_file('file-list_dir.xml')
            device_params = kwargs['device_params']
            device_handler = make_device_handler(device_params)
            session = SSHSession(device_handler)
            return Manager(session, device_handler)
        elif args:
            if args[0].find('at') is not None:
                return self._read_file('request-reboot-at.xml')
            else:
                return self._read_file(args[0].tag + '.xml')


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSW)
    unittest.TextTestRunner(verbosity=2).run(suite)
pklimai/py-junos-eznc
tests/unit/utils/test_sw.py
Python
apache-2.0
40,404
[ "Firefly" ]
312a85894f1bf4344b29761caad7249b93ee1c750ca241f1d2dfbd798bfb933a
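The tests above lean on two unittest.mock ``side_effect`` idioms: assigning a callable routes every call on the mock to a fake handler, while assigning an exception (class or instance) makes the mock raise. The following is a minimal, self-contained sketch of both, independent of py-junos-eznc; ``fake_execute`` and its canned replies are invented for illustration.

from unittest.mock import MagicMock

def fake_execute(rpc):
    # Stand-in for TestSW._mock_manager: return a canned reply per request.
    return {'request-reboot': 'Shutdown NOW'}.get(rpc, 'ok')

mock = MagicMock(side_effect=fake_execute)
assert mock('request-reboot') == 'Shutdown NOW'

mock.side_effect = ValueError('boom')   # the next call raises instead
try:
    mock('anything')
except ValueError as err:
    assert str(err) == 'boom'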
import ocl
import camvtk
import time
import vtk
import datetime
import math

def drawLoops(myscreen, loops, loopcolor):
    nloop = 0
    for lop in loops:
        n = 0
        N = len(lop)
        first_point = ocl.Point(-1, -1, 5)
        previous = ocl.Point(-1, -1, 5)
        for p in lop:
            if n == 0:  # don't draw anything on the first iteration
                previous = p
                first_point = p
            elif n == (N - 1):  # the last point
                myscreen.addActor(camvtk.Line(p1=(previous.x, previous.y, previous.z),
                                              p2=(p.x, p.y, p.z), color=loopcolor))  # the normal line
                # and a line from p to the first point
                myscreen.addActor(camvtk.Line(p1=(p.x, p.y, p.z),
                                              p2=(first_point.x, first_point.y, first_point.z),
                                              color=loopcolor))
            else:
                myscreen.addActor(camvtk.Line(p1=(previous.x, previous.y, previous.z),
                                              p2=(p.x, p.y, p.z), color=loopcolor))
                previous = p
            n = n + 1
        print("rendered loop ", nloop, " with ", len(lop), " points")
        if len(lop) == 2:
            for p in lop:
                print(p)
                myscreen.addActor(camvtk.Sphere(center=(p.x, p.y, p.z),
                                                radius=0.1, color=camvtk.green))
        nloop = nloop + 1

if __name__ == "__main__":
    print(ocl.version())
    myscreen = camvtk.VTKScreen()
    #stl = camvtk.STLSurf("../../stl/demo.stl")
    #stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
    #stl = camvtk.STLSurf("../../stl/porche.stl")
    #stl = camvtk.STLSurf("../../stl/ktoolcav.stl")
    #stl = camvtk.STLSurf("../../stl/ktoolcor.stl")
    stl = camvtk.STLSurf("../../stl/sphere_cutout.stl")
    #myscreen.addActor(stl)
    #stl.SetWireframe()  # render tux as wireframe
    #stl.SetSurface()  # render tux as surface
    #stl.SetColor(camvtk.cyan)
    polydata = stl.src.GetOutput()  # get polydata from vtk-surface
    s = ocl.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)  # put triangles on ocl-surface
    #s.rotate(-math.pi/2, math.pi, 0)
    stl2 = camvtk.STLSurf(triangleList=s.getTriangles())
    myscreen.addActor(stl2)
    stl2.SetSurface()
    stl2.SetColor(camvtk.cyan)
    print("STL surface read,", s.size(), "triangles")

    zh = -0.5
    zheights = [-0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, -0.05]  # for cavity
    zheights = [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]  # for core
    zheights = [10, 20, 30, 40, 50, 60]  # for waterline1.stl
    diam = 6
    length = 100
    loops = []
    #cutter = ocl.CylCutter( diam , length )
    cutter = ocl.BallCutter(diam, length)
    #cutter = ocl.BullCutter( diam , diam/5, length )
    """
    wl = ocl.Waterline()
    wl.setSTL(s)
    wl.setCutter(cutter)
    wl.setZ(zh)
    wl.setSampling(0.02)
    #wl.setThreads(5)
    t_before = time.time()
    wl.run2()
    t_after = time.time()
    calctime = t_after-t_before
    print(" Waterline done in ", calctime," s")
    cutter_loops = wl.getLoops()
    for l in cutter_loops:
        loops.append(l)
    """
    sampling = 1
    minSampling = 0.1
    aloops = []
    for zh in zheights:
        awl = ocl.AdaptiveWaterline()
        awl.setSTL(s)
        awl.setCutter(cutter)
        awl.setZ(zh)
        awl.setSampling(sampling)
        awl.setMinSampling(minSampling)
        #wl.setThreads(5)
        t_before = time.time()
        awl.run()
        t_after = time.time()
        calctime = t_after - t_before
        print(" AdaptiveWaterline done in ", calctime, " s")
        acutter_loops = awl.getLoops()
        for l in acutter_loops:
            aloops.append(l)

    drawLoops(myscreen, aloops, camvtk.red)

    print("done.")
    myscreen.camera.SetPosition(185, 153, 167)
    myscreen.camera.SetFocalPoint(5, 5, 0)
    camvtk.drawArrows(myscreen, center=(0, -4, 0))
    camvtk.drawOCLtext(myscreen)
    myscreen.render()
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
aewallin/opencamlib
examples/python/waterline/waterline_7_cavity.py
Python
lgpl-2.1
3,896
[ "VTK" ]
c0e42dc951bf1689c350760c65aa5479a5ff53fd813bc6183c816648034a0891
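drawLoops() above walks each loop with an explicit counter so it can close the loop by drawing a final segment back to the first point. A hedged sketch of the same traversal, expressed with zip() over wrapped point pairs; ``segments`` is a hypothetical helper and plain 3-tuples stand in for ocl.Point:

def segments(loop):
    # Yield (p1, p2) endpoints for each edge of a closed loop.
    if len(loop) < 2:
        return
    for p1, p2 in zip(loop, loop[1:] + loop[:1]):  # wrap around to the first point
        yield p1, p2

loop = [(0, 0, 0), (1, 0, 0), (1, 1, 0)]
assert list(segments(loop))[-1] == ((1, 1, 0), (0, 0, 0))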
# -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)

import numpy as np
import copy

from sklearn.cluster import k_means
from sklearn.manifold import spectral_embedding
from sklearn.utils import check_random_state

import nslkdd.preprocessing as preprocessing
import sugarbee.reduction as reduction
import sugarbee.distance as distance
import sugarbee.affinity as affinity

#def assign_undirected_weight(W, i, j, v):
#    W[i,j] = W[j,i] = v

if __name__ == '__main__':
    df, headers, gmms = preprocessing.get_preprocessed_data()
    df = df[0:10]

    df_train = copy.deepcopy(df)
    df_train.drop('attack', 1, inplace=True)
    df_train.drop('difficulty', 1, inplace=True)

    proj = reduction.gmm_reduction(df_train, headers, gmms)

#    A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='euclidean', knn=3)
#    A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='manhattan', knn=2)
#    A = affinity.get_affinity_matrix(proj, metric_method=distance.gaussian, knn=5)
    A = affinity.get_affinity_matrix(proj, metric_method=distance.cosdist, knn=5)
    D = affinity.get_degree_matrix(A)
    print A
    print D

    W = A
    D[D == 0] = 1e-8  # don't laugh, there is a core package in R that actually does this
    D_hat = D**(-0.5)
    L = D_hat * (D - W) * D_hat

    U, S, V = np.linalg.svd(L, full_matrices=True)
    target = np.argmax(np.absolute(np.diff(S)))
    print V.transpose()[:,target+1]
zedoul/AnomalyDetection
test_discretization/test_affinity.py
Python
mit
1,535
[ "Gaussian" ]
6a3348e132511bfa91180ba504637c7210a6cb22372aac28670232d3a87c0c4d
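The spectral step in the script above builds the symmetric normalized Laplacian L = D^(-1/2) (D - W) D^(-1/2) and picks a split via the largest eigengap. A self-contained numpy sketch of that step, with a tiny made-up affinity matrix in place of the NSL-KDD data and explicit diagonal matrices instead of the script's matrix-type arithmetic:

import numpy as np

# Made-up 3-node affinity (adjacency) matrix: a path graph 0-1-2.
W = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
d = W.sum(axis=1)
d[d == 0] = 1e-8                       # same zero-degree guard as above
D_hat = np.diag(d ** -0.5)
L = D_hat @ (np.diag(d) - W) @ D_hat   # normalized Laplacian

U, S, V = np.linalg.svd(L)
target = np.argmax(np.abs(np.diff(S)))  # index of the largest eigengap
print(S, target)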
""" IO for ADF files. """ import os import re from monty.io import reverse_readline from monty.itertools import chunks from monty.json import MSONable from pymatgen.core.structure import Molecule __author__ = "Xin Chen, chenxin13@mails.tsinghua.edu.cn" def is_numeric(s): """ Return True is the string ``s`` is a numeric string. Parameters ---------- s : str A string. Returns ------- res : bool If True, ``s`` is a numeric string and can be converted to an int or a float. Otherwise False will be returned. """ try: float(s) except ValueError: return False else: return True def iterlines(s): r""" A generator form of s.split('\n') for reducing memory overhead. Parameters ---------- s : str A multi-line string. Yields ------ line : str A string. """ prevnl = -1 while True: nextnl = s.find("\n", prevnl + 1) if nextnl < 0: yield s[(prevnl + 1) :] break yield s[(prevnl + 1) : nextnl] prevnl = nextnl class AdfInputError(Exception): """ The default error class for ADF. """ pass class AdfOutputError(Exception): """ The default error class for errors raised by ``AdfOutput``. """ pass class AdfKey(MSONable): """ The basic input unit for ADF. A key is a string of characters that does not contain a delimiter (blank, comma or equal sign). A key may have multiple subkeys and a set of options. """ block_keys = { "SCF", "GEOMETRY", "XC", "UNITS", "ATOMS", "CHARGE", "BASIS", "SYMMETRY", "RELATIVISTIC", "OCCUPATIONS", "SAVE", "A1FIT", "INTEGRATION", "UNRESTRICTED", "ZLMFIT", "TITLE", "EXACTDENSITY", "TOTALENERGY", "ANALYTICALFREQ", } sub_keys = {"AtomDepQuality"} # Full blocks are blocks that must have an 'END'. _full_blocks = {"GEOMETRY", "SCF", "UNITS", "BASIS", "ANALYTICALFREQ"} def __init__(self, name, options=None, subkeys=None): """ Initialization method. Parameters ---------- name : str The name of this key. options : Sized The options for this key. Each element can be a primitive object or a tuple/list with two elements: the first is the name and the second is a primitive object. subkeys : Sized The subkeys for this key. Raises ------ ValueError If elements in ``subkeys`` are not ``AdfKey`` objects. """ self.name = name self.options = options if options is not None else [] self.subkeys = subkeys if subkeys is not None else [] if len(self.subkeys) > 0: for k in subkeys: if not isinstance(k, AdfKey): raise ValueError("Not all subkeys are ``AdfKey`` objects!") self._sized_op = None if len(self.options) > 0: self._sized_op = isinstance(self.options[0], (list, tuple)) def _options_string(self): """ Return the option string. """ if len(self.options) > 0: s = "" for op in self.options: if self._sized_op: s += "{:s}={:s} ".format(*map(str, op)) else: s += f"{str(op):s} " return s.strip() return "" def is_block_key(self): """ Return True if this key is a block key. """ return bool(self.name.upper() in self.block_keys) @property def key(self): """ Return the name of this key. If this is a block key, the name will be converted to upper cases. """ if self.is_block_key(): return self.name.upper() return self.name def __str__(self): """ Return the string representation of this ``AdfKey``. Notes ----- If this key is 'Atoms' and the coordinates are in Cartesian form, a different string format will be used. 
""" s = f"{self.key:s}" if len(self.options) > 0: s += f" {self._options_string():s}" s += "\n" if len(self.subkeys) > 0: if self.key.lower() == "atoms": for subkey in self.subkeys: s += "{:2s} {: 14.8f} {: 14.8f} {: 14.8f}\n".format(subkey.name, *subkey.options) else: for subkey in self.subkeys: s += str(subkey) if self.is_block_key(): s += "END\n" else: s += "subend\n" elif self.key.upper() in self._full_blocks: s += "END\n" return s def __eq__(self, other): if not isinstance(other, AdfKey): return False return str(self) == str(other) def has_subkey(self, subkey): """ Return True if this AdfKey contains the given subkey. Parameters ---------- subkey : str or AdfKey A key name or an AdfKey object. Returns ------- has : bool True if this key contains the given key. Otherwise False. """ if isinstance(subkey, str): key = subkey elif isinstance(subkey, AdfKey): key = subkey.key else: raise ValueError("The subkey should be an AdfKey or a string!") if len(self.subkeys) > 0: if key in map(lambda k: k.key, self.subkeys): return True return False def add_subkey(self, subkey): """ Add a new subkey to this key. Parameters ---------- subkey : AdfKey A new subkey. Notes ----- Duplicate check will not be performed if this is an 'Atoms' block. """ if self.key.lower() == "atoms" or not self.has_subkey(subkey): self.subkeys.append(subkey) def remove_subkey(self, subkey): """ Remove the given subkey, if existed, from this AdfKey. Parameters ---------- subkey : str or AdfKey The subkey to remove. """ if len(self.subkeys) > 0: key = subkey if isinstance(subkey, str) else subkey.key for i, v in enumerate(self.subkeys): if v.key == key: self.subkeys.pop(i) break def add_option(self, option): """ Add a new option to this key. Parameters ---------- option : Sized or str or int or float A new option to add. This must have the same format with exsiting options. Raises ------ TypeError If the format of the given ``option`` is different. """ if len(self.options) == 0: self.options.append(option) else: sized_op = isinstance(option, (list, tuple)) if self._sized_op != sized_op: raise TypeError("Option type is mismatched!") self.options.append(option) def remove_option(self, option): """ Remove an option. Parameters ---------- option : str or int The name (str) or index (int) of the option to remove. Raises ------ TypeError If the option has a wrong type. """ if len(self.options) > 0: if self._sized_op: if not isinstance(option, str): raise TypeError("``option`` should be a name string!") for i, v in enumerate(self.options): if v[0] == option: self.options.pop(i) break else: if not isinstance(option, int): raise TypeError("``option`` should be an integer index!") self.options.pop(option) def has_option(self, option): """ Return True if the option is included in this key. Parameters ---------- option : str The option. Returns ------- has : bool True if the option can be found. Otherwise False will be returned. """ if len(self.options) == 0: return False for op in self.options: if (self._sized_op and op[0] == option) or (op == option): return True return False def as_dict(self): """ A JSON serializable dict representation of self. """ d = { "@module": self.__class__.__module__, "@class": self.__class__.__name__, "name": self.name, "options": self.options, } if len(self.subkeys) > 0: subkeys = [] for subkey in self.subkeys: subkeys.append(subkey.as_dict()) d.update({"subkeys": subkeys}) return d def to_json(self): """ Return a json string representation of the MSONable AdfKey object. 
""" return super().to_json() @classmethod def from_dict(cls, d): """ Construct a MSONable AdfKey object from the JSON dict. Parameters ---------- d : dict A dict of saved attributes. Returns ------- adfkey : AdfKey An AdfKey object recovered from the JSON dict ``d``. """ key = d.get("name") options = d.get("options", None) subkey_list = d.get("subkeys", []) if len(subkey_list) > 0: subkeys = list(map(lambda k: AdfKey.from_dict(k), subkey_list)) else: subkeys = None return cls(key, options, subkeys) @staticmethod def from_string(string): """ Construct an AdfKey object from the string. Parameters ---------- string : str A string. Returns ------- adfkey : AdfKey An AdfKey object recovered from the string. Raises ------ ValueError Currently nested subkeys are not supported. If ``subend`` was found a ValueError would be raised. Notes ----- Only the first block key will be returned. """ def is_float(s): return "." in s or "E" in s or "e" in s if string.find("\n") == -1: el = string.split() if len(el) > 1: if string.find("=") != -1: options = list(map(lambda s: s.split("="), el[1:])) else: options = el[1:] for i, op in enumerate(options): if isinstance(op, list) and is_numeric(op[1]): op[1] = float(op[1]) if is_float(op[1]) else int(op[1]) elif is_numeric(op): options[i] = float(op) if is_float(op) else int(op) else: options = None return AdfKey(el[0], options) if string.find("subend") != -1: raise ValueError("Nested subkeys are not supported!") key = None for line in iterlines(string): if line == "": continue el = line.strip().split() if len(el) == 0: continue if el[0].upper() in AdfKey.block_keys: if key is None: key = AdfKey.from_string(line) else: return key elif el[0].upper() == "END": return key elif key is not None: key.add_subkey(AdfKey.from_string(line)) else: raise Exception("IncompleteKey: 'END' is missing!") class AdfTask(MSONable): """ Basic task for ADF. All settings in this class are independent of molecules. Notes ----- Unlike other quantum chemistry packages (NWChem, Gaussian, ...), ADF does not support calculating force/gradient. """ operations = { "energy": "Evaluate the single point energy.", "optimize": "Minimize the energy by varying the molecular structure.", "frequencies": "Compute second derivatives and print out an analysis of molecular vibrations.", "freq": "Same as frequencies.", "numerical_frequencies": "Compute molecular frequencies using numerical method.", } def __init__( self, operation="energy", basis_set=None, xc=None, title="ADF_RUN", units=None, geo_subkeys=None, scf=None, other_directives=None, ): """ Initialization method. Parameters ---------- operation : str The target operation. basis_set : AdfKey The basis set definitions for this task. Defaults to 'DZ/Large'. xc : AdfKey The exchange-correlation functionals. Defaults to PBE. title : str The title of this ADF task. units : AdfKey The units. Defaults to Angstroms/Degree. geo_subkeys : Sized The subkeys for the block key 'GEOMETRY'. scf : AdfKey The scf options. other_directives : Sized User-defined directives. 
""" if operation not in self.operations.keys(): raise AdfInputError(f"Invalid ADF task {operation:s}") self.operation = operation self.title = title self.basis_set = basis_set if basis_set is not None else self.get_default_basis_set() self.xc = xc if xc is not None else self.get_default_xc() self.units = units if units is not None else self.get_default_units() self.scf = scf if scf is not None else self.get_default_scf() self.other_directives = other_directives if other_directives is not None else [] self._setup_task(geo_subkeys) @staticmethod def get_default_basis_set(): """ Returns: Default basis set """ return AdfKey.from_string("Basis\ntype DZ\ncore small\nEND") @staticmethod def get_default_scf(): """ Returns: ADF using default SCF. """ return AdfKey.from_string("SCF\niterations 300\nEND") @staticmethod def get_default_geo(): """ Returns: ADFKey using default geometry. """ return AdfKey.from_string("GEOMETRY SinglePoint\nEND") @staticmethod def get_default_xc(): """ Returns: ADFKey using default XC. """ return AdfKey.from_string("XC\nGGA PBE\nEND") @staticmethod def get_default_units(): """ Returns: Default units. """ return AdfKey.from_string("Units\nlength angstrom\nangle degree\nEnd") def _setup_task(self, geo_subkeys): """ Setup the block 'Geometry' given subkeys and the task. Parameters ---------- geo_subkeys : Sized User-defined subkeys for the block 'Geometry'. Notes ----- Most of the run types of ADF are specified in the Geometry block except the 'AnalyticFreq'. """ self.geo = AdfKey("Geometry", subkeys=geo_subkeys) if self.operation.lower() == "energy": self.geo.add_option("SinglePoint") if self.geo.has_subkey("Frequencies"): self.geo.remove_subkey("Frequencies") elif self.operation.lower() == "optimize": self.geo.add_option("GeometryOptimization") if self.geo.has_subkey("Frequencies"): self.geo.remove_subkey("Frequencies") elif self.operation.lower() == "numerical_frequencies": self.geo.add_subkey(AdfKey("Frequencies")) else: self.other_directives.append(AdfKey("AnalyticalFreq")) if self.geo.has_subkey("Frequencies"): self.geo.remove_subkey("Frequencies") def __str__(self): s = """TITLE {title}\n {units} {xc} {basis_set} {scf} {geo}""".format( title=self.title, units=str(self.units), xc=str(self.xc), basis_set=str(self.basis_set), scf=str(self.scf), geo=str(self.geo), ) s += "\n" for block_key in self.other_directives: if not isinstance(block_key, AdfKey): raise ValueError(f"{str(block_key)} is not an AdfKey!") s += str(block_key) + "\n" return s def as_dict(self): """ A JSON serializable dict representation of self. """ return { "@module": self.__class__.__module__, "@class": self.__class__.__name__, "operation": self.operation, "title": self.title, "xc": self.xc.as_dict(), "basis_set": self.basis_set.as_dict(), "units": self.units.as_dict(), "scf": self.scf.as_dict(), "geo": self.geo.as_dict(), "others": [k.as_dict() for k in self.other_directives], } def to_json(self): """ Return a json string representation of the MSONable AdfTask object. """ return super().to_json() @classmethod def from_dict(cls, d): """ Construct a MSONable AdfTask object from the JSON dict. Parameters ---------- d : dict A dict of saved attributes. Returns ------- task : AdfTask An AdfTask object recovered from the JSON dict ``d``. 
""" def _from_dict(_d): return AdfKey.from_dict(_d) if _d is not None else None operation = d.get("operation") title = d.get("title") basis_set = _from_dict(d.get("basis_set")) xc = _from_dict(d.get("xc")) units = _from_dict(d.get("units")) scf = _from_dict(d.get("scf")) others = [AdfKey.from_dict(o) for o in d.get("others", [])] geo = _from_dict(d.get("geo")) return cls(operation, basis_set, xc, title, units, geo.subkeys, scf, others) class AdfInput: """ A basic ADF input file writer. """ def __init__(self, task): """ Initialization method. Parameters ---------- task : AdfTask An ADF task. """ self.task = task def write_file(self, molecule, inpfile): """ Write an ADF input file. Parameters ---------- molecule : Molecule The molecule for this task. inpfile : str The name where the input file will be saved. """ mol_blocks = [] atom_block = AdfKey("Atoms", options=["cartesian"]) for site in molecule: atom_block.add_subkey(AdfKey(str(site.specie), list(site.coords))) mol_blocks.append(atom_block) if molecule.charge != 0: netq = molecule.charge ab = molecule.spin_multiplicity - 1 charge_block = AdfKey("Charge", [netq, ab]) mol_blocks.append(charge_block) if ab != 0: unres_block = AdfKey("Unrestricted") mol_blocks.append(unres_block) with open(inpfile, "w+") as f: for block in mol_blocks: f.write(str(block) + "\n") f.write(str(self.task) + "\n") f.write("END INPUT") class AdfOutput: """ A basic ADF output file parser. Attributes ---------- is_failed : bool True is the ADF job is terminated without success. Otherwise False. is_internal_crash : bool True if the job is terminated with internal crash. Please read 'TAPE13' of the ADF manual for more detail. error : str The error description. run_type : str The RunType of this ADF job. Possible options are: 'SinglePoint', 'GeometryOptimization', 'AnalyticalFreq' and 'NUmericalFreq'. final_energy : float The final molecule energy (a.u). final_structure : GMolecule The final structure of the molecule. energies : Sized The energy of each cycle. structures : Sized The structure of each cycle If geometry optimization is performed. frequencies : array_like The frequencies of the molecule. normal_modes : array_like The normal modes of the molecule. freq_type : str Either 'Analytical' or 'Numerical'. """ def __init__(self, filename): """ Initialization method. Parameters ---------- filename : str The ADF output file to parse. """ self.filename = filename self._parse() def _parse(self): """ Parse the ADF outputs. There are two files: one is 'logfile', the other is the ADF output file. The final energy and structures are parsed from the 'logfile'. Frequencies and normal modes are parsed from the ADF output file. """ workdir = os.path.dirname(self.filename) logfile = os.path.join(workdir, "logfile") if not os.path.isfile(logfile): raise OSError("The ADF logfile can not be accessed!") self.is_failed = False self.error = None self.final_energy = None self.final_structure = None self.energies = [] self.structures = [] self.frequencies = [] self.normal_modes = None self.freq_type = None self.run_type = None self.is_internal_crash = False self._parse_logfile(logfile) if not self.is_failed and self.run_type != "SinglePoint": self._parse_adf_output() @staticmethod def _sites_to_mol(sites): """ Return a ``Molecule`` object given a list of sites. Parameters ---------- sites : list A list of sites. Returns ------- mol : Molecule A ``Molecule`` object. 
""" return Molecule([site[0] for site in sites], [site[1] for site in sites]) def _parse_logfile(self, logfile): """ Parse the formatted logfile. """ cycle_patt = re.compile(r"Coordinates\sin\sGeometry\sCycle\s(\d+)") coord_patt = re.compile(r"\s+([0-9]+)\.([A-Za-z]+)" + 3 * r"\s+([-\.0-9]+)") energy_patt = re.compile(r"<.*>\s<.*>\s+current\senergy\s+([-\.0-9]+)\sHartree") final_energy_patt = re.compile(r"<.*>\s<.*>\s+Bond\sEnergy\s+([-\.0-9]+)\sa\.u\.") error_patt = re.compile(r"<.*>\s<.*>\s+ERROR\sDETECTED:\s(.*)") runtype_patt = re.compile(r"<.*>\s<.*>\s+RunType\s+:\s(.*)") end_patt = re.compile(r"<.*>\s<.*>\s+END") parse_cycle = False sites = [] last_cycle = -1 parse_final = False # Stop parsing the logfile is this job is not terminated successfully. # The last non-empty line of the logfile must match the end pattern. # Otherwise the job has some internal failure. The TAPE13 part of the # ADF manual has a detailed explanantion. with open(logfile) as f: for line in reverse_readline(f): if line == "": continue if end_patt.search(line) is None: self.is_internal_crash = True self.error = "Internal crash. TAPE13 is generated!" self.is_failed = True return break with open(logfile) as f: for line in f: m = error_patt.search(line) if m: self.is_failed = True self.error = m.group(1) break if self.run_type is None: m = runtype_patt.search(line) if m: if m.group(1) == "FREQUENCIES": self.freq_type = "Numerical" self.run_type = "NumericalFreq" elif m.group(1) == "GEOMETRY OPTIMIZATION": self.run_type = "GeometryOptimization" elif m.group(1) == "CREATE": self.run_type = None elif m.group(1) == "SINGLE POINT": self.run_type = "SinglePoint" else: raise AdfOutputError("Undefined Runtype!") elif self.run_type == "SinglePoint": m = coord_patt.search(line) if m: sites.append([m.groups()[0], list(map(float, m.groups()[2:]))]) else: m = final_energy_patt.search(line) if m: self.final_energy = float(m.group(1)) self.final_structure = self._sites_to_mol(sites) elif self.run_type == "GeometryOptimization": m = cycle_patt.search(line) if m: cycle = int(m.group(1)) if cycle <= 0: raise AdfOutputError(f"Wrong cycle {cycle}") if cycle > last_cycle: parse_cycle = True last_cycle = cycle else: parse_final = True elif parse_cycle: m = coord_patt.search(line) if m: sites.append([m.groups()[1], list(map(float, m.groups()[2:]))]) else: m = energy_patt.search(line) if m: self.energies.append(float(m.group(1))) mol = self._sites_to_mol(sites) self.structures.append(mol) parse_cycle = False sites = [] elif parse_final: m = final_energy_patt.search(line) if m: self.final_energy = float(m.group(1)) elif self.run_type == "NumericalFreq": break if not self.is_failed: if self.run_type == "GeometryOptimization": if len(self.structures) > 0: self.final_structure = self.structures[-1] if self.final_energy is None: raise AdfOutputError("The final energy can not be read!") elif self.run_type == "SinglePoint": if self.final_structure is None: raise AdfOutputError("The final structure is missing!") if self.final_energy is None: raise AdfOutputError("The final energy can not be read!") def _parse_adf_output(self): """ Parse the standard ADF output file. 
""" numerical_freq_patt = re.compile(r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sI\sE\sS\s+\*") analytic_freq_patt = re.compile(r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sY\s+A\sN\sA\sL\sY\sS\sI\sS\s+\*") freq_on_patt = re.compile(r"Vibrations\sand\sNormal\sModes\s+\*+.*\*+") freq_off_patt = re.compile(r"List\sof\sAll\sFrequencies:") mode_patt = re.compile(r"\s+(\d+)\.([A-Za-z]+)\s+(.*)") coord_patt = re.compile(r"\s+(\d+)\s+([A-Za-z]+)" + 6 * r"\s+([0-9\.-]+)") coord_on_patt = re.compile(r"\s+\*\s+R\sU\sN\s+T\sY\sP\sE\s:\sFREQUENCIES\s+\*") parse_freq = False parse_mode = False nnext = 0 nstrike = 0 sites = [] self.frequencies = [] self.normal_modes = [] if self.final_structure is None: find_structure = True parse_coord = False natoms = 0 else: find_structure = False parse_coord = False natoms = self.final_structure.num_sites with open(self.filename) as f: for line in f: if self.run_type == "NumericalFreq" and find_structure: if not parse_coord: m = coord_on_patt.search(line) if m: parse_coord = True else: m = coord_patt.search(line) if m: sites.append([m.group(2), list(map(float, m.groups()[2:5]))]) nstrike += 1 elif nstrike > 0: find_structure = False self.final_structure = self._sites_to_mol(sites) natoms = self.final_structure.num_sites elif self.freq_type is None: if numerical_freq_patt.search(line): self.freq_type = "Numerical" elif analytic_freq_patt.search(line): self.freq_type = "Analytical" self.run_type = "AnalyticalFreq" elif freq_on_patt.search(line): parse_freq = True elif parse_freq: if freq_off_patt.search(line): break el = line.strip().split() if 1 <= len(el) <= 3 and line.find(".") != -1: nnext = len(el) parse_mode = True parse_freq = False self.frequencies.extend(map(float, el)) for i in range(nnext): self.normal_modes.append([]) elif parse_mode: m = mode_patt.search(line) if m: v = list(chunks(map(float, m.group(3).split()), 3)) if len(v) != nnext: raise AdfOutputError("Odd Error!") for i, k in enumerate(range(-nnext, 0, 1)): self.normal_modes[k].extend(v[i]) if int(m.group(1)) == natoms: parse_freq = True parse_mode = False if isinstance(self.final_structure, list): self.final_structure = self._sites_to_mol(self.final_structure) if self.freq_type is not None: if len(self.frequencies) != len(self.normal_modes): raise AdfOutputError("The number of normal modes is wrong!") if len(self.normal_modes[0]) != natoms * 3: raise AdfOutputError("The dimensions of the modes are wrong!")
vorwerkc/pymatgen
pymatgen/io/adf.py
Python
mit
31,369
[ "ADF", "Gaussian", "NWChem", "pymatgen" ]
7c568b63766fa3a14a976fbb2374d8c3eb5ad439535fa7144040bd8fc2c00fb2
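A hedged usage sketch for the classes defined in the file above, based only on the signatures visible there: parse an AdfKey from text, compose an AdfTask, and write an input file. The water geometry, the BLYP functional choice, and the output filename are invented for illustration.

from pymatgen.core.structure import Molecule
from pymatgen.io.adf import AdfKey, AdfTask, AdfInput

xc = AdfKey.from_string("XC\nGGA BLYP\nEND")   # parsed into a block key with one subkey
task = AdfTask(operation="optimize", xc=xc)    # geometry optimization run

mol = Molecule(["O", "H", "H"],
               [[0.00, 0.00, 0.00],
                [0.00, 0.00, 0.96],
                [0.93, 0.00, -0.24]])          # illustrative coordinates
AdfInput(task).write_file(mol, "adf.inp")      # hypothetical output path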
"""Test string and unicode support in VTK-Python The following string features have to be tested for string and unicode - Pass a string arg by value - Pass a string arg by reference - Return a string arg by value - Return a string arg by reference The following features are not supported - Pointers to strings, arrays of strings - Passing a string arg by reference and returning a value in it Created on May 12, 2010 by David Gobbi """ import sys import vtk from vtk.test import Testing if sys.hexversion >= 0x03000000: cedilla = 'Fran\xe7ois' nocedilla = 'Francois' eightbit = 'Francois'.encode('ascii') else: cedilla = unicode('Fran\xe7ois', 'latin1') nocedilla = unicode('Francois') eightbit = 'Francois' class TestString(Testing.vtkTest): def testPassByValue(self): """Pass string by value... hard to find examples of this, because "const char *" methods shadow "vtkStdString" methods. """ self.assertEqual('y', 'y') def testReturnByValue(self): """Return a string by value.""" a = vtk.vtkArray.CreateArray(1, vtk.VTK_INT) a.Resize(1,1) a.SetDimensionLabel(0, 'x') s = a.GetDimensionLabel(0) self.assertEqual(s, 'x') def testPassByReference(self): """Pass a string by reference.""" a = vtk.vtkArray.CreateArray(0, vtk.VTK_STRING) a.SetName("myarray") s = a.GetName() self.assertEqual(s, "myarray") def testReturnByReference(self): """Return a string by reference.""" a = vtk.vtkStringArray() s = "hello" a.InsertNextValue(s) t = a.GetValue(0) self.assertEqual(t, s) def testPassAndReturnUnicodeByReference(self): """Pass a unicode string by const reference""" a = vtk.vtkUnicodeStringArray() a.InsertNextValue(cedilla) u = a.GetValue(0) self.assertEqual(u, cedilla) def testPassBytesAsUnicode(self): """Pass 8-bit string when unicode is expected. Should fail.""" a = vtk.vtkUnicodeStringArray() self.assertRaises(TypeError, a.InsertNextValue, eightbit) def testPassUnicodeAsString(self): """Pass unicode where string is expected. Should succeed.""" a = vtk.vtkStringArray() a.InsertNextValue(nocedilla) s = a.GetValue(0) self.assertEqual(s, 'Francois') def testPassBytesAsString(self): """Pass 8-bit string where string is expected. Should succeed.""" a = vtk.vtkStringArray() a.InsertNextValue(eightbit) s = a.GetValue(0) self.assertEqual(s, 'Francois') def testPassEncodedString(self): """Pass encoded 8-bit strings.""" a = vtk.vtkStringArray() # latin1 encoded string will be returned as "bytes", which is # just a normal str object in Python 2 encoded = cedilla.encode('latin1') a.InsertNextValue(encoded) result = a.GetValue(0) self.assertEqual(type(result), bytes) self.assertEqual(result, encoded) # utf-8 encoded string will be returned as "str", which is # actually unicode in Python 3 a = vtk.vtkStringArray() encoded = cedilla.encode('utf-8') a.InsertNextValue(encoded) result = a.GetValue(0) self.assertEqual(type(result), str) if sys.hexversion >= 0x03000000: self.assertEqual(result.encode('utf-8'), encoded) else: self.assertEqual(result, encoded) if __name__ == "__main__": Testing.main([(TestString, 'test')])
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/VTK/Common/Core/Testing/Python/TestStrings.py
Python
gpl-3.0
3,620
[ "VTK" ]
8599af3c0738c74a43cb1b6eaa2aca4052a0559325bb39cd74fd28ab9a2796d0
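The encoding behaviour that testPassEncodedString pins down does not depend on VTK at all; a VTK-free sketch of the same fact, assuming Python 3 semantics: latin-1 bytes are not valid UTF-8 and so stay opaque bytes, while UTF-8 bytes round-trip back to str.

cedilla = 'Fran\xe7ois'

latin1 = cedilla.encode('latin1')
try:
    latin1.decode('utf-8')          # fails: a lone 0xE7 is not valid UTF-8
except UnicodeDecodeError:
    pass

utf8 = cedilla.encode('utf-8')
assert utf8.decode('utf-8') == cedilla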
#!/usr/bin/env python

"""
Created on Jun 19, 2012
"""

import unittest
import os
import warnings

from pymongo import MongoClient
from pymongo.errors import ConnectionFailure

from pymatgen.apps.borg.queen import BorgQueen
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.core.structure import Structure
from pymatgen.db.query_engine import QueryEngine
from pymatgen.db.creator import VaspToDbTaskDrone
from pymatgen.db.tests import common

__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jun 19, 2012"

test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files")

has_mongo = common.has_mongo()


class VaspToDbTaskDroneTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        if has_mongo:
            try:
                cls.conn = MongoClient()
            except ConnectionFailure:
                cls.conn = None
        else:
            cls.conn = None

    @unittest.skipUnless(has_mongo, "MongoDB connection required")
    def test_get_valid_paths(self):
        drone = VaspToDbTaskDrone(simulate_mode=True)
        all_paths = []
        for path in os.walk(os.path.join(test_dir, "db_test")):
            all_paths.extend(drone.get_valid_paths(path))
        self.assertEqual(len(all_paths), 6)

    @unittest.skipUnless(has_mongo, "MongoDB connection required")
    def test_to_from_dict(self):
        drone = VaspToDbTaskDrone(database="wacky", simulate_mode=True)
        d = drone.as_dict()
        drone = VaspToDbTaskDrone.from_dict(d)
        self.assertTrue(drone.simulate)
        self.assertEqual(drone.database, "wacky")

    @unittest.skipUnless(has_mongo, "MongoDB connection required")
    def test_assimilate(self):
        """Borg assimilation code. This takes too long for a unit test!"""
        simulate = True if VaspToDbTaskDroneTest.conn is None else False
        drone = VaspToDbTaskDrone(
            database="creator_unittest",
            simulate_mode=simulate,
            parse_dos=True,
            compress_dos=1,
        )
        queen = BorgQueen(drone)
        queen.serial_assimilate(os.path.join(test_dir, "db_test"))
        data = queen.get_data()
        self.assertEqual(len(data), 6)
        if VaspToDbTaskDroneTest.conn:
            db = VaspToDbTaskDroneTest.conn["creator_unittest"]
            data = db.tasks.find()
            self.assertEqual(db.tasks.count_documents({}), 6)
            warnings.warn("Actual db insertion mode.")

        for d in data:
            dir_name = d["dir_name"]
            if dir_name.endswith("killed_mp_aflow"):
                self.assertEqual(d["state"], "killed")
                self.assertFalse(d["is_hubbard"])
                self.assertEqual(d["pretty_formula"], "SiO2")
            elif dir_name.endswith("stopped_mp_aflow"):
                self.assertEqual(d["state"], "stopped")
                self.assertEqual(d["pretty_formula"], "ThFe5P3")
            elif dir_name.endswith("success_mp_aflow"):
                self.assertEqual(d["state"], "successful")
                self.assertEqual(d["pretty_formula"], "TbZn(BO2)5")
                self.assertAlmostEqual(d["output"]["final_energy"], -526.66747274, 4)
            elif dir_name.endswith("Li2O_aflow"):
                self.assertEqual(d["state"], "successful")
                self.assertEqual(d["pretty_formula"], "Li2O")
                self.assertAlmostEqual(d["output"]["final_energy"], -14.31446494, 6)
                self.assertEqual(len(d["calculations"]), 2)
                self.assertEqual(d["input"]["is_lasph"], False)
                self.assertEqual(d["input"]["xc_override"], None)
                self.assertEqual(d["oxide_type"], "oxide")
            elif dir_name.endswith("Li2O"):
                self.assertEqual(d["state"], "successful")
                self.assertEqual(d["pretty_formula"], "Li2O")
                self.assertAlmostEqual(d["output"]["final_energy"], -14.31337758, 6)
                self.assertEqual(len(d["calculations"]), 1)
                self.assertEqual(len(d["custodian"]), 1)
                self.assertEqual(len(d["custodian"][0]["corrections"]), 1)
            elif dir_name.endswith("Li2O_aflow_lasph"):
                self.assertEqual(d["state"], "successful")
                self.assertEqual(d["pretty_formula"], "Li2O")
                self.assertAlmostEqual(d["output"]["final_energy"], -13.998171, 6)
                self.assertEqual(len(d["calculations"]), 2)
                self.assertEqual(d["input"]["is_lasph"], True)
                self.assertEqual(d["input"]["xc_override"], "PS")

        if VaspToDbTaskDroneTest.conn:
            warnings.warn("Testing query engine mode.")
            qe = QueryEngine(database="creator_unittest")
            self.assertEqual(len(qe.query()), 6)
            # Test mappings by query engine.
            for r in qe.query(
                criteria={"pretty_formula": "Li2O"},
                properties=[
                    "dir_name",
                    "energy",
                    "calculations",
                    "input",
                    "oxide_type",
                ],
            ):
                if r["dir_name"].endswith("Li2O_aflow"):
                    self.assertAlmostEqual(r["energy"], -14.31446494, 4)
                    self.assertEqual(len(r["calculations"]), 2)
                    self.assertEqual(r["input"]["is_lasph"], False)
                    self.assertEqual(r["input"]["xc_override"], None)
                    self.assertEqual(r["oxide_type"], "oxide")
                elif r["dir_name"].endswith("Li2O"):
                    self.assertAlmostEqual(r["energy"], -14.31337758, 4)
                    self.assertEqual(len(r["calculations"]), 1)
                    self.assertEqual(r["input"]["is_lasph"], False)
                    self.assertEqual(r["input"]["xc_override"], None)

            # Test lasph
            e = qe.get_entries({"dir_name": {"$regex": "lasph"}})
            self.assertEqual(len(e), 1)
            self.assertEqual(e[0].parameters["is_lasph"], True)
            self.assertEqual(e[0].parameters["xc_override"], "PS")

            # Test query one.
            d = qe.query_one(criteria={"pretty_formula": "TbZn(BO2)5"}, properties=["energy"])
            self.assertAlmostEqual(d["energy"], -526.66747274, 4)

            d = qe.get_entries_in_system(["Li", "O"])
            self.assertEqual(len(d), 3)
            self.assertIsInstance(d[0], ComputedEntry)
            self.assertEqual(d[0].data["oxide_type"], "oxide")

            s = qe.get_structure_from_id(d[0].entry_id)
            self.assertIsInstance(s, Structure)
            self.assertEqual(s.formula, "Li2 O1")
            self.assertIsInstance(qe.get_dos_from_id(d[0].entry_id), CompleteDos)

    @classmethod
    def tearDownClass(cls):
        if cls.conn is not None:
            cls.conn.drop_database("creator_unittest")


if __name__ == "__main__":
    unittest.main()
materialsproject/pymatgen-db
pymatgen/db/tests/test_creator_and_query_engine.py
Python
mit
7,165
[ "pymatgen" ]
4402286ba2dc87a4617e7b8420e9401ce4fd36c1ba02985be46fd27efb342e4b
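A hedged sketch of the assimilation flow that test_assimilate exercises, using only calls that appear in the test itself; the database name and run directory below are invented. In simulate mode the drone parses VASP run directories found by the BorgQueen without writing to MongoDB.

from pymatgen.apps.borg.queen import BorgQueen
from pymatgen.db.creator import VaspToDbTaskDrone

drone = VaspToDbTaskDrone(database="demo_db", simulate_mode=True)
queen = BorgQueen(drone)
queen.serial_assimilate("/path/to/vasp/runs")   # hypothetical directory
print(len(queen.get_data()), "tasks parsed")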
# coding: utf-8 # In[46]: ##imports import numpy as np import networkx as nx import math import matplotlib.pyplot as plt # In[1]: #function to generate real valued som for graph input def initialise_network(X, num_neurons, w): #network will be a one dimensional list network = nx.Graph() #dimension of data in X d = len(X[0]) for i in range(num_neurons): ##position network.add_node(i) ##weight network.node[i]['v'] = 2 * (np.random.rand(d) - 0.5) * w ##list of closest nodes network.node[i]['ls'] = [] ##error of neuron network.node[i]['e'] = 0 ##som for neuron network.node[i]['n'] = [] ## signal frequency network.node[i]['r'] = 0 ##add edges for j in range(max(0, i-2), i): network.add_edge(i, j) #return network return network # In[6]: # function to train SOM on given graph def train_network(X, network, num_epochs, eta_0, sigma_0, N): #initial learning rate eta = eta_0 #initial neighbourhood size sigma = sigma_0 #list if all patterns to visit training_patterns = [p for p in range(len(X))] for e in range(num_epochs): #shuffle nodes np.random.shuffle(training_patterns) # iterate through N nodes of graph for i in range(N): #data point to consider x = X[training_patterns[i]] #determine winning neuron win_neuron = winning_neuron(x, network) # update weights update_weights(x, network, win_neuron, eta, sigma) # drop neighbourhood sigma = sigma_0 * np.exp(-2 * sigma_0 * e / num_epochs); # ##decay signal frequency # for i in network.nodes(): # network.node[i]['r'] -= 0.05 * network.node[i]['r'] # return network # In[43]: # winning neuron def winning_neuron(x, network): # minimum distance so far min_dist = math.inf win_neuron = [] # iterate through network for i in network.nodes(): #unpack network v = network.node[i]['v'] if len(v) != len(x): print() print(v) print(x) print() #distance between input vector and neuron weight distance = np.linalg.norm(x - v) # if we have a new closest neuron if distance < min_dist: min_dist = distance win_neuron = i ##increment signal frequency network.node[win_neuron]['r'] += 1 #return return win_neuron # In[4]: # function to update weights def update_weights(x, network, win_neuron, eta, sigma): # iterate through all neurons in network for i in network.nodes(): #unpack v = network.node[i]['v'] #new v -- move along shortest path by move distance v += eta * neighbourhood(network, i, win_neuron, sigma) * (x - v) #save to network network.node[i]['v'] = v ##decay signal frequency network.node[i]['r'] -= 0.05 * network.node[i]['r'] # return network # In[3]: # neighbourhood function def neighbourhood(network, r, win_neuron, sigma): return np.exp(-(nx.shortest_path_length(network, r, win_neuron)) ** 2 / (2 * sigma ** 2)) # In[1]: # assign nodes into clusters def assign_nodes(G, X, network, layer): #number of neurons in network num_neurons = nx.number_of_nodes(network) #number of nodes num_nodes = nx.number_of_nodes(G) # clear existing closest node list for i in network.nodes(): network.node[i]['ls'] = [] network.node[i]['e'] = 0 # assign colour to each node for n in range(num_nodes): #data point to assign x = X[n] #intialise distance to be infinity min_distance = math.inf #closest reference vector to this ndoe closest_ref = [] # find which neuron's referece vector this node is closest to for i in network.nodes(): #unpack network v = network.node[i]['v'] # calculate distance to that reference vector d = np.linalg.norm(x - v) if d < min_distance: min_distance = d closest_ref = i #unable to find closest ref if closest_ref == []: continue #add node to closest nodes list 
network.node[closest_ref]['ls'].append(G.nodes()[n]) #increase e by distance network.node[closest_ref]['e'] += min_distance #return assignment list # return network # In[15]: ##function to return lattice grid of errors def update_errors(network): #mean network error mqe = 0; #number of nodes num_neurons = nx.number_of_nodes(network) #iterate over all neurons and average distance for i in network.nodes(): #unpack network e = network.node[i]['e'] ls = network.node[i]['ls'] if len(ls) == 0: continue #divide by len(ls) for mean e /= len(ls) #sum total errors mqe += e #save error to network network.node[i]['e'] = e #mean mqe /= num_neurons #return network # return network, mqe return mqe # In[26]: ##function to identify neuron with greatest error def identify_error_unit(network): #first node n = network.nodes()[0] #initial value for maximum error found max_e = network.node[n]['e'] #initial index to return error_node = n for i in network.nodes(): #unpack e = network.node[i]['e'] #check if this unit has greater error than maximum if e > max_e: max_e = e error_node = i #return id of unit with maximum error return error_node # In[20]: ##function to expand som using given error unit def expand_network(network, error_unit): #identify neighbour pointing furthest away error_unit_neighbours = network.neighbors(error_unit) #id of new node id = nx.number_of_nodes(network) #v of error unit ve = network.node[error_unit]['v'] #dimension d = len(ve) ## if len(error_unit_neighbours) == 0: ##random position ##position network.add_node(id) ##weight v = 2 * (np.random.rand(d) - 0.5) * 1e-2 network.node[id]['v'] = v ##list of closest nodes ls = [] network.node[id]['ls'] = ls ##error of neuron e = 0 network.node[id]['e'] = e ##som for neuron n = [] network.node[id]['n'] = n ## signal frequency r = 0 network.node[id]['r'] = r ##add edge network.add_edge(error_unit, id) elif len(error_unit_neighbours) == 1: #neighbour neighbour = error_unit_neighbours[0] #v of neighbour vn = network.node[neighbour]['v'] ##position network.add_node(id) ##weight v = (ve + vn) / 2 network.node[id]['v'] = v ##list of closest nodes ls = [] network.node[id]['ls'] = ls ##error of neuron e = 0 network.node[id]['e'] = e ##som for neuron n = [] network.node[id]['n'] = n ## signal frequency r = 0 network.node[id]['r'] = r ##add edge network.add_edge(error_unit, id) network.add_edge(neighbour, id) else: #neighbour id n1 = furthest_neuron(network, error_unit, error_unit_neighbours) ##v of n1 v_n1 = network.node[n1]['v'] #now we have identified neighbour pointing furthest away in input space #take mean and produce new neuron ##must find mutual neighbours neighbour_neighbours = network.neighbors(n1) ##mutual neighbours mutual_neighbours = [n for n in error_unit_neighbours if n in neighbour_neighbours] ##second furthest node n2 = furthest_neuron(network, error_unit, mutual_neighbours) #v of n2 v_n2 = network.node[n2]['v'] ##position network.add_node(id) ##weight v = (ve + v_n1 + v_n2) / 3 network.node[id]['v'] = v ##list of closest nodes ls = [] network.node[id]['ls'] = ls ##error of neuron e = 0 network.node[id]['e'] = e ##som for neuron n = [] network.node[id]['n'] = n ## signal frequency r = 0 network.node[id]['r'] = r #remove edge from n1 and n2 network.remove_edge(n1, n2) #connect new node to all nodes that are connected to both neighbours (including error unit) for neuron in network.nodes(): if network.has_edge(n1, neuron) and network.has_edge(n2, neuron): ##add edges network.add_edge(neuron, id) network.add_edge(n1, id) network.add_edge(n2, 
id) #return network # return network # In[42]: ##function to expand som using given error unit def expand_network2(G, network, error_unit, layer): #id of new node id = nx.number_of_nodes(network) #v of error unit ve = network.node[error_unit]['v'] #dimension d = len(ve) ##position network.add_node(id) ##weight network.node[id]['v'] = ve ##list of closest nodes network.node[id]['ls'] = [] ##error of neuron network.node[id]['e'] = 0 ##som for neuron network.node[id]['n'] = [] ## signal frequency network.node[id]['r'] = 0 #point error unit to furthest datapoint network.node[error_unit]['v'] = furthest_datapoint(G, network, error_unit, layer) #identify neighbour pointing furthest away error_unit_neighbours = network.neighbors(error_unit) print('furthest datapoint',network.node[error_unit]['v']) ## if len(error_unit_neighbours) == 0: ##add edge network.add_edge(error_unit, id) elif len(error_unit_neighbours) == 1: #neighbour neighbour = error_unit_neighbours[0] ##add edge network.add_edge(error_unit, id) network.add_edge(neighbour, id) else: #neighbour id n1 = furthest_neuron(network, error_unit, error_unit_neighbours) #now we have identified neighbour pointing furthest away in input space #take mean and produce new neuron ##must find mutual neighbours neighbour_neighbours = network.neighbors(n1) ##mutual neighbours mutual_neighbours = [n for n in error_unit_neighbours if n in neighbour_neighbours] ##second furthest node n2 = furthest_neuron(network, error_unit, mutual_neighbours) #remove edge from n1 and n2 network.remove_edge(n1, n2) #connect new node to all nodes that are connected to both neighbours (including error unit) for neuron in network.nodes(): if network.has_edge(n1, neuron) and network.has_edge(n2, neuron): ##add edges network.add_edge(neuron, id) network.add_edge(n1, id) network.add_edge(n2, id) #return network # return network # In[41]: ##find vector of furthest datapoint def furthest_datapoint(G, network, n, layer): v = network.node[n]['v'] ls = network.node[n]['ls'] if len(ls) == 0: print('empty list',network.node[n]['e']) max_dist = -math.inf furthest_x = [] for node in ls: # x = G.node[node]['embedding'+str(layer)] x = G.node[node]['embedding0'] d = np.linalg.norm(x - v) if d > max_dist: max_dist = d furthest_x = x return furthest_x # In[42]: ##function to find neuron pointing furthest away in list def furthest_neuron(network, error_unit, ls): vi = network.node[error_unit]['v'] max_dist = -math.inf #neighbour id furthest_node = [] #iterate through neighbours for i in ls: #unpack neighbour v = network.node[i]['v'] #distance in input space d = np.linalg.norm(v - vi) #is d > max_dist? 
if d > max_dist: max_dist = d furthest_node = i return furthest_node # In[22]: ##delete a neuron def delete_neurons(network): deleted = False # print('number of neurons:',len(network)) for n in network.nodes(): r = network.node[n]['r'] # print('r',r) if r < 0.09: print('deleting neuron',n) neighbours = network.neighbors(n) num_neighbours = len(neighbours) for n1 in range(num_neighbours): for n2 in range(n1+1,num_neighbours): network.add_edge(neighbours[n1], neighbours[n2]) network.remove_node(n) deleted = True return deleted # In[5]: ##function to visualise graph def visualise_graph(G, colours, layer): ## create new figure for graph plot fig, ax = plt.subplots() # graph layout pos = nx.spring_layout(G) #attributes in this graph attributes = np.unique([v for k,v in nx.get_node_attributes(G, 'community'+str(layer)).items()]) # draw nodes -- colouring by cluster for i in range(min(len(colours), len(attributes))): node_list = [n for n in G.nodes() if G.node[n]['community'+str(layer)] == attributes[i]] colour = [colours[i] for n in range(len(node_list))] nx.draw_networkx_nodes(G, pos, nodelist=node_list, node_color=colour) #draw edges nx.draw_networkx_edges(G, pos) # draw labels nx.draw_networkx_labels(G, pos) #title of plot plt.title('Nodes coloured by cluster, layer: '+str(layer)) #show plot plt.show() # In[4]: ## visualise graph based on network clusters def visualise_network(network, colours, layer): #num neurons in lattice num_neurons = len(network) ##create new figure for lattice plot fig, ax = plt.subplots() # graph layout pos = nx.spring_layout(network) # draw nodes -- colouring by cluster for i in range(min(len(colours), num_neurons)): nx.draw_networkx_nodes(network, pos, nodelist = [network.nodes()[i]], node_color = colours[i]) #draw edges nx.draw_networkx_edges(network, pos) # draw labels nx.draw_networkx_labels(network, pos) #title of plot plt.title('Neurons in lattice, layer: '+str(layer)) #show lattice plot plt.show()
DavidMcDonald1993/ghsom
som_functions.py
Python
gpl-2.0
15,610
[ "NEURON", "VisIt" ]
beb903436a577964b50887e9a04aaa0e317e81187a2ce056ea5c0ae9f00f4b67
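A usage note on the row above: som_functions.py is a dump of notebook cells with no driver code, and several of its helpers (train_network, assign_nodes, expand_network) mutate the network in place, their return statements being commented out. A minimal sketch of one train/assign/grow cycle, assuming networkx 1.x (the network.node attribute API used throughout) and the 'embedding0' node-attribute name that furthest_datapoint expects:
import numpy as np
import networkx as nx

# toy input: a small graph with a random 2-d embedding per node (hypothetical data)
G = nx.karate_club_graph()
X = np.random.rand(nx.number_of_nodes(G), 2)
for n in G.nodes():
    G.node[n]['embedding0'] = X[n]  # attribute name assumed by furthest_datapoint

# initialise a small chain of neurons, train it, then assign graph nodes to neurons;
# these helpers mutate `network` in place rather than returning it
network = initialise_network(X, num_neurons=4, w=1e-2)
train_network(X, network, num_epochs=20, eta_0=0.5, sigma_0=1.0, N=len(X))
assign_nodes(G, X, network, layer=0)

# grow the map at the neuron with the largest quantisation error, prune idle neurons
mqe = update_errors(network)
error_unit = identify_error_unit(network)
expand_network(network, error_unit)
delete_neurons(network)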
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Nodes for PPAPI IDL AST""" # # IDL Node # # IDL Node defines the IDLAttribute and IDLNode objects which are constructed # by the parser as it processes the various 'productions'. The IDLAttribute # objects are assigned to the IDLNode's property dictionary instead of being # applied as children of The IDLNodes, so they do not exist in the final tree. # The AST of IDLNodes is the output from the parsing state and will be used # as the source data by the various generators. # import hashlib import sys from idl_log import ErrOut, InfoOut, WarnOut from idl_propertynode import IDLPropertyNode from idl_namespace import IDLNamespace from idl_release import IDLRelease, IDLReleaseMap # IDLAttribute # # A temporary object used by the parsing process to hold an Extended Attribute # which will be passed as a child to a standard IDLNode. # class IDLAttribute(object): def __init__(self, name, value): self.cls = 'ExtAttribute' self.name = name self.value = value def __str__(self): return '%s=%s' % (self.name, self.value) # # IDLNode # # This class implements the AST tree, providing the associations between # parents and children. It also contains a namepsace and propertynode to # allow for look-ups. IDLNode is derived from IDLRelease, so it is # version aware. # class IDLNode(IDLRelease): # Set of object IDLNode types which have a name and belong in the namespace. NamedSet = set(['Enum', 'EnumItem', 'File', 'Function', 'Interface', 'Member', 'Param', 'Struct', 'Type', 'Typedef']) show_versions = False def __init__(self, cls, filename, lineno, pos, children=None): # Initialize with no starting or ending Version IDLRelease.__init__(self, None, None) self.cls = cls self.lineno = lineno self.pos = pos self.filename = filename self.filenode = None self.hashes = {} self.deps = {} self.errors = 0 self.namespace = None self.typelist = None self.parent = None self.property_node = IDLPropertyNode() self.unique_releases = None # A list of unique releases for this node self.releases = None # A map from any release, to the first unique release self.first_release = None # self.children is a list of children ordered as defined self.children = [] # Process the passed in list of children, placing ExtAttributes into the # property dictionary, and nodes into the local child list in order. In # addition, add nodes to the namespace if the class is in the NamedSet. 
if not children: children = [] for child in children: if child.cls == 'ExtAttribute': self.SetProperty(child.name, child.value) else: self.AddChild(child) # # String related functions # # # Return a string representation of this node def __str__(self): name = self.GetName() ver = IDLRelease.__str__(self) if name is None: name = '' if not IDLNode.show_versions: ver = '' return '%s(%s%s)' % (self.cls, name, ver) # Return file and line number for where node was defined def Location(self): return '%s(%d)' % (self.filename, self.lineno) # Log an error for this object def Error(self, msg): self.errors += 1 ErrOut.LogLine(self.filename, self.lineno, 0, ' %s %s' % (str(self), msg)) if self.filenode: errcnt = self.filenode.GetProperty('ERRORS', 0) self.filenode.SetProperty('ERRORS', errcnt + 1) # Log a warning for this object def Warning(self, msg): WarnOut.LogLine(self.filename, self.lineno, 0, ' %s %s' % (str(self), msg)) def GetName(self): return self.GetProperty('NAME') def GetNameVersion(self): name = self.GetProperty('NAME', default='') ver = IDLRelease.__str__(self) return '%s%s' % (name, ver) # Dump this object and its children def Dump(self, depth=0, comments=False, out=sys.stdout): if self.cls in ['Comment', 'Copyright']: is_comment = True else: is_comment = False # Skip this node if it's a comment, and we are not printing comments if not comments and is_comment: return tab = ''.rjust(depth * 2) if is_comment: out.write('%sComment\n' % tab) for line in self.GetName().split('\n'): out.write('%s "%s"\n' % (tab, line)) else: ver = IDLRelease.__str__(self) if self.releases: release_list = ': ' + ' '.join(self.releases) else: release_list = ': undefined' out.write('%s%s%s%s\n' % (tab, self, ver, release_list)) if self.typelist: out.write('%s Typelist: %s\n' % (tab, self.typelist.GetReleases()[0])) properties = self.property_node.GetPropertyList() if properties: out.write('%s Properties\n' % tab) for p in properties: if is_comment and p == 'NAME': # Skip printing the name for comments, since we printed above already continue out.write('%s %s : %s\n' % (tab, p, self.GetProperty(p))) for child in self.children: child.Dump(depth+1, comments=comments, out=out) # # Search related functions # # Check if node is of a given type def IsA(self, *typelist): if self.cls in typelist: return True return False # Get a list of objects for this key def GetListOf(self, *keys): out = [] for child in self.children: if child.cls in keys: out.append(child) return out def GetOneOf(self, *keys): out = self.GetListOf(*keys) if out: return out[0] return None def SetParent(self, parent): self.property_node.AddParent(parent) self.parent = parent def AddChild(self, node): node.SetParent(self) self.children.append(node) # Get a list of all children def GetChildren(self): return self.children # Get a list of all children of a given version def GetChildrenVersion(self, version): out = [] for child in self.children: if child.IsVersion(version): out.append(child) return out # Get a list of all children in a given range def GetChildrenRange(self, vmin, vmax): out = [] for child in self.children: if child.IsRange(vmin, vmax): out.append(child) return out def FindVersion(self, name, version): node = self.namespace.FindNode(name, version) if not node and self.parent: node = self.parent.FindVersion(name, version) return node def FindRange(self, name, vmin, vmax): nodes = self.namespace.FindNodes(name, vmin, vmax) if not nodes and self.parent: nodes = self.parent.FindVersion(name, vmin, vmax) return nodes def GetType(self, release): 
if not self.typelist: return None return self.typelist.FindRelease(release) def GetHash(self, release): hashval = self.hashes.get(release, None) if hashval is None: hashval = hashlib.sha1() hashval.update(self.cls) for key in self.property_node.GetPropertyList(): val = self.GetProperty(key) hashval.update('%s=%s' % (key, str(val))) typeref = self.GetType(release) if typeref: hashval.update(typeref.GetHash(release)) for child in self.GetChildren(): if child.IsA('Copyright', 'Comment', 'Label'): continue if not child.IsRelease(release): continue hashval.update( child.GetHash(release) ) self.hashes[release] = hashval return hashval.hexdigest() def GetDeps(self, release, visited=None): visited = visited or set() # If this release is not valid for this object, then done. if not self.IsRelease(release) or self.IsA('Comment', 'Copyright'): return set([]) # If we have cached the info for this release, return the cached value deps = self.deps.get(release, None) if deps is not None: return deps # If we are already visited, then return if self in visited: return set([self]) # Otherwise, build the dependency list visited |= set([self]) deps = set([self]) # Get child deps for child in self.GetChildren(): deps |= child.GetDeps(release, visited) visited |= set(deps) # Get type deps typeref = self.GetType(release) if typeref: deps |= typeref.GetDeps(release, visited) self.deps[release] = deps return deps def GetVersion(self, release): filenode = self.GetProperty('FILE') if not filenode: return None return filenode.release_map.GetVersion(release) def GetUniqueReleases(self, releases): """Return the unique set of first releases corresponding to input Since we are returning the corresponding 'first' version for a release, we may return a release version prior to the one in the list.""" my_min, my_max = self.GetMinMax(releases) if my_min > releases[-1] or my_max < releases[0]: return [] out = set() for rel in releases: remapped = self.first_release[rel] if not remapped: continue out |= set([remapped]) # Cache the most recent set of unique_releases self.unique_releases = sorted(out) return self.unique_releases def LastRelease(self, release): # Get the most recent release from the most recently generated set of # cached unique releases. 
if self.unique_releases and self.unique_releases[-1] > release: return False return True def GetRelease(self, version): filenode = self.GetProperty('FILE') if not filenode: return None return filenode.release_map.GetRelease(version) def _GetReleases(self, releases): if not self.releases: my_min, my_max = self.GetMinMax(releases) my_releases = [my_min] if my_max != releases[-1]: my_releases.append(my_max) my_releases = set(my_releases) for child in self.GetChildren(): if child.IsA('Copyright', 'Comment', 'Label'): continue my_releases |= child.GetReleases(releases) self.releases = my_releases return self.releases def _GetReleaseList(self, releases, visited=None): visited = visited or set() if not self.releases: # If we are unversionable, then return first available release if self.IsA('Comment', 'Copyright', 'Label'): self.releases = [] return self.releases # Generate the first and if deprecated within this subset, the # last release for this node my_min, my_max = self.GetMinMax(releases) if my_max != releases[-1]: my_releases = set([my_min, my_max]) else: my_releases = set([my_min]) # Break cycle if we reference ourselves if self in visited: return [my_min] visited |= set([self]) # Files inherit all their releases from items in the file if self.IsA('AST', 'File'): my_releases = set() # Visit all children child_releases = set() # Exclude sibling results from parent visited set cur_visits = visited for child in self.children: child_releases |= set(child._GetReleaseList(releases, cur_visits)) visited |= set(child_releases) # Visit my type type_releases = set() if self.typelist: type_list = self.typelist.GetReleases() for typenode in type_list: type_releases |= set(typenode._GetReleaseList(releases, cur_visits)) type_release_list = sorted(type_releases) if my_min < type_release_list[0]: type_node = type_list[0] self.Error('requires %s in %s which is undefined at %s.' % ( type_node, type_node.filename, my_min)) for rel in child_releases | type_releases: if rel >= my_min and rel <= my_max: my_releases |= set([rel]) self.releases = sorted(my_releases) return self.releases def GetReleaseList(self): return self.releases def BuildReleaseMap(self, releases): unique_list = self._GetReleaseList(releases) my_min, my_max = self.GetMinMax(releases) self.first_release = {} last_rel = None for rel in releases: if rel in unique_list: last_rel = rel self.first_release[rel] = last_rel if rel == my_max: last_rel = None def SetProperty(self, name, val): self.property_node.SetProperty(name, val) def GetProperty(self, name, default=None): return self.property_node.GetProperty(name, default) def Traverse(self, data, func): func(self, data) for child in self.children: child.Traverse(data, func) # # IDLFile # # A specialized version of IDLNode which tracks errors and warnings. 
# class IDLFile(IDLNode): def __init__(self, name, children, errors=0): attrs = [IDLAttribute('NAME', name), IDLAttribute('ERRORS', errors)] if not children: children = [] IDLNode.__init__(self, 'File', name, 1, 0, attrs + children) self.release_map = IDLReleaseMap([('M13', 1.0)]) # # Tests # def StringTest(): errors = 0 name_str = 'MyName' text_str = 'MyNode(%s)' % name_str name_node = IDLAttribute('NAME', name_str) node = IDLNode('MyNode', 'no file', 1, 0, [name_node]) if node.GetName() != name_str: ErrOut.Log('GetName returned >%s< not >%s<' % (node.GetName(), name_str)) errors += 1 if node.GetProperty('NAME') != name_str: ErrOut.Log('Failed to get name property.') errors += 1 if str(node) != text_str: ErrOut.Log('str() returned >%s< not >%s<' % (str(node), text_str)) errors += 1 if not errors: InfoOut.Log('Passed StringTest') return errors def ChildTest(): errors = 0 child = IDLNode('child', 'no file', 1, 0) parent = IDLNode('parent', 'no file', 1, 0, [child]) if child.parent != parent: ErrOut.Log('Failed to connect parent.') errors += 1 if [child] != parent.GetChildren(): ErrOut.Log('Failed GetChildren.') errors += 1 if child != parent.GetOneOf('child'): ErrOut.Log('Failed GetOneOf(child)') errors += 1 if parent.GetOneOf('bogus'): ErrOut.Log('Failed GetOneOf(bogus)') errors += 1 if not parent.IsA('parent'): ErrOut.Log('Expecting parent type') errors += 1 parent = IDLNode('parent', 'no file', 1, 0, [child, child]) if [child, child] != parent.GetChildren(): ErrOut.Log('Failed GetChildren2.') errors += 1 if not errors: InfoOut.Log('Passed ChildTest') return errors def Main(): errors = StringTest() errors += ChildTest() if errors: ErrOut.Log('IDLNode failed with %d errors.' % errors) return -1 return 0 if __name__ == '__main__': sys.exit(Main())
GeyerA/android_external_chromium_org
ppapi/generators/idl_node.py
Python
bsd-3-clause
14,696
[ "VisIt" ]
407fe5793af98f6f255c1975d40b153ac6d94e505f7ae01a9b9ac991350ff687
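A small sketch of how the AST classes in the row above compose, mirroring the file's own StringTest/ChildTest conventions (Python 2, as in the source; assumes the module is importable as idl_node):
from idl_node import IDLNode, IDLAttribute

# Build a tiny tree the way the parser would: ExtAttributes become properties,
# other children are attached as tree nodes.
member = IDLNode('Member', 'no file', 2, 0, [IDLAttribute('NAME', 'size')])
iface = IDLNode('Interface', 'no file', 1, 0,
                [IDLAttribute('NAME', 'PPB_Demo'), member])

print iface                     # Interface(PPB_Demo)
print iface.GetOneOf('Member')  # Member(size)

# Traverse visits the node and all descendants, threading `data` through.
def count(node, data):
  data[0] += 1

total = [0]
iface.Traverse(total, count)
print total[0]                  # 2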
#!/usr/local/bin/python -i # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html # Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories # # Copyright (2005) Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. This software is distributed under # the GNU General Public License. # Change log: # 8/05, Steve Plimpton (SNL): original version # 12/09, David Hart (SNL): except hook for Tkinter no-display error # 5/11, David Hart (SNL): began list of excludes for no-display machines # ToDo list: # Help strings: version = "7 Feb 2014" intro = """ Pizza.py (%s), a toolkit written in Python type ? for help, CTRL-D to quit """ help = """ pizza.py switch arg(s) switch arg(s) ... -s silent (else print start-up help) -t log dump raster load only these tools -x raster rasmol load all tools except these -f mine.py arg1 arg2 run script file with args -c "vec = range(100)" run Python command -q quit (else interactive) Everything typed at the ">" prompt is a Python command Additional commands available at ">" prompt: ? print help message ?? one-line for each tool and script ? raster list tool commands or script syntax ?? energy.py full documentation of tool or script !ls -l shell command @cd .. cd to a new directory @log tmp.log log all commands typed so far to file @run block.py arg1 arg2 run script file with args @time d = dump("*.dump") time a command Tools: """ # ------------------------------------------------------------------------- # modules needed by pizza.py import sys, commands, os, string, exceptions, glob, re from time import clock # readline not available in all Pythons try: import readline readline_flag = 1 except ImportError, exception: print "readline option not available" readline_flag = 0 # create global Tk root if Tkinter is loaded # used by all tools that do GUIs via Tkinter nodisplay = False try: import Tkinter tkroot = Tkinter.Tk() tkroot.withdraw() except ImportError, exception: nodisplay = True pass except Exception, exception: nodisplay = True pass # ------------------------------------------------------------------------- # error trap that enables special commands at interactive prompt def trap(type,value,tback): global argv # only check SyntaxErrors if not isinstance(value,exceptions.SyntaxError): sys.__excepthook__(type,value,tback) return # special commands at top level only, not in indented text entry if value.text[0].isspace(): sys.__excepthook__(type,value,tback) return # ? = top-level help # ?? = one-line description of each tool and script # ? name = one-line for each tool command or script purpose/syntax # ?? 
name = entire documentation for tool or script # name with no .py suffix is tool, name with .py suffix is script if value.text[0] == "?": words = value.text.split() if len(words) == 1 and words[0] == "?": print intro[1:] % version print help[1:]," ", for tool in tools: print tool, print elif len(words) == 1 and words[0] == "??": for tool in tools: exec "oneline = oneline_%s" % tool print "%-11s%s" % (tool,oneline) print scripts = [] for dir in PIZZA_SCRIPTS[1:]: list = glob.glob("%s/*.py" % dir) list.sort() scripts += list for script in scripts: filename = os.path.basename(script) lines = open(script,'r').readlines() flag = 0 for line in lines: if line.find("Purpose:") >= 0: flag = 1 break if flag: doc = line[line.find("Purpose:")+8:] else: doc = " not available\n" print "%-20s%s" % (filename,doc), elif len(words) == 2 and words[0] == "?": if words[1][-3:] == ".py": fileflag = 0 for dir in PIZZA_SCRIPTS: filename = "%s/%s" % (dir,words[1]) if os.path.isfile(filename): fileflag = 1 lineflag = 0 lines = open(filename,'r').readlines() for line in lines: if line.find("# Purpose:") >= 0: print line[2:], if line.find("# Syntax:") >= 0: lineflag = 1 break if not lineflag: print "%s has no Syntax line" % words[1] else: print line[2:], break if not fileflag: print "%s is not a recognized script" % words[1] else: if words[1] in tools: exec "txt = docstr_%s" % words[1] txt = re.sub("\n\s*\n","\n",txt) txt = re.sub("\n .*","",txt) exec "print oneline_%s" % words[1] print txt else: print "%s is not a recognized tool" % words[1] elif len(words) == 2 and words[0] == "??": if words[1][-3:] == ".py": fileflag = 0 for dir in PIZZA_SCRIPTS: filename = "%s/%s" % (dir,words[1]) if os.path.isfile(filename): fileflag = 1 lines = open(filename,'r').readlines() for line in lines: if len(line.strip()) == 0: continue if line[0] == '#': print line, else: break break if not fileflag: print "%s is not a recognized script" % words[1] else: if words[1] in tools: exec "print oneline_%s" % words[1] exec "print docstr_%s" % words[1] else: print "%s is not a recognized class" % words[1] return # shell command like !ls, !ls -l if value.text[0] == "!": os.system(value.text[1:]) return # @ commands = @cd, @log, @run, @time # for run and time, use namespace in execfile and exec commands # else variables defined in script/command # won't be set in top-level Pizza.py if value.text[0] == "@": words = value.text.split() if words[0][1:] == "cd": os.chdir(words[1]) return elif words[0][1:] == "log": if readline_flag == 0: print "cannot use @log without readline module" return f = open(words[1],"w") print >>f,"# pizza.py log file\n" nlines = readline.get_current_history_length() for i in xrange(1,nlines): print >>f,readline.get_history_item(i) f.close() return elif words[0][1:] == "run": argv = words[1:] file = argv[0] flag = 0 for dir in PIZZA_SCRIPTS: fullfile = dir + '/' + file if os.path.exists(fullfile): flag = 1 print "Executing file:",fullfile execfile(fullfile,namespace) break if not flag: print "Could not find file",file return elif words[0][1:] == "time": cmd = string.join(words[1:]) t1 = clock() exec cmd in namespace t2 = clock() print "CPU time = ",t2-t1 return # unrecognized command, let system handle error sys.__excepthook__(type,value,tback) # ------------------------------------------------------------------------- # process command-line switches # store scripts and commands in tasks list silent = 0 yes_tools = [] no_tools = [] tasks = [] quitflag = 0 iarg = 1 while (iarg < len(sys.argv)): if (sys.argv[iarg][0] != '-'): 
print "ERROR: arg is not a switch: %s" % (sys.argv[iarg]) sys.exit() if (sys.argv[iarg] == "-s"): silent = 1 iarg += 1 elif (sys.argv[iarg] == "-t"): jarg = iarg + 1 while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'): yes_tools.append(sys.argv[jarg]) jarg += 1 iarg = jarg elif (sys.argv[iarg] == "-x"): jarg = iarg + 1 while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'): no_tools.append(sys.argv[jarg]) jarg += 1 iarg = jarg # allow for "--" as arg to script and not Pizza.py arg elif (sys.argv[iarg] == "-f"): jarg = iarg + 1 list = [] while (jarg < len(sys.argv) and (sys.argv[jarg][0] != '-' or (len(sys.argv[jarg]) >= 3 and sys.argv[jarg][0:2] == "--"))): list.append(sys.argv[jarg]) jarg += 1 task = ("script",list) tasks.append(task) iarg = jarg elif (sys.argv[iarg] == "-c"): jarg = iarg + 1 list = [] while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'): list.append(sys.argv[jarg]) jarg += 1 task = ("command",list) tasks.append(task) iarg = jarg elif (sys.argv[iarg] == "-q"): quitflag = 1 iarg += 1 else: print "ERROR: unknown switch: %s" % (sys.argv[iarg]) sys.exit() # print intro message if not silent: print intro[1:] % version, # error test on m,x command-line switches if len(yes_tools) > 0 and len(no_tools) > 0: print "ERROR: cannot use -t and -x switches together" sys.exit() # ------------------------------------------------------------------------- # tools = list of tool names to import # if -t switch was used, tools = just those files # else scan for *.py files in all dirs in PIZZA_TOOLS list # and then Pizza.py src dir (sys.path[0]) if not silent: print "Loading tools ..." if not silent and nodisplay: print "Display not available ... no GUIs" try: from DEFAULTS import PIZZA_TOOLS except: PIZZA_TOOLS = [] PIZZA_TOOLS = map(os.path.expanduser,PIZZA_TOOLS) PIZZA_TOOLS.append(sys.path[0]) if len(yes_tools) > 0: tools = yes_tools else: tools = [] for dir in PIZZA_TOOLS: tools += glob.glob(dir + "/*.py") for i in range(len(tools)): tools[i] = os.path.basename(tools[i]) tools[i] = tools[i][:-3] # remove duplicate entries, reverse enables removing all but first entry tools.reverse() for tool in tools: while tools.count(tool) > 1: tools.remove(tool) tools.reverse() # remove tools in EXCLUDE list and command-line -x list try: from DEFAULTS import PIZZA_EXCLUDE except: PIZZA_EXCLUDE = [] for tool in PIZZA_EXCLUDE: if tool in tools: tools.remove(tool) for tool in no_tools: if tool in tools: tools.remove(tool) # add PIZZA_TOOLS dirs to front of module search path (sys.path) # import each tool as a Python module and its documentation strings # restore sys.path sys.path = PIZZA_TOOLS + sys.path failed = [] for tool in tools: #print "loading tool '%s'"%tool if nodisplay and tool in ['gl']: failed.append(tool) continue try: exec "from %s import %s" % (tool,tool) exec "from %s import oneline as oneline_%s" % (tool,tool) exec "from %s import docstr as docstr_%s" % (tool,tool) except Exception, exception: print "%s tool did not load:" % tool print " ",exception failed.append(tool) for dir in PIZZA_TOOLS: sys.path = sys.path[1:] # final list of tools: remove tools where import failed, sort them for tool in failed: tools.remove(tool) tools.sort() # add current working dir to sys.path so user can import own modules # cwd isn't in sys.path when Pizza.py is launched sys.path.insert(0,'') # ------------------------------------------------------------------------- # PIZZA_SCRIPTS = list of dirs to look in to find scripts try: from DEFAULTS import PIZZA_SCRIPTS except: PIZZA_SCRIPTS = [] 
PIZZA_SCRIPTS = map(os.path.expanduser,PIZZA_SCRIPTS) PIZZA_SCRIPTS.insert(0,'.') PIZZA_SCRIPTS.append(sys.path[1][:-3] + "scripts") # path for pizza.py # run specified script files and commands in order specified # put arguments in argv so script can access them # check list of PIZZA_SCRIPTS dirs to find script file # catch errors so pizza.py will continue even if script is bad # traceback logic prints where in the script the error occurred for task in tasks: if task[0] == "script": argv = task[1] file = argv[0] try: flag = 0 for dir in PIZZA_SCRIPTS: fullfile = dir + '/' + file if os.path.exists(fullfile): print "Executing file:",fullfile execfile(fullfile) flag = 1 break if not flag: print "Could not find file",file except StandardError, exception: (type,value,tback) = sys.exc_info() print type,value,tback type = str(type) type = type[type.find(".")+1:] print "%s with value: %s" % (type,value) tback = tback.tb_next while tback: print "error on line %d of file %s" % \ (tback.tb_lineno,tback.tb_frame.f_code.co_filename) tback = tback.tb_next elif task[0] == "command": argv = task[1] cmd = "" for arg in argv: cmd += arg + " " exec cmd # ------------------------------------------------------------------------- # store global namespace # swap in a new exception handler # change interactive prompts namespace = sys.modules['__main__'].__dict__ sys.excepthook = trap sys.ps1 = "> " sys.ps2 = ". " # should now go interactive if launched with "python -i" # unless -q switch used if quitflag > 0: sys.exit()
eddiejessup/pizza
src/pizza.py
Python
gpl-2.0
13,329
[ "RasMol" ]
201019a3f953621232d5ce7816c2fb1e61e5941610c219fdfce3ddee782949e9
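pizza.py pulls its site configuration from an optional DEFAULTS module (the try/except imports of PIZZA_TOOLS, PIZZA_SCRIPTS and PIZZA_EXCLUDE above). A minimal DEFAULTS.py matching those names might look like this; the directory paths are placeholders, not part of the original source:
# DEFAULTS.py -- optional Pizza.py site configuration (Python 2, like pizza.py)

# extra directories scanned for *.py tool modules, searched before the src dir
PIZZA_TOOLS = ["~/pizza/mytools"]

# directories searched by -f and the @run command for scripts ('.' is prepended
# and the bundled scripts dir appended by pizza.py itself)
PIZZA_SCRIPTS = ["~/pizza/myscripts"]

# tools never loaded, e.g. GUI tools on display-less machines
PIZZA_EXCLUDE = ["gl", "raster"]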
#!/usr/bin/env python import os.path import time import fitsio import astropy.io.fits as pyfits import opscore.protocols.keys as keys import opscore.protocols.types as types from opscore.utility.qstr import qstr import actorcore.utility.fits as actorFits class HxCmd(object): def __init__(self, actor): # This lets us access the rest of the actor. self.actor = actor self.logger = self.actor.logger # Declare the commands we implement. When the actor is started # these are registered with the parser, which will call the # associated methods when matched. The callbacks will be # passed a single argument, the parsed and typed command. # self.vocab = [ ('hx', '@raw', self.hxRaw), ('bounce', '', self.bounce), ('hxconfig', '[<configName>]', self.hxconfig), ('getVoltages', '', self.getVoltages), ('setVoltage', '<voltageName> <voltage>', self.setVoltage), ('ramp', '[<nramp>] [<nreset>] [<nread>] [<ngroup>] [<ndrop>] [<itime>] [@splitRamps] [<seqno>] [<exptype>] [<objname>]', self.takeRamp), ('reloadLogic', '', self.reloadLogic), ] # Define typed command arguments for the above commands. self.keys = keys.KeysDictionary("hx", (1, 2), keys.Key("seqno", types.Int(), default=None, help='If set, the assigned sequence number.'), keys.Key("nramp", types.Int(), default=1, help='number of ramps to take.'), keys.Key("nreset", types.Int(), default=1, help='number of resets to make.'), keys.Key("nread", types.Int(), default=2, help='number of reads to take.'), keys.Key("ngroup", types.Int(), default=1, help='number of groups.'), keys.Key("ndrop", types.Int(), default=0, help='number of drops to waste.'), keys.Key("itime", types.Float(), default=None, help='desired integration time'), keys.Key("exptype", types.String(), default=None, help='What to put in IMAGETYP/DATA-TYP.'), keys.Key("objname", types.String(), default=None, help='What to put in OBJECT.'), keys.Key("configName", types.String(), default=None, help='configuration name'), keys.Key("voltageName", types.String(), default=None, help='voltage name'), keys.Key("voltage", types.Float(), default=None, help='voltage'), ) self.backend = 'hxhal' self.rampConfig = None if self.actor.instrument == "CHARIS": self.dataRoot = "/home/data/charis" self.dataPrefix = "CRSA" filenameFunc = None else: self.dataRoot = "/data/pfsx" self.dataPrefix = "PFJA" def filenameFunc(dataRoot, seqno): # Write the full ramp fileName = self.actor.spectroIds.makeFitsName(visit=seqno, fileType='B') return os.path.join(dataRoot, fileName), from hxActor.charis import seqPath self.fileGenerator = seqPath.NightFilenameGen(self.dataRoot, namesFunc = filenameFunc, filePrefix=self.dataPrefix) @property def controller(self): return self.actor.controllers.get(self.backend, None) @property def sam(self): ctrlr = self.actor.controllers.get(self.backend, None) return ctrlr.sam def bounce(self, cmd): self.controller.disconnect() def hxconfig(self, cmd): """Set the given hxhal configuration. """ if self.backend != 'hxhal' or self.controller is None: cmd.fail('text="No hxhal controller"') return cmdKeys = cmd.cmd.keywords configName = cmdKeys['configName'].values[0] sam = self.sam try: configGroup, configName = configName.split('.') except: configGroup = 'h4rgConfig' if self.actor.instrument == 'PFS' else 'h2rgConfig' sam.updateHxRgConfigParameters(configGroup, configName) cmd.finish() def setVoltage(self, cmd): """Set a single Hx bias voltage. 
""" if self.backend is not 'hxhal' or self.controller is None: cmd.fail('text="No hxhal controller"') return cmdKeys = cmd.cmd.keywords voltageName = cmdKeys['voltageName'].values[0] voltage = cmdKeys['voltage'].values[0] sam = self.sam try: newVoltage = sam.setBiasVoltage(voltageName, voltage) except Exception as e: cmd.fail('text="Failed to set voltage %s=%s: %s"' % (voltageName, voltage, e)) cmd.finish() def hxRaw(self, cmd): """ Tunnel a rawCmd command to the HX program. """ cmdKeys = cmd.cmd.keywords ctrl = self.controller rawCmd = cmdKeys['raw'].values[0] cmd.fail('text="not implemented"') def _calcAcquireTimeout(self, expType='ramp', cmd=None): """ Return the best estimate of the actual expected time for our current rampConfig. """ frameTime = self.sam.frameTime if expType == 'ramp': return frameTime * (self.rampConfig['nread'] + self.rampConfig['nreset'] + self.rampConfig['ndrop']) elif expType == 'single': return frameTime * (1 + self.rampConfig['nreset']) elif expType == 'CDS': return frameTime * (2 + self.rampConfig['nreset']) else: raise RuntimeError("unknown expType %s" % (expType)) def flushProgramInput(self, cmd, doFinish=True): debris = '' while True: try: ret = self.controller.getOneChar(timeout=0.2) debris = debris + ret except RuntimeError: break except: raise if debris != '': cmd.warn('text="flushed stray input: %r"' % (debris)) if doFinish: cmd.finish() def getSubaruHeader(self, frameId, timeout=1.0, fullHeader=True, exptype='TEST', cmd=None): itime = self.sam.frameTime if exptype.lower() == 'nohdr': return pyfits.Header() headerTask = subaru.FetchHeader(fullHeader=True, frameId=frameId, itime=itime, exptype=exptype) self.logger.debug('text="starting header task timeout=%s frameId=%s"' % (timeout, frameId)) headerTask.start() headerQ = headerTask.q self.logger.info('text="header q: %s"' % (headerQ)) try: hdrString = headerQ.get(True, timeout) if hdrString is None: self.logger.debug('text=".get header: %s"' % (hdrString)) else: self.logger.debug('text=".get header: %s"' % (len(hdrString))) except Exception as e: self.logger.warn('text="failed to .getHeader header: %s"' % (e)) cmd.warn('text="failed to .getHeader header: %s"' % (e)) hdrString = '' finally: headerTask.terminate() time.sleep(0.1) hdr = pyfits.Header.fromstring(hdrString) return hdr def getHeader(self, frameId, fullHeader=True, exptype='TEST', objname='TEST', timeout=1.0, cmd=None): try: hdr = self.getSubaruHeader(frameId, fullHeader=fullHeader, exptype=exptype, timeout=timeout, cmd=cmd) except Exception as e: self.logger.warn('text="failed to fetch Subaru header: %s"' % (e)) cmd.warn('text="failed to fetch Subaru header: %s"' % (e)) hdr = pyfits.Header() hdr.set('OBJECT', objname, before=1) hxCards = self.getHxCards(cmd) for c in hxCards: hdr.append(c) scexaoCards = self.getSCExAOCards(cmd) for c in scexaoCards: hdr.append(c) charisCards = self.getCharisCards(cmd) for c in charisCards: hdr.append(c) return hdr def getHxCards(self, cmd=None): # voltageList = self.controller.getAllBiasVoltages return [] def getVoltages(self, cmd): ret = self.sam.getAllBiasVoltages() for nv in ret: name, voltage = nv cmd.inform('%s=%0.3f' % (name, voltage)) cmd.finish() def takeRamp(self, cmd): """Main exposure entry point. 
""" cmdKeys = cmd.cmd.keywords nramp = cmdKeys['nramp'].values[0] if ('nramp' in cmdKeys) else 1 nreset = cmdKeys['nreset'].values[0] if ('nreset' in cmdKeys) else 1 nread = cmdKeys['nread'].values[0] if ('nread' in cmdKeys) else 1 ndrop = cmdKeys['ndrop'].values[0] if ('ndrop' in cmdKeys) else 0 ngroup = cmdKeys['ngroup'].values[0] if ('ngroup' in cmdKeys) else 1 itime = cmdKeys['itime'].values[0] if ('itime' in cmdKeys) else None seqno = cmdKeys['seqno'].values[0] if ('seqno' in cmdKeys) else None exptype = cmdKeys['exptype'].values[0] if ('exptype' in cmdKeys) else 'TEST' objname = cmdKeys['objname'].values[0] if ('objname' in cmdKeys) else 'TEST' cmd.diag('text="ramps=%s resets=%s reads=%s rdrops=%s rgroups=%s itime=%s seqno=%s exptype=%s"' % (nramp, nreset, nread, ndrop, ngroup, itime, seqno, exptype)) if itime is not None: if 'nread' in cmdKeys: cmd.fail('text="cannot specify both nread= and itime="') return nread = int(itime / self.sam.frameTime) + 1 dosplit = 'splitRamps' in cmdKeys nrampCmds = nramp if dosplit else 1 if nread <= 0 or nramp <= 0 or ngroup <= 0: cmd.fail('text="all of nramp,ngroup,(nread or itime) must be positive"') return cmd.inform('text="configuring ramp..."') cmd.inform('ramp=%d,%d,%d,%d,%d' % (nramp,ngroup,nreset,nread,ndrop)) if self.backend == 'hxhal': t0 = time.time() sam = self.sam sam.fileGenerator = self.fileGenerator def readCB(ramp, group, read, filename, image): cmd.inform('hxread=%s,%d,%d,%d' % (filename, ramp, group, read)) if nread == read: cmd.inform('filename=%s' % (filename)) def headerCB(ramp, group, read, seqno): if self.actor.instrument == 'CHARIS': hdr = self.getCharisHeader(seqno=seqno, fullHeader=(read == 1), cmd=cmd) return hdr.cards elif self.actor.instrument == 'PFS': return [] else: raise RuntimeError(f'actor.instrument is not a known device: {self.actor.instrument}') def pfsHeaderCB(ramp, group, read, seqno): return [] filenames = sam.takeRamp(nResets=nreset, nReads=nread, noReturn=True, nRamps=nramp, seqno=seqno, exptype=exptype, headerCallback=pfsHeaderCB, readCallback=readCB) else: self.flushProgramInput(cmd, doFinish=False) ctrlr = self.controller ret = ctrlr.sendOneCommand('setRampParam(%d,%d,%d,%d,%d)' % (nreset,nread,ngroup,ndrop,(1 if dosplit else nramp)), cmd=cmd) if ret != '0:succeeded': cmd.fail('text="failed to configure for ramp: %s"' % (ret)) return self.winGetconfig(cmd, doFinish=False) timeout = self._calcAcquireTimeout(expType='ramp') if not dosplit: timeout *= nramp timeout += 10 t0 = time.time() for r_i in range(nrampCmds): cmd.inform('text="acquireramp command %d of %d"' % (r_i+1, nrampCmds)) ctrlr.sendOneCommand('acquireramp', cmd=cmd, timeout=timeout, noResponse=True) self._consumeRamps((1 if dosplit else nramp), ngroup,nreset,nread,ndrop, cmd=cmd) ret = ctrlr.getOneResponse(cmd=cmd) if ret != '0:Ramp acquisition succeeded': cmd.fail('text="IDL gave unexpected response at end of ramp: %s"' % (ret)) return t1 = time.time() dt = t1-t0 cmd.finish('text="%d ramps, elapsed=%0.3f, perRamp=%0.3f, perRead=%0.3f"' % (nramp, dt, dt/nramp, dt/(nramp*(nread+nreset+ndrop)))) def reloadLogic(self, cmd): self.sam.reloadLogic() cmd.finish()
CraigLoomis/ics_hxActor
python/hxActor/Commands/HxCmd.py
Python
mit
14,461
[ "VisIt" ]
1762f3b0798c939e450201f518c8eb121bcf617b298d90e6c0cb0361fdebd769
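The exposure timing in the row above reduces to simple frame counting: _calcAcquireTimeout and takeRamp's summary line both charge one detector frame time per reset, read, and drop. A standalone sketch of the same arithmetic (the 10.6 s frame time is illustrative, not taken from the source):
def ramp_time(frame_time, nreset, nread, ndrop=0, nramp=1):
    """Expected wall time for nramp ramps of (nreset resets + nread reads + ndrop drops)."""
    return frame_time * (nreset + nread + ndrop) * nramp

# e.g. one ramp of 1 reset + 2 reads on a detector with a 10.6 s frame time:
print('%0.1f s' % ramp_time(10.6, nreset=1, nread=2))  # -> 31.8 s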
""" Handling of the local glacier map and masks. Defines the first tasks to be realized by any OGGM pre-processing workflow. """ # Built ins import os import logging import warnings from distutils.version import LooseVersion from functools import partial # External libs import numpy as np import shapely.ops import pandas as pd import xarray as xr import shapely.geometry as shpg import scipy.signal from scipy.ndimage.measurements import label from scipy.ndimage import binary_erosion from scipy.ndimage.morphology import distance_transform_edt from scipy.interpolate import griddata from scipy import optimize as optimization # Optional libs try: import salem from salem.gis import transform_proj except ImportError: pass try: import pyproj except ImportError: pass try: import geopandas as gpd except ImportError: pass try: import skimage.draw as skdraw except ImportError: pass try: import rasterio from rasterio.warp import reproject, Resampling from rasterio.mask import mask as riomask try: # rasterio V > 1.0 from rasterio.merge import merge as merge_tool except ImportError: from rasterio.tools.merge import merge as merge_tool except ImportError: pass # Locals from oggm import entity_task, utils import oggm.cfg as cfg from oggm.exceptions import (InvalidParamsError, InvalidGeometryError, InvalidDEMError, GeometryError, InvalidWorkflowError) from oggm.utils import (tuple2int, get_topo_file, is_dem_source_available, nicenumber, ncDataset, tolist) # Module logger log = logging.getLogger(__name__) # Needed later label_struct = np.ones((3, 3)) def _parse_source_text(): fp = os.path.join(os.path.abspath(os.path.dirname(cfg.__file__)), 'data', 'dem_sources.txt') out = dict() cur_key = None with open(fp, 'r', encoding='utf-8') as fr: this_text = [] for l in fr.readlines(): l = l.strip() if l and (l[0] == '[' and l[-1] == ']'): if cur_key: out[cur_key] = '\n'.join(this_text) this_text = [] cur_key = l.strip('[]') continue this_text.append(l) out[cur_key] = '\n'.join(this_text) return out DEM_SOURCE_INFO = _parse_source_text() def gaussian_blur(in_array, size): """Applies a Gaussian filter to a 2d array. Parameters ---------- in_array : numpy.array The array to smooth. size : int The half size of the smoothing window. Returns ------- a smoothed numpy.array """ # expand in_array to fit edge of kernel padded_array = np.pad(in_array, size, 'symmetric') # build kernel x, y = np.mgrid[-size:size + 1, -size:size + 1] g = np.exp(-(x**2 / float(size) + y**2 / float(size))) g = (g / g.sum()).astype(in_array.dtype) # do the Gaussian blur return scipy.signal.fftconvolve(padded_array, g, mode='valid') def _interp_polygon(polygon, dx): """Interpolates an irregular polygon to a regular step dx. Interior geometries are also interpolated if they are longer then 3*dx, otherwise they are ignored. Parameters ---------- polygon: The shapely.geometry.Polygon instance to interpolate dx : the step (float) Returns ------- an interpolated shapely.geometry.Polygon class instance. 
""" # remove last (duplex) point to build a LineString from the LinearRing line = shpg.LineString(np.asarray(polygon.exterior.xy).T) e_line = [] for distance in np.arange(0.0, line.length, dx): e_line.append(*line.interpolate(distance).coords) e_line = shpg.LinearRing(e_line) i_lines = [] for ipoly in polygon.interiors: line = shpg.LineString(np.asarray(ipoly.xy).T) if line.length < 3*dx: continue i_points = [] for distance in np.arange(0.0, line.length, dx): i_points.append(*line.interpolate(distance).coords) i_lines.append(shpg.LinearRing(i_points)) return shpg.Polygon(e_line, i_lines) def _polygon_to_pix(polygon): """Transforms polygon coordinates to integer pixel coordinates. It makes the geometry easier to handle and reduces the number of points. Parameters ---------- polygon: the shapely.geometry.Polygon instance to transform. Returns ------- a shapely.geometry.Polygon class instance. """ def project(x, y): return np.rint(x).astype(np.int64), np.rint(y).astype(np.int64) def project_coarse(x, y, c=2): return ((np.rint(x/c)*c).astype(np.int64), (np.rint(y/c)*c).astype(np.int64)) poly_pix = shapely.ops.transform(project, polygon) # simple trick to correct invalid polys: tmp = poly_pix.buffer(0) # try to deal with a bug in buffer where the corrected poly would be null c = 2 while tmp.length == 0 and c < 7: project = partial(project_coarse, c=c) poly_pix = shapely.ops.transform(project_coarse, polygon) tmp = poly_pix.buffer(0) c += 1 # We tried all we could if tmp.length == 0: raise InvalidGeometryError('This glacier geometry is not valid for ' 'OGGM.') # sometimes the glacier gets cut out in parts if tmp.type == 'MultiPolygon': # If only small arms are cut out, remove them area = np.array([_tmp.area for _tmp in tmp.geoms]) _tokeep = np.argmax(area).item() tmp = tmp.geoms[_tokeep] # check that the other parts really are small, # otherwise replace tmp with something better area = area / area[_tokeep] for _a in area: if _a != 1 and _a > 0.05: # these are extremely thin glaciers # eg. RGI40-11.01381 RGI40-11.01697 params.d1 = 5. and d2 = 8. 
# make them bigger until its ok for b in np.arange(0., 1., 0.01): tmp = shapely.ops.transform(project, polygon.buffer(b)) tmp = tmp.buffer(0) if tmp.type == 'MultiPolygon': continue if tmp.is_valid: break if b == 0.99: raise InvalidGeometryError('This glacier geometry is not ' 'valid for OGGM.') if not tmp.is_valid: raise InvalidGeometryError('This glacier geometry is not valid ' 'for OGGM.') return tmp def glacier_grid_params(gdir): """Define the glacier grid map based on the user params.""" # Get the local map proj params and glacier extent gdf = gdir.read_shapefile('outlines') # Get the map proj utm_proj = salem.check_crs(gdf.crs) # Get glacier extent xx, yy = gdf.iloc[0]['geometry'].exterior.xy # Define glacier area to use area = gdir.rgi_area_km2 # Choose a spatial resolution with respect to the glacier area dxmethod = cfg.PARAMS['grid_dx_method'] if dxmethod == 'linear': dx = np.rint(cfg.PARAMS['d1'] * area + cfg.PARAMS['d2']) elif dxmethod == 'square': dx = np.rint(cfg.PARAMS['d1'] * np.sqrt(area) + cfg.PARAMS['d2']) elif dxmethod == 'fixed': dx = np.rint(cfg.PARAMS['fixed_dx']) else: raise InvalidParamsError('grid_dx_method not supported: {}' .format(dxmethod)) # Additional trick for varying dx if dxmethod in ['linear', 'square']: dx = utils.clip_scalar(dx, cfg.PARAMS['d2'], cfg.PARAMS['dmax']) log.debug('(%s) area %.2f km, dx=%.1f', gdir.rgi_id, area, dx) # Safety check border = cfg.PARAMS['border'] if border > 1000: raise InvalidParamsError("You have set a cfg.PARAMS['border'] value " "of {}. ".format(cfg.PARAMS['border']) + 'This a very large value, which is ' 'currently not supported in OGGM.') # For tidewater glaciers we force border to 10 if gdir.is_tidewater and cfg.PARAMS['clip_tidewater_border']: border = 10 # Corners, incl. a buffer of N pix ulx = np.min(xx) - border * dx lrx = np.max(xx) + border * dx uly = np.max(yy) + border * dx lry = np.min(yy) - border * dx # n pixels nx = int((lrx - ulx) / dx) ny = int((uly - lry) / dx) return utm_proj, nx, ny, ulx, uly, dx @entity_task(log, writes=['glacier_grid', 'dem', 'outlines']) def define_glacier_region(gdir, entity=None, source=None): """Very first task after initialization: define the glacier's local grid. Defines the local projection (Transverse Mercator), centered on the glacier. There is some options to set the resolution of the local grid. It can be adapted depending on the size of the glacier with:: dx (m) = d1 * AREA (km) + d2 ; clipped to dmax or be set to a fixed value. See ``params.cfg`` for setting these options. Default values of the adapted mode lead to a resolution of 50 m for Hintereisferner, which is approx. 8 km2 large. After defining the grid, the topography and the outlines of the glacier are transformed into the local projection. The default interpolation for the topography is `cubic`. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data entity : geopandas.GeoSeries the glacier geometry to process - DEPRECATED. It is now ignored source : str or list of str, optional If you want to force the use of a certain DEM source. 
Available are: - 'USER' : file set in cfg.PATHS['dem_file'] - 'SRTM' : http://srtm.csi.cgiar.org/ - 'GIMP' : https://bpcrc.osu.edu/gdg/data/gimpdem - 'RAMP' : http://nsidc.org/data/docs/daac/nsidc0082_ramp_dem.gd.html - 'REMA' : https://www.pgc.umn.edu/data/rema/ - 'DEM3' : http://viewfinderpanoramas.org/ - 'ASTER' : https://lpdaac.usgs.gov/products/astgtmv003/ - 'TANDEM' : https://geoservice.dlr.de/web/dataguide/tdm90/ - 'ARCTICDEM' : https://www.pgc.umn.edu/data/arcticdem/ - 'AW3D30' : https://www.eorc.jaxa.jp/ALOS/en/aw3d30 - 'MAPZEN' : https://registry.opendata.aws/terrain-tiles/ - 'ALASKA' : https://www.the-cryosphere.net/8/503/2014/ - 'COPDEM30' : Copernicus DEM GLO-30 https://bit.ly/2T98qqs - 'COPDEM90' : Copernicus DEM GLO-90 https://bit.ly/2T98qqs - 'NASADEM': https://lpdaac.usgs.gov/products/nasadem_hgtv001/ """ utm_proj, nx, ny, ulx, uly, dx = glacier_grid_params(gdir) # Back to lon, lat for DEM download/preparation tmp_grid = salem.Grid(proj=utm_proj, nxny=(nx, ny), x0y0=(ulx, uly), dxdy=(dx, -dx), pixel_ref='corner') minlon, maxlon, minlat, maxlat = tmp_grid.extent_in_crs(crs=salem.wgs84) # Open DEM # We test DEM availability for glacier only (maps can grow big) if not is_dem_source_available(source, *gdir.extent_ll): log.warning('Source: {} may not be available for glacier {} with ' 'border {}'.format(source, gdir.rgi_id, cfg.PARAMS['border'])) dem_list, dem_source = get_topo_file((minlon, maxlon), (minlat, maxlat), rgi_id=gdir.rgi_id, dx_meter=dx, source=source) log.debug('(%s) DEM source: %s', gdir.rgi_id, dem_source) log.debug('(%s) N DEM Files: %s', gdir.rgi_id, len(dem_list)) # Decide how to tag nodata def _get_nodata(rio_ds): nodata = rio_ds[0].meta.get('nodata', None) if nodata is None: # badly tagged geotiffs, let's do it ourselves nodata = -32767 if source == 'TANDEM' else -9999 return nodata # A glacier area can cover more than one tile: if len(dem_list) == 1: dem_dss = [rasterio.open(dem_list[0])] # if one tile, just open it dem_data = rasterio.band(dem_dss[0], 1) if LooseVersion(rasterio.__version__) >= LooseVersion('1.0'): src_transform = dem_dss[0].transform else: src_transform = dem_dss[0].affine nodata = _get_nodata(dem_dss) else: dem_dss = [rasterio.open(s) for s in dem_list] # list of rasters nodata = _get_nodata(dem_dss) dem_data, src_transform = merge_tool(dem_dss, nodata=nodata) # merge # Use Grid properties to create a transform (see rasterio cookbook) dst_transform = rasterio.transform.from_origin( ulx, uly, dx, dx # sign change (2nd dx) is done by rasterio.transform ) # Set up profile for writing output profile = dem_dss[0].profile profile.update({ 'crs': utm_proj.srs, 'transform': dst_transform, 'nodata': nodata, 'width': nx, 'height': ny, 'driver': 'GTiff' }) # Could be extended so that the cfg file takes all Resampling.* methods if cfg.PARAMS['topo_interp'] == 'bilinear': resampling = Resampling.bilinear elif cfg.PARAMS['topo_interp'] == 'cubic': resampling = Resampling.cubic else: raise InvalidParamsError('{} interpolation not understood' .format(cfg.PARAMS['topo_interp'])) dem_reproj = gdir.get_filepath('dem') profile.pop('blockxsize', None) profile.pop('blockysize', None) profile.pop('compress', None) with rasterio.open(dem_reproj, 'w', **profile) as dest: dst_array = np.empty((ny, nx), dtype=dem_dss[0].dtypes[0]) reproject( # Source parameters source=dem_data, src_crs=dem_dss[0].crs, src_transform=src_transform, src_nodata=nodata, # Destination parameters destination=dst_array, dst_transform=dst_transform, dst_crs=utm_proj.srs, 
dst_nodata=nodata, # Configuration resampling=resampling) dest.write(dst_array, 1) for dem_ds in dem_dss: dem_ds.close() # Glacier grid x0y0 = (ulx+dx/2, uly-dx/2) # To pixel center coordinates glacier_grid = salem.Grid(proj=utm_proj, nxny=(nx, ny), dxdy=(dx, -dx), x0y0=x0y0) glacier_grid.to_json(gdir.get_filepath('glacier_grid')) # Write DEM source info gdir.add_to_diagnostics('dem_source', dem_source) source_txt = DEM_SOURCE_INFO.get(dem_source, dem_source) with open(gdir.get_filepath('dem_source'), 'w') as fw: fw.write(source_txt) fw.write('\n\n') fw.write('# Data files\n\n') for fname in dem_list: fw.write('{}\n'.format(os.path.basename(fname))) def rasterio_to_gdir(gdir, input_file, output_file_name, resampling='cubic'): """Reprojects a file that rasterio can read into the glacier directory. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` the glacier directory input_file : str path to the file to reproject output_file_name : str name of the output file (must be in cfg.BASENAMES) resampling : str nearest', 'bilinear', 'cubic', 'cubic_spline', or one of https://rasterio.readthedocs.io/en/latest/topics/resampling.html """ output_file = gdir.get_filepath(output_file_name) assert '.tif' in output_file, 'output_file should end with .tif' if not gdir.has_file('dem'): raise InvalidWorkflowError('Need a dem.tif file to reproject to') with rasterio.open(input_file) as src: kwargs = src.meta.copy() data = src.read(1) with rasterio.open(gdir.get_filepath('dem')) as tpl: kwargs.update({ 'crs': tpl.crs, 'transform': tpl.transform, 'width': tpl.width, 'height': tpl.height }) with rasterio.open(output_file, 'w', **kwargs) as dst: for i in range(1, src.count + 1): dest = np.zeros(shape=(tpl.height, tpl.width), dtype=data.dtype) reproject( source=rasterio.band(src, i), destination=dest, src_transform=src.transform, src_crs=src.crs, dst_transform=tpl.transform, dst_crs=tpl.crs, resampling=getattr(Resampling, resampling) ) dst.write(dest, indexes=i) def read_geotiff_dem(gdir): """Reads (and masks out) the DEM out of the gdir's geotiff file. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` the glacier directory Returns ------- 2D np.float32 array """ with rasterio.open(gdir.get_filepath('dem'), 'r', driver='GTiff') as ds: topo = ds.read(1).astype(rasterio.float32) topo[topo <= -999.] = np.NaN topo[ds.read_masks(1) == 0] = np.NaN return topo class GriddedNcdfFile(object): """Creates or opens a gridded netcdf file template. The other variables have to be created and filled by the calling routine. 
""" def __init__(self, gdir, basename='gridded_data', reset=False): self.fpath = gdir.get_filepath(basename) self.grid = gdir.grid if reset and os.path.exists(self.fpath): os.remove(self.fpath) def __enter__(self): if os.path.exists(self.fpath): # Already there - just append self.nc = ncDataset(self.fpath, 'a', format='NETCDF4') return self.nc # Create and fill nc = ncDataset(self.fpath, 'w', format='NETCDF4') nc.createDimension('x', self.grid.nx) nc.createDimension('y', self.grid.ny) nc.author = 'OGGM' nc.author_info = 'Open Global Glacier Model' nc.pyproj_srs = self.grid.proj.srs x = self.grid.x0 + np.arange(self.grid.nx) * self.grid.dx y = self.grid.y0 + np.arange(self.grid.ny) * self.grid.dy v = nc.createVariable('x', 'f4', ('x',), zlib=True) v.units = 'm' v.long_name = 'x coordinate of projection' v.standard_name = 'projection_x_coordinate' v[:] = x v = nc.createVariable('y', 'f4', ('y',), zlib=True) v.units = 'm' v.long_name = 'y coordinate of projection' v.standard_name = 'projection_y_coordinate' v[:] = y self.nc = nc return nc def __exit__(self, exc_type, exc_value, exc_traceback): self.nc.close() @entity_task(log, writes=['gridded_data']) def process_dem(gdir): """Reads the DEM from the tiff, attempts to fill voids and apply smooth. The data is then written to `gridded_data.nc`. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data """ # open srtm tif-file: dem = read_geotiff_dem(gdir) # Grid nx = gdir.grid.nx ny = gdir.grid.ny # Correct the DEM valid_mask = np.isfinite(dem) if np.all(~valid_mask): raise InvalidDEMError('Not a single valid grid point in DEM') if np.any(~valid_mask): # We interpolate if np.sum(~valid_mask) > (0.25 * nx * ny): log.info('({}) more than 25% NaNs in DEM'.format(gdir.rgi_id)) xx, yy = gdir.grid.ij_coordinates pnan = np.nonzero(~valid_mask) pok = np.nonzero(valid_mask) points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T try: dem[pnan] = griddata(points, np.ravel(dem[pok]), inter, method='linear') except ValueError: raise InvalidDEMError('DEM interpolation not possible.') log.info(gdir.rgi_id + ': DEM needed interpolation.') gdir.add_to_diagnostics('dem_needed_interpolation', True) gdir.add_to_diagnostics('dem_invalid_perc', len(pnan[0]) / (nx * ny)) isfinite = np.isfinite(dem) if np.any(~isfinite): # interpolation will still leave NaNs in DEM: # extrapolate with NN if needed (e.g. coastal areas) xx, yy = gdir.grid.ij_coordinates pnan = np.nonzero(~isfinite) pok = np.nonzero(isfinite) points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T try: dem[pnan] = griddata(points, np.ravel(dem[pok]), inter, method='nearest') except ValueError: raise InvalidDEMError('DEM extrapolation not possible.') log.info(gdir.rgi_id + ': DEM needed extrapolation.') gdir.add_to_diagnostics('dem_needed_extrapolation', True) gdir.add_to_diagnostics('dem_extrapol_perc', len(pnan[0]) / (nx * ny)) if np.min(dem) == np.max(dem): raise InvalidDEMError('({}) min equal max in the DEM.' .format(gdir.rgi_id)) # Clip topography to 0 m a.s.l. utils.clip_min(dem, 0, out=dem) # Smooth DEM? if cfg.PARAMS['smooth_window'] > 0.: gsize = np.rint(cfg.PARAMS['smooth_window'] / gdir.grid.dx) smoothed_dem = gaussian_blur(dem, int(gsize)) else: smoothed_dem = dem.copy() # Clip topography to 0 m a.s.l. 
utils.clip_min(smoothed_dem, 0, out=smoothed_dem) # Write to file with GriddedNcdfFile(gdir, reset=True) as nc: v = nc.createVariable('topo', 'f4', ('y', 'x',), zlib=True) v.units = 'm' v.long_name = 'DEM topography' v[:] = dem v = nc.createVariable('topo_smoothed', 'f4', ('y', 'x',), zlib=True) v.units = 'm' v.long_name = ('DEM topography smoothed with radius: ' '{:.1} m'.format(cfg.PARAMS['smooth_window'])) v[:] = smoothed_dem # If there was some invalid data store this as well v = nc.createVariable('topo_valid_mask', 'i1', ('y', 'x',), zlib=True) v.units = '-' v.long_name = 'DEM validity mask according to geotiff input (1-0)' v[:] = valid_mask.astype(int) # add some meta stats and close nc.max_h_dem = np.nanmax(dem) nc.min_h_dem = np.nanmin(dem) @entity_task(log, writes=['gridded_data', 'geometries']) def glacier_masks(gdir): """Makes a gridded mask of the glacier outlines that can be used by OGGM. For a more robust solution (not OGGM compatible) see simple_glacier_masks. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data """ # In case nominal, just raise if gdir.is_nominal: raise GeometryError('{} is a nominal glacier.'.format(gdir.rgi_id)) if not os.path.exists(gdir.get_filepath('gridded_data')): # In a possible future, we might actually want to raise a # deprecation warning here process_dem(gdir) # Geometries geometry = gdir.read_shapefile('outlines').geometry[0] # Interpolate shape to a regular path glacier_poly_hr = _interp_polygon(geometry, gdir.grid.dx) # Transform geometry into grid coordinates # It has to be in pix center coordinates because of how skimage works def proj(x, y): grid = gdir.grid.center_grid return grid.transform(x, y, crs=grid.proj) glacier_poly_hr = shapely.ops.transform(proj, glacier_poly_hr) # simple trick to correct invalid polys: # http://stackoverflow.com/questions/20833344/ # fix-invalid-polygon-python-shapely glacier_poly_hr = glacier_poly_hr.buffer(0) if not glacier_poly_hr.is_valid: raise InvalidGeometryError('This glacier geometry is not valid.') # Rounded nearest pix glacier_poly_pix = _polygon_to_pix(glacier_poly_hr) if glacier_poly_pix.exterior is None: raise InvalidGeometryError('Problem in converting glacier geometry ' 'to grid resolution.') # Compute the glacier mask (currently: center pixels + touched) nx, ny = gdir.grid.nx, gdir.grid.ny glacier_mask = np.zeros((ny, nx), dtype=np.uint8) glacier_ext = np.zeros((ny, nx), dtype=np.uint8) (x, y) = glacier_poly_pix.exterior.xy glacier_mask[skdraw.polygon(np.array(y), np.array(x))] = 1 for gint in glacier_poly_pix.interiors: x, y = tuple2int(gint.xy) glacier_mask[skdraw.polygon(y, x)] = 0 glacier_mask[y, x] = 0 # on the nunataks, no x, y = tuple2int(glacier_poly_pix.exterior.xy) glacier_mask[y, x] = 1 glacier_ext[y, x] = 1 # Because of the 0 values at nunataks boundaries, some "Ice Islands" # can happen within nunataks (e.g.: RGI40-11.00062) # See if we can filter them out easily regions, nregions = label(glacier_mask, structure=label_struct) if nregions > 1: log.debug('(%s) we had to cut an island in the mask', gdir.rgi_id) # Check the size of those region_sizes = [np.sum(regions == r) for r in np.arange(1, nregions+1)] am = np.argmax(region_sizes) # Check not a strange glacier sr = region_sizes.pop(am) for ss in region_sizes: assert (ss / sr) < 0.1 glacier_mask[:] = 0 glacier_mask[np.where(regions == (am+1))] = 1 # Write geometries geometries = dict() geometries['polygon_hr'] = glacier_poly_hr geometries['polygon_pix'] = glacier_poly_pix 
geometries['polygon_area'] = geometry.area gdir.write_pickle(geometries, 'geometries') # write out the grids in the netcdf file with GriddedNcdfFile(gdir) as nc: if 'glacier_mask' not in nc.variables: v = nc.createVariable('glacier_mask', 'i1', ('y', 'x', ), zlib=True) v.units = '-' v.long_name = 'Glacier mask' else: v = nc.variables['glacier_mask'] v[:] = glacier_mask if 'glacier_ext' not in nc.variables: v = nc.createVariable('glacier_ext', 'i1', ('y', 'x', ), zlib=True) v.units = '-' v.long_name = 'Glacier external boundaries' else: v = nc.variables['glacier_ext'] v[:] = glacier_ext dem = nc.variables['topo'][:] if 'topo_valid_mask' not in nc.variables: msg = ('You seem to be running from old preprocessed directories. ' 'See https://github.com/OGGM/oggm/issues/1095 for a fix.') raise InvalidWorkflowError(msg) valid_mask = nc.variables['topo_valid_mask'][:] # Last sanity check based on the masked dem tmp_max = np.max(dem[np.where(glacier_mask == 1)]) tmp_min = np.min(dem[np.where(glacier_mask == 1)]) if tmp_max < (tmp_min + 1): raise InvalidDEMError('({}) min equal max in the masked DEM.' .format(gdir.rgi_id)) # Log DEM that needed processing within the glacier mask if gdir.get_diagnostics().get('dem_needed_interpolation', False): pnan = (valid_mask == 0) & glacier_mask gdir.add_to_diagnostics('dem_invalid_perc_in_mask', np.sum(pnan) / np.sum(glacier_mask)) # add some meta stats and close dem_on_g = dem[np.where(glacier_mask)] nc.max_h_glacier = np.nanmax(dem_on_g) nc.min_h_glacier = np.nanmin(dem_on_g) @entity_task(log, writes=['gridded_data', 'hypsometry']) def simple_glacier_masks(gdir, write_hypsometry=False): """Compute glacier masks based on much simpler rules than OGGM's default. This is therefore more robust: we use this function to compute glacier hypsometries. 
Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data write_hypsometry : bool whether to write out the hypsometry file or not - it is used by e.g, rgitools """ # In case nominal, just raise if gdir.is_nominal: raise GeometryError('{} is a nominal glacier.'.format(gdir.rgi_id)) if not os.path.exists(gdir.get_filepath('gridded_data')): # In a possible future, we might actually want to raise a # deprecation warning here process_dem(gdir) # Geometries geometry = gdir.read_shapefile('outlines').geometry[0] # rio metadata with rasterio.open(gdir.get_filepath('dem'), 'r', driver='GTiff') as ds: data = ds.read(1).astype(rasterio.float32) profile = ds.profile # simple trick to correct invalid polys: # http://stackoverflow.com/questions/20833344/ # fix-invalid-polygon-python-shapely geometry = geometry.buffer(0) if not geometry.is_valid: raise InvalidDEMError('This glacier geometry is not valid.') # Compute the glacier mask using rasterio # Small detour as mask only accepts DataReader objects profile['dtype'] = 'int16' profile.pop('nodata', None) with rasterio.io.MemoryFile() as memfile: with memfile.open(**profile) as dataset: dataset.write(data.astype(np.int16)[np.newaxis, ...]) dem_data = rasterio.open(memfile.name) masked_dem, _ = riomask(dem_data, [shpg.mapping(geometry)], filled=False) glacier_mask = ~masked_dem[0, ...].mask # Same without nunataks with rasterio.io.MemoryFile() as memfile: with memfile.open(**profile) as dataset: dataset.write(data.astype(np.int16)[np.newaxis, ...]) dem_data = rasterio.open(memfile.name) poly = shpg.mapping(shpg.Polygon(geometry.exterior)) masked_dem, _ = riomask(dem_data, [poly], filled=False) glacier_mask_nonuna = ~masked_dem[0, ...].mask # Glacier exterior excluding nunataks erode = binary_erosion(glacier_mask_nonuna) glacier_ext = glacier_mask_nonuna ^ erode glacier_ext = np.where(glacier_mask_nonuna, glacier_ext, 0) dem = read_geotiff_dem(gdir) # Last sanity check based on the masked dem tmp_max = np.nanmax(dem[glacier_mask]) tmp_min = np.nanmin(dem[glacier_mask]) if tmp_max < (tmp_min + 1): raise InvalidDEMError('({}) min equal max in the masked DEM.' .format(gdir.rgi_id)) # write out the grids in the netcdf file with GriddedNcdfFile(gdir) as nc: if 'glacier_mask' not in nc.variables: v = nc.createVariable('glacier_mask', 'i1', ('y', 'x', ), zlib=True) v.units = '-' v.long_name = 'Glacier mask' else: v = nc.variables['glacier_mask'] v[:] = glacier_mask if 'glacier_ext' not in nc.variables: v = nc.createVariable('glacier_ext', 'i1', ('y', 'x', ), zlib=True) v.units = '-' v.long_name = 'Glacier external boundaries' else: v = nc.variables['glacier_ext'] v[:] = glacier_ext # Log DEM that needed processing within the glacier mask if 'topo_valid_mask' not in nc.variables: msg = ('You seem to be running from old preprocessed directories. ' 'See https://github.com/OGGM/oggm/issues/1095 for a fix.') raise InvalidWorkflowError(msg) valid_mask = nc.variables['topo_valid_mask'][:] if gdir.get_diagnostics().get('dem_needed_interpolation', False): pnan = (valid_mask == 0) & glacier_mask gdir.add_to_diagnostics('dem_invalid_perc_in_mask', np.sum(pnan) / np.sum(glacier_mask)) # add some meta stats and close nc.max_h_dem = np.nanmax(dem) nc.min_h_dem = np.nanmin(dem) dem_on_g = dem[np.where(glacier_mask)] nc.max_h_glacier = np.nanmax(dem_on_g) nc.min_h_glacier = np.nanmin(dem_on_g) # Last sanity check if nc.max_h_glacier < (nc.min_h_glacier + 1): raise InvalidDEMError('({}) min equal max in the masked DEM.' 
.format(gdir.rgi_id))

    # hypsometry if asked for
    if not write_hypsometry:
        return

    bsize = 50.
    dem_on_ice = dem[glacier_mask]
    bins = np.arange(nicenumber(dem_on_ice.min(), bsize, lower=True),
                     nicenumber(dem_on_ice.max(), bsize) + 0.01, bsize)

    h, _ = np.histogram(dem_on_ice, bins)
    h = h / np.sum(h) * 1000  # in permil

    # We want to convert the bins to ints but preserve their sum to 1000
    # Start with everything rounded down, then round up the numbers with the
    # highest fractional parts until the desired sum is reached.
    hi = np.floor(h).astype(int)
    hup = np.ceil(h).astype(int)
    aso = np.argsort(hup - h)
    for i in aso:
        hi[i] = hup[i]
        if np.sum(hi) == 1000:
            break

    # slope
    sy, sx = np.gradient(dem, gdir.grid.dx)
    aspect = np.arctan2(np.mean(-sx[glacier_mask]), np.mean(sy[glacier_mask]))
    aspect = np.rad2deg(aspect)
    if aspect < 0:
        aspect += 360
    slope = np.arctan(np.sqrt(sx ** 2 + sy ** 2))
    avg_slope = np.rad2deg(np.mean(slope[glacier_mask]))

    # write
    df = pd.DataFrame()
    df['RGIId'] = [gdir.rgi_id]
    df['GLIMSId'] = [gdir.glims_id]
    df['Zmin'] = [dem_on_ice.min()]
    df['Zmax'] = [dem_on_ice.max()]
    df['Zmed'] = [np.median(dem_on_ice)]
    df['Area'] = [gdir.rgi_area_km2]
    df['Slope'] = [avg_slope]
    df['Aspect'] = [aspect]
    for b, bs in zip(hi, (bins[1:] + bins[:-1])/2):
        df['{}'.format(np.round(bs).astype(int))] = [b]
    df.to_csv(gdir.get_filepath('hypsometry'), index=False)


@entity_task(log, writes=['glacier_mask'])
def rasterio_glacier_mask(gdir, source=None):
    """Writes a 1-0 glacier mask GeoTiff with the same dimensions as dem.tif

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier in question
    source : str
        - None (default): the task reads `dem.tif` from the GDir root
        - 'ALL': try to open any folder from `utils.DEM_SOURCES` and use first
        - any of `utils.DEM_SOURCES`: try only that one
    """

    if source is None:
        dempath = gdir.get_filepath('dem')
    elif source in utils.DEM_SOURCES:
        dempath = os.path.join(gdir.dir, source, 'dem.tif')
    else:
        for src in utils.DEM_SOURCES:
            dempath = os.path.join(gdir.dir, src, 'dem.tif')
            if os.path.isfile(dempath):
                break

    if not os.path.isfile(dempath):
        raise ValueError('The specified source does not give a valid DEM file')

    # read dem profile
    with rasterio.open(dempath, 'r', driver='GTiff') as ds:
        profile = ds.profile
        # don't even bother reading the actual DEM, just mimic it
        data = np.zeros((ds.height, ds.width))

    # Read RGI outlines
    geometry = gdir.read_shapefile('outlines').geometry[0]

    # simple trick to correct invalid polys:
    # http://stackoverflow.com/questions/20833344/
    # fix-invalid-polygon-python-shapely
    geometry = geometry.buffer(0)
    if not geometry.is_valid:
        raise InvalidDEMError('This glacier geometry is not valid.')

    # Compute the glacier mask using rasterio
    # Small detour as mask only accepts DataReader objects
    with rasterio.io.MemoryFile() as memfile:
        with memfile.open(**profile) as dataset:
            dataset.write(data.astype(profile['dtype'])[np.newaxis, ...])
        dem_data = rasterio.open(memfile.name)
        masked_dem, _ = riomask(dem_data, [shpg.mapping(geometry)],
                                filled=False)
    glacier_mask = ~masked_dem[0, ...].mask

    # parameters for the new tif
    nodata = -32767
    dtype = rasterio.int16  # let's use integer
    out = glacier_mask.astype(dtype)

    # and check for sanity
    if not np.all(np.unique(out) == np.array([0, 1])):
        raise InvalidDEMError('({}) masked DEM does not consist of 0/1 only.'
.format(gdir.rgi_id)) # Update existing profile for output profile.update({ 'dtype': dtype, 'nodata': nodata, }) with rasterio.open(gdir.get_filepath('glacier_mask'), 'w', **profile) as r: r.write(out.astype(dtype), 1) @entity_task(log, writes=['gridded_data']) def gridded_attributes(gdir): """Adds attributes to the gridded file, useful for thickness interpolation. This could be useful for distributed ice thickness models. The raster data are added to the gridded_data file. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data """ # Variables grids_file = gdir.get_filepath('gridded_data') with ncDataset(grids_file) as nc: topo_smoothed = nc.variables['topo_smoothed'][:] glacier_mask = nc.variables['glacier_mask'][:] # Glacier exterior including nunataks erode = binary_erosion(glacier_mask) glacier_ext = glacier_mask ^ erode glacier_ext = np.where(glacier_mask == 1, glacier_ext, 0) # Intersects between glaciers gdfi = gpd.GeoDataFrame(columns=['geometry']) if gdir.has_file('intersects'): # read and transform to grid gdf = gdir.read_shapefile('intersects') salem.transform_geopandas(gdf, to_crs=gdir.grid, inplace=True) gdfi = pd.concat([gdfi, gdf[['geometry']]]) # Ice divide mask # Probably not the fastest way to do this, but it works dist = np.array([]) jj, ii = np.where(glacier_ext) for j, i in zip(jj, ii): dist = np.append(dist, np.min(gdfi.distance(shpg.Point(i, j)))) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=RuntimeWarning) pok = np.where(dist <= 1) glacier_ext_intersect = glacier_ext * 0 glacier_ext_intersect[jj[pok], ii[pok]] = 1 # Distance from border mask - Scipy does the job dx = gdir.grid.dx dis_from_border = 1 + glacier_ext_intersect - glacier_ext dis_from_border = distance_transform_edt(dis_from_border) * dx # Slope glen_n = cfg.PARAMS['glen_n'] sy, sx = np.gradient(topo_smoothed, dx, dx) slope = np.arctan(np.sqrt(sy**2 + sx**2)) min_slope = np.deg2rad(cfg.PARAMS['distributed_inversion_min_slope']) slope_factor = utils.clip_array(slope, min_slope, np.pi/2) slope_factor = 1 / slope_factor**(glen_n / (glen_n+2)) aspect = np.arctan2(-sx, sy) aspect[aspect < 0] += 2 * np.pi with ncDataset(grids_file, 'a') as nc: vn = 'glacier_ext_erosion' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'i1', ('y', 'x', )) v.units = '-' v.long_name = 'Glacier exterior with binary erosion method' v[:] = glacier_ext vn = 'ice_divides' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'i1', ('y', 'x', )) v.units = '-' v.long_name = 'Glacier ice divides' v[:] = glacier_ext_intersect vn = 'slope' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'f4', ('y', 'x', )) v.units = 'rad' v.long_name = 'Local slope based on smoothed topography' v[:] = slope vn = 'aspect' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'f4', ('y', 'x', )) v.units = 'rad' v.long_name = 'Local aspect based on smoothed topography' v[:] = aspect vn = 'slope_factor' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'f4', ('y', 'x', )) v.units = '-' v.long_name = 'Slope factor as defined in Farinotti et al 2009' v[:] = slope_factor vn = 'dis_from_border' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'f4', ('y', 'x', )) v.units = 'm' v.long_name = 'Distance from glacier boundaries' v[:] = dis_from_border def _all_inflows(cls, cl): """Find all centerlines flowing into the centerline examined. 
Parameters ---------- cls : list all centerlines of the examined glacier cline : Centerline centerline to control Returns ------- list of strings of centerlines """ ixs = [str(cls.index(cl.inflows[i])) for i in range(len(cl.inflows))] for cl in cl.inflows: ixs.extend(_all_inflows(cls, cl)) return ixs @entity_task(log) def gridded_mb_attributes(gdir): """Adds mass-balance related attributes to the gridded data file. This could be useful for distributed ice thickness models. The raster data are added to the gridded_data file. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data """ from oggm.core.massbalance import LinearMassBalance, ConstantMassBalance from oggm.core.centerlines import line_inflows # Get the input data with ncDataset(gdir.get_filepath('gridded_data')) as nc: topo_2d = nc.variables['topo_smoothed'][:] glacier_mask_2d = nc.variables['glacier_mask'][:] glacier_mask_2d = glacier_mask_2d == 1 catchment_mask_2d = glacier_mask_2d * np.NaN cls = gdir.read_pickle('centerlines') # Catchment areas cis = gdir.read_pickle('geometries')['catchment_indices'] for j, ci in enumerate(cis): catchment_mask_2d[tuple(ci.T)] = j # Make everything we need flat catchment_mask = catchment_mask_2d[glacier_mask_2d].astype(int) topo = topo_2d[glacier_mask_2d] # Prepare the distributed mass-balance data rho = cfg.PARAMS['ice_density'] dx2 = gdir.grid.dx ** 2 # Linear def to_minimize(ela_h): mbmod = LinearMassBalance(ela_h[0]) smb = mbmod.get_annual_mb(heights=topo) return np.sum(smb)**2 ela_h = optimization.minimize(to_minimize, [0.], method='Powell') mbmod = LinearMassBalance(float(ela_h['x'])) lin_mb_on_z = mbmod.get_annual_mb(heights=topo) * cfg.SEC_IN_YEAR * rho if not np.isclose(np.sum(lin_mb_on_z), 0, atol=10): raise RuntimeError('Spec mass-balance should be zero but is: {}' .format(np.sum(lin_mb_on_z))) # Normal OGGM (a bit tweaked) df = gdir.read_json('local_mustar') def to_minimize(mu_star): mbmod = ConstantMassBalance(gdir, mu_star=mu_star, bias=0, check_calib_params=False, y0=df['t_star']) smb = mbmod.get_annual_mb(heights=topo) return np.sum(smb)**2 mu_star = optimization.minimize(to_minimize, [0.], method='Powell') mbmod = ConstantMassBalance(gdir, mu_star=float(mu_star['x']), bias=0, check_calib_params=False, y0=df['t_star']) oggm_mb_on_z = mbmod.get_annual_mb(heights=topo) * cfg.SEC_IN_YEAR * rho if not np.isclose(np.sum(oggm_mb_on_z), 0, atol=10): raise RuntimeError('Spec mass-balance should be zero but is: {}' .format(np.sum(oggm_mb_on_z))) # Altitude based mass balance catch_area_above_z = topo * np.NaN lin_mb_above_z = topo * np.NaN oggm_mb_above_z = topo * np.NaN for i, h in enumerate(topo): catch_area_above_z[i] = np.sum(topo >= h) * dx2 lin_mb_above_z[i] = np.sum(lin_mb_on_z[topo >= h]) * dx2 oggm_mb_above_z[i] = np.sum(oggm_mb_on_z[topo >= h]) * dx2 # Hardest part - MB per catchment catchment_area = topo * np.NaN lin_mb_above_z_on_catch = topo * np.NaN oggm_mb_above_z_on_catch = topo * np.NaN # First, find all inflows indices and min altitude per catchment inflows = [] lowest_h = [] for i, cl in enumerate(cls): lowest_h.append(np.min(topo[catchment_mask == i])) inflows.append([cls.index(l) for l in line_inflows(cl, keep=False)]) for i, (catch_id, h) in enumerate(zip(catchment_mask, topo)): if h == np.min(topo): t = 1 # Find the catchment area of the point itself by eliminating points # below the point altitude. 
We assume we keep all of them first,
        # then remove those we don't want
        sel_catchs = inflows[catch_id].copy()
        for catch in inflows[catch_id]:
            if h >= lowest_h[catch]:
                for cc in np.append(inflows[catch], catch):
                    try:
                        sel_catchs.remove(cc)
                    except ValueError:
                        pass

        # At the very least we need our own catchment
        sel_catchs.append(catch_id)

        # Then select all the catchment points
        sel_points = np.isin(catchment_mask, sel_catchs)

        # And keep the ones above our altitude
        sel_points = sel_points & (topo >= h)

        # Compute
        lin_mb_above_z_on_catch[i] = np.sum(lin_mb_on_z[sel_points]) * dx2
        oggm_mb_above_z_on_catch[i] = np.sum(oggm_mb_on_z[sel_points]) * dx2
        catchment_area[i] = np.sum(sel_points) * dx2

    # Make 2D again
    def _fill_2d_like(data):
        out = topo_2d * np.NaN
        out[glacier_mask_2d] = data
        return out

    catchment_area = _fill_2d_like(catchment_area)
    catch_area_above_z = _fill_2d_like(catch_area_above_z)
    lin_mb_above_z = _fill_2d_like(lin_mb_above_z)
    oggm_mb_above_z = _fill_2d_like(oggm_mb_above_z)
    lin_mb_above_z_on_catch = _fill_2d_like(lin_mb_above_z_on_catch)
    oggm_mb_above_z_on_catch = _fill_2d_like(oggm_mb_above_z_on_catch)

    # Save to file
    with ncDataset(gdir.get_filepath('gridded_data'), 'a') as nc:

        vn = 'catchment_area'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x', ))
        v.units = 'm^2'
        v.long_name = 'Catchment area above point'
        v.description = ('This is a very crude method: just the area above '
                         "the point's elevation on glacier.")
        v[:] = catch_area_above_z

        vn = 'catchment_area_on_catch'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x',))
        v.units = 'm^2'
        v.long_name = 'Catchment area above point on flowline catchments'
        v.description = ('Uses the catchments masks of the flowlines to '
                         'compute the area above the altitude of the given '
                         'point.')
        v[:] = catchment_area

        vn = 'lin_mb_above_z'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x', ))
        v.units = 'kg/year'
        v.long_name = 'MB above point from linear MB model, without catchments'
        v.description = ('Mass-balance cumulated above the altitude of the'
                         'point, hence in unit of flux. Note that it is '
                         'a coarse approximation of the real flux. '
                         'The mass-balance model is a simple linear function'
                         'of altitude.')
        v[:] = lin_mb_above_z

        vn = 'lin_mb_above_z_on_catch'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x', ))
        v.units = 'kg/year'
        v.long_name = 'MB above point from linear MB model, with catchments'
        v.description = ('Mass-balance cumulated above the altitude of the'
                         'point in a flowline catchment, hence in unit of '
                         'flux. Note that it is a coarse approximation of the '
                         'real flux. The mass-balance model is a simple '
                         'linear function of altitude.')
        v[:] = lin_mb_above_z_on_catch

        vn = 'oggm_mb_above_z'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', ('y', 'x', ))
        v.units = 'kg/year'
        v.long_name = 'MB above point from OGGM MB model, without catchments'
        v.description = ('Mass-balance cumulated above the altitude of the'
                         'point, hence in unit of flux. Note that it is '
                         'a coarse approximation of the real flux. 
' 'The mass-balance model is a calibrated temperature ' 'index model like OGGM.') v[:] = oggm_mb_above_z vn = 'oggm_mb_above_z_on_catch' if vn in nc.variables: v = nc.variables[vn] else: v = nc.createVariable(vn, 'f4', ('y', 'x', )) v.units = 'kg/year' v.long_name = 'MB above point from OGGM MB model, with catchments' v.description = ('Mass-balance cumulated above the altitude of the' 'point in a flowline catchment, hence in unit of ' 'flux. Note that it is a coarse approximation of the ' 'real flux. The mass-balance model is a calibrated ' 'temperature index model like OGGM.') v[:] = oggm_mb_above_z_on_catch def merged_glacier_masks(gdir, geometry): """Makes a gridded mask of a merged glacier outlines. This is a simplified version of glacier_masks. We don't need fancy corrections or smoothing here: The flowlines for the actual model run are based on a proper call of glacier_masks. This task is only to get outlines etc. for visualization! Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data geometry: shapely.geometry.multipolygon.MultiPolygon united outlines of the merged glaciers """ # open srtm tif-file: dem = read_geotiff_dem(gdir) if np.min(dem) == np.max(dem): raise RuntimeError('({}) min equal max in the DEM.' .format(gdir.rgi_id)) # Clip topography to 0 m a.s.l. utils.clip_min(dem, 0, out=dem) # Interpolate shape to a regular path glacier_poly_hr = tolist(geometry) for nr, poly in enumerate(glacier_poly_hr): # transform geometry to map _geometry = salem.transform_geometry(poly, to_crs=gdir.grid.proj) glacier_poly_hr[nr] = _interp_polygon(_geometry, gdir.grid.dx) glacier_poly_hr = shpg.MultiPolygon(glacier_poly_hr) # Transform geometry into grid coordinates # It has to be in pix center coordinates because of how skimage works def proj(x, y): grid = gdir.grid.center_grid return grid.transform(x, y, crs=grid.proj) glacier_poly_hr = shapely.ops.transform(proj, glacier_poly_hr) # simple trick to correct invalid polys: # http://stackoverflow.com/questions/20833344/ # fix-invalid-polygon-python-shapely glacier_poly_hr = glacier_poly_hr.buffer(0) if not glacier_poly_hr.is_valid: raise RuntimeError('This glacier geometry is not valid.') # Rounded geometry to nearest nearest pix # I can not use _polyg # glacier_poly_pix = _polygon_to_pix(glacier_poly_hr) def project(x, y): return np.rint(x).astype(np.int64), np.rint(y).astype(np.int64) glacier_poly_pix = shapely.ops.transform(project, glacier_poly_hr) glacier_poly_pix_iter = tolist(glacier_poly_pix) # Compute the glacier mask (currently: center pixels + touched) nx, ny = gdir.grid.nx, gdir.grid.ny glacier_mask = np.zeros((ny, nx), dtype=np.uint8) glacier_ext = np.zeros((ny, nx), dtype=np.uint8) for poly in glacier_poly_pix_iter: (x, y) = poly.exterior.xy glacier_mask[skdraw.polygon(np.array(y), np.array(x))] = 1 for gint in poly.interiors: x, y = tuple2int(gint.xy) glacier_mask[skdraw.polygon(y, x)] = 0 glacier_mask[y, x] = 0 # on the nunataks, no x, y = tuple2int(poly.exterior.xy) glacier_mask[y, x] = 1 glacier_ext[y, x] = 1 # Last sanity check based on the masked dem tmp_max = np.max(dem[np.where(glacier_mask == 1)]) tmp_min = np.min(dem[np.where(glacier_mask == 1)]) if tmp_max < (tmp_min + 1): raise RuntimeError('({}) min equal max in the masked DEM.' 
.format(gdir.rgi_id)) # write out the grids in the netcdf file with GriddedNcdfFile(gdir, reset=True) as nc: v = nc.createVariable('topo', 'f4', ('y', 'x', ), zlib=True) v.units = 'm' v.long_name = 'DEM topography' v[:] = dem v = nc.createVariable('glacier_mask', 'i1', ('y', 'x', ), zlib=True) v.units = '-' v.long_name = 'Glacier mask' v[:] = glacier_mask v = nc.createVariable('glacier_ext', 'i1', ('y', 'x', ), zlib=True) v.units = '-' v.long_name = 'Glacier external boundaries' v[:] = glacier_ext # add some meta stats and close nc.max_h_dem = np.nanmax(dem) nc.min_h_dem = np.nanmin(dem) dem_on_g = dem[np.where(glacier_mask)] nc.max_h_glacier = np.max(dem_on_g) nc.min_h_glacier = np.min(dem_on_g) geometries = dict() geometries['polygon_hr'] = glacier_poly_hr geometries['polygon_pix'] = glacier_poly_pix geometries['polygon_area'] = geometry.area gdir.write_pickle(geometries, 'geometries') @entity_task(log) def gridded_data_var_to_geotiff(gdir, varname, fname=None): """Writes a NetCDF variable to a georeferenced geotiff file. The geotiff file will be written in the gdir directory. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` where to write the data varname : str variable name in gridded_data.nc fname : str output file name (should end with `tif`), default is `varname.tif` """ # Assign the output path if fname is None: fname = varname+'.tif' outpath = os.path.join(gdir.dir, fname) # Locate gridded_data.nc file and read it nc_path = gdir.get_filepath('gridded_data') with xr.open_dataset(nc_path) as ds: # Prepare the profile dict crs = ds.pyproj_srs var = ds[varname] grid = ds.salem.grid data = var.data data_type = data.dtype.name height, width = var.data.shape dx, dy = grid.dx, grid.dy x0, y0 = grid.x0, grid.y0 profile = {'driver': 'GTiff', 'dtype': data_type, 'nodata': None, 'width': width, 'height': height, 'count': 1, 'crs': crs, 'transform': rasterio.Affine(dx, 0.0, x0, 0.0, dy, y0), 'tiled': True, 'interleave': 'band'} # Write GeoTiff file with rasterio.open(outpath, 'w', **profile) as dst: dst.write(data, 1)
TimoRoth/oggm
oggm/core/gis.py
Python
bsd-3-clause
55,626
[ "Gaussian", "NetCDF" ]
8cd35807458690fcd72fa6d852f8b5cd63ac9c5febbe2edb564f9efc2cd940b5
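The `process_dem` task in the record above fills DEM voids in two passes: linear interpolation from the valid grid points, then nearest-neighbour extrapolation for whatever linear interpolation cannot reach (points outside the convex hull of valid data, e.g. coastal strips). A minimal standalone sketch of that two-pass pattern on a synthetic grid, assuming only numpy and scipy; the array names are illustrative, not OGGM API:

import numpy as np
from scipy.interpolate import griddata

# Synthetic 20x20 "DEM" with an interior hole and a NaN edge strip
dem = np.random.RandomState(0).uniform(100, 200, (20, 20))
dem[5:9, 5:9] = np.nan   # interior void -> reachable by linear interpolation
dem[:, 0] = np.nan       # edge strip -> needs nearest-neighbour extrapolation

yy, xx = np.indices(dem.shape)
valid = np.isfinite(dem)

# Pass 1: linear interpolation from the valid points
points = np.column_stack((yy[valid], xx[valid]))
targets = np.column_stack((yy[~valid], xx[~valid]))
dem[~valid] = griddata(points, dem[valid], targets, method='linear')

# Pass 2: linear leaves NaNs outside the convex hull -> fill with nearest
still_nan = ~np.isfinite(dem)
if np.any(still_nan):
    valid = np.isfinite(dem)
    points = np.column_stack((yy[valid], xx[valid]))
    targets = np.column_stack((yy[still_nan], xx[still_nan]))
    dem[still_nan] = griddata(points, dem[valid], targets, method='nearest')

assert np.all(np.isfinite(dem))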
# -*- coding: UTF-8 -*-
# Practical 5
# Galaxy Data Structures

gMap = [[0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0]]
name = ""
coords = (0, 0)
data = {}

def recruit(name, coords):
    data[name] = coords
    row = gMap[coords[1]]
    row[coords[0]] = "E"

def retire(name):
    if name not in data:
        print("Sorry %s was not found in the database!" % name)
    else:
        coords = data[name]
        row = gMap[coords[1]]
        row[coords[0]] = 0

def printMap(gMap):
    for row in gMap:
        for g in row:
            print("%s " % g, end="")
        print("")

def ships(data):
    print("%d Ships." % len(data))

def crew(data):
    for x in data:
        print("Name: %s" % x)
        coords = data[x]
        coords = [x+1 for x in coords]
        print("Coords: %s\n" % coords)

quit = False
while not quit:
    ans = input("What do you want to do? 1.Recruit 2.Retire 3.PrintMap 4.PrintShips 5.PrintCrew 6.Quit (1, 2, 3, 4, 5 or 6): ")
    if ans == "1":
        name = input("Who do you want to recruit? ")
        coords = input("Where do you want them given in co-ords e.g. '1,4': ")
        coords = [int(x) for x in coords.split(",")]
        x, y = int(coords[0]), int(coords[1])
        coords = (x-1, y-1)
        recruit(name, coords)
    elif ans == "2":
        name = input("Who do you want to retire? ")
        retire(name)
    elif ans == "3":
        printMap(gMap)
    elif ans == "4":
        ships(data)
    elif ans == "5":
        crew(data)
    elif ans == "6":
        print("Quitting Program...")
        quit = True
    else:
        print("Command not understood!")
AlexNewson/UniPracticals
Solutions/Practical 5/Excercise.py
Python
mit
1,655
[ "Galaxy" ]
a95c732aac56d32beac86f0e5395d0d8d864b894f02a3fbec4a0dfda2654fb85
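The practical above keeps two parallel structures, a name-to-coordinates dict and a 2D grid, that every operation must update together (note that its `retire` clears the grid cell but leaves the dict entry, so later ship counts still include retired crew). A small sketch, assuming a hypothetical `GalaxyMap` wrapper class that is not part of the exercise, coupling both updates in one method so the two structures cannot drift apart:

class GalaxyMap:
    def __init__(self, size=5):
        self.grid = [[0] * size for _ in range(size)]
        self.crew = {}  # name -> (x, y), zero-based

    def recruit(self, name, coords):
        x, y = coords
        self.crew[name] = (x, y)
        self.grid[y][x] = "E"

    def retire(self, name):
        if name not in self.crew:
            print("Sorry %s was not found in the database!" % name)
            return
        # pop removes the dict entry and clears the grid in one place
        x, y = self.crew.pop(name)
        self.grid[y][x] = 0

galaxy = GalaxyMap()
galaxy.recruit("Alex", (0, 3))
galaxy.retire("Alex")
assert galaxy.crew == {} and galaxy.grid[3][0] == 0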
#!/usr/bin/env python

# An example from scipy cookbook demonstrating the use of numpy arrays in vtk

import vtk
from numpy import *

basename = "output"
infofile = basename+".txt"
filename = basename+".raw"

f = open(infofile,"r")
nx,ny,nz = [int(x) for x in f.readline().split()]
f.close()
print nx,ny,nz

data = fromfile(filename, dtype=float)

# We begin by creating the data we want to render.
# For this tutorial, we create a 3D-image containing three overlapping cubes.
# This data can of course easily be replaced by data from a medical CT-scan or anything else three dimensional.
# The only limit is that the data must be reduced to unsigned 8 bit or 16 bit integers.
#data_matrix = zeros([750, 750, 750], dtype=uint8)
#data_matrix[0:350, 0:350, 0:350] = 50
#data_matrix[250:550, 250:550, 250:550] = 100
#data_matrix[450:740, 450:740, 450:740] = 150

maxval = max(data)
minval = min(data)
print maxval
print minval
# Scale to 0-255 and cast to 16 bit unsigned integers so the raw bytes match
# the scalar type declared to the importer below (copying float64 bytes while
# declaring unsigned short would be misinterpreted).
data_matrix = ((data - minval)/(maxval-minval)*255).astype(uint16)
print min(data_matrix)
print max(data_matrix)
print (all(data_matrix < 256))

# For VTK to be able to use the data, it must be stored as a VTK-image. This can be done by the vtkImageImport-class which
# imports raw data and stores it.
dataImporter = vtk.vtkImageImport()
# The previously created array is converted to a string of chars and imported.
data_string = data_matrix.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
# The type of the newly imported data is set to unsigned short (uint16)
#dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetDataScalarTypeToUnsignedShort()
# Because the data that is imported only contains an intensity value (it isn't RGB-coded or something similar), the importer
# must be told this is the case.
dataImporter.SetNumberOfScalarComponents(1)
# The following two functions describe how the data is stored and the dimensions of the array it is stored in. For this
# simple case, all axes are of length 75 and begin with the first element. For other data, this is probably not the case.
# I have to admit however, that I honestly don't know the difference between SetDataExtent() and SetWholeExtent() although
# VTK complains if not both are used.
#dataImporter.SetDataExtent(0, 749, 0, 749, 0, 749)
#dataImporter.SetWholeExtent(0, 749, 0, 749, 0, 749)
dataImporter.SetDataExtent(0, nx-1, 0, ny-1, 0, nz-1)
dataImporter.SetWholeExtent(0, nx-1, 0, ny-1, 0, nz-1)

# The following class is used to store transparency-values for later retrieval. In our case, we want the value 0 to be
# completely opaque whereas the three different cubes are given different transparency-values to show how it works.
alphaChannelFunc = vtk.vtkPiecewiseFunction()
alphaChannelFunc.AddPoint(0, 0.0)
alphaChannelFunc.AddPoint(50, 0.5)
alphaChannelFunc.AddPoint(100, 0.3)
alphaChannelFunc.AddPoint(150, 0.02)

# This class stores color data and can create color tables from a few color points. For this demo, we want the three cubes
# to be of the colors red green and blue.
colorFunc = vtk.vtkColorTransferFunction()
colorFunc.AddRGBPoint(50, 1.0, 0.0, 0.0)
colorFunc.AddRGBPoint(100, 0.0, 1.0, 0.0)
colorFunc.AddRGBPoint(150, 0.0, 0.0, 1.0)

# The previous two classes stored properties. Because we want to apply these properties to the volume we want to render,
# we have to store them in a class that stores volume properties.
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
volumeProperty.SetScalarOpacity(alphaChannelFunc)

# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()

# We can finally create our volume. We also have to specify the data for it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())

# The class vtkVolume is used to pair the previously declared volume as well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)

# With almost everything else ready, it's time to initialize the renderer and window, as well as creating a method for exiting the application
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)

# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set background color to black ...
renderer.SetBackground(0,0,0)
# ... and set window size.
renderWin.SetSize(1024, 768)

# A simple function to be called when the user decides to quit the application.
def exitCheck(obj, event):
    if obj.GetEventPending() != 0:
        obj.SetAbortRender(1)

# Tell the application to use the function as an exit check.
renderWin.AddObserver("AbortCheckEvent", exitCheck)

renderInteractor.Initialize()
# Because nothing will be rendered without any input, we order the first render manually before control is handed over to the main-loop.
renderWin.Render()
renderInteractor.Start()
egaburov/volren
astra/render.py
Python
apache-2.0
5,161
[ "VTK" ]
6e14bd77bf694bcc16c6c9a880405cf0dc165eb156aeecaf287240655276493e
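The import step in the record above depends on the numpy array's byte layout matching the scalar type declared to vtkImageImport. A numpy-only sketch of that scale-and-cast step, runnable without VTK installed; the dtype names mirror the VTK calls (SetDataScalarTypeToUnsignedChar for uint8, SetDataScalarTypeToUnsignedShort for uint16), and the helper name is illustrative:

import numpy as np

def to_scalar_bytes(data, dtype=np.uint8):
    """Min-max scale a float array to the full range of an integer dtype
    and return the raw bytes that vtkImageImport would copy."""
    info = np.iinfo(dtype)
    lo, hi = float(data.min()), float(data.max())
    scaled = (data - lo) / (hi - lo) * info.max  # 0 .. dtype max
    return scaled.astype(dtype).tobytes()

raw = np.linspace(-1.0, 1.0, 8)
buf = to_scalar_bytes(raw, np.uint16)
assert len(buf) == raw.size * 2  # two bytes per uint16 voxel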
""" Created on Thurs, 9/04/2014 @author: mdistasio Uses BioPython """ import os import glob import math import random import re import numpy as np import itertools import subprocess class Shuffler: def __init__(self, p): #p = Paths object (from mmdSetPaths.py) np.set_printoptions(suppress=True) np.random.seed(10081981) self.p = p def setup(self): self.inputFilenames = glob.glob(os.path.join(self.p.fastq_dir,'*_1.fastq')) if (len(self.inputFilenames) < 1): self.inputFilenames = glob.glob(os.path.join(self.p.fastq_dir,'*.fastq')) self.singleEnd = True else: self.singleEnd = False self.ReadFileLens() def run(self): print("{} line counts found in file.".format(len(self.linecounts))) print("{} input files found.".format(len(self.inputFilenames))) self.OpenOutputFiles(len(self.linecounts)) c = 0 for f in self.inputFilenames: f1 = f if1 = open(f1,'r') if not self.singleEnd: f2 = re.sub('_1\.fastq','_2.fastq', f1) if2 = open(f2,'r') while 1: read1 = [] for i in range(0,4): line1 = if1.readline() read1.append(line1) if not self.singleEnd: read2 = [] for i in range(0,4): line2 = if2.readline() read2.append(line2) else: line2 = True if not line1 or not line2: break else: fn = self.ChooseOutputFile() for i in range(0,4): self.outfileHandles_1[fn].write(read1[i]) if not self.singleEnd: self.outfileHandles_2[fn].write(read2[i]) c = c + 1 print("Done reading file {}. [{}/{}]".format(f, c, len(self.inputFilenames))) self.CloseOutputFiles() def file_len(self, fname): with open(fname) as f: i = -1 for i, l in enumerate(f): pass return i + 1 def ReadFileLens(self, fname=0): # This function takes a file of line counts. # Generate it with: # 'wc -l *.fastq > FASTQ_linecounts.txt' if fname == 0: fname = os.path.join(self.p.fastq_dir, "FASTQ_linecounts.txt") if not os.path.exists(fname): print("Counting lines in FASTQ files, using system call to wc -l") try: os.system("wc -l " + os.path.join(self.p.fastq_dir,"*.fastq") + " > " + fname) except OSError: print("Counting lines in fastq files with wc failed.") return -1 print("Reading line counts for FASTQ files from " + fname) if self.singleEnd: self.linecounts = np.genfromtxt(fname, delimiter=" ")[:-1:1,0] else: self.linecounts = np.genfromtxt(fname, delimiter=" ")[:-1:2,0] self.fileChooserIndex = np.cumsum(self.linecounts/(max(1,np.std(self.linecounts)))) def ChooseOutputFile(self): roll = np.random.rand()*np.max(self.fileChooserIndex) return np.min(np.nonzero(self.fileChooserIndex>roll)) def OpenOutputFiles(self, n=4, pairedEndFiles=True): self.outfileHandles_1 = [] if not self.singleEnd: self.outfileHandles_2 = [] for i in range(1,n+1): if not self.singleEnd: self.outfileHandles_1.append(open(os.path.join(self.p.virtual_fastq_dir,"mmdVirtualCell_%03d_1.fastq" % i),'w+')) self.outfileHandles_2.append(open(os.path.join(self.p.virtual_fastq_dir,"mmdVirtualCell_%03d_2.fastq" % i),'w+')) else: self.outfileHandles_1.append(open(os.path.join(self.p.virtual_fastq_dir,"mmdVirtualCell_%03d.fastq" % i),'w+')) def CloseOutputFiles(self, pairedEndFiles=True): for i in self.outfileHandles_1: i.close() if pairedEndFiles: for i in self.outfileHandles_2: i.close() def CreateVirtualCellsFromAllReadsMerged(self, n=430, pairedEndFiles=True): print("Writing virtual cells.") INFILE1 = os.path.join(self.virtual_fastq_dir, "AllReads_Merged_1.fastq") if pairedEndFiles: INFILE2 = os.path.join(self.virtual_fastq_dir, "AllReads_Merged_2.fastq") print("Total number of reads: " + str(TotalReads)) return 0
ChellyD65/shoelace
python/shoelace/lib/Shuffler.py
Python
gpl-2.0
4,694
[ "Biopython" ]
a93336772b0a97835ce584c0a8f1fb4e312364bb32f4d0f98dd25ce34f43dbae
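`ChooseOutputFile` in the record above implements roulette-wheel selection: a cumulative sum of scaled line counts, a uniform roll, and the first bin whose cumulative weight exceeds the roll. A compact sketch of the same idea using `np.searchsorted` instead of the boolean-mask lookup; this is illustrative only, with `weights` standing in for the scaled FASTQ line counts:

import numpy as np

rng = np.random.RandomState(10081981)  # same seeding convention as the class
weights = np.array([4.0, 1.0, 3.0, 2.0])  # e.g. relative file sizes
cum = np.cumsum(weights)

def choose(cum, rng):
    # Uniform roll over [0, total weight); first index with cum > roll wins
    roll = rng.rand() * cum[-1]
    return int(np.searchsorted(cum, roll, side='right'))

picks = np.bincount([choose(cum, rng) for _ in range(100000)],
                    minlength=len(weights))
print(picks / picks.sum())  # empirically close to 4:1:3:2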
#!/usr/bin/env python """ Utilities for using M2Crypto SSL with DIRAC. """ import os import tempfile from M2Crypto import SSL, m2, X509 from DIRAC.Core.DISET import DEFAULT_SSL_CIPHERS, DEFAULT_SSL_METHODS from DIRAC.Core.Security import Locations from DIRAC.Core.Security.m2crypto.X509Chain import X509Chain # Verify depth of peer certs VERIFY_DEPTH = 50 DEBUG_M2CRYPTO = os.getenv("DIRAC_DEBUG_M2CRYPTO", "No").lower() in ("yes", "true") def __loadM2SSLCTXHostcert(ctx): """Load hostcert & key from the default location and set them as the credentials for SSL context ctx. Returns None. """ certKeyTuple = Locations.getHostCertificateAndKeyLocation() if not certKeyTuple: raise RuntimeError("Hostcert/key location not set") hostcert, hostkey = certKeyTuple if not os.path.isfile(hostcert): raise RuntimeError("Hostcert file (%s) is missing" % hostcert) if not os.path.isfile(hostkey): raise RuntimeError("Hostkey file (%s) is missing" % hostkey) # Make sure we never stall on a password prompt if the hostkey has a password # by specifying a blank string. ctx.load_cert(hostcert, hostkey, callback=lambda: "") def __loadM2SSLCTXProxy(ctx, proxyPath=None): """Load proxy from proxyPath (or default location if not specified) and set it as the certificate & key to use for this SSL context. Returns None. """ if not proxyPath: proxyPath = Locations.getProxyLocation() if not proxyPath: raise RuntimeError("Proxy location not set") if not os.path.isfile(proxyPath): raise RuntimeError("Proxy file (%s) is missing" % proxyPath) # See __loadM2SSLCTXHostcert for description of why lambda is needed. ctx.load_cert_chain(proxyPath, proxyPath, callback=lambda: "") def ssl_verify_callback_print_error(ok, store): """This callback method does nothing but printing the error. It prints a few more useful info than the exception :param ok: current validation status :param store: pointer to the X509_CONTEXT_STORE """ errnum = store.get_error() if errnum: print("SSL DEBUG ERRNUM %s ERRMSG %s" % (errnum, m2.x509_get_verify_error(errnum))) # pylint: disable=no-member return ok def getM2SSLContext(ctx=None, **kwargs): """Gets an M2Crypto.SSL.Context configured using the standard DIRAC connection keywords from kwargs. The keywords are: - clientMode: Boolean, if False hostcerts are always used. If True a proxy is used unless other flags are set. - useCertificates: Boolean, Set to true to use hostcerts in client mode. - proxyString: String, allow a literal proxy string to be provided. - proxyLocation: String, Path to file to use as proxy, defaults to usual location(s) if not set. - skipCACheck: Boolean, if True, don't verify peer certificates. - sslMethods: String, List of SSL algorithms to enable in OpenSSL style cipher format, e.g. "SSLv3:TLSv1". - sslCiphers: String, OpenSSL style cipher string of ciphers to allow on this connection. If an existing context "ctx" is provided, it is just reconfigured with the selected arguments. Returns the new or updated context. 
""" if not ctx: ctx = SSL.Context() # Set certificates for connection # CHRIS: I think clientMode was just an internal of pyGSI implementation # if kwargs.get('clientMode', False) and not kwargs.get('useCertificates', False): # if not kwargs.get('useCertificates', False): if kwargs.get("bServerMode", False) or kwargs.get("useCertificates", False): # Server mode always uses hostcert __loadM2SSLCTXHostcert(ctx) else: # Client mode has a choice of possible options if kwargs.get("proxyString", None): # M2Crypto cannot take an inmemory location or a string, so # so write it to a temp file and use proxyLocation with tempfile.NamedTemporaryFile(mode="w") as tmpFile: tmpFilePath = tmpFile.name tmpFile.write(kwargs["proxyString"]) __loadM2SSLCTXProxy(ctx, proxyPath=tmpFilePath) else: # Use normal proxy __loadM2SSLCTXProxy(ctx, proxyPath=kwargs.get("proxyLocation", None)) verify_callback = ssl_verify_callback_print_error if DEBUG_M2CRYPTO else None # Set peer verification if kwargs.get("skipCACheck", False): # Don't validate peer, but still request creds ctx.set_verify(SSL.verify_none, VERIFY_DEPTH, callback=verify_callback) else: # Do validate peer ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, VERIFY_DEPTH, callback=verify_callback) # Set CA location caPath = Locations.getCAsLocation() if not caPath: raise RuntimeError("Failed to find CA location") if not os.path.isdir(caPath): raise RuntimeError("CA path (%s) is not a valid directory" % caPath) ctx.load_verify_locations(capath=caPath) # If the version of M2Crypto is recent enough, there is an API # to accept proxy certificate, and we do not need to rely on # OPENSSL_ALLOW_PROXY_CERT environment variable # which was removed as of openssl 1.1 # We need this to be merged in M2Crypto: https://gitlab.com/m2crypto/m2crypto/merge_requests/236 # We set the proper verify flag to the X509Store of the context # as described here https://www.openssl.org/docs/man1.1.1/man7/proxy-certificates.html if hasattr(SSL, "verify_allow_proxy_certs"): ctx.get_cert_store().set_flags(SSL.verify_allow_proxy_certs) # pylint: disable=no-member # As of M2Crypto 0.37, the `verify_allow_proxy_certs` flag was moved # to X509 (https://gitlab.com/m2crypto/m2crypto/-/merge_requests/238) # It is more consistent with all the other flags, # but pySSL had it in SSL. Well... if hasattr(X509, "verify_allow_proxy_certs"): ctx.get_cert_store().set_flags(X509.verify_allow_proxy_certs) # pylint: disable=no-member # Other parameters sslMethods = kwargs.get("sslMethods", DEFAULT_SSL_METHODS) if sslMethods: # Pylint can't see the m2 constants due to the way the library is loaded # We just have to disable that warning for the next bit... # pylint: disable=no-member methods = [("SSLv2", m2.SSL_OP_NO_SSLv2), ("SSLv3", m2.SSL_OP_NO_SSLv3), ("TLSv1", m2.SSL_OP_NO_TLSv1)] allowed_methods = sslMethods.split(":") # If a method isn't explicitly allowed, set the flag to disable it... for method, method_flag in methods: if method not in allowed_methods: ctx.set_options(method_flag) # SSL_OP_NO_SSLv2, SSL_OP_NO_SSLv3, SSL_OP_NO_TLSv1 ciphers = kwargs.get("sslCiphers", DEFAULT_SSL_CIPHERS) ctx.set_cipher_list(ciphers) # log the debug messages if DEBUG_M2CRYPTO: ctx.set_info_callback() return ctx def getM2PeerInfo(conn): """Gets the details of the current peer as a standard dict. The peer details are obtained from the supplied M2 SSL Connection obj "conn". 
The details returned are those from ~X509Chain.getCredentials, without Registry info: DN - Full peer DN as string x509Chain - Full chain of peer isProxy - Boolean, True if chain ends with proxy isLimitedProxy - Boolean, True if chain ends with limited proxy group - String, DIRAC group for this peer, if known Returns a dict of details. """ chain = X509Chain.generateX509ChainFromSSLConnection(conn) creds = chain.getCredentials(withRegistryInfo=False) if not creds["OK"]: raise RuntimeError("Failed to get SSL peer info (%s)." % creds["Message"]) peer = creds["Value"] peer["x509Chain"] = chain isProxy = chain.isProxy() if not isProxy["OK"]: raise RuntimeError("Failed to get SSL peer isProxy (%s)." % isProxy["Message"]) peer["isProxy"] = isProxy["Value"] if peer["isProxy"]: peer["DN"] = creds["Value"]["identity"] else: peer["DN"] = creds["Value"]["subject"] isLimited = chain.isLimitedProxy() if not isLimited["OK"]: raise RuntimeError("Failed to get SSL peer isProxy (%s)." % isLimited["Message"]) peer["isLimitedProxy"] = isLimited["Value"] return peer
DIRACGrid/DIRAC
src/DIRAC/Core/DISET/private/Transports/SSL/M2Utils.py
Python
gpl-3.0
8,509
[ "DIRAC" ]
f47da441c2f9e1874e822acfc6a8a47c47c942233180fa9622e6a03138ac78c0
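The `sslMethods` handling in the record above turns an allow-list string such as 'SSLv3:TLSv1' into per-protocol disable flags on the context. A sketch of the same allow-list pattern using the standard-library `ssl` module instead of M2Crypto (the option constants differ from the M2Crypto ones, and this is not the DIRAC code path; recent Python versions emit DeprecationWarnings for these flags):

import ssl

def apply_method_allowlist(ctx, allowed="TLSv1_2"):
    """Disable every protocol version that is not explicitly allowed."""
    methods = [
        ("TLSv1", ssl.OP_NO_TLSv1),
        ("TLSv1_1", ssl.OP_NO_TLSv1_1),
        ("TLSv1_2", ssl.OP_NO_TLSv1_2),
    ]
    allowed_methods = allowed.split(":")
    # If a method isn't explicitly allowed, set its "no <method>" option bit
    for method, no_flag in methods:
        if method not in allowed_methods:
            ctx.options |= no_flag
    return ctx

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
apply_method_allowlist(ctx)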
#!/usr/bin/env python3 #* This file is part of the MOOSE framework #* https://www.mooseframework.org #* #* All rights reserved, see COPYRIGHT for full restrictions #* https://github.com/idaholab/moose/blob/master/COPYRIGHT #* #* Licensed under LGPL 2.1, please see LICENSE for details #* https://www.gnu.org/licenses/lgpl-2.1.html from scipy.optimize import curve_fit import numpy as np def linearFit(T, k): """ Linear least-squares fit to data """ assert len(T) > 1, 'Input lists must have at least two elements' assert len(T) == len(k), 'Input lists must be equal length' def func(T, a0, a1): return a0 + a1 * T popt, pcov = curve_fit(func, T, k) def outputFunc(temperatures): return func(np.asarray(temperatures), *popt) return outputFunc def fourthOrderFit(T, k): """ Fourth-order polynomial least-squares fit to data """ assert len(T) > 4, 'Input lists must have at least five elements' assert len(T) == len(k), 'Input lists must be equal length' def func(T, a0, a1, a2, a3, a4): return a0 + a1 * T + a2 * T**2 + a3 * T**3 + a4 * T**4 popt, pcov = curve_fit(func, T, k) def outputFunc(temperatures): return func(np.asarray(temperatures), *popt) return outputFunc def maierKellyFit(T, k): """ Maier-Kelly least-squares fit to data """ assert len(T) > 4, 'Input lists must have at least five elements' assert len(T) == len(k), 'Input lists must be equal length' assert T[0] > 0, 'Temperature cannot be zero' def func(T, a0, a1, a2, a3, a4): return a0 * np.log(T) + a1 + a2 * T + a3 / T + a4 / T**2 popt, pcov = curve_fit(func, T, k) def outputFunc(temperatures): return func(np.asarray(temperatures), *popt) return outputFunc def fillMissingValues(temperatures, values, fit_type, missing_value): """ Generate values to replace missing values in the database, primarily for equilibrium constants defined at temperature points. If a data point is missing (default value of 500.00000), then use the functional form for the database to replace missing values with fitted values. An important consideration is where too few values are present to allow a fit to the given function (for example, the fourth-order polynomial used in the GWB database requires five data points). In these cases, a linear fit is used if there are at least two values in the array. If only one value is defined, then it is copied to all remaining temperature points. For species where missing values have been filled in using a fit, a note is added as a seperate field named 'note'. For example, consider the species Fe(OH)3 in the original GWB database ***** Fe(OH)3 charge= 0 ion size= 4.0 A mole wt.= 106.8689 g 3 species in reaction -3.000 H+ 1.000 Fe+++ 3.000 H2O 13.7601 12.0180 9.9003 7.8005 5.4494 3.1992 500.0000 500.0000 ***** In this case, the two missing values are filled using a fourth-order polynomial fit (fit used in GWB database), so that the species data in the MOOSE JSON database is ***** "Fe(OH)3": { "species": { "H+": -3.0, "Fe+++": 1.0, "H2O": 3.0 }, "charge": 0.0, "radius": 4.0, "molecular weight": 106.8689, "logk": [ 13.7601, 12.018, 9.9003, 7.8005, 5.4494, 3.1992, 0.9064, -1.4986 ], "note": "Missing array values in original database have been filled using a fourth-order fit. 
Original values are [13.7601, 12.0180, 9.9003, 7.8005, 5.4494, 3.1992, 500.0000, 500.0000]" }, ***** """ # Remove missing_value from values vals = [] temp = [] note = "" filled_values = values dplaces = 4 if missing_value in values: # Number of valid values numvals = len(values) - values.count(missing_value) if numvals == 1: fit_type = "constant" elif numvals > 1 and numvals < 5: fit_type = "linear" else: fit_type = fit_type # Add note with original data strlist = ", ".join([str(item) for item in values]) note = "Missing array values in original database have been filled using a " + fit_type + " fit. Original values are [" + strlist + "]" for i in range(len(temperatures)): if values[i] != missing_value: temp.append(float(temperatures[i])) vals.append(float(values[i])) # If there is only one value, fill the filled values array with that value if fit_type == 'constant': for i in range(len(temperatures)): if filled_values[i] == missing_value: filled_values[i] = vals[0] # If there is between two and four values, use a linear fit to fill array elif fit_type == 'linear': fit = linearFit(temp, vals) for i in range(len(temperatures)): if filled_values[i] == missing_value: filled_values[i] = round(fit(temperatures[i]), dplaces) else: if fit_type == 'fourth-order': fit = fourthOrderFit(temp, vals) elif fit_type == 'maier-kelly': fit = maierKellyFit(temp, vals) else: raise ValueError("fit_type " + fit_type + " not supported") for i in range(len(temperatures)): if filled_values[i] == missing_value: filled_values[i] = round(fit(temperatures[i]), dplaces) return [float(val) for val in filled_values], note
nuclear-wizard/moose
modules/geochemistry/python/readers/reader_utils.py
Python
lgpl-2.1
5,815
[ "MOOSE" ]
a0ef1ed7af9e4a5422a9a995c03f6cdcbcc2122712e3552b8912c82120993b82
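`fillMissingValues` in the record above falls back to a linear fit when fewer than five valid points remain. A minimal round trip of that fallback with `scipy.optimize.curve_fit` on synthetic data; the 500.0 sentinel mimics the GWB convention, and the values are made up for illustration:

import numpy as np
from scipy.optimize import curve_fit

MISSING = 500.0
T = [0.0, 25.0, 60.0, 100.0, 150.0]
k = [13.9, 13.3, MISSING, 12.3, MISSING]  # two sentinel gaps

# Keep only valid pairs, fit a0 + a1*T, then evaluate at the gaps
Tv = [t for t, v in zip(T, k) if v != MISSING]
kv = [v for v in k if v != MISSING]

def linear(T, a0, a1):
    return a0 + a1 * np.asarray(T)

popt, _ = curve_fit(linear, Tv, kv)
filled = [v if v != MISSING else round(float(linear(t, *popt)), 4)
          for t, v in zip(T, k)]
print(filled)  # the two sentinels are replaced by fitted values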
import numpy as np from DesOptPy.scaling import denormalize def OptSciPy(self, x0, xL, xU, SysEq): def ObjFnSciPy(xVal): if xVal.tolist() in [ list(item) for item in self.xAll ]: i = np.where((self.xAll == xVal).all(axis=1))[0][0] fVal = self.fAll[i].copy() else: self.fVal, self.gVal, flag = SysEq(xVal) self.xLast = xVal.copy() self.fAll.append(self.fVal.copy()) self.gAll.append(self.gVal.copy()) self.xAll.append(xVal.copy()) fVal = self.fVal.copy() return fVal def ConFnSciPy(xVal): if xVal.tolist() in [ list(item) for item in self.xAll ]: i = np.where((self.xAll == xVal).all(axis=1))[0][0] gVal = self.gAll[i].copy() else: self.fVal, self.gVal, flag = SysEq(xVal) self.xLast = xVal.copy() self.fAll.append(self.fVal.copy()) self.gAll.append(self.gVal.copy()) self.xAll.append(xVal.copy()) gVal = self.gVal.copy() return gVal """ SciPy TODO: options need to be mapped to main options TODO: constraint formulation with upper or lower The minimize function supports the following methods: minimize(method=’Nelder-Mead’) minimize(method=’Powell’) minimize(method=’CG’) minimize(method=’BFGS’) minimize(method=’Newton-CG’) minimize(method=’L-BFGS-B’) minimize(method=’TNC’) minimize(method=’COBYLA’) minimize(method=’SLSQP’) minimize(method=’trust-constr’) minimize(method=’dogleg’) minimize(method=’trust-ncg’) minimize(method=’trust-krylov’) minimize(method=’trust-exact’) """ from scipy import optimize as spopt self.gAll = [] self.fAll = [] self.xAll = [] if 'SLSQP' in (self.Alg).upper(): Results = spopt.minimize( ObjFnSciPy, x0, args=(), method='SLSQP', jac='2-point', hess='2-point', bounds=spopt.Bounds(xL, xU), constraints=spopt.NonlinearConstraint(ConFnSciPy, -np.inf, 0), tol=1e-06, callback=None, options={ 'maxiter': 100, 'disp': False, 'ftol': 1e-06, 'iprint': 1, 'eps': self.xDelta, 'finite_diff_rel_step': None, }, ) elif 'trust-constr' in (self.Alg).lower(): Results = spopt.minimize( ObjFnSciPy, x0, method='trust-constr', bounds=spopt.Bounds(xL, xU), constraints=spopt.NonlinearConstraint(ConFnSciPy, -np.inf, 0), options={ 'xtol': 1e-08, 'gtol': 1e-08, 'barrier_tol': 1e-08, 'sparse_jacobian': None, 'maxiter': 1000, 'verbose': 0, 'finite_diff_rel_step': None, 'initial_constr_penalty': 1.0, 'initial_tr_radius': 1.0, 'initial_barrier_parameter': 0.1, 'initial_barrier_tolerance': 0.1, 'factorization_method': None, 'disp': False, }, ) #'grad': None, elif 'differential_evolution' in (self.Alg).lower(): """ https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.differential_evolution.html#scipy.optimize.differential_evolutionhttps://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.differential_evolution.html#scipy.optimize.differential_evolution """ Results = spopt.differential_evolution( ObjFnSciPy, x0=x0, bounds=spopt.Bounds(xL, xU), args=(), strategy='best1bin', maxiter=1000, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, disp=False, polish=True, init='latinhypercube', atol=0, updating='immediate', workers=1, constraints=spopt.NonlinearConstraint(ConFnSciPy, -np.inf, 0), ) elif 'COBYLA' in (self.Alg).upper(): """ Not working, no bounds https://docs.scipy.org/doc/scipy/reference/optimize.minimize-cobyla.html need to have bounds as nonlinear constraints. 
""" Results = spopt.minimize( ObjFnSciPy, x0, args=(), bounds=spopt.Bounds(xL, xU), method='COBYLA', constraints=spopt.NonlinearConstraint(ConFnSciPy, -np.inf, 0), tol=None, callback=None, options={ 'rhobeg': 1.0, 'maxiter': 1000, 'disp': False, 'catol': 0.0002, }, ) Results.nit = None Results.njev = None Results.jac = None # elif 'dual_annealing' in (self.Alg).lower(): # """ # No constraint # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.dual_annealing.html # """ # Results = spopt.dual_annealing(ObjFnSciPy, # args=(), # bounds=tuple((np.block([xL, xU]).T).tolist()), # #bounds=([0, 1], [0, 1]), #(xL, xU), # maxiter=1000, # local_search_options={}, # initial_temp=5230.0, # restart_temp_ratio=2e-05, # visit=2.62, # accept=- 5.0, # maxfun=10000000.0, # seed=None, # no_local_search=False, # callback=None, # x0=x0) # Results.jac = None elif 'shgo' in (self.Alg).lower(): """ https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.shgo.html """ Results = spopt.shgo( ObjFnSciPy, x0, args=(), bounds=spopt.Bounds(xL, xU), constraints=spopt.NonlinearConstraint(ConFnSciPy, -np.inf, 0), n=None, iters=1, callback=None, minimizer_kwargs=None, options=None, sampling_method='simplicial', ) else: raise Exception( 'Not a valid SciPy algorithm for constrained nonlinear optimization' ) xOpt = np.array(Results.x) fOpt = np.array([Results.fun]) self.xNorm0 = x0 self.x0 = self.xAll[0] # self.f0 = self.fAll[0] self.g0 = self.gAll[0] if (self.Alg).lower() in ['slsqp', 'trust-constr']: self.fNablaOpt = Results.jac self.nIt = Results.nit self.xIt = None self.fIt = None self.gIt = None # Todo this is ugly self.nIt = Results.nfev try: self.nIt = Results.njev self.nSensEval = Results.njev except: self.nSensEval = None self.inform = Results.success if (self.Alg).lower() in ['slsqp', 'trust-constr']: self.fNablaOpt = Results.jac elif 'trust-constr' in (self.Alg).lower(): self.fNablaOpt = Results.grad self.gNablaOpt = Results.jac[0] if self.g is not None: self.gMax = np.max(self.gAll, 1) # Denormalization # TODO move to scaling!! self.xOpt = [None] * self.nx self.xNormOpt = xOpt self.fNormOpt = fOpt for i in range(self.nx): if self.xNorm[i]: self.xOpt[i] = denormalize(xOpt[i], self.xL[i], self.xU[i]) else: self.xOpt[i] = xOpt[i] self.xNormOpt[i] = xOpt[i] self.xOpt = np.array(self.xOpt) if self.fNorm[0]: if self.f0 == 0: self.fOpt = fOpt / self.fNormMultiplier else: self.fOpt = fOpt * abs(self.f0) / self.fNormMultiplier else: self.fOpt = fOpt self.gOpt = self.gVal
e-dub/DesOptPy
DesOptPy/interfaces/SciPy.py
Python
gpl-3.0
8,414
[ "VisIt" ]
0a27770d9aa4022dcbf836fbce1f49af7d7c07923777fac43d6febfaa3dccc83
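The wrapper in the record above caches every evaluation so the objective and constraint callbacks can share one system call, and passes g(x) <= 0 constraints to SciPy. A minimal standalone SLSQP run with the same inequality convention on a toy problem, using old-style dict constraints and tuple bounds, which every SciPy version of `minimize` accepts (this is not DesOptPy API, just a sketch):

import numpy as np
from scipy import optimize as spopt

def f(x):
    return (x[0] - 1.0) ** 2 + (x[1] - 2.5) ** 2

def g(x):
    # g(x) <= 0 convention; SciPy dict constraints expect fun(x) >= 0,
    # so the negated value is handed to the optimizer below.
    return np.array([x[0] + x[1] - 3.0])

res = spopt.minimize(
    f, x0=[2.0, 0.0], method='SLSQP',
    bounds=[(0.0, 10.0), (0.0, 10.0)],
    constraints={'type': 'ineq', 'fun': lambda x: -g(x)},
)
print(res.x, res.fun)  # feasible optimum on x0 + x1 = 3, near (0.75, 2.25)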
#!/usr/bin/python # -*- coding: utf-8 -*- # # --- BEGIN_HEADER --- # # userscriptgen - Generator backend for user scripts # Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter # # This file is part of MiG. # # MiG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # MiG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # -- END_HEADER --- # # TODO: finish generator for automatic testing of scripts # TODO: filter exit code from cgi output and use in own exit code? # TODO: mig-ls.* -r fib.out incorrectly lists entire home recursively # TODO: ls -r is not recursive -> use -R! """Generate MiG user scripts for the specified programming languages. Called without arguments the generator creates scripts for all supported languages. If one or more languages are supplied as arguments, only those languages will be generated. """ import sys import getopt # Generator version (automagically updated by svn) __version__ = '$Revision$' # Save original __version__ before truncate with wild card import _userscript_version = __version__ from publicscriptgen import * _publicscript_version = __version__ __version__ = '%s,%s' % (_userscript_version, _publicscript_version) # ###################################### # Script generator specific functions # # ###################################### # Generator usage def usage(): print 'Usage: userscriptgen.py OPTIONS [LANGUAGE ... ]' print 'Where OPTIONS include:' print ' -c CURL_CMD\t: Use curl from CURL_CMD' print ' -d DST_DIR\t: write scripts to DST_DIR' print ' -h\t\t: Print this help' print ' -l\t\t: Do not generate shared library module' print ' -p PYTHON_CMD\t: Use PYTHON_CMD as python interpreter' print ' -s SH_CMD\t: Use SH_CMD as sh interpreter' print ' -t\t\t: Generate self testing script' print ' -v\t\t: Verbose output' print ' -V\t\t: Show version' def version(): print 'MiG User Script Generator: %s' % __version__ def version_function(lang): s = '' s += begin_function(lang, 'version', [], 'Show version details') if lang == 'sh': s += " echo 'MiG User Scripts: %s'" % __version__ elif lang == 'python': s += " print 'MiG User Scripts: %s'" % __version__ s += end_function(lang, 'version') return s # ########################## # Script helper functions # # ########################## def shared_usage_function(op, lang, extension): """General wrapper for the specific usage functions. 
Simply rewrites first arg to function name.""" return eval('%s_usage_function' % op)(lang, extension) def cancel_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] JOBID [JOBID ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def cat_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def cp_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] SRC [SRC...] DST'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def doc_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] [TOPIC ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def filemetaio_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] ACTION PATH [ARG ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) action_usage_string = 'ACTION\t\tlist : List PATH directory meta-data entries' action_usage_string2 = '\t\tget_dir : Get PATH directory meta-data for extension=EXT' action_usage_string3 = '\t\tget_file : Get meta-data for file PATH' action_usage_string4 = '\t\tput_dir : Set PATH directory meta-data for extension=EXT' action_usage_string5 = '\t\tput_file : Set meta-data for file PATH' action_usage_string6 = '\t\tremove_dir : Remove PATH directory meta-data for extension=[EXT]' image_usage_string = '-i\t\tDisplay image meta-data' if lang == 'sh': s += '\n echo "%s"' % action_usage_string s += '\n echo "%s"' % action_usage_string2 s += '\n echo "%s"' % action_usage_string3 s += '\n echo "%s"' % action_usage_string4 s += '\n echo "%s"' % action_usage_string5 s += '\n echo "%s"' % action_usage_string6 s += '\n echo "%s"' % image_usage_string elif lang == 'python': s += '\n print "%s"' % action_usage_string s += '\n print "%s"' % action_usage_string2 s += '\n print "%s"' % action_usage_string3 s += '\n print "%s"' % action_usage_string4 s += '\n print "%s"' % action_usage_string5 s += '\n print "%s"' % action_usage_string6 s += '\n print "%s"' % image_usage_string s += end_function(lang, 'usage') return s def get_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...] 
FILE'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) recursive_usage_string = '-r\t\tact recursively' if lang == 'sh': s += '\n echo "%s"' % recursive_usage_string elif lang == 'python': s += '\n print "%s"' % recursive_usage_string s += end_function(lang, 'usage') return s def head_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) lines_usage_string = '-n N\t\tShow first N lines of the file(s)' if lang == 'sh': s += '\n echo "%s"' % lines_usage_string elif lang == 'python': s += '\n print "%s"' % lines_usage_string s += end_function(lang, 'usage') return s def jobaction_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] ACTION JOBID [JOBID ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def liveio_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] ACTION JOBID SRC [SRC ...] DST' % \ (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def ls_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] [FILE ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) all_usage_string = "-a\t\tDo not hide entries starting with '.'" long_usage_string = '-l\t\tDisplay long format' recursive_usage_string = '-r\t\tact recursively' if lang == 'sh': s += '\n echo "%s"' % all_usage_string s += '\n echo "%s"' % long_usage_string s += '\n echo "%s"' % recursive_usage_string elif lang == 'python': s += '\n print "%s"' % all_usage_string s += '\n print "%s"' % long_usage_string s += '\n print "%s"' % recursive_usage_string s += end_function(lang, 'usage') return s def mkdir_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] DIRECTORY [DIRECTORY ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) parents_usage_string = '-p\t\tmake parent directories as needed' if lang == 'sh': s += '\n echo "%s"' % parents_usage_string elif lang == 'python': s += '\n print "%s"' % parents_usage_string s += end_function(lang, 'usage') return s def mqueue_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] ACTION QUEUE [MSG]' % \ (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def 
mv_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] SRC [SRC...] DST'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def put_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...] FILE' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) package_usage_string = \ '-p\t\tSubmit mRSL files (also in packages if -x is specified) after upload' recursive_usage_string = '-r\t\tact recursively' extract_usage_string = \ '-x\t\tExtract package (.zip etc) after upload' if lang == 'sh': s += '\n echo "%s"' % package_usage_string s += '\n echo "%s"' % recursive_usage_string s += '\n echo "%s"' % extract_usage_string elif lang == 'python': s += '\n print "%s"' % package_usage_string s += '\n print "%s"' % recursive_usage_string s += '\n print "%s"' % extract_usage_string s += end_function(lang, 'usage') return s def read_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] START END SRC DST'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def resubmit_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] JOBID [JOBID ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def rm_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) recursive_usage_string = '-r\t\tact recursively' if lang == 'sh': s += '\n echo "%s"' % recursive_usage_string elif lang == 'python': s += '\n print "%s"' % recursive_usage_string s += end_function(lang, 'usage') return s def rmdir_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] DIRECTORY [DIRECTORY ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) parents_usage_string = '-p\t\tremove parent directories as needed' if lang == 'sh': s += '\n echo "%s"' % parents_usage_string elif lang == 'python': s += '\n print "%s"' % parents_usage_string s += end_function(lang, 'usage') return s def stat_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += 
end_function(lang, 'usage') return s def status_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] [JOBID ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) max_jobs_usage_string = '-m M\t\tShow status for at most M jobs' sort_jobs_usage_string = '-S\t\tSort jobs by modification time' if lang == 'sh': s += '\n echo "%s"' % max_jobs_usage_string s += '\n echo "%s"' % sort_jobs_usage_string elif lang == 'python': s += '\n print "%s"' % max_jobs_usage_string s += '\n print "%s"' % sort_jobs_usage_string s += end_function(lang, 'usage') return s def submit_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def tail_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) lines_usage_string = '-n N\t\tShow last N lines of the file(s)' if lang == 'sh': s += '\n echo "%s"' % lines_usage_string elif lang == 'python': s += '\n print "%s"' % lines_usage_string s += end_function(lang, 'usage') return s def test_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] [OPERATION ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def touch_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] [FILE ...]' % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def truncate_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) lines_usage_string = '-n N\t\tTruncate file(s) to at most N bytes' if lang == 'sh': s += '\n echo "%s"' % lines_usage_string elif lang == 'python': s += '\n print "%s"' % lines_usage_string s += end_function(lang, 'usage') return s def unzip_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] SRC [SRC...] 
DST'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def wc_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] FILE [FILE ...]'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) bytes_usage_string = '-b N\t\tShow byte count' lines_usage_string = '-l N\t\tShow line count' words_usage_string = '-w N\t\tShow word count' if lang == 'sh': s += '\n echo "%s"' % bytes_usage_string s += '\n echo "%s"' % lines_usage_string s += '\n echo "%s"' % words_usage_string elif lang == 'python': s += '\n print "%s"' % bytes_usage_string s += '\n print "%s"' % lines_usage_string s += '\n print "%s"' % words_usage_string s += end_function(lang, 'usage') return s def write_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] START END SRC DST'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) s += end_function(lang, 'usage') return s def zip_usage_function(lang, extension): # Extract op from function name op = sys._getframe().f_code.co_name.replace('_usage_function', '') usage_str = 'Usage: %s%s.%s [OPTIONS] SRC [SRC...] DST'\ % (mig_prefix, op, extension) s = '' s += begin_function(lang, 'usage', [], 'Usage help for %s' % op) s += basic_usage_options(usage_str, lang) curdir_usage_string = '-w PATH\t\tUse PATH as remote working directory' if lang == 'sh': s += '\n echo "%s"' % curdir_usage_string elif lang == 'python': s += '\n print "%s"' % curdir_usage_string s += end_function(lang, 'usage') return s # ########################## # Communication functions # # ########################## def shared_op_function(op, lang, curl_cmd): """General wrapper for the specific op functions. Simply rewrites first arg to function name.""" return eval('%s_function' % op)(lang, curl_cmd) def cancel_function(lang, curl_cmd, curl_flags=''): relative_url = '"cgi-bin/jobaction.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;action=cancel;$job_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;action=cancel;%s' % (server_flags, job_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'cancel_job', ['job_list'], 'Execute the corresponding server operation') s += format_list(lang, 'job_list', 'job_id') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'cancel_job') return s def cat_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/cat.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'cat_file', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'cat_file') return s def cp_function(lang, curl_cmd, curl_flags='--compressed'): """Call the corresponding cgi script with the string 'src_list' as argument. Thus the variable 'src_list' should be on the form \"src=pattern1[;src=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/cp.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;dst=$dst;$src_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;dst=%s;%s' % (server_flags, dst, src_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'cp_file', ['src_list', 'dst'], 'Execute the corresponding server operation') s += format_list(lang, 'src_list', 'src') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'cp_file') return s def doc_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/docs.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;search=$search;show=$show"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;search=%s;show=%s' % (server_flags, search, show)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'show_doc', ['search', 'show'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'show_doc') return s def expand_function(lang, curl_cmd, curl_flags='--compressed'): """Call the expand cgi script with the string 'path_list' as argument. Thus the variable 'path_list' should be on the form \"path=pattern1[;path=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/expand.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;$path_list;with_dest=$destinations"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s;with_dest=%s' % (server_flags, path_list, destinations)" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'expand_name', ['path_list', 'server_flags', 'destinations'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'expand_name') return s def filemetaio_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/filemetaio.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;action=$action;path=$path;$arg_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;action=%s;path=%s;%s' % (server_flags, action, path, arg_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'filemetaio', ['action', 'path', 'arg_list'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'filemetaio') return s def get_function(lang, curl_cmd, curl_flags='--compressed --create-dirs' ): post_data = '""' query = '""' if lang == 'sh': # TODO: should we handle below double slash problem here, too? relative_url = '"cert_redirect/$src_path"' curl_target = '"--output $dst_path"' elif lang == 'python': # Apache chokes on possible double slash in url and that causes # fatal errors in migfs-fuse - remove it from src_path. relative_url = '"cert_redirect/%s" % src_path.lstrip("/")' curl_target = "'--output %s' % dst_path" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'get_file', ['src_path', 'dst_path'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, curl_target, ) s += end_function(lang, 'get_file') return s def head_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/head.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;$path_list;lines=$lines"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s;lines=%s' % (server_flags, path_list, lines)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'head_file', ['lines', 'path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'head_file') return s def jobaction_function(lang, curl_cmd, curl_flags=''): relative_url = '"cgi-bin/jobaction.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;action=$action;$job_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;action=%s;%s' % (server_flags, action, job_list)" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'job_action', ['action', 'job_list'], 'Execute the corresponding server operation') s += format_list(lang, 'job_list', 'job_id') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'job_action') return s def liveio_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/liveio.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;action=$action;job_id=$job_id;$src_list;dst=$dst"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;action=%s;job_id=%s;%s;dst=%s' % (server_flags, action, job_id, src_list, dst)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'job_liveio', ['action', 'job_id', 'src_list', 'dst'], 'Execute the corresponding server operation') s += format_list(lang, 'src_list', 'src') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'job_liveio') return s def ls_function(lang, curl_cmd, curl_flags='--compressed'): """Call the ls cgi script with the string 'path_list' as argument. Thus the variable 'path_list' should be on the form \"path=pattern1[;path=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/ls.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'ls_file', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'ls_file') return s def mkdir_function(lang, curl_cmd, curl_flags=''): """Call the mkdir cgi script with 'path' as argument.""" relative_url = '"cgi-bin/mkdir.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'mk_dir', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'mk_dir') return s def mqueue_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/mqueue.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;action=$action;queue=$queue;msg=$msg"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;action=%s;queue=%s;msg=%s' % (server_flags, action, queue, msg)" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'job_mqueue', ['action', 'queue', 'msg'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'job_mqueue') return s def mv_function(lang, curl_cmd, curl_flags='--compressed'): """Call the corresponding cgi script with the string 'src_list' as argument. Thus the variable 'src_list' should be on the form \"src=pattern1[;src=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/mv.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;dst=$dst;$src_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;dst=%s;%s' % (server_flags, dst, src_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'mv_file', ['src_list', 'dst'], 'Execute the corresponding server operation') s += format_list(lang, 'src_list', 'src') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'mv_file') return s def put_function(lang, curl_cmd, curl_flags='--compressed'): post_data = '""' query = '""' if lang == 'sh': # TODO: should we handle below double slash problem here, too? relative_url = '"$dst_path"' curl_target = \ '"--upload-file $src_path --header $content_type -X CERTPUT"' elif lang == 'python': # Apache chokes on possible double slash in url and that causes # fatal errors in migfs-fuse - remove it from src_path. relative_url = '"%s" % dst_path.lstrip("/")' curl_target = \ "'--upload-file %s --header %s -X CERTPUT' % (src_path, content_type)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'put_file', ['src_path', 'dst_path', 'submit_mrsl', 'extract_package'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) if lang == 'sh': s += \ """ content_type="''" if [ $submit_mrsl -eq 1 ] && [ $extract_package -eq 1 ]; then content_type='Content-Type:submitandextract' elif [ $submit_mrsl -eq 1 ]; then content_type='Content-Type:submitmrsl' elif [ $extract_package -eq 1 ]; then content_type='Content-Type:extractpackage' fi """ elif lang == 'python': s += \ """ content_type = "''" if submit_mrsl and extract_package: content_type = 'Content-Type:submitandextract' elif submit_mrsl: content_type = 'Content-Type:submitmrsl' elif extract_package: content_type = 'Content-Type:extractpackage' """ else: print 'Error: %s not supported!' % lang return '' s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, curl_target, ) s += end_function(lang, 'put_file') return s def read_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/rangefileaccess.py"' post_data = '""' if lang == 'sh': query = \ '"?output_format=txt;flags=$server_flags;file_startpos=$first;file_endpos=$last;path=$src_path"' curl_target = '"--output $dst_path"' elif lang == 'python': query = \ "'?output_format=txt;flags=%s;file_startpos=%s;file_endpos=%s;path=%s' % (server_flags, first, last, src_path)" curl_target = "'--output %s' % dst_path" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'read_file', ['first', 'last', 'src_path' , 'dst_path'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, curl_target, ) s += end_function(lang, 'read_file') return s def resubmit_function(lang, curl_cmd, curl_flags=''): relative_url = '"cgi-bin/resubmit.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;$job_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, job_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'resubmit_job', ['job_list'], 'Execute the corresponding server operation') s += format_list(lang, 'job_list', 'job_id') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'resubmit_job') return s def rm_function(lang, curl_cmd, curl_flags=''): """Call the rm cgi script with the string 'path_list' as argument. Thus the variable 'path_list' should be on the form \"path=pattern1[;path=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/rm.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'rm_file', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'rm_file') return s def rmdir_function(lang, curl_cmd, curl_flags=''): """Call the rmdir cgi script with 'path' as argument.""" relative_url = '"cgi-bin/rmdir.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'rm_dir', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'rm_dir') return s def stat_function(lang, curl_cmd, curl_flags='--compressed'): """Call the corresponding cgi script with the string 'path_list' as argument. Thus the variable 'path_list' should be on the form \"path=pattern1[;path=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/stat.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'stat_file', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'stat_file') return s def status_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/jobstatus.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;max_jobs=$max_job_count;$job_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;max_jobs=%s;%s' % (server_flags, max_job_count, job_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'job_status', ['job_list', 'max_job_count' ], 'Execute the corresponding server operation') s += format_list(lang, 'job_list', 'job_id') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'job_status') return s def submit_function(lang, curl_cmd, curl_flags=''): # Simply use Put function s = put_function(lang, curl_cmd, curl_flags) return s.replace('put_file', 'submit_file') def tail_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/tail.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;lines=$lines;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;lines=%s;%s' % (server_flags, lines, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'tail_file', ['lines', 'path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'tail_file') return s def test_function(lang, curl_cmd, curl_flags=''): # TODO: pass original -c and -s options on to tested scripts s = '' s += begin_function(lang, 'test_op', ['op'], 'Execute simple function tests') if lang == 'sh': s += \ """ valid=0 valid_ops=(%s) for valid_op in ${valid_ops[*]}; do if [ $op = $valid_op ]; then valid=1 break fi done if [ $valid -eq 0 ]; then echo \"Ignoring test of invalid operation: $op\" return 1 fi path_prefix=`dirname $0` echo \"running $op test(s)\" cmd=\"$path_prefix/%s${op}.%s\" declare -a cmd_args declare -a verify_cmd case $op in 'cancel') pre_cmd=\"$path_prefix/migsubmit.sh mig-test.mRSL\" cmd_args[1]='DUMMY_JOB_ID' ;; 'cat') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'doc') cmd_args[1]='' ;; 'get') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt .' 
post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'head') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'jobaction') pre_cmd=\"$path_prefix/migsubmit.sh mig-test.mRSL\" cmd_args[1]='cancel DUMMY_JOB_ID' ;; 'ls') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'mkdir') pre_cmd=\"$path_prefix/migrm.sh -r mig-test-dir\" cmd_args[1]='mig-test-dir' verify_cmd[1]=\"$path_prefix/migls.sh mig-test-dir\" post_cmd=\"$path_prefix/migrm.sh -r mig-test-dir\" ;; 'mv') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt mig-test-new.txt' post_cmd=\"$path_prefix/migrm.sh mig-test-new.txt\" ;; 'put') pre_cmd[1]=\"$path_prefix/migrm.sh mig-test.txt\" cmd_args[1]='mig-test.txt .' verify_cmd[1]=\"$path_prefix/migls.sh mig-test.txt\" post_cmd[1]=\"$path_prefix/migrm.sh mig-test.txt\" cmd_args[2]='mig-test.t*t mig-test.txt' verify_cmd[2]=\"$path_prefix/migrm.sh mig-test.txt\" cmd_args[3]='mig-test.txt mig-test.txt' verify_cmd[3]=\"$path_prefix/migrm.sh mig-test.txt\" cmd_args[4]='mig-test.txt mig-remote-test.txt' verify_cmd[4]=\"$path_prefix/migrm.sh mig-remote-test.txt\" cmd_args[5]='mig-test.txt mig-test-dir/' verify_cmd[5]=\"$path_prefix/migrm.sh mig-test-dir/mig-test.txt\" cmd_args[6]='mig-test.txt mig-test-dir/mig-remote-test.txt' verify_cmd[6]=\"$path_prefix/migrm.sh mig-test-dir/mig-remote-test.txt\" # Disabled since put doesn't support wildcards in destination (yet?) # cmd_args[]='mig-test.txt 'mig-test-d*/'' # cmd_args[]='mig-test.txt 'mig-test-d*/mig-remote-test.txt'' # verify_cmd[]=\"$path_prefix/migrm.sh mig-test-dir/mig-remote-test.txt\" # verify_cmd[]=\"$path_prefix/migrm.sh mig-test-dir/mig-remote-test.txt\" ;; 'read') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='0 16 mig-test.txt -' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'rm') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' verify_cmd[1]=\"$path_prefix/migls.sh mig-test.txt\" ;; 'rmdir') pre_cmd=\"$path_prefix/migmkdir.sh mig-test-dir\" cmd_args[1]='mig-test-dir' verify_cmd[1]=\"$path_prefix/migls.sh mig-test-dir\" post_cmd=\"$path_prefix/migrm.sh -r mig-test-dir\" ;; 'stat') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'status') cmd_args[1]='' ;; 'submit') cmd_args[1]='mig-test.mRSL' ;; 'tail') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' post_cmd[1]=\"$path_prefix/migrm.sh mig-test.txt\" ;; 'touch') pre_cmd[1]=\"$path_prefix/migrm.sh mig-test.txt\" cmd_args[1]='mig-test.txt' verify_cmd[1]=\"$path_prefix/migls.sh mig-test.txt\" post_cmd[1]=\"$path_prefix/migrm.sh mig-test.txt\" ;; 'truncate') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='mig-test.txt' post_cmd[1]=\"$path_prefix/migrm.sh mig-test.txt\" ;; 'wc') pre_cmd=\"$path_prefix/migput.sh mig-test.txt\" cmd_args[1]='mig-test.txt' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; 'write') pre_cmd=\"$path_prefix/migput.sh mig-test.txt .\" cmd_args[1]='4 8 mig-test.txt mig-test.txt' post_cmd=\"$path_prefix/migrm.sh -r mig-test.txt\" ;; *) echo \"No test available for $op!\" return 1 ;; esac index=1 for args in \"${cmd_args[@]}\"; do echo \"test $index: $cmd $test_flags $args\" pre=\"${pre_cmd[index]}\" if [ -n \"$pre\" ]; then echo \"setting up with: $pre\" $pre >& /dev/null fi ./$cmd $test_flags $args 
>& /dev/null ret=$? if [ $ret -eq 0 ]; then echo \" $op test $index SUCCEEDED\" else echo \" $op test $index FAILED!\" fi verify=\"${verify_cmd[index]}\" if [ -n \"$verify\" ]; then echo \"verifying with: $verify\" $verify fi post=\"${post_cmd[index]}\" if [ -n \"$post\" ]; then echo \"cleaning up with: $post\" $post >& /dev/null fi index=$((index+1)) done return $ret """\ % (' '.join(script_ops), mig_prefix, 'sh') elif lang == 'python': s += """ print \"running %s test\" % (op) """ else: print 'Error: %s not supported!' % lang return '' s += end_function(lang, 'test_op') return s def touch_function(lang, curl_cmd, curl_flags=''): """Call the touch cgi script with 'path' as argument.""" relative_url = '"cgi-bin/touch.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'touch_file', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'touch_file') return s def truncate_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/truncate.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;size=$size;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;size=%s;%s' % (server_flags, size, path_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'truncate_file', ['size', 'path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'truncate_file') return s def unzip_function(lang, curl_cmd, curl_flags='--compressed'): """Call the corresponding cgi script with the string 'src_list' as argument. Thus the variable 'src_list' should be on the form \"src=pattern1[;src=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/unzip.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;dst=$dst;$src_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;dst=%s;%s' % (server_flags, dst, src_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'unzip_file', ['src_list', 'dst'], 'Execute the corresponding server operation') s += format_list(lang, 'src_list', 'src') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'unzip_file') return s def wc_function(lang, curl_cmd, curl_flags=''): relative_url = '"cgi-bin/wc.py"' query = '""' if lang == 'sh': post_data = '"output_format=txt;flags=$server_flags;$path_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;%s' % (server_flags, path_list)" else: print 'Error: %s not supported!' 
% lang return '' s = '' s += begin_function(lang, 'wc_file', ['path_list'], 'Execute the corresponding server operation') s += format_list(lang, 'path_list', 'path') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'wc_file') return s def write_function(lang, curl_cmd, curl_flags='--compressed'): relative_url = '"cgi-bin/rangefileaccess.py"' post_data = '""' if lang == 'sh': query = \ '"?output_format=txt;flags=$server_flags;file_startpos=$first;file_endpos=$last;path=$dst_path"' curl_target = '"--upload-file $src_path"' elif lang == 'python': query = \ "'?output_format=txt;flags=%s;file_startpos=%s;file_endpos=%s;path=%s' % (server_flags, first, last, dst_path)" curl_target = "'--upload-file %s' % src_path" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'write_file', ['first', 'last', 'src_path' , 'dst_path'], 'Execute the corresponding server operation') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, curl_target, ) s += end_function(lang, 'write_file') return s def zip_function(lang, curl_cmd, curl_flags='--compressed'): """Call the corresponding cgi script with the string 'src_list' as argument. Thus the variable 'src_list' should be on the form \"src=pattern1[;src=pattern2[ ... ]]\" This may seem a bit awkward but it's difficult to do in a better way when begin_function() doesn't support variable length or array args. """ relative_url = '"cgi-bin/zip.py"' query = '""' if lang == 'sh': post_data = \ '"output_format=txt;flags=$server_flags;current_dir=$current_dir;dst=$dst;$src_list"' elif lang == 'python': post_data = \ "'output_format=txt;flags=%s;current_dir=%s;dst=%s;%s' % (server_flags, current_dir, dst, src_list)" else: print 'Error: %s not supported!' % lang return '' s = '' s += begin_function(lang, 'zip_file', ['current_dir', 'src_list', 'dst'], 'Execute the corresponding server operation') s += format_list(lang, 'src_list', 'src') s += ca_check_init(lang) s += password_check_init(lang) s += timeout_check_init(lang) s += curl_perform( lang, relative_url, post_data, query, curl_cmd, curl_flags, ) s += end_function(lang, 'zip_file') return s # ####################### # Main part of scripts # # ####################### def expand_list( lang, input_list, expanded_list, destinations=False, warnings=True, ): """Inline expansion of remote filenames from a list of patterns possibly with wild cards. 
Output from CGI script is on the form: Exit code: 0 Description OK (done in 0.012s) Title: MiG Files ___MIG FILES___ test.txt test.txt """ s = '' if lang == 'sh': s += \ """ declare -a %s # Save original args orig_args=(\"${%s[@]}\") index=1 for pattern in \"${orig_args[@]}\"; do expanded_path=$(expand_name \"path=$pattern\" \"$server_flags\" \"%s\" 2> /dev/null) set -- $expanded_path shift; shift exit_code=\"$1\" shift; shift; shift; shift; shift; shift; shift; shift; shift; shift; shift if [ \"$exit_code\" -ne \"0\" ]; then """\ % (expanded_list, input_list, str(destinations).lower()) if warnings: s += \ """ # output warning/error message(s) from expand echo \"$0: $@\" """ s += \ """ continue fi while [ \"$#\" -gt \"0\" ]; do %s[$index]=$1 index=$((index+1)) shift done done """\ % expanded_list elif lang == 'python': s += \ """ %s = [] for pattern in %s: (status, out) = expand_name('path=' + pattern, server_flags, '%s') result = [line.strip() for line in out if line.strip()] status = result[0].split()[2] src_list = result[3:] if status != '0': """\ % (expanded_list, input_list, str(destinations).lower()) if warnings: s += \ """ # output warning/error message(s) from expand print sys.argv[0] + ": " + ' '.join(src_list) """ s += """ continue %s += src_list """ % expanded_list else: print 'Error: %s not supported!' % lang return '' return s def shared_main(op, lang): """General wrapper for the specific main functions. Simply rewrites first arg to function name.""" return eval('%s_main' % op)(lang) # TODO: switch to new array/list argument format in all multi target function calls # (No need to manually build var=a;var=b;.. in main when functions handle lists) def cancel_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. Currently 'sh' and 'python' are supported. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the job_id string used directly: # 'job_id="$1";job_id="$2";...;job_id=$N' orig_args=("$@") job_id_list=\"job_id=$1\" shift while [ \"$#\" -gt \"0\" ]; do job_id_list=\"$job_id_list;job_id=$1\" shift done cancel_job $job_id_list """ elif lang == 'python': s += \ """ # Build the job_id string used directly: # 'job_id="$1";job_id="$2";...;job_id=$N' job_id_list = \"job_id=%s\" % \";job_id=\".join(sys.argv[1:]) (status, out) = cancel_job(job_id_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def cat_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' orig_args=("$@") path_list="path=$1" shift while [ $# -gt "0" ]; do path_list="$path_list;path=$1" shift done cat_file $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = cat_file(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def cp_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. 
""" # cp cgi supports wild cards natively so no need to use # expand here s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' orig_args=("$@") src_list="src=$1" shift while [ $# -gt 1 ]; do src_list="$src_list;src=$1" shift done dst=$1 cp_file $src_list $dst """ elif lang == 'python': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' src_list = \"src=%s\" % \";src=\".join(sys.argv[1:-1]) dst = sys.argv[-1] (status, out) = cp_file(src_list, dst) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def doc_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, None, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ if [ $# -gt 0 ]; then # SearchList=() TopicList=(\"$@\") else SearchList=(\"*\") # TopicList=() fi for Search in \"${SearchList[@]}\"; do show_doc \"$Search\" \"\" done for Topic in \"${TopicList[@]}\"; do show_doc \"\" \"$Topic\" done """ elif lang == 'python': s += \ """ if len(sys.argv) - 1 > 0: SearchList = "" TopicList = sys.argv[1:] else: SearchList = '*' TopicList = "" out = [] for Search in SearchList: (status, search_out) = show_doc(Search, "") out += search_out for Topic in TopicList: (status, topic_out) = show_doc("", Topic) out += topic_out print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def filemetaio_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. Currently 'sh' and 'python' are supported. """ s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'i', ' i) server_flags="${server_flags}i";;' ) elif lang == 'python': s += parse_options(lang, 'i', ''' elif opt == "-i": server_flags += "i"''') s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the action and argument strings used directly: # action="$1" path="$2";arg="$3";...;arg="$N" orig_args=("$@") action=\"$1\" shift path=\"$1\" shift arg_list=\"$1\" shift while [ \"$#\" -gt \"0\" ]; do arg_list=\"$arg_list;$1\" shift done filemetaio $action $path $arg_list """ elif lang == 'python': s += \ """ # Build the action and arg strings used directly: # action=$1 "$2";"$3";...;"$N" action = \"%s\" % sys.argv[1] path = \"%s\" % sys.argv[2] arg_list = \"%s\" % \";\".join(sys.argv[3:]) (status, out) = filemetaio(action, path, arg_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def get_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. 
""" s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'r', ' r) server_flags="${server_flags}r";;' ) elif lang == 'python': s += parse_options(lang, 'r', ''' elif opt == "-r": server_flags += "r"''') s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': # Advice about parsing taken from: # http://www.shelldorado.com/goodcoding/cmdargs.html s += \ """ orig_args=(\"$@\") src_list=(\"$@\") raw_dst=\"${src_list[$(($#-1))]}\" unset src_list[$(($#-1))] """ s += expand_list(lang, 'src_list', 'expanded_list', True) s += \ """ # Use '--' to handle case where no expansions succeeded set -- \"${expanded_list[@]}\" while [ $# -gt 0 ]; do src=$1 dest=$2 shift; shift dst=\"$raw_dst/$dest\" get_file \"$src\" \"$dst\" done """ elif lang == 'python': s += """ raw_dst = sys.argv[-1] src_list = sys.argv[1:-1] """ s += expand_list(lang, 'src_list', 'expanded_list', True) s += \ """ # Expand does not automatically split the outputlines, so they are still on # the src\tdest form for line in expanded_list: src, dest = line.split() dst = raw_dst + os.sep + dest (status, out) = get_file(src, dst) sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def head_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) if lang == 'sh': s += 'lines=20\n' s += parse_options(lang, 'n:', ' n) lines="$OPTARG";;') elif lang == 'python': s += 'lines = 20\n' s += parse_options(lang, 'n:', ''' elif opt == "-n": lines = val ''') s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' orig_args=("$@") path_list="path=$1" shift while [ $# -gt "0" ]; do path_list="$path_list;path=$1" shift done head_file $lines $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = head_file(lines, path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def jobaction_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. Currently 'sh' and 'python' are supported. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the action and job_id strings used directly: # action="$1" job_id="$2";job_id="$3";...;job_id="$N" orig_args=("$@") action=\"$1\" shift job_id_list=\"job_id=$1\" shift while [ \"$#\" -gt \"0\" ]; do job_id_list=\"$job_id_list;job_id=$1\" shift done job_action $action $job_id_list """ elif lang == 'python': s += \ """ # Build the action and job_id strings used directly: # action=$1 job_id="$2";job_id="$3";...;job_id="$N" action = \"%s\" % sys.argv[1] job_id_list = \"job_id=%s\" % \";job_id=\".join(sys.argv[2:]) (status, out) = job_action(action, job_id_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def liveio_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. 
""" s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 4, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the action, job_id, src and dst strings used directly: # action="$1" job_id="$2" src="$2";src="$3";...;src=$((N-1) dst="$N" orig_args=("$@") action=\"$1\" shift job_id=\"$1\" shift src_list=\"src=$1\" shift while [ \"$#\" -gt \"1\" ]; do src_list=\"$src_list;src=$1\" shift done dst=\"$1\" job_liveio $action $job_id $src_list $dst """ elif lang == 'python': s += \ """ # Build the action, job_id, src and dst strings used directly: # action="$1" job_id="$2" src="$2";src="$3";...;src=$((N-1) dst="$N" action = \"%s\" % sys.argv[1] job_id = \"%s\" % sys.argv[2] src_list = \"src=%s\" % \";src=\".join(sys.argv[3:-1]) dst = \"%s\" % sys.argv[-1] (status, out) = job_liveio(action, job_id, src_list, dst) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def ls_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # ls cgi supports wild cards natively so no need to use # expand here s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'alr', ''' a) server_flags="${server_flags}a" flags="${flags} -a";; l) server_flags="${server_flags}l" flags="${flags} -l";; r) server_flags="${server_flags}r" flags="${flags} -r";;''') elif lang == 'python': s += parse_options(lang, 'alr', ''' elif opt == "-a": server_flags += "a" elif opt == "-l": server_flags += "l" elif opt == "-r": server_flags += "r"''') s += arg_count_check(lang, None, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' orig_args=("$@") if [ $# -gt 0 ]; then path_list="path=$1" shift else path_list="path=." fi while [ $# -gt "0" ]; do path_list="$path_list;path=$1" shift done ls_file $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' if len(sys.argv) == 1: sys.argv.append(".") path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = ls_file(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def mkdir_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # Client side wild card expansion doesn't make sense for mkdir s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'p', ' p) server_flags="${server_flags}p"\n flags="${flags} -p";;' ) elif lang == 'python': s += parse_options(lang, 'p', ' elif opt == "-p":\n server_flags += "p"' ) s += arg_count_check(lang, 1, 2) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' orig_args=(\"$@\") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done mk_dir $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = mk_dir(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def mqueue_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. 
""" s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 2, 3) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += """ # optional third argument depending on action - add dummy job_mqueue $@ '' """ elif lang == 'python': s += \ """ # optional third argument depending on action - add dummy sys.argv.append('') (status, out) = job_mqueue(*(sys.argv[1:4])) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def mv_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # mv cgi supports wild cards natively so no need to use # expand here s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' orig_args=("$@") src_list="src=$1" shift while [ $# -gt 1 ]; do src_list="$src_list;src=$1" shift done dst=$1 mv_file $src_list $dst """ elif lang == 'python': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' src_list = \"src=%s\" % \";src=\".join(sys.argv[1:-1]) dst = sys.argv[-1] (status, out) = mv_file(src_list, dst) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def put_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # TODO: can we support wildcards in destination? (do we want to?) # - destination like somedir*/somefile ? # - when somedir* and somefile exists # - when somedir* exists but somefile doesn't exists there # -> we need to expand dirname alone too for both to work! # - destination like somedir*/somefile* ? # - what about ambiguous expansions? # We should handle uploads like this: # migput localfile . -> localfile # migput localfile remotefile -> remotefile # migput localfile remotedir -> remotedir/localfile # migput ../localdir/localfile remotedir -> upload as file and expect server ERROR # migput ../localdir/localfile remotedir/ -> remotedir/localfile # migput ../localdir . -> ERROR? # migput -r ../localdir . -> localdir # migput -r ../localdir remotedir -> remotedir/localdir # -> remotedir/localdir/* s = '' s += basic_main_init(lang) if lang == 'sh': s += 'submit_mrsl=0\n' s += 'recursive=0\n' s += 'extract_package=0\n' s += parse_options(lang, 'prx', ' p) submit_mrsl=1;;\n r) recursive=1;;\n x) extract_package=1;;' ) elif lang == 'python': s += 'submit_mrsl = False\n' s += 'recursive = False\n' s += 'extract_package = False\n' s += parse_options(lang, 'prx', ''' elif opt == "-p": submit_mrsl = True elif opt == "-r": recursive = True elif opt == "-x": extract_package = True''') s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ src_list=(\"$@\") raw_dst=\"${src_list[$(($#-1))]}\" unset src_list[$(($#-1))] # remove single '.' to avoid problems with missing ending slash if [ \".\" = \"$raw_dst\" ]; then dst=\"\" else dst=\"$raw_dst\" fi # The for loop automatically expands any wild cards in src_list for src in ${src_list[@]}; do if [ ! 
-e \"$src\" ]; then echo \"No such file or directory: $src !\" continue fi if [ -d \"$src\" ]; then if [ $recursive -ne 1 ]; then echo \"Nonrecursive put skipping directory: $src\" continue fi # Recursive dirs may not exist - create them first src_parent=`dirname $src` src_target=`basename $src` dirs=`cd $src_parent && find $src_target -type d` # force mkdir -p old_flags=\"$server_flags\" server_flags=\"p\" dir_list=\"\" for dir in $dirs; do dir_list=\"$dir_list;path=$dst/$dir\" done mk_dir \"$dir_list\" server_flags=\"$old_flags\" sources=`cd $src_parent && find $src_target -type f` for path in $sources; do put_file \"$src_parent/$path\" \"$dst/$path\" $submit_mrsl $extract_package done else put_file \"$src\" \"$dst\" $submit_mrsl $extract_package fi done """ elif lang == 'python': s += \ """ from glob import glob raw_list = sys.argv[1:-1] raw_dst = sys.argv[-1] if \".\" == raw_dst: dst = \"\" else: dst = raw_dst # Expand sources src_list = [] for src in raw_list: expanded = glob(src) if expanded: src_list += expanded else: # keep bogus pattern for correct output order src_list += [src] for src in src_list: if not os.path.exists(src): print \"No such file or directory: %s !\" % src continue if os.path.isdir(src): if not recursive: print \"Nonrecursive put skipping directory: %s\" % src continue src_parent = os.path.abspath(os.path.dirname(src)) for root, dirs, files in os.walk(os.path.abspath(src)): # Recursive dirs may not exist - create them first # force mkdir -p old_flags = \"$server_flags\" server_flags = \"p\" rel_root = root.replace(src_parent, '', 1).lstrip(os.sep) dir_list = ';'.join(['path=%s' % os.path.join(dst, rel_root, i) for i in dirs]) # add current root dir_list += ';path=%s' % os.path.join(dst, rel_root) mk_dir(dir_list) server_flags = \"$old_flags\" for name in files: src_path = os.path.join(root, name) dst_path = os.path.join(dst, rel_root, name) (status, out) = put_file(src_path, dst_path, submit_mrsl, extract_package) print ''.join(out), else: (status, out) = put_file(src, dst, submit_mrsl, extract_package) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def read_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 4, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += """ read_file $@ """ elif lang == 'python': s += \ """ (status, out) = read_file(*(sys.argv[1:])) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def resubmit_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the job_id string used directly: # 'job_id="$1";job_id="$2";...;job_id=$N' orig_args=("$@") job_id_list=\"job_id=$1\" shift while [ \"$#\" -gt \"0\" ]; do job_id_list=\"$job_id_list;job_id=$1\" shift done resubmit_job $job_id_list """ elif lang == 'python': s += \ """ # Build the job_id_list string used directly: # 'job_id="$1";job_id="$2";...;job_id=$N' job_id_list = \"job_id=%s\" % \";job_id=\".join(sys.argv[1:]) (status, out) = resubmit_job(job_id_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' 
% lang return s def rm_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # rm cgi supports wild cards natively so no need to use # expand here s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'r', ' r) server_flags="${server_flags}r"\n flags="${flags} -r";;' ) elif lang == 'python': s += parse_options(lang, 'r', ' elif opt == "-r":\n server_flags += "r"' ) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' orig_args=(\"$@\") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done rm_file $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = rm_file(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def rmdir_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # Client side wild card expansion doesn't make sense for rmdir s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'p', ' p) server_flags="${server_flags}p"\n flags="${flags} -p";;' ) elif lang == 'python': s += parse_options(lang, 'p', ' elif opt == "-p":\n server_flags += "p"' ) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' orig_args=(\"$@\") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done rm_dir $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = rm_dir(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def stat_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' orig_args=("$@") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done stat_file $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = stat_file(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def status_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. 
""" s = '' s += basic_main_init(lang) if lang == 'sh': s += "max_job_count=''\n" s += parse_options(lang, 'm:S', ''' m) max_job_count="$OPTARG";; S) server_flags="${server_flags}s" flags="${flags} -S";;''') elif lang == 'python': s += "max_job_count = ''\n" s += parse_options(lang, 'm:S', ''' elif opt == "-m": max_job_count = val elif opt == "-S": server_flags += "s"''') s += arg_count_check(lang, None, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the job_id string used directly: # 'job_id="$1";job_id="$2";...;job_id=$N' orig_args=("$@") job_id_list=\"job_id=$1\" shift while [ \"$#\" -gt \"0\" ]; do job_id_list=\"$job_id_list;job_id=$1\" shift done job_status $job_id_list $max_job_count """ elif lang == 'python': s += \ """ # Build the job_id string used directly: # 'job_id="$1";job_id="$2";...;job_id=$N' job_id_list = \"job_id=%s\" % \";job_id=\".join(sys.argv[1:]) (status, out) = job_status(job_id_list, max_job_count) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def submit_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ extract_package=1 submit_mrsl=1 src_list=(\"$@\") for src in \"${src_list[@]}\"; do dst=`basename \"$src\"` submit_file \"$src\" $dst $submit_mrsl $extract_package done """ elif lang == 'python': s += \ """ extract_package = True submit_mrsl = True src_list = sys.argv[1:] for src in src_list: dst = os.path.basename(src) (status, out) = submit_file(src, dst, submit_mrsl, extract_package) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def tail_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) if lang == 'sh': s += 'lines=20\n' s += parse_options(lang, 'n:', ' n) lines="$OPTARG";;') elif lang == 'python': s += 'lines = 20\n' s += parse_options(lang, 'n:', ''' elif opt == "-n": lines = val ''') s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' orig_args=("$@") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done tail_file $lines $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = tail_file(lines, path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def test_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, None, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Prepare for file operations echo 'this is a test file used by the MiG self test' > mig-test.txt echo '::EXECUTE::' > mig-test.mRSL echo 'pwd' >> mig-test.mRSL echo 'Upload test file used in other tests' put_file mig-test.txt . 0 0 >& /dev/null if [ $? -ne 0 ]; then echo 'Upload failed!' 
exit 1 else echo 'Upload succeeded' fi if [ $# -eq 0 ]; then op_list=(%s) else op_list=(\"$@\") fi for op in \"${op_list[@]}\"; do test_op \"$op\" done """\ % ' '.join(script_ops) elif lang == 'python': s += \ """ if len(sys.argv) - 1 == 0: op_list = %s else: op_list = sys.argv[1:] for op in op_list: test_op(op) """\ % script_ops else: print 'Error: %s not supported!' % lang return s def touch_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # touch cgi supports wild cards natively so no need to use # expand here # Client side wild card expansion doesn't make sense for touch s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' orig_args=(\"$@\") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done touch_file $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = touch_file(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def truncate_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) if lang == 'sh': s += 'size=0\n' s += parse_options(lang, 'n:', ' n) size="$OPTARG";;') elif lang == 'python': s += 'size = 0\n' s += parse_options(lang, 'n:', ''' elif opt == "-n": size = val ''') s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' orig_args=("$@") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done truncate_file $size $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path="$1";path="$2";...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = truncate_file(size, path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def unzip_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # unzip cgi supports wild cards natively so no need to use # expand here s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' orig_args=("$@") src_list="src=$1" shift while [ $# -gt 1 ]; do src_list="$src_list;src=$1" shift done dst=$1 unzip_file $src_list $dst """ elif lang == 'python': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' src_list = \"src=%s\" % \";src=\".join(sys.argv[1:-1]) dst = sys.argv[-1] (status, out) = unzip_file(src_list, dst) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def wc_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. 
""" s = '' s += basic_main_init(lang) if lang == 'sh': s += parse_options(lang, 'blw', ''' b) server_flags="${server_flags}b" flags="${flags} -b";; l) server_flags="${server_flags}l" flags="${flags} -l";; w) server_flags="${server_flags}w" flags="${flags} -w";;''') elif lang == 'python': s += parse_options(lang, 'blw', ''' elif opt == "-b": server_flags += "b" elif opt == "-l": server_flags += "l" elif opt == "-w": server_flags += "w"''') s += arg_count_check(lang, 1, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' orig_args=(\"$@\") path_list=\"path=$1\" shift while [ \"$#\" -gt \"0\" ]; do path_list=\"$path_list;path=$1\" shift done wc_file $path_list """ elif lang == 'python': s += \ """ # Build the path string used directly: # 'path=$1;path=$2;...;path=$N' path_list = \"path=%s\" % \";path=\".join(sys.argv[1:]) (status, out) = wc_file(path_list) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def write_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ s = '' s += basic_main_init(lang) s += parse_options(lang, None, None) s += arg_count_check(lang, 4, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += """ write_file $@ """ elif lang == 'python': s += \ """ (status, out) = write_file(*(sys.argv[1:])) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' % lang return s def zip_main(lang): """ Generate main part of corresponding scripts. lang specifies which script language to generate in. """ # zip cgi supports wild cards natively so no need to use # expand here s = '' s += basic_main_init(lang) if lang == 'sh': s += 'current_dir=""\n' s += parse_options(lang, 'w:', ' w) current_dir="$OPTARG";;') elif lang == 'python': s += 'current_dir = ""\n' s += parse_options(lang, 'w:', ''' elif opt == "-w": current_dir = val ''') s += arg_count_check(lang, 2, None) s += check_conf_readable(lang) s += configure(lang) if lang == 'sh': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' orig_args=("$@") src_list="src=$1" shift while [ $# -gt 1 ]; do src_list="$src_list;src=$1" shift done dst=$1 # current_dir may be empty zip_file \"$current_dir\" $src_list $dst """ elif lang == 'python': s += \ """ # Build the src string used directly: # 'src="$1";src="$2";...;src=$N' src_list = \"src=%s\" % \";src=\".join(sys.argv[1:-1]) dst = sys.argv[-1] (status, out) = zip_file(current_dir, src_list, dst) print ''.join(out), sys.exit(status) """ else: print 'Error: %s not supported!' 
% lang return s # ###################### # Generator functions # # ###################### def generate_cancel(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_cat(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_cp(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_doc(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_filemetaio(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += 
version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) def generate_get(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += expand_function(lang, curl_cmd) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_head(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_jobaction(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_lib(script_ops, scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate shared lib for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += check_var_function(lang) script += read_conf_function(lang) script += expand_function(lang, curl_cmd) for function in script_ops: script += shared_op_function(function, lang, curl_cmd) script += basic_main_init(lang) script += check_conf_readable(lang) script += configure(lang) write_script(script, dest_dir + os.sep + script_name, mode=0644) return True def generate_liveio(scripts_languages, dest_dir='.'): # Extract op from function name op = 
sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_ls(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_mkdir(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_mqueue(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_mv(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, 
curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_put(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) # Recursive put requires mkdir script += mkdir_function(lang, curl_cmd) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_read(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_resubmit(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_rm(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_rmdir(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % 
(mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_stat(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_status(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_submit(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_tail(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_test(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op 
script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) # use put function for preparation script += shared_op_function('put', lang, curl_cmd) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_touch(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_truncate(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_unzip(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_wc(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += 
shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_write(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True def generate_zip(scripts_languages, dest_dir='.'): # Extract op from function name op = sys._getframe().f_code.co_name.replace('generate_', '') # Generate op script for each of the languages in scripts_languages for (lang, interpreter, extension) in scripts_languages: verbose(verbose_mode, 'Generating %s script for %s' % (op, lang)) script_name = '%s%s.%s' % (mig_prefix, op, extension) script = '' script += init_script(op, lang, interpreter) script += version_function(lang) script += shared_usage_function(op, lang, extension) script += check_var_function(lang) script += read_conf_function(lang) script += shared_op_function(op, lang, curl_cmd) script += shared_main(op, lang) write_script(script, dest_dir + os.sep + script_name) return True # Defaults verbose_mode = False shared_lib = True test_script = True include_license = True # Supported MiG operations (don't add 'test' as it is optional) # TODO: add find, *freeze, *re, grep, jobfeasible, jobschedule, mrslview # settings, vm*, script_ops = [ 'cancel', 'cat', 'cp', 'doc', 'filemetaio', 'get', 'head', 'jobaction', 'liveio', 'ls', 'mkdir', 'mqueue', 'mv', 'put', 'read', 'rm', 'rmdir', 'stat', 'status', 'submit', 'resubmit', 'tail', 'touch', 'truncate', 'unzip', 'wc', 'write', 'zip', ] # Script prefix for all user scripts mig_prefix = 'mig' # Default commands: sh_lang = 'sh' sh_cmd = '/bin/sh' sh_ext = 'sh' python_lang = 'python' # python_cmd is only actually used on un*x so don't worry about path python_cmd = '/usr/bin/python' python_ext = 'py' # curl_cmd must be generic for cross platform support curl_cmd = 'curl' dest_dir = '.' # ########### # ## Main ### # ########### # Only run interactive commands if called directly as executable if __name__ == '__main__': opts_str = 'c:d:hilp:s:tvV' try: (opts, args) = getopt.getopt(sys.argv[1:], opts_str) except getopt.GetoptError, goe: print 'Error: %s' % goe usage() sys.exit(1) for (opt, val) in opts: if opt == '-c': curl_cmd = val elif opt == '-d': dest_dir = val elif opt == '-i': include_license = False elif opt == '-l': shared_lib = False elif opt == '-p': python_cmd = val elif opt == '-s': sh_cmd = val elif opt == '-t': test_script = True elif opt == '-v': verbose_mode = True elif opt == '-V': version() sys.exit(0) elif opt == '-h': usage() sys.exit(0) else: print 'Error: %s not supported!' % opt usage() sys.exit(1) verbose(verbose_mode, 'using curl from: %s' % curl_cmd) verbose(verbose_mode, 'using sh from: %s' % sh_cmd) verbose(verbose_mode, 'using python from: %s' % python_cmd) verbose(verbose_mode, 'writing script to: %s' % dest_dir) if not os.path.isdir(dest_dir): print "Error: destination directory doesn't exist!" 
sys.exit(1) argc = len(args) if argc == 0: # Add new languages here languages = [(sh_lang, sh_cmd, sh_ext), (python_lang, python_cmd, python_ext)] for (lang, cmd, ext) in languages: print 'Generating %s scripts' % lang else: languages = [] # check arguments for lang in args: if lang == 'sh': interpreter = sh_cmd extension = sh_ext elif lang == 'python': interpreter = python_cmd extension = python_ext else: print 'Unknown script language: %s - ignoring!' % lang continue print 'Generating %s scripts' % lang languages.append((lang, interpreter, extension)) # Generate all scripts for op in script_ops: generator = 'generate_%s' % op eval(generator)(languages, dest_dir) if shared_lib: generate_lib(script_ops, languages, dest_dir) if test_script: generate_test(languages, dest_dir) if include_license: write_license(dest_dir) sys.exit(0)
heromod/migrid
mig/shared/userscriptgen.py
Python
gpl-2.0
129,151
[ "Brian" ]
7443fca7c51a3c50458c8b8728f275d7569bd5a68a2acc75f1951d355d571318
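The userscriptgen code above builds every client script by plain string concatenation: each *_main(lang) helper returns the language-specific body, and each generate_*() driver writes one file per (lang, interpreter, extension) tuple. Below is a minimal self-contained sketch of that composition pattern, not taken from the MiG sources; demo_main, generate_demo and the migdemo name are hypothetical stand-ins for the real fragment helpers and script names.

import os

def demo_main(lang):
    # Return the language-specific body, mirroring ls_main(), put_main(), etc.
    if lang == 'sh':
        return 'echo "hello from sh"\n'
    elif lang == 'python':
        return 'print("hello from python")\n'
    raise ValueError('%s not supported!' % lang)

def generate_demo(scripts_languages, dest_dir='.'):
    # One script per (lang, interpreter, extension) tuple, as in the drivers above
    for (lang, interpreter, extension) in scripts_languages:
        script = '#!%s\n' % interpreter  # stands in for init_script()
        script += demo_main(lang)
        name = os.path.join(dest_dir, 'migdemo.%s' % extension)
        with open(name, 'w') as handle:
            handle.write(script)

generate_demo([('sh', '/bin/sh', 'sh'), ('python', '/usr/bin/python', 'py')])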
# Copyright (C) 2012-2016 Max Planck Institute for Polymer Research # Copyright (C) 2008-2011 Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" **************************** espressopp.interaction.LJcos **************************** if :math:`r^2 \leq border_{pot}`, then: .. math:: U = 4(\frac{1}{r^{12}} - \frac{1}{r^6}) + 1 - \phi else: .. math:: U = \frac{1}{2}\phi (cos(\alpha r^2+\beta) - 1) .. function:: espressopp.interaction.LJcos(phi) :param phi: (default: 1.0) :type phi: real .. function:: espressopp.interaction.VerletListLJcos(vl) :param vl: :type vl: .. function:: espressopp.interaction.VerletListLJcos.getPotential(type1, type2) :param type1: :param type2: :type type1: :type type2: :rtype: .. function:: espressopp.interaction.VerletListLJcos.getVerletList() :rtype: A Python list of lists. .. function:: espressopp.interaction.VerletListLJcos.setPotential(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListAdressLJcos(vl, fixedtupleList) :param vl: :param fixedtupleList: :type vl: :type fixedtupleList: .. function:: espressopp.interaction.VerletListAdressLJcos.setPotentialAT(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListAdressLJcos.setPotentialCG(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListHadressLJcos(vl, fixedtupleList) :param vl: :param fixedtupleList: :type vl: :type fixedtupleList: .. function:: espressopp.interaction.VerletListHadressLJcos.setPotentialAT(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListHadressLJcos.setPotentialCG(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.CellListLJcos(stor) :param stor: :type stor: .. function:: espressopp.interaction.CellListLJcos.setPotential(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.FixedPairListLJcos(system, vl, potential) :param system: :param vl: :param potential: :type system: :type vl: :type potential: .. function:: espressopp.interaction.FixedPairListLJcos.getFixedPairList() :rtype: A Python list of lists. .. function:: espressopp.interaction.FixedPairListLJcos.setFixedPairList(fixedpairlist) :param fixedpairlist: :type fixedpairlist: .. 
function:: espressopp.interaction.FixedPairListLJcos.setPotential(potential) :param potential: :type potential: """ from espressopp import pmi, infinity from espressopp.esutil import * from espressopp.interaction.Potential import * from espressopp.interaction.Interaction import * from _espressopp import interaction_LJcos, \ interaction_VerletListLJcos, \ interaction_VerletListAdressLJcos, \ interaction_VerletListHadressLJcos, \ interaction_CellListLJcos, \ interaction_FixedPairListLJcos class LJcosLocal(PotentialLocal, interaction_LJcos): def __init__(self, phi=1.0): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_LJcos, phi) class VerletListLJcosLocal(InteractionLocal, interaction_VerletListLJcos): def __init__(self, vl): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_VerletListLJcos, vl) def setPotential(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, type1, type2, potential) def getPotential(self, type1, type2): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getPotential(self, type1, type2) def getVerletListLocal(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getVerletList(self) class VerletListAdressLJcosLocal(InteractionLocal, interaction_VerletListAdressLJcos): def __init__(self, vl, fixedtupleList): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_VerletListAdressLJcos, vl, fixedtupleList) def setPotentialAT(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialAT(self, type1, type2, potential) def setPotentialCG(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialCG(self, type1, type2, potential) class VerletListHadressLJcosLocal(InteractionLocal, interaction_VerletListHadressLJcos): def __init__(self, vl, fixedtupleList): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_VerletListHadressLJcos, vl, fixedtupleList) def setPotentialAT(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialAT(self, type1, type2, potential) def setPotentialCG(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialCG(self, type1, type2, potential) class CellListLJcosLocal(InteractionLocal, interaction_CellListLJcos): def __init__(self, stor): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_CellListLJcos, stor) def setPotential(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, type1, type2, potential) class FixedPairListLJcosLocal(InteractionLocal, 
interaction_FixedPairListLJcos): def __init__(self, system, vl, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_FixedPairListLJcos, system, vl, potential) def setPotential(self, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, potential) def setFixedPairList(self, fixedpairlist): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setFixedPairList(self, fixedpairlist) def getFixedPairList(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getFixedPairList(self) if pmi.isController: class LJcos(Potential): 'The Lennard-Jones potential.' pmiproxydefs = dict( cls = 'espressopp.interaction.LJcosLocal', pmiproperty = ['phi','sigma'] ) class VerletListLJcos(Interaction, metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.interaction.VerletListLJcosLocal', pmicall = ['setPotential', 'getPotential', 'getVerletList'] ) class VerletListAdressLJcos(Interaction, metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.interaction.VerletListAdressLJcosLocal', pmicall = ['setPotentialAT', 'setPotentialCG'] ) class VerletListHadressLJcos(Interaction, metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.interaction.VerletListHadressLJcosLocal', pmicall = ['setPotentialAT', 'setPotentialCG'] ) class CellListLJcos(Interaction, metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.interaction.CellListLJcosLocal', pmicall = ['setPotential'] ) class FixedPairListLJcos(Interaction, metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.interaction.FixedPairListLJcosLocal', pmicall = ['setPotential', 'setFixedPairList','getFixedPairList' ] )
espressopp/espressopp
src/interaction/LJcos.py
Python
gpl-3.0
10,827
[ "ESPResSo" ]
fe3baca224d5d88f56744b4f0e588e504a8c8d1c7428ed54a130ae78026619c9
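The LJcos docstring above specifies a piecewise potential: a shifted Lennard-Jones core for r^2 <= border_pot and the cosine tail (phi/2)(cos(alpha r^2 + beta) - 1) beyond it. The following is a minimal sketch, not part of ESPResSo++, that evaluates exactly that formula; alpha, beta and border_pot are fixed inside the C++ kernel, so they appear here as plain arguments.

import math

def ljcos_energy(r, phi, alpha, beta, border_pot):
    # Piecewise energy as given in the module docstring above
    r2 = r * r
    if r2 <= border_pot:
        # Shifted Lennard-Jones core
        return 4.0 * (1.0 / r ** 12 - 1.0 / r ** 6) + 1.0 - phi
    # Cosine tail whose depth is controlled by phi
    return 0.5 * phi * (math.cos(alpha * r2 + beta) - 1.0)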
"""Base classes for parameters of algorithms with biomod functionality""" from zope.interface import provider from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm from zope.schema.interfaces import IVocabularyFactory brt_var_monotone_vocab = SimpleVocabulary([ SimpleTerm(-1, '-1', u'-1'), SimpleTerm(1, '+1', u'+1'), ]) @provider(IVocabularyFactory) def brt_var_monotone_vocab_factory(context): return brt_var_monotone_vocab brt_family_vocab = SimpleVocabulary([ SimpleTerm('bernoulli', 'bernoulli', 'bernoulli (binomial)'), SimpleTerm('poisson', 'poisson', 'poisson'), SimpleTerm('laplace', 'laplace', 'laplace'), SimpleTerm('gaussian', 'gaussian', 'gaussian'), ]) @provider(IVocabularyFactory) def brt_family_vocab_factory(context): return brt_family_vocab lm_na_action_vocab = SimpleVocabulary([ SimpleTerm('na.fail', 'na.fail', 'na.fail'), SimpleTerm('na.omit', 'na.omit', 'na.omit'), SimpleTerm('na.exclude', 'na.exclude', 'na.exclude'), SimpleTerm(None, 'NULL', 'NULL') ]) @provider(IVocabularyFactory) def lm_na_action_vocab_factory(context): return lm_na_action_vocab pa_strategy_vocab = SimpleVocabulary([ SimpleTerm('random', 'random', 'random'), SimpleTerm('sre', 'sre', 'sre'), SimpleTerm('disk', 'disk', 'disk'), SimpleTerm('none', 'none', 'none'), ]) @provider(IVocabularyFactory) def pa_strategy_vocab_factory(context): return pa_strategy_vocab
BCCVL/org.bccvl.compute
src/org/bccvl/compute/vocabularies.py
Python
gpl-2.0
1,461
[ "Gaussian" ]
d9c67aebeb1c568209529914ff0184664674386b8789d3ff52e59492f717f6a6
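Each factory above simply returns a module-level SimpleVocabulary whose terms carry (value, token, title) triples. Below is a minimal sketch of how such a vocabulary behaves once constructed, assuming only that zope.schema is installed; the example mirrors pa_strategy_vocab but is not part of org.bccvl.compute.

from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm

vocab = SimpleVocabulary([
    SimpleTerm('random', 'random', u'random'),
    SimpleTerm('sre', 'sre', u'sre'),
])

for term in vocab:  # iteration yields the SimpleTerm objects
    print('%s -> %s' % (term.token, term.value))

print(vocab.getTerm('sre').title)  # look up a term by its value
print('random' in vocab)  # membership is tested against term values -> True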
"""This module defines a linear response TDDFT-class. """ from math import sqrt import sys import numpy as np from ase.units import Hartree import _gpaw import gpaw.mpi as mpi MASTER = mpi.MASTER from gpaw import debug from gpaw.poisson import PoissonSolver from gpaw.output import initialize_text_stream from gpaw.lrtddft.excitation import Excitation, ExcitationList from gpaw.lrtddft.kssingle import KSSingles from gpaw.lrtddft.omega_matrix import OmegaMatrix from gpaw.lrtddft.apmb import ApmB ##from gpaw.lrtddft.transition_density import TransitionDensity from gpaw.utilities import packed_index from gpaw.utilities.lapack import diagonalize from gpaw.xc import XC from gpaw.lrtddft.spectrum import spectrum __all__ = ['LrTDDFT', 'photoabsorption_spectrum', 'spectrum'] class LrTDDFT(ExcitationList): """Linear Response TDDFT excitation class Input parameters: calculator: the calculator object after a ground state calculation nspins: number of spins considered in the calculation Note: Valid only for unpolarised ground state calculation eps: Minimal occupation difference for a transition (default 0.001) istart: First occupied state to consider jend: Last unoccupied state to consider xc: Exchange-Correlation approximation in the Kernel derivative_level: 0: use Exc, 1: use vxc, 2: use fxc if available filename: read from a file """ def __init__(self, calculator=None, nspins=None, eps=0.001, istart=0, jend=None, energy_range=None, xc=None, derivative_level=1, numscale=0.00001, txt=None, filename=None, finegrid=2, force_ApmB=False, # for tests eh_comm=None # parallelization over eh-pairs ): self.nspins = None self.istart = None self.jend = None if isinstance(calculator, str): ExcitationList.__init__(self, None, txt) return self.read(calculator) else: ExcitationList.__init__(self, calculator, txt) if filename is not None: return self.read(filename) self.filename = None self.calculator = None self.eps = None self.xc = None self.derivative_level = None self.numscale = numscale self.finegrid = finegrid self.force_ApmB = force_ApmB if eh_comm is None: eh_comm = mpi.serial_comm elif isinstance(eh_comm, (mpi.world.__class__, mpi.serial_comm.__class__)): # Correct type already. pass else: # world should be a list of ranks: eh_comm = mpi.world.new_communicator(np.asarray(eh_comm)) self.eh_comm = eh_comm if calculator is not None: calculator.converge_wave_functions() if calculator.density.nct_G is None: calculator.set_positions() self.update(calculator, nspins, eps, istart, jend, energy_range, xc, derivative_level, numscale) def analyse(self, what=None, out=None, min=0.1): """Print info about the transitions. Parameters: 1. what: I list of excitation indicees, None means all 2. out : I where to send the output, None means sys.stdout 3. 
min : I minimal contribution to list (0<min<1) """ if what is None: what = range(len(self)) elif isinstance(what, int): what = [what] if out is None: out = sys.stdout for i in what: print >> out, str(i) + ':', self[i].analyse(min=min) def update(self, calculator=None, nspins=None, eps=0.001, istart=0, jend=None, energy_range=None, xc=None, derivative_level=None, numscale=0.001): changed = False if self.calculator != calculator or \ self.nspins != nspins or \ self.eps != eps or \ self.istart != istart or \ self.jend != jend : changed = True if not changed: return self.calculator = calculator self.nspins = nspins self.eps = eps self.istart = istart self.jend = jend self.energy_range = energy_range self.xc = xc self.derivative_level = derivative_level self.numscale = numscale self.forced_update() def forced_update(self): """Recalc yourself.""" self.kss = KSSingles(calculator=self.calculator, nspins=self.nspins, eps=self.eps, istart=self.istart, jend=self.jend, energy_range=self.energy_range, txt=self.txt) if not self.force_ApmB: Om = OmegaMatrix name = 'LrTDDFT' if self.xc: xc = XC(self.xc) if hasattr(xc, 'hybrid') and xc.hybrid > 0.0: Om = ApmB name = 'LrTDDFThyb' else: Om = ApmB name = 'LrTDDFThyb' self.Om = Om(self.calculator, self.kss, self.xc, self.derivative_level, self.numscale, finegrid=self.finegrid, eh_comm=self.eh_comm, txt=self.txt) self.name = name def diagonalize(self, istart=None, jend=None, energy_range=None): self.Om.diagonalize(istart, jend, energy_range) # remove old stuff while len(self): self.pop() print >> self.txt, 'LrTDDFT digonalized:' for j in range(len(self.Om.kss)): self.append(LrTDDFTExcitation(self.Om,j)) print >> self.txt, ' ', str(self[-1]) def get_Om(self): return self.Om def read(self, filename=None, fh=None): """Read myself from a file""" if fh is None: if filename.endswith('.gz'): try: import gzip f = gzip.open(filename) except: f = open(filename, 'r') else: f = open(filename, 'r') self.filename = filename else: f = fh self.filename = None # get my name s = f.readline().replace('\n','') self.name = s.split()[1] self.xc = f.readline().replace('\n','').split()[0] values = f.readline().split() self.eps = float(values[0]) if len(values) > 1: self.derivative_level = int(values[1]) self.numscale = float(values[2]) self.finegrid = int(values[3]) else: # old writing style, use old defaults self.numscale = 0.001 self.kss = KSSingles(filehandle=f) if self.name == 'LrTDDFT': self.Om = OmegaMatrix(kss=self.kss, filehandle=f, txt=self.txt) else: self.Om = ApmB(kss=self.kss, filehandle=f, txt=self.txt) self.Om.Kss(self.kss) # check if already diagonalized p = f.tell() s = f.readline() if s != '# Eigenvalues\n': # go back to previous position f.seek(p) else: # load the eigenvalues n = int(f.readline().split()[0]) for i in range(n): l = f.readline().split() E = float(l[0]) me = [float(l[1]), float(l[2]), float(l[3])] self.append(LrTDDFTExcitation(e=E, m=me)) if fh is None: f.close() # update own variables self.istart = self.Om.fullkss.istart self.jend = self.Om.fullkss.jend def singlets_triplets(self): """Split yourself into a singlet and triplet object""" slr = LrTDDFT(None, self.nspins, self.eps, self.istart, self.jend, self.xc, self.derivative_level, self.numscale) tlr = LrTDDFT(None, self.nspins, self.eps, self.istart, self.jend, self.xc, self.derivative_level, self.numscale) slr.Om, tlr.Om = self.Om.singlets_triplets() for lr in [slr, tlr]: lr.kss = lr.Om.fullkss return slr, tlr def single_pole_approximation(self, i, j): """Return the excitation according to the 
single pole approximation. See e.g.: Grabo et al, Theochem 501 (2000) 353-367 """ for ij, kss in enumerate(self.kss): if kss.i == i and kss.j == j: return sqrt(self.Om.full[ij][ij]) * Hartree return self.Om.full[ij][ij] / kss.energy * Hartree def __str__(self): string = ExcitationList.__str__(self) string += '# derived from:\n' string += self.kss.__str__() return string def write(self, filename=None, fh=None): """Write current state to a file. 'filename' is the filename. If the filename ends in .gz, the file is automatically saved in compressed gzip format. 'fh' is a filehandle. This can be used to write into already opened files. """ if mpi.rank == mpi.MASTER: if fh is None: if filename.endswith('.gz'): try: import gzip f = gzip.open(filename,'wb') except: f = open(filename, 'w') else: f = open(filename, 'w') else: f = fh f.write('# ' + self.name + '\n') xc = self.xc if xc is None: xc = 'RPA' if self.calculator is not None: xc += ' ' + self.calculator.get_xc_functional() f.write(xc + '\n') f.write('%g %d %g %d' % (self.eps, int(self.derivative_level), self.numscale, int(self.finegrid)) + '\n') self.kss.write(fh=f) self.Om.write(fh=f) if len(self): f.write('# Eigenvalues\n') istart = self.istart if istart is None: istart = self.kss.istart jend = self.jend if jend is None: jend = self.kss.jend f.write('%d %d %d'%(len(self), istart, jend) + '\n') for ex in self: f.write(ex.outstring()) f.write('# Eigenvectors\n') for ex in self: for w in ex.f: f.write('%g '%w) f.write('\n') if fh is None: f.close() def d2Excdnsdnt(dup, ddn): """Second derivative of Exc polarised""" res = [[0, 0], [0, 0]] for ispin in range(2): for jspin in range(2): res[ispin][jspin]=np.zeros(dup.shape) _gpaw.d2Excdnsdnt(dup, ddn, ispin, jspin, res[ispin][jspin]) return res def d2Excdn2(den): """Second derivative of Exc unpolarised""" res = np.zeros(den.shape) _gpaw.d2Excdn2(den, res) return res class LrTDDFTExcitation(Excitation): def __init__(self,Om=None,i=None, e=None,m=None): # define from the diagonalized Omega matrix if Om is not None: if i is None: raise RuntimeError ev = Om.eigenvalues[i] if ev < 0: # we reached an instability, mark it with a negative value self.energy = -sqrt(-ev) else: self.energy = sqrt(ev) self.f = Om.eigenvectors[i] self.kss = Om.kss self.me = 0. 
for f,k in zip(self.f, self.kss): self.me += f * k.me return # define from energy and matrix element if e is not None: if m is None: raise RuntimeError self.energy = e self.me = m return raise RuntimeError def density_change(self,paw): """get the density change associated with this transition""" raise NotImplementedError def outstring(self): str = '%g ' % self.energy str += ' ' for m in self.me: str += ' %g' % m str += '\n' return str def __str__(self): m2 = np.sum(self.me * self.me) m = sqrt(m2) if m > 0: me = self.me/m else: me = self.me str = "<LrTDDFTExcitation> om=%g[eV] |me|=%g (%.2f,%.2f,%.2f)" % \ (self.energy * Hartree, m, me[0], me[1], me[2]) return str def analyse(self,min=.1): """Return an analysis string of the excitation""" s='E=%.3f'%(self.energy * Hartree)+' eV, f=%.3g'\ %(self.get_oscillator_strength()[0])+'\n' def sqr(x): return x*x spin = ['u','d'] min2 = sqr(min) rest = np.sum(self.f**2) for f,k in zip(self.f,self.kss): f2 = sqr(f) if f2>min2: s += ' %d->%d ' % (k.i,k.j) + spin[k.pspin] + ' ' s += '%.3g \n'%f2 rest -= f2 s+=' rest=%.3g'%rest return s def photoabsorption_spectrum(excitation_list, spectrum_file=None, e_min=None, e_max=None, delta_e = None, folding='Gauss', width=0.1, comment=None): """Uniform absorption spectrum interface Parameters: ================= =================================================== ``exlist`` ExcitationList ``spectrum_file`` File name for the output file, STDOUT if not given ``e_min`` min. energy, set to cover all energies if not given ``e_max`` max. energy, set to cover all energies if not given ``delta_e`` energy spacing ``energyunit`` Energy unit, default 'eV' ``folding`` Gauss (default) or Lorentz ``width`` folding width in terms of the chosen energyunit ================= =================================================== all energies in [eV] """ spectrum(exlist=excitation_list, filename=spectrum_file, emin=e_min, emax=e_max, de=delta_e, energyunit='eV', folding=folding, width=width, comment=comment)
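To make the intended use of this class concrete, here is a minimal end-to-end sketch, from ground state to spectrum. The molecule, grid spacing, state window, and file names are illustrative choices, assuming a working ASE/GPAW installation (ase.build for the test molecule); only calls defined in this module or in standard GPAW/ASE are used.

# Hypothetical driver script; parameter values are placeholders, not defaults.
from ase.build import molecule
from gpaw import GPAW
from gpaw.lrtddft import LrTDDFT, photoabsorption_spectrum

atoms = molecule('H2O')
atoms.center(vacuum=4.0)
calc = GPAW(h=0.25, nbands=30, txt='h2o_gs.txt')
atoms.set_calculator(calc)
atoms.get_potential_energy()          # converge the ground state first

lr = LrTDDFT(calc, xc='LDA', istart=0, jend=15)  # restrict the KS window
lr.diagonalize()
lr.write('h2o_lr.dat.gz')             # gzip chosen automatically by suffix
photoabsorption_spectrum(lr, 'h2o_spectrum.dat', folding='Gauss', width=0.1)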
ajylee/gpaw-rtxs
gpaw/lrtddft/__init__.py
Python
gpl-3.0
15,015
[ "ASE", "GPAW" ]
a1b0a28dfc325fae0d4bb1a493dc7b6a11f8be2fdec4f0ca5ab0d62efa142187
""" BoltzTraT2 is a python software interpolating band structures and computing materials properties from dft band structure using Boltzmann semi-classical transport theory. This module provides a pymatgen interface to BoltzTraT2. Some of the code is written following the examples provided in BoltzTraP2 BoltzTraT2 has been developed by Georg Madsen, Jesús Carrete, Matthieu J. Verstraete. https://gitlab.com/sousaw/BoltzTraP2 https://www.sciencedirect.com/science/article/pii/S0010465518301632 References are: Georg K.H.Madsen, Jesús Carrete, Matthieu J.Verstraete BoltzTraP2, a program for interpolating band structures and calculating semi-classical transport coefficients Computer Physics Communications 231, 140-145, 2018 Madsen, G. K. H., and Singh, D. J. (2006). BoltzTraP. A code for calculating band-structure dependent quantities. Computer Physics Communications, 175, 67-71 TODO: - spin polarized bands - read first derivative of the eigenvalues from vasprun.xml (mommat) - handle magnetic moments (magmom) """ import numpy as np import matplotlib.pyplot as plt from monty.serialization import dumpfn from pymatgen.symmetry.bandstructure import HighSymmKpath from pymatgen.electronic_structure.bandstructure import \ BandStructureSymmLine, Kpoint, Spin from pymatgen.io.vasp import Vasprun from pymatgen.io.ase import AseAtomsAdaptor from pymatgen.electronic_structure.dos import Dos, CompleteDos, Orbital from pymatgen.electronic_structure.boltztrap import BoltztrapError from pymatgen.electronic_structure.plotter import BSPlotter, DosPlotter try: from BoltzTraP2 import sphere from BoltzTraP2 import fite from BoltzTraP2 import bandlib as BL from BoltzTraP2 import units except ImportError: raise BoltztrapError("BoltzTraP2 has to be installed and working") __author__ = "Francesco Ricci" __copyright__ = "Copyright 2018, The Materials Project" __version__ = "1.0" __maintainer__ = "Fracesco Ricci" __email__ = "frankyricci@gmail.com" __status__ = "Development" __date__ = "August 2018" class BandstructureLoader: """Loader for Bandsstrcture object""" def __init__(self, bs_obj, structure=None, nelect=None, spin=None): """ :param bs_obj: :param structure: :param nelect: :param spin: """ self.kpoints = np.array([kp.frac_coords for kp in bs_obj.kpoints]) if structure is None: self.structure = bs_obj.structure else: self.structure = structure self.atoms = AseAtomsAdaptor.get_atoms(self.structure) self.proj = None if len(bs_obj.bands) == 1: e = list(bs_obj.bands.values())[0] self.ebands = e * units.eV self.dosweight = 2.0 if bs_obj.projections: self.proj = bs_obj.projections[Spin.up].transpose((1, 0, 3, 2)) elif len(bs_obj.bands) == 2: if not spin: raise BaseException("spin-polarized bs, you need to select a spin") elif spin in (-1, 1): e = bs_obj.bands[Spin(spin)] self.ebands = e * units.eV self.dosweight = 1.0 if bs_obj.projections: self.proj = bs_obj.projections[Spin(spin)].transpose((1, 0, 3, 2)) self.lattvec = self.atoms.get_cell().T * units.Angstrom self.mommat = None self.magmom = None self.fermi = bs_obj.efermi * units.eV self.nelect = nelect self.UCvol = self.structure.volume * units.Angstrom ** 3 self.spin = spin if not bs_obj.is_metal() and not spin: self.vbm_idx = list(bs_obj.get_vbm()['band_index'].values())[0][-1] self.cbm_idx = list(bs_obj.get_cbm()['band_index'].values())[0][0] def get_lattvec(self): """ :return: The lattice vectors. 
""" try: self.lattvec except AttributeError: self.lattvec = self.atoms.get_cell().T * units.Angstrom return self.lattvec def bandana(self, emin=-np.inf, emax=np.inf): """Cut out bands outside the range (emin,emax)""" bandmin = np.min(self.ebands, axis=1) bandmax = np.max(self.ebands, axis=1) ii = np.nonzero(bandmin < emax) nemax = ii[0][-1] ii = np.nonzero(bandmax > emin) nemin = ii[0][0] # BoltzTraP2.misc.info("BANDANA output") # for iband in range(len(self.ebands)): # BoltzTraP2.misc.info(iband, bandmin[iband], bandmax[iband], ( # (bandmin[iband] < emax) & (bandmax[iband] > emin))) self.ebands = self.ebands[nemin:nemax + 1] if isinstance(self.proj, np.ndarray): self.proj = self.proj[:, nemin:nemax + 1, :, :] if self.mommat is not None: self.mommat = self.mommat[:, nemin:nemax + 1, :] # Removing bands may change the number of valence electrons if self.nelect is not None: self.nelect -= self.dosweight * nemin return nemin, nemax def set_upper_lower_bands(self, e_lower, e_upper): """ Set fake upper/lower bands, useful to set the same energy range in the spin up/down bands when calculating the DOS """ lower_band = e_lower * np.ones((1, self.ebands.shape[1])) upper_band = e_upper * np.ones((1, self.ebands.shape[1])) self.ebands = np.vstack((lower_band, self.ebands, upper_band)) if isinstance(self.proj, np.ndarray): proj_lower = self.proj[:, 0:1, :, :] proj_upper = self.proj[:, -1:, :, :] self.proj = np.concatenate((proj_lower, self.proj, proj_upper), axis=1) def get_volume(self): """ :return: Volume """ try: self.UCvol except AttributeError: lattvec = self.get_lattvec() self.UCvol = np.abs(np.linalg.det(lattvec)) return self.UCvol class VasprunLoader: """Loader for Vasprun object""" def __init__(self, vrun_obj=None): """ :param vrun_obj: Vasprun object. 
""" if vrun_obj: self.kpoints = np.array(vrun_obj.actual_kpoints) self.structure = vrun_obj.final_structure self.atoms = AseAtomsAdaptor.get_atoms(self.structure) self.proj = None if len(vrun_obj.eigenvalues) == 1: e = list(vrun_obj.eigenvalues.values())[0] self.ebands = e[:, :, 0].transpose() * units.eV self.dosweight = 2.0 if vrun_obj.projected_eigenvalues: self.proj = list(vrun_obj.projected_eigenvalues.values())[0] elif len(vrun_obj.eigenvalues) == 2: raise BoltztrapError("spin bs case not implemented") self.lattvec = self.atoms.get_cell().T * units.Angstrom # TODO: read mommat from vasprun self.mommat = None self.magmom = None self.spin = None self.fermi = vrun_obj.efermi * units.eV self.nelect = vrun_obj.parameters['NELECT'] self.UCvol = self.structure.volume * units.Angstrom ** 3 def from_file(self, vasprun_file): """Get a vasprun.xml file and return a VasprunLoader""" vrun_obj = Vasprun(vasprun_file, parse_projected_eigen=True) return VasprunLoader(vrun_obj) def get_lattvec(self): """ :return: Lattice vectors """ try: self.lattvec except AttributeError: self.lattvec = self.atoms.get_cell().T * units.Angstrom return self.lattvec def bandana(self, emin=-np.inf, emax=np.inf): """Cut out bands outside the range (emin,emax)""" bandmin = np.min(self.ebands, axis=1) bandmax = np.max(self.ebands, axis=1) ii = np.nonzero(bandmin < emax) nemax = ii[0][-1] ii = np.nonzero(bandmax > emin) nemin = ii[0][0] # BoltzTraP2.misc.info("BANDANA output") # for iband in range(len(self.ebands)): # BoltzTraP2.misc.info(iband, bandmin[iband], bandmax[iband], ( # (bandmin[iband] < emax) & (bandmax[iband] > emin))) self.ebands = self.ebands[nemin:nemax] if isinstance(self.proj, np.ndarray): self.proj = self.proj[:, nemin:nemax, :, :] if self.mommat is not None: self.mommat = self.mommat[:, nemin:nemax, :] # Removing bands may change the number of valence electrons if self.nelect is not None: self.nelect -= self.dosweight * nemin return nemin, nemax def get_volume(self): """ :return: Volume of cell """ try: self.UCvol except AttributeError: lattvec = self.get_lattvec() self.UCvol = np.abs(np.linalg.det(lattvec)) return self.UCvol class BztInterpolator: """ Interpolate the dft band structures """ def __init__(self, data, lpfac=10, energy_range=1.5, curvature=True): """ Args: data: A loader lpfac: the number of interpolation points in the real space. By default 10 gives 10 time more points in the real space than the number of kpoints given in reciprocal space. energy_range: usually the interpolation is not needed on the entire energy range but on a specific range around the fermi level. This energy in eV fix the range around the fermi level (E_fermi-energy_range,E_fermi+energy_range) of bands that will be interpolated and taken into account to calculate the transport properties. curvature: boolean value to enable/disable the calculation of second derivative related trasport properties (Hall coefficient). 
Example: data = VasprunLoader().from_file('vasprun.xml') bztInterp = BztInterpolator(data) """ self.data = data num_kpts = self.data.kpoints.shape[0] self.efermi = self.data.fermi self.nemin, self.nemax = self.data.bandana(emin=self.efermi - (energy_range * units.eV), emax=self.efermi + (energy_range * units.eV)) self.equivalences = sphere.get_equivalences(self.data.atoms, self.data.magmom, num_kpts * lpfac) self.coeffs = fite.fitde3D(self.data, self.equivalences) self.eband, self.vvband, self.cband = fite.getBTPbands(self.equivalences, self.coeffs, self.data.lattvec, curvature=curvature) def get_band_structure(self): """Return a BandStructureSymmLine object interpolating bands along a High symmetry path calculated from the structure using HighSymmKpath function""" kpath = HighSymmKpath(self.data.structure) kpt_line = [Kpoint(k, self.data.structure.lattice) for k in kpath.get_kpoints(coords_are_cartesian=False)[ 0]] kpoints = np.array( [kp.frac_coords for kp in kpt_line]) labels_dict = {l: k for k, l in zip( *kpath.get_kpoints(coords_are_cartesian=False)) if l} lattvec = self.data.get_lattvec() egrid, vgrid = fite.getBands(kpoints, self.equivalences, lattvec, self.coeffs) bands_dict = {Spin.up: (egrid / units.eV)} sbs = BandStructureSymmLine(kpoints, bands_dict, self.data.structure.lattice.reciprocal_lattice, self.efermi / units.eV, labels_dict=labels_dict) return sbs def get_dos(self, partial_dos=False, npts_mu=10000, T=None): """ Return a Dos object interpolating bands Args: partial_dos: if True, projections will be interpolated as well and partial doses will be return. Projections must be available in the loader. npts_mu: number of energy points of the Dos T: parameter used to smooth the Dos """ spin = self.data.spin if isinstance(self.data.spin, int) else 1 energies, densities, vvdos, cdos = BL.BTPDOS(self.eband, self.vvband, npts=npts_mu) if T is not None: densities = BL.smoothen_DOS(energies, densities, T) tdos = Dos(self.efermi / units.eV, energies / units.eV, {Spin(spin): densities}) if partial_dos: tdos = self.get_partial_doses(tdos=tdos, npts_mu=npts_mu, T=T) return tdos def get_partial_doses(self, tdos, npts_mu, T): """ Return a CompleteDos object interpolating the projections tdos: total dos previously calculated npts_mu: number of energy points of the Dos T: parameter used to smooth the Dos """ spin = self.data.spin if isinstance(self.data.spin, int) else 1 if not isinstance(self.data.proj, np.ndarray): raise BoltztrapError("No projections loaded.") bkp_data_ebands = np.copy(self.data.ebands) pdoss = {} # for spin in self.data.proj: for isite, site in enumerate(self.data.structure.sites): if site not in pdoss: pdoss[site] = {} for iorb, orb in enumerate(Orbital): if iorb == self.data.proj.shape[-1]: break if orb not in pdoss[site]: pdoss[site][orb] = {} self.data.ebands = self.data.proj[:, :, isite, iorb].T coeffs = fite.fitde3D(self.data, self.equivalences) proj, vvproj, cproj = fite.getBTPbands(self.equivalences, coeffs, self.data.lattvec) edos, pdos = BL.DOS(self.eband, npts=npts_mu, weights=np.abs(proj.real)) if T is not None: pdos = BL.smoothen_DOS(edos, pdos, T) pdoss[site][orb][Spin(spin)] = pdos self.data.ebands = bkp_data_ebands return CompleteDos(self.data.structure, total_dos=tdos, pdoss=pdoss) class BztTransportProperties: """ Compute Seebeck, Conductivity, Electrical part of thermal conductivity and Hall coefficient, conductivity effective mass, Power Factor tensors w.r.t. the chemical potential and temperatures, from dft band structure via interpolation. 
""" def __init__(self, BztInterpolator, temp_r=np.arange(100, 1400, 100), doping=10. ** np.arange(16, 23), npts_mu=4000, CRTA=1e-14, margin=None): """ Args: BztInterpolator: a BztInterpolator previously generated temp_r: numpy array of temperatures at which to calculate trasport properties doping: doping levels at which to calculate trasport properties npts_mu: number of energy points at which to calculate trasport properties CRTA: constant value of the relaxation time Upon creation, it contains properties tensors w.r.t. the chemical potential of size (len(temp_r),npts_mu,3,3): Conductivity_mu (S/m), Seebeck_mu (microV/K), Kappa_mu (W/(m*K)), Power_Factor_mu (milliW/K); cond_Effective_mass_mu (m_e) calculated as Ref. Also: Carrier_conc_mu: carrier concentration of size (len(temp_r),npts_mu) Hall_carrier_conc_trace_mu: trace of Hall carrier concentration of size (len(temp_r),npts_mu) mu_r_eV: array of energies in eV and with E_fermi at 0.0 where all the properties are calculated. Example: bztTransp = BztTransportProperties(bztInterp,temp_r = np.arange(100,1400,100)) """ self.CRTA = CRTA self.temp_r = temp_r self.doping = doping self.dosweight = BztInterpolator.data.dosweight self.epsilon, self.dos, self.vvdos, self.cdos = BL.BTPDOS(BztInterpolator.eband, BztInterpolator.vvband, npts=npts_mu, cband=BztInterpolator.cband) if margin is None: margin = 9. * units.BOLTZMANN * temp_r.max() mur_indices = np.logical_and(self.epsilon > self.epsilon.min() + margin, self.epsilon < self.epsilon.max() - margin) self.mu_r = self.epsilon[mur_indices] N, L0, L1, L2, Lm11 = BL.fermiintegrals( self.epsilon, self.dos, self.vvdos, mur=self.mu_r, Tr=temp_r, dosweight=self.dosweight, cdos=self.cdos) self.efermi = BztInterpolator.data.fermi / units.eV self.mu_r_eV = self.mu_r / units.eV - self.efermi self.nelect = BztInterpolator.data.nelect self.volume = BztInterpolator.data.get_volume() # Compute the Onsager coefficients from those Fermi integrals self.Conductivity_mu, self.Seebeck_mu, self.Kappa_mu, Hall_mu = BL.calc_Onsager_coefficients(L0, L1, L2, self.mu_r, temp_r, self.volume, Lm11=Lm11) # Common properties rescaling self.Conductivity_mu *= CRTA # S / m self.Seebeck_mu *= 1e6 # microvolt / K self.Kappa_mu *= CRTA # W / (m K) self.Hall_carrier_conc_trace_mu = units.Coulomb * 1e-6 / (np.abs(Hall_mu[:, :, 0, 1, 2] + Hall_mu[:, :, 2, 0, 1] + Hall_mu[:, :, 1, 2, 0]) / 3) self.Carrier_conc_mu = (N + self.nelect) / (self.volume / (units.Meter / 100.) ** 3) # Derived properties cond_eff_mass = np.zeros((len(self.temp_r), len(self.mu_r), 3, 3)) for t in range(len(self.temp_r)): for i in range(len(self.mu_r)): try: cond_eff_mass[t, i] = np.linalg.inv(self.Conductivity_mu[t, i]) * self.Carrier_conc_mu[ t, i] * units.qe_SI ** 2 / units.me_SI * 1e6 except np.linalg.LinAlgError: pass self.Effective_mass_mu = cond_eff_mass * CRTA self.Power_Factor_mu = (self.Seebeck_mu @ self.Seebeck_mu) @ self.Conductivity_mu self.Power_Factor_mu *= 1e-9 # milliWatt / m / K**2 # self.props_as_dict() def compute_properties_doping(self, doping, temp_r=None): """ Calculate all the properties w.r.t. the doping levels in input. 
Args: doping: numpy array specifing the doping levels When executed, it add the following variable at the BztTransportProperties object: Conductivity_doping, Seebeck_doping, Kappa_doping, Power_Factor_doping, cond_Effective_mass_doping are dictionaries with 'n' and 'p' keys and arrays of dim (len(temp_r),len(doping),3,3) as values doping_carriers: number of carriers for each doping level mu_doping_eV: the chemical potential corrispondent to each doping level """ if temp_r is None: temp_r = self.temp_r self.Conductivity_doping, self.Seebeck_doping, self.Kappa_doping = {}, {}, {} # self.Hall_doping = {} self.Power_Factor_doping, self.Effective_mass_doping = {}, {} mu_doping = {} doping_carriers = [dop * (self.volume / (units.Meter / 100.) ** 3) for dop in doping] for dop_type in ['n', 'p']: sbk = np.zeros((len(temp_r), len(doping), 3, 3)) cond = np.zeros((len(temp_r), len(doping), 3, 3)) kappa = np.zeros((len(temp_r), len(doping), 3, 3)) hall = np.zeros((len(temp_r), len(doping), 3, 3, 3)) if dop_type == 'p': doping_carriers = [-dop for dop in doping_carriers] mu_doping[dop_type] = np.zeros((len(temp_r), len(doping))) for t, temp in enumerate(temp_r): for i, dop_car in enumerate(doping_carriers): mu_doping[dop_type][t, i] = self.find_mu_doping(self.epsilon, self.dos, self.nelect + dop_car, temp, self.dosweight) N, L0, L1, L2, Lm11 = BL.fermiintegrals(self.epsilon, self.dos, self.vvdos, mur=mu_doping[dop_type][t], Tr=np.array([temp]), dosweight=self.dosweight) cond[t], sbk[t], kappa[t], hall[t] = BL.calc_Onsager_coefficients(L0, L1, L2, mu_doping[dop_type][t], np.array([temp]), self.volume, Lm11) self.Conductivity_doping[dop_type] = cond * self.CRTA # S / m self.Seebeck_doping[dop_type] = sbk * 1e6 # microVolt / K self.Kappa_doping[dop_type] = kappa * self.CRTA # W / (m K) # self.Hall_doping[dop_type] = hall self.Power_Factor_doping[dop_type] = (sbk @ sbk) @ cond * self.CRTA * 1e3 cond_eff_mass = np.zeros((len(temp_r), len(doping), 3, 3)) for t in range(len(temp_r)): for i, dop in enumerate(doping): try: cond_eff_mass[t, i] = np.linalg.inv(cond[t, i]) * dop * units.qe_SI ** 2 / units.me_SI * 1e6 except np.linalg.LinAlgError: pass self.Effective_mass_doping[dop_type] = cond_eff_mass self.doping_carriers = doping_carriers self.doping = doping self.mu_doping = mu_doping self.mu_doping_eV = {k: v / units.eV - self.efermi for k, v in mu_doping.items()} def find_mu_doping(self, epsilon, dos, N0, T, dosweight=2.): """ Find the mu. :param epsilon: :param dos: :param N0: :param T: :param dosweight: :return: """ delta = np.empty_like(epsilon) for i, e in enumerate(epsilon): delta[i] = BL.calc_N(epsilon, dos, e, T, dosweight) + N0 delta = np.abs(delta) # Find the position optimizing this distance pos = np.abs(delta).argmin() return epsilon[pos] def props_as_dict(self): """ :return: Get the properties as a dict. """ props = ("Conductivity", "Seebeck", "Kappa") # ,"Hall" props_unit = (r"$\mathrm{kS\,m^{-1}}$", r"$\mu$V/K", r"") p_dict = {'Temps': self.temp_r, 'mu': self.mu_r / units.eV - self.efermi} for prop, unit in zip(props, props_unit): p_array = eval("self." 
+ prop) if prop is not None: p_dict[prop] = {'units': unit} else: continue for it, temp in enumerate(self.temp_r): p_dict[prop][str(temp)] = {} p_dict[prop][str(temp)]['tensor'] = p_array[it] p_dict[prop][str(temp)]['eigs'] = np.linalg.eigh(p_array[it])[0] p_dict[prop][str(temp)]['avg_eigs'] = p_dict[prop][str(temp)]['eigs'].mean(axis=1) self.props_dict = p_dict def save(self, fname="Transport_Properties.json"): """ Writes the properties to a json file. :param fname: Filename """ dumpfn(self.props_dict, fname) class BztPlotter: """ Plotter to plot transport properties, interpolated bands along some high symmetry k-path, and fermisurface Example: bztPlotter = BztPlotter(bztTransp,bztInterp) """ def __init__(self, bzt_transP=None, bzt_interp=None): """ :param bzt_transP: :param bzt_interp: """ self.bzt_transP = bzt_transP self.bzt_interp = bzt_interp def plot_props(self, prop_y, prop_x, prop_z='temp', output='avg_eigs', dop_type='n', doping=None, temps=None, xlim=(-2, 2), ax=None): """ Function to plot the transport properties. Args: prop_y: property to plot among ("Conductivity","Seebeck","Kappa","Carrier_conc", "Hall_carrier_conc_trace"). Abbreviations are possible, like "S" for "Seebeck" prop_x: independent variable in the x-axis among ('mu','doping','temp') prop_z: third variable to plot multiple curves ('doping','temp') output: 'avg_eigs' to plot the average of the eigenvalues of the properties tensors; 'eigs' to plot the three eigenvalues of the properties tensors. dop_type: 'n' or 'p' to specify the doping type in plots that use doping levels as prop_x or prop_z doping: list of doping level to plot, useful to reduce the number of curves when prop_z='doping' temps: list of temperatures to plot, useful to reduce the number of curves when prop_z='temp' xlim: chemical potential range, useful when prop_x='mu' ax: figure.axes where to plot. If None, a new figure is produced. Example: bztPlotter.plot_props('S','mu','temp',temps=[600,900,1200]).show() more example are provided in the notebook "How to use Boltztra2 interface.ipynb". """ props = ("Conductivity", "Seebeck", "Kappa", "Effective_mass", "Power_Factor", "Carrier_conc", "Hall_carrier_conc_trace") props_lbl = ("Conductivity", "Seebeck", "$K_{el}$", "Effective mass", "Power Factor", "Carrier concentration", "Hall carrier conc.") props_unit = (r"$(\mathrm{kS\,m^{-1}})$", r"($\mu$V/K)", r"$(W / (m \cdot K))$", r"$(m_e)$", r"$( mW / (m\cdot K^2)$", r"$(cm^{-3})$", r"$(cm^{-3})$") props_short = [p[:len(prop_y)] for p in props] if prop_y not in props_short: raise BoltztrapError("prop_y not valid") if prop_x not in ('mu', 'doping', 'temp'): raise BoltztrapError("prop_x not valid") if prop_z not in ('doping', 'temp'): raise BoltztrapError("prop_z not valid") idx_prop = props_short.index(prop_y) leg_title = "" mu = self.bzt_transP.mu_r_eV if prop_z == 'doping' and prop_x == 'temp': p_array = eval("self.bzt_transP." + props[idx_prop] + '_' + prop_z) else: p_array = eval("self.bzt_transP." 
+ props[idx_prop] + '_' + prop_x) if ax is None: plt.figure(figsize=(10, 8)) temps_all = self.bzt_transP.temp_r.tolist() if temps is None: temps = self.bzt_transP.temp_r.tolist() doping_all = self.bzt_transP.doping.tolist() if doping is None: doping = self.bzt_transP.doping.tolist() # special case of carrier and hall carrier concentration 2d arrays (temp,mu) if idx_prop in [5, 6]: if prop_z == 'temp' and prop_x == 'mu': for temp in temps: ti = temps_all.index(temp) prop_out = p_array[ti] if idx_prop == 6 else np.abs(p_array[ti]) plt.semilogy(mu, prop_out, label=str(temp) + ' K') plt.xlabel(r"$\mu$ (eV)", fontsize=30) plt.xlim(xlim) else: raise BoltztrapError("only prop_x=mu and prop_z=temp are available for c.c. and Hall c.c.!") elif prop_z == 'temp' and prop_x == 'mu': for temp in temps: ti = temps_all.index(temp) prop_out = np.linalg.eigh(p_array[ti])[0] if output == 'avg_eigs': plt.plot(mu, prop_out.mean(axis=1), label=str(temp) + ' K') elif output == 'eigs': for i in range(3): plt.plot(mu, prop_out[:, i], label='eig ' + str(i) + ' ' + str(temp) + ' K') plt.xlabel(r"$\mu$ (eV)", fontsize=30) plt.xlim(xlim) elif prop_z == 'temp' and prop_x == 'doping': for temp in temps: ti = temps_all.index(temp) prop_out = np.linalg.eigh(p_array[dop_type][ti])[0] if output == 'avg_eigs': plt.semilogx(doping_all, prop_out.mean(axis=1), 's-', label=str(temp) + ' K') elif output == 'eigs': for i in range(3): plt.plot(doping_all, prop_out[:, i], 's-', label='eig ' + str(i) + ' ' + str(temp) + ' K') plt.xlabel(r"Carrier conc. $cm^{-3}$", fontsize=30) leg_title = dop_type + "-type" elif prop_z == 'doping' and prop_x == 'temp': for dop in doping: di = doping_all.index(dop) prop_out = np.linalg.eigh(p_array[dop_type][:, di])[0] if output == 'avg_eigs': plt.plot(temps_all, prop_out.mean(axis=1), 's-', label=str(dop) + ' $cm^{-3}$') elif output == 'eigs': for i in range(3): plt.plot(temps_all, prop_out[:, i], 's-', label='eig ' + str(i) + ' ' + str(dop) + ' $cm^{-3}$') plt.xlabel(r"Temperature (K)", fontsize=30) leg_title = dop_type + "-type" plt.ylabel(props_lbl[idx_prop] + ' ' + props_unit[idx_prop], fontsize=30) plt.xticks(fontsize=25) plt.yticks(fontsize=25) plt.legend(title=leg_title if leg_title != "" else "", fontsize=15) plt.tight_layout() plt.grid() return plt def plot_bands(self): """ Plot a band structure on symmetry line using BSPlotter() """ if self.bzt_interp is None: raise BoltztrapError("BztInterpolator not present") sbs = self.bzt_interp.get_band_structure() return BSPlotter(sbs).get_plot() def plot_dos(self, T=None, npoints=10000): """ Plot the total Dos using DosPlotter() """ if self.bzt_interp is None: raise BoltztrapError("BztInterpolator not present") tdos = self.bzt_interp.get_dos(T=T, npts_mu=npoints) # print(npoints) dosPlotter = DosPlotter() dosPlotter.add_dos('Total', tdos) return dosPlotter def merge_up_down_doses(dos_up, dos_dn): """ Merge the up and down DOSs. :param dos_up: Up DOS. :param dos_dn: Down DOS :return: CompleteDos. """ cdos = Dos(dos_up.efermi, dos_up.energies, {Spin.up: dos_up.densities[Spin.up], Spin.down: dos_dn.densities[Spin.down]}) if hasattr(dos_up, 'pdos') and hasattr(dos_dn, 'pdos'): pdoss = {} for site in dos_up.pdos: pdoss.setdefault(site, {}) for orb in dos_up.pdos[site]: pdoss[site].setdefault(orb, {}) pdoss[site][orb][Spin.up] = dos_up.pdos[site][orb][Spin.up] pdoss[site][orb][Spin.down] = dos_dn.pdos[site][orb][Spin.down] cdos = CompleteDos(dos_up.structure, total_dos=cdos, pdoss=pdoss) return cdos
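Pieced together from the class docstrings above, the intended workflow of this interface reads roughly as follows; the vasprun.xml path and the temperature and doping grids are illustrative values, not defaults to rely on.

# Workflow sketch assembled from the docstring examples in this module.
import numpy as np

data = VasprunLoader().from_file('vasprun.xml')       # load a VASP run
bztInterp = BztInterpolator(data, lpfac=10, energy_range=1.5)
bztTransp = BztTransportProperties(bztInterp, temp_r=np.arange(300, 1300, 100))
bztTransp.compute_properties_doping(doping=10. ** np.arange(16, 23))

bztPlotter = BztPlotter(bztTransp, bztInterp)
bztPlotter.plot_props('S', 'mu', 'temp', temps=[600, 900, 1200]).show()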
gVallverdu/pymatgen
pymatgen/electronic_structure/boltztrap2.py
Python
mit
31,894
[ "ASE", "BoltzTrap", "VASP", "pymatgen" ]
01f0ee216abedaecc4be5ca501fbd41b138a2e95066c3896c62463a0486865b5
'''apport package hook for min12xxw

(c) 2009 Canonical Ltd.
Author: Brian Murray <brian@ubuntu.com>
'''

# Import only the two helpers actually used, rather than a wildcard import.
from apport.hookutils import attach_hardware, attach_printing


def add_info(report):
    attach_hardware(report)
    attach_printing(report)
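A hook like this can be smoke-tested outside of a crash by building an empty report and calling add_info() by hand; a sketch assuming python-apport is installed, with output that depends on the machine.

# Hypothetical manual test for the hook above.
import apport

report = apport.Report()
add_info(report)
# attach_hardware()/attach_printing() populate hardware- and printing-related keys.
print(sorted(report.keys()))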
Alberto-Beralix/Beralix
i386-squashfs-root/usr/share/apport/package-hooks/source_min12xxw.py
Python
gpl-3.0
216
[ "Brian" ]
1c8df8337a5f23858bc46aedccb07a2a1d4b14f4c0efbb5da6ee0b76ecdad9b1
#! /usr/bin/env python

# Earlier PyMOL-based dihedral measurement, kept for reference but disabled.
#import __main__
#__main__.pymol_argv = ['pymol','-qc']
##__main__.pymol_argv = ['pymol','']
#import sys,time,os
#import pymol
#
#pymol.finish_launching()
#pymol.cmd.feedback("disable","all","actions")
#pymol.cmd.feedback("disable","all","results")
#sys.path.append("/home/scratch/software/Pymol-script-repo-master")

import sys

import numpy
from MDAnalysis import Universe

my_traj = sys.argv[1]

#pymol.cmd.load(my_traj,my_traj[:-4])
#pymol.cmd.split_states(my_traj[:-4])
#states = pymol.cmd.get_object_list()
#
#for state in states:
#    dih_angle = pymol.cmd.get_dihedral("%s//A/2/N1"%state, "%s//A/2/C6"%state, "%s//A/2/N6"%state, "%s//A/2/H61"%state)
#    print dih_angle

# Load the trajectory, using its first frame as the topology.  (The original
# script also built Universes from "init.pdb" plus an unused copy `v`, all of
# which were immediately shadowed; those dead assignments are removed.)
u = Universe(my_traj, my_traj)

# Derive the output file name from the trajectory name.
end = my_traj.find('.pdb')
fout_name = my_traj[0:end] + '_dist.dat'

# The two atom groups whose center-of-mass separation is monitored.
a1 = u.selectAtoms("segid A and resid 2 and name H51")
b1 = u.selectAtoms("segid A and resid 1 and name O6")
#da1 = u.selectAtoms("segid A and resid 2")

f = open(fout_name, 'w')
for ts in u.trajectory:
    distance1 = numpy.linalg.norm(a1.centerOfMass() - b1.centerOfMass())
    f.write('%7.3f\n' % (distance1))
f.close()
demharters/git_scripts
dist_rna_me1.py
Python
apache-2.0
1,413
[ "MDAnalysis", "PyMOL" ]
08ca39763f1ae87b7eb88ebe6c73934bbcbf6e4c134550acf7630e851fb32635
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.

## @file imageLoadExample.py

"""
Example of loading and viewing an image using pyvisi

Will hopefully help me write a decent interface.
"""

method = 'pyvisi'
format = 'pnm'

if method == 'pyvisi':
    ## this is the pyvisi code
    import sys

    # import the python visualisation interface
    from pyvisi import *
    # this is now where the renderer is specified
    from pyvisi.renderers.vtk import *

    # set up a scene
    scene = Scene()

    if format == 'jpeg':
        # add a jpeg image to the scene, and then load the file
        jpegImage = JpegImage(scene)
        jpegImage.load(fname="Flinders_eval.jpg")
        jpegImage.render()  # this should be done at the scene.render step
    elif format == 'png':
        # add png image to the scene, and then load the file
        pngImage = PngImage(scene)
        pngImage.load(fname="Flinders_eval.png")
        pngImage.render()
    elif format == 'bmp':
        # add bmp image to the scene, and then load the file
        bmpImage = BmpImage(scene)
        bmpImage.load(fname="Flinders_eval.bmp")
        bmpImage.render()
    elif format == 'tiff':
        # add tiff image to the scene, and then load the file
        tiffImage = TiffImage(scene)
        tiffImage.load(fname="Flinders_eval.tiff")
        tiffImage.render()
    elif format == 'pnm':
        # add pnm (ppm, pgm, pbm) image to the scene, and then load the file
        pnmImage = PnmImage(scene)
        pnmImage.load(fname="Flinders_eval.pnm")
        pnmImage.render()
    else:
        raise ValueError, "Unknown format: %s" % format

    # render the scene, pausing so that the opengl window doesn't disappear
    scene.render(pause=True, interactive=True)

elif method == 'vtk':
    ## this is the original vtk code
    import vtk

    _ren = vtk.vtkRenderer()
    _renWin = vtk.vtkRenderWindow()
    _renWin.AddRenderer(_ren)

    _imgActor = vtk.vtkImageActor()

    if format == 'jpeg':
        _jpegReader = vtk.vtkJPEGReader()
        _jpegReader.SetFileName("Flinders_eval.jpg")
        _imgActor.SetInput(_jpegReader.GetOutput())
    elif format == 'png':
        _pngReader = vtk.vtkPNGReader()
        _pngReader.SetFileName("Flinders_eval.png")
        _imgActor.SetInput(_pngReader.GetOutput())
    elif format == 'bmp':
        _bmpReader = vtk.vtkBMPReader()
        _bmpReader.SetFileName("Flinders_eval.bmp")
        _imgActor.SetInput(_bmpReader.GetOutput())
    elif format == 'tiff':
        _tiffReader = vtk.vtkTIFFReader()
        _tiffReader.SetFileName("Flinders_eval.tiff")
        _imgActor.SetInput(_tiffReader.GetOutput())
    elif format == 'pnm':
        _pnmReader = vtk.vtkPNMReader()
        _pnmReader.SetFileName("Flinders_eval.pnm")
        _imgActor.SetInput(_pnmReader.GetOutput())
    else:
        raise ValueError, "Unknown format: %s" % format

    _ren.AddActor(_imgActor)
    _renWin.SetSize(400, 400)
    _ren.SetBackground(0.1, 0.2, 0.4)
    _renWin.Render()

    raw_input("Press any key to continue")

else:
    print "Eeek!  What plotting method am I supposed to use???"

# vim: expandtab shiftwidth=4:
paultcochrane/pyvisi
examples/imageLoadExample.py
Python
gpl-2.0
3,883
[ "VTK" ]
1a4736d379c5b464ecd8dcd55f044e22070ad14d6dead292b22dfd6dcd92ccb2
#!/usr/bin/env python # -*- coding: utf-8 -*- # r""" A JSON data encoder and decoder. This Python module implements the JSON (http://json.org/) data encoding format; a subset of ECMAScript (aka JavaScript) for encoding primitive data types (numbers, strings, booleans, lists, and associative arrays) in a language-neutral simple text-based syntax. It can encode or decode between JSON formatted strings and native Python data types. Normally you would use the encode() and decode() functions defined by this module, but if you want more control over the processing you can use the JSON class. This implementation tries to be as completely cormforming to all intricacies of the standards as possible. It can operate in strict mode (which only allows JSON-compliant syntax) or a non-strict mode (which allows much more of the whole ECMAScript permitted syntax). This includes complete support for Unicode strings (including surrogate-pairs for non-BMP characters), and all number formats including negative zero and IEEE 754 non-numbers such a NaN or Infinity. The JSON/ECMAScript to Python type mappings are: ---JSON--- ---Python--- null None undefined undefined (note 1) Boolean (true,false) bool (True or False) Integer int or long (note 2) Float float String str or unicode ( "..." or u"..." ) Array [a, ...] list ( [...] ) Object {a:b, ...} dict ( {...} ) -- Note 1. an 'undefined' object is declared in this module which represents the native Python value for this type when in non-strict mode. -- Note 2. some ECMAScript integers may be up-converted to Python floats, such as 1e+40. Also integer -0 is converted to float -0, so as to preserve the sign (which ECMAScript requires). -- Note 3. numbers requiring more significant digits than can be represented by the Python float type will be converted into a Python Decimal type, from the standard 'decimal' module. In addition, when operating in non-strict mode, several IEEE 754 non-numbers are also handled, and are mapped to specific Python objects declared in this module: NaN (not a number) nan (float('nan')) Infinity, +Infinity inf (float('inf')) -Infinity neginf (float('-inf')) When encoding Python objects into JSON, you may use types other than native lists or dictionaries, as long as they support the minimal interfaces required of all sequences or mappings. This means you can use generators and iterators, tuples, UserDict subclasses, etc. To make it easier to produce JSON encoded representations of user defined classes, if the object has a method named json_equivalent(), then it will call that method and attempt to encode the object returned from it instead. It will do this recursively as needed and before any attempt to encode the object using it's default strategies. Note that any json_equivalent() method should return "equivalent" Python objects to be encoded, not an already-encoded JSON-formatted string. There is no such aid provided to decode JSON back into user-defined classes as that would dramatically complicate the interface. When decoding strings with this module it may operate in either strict or non-strict mode. The strict mode only allows syntax which is conforming to RFC 7159 (JSON), while the non-strict allows much more of the permissible ECMAScript syntax. The following are permitted when processing in NON-STRICT mode: * Unicode format control characters are allowed anywhere in the input. * All Unicode line terminator characters are recognized. * All Unicode white space characters are recognized. * The 'undefined' keyword is recognized. 
* Hexadecimal number literals are recognized (e.g., 0xA6, 0177). * String literals may use either single or double quote marks. * Strings may contain \x (hexadecimal) escape sequences, as well as the \v and \0 escape sequences. * Lists may have omitted (elided) elements, e.g., [,,,,,], with missing elements interpreted as 'undefined' values. * Object properties (dictionary keys) can be of any of the types: string literals, numbers, or identifiers (the later of which are treated as if they are string literals)---as permitted by ECMAScript. JSON only permits strings literals as keys. Concerning non-strict and non-ECMAScript allowances: * Octal numbers: If you allow the 'octal_numbers' behavior (which is never enabled by default), then you can use octal integers and octal character escape sequences (per the ECMAScript standard Annex B.1.2). This behavior is allowed, if enabled, because it was valid JavaScript at one time. * Multi-line string literals: Strings which are more than one line long (contain embedded raw newline characters) are never permitted. This is neither valid JSON nor ECMAScript. Some other JSON implementations may allow this, but this module considers that behavior to be a mistake. References: * JSON (JavaScript Object Notation) <http://json.org/> * RFC 7159. The application/json Media Type for JavaScript Object Notation (JSON) <http://www.ietf.org/rfc/rfc7159.txt> * ECMA-262 3rd edition (1999) <http://www.ecma-international.org/publications/files/ecma-st/ECMA-262.pdf> * IEEE 754-1985: Standard for Binary Floating-Point Arithmetic. <http://www.cs.berkeley.edu/~ejr/Projects/ieee754/> """ __author__ = "Deron Meranda <http://deron.meranda.us/>" __homepage__ = "http://deron.meranda.us/python/demjson/" __date__ = "2014-06-25" __version__ = "2.2.2" __version_info__ = ( 2, 2, 2 ) # Will be converted into a namedtuple below __credits__ = """Copyright (c) 2006-2014 Deron E. Meranda <http://deron.meranda.us/> Licensed under GNU LGPL (GNU Lesser General Public License) version 3.0 or later. See LICENSE.txt included with this software. This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/> or <http://www.fsf.org/licensing/>. 
""" # ---------------------------------------------------------------------- # Set demjson version try: from collections import namedtuple as _namedtuple __version_info__ = _namedtuple('version_info', ['major', 'minor', 'micro'])( *__version_info__ ) except ImportError: raise ImportError("demjson %s requires a Python 2.6 or later" % __version__ ) version, version_info = __version__, __version_info__ # Determine Python version _py_major, _py_minor = None, None def _get_pyver(): global _py_major, _py_minor import sys vi = sys.version_info try: _py_major, _py_minor = vi.major, vi.minor except AttributeError: _py_major, _py_minor = vi[0], vi[1] _get_pyver() # ---------------------------------------------------------------------- # Useful global constants content_type = 'application/json' file_ext = 'json' class _dummy_context_manager(object): """A context manager that does nothing on entry or exit.""" def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): return False _dummy_context_manager = _dummy_context_manager() # ---------------------------------------------------------------------- # Decimal and float types. # # If a JSON number can not be stored in a Python float without loosing # precision and the Python has the decimal type, then we will try to # use decimal instead of float. To make this determination we need to # know the limits of the float type, but Python doesn't have an easy # way to tell what the largest floating-point number it supports. So, # we detemine the precision and scale of the float type by testing it. try: # decimal module was introduced in Python 2.4 import decimal except ImportError: decimal = None def determine_float_limits( number_type=float ): """Determines the precision and range of the given float type. The passed in 'number_type' argument should refer to the type of floating-point number. It should either be the built-in 'float', or decimal context or constructor; i.e., one of: # 1. FLOAT TYPE determine_float_limits( float ) # 2. DEFAULT DECIMAL CONTEXT determine_float_limits( decimal.Decimal ) # 3. CUSTOM DECIMAL CONTEXT ctx = decimal.Context( prec=75 ) determine_float_limits( ctx ) Returns a named tuple with components: ( significant_digits, max_exponent, min_exponent ) Where: * significant_digits -- maximum number of *decimal* digits that can be represented without any loss of precision. This is conservative, so if there are 16 1/2 digits, it will return 16, not 17. * max_exponent -- The maximum exponent (power of 10) that can be represented before an overflow (or rounding to infinity) occurs. * min_exponent -- The minimum exponent (negative power of 10) that can be represented before either an underflow (rounding to zero) or a subnormal result (loss of precision) occurs. Note this is conservative, as subnormal numbers are excluded. """ if decimal: numeric_exceptions = (ValueError,decimal.Overflow,decimal.Underflow) else: numeric_exceptions = (ValueError,) if decimal and number_type == decimal.Decimal: number_type = decimal.DefaultContext if decimal and isinstance(number_type, decimal.Context): # Passed a decimal Context, extract the bound creator function. 
create_num = number_type.create_decimal decimal_ctx = decimal.localcontext(number_type) is_zero_or_subnormal = lambda n: n.is_zero() or n.is_subnormal() elif number_type == float: create_num = number_type decimal_ctx = _dummy_context_manager is_zero_or_subnormal = lambda n: n==0 else: raise TypeError("Expected a float type, e.g., float or decimal context") with decimal_ctx: zero = create_num('0.0') # Find signifianct digits by comparing floats of increasing # number of digits, differing in the last digit only, until # they numerically compare as being equal. sigdigits = None n = 0 while True: n = n + 1 pfx = '0.' + '1'*n a = create_num( pfx + '0') for sfx in '123456789': # Check all possible last digits to # avoid any partial-decimal. b = create_num( pfx + sfx ) if (a+zero) == (b+zero): sigdigits = n break if sigdigits: break # Find exponent limits. First find order of magnitude and # then use a binary search to find the exact exponent. base = '1.' + '1'*(sigdigits-1) base0 = '1.' + '1'*(sigdigits-2) minexp, maxexp = None, None for expsign in ('+','-'): minv = 0; maxv = 10 # First find order of magnitude of exponent limit while True: try: s = base + 'e' + expsign + str(maxv) s0 = base0 + 'e' + expsign + str(maxv) f = create_num( s ) + zero f0 = create_num( s0 ) + zero except numeric_exceptions: f = None if not f or not str(f)[0].isdigit() or is_zero_or_subnormal(f) or f==f0: break else: minv = maxv maxv = maxv * 10 # Now do a binary search to find exact limit while True: if minv+1 == maxv: if expsign=='+': maxexp = minv else: minexp = minv break elif maxv < minv: if expsign=='+': maxexp = None else: minexp = None break m = (minv + maxv) // 2 try: s = base + 'e' + expsign + str(m) s0 = base0 + 'e' + expsign + str(m) f = create_num( s ) + zero f0 = create_num( s0 ) + zero except numeric_exceptions: f = None else: if not f or not str(f)[0].isdigit(): f = None elif is_zero_or_subnormal(f) or f==f0: f = None if not f: # infinite maxv = m else: minv = m return _namedtuple('float_limits', ['significant_digits', 'max_exponent', 'min_exponent'])( sigdigits, maxexp, -minexp ) float_sigdigits, float_maxexp, float_minexp = determine_float_limits( float ) # For backwards compatibility with older demjson versions: def determine_float_precision(): v = determine_float_limits( float ) return ( v.significant_digits, v.max_exponent ) # ---------------------------------------------------------------------- # The undefined value. # # ECMAScript has an undefined value (similar to yet distinct from null). # Neither Python or strict JSON have support undefined, but to allow # JavaScript behavior we must simulate it. class _undefined_class(object): """Represents the ECMAScript 'undefined' value.""" __slots__ = [] def __repr__(self): return self.__module__ + '.undefined' def __str__(self): return 'undefined' def __nonzero__(self): return False undefined = _undefined_class() syntax_error = _undefined_class() # same as undefined, but has separate identity del _undefined_class # ---------------------------------------------------------------------- # Non-Numbers: NaN, Infinity, -Infinity # # ECMAScript has official support for non-number floats, although # strict JSON does not. Python doesn't either. So to support the # full JavaScript behavior we must try to add them into Python, which # is unfortunately a bit of black magic. If our python implementation # happens to be built on top of IEEE 754 we can probably trick python # into using real floats. Otherwise we must simulate it with classes. 
def _nonnumber_float_constants(): """Try to return the Nan, Infinity, and -Infinity float values. This is necessarily complex because there is no standard platform-independent way to do this in Python as the language (opposed to some implementation of it) doesn't discuss non-numbers. We try various strategies from the best to the worst. If this Python interpreter uses the IEEE 754 floating point standard then the returned values will probably be real instances of the 'float' type. Otherwise a custom class object is returned which will attempt to simulate the correct behavior as much as possible. """ try: # First, try (mostly portable) float constructor. Works under # Linux x86 (gcc) and some Unices. nan = float('nan') inf = float('inf') neginf = float('-inf') except ValueError: try: # Try the AIX (PowerPC) float constructors nan = float('NaNQ') inf = float('INF') neginf = float('-INF') except ValueError: try: # Next, try binary unpacking. Should work under # platforms using IEEE 754 floating point. import struct, sys xnan = '7ff8000000000000'.decode('hex') # Quiet NaN xinf = '7ff0000000000000'.decode('hex') xcheck = 'bdc145651592979d'.decode('hex') # -3.14159e-11 # Could use float.__getformat__, but it is a new python feature, # so we use sys.byteorder. if sys.byteorder == 'big': nan = struct.unpack('d', xnan)[0] inf = struct.unpack('d', xinf)[0] check = struct.unpack('d', xcheck)[0] else: nan = struct.unpack('d', xnan[::-1])[0] inf = struct.unpack('d', xinf[::-1])[0] check = struct.unpack('d', xcheck[::-1])[0] neginf = - inf if check != -3.14159e-11: raise ValueError('Unpacking raw IEEE 754 floats does not work') except (ValueError, TypeError): # Punt, make some fake classes to simulate. These are # not perfect though. For instance nan * 1.0 == nan, # as expected, but 1.0 * nan == 0.0, which is wrong. 
class nan(float): """An approximation of the NaN (not a number) floating point number.""" def __repr__(self): return 'nan' def __str__(self): return 'nan' def __add__(self,x): return self def __radd__(self,x): return self def __sub__(self,x): return self def __rsub__(self,x): return self def __mul__(self,x): return self def __rmul__(self,x): return self def __div__(self,x): return self def __rdiv__(self,x): return self def __divmod__(self,x): return (self,self) def __rdivmod__(self,x): return (self,self) def __mod__(self,x): return self def __rmod__(self,x): return self def __pow__(self,exp): return self def __rpow__(self,exp): return self def __neg__(self): return self def __pos__(self): return self def __abs__(self): return self def __lt__(self,x): return False def __le__(self,x): return False def __eq__(self,x): return False def __neq__(self,x): return True def __ge__(self,x): return False def __gt__(self,x): return False def __complex__(self,*a): raise NotImplementedError('NaN can not be converted to a complex') if decimal: nan = decimal.Decimal('NaN') else: nan = nan() class inf(float): """An approximation of the +Infinity floating point number.""" def __repr__(self): return 'inf' def __str__(self): return 'inf' def __add__(self,x): return self def __radd__(self,x): return self def __sub__(self,x): return self def __rsub__(self,x): return self def __mul__(self,x): if x is neginf or x < 0: return neginf elif x == 0: return nan else: return self def __rmul__(self,x): return self.__mul__(x) def __div__(self,x): if x == 0: raise ZeroDivisionError('float division') elif x < 0: return neginf else: return self def __rdiv__(self,x): if x is inf or x is neginf or x is nan: return nan return 0.0 def __divmod__(self,x): if x == 0: raise ZeroDivisionError('float divmod()') elif x < 0: return (nan,nan) else: return (self,self) def __rdivmod__(self,x): if x is inf or x is neginf or x is nan: return (nan, nan) return (0.0, x) def __mod__(self,x): if x == 0: raise ZeroDivisionError('float modulo') else: return nan def __rmod__(self,x): if x is inf or x is neginf or x is nan: return nan return x def __pow__(self, exp): if exp == 0: return 1.0 else: return self def __rpow__(self, x): if -1 < x < 1: return 0.0 elif x == 1.0: return 1.0 elif x is nan or x is neginf or x < 0: return nan else: return self def __neg__(self): return neginf def __pos__(self): return self def __abs__(self): return self def __lt__(self,x): return False def __le__(self,x): if x is self: return True else: return False def __eq__(self,x): if x is self: return True else: return False def __neq__(self,x): if x is self: return False else: return True def __ge__(self,x): return True def __gt__(self,x): return True def __complex__(self,*a): raise NotImplementedError('Infinity can not be converted to a complex') if decimal: inf = decimal.Decimal('Infinity') else: inf = inf() class neginf(float): """An approximation of the -Infinity floating point number.""" def __repr__(self): return '-inf' def __str__(self): return '-inf' def __add__(self,x): return self def __radd__(self,x): return self def __sub__(self,x): return self def __rsub__(self,x): return self def __mul__(self,x): if x is self or x < 0: return inf elif x == 0: return nan else: return self def __rmul__(self,x): return self.__mul__(self) def __div__(self,x): if x == 0: raise ZeroDivisionError('float division') elif x < 0: return inf else: return self def __rdiv__(self,x): if x is inf or x is neginf or x is nan: return nan return -0.0 def __divmod__(self,x): if x == 0: raise 
ZeroDivisionError('float divmod()') elif x < 0: return (nan,nan) else: return (self,self) def __rdivmod__(self,x): if x is inf or x is neginf or x is nan: return (nan, nan) return (-0.0, x) def __mod__(self,x): if x == 0: raise ZeroDivisionError('float modulo') else: return nan def __rmod__(self,x): if x is inf or x is neginf or x is nan: return nan return x def __pow__(self,exp): if exp == 0: return 1.0 else: return self def __rpow__(self, x): if x is nan or x is inf or x is inf: return nan return 0.0 def __neg__(self): return inf def __pos__(self): return self def __abs__(self): return inf def __lt__(self,x): return True def __le__(self,x): return True def __eq__(self,x): if x is self: return True else: return False def __neq__(self,x): if x is self: return False else: return True def __ge__(self,x): if x is self: return True else: return False def __gt__(self,x): return False def __complex__(self,*a): raise NotImplementedError('-Infinity can not be converted to a complex') if decimal: neginf = decimal.Decimal('-Infinity') else: neginf = neginf(0) return nan, inf, neginf nan, inf, neginf = _nonnumber_float_constants() del _nonnumber_float_constants # ---------------------------------------------------------------------- # Integers class json_int( (1L).__class__ ): # Have to specify base this way to satisfy 2to3 """A subclass of the Python int/long that remembers its format (hex,octal,etc). Initialize it the same as an int, but also accepts an additional keyword argument 'number_format' which should be one of the NUMBER_FORMAT_* values. n = json_int( x[, base, number_format=NUMBER_FORMAT_DECIMAL] ) """ def __new__(cls, *args, **kwargs): if 'number_format' in kwargs: number_format = kwargs['number_format'] del kwargs['number_format'] if number_format not in (NUMBER_FORMAT_DECIMAL, NUMBER_FORMAT_HEX, NUMBER_FORMAT_OCTAL, NUMBER_FORMAT_LEGACYOCTAL, NUMBER_FORMAT_BINARY): raise TypeError("json_int(): Invalid value for number_format argument") else: number_format = NUMBER_FORMAT_DECIMAL obj = super(json_int,cls).__new__(cls,*args,**kwargs) obj._jsonfmt = number_format return obj @property def number_format(self): """The original radix format of the number""" return self._jsonfmt def json_format(self): """Returns the integer value formatted as a JSON literal""" fmt = self._jsonfmt if fmt == NUMBER_FORMAT_HEX: return format(self, '#x') elif fmt == NUMBER_FORMAT_OCTAL: return format(self, '#o') elif fmt == NUMBER_FORMAT_BINARY: return format(self, '#b') elif fmt == NUMBER_FORMAT_LEGACYOCTAL: if self==0: return '0' # For some reason Python's int doesn't do '00' elif self < 0: return '-0%o' % (-self) else: return '0%o' % self else: return str(self) # ---------------------------------------------------------------------- # String processing helpers def skipstringsafe( s, start=0, end=None ): i = start #if end is None: # end = len(s) unsafe = helpers.unsafe_string_chars while i < end and s[i] not in unsafe: #c = s[i] #if c in unsafe_string_chars: # break i += 1 return i def skipstringsafe_slow( s, start=0, end=None ): i = start if end is None: end = len(s) while i < end: c = s[i] if c == '"' or c == "'" or c == '\\' or ord(c) <= 0x1f: break i += 1 return i def extend_list_with_sep( orig_seq, extension_seq, sepchar='' ): if not sepchar: orig_seq.extend( extension_seq ) else: for i, x in enumerate(extension_seq): if i > 0: orig_seq.append( sepchar ) orig_seq.append( x ) def extend_and_flatten_list_with_sep( orig_seq, extension_seq, separator='' ): for i, part in enumerate(extension_seq): if i > 0 and 
separator: orig_seq.append( separator ) orig_seq.extend( part ) # ---------------------------------------------------------------------- # Unicode UTF-32 # ---------------------------------------------------------------------- def _make_raw_bytes( byte_list ): """Takes a list of byte values (numbers) and returns a bytes (Python 3) or string (Python 2) """ if _py_major >= 3: b = bytes( byte_list ) else: b = ''.join(chr(n) for n in byte_list) return b import codecs class utf32(codecs.CodecInfo): """Unicode UTF-32 and UCS4 encoding/decoding support. This is for older Pythons whch did not have UTF-32 codecs. JSON requires that all JSON implementations must support the UTF-32 encoding (as well as UTF-8 and UTF-16). But earlier versions of Python did not provide a UTF-32 codec, so we must implement UTF-32 ourselves in case we need it. See http://en.wikipedia.org/wiki/UTF-32 """ BOM_UTF32_BE = _make_raw_bytes([ 0, 0, 0xFE, 0xFF ]) #'\x00\x00\xfe\xff' BOM_UTF32_LE = _make_raw_bytes([ 0xFF, 0xFE, 0, 0 ]) #'\xff\xfe\x00\x00' @staticmethod def lookup( name ): """A standard Python codec lookup function for UCS4/UTF32. If if recognizes an encoding name it returns a CodecInfo structure which contains the various encode and decoder functions to use. """ ci = None name = name.upper() if name in ('UCS4BE','UCS-4BE','UCS-4-BE','UTF32BE','UTF-32BE','UTF-32-BE'): ci = codecs.CodecInfo( utf32.utf32be_encode, utf32.utf32be_decode, name='utf-32be') elif name in ('UCS4LE','UCS-4LE','UCS-4-LE','UTF32LE','UTF-32LE','UTF-32-LE'): ci = codecs.CodecInfo( utf32.utf32le_encode, utf32.utf32le_decode, name='utf-32le') elif name in ('UCS4','UCS-4','UTF32','UTF-32'): ci = codecs.CodecInfo( utf32.encode, utf32.decode, name='utf-32') return ci @staticmethod def encode( obj, errors='strict', endianness=None, include_bom=True ): """Encodes a Unicode string into a UTF-32 encoded byte string. Returns a tuple: (bytearray, num_chars) The errors argument should be one of 'strict', 'ignore', or 'replace'. The endianness should be one of: * 'B', '>', or 'big' -- Big endian * 'L', '<', or 'little' -- Little endien * None -- Default, from sys.byteorder If include_bom is true a Byte-Order Mark will be written to the beginning of the string, otherwise it will be omitted. 
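
        An illustrative sketch of typical use (the byte values shown
        assume a big-endian request without a BOM, and Python 2 'str'
        results; Python 3 returns a bytes object instead):

            >>> utf32.utf32be_encode( u'A' )
            ('\x00\x00\x00A', 1)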
""" import sys, struct # Make a container that can store bytes if _py_major >= 3: f = bytearray() write = f.extend def tobytes(): return bytes(f) else: try: import cStringIO as sio except ImportError: import StringIO as sio f = sio.StringIO() write = f.write tobytes = f.getvalue if not endianness: endianness = sys.byteorder if endianness.upper()[0] in ('B>'): big_endian = True elif endianness.upper()[0] in ('L<'): big_endian = False else: raise ValueError("Invalid endianness %r: expected 'big', 'little', or None" % endianness) pack = struct.pack packspec = '>L' if big_endian else '<L' num_chars = 0 if include_bom: if big_endian: write( utf32.BOM_UTF32_BE ) else: write( utf32.BOM_UTF32_LE ) num_chars += 1 for pos, c in enumerate(obj): n = ord(c) if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32 if errors == 'ignore': pass elif errors == 'replace': n = 0xFFFD else: raise UnicodeEncodeError('utf32',obj,pos,pos+1,"surrogate code points from U+D800 to U+DFFF are not allowed") write( pack( packspec, n) ) num_chars += 1 return (tobytes(), num_chars) @staticmethod def utf32le_encode( obj, errors='strict', include_bom=False ): """Encodes a Unicode string into a UTF-32LE (little endian) encoded byte string.""" return utf32.encode( obj, errors=errors, endianness='L', include_bom=include_bom ) @staticmethod def utf32be_encode( obj, errors='strict', include_bom=False ): """Encodes a Unicode string into a UTF-32BE (big endian) encoded byte string.""" return utf32.encode( obj, errors=errors, endianness='B', include_bom=include_bom ) @staticmethod def decode( obj, errors='strict', endianness=None ): """Decodes a UTF-32 byte string into a Unicode string. Returns tuple (bytearray, num_bytes) The errors argument shold be one of 'strict', 'ignore', 'replace', 'backslashreplace', or 'xmlcharrefreplace'. The endianness should either be None (for auto-guessing), or a word that starts with 'B' (big) or 'L' (little). Will detect a Byte-Order Mark. If a BOM is found and endianness is also set, then the two must match. If neither a BOM is found nor endianness is set, then big endian order is assumed. 
""" import struct, sys maxunicode = sys.maxunicode unpack = struct.unpack # Detect BOM if obj.startswith( utf32.BOM_UTF32_BE ): bom_endianness = 'B' start = len(utf32.BOM_UTF32_BE) elif obj.startswith( utf32.BOM_UTF32_LE ): bom_endianness = 'L' start = len(utf32.BOM_UTF32_LE) else: bom_endianness = None start = 0 num_bytes = start if endianness == None: if bom_endianness == None: endianness = sys.byteorder.upper()[0] # Assume platform default else: endianness = bom_endianness else: endianness = endianness[0].upper() if bom_endianness and endianness != bom_endianness: raise UnicodeDecodeError('utf32',obj,0,start,'BOM does not match expected byte order') # Check for truncated last character if ((len(obj)-start) % 4) != 0: raise UnicodeDecodeError('utf32',obj,start,len(obj), 'Data length not a multiple of 4 bytes') # Start decoding characters chars = [] packspec = '>L' if endianness=='B' else '<L' i = 0 for i in range(start, len(obj), 4): seq = obj[i:i+4] n = unpack( packspec, seq )[0] num_bytes += 4 if n > maxunicode or (0xD800 <= n <= 0xDFFF): if errors == 'strict': raise UnicodeDecodeError('utf32',obj,i,i+4,'Invalid code point U+%04X' % n) elif errors == 'replace': chars.append( unichr(0xFFFD) ) elif errors == 'backslashreplace': if n > 0xffff: esc = "\\u%04x" % (n,) else: esc = "\\U%08x" % (n,) for esc_c in esc: chars.append( esc_c ) elif errors == 'xmlcharrefreplace': esc = "&#%d;" % (n,) for esc_c in esc: chars.append( esc_c ) else: # ignore pass else: chars.append( helpers.safe_unichr(n) ) return (u''.join( chars ), num_bytes) @staticmethod def utf32le_decode( obj, errors='strict' ): """Decodes a UTF-32LE (little endian) byte string into a Unicode string.""" return utf32.decode( obj, errors=errors, endianness='L' ) @staticmethod def utf32be_decode( obj, errors='strict' ): """Decodes a UTF-32BE (big endian) byte string into a Unicode string.""" return utf32.decode( obj, errors=errors, endianness='B' ) # ---------------------------------------------------------------------- # Helper functions # ---------------------------------------------------------------------- def _make_unsafe_string_chars(): import unicodedata unsafe = [] for c in [unichr(i) for i in range(0x100)]: if c == u'"' or c == u'\\' \ or unicodedata.category( c ) in ['Cc','Cf','Zl','Zp']: unsafe.append( c ) return u''.join( unsafe ) class helpers(object): """A set of utility functions.""" hexdigits = '0123456789ABCDEFabcdef' octaldigits = '01234567' unsafe_string_chars = _make_unsafe_string_chars() import sys maxunicode = sys.maxunicode always_use_custom_codecs = False # If True use demjson's codecs # before system codecs. This # is mainly here for testing. javascript_reserved_words = frozenset([ # Keywords (plus "let") (ECMAScript 6 section 11.6.2.1) 'break','case','catch','class','const','continue', 'debugger','default','delete','do','else','export', 'extends','finally','for','function','if','import', 'in','instanceof','let','new','return','super', 'switch','this','throw','try','typeof','var','void', 'while','with','yield', # Future reserved words (ECMAScript 6 section 11.6.2.2) 'enum','implements','interface','package', 'private','protected','public','static', # null/boolean literals 'null','true','false' ]) @staticmethod def make_raw_bytes( byte_list ): """Constructs a byte array (bytes in Python 3, str in Python 2) from a list of byte values (0-255). 
""" return _make_raw_bytes( byte_list ) @staticmethod def is_hex_digit( c ): """Determines if the given character is a valid hexadecimal digit (0-9, a-f, A-F).""" return (c in helpers.hexdigits) @staticmethod def is_octal_digit( c ): """Determines if the given character is a valid octal digit (0-7).""" return (c in helpers.octaldigits) @staticmethod def is_binary_digit( c ): """Determines if the given character is a valid binary digit (0 or 1).""" return (c == '0' or c == '1') @staticmethod def char_is_json_ws( c ): """Determines if the given character is a JSON white-space character""" return c in ' \t\n\r' @staticmethod def safe_unichr( codepoint ): """Just like Python's unichr() but works in narrow-Unicode Pythons.""" if codepoint >= 0x10000 and codepoint > helpers.maxunicode: # Narrow-Unicode python, construct a UTF-16 surrogate pair. w1, w2 = helpers.make_surrogate_pair( codepoint ) if w2 is None: c = unichr(w1) else: c = unichr(w1) + unichr(w2) else: c = unichr(codepoint) return c @staticmethod def char_is_unicode_ws( c ): """Determines if the given character is a Unicode space character""" if not isinstance(c,unicode): c = unicode(c) if c in u' \t\n\r\f\v': return True import unicodedata return unicodedata.category(c) == 'Zs' @staticmethod def char_is_json_eol( c ): """Determines if the given character is a JSON line separator""" return c in '\n\r' @staticmethod def char_is_unicode_eol( c ): """Determines if the given character is a Unicode line or paragraph separator. These correspond to CR and LF as well as Unicode characters in the Zl or Zp categories. """ return c in u'\r\n\u2028\u2029' @staticmethod def char_is_identifier_leader( c ): """Determines if the character may be the first character of a JavaScript identifier. """ return c.isalpha() or c in '_$' @staticmethod def char_is_identifier_tail( c ): """Determines if the character may be part of a JavaScript identifier. """ return c.isalnum() or c in u'_$\u200c\u200d' @staticmethod def extend_and_flatten_list_with_sep( orig_seq, extension_seq, separator='' ): for i, part in enumerate(extension_seq): if i > 0 and separator: orig_seq.append( separator ) orig_seq.extend( part ) @staticmethod def strip_format_control_chars( txt ): """Filters out all Unicode format control characters from the string. ECMAScript permits any Unicode "format control characters" to appear at any place in the source code. They are to be ignored as if they are not there before any other lexical tokenization occurs. Note that JSON does not allow them, except within string literals. * Ref. ECMAScript section 7.1. * http://en.wikipedia.org/wiki/Unicode_control_characters There are dozens of Format Control Characters, for example: U+00AD SOFT HYPHEN U+200B ZERO WIDTH SPACE U+2060 WORD JOINER """ import unicodedata txt2 = filter( lambda c: unicodedata.category(unicode(c)) != 'Cf', txt ) # 2to3 NOTE: The following is needed to work around a broken # Python3 conversion in which filter() will be transformed # into a list rather than a string. if not isinstance(txt2,basestring): txt2 = u''.join(txt2) return txt2 @staticmethod def lookup_codec( encoding ): """Wrapper around codecs.lookup(). Returns None if codec not found, rather than raising a LookupError. 
""" import codecs if isinstance( encoding, codecs.CodecInfo ): return encoding encoding = encoding.lower() import codecs if helpers.always_use_custom_codecs: # Try custom utf32 first, then standard python codecs cdk = utf32.lookup(encoding) if not cdk: try: cdk = codecs.lookup( encoding ) except LookupError: cdk = None else: # Try standard python codecs first, then custom utf32 try: cdk = codecs.lookup( encoding ) except LookupError: cdk = utf32.lookup( encoding ) return cdk @staticmethod def auto_detect_encoding( s ): """Takes a string (or byte array) and tries to determine the Unicode encoding it is in. Returns the encoding name, as a string. """ if not s or len(s)==0: return "utf-8" # Get the byte values of up to the first 4 bytes ords = [] for i in range(0, min(len(s),4)): x = s[i] if isinstance(x, basestring): x = ord(x) ords.append( x ) # Look for BOM marker import sys, codecs bom2, bom3, bom4 = None, None, None if len(s) >= 2: bom2 = s[:2] if len(s) >= 3: bom3 = s[:3] if len(s) >= 4: bom4 = s[:4] # Assign values of first four bytes to: a, b, c, d; and last byte to: z a, b, c, d, z = None, None, None, None, None if len(s) >= 1: a = ords[0] if len(s) >= 2: b = ords[1] if len(s) >= 3: c = ords[2] if len(s) >= 4: d = ords[3] z = s[-1] if isinstance(z, basestring): z = ord(z) if bom4 and ( (hasattr(codecs,'BOM_UTF32_LE') and bom4 == codecs.BOM_UTF32_LE) or (bom4 == utf32.BOM_UTF32_LE) ): encoding = 'utf-32le' s = s[4:] elif bom4 and ( (hasattr(codecs,'BOM_UTF32_BE') and bom4 == codecs.BOM_UTF32_BE) or (bom4 == utf32.BOM_UTF32_BE) ): encoding = 'utf-32be' s = s[4:] elif bom2 and bom2 == codecs.BOM_UTF16_LE: encoding = 'utf-16le' s = s[2:] elif bom2 and bom2 == codecs.BOM_UTF16_BE: encoding = 'utf-16be' s = s[2:] elif bom3 and bom3 == codecs.BOM_UTF8: encoding = 'utf-8' s = s[3:] # No BOM, so autodetect encoding used by looking at first four # bytes according to RFC 4627 section 3. The first and last bytes # in a JSON document will be ASCII. The second byte will be ASCII # unless the first byte was a quotation mark. elif len(s)>=4 and a==0 and b==0 and c==0 and d!=0: # UTF-32BE (0 0 0 x) encoding = 'utf-32be' elif len(s)>=4 and a!=0 and b==0 and c==0 and d==0 and z==0: # UTF-32LE (x 0 0 0 [... 0]) encoding = 'utf-32le' elif len(s)>=2 and a==0 and b!=0: # UTF-16BE (0 x) encoding = 'utf-16be' elif len(s)>=2 and a!=0 and b==0 and z==0: # UTF-16LE (x 0 [... 0]) encoding = 'utf-16le' elif ord('\t') <= a <= 127: # First byte appears to be ASCII, so guess UTF-8. encoding = 'utf8' else: raise ValueError("Can not determine the Unicode encoding for byte stream") return encoding @staticmethod def unicode_decode( txt, encoding=None ): """Takes a string (or byte array) and tries to convert it to a Unicode string. Returns a named tuple: (string, codec, bom) The 'encoding' argument, if supplied, should either the name of a character encoding, or an instance of codecs.CodecInfo. If the encoding argument is None or "auto" then the encoding is automatically determined, if possible. Any BOM (Byte Order Mark) that is found at the beginning of the input will be stripped off and placed in the 'bom' portion of the returned value. 
""" if isinstance(txt, unicode): res = _namedtuple('DecodedString',['string','codec','bom'])( txt, None, None ) else: if encoding is None or encoding == 'auto': encoding = helpers.auto_detect_encoding( txt ) cdk = helpers.lookup_codec( encoding ) if not cdk: raise LookupError("Can not find codec for encoding %r" % encoding) try: # Determine if codec takes arguments; try a decode of nothing cdk.decode( helpers.make_raw_bytes([]), errors='strict' ) except TypeError: cdk_kw = {} # This coded doesn't like the errors argument else: cdk_kw = {'errors': 'strict'} unitxt, numbytes = cdk.decode( txt, **cdk_kw ) # DO THE DECODE HERE! # Remove BOM if present if len(unitxt) > 0 and unitxt[0] == u'\uFEFF': bom = cdk.encode(unitxt[0])[0] unitxt = unitxt[1:] elif len(unitxt) > 0 and unitxt[0] == u'\uFFFE': # Reversed BOM raise UnicodeDecodeError(cdk.name,txt,0,0,"Wrong byte order, found reversed BOM U+FFFE") else: bom = None res = _namedtuple('DecodedString',['string','codec','bom'])( unitxt, cdk, bom ) return res @staticmethod def surrogate_pair_as_unicode( c1, c2 ): """Takes a pair of unicode surrogates and returns the equivalent unicode character. The input pair must be a surrogate pair, with c1 in the range U+D800 to U+DBFF and c2 in the range U+DC00 to U+DFFF. """ n1, n2 = ord(c1), ord(c2) if n1 < 0xD800 or n1 > 0xDBFF or n2 < 0xDC00 or n2 > 0xDFFF: raise JSONDecodeError('illegal Unicode surrogate pair',(c1,c2)) a = n1 - 0xD800 b = n2 - 0xDC00 v = (a << 10) | b v += 0x10000 return helpers.safe_unichr(v) @staticmethod def unicode_as_surrogate_pair( c ): """Takes a single unicode character and returns a sequence of surrogate pairs. The output of this function is a tuple consisting of one or two unicode characters, such that if the input character is outside the BMP range then the output is a two-character surrogate pair representing that character. If the input character is inside the BMP then the output tuple will have just a single character...the same one. 
""" n = ord(c) w1, w2 = helpers.make_surrogate_pair(n) if w2 is None: return (unichr(w1),) else: return (unichr(w1), unichr(w2)) @staticmethod def make_surrogate_pair( codepoint ): """Given a Unicode codepoint (int) returns a 2-tuple of surrogate codepoints.""" if codepoint < 0x10000: return (codepoint,None) # in BMP, surrogate pair not required v = codepoint - 0x10000 vh = (v >> 10) & 0x3ff # highest 10 bits vl = v & 0x3ff # lowest 10 bits w1 = 0xD800 | vh w2 = 0xDC00 | vl return (w1, w2) @staticmethod def isnumbertype( obj ): """Is the object of a Python number type (excluding complex)?""" return isinstance(obj, (int,long,float)) \ and not isinstance(obj, bool) \ or obj is nan or obj is inf or obj is neginf \ or (decimal and isinstance(obj, decimal.Decimal)) @staticmethod def is_negzero( n ): """Is the number value a negative zero?""" if isinstance( n, float ): return n == 0.0 and repr(n).startswith('-') elif decimal and isinstance( n, decimal.Decimal ): return n.is_zero() and n.is_signed() else: return False @staticmethod def is_nan( n ): """Is the number a NaN (not-a-number)?""" if isinstance( n, float ): return n is nan or n.hex() == 'nan' or n != n elif decimal and isinstance( n, decimal.Decimal ): return n.is_nan() else: return False @staticmethod def is_infinite( n ): """Is the number infinite?""" if isinstance( n, float ): return n is inf or n is neginf or n.hex() in ('inf','-inf') elif decimal and isinstance( n, decimal.Decimal ): return n.is_infinite() else: return False @staticmethod def isstringtype( obj ): """Is the object of a Python string type?""" if isinstance(obj, basestring): return True # Must also check for some other pseudo-string types import types, UserString return isinstance(obj, types.StringTypes) \ or isinstance(obj, UserString.UserString) ## or isinstance(obj, UserString.MutableString) @staticmethod def decode_hex( hexstring ): """Decodes a hexadecimal string into it's integer value.""" # We don't use the builtin 'hex' codec in python since it can # not handle odd numbers of digits, nor raise the same type # of exceptions we want to. n = 0 for c in hexstring: if '0' <= c <= '9': d = ord(c) - ord('0') elif 'a' <= c <= 'f': d = ord(c) - ord('a') + 10 elif 'A' <= c <= 'F': d = ord(c) - ord('A') + 10 else: raise ValueError('Not a hexadecimal number', hexstring) # Could use ((n << 4 ) | d), but python 2.3 issues a FutureWarning. n = (n * 16) + d return n @staticmethod def decode_octal( octalstring ): """Decodes an octal string into it's integer value.""" n = 0 for c in octalstring: if '0' <= c <= '7': d = ord(c) - ord('0') else: raise ValueError('Not an octal number', octalstring) # Could use ((n << 3 ) | d), but python 2.3 issues a FutureWarning. n = (n * 8) + d return n @staticmethod def decode_binary( binarystring ): """Decodes a binary string into it's integer value.""" n = 0 for c in binarystring: if c == '0': d = 0 elif c == '1': d = 1 else: raise ValueError('Not an binary number', binarystring) # Could use ((n << 3 ) | d), but python 2.3 issues a FutureWarning. n = (n * 2) + d return n @staticmethod def format_timedelta_iso( td ): """Encodes a datetime.timedelta into ISO-8601 Time Period format. 
""" d = td.days s = td.seconds ms = td.microseconds m, s = divmod(s,60) h, m = divmod(m,60) a = ['P'] if d: a.append( '%dD' % d ) if h or m or s or ms: a.append( 'T' ) if h: a.append( '%dH' % h ) if m: a.append( '%dM' % m ) if s or ms: if ms: a.append( '%d.%06d' % (s,ms) ) else: a.append( '%d' % s ) if len(a)==1: a.append('T0S') return ''.join(a) # ---------------------------------------------------------------------- # File position indicator # ---------------------------------------------------------------------- class position_marker(object): """A position marks a specific place in a text document. It consists of the following attributes: * line - The line number, starting at 1 * column - The column on the line, starting at 0 * char_position - The number of characters from the start of the document, starting at 0 * text_after - (optional) a short excerpt of the text of document starting at the current position Lines are separated by any Unicode line separator character. As an exception a CR+LF character pair is treated as being a single line separator demarcation. Columns are simply a measure of the number of characters after the start of a new line, starting at 0. Visual effects caused by Unicode characters such as combining characters, bidirectional text, zero-width characters and so on do not affect the computation of the column regardless of visual appearance. The char_position is a count of the number of characters since the beginning of the document, starting at 0. As used within the buffered_stream class, if the document starts with a Unicode Byte Order Mark (BOM), the BOM prefix is NOT INCLUDED in the count. """ def __init__(self, offset=0, line=1, column=0, text_after=None): self.__char_position = offset self.__line = line self.__column = column self.__text_after = text_after self.__at_end = False self.__last_was_cr = False @property def line(self): """The current line within the document, starts at 1.""" return self.__line @property def column(self): """The current character column from the beginning of the document, starts at 0. """ return self.__column @property def char_position(self): """The current character offset from the beginning of the document, starts at 0. """ return self.__char_position @property def at_start(self): """Returns True if the position is at the start of the document.""" return (self.char_position == 0) @property def at_end(self): """Returns True if the position is at the end of the document. This property must be set by the user. """ return self.__at_end @at_end.setter def at_end(self, b): """Sets the at_end property to True or False. """ self.__at_end = bool(b) @property def text_after(self): """Returns a textual excerpt starting at the current position. This property must be set by the user. """ return self.__at_end @text_after.setter def text_after(self, value): """Sets the text_after property to a given string. 
""" self.__text_after = value def __repr__(self): s = "%s(offset=%r,line=%r,column=%r" \ % (self.__class__.__name__, self.__char_position, self.__line, self.__column) if self.text_after: s += ",text_after=%r" % (self.text_after,) s += ")" return s def describe(self, show_text=True): """Returns a human-readable description of the position, in English.""" s = "line %d, column %d, offset %d" % (self.__line, self.__column, self.__char_position) if self.at_start: s += " (AT-START)" elif self.at_end: s += " (AT-END)" if show_text and self.text_after: s += ", text %r" % (self.text_after) return s def __str__(self): """Same as the describe() function.""" return self.describe( show_text=True ) def copy( self ): """Create a copy of the position object.""" p = self.__class__() p.__char_position = self.__char_position p.__line = self.__line p.__column = self.__column p.text_after = self.__text_after p.at_end = self.at_end p.__last_was_cr = self.__last_was_cr return p def rewind( self ): """Set the position to the start of the document.""" if not self.at_start: self.text_after = None self.at_end = False self.__char_position = 0 self.__line = 1 self.__column = 0 self.__last_was_cr = False def advance( self, s ): """Advance the position from its current place according to the given string of characters. """ if s: self.text_after = None for c in s: self.__char_position += 1 if c == '\n' and self.__last_was_cr: self.__last_was_cr = False elif helpers.char_is_unicode_eol(c): self.__line += 1 self.__column = 0 self.__last_was_cr = (c == '\r') else: self.__column += 1 self.__last_was_cr = False # ---------------------------------------------------------------------- # Buffered Stream Reader # ---------------------------------------------------------------------- class buffered_stream(object): """A helper class for the JSON parser. It allows for reading an input document, while handling some low-level Unicode issues as well as tracking the current position in terms of line and column position. """ def __init__(self, txt='', encoding=None): self.reset() self.set_text( txt, encoding ) def reset(self): """Clears the state to nothing.""" self.__pos = position_marker() self.__saved_pos = [] # Stack of saved positions self.__bom = helpers.make_raw_bytes([]) # contains copy of byte-order mark, if any self.__codec = None # The CodecInfo self.__encoding = None # The name of the codec's encoding self.__input_is_bytes = False self.__rawbuf = None self.__raw_bytes = None self.__cmax = 0 self.num_ws_skipped = 0 def save_position(self): self.__saved_pos.append( self.__pos.copy() ) return True def clear_saved_position(self): if self.__saved_pos: self.__saved_pos.pop() return True else: return False def restore_position(self): try: old_pos = self.__saved_pos.pop() # Can raise IndexError except IndexError, err: raise IndexError("Attempt to restore buffer position that was never saved") else: self.__pos = old_pos return True def _find_codec(self, encoding): if encoding is None: self.__codec = None self.__encoding = None elif isinstance(encoding, codecs.CodecInfo): self.__codec = encoding self.__encoding = self.__codec.name else: self.__encoding = encoding self.__codec = helpers.lookup_codec( encoding ) if not self.__codec: raise JSONDecodeError('no codec available for character encoding',encoding) return self.__codec def set_text( self, txt, encoding=None ): """Changes the input text document and rewinds the position to the start of the new document. 
""" import sys self.rewind() self.__codec = None self.__bom = None self.__rawbuf = u'' self.__cmax = 0 # max number of chars in input try: decoded = helpers.unicode_decode( txt, encoding ) except JSONError: raise except Exception, err: # Re-raise as a JSONDecodeError e2 = sys.exc_info() newerr = JSONDecodeError("a Unicode decoding error occurred") # Simulate Python 3's: "raise X from Y" exception chaining newerr.__cause__ = err newerr.__traceback__ = e2[2] raise newerr else: self.__codec = decoded.codec self.__bom = decoded.bom self.__rawbuf = decoded.string self.__cmax = len(self.__rawbuf) def __repr__(self): return '<%s at %r text %r>' % (self.__class__.__name__, self.__pos, self.text_context) def rewind(self): """Resets the position back to the start of the input text.""" self.__pos.rewind() @property def codec(self): """The codec object used to perform Unicode decoding, or None.""" return self.__codec @property def bom(self): """The Unicode Byte-Order Mark (BOM), if any, that was present at the start of the input text. The returned BOM is a string of the raw bytes, and is not Unicode-decoded. """ return self.__bom @property def cpos(self): """The current character offset from the start of the document.""" return self.__pos.char_position @property def position(self): """The current position (as a position_marker object). Returns a copy. """ p = self.__pos.copy() p.text_after = self.text_context p.at_end = self.at_end return p @property def at_start(self): """Returns True if the position is currently at the start of the document, or False otherwise. """ return self.__pos.at_start @property def at_end(self): """Returns True if the position is currently at the end of the document, of False otherwise. """ c = self.peek() return (not c) def at_ws(self, allow_unicode_whitespace=True): """Returns True if the current position contains a white-space character. """ c = self.peek() if not c: return False elif allow_unicode_whitespace: return helpers.char_is_unicode_ws(c) else: return helpers.char_is_json_ws(c) def at_eol(self, allow_unicode_eol=True): """Returns True if the current position contains an end-of-line control character. """ c = self.peek() if not c: return True # End of file is treated as end of line elif allow_unicode_eol: return helpers.char_is_unicode_eol(c) else: return helpers.char_is_json_eol(c) def peek( self, offset=0 ): """Returns the character at the current position, or at a given offset away from the current position. If the position is beyond the limits of the document size, then an empty string '' is returned. """ i = self.cpos + offset if i < 0 or i >= self.__cmax: return '' return self.__rawbuf[i] def peekstr( self, span=1, offset=0 ): """Returns one or more characters starting at the current position, or at a given offset away from the current position, and continuing for the given span length. If the offset and span go outside the limit of the current document size, then the returned string may be shorter than the requested span length. """ i = self.cpos + offset j = i + span if i < 0 or i >= self.__cmax: return '' return self.__rawbuf[i : j] @property def text_context( self, context_size = 20 ): """A short human-readable textual excerpt of the document at the current position, in English. """ context_size = max( context_size, 4 ) s = self.peekstr(context_size + 1) if not s: return '' if len(s) > context_size: s = s[:context_size - 3] + "..." return s def startswith( self, s ): """Determines if the text at the current position starts with the given string. 
        See also method: pop_if_startswith()
        """
        s2 = self.peekstr( len(s) )
        return s == s2

    def skip( self, span=1 ):
        """Advances the current position by one (or the given number)
        of characters.  Will not advance beyond the end of the
        document.  Returns the number of characters skipped.
        """
        i = self.cpos
        self.__pos.advance( self.peekstr(span) )
        return self.cpos - i

    def skipuntil( self, testfn ):
        """Advances the current position until a given predicate test
        function succeeds, or the end of the document is reached.

        Returns the actual number of characters skipped.

        The provided test function should take a single unicode
        character and return a boolean value, such as:

            lambda c : c == '.'  # Skip to next period

        See also methods: skipwhile() and popuntil()
        """
        i = self.cpos
        while True:
            c = self.peek()
            if not c or testfn(c):
                break
            else:
                self.__pos.advance(c)
        return self.cpos - i

    def skipwhile( self, testfn ):
        """Advances the current position until a given predicate test
        function fails, or the end of the document is reached.

        Returns the actual number of characters skipped.

        The provided test function should take a single unicode
        character and return a boolean value, such as:

            lambda c : c.isdigit()  # Skip all digits

        See also methods: skipuntil() and popwhile()
        """
        return self.skipuntil( lambda c: not testfn(c) )

    def skip_to_next_line( self, allow_unicode_eol=True ):
        """Advances the current position to the start of the next
        line.  Will not advance beyond the end of the file.  Note that
        the two-character sequence CR+LF is recognized as being just a
        single end-of-line marker.
        """
        ln = self.__pos.line
        while True:
            c = self.pop()
            if not c or self.__pos.line > ln:
                if c == '\r' and self.peek() == '\n':
                    self.skip()
                break

    def skipws( self, allow_unicode_whitespace=True ):
        """Advances the current position past all whitespace, or until
        the end of the document is reached.
        """
        if allow_unicode_whitespace:
            n = self.skipwhile( helpers.char_is_unicode_ws )
        else:
            n = self.skipwhile( helpers.char_is_json_ws )
        self.num_ws_skipped += n
        return n

    def pop( self ):
        """Returns the character at the current position and advances
        the position to the next character.  At the end of the
        document this function returns an empty string.
        """
        c = self.peek()
        if c:
            self.__pos.advance( c )
        return c

    def popstr( self, span=1, offset=0 ):
        """Returns a string of one or more characters starting at the
        current position, and advances the position to the following
        character after the span.  Will not go beyond the end of the
        document, so the returned string may be shorter than the
        requested span.
        """
        s = self.peekstr(span)
        if s:
            self.__pos.advance( s )
        return s

    def popif( self, testfn ):
        """Just like the pop() function, but only returns the
        character if the given predicate test function succeeds.
        """
        c = self.peek()
        if c and testfn(c):
            self.__pos.advance( c )
            return c
        return ''

    def pop_while_in( self, chars ):
        """Pops a sequence of characters at the current position as
        long as each of them is in the given set of characters.
        """
        if not isinstance( chars, (set,frozenset) ):
            cset = set( chars )
        else:
            cset = chars   # Fixed: 'cset' was left undefined when a set was passed in
        c = self.peek()
        if c and c in cset:
            s = self.popwhile( lambda c: c and c in cset )
            return s
        return None

    def pop_identifier( self, match=None ):
        """Pops the sequence of characters at the current position
        that match the syntax for a JavaScript identifier.
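
        For example (illustrative):

            >>> b = buffered_stream( u'foo_bar(1)' )
            >>> b.pop_identifier()
            u'foo_bar'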
""" c = self.peek() if c and helpers.char_is_identifier_leader(c): s = self.popwhile( helpers.char_is_identifier_tail ) return s return None def pop_if_startswith( self, s ): """Pops the sequence of characters if they match the given string. See also method: startswith() """ s2 = self.peekstr( len(s) ) if s2 != s: return NULL self.__pos.advance( s2 ) return s2 def popwhile( self, testfn, maxchars=None ): """Pops all the characters starting at the current position as long as each character passes the given predicate function test. If maxchars a numeric value instead of None then then no more than that number of characters will be popped regardless of the predicate test. See also methods: skipwhile() and popuntil() """ s = [] i = 0 while maxchars is None or i < maxchars: c = self.popif( testfn ) if not c: break s.append( c ) i += 1 return ''.join(s) def popuntil( self, testfn, maxchars=None ): """Just like popwhile() method except the predicate function should return True to stop the sequence rather than False. See also methods: skipuntil() and popwhile() """ return popwhile( lambda c: not testfn(c), maxchars=maxchars ) def __getitem__( self, index ): """Returns the character at the given index relative to the current position. If the index goes beyond the end of the input, or prior to the start when negative, then '' is returned. If the index provided is a slice object, then that range of characters is returned as a string. Note that a stride value other than 1 is not supported in the slice. To use a slice, do: s = my_stream[ 1:4 ] """ if isinstance( index, slice ): return self.peekstr( index.stop - index.start, index.start ) else: return self.peek( index ) # ---------------------------------------------------------------------- # Exception classes. # ---------------------------------------------------------------------- class JSONException(Exception): """Base class for all JSON-related exceptions. """ pass class JSONSkipHook(JSONException): """An exception to be raised by user-defined code within hook callbacks to indicate the callback does not want to handle the situation. """ pass class JSONStopProcessing(JSONException): """Can be raised by anyplace, including inside a hook function, to cause the entire encode or decode process to immediately stop with an error. """ pass class JSONAbort(JSONException): pass class JSONError(JSONException): """Base class for all JSON-related errors. In addition to standard Python exceptions, these exceptions may also have additional properties: * severity - One of: 'fatal', 'error', 'warning', 'info' * position - An indication of the position in the input where the error occured. * outer_position - A secondary position (optional) that gives the location of the outer data item in which the error occured, such as the beginning of a string or an array. * context_description - A string that identifies the context in which the error occured. Default is "Context". 
""" severities = frozenset(['fatal','error','warning','info']) def __init__(self, message, *args, **kwargs ): self.severity = 'error' self._position = None self.outer_position = None self.context_description = None for kw,val in kwargs.items(): if kw == 'severity': if val not in self.severities: raise TypeError("%s given invalid severity %r" % (self.__class__.__name__, val)) self.severity = val elif kw == 'position': self.position = val elif kw == 'outer_position': self.outer_position = val elif kw == 'context_description' or kw=='context': self.context_description = val else: raise TypeError("%s does not accept %r keyword argument" % (self.__class__.__name__, kw)) super( JSONError, self ).__init__( message, *args ) self.message = message @property def position(self): return self._position @position.setter def position(self, pos): if pos == 0: self._position = 0 #position_marker() # start of input else: self._position = pos def __repr__(self): s = "%s(%r" % (self.__class__.__name__, self.message) for a in self.args[1:]: s += ", %r" % (a,) if self.position: s += ", position=%r" % (self.position,) if self.outer_position: s += ", outer_position=%r" % (self.outer_position,) s += ", severity=%r)" % (self.severity,) return s def pretty_description(self, show_positions=True, filename=None): if filename: pfx = filename.rstrip().rstrip(':') + ':' else: pfx = '' # Print file position as numeric abbreviation err = pfx if self.position == 0: err += '0:0:' elif self.position: err += '%d:%d:' % (self.position.line, self.position.column) else: err += ' ' # Print severity and main error message err += " %s: %s" % (self.severity.capitalize(), self.message) if len(self.args) > 1: err += ': ' for anum, a in enumerate(self.args[1:]): if anum > 1: err += ', ' astr = repr(a) if len(astr) > 30: astr = astr[:30] + '...' err += astr # Print out exception chain e2 = self while e2: if hasattr(e2,'__cause__') and isinstance(e2.__cause__,Exception): e2 = e2.__cause__ e2desc = str(e2).strip() if not e2desc: e2desc = repr(e2).strip() err += "\n | Cause: %s" % e2desc.strip().replace('\n','\n | ') else: e2 = None # Show file position if show_positions and self.position is not None: if self.position == 0: err += "\n | At start of input" else: err += "\n | At %s" % (self.position.describe(show_text=False),) if self.position.text_after: err += "\n | near text: %r" % (self.position.text_after,) # Show context if show_positions and self.outer_position: if self.context_description: cdesc = self.context_description.capitalize() else: cdesc = "Context" err += "\n | %s started at %s" % (cdesc, self.outer_position.describe(show_text=False),) if self.outer_position.text_after: err += "\n | with text: %r" % (self.outer_position.text_after,) return err class JSONDecodeError(JSONError): """An exception class raised when a JSON decoding error (syntax error) occurs.""" pass class JSONDecodeHookError(JSONDecodeError): """An exception that occured within a decoder hook. The original exception is available in the 'hook_exception' attribute. 
""" def __init__(self, hook_name, exc_info, encoded_obj, *args, **kwargs): self.hook_name = hook_name if not exc_info: exc_info = (None, None, None) exc_type, self.hook_exception, self.hook_traceback = exc_info self.object_type = type(encoded_obj) msg = "Hook %s raised %r while decoding type <%s>" % (hook_name, self.hook_exception.__class__.__name__, self.object_type.__name__) if len(args) >= 1: msg += ": " + args[0] args = args[1:] super(JSONDecodeHookError,self).__init__(msg, *args,**kwargs) class JSONEncodeError(JSONError): """An exception class raised when a python object can not be encoded as a JSON string.""" pass class JSONEncodeHookError(JSONEncodeError): """An exception that occured within an encoder hook. The original exception is available in the 'hook_exception' attribute. """ def __init__(self, hook_name, exc_info, encoded_obj, *args, **kwargs): self.hook_name = hook_name if not exc_info: exc_info = (None, None, None) exc_type, self.hook_exception, self.hook_traceback = exc_info self.object_type = type(encoded_obj) msg = "Hook %s raised %r while encoding type <%s>" % (self.hook_name, self.hook_exception.__class__.__name__, self.object_type.__name__) if len(args) >= 1: msg += ": " + args[0] args = args[1:] super(JSONEncodeHookError,self).__init__(msg, *args, **kwargs) #---------------------------------------------------------------------- # Encoder state object #---------------------------------------------------------------------- class encode_state(object): """An internal transient object used during JSON encoding to record the current construction state. """ def __init__(self, jsopts=None, parent=None ): import sys self.chunks = [] if not parent: self.parent = None self.nest_level = 0 self.options = jsopts self.escape_unicode_test = False # or a function f(unichar)=>True/False else: self.parent = parent self.nest_level = parent.nest_level + 1 self.escape_unicode_test = parent.escape_unicode_test self.options = parent.options def make_substate(self): return encode_state( parent=self ) def join_substate(self, other_state): self.chunks.extend( other_state.chunks ) other_state.chunks = [] def append(self, s): """Adds a string to the end of the current JSON document""" self.chunks.append(s) def combine(self): """Returns the accumulated string and resets the state to empty""" s = ''.join( self.chunks ) self.chunks = [] return s def __eq__(self, other_state): return self.nest_level == other_state.nest_level and \ self.chunks == other_state.chunks def __lt__(self, other_state): if self.nest_level != other_state.nest_level: return self.nest_level < other_state.nest_level return self.chunks < other_state.chunks #---------------------------------------------------------------------- # Decoder statistics #---------------------------------------------------------------------- class decode_statistics(object): """An object that records various statistics about a decoded JSON document. 
""" int8_max = 0x7f int8_min = - 0x7f - 1 int16_max = 0x7fff int16_min = - 0x7fff - 1 int32_max = 0x7fffffff int32_min = - 0x7fffffff - 1 int64_max = 0x7fffffffffffffff int64_min = - 0x7fffffffffffffff - 1 double_int_max = 2**53 - 1 double_int_min = - (2**53 - 1) def __init__(self): # Nesting self.max_depth = 0 self.max_items_in_array = 0 self.max_items_in_object = 0 # Integer stats self.num_ints = 0 self.num_ints_8bit = 0 self.num_ints_16bit = 0 self.num_ints_32bit = 0 self.num_ints_53bit = 0 # ints which will overflow IEEE doubles self.num_ints_64bit = 0 self.num_ints_long = 0 self.num_negative_zero_ints = 0 # Floating-point stats self.num_negative_zero_floats = 0 self.num_floats = 0 self.num_floats_decimal = 0 # overflowed 'float' # String stats self.num_strings = 0 self.max_string_length = 0 self.total_string_length = 0 self.min_codepoint = None self.max_codepoint = None # Other data type stats self.num_arrays = 0 self.num_objects = 0 self.num_bools = 0 self.num_nulls = 0 self.num_undefineds = 0 self.num_nans = 0 self.num_infinities = 0 self.num_comments = 0 self.num_identifiers = 0 # JavaScript identifiers self.num_excess_whitespace = 0 @property def num_infinites(self): """Misspelled 'num_infinities' for backwards compatibility""" return self.num_infinities def pretty_description(self, prefix=''): import unicodedata lines = [ "Number of integers:", " 8-bit: %5d (%d to %d)" % (self.num_ints_8bit, self.int8_min, self.int8_max), " 16-bit: %5d (%d to %d)" % (self.num_ints_16bit, self.int16_min, self.int16_max), " 32-bit: %5d (%d to %d)" % (self.num_ints_32bit, self.int32_min, self.int32_max), " > 53-bit: %5d (%d to %d - overflows JavaScript)" % (self.num_ints_53bit, self.double_int_min, self.double_int_max), " 64-bit: %5d (%d to %d)" % (self.num_ints_64bit, self.int64_min, self.int64_max), " > 64 bit: %5d (not portable, may require a \"Big Num\" package)" % self.num_ints_long, " total ints: %5d" % self.num_ints, " Num -0: %5d (negative-zero integers are not portable)" % self.num_negative_zero_ints, "Number of floats:", " doubles: %5d" % self.num_floats, " > doubles: %5d (will overflow IEEE doubles)" % self.num_floats_decimal, " total flts: %5d" % (self.num_floats + self.num_floats_decimal), " Num -0.0: %5d (negative-zero floats are usually portable)" % self.num_negative_zero_floats, "Number of:", " nulls: %5d" % self.num_nulls, " booleans: %5d" % self.num_bools, " arrays: %5d" % self.num_arrays, " objects: %5d" % self.num_objects, "Strings:", " number: %5d strings" % self.num_strings, " max length: %5d characters" % self.max_string_length, " total chars: %5d across all strings" % self.total_string_length, ] if self.min_codepoint is not None: cp = 'U+%04X' % self.min_codepoint try: charname = unicodedata.name(unichr(self.min_codepoint)) except ValueError: charname = '? UNKNOWN CHARACTER' lines.append(" min codepoint: %6s (%s)" % (cp, charname)) else: lines.append(" min codepoint: %6s" % ('n/a',)) if self.max_codepoint is not None: cp = 'U+%04X' % self.max_codepoint try: charname = unicodedata.name(unichr(self.max_codepoint)) except ValueError: charname = '? 
UNKNOWN CHARACTER'
            lines.append("    max codepoint: %6s (%s)" % (cp, charname))
        else:
            lines.append("    max codepoint: %6s" % ('n/a',))
        lines.extend([
            "Other JavaScript items:",
            "   NaN:          %5d" % self.num_nans,
            "   Infinite:     %5d" % self.num_infinities,
            "   undefined:    %5d" % self.num_undefineds,
            "   Comments:     %5d" % self.num_comments,
            "   Identifiers:  %5d" % self.num_identifiers,
            "Max items in any array: %5d" % self.max_items_in_array,
            "Max keys in any object: %5d" % self.max_items_in_object,
            "Max nesting depth:      %5d" % self.max_depth,
        ])
        if self.total_chars == 0:
            lines.append("Unnecessary whitespace: 0 of 0 characters")
        else:
            lines.append("Unnecessary whitespace: %5d of %d characters (%.2f%%)"
                         % (self.num_excess_whitespace, self.total_chars,
                            self.num_excess_whitespace * 100.0 / self.total_chars))
        if prefix:
            return '\n'.join([ prefix+s for s in lines ]) + '\n'
        else:
            return '\n'.join( lines ) + '\n'


#----------------------------------------------------------------------
# Decoder state object
#----------------------------------------------------------------------

class decode_state(object):
    """An internal transient object used during JSON decoding to
    record the current parsing state and error messages.
    """
    def __init__(self, options=None):
        self.reset()
        self.options = options

    def reset(self):
        """Clears all errors, statistics, and input text."""
        self.buf = None
        self.errors = []
        self.obj = None
        self.cur_depth = 0   # how deep in nested structures are we?
        self.stats = decode_statistics()
        self._have_warned_nonbmp = False
        self._have_warned_long_string = False
        self._have_warned_max_depth = False

    @property
    def should_stop(self):
        if self.has_fatal:
            return True
        return False

    @property
    def has_errors(self):
        """Have any errors been seen already?"""
        return len([err for err in self.errors if err.severity in ('fatal','error')]) > 0

    @property
    def has_fatal(self):
        """Have any fatal errors been seen already?"""
        return len([err for err in self.errors if err.severity in ('fatal',)]) > 0

    def set_input( self, txt, encoding=None ):
        """Initialize the state by setting the input document text."""
        import sys
        self.reset()
        try:
            self.buf = buffered_stream( txt, encoding=encoding )
        except JSONError as err:
            err.position = 0   # set position to start of file
            err.severity = 'fatal'
            self.push_exception( err )
        except Exception as err:
            # Re-raise as JSONDecodeError
            e2 = sys.exc_info()
            newerr = JSONDecodeError("Error while reading input", position=0, severity='fatal')
            newerr.__cause__ = err         # Fixed: the chained exception was never attached,
            newerr.__traceback__ = e2[2]   # and the original error was pushed instead of newerr
            self.push_exception( newerr )
            self.buf = None
        else:
            if self.buf.bom:
                self.push_cond( self.options.bom,
                                "JSON document was prefixed by a BOM (Byte Order Mark)",
                                self.buf.bom )
        if not self.buf:
            self.push_fatal( "Aborting, can not read JSON document.", position=0 )

    def push_exception(self, exc):
        """Add an already-built exception to the error list."""
        self.errors.append(exc)

    def push_fatal(self, message, *args, **kwargs):
        """Create a fatal error."""
        kwargs['severity'] = 'fatal'
        self.__push_err( message, *args, **kwargs )

    def push_error(self, message, *args, **kwargs):
        """Create an error."""
        kwargs['severity'] = 'error'
        self.__push_err( message, *args, **kwargs )

    def push_warning(self, message, *args, **kwargs):
        """Create a warning."""
        kwargs['severity'] = 'warning'
        self.__push_err( message, *args, **kwargs )

    def push_info(self, message, *args, **kwargs):
        """Create an informational message."""
        kwargs['severity'] = 'info'
        self.__push_err( message, *args, **kwargs )

    def push_cond(self, behavior_value, message, *args, **kwargs):
        """Creates a conditional error or warning message.
The behavior value (from json_options) controls whether a message will be pushed and whether it is an error or warning message. """ if behavior_value == ALLOW: return elif behavior_value == WARN: kwargs['severity'] = 'warning' else: kwargs['severity'] = 'error' self.__push_err( message, *args, **kwargs ) def __push_err(self, message, *args, **kwargs): """Stores an error in the error list.""" position = None outer_position = None severity = 'error' context_description = None for kw, val in kwargs.items(): if kw == 'position': position = val elif kw == 'outer_position': outer_position = val elif kw == 'severity': severity = val elif kw == 'context_description' or kw == 'context': context_description=val else: raise TypeError('Unknown keyword argument',kw) if position is None and self.buf: position = self.buf.position # Current position err = JSONDecodeError( message, position=position, outer_position=outer_position, context_description=context_description, severity=severity, *args) self.push_exception( err ) def update_depth_stats(self, **kwargs): st = self.stats st.max_depth = max(st.max_depth, self.cur_depth) if not self._have_warned_max_depth and self.cur_depth > self.options.warn_max_depth: self._have_warned_max_depth = True self.push_cond( self.options.non_portable, "Arrays or objects nested deeper than %d levels may not be portable" \ % self.options.warn_max_depth ) def update_string_stats(self, s, **kwargs): st = self.stats st.num_strings += 1 st.max_string_length = max(st.max_string_length, len(s)) st.total_string_length += len(s) if self.options.warn_string_length and len(s) > self.options.warn_string_length and not self._have_warned_long_string: self._have_warned_long_string = True self.push_cond( self.options.non_portable, "Strings longer than %d may not be portable" % self.options.warn_string_length, **kwargs ) if len(s) > 0: mincp = ord(min(s)) maxcp = ord(max(s)) if st.min_codepoint is None: st.min_codepoint = mincp st.max_codepoint = maxcp else: st.min_codepoint = min( st.min_codepoint, mincp ) st.max_codepoint = max( st.max_codepoint, maxcp ) if maxcp > 0xffff and not self._have_warned_nonbmp: self._have_warned_nonbmp = True self.push_cond( self.options.non_portable, "Strings containing non-BMP characters (U+%04X) may not be portable" % maxcp, **kwargs ) def update_negzero_int_stats(self, **kwargs): st = self.stats st.num_negative_zero_ints += 1 if st.num_negative_zero_ints == 1: # Only warn once self.push_cond( self.options.non_portable, "Negative zero (-0) integers are usually not portable", **kwargs ) def update_negzero_float_stats(self, **kwargs): st = self.stats st.num_negative_zero_floats += 1 if st.num_negative_zero_floats == 1: # Only warn once self.push_cond( self.options.non_portable, "Negative zero (-0.0) numbers may not be portable", **kwargs) def update_float_stats(self, float_value, **kwargs): st = self.stats if 'sign' in kwargs: del kwargs['sign'] if helpers.is_negzero( float_value ): self.update_negzero_float_stats( **kwargs ) if helpers.is_infinite( float_value ): st.num_infinities += 1 if isinstance(float_value, decimal.Decimal): st.num_floats_decimal += 1 if st.num_floats_decimal == 1: # Only warn once self.push_cond( self.options.non_portable, "Floats larger or more precise than an IEEE \"double\" may not be portable", **kwargs) elif isinstance(float_value, float): st.num_floats += 1 def update_integer_stats(self, int_value, **kwargs ): sign=kwargs.get('sign', 1) if 'sign' in kwargs: del kwargs['sign'] if int_value == 0 and sign < 0: 
self.update_negzero_int_stats( **kwargs ) if sign < 0: int_value = - int_value st = self.stats st.num_ints += 1 if st.int8_min <= int_value <= st.int8_max: st.num_ints_8bit += 1 elif st.int16_min <= int_value <= st.int16_max: st.num_ints_16bit += 1 elif st.int32_min <= int_value <= st.int32_max: st.num_ints_32bit += 1 elif st.int64_min <= int_value <= st.int64_max: st.num_ints_64bit += 1 else: st.num_ints_long += 1 if int_value < st.double_int_min or st.double_int_max < int_value: st.num_ints_53bit += 1 if st.num_ints_53bit == 1: # Only warn once self.push_cond( self.options.non_portable, "Integers larger than 53-bits are not portable", **kwargs ) # ---------------------------------------------------------------------- # JSON strictness options # ---------------------------------------------------------------------- STRICTNESS_STRICT = 'strict' STRICTNESS_WARN = 'warn' STRICTNESS_TOLERANT = 'tolerant' ALLOW = 'allow' WARN = 'warn' FORBID = 'forbid' # For float_type option NUMBER_AUTO = 'auto' NUMBER_FLOAT = 'float' NUMBER_DECIMAL = 'decimal' # For json_int class NUMBER_FORMAT_DECIMAL = 'decimal' NUMBER_FORMAT_HEX = 'hex' NUMBER_FORMAT_LEGACYOCTAL = 'legacyoctal' NUMBER_FORMAT_OCTAL = 'octal' NUMBER_FORMAT_BINARY = 'binary' class _behaviors_metaclass(type): """Meta class used to establish a set of "behavior" options. Classes that use this meta class must defined a class-level variable called '_behaviors' that is a list of tuples, each of which describes one behavior and is like: (behavior_name, documentation). Also define a second class-level variable called '_behavior_values' which is a list of the permitted values for each behavior, each being strings. For each behavior (e.g., pretty), and for each value (e.g., yes) the following methods/properties will be created: * pretty - value of 'pretty' behavior (read-write) * ispretty_yes - returns True if 'pretty' is 'yes' For each value (e.g., pink) the following methods/properties will be created: * all_behaviors - set of all behaviors (read-only) * pink_behaviors - set of behaviors with value of 'pink' (read-only) * set_all('pink') * set_all_pink() - set all behaviors to value of 'pink' """ def __new__(cls, clsname, bases, attrs): values = attrs.get('_behavior_values') attrs['values'] = property( lambda self: set(self._behavior_values), doc='Set of possible behavior values') behaviors = attrs.get('_behaviors') def get_behavior(self, name): """Returns the value for a given behavior""" try: return getattr( self, '_behavior_'+name ) except AttributeError: raise ValueError('Unknown behavior',name) attrs['get_behavior'] = get_behavior def set_behavior(self, name, value): """Changes the value for a given behavior""" if value not in self._behavior_values: raise ValueError('Unknown value for behavior',value) varname = '_behavior_'+name if hasattr(self,varname): setattr( self, varname, value ) else: raise ValueError('Unknown behavior',name) attrs['set_behavior'] = set_behavior def describe_behavior(self,name): """Returns documentation about a given behavior.""" for n, doc in self._behaviors: if n==name: return doc else: raise AttributeError('No such behavior',name) attrs['describe_behavior'] = describe_behavior for name, doc in behaviors: attrs['_behavior_'+name] = True for v in values: vs = v + '_' + name def getx(self,name=name,forval=v): return self.get_behavior(name) == forval attrs['is_'+v+'_'+name] = property(getx,doc=v.capitalize()+' '+doc) # method value_name() fnset = lambda self,_name=name,_value=v: self.set_behavior(_name,_value) 
fnset.__name__ = v+'_'+name fnset.__doc__ = 'Set behavior ' + name + ' to ' + v + "." attrs[fnset.__name__] = fnset def get_value_for_behavior(self,name=name): return self.get_behavior(name) def set_value_for_behavior(self,value,name=name): self.set_behavior(name,value) attrs[name] = property(get_value_for_behavior,set_value_for_behavior,doc=doc) @property def all_behaviors(self): """Returns the names of all known behaviors.""" return set([t[0] for t in self._behaviors]) attrs['all_behaviors'] = all_behaviors def set_all(self,value): """Changes all behaviors to have the given value.""" if value not in self._behavior_values: raise ValueError('Unknown behavior',value) for name in self.all_behaviors: setattr(self, '_behavior_'+name, value) attrs['set_all'] = set_all def is_all(self,value): """Determines if all the behaviors have the given value.""" if value not in self._behavior_values: raise ValueError('Unknown behavior',value) for name in self.all_behaviors: if getattr(self, '_behavior_'+name) != value: return False return True attrs['is_all'] = is_all for v in values: # property value_behaviors def getbehaviorsfor(self,value=v): return set([name for name in self.all_behaviors if getattr(self,name)==value]) attrs[v+'_behaviors'] = property(getbehaviorsfor,doc='Return the set of behaviors with the value '+v+'.') # method set_all_value() setfn = lambda self,_value=v: set_all(self,_value) setfn.__name__ = 'set_all_'+v setfn.__doc__ = 'Set all behaviors to value ' + v + "." attrs[setfn.__name__] = setfn # property is_all_value attrs['is_all_'+v] = property( lambda self,v=v: is_all(self,v), doc='Determines if all the behaviors have the value '+v+'.') def behaviors_eq(self, other): """Determines if two options objects are equivalent.""" if self.all_behaviors != other.all_behaviors: return False return self.allowed_behaviors == other.allowed_behaviors attrs['__eq__'] = behaviors_eq return super(_behaviors_metaclass, cls).__new__(cls, clsname, bases, attrs) SORT_NONE = 'none' SORT_PRESERVE = 'preserve' SORT_ALPHA = 'alpha' SORT_ALPHA_CI = 'alpha_ci' SORT_SMART = 'smart' sorting_methods = { SORT_NONE: "Do not sort, resulting order may be random", SORT_PRESERVE: "Preserve original order when reformatting", SORT_ALPHA: "Sort strictly alphabetically", SORT_ALPHA_CI: "Sort alphabetically case-insensitive", SORT_SMART: "Sort alphabetically and numerically (DEFAULT)" } sorting_method_aliases = { 'ci': SORT_ALPHA_CI } def smart_sort_transform( key ): numfmt = '%012d' digits = '0123456789' zero = ord('0') if not key: key = '' elif isinstance( key, (int,long) ): key = numfmt % key elif isinstance( key, basestring ): keylen = len(key) words = [] i=0 while i < keylen: if key[i] in digits: num = 0 while i < keylen and key[i] in digits: num *= 10 num += ord(key[i]) - zero i += 1 words.append( numfmt % num ) else: words.append( key[i].upper() ) i += 1 key = ''.join(words) else: key = str(key) return key # Find Enum type (introduced in Python 3.4) try: from enum import Enum as _enum except ImportError: _enum = None # Find OrderedDict type try: from collections import OrderedDict as _OrderedDict except ImportError: _OrderedDict = None class json_options(object): """Options to determine how strict the decoder or encoder should be.""" __metaclass__ = _behaviors_metaclass _behavior_values = (ALLOW, WARN, FORBID) _behaviors = ( ("all_numeric_signs", "Numbers may be prefixed by any \'+\' and \'-\', e.g., +4, -+-+77"), ("any_type_at_start", "A JSON document may start with any type, not just arrays or objects"), 
("comments", "JavaScript comments, both /*...*/ and //... styles"), ("control_char_in_string", "Strings may contain raw control characters without \\u-escaping"), ("hex_numbers", "Hexadecimal numbers, e.g., 0x1f"), ("binary_numbers", "Binary numbers, e.g., 0b1001"), ("octal_numbers", "New-style octal numbers, e.g., 0o731 (see leading-zeros for legacy octals)"), ("initial_decimal_point", "Floating-point numbers may start with a decimal point (no units digit)"), ("extended_unicode_escapes", "Extended Unicode escape sequence \\u{..} for non-BMP characters"), ("js_string_escapes", "All JavaScript character \\-escape sequences may be in strings"), ("leading_zeros", "Numbers may have extra leading zeros (see --leading-zero-radix option)"), ("non_numbers", "Non-numbers may be used, such as NaN or Infinity"), ("nonescape_characters", "Unknown character \\-escape sequences stand for that character (\\Q -> 'Q')"), ("identifier_keys", "JavaScript identifiers are converted to strings when used as object keys"), ("nonstring_keys", "Value types other than strings (or identifiers) may be used as object keys"), ("omitted_array_elements", "Arrays may have omitted/elided elements, e.g., [1,,3] == [1,undefined,3]"), ("single_quoted_strings", "Strings may be delimited with both double (\") and single (\') quotation marks"), ("trailing_comma", "A final comma may end the list of array or object members"), ("trailing_decimal_point", "Floating-point number may end with a decimal point and no following fractional digits"), ("undefined_values", "The JavaScript 'undefined' value may be used"), ("format_control_chars", "Unicode \"format control characters\" may appear in the input"), ("unicode_whitespace", "Treat any Unicode whitespace character as valid whitespace"), # Never legal ("leading_zeros", "Numbers may have leading zeros"), # Normally warnings ("duplicate_keys", "Objects may have repeated keys"), ("zero_byte", "Strings may contain U+0000, which may not be safe for C-based programs"), ("bom", "A JSON document may start with a Unicode BOM (Byte Order Mark)"), ("non_portable", "Anything technically valid but likely to cause data portablibity issues"), ) # end behavior list def reset_to_defaults(self): # Plain attrs (other than above behaviors) are simply copied # by value, either during initialization (via keyword # arguments) or via the copy() method. self._plain_attrs = ['leading_zero_radix', 'encode_namedtuple_as_object', 'encode_enum_as', 'encode_compactly', 'escape_unicode', 'always_escape_chars', 'warn_string_length', 'warn_max_depth', 'int_as_float', 'decimal_context', 'float_type', 'keep_format', 'date_format', 'datetime_format', 'time_format', 'timedelta_format', 'sort_keys', 'indent_amount', 'indent_tab_width', 'indent_limit', 'max_items_per_line', 'py2str_encoding', 'encode_quoted_property_names', # mcedit ] self.strictness = STRICTNESS_WARN self._leading_zero_radix = 8 # via property: leading_zero_radix self._sort_keys = SORT_SMART # via property: sort_keys self.int_as_float = False self.float_type = NUMBER_AUTO self.decimal_context = (decimal.DefaultContext if decimal else None) self.keep_format = False # keep track of when numbers are hex, octal, etc. 
        self.encode_namedtuple_as_object = True
        self._encode_enum_as = 'name'  # via property
        self.encode_compactly = True
        self.escape_unicode = False
        self.always_escape_chars = None  # None, or a set of Unicode characters to always escape
        self.warn_string_length = 0xfffd  # with 16-bit length prefix
        self.warn_max_depth = 64
        self.date_format = 'iso'      # or strftime format
        self.datetime_format = 'iso'  # or strftime format
        self.time_format = 'iso'      # or strftime format
        self.timedelta_format = 'iso' # or 'hms'
        self.sort_keys = SORT_SMART
        self.indent_amount = 2
        self.indent_tab_width = 0   # 0, or number of equivalent spaces
        self.indent_limit = None
        self.max_items_per_line = 1  # When encoding how many items per array/object
                                     # before breaking into multiple lines
        # For interpreting Python 2 'str' types:
        if _py_major == 2:
            self.py2str_encoding = 'ascii'
        else:
            self.py2str_encoding = None
        # For command block output
        self.encode_quoted_property_names = True

    def __init__(self, **kwargs):
        """Set JSON encoding and decoding options.

        If 'strict' is set to True, then only strictly-conforming JSON
        output will be produced.  Note that this means that some types
        of values may not be convertible and will result in a
        JSONEncodeError exception.

        If 'compactly' is set to True, then the resulting string will
        have all extraneous white space removed; if False then the
        string will be "pretty printed" with whitespace and
        indentation added to make it more readable.

        If 'escape_unicode' is set to True, then all non-ASCII
        characters will be represented as a unicode escape sequence;
        if False then the actual real unicode character will be
        inserted if possible.

        The 'escape_unicode' can also be a function, which when called
        with a single argument of a unicode character will return True
        if the character should be escaped or False if it should not.

        """
        self.reset_to_defaults()

        if 'strict' in kwargs:
            # Do this keyword first, so other keywords may override specific behaviors
            self.strictness = kwargs['strict']

        for kw, val in kwargs.items():
            if kw == 'compactly':  # alias for 'encode_compactly'
                self.encode_compactly = val
            elif kw == 'strict':
                pass  # Already handled
            elif kw == 'warnings':
                if not val:
                    self.suppress_warnings()
            elif kw == 'html_safe' or kw == 'xml_safe':
                if bool(val):
                    if self.always_escape_chars is None:
                        self.always_escape_chars = set(u'<>/&')
                    else:
                        self.always_escape_chars.update( set(u'<>/&') )
            elif kw == 'always_escape':
                if val:
                    if self.always_escape_chars is None:
                        self.always_escape_chars = set(val)
                    else:
                        self.always_escape_chars.update( set(val) )
            elif kw == 'int_as_float':
                self.int_as_float = bool(val)
            elif kw == 'keep_format':
                self.keep_format = bool(val)
            elif kw == 'float_type':
                if val in (NUMBER_AUTO, NUMBER_FLOAT, NUMBER_DECIMAL):
                    self.float_type = val
                else:
                    raise ValueError("Unknown option %r for argument %r to initialize %s" % (val,kw,self.__class__.__name__))
            elif kw == 'decimal' or kw == 'decimal_context':
                if decimal:
                    if not val or val == 'default':
                        self.decimal_context = decimal.DefaultContext
                    elif val == 'basic':
                        self.decimal_context = decimal.BasicContext
                    elif val == 'extended':
                        self.decimal_context = decimal.ExtendedContext
                    elif isinstance(val, decimal.Context):
                        self.decimal_context = val
                    elif isinstance(val,(int,long)) or val[0].isdigit():
                        prec = int(val)
                        self.decimal_context = decimal.Context( prec=prec )
                    else:
                        raise ValueError("Option for %r should be a decimal.Context, a number of significant digits, or one of 'default','basic', or 'extended'."
% (kw,)) elif kw in ('allow','warn','forbid','prevent','deny'): action = {'allow':ALLOW, 'warn':WARN, 'forbid':FORBID, 'prevent':FORBID, 'deny':FORBID}[ kw ] if isinstance(val,basestring): val = [b.replace('-','_') for b in val.replace(',',' ').split()] for behavior in val: self.set_behavior( behavior, action ) elif kw.startswith('allow_') or kw.startswith('forbid_') or kw.startswith('prevent_') or kw.startswith('deny_') or kw.startswith('warn_'): action, behavior = kw.split('_',1) if action == 'allow': if val: self.set_behavior( behavior, ALLOW ) else: self.set_behavior( behavior, FORBID ) elif action in ('forbid','prevent','deny'): if val: self.set_behavior( behavior, FORBID ) else: self.set_behavior( behavior, ALLOW ) elif action == 'warn': if val: self.set_behavior( behavior, WARN ) else: self.set_behavior( behavior, ALLOW ) elif kw in self._plain_attrs: setattr(self, kw, val) else: raise ValueError("Unknown keyword argument %r to initialize %s" % (kw,self.__class__.__name__)) def copy(self): other = self.__class__() other.copy_from( self ) return other def copy_from(self, other): if self is other: return # Myself! self.strictness = other.strictness # sets behaviors in bulk for name in self.all_behaviors: self.set_behavior( name, other.get_behavior(name) ) for name in self._plain_attrs: val = getattr(other,name) if isinstance(val, set): val = val.copy() elif decimal and isinstance(val, decimal.Decimal): val = val.copy() setattr(self, name, val) def spaces_to_next_indent_level( self, min_spaces=1, subtract=0 ): n = self.indent_amount - subtract if n < 0: n = 0 n = max( min_spaces, n ) return ' ' * n def indentation_for_level( self, level=0 ): """Returns a whitespace string used for indenting.""" if self.indent_limit is not None and level > self.indent_limit: n = self.indent_limit else: n = level n *= self.indent_amount if self.indent_tab_width: tw, sw = divmod(n, self.indent_tab_width) return '\t'*tw + ' '*sw else: return ' ' * n def set_indent( self, num_spaces, tab_width=0, limit=None ): """Changes the indentation properties when outputting JSON in non-compact mode. 'num_spaces' is the number of spaces to insert for each level of indentation, which defaults to 2. 'tab_width', if not 0, is the number of spaces which is equivalent to one tab character. Tabs will be output where possible rather than runs of spaces. 'limit', if not None, is the maximum indentation level after which no further indentation will be output. """ n = int(num_spaces) if n < 0: raise ValueError("indentation amount can not be negative",n) self.indent_amount = n self.indent_tab_width = tab_width self.indent_limit = limit @property def sort_keys(self): """The method used to sort dictionary keys when encoding JSON """ return self._sort_keys @sort_keys.setter def sort_keys(self, method): if not method: self._sort_keys = SORT_NONE elif callable(method): self._sort_keys = method elif method in sorting_methods: self._sort_keys = method elif method in sorting_method_aliases: # alias self._sort_keys = sorting_method_aliases[method] elif method == True: self._sort_keys = SORT_ALPHA else: raise ValueError("Not a valid sorting method: %r" % method) @property def encode_enum_as(self): """The strategy for encoding Python Enum values. 
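        Must be one of 'name', 'qname', or 'value' (enforced by the
        setter below).  As an illustrative sketch, for a hypothetical
        Enum member Color.red with value 1, 'name' would encode it
        like the string "red", 'qname' like "Color.red", and 'value'
        as the number 1.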
""" return self._encode_enum_as @encode_enum_as.setter def encode_enum_as(self, val): if val not in ('name','qname','value'): raise ValueError("encode_enum_as must be one of 'name','qname', or 'value'") self._encode_enum_as = val @property def zero_float(self): """The numeric value 0.0, either a float or a decimal.""" if decimal and self.float_type == NUMBER_DECIMAL: return self.decimal_context.create_decimal('0.0') else: return 0.0 @property def negzero_float(self): """The numeric value -0.0, either a float or a decimal.""" if decimal and self.float_type == NUMBER_DECIMAL: return self.decimal_context.create_decimal('-0.0') else: return -0.0 @property def nan(self): """The numeric value NaN, either a float or a decimal.""" if decimal and self.float_type == NUMBER_DECIMAL: return self.decimal_context.create_decimal('NaN') else: return nan @property def inf(self): """The numeric value Infinity, either a float or a decimal.""" if decimal and self.float_type == NUMBER_DECIMAL: return self.decimal_context.create_decimal('Infinity') else: return inf @property def neginf(self): """The numeric value -Infinity, either a float or a decimal.""" if decimal and self.float_type == NUMBER_DECIMAL: return self.decimal_context.create_decimal('-Infinity') else: return neginf def make_int( self, s, sign=None, number_format=NUMBER_FORMAT_DECIMAL ): """Makes an integer value according to the current options. First argument should be a string representation of the number, or an integer. Returns a number value, which could be an int, float, or decimal. """ if isinstance(sign, (int,long)): if sign < 0: sign = '-' else: sign = '+' if isinstance(s,basestring): if s.startswith('-') or s.startswith('+'): sign = s[0] s = s[1:] if self.int_as_float: # Making a float/decimal if isinstance(s, (int,long)): if self.float_type == NUMBER_DECIMAL: n = self.decimal_context.create_decimal( s ) if sign=='-': n = n.copy_negate() elif s == 0 and sign=='-': n = self.negzero_float elif -999999999999999 <= s <= 999999999999999: n = float(s) if sign=='-': n *= -1 else: n = float(s) if (n == inf or int(n) != s) and self.float_type != NUMBER_FLOAT: n = self.decimal_context.create_decimal( s ) if sign=='-': n = n.copy_negate() elif sign=='-': n *= -1 else: # not already an int n = self.make_float( s, sign ) n2 = self.make_float( s[:-1] + ('9' if s[-1]<='5' else '0'), sign ) if (n==inf or n==n2) and self.float_type != NUMBER_FLOAT: n = self.make_decimal( s, sign ) elif isinstance( s, (int,long) ): # already an integer n = s if sign=='-': if n == 0: n = self.negzero_float else: n *= -1 else: # Making an actual integer try: n = int( s ) except ValueError: n = self.nan else: if sign=='-': if n==0: n = self.negzero_float else: n *= -1 if isinstance(n,(int,long)) and self.keep_format: n = json_int(n, number_format=number_format) return n def make_decimal( self, s, sign='+' ): """Converts a string into a decimal or float value.""" if not decimal or self.float_type == NUMBER_FLOAT: return self.make_float( s, sign ) if s.startswith('-') or s.startswith('+'): sign = s[0] s = s[1:] elif isinstance(sign, (int,long)): if sign < 0: sign = '-' else: sign = '+' try: f = self.decimal_context.create_decimal( s ) except decimal.InvalidOperation: f = self.decimal_context.create_decimal( 'NaN' ) except decimal.Overflow: if sign=='-': f = self.decimal_context.create_decimal( '-Infinity' ) else: f = self.decimal_context.create_decimal( 'Infinity' ) else: if sign=='-': f = f.copy_negate() return f def make_float( self, s, sign='+' ): """Converts a string into a 
float or decimal value.""" if decimal and self.float_type == NUMBER_DECIMAL: return self.make_decimal( s, sign ) if s.startswith('-') or s.startswith('+'): sign = s[0] s = s[1:] elif isinstance(sign, (int,long)): if sign < 0: sign = '-' else: sign = '+' try: f = float(s) except ValueError: f = nan else: if sign=='-': f *= -1 return f @property def leading_zero_radix(self): """The radix to be used for numbers with leading zeros. 8 or 10 """ return self._leading_zero_radix @leading_zero_radix.setter def leading_zero_radix(self, radix): if isinstance(radix,basestring): try: radix = int(radix) except ValueError: radix = radix.lower() if radix=='octal' or radix=='oct' or radix=='8': radix = 8 elif radix=='decimal' or radix=='dec': radix = 10 if radix not in (8,10): raise ValueError("Radix must either be 8 (octal) or 10 (decimal)") self._leading_zero_radix = radix @property def leading_zero_radix_as_word(self): return {8:'octal', 10:'decimal'}[ self._leading_zero_radix ] def suppress_warnings(self): for name in self.warn_behaviors: self.set_behavior(name, 'allow') @property def allow_or_warn_behaviors(self): """Returns the set of all behaviors that are not forbidden (i.e., are allowed or warned).""" return self.allow_behaviors.union( self.warn_behaviors ) @property def strictness(self): return self._strictness @strictness.setter def strictness(self, strict): """Changes whether the options should be re-configured for strict JSON conformance.""" if strict == STRICTNESS_WARN: self._strictness = STRICTNESS_WARN self.set_all_warn() elif strict == STRICTNESS_STRICT or strict is True: self._strictness = STRICTNESS_STRICT self.keep_format = False self.set_all_forbid() self.warn_duplicate_keys() self.warn_zero_byte() self.warn_bom() self.warn_non_portable() elif strict == STRICTNESS_TOLERANT or strict is False: self._strictness = STRICTNESS_TOLERANT self.set_all_allow() self.warn_duplicate_keys() self.warn_zero_byte() self.warn_leading_zeros() self.leading_zero_radix = 8 self.warn_bom() self.allow_non_portable() else: raise ValueError("Unknown strictness options %r" % strict) self.allow_any_type_at_start() # ---------------------------------------------------------------------- # The main JSON encoder/decoder class. # ---------------------------------------------------------------------- class JSON(object): """An encoder/decoder for JSON data streams. Usually you will call the encode() or decode() methods. The other methods are for lower-level processing. Whether the JSON parser runs in strict mode (which enforces exact compliance with the JSON spec) or the more forgiving non-string mode can be affected by setting the 'strict' argument in the object's initialization; or by assigning True or False to the 'strict' property of the object. You can also adjust a finer-grained control over strictness by allowing or forbidding specific behaviors. You can get a list of all the available behaviors by accessing the 'behaviors' property. Likewise the 'allowed_behaviors' and 'forbidden_behaviors' list which behaviors will be allowed and which will not. Call the allow() or forbid() methods to adjust these. """ _string_quotes = '"\'' _escapes_json = { # character escapes in JSON '"': '"', '/': '/', '\\': '\\', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', } _escapes_js = { # character escapes in Javascript '"': '"', '\'': '\'', '\\': '\\', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '0': '\x00' } # Following is a reverse mapping of escape characters, used when we # output JSON. 
Only those escapes which are always safe (e.g., in JSON) # are here. It won't hurt if we leave questionable ones out. _rev_escapes = {'\n': '\\n', '\t': '\\t', '\b': '\\b', '\r': '\\r', '\f': '\\f', '"': '\\"', '\\': '\\\\' } _optional_rev_escapes = { '/': '\\/' } # only escaped if forced to do so json_syntax_characters = u"{}[]\"\\,:0123456789.-+abcdefghijklmnopqrstuvwxyz \t\n\r" all_hook_names = ('decode_number', 'decode_float', 'decode_object', 'decode_array', 'decode_string', 'encode_value', 'encode_dict', 'encode_dict_key', 'encode_sequence', 'encode_bytes', 'encode_default') def __init__(self, **kwargs): """Creates a JSON encoder/decoder object. You may pass encoding and decoding options either by passing an argument named 'json_options' with an instance of a json_options class; or with individual keyword/values that will be used to initialize a new json_options object. You can also set hooks by using keyword arguments using the hook name; e.g., encode_dict=my_hook_func. """ import sys, unicodedata, re kwargs = kwargs.copy() # Initialize hooks for hookname in self.all_hook_names: if hookname in kwargs: self.set_hook( hookname, kwargs[hookname] ) del kwargs[hookname] else: self.set_hook( hookname, None ) # Set options if 'json_options' in kwargs: self._options = kwargs['json_options'] else: self._options = json_options(**kwargs) # The following is a boolean map of the first 256 characters # which will quickly tell us which of those characters never # need to be escaped. self._asciiencodable = \ [32 <= c < 128 \ and not self._rev_escapes.has_key(chr(c)) \ and not unicodedata.category(unichr(c)) in ['Cc','Cf','Zl','Zp'] for c in range(0,256)] @property def options(self): """The optional behaviors used, e.g., the JSON conformance strictness. Returns an instance of json_options. """ return self._options def clear_hook(self, hookname): """Unsets a hook callback, as previously set with set_hook().""" self.set_hook( hookname, None ) def clear_all_hooks(self): """Unsets all hook callbacks, as previously set with set_hook().""" for hookname in self.all_hook_names: self.clear_hook( hookname ) def set_hook(self, hookname, function): """Sets a user-defined callback function used during encoding or decoding. The 'hookname' argument must be a string containing the name of one of the available hooks, listed below. The 'function' argument must either be None, which disables the hook, or a callable function. Hooks do not stack, if you set a hook it will undo any previously set hook. Netsted values. When decoding JSON that has nested objects or arrays, the decoding hooks will be called once for every corresponding value, even if nested. Generally the decoding hooks will be called from the inner-most value outward, and then left to right. Skipping. Any hook function may raise a JSONSkipHook exception if it does not wish to handle the particular invocation. This will have the effect of skipping the hook for that particular value, as if the hook was net set. AVAILABLE HOOKS: * decode_string Called for every JSON string literal with the Python-equivalent string value as an argument. Expects to get a Python object in return. * decode_float: Called for every JSON number that looks like a float (has a "."). The string representation of the number is passed as an argument. Expects to get a Python object in return. * decode_number: Called for every JSON number. The string representation of the number is passed as an argument. Expects to get a Python object in return. 
NOTE: If the number looks like a float and the 'decode_float' hook is set, then this hook will not be called. * decode_array: Called for every JSON array. A Python list is passed as the argument, and expects to get a Python object back. NOTE: this hook will get called for every array, even for nested arrays. * decode_object: Called for every JSON object. A Python dictionary is passed as the argument, and expects to get a Python object back. NOTE: this hook will get called for every object, even for nested objects. * encode_value: Called for every Python object which is to be encoded into JSON. * encode_dict: Called for every Python dictionary or anything that looks like a dictionary. * encode_dict_key: Called for every dictionary key. * encode_sequence: Called for every Python sequence-like object that is not a dictionary or string. This includes lists and tuples. * encode_bytes: Called for every Python bytes or bytearray type; or for any memoryview with a byte ('B') item type. (Python 3 only) * encode_default: Called for any Python type which can not otherwise be converted into JSON, even after applying any other encoding hooks. """ if hookname in self.all_hook_names: att = hookname + '_hook' if function != None and not callable(function): raise ValueError("Hook %r must be None or a callable function" % hookname) setattr( self, att, function ) else: raise ValueError("Unknown hook name %r" % hookname) def has_hook(self, hook_name): if not hook_name or hook_name not in self.all_hook_names: return False hook = getattr( self, hook_name + '_hook' ) return callable(hook) def call_hook(self, hook_name, input_object, position=None, *args, **kwargs): """Wrapper function to invoke a user-supplied hook function. This will capture any exceptions raised by the hook and do something appropriate with it. """ import sys if hook_name not in self.all_hook_names: raise AttributeError("No such hook %r" % hook_name) hook = getattr( self, hook_name + '_hook' ) if not callable(hook): raise TypeError("Hook is not callable: %r" % (hook,)) try: rval = hook( input_object, *args, **kwargs ) except JSONSkipHook: raise # Do nothing except Exception, err: exc_info = sys.exc_info() if hook_name.startswith('encode_'): ex_class = JSONEncodeHookError else: ex_class = JSONDecodeHookError if isinstance(err, JSONStopProcessing): severity = 'fatal' else: severity = 'error' newerr = ex_class( hook_name, exc_info, input_object, *args, position=position, severity=severity ) # Simulate Python 3's: "raise X from Y" exception chaining newerr.__cause__ = err newerr.__traceback__ = exc_info[2] raise newerr return rval def isws(self, c): """Determines if the given character is considered as white space. Note that Javscript is much more permissive on what it considers to be whitespace than does JSON. Ref. ECMAScript section 7.2 """ if not self.options.unicode_whitespace: return c in ' \t\n\r' else: if not isinstance(c,unicode): c = unicode(c) if c in u' \t\n\r\f\v': return True import unicodedata return unicodedata.category(c) == 'Zs' def islineterm(self, c): """Determines if the given character is considered a line terminator. Ref. 
ECMAScript section 7.3 """ if c == '\r' or c == '\n': return True if c == u'\u2028' or c == u'\u2029': # unicodedata.category(c) in ['Zl', 'Zp'] return True return False def recover_parser(self, state): """Try to recover after a syntax error by locating the next "known" position.""" buf = state.buf buf.skipuntil( lambda c: c in ",:[]{}\"\';" or helpers.char_is_unicode_eol(c) ) stopchar = buf.peek() self.skipws(state) if buf.at_end: state.push_info("Could not recover parsing after previous error",position=buf.position) else: state.push_info("Recovering parsing after character %r" % stopchar, position=buf.position) return stopchar def decode_null(self, state): """Intermediate-level decoder for ECMAScript 'null' keyword. Takes a string and a starting index, and returns a Python None object and the index of the next unparsed character. """ buf = state.buf start_position = buf.position kw = buf.pop_identifier() if not kw or kw != 'null': state.push_error("Expected a 'null' keyword'", kw, position=start_position) else: state.stats.num_nulls += 1 return None def encode_undefined(self, state): """Produces the ECMAScript 'undefined' keyword.""" state.append('undefined') def encode_null(self, state): """Produces the JSON 'null' keyword.""" state.append('null') def decode_boolean(self, state): """Intermediate-level decode for JSON boolean literals. Takes a string and a starting index, and returns a Python bool (True or False) and the index of the next unparsed character. """ buf = state.buf start_position = buf.position kw = buf.pop_identifier() if not kw or kw not in ('true','false'): state.push_error("Expected a 'true' or 'false' keyword'", kw, position=start_position) else: state.stats.num_bools += 1 return (kw == 'true') def encode_boolean(self, bval, state): """Encodes the Python boolean into a JSON Boolean literal.""" state.append( 'true' if bool(bval) else 'false' ) def decode_number(self, state): """Intermediate-level decoder for JSON numeric literals. Takes a string and a starting index, and returns a Python suitable numeric type and the index of the next unparsed character. The returned numeric type can be either of a Python int, long, or float. In addition some special non-numbers may also be returned such as nan, inf, and neginf (technically which are Python floats, but have no numeric value.) Ref. ECMAScript section 8.5. """ buf = state.buf self.skipws(state) start_position = buf.position # Use external number parser hook if available if self.has_hook('decode_number') or self.has_hook('decode_float'): c = buf.peek() if c and c in '-+0123456789.': # First chars for a number-like value buf.save_position() nbr = buf.pop_while_in( '-+0123456789abcdefABCDEF' 'NaN' 'Infinity.' ) if '.' 
in nbr and self.has_hook('decode_float'):
                    hook_name = 'decode_float'
                elif self.has_hook('decode_number'):
                    hook_name = 'decode_number'
                else:
                    hook_name = None
                if hook_name:
                    try:
                        val = self.call_hook( hook_name, nbr, position=start_position )
                    except JSONSkipHook:
                        pass
                    except JSONError, err:
                        state.push_exception(err)
                        val = undefined
                    else:
                        buf.clear_saved_position()
                        return val
                # Hook didn't handle it, restore old position
                buf.restore_position()

        # Detect initial sign character(s)
        sign = +1
        sign_count = 0
        sign_saw_plus = False
        sign_saw_ws = False
        c = buf.peek()
        while c and c in '+-':
            if c == '-':
                sign = sign * -1
            elif c == '+':
                sign_saw_plus = True
            sign_count += 1
            buf.skip()
            if self.skipws_nocomments(state) > 0:
                sign_saw_ws = True
            c = buf.peek()
        if sign_count > 1 or sign_saw_plus:
            state.push_cond( self.options.all_numeric_signs,
                             'Numbers may only have a single "-" as a sign prefix',
                             position=start_position)
        if sign_saw_ws:
            state.push_error('Spaces may not appear between a +/- number sign and the digits', position=start_position)

        # Check for ECMAScript symbolic non-numbers
        if not c:
            state.push_error('Missing numeric value after sign', position=start_position)
            self.recover_parser(state)
            state.stats.num_undefineds += 1
            return undefined
        elif c.isalpha() or c in '_$':
            kw = buf.popwhile( lambda c: c.isalnum() or c in '_$' )
            if kw == 'NaN':
                state.push_cond( self.options.non_numbers,
                                 'NaN literals are not allowed in strict JSON',
                                 position=start_position)
                state.stats.num_nans += 1
                return self.options.nan
            elif kw == 'Infinity':
                state.push_cond( self.options.non_numbers,
                                 'Infinity literals are not allowed in strict JSON',
                                 position=start_position)
                state.stats.num_infinities += 1
                if sign < 0:
                    return self.options.neginf
                else:
                    return self.options.inf
            else:
                state.push_error('Unknown numeric value keyword', kw, position=start_position)
                return undefined
        # Check for radix-prefixed numbers
        elif c == '0' and (buf.peek(1) in [u'x',u'X']):
            # ----- HEX NUMBERS   0x123
            prefix = buf.popstr(2)
            digits = buf.popwhile( helpers.is_hex_digit )
            state.push_cond( self.options.hex_numbers,
                             'Hexadecimal literals are not allowed in strict JSON', prefix+digits,
                             position=start_position )
            if len(digits) == 0:
                state.push_error('Hexadecimal number is invalid', position=start_position)
                self.recover_parser(state)
                return undefined
            ival = helpers.decode_hex( digits )
            state.update_integer_stats( ival, sign=sign, position=start_position )
            n = state.options.make_int( ival, sign, number_format=NUMBER_FORMAT_HEX )
            return n
        elif c == '0' and (buf.peek(1) in [u'o','O']):
            # ----- NEW-STYLE OCTAL NUMBERS  0o123
            prefix = buf.popstr(2)
            digits = buf.popwhile( helpers.is_octal_digit )
            state.push_cond( self.options.octal_numbers,
                             "Octal literals are not allowed in strict JSON", prefix+digits,
                             position=start_position )
            if len(digits) == 0:
                state.push_error("Octal number is invalid", position=start_position)
                self.recover_parser(state)
                return undefined
            ival = helpers.decode_octal( digits )
            state.update_integer_stats( ival, sign=sign, position=start_position )
            n = state.options.make_int( ival, sign, number_format=NUMBER_FORMAT_OCTAL )
            return n
        elif c == '0' and (buf.peek(1) in [u'b','B']):
            # ----- NEW-STYLE BINARY NUMBERS  0b1101
            prefix = buf.popstr(2)
            digits = buf.popwhile( helpers.is_binary_digit )
            state.push_cond( self.options.binary_numbers,
                             "Binary literals are not allowed in strict JSON", prefix+digits,
                             position=start_position )
            if len(digits) == 0:
                state.push_error("Binary number is invalid", position=start_position)
                self.recover_parser(state)
                return undefined
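            # (Illustrative sketch: 0b1101 decodes to the integer 13; and
            #  when keep_format is set, make_int() below wraps the result
            #  in json_int so the binary notation can be remembered.)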
            ival = helpers.decode_binary( digits )
            state.update_integer_stats( ival, sign=sign, position=start_position )
            n = state.options.make_int( ival, sign, number_format=NUMBER_FORMAT_BINARY )
            return n
        else:
            # ----- DECIMAL OR LEGACY-OCTAL NUMBER.   123, 0123
            # General syntax is:  \d+[\.\d+][e[+-]?\d+]
            number = buf.popwhile( lambda c: c in '0123456789.+-eE' )
            imax = len(number)
            if imax == 0:
                state.push_error('Missing numeric value', position=start_position)
            has_leading_zero = False
            units_digits = []     # digits making up whole number portion
            fraction_digits = []  # digits making up fractional portion
            exponent_digits = []  # digits making up exponent portion (excluding sign)
            esign = '+'           # sign of exponent
            sigdigits = 0         # number of significant digits (approximate)
            saw_decimal_point = False
            saw_exponent = False

            # Break number into parts in a first pass...use a mini state machine
            in_part = 'units'
            for i, c in enumerate(number):
                if c == '.':
                    if in_part != 'units':
                        state.push_error('Bad number', number, position=start_position)
                        self.recover_parser(state)
                        return undefined
                    in_part = 'fraction'
                    saw_decimal_point = True
                elif c in 'eE':
                    if in_part == 'exponent':
                        state.push_error('Bad number', number, position=start_position)
                        self.recover_parser(state)
                        return undefined
                    in_part = 'exponent'
                    saw_exponent = True
                elif c in '+-':
                    if in_part != 'exponent' or exponent_digits:
                        state.push_error('Bad number', number, position=start_position)
                        self.recover_parser(state)
                        return undefined
                    esign = c
                else:  # digit
                    if in_part == 'units':
                        units_digits.append( c )
                    elif in_part == 'fraction':
                        fraction_digits.append( c )
                    elif in_part == 'exponent':
                        exponent_digits.append( c )
            units_s = ''.join(units_digits)
            fraction_s = ''.join(fraction_digits)
            exponent_s = ''.join(exponent_digits)

            # Basic syntax rules checking
            is_integer = not (saw_decimal_point or saw_exponent)

            if not units_s and not fraction_s:
                state.push_error('Bad number', number, position=start_position)
                self.recover_parser(state)
                return undefined

            if saw_decimal_point and not fraction_s:
                state.push_cond( self.options.trailing_decimal_point,
                                 'Bad number, decimal point must be followed by at least one digit',
                                 number, position=start_position)
                fraction_s = '0'

            if saw_exponent and not exponent_s:
                state.push_error('Bad number, exponent is missing', number, position=start_position)
                self.recover_parser(state)
                return undefined

            if not units_s:
                state.push_cond( self.options.initial_decimal_point,
                                 'Bad number, decimal point must be preceded by at least one digit',
                                 number, position=start_position)
                units_s = '0'
            elif len(units_s) > 1 and units_s[0] == '0':
                has_leading_zero = True
                if self.options.is_forbid_leading_zeros:
                    state.push_cond( self.options.leading_zeros,
                                     'Numbers may not have extra leading zeros',
                                     number, position=start_position)
                elif self.options.is_warn_leading_zeros:
                    state.push_cond( self.options.leading_zeros,
                                     'Numbers may not have leading zeros; interpreting as %s' \
                                     % self.options.leading_zero_radix_as_word,
                                     number, position=start_position)

            # Estimate number of significant digits
            sigdigits = len( (units_s + fraction_s).replace('0',' ').strip() )

            # Handle legacy octal integers.
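            # (Illustrative sketch: with leading_zero_radix == 8 the literal
            #  0123 decodes as 83, while with a radix of 10 it falls through
            #  to the plain decimal-integer path below and yields 123.)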
if has_leading_zero and is_integer and self.options.leading_zero_radix == 8: # ----- LEGACY-OCTAL 0123 try: ival = helpers.decode_octal( units_s ) except ValueError: state.push_error('Bad number, not a valid octal value', number, position=start_position) self.recover_parser(state) return self.options.nan # undefined state.update_integer_stats( ival, sign=sign, position=start_position ) n = state.options.make_int( ival, sign, number_format=NUMBER_FORMAT_LEGACYOCTAL ) return n # Determine the exponential part if exponent_s: try: exponent = int(exponent_s) except ValueError: state.push_error('Bad number, bad exponent', number, position=start_position) self.recover_parser(state) return undefined if esign == '-': exponent = - exponent else: exponent = 0 # Try to make an int/long first. if not saw_decimal_point and exponent >= 0: # ----- A DECIMAL INTEGER ival = int(units_s) if exponent != 0: ival *= 10**exponent state.update_integer_stats( ival, sign=sign, position=start_position ) n = state.options.make_int( ival, sign ) else: # ----- A FLOATING-POINT NUMBER try: if exponent < float_minexp or exponent > float_maxexp or sigdigits > float_sigdigits: n = state.options.make_decimal( number, sign ) else: n = state.options.make_float( number, sign ) except ValueError as err: state.push_error('Bad number, %s' % err.message, number, position=start_position) n = undefined else: state.update_float_stats( n, sign=sign, position=start_position ) return n def encode_number(self, n, state): """Encodes a Python numeric type into a JSON numeric literal. The special non-numeric values of float('nan'), float('inf') and float('-inf') are translated into appropriate JSON literals. Note that Python complex types are not handled, as there is no ECMAScript equivalent type. """ if isinstance(n, complex): if n.imag: raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part',n) n = n.real if isinstance(n, json_int): state.append( n.json_format() ) return if isinstance(n, (int,long)): state.append( str(n) ) return if decimal and isinstance(n, decimal.Decimal): if n.is_nan(): # Could be 'NaN' or 'sNaN' state.append( 'NaN' ) elif n.is_infinite(): if n.is_signed(): state.append( '-Infinity' ) else: state.append( 'Infinity' ) else: s = str(n).lower() if 'e' not in s and '.' not in s: s = s + '.0' state.append( s ) return global nan, inf, neginf if n is nan: state.append( 'NaN' ) elif n is inf: state.append( 'Infinity' ) elif n is neginf: state.append( '-Infinity' ) elif isinstance(n, float): # Check for non-numbers. # In python nan == inf == -inf, so must use repr() to distinguish reprn = repr(n).lower() if ('inf' in reprn and '-' in reprn) or n == neginf: state.append( '-Infinity' ) elif 'inf' in reprn or n is inf: state.append( 'Infinity' ) elif 'nan' in reprn or n is nan: state.append( 'NaN' ) else: # A normal float. state.append( repr(n) ) else: raise TypeError('encode_number expected an integral, float, or decimal number type',type(n)) def decode_string(self, state): """Intermediate-level decoder for JSON string literals. Takes a string and a starting index, and returns a Python string (or unicode string) and the index of the next unparsed character. 
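        A minimal usage sketch (decode_string is an intermediate-level
        method; input normally arrives through decode()):

            j = JSON()
            s = j.decode(' "a\\u0062c" ')    # a sketch; yields u'abc'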
""" buf = state.buf self.skipws(state) quote = buf.peek() if quote == '"': pass elif quote == "'": state.push_cond( self.options.single_quoted_strings, 'String literals must use double quotation marks in strict JSON' ) else: state.push_error('String literal must be properly quoted') return undefined string_position = buf.position buf.skip() if self.options.is_forbid_js_string_escapes: escapes = self._escapes_json else: escapes = self._escapes_js ccallowed = not self.options.is_forbid_control_char_in_string chunks = [] _append = chunks.append # Used to track the last seen high-surrogate character high_surrogate = None highsur_position = None # Used to track if errors occured so we don't keep reporting multiples had_lineterm_error = False # Start looping character by character until the final quotation mark saw_final_quote = False should_stop = False while not saw_final_quote and not should_stop: if buf.at_end: state.push_error("String literal is not terminated", outer_position=string_position, context='String') break c = buf.peek() # Make sure a high surrogate is immediately followed by a low surrogate if high_surrogate: if 0xdc00 <= ord(c) <= 0xdfff: low_surrogate = buf.pop() try: uc = helpers.surrogate_pair_as_unicode( high_surrogate, low_surrogate ) except ValueError as err: state.push_error( 'Illegal Unicode surrogate pair', (high_surrogate, low_surrogate), position=highsur_position, outer_position=string_position, context='String') should_stop = state.should_stop uc = u'\ufffd' # replacement char _append( uc ) high_surrogate = None highsur_position = None continue # ==== NEXT CHAR elif buf.peekstr(2) != '\\u': state.push_error('High unicode surrogate must be followed by a low surrogate', position=highsur_position, outer_position=string_position, context='String') should_stop = state.should_stop _append( u'\ufffd' ) # replacement char high_surrogate = None highsur_position = None if c == quote: buf.skip() # skip over closing quote saw_final_quote = True break elif c == '\\': # Escaped character escape_position = buf.position buf.skip() # skip over backslash c = buf.peek() if not c: state.push_error('Escape in string literal is incomplete', position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop break elif helpers.is_octal_digit(c): # Handle octal escape codes first so special \0 doesn't kick in yet. # Follow Annex B.1.2 of ECMAScript standard. 
if '0' <= c <= '3': maxdigits = 3 else: maxdigits = 2 digits = buf.popwhile( helpers.is_octal_digit, maxchars=maxdigits ) n = helpers.decode_octal(digits) if n == 0: state.push_cond( self.options.zero_byte, 'Zero-byte character (U+0000) in string may not be universally safe', "\\"+digits, position=escape_position, outer_position=string_position, context='String') else: # n != 0 state.push_cond( self.options.octal_numbers, "JSON does not allow octal character escapes other than \"\\0\"", "\\"+digits, position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop if n < 128: _append( chr(n) ) else: _append( helpers.safe_unichr(n) ) elif escapes.has_key(c): buf.skip() _append( escapes[c] ) elif c == 'u' or c == 'x': buf.skip() esc_opener = '\\' + c esc_closer = '' if c == 'u': if buf.peek() == '{': buf.skip() esc_opener += '{' esc_closer = '}' maxdigits = None state.push_cond( self.options.extended_unicode_escapes, "JSON strings do not allow \\u{...} escapes", position=escape_position, outer_position=string_position, context='String') else: maxdigits = 4 else: # c== 'x' state.push_cond( self.options.js_string_escapes, "JSON strings may not use the \\x hex-escape", position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop maxdigits = 2 digits = buf.popwhile( helpers.is_hex_digit, maxchars=maxdigits ) if esc_closer: if buf.peek() != esc_closer: state.push_error( "Unicode escape sequence is missing closing \'%s\'" % esc_closer, esc_opener+digits, position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop else: buf.skip() esc_sequence = esc_opener + digits + esc_closer if not digits: state.push_error('numeric character escape sequence is truncated', esc_sequence, position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop codepoint = 0xfffd # replacement char else: if maxdigits and len(digits) != maxdigits: state.push_error('escape sequence has too few hexadecimal digits', esc_sequence, position=escape_position, outer_position=string_position, context='String') codepoint = helpers.decode_hex( digits ) if codepoint > 0x10FFFF: state.push_error( 'Unicode codepoint is beyond U+10FFFF', esc_opener+digits+esc_closer, position=escape_position, outer_position=string_position, context='String') codepoint = 0xfffd # replacement char if high_surrogate: # Decode surrogate pair and clear high surrogate low_surrogate = unichr(codepoint) try: uc = helpers.surrogate_pair_as_unicode( high_surrogate, low_surrogate ) except ValueError as err: state.push_error( 'Illegal Unicode surrogate pair', (high_surrogate, low_surrogate), position=highsur_position, outer_position=string_position, context='String') should_stop = state.should_stop uc = u'\ufffd' # replacement char _append( uc ) high_surrogate = None highsur_position = None elif codepoint < 128: # ASCII chars always go in as a str if codepoint==0: state.push_cond( self.options.zero_byte, 'Zero-byte character (U+0000) in string may not be universally safe', position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop _append( chr(codepoint) ) elif 0xd800 <= codepoint <= 0xdbff: # high surrogate high_surrogate = unichr(codepoint) # remember until we get to the low surrogate highsur_position = escape_position.copy() elif 0xdc00 <= codepoint <= 0xdfff: # low surrogate state.push_error('Low unicode surrogate must be proceeded by a 
high surrogate', position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop _append( u'\ufffd' ) # replacement char else: # Other chars go in as a unicode char _append( helpers.safe_unichr(codepoint) ) else: # Unknown escape sequence state.push_cond( self.options.nonescape_characters, 'String escape code is not allowed in strict JSON', '\\'+c, position=escape_position, outer_position=string_position, context='String') should_stop = state.should_stop _append( c ) buf.skip() elif ord(c) <= 0x1f: # A control character if ord(c) == 0: state.push_cond( self.options.zero_byte, 'Zero-byte character (U+0000) in string may not be universally safe', position=buf.position, outer_position=string_position, context='String') should_stop = state.should_stop if self.islineterm(c): if not had_lineterm_error: state.push_error('Line terminator characters must be escaped inside string literals', 'U+%04X'%ord(c), position=buf.position, outer_position=string_position, context='String') should_stop = state.should_stop had_lineterm_error = True _append( c ) buf.skip() elif ccallowed: _append( c ) buf.skip() else: state.push_error('Control characters must be escaped inside JSON string literals', 'U+%04X'%ord(c), position=buf.position, outer_position=string_position, context='String') should_stop = state.should_stop buf.skip() elif 0xd800 <= ord(c) <= 0xdbff: # a raw high surrogate high_surrogate = buf.pop() # remember until we get to the low surrogate highsur_position = buf.position.copy() else: # A normal character; not an escape sequence or end-quote. # Find a whole sequence of "safe" characters so we can append them # all at once rather than one a time, for speed. chunk = buf.popwhile( lambda c: c not in helpers.unsafe_string_chars and c != quote ) if not chunk: _append( c ) buf.skip() else: _append( chunk ) # Check proper string termination if high_surrogate: state.push_error('High unicode surrogate must be followed by a low surrogate', position=highsur_position, outer_position=string_position, context='String') _append( u'\ufffd' ) # replacement char high_surrogate = None highsur_position = None if not saw_final_quote: state.push_error('String literal is not terminated with a quotation mark', position=buf.position, outer_position=string_position, context='String') if state.should_stop: return undefined # Compose the python string and update stats s = ''.join( chunks ) state.update_string_stats( s, position=string_position ) # Call string hook if self.has_hook('decode_string'): try: s = self.call_hook( 'decode_string', s, position=string_position ) except JSONSkipHook: pass except JSONError, err: state.push_exception(err) s = undefined return s def encode_string(self, s, state, quote=True): """Encodes a Python string into a JSON string literal. """ # Must handle instances of UserString specially in order to be # able to use ord() on it's simulated "characters". Also # convert Python2 'str' types to unicode strings first. import unicodedata, sys import UserString py2strenc = self.options.py2str_encoding if isinstance(s, UserString.UserString): def tochar(c): c2 = c.data if py2strenc and not isinstance(c2,unicode): return c2.decode( py2strenc ) else: return c2 elif py2strenc and not isinstance(s,unicode): s = s.decode( py2strenc ) tochar = None else: # Could use "lambda c:c", but that is too slow. So we set to None # and use an explicit if test inside the loop. 
tochar = None chunks = [] if quote: chunks.append('"') revesc = self._rev_escapes optrevesc = self._optional_rev_escapes asciiencodable = self._asciiencodable always_escape = state.options.always_escape_chars encunicode = state.escape_unicode_test i = 0 imax = len(s) while i < imax: if tochar: c = tochar(s[i]) else: c = s[i] cord = ord(c) if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool) \ and not (always_escape and c in always_escape): # Contiguous runs of plain old printable ASCII can be copied # directly to the JSON output without worry (unless the user # has supplied a custom is-encodable function). j = i i += 1 while i < imax: if tochar: c = tochar(s[i]) else: c = s[i] cord = ord(c) if cord < 256 and asciiencodable[cord] \ and not (always_escape and c in always_escape): i += 1 else: break chunks.append( unicode(s[j:i]) ) elif revesc.has_key(c): # Has a shortcut escape sequence, like "\n" chunks.append(revesc[c]) i += 1 elif cord <= 0x1F: # Always unicode escape ASCII-control characters chunks.append(r'\u%04x' % cord) i += 1 elif 0xD800 <= cord <= 0xDFFF: # A raw surrogate character! # This should ONLY happen in "narrow" Python builds # where (sys.maxunicode == 65535) as Python itself # uses UTF-16. But for "wide" Python builds, a raw # surrogate should never happen. handled_raw_surrogates = False if sys.maxunicode == 0xFFFF and 0xD800 <= cord <= 0xDBFF and (i+1) < imax: # In a NARROW Python, output surrogate pair as-is hsurrogate = cord i += 1 if tochar: c = tochar(s[i]) else: c = s[i] cord = ord(c) i += 1 if 0xDC00 <= cord <= 0xDFFF: lsurrogate = cord chunks.append(r'\u%04x\u%04x' % (hsurrogate,lsurrogate)) handled_raw_surrogates = True if not handled_raw_surrogates: cname = 'U+%04X' % cord raise JSONEncodeError('can not include or escape a Unicode surrogate character',cname) elif cord <= 0xFFFF: # Other BMP Unicode character if always_escape and c in always_escape: doesc = True elif unicodedata.category( c ) in ['Cc','Cf','Zl','Zp']: doesc = True elif callable(encunicode): doesc = encunicode( c ) else: doesc = encunicode if doesc: if optrevesc.has_key(c): chunks.append(optrevesc[c]) else: chunks.append(r'\u%04x' % cord) else: chunks.append( c ) i += 1 else: # ord(c) >= 0x10000 # Non-BMP Unicode if always_escape and c in always_escape: doesc = True elif unicodedata.category( c ) in ['Cc','Cf','Zl','Zp']: doesc = True elif callable(encunicode): doesc = encunicode( c ) else: doesc = encunicode if doesc: for surrogate in helpers.unicode_as_surrogate_pair(c): chunks.append(r'\u%04x' % ord(surrogate)) else: chunks.append( c ) i += 1 if quote: chunks.append('"') state.append( ''.join( chunks ) ) def decode_identifier(self, state, identifier_as_string=False): """Decodes an identifier/keyword. 
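        A sketch of the mapping implemented below (default options):
        'null' -> None, 'true'/'false' -> Python booleans, 'undefined'
        -> the undefined singleton, 'NaN'/'Infinity' -> the configured
        non-number values, and any other identifier either becomes a
        string (when identifier_as_string is True) or is reported as
        an error.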
""" buf = state.buf self.skipws(state) start_position = buf.position obj = None kw = buf.pop_identifier() if not kw: state.push_error("Expected an identifier", position=start_position) elif kw == 'null': obj = None state.stats.num_nulls += 1 elif kw == 'true': obj = True state.stats.num_bools += 1 elif kw == 'false': obj = False state.stats.num_bools += 1 elif kw == 'undefined': state.push_cond( self.options.undefined_values, "Strict JSON does not allow the 'undefined' keyword", kw, position=start_position) obj = undefined state.stats.num_undefineds += 1 elif kw == 'NaN' or kw == 'Infinity': state.push_cond( self.options.non_numbers, "%s literals are not allowed in strict JSON" % kw, kw, position=start_position) if self.has_hook('decode_float'): try: val = self.call_hook( 'decode_float', kw, position=start_position ) except JSONSkipHook: pass except JSONError, err: state.push_exception(err) return undefined else: return val elif self.has_hook('decode_number'): try: val = self.call_hook( 'decode_number', kw, position=start_position ) except JSONSkipHook: pass except JSONError, err: state.push_exception(err) return undefined else: return val if kw == 'NaN': state.stats.num_nans += 1 obj = state.options.nan else: state.stats.num_infinities += 1 obj = state.options.inf else: # Convert unknown identifiers into strings if identifier_as_string: if kw in helpers.javascript_reserved_words: state.push_warning( "Identifier is a JavaScript reserved word", kw, position=start_position) state.push_cond( self.options.identifier_keys, "JSON does not allow identifiers to be used as strings", kw, position=start_position) state.stats.num_identifiers += 1 obj = self.decode_javascript_identifier( kw ) else: state.push_error("Unknown identifier", kw, position=start_position) obj = undefined state.stats.num_identifiers += 1 return obj def skip_comment(self, state): """Skips an ECMAScript comment, either // or /* style. The contents of the comment are returned as a string, as well as the index of the character immediately after the comment. """ buf = state.buf uniws = self.options.unicode_whitespace s = buf.peekstr(2) if s != '//' and s != '/*': return None state.push_cond( self.options.comments, 'Comments are not allowed in strict JSON' ) start_position = buf.position buf.skip(2) multiline = (s == '/*') saw_close = False while not buf.at_end: if multiline: if buf.peekstr(2) == '*/': buf.skip(2) saw_close = True break elif buf.peekstr(2) == '/*': state.push_error('Multiline /* */ comments may not nest', outer_position=start_position, context='Comment') else: if buf.at_eol( uniws ): buf.skip_to_next_line( uniws ) saw_close = True break buf.pop() if not saw_close and multiline: state.push_error('Comment was never terminated', outer_position=start_position, context='Comment') state.stats.num_comments += 1 def skipws_nocomments(self, state): """Skips whitespace (will not allow comments). """ return state.buf.skipws( not self.options.is_forbid_unicode_whitespace ) def skipws(self, state): """Skips all whitespace, including comments and unicode whitespace Takes a string and a starting index, and returns the index of the next non-whitespace character. If the 'skip_comments' behavior is True and not running in strict JSON mode, then comments will be skipped over just like whitespace. 
""" buf = state.buf uniws = not self.options.unicode_whitespace while not buf.at_end: c = buf.peekstr(2) if c == '/*' or c == '//': cmt = self.skip_comment( state ) elif buf.at_ws( uniws ): buf.skipws( uniws ) else: break def decode_composite(self, state): """Intermediate-level JSON decoder for composite literal types (array and object). """ if state.should_stop: return None buf = state.buf self.skipws(state) opener = buf.peek() if opener not in '{[': state.push_error('Composite data must start with "[" or "{"') return None start_position = buf.position buf.skip() if opener == '[': isdict = False closer = ']' obj = [] else: isdict = True closer = '}' if state.options.sort_keys == SORT_PRESERVE and _OrderedDict: obj = _OrderedDict() else: obj = {} num_items = 0 self.skipws(state) c = buf.peek() if c == closer: # empty composite buf.skip() done = True else: saw_value = False # set to false at beginning and after commas done = False while not done and not buf.at_end and not state.should_stop: self.skipws(state) c = buf.peek() if c == '': break # will report error futher down because done==False elif c == ',': if not saw_value: # no preceeding value, an elided (omitted) element if isdict: state.push_error('Can not omit elements of an object (dictionary)', outer_position=start_position, context='Object') else: state.push_cond( self.options.omitted_array_elements, 'Can not omit elements of an array (list)', outer_position=start_position, context='Array') obj.append( undefined ) if state.stats: state.stats.num_undefineds += 1 buf.skip() # skip over comma saw_value = False continue elif c == closer: if not saw_value: if isdict: state.push_cond( self.options.trailing_comma, 'Strict JSON does not allow a final comma in an object (dictionary) literal', outer_position=start_position, context='Object') else: state.push_cond( self.options.trailing_comma, 'Strict JSON does not allow a final comma in an array (list) literal', outer_position=start_position, context='Array') buf.skip() # skip over closer done = True break elif c in ']}': if isdict: cdesc='Object' else: cdesc='Array' state.push_error("Expected a '%c' but saw '%c'" % (closer,c), outer_position=start_position, context=cdesc) done = True break if state.should_stop: break # Decode the item/value value_position = buf.position if isdict: val = self.decodeobj(state, identifier_as_string=True) else: val = self.decodeobj(state, identifier_as_string=False) if val is syntax_error: recover_c = self.recover_parser(state) if recover_c not in ':': continue if state.should_stop: break if saw_value: # Two values without a separating comma if isdict: cdesc='Object' else: cdesc='Array' state.push_error('Values must be separated by a comma', position=value_position, outer_position=start_position, context=cdesc) saw_value = True self.skipws(state) if state.should_stop: break if isdict: skip_item = False key = val # Ref 11.1.5 key_position = value_position if not helpers.isstringtype(key): if helpers.isnumbertype(key): state.push_cond( self.options.nonstring_keys, 'JSON only permits string literals as object properties (keys)', position=key_position, outer_position=start_position, context='Object') else: state.push_error('Object properties (keys) must be string literals, numbers, or identifiers', position=key_position, outer_position=start_position, context='Object') skip_item = True c = buf.peek() if c != ':': state.push_error('Missing value for object property, expected ":"', position=value_position, outer_position=start_position, context='Object') buf.skip() # 
skip over colon self.skipws(state) rval = self.decodeobj(state) self.skipws(state) if not skip_item: if key in obj: state.push_cond( self.options.duplicate_keys, 'Object contains duplicate key', key, position=key_position, outer_position=start_position, context='Object') if key == '': state.push_cond( self.options.non_portable, 'Using an empty string "" as an object key may not be portable', position=key_position, outer_position=start_position, context='Object') obj[ key ] = rval num_items += 1 else: # islist obj.append( val ) num_items += 1 # end while if state.stats: if isdict: state.stats.max_items_in_object = max(state.stats.max_items_in_object, num_items) else: state.stats.max_items_in_array = max(state.stats.max_items_in_array, num_items) if state.should_stop: return obj # Make sure composite value is properly terminated if not done: if isdict: state.push_error('Object literal (dictionary) is not terminated', outer_position=start_position, context='Object') else: state.push_error('Array literal (list) is not terminated', outer_position=start_position, context='Array') # Update stats and run hooks if isdict: state.stats.num_objects += 1 if self.has_hook('decode_object'): try: obj = self.call_hook( 'decode_object', obj, position=start_position ) except JSONSkipHook: pass except JSONError, err: state.push_exception(err) obj = undefined else: state.stats.num_arrays += 1 if self.has_hook('decode_array'): try: obj = self.call_hook( 'decode_array', obj, position=start_position ) except JSONSkipHook: pass except JSONError, err: state.push_exception(err) obj = undefined return obj def decode_javascript_identifier(self, name): """Convert a JavaScript identifier into a Python string object. This method can be overriden by a subclass to redefine how JavaScript identifiers are turned into Python objects. By default this just converts them into strings. """ return name def decodeobj(self, state, identifier_as_string=False, at_document_start=False): """Intermediate-level JSON decoder. Takes a string and a starting index, and returns a two-tuple consting of a Python object and the index of the next unparsed character. If there is no value at all (empty string, etc), then None is returned instead of a tuple. """ buf = state.buf obj = None self.skipws(state) if buf.at_end: state.push_error('Unexpected end of input') c = buf.peek() if c in '{[': state.cur_depth += 1 try: state.update_depth_stats() obj = self.decode_composite(state) finally: state.cur_depth -= 1 else: if at_document_start: state.push_cond( self.options.any_type_at_start, 'JSON document must start with an object or array type only' ) if c in self._string_quotes: obj = self.decode_string(state) elif c.isdigit() or c in '.+-': obj = self.decode_number(state) elif c.isalpha() or c in'_$': obj = self.decode_identifier(state, identifier_as_string=identifier_as_string) else: state.push_error('Can not decode value starting with character %r' % c) buf.skip() self.recover_parser(state) obj = syntax_error return obj def decode(self, txt, encoding=None, return_errors=False, return_stats=False): """Decodes a JSON-encoded string into a Python object. The 'return_errors' parameter controls what happens if the input JSON has errors in it. * False: the first error will be raised as a Python exception. If there are no errors then the corresponding Python object will be returned. 
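          (For instance, as a sketch: JSON().decode('[1,2]') returns
          the list [1, 2], while malformed input raises the first
          collected JSONDecodeError.)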
            * True: the return value is always a 2-tuple: (object, error_list)

        """
        import sys
        state = decode_state( options=self.options )

        # Prepare the input
        state.set_input( txt, encoding=encoding )

        # Do the decoding
        if not state.has_errors:
            self.__sanity_check_start( state )

        if not state.has_errors:
            try:
                self._do_decode( state )   # DECODE!
            except JSONException, err:
                state.push_exception( err )
            except Exception, err:
                # Mainly here to catch maximum recursion depth exceeded
                e2 = sys.exc_info()
                newerr = JSONDecodeError("An unexpected failure occurred", severity='fatal', position=state.buf.position)
                newerr.__cause__ = err
                newerr.__traceback__ = e2[2]
                state.push_exception( newerr )

        if return_stats and state.buf:
            state.stats.num_excess_whitespace = state.buf.num_ws_skipped
            state.stats.total_chars = state.buf.position.char_position

        # Handle the errors
        result_type = _namedtuple('json_results',['object','errors','stats'])
        if return_errors:
            if return_stats:
                return result_type(state.obj, state.errors, state.stats)
            else:
                return result_type(state.obj, state.errors, None)
        else:
            # Don't cause warnings to raise an error
            errors = [err for err in state.errors if err.severity in ('fatal','error')]
            if errors:
                raise errors[0]
            if return_stats:
                return result_type(state.obj, None, state.stats)
            else:
                return state.obj

    def __sanity_check_start(self, state):
        """Check that the document seems sane by looking at the first couple characters.

        Check that the decoding seems sane.  Per RFC 4627 section 3:
            "Since the first two characters of a JSON text will
            always be ASCII characters [RFC0020], ..."
        [WAS removed from RFC 7158, but still valid via the grammar.]

        This check is probably not necessary, but it allows us to
        raise a suitably descriptive error rather than an obscure
        syntax error later on.

        Note that the RFC requirement of two ASCII characters seems to
        be an incorrect statement, as a JSON string literal may have
        any unicode character as its first character.  Thus the first
        two characters will always be ASCII, unless the first character
        is a quotation mark.  And in non-strict mode we can also have a
        few other characters too.

        """
        is_sane = True
        unitxt = state.buf.peekstr(2)
        if len(unitxt) >= 2:
            first, second = unitxt[:2]
            if first in self._string_quotes:
                pass # second can be anything inside string literal
            else:
                if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
                    (ord(second) < 0x20 or ord(second) > 0x7f)) and \
                    (not self.isws(first) and not self.isws(second)):
                    # Found non-printable ascii, must check unicode
                    # categories to see if the character is legal.
                    # Only whitespace, line and paragraph separators,
                    # and format control chars are legal here.
                    import unicodedata
                    catfirst = unicodedata.category(unicode(first))
                    catsecond = unicodedata.category(unicode(second))
                    if catfirst not in ('Zs','Zl','Zp','Cf') or \
                           catsecond not in ('Zs','Zl','Zp','Cf'):
                        state.push_fatal( 'The input is gibberish, is the Unicode encoding correct?' )
        return is_sane

    def _do_decode(self, state):
        """This is the internal function that does the JSON decoding.

        Called by the decode() method, after it has performed any Unicode decoding, etc.
""" buf = state.buf self.skipws(state) if buf.at_end: state.push_error('No value to decode') else: if state.options.decimal_context: dec_ctx = decimal.localcontext( state.options.decimal_context ) else: dec_ctx = _dummy_context_manager with dec_ctx: state.obj = self.decodeobj(state, at_document_start=True ) if not state.should_stop: # Make sure there's nothing at the end self.skipws(state) if not buf.at_end: state.push_error('Unexpected text after end of JSON value') def _classify_for_encoding( self, obj ): import datetime c = 'other' if obj is None: c = 'null' elif obj is undefined: c = 'undefined' elif isinstance(obj,bool): c = 'bool' elif isinstance(obj, (int,long,float,complex)) or\ (decimal and isinstance(obj, decimal.Decimal)): c = 'number' elif isinstance(obj, basestring) or helpers.isstringtype(obj): c = 'string' else: if isinstance(obj,dict): c = 'dict' elif isinstance(obj,tuple) and hasattr(obj,'_asdict') and callable(obj._asdict): # Have a named tuple enc_nt = self.options.encode_namedtuple_as_object if enc_nt and (enc_nt is True or (callable(enc_nt) and enc_nt(obj))): c = 'namedtuple' else: c = 'sequence' elif isinstance(obj, (list,tuple,set,frozenset)): c = 'sequence' elif hasattr(obj,'iterkeys') or (hasattr(obj,'__getitem__') and hasattr(obj,'keys')): c = 'dict' elif isinstance(obj, datetime.datetime): # Check datetime before date because it is a subclass! c = 'datetime' elif isinstance(obj, datetime.date): c = 'date' elif isinstance(obj, datetime.time): c = 'time' elif isinstance(obj, datetime.timedelta): c = 'timedelta' elif _py_major >= 3 and isinstance(obj,(bytes,bytearray)): c = 'bytes' elif _py_major >= 3 and isinstance(obj,memoryview): c = 'memoryview' elif _enum is not None and isinstance(obj,_enum): c = 'enum' else: c = 'other' return c def encode(self, obj, encoding=None ): """Encodes the Python object into a JSON string representation. This method will first attempt to encode an object by seeing if it has a json_equivalent() method. If so than it will call that method and then recursively attempt to encode the object resulting from that call. Next it will attempt to determine if the object is a native type or acts like a squence or dictionary. If so it will encode that object directly. Finally, if no other strategy for encoding the object of that type exists, it will call the encode_default() method. That method currently raises an error, but it could be overridden by subclasses to provide a hook for extending the types which can be encoded. """ import sys, codecs # Make a fresh encoding state state = encode_state( self.options ) # Find the codec to use. CodecInfo will be in 'cdk' and name in 'encoding'. # # Also set the state's 'escape_unicode_test' property which is used to # determine what characters to \u-escape. 
        if encoding is None:
            cdk = None
        elif isinstance(encoding, codecs.CodecInfo):
            cdk = encoding
            encoding = cdk.name
        else:
            cdk = helpers.lookup_codec( encoding )
            if not cdk:
                raise JSONEncodeError('no codec available for character encoding',encoding)

        if self.options.escape_unicode and callable(self.options.escape_unicode):
            # User-supplied repertoire test function
            state.escape_unicode_test = self.options.escape_unicode
        else:
            if self.options.escape_unicode==True or not cdk or cdk.name.lower() == 'ascii':
                # Forced escaping, ASCII, or an unknown codec -- \u escape anything not ASCII
                state.escape_unicode_test = lambda c: ord(c) >= 0x80
            elif cdk.name == 'iso8859-1':
                state.escape_unicode_test = lambda c: ord(c) >= 0x100
            elif cdk and cdk.name.lower().startswith('utf'):
                # All UTF-x encodings can do the whole Unicode repertoire, so
                # do nothing special.
                state.escape_unicode_test = False
            else:
                # An unusual codec.  We need to test every character
                # to see if it is in the codec's repertoire to determine
                # if we should \u escape that character.
                enc_func = cdk.encode
                def escape_unicode_hardway( c ):
                    try:
                        enc_func( c )
                    except UnicodeEncodeError:
                        return True
                    else:
                        return False
                state.escape_unicode_test = escape_unicode_hardway

        # Make sure the encoding is not degenerate: check that it can encode
        # the minimal number of characters needed by the JSON syntax rules.
        if encoding is not None:
            try:
                output, nchars = cdk.encode( JSON.json_syntax_characters )
            except UnicodeError, err:
                raise JSONEncodeError("Output encoding %s is not sufficient to encode JSON" % cdk.name)

        # Do the JSON encoding!
        self._do_encode( obj, state )
        if not self.options.encode_compactly:
            state.append('\n')
        unitxt = state.combine()

        # Do the final Unicode encoding
        if encoding is None:
            output = unitxt
        else:
            try:
                output, nchars = cdk.encode( unitxt )
            except UnicodeEncodeError, err:
                # Re-raise as a JSONEncodeError
                e2 = sys.exc_info()
                newerr = JSONEncodeError("a Unicode encoding error occurred")
                # Simulate Python 3's: "raise X from Y" exception chaining
                newerr.__cause__ = err
                newerr.__traceback__ = e2[2]
                raise newerr
        return output

    def _do_encode(self, obj, state, quoteStrings=True):
        """Internal encode function."""
        obj_classification = self._classify_for_encoding( obj )

        if self.has_hook('encode_value'):
            orig_obj = obj
            try:
                obj = self.call_hook( 'encode_value', obj )
            except JSONSkipHook:
                pass

            if obj is not orig_obj:
                prev_cls = obj_classification
                obj_classification = self._classify_for_encoding( obj )
                if obj_classification != prev_cls:
                    # Got a different type of object, re-encode again
                    self._do_encode( obj, state )
                    return

        if hasattr(obj, 'json_equivalent'):
            success = self.encode_equivalent( obj, state )
            if success:
                return

        if obj_classification == 'null':
            self.encode_null( state )
        elif obj_classification == 'undefined':
            if not self.options.is_forbid_undefined_values:
                self.encode_undefined( state )
            else:
                raise JSONEncodeError('strict JSON does not permit "undefined" values')
        elif obj_classification == 'bool':
            self.encode_boolean( obj, state )
        elif obj_classification == 'number':
            try:
                self.encode_number( obj, state )
            except JSONEncodeError, err1:
                # Bad number, probably a complex with non-zero imaginary part.
                # Let the default encoders take a shot at encoding.
                try:
                    self.try_encode_default(obj, state)
                except Exception, err2:
                    # Default handlers couldn't deal with it, re-raise original exception.
                    raise err1
        elif obj_classification == 'string':
            self.encode_string( obj, state, quoteStrings)
        elif obj_classification == 'enum': # Python 3.4 enum.Enum
            self.encode_enum( obj, state )
        elif obj_classification == 'datetime': # Python datetime.datetime
            self.encode_datetime( obj, state )
        elif obj_classification == 'date': # Python datetime.date
            self.encode_date( obj, state )
        elif obj_classification == 'time': # Python datetime.time
            self.encode_time( obj, state )
        elif obj_classification == 'timedelta': # Python datetime.timedelta
            self.encode_timedelta( obj, state )
        else:
            # Anything left is probably composite, or an unconvertible type.
            self.encode_composite( obj, state )

    def encode_enum(self, val, state):
        """Encode a Python Enum value into JSON."""
        eas = self.options.encode_enum_as
        if eas == 'qname':
            self.encode_string( str(val), state )
        elif eas == 'value':
            self._do_encode( val.value, state )
        else: # eas == 'name'
            self.encode_string( val.name, state )

    def encode_date(self, dt, state):
        fmt = self.options.date_format
        if not fmt or fmt == 'iso':
            fmt = '%Y-%m-%d'
        self.encode_string( dt.strftime(fmt), state )

    def encode_datetime(self, dt, state):
        fmt = self.options.datetime_format
        is_iso = not fmt or fmt == 'iso'
        if is_iso:
            if dt.microsecond == 0:
                fmt = '%Y-%m-%dT%H:%M:%S%z'
            else:
                fmt = '%Y-%m-%dT%H:%M:%S.%f%z'
        s = dt.strftime(fmt)
        if is_iso and (s.endswith('-00:00') or s.endswith('+00:00')):
            s = s[:-6] + 'Z'   # Change UTC to use 'Z' notation
        self.encode_string( s, state )

    def encode_time(self, t, state):
        fmt = self.options.datetime_format
        is_iso = not fmt or fmt == 'iso'
        if is_iso:
            if t.microsecond == 0:
                fmt = 'T%H:%M:%S%z'
            else:
                fmt = 'T%H:%M:%S.%f%z'
        s = t.strftime(fmt)
        if is_iso and (s.endswith('-00:00') or s.endswith('+00:00')):
            s = s[:-6] + 'Z'   # Change UTC to use 'Z' notation
        self.encode_string( s, state )

    def encode_timedelta(self, td, state):
        fmt = self.options.timedelta_format
        if not fmt or fmt == 'iso':
            s = helpers.format_timedelta_iso( td )
        elif fmt == 'hms':
            s = str(td)
        else:
            raise ValueError("Unknown timedelta_format %r" % fmt)
        self.encode_string( s, state )

    def encode_composite(self, obj, state, obj_classification=None):
        """Encodes just composite objects: dictionaries, lists, or sequences.

        Basically handles any python type for which iter() can create
        an iterator object.

        This method is not intended to be called directly.  Use the
        encode() method instead.

        """
        import sys
        if not obj_classification:
            obj_classification = self._classify_for_encoding(obj)

        # Convert namedtuples to dictionaries
        if obj_classification == 'namedtuple':
            obj = obj._asdict()
            obj_classification = 'dict'

        # Convert 'unsigned byte' memory views into plain bytes
        if obj_classification == 'memoryview' and obj.format == 'B':
            obj = obj.tobytes()
            obj_classification = 'bytes'

        # Run hooks
        hook_name = None
        if obj_classification == 'dict':
            hook_name = 'encode_dict'
        elif obj_classification == 'sequence':
            hook_name = 'encode_sequence'
        elif obj_classification == 'bytes':
            hook_name = 'encode_bytes'

        if self.has_hook(hook_name):
            try:
                new_obj = self.call_hook( hook_name, obj )
            except JSONSkipHook:
                pass
            else:
                if new_obj is not obj:
                    obj = new_obj
                    prev_cls = obj_classification
                    obj_classification = self._classify_for_encoding( obj )
                    if obj_classification != prev_cls:
                        # Transformed to a different kind of object, call
                        # back to the general encode() method.
                        self._do_encode( obj, state )
                        return
            # Else, fall through

        # At this point we have decided to output either an object or an array
        isdict = (obj_classification == 'dict')

        # Get iterator
        it = None
        if isdict and hasattr(obj,'iterkeys'):
            try:
                it = obj.iterkeys()
            except AttributeError:
                pass
        else:
            try:
                it = iter(obj)
            except TypeError:
                pass

        # Convert each member to JSON
        if it is not None:
            # Try to get length, but don't fail if we can't
            try:
                numitems = len(obj)
            except TypeError:
                numitems = 0

            # Output the opening bracket or brace
            compactly = self.options.encode_compactly
            if not compactly:
                indent0 = self.options.indentation_for_level( state.nest_level )
                indent = self.options.indentation_for_level( state.nest_level+1 )

            spaces_after_opener = ''
            if isdict:
                opener = '{'
                closer = '}'
                if compactly:
                    dictcolon = ':'
                else:
                    dictcolon = ' : '
            else:
                opener = '['
                closer = ']'
                if not compactly:
                    #opener = opener + ' '
                    spaces_after_opener = self.options.spaces_to_next_indent_level(subtract=len(opener))

            state.append( opener )
            state.append( spaces_after_opener )

            # Now iterate through all the items and collect their representations
            parts = []      # Collects each of the members
            part_keys = []  # For dictionary key sorting, tuples (key,index)
            try: # while not StopIteration
                part_idx = 0
                while True:
                    obj2 = it.next()
                    part_idx += 1   # Note, will start counting at 1
                    if obj2 is obj:
                        raise JSONEncodeError('trying to encode an infinite sequence',obj)
                    if isdict:
                        obj3 = obj[obj2]
                        # Dictionary key is in obj2 and value in obj3.

                        # Let any hooks transform the key.
                        if self.has_hook('encode_value'):
                            try:
                                newobj = self.call_hook( 'encode_value', obj2 )
                            except JSONSkipHook:
                                pass
                            else:
                                obj2 = newobj
                        if self.has_hook('encode_dict_key'):
                            try:
                                newkey = self.call_hook( 'encode_dict_key', obj2 )
                            except JSONSkipHook:
                                pass
                            else:
                                obj2 = newkey

                        # Check JSON restrictions on key types
                        if not helpers.isstringtype(obj2):
                            if helpers.isnumbertype(obj2):
                                if not self.options.is_allow_nonstring_keys:
                                    raise JSONEncodeError('object properties (dictionary keys) must be strings in strict JSON',obj2)
                            else:
                                raise JSONEncodeError('object properties (dictionary keys) can only be strings or numbers in ECMAScript',obj2)
                        part_keys.append( (obj2, part_idx-1) )

                    # Encode this item in the sequence and put into 'parts'
                    substate = state.make_substate()
                    self._do_encode( obj2, substate, self.options.encode_quoted_property_names )
                    if isdict:
                        substate.append( dictcolon )
                        substate2 = substate.make_substate()
                        self._do_encode( obj3, substate2 )
                        substate.join_substate( substate2 )
                    parts.append( substate )
                    # Next item iteration
            except StopIteration:
                pass

            # Sort dictionary keys
            if isdict:
                srt = self.options.sort_keys
                if srt == SORT_PRESERVE:
                    if _OrderedDict and isinstance(obj,_OrderedDict):
                        srt = SORT_NONE   # Will keep order
                    else:
                        srt = SORT_SMART
                if not srt or srt in (SORT_NONE, SORT_PRESERVE):
                    srt = None
                elif callable(srt):
                    part_keys.sort( key=(lambda t: (srt(t[0]),t[0])) )
                elif srt == SORT_SMART:
                    part_keys.sort( key=(lambda t: (smart_sort_transform(t[0]),t[0])) )
                elif srt == SORT_ALPHA_CI:
                    part_keys.sort( key=(lambda t: (unicode(t[0]).upper(),t[0])) )
                elif srt == SORT_ALPHA:
                    part_keys.sort( key=(lambda t: unicode(t[0])) )
                # Now make parts match the new sort order
                if srt is not None:
                    parts = [parts[pk[1]] for pk in part_keys]

            if compactly:
                sep = ','
            elif len(parts) <= self.options.max_items_per_line:
                sep = ', '
            else:
                #state.append(spaces_after_opener)
                state.append('\n' + indent)
                sep = ',\n' + indent

            for pnum, substate in enumerate(parts):
                if pnum > 0:
                    state.append( sep )
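                # Emit this member's accumulated JSON text after any separator.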
                state.join_substate( substate )

            if not compactly:
                if numitems > self.options.max_items_per_line:
                    state.append('\n' + indent0)
                else:
                    state.append(' ')
            state.append(closer)   # final '}' or ']'
        else: # Can't create an iterator for the object
            self.try_encode_default( obj, state )

    def encode_equivalent( self, obj, state ):
        """This method is used to encode user-defined class objects.

        The object being encoded should have a json_equivalent()
        method defined which returns another equivalent object which
        is easily JSON-encoded.  If the object in question has no
        json_equivalent() method available then False is returned so
        that the encoding will attempt the next strategy.

        If a caller wishes to disable the calling of json_equivalent()
        methods, then subclass this class and override this method
        to just return False.

        """
        if hasattr(obj, 'json_equivalent') \
               and callable(getattr(obj,'json_equivalent')):
            obj2 = obj.json_equivalent()
            if obj2 is obj:
                # Try to prevent careless infinite recursion
                raise JSONEncodeError('object has a json_equivalent() method that returns itself',obj)
            self._do_encode( obj2, state )
            return True
        else:
            return False

    def try_encode_default( self, obj, state ):
        orig_obj = obj
        if self.has_hook('encode_default'):
            try:
                obj = self.call_hook( 'encode_default', obj )
            except JSONSkipHook:
                pass
            else:
                if obj is not orig_obj:
                    # Hook made a transformation, re-encode it
                    return self._do_encode( obj, state )

        # End of the road.
        raise JSONEncodeError('can not encode object into a JSON representation',obj)


# ------------------------------

def encode( obj, encoding=None, **kwargs ):
    r"""Encodes a Python object into a JSON-encoded string.

    * 'strict' (Boolean, default False)

        If 'strict' is set to True, then only strictly-conforming JSON
        output will be produced.  Note that this means that some types
        of values may not be convertible and will result in a
        JSONEncodeError exception.

    * 'compactly' (Boolean, default True)

        If 'compactly' is set to True, then the resulting string will
        have all extraneous white space removed; if False then the
        string will be "pretty printed" with whitespace and
        indentation added to make it more readable.

    * 'encode_namedtuple_as_object' (Boolean or callable, default True)

        If True, then objects of type namedtuple, or subclasses of
        'tuple' that have an _asdict() method, will be encoded as an
        object rather than an array.

        It can also be a predicate function that takes a namedtuple
        object as an argument and returns True or False.

    * 'indent_amount' (Integer, default 2)

        The number of spaces to output for each indentation level.
        If 'compactly' is True then indentation is ignored.

    * 'indent_limit' (Integer or None, default None)

        If not None, then this is the maximum limit of indentation
        levels, after which further indentation spaces are not
        inserted.  If None, then there is no limit.

    CONCERNING CHARACTER ENCODING:

    The 'encoding' argument should be one of:

        * None - The return will be a Unicode string.
        * encoding_name - A string which is the name of a known
              encoding, such as 'UTF-8' or 'ascii'.
        * codec - A CodecInfo object, such as found by codecs.lookup().
              This allows you to use a custom codec as well as those
              built into Python.

    If an encoding is given (either by name or by codec), then the
    returned value will be a byte array (Python 3), or a 'str' string
    (Python 2); which represents the raw set of bytes.  Otherwise,
    if encoding is None, then the returned value will be a Unicode
    string.
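
    For example (an illustrative sketch; assumes this module is
    imported as 'demjson'):

        as_text  = demjson.encode( {'a': 1} )                    # Unicode string
        as_bytes = demjson.encode( {'a': 1}, encoding='utf-8' )  # raw byte string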
    The 'escape_unicode' argument is used to determine which characters
    in string literals must be \u escaped.  Should be one of:

        * True -- All non-ASCII characters are always \u escaped.
        * False -- Try to insert actual Unicode characters if possible.
        * function -- A user-supplied function that accepts a single
             unicode character and returns True or False; where True
             means to \u escape that character.

    Regardless of escape_unicode, certain characters will always be
    \u escaped.  Additionally any characters not in the output encoding
    repertoire for the encoding codec will be \u escaped as well.

    """
    # Do the JSON encoding
    j = JSON( **kwargs )
    output = j.encode( obj, encoding )
    return output


def decode( txt, encoding=None, **kwargs ):
    """Decodes a JSON-encoded string into a Python object.

    == Optional arguments ==

    * 'encoding' (string, default None)

        This argument provides a hint regarding the character encoding
        that the input text is assumed to be in (if it is not already a
        unicode string type).

        If set to None then autodetection of the encoding is attempted
        (see discussion above).  Otherwise this argument should be the
        name of a registered codec (see the standard 'codecs' module).

    * 'strict' (Boolean, default False)

        If 'strict' is set to True, then those strings that are not
        entirely strictly conforming to JSON will result in a
        JSONDecodeError exception.

    * 'return_errors' (Boolean, default False)

        Controls the return value from this function.  If False, then
        only the Python equivalent object is returned on success, or
        an error will be raised as an exception.

        If True then a 2-tuple is returned: (object, error_list).  The
        error_list will be an empty list [] if the decoding was
        successful, otherwise it will be a list of all the errors
        encountered.  Note that it is possible for an object to be
        returned even if errors were encountered.

    * 'return_stats' (Boolean, default False)

        Controls whether statistics about the decoded JSON document
        are returned (an instance of decode_statistics).

        If True, then the stats object will be added to the end of the
        tuple returned.  If return_errors is also set then a 3-tuple
        is returned, otherwise a 2-tuple is returned.

    * 'write_errors' (Boolean OR File-like object, default False)

        Controls what to do with errors.

        - If False, then the first decoding error is raised as an exception.
        - If True, then errors will be printed out to sys.stderr.
        - If a File-like object, then errors will be printed to that file.

        The write_errors and return_errors arguments can be set
        independently.

    * 'filename_for_errors' (string or None)

        Provides a filename to be used when writing error messages.

    * 'allow_xxx', 'warn_xxx', and 'forbid_xxx' (Booleans)

        These arguments allow for fine adjustments to be made to the
        'strict' argument, by allowing or forbidding specific
        syntaxes.

        There are many of these arguments, named by replacing the
        "xxx" with any number of possible behavior names (See the JSON
        class for more details).

        Each of these will allow (or forbid) the specific behavior,
        after the evaluation of the 'strict' argument.  For example,
        if strict=True then by also passing 'allow_comments=True' then
        comments will be allowed.  If strict=False then
        forbid_comments=True will allow everything except comments.

    Unicode decoding:
    -----------------
    The input string can be either a python string or a python unicode
    string (or a byte array in Python 3).  If it is already a unicode
    string, then it is assumed that no character set decoding is
    required.
    However, if you pass in a non-Unicode text string (a Python 2
    'str' type or a Python 3 'bytes' or 'bytearray') then an attempt
    will be made to auto-detect and decode the character encoding.
    This will be successful if the input was encoded in any of UTF-8,
    UTF-16 (BE or LE), or UTF-32 (BE or LE), and of course plain ASCII
    works too.

    Note though that if you know the character encoding, then you
    should convert to a unicode string yourself, or pass in the name
    of the 'encoding' to avoid the guessing made by the auto
    detection, as with

        python_object = demjson.decode( input_bytes, encoding='utf8' )

    Callback hooks:
    ---------------
    You may supply callback hooks by using the hook name as the
    named argument, such as:

        decode_float=decimal.Decimal

    See the hooks documentation on the JSON.set_hook() method.

    """
    import sys
    # Initialize the JSON object
    return_errors = False
    return_stats = False
    write_errors = False
    filename_for_errors = None
    write_stats = False

    kwargs = kwargs.copy()
    todel = []
    for kw,val in kwargs.items():
        if kw == "return_errors":
            return_errors = bool(val)
            todel.append(kw)
        elif kw == 'return_stats':
            return_stats = bool(val)
            todel.append(kw)
        elif kw == "write_errors":
            write_errors = val
            todel.append(kw)
        elif kw == "filename_for_errors":
            filename_for_errors = val
            todel.append(kw)
        elif kw == "write_stats":
            write_stats = val
            todel.append(kw)
        # next keyword argument
    for kw in todel:
        del kwargs[kw]

    j = JSON( **kwargs )

    # Now do the actual JSON decoding
    result = j.decode( txt,
                       encoding=encoding,
                       return_errors=(return_errors or write_errors),
                       return_stats=(return_stats or write_stats) )

    if write_errors:
        import sys
        if write_errors is True:
            write_errors = sys.stderr
        for err in result.errors:
            write_errors.write( err.pretty_description(filename=filename_for_errors) + "\n" )

    if write_stats:
        import sys
        if write_stats is True:
            write_stats = sys.stderr
        if result.stats:
            write_stats.write( "%s----- Begin JSON statistics\n" % filename_for_errors )
            write_stats.write( result.stats.pretty_description( prefix=" | " ) )
            write_stats.write( "%s----- End of JSON statistics\n" % filename_for_errors )

    return result


def encode_to_file( filename, obj, encoding='utf-8', overwrite=False, **kwargs ):
    """Encodes a Python object into JSON and writes into the given file.

    If no encoding is given, then UTF-8 will be used.

    See the encode() function for a description of other possible options.

    If the file already exists and the 'overwrite' option is not set
    to True, then the existing file will not be overwritten.  (Note,
    there is a subtle race condition between the existence check and
    the file creation, so in rare circumstances the file may still be
    overwritten.)

    """
    import os, errno
    if not encoding:
        encoding = 'utf-8'

    if not isinstance(filename,basestring) or not filename:
        raise TypeError("Expected a file name")

    if not overwrite and os.path.exists(filename):
        raise IOError(errno.EEXIST, "File exists: %r" % filename)

    jsondata = encode( obj, encoding=encoding, **kwargs )

    try:
        fp = open(filename, 'wb')
    except Exception:
        raise
    else:
        try:
            fp.write( jsondata )
        finally:
            fp.close()


def decode_file( filename, encoding=None, **kwargs ):
    """Decodes JSON found in the given file.

    See the decode() function for a description of other possible options.
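
    A minimal usage sketch (illustrative; the filename is hypothetical):

        config = demjson.decode_file( "settings.json" )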
""" if isinstance(filename,basestring): try: fp = open(filename, 'rb') except Exception: raise else: try: jsondata = fp.read() finally: fp.close() else: raise TypeError("Expected a file name") return decode( jsondata, encoding=encoding, **kwargs ) # ====================================================================== class jsonlint(object): """This class contains most of the logic for the "jsonlint" command. You generally create an instance of this class, to defined the program's environment, and then call the main() method. A simple wrapper to turn this into a script might be: import sys, demjson if __name__ == '__main__': lint = demjson.jsonlint( sys.argv[0] ) return lint.main( sys.argv[1:] ) """ _jsonlint_usage = r"""Usage: %(program_name)s [<options> ...] [--] inputfile.json ... With no input filename, or "-", it will read from standard input. The return status will be 0 if the file is conforming JSON (per the RFC 7159 specification), or non-zero otherwise. GENERAL OPTIONS: -v | --verbose Show details of lint checking -q | --quiet Don't show any output (except for reformatting) STRICTNESS OPTIONS (WARNINGS AND ERRORS): -W | --tolerant Be tolerant, but warn about non-conformance (default) -s | --strict Be strict in what is considered conforming JSON -S | --nonstrict Be tolerant in what is considered conforming JSON --allow=... -\ --warn=... |-- These options let you pick specific behaviors. --forbid=... -/ Use --help-behaviors for more STATISTICS OPTIONS: --stats Show statistics about JSON document REFORMATTING OPTIONS: -f | --format Reformat the JSON text (if conforming) to stdout -F | --format-compactly Reformat the JSON simlar to -f, but do so compactly by removing all unnecessary whitespace -o filename | --output filename The filename to which reformatted JSON is to be written. Without this option the standard output is used. --[no-]keep-format Try to preserve numeric radix, e.g., hex, octal, etc. --html-safe Escape characters that are not safe to embed in HTML/XML. --sort <kind> How to sort object/dictionary keys, <kind> is one of: %(sort_options_help)s --indent tabs | <nnn> Number of spaces to use per indentation level, or use tab characters if "tabs" given. UNICODE OPTIONS: -e codec | --encoding=codec Set both input and output encodings --input-encoding=codec Set the input encoding --output-encoding=codec Set the output encoding These options set the character encoding codec (e.g., "ascii", "utf-8", "utf-16"). The -e will set both the input and output encodings to the same thing. The output encoding is used when reformatting with the -f or -F options. Unless set, the input encoding is guessed and the output encoding will be "utf-8". OTHER OPTIONS: --recursion-limit=nnn Set the Python recursion limit to number --leading-zero-radix=8|10 The radix to use for numbers with leading zeros. 8=octal, 10=decimal. REFORMATTING / PRETTY-PRINTING: When reformatting JSON with -f or -F, output is only produced if the input passed validation. By default the reformatted JSON will be written to standard output, unless the -o option was given. The default output codec is UTF-8, unless an encoding option is provided. Any Unicode characters will be output as literal characters if the encoding permits, otherwise they will be \u-escaped. You can use "--output-encoding ascii" to force all Unicode characters to be escaped. MORE INFORMATION: Use '%(program_name)s --version [-v]' to see versioning information. Use '%(program_name)s --copyright' to see author and copyright details. 
    Use '%(program_name)s [-W|-s|-S] --help-behaviors' for help on
    specific checks.

    %(program_name)s is distributed as part of the "demjson" Python module.
    See %(homepage)s
"""
    SUCCESS_FAIL = 'E'
    SUCCESS_WARNING = 'W'
    SUCCESS_OK = 'OK'

    def __init__(self, program_name='jsonlint', stdin=None, stdout=None, stderr=None ):
        """Create an instance of a "jsonlint" program.

        You can optionally pass options to define the program's
        environment:

          * program_name  - the name of the program, usually sys.argv[0]
          * stdin   - the file object to use for input, default sys.stdin
          * stdout  - the file object to use for output, default sys.stdout
          * stderr  - the file object to use for error output, default sys.stderr

        After creating an instance, you typically call the main() method.

        """
        import os, sys
        self.program_path = program_name
        self.program_name = os.path.basename(program_name)
        if stdin:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin

        if stdout:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout

        if stderr:
            self.stderr = stderr
        else:
            self.stderr = sys.stderr

    @property
    def usage(self):
        """A multi-line string containing the program usage instructions.
        """
        sorthelp = '\n'.join([ " %12s - %s" % (sm, sd)
                               for sm, sd in sorted(sorting_methods.items())
                               if sm != SORT_NONE ])
        return self._jsonlint_usage % {'program_name':self.program_name,
                                       'homepage':__homepage__,
                                       'sort_options_help': sorthelp }

    def _lintcheck_data( self, jsondata,
                         verbose_fp=None,
                         reformat=False,
                         show_stats=False,
                         input_encoding=None, output_encoding=None, escape_unicode=True,
                         pfx='',
                         jsonopts=None ):
        global decode, encode
        success = self.SUCCESS_FAIL
        reformatted = None
        if show_stats:
            stats_fp = verbose_fp
        else:
            stats_fp = None
        try:
            results = decode( jsondata, encoding=input_encoding,
                              return_errors=True,
                              return_stats=True,
                              write_errors=verbose_fp,
                              write_stats=stats_fp,
                              filename_for_errors=pfx,
                              json_options=jsonopts )
        except JSONError, err:
            success = self.SUCCESS_FAIL
            if verbose_fp:
                verbose_fp.write('%s%s\n' % (pfx, err.pretty_description()) )
        except Exception, err:
            success = self.SUCCESS_FAIL
            if verbose_fp:
                verbose_fp.write('%s%s\n' % (pfx, str(err) ))
        else:
            errors = [err for err in results.errors if err.severity in ('fatal','error')]
            warnings = [err for err in results.errors if err.severity in ('warning',)]
            if errors:
                success = self.SUCCESS_FAIL
            elif warnings:
                success = self.SUCCESS_WARNING
            else:
                success = self.SUCCESS_OK

            if reformat:
                encopts = jsonopts.copy()
                encopts.strictness = STRICTNESS_TOLERANT
                if reformat == 'compactly':
                    encopts.encode_compactly = True
                else:
                    encopts.encode_compactly = False
                reformatted = encode(results.object, encoding=output_encoding, json_options=encopts)
        return (success, reformatted)

    def _lintcheck( self, filename, output_filename,
                    verbose=False,
                    reformat=False,
                    show_stats=False,
                    input_encoding=None, output_encoding=None, escape_unicode=True,
                    jsonopts=None ):
        import sys
        verbose_fp = None

        if not filename or filename == "-":
            pfx = '<stdin>: '
            jsondata = self.stdin.read()
            if verbose:
                verbose_fp = self.stderr
        else:
            pfx = '%s: ' % filename
            try:
                fp = open( filename, 'rb' )
                jsondata = fp.read()
                fp.close()
            except IOError, err:
                self.stderr.write('%s: %s\n' % (pfx, str(err)) )
                return False
            if verbose:
                verbose_fp = self.stdout

        success, reformatted = self._lintcheck_data(
            jsondata,
            verbose_fp=verbose_fp,
            reformat=reformat,
            show_stats=show_stats,
            input_encoding=input_encoding, output_encoding=output_encoding,
            pfx=pfx,
            jsonopts=jsonopts )

        if success != self.SUCCESS_FAIL and reformat:
            if output_filename:
                try:
                    fp = open( output_filename, 'wb' )
                    fp.write( reformatted )
                except IOError, err:
                    self.stderr.write('%s: %s\n' % (pfx, str(err)) )
                    success = False
            else:
                self.stdout.write( reformatted )
        elif success == self.SUCCESS_OK and verbose_fp:
            verbose_fp.write('%sok\n' % pfx)
        elif success == self.SUCCESS_WARNING and verbose_fp:
            verbose_fp.write('%sok, with warnings\n' % pfx)
        elif verbose_fp:
            verbose_fp.write("%shas errors\n" % pfx)

        return success

    def main( self, argv ):
        """The main routine for program "jsonlint".

        Should be called with sys.argv[1:] as its sole argument.

        Note sys.argv[0] which normally contains the program name
        should not be passed to main(); instead this class itself
        is initialized with sys.argv[0].

        Use "--help" for usage syntax, or consult the 'usage' member.

        """
        import sys, os, getopt, unicodedata

        recursion_limit = None
        success = True
        verbose = 'auto'   # one of 'auto', True, or False
        reformat = False
        show_stats = False
        output_filename = None
        input_encoding = None
        output_encoding = 'utf-8'

        kwoptions = {   # Will be used to initialize json_options
            "sort_keys": SORT_SMART,
            "strict": STRICTNESS_WARN,
            "keep_format": True,
            "decimal_context": 100,
        }

        try:
            opts, args = getopt.getopt( argv,
                                        'hvqfFe:o:sSW',
                                        ['verbose','quiet',
                                         'format','format-compactly',
                                         'stats',
                                         'output=',
                                         'strict','nonstrict','warn',
                                         'html-safe','xml-safe',
                                         'encoding=',
                                         'input-encoding=','output-encoding=',
                                         'sort=',
                                         'recursion-limit=',
                                         'leading-zero-radix=',
                                         'keep-format', 'no-keep-format',
                                         'indent=', 'indent-amount=', 'indent-limit=',
                                         'indent-tab-width=',
                                         'max-items-per-line=',
                                         'allow=', 'warn=', 'forbid=', 'deny=',
                                         'help', 'help-behaviors',
                                         'version','copyright'] )
        except getopt.GetoptError, err:
            self.stderr.write( "Error: %s. Use \"%s --help\" for usage information.\n" \
                               % (err.msg, self.program_name) )
            return 1

        # Set verbose before looking at any other options
        for opt, val in opts:
            if opt in ('-v', '--verbose'):
                verbose = True

        # Process all options
        for opt, val in opts:
            if opt in ('-h', '--help'):
                self.stdout.write( self.usage )
                return 0
            elif opt == '--help-behaviors':
                self.stdout.write("""
BEHAVIOR OPTIONS:

This set of options lets you control which checks are to be performed.
They may be turned on or off by listing them as arguments to one of the
options --allow, --warn, or --forbid ; for example:

    %(program_name)s --allow comments,hex-numbers --forbid duplicate-keys

""" % {"program_name":self.program_name})
                self.stdout.write("The default shown is for %s mode\n\n" % kwoptions['strict'])
                self.stdout.write('%-7s %-25s %s\n' % ("Default", "Behavior_name", "Description"))
                self.stdout.write('-'*7 + ' ' + '-'*25 + ' ' + '-'*50 + '\n')
                j = json_options( **kwoptions )
                for behavior in sorted(j.all_behaviors):
                    v = j.get_behavior( behavior )
                    desc = j.describe_behavior( behavior )
                    self.stdout.write('%-7s %-25s %s\n' % (v.lower(), behavior.replace('_','-'), desc))
                return 0
            elif opt == '--version':
                self.stdout.write( '%s (%s) version %s (%s)\n' \
                                   % (self.program_name, __name__, __version__, __date__) )
                if verbose == True:
                    self.stdout.write( 'demjson from %r\n' % (__file__,) )
                if verbose == True:
                    self.stdout.write( 'Python version: %s\n' % (sys.version.replace('\n',' '),) )
                    self.stdout.write( 'This python implementation supports:\n' )
                    self.stdout.write( ' * Max unicode: U+%X\n' % (sys.maxunicode,) )
                    self.stdout.write( ' * Unicode version: %s\n' % (unicodedata.unidata_version,) )
                    self.stdout.write( ' * Floating-point significant digits: %d\n' % (float_sigdigits,) )
                    self.stdout.write( ' * Floating-point max 10^exponent: %d\n' % (float_maxexp,) )
                    if str(0.0)==str(-0.0):
                        szero = 'No'
                    else:
                        szero = 'Yes'
                    self.stdout.write( ' * Floating-point has signed-zeros: %s\n' % (szero,) )
                    if decimal:
                        has_dec = 'Yes'
                    else:
                        has_dec = 'No'
                    self.stdout.write( ' * Decimal (bigfloat) support: %s\n' % (has_dec,) )
                return 0
            elif opt == '--copyright':
                self.stdout.write( "%s is distributed as part of the \"demjson\" python package.\n" \
                                   % (self.program_name,) )
                self.stdout.write( "See %s\n\n\n" % (__homepage__,) )
                self.stdout.write( __credits__ )
                return 0
            elif opt in ('-v', '--verbose'):
                verbose = True
            elif opt in ('-q', '--quiet'):
                verbose = False
            elif opt in ('-s', '--strict'):
                kwoptions['strict'] = STRICTNESS_STRICT
                kwoptions['keep_format'] = False
            elif opt in ('-S', '--nonstrict'):
                kwoptions['strict'] = STRICTNESS_TOLERANT
            elif opt in ('-W', '--tolerant'):
                kwoptions['strict'] = STRICTNESS_WARN
            elif opt in ('-f', '--format'):
                reformat = True
                kwoptions['encode_compactly'] = False
            elif opt in ('-F', '--format-compactly'):
                kwoptions['encode_compactly'] = True
                reformat = 'compactly'
            elif opt in ('--stats',):
                show_stats = True
            elif opt in ('-o', '--output'):
                output_filename = val
            elif opt in ('-e','--encoding'):
                input_encoding = val
                output_encoding = val
                escape_unicode = False
            elif opt in ('--output-encoding',):
                output_encoding = val
                escape_unicode = False
            elif opt in ('--input-encoding',):
                input_encoding = val
            elif opt in ('--html-safe','--xml-safe'):
                kwoptions['html_safe'] = True
            elif opt in ('--allow','--warn','--forbid'):
                action = opt[2:]
                if action in kwoptions:
                    kwoptions[action] += "," + val
                else:
                    kwoptions[action] = val
            elif opt in ('--keep-format',):
                kwoptions['keep_format'] = True
            elif opt in ('--no-keep-format',):
                kwoptions['keep_format'] = False
            elif opt == '--leading-zero-radix':
                kwoptions['leading_zero_radix'] = val
            elif opt in ('--indent', '--indent-amount'):
                if val in ('tab','tabs'):
                    kwoptions['indent_amount'] = 8
                    kwoptions['indent_tab_width'] = 8
                else:
                    try:
                        kwoptions['indent_amount'] = int(val)
                    except ValueError:
                        self.stderr.write("Indentation amount must be a number\n")
                        return 1
            elif opt == '--indent-tab-width':
                try:
                    kwoptions['indent_tab_width'] = int(val)
                except ValueError:
self.stderr.write("Indentation tab width must be a number\n") return 1 elif opt == '--max-items-per-line': try: kwoptions['max_items_per_line'] = int(val) except ValueError: self.stderr.write("Max items per line must be a number\n") return 1 elif opt == '--sort': val = val.lower() if val == 'alpha': kwoptions['sort_keys'] = SORT_ALPHA elif val == 'alpha_ci': kwoptions['sort_keys'] = SORT_ALPHA_CI elif val == 'preserve': kwoptions['sort_keys'] = SORT_PRESERVE else: kwoptions['sort_keys'] = SORT_SMART elif opt == '--recursion-limit': try: recursion_limit = int(val) except ValueError: self.stderr.write("Recursion limit must be a number: %r\n" % val) return 1 else: max_limit = 100000 old_limit = sys.getrecursionlimit() if recursion_limit > max_limit: self.stderr.write("Recursion limit must be a number between %d and %d\n" % (old_limit,max_limit)) return 1 elif recursion_limit > old_limit: sys.setrecursionlimit( recursion_limit ) else: self.stderr.write('Unknown option %r\n' % opt) return 1 # Make the JSON options kwoptions['decimal_context'] = 100 jsonopts = json_options( **kwoptions ) # Now decode each file... if not args: args = [None] for fn in args: try: if not self._lintcheck( fn, output_filename=output_filename, verbose=verbose, reformat=reformat, show_stats=show_stats, input_encoding=input_encoding, output_encoding=output_encoding, jsonopts=jsonopts ): success = False except KeyboardInterrupt, err: sys.stderr.write("\njsonlint interrupted!\n") sys.exit(1) if not success: return 1 return 0 # end file
vorburger/mcedit2
src/mceditlib/util/demjson.py
Python
bsd-3-clause
247,568
[ "CDK" ]
5d04f76a4c2189a6967895fbe94893e058ac6b6e016aa322daf6ff2f218e1fd6