Dataset schema (one record per source file; each record lists its fields in the order below):

  field      type     range / values
  ---------  -------  ----------------------------------------
  text       string   12 to 1.05M characters (file contents)
  repo_name  string   5 to 86 characters
  path       string   4 to 191 characters
  language   string   1 distinct value
  license    string   15 distinct values
  size       int32    12 to 1.05M
  keyword    list     1 to 23 items
  text_hash  string   64 characters
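For reference, a minimal sketch of consuming records with this schema; it assumes the dataset has been exported to JSON Lines (the file name records.jsonl is hypothetical) and that text_hash is the SHA-256 hex digest of text, consistent with its fixed 64-character length:

import hashlib
import json

# Stream records and sanity-check each one against the schema above.
# Hypothetical input path; adapt to wherever the dataset actually lives.
with open("records.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        # Verify the assumed text -> text_hash relationship.
        digest = hashlib.sha256(rec["text"].encode("utf-8")).hexdigest()
        assert digest == rec["text_hash"], rec["path"]
        print(rec["repo_name"], rec["path"], rec["license"], rec["size"])
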
#! /usr/bin/env python
###########
## REMAP ##
###########
##
## first operational version: Matthias Aechner -- 2014
## rewrite + generalization + options + optimization: Aymeric SPIGA -- March 2015
##
import netCDF4 as nc
import ctypes as ct
import numpy as np
import os
import time
from optparse import OptionParser ### TBR by argparse
from remap_func \
import ll,apply_weights,compute_distribution

## parallel or not
parallel=False
parallel=True
if parallel:
    from mpi4py import MPI

timechar="time_counter"
daysec = 38052.

##########
## DICT ##
##########
grid_types = {
  "dynamico:mesh":    {"lon_name": "bounds_lon_i","lat_name": "bounds_lat_i","pole": [0,0,0]},
  "dynamico:vort":    {"lon_name": "bounds_lon_v","lat_name": "bounds_lat_v","pole": [0,0,0]},
  "dynamico:restart": {"lon_name": "lon_i_vertices","lat_name": "lat_i_vertices","pole": [0,0,0]},
  "test:polygon":     {"lon_name": "bounds_lon","lat_name": "bounds_lat","pole": [0,0,0]},
  "test:latlon":      {"lon_name": "bounds_lon","lat_name": "bounds_lat","pole": [0,0,1]},
  "ll":               {"func": ll,"pole": [0,0,1]},
}
interp_types = {"FV1": 1,"FV2": 2}

######################################
# define parser with version and usage
######################################
parser = OptionParser()
parser.version = \
'''
REMAPPER
`remap.py -h` for usage and options
'''
##
parser.usage = \
'''
remap.py [options] srcfile dstfile
--> srcfile is an input netCDF file
--> dstfile is either a number for simple latlon grid
    or a netCDF file indicating destination grid
'''
##
parser.add_option('-F','--forceweights',action='store_true',dest='forceweights',default=False,\
    help="force computing of weights [F]")
parser.add_option('-W','--weightfile',action='store',dest='weightfile',type="string",default=None,\
    help="prescribe name of weight file (either existing or not) [None]")
parser.add_option('-S','--srctype',action='store',dest='srctype',default="test:polygon",\
    help="grid type of source [test:polygon]")
parser.add_option('-D','--dsttype',action='store',dest='dsttype',default="ll",\
    help="grid type of destination [ll]")
parser.add_option('-o','--outfile',action='store',dest='outfile',type="string",default="outremap.nc",\
    help="output file [outremap.nc]")
parser.add_option('-R','--reshaped',action='store_true',dest='reshaped',default=False,\
    help="output reshaped fields on a 2D grid [F]")
parser.add_option('-i','--interp',action='store',dest='interp',type="string",default="FV1",\
    help="interpolation method (FV1 FV2) conservative Finite Volume [FV1]")
parser.add_option('-v','--var2d',action='append',dest='var2d',type="string",default=None,\
    help="2D field [append is possible, default None]")
parser.add_option('-V','--var3d',action='append',dest='var3d',type="string",default=None,\
    help="3D field [append is possible, default None]")
parser.add_option('-Z','--vert',action='store',dest='vertchar',type="string",default="presnivs",\
    help="vertical coordinate [presnivs]")
parser.add_option('-z','--level',action='append',dest='z',type="int",default=None,\
    help="choose vertical indexes to be interpolated [append is possible, default None]")
parser.add_option('-t','--time',action='append',dest='t',type="int",default=None,\
    help="choose time indexes to be interpolated [append is possible, default None]")
parser.add_option('-P','--plot',action='store_true',dest='plot',default=False,\
    help="plot fields [F]")
##
(opt,args) = parser.parse_args()
if (len(args) == 0):
    parser.print_version() ; exit()

## SRCFILE
if len(args) != 2:
    parser.print_usage() ; exit(2)
else:
    srcfile = args[0] ; dstfile = args[1]

## SRCTYPE
try:
    srctype = grid_types[opt.srctype]
except KeyError:
    print "Error: srctype needs to be one of the following: " + " ".join(grid_types.keys()) + "."
    exit(2)

## DSTTYPE
try:
    dsttype = grid_types[opt.dsttype]
except KeyError:
    print "Error: dsttype needs to be one of the following: " + " ".join(grid_types.keys()) + "."
    exit(2)

## FIELDCHAR
if (opt.var3d is None) and (opt.var2d is None):
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print " WARNING! No fields indicated with -V or -v."
    print " WARNING! Only considering computing weights."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    onlyweights = True
else:
    onlyweights = False

## NO SPECIFIC OPERATION NEEDED
vertchar = opt.vertchar
interp = opt.interp

##
## test if we have to compute weights
##
if opt.weightfile is None:
    wfile = srcfile+"_weights"
    if "func" in dsttype:
        wfile = wfile + "_" + dstfile
    wfile = wfile+'.nc'
else:
    wfile = opt.weightfile
if opt.forceweights:
    computeweights = True
else:
    computeweights = not(os.path.isfile(wfile))

##
## test if we have to compute barycentres
##
if "func" in dsttype:
    computebary = False
else:
    computebary = True

####
#### LOAD remap LIBRARY
####
if computeweights or computebary:
    remap = ct.cdll.LoadLibrary(os.path.realpath('libmapper.so'))
    remap.mpi_init()
    rank = remap.mpi_rank()
    size = remap.mpi_size()
else:
    rank = 0
    size = 1

###
### MAIN PROGRAM
###

############
### GRID ###
############
print "**** GRID ****"
print "Get grids either from files or from computations"
stime = time.time()
if "reader" in srctype:
    src_lon, src_lat = srctype["reader"](srcfile)
else:
    src = nc.Dataset(srcfile)
    # the following two lines do not perform the actual read
    # the file is read later when assigning to the ctypes array
    # -> no unnecessary array copying in memory
    src_lon = src.variables[srctype["lon_name"]]
    src_lat = src.variables[srctype["lat_name"]]
if "reader" in dsttype:
    dst_lon, dst_lat = dsttype["reader"](dstfile)
elif "func" in dsttype:
    dst_lon, dst_lat, dst_centre_lon, dst_centre_lat = dsttype["func"](dstfile)
else:
    dst = nc.Dataset(dstfile)
    dst_lon = dst.variables[dsttype["lon_name"]]
    dst_lat = dst.variables[dsttype["lat_name"]]

## prepare dimensions and arrays for later use in library and computations
dst_ncell, dst_nvert = dst_lon.shape
dst_ncell_loc, dst_loc_start = compute_distribution(dst_ncell,rank,size)
dstpole = (ct.c_double * (3))() ; dstpole[:] = dsttype["pole"]
c_dst_ncell = ct.c_int(dst_ncell_loc)
c_dst_nvert = ct.c_int(dst_nvert)
order = ct.c_int(interp_types[interp])
if computeweights or computebary:
    print "convert and reshape to C-type arrays for remap library"
    ## lon
    c_dst_lon = (ct.c_double * (dst_ncell_loc*dst_nvert))()
    zelen = len(c_dst_lon)
    c_dst_lon[:] = nc.numpy.reshape(dst_lon[dst_loc_start:dst_loc_start+dst_ncell_loc,:], (zelen,1))
    ## lat
    c_dst_lat = (ct.c_double * (dst_ncell_loc*dst_nvert))()
    c_dst_lat[:] = nc.numpy.reshape(dst_lat[dst_loc_start:dst_loc_start+dst_ncell_loc,:], (zelen,1))
gtime = time.time() - stime

###############
### WEIGHTS ###
###############
print "**** WEIGHTS ****"
stime = time.time()
### -- if weight file does not exist, calculate weights and create file
### -- if weight file does exist, read weights
if rank == 0:

    if computeweights:

        print "Calling remap library to compute weights."
        c_nweight = ct.c_int()
        ## convert to C-type arrays for remap library
        src_ncell, src_nvert = src_lon.shape
        src_ncell_loc, src_loc_start = compute_distribution(src_ncell,rank,size)
        c_src_lon = (ct.c_double * (src_ncell_loc*src_nvert))()
        zelen = len(c_src_lon)
        c_src_lon[:] = nc.numpy.reshape(src_lon[src_loc_start:src_loc_start+src_ncell_loc,:], (zelen,1))
        c_src_lat = (ct.c_double * (src_ncell_loc*src_nvert))()
        c_src_lat[:] = nc.numpy.reshape(src_lat[src_loc_start:src_loc_start+src_ncell_loc,:], (zelen,1))
        srcpole = (ct.c_double * (3))() ; srcpole[:] = srctype["pole"]
        c_src_ncell = ct.c_int(src_ncell_loc)
        c_src_nvert = ct.c_int(src_nvert)
        ##
        print "remap_get_num_weights"
        remap.remap_get_num_weights(c_src_lon, c_src_lat, c_src_nvert, c_src_ncell, srcpole, c_dst_lon, c_dst_lat, c_dst_nvert, c_dst_ncell, dstpole, order, ct.byref(c_nweight))
        ##
        nwgt = c_nweight.value
        c_weights = (ct.c_double * nwgt)()
        c_dst_idx = (ct.c_int * nwgt)()
        c_src_idx = (ct.c_int * nwgt)()
        ##
        print "remap_get_weights"
        remap.remap_get_weights(c_weights, c_src_idx, c_dst_idx)
        ##
        if parallel:
            wgt_glo = MPI.COMM_WORLD.gather(c_weights[:])
            src_idx_glo = MPI.COMM_WORLD.gather(c_src_idx[:])
            dst_idx_glo = MPI.COMM_WORLD.gather(c_dst_idx[:])
        else:
            wgt_glo = c_weights[:]
            src_idx_glo = c_src_idx[:]
            dst_idx_glo = c_dst_idx[:]
        ### change lists to numpy arrays to be saved and used in calculations
        wgt_glo = np.hstack(wgt_glo)
        src_idx_glo = np.hstack(src_idx_glo)
        dst_idx_glo = np.hstack(dst_idx_glo)
        ### create netCDF file
        nwgt_glo = wgt_glo.size
        print "Writing", nwgt_glo, "weights to netCDF-file '" + wfile + "'."
        f = nc.Dataset(wfile,'w')
        f.createDimension('n_src', src_ncell)
        f.createDimension('n_dst', dst_ncell)
        f.createDimension('n_weight', nwgt_glo)
        var = f.createVariable('src_idx', 'i', ('n_weight')) ; var[:] = src_idx_glo
        var = f.createVariable('dst_idx', 'i', ('n_weight')) ; var[:] = dst_idx_glo
        var = f.createVariable('weight', 'd', ('n_weight')) ; var[:] = wgt_glo
        f.close()

    else:

        print "Reading weights from netCDF file "+wfile
        f = nc.Dataset(wfile)
        src_idx_glo = f.variables['src_idx'][:]
        dst_idx_glo = f.variables['dst_idx'][:]
        wgt_glo = f.variables['weight'][:]
        f.close()

wtime = time.time() - stime

#############
### REMAP ###
#############
stime = time.time()
if not onlyweights:

    print "**** REMAP ****"

    ### Barycentres and areas if needed
    if computebary:
        print 'Get barycentres and areas'
        ##
        c_centre_lon = (ct.c_double * dst_ncell_loc)()
        c_centre_lat = (ct.c_double * dst_ncell_loc)()
        c_areas = (ct.c_double * dst_ncell_loc)()
        remap.remap_get_barycentres_and_areas(c_dst_lon, c_dst_lat, c_dst_nvert, c_dst_ncell, dstpole, c_centre_lon, c_centre_lat, c_areas)
        ##
        if parallel:
            dst_centre_lon_glo = MPI.COMM_WORLD.gather(np.array(c_centre_lon[:]))
            dst_centre_lat_glo = MPI.COMM_WORLD.gather(np.array(c_centre_lat[:]))
        else:
            dst_centre_lon_glo = np.array(c_centre_lon[:])
            dst_centre_lat_glo = np.array(c_centre_lat[:])
        ##
        if rank == 0:
            dst_centre_lon = np.hstack(dst_centre_lon_glo)
            dst_centre_lat = np.hstack(dst_centre_lat_glo)

    ### determine vertical levels
    presnivs = src.variables[vertchar]
    nz = len(presnivs)
    if opt.z is None:
        vertrange = range(nz)
    else:
        vertrange = opt.z ; nz = len(vertrange)

    ### determine time
    zetime = src.variables[timechar]
    nt = len(zetime)
    if opt.t is None:
        timerange = range(nt)
    else:
        timerange = opt.t ; nt = len(timerange)

    ### Prepare netCDF file for write
    f = nc.Dataset(opt.outfile,'w',format='NETCDF3_CLASSIC')

    ### first treat vertical coordinates
    f.createDimension(vertchar, nz)
    var = f.createVariable(vertchar, 'd', (vertchar))
    var.setncattr("long_name", "vertical coordinate")
    var.setncattr('axis', 'Z')
    if opt.z is None:
        var[:] = presnivs[:]
    else:
        var[:] = presnivs[opt.z]

    ### second, horizontal coordinates
    ### -- two modes: based on cells (default), or based on lat/lon (if reshaped=True)
    if opt.reshaped:
        ps = np.zeros( (nt,dst_ncell) )
        temp = np.zeros( (nt,nz,dst_ncell) )
        ##
        N = np.int(np.sqrt(dst_ncell/2))
        shp = (nt,N,N*2)
        shp3 = (nt,nz,N,N*2)
        shphor = (N,N*2)
        ##
        f.createDimension('longitude', N*2)
        f.createDimension('latitude', N)
        f.createDimension(timechar, None) #nt)
        ##
        var = f.createVariable('longitude', 'd', ('longitude'))
        var.setncattr("long_name", "longitude")
        var.setncattr("units", "deg north")
        var[:] = np.unique(dst_centre_lon)[:]
        ##
        var = f.createVariable('latitude', 'd', ('latitude'))
        var.setncattr("long_name", "latitude")
        var.setncattr("units", "deg east")
        var[:] = np.unique(dst_centre_lat)[:]
        ##
        var = f.createVariable(timechar, 'd', (timechar))
        try:
            ### get time values
            yorgl = nc.Dataset(srcfile)
            zevar = yorgl.variables[timechar]
            var[:] = zevar[timerange[:]] / daysec
        except:
            print "--- had a problem reading time values. use indexes for dest file."
            var[:] = timerange[:]
    else:
        ### TBD: add Time!!
        # nq=src.dimensions['nq']
        f.createDimension('nvert', dst_nvert)
        f.createDimension('cell', dst_ncell)
        # f.createDimension('nq', len(nq))
        var = f.createVariable('latitude', 'd', ('cell'))
        var.setncattr("long_name", "latitude")
        var.setncattr("units", "degrees_north")
        var.setncattr("bounds", "bounds_lat")
        var[:] = dst_centre_lat
        var = f.createVariable('longitude', 'd', ('cell'))
        var.setncattr("long_name", "longitude")
        var.setncattr("units", "degrees_east")
        var.setncattr("bounds", "bounds_lon")
        var[:] = dst_centre_lon
        var = f.createVariable('bounds_lon', 'd', ('cell','nvert'))
        var[:] = dst_lon
        var = f.createVariable('bounds_lat', 'd', ('cell','nvert'))
        var[:] = dst_lat

    ########################################################################
    ### THE A MATRIX TO CHANGE COORDINATES ###
    if rank == 0:
        from scipy import sparse
        A = sparse.csr_matrix(sparse.coo_matrix((wgt_glo,(dst_idx_glo,src_idx_glo))))
    ########################################################################

    # 2D FIELD
    # ... if 2D fields are requested
    if opt.var2d is not None:
        # ... for all 2D fields
        for var2d in opt.var2d:
            print "remapping... %s with %i time samples" % (var2d,nt)
            src_val_loc = src.variables[var2d]
            dim = len(src_val_loc.shape)
            tmptime = time.time()
            # ... for all stored time samples
            count = 0
            for tt in timerange:
                if dim == 2:
                    tab_loc = np.array(src_val_loc[tt,:])
                elif dim == 1:
                    tab_loc = np.array(src_val_loc[:])
                elif dim > 2:
                    print "are you sure this is a 2D field?" ; exit()
                if parallel:
                    src_val_glo = MPI.COMM_WORLD.gather(tab_loc)
                else:
                    src_val_glo = tab_loc
                if rank == 0:
                    dst_val = apply_weights(src_val_glo,A)
                    if not opt.reshaped:
                        ps = f.createVariable(var2d, 'd', (timechar,'cell'))
                        ps.setncattr("coordinates", "time lon lat")
                    ps[count,:] = dst_val
                count = count + 1
                ## display time
                test = time.time() - tmptime
                if test > 5.:
                    print "5s elapsed. done up to time %i/%i" % (count+1,nt)
                    tmptime = time.time()
            # reshape if necessary
            if opt.reshaped:
                print "reshaping and writing...",var2d
                var = f.createVariable(var2d, 'd', (timechar,'latitude','longitude'))
                var[:,:,:] = np.reshape(ps,shp)

    # 3D FIELD
    # ... if 3D fields are requested
    if opt.var3d is not None:
        # ... for all 3D fields
        for var3d in opt.var3d:
            print "remapping... %s with %i levels %i time samples" % (var3d,nz,nt)
            src_val_loc = src.variables[var3d]
            dim = len(src_val_loc.shape)
            if not opt.reshaped:
                temp = f.createVariable(var3d, 'd', (timechar,vertchar,'cell'))
                temp.setncattr("coordinates", "time presnivs lon lat")
            tmptime = time.time()
            # ... for all vertical levels
            countlev = 0
            for l in vertrange:
                # ... for all stored time samples
                count = 0
                for tt in timerange:
                    ##
                    if dim == 3:
                        tab_loc = np.array(src_val_loc[tt,l,:])
                    elif dim == 2:
                        tab_loc = np.array(src_val_loc[l,:])
                    if parallel:
                        src_val_glo = MPI.COMM_WORLD.gather(tab_loc)
                    else:
                        src_val_glo = tab_loc
                    if rank == 0:
                        dst_val = apply_weights(src_val_glo,A)
                        temp[count,countlev,:] = dst_val
                    ## display time
                    test = time.time() - tmptime
                    if test > 5.:
                        print "5s elapsed. done up to vertical level %i/%i time %i/%i" % (countlev+1,nz,count+1,nt)
                        tmptime = time.time()
                    # increment time
                    count = count+1
                # increment level
                countlev = countlev+1
            # reshape if necessary
            if opt.reshaped:
                print "reshaping and writing...",var3d
                var = f.createVariable(var3d, 'd', (timechar,vertchar,'latitude','longitude'))
                var[:,:,:,:] = np.reshape(temp,shp3)

    print "...done"
    f.close()

rtime = time.time() - stime

############
### PLOT ###
############
if not onlyweights:
    if ("func" in dsttype) and (opt.plot):

        import ppplot
        print "**** PLOT ****"

        ### GUESS SIZE
        N = np.int(np.sqrt(dst_ncell/2))
        shp = (N,N*2)
        shp3 = (nz,N,N*2)

        ### PLOT
        pl = ppplot.plot2d()

        ### RESHAPE (should be at no cost) and ASSIGN to PLOT
        field = ps
        field = temp[0,:]
        pl.f = np.reshape(field,shp)
        pl.y = np.reshape(dst_centre_lat,shp)
        pl.x = np.reshape(dst_centre_lon,shp)

        ### PLOT SETTINGS and MAKE
        pl.proj = "cyl" #"ortho"
        pl.blat = 45.
        pl.makeshow()

        ### A ZONAL SECTION
        tab = np.reshape(temp,shp3)
        pl.f = np.mean(tab,axis=2)
        pl.x = None ; pl.y = None
        pl.makeshow()

###########
### END ###
###########
if not "reader" in srctype:
    src.close()
if not "reader" in dsttype:
    if not "func" in dsttype:
        dst.close()

print "**** TIMES ****"
print "GRID %.2f sec // WEIGHTS %.2f sec // REMAP %.2f sec // TOTAL %.2f sec" % (gtime,wtime,rtime,gtime+wtime+rtime)
repo_name: aymeric-spiga/remap
path: py/remap.py
language: Python
license: gpl-2.0
size: 17599
keyword: ["NetCDF"]
text_hash: 72d54f5d7a3638e057e836ae6e14c8b47e921abb38118cdb8e66fac41a334d99
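The core of the record above is the application of precomputed conservative-remapping weights as a sparse matrix-vector product (the A matrix built from wgt_glo, dst_idx_glo, src_idx_glo). A minimal standalone sketch of that step, with made-up weight triplets standing in for the libmapper output:

import numpy as np
from scipy import sparse

# Three (value, destination cell, source cell) triplets, i.e. what
# remap_get_weights returns: dst[0] = 0.6*src[0] + 0.4*src[1], dst[1] = src[2].
wgt_glo = np.array([0.6, 0.4, 1.0])
dst_idx_glo = np.array([0, 0, 1])
src_idx_glo = np.array([0, 1, 2])

# Same construction as in remap.py: COO triplets -> CSR matrix.
A = sparse.csr_matrix(sparse.coo_matrix((wgt_glo, (dst_idx_glo, src_idx_glo))))

src_val = np.array([10.0, 20.0, 30.0])
print(A.dot(src_val))  # [14. 30.]
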
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Ior(AutotoolsPackage):
    """The IOR software is used for benchmarking parallel file systems
    using POSIX, MPI-IO, or HDF5 interfaces."""

    homepage = "https://github.com/LLNL/ior"
    url      = "https://github.com/LLNL/ior/archive/3.0.1.tar.gz"

    version('3.0.1', '71150025e0bb6ea1761150f48b553065')

    variant('hdf5', default=False, description='support IO with HDF5 backend')
    variant('ncmpi', default=False, description='support IO with NCMPI backend')

    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('mpi')
    depends_on('hdf5+mpi', when='+hdf5')
    depends_on('parallel-netcdf', when='+ncmpi')

    @run_before('autoreconf')
    def bootstrap(self):
        Executable('./bootstrap')()

    def configure_args(self):
        spec = self.spec
        config_args = []

        env['CC'] = spec['mpi'].mpicc

        if '+hdf5' in spec:
            config_args.append('--with-hdf5')
            config_args.append('CFLAGS=-D H5_USE_16_API')
        else:
            config_args.append('--without-hdf5')

        if '+ncmpi' in spec:
            config_args.append('--with-ncmpi')
        else:
            config_args.append('--without-ncmpi')

        return config_args
repo_name: EmreAtes/spack
path: var/spack/repos/builtin/packages/ior/package.py
language: Python
license: lgpl-2.1
size: 2584
keyword: ["NetCDF"]
text_hash: 21e6b771af6e3fd83757f8066b2dd16073b320a05cc12a1818fc6d121f896d18
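The variant handling in the package above follows a common --with/--without pattern. A standalone restatement of that flag logic (plain Python, not the Spack API), useful for seeing what configure is ultimately invoked with:

def ior_configure_args(hdf5=False, ncmpi=False):
    # Mirrors the configure_args logic of the ior package above:
    # each optional backend toggles a --with/--without pair.
    args = []
    if hdf5:
        args += ['--with-hdf5', 'CFLAGS=-D H5_USE_16_API']
    else:
        args += ['--without-hdf5']
    args += ['--with-ncmpi' if ncmpi else '--without-ncmpi']
    return args

print(ior_configure_args(hdf5=True))
# ['--with-hdf5', 'CFLAGS=-D H5_USE_16_API', '--without-ncmpi']
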
'''
Created on 28/09/2012

@author: Amr Hassan
'''
import os
import sys
import struct
import string
import math
import numpy
from random import randrange
import logging
import PGDBInterface
import h5py
import numpy
import time
import ProcessTreeTraversal


class SAGEDataReader:
    # The Module handles the data reading from SAGE output to a memory data structure.

    CurrentInputFilePath=""
    CurrentGlobalTreeID=0
    FormatMapping={'int':'i','float':'f','long long':'q'}

    def __init__(self,CurrentSAGEStruct,Options,PGDB,CommSize,CommRank):
        # Initialize the Class to handle a specific file path
        self.CurrentInputFilePath=Options['RunningSettings:InputFile']
        self.CurrentSAGEStruct=CurrentSAGEStruct
        self.Options=Options
        self.PGDB=PGDB
        self.CommSize=CommSize
        self.CommRank=CommRank

        self.SimulationBoxX=float(self.Options['RunningSettings:SimulationBoxX'])
        self.SimulationBoxY=float(self.Options['RunningSettings:SimulationBoxX'])
        self.BSPCellSize=float(self.Options['RunningSettings:BSPCellSize'])
        self.CellsInX=int(math.ceil(self.SimulationBoxX/self.BSPCellSize))
        self.CellsInY=int(math.ceil(self.SimulationBoxY/self.BSPCellSize))
        serverscount=int(self.Options['PGDB:ServersCount'])
        self.BigTableID=(self.CellsInX*self.CellsInY)+(CommRank%serverscount)
        logging.info("Big Table ID="+str(self.BigTableID))

    def ProcessAllTrees(self):
        self.InputFile=h5py.File(self.CurrentInputFilePath,'r')
        # Process All the Non-Empty Files
        ListOfUpProcessedTrees=self.PGDB.GetListofUnProcessedTrees(self.CommSize,self.CommRank)
        TotalNumberofUnPrcoessedTrees=len(ListOfUpProcessedTrees)
        TreeCounter=0
        for UnProcessedTree in ListOfUpProcessedTrees:
            # Updating the user with what is going on
            logging.info(str(self.CommRank)+":Processing Tree ("+str(TreeCounter)+"/"+str(TotalNumberofUnPrcoessedTrees-1)+"):"+str(UnProcessedTree[0]))
            start_time = time.time()
            self.ProcessTree(UnProcessedTree)
            logging.info(">>>> Importing Tree Execution time="+str( time.time() - start_time)+ " seconds")
            start_time = time.time()
            self.PGDB.SetTreeAsProcessed(UnProcessedTree[0])
            logging.info(">>>> Set Tree as Processed time="+str( time.time() - start_time)+ " seconds")
            TreeCounter=TreeCounter+1

    def GenerateDictFromFields(self,TreeLoadingID,TreeData):
        TreeDict=[]
        pgcopy_dtype = [('num_fields','>i2')]
        FieldsList=[]
        FieldsIndex=0
        for field, dtype in TreeData.dtype.descr:
            FieldsList+=[self.CurrentSAGEStruct[FieldsIndex][0]]
            FieldName=self.CurrentSAGEStruct[FieldsIndex][0]
            pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, dtype.replace('<', '>'))]
            FieldsIndex=FieldsIndex+1

        ####### Add Generated Fields (Computed) ###############################
        FieldName='TreeID'
        pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, '>i8')]
        FieldName='CentralGalaxyGlobalID'
        pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, '>i8')]
        FieldName='breadthfirst_traversalorder'
        pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, '>i8')]
        FieldName='depthfirst_traversalorder'
        pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, '>i8')]
        FieldName='subtree_count'
        pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, '>i8')]

        FieldsList+=['treeid']
        FieldsList+=['centralgalaxyglobalid']
        FieldsList+=['breadthfirst_traversalorder']
        FieldsList+=['depthfirst_traversalorder']
        FieldsList+=['subtree_count']
        #########################################################################

        if(FieldsList.count('LocalGalaxyID')>0):
            logging.info("### LocalGalaxyID already Exists. No Data Will be generated")
        else:
            logging.info("### LocalGalaxyID is Missing. Regenerate Local GalaxyID")

        if(FieldsList.count('LocalGalaxyID')==0):
            FieldName='LocalGalaxyID'
            pgcopy_dtype += [(FieldName + '_length', '>i4'),(FieldName, '>i4')]

        pgcopy = numpy.empty(TreeData.shape, pgcopy_dtype)

        pgcopy['TreeID_length'] = numpy.dtype('>i8').alignment
        pgcopy['CentralGalaxyGlobalID_length'] = numpy.dtype('>i8').alignment
        pgcopy['breadthfirst_traversalorder_length']=numpy.dtype('>i8').alignment
        pgcopy['depthfirst_traversalorder_length']=numpy.dtype('>i8').alignment
        pgcopy['subtree_count_length']=numpy.dtype('>i8').alignment

        GeneratedFields=0
        if(FieldsList.count('LocalGalaxyID')==0):
            GeneratedFields=6
        else:
            GeneratedFields=5

        pgcopy['num_fields'] = len(TreeData.dtype)+GeneratedFields
        for i in range(0,len(TreeData.dtype)):
            field = self.CurrentSAGEStruct[i][0]
            pgcopy[field + '_length'] = TreeData.dtype[i].alignment
            pgcopy[field] = TreeData[TreeData.dtype.names[i]]

        pgcopy['TreeID'].fill(TreeLoadingID)

        if(FieldsList.count('LocalGalaxyID')==0):
            pgcopy['LocalGalaxyID']=range(0,len(TreeData))
            pgcopy['LocalGalaxyID_length'] = numpy.dtype('>i4').alignment

        ManageTreeIndexObj=ProcessTreeTraversal.ManageTreeIndex(self.Options)
        ManageTreeIndexObj.BuildTree(TreeData)
        ManageTreeIndexObj.BreadthFirst(ManageTreeIndexObj.ParentNode)
        ManageTreeIndexObj.DepthFirst_PreOrder(ManageTreeIndexObj.ParentNode)
        ManageTreeIndexObj.CountChildNodes(ManageTreeIndexObj.ParentNode)
        NodesList={}
        ManageTreeIndexObj.TreeToList(ManageTreeIndexObj.ParentNode,NodesList)

        for TreeField in pgcopy:
            GlobalIndex=TreeField['GlobalIndex']
            TreeField['breadthfirst_traversalorder']=NodesList[GlobalIndex]['BreadthFirstIndex']
            TreeField['depthfirst_traversalorder']=NodesList[GlobalIndex]['DepthFirstIndex']
            TreeField['subtree_count']=NodesList[GlobalIndex]['SubTreeSize']

        return pgcopy

    def ComputeFields(self,TreeData):
        #print TreeData
        if "CentralGal" in TreeData.dtype.fields:
            for TreeField in TreeData:
                CentralGalaxyLocalID=TreeField['CentralGal']
                CentralGalaxy=TreeData[CentralGalaxyLocalID]
                TreeField['CentralGalaxyGlobalID']=CentralGalaxy['GlobalIndex']
        else:
            logging.info('#### Central Galaxy Field Does not exist. Skipping Compute Fields #####')
        return TreeData

    def ProcessTree(self,UnProcessedTree):
        LoadingTreeID= UnProcessedTree[0]
        StartIndex=UnProcessedTree[2]
        GalaxiesCount=UnProcessedTree[1]
        logging.info('\t '+str(self.CommRank)+': Number of Galaxies in Tree ('+str(LoadingTreeID)+')='+str(GalaxiesCount))
        if GalaxiesCount>0:
            start_time = time.time()
            TreeData=self.InputFile['galaxies'][StartIndex:StartIndex+GalaxiesCount]
            logging.info("Reading Data="+str( time.time() - start_time)+ " seconds")
            start_time = time.time()
            TreeData=self.GenerateDictFromFields(LoadingTreeID,TreeData)
            logging.info("Convert to Dict="+str( time.time() - start_time)+ " seconds")
            start_time = time.time()
            self.ComputeFields(TreeData)
            logging.info("Compute Fields="+str( time.time() - start_time)+ " seconds")
            start_time = time.time()
            TableID=self.MapTreetoTableID(TreeData)
            logging.info("Get TableID="+str( time.time() - start_time)+ " seconds")
            start_time = time.time()
            self.PGDB.CreateNewTree(TableID,TreeData)
            logging.info("Insert to Database="+str( time.time() - start_time)+ " seconds")

    def IntersectTwoRect(self,RectA,RectB):
        ## Rect=[X1,X2,Y1,Y2]
        if (RectA[0] < RectB[1] and RectA[1] > RectB[0] and RectA[2] < RectB[3] and RectA[3] > RectB[2]):
            return True;
        else:
            return False;

    def MapTreetoTableID(self,TreeData):
        #self.SimulationBoxX=float(self.Options['RunningSettings:SimulationBoxX'])
        #self.SimulationBoxY=float(self.Options['RunningSettings:SimulationBoxX'])
        #self.BSPCellSize=float(self.Options['RunningSettings:BSPCellSize'])
        #self.CellsInX=int(math.ceil(self.SimulationBoxX/self.BSPCellSize))
        #self.CellsInY=int(math.ceil(self.SimulationBoxY/self.BSPCellSize))
        logging.info('Calculating Tree Bounding Box for '+ str(len(TreeData))+' Galaxy!')
        ## Get Tree Bounding Rectangle
        MinX=TreeData[0]['PosX']
        MaxX=TreeData[0]['PosX']
        MinY=TreeData[0]['PosY']
        MaxY=TreeData[0]['PosY']
        for TreeItem in TreeData:
            MinX=min(MinX,TreeItem['PosX'])
            MaxX=max(MaxX,TreeItem['PosX'])
            MinY=min(MinY,TreeItem['PosY'])
            MaxY=max(MaxY,TreeItem['PosY'])
            #logging.info(str(TreeItem['PosX'])+","+str(TreeItem['PosY'])+","+str(TreeItem['PosZ']))
        Rect1=[MinX,MaxX,MinY,MaxY]
        logging.info('Tree Box'+ str(Rect1))

        XLocation=-1
        YLocation=-1
        StepSize=self.BSPCellSize
        PossibleTables=[]

        if MaxX>self.SimulationBoxX or MaxY>self.SimulationBoxY:
            raise Exception("Error In Coordinate Values or in the simulation Box Size:("+str(MaxX)+","+str(MaxY)+") > ("+str(self.SimulationBoxX)+","+str(self.SimulationBoxY))

        ### Intersection between two Rectangles
        ### http://silentmatt.com/rectangle-intersection/
        for X in numpy.arange(0,self.SimulationBoxX,StepSize):
            XLocation=XLocation+1
            YLocation=-1
            for Y in numpy.arange(0,self.SimulationBoxY,StepSize):
                YLocation=YLocation+1
                BX1=X;
                BX2=X+StepSize
                BY1=Y
                BY2=Y+StepSize
                Rect2=[BX1,BX2,BY1,BY2]
                if self.IntersectTwoRect(Rect1, Rect2)==True:
                    GetIntersectionWithCurrentBoundingRect="INSERT INTO TreeMapping VALUES("+str(TreeData[0]['TreeID'])+","+str(XLocation)+","+str(YLocation)+"); "
                    self.PGDB.DBConnection.ExecuteNoQuerySQLStatment(GetIntersectionWithCurrentBoundingRect)
                    PTableID=int((XLocation*self.CellsInX)+YLocation)
                    PossibleTables=numpy.hstack([PossibleTables,PTableID])

        FinalTableID=-1
        if len(PossibleTables)==1:
            FinalTableID=int(PossibleTables[0])
        elif len(PossibleTables)<=10 and len(PossibleTables)>0:
            FinalTableID=int(PossibleTables[randrange(len(PossibleTables))])
        else:
            FinalTableID=self.BigTableID #self.CellsInX*self.CellsInY
        logging.info("Final Table ID="+str(FinalTableID))
        return FinalTableID
repo_name: IntersectAustralia/asvo-tao
path: core/sageimport_mpi_HDF/SAGEReader.py
language: Python
license: gpl-3.0
size: 12776
keyword: ["Galaxy"]
text_hash: fa48f024a1f18a29da72737d67bf4f0cb0daafffedd126cca9219a14ecb5202e
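MapTreetoTableID in the record above assigns each merger tree to a BSP grid cell by intersecting the tree's bounding box with every cell. The intersection test, restated standalone (same logic as IntersectTwoRect; the rectangle convention is [x1, x2, y1, y2]):

def intersect_two_rect(rect_a, rect_b):
    # True when the two axis-aligned rectangles overlap
    # (cf. http://silentmatt.com/rectangle-intersection/).
    return (rect_a[0] < rect_b[1] and rect_a[1] > rect_b[0] and
            rect_a[2] < rect_b[3] and rect_a[3] > rect_b[2])

# A tree box straddling two neighbouring 10x10 BSP cells:
tree_box = [8.0, 12.0, 1.0, 4.0]
print(intersect_two_rect(tree_box, [0, 10, 0, 10]))   # True
print(intersect_two_rect(tree_box, [10, 20, 0, 10]))  # True
print(intersect_two_rect(tree_box, [20, 30, 0, 10]))  # False
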
#!/usr/bin/env python
# Author: Samuel Ponc\'e
# Date: 30/04/2013 -- 11/09/2014
# Version: 1.3
# Script to compute the ZPR

import sys
import os
import copy
try:
    from rf_mods_seq import system
except ImportError:
    import warnings
    warnings.warn("The system module is missing!")
    raise
from rf_mods_seq import zpm
import multiprocessing
from datetime import datetime
try:
    import numpy as N
except ImportError:
    import warnings
    warnings.warn("The numpy module is missing!")
    raise
from numpy import zeros
try:
    import netCDF4 as nc
except ImportError:
    import warnings
    warnings.warn("The netCDF4 module is missing!")
    raise

start = datetime.now()
print 'Start on %s/%s/%s at %sh%s ' %(start.day,start.month,start.year,start.hour,start.minute)

#############
# Constants #
#############
tol6 = 1E-6
tol8 = 1E-8
Ha2eV = 27.21138386
kb_HaK = 3.1668154267112283e-06

######################################################################################

# Interaction with the user
print """
 ____  ____       _                                           _
|  _ \|  _ \     | |_ ___ _ __ ___  _ __   ___ _ __ __ _| |_ _   _ _ __ ___
| |_) | |_) |____| __/ _ \ '_ ` _ \| '_ \ / _ \ '__/ _` | __| | | | '__/ _ \
|  __/|  __/_____| ||  __/ | | | | | |_) |  __/ | | (_| | |_| |_| | | |  __/
|_|   |_|         \__\___|_| |_| |_| .__/ \___|_|  \__,_|\__|\__,_|_|  \___|
                                   |_|
                                                                  Version 1.3
"""
print '\nThis script compute the static/dynamic zero-point motion \n\
and the temperature dependance of eigenenergies due to electron-phonon interaction.\n\
The static electronic lifetime can also be computed. \n\n\
WARNING: The first Q-point MUST be the Gamma point.\n'

# Enter the number of cpu on which you want to multi-thread
user_input = raw_input('Enter the number of cpu on which you want to multi-thread\n')
nb_cpus = user_input
try:
    nb_cpus = int(user_input)
except ValueError:
    raise Exception('The value you enter is not an integer!')

# Type of calculation the user want to perform
user_input = raw_input('Define the type of calculation you want to perform. Type:\n\
1 if you want to run a static AHC calculation\n\
2 if you want to run a dynamic AHC calculation\n\
3 if you want to run a static AHC calculation with control over active space\n\
Note that option 2 and 3 requires _FAN.nc files obtained through ABINIT option "ieig2rf 4\n')
type = N.int(user_input)

# Define the output file name
user_input = raw_input('Enter name of the output file\n')
output = user_input.strip()

# Enter the value of the smearing parameter for dynamic AHC
if (type == 2 or type == 3 ):
    user_input = raw_input('Enter value of the smearing parameter (in eV)\n')
    smearing = N.float(user_input)
    smearing = smearing/Ha2eV
else:
    smearing = None

# Temperature dependence analysis?
user_input = raw_input('Do you want to compute the change of eigenergies with temperature? [y/n]\n')
temperature = user_input.split()[0]
if temperature == 'y':
    temperature = True
    user_input = raw_input('Introduce the starting temperature, max temperature and steps. e.g. 0 2000 100\n')
    temp_info = user_input.split()
else:
    temperature = False
    temp_info = None

# Broadening lifetime of the electron
user_input = raw_input('Do you want to compute the lifetime of the electrons? [y/n]\n')
tmp = user_input.split()[0]
if tmp == 'y':
    lifetime = True
else:
    lifetime = False

# Get the nb of random Q-points from user
user_input = raw_input('Enter the number of random Q-points you have\n')
try:
    nbQ = int(user_input)
except ValueError:
    raise Exception('The value you enter is not an integer!')

# Get the path of the DDB files from user
DDB_files = []
for ii in N.arange(nbQ):
    user_input = raw_input('Enter the name of the %s DDB file\n' %ii)
    if len(user_input.split()) != 1:
        raise Exception("You should provide only 1 file")
    else:
        # Append and TRIM the input string with STRIP
        DDB_files.append(user_input.strip(' \t\n\r'))

# Test if the first file is at the Gamma point
DDBtmp = system(directory='.',filename=DDB_files[0])
if N.allclose(DDBtmp.iqpt,[0.0,0.0,0.0]) == False:
    raise Exception('The first Q-point is not Gamma!')

# Get the path of the eigq files from user
eigq_files = []
for ii in N.arange(nbQ):
    user_input = raw_input('Enter the name of the %s eigq file\n' %ii)
    if len(user_input.split()) != 1:
        raise Exception("You should provide only 1 file")
    else:
        eigq_files.append(user_input.strip(' \t\n\r'))

# Get the path of the EIGR2D files from user
EIGR2D_files = []
for ii in N.arange(nbQ):
    user_input = raw_input('Enter the name of the %s EIGR2D file\n' %ii)
    if len(user_input.split()) != 1:
        raise Exception("You should provide only 1 file")
    else:
        EIGR2D_files.append(user_input.strip(' \t\n\r'))

# Get the path of the EIGI2D files from user
if lifetime:
    EIGI2D_files = []
    for ii in N.arange(nbQ):
        user_input = raw_input('Enter the name of the %s EIGI2D file\n' %ii)
        if len(user_input.split()) != 1:
            raise Exception("You should provide only 1 file")
        else:
            EIGI2D_files.append(user_input.strip(' \t\n\r'))

# Get the path of the FAN files from user if dynamical calculation
if (type == 2 or type == 3):
    FAN_files = []
    for ii in N.arange(nbQ):
        user_input = raw_input('Enter the name of the %s FAN file\n' %ii)
        if len(user_input.split()) != 1:
            raise Exception("You should provide only 1 file")
        else:
            FAN_files.append(user_input.strip(' \t\n\r'))

# Take the EIG at Gamma
user_input = raw_input('Enter the name of the unperturbed EIG.nc file at Gamma\n')
if len(user_input.split()) != 1:
    raise Exception("You sould only provide 1 file")
else:
    eig0 = system(directory='.',filename=user_input.strip(' \t\n\r'))

# Read the EIGR2D file at Gamma and save it in ddw_save
EIGR2D = system(directory='.',filename=EIGR2D_files[0])
ddw_save = zeros((EIGR2D.nkpt,EIGR2D.nband,3,EIGR2D.natom,3,EIGR2D.natom),dtype=complex)
ddw_save = copy.deepcopy(EIGR2D.EIG2D)
if (type == 2 or type == 3):
    FAN = system(directory='.',filename=FAN_files[0])
    ddw_save2 = zeros((FAN.nkpt,FAN.nband,3,FAN.natom,3,FAN.natom,FAN.nband),dtype=complex)
    ddw_save2 = copy.deepcopy(FAN.FAN)
if (type == 1):
    # We put dummy argument
    ddw_save2 = 0.0

# Find the degenerate eigenstates
degen = zeros((EIGR2D.nkpt,EIGR2D.nband),dtype=int)
for ikpt in N.arange(EIGR2D.nkpt):
    count = 0
    for iband in N.arange(EIGR2D.nband):
        if iband != EIGR2D.nband-1:
            if N.allclose(eig0.EIG[0,ikpt,iband+1], eig0.EIG[0,ikpt,iband]):
                degen[ikpt,iband] = count
            else:
                degen[ikpt,iband] = count
                count += 1
                continue
        else:
            if N.allclose(eig0.EIG[0,ikpt,iband-1], eig0.EIG[0,ikpt,iband]):
                degen[ikpt,iband] = count
        if iband != 0:
            if N.allclose(eig0.EIG[0,ikpt,iband-1], eig0.EIG[0,ikpt,iband]):
                degen[ikpt,iband] = count
        else:
            if N.allclose(eig0.EIG[0,ikpt,iband+1], eig0.EIG[0,ikpt,iband]):
                degen[ikpt,iband] = count

# Create the random Q-integration (wtq=1/nqpt):
if (EIGR2D.wtq == 0):
    wtq = N.ones((nbQ))
    wtq = wtq*(1.0/nbQ)
else:
    wtq = N.zeros((nbQ))
#DBSP
#wtq = N.ones((nbQ))
#END
nbqpt = N.arange(nbQ)

# Compute phonon freq. and eigenvector for each Q-point
# from each DDB (1 qpt per DDB file)
vkpt = EIGR2D.nkpt
vband = EIGR2D.nband
tkpt = zeros((EIGR2D.nkpt,3))
tkpt = EIGR2D.kpt[:,:]
eig0_pass = copy.deepcopy(eig0.EIG)
if temperature:
    temp_info = N.arange(N.float(temp_info[0]),N.float(temp_info[1]),N.float(temp_info[2]))

if (type == 1):
    total = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files),ddw_save,ddw_save2,nb_cpus,type,temperature,\
        temp_info,smearing,eig0_pass,degen,lifetime=False)
    if lifetime:
        print "Now compute broadening ..."
        broadening = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGI2D_files),ddw_save,ddw_save2,nb_cpus,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime)
if (type == 2):
    if ((FAN.nkpt*FAN.nband*3*FAN.natom*3*FAN.natom*FAN.nband)*8.0)/(1024**2) < 1000: # If FAN file is smaller than 1000MB
        total = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files),ddw_save,ddw_save2,nb_cpus,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime=False)
    else: # Use sequential version without multiprocess threading ==> otherwise it breaks down
        total = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files),ddw_save,ddw_save2,1,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime=False)
    if lifetime:
        #raise Exception("Dynamical lifetime is not yet implemented.")
        print "WARNING:"
        print "Dynamical lifetime is not yet implemented...proceed with static lifetime"
        print "Now compute broadening ..."
        broadening = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGI2D_files),ddw_save,ddw_save2,nb_cpus,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime)
if (type == 3):
    if ((FAN.nkpt*FAN.nband*3*FAN.natom*3*FAN.natom*FAN.nband)*8.0)/(1024**2) < 1000: # If FAN file is smaller than 1000MB
        total = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files),ddw_save,ddw_save2,nb_cpus,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime=False)
    else: # Use sequential version without multiprocess threading ==> otherwise it breaks down
        total = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files),ddw_save,ddw_save2,1,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime=False)
    if lifetime:
        print "Now compute broadening ..."
        broadening = zpm(zip(nbqpt,wtq,eigq_files,DDB_files,EIGI2D_files),ddw_save,ddw_save2,nb_cpus,type,temperature,\
            temp_info,smearing,eig0_pass,degen,lifetime)

total_corr = total.total_corr
if lifetime:
    brd_total = broadening.broadening
if (EIGR2D.wtq != 0):
    total_wtq = total.total_wtq
    print "Total weigth is ",total_wtq
    if (total_wtq < 0.9 or total_wtq > 1.1):
        raise Exception("The total weigth is not equal to 1.0. Check that you provide all the q-points.")

# Report wall time (before writing final result to be able to include it)
end = datetime.now()
print 'End on %s/%s/%s at %s h %s ' %(end.day,end.month,end.year,end.hour,end.minute)
runtime = end - start
print "Runtime: %s seconds (or %s minutes)" %(runtime.seconds,float(runtime.seconds)/60.0)

if temperature:
    if lifetime:
        # Write on a NC files with etsf-io name convention
        ncfile = nc.Dataset(str(output)+'_EP.nc','w')
        # Read dim from first EIGR2D file
        root = nc.Dataset(EIGR2D_files[0],'r')
        # Determine nsppol from reading occ
        nsppol = len(root.variables['occupations'][:,0,0])
        if nsppol > 1:
            print "WARNING: nsppol > 1 has not been tested."
        mband = len(root.dimensions['product_mband_nsppol'])/nsppol
        # Create dimension
        ncfile.createDimension('number_of_atoms',len(root.dimensions['number_of_atoms']))
        ncfile.createDimension('number_of_kpoints',len(root.dimensions['number_of_kpoints']))
        ncfile.createDimension('product_mband_nsppol',len(root.dimensions['product_mband_nsppol']))
        ncfile.createDimension('cartesian',3)
        ncfile.createDimension('cplex',2)
        ncfile.createDimension('number_of_qpoints',nbQ)
        ncfile.createDimension('number_of_spins',len(root.dimensions['number_of_spins']))
        ncfile.createDimension('max_number_of_states',mband)
        ncfile.createDimension('number_of_temperature',len(temp_info))
        # Create variable
        data = ncfile.createVariable('reduced_coordinates_of_kpoints','d',('number_of_kpoints','cartesian'))
        data[:,:] = root.variables['reduced_coordinates_of_kpoints'][:,:]
        data = ncfile.createVariable('eigenvalues','d',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['eigenvalues'][:,:,:]
        data = ncfile.createVariable('occupations','i',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['occupations'][:,:,:]
        data = ncfile.createVariable('primitive_vectors','d',('cartesian','cartesian'))
        data[:,:] = root.variables['primitive_vectors'][:,:]
        data = ncfile.createVariable('temperature','d',('number_of_temperature'))
        data[:] = temp_info
        data = ncfile.createVariable('zero_point_motion','d',('number_of_spins','number_of_temperature','number_of_kpoints',\
            'max_number_of_states','cplex'))
        data[0,:,:,:,0] = total_corr[0,:,:,:].real
        data[0,:,:,:,1] = brd_total[:,:,:].real
        # Close the file
        ncfile.close()
    else:
        # Write on a NC files with etsf-io name convention
        ncfile = nc.Dataset(str(output)+'_EP.nc','w')
        # Read dim from first EIGR2D file
        root = nc.Dataset(EIGR2D_files[0],'r')
        # Determine nsppol from reading occ
        nsppol = len(root.variables['occupations'][:,0,0])
        if nsppol > 1:
            print "WARNING: nsppol > 1 has not been tested."
        mband = len(root.dimensions['product_mband_nsppol'])/nsppol
        # Create dimension
        ncfile.createDimension('number_of_atoms',len(root.dimensions['number_of_atoms']))
        ncfile.createDimension('number_of_kpoints',len(root.dimensions['number_of_kpoints']))
        ncfile.createDimension('product_mband_nsppol',len(root.dimensions['product_mband_nsppol']))
        ncfile.createDimension('cartesian',3)
        ncfile.createDimension('cplex',2)
        ncfile.createDimension('number_of_qpoints',nbQ)
        ncfile.createDimension('number_of_spins',len(root.dimensions['number_of_spins']))
        ncfile.createDimension('max_number_of_states',mband)
        ncfile.createDimension('number_of_temperature',len(temp_info))
        # Create variable
        data = ncfile.createVariable('reduced_coordinates_of_kpoints','d',('number_of_kpoints','cartesian'))
        data[:,:] = root.variables['reduced_coordinates_of_kpoints'][:,:]
        data = ncfile.createVariable('eigenvalues','d',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['eigenvalues'][:,:,:]
        data = ncfile.createVariable('occupations','i',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['occupations'][:,:,:]
        data = ncfile.createVariable('primitive_vectors','d',('cartesian','cartesian'))
        data[:,:] = root.variables['primitive_vectors'][:,:]
        data = ncfile.createVariable('temperature','d',('number_of_temperature'))
        data[:] = temp_info
        data = ncfile.createVariable('zero_point_motion','d',('number_of_spins','number_of_temperature','number_of_kpoints',\
            'max_number_of_states','cplex'))
        data[0,:,:,:,0] = total_corr[0,:,:,:].real
        data[0,:,:,:,1] = 0.0
        # Close the file
        ncfile.close()
else:
    if lifetime:
        # Write on a NC files with etsf-io name convention
        ncfile = nc.Dataset(str(output)+'_EP.nc','w')
        # Read dim from first EIGR2D file
        root = nc.Dataset(EIGR2D_files[0],'r')
        # Determine nsppol from reading occ
        nsppol = len(root.variables['occupations'][:,0,0])
        if nsppol > 1:
            print "WARNING: nsppol > 1 has not been tested."
        mband = len(root.dimensions['product_mband_nsppol'])/nsppol
        # Create dimension
        ncfile.createDimension('number_of_atoms',len(root.dimensions['number_of_atoms']))
        ncfile.createDimension('number_of_kpoints',len(root.dimensions['number_of_kpoints']))
        ncfile.createDimension('product_mband_nsppol',len(root.dimensions['product_mband_nsppol']))
        ncfile.createDimension('cartesian',3)
        ncfile.createDimension('cplex',2)
        ncfile.createDimension('number_of_qpoints',nbQ)
        ncfile.createDimension('number_of_spins',len(root.dimensions['number_of_spins']))
        ncfile.createDimension('max_number_of_states',mband)
        ncfile.createDimension('number_of_temperature',1)
        # Create variable
        data = ncfile.createVariable('reduced_coordinates_of_kpoints','d',('number_of_kpoints','cartesian'))
        data[:,:] = root.variables['reduced_coordinates_of_kpoints'][:,:]
        data = ncfile.createVariable('eigenvalues','d',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['eigenvalues'][:,:,:]
        data = ncfile.createVariable('occupations','i',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['occupations'][:,:,:]
        data = ncfile.createVariable('primitive_vectors','d',('cartesian','cartesian'))
        data[:,:] = root.variables['primitive_vectors'][:,:]
        data = ncfile.createVariable('temperature','d',('number_of_temperature'))
        data[:] = 0.0
        data = ncfile.createVariable('zero_point_motion','d',('number_of_spins','number_of_temperature','number_of_kpoints',\
            'max_number_of_states','cplex'))
        data[0,0,:,:,0] = total_corr[0,:,:].real
        data[0,0,:,:,1] = brd_total[:,:].real
        # Close the file
        ncfile.close()
    else:
        # Write on a NC files with etsf-io name convention
        ncfile = nc.Dataset(str(output)+'_EP.nc','w')
        # Read dim from first EIGR2D file
        root = nc.Dataset(EIGR2D_files[0],'r')
        # Determine nsppol from reading occ
        nsppol = len(root.variables['occupations'][:,0,0])
        if nsppol > 1:
            print "WARNING: nsppol > 1 has not been tested."
        mband = len(root.dimensions['product_mband_nsppol'])/nsppol
        # Create dimension
        ncfile.createDimension('number_of_atoms',len(root.dimensions['number_of_atoms']))
        ncfile.createDimension('number_of_kpoints',len(root.dimensions['number_of_kpoints']))
        ncfile.createDimension('product_mband_nsppol',len(root.dimensions['product_mband_nsppol']))
        ncfile.createDimension('cartesian',3)
        ncfile.createDimension('cplex',2)
        ncfile.createDimension('number_of_qpoints',nbQ)
        ncfile.createDimension('number_of_spins',len(root.dimensions['number_of_spins']))
        ncfile.createDimension('max_number_of_states',mband)
        ncfile.createDimension('number_of_temperature',1)
        # Create variable
        data = ncfile.createVariable('reduced_coordinates_of_kpoints','d',('number_of_kpoints','cartesian'))
        data[:,:] = root.variables['reduced_coordinates_of_kpoints'][:,:]
        data = ncfile.createVariable('eigenvalues','d',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['eigenvalues'][:,:,:]
        data = ncfile.createVariable('occupations','i',('number_of_spins','number_of_kpoints','max_number_of_states'))
        data[:,:,:] = root.variables['occupations'][:,:,:]
        data = ncfile.createVariable('primitive_vectors','d',('cartesian','cartesian'))
        data[:,:] = root.variables['primitive_vectors'][:,:]
        data = ncfile.createVariable('temperature','d',('number_of_temperature'))
        data[:] = 0.0
        data = ncfile.createVariable('zero_point_motion','d',('number_of_spins','number_of_temperature','number_of_kpoints',\
            'max_number_of_states','cplex'))
        data[0,0,:,:,0] = total_corr[0,:,:].real
        data[0,0,:,:,1] = 0.0
        # Close the file
        ncfile.close()

# Write the results into the output file
if temperature:
    with open(str(output)+".txt","w") as O:
        O.write("Total correction of the ZPM (eV) for "+str(nbQ)+" Q points\n")
        for ikpt in N.arange(vkpt):
            O.write('Kpt: '+str(tkpt[ikpt,:])+"\n")
            j = 1
            for ii in (total_corr[0,0,ikpt,:].real*Ha2eV):
                # Create a new line every 6 values
                if (j%6 == 0 and j !=0):
                    O.write(str(ii)+'\n')
                    j += 1
                elif j == vband:
                    O.write(str(ii)+'\n')
                else:
                    O.write(str(ii)+' ')
                    j += 1
        O.write("Temperature dependence at Gamma\n")
        for iband in N.arange(vband):
            O.write('Band: '+str(iband)+"\n")
            tt = 0
            for T in temp_info:
                O.write(str(T)+" "+str(total_corr[0,tt,0,iband].real*Ha2eV)+"\n")
                tt += 1
        O.write("Fan/DDW contribution at Gamma:\n")
        for iband in N.arange(vband):
            O.write('Band: '+str(iband)+" FAN: "+str(total_corr[1,0,0,iband].real*Ha2eV)+"\n")
            O.write('     '+ " DDW: "+str(-total_corr[2,0,0,iband].real*Ha2eV)+"\n")
            O.write('     '+ " TOTAL: "+str(total_corr[0,0,0,iband].real*Ha2eV)+"\n")
        O.write("Runtime: "+str(runtime.seconds)+' seconds (or '+str(float(runtime.seconds)/60.0)+' minutes)')
    if lifetime:
        with open(str(output)+"_BRD.txt","w") as O:
            O.write("Total correction of the ZPM (eV) for "+str(nbQ)+" Q points\n")
            for ikpt in N.arange(vkpt):
                O.write('Kpt: '+str(tkpt[ikpt,:])+"\n")
                j = 1
                for ii in (brd_total[0,ikpt,:].real*Ha2eV):
                    # Create a new line every 6 values
                    if (j%6 == 0 and j !=0):
                        O.write(str(ii)+'\n')
                        j += 1
                    elif j == vband:
                        O.write(str(ii)+'\n')
                    else:
                        O.write(str(ii)+' ')
                        j += 1
            O.write("Temperature dependence at Gamma\n")
            for iband in N.arange(vband):
                O.write('Band: '+str(iband)+"\n")
                tt = 0
                for T in temp_info:
                    O.write(str(T)+" "+str(brd_total[tt,0,iband].real*Ha2eV)+"\n")
                    tt += 1
            O.write("Runtime: "+str(runtime.seconds)+' seconds (or '+str(float(runtime.seconds)/60.0)+' minutes)')
else:
    with open(str(output)+".txt","w") as O:
        O.write("Total correction of the ZPM (eV) for "+str(nbQ)+" Q points\n")
        for ikpt in N.arange(vkpt):
            O.write('Kpt: '+str(tkpt[ikpt,:])+"\n")
            j = 1
            for ii in (total_corr[0,ikpt,:].real*Ha2eV):
                # Create a new line every 6 values
                if (j%6 == 0 and j !=0):
                    O.write(str(ii)+'\n')
                    j += 1
                elif j == vband:
                    O.write(str(ii)+'\n')
                else:
                    O.write(str(ii)+' ')
                    j += 1
        O.write("Fan/DDW contribution at Gamma:\n")
        for iband in N.arange(vband):
            O.write('Band: '+str(iband)+" FAN: "+str(total_corr[1,0,iband].real*Ha2eV)+"\n")
            O.write('     '+ " DDW: "+str(-total_corr[2,0,iband].real*Ha2eV)+"\n")
            O.write('     '+ " TOTAL: "+str(total_corr[0,0,iband].real*Ha2eV)+"\n")
        O.write("Runtime: "+str(runtime.seconds)+' seconds (or '+str(float(runtime.seconds)/60.0)+' minutes)')
    if lifetime:
        with open(str(output)+"_BRD.txt","w") as O:
            O.write("Total correction of the ZPM (eV) for "+str(nbQ)+" Q points\n")
            for ikpt in N.arange(vkpt):
                O.write('Kpt: '+str(tkpt[ikpt,:])+"\n")
                j = 1
                for ii in (brd_total[ikpt,:].real*Ha2eV):
                    # Create a new line every 6 values
                    if (j%6 == 0 and j !=0):
                        O.write(str(ii)+'\n')
                        j += 1
                    elif j == vband:
                        O.write(str(ii)+'\n')
                    else:
                        O.write(str(ii)+' ')
                        j += 1
            O.write("Runtime: "+str(runtime.seconds)+' seconds (or '+str(float(runtime.seconds)/60.0)+' minutes)')
repo_name: jmbeuken/abinit
path: scripts/deprecated/temperature_seq.py
language: Python
license: gpl-3.0
size: 22982
keyword: ["ABINIT"]
text_hash: d97ee4e8c22064e830f689b33991ee448e5fd4621b9248ab97a5aeb92ebe919a
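The four output branches in the script above all follow the same etsf-io netCDF pattern: copy dimensions from the first EIGR2D file, then store a (spin, temperature, kpt, state, cplex) array. A minimal self-contained sketch of that pattern with made-up dimension sizes (the real script reads them from its input files):

import numpy as np
import netCDF4 as nc

ncfile = nc.Dataset('example_EP.nc', 'w')
# Illustrative sizes; the script takes these from EIGR2D_files[0].
ncfile.createDimension('number_of_spins', 1)
ncfile.createDimension('number_of_temperature', 3)
ncfile.createDimension('number_of_kpoints', 2)
ncfile.createDimension('max_number_of_states', 4)
ncfile.createDimension('cplex', 2)
data = ncfile.createVariable('zero_point_motion', 'd',
                             ('number_of_spins', 'number_of_temperature',
                              'number_of_kpoints', 'max_number_of_states',
                              'cplex'))
data[0, :, :, :, 0] = np.zeros((3, 2, 4))  # real slot: ZP correction
data[0, :, :, :, 1] = 0.0                  # second slot: broadening (or 0)
ncfile.close()
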
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for module colab_evaluation.py."""

import gzip
import os
import time
import math

from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd

import colab_evaluation
import inference
import tensorflow.compat.v1 as tf

FLAGS = flags.FLAGS


class ColabEvaluationTest(parameterized.TestCase):

  def _generate_random_inferences(self, n):
    serialized_inferences = []
    accessions_list = []
    activations_list = []
    for _ in range(n):
      accession = f"ACCESSION_{time.time()}"
      activations = np.random.rand(100)
      accessions_list.append(accession)
      activations_list.append(activations)
      serialized_inferences.append(
          inference.serialize_inference_result(accession, activations))
    return serialized_inferences, accessions_list, activations_list

  @parameterized.parameters([{'batch_size': 1}, {'batch_size': 9}])
  def test_batched_inferences_from_dir(self, batch_size, num_examples=100):
    # Create input inference results.
    serialized_inferences, accessions_list, activations_list = self._generate_random_inferences(
        num_examples)
    shard_1_contents = b"\n".join(serialized_inferences[0:60])
    shard_2_contents = b"\n".join(serialized_inferences[60:])
    shard_dir = self.create_tempdir()
    shard_1_filename = shard_dir.create_file('shard_1').full_path
    shard_2_filename = shard_dir.create_file('shard_2').full_path

    # Write contents to a gzipped file.
    with tf.io.gfile.GFile(shard_1_filename, 'wb') as f:
      with gzip.GzipFile(fileobj=f, mode='wb') as f_gz:
        f_gz.write(shard_1_contents)
    with tf.io.gfile.GFile(shard_2_filename, 'wb') as f:
      with gzip.GzipFile(fileobj=f, mode='wb') as f_gz:
        f_gz.write(shard_2_contents)

    # Read these shards.
    iterator = colab_evaluation.batched_inferences_from_dir(
        shard_dir.full_path, batch_size=batch_size)
    actual = list(iterator)

    # Check output.
    self.assertEqual(len(actual), math.ceil(num_examples / batch_size))
    self.assertEqual(actual[0][0][0], accessions_list[0])
    if batch_size > 1:
      self.assertEqual(actual[1][0][1], accessions_list[batch_size + 1])
    np.testing.assert_equal(actual[0][1][0], activations_list[0])
    if batch_size > 1:
      np.testing.assert_equal(actual[1][1][1],
                              activations_list[batch_size + 1])

  def test_make_tidy_df_from_seq_names_and_prediction_array(self):
    vocab = ["ENTRY0", "ENTRY1", "ENTRY2"]
    sequence_names = ['SEQ0', 'SEQ1']
    predictions_array = np.array([[0.1, 0.9, 0.5], [1, 1, 1]])
    min_decision_threshold = 0.4
    actual_df = colab_evaluation._make_tidy_df_from_seq_names_and_prediction_array(
        sequence_names,
        predictions_array,
        vocab,
        min_decision_threshold=min_decision_threshold)
    expected_df = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
        'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
        'value': [0.9, 0.5, 1.0, 1.0, 1.0]
    })
    pd.testing.assert_frame_equal(actual_df, expected_df)

  def test_make_tidy_df_from_ground_truth(self):
    input_df = pd.DataFrame({
        'sequence_name': ['SEQ0', 'SEQ1', 'SEQ2', 'SEQ3'],
        'true_label': [['ENTRY1'], ['ENTRY1', 'ENTRY2'], [], ['ENTRY6']]
    })
    actual_df = colab_evaluation.make_tidy_df_from_ground_truth(input_df)
    expected_df = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
        'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
        'gt': [True, True, True, True]
    })
    pd.testing.assert_frame_equal(actual_df, expected_df)

  def test_merge_predictions_and_ground_truth(self):
    pred = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
        'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
        'value': [0.9, 0.5, 1.0, 1.0, 1.0]
    })
    gt = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
        'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
        'gt': [True, True, True, True]
    })
    actual_df = colab_evaluation.merge_predictions_and_ground_truth(pred, gt)
    expected_df = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1', 'SEQ3'],
        'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
        'value': [0.9, 0.5, 1.0, 1.0, 1.0, False],
        'gt': [True, False, False, True, True, True]
    })
    pd.testing.assert_frame_equal(actual_df, expected_df)

  def test_get_pr_curve_df(self):
    pred = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
        'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
        'value': [0.9, 0.5, 1.0, 1.0, 1.0]
    })
    gt = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
        'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
        'gt': [True, True, True, True]
    })
    pr_curve = colab_evaluation.get_pr_curve_df(pred, gt, filtered=False)
    np.testing.assert_almost_equal(pr_curve['recall'],
                                   np.array([1, 0.75, 0.75, .5]))
    np.testing.assert_almost_equal(
        pr_curve['precision'], np.array([0.6666667, 0.6, 0.75, 0.6666667]))
    np.testing.assert_almost_equal(
        pr_curve['f1'], np.array([0.8, 0.6666667, 0.75, 0.5714286]))

  def test_assign_tp_fp_fn(self):
    pred = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
        'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
        'value': [0.9, 0.5, 1.0, 1.0, 1.0]
    })
    gt = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
        'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
        'gt': [True, True, True, True]
    })
    tp_fp_fn = colab_evaluation.assign_tp_fp_fn(pred, gt, threshold=0.5)
    expected = pd.DataFrame({
        'tp': [True, False, False, True, True, False],
        'fp': [False, False, True, False, False, False],
        'fn': [False, False, False, False, False, True]
    })
    actual = tp_fp_fn.loc[:, ["tp", "fp", "fn"]]
    pd.testing.assert_frame_equal(expected, actual)

  def test_apply_threshold_and_return_stats(self):
    pred = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
        'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
        'value': [0.9, 0.5, 1.0, 1.0, 1.0]
    })
    gt = pd.DataFrame({
        'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
        'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
        'gt': [True, True, True, True]
    })
    actual = colab_evaluation.apply_threshold_and_return_stats(
        pred, gt,
        grouping={"ENTRY0": 'A', "ENTRY1": 'A', "ENTRY2": 'A', "ENTRY6": 'A'})
    expected = pd.DataFrame({
        'group': ['A'],
        'tp': [3.0],
        'fp': [1.0],
        'fn': [1.0],
        'precision': [0.75],
        'recall': [0.75],
        'f1': [0.75],
        'count': [4.0],
        'proportion': [1.0],
        'proportion_text': ['100.0%'],
        'threshold': [0.5]
    })
    pd.testing.assert_frame_equal(actual, expected, check_dtype=False)

  def test_read_blast_table(self):
    actual = colab_evaluation.read_blast_table("testdata/blast.tsv")
    expected = pd.DataFrame({'up_id': ['ABC'],
                             'target': ['DEF'],
                             'pc_identity': [50],
                             'alignment_length': [100],
                             'bit_score': [500]})
    pd.testing.assert_frame_equal(actual, expected)


if __name__ == '__main__':
  absltest.main()
repo_name: google-research/proteinfer
path: colab_evaluation_test.py
language: Python
license: apache-2.0
size: 8978
keyword: ["BLAST"]
text_hash: 8615f12979c2bc1ef9ca7b999b2f065477846825d55b4197884674e1739fa4ad
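The tests above revolve around one evaluation idea: outer-join tidy predictions with tidy ground truth, fill the gaps, and count tp/fp/fn at a threshold. A standalone pandas sketch of that bookkeeping (not the colab_evaluation API, just the same logic on frames shaped like the test fixtures):

import pandas as pd

pred = pd.DataFrame({'up_id': ['SEQ0', 'SEQ1'],
                     'label': ['ENTRY1', 'ENTRY0'],
                     'value': [0.9, 1.0]})
gt = pd.DataFrame({'up_id': ['SEQ0', 'SEQ3'],
                   'label': ['ENTRY1', 'ENTRY6'],
                   'gt': [True, True]})

merged = pred.merge(gt, on=['up_id', 'label'], how='outer')
merged['value'] = merged['value'].fillna(0.0)  # unpredicted pairs score 0
merged['gt'] = merged['gt'].fillna(False)      # unlabeled pairs are negative

threshold = 0.5
hit = merged['value'] >= threshold
tp = (hit & merged['gt']).sum()
fp = (hit & ~merged['gt']).sum()
fn = (~hit & merged['gt']).sum()
print(tp / (tp + fp), tp / (tp + fn))  # precision 0.5, recall 0.5
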
"""This demo program uses the interface to SNES solver for variational inequalities to solve a contact mechanics problems in FEniCS. The example considers a heavy hyperelastic circle in a box of the same size""" # Copyright (C) 2012 Corrado Maurini # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # Modified by Corrado Maurini 2013 from dolfin import * import matplotlib.pyplot as plt # This demo requires PETSc if not has_petsc(): print("DOLFIN must be compiled with PETSc to run this demo.") exit(0) # Create mesh mesh = Mesh("../circle_yplane.xml.gz") V = VectorFunctionSpace(mesh, "Lagrange", 1) # Define functions du = TrialFunction(V) # Incremental displacement v = TestFunction(V) # Test function u = Function(V) # Displacement from previous iteration B = Constant((0.0, -0.05)) # Body force per unit volume # Kinematics I = Identity(len(u)) # Identity tensor F = I + grad(u) # Deformation gradient C = F.T*F # Right Cauchy-Green tensor # Invariants of deformation tensors Ic = tr(C) J = det(F) # Elasticity parameters E, nu = 10.0, 0.3 mu, lmbda = Constant(E/(2*(1 + nu))), Constant(E*nu/((1 + nu)*(1 - 2*nu))) # Stored strain energy density (compressible neo-Hookean model) psi = (mu/2)*(Ic - 2) - mu*ln(J) + (lmbda/2)*(ln(J))**2 # Total potential energy Pi = psi*dx - dot(B, u)*dx # Compute first variation of Pi (directional derivative about u in the # direction of v) F = derivative(Pi, u, v) # Compute Jacobian of F J = derivative(F, u, du) # Symmetry condition (to block rigid body rotations) tol = mesh.hmin() def symmetry_line(x): return abs(x[0]) < DOLFIN_EPS bc = DirichletBC(V.sub(0), 0., symmetry_line, method="pointwise") # The displacement u must be such that the current configuration x+u # remains in the box [xmin,xmax] x [umin,ymax] constraint_u = Expression(("xmax - x[0]","ymax - x[1]"), xmax=1.0+DOLFIN_EPS, ymax=1.0, degree=1) constraint_l = Expression(("xmin - x[0]","ymin - x[1]"), xmin=-1.0-DOLFIN_EPS, ymin=-1.0, degree=1) umin = interpolate(constraint_l, V) umax = interpolate(constraint_u, V) # Define the solver parameters snes_solver_parameters = {"nonlinear_solver": "snes", "snes_solver": {"linear_solver": "lu", "maximum_iterations": 20, "report": True, "error_on_nonconvergence": False}} # Set up the non-linear problem problem = NonlinearVariationalProblem(F, u, bc, J=J) problem.set_bounds(umin, umax) # Set up the non-linear solver solver = NonlinearVariationalSolver(problem) solver.parameters.update(snes_solver_parameters) info(solver.parameters, True) # Solve the problem (iter, converged) = solver.solve() # Check for convergence if not converged: warning("This demo is a complex nonlinear problem. Convergence is not guaranteed when modifying some parameters or using PETSC 3.2.") # Save solution in VTK format file = File("displacement.pvd") file << u # plot the current configuration plot(u, mode="displacement", wireframe=True, title="Displacement field") plt.show()
FEniCS/dolfin
demo/undocumented/contact-vi-snes/python/demo_contact-vi-snes.py
Python
lgpl-3.0
3,873
[ "VTK" ]
06952a6467cbb39d47ef3cc135f7885bda5bf4fb302adac1c7f9c2826cdf54dd
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""CP2K atomic wavefunctions"""

import numpy as np

from horton.gbasis.iobas import str_to_shell_types
from horton.gbasis.cext import GOBasis, fac2
from horton.meanfield.orbitals import Orbitals

__all__ = ['load_atom_cp2k']


def _get_cp2k_norm_corrections(l, alphas):
    """Compute the corrections for the normalization of the basis functions.

    This correction is needed because the CP2K atom code works with
    non-normalized basis functions. HORTON assumes Gaussian primitives are
    always normalized.

    Parameters
    ----------
    l : int
        The angular momentum of the (pure) basis function. (s=0, p=1, ...)
    alphas : float or np.ndarray
        The exponent or exponents of the Gaussian primitives for which the
        correction is to be computed.

    Returns
    -------
    corrections : float or np.ndarray
        The scale factor for the expansion coefficients of the wavefunction in
        terms of primitive Gaussians. The inverse of this correction can be
        applied to the contraction coefficients.
    """
    expzet = 0.25*(2*l + 3)
    prefac = np.sqrt(np.sqrt(np.pi)/2.0**(l + 2)*fac2(2*l + 1))
    zeta = 2.0*alphas
    return zeta**expzet/prefac


def _read_cp2k_contracted_obasis(f):
    """Read a contracted basis set from an open CP2K ATOM output file.

    Parameters
    ----------
    f : file
        An open readable file object.

    Returns
    -------
    obasis : GOBasis
        The orbital basis read from the file.
    """
    # Load the relevant data from the file
    basis_desc = []
    for line in f:
        if line.startswith(' *******************'):
            break
        elif line[3:12] == 'Functions':
            shell_type = str_to_shell_types(line[1:2], pure=True)[0]
            a = []  # exponents (alpha)
            c = []  # contraction coefficients
            basis_desc.append((shell_type, a, c))
        else:
            values = [float(w) for w in line.split()]
            a.append(values[0])   # one exponent per line
            c.append(values[1:])  # many contraction coefficients per line

    # Convert the basis into HORTON format
    shell_map = []
    shell_types = []
    nprims = []
    alphas = []
    con_coeffs = []
    for shell_type, a, c in basis_desc:
        # get correction to contraction coefficients. CP2K uses different
        # normalization conventions.
        corrections = _get_cp2k_norm_corrections(abs(shell_type), np.array(a))
        c = np.array(c)/corrections.reshape(-1, 1)
        # fill in arrays
        for col in c.T:
            shell_map.append(0)
            shell_types.append(shell_type)
            nprims.append(len(col))
            alphas.extend(a)
            con_coeffs.extend(col)

    # Create the basis object
    coordinates = np.zeros((1, 3))
    shell_map = np.array(shell_map)
    nprims = np.array(nprims)
    shell_types = np.array(shell_types)
    alphas = np.array(alphas)
    con_coeffs = np.array(con_coeffs)
    obasis = GOBasis(coordinates, shell_map, nprims, shell_types, alphas, con_coeffs)
    return obasis


def _read_cp2k_uncontracted_obasis(f):
    """Read an uncontracted basis set from an open CP2K ATOM output file.

    Parameters
    ----------
    f : file
        An open readable file object.

    Returns
    -------
    obasis : GOBasis
        The orbital basis read from the file.
    """
    # Load the relevant data from the file
    basis_desc = []
    shell_type = None
    for line in f:
        if line.startswith(' *******************'):
            break
        elif line[3:13] == 'Exponents:':
            shell_type = str_to_shell_types(line[1:2], pure=True)[0]
        words = line.split()
        if len(words) >= 2:
            # read the exponent
            alpha = float(words[-1])
            basis_desc.append((shell_type, alpha))

    # Convert the basis into HORTON format
    shell_map = []
    shell_types = []
    nprims = []
    alphas = []
    con_coeffs = []
    # fill in arrays
    for shell_type, alpha in basis_desc:
        correction = _get_cp2k_norm_corrections(abs(shell_type), alpha)
        shell_map.append(0)
        shell_types.append(shell_type)
        nprims.append(1)
        alphas.append(alpha)
        con_coeffs.append(1.0 / correction)

    # Create the basis object
    centers = np.zeros((1, 3))
    shell_map = np.array(shell_map)
    nprims = np.array(nprims)
    shell_types = np.array(shell_types)
    alphas = np.array(alphas)
    con_coeffs = np.array(con_coeffs)
    obasis = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    return obasis


def _read_cp2k_obasis(f):
    """Read a basis set from an open CP2K ATOM output file."""
    f.next()  # Skip empty line
    line = f.next()  # Check for contracted versus uncontracted
    if line == ' ********************** Contracted Gaussian Type Orbitals '\
               '**********************\n':
        return _read_cp2k_contracted_obasis(f)
    elif line == ' ********************* Uncontracted Gaussian Type Orbitals '\
                 '*********************\n':
        return _read_cp2k_uncontracted_obasis(f)
    else:
        raise IOError('Could not find basis set in CP2K ATOM output.')


def _read_cp2k_occupations_energies(f, restricted):
    """Read orbital occupation numbers and energies from an open CP2K ATOM output file.

    Parameters
    ----------
    f : file
        An open readable file object.
    restricted : bool
        Is wavefunction restricted or unrestricted?

    Returns
    -------
    oe_alpha, oe_beta : list
        A list with orbital properties. Each element is a tuple with the
        following info: (angular_momentum l, spin component: 'alpha' or
        'beta', occupation number, orbital energy).
    """
    oe_alpha = []
    oe_beta = []
    empty = 0
    while empty < 2:
        line = f.next()
        words = line.split()
        if len(words) == 0:
            empty += 1
            continue
        empty = 0
        s = int(words[0])
        l = int(words[2 - restricted])
        occ = float(words[3 - restricted])
        ener = float(words[4 - restricted])
        if restricted or words[1] == 'alpha':
            oe_alpha.append((l, s, occ, ener))
        else:
            oe_beta.append((l, s, occ, ener))
    return oe_alpha, oe_beta


def _read_cp2k_orbital_coeffs(f, oe):
    """Read the expansion coefficients of the orbital from an open CP2K ATOM output.

    Parameters
    ----------
    f : file
        An open readable file object.
    oe : list
        The orbital occupation numbers and energies read with
        ``_read_cp2k_occupations_energies``.

    Returns
    -------
    result : dict
        Key is an (l, s) pair and value is an array with orbital coefficients.
    """
    coeffs = {}
    f.next()
    while len(coeffs) < len(oe):
        line = f.next()
        assert line.startswith(" ORBITAL L =")
        words = line.split()
        l = int(words[3])
        s = int(words[6])
        c = []
        while True:
            line = f.next()
            if len(line.strip()) == 0:
                break
            c.append(float(line))
        coeffs[(l, s)] = np.array(c)
    return coeffs


def _get_norb_nel(oe):
    """Return number of orbitals and electrons.

    Parameters
    ----------
    oe : list
        The orbital occupation numbers and energies read with
        ``_read_cp2k_occupations_energies``.
    """
    norb = 0
    nel = 0
    for row in oe:
        norb += 2*row[0] + 1
        nel += row[2]
    return norb, nel


def _fill_orbitals(orb, oe, coeffs, shell_types, restricted):
    """Fill in orbital coefficients, energies and occupation numbers in ``orb``.

    Parameters
    ----------
    orb : Orbitals
        An object to represent the orbitals
    oe : list
        The orbital occupation numbers and energies read with
        ``_read_cp2k_occupations_energies``.
    coeffs : dict
        The orbital coefficients read with ``_read_cp2k_orbital_coeffs``.
    shell_types : np.ndarray
        The array with shell types of the GOBasis instance.
    restricted : bool
        Is wavefunction restricted or unrestricted?
    """
    # Find the offsets for each angular momentum
    offset = 0
    offsets = []
    ls = abs(shell_types)
    for l in sorted(set(ls)):
        offsets.append(offset)
        offset += (2*l + 1)*(l == ls).sum()
    del offset

    # Fill in the coefficients
    iorb = 0
    for l, s, occ, ener in oe:
        cs = coeffs.get((l, s))
        stride = 2*l + 1
        for m in xrange(-l, l+1):
            im = m + l
            orb.energies[iorb] = ener
            orb.occupations[iorb] = occ/float((restricted + 1)*(2*l + 1))
            for ic in xrange(len(cs)):
                orb.coeffs[offsets[l] + stride*ic + im, iorb] = cs[ic]
            iorb += 1


def load_atom_cp2k(filename):
    """Load data from a CP2K ATOM computation.

    Parameters
    ----------
    filename : str
        The name of the cp2k out file

    Returns
    -------
    results : dict
        Contains: ``obasis``, ``orb_alpha``, ``coordinates``, ``numbers``,
        ``energy``, ``pseudo_numbers``. May contain: ``orb_beta``.

    Notes
    -----
    This function assumes that the following subsections are present in the
    CP2K ATOM input file, in the section ``ATOM%PRINT``:

    .. code-block:: text

      &PRINT
        &POTENTIAL
        &END POTENTIAL
        &BASIS_SET
        &END BASIS_SET
        &ORBITALS
        &END ORBITALS
      &END PRINT
    """
    with open(filename) as f:
        # Find the element number
        number = None
        for line in f:
            if line.startswith(' Atomic Energy Calculation'):
                number = int(line[-5:-1])
                break
        if number is None:
            raise IOError('Could not find atomic number in CP2K ATOM output: %s.' % filename)

        # Go to the all-electron basis set and read it.
        for line in f:
            if line.startswith(' All Electron Basis'):
                break
        ae_obasis = _read_cp2k_obasis(f)

        # Go to the pseudo basis set and read it.
        for line in f:
            if line.startswith(' Pseudopotential Basis'):
                break
        pp_obasis = _read_cp2k_obasis(f)

        # Search for (un)restricted
        restricted = None
        for line in f:
            if line.startswith(' METHOD |'):
                if 'U' in line:
                    restricted = False
                    break
                elif 'R' in line:
                    restricted = True
                    break

        # Search for the core charge (pseudo number)
        pseudo_number = None
        for line in f:
            if line.startswith(' Core Charge'):
                pseudo_number = float(line[70:])
                assert pseudo_number == int(pseudo_number)
                break
            elif line.startswith(' Electronic structure'):
                pseudo_number = float(number)
                break
        if pseudo_number is None:
            raise IOError('Could not find effective core charge in CP2K ATOM output:'
                          ' %s' % filename)

        # Select the correct basis
        if pseudo_number == number:
            obasis = ae_obasis
        else:
            obasis = pp_obasis

        # Search for energy
        for line in f:
            if line.startswith(' Energy components [Hartree] Total Energy ::'):
                energy = float(line[60:])
                break

        # Read orbital energies and occupations
        for line in f:
            if line.startswith(' Orbital energies'):
                break
        f.next()
        oe_alpha, oe_beta = _read_cp2k_occupations_energies(f, restricted)

        # Read orbital expansion coefficients
        line = f.next()
        if (line != " Atomic orbital expansion coefficients [Alpha]\n") and \
           (line != " Atomic orbital expansion coefficients []\n"):
            raise IOError('Could not find orbital coefficients in CP2K ATOM output: '
                          '%s' % filename)
        coeffs_alpha = _read_cp2k_orbital_coeffs(f, oe_alpha)

        if not restricted:
            line = f.next()
            if line != " Atomic orbital expansion coefficients [Beta]\n":
                raise IOError('Could not find beta orbital coefficient in CP2K ATOM '
                              'output: %s' % filename)
            coeffs_beta = _read_cp2k_orbital_coeffs(f, oe_beta)

    # Turn the orbital data into HORTON orbital expansions
    if restricted:
        norb, nel = _get_norb_nel(oe_alpha)
        assert nel % 2 == 0
        orb_alpha = Orbitals(obasis.nbasis, norb)
        orb_beta = None
        _fill_orbitals(orb_alpha, oe_alpha, coeffs_alpha, obasis.shell_types, restricted)
    else:
        norb_alpha = _get_norb_nel(oe_alpha)[0]
        norb_beta = _get_norb_nel(oe_beta)[0]
        assert norb_alpha == norb_beta
        orb_alpha = Orbitals(obasis.nbasis, norb_alpha)
        orb_beta = Orbitals(obasis.nbasis, norb_beta)
        _fill_orbitals(orb_alpha, oe_alpha, coeffs_alpha, obasis.shell_types, restricted)
        _fill_orbitals(orb_beta, oe_beta, coeffs_beta, obasis.shell_types, restricted)

    result = {
        'obasis': obasis,
        'orb_alpha': orb_alpha,
        'coordinates': obasis.centers,
        'numbers': np.array([number]),
        'energy': energy,
        'pseudo_numbers': np.array([pseudo_number]),
    }
    if orb_beta is not None:
        result['orb_beta'] = orb_beta
    return result
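A minimal numeric sketch (not part of the module) replicating `_get_cp2k_norm_corrections` for an s-type primitive; it assumes only `numpy`, and `fac2(2*0 + 1) == 1` is substituted by hand so the snippet runs without HORTON installed.

import numpy as np

l, alpha = 0, 0.5                                      # s shell, arbitrary exponent
expzet = 0.25 * (2*l + 3)                              # 0.75 for l = 0
prefac = np.sqrt(np.sqrt(np.pi) / 2.0**(l + 2) * 1.0)  # fac2(1) == 1
print((2.0 * alpha)**expzet / prefac)                  # the correction factor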
QuantumElephant/horton
horton/io/cp2k.py
Python
gpl-3.0
14,590
[ "CP2K", "Gaussian" ]
22e7029df253a675436b240d6c361ddb0543fb806deaf70e10e16c3e7289e595
import numpy as np

from skimage.restoration._nl_means_denoising import (
    _nl_means_denoising_2d, _nl_means_denoising_3d,
    _fast_nl_means_denoising_2d, _fast_nl_means_denoising_3d)


def nl_means_denoising(image, patch_size=7, patch_distance=11, h=0.1,
                       multichannel=True, fast_mode=True):
    """
    Perform non-local means denoising on 2-D or 3-D grayscale images, and
    2-D RGB images.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input image to be denoised, which can be 2D or 3D, and grayscale
        or RGB (for 2D images only, see ``multichannel`` parameter).
    patch_size : int, optional
        Size of patches used for denoising.
    patch_distance : int, optional
        Maximal distance in pixels where to search patches used for
        denoising.
    h : float, optional
        Cut-off distance (in gray levels). The higher h, the more permissive
        one is in accepting patches. A higher h results in a smoother image,
        at the expense of blurring features. For a Gaussian noise of standard
        deviation sigma, a rule of thumb is to choose the value of h to be
        sigma or slightly less.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension. Set to ``False`` for 3-D
        images.
    fast_mode : bool, optional
        If True (default value), a fast version of the non-local means
        algorithm is used. If False, the original version of non-local means
        is used. See the Notes section for more details about the algorithms.

    Returns
    -------
    result : ndarray
        Denoised image, of same shape as `image`.

    See Also
    --------
    fast_nl_means_denoising

    Notes
    -----
    The non-local means algorithm is well suited for denoising images with
    specific textures. The principle of the algorithm is to average the value
    of a given pixel with values of other pixels in a limited neighbourhood,
    provided that the *patches* centered on the other pixels are similar
    enough to the patch centered on the pixel of interest.

    In the original version of the algorithm [1]_, corresponding to
    ``fast=False``, the computational complexity is

        image.size * patch_size ** image.ndim * patch_distance ** image.ndim

    Hence, changing the size of patches or their maximal distance has a
    strong effect on computing times, especially for 3-D images.

    However, the default behavior corresponds to ``fast_mode=True``, for
    which another version of non-local means [2]_ is used, corresponding to
    a complexity of

        image.size * patch_distance ** image.ndim

    The computing time depends only weakly on the patch size, thanks to the
    computation of the integral of patch distances for a given shift, which
    reduces the number of operations [1]_. Therefore, this fast mode executes
    faster than the classic mode, at the expense of using twice as much
    memory.

    Compared to the classic algorithm, in the fast mode all pixels of a patch
    contribute to the distance to another patch with the same weight, no
    matter their distance to the center of the patch. This coarser
    computation of the distance can result in a slightly poorer denoising
    performance. Moreover, for small images (images with a linear size that
    is only a few times the patch size), the classic algorithm can be faster
    due to boundary effects.

    The image is padded using the `reflect` mode of `skimage.util.pad`
    before denoising.

    References
    ----------
    .. [1] Buades, A., Coll, B., & Morel, J. M. (2005, June). A non-local
           algorithm for image denoising. In CVPR 2005, Vol. 2, pp. 60-65,
           IEEE.

    .. [2] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means
           Denoising. Image Processing On Line, 2014, vol. 4, p. 300-326.

    Examples
    --------
    >>> a = np.zeros((40, 40))
    >>> a[10:-10, 10:-10] = 1.
    >>> a += 0.3*np.random.randn(*a.shape)
    >>> denoised_a = nl_means_denoising(a, 7, 5, 0.1)
    """
    if image.ndim == 2:
        image = image[..., np.newaxis]
        multichannel = True
    if image.ndim != 3:
        raise NotImplementedError("Non-local means denoising is only "
                                  "implemented for 2D grayscale and RGB "
                                  "images or 3-D grayscale images.")
    if multichannel:  # 2-D images
        if fast_mode:
            return np.squeeze(np.array(_fast_nl_means_denoising_2d(
                image, patch_size, patch_distance, h)))
        else:
            return np.squeeze(np.array(_nl_means_denoising_2d(
                image, patch_size, patch_distance, h)))
    else:  # 3-D grayscale
        if fast_mode:
            return np.array(_fast_nl_means_denoising_3d(image, s=patch_size,
                                                        d=patch_distance, h=h))
        else:
            return np.array(_nl_means_denoising_3d(image, patch_size,
                                                   patch_distance, h))
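A hedged usage sketch (not part of the module), mirroring the grayscale doctest above for an RGB input; it assumes this module and its compiled Cython internals are importable.

import numpy as np

img = np.zeros((64, 64, 3))
img[16:-16, 16:-16, :] = 1.0
noisy = img + 0.2 * np.random.randn(*img.shape)
# multichannel=True because the last axis holds RGB channels
clean = nl_means_denoising(noisy, patch_size=5, patch_distance=7, h=0.1,
                           multichannel=True, fast_mode=True)
print(clean.shape)  # same shape as the input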
Britefury/scikit-image
skimage/restoration/non_local_means.py
Python
bsd-3-clause
5,145
[ "Gaussian" ]
3d62803bb5787464b4e47330d65b08c9af5551700a513ee47fdfa0fe030417f2
def myargs(parser):
    parser.add_argument("-n", "--numcores", type=int, default=1,
                        help="Number of concurrent jobs to process.")
    parser.add_argument("-c", "--cores-per-job", type=int, default=1,
                        help="Number of cores to use.")
    parser.add_argument("-m", "--memory-per-job", default=2,
                        help="Memory in GB to reserve per job.")
    parser.add_argument("--timeout", default=15,
                        help="Time to wait before giving up starting.")
    parser.add_argument("--retries", default=0, type=int,
                        help=("Number of retries of failed tasks during "
                              "distributed processing. Default 0 "
                              "(no retries)"))
    parser.add_argument("-s", "--scheduler", help="Type of scheduler to use.",
                        choices=["lsf", "slurm", "torque", "sge", "pbspro"])
    parser.add_argument("-r", "--resources",
                        help="Extra scheduler resource flags.",
                        default=[], action="append")
    parser.add_argument("-q", "--queue",
                        help="Queue to submit jobs to. Use localrun to run in parallel locally.")
    parser.add_argument("-p", "--tag",
                        help="Tag name to label jobs on the cluster",
                        default="bcb-prep")
    parser.add_argument("-t", "--paralleltype",
                        choices=["local", "ipython"], default="local",
                        help="Run with ipython")
    # parser.add_argument("--local", default=False, action='store_true',
    #                     help="Run cluster locally")
    parser.add_argument("--galaxy", help="bcbio galaxy resources.")
    return parser
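A hedged usage sketch: `myargs` expects a caller-supplied parser, so wiring it to the standard-library `argparse` looks like the following (the flag values are illustrative only).

import argparse

parser = myargs(argparse.ArgumentParser())
args = parser.parse_args(["-n", "4", "--scheduler", "slurm", "-q", "general"])
print(args.numcores, args.scheduler, args.queue)  # -> 4 slurm general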
lpantano/ich-wrapper
ichwrapper/arguments.py
Python
mit
1,578
[ "Galaxy" ]
b078d9f607a7e01630e3ec22013e03ac918b79a660a250bee219a7401373270e
import numpy as np

from gpaw.xc.kernel import XCKernel


class LB94(XCKernel):
    """Correction to LDA to resemble asymptotic -1/r potential.

    See: van Leeuwen and Baerends, Phys. Rev. A vol 49 (1994) 2421
    """
    def __init__(self, beta=0.05):
        XCKernel.__init__(self, 'LDA')
        self.name = 'LB94'
        self.type = 'GGA'
        self.beta = beta

    def calculate(self, e_g, n_sg, dedn_sg,
                  sigma_xg=None, dedsigma_xg=None,
                  tau_sg=None, dedtau_sg=None):
        XCKernel.calculate(self, e_g, n_sg, dedn_sg, sigma_xg, dedsigma_xg)
        for s, n_g in enumerate(n_sg):
            n_g = n_g * len(n_sg)
            n_g[n_g < 1e-10] = 1e-10
            y_g = n_g**(1 / 3.0)
            x_g = sigma_xg[2 * s]**0.5 / (y_g * n_g) * len(n_sg)
            x_g[x_g > 500] = 0.0
            dedn_sg[s] -= (self.beta * x_g**2 * y_g /
                           (1 + 3 * self.beta * x_g * np.arcsinh(x_g)))
        dedsigma_xg[:] = 0.0
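A minimal instantiation sketch (assuming a working GPAW installation); it only touches the attributes set in `__init__` above and does not run a DFT calculation. In practice the kernel would be handed to GPAW's XC machinery.

kernel = LB94(beta=0.05)
print(kernel.name, kernel.type)  # -> LB94 GGA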
qsnake/gpaw
gpaw/xc/lb94.py
Python
gpl-3.0
995
[ "GPAW" ]
3838ab948c3d010bf1f3110a76dc4125e65095e24e8a6abd1c1de3a418988c6a
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""
An Extension is comprised of Component objects; the objects are used for
tokenizing markdown and converting tokens to rendered HTML.
"""
from ..common import mixins


class Extension(mixins.ConfigObject, mixins.TranslatorObject):
    """
    Base class for creating extensions. An extension is simply a mechanism to
    allow for the creation of reader/renderer components to be added to the
    translation process.

    All aspects of the MooseDocs system rely on Extension objects. These
    extensions are passed to the Translator object. The translator calls the
    extend method of the extension.

    Inputs:
        kwargs: All key-value pairs are treated as configure options, see
                ConfigObject.
    """
    __TRANSLATOR_METHODS__ = ['init',
                              'initPage',
                              'preExecute', 'postExecute',
                              'preRead', 'postRead',
                              'preTokenize', 'postTokenize',
                              'preRender', 'postRender',
                              'preWrite', 'postWrite']

    @staticmethod
    def defaultConfig():
        """Basic Extension configuration options."""
        config = mixins.ConfigObject.defaultConfig()
        config['active'] = (True, "Toggle for disabling the extension. This only changes "
                                  "the initial active state, use setActive to control at runtime.")
        return config

    def __init__(self, **kwargs):
        mixins.ConfigObject.__init__(self,
                                     self.__class__.__name__.split('.')[-1].replace('Extension', '').lower(),
                                     **kwargs)
        mixins.TranslatorObject.__init__(self)
        self.__requires = set()
        self.__active = self.get('active')

    @property
    def active(self):
        """Return the 'active' status of the Extension."""
        return self.__active

    def setActive(self, value):
        """
        Set the active state for the extension.
        """
        self.__active = value

    def extend(self, reader, renderer):
        """
        Method for adding reader and rendering components.
        """
        pass

    def requires(self, *args):
        """
        Require that the supplied extension module exists within the Translator
        object. This method cannot be called before init().
        """
        self.__requires.update(args)

    def init(self):
        """
        Called after Translator is set, prior to initializing pages.
        """
        pass

    def preExecute(self):
        """
        Called by Translator prior to beginning conversion.
        """
        pass

    def postExecute(self):
        """
        Called by Translator after all conversion is complete.
        """
        pass

    def preRead(self, page):
        """
        Called prior to reading the file.

        Input:
            page[pages.Source]: The source object representing the content
        """
        pass

    def postRead(self, page, content):
        """
        Called after reading the file.

        Input:
            content[str]: The content read from the page
            page[pages.Source]: The source object representing the content
        """
        pass

    def preTokenize(self, page, ast):
        """
        Called by Translator prior to tokenization.

        Inputs:
            page[pages.Source]: The source object representing the content
            ast[tokens.Token]: The root node of the token tree
        """
        pass

    def postTokenize(self, page, ast):
        """
        Called by Translator after tokenization.

        Inputs:
            page[pages.Source]: The source object representing the content
            ast[tokens.Token]: The root node of the token tree
        """
        pass

    def preRender(self, page, result):
        """
        Called by Translator prior to rendering.

        Inputs:
            page[pages.Source]: The source object representing the content
            result[tree.base.NodeBase]: The root node of the result tree
        """
        pass

    def postRender(self, page, result):
        """
        Called by Translator after rendering.

        Inputs:
            page[pages.Source]: The source object representing the content
            result[tree.base.NodeBase]: The root node of the result tree
        """
        pass

    def preWrite(self, page, result):
        """
        Called prior to the renderer writing the content.

        Inputs:
            page[pages.Source]: The source object representing the content
            result[tree.base.NodeBase]: The root node of the result tree
        """
        pass

    def postWrite(self, page):
        """
        Called after the renderer has written the content.

        Inputs:
            page[pages.Source]: The source object representing the content
        """
        pass

    def setAttribute(self, *args):
        """
        Set a global attribute to be communicated across processors.

        This is designed to be called from the
        <pre/post><Read/Tokenize/Render/Write> methods
        """
        self.translator.executioner.setGlobalAttribute(*args)

    def getAttribute(self, *args):
        """
        Get a global attribute to be communicated across processors.

        This is designed to be called from the
        <pre/post><Read/Tokenize/Render/Write> methods
        """
        return self.translator.executioner.getGlobalAttribute(*args)

    def getAttributeItems(self):
        """
        Return an iterator to the global attributes to be communicated across
        processors.

        This is designed to be called from the
        <pre/post><Read/Tokenize/Render/Write> methods
        """
        return self.translator.executioner.getGlobalAttributeItems()
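A hedged sketch of a do-nothing subclass, illustrating how the hooks above are meant to be overridden; the 'greeting' option is purely hypothetical and the class assumes a MooseDocs environment in which `Extension` is importable.

class ExampleExtension(Extension):
    """Minimal illustrative extension: one config option, one overridden hook."""

    @staticmethod
    def defaultConfig():
        config = Extension.defaultConfig()
        config['greeting'] = ('hello', "An illustrative option (hypothetical).")
        return config

    def extend(self, reader, renderer):
        # A real extension would register reader/renderer components here.
        pass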
harterj/moose
python/MooseDocs/base/Extension.py
Python
lgpl-2.1
6,018
[ "MOOSE" ]
332442a0634e7270fd9bb929b4874b343d8ff6986e2a0868bb1ffa4f0485b941
"""Polynomial factorization routines in characteristic zero. """ from sympy.polys.galoistools import ( gf_from_int_poly, gf_to_int_poly, gf_degree, gf_from_dict, gf_lshift, gf_add_mul, gf_mul, gf_div, gf_rem, gf_gcd, gf_gcdex, gf_sqf_p, gf_factor_sqf, gf_factor) from sympy.polys.densebasic import ( dup_LC, dmp_LC, dmp_ground_LC, dup_TC, dmp_TC, dmp_ground_TC, dup_convert, dmp_convert, dup_degree, dmp_degree, dmp_degree_in, dmp_degree_list, dup_from_dict, dmp_from_dict, dmp_zero, dmp_zero_p, dmp_one, dmp_one_p, dmp_nest, dmp_raise, dup_strip, dmp_strip, dmp_ground, dup_inflate, dmp_exclude, dmp_include, dmp_inject, dmp_eject, dup_terms_gcd, dmp_terms_gcd) from sympy.polys.densearith import ( dup_neg, dmp_neg, dup_add, dmp_add, dup_sub, dmp_sub, dup_mul, dmp_mul, dup_sqr, dmp_sqr, dup_pow, dmp_pow, dup_div, dmp_div, dup_rem, dmp_rem, dup_quo, dmp_quo, dup_expand, dmp_expand, dup_add_mul, dmp_add_mul, dup_sub_mul, dmp_sub_mul, dup_lshift, dup_rshift, dup_max_norm, dmp_max_norm, dup_l1_norm, dmp_l1_norm, dup_mul_ground, dmp_mul_ground, dup_quo_ground, dmp_quo_ground) from sympy.polys.densetools import ( dup_clear_denoms, dmp_clear_denoms, dup_trunc, dmp_ground_trunc, dup_content, dmp_ground_content, dup_monic, dmp_ground_monic, dup_primitive, dmp_ground_primitive, dup_eval, dmp_eval_tail, dmp_eval_in, dmp_diff_eval_in, dup_compose, dmp_compose, dup_shift, dup_mirror) from sympy.polys.euclidtools import ( dmp_primitive, dup_gcd, dmp_gcd, dup_inner_gcd, dmp_inner_gcd) from sympy.polys.sqfreetools import ( dup_sqf_p, dmp_sqf_p, dup_sqf_norm, dmp_sqf_norm, dup_sqf_part, dmp_sqf_part) from sympy.polys.polyutils import _sort_factors from sympy.polys.polyconfig import query from sympy.polys.polyerrors import ( ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed) from sympy.ntheory import nextprime, isprime, factorint from sympy.utilities import subsets, cythonized from math import ceil, log from random import randint @cythonized("k") def dup_trial_division(f, factors, K): """Determine multiplicities of factors using trial division. """ result = [] for factor in factors: k = 0 while True: q, r = dup_div(f, factor, K) if not r: f, k = q, k+1 else: break result.append((factor, k)) return _sort_factors(result) @cythonized("u,k") def dmp_trial_division(f, factors, u, K): """Determine multiplicities of factors using trial division. """ result = [] for factor in factors: k = 0 while True: q, r = dmp_div(f, factor, u, K) if dmp_zero_p(r, u): f, k = q, k+1 else: break result.append((factor, k)) return _sort_factors(result) def dup_zz_mignotte_bound(f, K): """Mignotte bound for univariate polynomials in `K[x]`. """ a = dup_max_norm(f, K) b = abs(dup_LC(f, K)) n = dup_degree(f) return K.sqrt(K(n+1))*2**n*a*b def dmp_zz_mignotte_bound(f, u, K): """Mignotte bound for multivariate polynomials in `K[X]`. """ a = dmp_max_norm(f, u, K) b = abs(dmp_ground_LC(f, u, K)) n = sum(dmp_degree_list(f, u)) return K.sqrt(K(n+1))*2**n*a*b def dup_zz_hensel_step(m, f, g, h, s, t, K): """ One step in Hensel lifting in `Z[x]`. Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s` and `t` such that:: f == g*h (mod m) s*g + t*h == 1 (mod m) lc(f) is not a zero divisor (mod m) lc(h) == 1 deg(f) == deg(g) + deg(h) deg(s) < deg(h) deg(t) < deg(g) returns polynomials `G`, `H`, `S` and `T`, such that:: f == G*H (mod m**2) S*G + T**H == 1 (mod m**2) **References** 1. 
[Gathen99]_ """ M = m**2 e = dup_sub_mul(f, g, h, K) e = dup_trunc(e, M, K) q, r = dup_div(dup_mul(s, e, K), h, K) q = dup_trunc(q, M, K) r = dup_trunc(r, M, K) u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K) G = dup_trunc(dup_add(g, u, K), M, K) H = dup_trunc(dup_add(h, r, K), M, K) u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K) b = dup_trunc(dup_sub(u, [K.one], K), M, K) c, d = dup_div(dup_mul(s, b, K), H, K) c = dup_trunc(c, M, K) d = dup_trunc(d, M, K) u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K) S = dup_trunc(dup_sub(s, d, K), M, K) T = dup_trunc(dup_sub(t, u, K), M, K) return G, H, S, T @cythonized("l,r,k,d") def dup_zz_hensel_lift(p, f, f_list, l, K): """ Multifactor Hensel lifting in `Z[x]`. Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)` is a unit modulo `p`, monic pair-wise coprime polynomials `f_i` over `Z[x]` satisfying:: f = lc(f) f_1 ... f_r (mod p) and a positive integer `l`, returns a list of monic polynomials `F_1`, `F_2`, ..., `F_r` satisfying:: f = lc(f) F_1 ... F_r (mod p**l) F_i = f_i (mod p), i = 1..r **References** 1. [Gathen99]_ """ r = len(f_list) lc = dup_LC(f, K) if r == 1: F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K) return [ dup_trunc(F, p**l, K) ] m = p k = r // 2 d = int(ceil(log(l, 2))) g = gf_from_int_poly([lc], p) for f_i in f_list[:k]: g = gf_mul(g, gf_from_int_poly(f_i, p), p, K) h = gf_from_int_poly(f_list[k], p) for f_i in f_list[k+1:]: h = gf_mul(h, gf_from_int_poly(f_i, p), p, K) s, t, _ = gf_gcdex(g, h, p, K) g = gf_to_int_poly(g, p) h = gf_to_int_poly(h, p) s = gf_to_int_poly(s, p) t = gf_to_int_poly(t, p) for _ in range(1, d+1): (g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2 return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \ + dup_zz_hensel_lift(p, h, f_list[k:], l, K) @cythonized("l,s") def dup_zz_zassenhaus(f, K): """Factor primitive square-free polynomials in `Z[x]`. """ n = dup_degree(f) if n == 1: return [f] A = dup_max_norm(f, K) b = dup_LC(f, K) B = int(abs(K.sqrt(K(n+1))*2**n*A*b)) C = int((n+1)**(2*n)*A**(2*n-1)) gamma = int(ceil(2*log(C, 2))) bound = int(2*gamma*log(gamma)) for p in xrange(3, bound+1): if not isprime(p) or b % p == 0: continue p = K.convert(p) F = gf_from_int_poly(f, p) if gf_sqf_p(F, p, K): break l = int(ceil(log(2*B + 1, p))) modular = [] for ff in gf_factor_sqf(F, p, K)[1]: modular.append(gf_to_int_poly(ff, p)) g = dup_zz_hensel_lift(p, f, modular, l, K) T = set(range(len(g))) factors, s = [], 1 while 2*s <= len(T): for S in subsets(T, s): G, H = [b], [b] S = set(S) for i in S: G = dup_mul(G, g[i], K) for i in T-S: H = dup_mul(H, g[i], K) G = dup_trunc(G, p**l, K) H = dup_trunc(H, p**l, K) G_norm = dup_l1_norm(G, K) H_norm = dup_l1_norm(H, K) if G_norm*H_norm <= B: T = T - S G = dup_primitive(G, K)[1] f = dup_primitive(H, K)[1] factors.append(G) b = dup_LC(f, K) break else: s += 1 return factors + [f] def dup_zz_irreducible_p(f, K): """Test irreducibility using Eisenstein's criterion. """ lc = dup_LC(f, K) tc = dup_TC(f, K) e_fc = dup_content(f[1:], K) if e_fc: e_ff = factorint(int(e_fc)) for p in e_ff.iterkeys(): if (lc % p) and (tc % p**2): return True @cythonized("n,i") def dup_cyclotomic_p(f, K, irreducible=False): """ Efficiently test if ``f`` is a cyclotomic polnomial. 
**Examples** >>> from sympy.polys.factortools import dup_cyclotomic_p >>> from sympy.polys.domains import ZZ >>> f = [1, 0, 1, 0, 0, 0,-1, 0, 1, 0,-1, 0, 0, 0, 1, 0, 1] >>> dup_cyclotomic_p(f, ZZ) False >>> g = [1, 0, 1, 0, 0, 0,-1, 0,-1, 0,-1, 0, 0, 0, 1, 0, 1] >>> dup_cyclotomic_p(g, ZZ) True """ if K.is_QQ: try: K0, K = K, K.get_ring() f = dup_convert(f, K0, K) except CoercionFailed: return False elif not K.is_ZZ: return False lc = dup_LC(f, K) tc = dup_TC(f, K) if lc != 1 or (tc != -1 and tc != 1): return False if not irreducible: coeff, factors = dup_factor_list(f, K) if coeff != K.one or factors != [(f, 1)]: return False n = dup_degree(f) g, h = [], [] for i in xrange(n, -1, -2): g.insert(0, f[i]) for i in xrange(n-1, -1, -2): h.insert(0, f[i]) g = dup_sqr(dup_strip(g), K) h = dup_sqr(dup_strip(h), K) F = dup_sub(g, dup_lshift(h, 1, K), K) if K.is_negative(dup_LC(F, K)): F = dup_neg(F, K) if F == f: return True g = dup_mirror(f, K) if K.is_negative(dup_LC(g, K)): g = dup_neg(g, K) if F == g and dup_cyclotomic_p(g, K): return True G = dup_sqf_part(F, K) if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K): return True return False @cythonized("n,p,k") def dup_zz_cyclotomic_poly(n, K): """Efficiently generate n-th cyclotomic polnomial. """ h = [K.one,-K.one] for p, k in factorint(n).iteritems(): h = dup_quo(dup_inflate(h, p, K), h, K) h = dup_inflate(h, p**(k-1), K) return h @cythonized("n,p,k,i") def _dup_cyclotomic_decompose(n, K): H = [[K.one,-K.one]] for p, k in factorint(n).iteritems(): Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ] H.extend(Q) for i in xrange(1, k): Q = [ dup_inflate(q, p, K) for q in Q ] H.extend(Q) return H @cythonized("n") def dup_zz_cyclotomic_factor(f, K): """ Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`. Given a univariate polynomial `f` in `Z[x]` returns a list of factors of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for `n >= 1`. Otherwise returns None. Factorization is performed using using cyclotomic decomposition of `f`, which makes this method much faster that any other direct factorization approach (e.g. Zassenhaus's). **References** 1. [Weisstein09]_ """ lc_f, tc_f = dup_LC(f, K), dup_TC(f, K) if dup_degree(f) <= 0: return None if lc_f != 1 or tc_f not in [-1, 1]: return None if any(bool(cf) for cf in f[1:-1]): return None n = dup_degree(f) F = _dup_cyclotomic_decompose(n, K) if not K.is_one(tc_f): return F else: H = [] for h in _dup_cyclotomic_decompose(2*n, K): if h not in F: H.append(h) return H @cythonized("n") def dup_zz_factor_sqf(f, K): """Factor square-free (non-primitive) polyomials in `Z[x]`. """ cont, g = dup_primitive(f, K) n = dup_degree(g) if dup_LC(g, K) < 0: cont, g = -cont, dup_neg(g, K) if n <= 0: return cont, [] elif n == 1: return cont, [(g, 1)] if query('USE_IRREDUCIBLE_IN_FACTOR'): if dup_zz_irreducible_p(g, K): return cont, [(g, 1)] factors = None if query('USE_CYCLOTOMIC_FACTOR'): factors = dup_zz_cyclotomic_factor(g, K) if factors is None: factors = dup_zz_zassenhaus(g, K) return cont, _sort_factors(factors, multiple=False) @cythonized("n,k") def dup_zz_factor(f, K): """ Factor (non square-free) polynomials in `Z[x]`. Given a univariate polynomial `f` in `Z[x]` computes its complete factorization `f_1, ..., f_n` into irreducibles over integers:: f = content(f) f_1**k_1 ... f_n**k_n The factorization is computed by reducing the input polynomial into a primitive square-free polynomial and factoring it using Zassenhaus algorithm. 
Trial division is used to recover the multiplicities of factors. The result is returned as a tuple consisting of:: (content(f), [(f_1, k_1), ..., (f_n, k_n)) Consider polynomial `f = 2*x**4 - 2`:: >>> from sympy.polys.factortools import dup_zz_factor >>> from sympy.polys.domains import ZZ >>> dup_zz_factor([2, 0, 0, 0, -2], ZZ) (2, [([1, -1], 1), ([1, 1], 1), ([1, 0, 1], 1)]) In result we got the following factorization:: f = 2 (x - 1) (x + 1) (x**2 + 1) Note that this is a complete factorization over integers, however over Gaussian integers we can factor the last term. By default, polynomials `x**n - 1` and `x**n + 1` are factored using cyclotomic decomposition to speedup computations. To disable this behaviour set cyclotomic=False. **References** 1. [Gathen99]_ """ cont, g = dup_primitive(f, K) n = dup_degree(g) if dup_LC(g, K) < 0: cont, g = -cont, dup_neg(g, K) if n <= 0: return cont, [] elif n == 1: return cont, [(g, 1)] if query('USE_IRREDUCIBLE_IN_FACTOR'): if dup_zz_irreducible_p(g, K): return cont, [(g, 1)] g = dup_sqf_part(g, K) H, factors = None, [] if query('USE_CYCLOTOMIC_FACTOR'): H = dup_zz_cyclotomic_factor(g, K) if H is None: H = dup_zz_zassenhaus(g, K) for h in H: k = 0 while True: q, r = dup_div(f, h, K) if not r: f, k = q, k+1 else: break factors.append((h, k)) return cont, _sort_factors(factors) def dmp_zz_wang_non_divisors(E, cs, ct, K): """Wang/EEZ: Compute a set of valid divisors. """ result = [ cs*ct ] for q in E: q = abs(q) for r in reversed(result): while r != 1: r = K.gcd(r, q) q = q // r if K.is_one(q): return None result.append(q) return result[1:] @cythonized("u,v") def dmp_zz_wang_test_points(f, T, ct, A, u, K): """Wang/EEZ: Test evaluation points for suitability. """ if not dmp_eval_tail(dmp_LC(f, K), A, u-1, K): raise EvaluationFailed('no luck') g = dmp_eval_tail(f, A, u, K) if not dup_sqf_p(g, K): raise EvaluationFailed('no luck') c, h = dup_primitive(g, K) if K.is_negative(dup_LC(h, K)): c, h = -c, dup_neg(h, K) v = u-1 E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ] D = dmp_zz_wang_non_divisors(E, c, ct, K) if D is not None: return c, h, E else: raise EvaluationFailed('no luck') @cythonized("u,v,i,j,k") def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K): """Wang/EEZ: Compute correct leading coefficients. """ C, J, v = [], [0]*len(E), u-1 for h in H: c = dmp_one(v, K) d = dup_LC(h, K)*cs for i in reversed(xrange(len(E))): k, e, (t, _) = 0, E[i], T[i] while not (d % e): d, k = d//e, k+1 if k != 0: c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1 C.append(c) if any(not j for j in J): raise ExtraneousFactors # pragma: no cover CC, HH = [], [] for c, h in zip(C, H): d = dmp_eval_tail(c, A, v, K) lc = dup_LC(h, K) if K.is_one(cs): cc = lc//d else: g = K.gcd(lc, d) d, cc = d//g, lc//g h, cs = dup_mul_ground(h, d, K), cs//d c = dmp_mul_ground(c, cc, v, K) CC.append(c) HH.append(h) if K.is_one(cs): return f, HH, CC CCC, HHH = [], [] for c, h in zip(CC, HH): CCC.append(dmp_mul_ground(c, cs, v, K)) HHH.append(dmp_mul_ground(h, cs, 0, K)) f = dmp_mul_ground(f, cs**(len(H)-1), u, K) return f, HHH, CCC @cythonized("m") def dup_zz_diophantine(F, m, p, K): """Wang/EEZ: Solve univariate Diophantine equations. 
""" if len(F) == 2: a, b = F f = gf_from_int_poly(a, p) g = gf_from_int_poly(b, p) s, t, G = gf_gcdex(g, f, p, K) s = gf_lshift(s, m, K) t = gf_lshift(t, m, K) q, s = gf_div(s, f, p, K) t = gf_add_mul(t, q, g, p, K) s = gf_to_int_poly(s, p) t = gf_to_int_poly(t, p) result = [s, t] else: G = [F[-1]] for f in reversed(F[1:-1]): G.insert(0, dup_mul(f, G[0], K)) S, T = [], [[1]] for f, g in zip(F, G): t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K) T.append(t) S.append(s) result, S = [], S + [T[-1]] for s, f in zip(S, F): s = gf_from_int_poly(s, p) f = gf_from_int_poly(f, p) r = gf_rem(gf_lshift(s, m, K), f, p, K) s = gf_to_int_poly(r, p) result.append(s) return result @cythonized("u,v,d,n,i,j,k") def dmp_zz_diophantine(F, c, A, d, p, u, K): """Wang/EEZ: Solve multivariate Diophantine equations. """ if not A: S = [ [] for _ in F ] n = dup_degree(c) for i, coeff in enumerate(c): if not coeff: continue T = dup_zz_diophantine(F, n-i, p, K) for j, (s, t) in enumerate(zip(S, T)): t = dup_mul_ground(t, coeff, K) S[j] = dup_trunc(dup_add(s, t, K), p, K) else: n = len(A) e = dmp_expand(F, u, K) a, A = A[-1], A[:-1] B, G = [], [] for f in F: B.append(dmp_quo(e, f, u, K)) G.append(dmp_eval_in(f, a, n, u, K)) C = dmp_eval_in(c, a, n, u, K) v = u - 1 S = dmp_zz_diophantine(G, C, A, d, p, v, K) S = [ dmp_raise(s, 1, v, K) for s in S ] for s, b in zip(S, B): c = dmp_sub_mul(c, s, b, u, K) c = dmp_ground_trunc(c, p, u, K) m = dmp_nest([K.one, -a], n, K) M = dmp_one(n, K) for k in xrange(0, d): if dmp_zero_p(c, u): break M = dmp_mul(M, m, u, K) C = dmp_diff_eval_in(c, k+1, a, n, u, K) if not dmp_zero_p(C, v): C = dmp_quo_ground(C, K.factorial(k+1), v, K) T = dmp_zz_diophantine(G, C, A, d, p, v, K) for i, t in enumerate(T): T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K) for i, (s, t) in enumerate(zip(S, T)): S[i] = dmp_add(s, t, u, K) for t, b in zip(T, B): c = dmp_sub_mul(c, t, b, u, K) c = dmp_ground_trunc(c, p, u, K) S = [ dmp_ground_trunc(s, p, u, K) for s in S ] return S @cythonized("u,v,d,dj,n,i,j,k,w") def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K): """Wang/EEZ: Parallel Hensel lifting algorithm. """ S, n, v = [f], len(A), u-1 H = list(H) for i, a in enumerate(reversed(A[1:])): s = dmp_eval_in(S[0], a, n-i, u-i, K) S.insert(0, dmp_ground_trunc(s, p, v-i, K)) d = max(dmp_degree_list(f, u)[1:]) for j, s, a in zip(xrange(2, n+2), S, A): G, w = list(H), j-1 I, J = A[:j-2], A[j-1:] for i, (h, lc) in enumerate(zip(H, LC)): lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w-1, K) H[i] = [lc] + dmp_raise(h[1:], 1, w-1, K) m = dmp_nest([K.one, -a], w, K) M = dmp_one(w, K) c = dmp_sub(s, dmp_expand(H, w, K), w, K) dj = dmp_degree_in(s, w, w) for k in xrange(0, dj): if dmp_zero_p(c, w): break M = dmp_mul(M, m, w, K) C = dmp_diff_eval_in(c, k+1, a, w, w, K) if not dmp_zero_p(C, w-1): C = dmp_quo_ground(C, K.factorial(k+1), w-1, K) T = dmp_zz_diophantine(G, C, I, d, p, w-1, K) for i, (h, t) in enumerate(zip(H, T)): h = dmp_add_mul(h, dmp_raise(t, 1, w-1, K), M, w, K) H[i] = dmp_ground_trunc(h, p, w, K) h = dmp_sub(s, dmp_expand(H, w, K), w, K) c = dmp_ground_trunc(h, p, w, K) if dmp_expand(H, u, K) != f: raise ExtraneousFactors # pragma: no cover else: return H @cythonized("u,mod,i,j,s_arg,negative") def dmp_zz_wang(f, u, K, mod=None): """ Factor primitive square-free polynomials in `Z[X]`. Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is primitive and square-free in `x_1`, computes factorization of `f` into irreducibles over integers. 
The procedure is based on Wang's Enhanced Extended Zassenhaus algorithm. The algorithm works by viewing `f` as a univariate polynomial in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed:: x_2 -> a_2, ..., x_n -> a_n where `a_i`, for `i = 2, ..., n`, are carefully chosen integers. The mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`, which can be factored efficiently using Zassenhaus algorithm. The last step is to lift univariate factors to obtain true multivariate factors. For this purpose a parallel Hensel lifting procedure is used. **References** 1. [Wang78]_ 2. [Geddes92]_ """ ct, T = dmp_zz_factor(dmp_LC(f, K), u-1, K) b = dmp_zz_mignotte_bound(f, u, K) p = K(nextprime(b)) if mod is None: if u == 1: mod = 2 else: mod = 1 history, configs, A, r = set([]), [], [K.zero]*u, None try: cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K) _, H = dup_zz_factor_sqf(s, K) r = len(H) if r == 1: return [f] bad_points = set([tuple(A)]) configs = [(s, cs, E, H, A)] except EvaluationFailed: pass eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS') eez_num_tries = query('EEZ_NUMBER_OF_TRIES') eez_mod_step = query('EEZ_MODULUS_STEP') while len(configs) < eez_num_configs: for _ in xrange(eez_num_tries): A = [ K(randint(-mod, mod)) for _ in xrange(u) ] if tuple(A) not in history: history.add(tuple(A)) else: continue try: cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K) except EvaluationFailed: continue _, H = dup_zz_factor_sqf(s, K) rr = len(H) if r is not None: if rr != r: # pragma: no cover if rr < r: configs, r = [], rr else: continue else: r = rr if r == 1: return [f] configs.append((s, cs, E, H, A)) if len(configs) == eez_num_configs: break else: mod += eez_mod_step s_norm, s_arg, i = None, 0, 0 for s, _, _, _, _ in configs: _s_norm = dup_max_norm(s, K) if s_norm is not None: if _s_norm < s_norm: s_norm = _s_norm s_arg = i else: s_norm = _s_norm i += 1 _, cs, E, H, A = configs[s_arg] try: f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K) factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K) except ExtraneousFactors: # pragma: no cover if query('EEZ_RESTART_IF_NEEDED'): return dmp_zz_wang(f, u, K, mod+1) else: raise ExtraneousFactors("we need to restart algorithm with better parameters") negative, result = 0, [] for f in factors: _, f = dmp_ground_primitive(f, u, K) if K.is_negative(dmp_ground_LC(f, u, K)): f = dmp_neg(f, u, K) result.append(f) return result @cythonized("u,d,k") def dmp_zz_factor(f, u, K): """ Factor (non square-free) polynomials in `Z[X]`. Given a multivariate polynomial `f` in `Z[x]` computes its complete factorization `f_1, ..., f_n` into irreducibles over integers:: f = content(f) f_1**k_1 ... f_n**k_n The factorization is computed by reducing the input polynomial into a primitive square-free polynomial and factoring it using Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division is used to recover the multiplicities of factors. The result is returned as a tuple consisting of:: (content(f), [(f_1, k_1), ..., (f_n, k_n)) Consider polynomial `f = 2*(x**2 - y**2)`:: >>> from sympy.polys.factortools import dmp_zz_factor >>> from sympy.polys.domains import ZZ >>> dmp_zz_factor([[2], [], [-2, 0, 0]], 1, ZZ) (2, [([[1], [-1, 0]], 1), ([[1], [1, 0]], 1)]) In result we got the following factorization:: f = 2 (x - y) (x + y) **References** 1. 
[Gathen99]_ """ if not u: return dup_zz_factor(f, K) if dmp_zero_p(f, u): return K.zero, [] cont, g = dmp_ground_primitive(f, u, K) if dmp_ground_LC(g, u, K) < 0: cont, g = -cont, dmp_neg(g, u, K) if all(d <= 0 for d in dmp_degree_list(g, u)): return cont, [] G, g = dmp_primitive(g, u, K) factors = [] if dmp_degree(g, u) > 0: g = dmp_sqf_part(g, u, K) H = dmp_zz_wang(g, u, K) for h in H: k = 0 while True: q, r = dmp_div(f, h, u, K) if dmp_zero_p(r, u): f, k = q, k+1 else: break factors.append((h, k)) for g, k in dmp_zz_factor(G, u-1, K)[1]: factors.insert(0, ([g], k)) return cont, _sort_factors(factors) def dup_ext_factor(f, K): """Factor univariate polynomials over algebraic number fields. """ n, lc = dup_degree(f), dup_LC(f, K) f = dup_monic(f, K) if n <= 0: return lc, [] if n == 1: return lc, [(f, 1)] f, F = dup_sqf_part(f, K), f s, g, r = dup_sqf_norm(f, K) factors = dup_factor_list_include(r, K.dom) if len(factors) == 1: return lc, [(f, n//dup_degree(f))] H = s*K.unit for i, (factor, _) in enumerate(factors): h = dup_convert(factor, K.dom, K) h, _, g = dup_inner_gcd(h, g, K) h = dup_shift(h, H, K) factors[i] = h factors = dup_trial_division(F, factors, K) return lc, factors @cythonized("u") def dmp_ext_factor(f, u, K): """Factor multivariate polynomials over algebraic number fields. """ if not u: return dup_ext_factor(f, K) lc = dmp_ground_LC(f, u, K) f = dmp_ground_monic(f, u, K) if all(d <= 0 for d in dmp_degree_list(f, u)): return lc, [] f, F = dmp_sqf_part(f, u, K), f s, g, r = dmp_sqf_norm(f, u, K) factors = dmp_factor_list_include(r, u, K.dom) if len(factors) == 1: coeff, factors = lc, [f] else: H = dmp_raise([K.one, s*K.unit], u, 0, K) for i, (factor, _) in enumerate(factors): h = dmp_convert(factor, u, K.dom, K) h, _, g = dmp_inner_gcd(h, g, u, K) h = dmp_compose(h, H, u, K) factors[i] = h return lc, dmp_trial_division(F, factors, u, K) @cythonized("i") def dup_gf_factor(f, K): """Factor univariate polynomials over finite fields. """ f = dup_convert(f, K, K.dom) coeff, factors = gf_factor(f, K.mod, K.dom) for i, (f, k) in enumerate(factors): factors[i] = (dup_convert(f, K.dom, K), k) return K.convert(coeff, K.dom), factors def dmp_gf_factor(f, u, K): """Factor multivariate polynomials over finite fields. """ raise DomainError('multivariate polynomials over %s' % K) @cythonized("i,k,u") def dup_factor_list(f, K0): """Factor polynomials into irreducibles in `K[x]`. 
""" j, f = dup_terms_gcd(f, K0) if not K0.has_CharacteristicZero: coeff, factors = dup_gf_factor(f, K0) elif K0.is_Algebraic: coeff, factors = dup_ext_factor(f, K0) else: if not K0.is_Exact: K0_inexact, K0 = K0, K0.get_exact() f = dup_convert(f, K0_inexact, K0) else: K0_inexact = None if K0.has_Field: K = K0.get_ring() denom, f = dup_clear_denoms(f, K0, K) f = dup_convert(f, K0, K) else: K = K0 if K.is_ZZ: coeff, factors = dup_zz_factor(f, K) elif K.is_Poly: f, u = dmp_inject(f, 0, K) coeff, factors = dmp_factor_list(f, u, K.dom) for i, (f, k) in enumerate(factors): factors[i] = (dmp_eject(f, u, K), k) coeff = K.convert(coeff, K.dom) else: # pragma: no cover raise DomainError('factorization not supported over %s' % K0) if K0.has_Field: for i, (f, k) in enumerate(factors): factors[i] = (dup_convert(f, K, K0), k) coeff = K0.convert(coeff, K) denom = K0.convert(denom, K) coeff = K0.quo(coeff, denom) if K0_inexact is not None: for i, (f, k) in enumerate(factors): factors[i] = (dup_convert(f, K0, K0_inexact), k) coeff = K0_inexact.convert(coeff, K0) if j: factors.insert(0, ([K0.one, K0.zero], j)) return coeff, _sort_factors(factors) def dup_factor_list_include(f, K): """Factor polynomials into irreducibles in `K[x]`. """ coeff, factors = dup_factor_list(f, K) if not factors: return [(dup_strip([coeff]), 1)] else: g = dup_mul_ground(factors[0][0], coeff, K) return [(g, factors[0][1])] + factors[1:] @cythonized("u,v,i,k") def dmp_factor_list(f, u, K0): """Factor polynomials into irreducibles in `K[X]`. """ if not u: return dup_factor_list(f, K0) J, f = dmp_terms_gcd(f, u, K0) if not K0.has_CharacteristicZero: # pragma: no cover coeff, factors = dmp_gf_factor(f, u, K0) elif K0.is_Algebraic: coeff, factors = dmp_ext_factor(f, u, K0) else: if not K0.is_Exact: K0_inexact, K0 = K0, K0.get_exact() f = dmp_convert(f, u, K0_inexact, K0) else: K0_inexact = None if K0.has_Field: K = K0.get_ring() denom, f = dmp_clear_denoms(f, u, K0, K) f = dmp_convert(f, u, K0, K) else: K = K0 if K.is_ZZ: levels, f, v = dmp_exclude(f, u, K) coeff, factors = dmp_zz_factor(f, v, K) for i, (f, k) in enumerate(factors): factors[i] = (dmp_include(f, levels, v, K), k) elif K.is_Poly: f, v = dmp_inject(f, u, K) coeff, factors = dmp_factor_list(f, v, K.dom) for i, (f, k) in enumerate(factors): factors[i] = (dmp_eject(f, v, K), k) coeff = K.convert(coeff, K.dom) else: # pragma: no cover raise DomainError('factorization not supported over %s' % K0) if K0.has_Field: for i, (f, k) in enumerate(factors): factors[i] = (dmp_convert(f, u, K, K0), k) coeff = K0.convert(coeff, K) denom = K0.convert(denom, K) coeff = K0.quo(coeff, denom) if K0_inexact is not None: for i, (f, k) in enumerate(factors): factors[i] = (dmp_convert(f, u, K0, K0_inexact), k) coeff = K0_inexact.convert(coeff, K0) for i, j in enumerate(reversed(J)): if not j: continue term = {(0,)*(u-i) + (1,) + (0,)*i: K0.one} factors.insert(0, (dmp_from_dict(term, u, K0), j)) return coeff, _sort_factors(factors) @cythonized("u") def dmp_factor_list_include(f, u, K): """Factor polynomials into irreducibles in `K[X]`. """ if not u: return dup_factor_list_include(f, K) coeff, factors = dmp_factor_list(f, u, K) if not factors: return [(dmp_ground(coeff, u), 1)] else: g = dmp_mul_ground(factors[0][0], coeff, u, K) return [(g, factors[0][1])] + factors[1:] def dup_irreducible_p(f, K): """Returns ``True`` if ``f`` has no factors over its domain. """ return dmp_irreducible_p(f, 0, K) def dmp_irreducible_p(f, u, K): """Returns ``True`` if ``f`` has no factors over its domain. 
""" _, factors = dmp_factor_list(f, u, K) if not factors: return True elif len(factors) > 1: return False else: _, k = factors[0] return k == 1
Cuuuurzel/KiPyCalc
sympy_old/polys/factortools.py
Python
mit
33,114
[ "Gaussian" ]
b4439862f1cec6e88d6447d4385133cb86b6e1c52e2aba14aa98d8d5ff01f995
"""PEP484 compatibility code.""" import re from pytype.pytd.parse import visitors class Print484StubVisitor(visitors.Visitor): """Visitor for converting ASTs to the PEP 484 format. This generates a PEP484 "stub" format that contains function signatures, but no code. For example: class MyList(GenericType[T]): def append(self, x: T) -> NoneType: pass """ visits_all_node_types = True INDENT = " " * 4 def _SafeName(self, name): if not re.match(r"^[a-zA-Z_]", name): name = "_" + name return re.sub(r"[^a-zA-Z0-9_]", "_", name) def _MaybeCapitalize(self, s): """Capitalize container types. PEP484 defines some container types in "typing.py". E.g. "List" or "Dict". If we have a base type that corresponds to that, convert it to the corresponding PEP484 name. Args: s: A type name, e.g. "int" or "list" Returns: A type name that can be used as a PEP 484 generic. E.g. "List". """ if s in ["list", "tuple", "dict"]: return s.capitalize() else: return s def VisitTypeDeclUnit(self, node): """Convert the AST for an entire module to a PEP484 stub.""" sections = [node.constants, node.functions, node.classes] sections_as_string = ("\n".join(section_suite) for section_suite in sections if section_suite) return "\n\n".join(sections_as_string) def VisitConstant(self, node): """Convert a class-level or module-level constant to a string.""" return self._SafeName(node.name) + " = Undefined(" + node.type + ")" def VisitClass(self, node): """Visit a class, producing a multi-line, properly indented string.""" parents = list(node.parents) if node.template: parents += ["GenericType[%s]" % ", ".join(node.template)] header = "class " + self._SafeName(node.name) if parents: header += "(" + ", ".join(parents) + ")" header += ":" if node.methods or node.constants: constants = [self.INDENT + m for m in node.constants] method_lines = sum((m.splitlines() for m in node.methods), []) methods = [self.INDENT + m for m in method_lines] else: constants = [] methods = [self.INDENT + "pass"] return "\n".join([header] + constants + methods) + "\n" def VisitFunction(self, node): """Visit function, producing multi-line string (one for each signature).""" overload = "@overload\n" if len(node.signatures) > 1 else "" function_name = self._SafeName(node.name) return "\n".join(overload + "def " + function_name + sig for sig in node.signatures) def VisitSignature(self, node): """Visit a signature, producing a string.""" template = "<" + ", ".join(node.template) + ">" if node.template else "" ret = " -> " + node.return_type optional = ("*args, **kwargs",) if node.has_optional else () body = ":" if node.exceptions: body += "\n" for exc in node.exceptions: body += self.INDENT + "raise %s()\n" % exc else: body += " pass\n" # put 'pass' into the same line return "%s(%s)%s%s" % (template, ", ".join(node.params + optional), ret, body) def VisitParameter(self, node): """Convert a function parameter to a string.""" if node.type == "object": # Abbreviated form. "object" is the default. 
return node.name elif node.name == "self": return "self" else: return self._SafeName(node.name) + ": " + node.type def VisitMutableParameter(self, node): """Convert a mutable function parameter to a string.""" return self.VisitParameter(node) def VisitTemplateItem(self, node): """Convert a template to a string.""" return node.type_param def VisitNamedType(self, node): """Convert a type to a string.""" return self._SafeName(node.name) def VisitNativeType(self, node): """Convert a native type to a string.""" return self._SafeName(node.python_type.__name__) def VisitAnythingType(self, unused_node): """Convert an anything type to a string.""" return "Any" def VisitNothingType(self, unused_node): """Convert the nothing type to a string.""" return "Nothing" def VisitClassType(self, node): return self._SafeName(node.name) def VisitTypeParameter(self, node): return self._SafeName(node.name) def VisitHomogeneousContainerType(self, node): """Convert a homogeneous container type to a string.""" return self.VisitGenericType(node) def VisitGenericType(self, node): """Convert a generic type (E.g. list<int>) to a string.""" param_str = ", ".join(node.parameters) return self._MaybeCapitalize(node.base_type) + "[" + param_str + "]" def VisitUnionType(self, node): """Convert a union type ("x or y") to a string.""" return "Union[%s]" % ", ".join(node.type_list)
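A standalone check (hedged, outside the class) of the `_SafeName` sanitization rules above, using only the stdlib `re` module:

import re

def safe_name(name):
    # Same rules as Print484StubVisitor._SafeName above
    if not re.match(r"^[a-zA-Z_]", name):
        name = "_" + name
    return re.sub(r"[^a-zA-Z0-9_]", "_", name)

print(safe_name("0abc"), safe_name("a-b.c"))  # -> _0abc a_b_c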
pombredanne/pytype
pytype/pytd/pep484.py
Python
apache-2.0
4,900
[ "VisIt" ]
0862e97b4982c9335f1bc3bdd8f99f8d0be9d13025970cec6a24ff4641abf936
#!/usr/bin/python3

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import HR_data
import time
import HR_vocabulary
import tensorflow as tf
import numpy as np
import random
from datetime import datetime
from Logger import Logger

tf.flags.DEFINE_integer("BATCH_SIZE", 50, "Training batch size")
tf.flags.DEFINE_integer("NUM_EPOCHS", 500, "Number of training epochs")
tf.flags.DEFINE_string("DATASET", "HR-portali", "Dataset to perform training and testing on")
tf.flags.DEFINE_string("REGION_SIZES", "3,4,5", "Region sizes for convolutional layer")
tf.flags.DEFINE_integer("NUM_FILTERS", 64, "Number of filters per region size")
tf.flags.DEFINE_float("MAX_L2_NORM", 0, "Maximum L2 norm for convolutional layer weights")
tf.flags.DEFINE_float("REG_LAMBDA", 0, "Lambda regularization parameter for fully-connected layer")
tf.flags.DEFINE_float("DROPOUT_PROB", 0.5, "Neuron dropout probability")
tf.flags.DEFINE_float("LEARNING_RATE", 3e-4, "Initial learning rate value")
tf.flags.DEFINE_float("LEARNING_DECAY_RATE", 0.95, "Rate at which learning rate will exponentially decay during the training")
tf.flags.DEFINE_string("MODEL", "CNN_HR_YoonKim", "Neural network model to use")
tf.flags.DEFINE_integer("EVAL_CHECKPOINT", 10, "Evaluate the model every this number of epochs")
tf.flags.DEFINE_integer("VECTOR_DIM", 64, "Word vector dimension")
tf.flags.DEFINE_integer("MAX_DOCUMENT_SIZE", 0, "Size (word number) to which all documents will be aligned. 0 means no alignment.")
tf.flags.DEFINE_integer("VOCABULARY_SIZE", 50000, "Number of words for which embeddings will be generated")
tf.flags.DEFINE_boolean("GPU_ALLOW_GROWTH", True, "Only grow memory usage as is needed by the process")
tf.flags.DEFINE_boolean("SAVE", False, "Model will be saved")

FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()

today = datetime.today()
n = FLAGS.MODEL.split("_")
n.remove("CNN")
id_string = "{}_{}_{:02}-{:02}-{:02}_{:02}-{:02}-{:02}".format(
    FLAGS.DATASET,
    "_".join(n),
    today.day, today.month, int(str(today.year)[-2:]),
    today.hour, today.minute, today.second
)

logger = Logger(id_string + ".txt", print_to_stdout=True)
logger.log("ID: " + id_string)
logger.log("")

logger.log("Hyperparameters:")
for param, value in sorted(FLAGS.__flags.items()):
    logger.log(param + ": " + str(value))
logger.log("")

train, test, num_classes, class_dict, max_sentence_length = HR_data.load_dataset(
    FLAGS.DATASET, FLAGS.VOCABULARY_SIZE, FLAGS.MAX_DOCUMENT_SIZE)

logger.log("Train set size: " + str(len(train)))
logger.log("Test set size: " + str(len(test)))
logger.log("Classes: " + str(num_classes))
logger.log("Max sentence length: " + str(max_sentence_length))
logger.log()

# train data prepare
for i in range(len(train)):
    sentence, label = train[i]
    word_indices = HR_data.index_and_align(sentence, max_sentence_length)
    train[i] = (word_indices, label)
    # print(train[i])
    # input()

# test data prepare
for i in range(len(test)):
    sentence, label = test[i]
    word_indices = HR_data.index_and_align(sentence, max_sentence_length)
    test[i] = (word_indices, label)

config = tf.ConfigProto()
config.gpu_options.allow_growth = FLAGS.GPU_ALLOW_GROWTH

with tf.Session(config=config) as sess, logger:
    model_class = HR_data.get_model_class(FLAGS.MODEL)
    neural_network = model_class(
        model_name=id_string,
        session=sess,
        learning_rate=FLAGS.LEARNING_RATE,
        learning_decay_rate=FLAGS.LEARNING_DECAY_RATE,
        optimizer=tf.train.AdamOptimizer,
        filter_sizes=[int(region_size) for region_size in FLAGS.REGION_SIZES.split(",")],
        num_filters=FLAGS.NUM_FILTERS,
        vocabulary_size=FLAGS.VOCABULARY_SIZE,
        max_sentence_length=max_sentence_length,
        num_classes=num_classes,
        embedding_dim=FLAGS.VECTOR_DIM,
        max_l2_norm=FLAGS.MAX_L2_NORM,
        regularization_lambda=FLAGS.REG_LAMBDA,
        dropout_keep_prob=1 - FLAGS.DROPOUT_PROB
    )

    def evaluate():
        logger.log("Evaluating...", end=" ")
        correct = 0
        for i in range(len(test)):
            indices, label = test[i]
            output, predictions = neural_network.feed([indices])
            accuracy = label[predictions[0]]
            correct += accuracy
        logger.log("Test set accuracy: " + str(correct / len(test) * 100) + " %")

    start_time = time.time()
    batch_indices = HR_data.generate_partitions(len(train), FLAGS.BATCH_SIZE)
    try:  # allow user to end training using Ctrl+C
        for epoch in range(1, FLAGS.NUM_EPOCHS + 1):
            random.shuffle(train)
            avg_loss = 0
            for start, end in batch_indices:
                indices, labels = zip(*train[start:end])
                loss = neural_network.train_step(indices, labels)
                avg_loss += loss
            avg_loss /= len(batch_indices)
            logger.log("Epoch " + str(epoch) + " loss: " + str(avg_loss))
            if epoch % FLAGS.EVAL_CHECKPOINT == 0:
                evaluate()
    except KeyboardInterrupt:
        pass

    end_time = time.time()
    training_minutes = int((end_time - start_time) // 60)
    training_seconds = int((end_time - start_time) - training_minutes * 60)
    logger.log("Training DONE ({} m {} s).".format(training_minutes, training_seconds))
    evaluate()

    if FLAGS.SAVE:
        HR_data.save_model(neural_network)
bornabesic/cnn-text-classification
HR/HR_train_and_test.py
Python
mit
4,986
[ "NEURON" ]
19f888c0e85e9691e75c1c6e6f076d95d280aec8f241744ffe5cca1db17ceb4c
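The training script above drives its epoch loop with HR_data.generate_partitions, whose implementation is not included in this record. The call sites imply fixed half-open (start, end) batch ranges that are computed once and reused every epoch while the training list itself is reshuffled. A minimal sketch of that pattern; generate_partitions here is a hypothetical stand-in, not the project's actual helper:

import random

def generate_partitions(n_items, batch_size):
    # Hypothetical stand-in for HR_data.generate_partitions: half-open
    # (start, end) index pairs covering range(n_items) in batch_size chunks.
    return [(start, min(start + batch_size, n_items))
            for start in range(0, n_items, batch_size)]

train = [([1, 2, 3], [0, 1]), ([4, 5, 6], [1, 0]), ([7, 8, 9], [0, 1])]  # toy (indices, label) pairs
batch_indices = generate_partitions(len(train), batch_size=2)
for epoch in range(2):
    random.shuffle(train)                         # reshuffle examples; index ranges stay fixed
    for start, end in batch_indices:
        indices, labels = zip(*train[start:end])  # same unzip idiom as the script above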
""" ======= Plotter ======= .. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com> """ from __future__ import print_function import matplotlib import matplotlib.figure import numpy as np import astropy.units as u import copy import inspect from astropy import log # this mess is to handle a nested hell of different versions of matplotlib # (>=1.3 has BoundMethodProxy somewhere, >=3 gets rid of it) and python # (python >=3.4 has WeakMethod, earlier versions don't) try: from matplotlib.cbook import BoundMethodProxy except ImportError: try: from matplotlib.cbook import _BoundMethodProxy as BoundMethodProxy except ImportError: try: from matplotlib.cbook import WeakMethod except ImportError: try: from weakref import WeakMethod except ImportError: try: from weakrefmethod import WeakMethod except ImportError: raise ImportError("Could not import WeakMethod from " "anywhere. Try installing the " "weakrefmethod package or use a more " "recent version of python or matplotlib") class BoundMethodProxy(WeakMethod): @property def func(self): return self() from . import widgets from ..specwarnings import warn interactive_help_message = """ Interactive key commands for plotter. An additional help message may appear if you have initiated the fitter. '?' - bring up this message 'f' - initiate the /f/itter 'b' - initiate the /b/aseliner 'B' - initiate the /b/aseliner (reset the selection too) 'r' - re-attach matplotlib keys 'R' - redraw the plot cleanly 'i' : individual components / show each fitted component """ xlabel_table = {'speed': 'Velocity'} class Plotter(object): """ Class to plot a spectrum """ def __init__(self, Spectrum, autorefresh=True, title="", xlabel=None, silent=True, plotscale=1.0, **kwargs): import matplotlib.pyplot self._pyplot = matplotlib.pyplot self.figure = None self.axis = None self.Spectrum = Spectrum # plot parameters self.offset = 0.0 # vertical offset self.autorefresh = autorefresh self.xlabel = xlabel self.title = title self.errorplot = None self.plotkwargs = kwargs self._xlim = [None,None] self._ylim = [None,None] self.debug = False self.keyclick = None self.silent = silent self.plotscale = plotscale self._xclick1 = None self._xclick2 = None self.automake_fitter_tool = False self._active_gui = None @property def _xunit(self): return self.Spectrum.xarr.unit def _get_prop(xy, minmax): def getprop(self): if xy == 'x': if minmax == 'min': if self._xlim[0] is not None and self._xunit: try: self._xlim[0]._unit = self._xunit except AttributeError: self._xlim[0] = u.Quantity(self._xlim[0], self._xunit) return self._xlim[0] elif minmax == 'max': if self._xlim[1] is not None and self._xunit: try: self._xlim[1]._unit = self._xunit except AttributeError: self._xlim[1] = u.Quantity(self._xlim[1], self._xunit) return self._xlim[1] elif xy == 'y': if minmax == 'min': return self._ylim[0] elif minmax == 'max': return self._ylim[1] return getprop def _set_prop(xy, minmax): def setprop(self, value): if self.debug: frm = inspect.stack() print(frm[1],"Setting %s%s to %s" % (xy,minmax,value)) if xy == 'x': if minmax == 'min': self._xlim[0] = value elif minmax == 'max': self._xlim[1] = value elif xy == 'y': if minmax == 'min': self._ylim[0] = value elif minmax == 'max': self._ylim[1] = value return setprop xmin = property(fget=_get_prop('x','min'),fset=_set_prop('x','min')) xmax = property(fget=_get_prop('x','max'),fset=_set_prop('x','max')) ymin = property(fget=_get_prop('y','min'),fset=_set_prop('y','min')) ymax = property(fget=_get_prop('y','max'),fset=_set_prop('y','max')) def 
_disconnect_matplotlib_keys(self): """ Disconnected the matplotlib key-press callbacks """ if self.figure is not None: cbs = self.figure.canvas.callbacks.callbacks # this may cause problems since the dict of key press events is a # dict, i.e. not ordered, and we want to pop the first one... mpl_keypress_handler = self.figure.canvas.manager.key_press_handler_id try: self._mpl_key_callbacks = {mpl_keypress_handler: cbs['key_press_event'].pop(mpl_keypress_handler)} except KeyError: bmp = BoundMethodProxy(self.figure.canvas.manager.key_press) self._mpl_key_callbacks = {mpl_keypress_handler: bmp} def _reconnect_matplotlib_keys(self): """ Reconnect the previously disconnected matplotlib keys """ if self.figure is not None and hasattr(self,'_mpl_key_callbacks'): self.figure.canvas.callbacks.callbacks['key_press_event'].update(self._mpl_key_callbacks) elif self.figure is not None: mpl_keypress_handler = self.figure.canvas.manager.key_press_handler_id bmp = BoundMethodProxy(self.figure.canvas.manager.key_press) self.figure.canvas.callbacks.callbacks['key_press_event'].update({mpl_keypress_handler: bmp}) def __call__(self, figure=None, axis=None, clear=True, autorefresh=None, plotscale=1.0, override_plotkwargs=False, **kwargs): """ Plot a spectrum Keywords: figure - either a matplotlib figure instance or a figure number to pass into pyplot.figure. axis - Alternative to figure, can pass an axis instance and use it as the plotting canvas clear - Clear the axis before plotting? """ # figure out where to put the plot if isinstance(figure,matplotlib.figure.Figure): self.figure = figure self.axis = self.figure.gca() elif type(figure) is int: self.figure = self._pyplot.figure(figure) self.axis = self.figure.gca() elif self.figure is None: if isinstance(axis,matplotlib.axes.Axes): self.axis = axis self.figure = axis.figure else: self.figure = self._pyplot.figure() if hasattr(self.figure, 'number') and not self._pyplot.fignum_exists(self.figure.number): self.figure = self._pyplot.figure(self.figure.number) # always re-connect the interactive keys to avoid frustration... self._mpl_reconnect() if axis is not None: #self._mpl_disconnect() self.axis = axis self.figure = axis.figure #self._mpl_connect() elif len(self.figure.axes) > 0 and self.axis is None: self.axis = self.figure.axes[0] # default to first axis elif self.axis is None: self.axis = self.figure.gca() # A check to deal with issue #117: if you close the figure, the axis # still exists, but it cannot be reattached to a figure if (hasattr(self.axis.get_figure(), 'number') and not (self.axis.get_figure() is self._pyplot.figure(self.axis.get_figure().number))): self.axis = self.figure.gca() if self.axis is not None and self.axis not in self.figure.axes: # if you've cleared the axis, but the figure is still open, you # need a new axis self.figure.add_axes(self.axis) if clear and self.axis is not None: self.axis.clear() # Need to empty the stored model plots if hasattr(self.Spectrum, 'fitter'): self.Spectrum.fitter.clear() if autorefresh is not None: self.autorefresh = autorefresh self.plotscale = plotscale if self.plotkwargs and not override_plotkwargs: self.plotkwargs.update(kwargs) else: self.plotkwargs = kwargs self.plot(**kwargs) def _mpl_connect(self): if self.keyclick is None: self.keyclick = self.figure.canvas.mpl_connect('key_press_event',self.parse_keys) def _mpl_disconnect(self): self.figure.canvas.mpl_disconnect(self.keyclick) self.keyclick = None def disconnect(self): """ Disconnect the matplotlib interactivity of this pyspeckit plotter. 
""" self._mpl_disconnect() def connect(self): """ Connect to the matplotlib key-parsing interactivity """ self._mpl_connect() def _mpl_reconnect(self): self._mpl_disconnect() self._mpl_connect() # disable fullscreen & grid self._pyplot.rcParams['keymap.fullscreen'] = 'ctrl+f' self._pyplot.rcParams['keymap.grid'] = 'ctrl+g' def plot(self, offset=0.0, xoffset=0.0, color='k', drawstyle='steps-mid', linewidth=0.5, errstyle=None, erralpha=0.2, errcolor=None, silent=None, reset=True, refresh=True, use_window_limits=None, useOffset=False, **kwargs): """ Plot the spectrum! Tries to automatically find a reasonable plotting range if one is not set. Parameters ---------- offset : float vertical offset to add to the spectrum before plotting. Useful if you want to overlay multiple spectra on a single plot xoffset: float An x-axis shift. I don't know why you'd want this... color : str default to plotting spectrum in black drawstyle : 'steps-mid' or str 'steps-mid' for histogram-style plotting. See matplotlib's plot for more information linewidth : float Line width in pixels. Narrow lines are helpful when histo-plotting errstyle : 'fill', 'bars', or None can be "fill", which draws partially transparent boxes around the data to show the error region, or "bars" which draws standard errorbars. ``None`` will display no errorbars useOffset : bool Use offset-style X/Y coordinates (e.g., 1 + 1.483e10)? Defaults to False because these are usually quite annoying. xmin/xmax/ymin/ymax : float override defaults for plot range. Once set, these parameters are sticky (i.e., replotting will use the same ranges). Passed to `reset_limits` reset_[xy]limits : bool Reset the limits to "sensible defaults". Passed to `reset_limits` ypeakscale : float Scale up the Y maximum value. Useful to keep the annotations away from the data. Passed to `reset_limits` reset : bool Reset the x/y axis limits? If set, `reset_limits` will be called. """ if self.axis is None: raise Exception("You must call the Plotter class to initiate the canvas before plotting.") self.offset = offset # there is a bug where this only seems to update the second time it is called self.label(**kwargs) self.label(**kwargs) for arg in ['title','xlabel','ylabel']: if arg in kwargs: kwargs.pop(arg) reset_kwargs = {} for arg in ['xmin', 'xmax', 'ymin', 'ymax', 'reset_xlimits', 'reset_ylimits', 'ypeakscale']: if arg in kwargs: reset_kwargs[arg] = kwargs.pop(arg) if (use_window_limits is None and any(k in reset_kwargs for k in ('xmin','xmax','reset_xlimits'))): use_window_limits = False if use_window_limits: self._stash_window_limits() # for filled errorbars, order matters. 
inds = np.argsort(self.Spectrum.xarr) if errstyle is not None: if errcolor is None: errcolor = color if errstyle == 'fill': self.errorplot = [self.axis.fill_between(steppify(self.Spectrum.xarr.value[inds]+xoffset, isX=True), steppify((self.Spectrum.data*self.plotscale+self.offset-self.Spectrum.error*self.plotscale)[inds]), steppify((self.Spectrum.data*self.plotscale+self.offset+self.Spectrum.error*self.plotscale)[inds]), facecolor=errcolor, edgecolor=errcolor, alpha=erralpha, **kwargs)] elif errstyle == 'bars': self.errorplot = self.axis.errorbar(self.Spectrum.xarr[inds].value+xoffset, self.Spectrum.data[inds]*self.plotscale+self.offset, yerr=self.Spectrum.error[inds]*self.plotscale, ecolor=errcolor, fmt='none', **kwargs) self._spectrumplot = self.axis.plot(self.Spectrum.xarr.value[inds]+xoffset, self.Spectrum.data[inds]*self.plotscale+self.offset, color=color, drawstyle=drawstyle, linewidth=linewidth, **kwargs) self.axis.ticklabel_format(useOffset=useOffset) if use_window_limits: self._reset_to_stashed_limits() if silent is not None: self.silent = silent if reset: self.reset_limits(use_window_limits=use_window_limits, **reset_kwargs) if self.autorefresh and refresh: self.refresh() # Maybe it's OK to call 'plot' when there is an active gui tool # (e.g., baseline or specfit)? #if self._active_gui: # self._active_gui = None # warn("An active GUI was found while initializing the " # "plot. This is somewhat dangerous and may result " # "in broken interactivity.") def _stash_window_limits(self): self._window_limits = self.axis.get_xlim(),self.axis.get_ylim() if self.debug: print("Stashed window limits: ",self._window_limits) def _reset_to_stashed_limits(self): self.axis.set_xlim(*self._window_limits[0]) self.axis.set_ylim(*self._window_limits[1]) self.xmin,self.xmax = self._window_limits[0] self.ymin,self.ymax = self._window_limits[1] if self.debug: print("Recovered window limits: ",self._window_limits) def reset_limits(self, xmin=None, xmax=None, ymin=None, ymax=None, reset_xlimits=True, reset_ylimits=True, ypeakscale=1.2, silent=None, use_window_limits=False, **kwargs): """ Automatically or manually reset the plot limits """ # if not use_window_limits: use_window_limits = False if self.debug: frame = inspect.currentframe() args, _, _, values = inspect.getargvalues(frame) print(zip(args,values)) if use_window_limits: # this means DO NOT reset! # it simply sets self.[xy][min/max] = current value self.set_limits_from_visible_window() else: if silent is not None: self.silent = silent # if self.xmin and self.xmax: if (reset_xlimits or self.Spectrum.xarr.min().value < self.xmin or self.Spectrum.xarr.max().value > self.xmax): if not self.silent: warn("Resetting X-axis min/max because the plot is out of bounds.") self.xmin = None self.xmax = None if xmin is not None: self.xmin = u.Quantity(xmin, self._xunit) elif self.xmin is None: self.xmin = u.Quantity(self.Spectrum.xarr.min().value, self._xunit) if xmax is not None: self.xmax = u.Quantity(xmax, self._xunit) elif self.xmax is None: self.xmax = u.Quantity(self.Spectrum.xarr.max().value, self._xunit) xpixmin = np.argmin(np.abs(self.Spectrum.xarr.value-self.xmin.value)) xpixmax = np.argmin(np.abs(self.Spectrum.xarr.value-self.xmax.value)) if xpixmin>xpixmax: xpixmin,xpixmax = xpixmax,xpixmin elif xpixmin == xpixmax: if reset_xlimits: raise Exception("Infinite recursion error. Maybe there are no valid data?") if not self.silent: warn("ERROR: the X axis limits specified were invalid. 
Resetting.") self.reset_limits(reset_xlimits=True, ymin=ymin, ymax=ymax, reset_ylimits=reset_ylimits, ypeakscale=ypeakscale, **kwargs) return if self.ymin is not None and self.ymax is not None: # this is utter nonsense.... if (np.nanmax(self.Spectrum.data) < self.ymin or np.nanmin(self.Spectrum.data) > self.ymax or reset_ylimits): if not self.silent and not reset_ylimits: warn("Resetting Y-axis min/max because the plot is out of bounds.") self.ymin = None self.ymax = None if ymin is not None: self.ymin = ymin elif self.ymin is None: yminval = np.nanmin(self.Spectrum.data[xpixmin:xpixmax]) # Increase the range fractionally. This means dividing a positive #, multiplying a negative # if yminval < 0: self.ymin = float(yminval)*float(ypeakscale) else: self.ymin = float(yminval)/float(ypeakscale) if ymax is not None: self.ymax = ymax elif self.ymax is None: ymaxval = (np.nanmax(self.Spectrum.data[xpixmin:xpixmax])-self.ymin) if ymaxval > 0: self.ymax = float(ymaxval) * float(ypeakscale) + self.ymin else: self.ymax = float(ymaxval) / float(ypeakscale) + self.ymin self.ymin += self.offset self.ymax += self.offset self.axis.set_xlim(self.xmin.value if hasattr(self.xmin, 'value') else self.xmin, self.xmax.value if hasattr(self.xmax, 'value') else self.xmax) self.axis.set_ylim(self.ymin, self.ymax) def label(self, title=None, xlabel=None, ylabel=None, verbose_label=False, **kwargs): """ Label the plot, with an attempt to parse standard units into nice latex labels Parameters ---------- title : str xlabel : str ylabel : str verbose_label: bool """ if title is not None: self.title = title elif hasattr(self.Spectrum,'specname'): self.title = self.Spectrum.specname if self.title != "": self.axis.set_title(self.title) if xlabel is not None: log.debug("setting xlabel={0}".format(xlabel)) self.xlabel = xlabel elif self._xunit: try: self.xlabel = xlabel_table[str(self._xunit.physical_type).lower()] except KeyError: self.xlabel = str(self._xunit.physical_type) # WAS: self.xlabel += " ("+u.Unit(self._xunit).to_string()+")" self.xlabel += " ({0})".format(self._xunit.to_string()) log.debug("xunit is {1}. set xlabel={0}".format(self.xlabel, self._xunit)) if verbose_label: self.xlabel = "%s %s" % (str(self.Spectrum.xarr.velocity_convention), self.xlabel) else: log.warn("Plotter: xlabel was not set") if self.xlabel is not None: self.axis.set_xlabel(self.xlabel) if ylabel is not None: self.axis.set_ylabel(ylabel) elif self.Spectrum.unit in ['Ta*','Tastar']: self.axis.set_ylabel("$T_A^*$ (K)") elif self.Spectrum.unit in ['K']: self.axis.set_ylabel("Brightness Temperature $T$ (K)") elif self.Spectrum.unit == 'mJy': self.axis.set_ylabel("$S_\\nu$ (mJy)") elif self.Spectrum.unit == 'Jy': self.axis.set_ylabel("$S_\\nu$ (Jy)") else: if isinstance(self.Spectrum.unit, str) and "$" in self.Spectrum.unit: # assume LaTeX already self.axis.set_ylabel(self.Spectrum.unit) elif isinstance(self.Spectrum.unit, str): self.axis.set_ylabel(self.Spectrum.unit) else: label_units = self.Spectrum.unit.to_string(format='latex') if 'mathring{A}' in label_units: label_units = label_units.replace('\\mathring{A}', 'A') if '\\overset' in label_units: label_units = label_units.replace('\\overset', '^') self.axis.set_ylabel(label_units) @property def ylabel(self): return self.axis.get_ylabel() def refresh(self): if self.axis is not None: self.axis.figure.canvas.draw() def savefig(self,fname,bbox_inches='tight',**kwargs): """ simple wrapper of maplotlib's savefig. 
""" self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs) def parse_keys(self,event): """ Parse key commands entered from the keyboard """ if hasattr(event,'key'): if event.key == '?': print(interactive_help_message) elif event.key == 'f': print("\n\nFitter initiated from the interactive plotter.") # extra optional text: # Matplotlib shortcut keys ('g','l','p',etc.) are disabled. Re-enable with 'r'" if self._active_gui == self.Spectrum.specfit and self._active_gui._check_connections(verbose=False): print("Fitter is already active. Use 'q' to quit the fitter.") elif self._active_gui == self.Spectrum.specfit and not self._active_gui._check_connections(verbose=False): # forcibly clear connections self._active_gui.clear_all_connections() # the 'clear_all_connections' code *explicitly* makes the # following line correct, except in the case that there is # no canvas... assert self._active_gui is None self.activate_interactive_fitter() else: self.activate_interactive_fitter() assert self._active_gui == self.Spectrum.specfit assert self._active_gui._check_connections(verbose=False) if not hasattr(self,'FitterTool') and self.automake_fitter_tool: self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure) elif hasattr(self,'FitterTool') and self.FitterTool.toolfig.number not in self._pyplot.get_fignums(): self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure) elif event.key is not None and event.key.lower() == 'b': if event.key == 'b': print("\n\nBaseline initiated from the interactive plotter") elif event.key == 'B': print("\n\nBaseline initiated from the interactive plotter (with reset)") print("Matplotlib shortcut keys ('g','l','p',etc.) are disabled. Re-enable with 'r'") self.activate_interactive_baseline_fitter(reset_selection=(event.key=='B')) if not hasattr(self,'FitterTool') and self.automake_fitter_tool: self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure) elif hasattr(self,'FitterTool') and self.FitterTool.toolfig.number not in self._pyplot.get_fignums(): self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure) elif event.key == 'r': # print("\n\nReconnected matplotlib shortcut keys.") self._reconnect_matplotlib_keys() elif event.key == 'R': self() elif event.key == 'i': self.Spectrum.specfit.plot_fit(show_components=True) def get_two_clicks(self,event): if self._xclick1 is None: self._xclick1 = event.xdata elif self._xclick2 is None: self._xclick2 = event.xdata def set_limits_from_visible_window(self, debug=False): """ Hopefully self-descriptive: set the x and y limits from the currently visible window (use this if you use the pan/zoom tools or manually change the limits) """ if debug: print("Changing x limits from {},{} to {},{}".format(self.xmin,self.xmax,self.axis.get_xlim()[0],self.axis.get_xlim()[1])) print("Changing y limits from {},{} to {},{}".format(self.ymin,self.ymax,self.axis.get_ylim()[0],self.axis.get_ylim()[1])) self.xmin, self.xmax = self.axis.get_xlim() self.ymin, self.ymax = self.axis.get_ylim() if debug: print("New x limits {},{} == {},{}".format(self.xmin,self.xmax,self.axis.get_xlim()[0],self.axis.get_xlim()[1])) print("New y limits {},{} == {},{}".format(self.ymin,self.ymax,self.axis.get_ylim()[0],self.axis.get_ylim()[1])) def copy(self, parent=None): """ Create a copy of the plotter with blank (uninitialized) axis & figure [ parent ] A spectroscopic axis instance that is the parent of the specfit instance. 
This needs to be specified at some point, but defaults to None to prevent overwriting a previous plot. """ newplotter = copy.copy(self) newplotter.Spectrum = parent newplotter.axis = None newplotter.figure = None return newplotter def line_ids(self, line_names, line_xvals, xval_units=None, auto_yloc=True, velocity_offset=None, velocity_convention='radio', auto_yloc_fraction=0.9, **kwargs): """ Add line ID labels to a plot using lineid_plot http://oneau.wordpress.com/2011/10/01/line-id-plot/ https://github.com/phn/lineid_plot http://packages.python.org/lineid_plot/ Parameters ---------- line_names : list A list of strings to label the specified x-axis values line_xvals : list List of x-axis values (e.g., wavelengths) at which to label the lines. Can be a list of quantities. xval_units : string The unit of the line_xvals if they are not given as quantities velocity_offset : quantity A velocity offset to apply to the inputs if they are in frequency or wavelength units velocity_convention : 'radio' or 'optical' or 'doppler' Used if the velocity offset is given auto_yloc : bool If set, overrides box_loc and arrow_tip (the vertical position of the lineid labels) in kwargs to be `auto_yloc_fraction` of the plot range auto_yloc_fraction: float in range [0,1] The fraction of the plot (vertically) at which to place labels Examples -------- >>> import numpy as np >>> import pyspeckit >>> sp = pyspeckit.Spectrum( xarr=pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,101), unit='km/s', refX=6562.8, refX_unit='angstrom'), data=np.random.randn(101), error=np.ones(101)) >>> sp.plotter() >>> sp.plotter.line_ids(['H$\\alpha$'],[6562.8],xval_units='angstrom') """ import lineid_plot if velocity_offset is not None: assert velocity_offset.unit.is_equivalent(u.km/u.s) doppler = getattr(u, 'doppler_{0}'.format(velocity_convention)) if self.Spectrum.xarr.refX is not None: equivalency = doppler(self.Spectrum.xarr.refX) else: equivalency = doppler(self.Spectrum.xarr.as_unit(u.GHz)[0]) xvals = [] linenames_toplot = [] for xv,ln in zip(line_xvals, line_names): if hasattr(xv, 'unit'): pass else: xv = u.Quantity(xv, xval_units) xv = xv.to(u.km/u.s, equivalencies=equivalency) if velocity_offset is not None: xv = xv + velocity_offset xv = xv.to(self.Spectrum.xarr.unit, equivalencies=equivalency) if self.Spectrum.xarr.in_range(xv): xvals.append(xv.value) linenames_toplot.append(ln) if len(xvals) != len(line_xvals): log.warn("Skipped {0} out-of-bounds lines when plotting line IDs." 
.format(len(line_xvals)-len(xvals))) if auto_yloc: yr = self.axis.get_ylim() kwargs['box_loc'] = (yr[1]-yr[0])*auto_yloc_fraction + yr[0] kwargs['arrow_tip'] = (yr[1]-yr[0])*(auto_yloc_fraction*0.9) + yr[0] lineid_plot.plot_line_ids(self.Spectrum.xarr, self.Spectrum.data, xvals, linenames_toplot, ax=self.axis, **kwargs) def line_ids_from_measurements(self, auto_yloc=True, auto_yloc_fraction=0.9, **kwargs): """ Add line ID labels to a plot using lineid_plot http://oneau.wordpress.com/2011/10/01/line-id-plot/ https://github.com/phn/lineid_plot http://packages.python.org/lineid_plot/ Parameters ---------- auto_yloc : bool If set, overrides box_loc and arrow_tip (the vertical position of the lineid labels) in kwargs to be `auto_yloc_fraction` of the plot range auto_yloc_fraction: float in range [0,1] The fraction of the plot (vertically) at which to place labels Examples -------- >>> import numpy as np >>> import pyspeckit >>> sp = pyspeckit.Spectrum( xarr=pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,101), units='km/s', refX=6562.8, refX_unit='angstroms'), data=np.random.randn(101), error=np.ones(101)) >>> sp.plotter() >>> sp.specfit(multifit=None, fittype='gaussian', guesses=[1,0,1]) # fitting noise.... >>> sp.measure() >>> sp.plotter.line_ids_from_measurements() """ import lineid_plot if hasattr(self.Spectrum,'measurements'): measurements = self.Spectrum.measurements if auto_yloc: yr = self.axis.get_ylim() kwargs['box_loc'] = (yr[1]-yr[0])*auto_yloc_fraction + yr[0] kwargs['arrow_tip'] = (yr[1]-yr[0])*(auto_yloc_fraction*0.9) + yr[0] lineid_plot.plot_line_ids(self.Spectrum.xarr, self.Spectrum.data, [v['pos'] for v in measurements.lines.values()], measurements.lines.keys(), ax=self.axis, **kwargs) else: warn("Cannot add line IDs from measurements unless measurements have been made!") def activate_interactive_fitter(self): """ Attempt to activate the interactive fitter """ if self._active_gui is not None: # This should not be reachable. Clearing connections is the # "right" behavior if this becomes reachable, but I'd rather raise # an exception because I don't want to get here ever self._active_gui.clear_all_connections() raise ValueError("GUI was active when 'f' key pressed") self._activate_interactive(self.Spectrum.specfit, interactive=True) def activate_interactive_baseline_fitter(self, **kwargs): """ Attempt to activate the interactive baseline fitter """ if self._active_gui is not None: # This should not be reachable. 
Clearing connections is the # "right" behavior if this becomes reachable, but I'd rather raise # an exception because I don't want to get here ever gui_was = self._active_gui self._active_gui.clear_all_connections() raise ValueError("GUI {0} was active when 'b' key pressed" .format(gui_was)) self._activate_interactive(self.Spectrum.baseline, interactive=True, **kwargs) def _activate_interactive(self, object_to_activate, **kwargs): self._disconnect_matplotlib_keys() self._active_gui = object_to_activate # activating the gui calls clear_all_connections, which disconnects the # gui try: self._active_gui(**kwargs) self._active_gui = object_to_activate assert self._active_gui is not None except Exception as ex: self._active_gui = None raise ex def parse_units(labelstring): import re labelstring = re.sub("um","$\\mu$m",labelstring) labelstring = re.sub("-1","$^{-1}$",labelstring) labelstring = re.sub("-2","$^{-2}$",labelstring) labelstring = re.sub("-3","$^{-3}$",labelstring) labelstring = re.sub("ergss","ergs s",labelstring) return labelstring def parse_norm(norm): """ Expected format: norm = 10E15 """ try: base, exp = norm.split('E') except ValueError: base, exp = norm.split('e') if float(base) == 1.0: norm = '10' else: norm = base norm += '^{%s}' % exp return norm def steppify(arr,isX=False): """ *support function* Converts an array to double-length for step plotting """ if isX: interval = abs(arr[1:]-arr[:-1]) / 2.0 newarr = np.array(list(zip(arr[:-1]-interval,arr[:-1]+interval))).ravel() newarr = np.concatenate([newarr,2*[newarr[-1]+interval[-1]]]) else: newarr = np.array(list(zip(arr,arr))).ravel() return newarr
keflavich/pyspeckit
pyspeckit/spectrum/plotters.py
Python
mit
35,854
[ "Gaussian" ]
d038af81f73fb2e14a7c8dd82df6b4d401c0fd10d63fa59ea53d9c0d301766f5
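The plotters module above relies on the steppify helper defined at its end to draw histogram-style filled error regions. A small self-contained demonstration of that helper (its body is copied from the record so the snippet runs on its own; the sample data are invented):

import numpy as np

def steppify(arr, isX=False):
    # Convert an array to double length for step plotting (copied from above).
    if isX:
        interval = abs(arr[1:] - arr[:-1]) / 2.0
        newarr = np.array(list(zip(arr[:-1] - interval, arr[:-1] + interval))).ravel()
        newarr = np.concatenate([newarr, 2 * [newarr[-1] + interval[-1]]])
    else:
        newarr = np.array(list(zip(arr, arr))).ravel()
    return newarr

x = np.linspace(0.0, 4.0, 5)             # bin centers
y = np.array([1.0, 3.0, 2.0, 4.0, 3.0])  # values at those centers
xs, ys = steppify(x, isX=True), steppify(y)
assert len(xs) == len(ys) == 2 * len(x)  # each sample becomes a pair of step-edge points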
from django.utils.translation import ugettext_lazy as _

from crystal_dashboard.dashboards.crystal import dashboard
import horizon


class WorkloadMetrics(horizon.Panel):
    name = _("Workload Metrics")
    slug = "metrics"


dashboard.CrystalController.register(WorkloadMetrics)
Crystal-SDS/dashboard
crystal_dashboard/dashboards/crystal/metrics/panel.py
Python
gpl-3.0
280
[ "CRYSTAL" ]
5039f9be96a81a39c292b263bd936e74ac9c93764cce7b6edd403b1aff736d29
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
Bismarrck/pymatgen
pymatgen/optimization/__init__.py
Python
mit
229
[ "pymatgen" ]
e12729bdf6c1328ff7c07aa686bc9220f198fde44e878beb4b52e09a7d9e02c4
import numpy as np

from sklearn.utils.testing import (assert_allclose, assert_raises,
                                   assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler


def compute_kernel_slow(Y, X, kernel, h):
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
    if kernel == 'gaussian':
        return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
    elif kernel == 'tophat':
        return norm * (d < h).sum(-1)
    elif kernel == 'epanechnikov':
        return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
    elif kernel == 'exponential':
        return norm * (np.exp(-d / h)).sum(-1)
    elif kernel == 'linear':
        return norm * ((1 - d / h) * (d < h)).sum(-1)
    elif kernel == 'cosine':
        return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
    else:
        raise ValueError('kernel not recognized')


def test_kernel_density(n_samples=100, n_features=3):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

            def check_results(kernel, bandwidth, atol, rtol):
                kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                                    atol=atol, rtol=rtol)
                log_dens = kde.fit(X).score_samples(Y)
                assert_allclose(np.exp(log_dens), dens_true,
                                atol=atol, rtol=max(1E-7, rtol))
                assert_allclose(np.exp(kde.score(Y)),
                                np.prod(dens_true),
                                atol=atol, rtol=max(1E-7, rtol))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, bandwidth, atol, rtol)


def test_kernel_density_sampling(n_samples=100, n_features=3):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)

    bandwidth = 0.2

    for kernel in ['gaussian', 'tophat']:
        # draw a tophat sample
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        samp = kde.sample(100)
        assert_equal(X.shape, samp.shape)

        # check that samples are in the right range
        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nbrs.kneighbors(X, return_distance=True)

        if kernel == 'tophat':
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)

    # check unsupported kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, kde.sample, 100)

    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert_equal(kde.sample().shape, (1, 1))


def test_kde_algorithm_metric_choice():
    # Smoke test for various metrics and algorithms
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)    # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in ['euclidean', 'minkowski', 'manhattan',
                       'chebyshev', 'haversine']:
            if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                kde = KernelDensity(algorithm=algorithm, metric=metric)
                kde.fit(X)
                y_dens = kde.score_samples(Y)
                assert_equal(y_dens.shape, Y.shape[:1])


def test_kde_score(n_samples=100, n_features=3):
    pass
    #FIXME
    #np.random.seed(0)
    #X = np.random.random((n_samples, n_features))
    #Y = np.random.random((n_samples, n_features))


def test_kde_badargs():
    assert_raises(ValueError, KernelDensity, algorithm='blah')
    assert_raises(ValueError, KernelDensity, bandwidth=0)
    assert_raises(ValueError, KernelDensity, kernel='blah')
    assert_raises(ValueError, KernelDensity, metric='blah')
    assert_raises(ValueError, KernelDensity, algorithm='kd_tree',
                  metric='blah')


def test_kde_pipeline_gridsearch():
    # test that kde plays nice in pipelines and grid-searches
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
                          KernelDensity(kernel="gaussian"))
    params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
    search = GridSearchCV(pipe1, param_grid=params, cv=5)
    search.fit(X)
    assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
krez13/scikit-learn
sklearn/neighbors/tests/test_kde.py
Python
bsd-3-clause
5,560
[ "Gaussian" ]
56115d4dc435645b11d5390f975639ffd1bd97d18c0df9dce1b30f3d69ea6e74
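The tests above exercise KernelDensity entirely through its public fit / score_samples / score / sample surface. A minimal usage sketch of that same surface, on random data and with an arbitrarily chosen bandwidth:

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.randn(100, 3)                   # training points
Y = rng.randn(10, 3)                    # query points

kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
log_dens = kde.score_samples(Y)         # log-density at each query point
total = kde.score(Y)                    # sum of those log-densities
draws = kde.sample(5, random_state=0)   # draw new points from the fitted density
assert draws.shape == (5, 3)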
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for testing `LinearOperator` and sub-classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.platform import test class OperatorBuildInfo(object): """Object encoding expected shape for a test. Encodes the expected shape of a matrix for a test. Also allows additional metadata for the test harness. """ def __init__(self, shape, **kwargs): self.shape = shape self.__dict__.update(kwargs) @six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init class LinearOperatorDerivedClassTest(test.TestCase): """Tests for derived classes. Subclasses should implement every abstractmethod, and this will enable all test methods to work. """ # Absolute/relative tolerance for tests. _atol = { dtypes.float16: 1e-3, dtypes.float32: 1e-6, dtypes.float64: 1e-12, dtypes.complex64: 1e-6, dtypes.complex128: 1e-12 } _rtol = { dtypes.float16: 1e-3, dtypes.float32: 1e-6, dtypes.float64: 1e-12, dtypes.complex64: 1e-6, dtypes.complex128: 1e-12 } def assertAC(self, x, y): """Derived classes can set _atol, _rtol to get different tolerance.""" dtype = dtypes.as_dtype(x.dtype) atol = self._atol[dtype] rtol = self._rtol[dtype] self.assertAllClose(x, y, atol=atol, rtol=rtol) @property def _adjoint_options(self): return [False, True] @property def _adjoint_arg_options(self): return [False, True] @property def _dtypes_to_test(self): # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit. return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] @property def _use_placeholder_options(self): return [False, True] @abc.abstractproperty def _operator_build_infos(self): """Returns list of OperatorBuildInfo, encapsulating the shape to test.""" raise NotImplementedError("operator_build_infos has not been implemented.") @abc.abstractmethod def _operator_and_matrix(self, build_info, dtype, use_placeholder): """Build a batch matrix and an Operator that should have similar behavior. Every operator acts like a (batch) matrix. This method returns both together, and is used by tests. Args: build_info: `OperatorBuildInfo`, encoding shape information about the operator. dtype: Numpy dtype. Data type of returned array/operator. use_placeholder: Python bool. 
If True, initialize the operator with a placeholder of undefined shape and correct dtype. Returns: operator: `LinearOperator` subclass instance. mat: `Tensor` representing operator. """ # Create a matrix as a numpy array with desired shape/dtype. # Create a LinearOperator that should have the same behavior as the matrix. raise NotImplementedError("Not implemented yet.") @abc.abstractmethod def _make_rhs(self, operator, adjoint, with_batch=True): """Make a rhs appropriate for calling operator.solve(rhs). Args: operator: A `LinearOperator` adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the adjoint operator. with_batch: Python `bool`. If `True`, create `rhs` with the same batch shape as operator, and otherwise create a matrix without any batch shape. Returns: A `Tensor` """ raise NotImplementedError("_make_rhs is not defined.") @abc.abstractmethod def _make_x(self, operator, adjoint, with_batch=True): """Make an 'x' appropriate for calling operator.matmul(x). Args: operator: A `LinearOperator` adjoint: Python `bool`. If `True`, we are making an 'x' value for the adjoint operator. with_batch: Python `bool`. If `True`, create `x` with the same batch shape as operator, and otherwise create a matrix without any batch shape. Returns: A `Tensor` """ raise NotImplementedError("_make_x is not defined.") @property def _tests_to_skip(self): """List of test names to skip.""" # Subclasses should over-ride if they want to skip some tests. # To skip "test_foo", add "foo" to this list. return [] def _skip_if_tests_to_skip_contains(self, test_name): """If self._tests_to_skip contains test_name, raise SkipTest exception. See tests below for usage. Args: test_name: String name corresponding to a test. Raises: SkipTest Exception, if test_name is in self._tests_to_skip. 
""" if test_name in self._tests_to_skip: self.skipTest( "{} skipped because it was added to self._tests_to_skip.".format( test_name)) def test_to_dense(self): self._skip_if_tests_to_skip_contains("to_dense") for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: for dtype in self._dtypes_to_test: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) op_dense = operator.to_dense() if not use_placeholder: self.assertAllEqual(build_info.shape, op_dense.get_shape()) op_dense_v, mat_v = sess.run([op_dense, mat]) self.assertAC(op_dense_v, mat_v) def test_det(self): self._skip_if_tests_to_skip_contains("det") for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: for dtype in self._dtypes_to_test: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) op_det = operator.determinant() if not use_placeholder: self.assertAllEqual(build_info.shape[:-2], op_det.get_shape()) op_det_v, mat_det_v = sess.run( [op_det, linalg_ops.matrix_determinant(mat)]) self.assertAC(op_det_v, mat_det_v) def test_log_abs_det(self): self._skip_if_tests_to_skip_contains("log_abs_det") for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: for dtype in self._dtypes_to_test: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) op_log_abs_det = operator.log_abs_determinant() _, mat_log_abs_det = linalg.slogdet(mat) if not use_placeholder: self.assertAllEqual( build_info.shape[:-2], op_log_abs_det.get_shape()) op_log_abs_det_v, mat_log_abs_det_v = sess.run( [op_log_abs_det, mat_log_abs_det]) self.assertAC(op_log_abs_det_v, mat_log_abs_det_v) def _test_matmul(self, with_batch): for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: # If batch dimensions are omitted, but there are # no batch dimensions for the linear operator, then # skip the test case. This is already checked with # with_batch=True. if not with_batch and len(build_info.shape) <= 2: continue for dtype in self._dtypes_to_test: for adjoint in self._adjoint_options: for adjoint_arg in self._adjoint_arg_options: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) x = self._make_x( operator, adjoint=adjoint, with_batch=with_batch) # If adjoint_arg, compute A X^H^H = A X. 
if adjoint_arg: op_matmul = operator.matmul( linalg.adjoint(x), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_matmul = operator.matmul(x, adjoint=adjoint) mat_matmul = linear_operator_util.matmul_with_broadcast( mat, x, adjoint_a=adjoint) if not use_placeholder: self.assertAllEqual(op_matmul.get_shape(), mat_matmul.get_shape()) op_matmul_v, mat_matmul_v = sess.run( [op_matmul, mat_matmul]) self.assertAC(op_matmul_v, mat_matmul_v) def test_matmul(self): self._skip_if_tests_to_skip_contains("matmul") self._test_matmul(with_batch=True) def test_matmul_with_broadcast(self): self._skip_if_tests_to_skip_contains("matmul_with_broadcast") self._test_matmul(with_batch=False) def _test_solve(self, with_batch): for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: # If batch dimensions are omitted, but there are # no batch dimensions for the linear operator, then # skip the test case. This is already checked with # with_batch=True. if not with_batch and len(build_info.shape) <= 2: continue for dtype in self._dtypes_to_test: for adjoint in self._adjoint_options: for adjoint_arg in self._adjoint_arg_options: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) rhs = self._make_rhs( operator, adjoint=adjoint, with_batch=with_batch) # If adjoint_arg, solve A X = (rhs^H)^H = rhs. if adjoint_arg: op_solve = operator.solve( linalg.adjoint(rhs), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_solve = operator.solve( rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) mat_solve = linear_operator_util.matrix_solve_with_broadcast( mat, rhs, adjoint=adjoint) if not use_placeholder: self.assertAllEqual(op_solve.get_shape(), mat_solve.get_shape()) op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve]) self.assertAC(op_solve_v, mat_solve_v) def test_solve(self): self._skip_if_tests_to_skip_contains("solve") self._test_solve(with_batch=True) def test_solve_with_broadcast(self): self._skip_if_tests_to_skip_contains("solve_with_broadcast") self._test_solve(with_batch=False) def test_trace(self): self._skip_if_tests_to_skip_contains("trace") for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: for dtype in self._dtypes_to_test: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) op_trace = operator.trace() mat_trace = math_ops.trace(mat) if not use_placeholder: self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape()) op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace]) self.assertAC(op_trace_v, mat_trace_v) def test_add_to_tensor(self): self._skip_if_tests_to_skip_contains("add_to_tensor") for use_placeholder in self._use_placeholder_options: for build_info in self._operator_build_infos: for dtype in self._dtypes_to_test: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) op_plus_2mat = operator.add_to_tensor(2 * mat) if not use_placeholder: self.assertAllEqual(build_info.shape, op_plus_2mat.get_shape()) op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat]) self.assertAC(op_plus_2mat_v, 3 * mat_v) def test_diag_part(self): self._skip_if_tests_to_skip_contains("diag_part") for use_placeholder in 
self._use_placeholder_options: for build_info in self._operator_build_infos: for dtype in self._dtypes_to_test: with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self._operator_and_matrix( build_info, dtype, use_placeholder=use_placeholder) op_diag_part = operator.diag_part() mat_diag_part = array_ops.matrix_diag_part(mat) if not use_placeholder: self.assertAllEqual(mat_diag_part.get_shape(), op_diag_part.get_shape()) op_diag_part_, mat_diag_part_ = sess.run( [op_diag_part, mat_diag_part]) self.assertAC(op_diag_part_, mat_diag_part_) @six.add_metaclass(abc.ABCMeta) class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for square operators. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @property def _operator_build_infos(self): build_info = OperatorBuildInfo # non-batch operators (n, n) and batch operators. return [ build_info((0, 0)), build_info((1, 1)), build_info((1, 3, 3)), build_info((3, 4, 4)), build_info((2, 1, 4, 4))] def _make_rhs(self, operator, adjoint, with_batch=True): # This operator is square, so rhs and x will have same shape. # adjoint value makes no difference because the operator shape doesn't # change since it is square, but be pedantic. return self._make_x(operator, adjoint=not adjoint, with_batch=with_batch) def _make_x(self, operator, adjoint, with_batch=True): # Value of adjoint makes no difference because the operator is square. # Return the number of systems to solve, R, equal to 1 or 2. r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() n = operator.domain_dimension.value if with_batch: x_shape = batch_shape + [n, r] else: x_shape = [n, r] else: batch_shape = operator.batch_shape_tensor() n = operator.domain_dimension_tensor() if with_batch: x_shape = array_ops.concat((batch_shape, [n, r]), 0) else: x_shape = [n, r] return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 @six.add_metaclass(abc.ABCMeta) class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for generic rectangular operators. Square shapes are never tested by this class, so if you want to test your operator with a square shape, create two test classes, the other subclassing SquareLinearOperatorFullMatrixTest. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @property def _tests_to_skip(self): """List of test names to skip.""" return ["solve", "solve_with_broadcast", "det", "log_abs_det"] @property def _operator_build_infos(self): build_info = OperatorBuildInfo # non-batch operators (n, n) and batch operators. return [ build_info((2, 1)), build_info((1, 2)), build_info((1, 3, 2)), build_info((3, 3, 4)), build_info((2, 1, 2, 4))] def _make_rhs(self, operator, adjoint, with_batch=True): # TODO(langmore) Add once we're testing solve_ls. 
raise NotImplementedError( "_make_rhs not implemented because we don't test solve") def _make_x(self, operator, adjoint, with_batch=True): # Return the number of systems for the argument 'x' for .matmul(x) r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() if adjoint: n = operator.range_dimension.value else: n = operator.domain_dimension.value if with_batch: x_shape = batch_shape + [n, r] else: x_shape = [n, r] else: batch_shape = operator.batch_shape_tensor() if adjoint: n = operator.range_dimension_tensor() else: n = operator.domain_dimension_tensor() if with_batch: x_shape = array_ops.concat((batch_shape, [n, r]), 0) else: x_shape = [n, r] return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False): """[batch] positive definite matrix. Args: shape: `TensorShape` or Python list. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype. force_well_conditioned: Python bool. If `True`, returned matrix has eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are chi-squared random variables. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) if not tensor_util.is_tensor(shape): shape = tensor_shape.TensorShape(shape) # Matrix must be square. shape[-1].assert_is_compatible_with(shape[-2]) with ops.name_scope("random_positive_definite_matrix"): tril = random_tril_matrix( shape, dtype, force_well_conditioned=force_well_conditioned) return math_ops.matmul(tril, tril, adjoint_b=True) def random_tril_matrix(shape, dtype, force_well_conditioned=False, remove_upper=True): """[batch] lower triangular matrix. Args: shape: `TensorShape` or Python `list`. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype force_well_conditioned: Python `bool`. If `True`, returned matrix will have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit normal random variables. remove_upper: Python `bool`. If `True`, zero out the strictly upper triangle. If `False`, the lower triangle of returned matrix will have desired properties, but will not have the strictly upper triangle zero'd out. Returns: `Tensor` with desired shape and dtype. """ with ops.name_scope("random_tril_matrix"): # Totally random matrix. Has no nice properties. tril = random_normal(shape, dtype=dtype) if remove_upper: tril = array_ops.matrix_band_part(tril, -1, 0) # Create a diagonal with entries having modulus in [1, 2]. if force_well_conditioned: maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype) diag = random_sign_uniform( shape[:-1], dtype=dtype, minval=1., maxval=maxval) tril = array_ops.matrix_set_diag(tril, diag) return tril def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Gaussian entries. Samples are distributed like ``` N(mean, stddev^2), if dtype is real, X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. 
dtype: `TensorFlow` `dtype` or numpy dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_normal"): samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) if dtype.is_complex: if seed is not None: seed += 1234 more_samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Uniform entries. Samples are distributed like ``` Uniform[minval, maxval], if dtype is real, X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_uniform"): samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) if dtype.is_complex: if seed is not None: seed += 12345 more_samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) random entries from a "sign Uniform". Letting `Z` be a random variable equal to `-1` and `1` with equal probability, Samples from this `Op` are distributed like ``` Z * X, where X ~ Uniform[minval, maxval], if dtype is real, Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_sign_uniform"): unsigned_samples = random_uniform( shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) if seed is not None: seed += 12 signs = math_ops.sign( random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed)) return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) def random_normal_correlated_columns(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, eps=1e-4, seed=None): """Batch matrix with (possibly complex) Gaussian entries and correlated cols. Returns random batch matrix `A` with specified element-wise `mean`, `stddev`, living close to an embedded hyperplane. Suppose `shape[-2:] = (M, N)`. If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries. If `M >= N`, then the colums of `A` will be made almost dependent as follows: ``` L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1) B = random normal M x N-1 matrix, mean = 0, stddev = stddev. G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane E = a random normal M x N matrix, mean = 0, stddev = eps mu = a constant M x N matrix, equal to the argument "mean" A = G + E + mu ``` Args: shape: Python list of integers. Shape of the returned tensor. 
Must be at least length two. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype eps: Distance each column is perturbed from the low-dimensional subspace. seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. Raises: ValueError: If `shape` is not at least length 2. """ dtype = dtypes.as_dtype(dtype) if len(shape) < 2: raise ValueError( "Argument shape must be at least length 2. Found: %s" % shape) # Shape is the final shape, e.g. [..., M, N] shape = list(shape) batch_shape = shape[:-2] m, n = shape[-2:] # If there is only one column, "they" are by definition correlated. if n < 2 or n < m: return random_normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) # Shape of the matrix with only n - 1 columns that we will embed in higher # dimensional space. smaller_shape = batch_shape + [m, n - 1] # Shape of the embedding matrix, mapping batch matrices # from [..., N-1, M] to [..., N, M] embedding_mat_shape = batch_shape + [n, n - 1] # This stddev for the embedding_mat ensures final result has correct stddev. stddev_mat = 1 / np.sqrt(n - 1) with ops.name_scope("random_normal_correlated_columns"): smaller_mat = random_normal( smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed) if seed is not None: seed += 1287 embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed) embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True) embedded = array_ops.matrix_transpose(embedded_t) mean_mat = array_ops.ones_like(embedded) * mean return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
kobejean/tensorflow
tensorflow/python/ops/linalg/linear_operator_test_util.py
Python
apache-2.0
28,127
[ "Gaussian" ]
685e87a645a4c2c83225c3d1ca80e1c1529ca0022930587bd843032677790573
# -*- coding: utf-8 -*- # # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2003-2005 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """ Specific classes for relationships. """ #------------------------------------------------------------------------- # # GRAMPS modules # #------------------------------------------------------------------------- from gramps.gen.lib import Person import gramps.gen.relationship #------------------------------------------------------------------------- # # # #------------------------------------------------------------------------- _removed_level = [ " ", " eerste", " tweede", " derde", " vierde", " vijfde", " zesde", " zevende", " achtste", " negende", " tiende", " elfde", " twaalfde", " dertiende", " veertiende", " vijftiende", " zestiende", " zeventiende", " achttiende", " negentiende", " twintigste", " eenentwintigste", " tweeëntwintigste", " drieëntwintigste", " vierentwintigste", " vijfentwintigste", " zesentwintigste", " zevenentwintigste", " achtentwintigste", " negenentwintigste", " dertigste" ] _parents_level = [ "", "ouders", "grootouders", "overgrootouders", "betovergrootouders", "oudouders", "oudgrootouders", "oudovergrootouders", "oudbetovergrootouders", "stamouders", "stamgrootouders", # gen 10 "stamovergrootouders", "stambetovergrootouders", "stamoudouders", "stamoudgrootouders", "stamoudovergrootouders", "stamoudbetovergrootouders", "edelouders", "edelgrootouders", "edelovergrootoudouders", "edelbetovergrootouders", # gen 20 "edeloudouders", "edeloudgrootouders", "edeloudvergrootouders", "edeloudbetovergrootouders", "edelstamouders", "edelstamgrootouders", "edelstamovergrootouders", "edelstambetovergrootouders", "edelstamoudouders" ] _father_level = [ "", "%s%svader", "%s%sgrootvader", "%s%sovergrootvader", "%s%sbetovergrootvader", "%s%soudvader (generatie 5)", "%s%soudgrootvader (generatie 6)", "%s%soudovergrootvader (generatie 7)", "%s%soudbetovergrootvader (generatie 8)", "%s%sstamvader (generatie 9)", "%s%sstamgrootvader (generatie 10)", "%s%sstamovergrootvader (generatie 11)", "%s%sstambetovergrootvader (generatie 12)", "%s%sstamoudvader (generatie 13)", "%s%sstamoudgrootvader (generatie 14)", "%s%sstamoudovergrootvader (generatie 15)", "%s%sstamoudbetovergrootvader (generatie 16)", "%s%sedelvader (generatie 17)", "%s%sedelgrootvader (generatie 18)", "%s%sedelovergrootoudvader (generatie 19)", "%s%sedelbetovergrootvader (generatie 20)", "%s%sedeloudvader (generatie 21)", "%s%sedeloudgrootvader (generatie 22)", "%s%sedeloudvergrootvader (generatie 23)", "%s%sedeloudbetovergrootvader (generatie 24)", "%s%sedelstamvader (generatie 25)", "%s%sedelstamgrootvader (generatie 26)", "%s%sedelstamovergrootvader (generatie 27)", "%s%sedelstambetovergrootvader (generatie 28)", "%s%sedelstamoudvader (generatie 29)" ] _mother_level =
[ "", "%s%smoeder", "%s%sgrootmoeder", "%s%sovergrootmoeder", "%s%sbetovergrootmoeder", "%s%soudmoeder (generatie 5)", "%s%soudgrootmoeder (generatie 6)", "%s%soudovergrootmoeder (generatie 7)", "%s%soudbetovergrootmoeder (generatie 8)", "%s%sstammoeder (generatie 9)", "%s%sstamgrootmoeder (generatie 10)", "%s%sstamovergrootmoeder (generatie 11)", "%s%sstambetovergrootmoeder (generatie 12)", "%s%sstamoudmoeder (generatie 13)", "%s%sstamoudgrootmoeder (generatie 14)", "%s%sstamoudovergrootmoeder (generatie 15)", "%s%sstamoudbetovergrootmoeder (generatie 16)", "%s%sedelmoeder (generatie 17)", "%s%sedelgrootmoeder (generatie 18)", "%s%sedelovergrootoudmoeder (generatie 19)", "%s%sedelbetovergrootmoeder (generatie 20)", "%s%sedeloudmoeder (generatie 21)", "%s%sedeloudgrootmoeder (generatie 22)", "%s%sedeloudvergrootmoeder (generatie 23)", "%s%sedeloudbetovergrootmoeder (generatie 24)", "%s%sedelstammoeder (generatie 25)", "%s%sedelstamgrootmoeder (generatie 26)", "%s%sedelstamovergrootmoeder (generatie 27)", "%s%sedelstambetovergrootmoeder (generatie 28)", "%s%sedelstamoudmoeder (generatie 29)" ] _ouder_level = [ "", "%s%souder ", "%s%sgrootouder", "%s%sovergrootouder", "%s%sbetovergrootouder", "%s%soudouder (generatie 5)", "%s%soudgrootouder (generatie 6)", "%s%soudovergrootouder (generatie 7)", "%s%soudbetovergrootouder (generatie 8)", "%s%sstamouder (generatie 9)", "%s%sstamgrootouder (generatie 10)", "%s%sstamovergrootouder (generatie 11)", "%s%sstambetovergrootouder (generatie 12)", "%s%sstamoudouder (generatie 13)", "%s%sstamoudgrootouder (generatie 14)", "%s%sstamoudovergrootouder (generatie 15)", "%s%sstamoudbetovergrootouder (generatie 16)", "%s%sedelouder (generatie 17)", "%s%sedelgrootouder (generatie 18)", "%s%sedelovergrootoudouder (generatie 19)", "%s%sedelbetovergrootouder (generatie 20)", "%s%sedeloudouder (generatie 21)", "%s%sedeloudgrootouder (generatie 22)", "%s%sedeloudvergrootouder (generatie 23)", "%s%sedeloudbetovergrootouder (generatie 24)", "%s%sedelstamouder (generatie 25)", "%s%sedelstamgrootouder (generatie 26)", "%s%sedelstamovergrootouder (generatie 27)", "%s%sedelstambetovergrootouder (generatie 28)", "%s%sedelstamoudouder (generatie 29)" ] _son_level = [ "", "%s%szoon", "%s%skleinzoon", "%s%sachterkleinzoon", "%s%sachterachterkleinzoon", "%s%sachterachterachterkleinzoon"] _daughter_level = [ "", "%s%sdochter", "%s%skleindochter", "%s%sachterkleindochter", "%s%sachterachterkleindochter", "%s%sachterachterachterkleindochter"] _kind_level = [ "", "%s%skind", "%s%skleinkind", "%s%sachterkleinkind", "%s%sachterachterkleinkind", "%s%sachterachterachterkleinkind"] _nephew_level = [ "", "%s%sneef", "%s%sachterneef", "%s%sachterachterneef" ] _niece_level = [ "", "%s%snicht", "%s%sachternicht", "%s%sachterachternicht"] _aunt_level = [ "", "%s%stante", "%s%sgroottante", "%s%sovergroottante", "%s%sbetovergroottante", "%s%soudtante"] _uncle_level = [ "", "%s%soom", "%s%sgrootoom", "%s%sovergrootoom", "%s%sbetovergrootoom", "%s%soudoom"] #------------------------------------------------------------------------- # # # #------------------------------------------------------------------------- class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator): """ RelationshipCalculator Class """ #sibling strings STEP = 'stief' HALF = 'half' INLAW = 'aangetrouwde ' def __init__(self): gramps.gen.relationship.RelationshipCalculator.__init__(self) def get_parents(self, level): if level > len(_parents_level)-1: return "verre voorouders (%d generaties)" % level else: 
return _parents_level[level] def _get_father(self, level, step='', inlaw=''): """Internal Dutch method to create relation string """ if level > len(_father_level)-1: return "verre %s%svoorvader (%d generaties)" % (inlaw, step, level) else: return _father_level[level] % (inlaw, step) def _get_son(self, level, step='', inlaw=''): """Internal Dutch method to create relation string """ if level < len(_son_level): return _son_level[level] % (inlaw, step) else: return "verre %s%sachterkleinzoon (%d generaties)" % (inlaw, step, level) def _get_mother(self, level, step='', inlaw=''): """Internal Dutch method to create relation string """ if level > len(_mother_level)-1: return "verre %s%svoormoeder (%d generaties)" % (inlaw, step, level) else: return _mother_level[level] % (inlaw, step) def _get_daughter(self, level, step='', inlaw=''): """Internal Dutch method to create relation string """ if level > len(_daughter_level)-1: return "verre %s%sachterkleindochter (%d generaties)" % (inlaw, step, level) else: return _daughter_level[level] % (inlaw, step) def _get_parent_unknown(self, level, step='', inlaw=''): """Internal Dutch method to create relation string """ if level > len(_ouder_level)-1: return "verre %s%svoorouder (%d generaties)" % (inlaw, step, level) elif level == 1: return _mother_level[level] % (inlaw, step) + ' of ' + \ _father_level[level] % (inlaw, step) else: return _ouder_level[level] % (inlaw, step) def _get_child_unknown(self, level, step='', inlaw=''): """Internal Dutch method to create relation string """ if level > len(_kind_level)-1: return "ver %s%sachterkleinkind (%d generaties)" % (inlaw, step, level) else: return _kind_level[level] % (inlaw, step) def _get_aunt(self, level, removed, step='', inlaw=''): """Internal Dutch method to create relation string """ if removed == 1 and level < len(_aunt_level): return _aunt_level[level] % (inlaw, step) elif removed == 1: return "verre %s%stante (%d generaties)" % (inlaw, step, level) elif level > len(_aunt_level)-1 and removed > len(_removed_level) -1: return "verre %s%stante (%d generaties, %d graden)" % (inlaw, step, level, removed) elif level > len(_aunt_level)-1: return "verre %s%stante van de%s graad (%d generaties)" % (inlaw, step, _removed_level[removed], level) else: return _aunt_level[level] % (inlaw, step) \ + _removed_level[removed] + " graad" def _get_uncle(self, level, removed, step='', inlaw=''): """Internal Dutch method to create relation string """ if removed == 1 and level < len(_uncle_level): return _uncle_level[level] % (inlaw, step) elif removed == 1: return "verre %s%soom (%d generaties)" % (inlaw, step, level) elif level > len(_uncle_level)-1 and removed > len(_removed_level) -1: return "verre %s%soom (%d generaties, %d graden)" % (inlaw, step, level, removed) elif level > len(_uncle_level)-1: return "verre %s%soom van de%s graad (%d generaties)" % (inlaw, step, _removed_level[removed], level) else: return _uncle_level[level] % (inlaw, step) \ + _removed_level[removed] + " graad" def _get_sibling(self, level, step='', inlaw=''): """overwrite of English method to return unknown gender sibling """ assert(level == 1) return self._get_male_cousin(0, step=step, inlaw=inlaw) + ' of ' \ + self._get_female_cousin(0, step=step, inlaw=inlaw) def _get_nephew(self, level, removed=1, step='', inlaw=''): """Internal Dutch method to create relation string """ if removed == 1 and level < len(_nephew_level): return _nephew_level[level] % (inlaw, step) elif removed == 1: return "verre %s%sneef (%d generaties)" % (inlaw, step, 
level) elif level > len(_nephew_level)-1 and removed > len(_removed_level) -1: return "verre %s%sneef (%d generaties, %d graden)" % (inlaw, step, level, removed) elif level > len(_nephew_level)-1: return "verre %s%sneef van de%s graad (%d generaties)" % (inlaw, step, _removed_level[removed], level) else: return _nephew_level[level] % (inlaw, step) \ + _removed_level[removed] + " graad" def _get_niece(self, level, removed=1, step='', inlaw=''): """Internal Dutch method to create relation string """ if removed == 1 and level < len(_niece_level): return _niece_level[level] % (inlaw, step) elif removed == 1: return "verre %s%snicht (%d generaties)" % (inlaw, step, level) elif level > len(_niece_level)-1 and removed > len(_removed_level) -1: return "verre %s%snicht (%d generaties, %d graden)" % (inlaw, step, level, removed) elif level > len(_niece_level)-1: return "verre %s%snicht van de%s graad (%d generaties)" % (inlaw, step, _removed_level[removed], level) else: return _niece_level[level] % (inlaw, step) \ + _removed_level[removed] + " graad" def _get_male_cousin(self, removed, step='', inlaw=''): """Specific Dutch thing: the nieces/nephews on the same level, going sideways in a branch, get the same name as the nieces/nephews going downward from your brothers/sisters. This used to be called "kozijn" """ removed -= 1 if removed > len(_removed_level)-1: return "verre %s%sneef (kozijn, %d graden)" % (inlaw, step, removed) elif removed == 0: return "%s%sbroer" % (inlaw, step) else: return "%s%sneef (kozijn)" % (inlaw, step) \ + _removed_level[removed] + " graad" def _get_female_cousin(self, removed, step='', inlaw=''): """Specific Dutch thing: the nieces/nephews on the same level, going sideways in a branch, get the same name as the nieces/nephews going downward from your brothers/sisters.
This used to be called "kozijn" """ removed -= 1 if removed > len(_removed_level)-1: return "verre %s%snicht (kozijn, %d graden)" % (inlaw, step, removed) elif removed == 0: return "%s%szus" % (inlaw, step) else: return "%s%snicht (kozijn)" % (inlaw, step) \ + _removed_level[removed] + " graad" def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b, reltocommon_a, reltocommon_b, only_birth=True, in_law_a=False, in_law_b=False): """ Return a string representing the relationship between the two people, see english method, eg b is father of a """ if only_birth: step = '' else: step = self.STEP if in_law_a or in_law_b : inlaw = self.INLAW else: inlaw = '' rel_str = "verre %s%sfamilie" % (inlaw, step) if Gb == 0: #b is ancestor if Ga == 0: rel_str = 'zelfde persoon' elif Ga == 1 and inlaw and not step: if gender_b == Person.MALE: rel_str = 'schoonvader' elif gender_b == Person.FEMALE: rel_str = 'schoonmoeder' else: rel_str = 'schoonouder' elif gender_b == Person.MALE: rel_str = self._get_father(Ga, step, inlaw) elif gender_b == Person.FEMALE: rel_str = self._get_mother(Ga, step, inlaw) else: rel_str = self._get_parent_unknown(Ga, step, inlaw) elif Ga == 0: #a is descendant if Gb == 1 and inlaw and not step: if gender_b == Person.MALE: rel_str = 'schoonzoon' elif gender_b == Person.FEMALE: rel_str = 'schoondochter' else: rel_str = 'schoonzoon of -dochter' elif Gb == 1 and inlaw and step: #inlaw stepchild if gender_b == Person.MALE: rel_str = 'aangetrouwde stiefzoon' elif gender_b == Person.FEMALE: rel_str = 'aangetrouwde stiefdochter' else: rel_str = 'aangetrouwde stiefzoon of dochter' elif gender_b == Person.MALE: rel_str = self._get_son(Gb, step, inlaw) elif gender_b == Person.FEMALE: rel_str = self._get_daughter(Gb, step, inlaw) else: rel_str = self._get_child_unknown(Gb, step, inlaw) elif Ga > Gb: #b is higher in the branch, in english uncle/aunt or #cousin up, in dutch always 'oom/tante' if gender_b == Person.MALE: rel_str = self._get_uncle(Ga - Gb, Gb, step, inlaw) else: rel_str = self._get_aunt(Ga - Gb, Gb, step, inlaw) elif Ga < Gb: #b is lower in the branch, in english niece/nephew or #cousin down, in dutch always 'neef/nicht' if gender_b == Person.MALE: rel_str = self._get_nephew(Gb - Ga, Ga, step, inlaw) else: rel_str = self._get_niece(Gb - Ga, Ga, step, inlaw) else: # people on the same level Ga == Gb if gender_b == Person.MALE: rel_str = self._get_male_cousin(Ga, step, inlaw) else: rel_str = self._get_female_cousin(Ga, step, inlaw) return rel_str def get_sibling_relationship_string(self, sib_type, gender_a, gender_b, in_law_a=False, in_law_b=False): """ Determine the string giving the relation between two siblings of type sib_type. Eg: b is the brother of a Here 'brother' is the string we need to determine This method gives more details about siblings than get_single_relationship_string can do. 
DIFFERENT HELPER FUNCTIONS THAN ENGLISH """ if sib_type == self.NORM_SIB or sib_type == self.UNKNOWN_SIB: typestr = '' elif sib_type == self.HALF_SIB_FATHER \ or sib_type == self.HALF_SIB_MOTHER: typestr = self.HALF elif sib_type == self.STEP_SIB: typestr = self.STEP if in_law_a or in_law_b : inlaw = self.INLAW else: inlaw = '' if inlaw and not typestr: if gender_b == Person.MALE: rel_str = 'schoonbroer' elif gender_b == Person.FEMALE: rel_str = 'schoonzus' else: rel_str = 'schoonzus/broer' else: if gender_b == Person.MALE: rel_str = self._get_male_cousin(1, typestr, inlaw) else: rel_str = self._get_female_cousin(1, typestr, inlaw) return rel_str if __name__ == "__main__": # Test function. Call it as follows from the command line (so as to find # imported modules): # export PYTHONPATH=/path/to/gramps/src # python src/plugins/rel/rel_nl.py """TRANSLATORS, copy this if statement at the bottom of your rel_xx.py module, and test your work with: python src/plugins/rel/rel_xx.py """ from gramps.gen.relationship import test RC = RelationshipCalculator() test(RC, True)
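# Editor's addition -- a small, hedged usage sketch of the level tables above
# (assumes gramps is on PYTHONPATH, as the test block describes): levels inside
# a table return its fixed Dutch term, while deeper generations fall back to
# the generic "verre voorouders" string.
if __name__ == "__main__":
    rc = RelationshipCalculator()
    print(rc.get_parents(2))   # -> "grootouders" (from _parents_level)
    print(rc.get_parents(40))  # -> "verre voorouders (40 generaties)" (fallback)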
arunkgupta/gramps
gramps/plugins/rel/rel_nl.py
Python
gpl-2.0
23,343
[ "Brian" ]
18d3355b28628f048cd8cf2e7809b3adb525243d08f09d4ef8e3f66180c63b8f
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDAnalysis --- http://www.mdanalysis.org # Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # from MDAnalysis import Universe import os from numpy.testing import TestCase, assert_equal from MDAnalysisTests.datafiles import PDB_full from MDAnalysisTests import tempdir class TestAltloc(TestCase): def setUp(self): self.filename = PDB_full self.tempdir = tempdir.TempDir() self.outfile = os.path.join(self.tempdir.name, 'test.pdb') def tearDown(self): del self.tempdir def test_atomgroups(self): u = Universe(self.filename) segidB0 = len(u.select_atoms("segid B and (not altloc B)")) segidB1 = len(u.select_atoms("segid B and (not altloc A)")) assert_equal(segidB0, segidB1) altlocB0 = len(u.select_atoms("segid B and (altloc A)")) altlocB1 = len(u.select_atoms("segid B and (altloc B)")) assert_equal(altlocB0, altlocB1) total = len(u.select_atoms("segid B")) assert_equal(total, segidB0 + altlocB0) def test_bonds(self): u = Universe(self.filename, guess_bonds=True) # need to force topology to load before querying individual atom bonds bonds0 = u.select_atoms("segid B and (altloc A)")[0].bonds bonds1 = u.select_atoms("segid B and (altloc B)")[0].bonds assert_equal(len(bonds0), len(bonds1)) def test_write_read(self): u = Universe(self.filename) u.select_atoms("all").write(self.outfile) u2 = Universe(self.outfile) assert_equal(len(u.atoms), len(u2.atoms))
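# Editor's addition -- the altloc selection keyword exercised above, shown
# outside a TestCase as a hedged sketch. It reuses the PDB_full fixture and
# assumes the MDAnalysisTests data files are installed.
if __name__ == "__main__":
    u = Universe(PDB_full)
    alt_a = u.select_atoms("segid B and altloc A")
    alt_b = u.select_atoms("segid B and altloc B")
    print(len(alt_a), len(alt_b))  # test_atomgroups asserts these sizes match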
alejob/mdanalysis
testsuite/MDAnalysisTests/test_altloc.py
Python
gpl-2.0
2,503
[ "MDAnalysis" ]
fbec9048559d9f526d1a476196edf7779e8791cfcef1f17ae83f1791ca9ed2f7
# $Id: docutils_xml.py 7497 2012-08-16 15:17:29Z milde $ # Author: David Goodger, Paul Tremblay, Guenter Milde # Maintainer: docutils-develop@lists.sourceforge.net # Copyright: This module has been placed in the public domain. """ Simple document tree Writer, writes Docutils XML according to http://docutils.sourceforge.net/docs/ref/docutils.dtd. """ __docformat__ = 'reStructuredText' import sys # Work around broken PyXML and obsolete python stdlib behaviour. (The stdlib # replaces its own xml module with PyXML if the latter is installed. However, # PyXML is no longer maintained and partially incompatible/buggy.) Reverse # the order in which xml module and submodules are searched to import stdlib # modules if they exist and PyXML modules if they do not exist in the stdlib. # # See http://sourceforge.net/tracker/index.php?func=detail&aid=3552403&group_id=38414&atid=422030 # and http://lists.fedoraproject.org/pipermail/python-devel/2012-July/000406.html import xml if "_xmlplus" in xml.__path__[0]: # PyXML sub-module xml.__path__.reverse() # If both are available, prefer stdlib over PyXML import xml.sax.saxutils from io import StringIO import docutils from docutils import frontend, writers, nodes class RawXmlError(docutils.ApplicationError): pass class Writer(writers.Writer): supported = ('xml',) """Formats this writer supports.""" settings_spec = ( '"Docutils XML" Writer Options', None, (('Generate XML with newlines before and after tags.', ['--newlines'], {'action': 'store_true', 'validator': frontend.validate_boolean}), ('Generate XML with indents and newlines.', ['--indents'], {'action': 'store_true', 'validator': frontend.validate_boolean}), ('Omit the XML declaration. Use with caution.', ['--no-xml-declaration'], {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false', 'validator': frontend.validate_boolean}), ('Omit the DOCTYPE declaration.', ['--no-doctype'], {'dest': 'doctype_declaration', 'default': 1, 'action': 'store_false', 'validator': frontend.validate_boolean}),)) settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'} config_section = 'docutils_xml writer' config_section_dependencies = ('writers',) output = None """Final translated form of `document`.""" def __init__(self): writers.Writer.__init__(self) self.translator_class = XMLTranslator def translate(self): self.visitor = visitor = self.translator_class(self.document) self.document.walkabout(visitor) self.output = ''.join(visitor.output) class XMLTranslator(nodes.GenericNodeVisitor): xml_declaration = '<?xml version="1.0" encoding="%s"?>\n' # TODO: add stylesheet options similar to HTML and LaTeX writers? 
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n' doctype = ( '<!DOCTYPE document PUBLIC' ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"' ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n') generator = '<!-- Generated by Docutils %s -->\n' xmlparser = xml.sax.make_parser() """SAX parser instance to check/extract raw XML.""" xmlparser.setFeature( "http://xml.org/sax/features/external-general-entities", True) def __init__(self, document): nodes.NodeVisitor.__init__(self, document) # Reporter self.warn = self.document.reporter.warning self.error = self.document.reporter.error # Settings self.settings = settings = document.settings self.indent = self.newline = '' if settings.newlines: self.newline = '\n' if settings.indents: self.newline = '\n' self.indent = ' ' self.level = 0 # indentation level self.in_simple = 0 # level of nesting inside mixed-content elements # Output self.output = [] if settings.xml_declaration: self.output.append( self.xml_declaration % settings.output_encoding) if settings.doctype_declaration: self.output.append(self.doctype) self.output.append(self.generator % docutils.__version__) # initialize XML parser self.the_handle = TestXml() self.xmlparser.setContentHandler(self.the_handle) # generic visit and depart methods # -------------------------------- def default_visit(self, node): """Default node visit method.""" if not self.in_simple: self.output.append(self.indent*self.level) self.output.append(node.starttag(xml.sax.saxutils.quoteattr)) self.level += 1 if isinstance(node, nodes.TextElement): self.in_simple += 1 if not self.in_simple: self.output.append(self.newline) def default_departure(self, node): """Default node depart method.""" self.level -= 1 if not self.in_simple: self.output.append(self.indent*self.level) self.output.append(node.endtag()) if isinstance(node, nodes.TextElement): self.in_simple -= 1 if not self.in_simple: self.output.append(self.newline) # specific visit and depart methods # --------------------------------- def visit_Text(self, node): text = xml.sax.saxutils.escape(node.astext()) self.output.append(text) def depart_Text(self, node): pass def visit_raw(self, node): if 'xml' not in node.get('format', '').split(): # skip other raw content? # raise nodes.SkipNode self.default_visit(node) return # wrap in <raw> element self.default_visit(node) # or not? xml_string = node.astext() self.output.append(xml_string) self.default_departure(node) # or not? # Check validity of raw XML: if isinstance(xml_string, str) and sys.version_info < (3,): xml_string = xml_string.encode('utf8') try: self.xmlparser.parse(StringIO(xml_string)) except xml.sax._exceptions.SAXParseException as error: col_num = self.the_handle.locator.getColumnNumber() line_num = self.the_handle.locator.getLineNumber() srcline = node.line if not isinstance(node.parent, nodes.TextElement): srcline += 2 # directive content start line msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % ( col_num, line_num, node.astext()) self.warn(msg, source=node.source, line=srcline+line_num-1) raise nodes.SkipNode # content already processed class TestXml(xml.sax.ContentHandler): def setDocumentLocator(self, locator): self.locator = locator
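# Editor's addition -- a hedged sketch of driving the Writer above through
# docutils' public front end. publish_string and the "docutils_xml" writer
# name are standard docutils usage; the reST snippet and the settings values
# are illustrative assumptions.
if __name__ == "__main__":
    from docutils.core import publish_string
    rst = "A *small* test document.\n"
    xml_out = publish_string(source=rst, writer_name="docutils_xml",
                             settings_overrides={"indents": True})
    print(xml_out.decode("utf-8"))  # publish_string returns encoded bytes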
asedunov/intellij-community
python/helpers/py3only/docutils/writers/docutils_xml.py
Python
apache-2.0
6,973
[ "VisIt" ]
5f86c7b7f5bf72b775beefed6e2d6ba5c342488dea25ec820be6e02c52e12070
# Copyright 2018 The Lucid Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Objective functions for visualizing neural networks. We represent objectives with a class `Objective` enclosing functions of the form: (T) => TensorFlow Scalar Where `T` is a function that allows one to access the activations of different layers of the network. For example `T("mixed4a")` gives the activations for the layer mixed4a. This allows objectives to be declared outside the rendering function, but then actually constructed within its graph/session. """ from __future__ import absolute_import, division, print_function from decorator import decorator import numpy as np import tensorflow as tf from lucid.optvis.objectives_util import _dot, _dot_cossim, _extract_act_pos, _make_arg_str, _T_force_NHWC, _T_handle_batch # We use T as a variable name to access all kinds of tensors # pylint: disable=invalid-name class Objective(object): """A wrapper to make objective functions easy to combine. For example, suppose you want to optimize 20% for mixed4a:20 and 80% for mixed4a:21. Then you could use: objective = 0.2 * channel("mixed4a", 20) + 0.8 * channel("mixed4a", 21) Under the hood, we think of objectives as functions of the form: T => tensorflow scalar for loss where T is a function allowing you to index layers in the network -- that is, if there's a layer "mixed4a" then T("mixed4a") would give you its activations. This allows objectives to be declared outside the rendering function, but then actually constructed within its graph/session.
""" def __init__(self, objective_func, name="", description=""): self.objective_func = objective_func self.name = name self.description = description def __add__(self, other): if isinstance(other, (int, float)): objective_func = lambda T: other + self(T) name = self.name description = self.description else: objective_func = lambda T: self(T) + other(T) name = ", ".join([self.name, other.name]) description = "Sum(" + " +\n".join([self.description, other.description]) + ")" return Objective(objective_func, name=name, description=description) def __neg__(self): return -1 * self def __sub__(self, other): return self + (-1 * other) @staticmethod def sum(objs): objective_func = lambda T: sum([obj(T) for obj in objs]) descriptions = [obj.description for obj in objs] description = "Sum(" + " +\n".join(descriptions) + ")" names = [obj.name for obj in objs] name = ", ".join(names) return Objective(objective_func, name=name, description=description) def __mul__(self, other): if isinstance(other, (int, float)): objective_func = lambda T: other * self(T) else: objective_func = lambda T: self(T) * other(T) return Objective(objective_func, name=self.name, description=self.description) def __rmul__(self, other): return self.__mul__(other) def __radd__(self, other): return self.__add__(other) def __call__(self, T): return self.objective_func(T) def wrap_objective(require_format=None, handle_batch=False): """Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Objective factory: (args) => Objective while preserving function name, arg info, docs... for interactive python. """ @decorator def inner(f, *args, **kwds): objective_func = f(*args, **kwds) objective_name = f.__name__ args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]" description = objective_name.title() + args_str def process_T(T): if require_format == "NHWC": T = _T_force_NHWC(T) return T return Objective(lambda T: objective_func(process_T(T)), objective_name, description) return inner def handle_batch(batch=None): return lambda f: lambda T: f(_T_handle_batch(T, batch=batch)) @wrap_objective(require_format='NHWC') def neuron(layer_name, channel_n, x=None, y=None, batch=None): """Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. 
For odd width & height, the single center neuron is chosen; for even width & height, the neuron at the bottom right of the center 2x2 block is chosen. """ @handle_batch(batch) def inner(T): layer = T(layer_name) layer = _extract_act_pos(layer, x, y) return tf.reduce_mean(layer[..., channel_n]) return inner @wrap_objective(require_format='NHWC') def channel(layer, n_channel, batch=None): """Visualize a single channel""" @handle_batch(batch) def inner(T): return tf.reduce_mean(T(layer)[..., n_channel]) return inner @wrap_objective(require_format='NHWC') def direction(layer, vec, cossim_pow=0, batch=None): """Visualize a direction""" vec = vec[None, None, None] vec = vec.astype("float32") @handle_batch(batch) def inner(T): return _dot_cossim(T(layer), vec, cossim_pow=cossim_pow) return inner direction_cossim = direction @wrap_objective(require_format='NHWC') def direction_neuron(layer_name, vec, x=None, y=None, cossim_pow=0, batch=None): """Visualize a single (x, y) position along the given direction""" vec = vec.astype("float32") @handle_batch(batch) def inner(T): layer = T(layer_name) layer = _extract_act_pos(layer, x, y) return _dot_cossim(layer, vec[None, None, None], cossim_pow=cossim_pow) return inner @wrap_objective(require_format='NHWC') def tensor_direction(layer, vec, cossim_pow=0, batch=None): """Visualize a tensor.""" assert len(vec.shape) in [3,4] vec = vec.astype("float32") if len(vec.shape) == 3: vec = vec[None] @handle_batch(batch) def inner(T): t_acts = T(layer) t_shp = tf.shape(t_acts) v_shp = vec.shape M1 = (t_shp[1] - v_shp[1]) // 2 M2 = (t_shp[2] - v_shp[2]) // 2 t_acts_ = t_acts[:, M1 : M1+v_shp[1], M2 : M2+v_shp[2], :] return _dot_cossim(t_acts_, vec, cossim_pow=cossim_pow) return inner @wrap_objective(handle_batch=True) def deepdream(layer): """Maximize 'interestingness' at some layer. See Mordvintsev et al., 2015. """ return lambda T: tf.reduce_mean(T(layer)**2) @wrap_objective(handle_batch=True) def total_variation(layer="input"): """Total variation of image (or activations at some layer). This operation is most often used as a penalty to reduce noise. See Mahendran & Vedaldi, 2014. Understanding Deep Image Representations by Inverting Them. """ return lambda T: tf.image.total_variation(T(layer)) @wrap_objective(handle_batch=True) def L1(layer="input", constant=0): """L1 norm of layer. Generally used as penalty.""" return lambda T: tf.reduce_sum(tf.abs(T(layer) - constant)) @wrap_objective(handle_batch=True) def L2(layer="input", constant=0, epsilon=1e-6): """L2 norm of layer. Generally used as penalty.""" return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer) - constant) ** 2)) def _tf_blur(x, w=3): depth = x.shape[-1] k = np.zeros([w, w, depth, depth]) for ch in range(depth): k_ch = k[:, :, ch, ch] k_ch[ :, : ] = 0.5 k_ch[1:-1, 1:-1] = 1.0 conv_k = lambda t: tf.nn.conv2d(t, k, [1, 1, 1, 1], "SAME") return conv_k(x) / conv_k(tf.ones_like(x)) @wrap_objective() def blur_input_each_step(): """Minimizing this objective is equivalent to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivalent to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015.
""" def inner(T): t_input = T("input") t_input_blurred = tf.stop_gradient(_tf_blur(t_input)) return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2) return inner @wrap_objective() def blur_alpha_each_step(): def inner(T): t_input = T("input")[..., 3:4] t_input_blurred = tf.stop_gradient(_tf_blur(t_input)) return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2) return inner @wrap_objective() def channel_interpolate(layer1, n_channel1, layer2, n_channel2): """Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective """ def inner(T): batch_n = T(layer1).get_shape().as_list()[0] arr1 = T(layer1)[..., n_channel1] arr2 = T(layer2)[..., n_channel2] weights = (np.arange(batch_n)/float(batch_n-1)) S = 0 for n in range(batch_n): S += (1-weights[n]) * tf.reduce_mean(arr1[n]) S += weights[n] * tf.reduce_mean(arr2[n]) return S return inner @wrap_objective() def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5): """Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective. """ def inner(T): arr = T("input") # print shp if mask is None: mask_ = np.ones(shp) mask_[:, w:-w, w:-w] = 0 else: mask_ = mask blur = _tf_blur(arr, w=5) diffs = (blur-arr)**2 diffs += 0.8*(arr-C)**2 return -tf.reduce_sum(diffs*mask_) return inner @wrap_objective() def alignment(layer, decay_ratio=2): """Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desirable to encourage analogous objects to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a parameterization that shares across the batch. (In fact, that works quite well by itself, so this function may just be obsolete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective. """ def inner(T): batch_n = T(layer).get_shape().as_list()[0] arr = T(layer) accum = 0 for d in [1, 2, 3, 4]: for i in range(batch_n - d): a, b = i, i+d arr1, arr2 = arr[a], arr[b] accum += tf.reduce_mean((arr1-arr2)**2) / decay_ratio**float(d) return -accum return inner @wrap_objective() def diversity(layer): """Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it calculates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective. 
""" def inner(T): layer_t = T(layer) batch_n, _, _, channels = layer_t.get_shape().as_list() flattened = tf.reshape(layer_t, [batch_n, -1, channels]) grams = tf.matmul(flattened, flattened, transpose_a=True) grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10) return sum([ sum([ tf.reduce_sum(grams[i]*grams[j]) for j in range(batch_n) if j != i]) for i in range(batch_n)]) / batch_n return inner @wrap_objective() def input_diff(orig_img): """Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples. """ def inner(T): diff = T("input") - orig_img return tf.sqrt(tf.reduce_mean(diff**2)) return inner @wrap_objective() def class_logit(layer, label, batch=None): """Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit. """ @handle_batch(batch) def inner(T): if isinstance(label, int): class_n = label else: class_n = T("labels").index(label) logits = T(layer) logit = tf.reduce_sum(logits[:, class_n]) return logit return inner def as_objective(obj): """Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective """ if isinstance(obj, Objective): return obj elif callable(obj): return obj elif isinstance(obj, str): layer, n = obj.split(":") layer, n = layer.strip(), int(n) return channel(layer, n)
tensorflow/lucid
lucid/optvis/objectives.py
Python
apache-2.0
14,268
[ "NEURON" ]
da721265a57f529d33017f2574a915d5c2f1c0dfef2d90148c715f3c357b8a31
#!/usr/bin/python # -*- coding: utf-8 -*- # :::~ Author: Claudio Juan Tessone <claudio.tessone@uzh.ch> (c) 2010 ### ### ### ### # Distributed According to GNU Generic Purpose License (GPL) version 3 # Please visit www.gnu.org ############################################################################### PROGRAM_NAME = "SPG-Edit-Param" PROGRAM_VERSION = "0.1.0" PROGRAM_AUTHOR = "Claudio J. Tessone" PROGRAM_RELEASE_DATE = "2010/05/29" PROGRAM_EMAIL = "claudio.tessone@uzh.ch" import spg.utils as spgu import re, sys, shutil ######################################################################################### ######################################################################################### def parse_command_line(): from optparse import OptionParser parser = OptionParser() parser.add_option("-e","--edit", type="string", action='append', dest="edit", help = "Replaces a given iterator. Its name is grabbed directly from the argument") parser.add_option("-s","--swap", type="string", nargs=2, action='append', dest="swap", help = "Swaps two variables in the param.dat file") parser.add_option("-i","--insert", type="string", nargs=2, action='append', dest="insert", help = "Inserts the given iterator before the first variable. The second argument is usually enclosed between quotes") parser.add_option("-a","--append", type="string", nargs=2, action='append', dest="append", help = "Appends the second iterator after the first variable. The second argument is usually enclosed between quotes") parser.add_option("-d","--delete", type="string", action='append', dest="delete", help = "Deletes the iterators whose variables are those named") parser.add_option("-m","--move", type="string", nargs=2, action='append', dest="move", help = "Moves the iterator to the position given as second argument") return parser.parse_args() def parse_param_dat(fin): regex = re.compile(r'(?P<iter>[*+.:/])(?P<var>[a-zA-Z]\w*)\s*(?P<values>.*)') vec_entities = [] dict_iters = {} for l in fin: l = l.strip() if l.startswith("%!"): vec_entities.append( l ) continue if l.startswith("#"): vec_entities.append( l ) continue match = regex.match( l ) iter = match.group( 'iter' ) var = match.group( 'var' ) values = match.group( 'values' ) vec_entities.append( var ) dict_iters[var] = (iter, values) return vec_entities, dict_iters def out_param_dat(fout, vec_ents, dict_iters): for ent in vec_ents: if ent in dict_iters: (iter, values) = dict_iters[ent] print >> fout, '%s%s %s'%(iter, ent, values) else: print >> fout, '%s'%(ent) def find_var(vec_ents, var): try: return vec_ents.index(var) except ValueError: spgu.newline_msg("ERR","variable '%s' not found "%var) sys.exit(1) #----------------------------------------------------- from spg import SPGParser #=============================================================================== # # parser = SPGParser() # # vec_param_dat = glob.glob() #=============================================================================== parser = SPGParser() #--------------------------------------------- parser.fetch( open("param.dat") ) #-------------------------------------------------------------- for i in parser: #--------------------------------------------------------------- print i opts, args = parse_command_line() #print opts for i_arg in args: spgu.newline_msg("MSG","parsing... 
'%s' "%i_arg) vec_entities, dict_iters = parse_param_dat( open(i_arg) ) if opts.swap is not None: for var1, var2 in opts.swap: pos1 = find_var(vec_entities, var1) pos2 = find_var(vec_entities, var2) vec_entities[pos1], vec_entities[pos2] = vec_entities[pos2], vec_entities[pos1] if opts.edit is not None: for ed in opts.edit: var, dil = parse_param_dat( [ed] ) var = var[0] (iter, values) = dil[ var ] dict_iters[var] = (iter, values) if opts.insert is not None: for var1, oth in opts.insert: pos1 = find_var(vec_entities, var1) var2, dil = parse_param_dat( [oth] ) var2 = var2[0] dict_iters[var2] = dil[ var2 ] vec_entities.insert( pos1, var2 ) if opts.append is not None: for var1, oth in opts.append: pos1 = find_var(vec_entities, var1)+1 var2, dil = parse_param_dat( [oth] ) var2 = var2[0] dict_iters[var2] = dil[ var2 ] vec_entities.insert( pos1, var2 ) if opts.delete is not None: for var1 in opts.delete: vec_entities.remove( var1 ) if opts.move is not None: for var1, shift in opts.move: pos1 = find_var(vec_entities, var1) vec_entities.remove( var1 ) vec_entities.insert( pos1+int(shift) , var1 ) shutil.copy( i_arg, "%s-"%i_arg ) out_param_dat(open(i_arg,"w"), vec_entities, dict_iters)
tessonec/PySPG
scripts/vault/spg-param-split.py
Python
gpl-3.0
5,135
[ "VisIt" ]
c145d37285f32c8fa61edbce2314058384e29dc7430686de826f6175ded8ef6b
#!/usr/bin/env python # ****************************************************************************** # Copyright 2014-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** """ Overfeat Benchmark https://github.com/soumith/convnet-benchmarks ./overfeat.py ./overfeat.py -d f16 """ from neon import NervanaObject from neon.util.argparser import NeonArgparser from neon.initializers import Gaussian from neon.layers import Conv, Pooling, GeneralizedCost, Affine from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule from neon.transforms import Rectlin, Softmax, CrossEntropyMulti from neon.models import Model from neon.data import ArrayIterator import numpy as np parser = NeonArgparser(__doc__) args = parser.parse_args() NervanaObject.be.enable_winograd = 4 # setup data provider X_train = np.random.uniform(-1, 1, (128, 3 * 231 * 231)) y_train = np.random.randint(0, 999, (128, 1000)) train = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 231, 231)) layers = [Conv((11, 11, 96), init=Gaussian(scale=0.01), activation=Rectlin(), padding=0, strides=4), Pooling(2, strides=2), Conv((5, 5, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=0), Pooling(2, strides=2), Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1), Conv((3, 3, 1024), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1), Conv((3, 3, 1024), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1), Pooling(2, strides=2), Affine(nout=3072, init=Gaussian(scale=0.01), activation=Rectlin()), Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()), Affine(nout=1000, init=Gaussian(scale=0.01), activation=Softmax())] model = Model(layers=layers) weight_sched = Schedule([22, 44, 65], (1 / 250.)**(1 / 3.)) opt_gdm = GradientDescentMomentum(0.01, 0.0, wdecay=0.0005, schedule=weight_sched) opt = MultiOptimizer({'default': opt_gdm}) cost = GeneralizedCost(costfunc=CrossEntropyMulti()) model.benchmark(train, cost=cost, optimizer=opt, niterations=10, nskip=1)
NervanaSystems/neon
examples/convnet-benchmarks/overfeat.py
Python
apache-2.0
2,740
[ "Gaussian" ]
41ece6460d98a8f7afa1ead117c7cc180aee45d8e5fa7d66cca7d33c7fe017d4
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ desitarget.gaiamatch ==================== Useful Gaia matching and manipulation routines. .. _`Gaia Collaboration/Babusiaux et al. (2018)`: https://ui.adsabs.harvard.edu/abs/2018A%26A...616A..10G/abstract .. _`borrowed shamelessly from Sergey Koposov`: https://github.com/desihub/desispec/blob/cd9af0dcc81c7c597aef2bc1c2a9454dcbc47e17/py/desispec/scripts/stdstars.py#L114 """ import os import sys import numpy as np import numpy.lib.recfunctions as rfn import fitsio import requests import pickle from glob import glob from time import time import healpy as hp from . import __version__ as desitarget_version from desiutil import depend from desitarget import io from desitarget.io import check_fitsio_version, gitversion from desitarget.internal import sharedmem from desitarget.geomask import hp_in_box, add_hp_neighbors, pixarea2nside from desitarget.geomask import hp_beyond_gal_b, nside2nside, rewind_coords from desimodel.footprint import radec2pix from astropy.coordinates import SkyCoord from astropy import units as u from astropy.io import ascii # ADM set up the DESI default logger from desiutil.log import get_logger log = get_logger() # ADM start the clock start = time() # ADM the current data model for Gaia columns for READING from Gaia files ingaiadatamodel = np.array([], dtype=[ ('SOURCE_ID', '>i8'), ('REF_CAT', 'S2'), ('RA', '>f8'), ('DEC', '>f8'), ('PHOT_G_MEAN_MAG', '>f4'), ('PHOT_G_MEAN_FLUX_OVER_ERROR', '>f4'), ('PHOT_BP_MEAN_MAG', '>f4'), ('PHOT_BP_MEAN_FLUX_OVER_ERROR', '>f4'), ('PHOT_RP_MEAN_MAG', '>f4'), ('PHOT_RP_MEAN_FLUX_OVER_ERROR', '>f4'), ('PHOT_BP_RP_EXCESS_FACTOR', '>f4'), ('ASTROMETRIC_EXCESS_NOISE', '>f4'), ('DUPLICATED_SOURCE', '?'), ('ASTROMETRIC_SIGMA5D_MAX', '>f4'), ('ASTROMETRIC_PARAMS_SOLVED', '>i1'), ('PARALLAX', '>f4'), ('PARALLAX_ERROR', '>f4'), ('PMRA', '>f4'), ('PMRA_ERROR', '>f4'), ('PMDEC', '>f4'), ('PMDEC_ERROR', '>f4') ]) # ADM the current data model for Gaia columns for WRITING to target files gaiadatamodel = np.array([], dtype=[ ('REF_ID', '>i8'), ('REF_CAT', 'S2'), ('GAIA_RA', '>f8'), ('GAIA_DEC', '>f8'), ('GAIA_PHOT_G_MEAN_MAG', '>f4'), ('GAIA_PHOT_G_MEAN_FLUX_OVER_ERROR', '>f4'), ('GAIA_PHOT_BP_MEAN_MAG', '>f4'), ('GAIA_PHOT_BP_MEAN_FLUX_OVER_ERROR', '>f4'), ('GAIA_PHOT_RP_MEAN_MAG', '>f4'), ('GAIA_PHOT_RP_MEAN_FLUX_OVER_ERROR', '>f4'), ('GAIA_PHOT_BP_RP_EXCESS_FACTOR', '>f4'), ('GAIA_ASTROMETRIC_EXCESS_NOISE', '>f4'), ('GAIA_DUPLICATED_SOURCE', '?'), ('GAIA_ASTROMETRIC_SIGMA5D_MAX', '>f4'), ('GAIA_ASTROMETRIC_PARAMS_SOLVED', '>i1'), ('PARALLAX', '>f4'), ('PARALLAX_IVAR', '>f4'), ('PMRA', '>f4'), ('PMRA_IVAR', '>f4'), ('PMDEC', '>f4'), ('PMDEC_IVAR', '>f4') ]) # ADM the current data model for READING from Gaia EDR3 files. 
inedr3datamodel = np.array([], dtype=[ ('SOURCE_ID', '>i8'), ('REF_CAT', 'S2'), ('REF_EPOCH', '>f4'), ('RA', '>f8'), ('RA_ERROR', '>f8'), ('DEC', '>f8'), ('DEC_ERROR', '>f8'), ('PHOT_G_MEAN_MAG', '>f4'), ('PHOT_G_MEAN_FLUX_OVER_ERROR', '>f4'), ('PHOT_BP_MEAN_MAG', '>f4'), ('PHOT_BP_MEAN_FLUX_OVER_ERROR', '>f4'), ('PHOT_RP_MEAN_MAG', '>f4'), ('PHOT_RP_MEAN_FLUX_OVER_ERROR', '>f4'), ('PHOT_BP_RP_EXCESS_FACTOR', '>f4'), ('PHOT_G_N_OBS', '>i4'), ('ASTROMETRIC_EXCESS_NOISE', '>f4'), ('ASTROMETRIC_EXCESS_NOISE_SIG', '>f4'), ('DUPLICATED_SOURCE', '?'), ('ASTROMETRIC_SIGMA5D_MAX', '>f4'), ('ASTROMETRIC_PARAMS_SOLVED', '>i1'), ('RUWE', '>f4'), ('IPD_GOF_HARMONIC_AMPLITUDE', '>f4'), ('IPD_FRAC_MULTI_PEAK', '>i1'), ('PARALLAX', '>f4'), ('PARALLAX_ERROR', '>f4'), ('PMRA', '>f4'), ('PMRA_ERROR', '>f4'), ('PMDEC', '>f4'), ('PMDEC_ERROR', '>f4') ]) # ADM the current data model for WRITING to Gaia EDR3 files. edr3datamodel = np.array([], dtype=[ ('REF_ID', '>i8'), ('REF_CAT', 'S2'), ('REF_EPOCH', '>f4'), ('EDR3_RA', '>f8'), ('EDR3_RA_IVAR', '>f8'), ('EDR3_DEC', '>f8'), ('EDR3_DEC_IVAR', '>f8'), ('EDR3_PHOT_G_MEAN_MAG', '>f4'), ('EDR3_PHOT_G_MEAN_FLUX_OVER_ERROR', '>f4'), ('EDR3_PHOT_BP_MEAN_MAG', '>f4'), ('EDR3_PHOT_BP_MEAN_FLUX_OVER_ERROR', '>f4'), ('EDR3_PHOT_RP_MEAN_MAG', '>f4'), ('EDR3_PHOT_RP_MEAN_FLUX_OVER_ERROR', '>f4'), ('EDR3_PHOT_BP_RP_EXCESS_FACTOR', '>f4'), ('EDR3_PHOT_G_N_OBS', '>i4'), ('EDR3_ASTROMETRIC_EXCESS_NOISE', '>f4'), ('EDR3_ASTROMETRIC_EXCESS_NOISE_SIG', '>f4'), ('EDR3_DUPLICATED_SOURCE', '?'), ('EDR3_ASTROMETRIC_SIGMA5D_MAX', '>f4'), ('EDR3_ASTROMETRIC_PARAMS_SOLVED', '>i1'), ('EDR3_RUWE', '>f4'), ('EDR3_IPD_GOF_HARMONIC_AMPLITUDE', '>f4'), ('EDR3_IPD_FRAC_MULTI_PEAK', '>i1'), ('EDR3_PARALLAX', '>f4'), ('EDR3_PARALLAX_IVAR', '>f4'), ('EDR3_PMRA', '>f4'), ('EDR3_PMRA_IVAR', '>f4'), ('EDR3_PMDEC', '>f4'), ('EDR3_PMDEC_IVAR', '>f4') ]) def check_gaia_survey(dr): """Convenience function to check allowed Gaia Data Releases Parameters ---------- dr : :class:`str` Name of a Gaia data release. Options are "dr2", "edr3". If one of those options isn't passed, a ValueError is raised. """ # ADM allowed Data Releases for input. droptions = ["dr2", "edr3"] if dr not in droptions: msg = "input dr must be one of {}".format(droptions) log.critical(msg) raise ValueError(msg) def get_gaia_dir(dr="dr2"): """Convenience function to grab the Gaia environment variable. Parameters ---------- dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3" Returns ------- :class:`str` The directory stored in the $GAIA_DIR environment variable. """ # ADM check for valid Gaia DR. check_gaia_survey(dr) # ADM check that the $GAIA_DIR environment variable is set. gaiadir = os.environ.get('GAIA_DIR') if gaiadir is None: msg = "Set $GAIA_DIR environment variable!" log.critical(msg) raise ValueError(msg) # ADM the specific meaning of the GAIA_DIR is the DR2 directory, # ADM so reconstruct for other DRs. if dr != "dr2": gaiadir = os.path.join(os.path.dirname(gaiadir), "gaia_{}".format(dr)) return gaiadir def _get_gaia_nside(): """Grab the HEALPixel nside to be used throughout this module. Returns ------- :class:`int` The HEALPixel nside number for Gaia file creation and retrieval. """ nside = 32 return nside def get_gaia_nside_brick(bricksize=0.25): """Grab the HEALPixel nside that corresponds to a brick. Parameters ---------- bricksize : :class:`float`, optional, defaults to 0.25 Size of the brick, default is the Legacy Surveys standard. 
Returns ------- :class:`int` The HEALPixel nside number that corresponds to a brick. """ return pixarea2nside(bricksize*bricksize) def gaia_psflike(aen, g, dr="dr2"): """Whether an object is PSF-like based on Gaia quantities. Parameters ---------- aen : :class:`array_like` or :class:`float` Gaia Astrometric Excess Noise. g : :class:`array_like` or :class:`float` Gaia-based g MAGNITUDE (not Galactic-extinction-corrected). dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3" Returns ------- :class:`array_like` or :class:`float` A boolean that is ``True`` for objects that are psf-like based on Gaia quantities. Notes ----- - Input quantities are the same as in `the Gaia data model`_. """ # ADM check for valid Gaia DR. check_gaia_survey(dr) if dr == "dr2": psflike = np.logical_or( (g <= 19.) * (aen < 10.**0.5), (g >= 19.) * (aen < 10.**(0.5 + 0.2*(g - 19.))) ) elif dr == "edr3": psflike = np.logical_or( (g <= 19.) * (aen < 10.**0.3), (g >= 19.) * (aen < 10.**(0.3 + 0.2*(g - 19.))) ) return psflike def sub_gaia_edr3(filename, objs=None, suball=False): """Substitute Gaia EDR3 "astrometric" columns into DR9 sweeps. Parameters ---------- filename : :class:`str` Full path to a sweeps file e.g. `legacysurvey/dr9/south/sweep/9.0/sweep-210p015-220p020.fits`. objs : :class:`array_like`, optional, defaults to ``None`` The contents of `filename`. If ``None``, read from `filename`. suball : :class:`bool`, optional, defaults to ``False`` If ``True`` substitute all of the Gaia EDR3 columns, not just the "astrometric" set used for targeting. Returns ------- :class:`array_like` `objs` (or the contents of `filename`) is returned but with the PARALLAX, PARALLAX_IVAR, PMRA, PMRA_IVAR, PMDEC, PMDEC_IVAR columns substituted with their Gaia EDR3 values. Notes ----- - The GAIA_DIR environment variable must be set. - The input `objs` will be altered (if it is not ``None``). """ # ADM if "objs" wasn't sent, read in the sweeps file. if objs is None: objs = fitsio.read(filename, "SWEEP") # ADM construct the GAIA sweep file location. ender = filename.split("dr9/")[-1].replace(".fits", '-gaiaedr3match.fits') gd = get_gaia_dir("edr3") gsweepfn = os.path.join(gd, "sweeps", ender) # ADM read the gaia sweep. if suball: cols = [col for col in objs.dtype.names if col in gaiadatamodel.dtype.names] else: cols = ["PARALLAX", "PARALLAX_IVAR", "PMRA", "PMRA_IVAR", "PMDEC", "PMDEC_IVAR", "GAIA_DUPLICATED_SOURCE", "GAIA_ASTROMETRIC_PARAMS_SOLVED", "GAIA_ASTROMETRIC_SIGMA5D_MAX", "GAIA_ASTROMETRIC_EXCESS_NOISE"] gaiacols = [col.replace("GAIA", "EDR3") if "GAIA" in col or "REF" in col else "EDR3_{}".format(col) for col in cols] gswobjs, gswhdr = fitsio.read(gsweepfn, "GAIA_SWEEP", header=True) # ADM substitute the appropriate columns. g3 = gswobjs["REF_CAT"] == 'G3' for col, gaiacol in zip(cols, gaiacols): objs[col][g3] = gswobjs[gaiacol][g3] # ADM may also need to update the REF_EPOCH. objs["REF_EPOCH"][g3] = gswhdr["REFEPOCH"] # ADM if substituting everything, add vital 'PHOT_G_N_OBS' column. if suball: dt = objs.dtype.descr + [('GAIA_PHOT_G_N_OBS', '>i4')] objsout = np.empty(len(objs), dtype=dt) for col in objs.dtype.names: objsout[col] = objs[col] objsout['GAIA_PHOT_G_N_OBS'][g3] = gswobjs['EDR3_PHOT_G_N_OBS'][g3] return objsout return objs def unextinct_gaia_mags(G, Bp, Rp, ebv, scaling=0.86): """Correct Gaia magnitudes for dust extinction. Parameters ---------- G : :class:`array_like` or :class:`float` Gaia-based G MAGNITUDE (not Galactic-extinction-corrected).
Bp : :class:`array_like` or :class:`float` Gaia-based Bp MAGNITUDE (not Galactic-extinction-corrected). Rp : :class:`array_like` or :class:`float` Gaia-based Rp MAGNITUDE (not Galactic-extinction-corrected). ebv : :class:`array_like` or :class:`float` E(B-V) values from the SFD dust maps. scaling : :class:`float` Multiply `ebv` by this scaling factor. Set to 0.86 to apply Schlafly & Finkbeiner (2011) correction. Returns ------- :class:`array_like` or :class:`float` Gaia-based G MAGNITUDE (with Galactic-extinction correction). :class:`array_like` or :class:`float` Gaia-based Bp MAGNITUDE (with Galactic-extinction correction). :class:`array_like` or :class:`float` Gaia-based Rp MAGNITUDE (with Galactic-extinction correction). Notes ----- - See eqn1/tab1 of `Gaia Collaboration/Babusiaux et al. (2018)`_. - First version `borrowed shamelessly from Sergey Koposov`_. """ # ADM correction coefficient for non-linear dust. gaia_poly_coeff = {"G": [0.9761, -0.1704, 0.0086, 0.0011, -0.0438, 0.0013, 0.0099], "BP": [1.1517, -0.0871, -0.0333, 0.0173, -0.0230, 0.0006, 0.0043], "RP": [0.6104, -0.0170, -0.0026, -0.0017, -0.0078, 0.00005, 0.0006]} # ADM dictionaries to hold the input and output magnitudes. inmags = {"G": G, "BP": Bp, "RP": Rp} outmags = {} # ADM apply the extinction corrections in each band. gaia_a0 = 3.1 * ebv * scaling for i in range(2): if i == 0: bprp = Bp - Rp else: bprp = outmags["BP"] - outmags["RP"] for band in ['G', 'BP', 'RP']: curp = gaia_poly_coeff[band] dmag = ( np.poly1d(gaia_poly_coeff[band][:4][::-1])(bprp) + curp[4]*gaia_a0 + curp[5]*gaia_a0**2 + curp[6]*bprp*gaia_a0 )*gaia_a0 # ADM populate the per-band extinction-corrected magnitudes. outmags[band] = inmags[band] - dmag return outmags["G"], outmags["BP"], outmags["RP"] def is_in_Galaxy(objs, radec=False): """An (l, b) cut developed by Boris Gaensicke to avoid the Galaxy. Parameters ---------- objs : :class:`~numpy.ndarray` Array of objects. Must contain at least the columns "RA" and "DEC". radec : :class:`bool`, optional, defaults to ``False`` If ``True`` then the passed `objs` is an [RA, Dec] list instead of a rec array. Returns ------- :class:`~numpy.ndarray` A boolean array that is ``True`` for objects that are close to the Galaxy and ``False`` for objects that aren't. """ # ADM which flavor of RA/Dec was passed. if radec: ra, dec = objs else: ra, dec = objs["RA"], objs["DEC"] # ADM convert to Galactic coordinates. c = SkyCoord(ra*u.degree, dec*u.degree) gal = c.galactic # ADM and limit to (l, b) ranges. ii = np.abs(gal.b.value) < np.abs(gal.l.value*0.139-25) return ii def gaia_dr_from_ref_cat(refcat): """Determine the Gaia DR from an array of values, and check it's unique. Parameters ---------- refcat : :class:`~numpy.ndarray` or `str` A `REF_CAT` string or an array of `REF_CAT` strings (e.g. b"G2"). Returns ------- :class:`~numpy.ndarray` The corresponding Data Release number (e.g. 2) Notes ----- - In reality, only strips the final integer off strings like "X3". So, can generically be used for that purpose. """ # ADM if an integer was passed. refcat = np.atleast_1d(refcat) # ADM in case old-style byte strings were passed. if isinstance(refcat[0], bytes): return np.array([int(i.decode()[-1]) for i in refcat]) else: return np.array([int(i[-1]) for i in refcat]) def scrape_gaia(dr="dr2", nfiletest=None): """Retrieve the bulk CSV files released by the Gaia collaboration. Parameters ---------- dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release.
Options are "dr2", "edr3" nfiletest : :class:`int`, optional, defaults to ``None`` If an integer is sent, only retrieve this number of files, for testing. Returns ------- Nothing But the archived Gaia CSV files are written to $GAIA_DIR/csv. For "edr3" the directory actually written to is $GAIA_DIR/../gaia_edr3. Notes ----- - The environment variable $GAIA_DIR must be set. - Runs in about 26 hours for 61,234 Gaia DR2 files. - Runs in about 19 hours for 3,386 Gaia EDR3 files. """ # ADM check that the GAIA_DIR is set and retrieve it. gaiadir = get_gaia_dir(dr) gdict = {"dr2": "http://cdn.gea.esac.esa.int/Gaia/gdr2/gaia_source/csv/", "edr3": "http://cdn.gea.esac.esa.int/Gaia/gedr3/gaia_source/"} url = gdict[dr] # ADM construct the directory to which to write files. csvdir = os.path.join(gaiadir, 'csv') # ADM the directory better be empty for the wget! if os.path.exists(csvdir): if len(os.listdir(csvdir)) > 0: msg = "{} should be empty to wget Gaia csv files!".format(csvdir) log.critical(msg) raise ValueError(msg) # ADM make the directory, if needed. else: log.info('Making Gaia directory for storing CSV files') os.makedirs(csvdir) # ADM pull back the index.html from the url. index = requests.get(url) # ADM retrieve any file name that starts with GaiaSource. # ADM the [1::2] pulls back just the odd lines from the split list. filelist = index.text.split("GaiaSource")[1::2] # ADM if nfiletest was passed, just work with that number of files. test = nfiletest is not None if test: filelist = filelist[:nfiletest] nfiles = len(filelist) # ADM loop through the filelist. t0 = time() stepper = nfiles//600 for nfile, fileinfo in enumerate(filelist): # ADM make the wget command to retrieve the file and issue it. cmd = 'wget -q {}/GaiaSource{} -P {}'.format(url, fileinfo[:-2], csvdir) os.system(cmd) nfil = nfile + 1 if nfil % stepper == 0 or test: elapsed = time() - t0 rate = nfil / elapsed log.info( '{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed' .format(nfil, nfiles, rate, elapsed/60.) ) log.info('Done...t={:.1f}s'.format(time()-t0)) return def gaia_csv_to_fits(dr="dr2", numproc=32): """Convert files in $GAIA_DIR/csv to files in $GAIA_DIR/fits. Parameters ---------- dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3". For "edr3" the directory used is actually $GAIA_DIR/../gaia_edr3 numproc : :class:`int`, optional, defaults to 32 The number of parallel processes to use. Returns ------- Nothing But the archived Gaia CSV files in $GAIA_DIR/csv are converted to FITS files in the directory $GAIA_DIR/fits. Also, a look-up table is written to $GAIA_DIR/fits/hpx-to-files.pickle for which each index is an nside=_get_gaia_nside(), nested scheme HEALPixel and each entry is a list of the FITS files that touch that HEAPixel. Notes ----- - The environment variable $GAIA_DIR must be set. - if numproc==1, use the serial code instead of the parallel code. - Runs in 1-2 hours with numproc=32 for 61,234 Gaia DR2 files. - Runs in 1-2 hours with numproc=32 for 3,386 Gaia EDR3 files. """ # ADM the resolution at which the Gaia HEALPix files should be stored. nside = _get_gaia_nside() # ADM check that the GAIA_DIR is set. gaiadir = get_gaia_dir(dr) log.info("running on {} processors".format(numproc)) # ADM construct the directories for reading/writing files. csvdir = os.path.join(gaiadir, 'csv') fitsdir = os.path.join(gaiadir, 'fits') # ADM make sure the output directory is empty. 
if os.path.exists(fitsdir): if len(os.listdir(fitsdir)) > 0: msg = "{} should be empty to make Gaia FITS files!".format(fitsdir) log.critical(msg) raise ValueError(msg) # ADM make the output directory, if needed. else: log.info('Making Gaia directory for storing FITS files') os.makedirs(fitsdir) # ADM construct the list of input files. infiles = sorted(glob("{}/GaiaSource*csv*".format(csvdir))) nfiles = len(infiles) # ADM the critical function to run on every file. def _write_gaia_fits(infile): """read an input name for a csv file and write it to FITS""" outbase = os.path.basename(infile) outfilename = "{}.fits".format(outbase.split(".")[0]) outfile = os.path.join(fitsdir, outfilename) fitstable = ascii.read(infile, format='csv') # ADM need to convert 5-string values to boolean. cols = np.array(fitstable.dtype.names) boolcols = cols[np.hstack(fitstable.dtype.descr)[1::2] == '<U5'] for col in boolcols: fitstable[col] = fitstable[col] == 'true' # ADM only write out the columns we need for targeting. nobjs = len(fitstable) if dr == "dr2": done = np.zeros(nobjs, dtype=ingaiadatamodel.dtype) elif dr == "edr3": done = np.zeros(nobjs, dtype=inedr3datamodel.dtype) for col in done.dtype.names: if col == 'REF_CAT': if dr == "dr2": done[col] = 'G2' elif dr == "edr3": done[col] = 'G3' else: done[col] = fitstable[col.lower()] fitsio.write(outfile, done, extname='GAIAFITS') # ADM return the HEALPixels that this file touches. pix = set(radec2pix(nside, fitstable["ra"], fitstable["dec"])) return [pix, os.path.basename(outfile)] # ADM this is just to count processed files in _update_status. nfile = np.zeros((), dtype='i8') t0 = time() stepper = nfiles//600 def _update_status(result): """wrapper function for the critical reduction operation, that occurs on the main parallel process""" if nfile % stepper == 0 and nfile > 0: rate = nfile / (time() - t0) elapsed = time() - t0 log.info( '{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed' .format(nfile, nfiles, rate, elapsed/60.) ) nfile[...] += 1 # this is an in-place modification return result # - Parallel process input files... if numproc > 1: pool = sharedmem.MapReduce(np=numproc) with pool: pixinfile = pool.map(_write_gaia_fits, infiles, reduce=_update_status) # ADM ...or run in serial. else: pixinfile = list() for file in infiles: pixinfile.append(_update_status(_write_gaia_fits(file))) # ADM create a list for which each index is a HEALPixel and each # ADM entry is a list of files that touch that HEALPixel. npix = hp.nside2npix(nside) pixlist = [[] for i in range(npix)] for pixels, file in pixinfile: for pix in pixels: pixlist[pix].append(file) # ADM write out the HEALPixel->files look-up table. outfilename = os.path.join(fitsdir, "hpx-to-files.pickle") outfile = open(outfilename, "wb") pickle.dump(pixlist, outfile) outfile.close() log.info('Done...t={:.1f}s'.format(time()-t0)) return def gaia_fits_to_healpix(dr="dr2", numproc=32): """Convert files in $GAIA_DIR/fits to files in $GAIA_DIR/healpix. Parameters ---------- dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3". For "edr3" the directory used is actually $GAIA_DIR/../gaia_edr3 numproc : :class:`int`, optional, defaults to 32 The number of parallel processes to use. Returns ------- Nothing But the archived Gaia FITS files in $GAIA_DIR/fits are rearranged by HEALPixel in the directory $GAIA_DIR/healpix. 
The HEALPixel sense is nested with nside=_get_gaia_nside(), and each file in $GAIA_DIR/healpix is called healpix-xxxxx.fits, where xxxxx corresponds to the HEALPixel number. Notes ----- - The environment variable $GAIA_DIR must be set. - if numproc==1, use the serial code instead of the parallel code. - Runs in 1-2 hours with numproc=32 for 61,234 Gaia DR2 files. - Runs in ~15 minutes with numproc=32 for 3,386 Gaia EDR3 files. """ # ADM the resolution at which the Gaia HEALPix files should be stored. nside = _get_gaia_nside() # ADM check that the GAIA_DIR is set. gaiadir = get_gaia_dir(dr) # ADM construct the directories for reading/writing files. fitsdir = os.path.join(gaiadir, 'fits') hpxdir = os.path.join(gaiadir, 'healpix') # ADM make sure the output directory is empty. if os.path.exists(hpxdir): if len(os.listdir(hpxdir)) > 0: msg = "{} should be empty to make Gaia HEALPix files!".format(hpxdir) log.critical(msg) raise ValueError(msg) # ADM make the output directory, if needed. else: log.info('Making Gaia directory for storing HEALPix files') os.makedirs(hpxdir) # ADM read the pixel -> file look-up table. infilename = os.path.join(fitsdir, "hpx-to-files.pickle") infile = open(infilename, "rb") pixlist = pickle.load(infile) npixels = len(pixlist) # ADM include the pixel number explicitly in the look-up table. pixlist = list(zip(np.arange(npixels), pixlist)) # ADM the critical function to run on every file. def _write_hpx_fits(pixlist): """from files that touch a pixel, write out objects in each pixel""" pixnum, files = pixlist # ADM only proceed if some files touch a pixel. if len(files) > 0: # ADM track if it's our first time through the files loop. first = True # ADM Read in files that touch a pixel. for file in files: filename = os.path.join(fitsdir, file) objs = fitsio.read(filename) # ADM only retain objects in the correct pixel. pix = radec2pix(nside, objs["RA"], objs["DEC"]) if first: done = objs[pix == pixnum] first = False else: done = np.hstack([done, objs[pix == pixnum]]) # ADM construct the name of the output file. outfilename = io.hpx_filename(pixnum) outfile = os.path.join(hpxdir, outfilename) # ADM write out the file. hdr = fitsio.FITSHDR() hdr['HPXNSIDE'] = nside hdr['HPXNEST'] = True fitsio.write(outfile, done, extname='GAIAHPX', header=hdr) return # ADM this is just to count processed files in _update_status. npix = np.zeros((), dtype='i8') t0 = time() def _update_status(result): """wrapper function for the critical reduction operation, that occurs on the main parallel process""" if npix % 100 == 0 and npix > 0: rate = npix / (time() - t0) elapsed = time() - t0 log.info( '{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed' .format(npix, npixels, rate, elapsed/60.) ) npix[...] += 1 # this is an in-place modification return result # - Parallel process input files... if numproc > 1: pool = sharedmem.MapReduce(np=numproc) with pool: _ = pool.map(_write_hpx_fits, pixlist, reduce=_update_status) # ADM ...or run in serial. else: for pix in pixlist: _update_status(_write_hpx_fits(pix)) log.info('Done...t={:.1f}s'.format(time()-t0)) return def make_gaia_files(dr="dr2", numproc=32, download=False): """Make the HEALPix-split Gaia DR2 files used by desitarget. Parameters ---------- dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3". For "edr3" the directory used is actually $GAIA_DIR/../gaia_edr3. numproc : :class:`int`, optional, defaults to 32 The number of parallel processes to use. 
download : :class:`bool`, optional, defaults to ``False`` If ``True`` then wget the Gaia DR2 csv files from ESA. Returns ------- Nothing But produces: - Full Gaia DR2 CSV files in $GAIA_DIR/csv. - FITS files with columns from `ingaiadatamodel` or `inedr3datamodel` in $GAIA_DIR/fits. - FITS files reorganized by HEALPixel in $GAIA_DIR/healpix. The HEALPixel sense is nested with nside=_get_gaia_nside(), and each file in $GAIA_DIR/healpix is called healpix-xxxxx.fits, where xxxxx corresponds to the HEALPixel number. Notes ----- - The environment variable $GAIA_DIR must be set. - if numproc==1, use the serial code instead of the parallel code. - Runs in ~26/20 hours for "dr2"/"edr3" if download is ``True``. - Runs in 1-2 hours with numproc=32 if download is ``False``. """ t0 = time() log.info('Begin making Gaia files...t={:.1f}s'.format(time()-t0)) # ADM check that the GAIA_DIR is set. gaiadir = get_gaia_dir(dr) # ADM a quick check that the fits and healpix directories are empty # ADM before embarking on the slower parts of the code. fitsdir = os.path.join(gaiadir, 'fits') hpxdir = os.path.join(gaiadir, 'healpix') for direc in [fitsdir, hpxdir]: if os.path.exists(direc): if len(os.listdir(direc)) > 0: msg = "{} should be empty to make Gaia files!".format(direc) log.critical(msg) raise ValueError(msg) if download: scrape_gaia(dr=dr) log.info('Retrieved Gaia files from ESA...t={:.1f}s'.format(time()-t0)) gaia_csv_to_fits(dr=dr, numproc=numproc) log.info('Converted CSV files to FITS...t={:.1f}s'.format(time()-t0)) gaia_fits_to_healpix(dr=dr, numproc=numproc) log.info('Rearranged FITS files by HEALPixel...t={:.1f}s'.format(time()-t0)) return def pop_gaia_coords(inarr): """Pop (DR2 and/or EDR3) GAIA_RA and GAIA_DEC columns off an array. Parameters ---------- inarr : :class:`~numpy.ndarray` Structured array with various column names. Returns ------- :class:`~numpy.ndarray` Input array with Gaia RA/Dec columns removed. """ posscols = ['GAIA_RA', 'GAIA_DEC', 'EDR3_RA', 'EDR3_DEC'] return rfn.drop_fields(inarr, posscols) def pop_gaia_columns(inarr, popcols): """Convenience function to pop columns off an input array. Parameters ---------- inarr : :class:`~numpy.ndarray` Structured array with various column names. popcols : :class:`list` List of columns to remove from the input array. Returns ------- :class:`~numpy.ndarray` Input array with columns in cols removed. """ return rfn.drop_fields(inarr, popcols) def read_gaia_file(filename, header=False, addobjid=False, dr="dr2"): """Read in a Gaia healpix file in the appropriate format for desitarget. Parameters ---------- filename : :class:`str` File name of a single Gaia "healpix-" file. header : :class:`bool`, optional, defaults to ``False`` If ``True`` then return (data, header) instead of just data. addobjid : :class:`bool`, optional, defaults to ``False`` Include, in the output, two additional columns. A column "GAIA_OBJID" that is the integer number of each row read from file and a column "GAIA_BRICKID" that is the integer number of the file itself. dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3". Used to format the output data model. Returns ------- :class:`~numpy.ndarray` Gaia data translated to targeting format (upper-case etc.) with the columns corresponding to `desitarget.gaiamatch.gaiadatamodel` Notes ----- - A better location for this might be in `desitarget.io`? """ # ADM check for an epic fail on the the version of fitsio. 
check_fitsio_version() # ADM prepare to read in the Gaia data by reading in columns. fx = fitsio.FITS(filename, upper=True) fxcolnames = fx[1].get_colnames() hdr = fx[1].read_header() # ADM read appropriate columns and convert output data model names. if dr == "edr3": readcolumns = list(inedr3datamodel.dtype.names) try: outdata = fx[1].read(columns=readcolumns) # ADM basic check for mismatched files. except ValueError: msg = "{} is a dr2 file, but the dr input is {}".format(filename, dr) log.error(msg) raise ValueError(msg) outdata.dtype.names = edr3datamodel.dtype.names prefix = "EDR3" # ADM the ERRORS need to be converted to IVARs. # ADM remember to leave 0 entries as 0. for col in ['RA_IVAR', 'DEC_IVAR', 'PMRA_IVAR', 'PMDEC_IVAR', 'PARALLAX_IVAR']: outcol = "{}_{}".format(prefix, col) w = np.where(outdata[outcol] != 0)[0] outdata[outcol][w] = 1./(outdata[outcol][w]**2.) else: readcolumns = list(ingaiadatamodel.dtype.names) outdata = fx[1].read(columns=readcolumns) # ADM basic check for mismatched files. if 'G3' in outdata["REF_CAT"]: msg = "{} is a dr3 file, but the dr input is {}".format(filename, dr) log.error(msg) raise ValueError(msg) outdata.dtype.names = gaiadatamodel.dtype.names prefix = "GAIA" # ADM the proper motion ERRORS need to be converted to IVARs. # ADM remember to leave 0 entries as 0. for col in ['PMRA_IVAR', 'PMDEC_IVAR', 'PARALLAX_IVAR']: w = np.where(outdata[col] != 0)[0] outdata[col][w] = 1./(outdata[col][w]**2.) # ADM if requested, add an object identifier for each file row. if addobjid: newdt = outdata.dtype.descr for tup in [('{}_BRICKID'.format(prefix), '>i4'), ('{}_OBJID'.format(prefix), '>i4')]: newdt.append(tup) nobjs = len(outdata) newoutdata = np.zeros(nobjs, dtype=newdt) for col in outdata.dtype.names: newoutdata[col] = outdata[col] newoutdata['{}_OBJID'.format(prefix)] = np.arange(nobjs) nside = _get_gaia_nside() hpnum = radec2pix(nside, outdata["{}_RA".format(prefix)], outdata["{}_DEC".format(prefix)]) # ADM int should fail if HEALPix in the file aren't unique. newoutdata['{}_BRICKID'.format(prefix)] = int(np.unique(hpnum)) outdata = newoutdata # ADM return data from the Gaia file, with the header if requested. if header: fx.close() return outdata, hdr else: fx.close() return outdata def find_gaia_files(objs, neighbors=True, radec=False, dr="dr2"): """Find full paths to Gaia healpix files for objects by RA/Dec. Parameters ---------- objs : :class:`~numpy.ndarray` Array of objects. Must contain at least the columns "RA" and "DEC". neighbors : :class:`bool`, optional, defaults to ``True`` Also return all neighboring pixels that touch the files of interest in order to prevent edge effects (e.g. if a Gaia source is 1 arcsec away from a primary source and so in an adjacent pixel). radec : :class:`bool`, optional, defaults to ``False`` If ``True`` then the passed `objs` is an [RA, Dec] list instead of a rec array. dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3" Returns ------- :class:`list` A list of all Gaia files that need to be read in to account for objects at the passed locations. Notes ----- - The environment variable $GAIA_DIR must be set. """ # ADM the resolution at which the Gaia HEALPix files are stored. nside = _get_gaia_nside() # ADM check that the GAIA_DIR is set and retrieve it. 
gaiadir = get_gaia_dir(dr) hpxdir = os.path.join(gaiadir, 'healpix') return io.find_star_files(objs, hpxdir, nside, neighbors=neighbors, radec=radec) def find_gaia_files_hp(nside, pixlist, neighbors=True, dr="dr2"): """Find full paths to Gaia healpix files in a set of HEALPixels. Parameters ---------- nside : :class:`int` (NESTED) HEALPixel nside. pixlist : :class:`list` or `int` A set of HEALPixels at `nside`. neighbors : :class:`bool`, optional, defaults to ``True`` Also return files corresponding to all neighbors that touch the pixels in `pixlist` to prevent edge effects (e.g. a Gaia source is 1 arcsec outside of `pixlist` and so in an adjacent pixel). dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3" Returns ------- :class:`list` A list of all Gaia files that need to be read in to account for objects in the passed list of pixels. Notes ----- - The environment variable $GAIA_DIR must be set. """ # ADM the resolution at which the healpix files are stored. filenside = _get_gaia_nside() # ADM check that the GAIA_DIR is set and retrieve it. gaiadir = get_gaia_dir(dr) hpxdir = os.path.join(gaiadir, 'healpix') # ADM work with pixlist as an array. pixlist = np.atleast_1d(pixlist) # ADM determine the pixels that touch the passed pixlist. pixnum = nside2nside(nside, filenside, pixlist) # ADM if neighbors was sent, then retrieve all pixels that touch each # ADM pixel covered by the provided locations, to prevent edge effects... if neighbors: pixnum = add_hp_neighbors(filenside, pixnum) # ADM reformat in the Gaia healpix format used by desitarget. gaiafiles = [os.path.join(hpxdir, io.hpx_filename(pn)) for pn in pixnum] return gaiafiles def find_gaia_files_box(gaiabounds, neighbors=True, dr="dr2"): """Find full paths to Gaia healpix files in an RA/Dec box. Parameters ---------- gaiabounds : :class:`list` A region of the sky bounded by RA/Dec. Pass as a 4-entry list to represent an area bounded by [RAmin, RAmax, DECmin, DECmax] neighbors : :class:`bool`, optional, defaults to ``True`` Also return files corresponding to all neighboring pixels that touch the files that touch the box in order to prevent edge effects (e.g. if a Gaia source might be 1 arcsec outside of the box and so in an adjacent pixel) dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3" Returns ------- :class:`list` A list of all Gaia files that need to be read in to account for objects in the passed box. Notes ----- - Uses the `healpy` routines that rely on `fact`, so the usual warnings about returning different pixel sets at different values of `fact` apply. See: https://healpy.readthedocs.io/en/latest/generated/healpy.query_polygon.html - The environment variable $GAIA_DIR must be set. """ # ADM the resolution at which the healpix files are stored. nside = _get_gaia_nside() # ADM check that the GAIA_DIR is set and retrieve it. gaiadir = get_gaia_dir(dr) hpxdir = os.path.join(gaiadir, 'healpix') # ADM determine the pixels that touch the box. pixnum = hp_in_box(nside, gaiabounds, inclusive=True, fact=4) # ADM if neighbors was sent, then retrieve all pixels that touch each # ADM pixel covered by the provided locations, to prevent edge effects... if neighbors: pixnum = add_hp_neighbors(nside, pixnum) # ADM reformat in the Gaia healpix format used by desitarget. 
gaiafiles = [os.path.join(hpxdir, io.hpx_filename(pn)) for pn in pixnum] return gaiafiles def find_gaia_files_beyond_gal_b(mingalb, neighbors=True, dr="dr2"): """Find full paths to Gaia healpix files beyond a Galactic b. Parameters ---------- mingalb : :class:`float` Closest latitude to Galactic plane to return HEALPixels (e.g. send 10 to limit to pixels beyond -10o <= b < 10o). neighbors : :class:`bool`, optional, defaults to ``True`` Also return files corresponding to neighboring pixels that touch in order to prevent edge effects (e.g. if a Gaia source might be 1 arcsec beyond mingalb and so in an adjacent pixel). dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3". Returns ------- :class:`list` All Gaia files that need to be read in to account for objects further from the Galactic plane than `mingalb`. Notes ----- - The environment variable $GAIA_DIR must be set. - :func:`desitarget.geomask.hp_beyond_gal_b()` is already quite inclusive, so you may retrieve some extra files along the `mingalb` boundary. """ # ADM the resolution at which the healpix files are stored. nside = _get_gaia_nside() # ADM check that the GAIA_DIR is set and retrieve it. gaiadir = get_gaia_dir(dr) hpxdir = os.path.join(gaiadir, 'healpix') # ADM determine the pixels beyond mingalb. pixnum = hp_beyond_gal_b(nside, mingalb, neighbors=True) # ADM if neighbors was sent, retrieve all pixels that touch each # ADM retrieved, to prevent edge effects... if neighbors: pixnum = add_hp_neighbors(nside, pixnum) # ADM reformat in the Gaia healpix format used by desitarget. gaiafiles = [os.path.join(hpxdir, io.hpx_filename(pn)) for pn in pixnum] return gaiafiles def find_gaia_files_tiles(tiles=None, neighbors=True, dr="dr2"): """ Parameters ---------- tiles : :class:`~numpy.ndarray` Array of tiles, or ``None`` to use all DESI tiles from :func:`desimodel.io.load_tiles`. neighbors : :class:`bool`, optional, defaults to ``True`` Also return all neighboring pixels that touch the files of interest in order to prevent edge effects (e.g. if a Gaia source is 1 arcsec away from a primary source and so in an adjacent pixel). dr : :class:`str`, optional, defaults to "dr2" Name of a Gaia data release. Options are "dr2", "edr3". Returns ------- :class:`list` A list of all Gaia files that touch the passed tiles. Notes ----- - The environment variables $GAIA_DIR and $DESIMODEL must be set. """ # ADM check that the DESIMODEL environment variable is set. if os.environ.get('DESIMODEL') is None: msg = "DESIMODEL environment variable must be set!!!" log.critical(msg) raise ValueError(msg) # ADM the resolution at which the healpix files are stored. nside = _get_gaia_nside() # ADM check that the GAIA_DIR is set and retrieve it. gaiadir = get_gaia_dir(dr) hpxdir = os.path.join(gaiadir, 'healpix') # ADM determine the pixels that touch the tiles. from desimodel.footprint import tiles2pix pixnum = tiles2pix(nside, tiles=tiles) # ADM if neighbors was sent, then retrieve all pixels that touch each # ADM pixel covered by the provided locations, to prevent edge effects... if neighbors: pixnum = add_hp_neighbors(nside, pixnum) # ADM reformat in the Gaia healpix format used by desitarget. gaiafiles = [os.path.join(hpxdir, io.hpx_filename(pn)) for pn in pixnum] return gaiafiles def match_gaia_to_primary(objs, matchrad=0.2, retaingaia=False, gaiabounds=[0., 360., -90., 90.], dr="edr3"): """Match objects to Gaia healpix files and return Gaia information. 
Parameters ---------- objs : :class:`~numpy.ndarray` Must contain at least "RA", "DEC". ASSUMED TO BE AT A REFERENCE EPOCH OF 2015.5 and EQUINOX J2000/ICRS. matchrad : :class:`float`, optional, defaults to 0.2 arcsec The matching radius in arcseconds. retaingaia : :class:`float`, optional, defaults to False If set, return all of the Gaia information in the "area" occupied by `objs` (whether a Gaia object matches a passed RA/Dec or not.) THIS ASSUMES THAT THE PASSED OBJECTS ARE FROM A SWEEPS file and that integer values nearest the maximum and minimum passed RAs and Decs fairly represent the areal "edges" of that file. gaiabounds : :class:`list`, optional, defaults to the whole sky Used with `retaingaia` to determine the area over which to retrieve Gaia objects that don't match a sweeps object. Pass a 4-entry (corresponding to [RAmin, RAmax, DECmin, DECmax]). dr : :class:`str`, optional, defaults to "edr3" Name of a Gaia data release. Options are "dr2", "edr3". Specifies which output data model to use. Returns ------- :class:`~numpy.ndarray` Gaia information for each matching object, in a format like `gaiadatamodel` (for `dr=dr2`) or `edr3datamodel` (`dr=edr3`). Notes ----- - The first len(`objs`) objects correspond row-by-row to `objs`. - For objects that do NOT have a match in Gaia, the "REF_ID" column is set to -1, and all other columns are zero. - If `retaingaia` is ``True`` then objects after the first len(`objs`) objects are Gaia objects that do not have a sweeps match but are in the area bounded by `gaiabounds`. """ # ADM retain all Gaia objects in a sweeps-like box. if retaingaia: ramin, ramax, decmin, decmax = gaiabounds # ADM convert the coordinates of the objects to a SkyCoord object. cobjs = SkyCoord(objs["RA"]*u.degree, objs["DEC"]*u.degree) nobjs = cobjs.size # ADM catch the special case that only a single object was passed. if nobjs == 1: return match_gaia_to_primary_single(objs, matchrad=matchrad, dr=dr) # ADM set up the output arrays, contingent on the Gaia Data Release. if dr == "edr3": gaiainfo = np.zeros(nobjs, dtype=edr3datamodel.dtype) suppgaiainfo = np.zeros(0, dtype=edr3datamodel.dtype) prefix = "EDR3" else: gaiainfo = np.zeros(nobjs, dtype=gaiadatamodel.dtype) suppgaiainfo = np.zeros(0, dtype=gaiadatamodel.dtype) prefix = "GAIA" # ADM objects without matches should have REF_ID of -1. gaiainfo['REF_ID'] = -1 # ADM determine which Gaia files need to be considered. if retaingaia: gaiafiles = find_gaia_files_box(gaiabounds, dr=dr) else: gaiafiles = find_gaia_files(objs, dr=dr) # ADM loop through the Gaia files and match to the passed objects. gracol, gdeccol = "{}_RA".format(prefix), "{}_DEC".format(prefix) for fn in gaiafiles: gaia = read_gaia_file(fn, dr=dr) # ADM rewind the coordinates in the case of Gaia EDR3, which is # ADM at a reference epoch of 2016.0 not 2015.5. if dr == 'edr3': rarew, decrew = rewind_coords(gaia["EDR3_RA"], gaia["EDR3_DEC"], gaia["EDR3_PMRA"], gaia["EDR3_PMDEC"], epochnow=2016.0, epochpast=2015.5) gaia["EDR3_RA"] = rarew gaia["EDR3_DEC"] = decrew cgaia = SkyCoord(gaia[gracol]*u.degree, gaia[gdeccol]*u.degree) idobjs, idgaia, _, _ = cgaia.search_around_sky(cobjs, matchrad*u.arcsec) # ADM assign the Gaia info to the array that corresponds to the passed objects. gaiainfo[idobjs] = gaia[idgaia] # ADM if retaingaia was set, also build an array of Gaia objects that # ADM don't have sweeps matches, but are within the RA/Dec bounds. if retaingaia: # ADM find the Gaia IDs that didn't match the passed objects. 
nomatch = set(np.arange(len(gaia)))-set(idgaia) noidgaia = np.array(list(nomatch)) # ADM which Gaia objects with these IDs are within the bounds. if len(noidgaia) > 0: suppg = gaia[noidgaia] winbounds = np.where( (suppg[gracol] >= ramin) & (suppg[gracol] < ramax) & (suppg[gdeccol] >= decmin) & (suppg[gdeccol] < decmax) )[0] # ADM Append those Gaia objects to the suppgaiainfo array. if len(winbounds) > 0: suppgaiainfo = np.hstack([suppgaiainfo, suppg[winbounds]]) if retaingaia: gaiainfo = np.hstack([gaiainfo, suppgaiainfo]) return gaiainfo def match_gaia_to_primary_single(objs, matchrad=0.2, dr="edr3"): """Match ONE object to Gaia "chunks" files and return the Gaia information. Parameters ---------- objs : :class:`~numpy.ndarray` Must contain at least "RA" and "DEC". MUST BE A SINGLE ROW. matchrad : :class:`float`, optional, defaults to 0.2 arcsec The matching radius in arcseconds. dr : :class:`str`, optional, defaults to "edr3" Name of a Gaia data release. Options are "dr2", "edr3". Specifies which output data model to use. Returns ------- :class:`~numpy.ndarray` The matching Gaia information for the object, where the returned format and columns correspond to `desitarget.secondary.gaiadatamodel` Notes ----- - If the object does NOT have a match in the Gaia files, the "REF_ID" column is set to -1, and all other columns are zero """ # ADM convert the coordinates of the input objects to a SkyCoord object. cobjs = SkyCoord(objs["RA"]*u.degree, objs["DEC"]*u.degree) nobjs = cobjs.size if nobjs > 1: log.error("Only matches one row but {} rows were sent".format(nobjs)) # ADM set up the output arrays, contingent on the Gaia Data Release. if dr == "edr3": gaiainfo = np.zeros(nobjs, dtype=edr3datamodel.dtype) prefix = "EDR3" else: gaiainfo = np.zeros(nobjs, dtype=gaiadatamodel.dtype) prefix = "GAIA" # ADM an object without matches should have REF_ID of -1. gaiainfo['REF_ID'] = -1 # ADM determine which Gaia files need to be considered. gaiafiles = find_gaia_files(objs, dr=dr) # ADM loop through the Gaia files and match to the passed object. gracol, gdeccol = "{}_RA".format(prefix), "{}_DEC".format(prefix) for fn in gaiafiles: gaia = read_gaia_file(fn, dr=dr) # ADM rewind the coordinates in the case of Gaia EDR3, which is # ADM at a reference epoch of 2016.0 not 2015.5. if dr == 'edr3': rarew, decrew = rewind_coords(gaia["EDR3_RA"], gaia["EDR3_DEC"], gaia["EDR3_PMRA"], gaia["EDR3_PMDEC"], epochnow=2016.0, epochpast=2015.5) gaia["EDR3_RA"] = rarew gaia["EDR3_DEC"] = decrew cgaia = SkyCoord(gaia[gracol]*u.degree, gaia[gdeccol]*u.degree) sep = cobjs.separation(cgaia) idgaia = np.where(sep < matchrad*u.arcsec)[0] # ADM assign the Gaia info to the array that corresponds to the passed object. if len(idgaia) > 0: gaiainfo = gaia[idgaia] return gaiainfo def write_gaia_matches(infiles, numproc=4, outdir=".", matchrad=0.2, dr="edr3", merge=False): """Match sweeps files to Gaia and rewrite with the Gaia columns added Parameters ---------- infiles : :class:`list` or `str` A list of input filenames (sweep files) OR a single filename. The files must contain at least the columns "RA" and "DEC". numproc : :class:`int`, optional, defaults to 4 The number of parallel processes to use. outdir : :class:`str`, optional, default to the current directory The directory to write the files. matchrad : :class:`float`, optional, defaults to 0.2 arcsec The matching radius in arcseconds. dr : :class:`str`, optional, defaults to "edr3" Name of a Gaia data release. 
Options are "dr2", "edr3" merge : :class:`bool`, optional, defaults to ``False`` If ``True``, merge the Gaia columns into the original sweeps file. Otherwise, just write the Gaia columns. Returns ------- Nothing But columns in `gaiadatamodel` or `edr3datamodel` that match the input sweeps files are written to file (if `merge=False`) or written after merging with the input sweeps columns (if `merge=True`). The output filename is the input filename with ".fits" replaced by "-gaia$DRmatch.fits", where $DR is `dr`. Notes ----- - if numproc==1, use the serial code instead of the parallel code. - The environment variable $GAIA_DIR must be set. """ # ADM check that the GAIA_DIR is set and retrieve it. gaiadir = get_gaia_dir(dr) # ADM convert a single file, if passed to a list of files. if isinstance(infiles, str): infiles = [infiles, ] # ADM check that files exist before proceeding. for filename in infiles: if not os.path.exists(filename): msg = "{} doesn't exist".format(filename) log.critical(msg) raise FileNotFoundError(msg) nfiles = len(infiles) ender = '-gaia{}match.fits'.format(dr) # ADM the critical function to run on every file. def _get_gaia_matches(fnwdir): '''wrapper on match_gaia_to_primary() given a file name''' # ADM extract the output file name. fn = os.path.basename(fnwdir) outfile = '{}/{}'.format(outdir, fn.replace(".fits", ender)) # ADM read in the objects. objs, hdr = io.read_tractor(fnwdir, header=True) # ADM add relevant header information. hdr["SWEEP"] = fnwdir hdr["MATCHRAD"] = matchrad hdr["GAIADR"] = dr # ADM match_gaia_to_primary always rewinds the epoch to 2015.5. hdr["REFEPOCH"] = 2015.5 depend.setdep(hdr, 'desitarget', desitarget_version) depend.setdep(hdr, 'desitarget-git', gitversion()) # ADM match to Gaia sources. gaiainfo = match_gaia_to_primary(objs, matchrad=matchrad, dr=dr) log.info('Done with Gaia match for {} primary objects...t = {:.1f}s' .format(len(objs), time()-start)) # ADM the extension name for the output file. if merge: # ADM if we are writing sweeps columns, remove GAIA_RA/DEC # ADM as they aren't in the imaging surveys data model. gaiainfo = pop_gaia_coords(gaiainfo) # ADM for EDR3, change column names to mimic the Legacy # ADM Surveys if we're updating the Legacy Surveys files. # ADM nothing will happen if the EDR3 fields aren't present. colmapper = {"EDR3_"+col.split("GAIA_")[-1]: col for col in gaiadatamodel.dtype.names if "REF" not in col} gaiainfo = rfn.rename_fields(gaiainfo, colmapper) # ADM add the Gaia column information to the sweeps array. scols = set(gaiainfo.dtype.names).intersection(set(objs.dtype.names)) for col in scols: objs[col] = gaiainfo[col] # ADM write out the file, atomically. fitsio.write(outfile+".tmp", objs, extname="GAIA_SWEEP", header=hdr, clobber=True) else: # ADM we're just writing the gaiainfo. But, include object # ADM identification information (RELEASE, BRICKID, OBJID). outdm = [desc for desc in objs.dtype.descr if 'RELEASE' in desc[0] or 'BRICKID' in desc[0] or 'OBJID' in desc[0]] outdm += gaiainfo.dtype.descr outobjs = np.empty(len(objs), dtype=outdm) for col in ['RELEASE', 'BRICKID', 'OBJID']: outobjs[col] = objs[col] for col in gaiainfo.dtype.names: outobjs[col] = gaiainfo[col] # ADM write out the file, atomically. fitsio.write(outfile+".tmp", outobjs, extname="GAIA_SWEEP", header=hdr, clobber=True) # ADM rename the atomically written file. os.rename(outfile+'.tmp', outfile) return True # ADM this is just to count sweeps files in _update_status. 
nfile = np.zeros((), dtype='i8') t0 = time() def _update_status(result): """wrapper function for the critical reduction operation, that occurs on the main parallel process""" if nfile % 20 == 0 and nfile > 0: elapsed = time() - t0 rate = elapsed / nfile log.info('{}/{} files; {:.1f} secs/file; {:.1f} total mins elapsed' .format(nfile, nfiles, rate, elapsed/60.)) nfile[...] += 1 # this is an in-place modification. return result # - Parallel process input files. if numproc > 1: pool = sharedmem.MapReduce(np=numproc) with pool: _ = pool.map(_get_gaia_matches, infiles, reduce=_update_status) else: for fn in infiles: _ = _update_status(_get_gaia_matches(fn)) return
desihub/desitarget
py/desitarget/gaiamatch.py
Python
bsd-3-clause
57,119
[ "Galaxy" ]
e0531f594f00e2b5ac73c5ff3f059ea907e32f578d28e273aa231385d38112db
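Aside: the "dr2" branch of gaia_psflike above reduces to a magnitude-dependent cut on the astrometric excess noise: log10(AEN) < 0.5 for g <= 19, relaxing linearly as 0.5 + 0.2*(g - 19) at fainter magnitudes. A minimal self-contained sketch of that criterion (the thresholds are copied from the function; the sample values below are hypothetical, for illustration only):

import numpy as np

def psflike_dr2(aen, g):
    # Brighter than g=19 the cut is a fixed AEN < 10**0.5; fainter
    # than g=19 the log10(AEN) threshold relaxes as 0.5 + 0.2*(g - 19).
    return np.logical_or(
        (g <= 19.) & (aen < 10.**0.5),
        (g >= 19.) & (aen < 10.**(0.5 + 0.2*(g - 19.)))
    )

aen = np.array([1.0, 5.0, 10.0])   # hypothetical excess-noise values
g = np.array([18.0, 20.5, 18.0])   # hypothetical g magnitudes
print(psflike_dr2(aen, g))         # -> [ True  True False]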
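Likewise, the core of unextinct_gaia_mags is the colour- and extinction-dependent coefficient of eqn1/tab1 of Babusiaux et al. (2018): dmag = k(Bp-Rp, A0) * A0, with A0 = 3.1 * E(B-V) * scaling; the function loops twice because the Bp-Rp colour itself has to be dereddened before the second pass. A worked single-band sketch, with the G-band coefficients copied from gaia_poly_coeff above and made-up input values:

import numpy as np

# G-band coefficients, copied from gaia_poly_coeff in the function above.
curp = [0.9761, -0.1704, 0.0086, 0.0011, -0.0438, 0.0013, 0.0099]

ebv, bprp = 0.05, 1.2        # hypothetical E(B-V) and observed Bp-Rp colour
a0 = 3.1 * ebv * 0.86        # monochromatic extinction, SF11 scaling

# k = cubic(bprp) + c4*a0 + c5*a0**2 + c6*bprp*a0; the G-band extinction
# is then A_G = k * a0, i.e. the dmag subtracted from the input magnitude.
k = (np.poly1d(curp[:4][::-1])(bprp)
     + curp[4]*a0 + curp[5]*a0**2 + curp[6]*bprp*a0)
print(k * a0)                # ~0.10 mag of extinction in G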
#!/usr/bin/env python

import sys
import argparse

import numpy as np
import netCDF4 as nc
import matplotlib.pyplot as plt
from matplotlib import animation

"""
This script makes a fluid animation using NetCDF data.
"""

def main():
    # Set up arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', help="Input file containing our data")
    parser.add_argument('field_name', help="Data field to animate")
    args = parser.parse_args()

    # Open the netCDF file (args.input_file).
    # Note: args.field_name is parsed but the variable name is currently
    # hard-coded to 'vorticity_z' below.
    f = nc.Dataset(args.input_file)
    vorticity = f.variables['vorticity_z']
    # vorticity dimensions: Time, st_ocean (pressure), yt_ocean (latitude),
    # xt_ocean (longitude)
    vorticity = vorticity[:]

    fig = plt.figure()
    images = []

    # Generate one frame per time step at pressure level = 0.
    for t in range(0, vorticity.shape[0]):
        img = plt.imshow(vorticity[t, 0, :, :])
        images.append([img])

        # To show a plot immediately, use:
        # plt.show()

        # To save each frame as an image file:
        # plt.savefig('vorticity' + str(t).zfill(3) + '.png')
        # plt.close()

    ani = animation.ArtistAnimation(fig, images, interval=20)
    plt.show()

    # Close the netCDF file.
    f.close()

    print("Completed...")
    # Return 0 so that sys.exit() signals success (returning True would
    # exit with status 1).
    return 0

if __name__ == "__main__":
    sys.exit(main())
LAMBDA-HYPERON/fluids-movie
make_movie.py
Python
apache-2.0
1,245
[ "NetCDF" ]
84ecd28f6a587405e62e5d472b08b9984214dd97865428a592ae035780d849c7
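Aside: the script above only displays the animation interactively via plt.show(). To write the animation to disk instead, matplotlib's animation writers can be used. A sketch, assuming an ffmpeg binary is installed; the output name and frame rate are arbitrary choices:

# Place before (or instead of) plt.show(); requires ffmpeg on the PATH.
ani.save('vorticity.mp4', writer='ffmpeg', fps=25, dpi=150)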
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
import scipy.signal
import cv2 as cv


def derivative_of_Gaussian_win(N, sigma=1.0):
    assert N % 2 == 1
    x = np.expand_dims(np.array(scipy.signal.windows.gaussian(N, sigma)), axis=0).astype(np.double)
    x = x.T @ x
    dx = np.gradient(x, axis=1)
    dy = np.gradient(x, axis=0)
    return dx, dy


class TFVariationalRefinement:
    def __init__(self, I0, I1, Rp2c, Tp2c, P0cam, P1cam, XX, YY, baseline, mask):
        self.XXshape = XX.shape
        self.Xpts = tf.constant(np.expand_dims(XX.flatten(), axis=0)/baseline, dtype=tf.float32)
        self.Ypts = tf.constant(np.expand_dims(YY.flatten(), axis=0)/baseline, dtype=tf.float32)
        self.Rp2c = tf.constant(Rp2c, dtype=tf.float32)
        self.Tp2c = tf.constant(Tp2c, dtype=tf.float32)
        self.P0cam = tf.constant(P0cam, dtype=tf.float32)
        self.P1cam = tf.constant(P1cam, dtype=tf.float32)
        self.I0 = tf.constant(I0[np.newaxis, :, :, np.newaxis], dtype=tf.float32)
        self.I1 = tf.constant(I1[np.newaxis, :, :, np.newaxis], dtype=tf.float32)
        self.mask = tf.constant(mask, dtype=tf.float32)
        self.mask_reduced = tf.constant(cv.erode(mask, np.ones((11, 11))).astype(np.float32))
        sobel_x, sobel_y = derivative_of_Gaussian_win(7, sigma=0.8)
        sobel_kernels = np.concatenate([sobel_x[..., np.newaxis, np.newaxis],
                                        sobel_y[..., np.newaxis, np.newaxis]], axis=-1)
        self.sobel_kernels = tf.constant(sobel_kernels, dtype=tf.float32)

    def sample_images(self, Z):
        p3d = tf.concat([self.Xpts, self.Ypts, tf.reshape(Z, self.Xpts.shape)], axis=0)
        p3d_cam = self.Rp2c @ p3d + self.Tp2c
        surfpts_cam0 = self.P0cam @ tf.concat([p3d_cam, tf.ones((1, p3d_cam.shape[1]))], axis=0)
        surfpts_cam0 /= surfpts_cam0[2, :]
        surfpts_cam1 = self.P1cam @ tf.concat([p3d_cam, tf.ones((1, p3d_cam.shape[1]))], axis=0)
        surfpts_cam1 /= surfpts_cam1[2, :]
        cam0_p2 = tf.transpose(surfpts_cam0[:2, ...])
        cam1_p2 = tf.transpose(surfpts_cam1[:2, ...])
        I0_samp = tf.reshape(tfa.image.interpolate_bilinear(self.I0, tf.expand_dims(cam0_p2, axis=0), indexing="xy"), self.XXshape) * self.mask
        I1_samp = tf.reshape(tfa.image.interpolate_bilinear(self.I1, tf.expand_dims(cam1_p2, axis=0), indexing="xy"), self.XXshape) * self.mask
        return I0_samp, I1_samp, cam0_p2, cam1_p2

    def compute_Z_gradient(self, Z):
        Z_grad = tf.nn.conv2d(tf.expand_dims(tf.expand_dims(Z, axis=0), axis=-1),
                              self.sobel_kernels, strides=1, padding="SAME")
        Z_dx = Z_grad[0, :, :, 0]  # *self.mask_reduced
        Z_dy = Z_grad[0, :, :, 1]  # *self.mask_reduced
        return Z_dx, Z_dy

    def compute_loss(self, Z):
        I0_samp, I1_samp, _, _ = self.sample_images(Z)
        I0_mean = tf.reduce_mean(I0_samp)
        I0_std = tf.math.reduce_std(I0_samp)
        I1_mean = tf.reduce_mean(I1_samp)
        I1_std = tf.math.reduce_std(I1_samp)
        # I0_samp_norm = (I0_samp-I0_mean) / I0_std
        # I1_samp_norm = (I1_samp-I1_mean) / I1_std
        I0_samp_norm = (I0_samp)/255.0
        I1_samp_norm = (I1_samp)/255.0
        Z_dx, Z_dy = self.compute_Z_gradient(Z)
        data_loss = tf.reduce_mean(tf.square(I0_samp_norm - I1_samp_norm))
        smoothness_loss = tf.reduce_mean(tf.square(Z_dx) + tf.square(Z_dy))
        # print("data: ", data_loss)
        # print("smoothness: ", smoothness_loss)
        # alpha = 100.0
        return data_loss, smoothness_loss

    def optimize(self, Zinit, max_iters=400, alpha=10):
        print("Zinit shape", Zinit.shape)
        Zfullshape = Zinit.shape
        Zinit = cv.resize(Zinit, (Zinit.shape[0]//2, Zinit.shape[1]//2), interpolation=cv.INTER_LINEAR)
        # Z = tf.Variable( Zinit, dtype=tf.float32 )
        # Z = tf.Variable( np.zeros( (Zinit.shape[0]//8, Zinit.shape[1]//8), dtype=np.float32) )
        Z = tf.Variable(Zinit, dtype=tf.float32)
        opt = tf.keras.optimizers.Adam(learning_rate=1E-3, epsilon=1E-7)
        myself = self

        def energy():
            Zresized = tf.image.resize(Z[tf.newaxis, :, :, tf.newaxis], Zfullshape)
            dloss, sloss = myself.compute_loss(Zresized[0, ..., 0])
            # print("Data loss: ", dloss.numpy())
            return dloss + alpha*sloss

        prev_loss = energy().numpy()
        print("Optimizing: Initial loss: %3.5f" % prev_loss)
        print("===============================================")
        print("   It      Loss      DLoss")
        for ii in range(max_iters):
            step_count = opt.minimize(energy, [Z]).numpy()
            if ii % 10 == 0:
                current_loss = energy().numpy()
                delta_loss = np.abs(prev_loss - current_loss)
                print("%05d    %3.5f    %3.5f" % (ii, current_loss, delta_loss))
                # if delta_loss < 1E-6:
                #     break
                prev_loss = current_loss

        Zresized = tf.image.resize(Z[tf.newaxis, :, :, tf.newaxis], Zfullshape)
        return (Zresized[0, ..., 0]*self.mask).numpy()
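Aside: derivative_of_Gaussian_win above builds a 2-D Gaussian as the outer product of a 1-D window with itself and differentiates it along each axis, producing the pair of smoothed-derivative kernels that compute_Z_gradient convolves with Z. A small standalone sanity check (the helper is restated so the snippet runs on its own):

import numpy as np
import scipy.signal

def derivative_of_Gaussian_win(N, sigma=1.0):
    assert N % 2 == 1
    x = np.expand_dims(scipy.signal.windows.gaussian(N, sigma), axis=0)
    x = x.T @ x                       # outer product -> 2-D Gaussian
    return np.gradient(x, axis=1), np.gradient(x, axis=0)

dx, dy = derivative_of_Gaussian_win(7, sigma=0.8)
print(dx.shape, dy.shape)             # (7, 7) (7, 7)
print(np.allclose(dx, dy.T))          # True: the x/y kernels are transposes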
fbergama/wass
gridding/TFVariationalRefinement.py
Python
gpl-3.0
5,232
[ "Gaussian" ]
070050bd3c08bf49f8a32b67107ed348125a1c7b0918fb4537569a90a9e4c16f
"""
# Notes:
- This simulation seeks to emulate the CUBA benchmark simulations of
  (Brette et al. 2007) using the Brian2 simulator for speed benchmark
  comparison to DynaSim. However, this simulation does NOT include synapses,
  for better comparison to Figure 5 of (Goodman and Brette, 2008).
- The time taken to simulate will be indicated in the stdout log file
  '~/batchdirs/brian2_benchmark_CUBA_nosyn_compiled_500/pbsout/brian2_benchmark_CUBA_nosyn_compiled_500.out'
- Note that this code has been slightly modified from the original
  (Brette et al. 2007) benchmarking code, available here on ModelDB:
  https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
  in order to work with version 2 of the Brian simulator (aka Brian2), and
  also modified to change the model being benchmarked, etc.

# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
  Simulation of networks of spiking neurons: A review of tools and
  strategies. Journal of Computational Neuroscience 2007;23:349–98.
  doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in
  Python. Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *

set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']

# Parameters
cells = 500
defaultclock.dt = 0.01*ms
taum = 20*ms
Vt = -50*mV
Vr = -60*mV
El = -49*mV

# The model
eqs = Equations('''
dv/dt = ((v-El))/taum : volt
''')

P = NeuronGroup(cells, model=eqs, threshold="v>Vt", reset="v=Vr",
                refractory=5*ms, method='euler')
proportion = int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]

# Initialization
P.v = Vr

# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')

# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()

# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
asoplata/dynasim-benchmark-brette-2007
Brian2/brian2_benchmark_CUBA_nosyn_compiled_500.py
Python
gpl-3.0
2,137
[ "Brian" ]
d7b89052599cbabca2566f773ee8b007f850a5cfeefe4258defe604e17fe4dda
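Aside: with method='euler', Brian2 advances the state with a forward-Euler step. A NumPy sketch of the equivalent update loop for the voltage equation above, with the same threshold/reset bookkeeping (units in volts and seconds; the 5 ms refractory period is omitted for brevity):

import numpy as np

dt, taum = 0.01e-3, 20e-3            # defaultclock.dt, taum
Vt, Vr, El = -50e-3, -60e-3, -49e-3

v = np.full(500, Vr)                 # P.v = Vr for 500 cells
for _ in range(int(0.5 / dt)):       # run(0.5 * second)
    v += dt * (v - El) / taum        # Euler step of dv/dt = (v-El)/taum
    spiked = v > Vt                  # threshold="v>Vt"
    v[spiked] = Vr                   # reset="v=Vr"

Since v starts at Vr, below El, the drift term (v - El)/taum is negative, so in this synapse-free variant the voltages move away from threshold and no spikes occur; the run exercises integration speed rather than network activity, consistent with the benchmarking purpose stated in the header.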
######################################################################## # File : InstallTools.py # Author : Ricardo Graciani ######################################################################## """ Collection of Tools for installation of DIRAC components: MySQL, DB's, Services's, Agents It only makes use of defaults in LocalInstallation Section in dirac.cfg The Following Options are used:: /DIRAC/Setup: Setup to be used for any operation /LocalInstallation/InstanceName: Name of the Instance for the current Setup (default /DIRAC/Setup) /LocalInstallation/LogLevel: LogLevel set in "run" script for all components installed /LocalInstallation/RootPath: Used instead of rootPath in "run" script if defined (if links are used to named versions) /LocalInstallation/InstancePath: Location where runit and startup directories are created (default rootPath) /LocalInstallation/UseVersionsDir: DIRAC is installed under versions/<Versioned Directory> with a link from pro (This option overwrites RootPath and InstancePath) /LocalInstallation/Host: Used when build the URL to be published for the installed service (default: socket.getfqdn()) /LocalInstallation/RunitDir: Location where runit directory is created (default InstancePath/runit) /LocalInstallation/StartupDir: Location where startup directory is created (default InstancePath/startup) /LocalInstallation/MySQLDir: Location where mysql databases are created (default InstancePath/mysql) /LocalInstallation/Database/User: (default Dirac) /LocalInstallation/Database/Password: (must be set for SystemAdministrator Service to work) /LocalInstallation/Database/RootPwd: (must be set for SystemAdministrator Service to work) /LocalInstallation/Database/Host: (must be set for SystemAdministrator Service to work) /LocalInstallation/Database/MySQLSmallMem: Configure a MySQL with small memory requirements for testing purposes innodb_buffer_pool_size=200MB /LocalInstallation/Database/MySQLLargeMem: Configure a MySQL with high memory requirements for production purposes innodb_buffer_pool_size=10000MB The setupSite method (used by the dirac-setup-site command) will use the following info:: /LocalInstallation/Systems: List of Systems to be defined for this instance in the CS (default: Configuration, Framework) /LocalInstallation/Databases: List of Databases to be installed and configured /LocalInstallation/Services: List of System/ServiceName to be setup /LocalInstallation/Agents: List of System/AgentName to be setup /LocalInstallation/WebPortal: Boolean to setup the Web Portal (default no) /LocalInstallation/ConfigurationMaster: Boolean, requires Configuration/Server to be given in the list of Services (default: no) /LocalInstallation/PrivateConfiguration: Boolean, requires Configuration/Server to be given in the list of Services (default: no) If a Master Configuration Server is being installed the following Options can be used:: /LocalInstallation/ConfigurationName: Name of the Configuration (default: Setup ) /LocalInstallation/AdminUserName: Name of the Admin user (default: None ) /LocalInstallation/AdminUserDN: DN of the Admin user certificate (default: None ) /LocalInstallation/AdminUserEmail: Email of the Admin user (default: None ) /LocalInstallation/AdminGroupName: Name of the Admin group (default: dirac_admin ) /LocalInstallation/HostDN: DN of the host certificate (default: None ) /LocalInstallation/VirtualOrganization: Name of the main Virtual Organization (default: None) """ __RCSID__ = "$Id$" # import datetime import os, re, glob, stat, time, shutil, 
socket gDefaultPerms = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH import DIRAC from DIRAC import rootPath from DIRAC import gConfig from DIRAC import gLogger from DIRAC.Core.Utilities.Subprocess import systemCall from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR from DIRAC.Core.Utilities.CFG import CFG from DIRAC.Core.Utilities.Version import getVersion from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI from DIRAC.ConfigurationSystem.Client.Helpers import cfgPath, cfgPathToList, cfgInstallPath, \ cfgInstallSection, ResourcesDefaults, CSGlobals from DIRAC.Core.Security.Properties import ALARMS_MANAGEMENT, SERVICE_ADMINISTRATOR, \ CS_ADMINISTRATOR, JOB_ADMINISTRATOR, \ FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR, \ NORMAL_USER, TRUSTED_HOST from DIRAC.ConfigurationSystem.Client import PathFinder from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader from DIRAC.Core.Base.AgentModule import AgentModule from DIRAC.Core.Base.ExecutorModule import ExecutorModule from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC.Core.Utilities.PrettyPrint import printTable from DIRAC.Core.Utilities.Platform import getPlatformString # On command line tools this can be set to True to abort after the first error. exitOnError = False # First some global defaults gLogger.debug( 'DIRAC Root Path =', rootPath ) def loadDiracCfg( verbose = False ): """ Read again defaults from dirac.cfg """ global localCfg, cfgFile, setup, instance, logLevel, linkedRootPath, host global basePath, instancePath, runitDir, startDir, controlDir global db, mysqlDir, mysqlDbDir, mysqlLogDir, mysqlMyOrg, mysqlMyCnf, mysqlStartupScript global mysqlRootPwd, mysqlUser, mysqlPassword, mysqlHost, mysqlMode global mysqlSmallMem, mysqlLargeMem, mysqlPort, mysqlRootUser global monitoringClient global COMPONENT_TYPES from DIRAC.Core.Utilities.Network import getFQDN localCfg = CFG() cfgFile = os.path.join( rootPath, 'etc', 'dirac.cfg' ) try: localCfg.loadFromFile( cfgFile ) except Exception: gLogger.always( "Can't load ", cfgFile ) gLogger.always( "Might be OK if setting up the site" ) setup = localCfg.getOption( cfgPath( 'DIRAC', 'Setup' ), '' ) instance = localCfg.getOption( cfgInstallPath( 'InstanceName' ), setup ) logLevel = localCfg.getOption( cfgInstallPath( 'LogLevel' ), 'INFO' ) linkedRootPath = localCfg.getOption( cfgInstallPath( 'RootPath' ), rootPath ) useVersionsDir = localCfg.getOption( cfgInstallPath( 'UseVersionsDir' ), False ) host = localCfg.getOption( cfgInstallPath( 'Host' ), getFQDN() ) basePath = os.path.dirname( rootPath ) instancePath = localCfg.getOption( cfgInstallPath( 'InstancePath' ), rootPath ) if useVersionsDir: # This option takes precedence instancePath = os.path.dirname( os.path.dirname( rootPath ) ) linkedRootPath = os.path.join( instancePath, 'pro' ) if verbose: gLogger.notice( 'Using Instance Base Dir at', instancePath ) runitDir = os.path.join( instancePath, 'runit' ) runitDir = localCfg.getOption( cfgInstallPath( 'RunitDir' ), runitDir ) if verbose: gLogger.notice( 'Using Runit Dir at', runitDir ) startDir = os.path.join( instancePath, 'startup' ) startDir = localCfg.getOption( cfgInstallPath( 'StartupDir' ), startDir ) if verbose: gLogger.notice( 'Using Startup Dir at', startDir ) controlDir = os.path.join( instancePath, 'control' ) controlDir = 
localCfg.getOption( cfgInstallPath( 'ControlDir' ), controlDir ) if verbose: gLogger.notice( 'Using Control Dir at', controlDir ) # Now some MySQL default values db = {} mysqlDir = os.path.join( instancePath, 'mysql' ) mysqlDir = localCfg.getOption( cfgInstallPath( 'MySQLDir' ), mysqlDir ) if verbose: gLogger.notice( 'Using MySQL Dir at', mysqlDir ) mysqlDbDir = os.path.join( mysqlDir, 'db' ) mysqlLogDir = os.path.join( mysqlDir, 'log' ) mysqlMyOrg = os.path.join( rootPath, 'mysql', 'etc', 'my.cnf' ) mysqlMyCnf = os.path.join( mysqlDir, '.my.cnf' ) mysqlStartupScript = os.path.join( rootPath, 'mysql', 'share', 'mysql', 'mysql.server' ) mysqlRootPwd = localCfg.getOption( cfgInstallPath( 'Database', 'RootPwd' ), mysqlRootPwd ) if verbose and mysqlRootPwd: gLogger.notice( 'Reading Root MySQL Password from local configuration' ) mysqlUser = localCfg.getOption( cfgInstallPath( 'Database', 'User' ), '' ) if mysqlUser: if verbose: gLogger.notice( 'Reading MySQL User from local configuration' ) else: mysqlUser = 'Dirac' mysqlPassword = localCfg.getOption( cfgInstallPath( 'Database', 'Password' ), mysqlPassword ) if verbose and mysqlPassword: gLogger.notice( 'Reading %s MySQL Password from local configuration ' % mysqlUser ) mysqlHost = localCfg.getOption( cfgInstallPath( 'Database', 'Host' ), '' ) if mysqlHost: if verbose: gLogger.notice( 'Using MySQL Host from local configuration', mysqlHost ) else: # if it is not defined use the same as for dirac services mysqlHost = host mysqlPort = localCfg.getOption( cfgInstallPath( 'Database', 'Port' ), 0 ) if mysqlPort: if verbose: gLogger.notice( 'Using MySQL Port from local configuration ', mysqlPort ) else: # if it is not defined use the same as for dirac services mysqlPort = 3306 mysqlRootUser = localCfg.getOption( cfgInstallPath( 'Database', 'RootUser' ), '' ) if mysqlRootUser: if verbose: gLogger.notice( 'Using MySQL root user from local configuration ', mysqlRootUser ) else: # if it is not defined use root mysqlRootUser = 'root' mysqlMode = localCfg.getOption( cfgInstallPath( 'Database', 'MySQLMode' ), '' ) if verbose and mysqlMode: gLogger.notice( 'Configuring MySQL server as %s' % mysqlMode ) mysqlSmallMem = localCfg.getOption( cfgInstallPath( 'Database', 'MySQLSmallMem' ), False ) if verbose and mysqlSmallMem: gLogger.notice( 'Configuring MySQL server for Low Memory usage' ) mysqlLargeMem = localCfg.getOption( cfgInstallPath( 'Database', 'MySQLLargeMem' ), False ) if verbose and mysqlLargeMem: gLogger.notice( 'Configuring MySQL server for Large Memory usage' ) monitoringClient = ComponentMonitoringClient() if verbose and monitoringClient: gLogger.notice( 'Client configured for Component Monitoring' ) # FIXME: we probably need a better way to do this mysqlRootPwd = '' mysqlPassword = '' mysqlMode = '' localCfg = None cfgFile = '' setup = '' instance = '' logLevel = '' linkedRootPath = '' host = '' basePath = '' instancePath = '' runitDir = '' startDir = '' db = {} mysqlDir = '' mysqlDbDir = '' mysqlLogDir = '' mysqlMyOrg = '' mysqlMyCnf = '' mysqlStartupScript = '' mysqlUser = '' mysqlHost = '' mysqlPort = '' mysqlRootUser = '' mysqlSmallMem = '' mysqlLargeMem = '' COMPONENT_TYPES = [ 'service', 'agent', 'executor' ] loadDiracCfg() def getInfo( extensions ): result = getVersion() if not result['OK']: return result rDict = result['Value'] if setup: rDict['Setup'] = setup else: rDict['Setup'] = 'Unknown' return S_OK( rDict ) def getExtensions(): """ Get the list of installed extensions """ initList = glob.glob( os.path.join( rootPath, '*DIRAC', 
'__init__.py' ) ) extensions = [ os.path.basename( os.path.dirname( k ) ) for k in initList] try: extensions.remove( 'DIRAC' ) except Exception: error = 'DIRAC is not properly installed' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) return S_OK( extensions ) def _addCfgToDiracCfg( cfg, verbose = False ): """ Merge cfg into existing dirac.cfg file """ global localCfg if str( localCfg ): newCfg = localCfg.mergeWith( cfg ) else: newCfg = cfg result = newCfg.writeToFile( cfgFile ) if not result: return result loadDiracCfg( verbose ) return result def _addCfgToCS( cfg ): """ Merge cfg into central CS """ cfgClient = CSAPI() result = cfgClient.downloadCSData() if not result['OK']: return result result = cfgClient.mergeFromCFG( cfg ) if not result['OK']: return result result = cfgClient.commit() return result def _addCfgToLocalCS( cfg ): """ Merge cfg into local CS """ csName = localCfg.getOption( cfgPath( 'DIRAC', 'Configuration', 'Name' ) , '' ) if not csName: error = 'Missing %s' % cfgPath( 'DIRAC', 'Configuration', 'Name' ) if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) csCfg = CFG() csFile = os.path.join( rootPath, 'etc', '%s.cfg' % csName ) if os.path.exists( csFile ): csCfg.loadFromFile( csFile ) if str( csCfg ): newCfg = csCfg.mergeWith( cfg ) else: newCfg = cfg return newCfg.writeToFile( csFile ) def _removeOptionFromCS( path ): """ Delete options from central CS """ cfgClient = CSAPI() result = cfgClient.downloadCSData() if not result['OK']: return result result = cfgClient.delOption( path ) if not result['OK']: return result result = cfgClient.commit() return result def _removeSectionFromCS( path ): """ Delete setions from central CS """ cfgClient = CSAPI() result = cfgClient.downloadCSData() if not result['OK']: return result result = cfgClient.delSection( path ) if not result['OK']: return result result = cfgClient.commit() return result def _getCentralCfg( installCfg ): """ Create the skeleton of central Cfg for an initial Master CS """ # First copy over from installation cfg centralCfg = CFG() # DIRAC/Extensions extensions = localCfg.getOption( cfgInstallPath( 'Extensions' ), [] ) while 'Web' in list( extensions ): extensions.remove( 'Web' ) centralCfg.createNewSection( 'DIRAC', '' ) if extensions: centralCfg['DIRAC'].addKey( 'Extensions', ','.join( extensions ), '' ) vo = localCfg.getOption( cfgInstallPath( 'VirtualOrganization' ), '' ) if vo: centralCfg['DIRAC'].addKey( 'VirtualOrganization', vo, '' ) for section in [ 'Systems', 'Resources', 'Resources/Sites', 'Resources/Sites/DIRAC', 'Resources/Sites/LCG', 'Operations', 'Website', 'Registry' ]: if installCfg.isSection( section ): centralCfg.createNewSection( section, contents = installCfg[section] ) # Now try to add things from the Installation section # Registry adminUserName = localCfg.getOption( cfgInstallPath( 'AdminUserName' ), '' ) adminUserDN = localCfg.getOption( cfgInstallPath( 'AdminUserDN' ), '' ) adminUserEmail = localCfg.getOption( cfgInstallPath( 'AdminUserEmail' ), '' ) adminGroupName = localCfg.getOption( cfgInstallPath( 'AdminGroupName' ), 'dirac_admin' ) hostDN = localCfg.getOption( cfgInstallPath( 'HostDN' ), '' ) defaultGroupName = 'user' adminGroupProperties = [ ALARMS_MANAGEMENT, SERVICE_ADMINISTRATOR, CS_ADMINISTRATOR, JOB_ADMINISTRATOR, FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR ] defaultGroupProperties = [ NORMAL_USER ] defaultHostProperties = [ TRUSTED_HOST, CS_ADMINISTRATOR, JOB_ADMINISTRATOR, FULL_DELEGATION, PROXY_MANAGEMENT, 
OPERATOR ] for section in ( cfgPath( 'Registry' ), cfgPath( 'Registry', 'Users' ), cfgPath( 'Registry', 'Groups' ), cfgPath( 'Registry', 'Hosts' ) ): if not centralCfg.isSection( section ): centralCfg.createNewSection( section ) if adminUserName: if not ( adminUserDN and adminUserEmail ): gLogger.error( 'AdminUserName is given but DN or Mail is missing it will not be configured' ) else: for section in [ cfgPath( 'Registry', 'Users', adminUserName ), cfgPath( 'Registry', 'Groups', defaultGroupName ), cfgPath( 'Registry', 'Groups', adminGroupName ) ]: if not centralCfg.isSection( section ): centralCfg.createNewSection( section ) if centralCfg['Registry'].existsKey( 'DefaultGroup' ): centralCfg['Registry'].deleteKey( 'DefaultGroup' ) centralCfg['Registry'].addKey( 'DefaultGroup', defaultGroupName, '' ) if centralCfg['Registry']['Users'][adminUserName].existsKey( 'DN' ): centralCfg['Registry']['Users'][adminUserName].deleteKey( 'DN' ) centralCfg['Registry']['Users'][adminUserName].addKey( 'DN', adminUserDN, '' ) if centralCfg['Registry']['Users'][adminUserName].existsKey( 'Email' ): centralCfg['Registry']['Users'][adminUserName].deleteKey( 'Email' ) centralCfg['Registry']['Users'][adminUserName].addKey( 'Email' , adminUserEmail, '' ) # Add Admin User to Admin Group and default group for group in [adminGroupName, defaultGroupName]: if not centralCfg['Registry']['Groups'][group].isOption( 'Users' ): centralCfg['Registry']['Groups'][group].addKey( 'Users', '', '' ) users = centralCfg['Registry']['Groups'][group].getOption( 'Users', [] ) if adminUserName not in users: centralCfg['Registry']['Groups'][group].appendToOption( 'Users', ', %s' % adminUserName ) if not centralCfg['Registry']['Groups'][group].isOption( 'Properties' ): centralCfg['Registry']['Groups'][group].addKey( 'Properties', '', '' ) properties = centralCfg['Registry']['Groups'][adminGroupName].getOption( 'Properties', [] ) for prop in adminGroupProperties: if prop not in properties: properties.append( prop ) centralCfg['Registry']['Groups'][adminGroupName].appendToOption( 'Properties', ', %s' % prop ) properties = centralCfg['Registry']['Groups'][defaultGroupName].getOption( 'Properties', [] ) for prop in defaultGroupProperties: if prop not in properties: properties.append( prop ) centralCfg['Registry']['Groups'][defaultGroupName].appendToOption( 'Properties', ', %s' % prop ) # Add the master Host description if hostDN: hostSection = cfgPath( 'Registry', 'Hosts', host ) if not centralCfg.isSection( hostSection ): centralCfg.createNewSection( hostSection ) if centralCfg['Registry']['Hosts'][host].existsKey( 'DN' ): centralCfg['Registry']['Hosts'][host].deleteKey( 'DN' ) centralCfg['Registry']['Hosts'][host].addKey( 'DN', hostDN, '' ) if not centralCfg['Registry']['Hosts'][host].isOption( 'Properties' ): centralCfg['Registry']['Hosts'][host].addKey( 'Properties', '', '' ) properties = centralCfg['Registry']['Hosts'][host].getOption( 'Properties', [] ) for prop in defaultHostProperties: if prop not in properties: properties.append( prop ) centralCfg['Registry']['Hosts'][host].appendToOption( 'Properties', ', %s' % prop ) # Operations if adminUserEmail: operationsCfg = __getCfg( cfgPath( 'Operations', 'Defaults', 'EMail' ), 'Production', adminUserEmail ) centralCfg = centralCfg.mergeWith( operationsCfg ) operationsCfg = __getCfg( cfgPath( 'Operations', 'Defaults', 'EMail' ), 'Logging', adminUserEmail ) centralCfg = centralCfg.mergeWith( operationsCfg ) # Website websiteCfg = __getCfg( cfgPath( 'Website', 'Authorization', 'systems', 
'configuration' ), 'Default', 'all' ) websiteCfg['Website'].addKey( 'DefaultGroups', ', '.join( ['visitor', defaultGroupName, adminGroupName] ), '' ) websiteCfg['Website'].addKey( 'DefaultSetup', setup, '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'showHistory' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'commitConfiguration' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'showCurrentDiff' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'showDiff' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'rollbackToVersion' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'manageRemoteConfig' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].appendToOption( 'manageRemoteConfig' , ', ServiceAdministrator' ) centralCfg = centralCfg.mergeWith( websiteCfg ) return centralCfg def __getCfg( section, option = '', value = '' ): """ Create a new Cfg with given info """ if not section: return None cfg = CFG() sectionList = [] for sect in cfgPathToList( section ): if not sect: continue sectionList.append( sect ) cfg.createNewSection( cfgPath( *sectionList ) ) if not sectionList: return None if option and value: sectionList.append( option ) cfg.setOption( cfgPath( *sectionList ), value ) return cfg def addOptionToDiracCfg( option, value ): """ Add Option to dirac.cfg """ optionList = cfgPathToList( option ) optionName = optionList[-1] section = cfgPath( *optionList[:-1] ) cfg = __getCfg( section, optionName, value ) if not cfg: return S_ERROR( 'Wrong option: %s = %s' % ( option, value ) ) if _addCfgToDiracCfg( cfg ): return S_OK() return S_ERROR( 'Could not merge %s=%s with local configuration' % ( option, value ) ) def removeComponentOptionsFromCS( system, component, mySetup = setup ): """ Remove the section with Component options from the CS, if possible """ result = monitoringClient.getInstallations( { 'UnInstallationTime': None, 'Instance': component }, { 'System': system }, {}, True ) if not result[ 'OK' ]: return result installations = result[ 'Value' ] instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system ) if gConfig: compInstance = gConfig.getValue( instanceOption, '' ) else: compInstance = localCfg.getOption( instanceOption, '' ) if len( installations ) == 1: remove = True removeMain = False installation = installations[0] cType = installation[ 'Component' ][ 'Type' ] # Is the component a rename of another module? 
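# Illustration (hypothetical data, not from this module): an installation record
# like { 'Instance': 'MyJobAgent', 'Component': { 'Module': 'JobAgent' } } counts
# as a rename, whereas Instance == Module means the component runs under its own
# name; the checks below use this to decide which CS sections can safely go.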
if installation[ 'Instance' ] == installation[ 'Component' ][ 'Module' ]: isRenamed = False else: isRenamed = True result = monitoringClient.getInstallations( { 'UnInstallationTime': None }, { 'System': system, 'Module': installation[ 'Component' ][ 'Module' ] }, {}, True ) if not result[ 'OK' ]: return result installations = result[ 'Value' ] # If the component is not renamed we keep it in the CS if there are any renamed ones if not isRenamed: if len( installations ) > 1: remove = False # If the component is renamed and is the last one, we remove the entry for the main module as well else: if len( installations ) == 1: removeMain = True if remove: result = _removeSectionFromCS( cfgPath( 'Systems', system, compInstance, installation[ 'Component' ][ 'Type' ].title() + 's', component ) ) if not result[ 'OK' ]: return result if not isRenamed and cType == 'service': result = _removeOptionFromCS( cfgPath( 'Systems', system, compInstance, 'URLs', component ) ) if not result[ 'OK' ]: return result if removeMain: result = _removeSectionFromCS( cfgPath( 'Systems', system, compInstance, installation[ 'Component' ][ 'Type' ].title() + 's', installation[ 'Component' ][ 'Module' ] ) ) if not result[ 'OK' ]: return result if cType == 'service': result = _removeOptionFromCS( cfgPath( 'Systems', system, compInstance, 'URLs', installation[ 'Component' ][ 'Module' ] ) ) if not result[ 'OK' ]: return result return S_OK( 'Successfully removed entries from CS' ) return S_OK( 'Instances of this component still exist. It won\'t be completely removed' ) def addDefaultOptionsToCS( gConfig, componentType, systemName, component, extensions, mySetup = setup, specialOptions = {}, overwrite = False, addDefaultOptions = True ): """ Add the section with the component options to the CS """ if gConfig: gConfig.forceRefresh() system = systemName.replace( 'System', '' ) instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system ) if gConfig: compInstance = gConfig.getValue( instanceOption, '' ) else: compInstance = localCfg.getOption( instanceOption, '' ) if not compInstance: return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) ) result = _getSectionName( componentType ) if not result[ 'OK' ]: return result sectionName = result[ 'Value' ] # Check if the component CS options exist addOptions = True componentSection = cfgPath( 'Systems', system, compInstance, sectionName, component ) if not overwrite: if gConfig: result = gConfig.getOptions( componentSection ) if result['OK']: addOptions = False if not addOptions: return S_OK( 'Component options already exist' ) # Add the component options now result = getComponentCfg( componentType, system, component, compInstance, extensions, specialOptions, addDefaultOptions ) if not result['OK']: return result compCfg = result['Value'] gLogger.notice( 'Adding to CS', '%s %s/%s' % ( componentType, system, component ) ) resultAddToCFG = _addCfgToCS( compCfg ) if componentType == 'executor': # Is it a container ? 
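# Illustrative CS snippet (hypothetical) for an executor container: the Load
# option lists the sub-modules that the loop below registers recursively, e.g.
#   Systems/WorkloadManagement/Production/Executors/Optimizers/Load = JobPath, JobSanity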
    execList = compCfg.getOption( '%s/Load' % componentSection, [] )
    for element in execList:
      result = addDefaultOptionsToCS( gConfig, componentType, systemName, element, extensions,
                                      setup, {}, overwrite )
      resultAddToCFG.setdefault( 'Modules', {} )
      resultAddToCFG['Modules'][element] = result['OK']
  return resultAddToCFG

def addDefaultOptionsToComponentCfg( componentType, systemName, component, extensions ):
  """ Add default component options to the local component cfg """
  system = systemName.replace( 'System', '' )
  instanceOption = cfgPath( 'DIRAC', 'Setups', setup, system )
  compInstance = localCfg.getOption( instanceOption, '' )
  if not compInstance:
    return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) )
  # Add the component options now
  result = getComponentCfg( componentType, system, component, compInstance, extensions )
  if not result['OK']:
    return result
  compCfg = result['Value']
  compCfgFile = os.path.join( rootPath, 'etc', '%s_%s.cfg' % ( system, component ) )
  return compCfg.writeToFile( compCfgFile )

def addCfgToComponentCfg( componentType, systemName, component, cfg ):
  """ Add some extra configuration to the local component cfg """
  result = _getSectionName( componentType )
  if not result[ 'OK' ]:
    return result
  sectionName = result[ 'Value' ]
  if not cfg:
    return S_OK()
  system = systemName.replace( 'System', '' )
  instanceOption = cfgPath( 'DIRAC', 'Setups', setup, system )
  compInstance = localCfg.getOption( instanceOption, '' )
  if not compInstance:
    return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) )
  compCfgFile = os.path.join( rootPath, 'etc', '%s_%s.cfg' % ( system, component ) )
  compCfg = CFG()
  if os.path.exists( compCfgFile ):
    compCfg.loadFromFile( compCfgFile )
  sectionPath = cfgPath( 'Systems', system, compInstance, sectionName )
  newCfg = __getCfg( sectionPath )
  newCfg.createNewSection( cfgPath( sectionPath, component ), 'Added by InstallTools', cfg )
  if newCfg.writeToFile( compCfgFile ):
    return S_OK( compCfgFile )
  error = 'Can not write %s' % compCfgFile
  gLogger.error( error )
  return S_ERROR( error )

def getComponentCfg( componentType, system, component, compInstance, extensions,
                     specialOptions = {}, addDefaultOptions = True ):
  """ Get the CFG object of the component configuration """
  result = _getSectionName( componentType )
  if not result[ 'OK' ]:
    return result
  sectionName = result[ 'Value' ]
  componentModule = component
  if "Module" in specialOptions:
    componentModule = specialOptions['Module']
  compCfg = CFG()
  if addDefaultOptions:
    extensionsDIRAC = [ x + 'DIRAC' for x in extensions ] + extensions
    for ext in extensionsDIRAC + ['DIRAC']:
      cfgTemplatePath = os.path.join( rootPath, ext, '%sSystem' % system, 'ConfigTemplate.cfg' )
      if os.path.exists( cfgTemplatePath ):
        gLogger.notice( 'Loading configuration template', cfgTemplatePath )
        # Look up the component in this template
        loadCfg = CFG()
        loadCfg.loadFromFile( cfgTemplatePath )
        compCfg = loadCfg.mergeWith( compCfg )
    compPath = cfgPath( sectionName, componentModule )
    if not compCfg.isSection( compPath ):
      error = 'Can not find %s in template' % compPath
      gLogger.error( error )
      if exitOnError:
        DIRAC.exit( -1 )
      return S_ERROR( error )
    compCfg = compCfg[sectionName][componentModule]
    # Delete Dependencies section if any
    compCfg.deleteKey( 'Dependencies' )
  sectionPath = cfgPath( 'Systems', system, compInstance, sectionName )
  cfg = __getCfg( sectionPath )
  cfg.createNewSection( cfgPath( sectionPath, component ), '', compCfg )
  for option, value in specialOptions.items():
    cfg.setOption( cfgPath( sectionPath, component, option ),
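# Note on the template merge order above (illustration only, assuming
# CFG.mergeWith gives precedence to the cfg passed as argument): extension
# templates are loaded before DIRAC's own, so for extensions = ['LHCb'] a
# hypothetical lookup order would be
#   [ 'LHCbDIRAC/MySystem/ConfigTemplate.cfg', 'LHCb/...', 'DIRAC/...' ]
# with later files never overriding options already set by earlier merges.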
value ) # Add the service URL if componentType == "service": port = compCfg.getOption( 'Port' , 0 ) if port and host: urlsPath = cfgPath( 'Systems', system, compInstance, 'URLs' ) cfg.createNewSection( urlsPath ) cfg.setOption( cfgPath( urlsPath, component ), 'dips://%s:%d/%s/%s' % ( host, port, system, component ) ) return S_OK( cfg ) def addDatabaseOptionsToCS( gConfig, systemName, dbName, mySetup = setup, overwrite = False ): """ Add the section with the database options to the CS """ if gConfig: gConfig.forceRefresh() system = systemName.replace( 'System', '' ) instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system ) if gConfig: compInstance = gConfig.getValue( instanceOption, '' ) else: compInstance = localCfg.getOption( instanceOption, '' ) if not compInstance: return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) ) # Check if the component CS options exist addOptions = True if not overwrite: databasePath = cfgPath( 'Systems', system, compInstance, 'Databases', dbName ) result = gConfig.getOptions( databasePath ) if result['OK']: addOptions = False if not addOptions: return S_OK( 'Database options already exist' ) # Add the component options now result = getDatabaseCfg( system, dbName, compInstance ) if not result['OK']: return result databaseCfg = result['Value'] gLogger.notice( 'Adding to CS', '%s/%s' % ( system, dbName ) ) return _addCfgToCS( databaseCfg ) def removeDatabaseOptionsFromCS( gConfig, system, dbName, mySetup = setup ): """ Remove the section with database options from the CS, if possible """ global monitoringClient result = monitoringClient.installationExists( { 'UnInstallationTime': None }, { 'System': system, 'Type': 'DB', 'Module': dbName }, {} ) if not result[ 'OK' ]: return result exists = result[ 'Value' ] instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system ) if gConfig: compInstance = gConfig.getValue( instanceOption, '' ) else: compInstance = localCfg.getOption( instanceOption, '' ) if not exists: result = _removeSectionFromCS( cfgPath( 'Systems', system, compInstance, 'Databases', dbName ) ) if not result[ 'OK' ]: return result return S_OK( 'Successfully removed entries from CS' ) def getDatabaseCfg( system, dbName, compInstance ): """ Get the CFG object of the database configuration """ databasePath = cfgPath( 'Systems', system, compInstance, 'Databases', dbName ) cfg = __getCfg( databasePath, 'DBName', dbName ) cfg.setOption( cfgPath( databasePath, 'Host' ), mysqlHost ) cfg.setOption( cfgPath( databasePath, 'Port' ), mysqlPort ) return S_OK( cfg ) def addSystemInstance( systemName, compInstance, mySetup = setup, myCfg = False ): """ Add a new system instance to dirac.cfg and CS """ system = systemName.replace( 'System', '' ) gLogger.notice( 'Adding %s system as %s instance for %s setup to dirac.cfg and CS' % ( system, compInstance, mySetup ) ) cfg = __getCfg( cfgPath( 'DIRAC', 'Setups', mySetup ), system, compInstance ) if myCfg: if not _addCfgToDiracCfg( cfg ): return S_ERROR( 'Failed to add system instance to dirac.cfg' ) return _addCfgToCS( cfg ) def printStartupStatus( rDict ): """ Print in nice format the return dictionary from getStartupComponentStatus (also returned by runsvctrlComponent) """ fields = ['Name','Runit','Uptime','PID'] records = [] try: for comp in rDict: records.append( [comp, rDict[comp]['RunitStatus'], rDict[comp]['Timeup'], str( rDict[comp]['PID'] ) ] ) printTable( fields, records ) except Exception, x: print "Exception while gathering data for printing: %s" % str( x ) return S_OK() def 
printOverallStatus( rDict ): """ Print in nice format the return dictionary from getOverallStatus """ fields = ['System','Name','Type','Setup','Installed','Runit','Uptime','PID'] records = [] try: for compType in rDict: for system in rDict[compType]: for component in rDict[compType][system]: record = [ system, component, compType.lower()[:-1] ] if rDict[compType][system][component]['Setup']: record.append( 'SetUp' ) else: record.append( 'NotSetUp' ) if rDict[compType][system][component]['Installed']: record.append( 'Installed' ) else: record.append( 'NotInstalled' ) record.append( str( rDict[compType][system][component]['RunitStatus'] ) ) record.append( str( rDict[compType][system][component]['Timeup'] ) ) record.append( str( rDict[compType][system][component]['PID'] ) ) records.append( record ) printTable( fields, records ) except Exception, x: print "Exception while gathering data for printing: %s" % str( x ) return S_OK() def getAvailableSystems( extensions ): """ Get the list of all systems (in all given extensions) locally available """ systems = [] for extension in extensions: extensionPath = os.path.join( DIRAC.rootPath, extension, '*System' ) for system in [ os.path.basename( k ).split( 'System' )[0] for k in glob.glob( extensionPath ) ]: if system not in systems: systems.append( system ) return systems def getSoftwareComponents( extensions ): """ Get the list of all the components ( services and agents ) for which the software is installed on the system """ # The Gateway does not need a handler services = { 'Framework' : ['Gateway'] } agents = {} executors = {} remainders = {} resultDict = {} remainingTypes = [ cType for cType in COMPONENT_TYPES if cType not in [ 'service', 'agent', 'executor' ] ] resultIndexes = {} # Components other than services, agents and executors for cType in remainingTypes: result = _getSectionName( cType ) if not result[ 'OK' ]: return result resultIndexes[ cType ] = result[ 'Value' ] resultDict[ resultIndexes[ cType ] ] = {} remainders[ cType ] = {} for extension in ['DIRAC'] + [ x + 'DIRAC' for x in extensions]: if not os.path.exists( os.path.join( rootPath, extension ) ): # Not all the extensions are necessarily installed in this instance continue systemList = os.listdir( os.path.join( rootPath, extension ) ) for sys in systemList: system = sys.replace( 'System', '' ) try: agentDir = os.path.join( rootPath, extension, sys, 'Agent' ) agentList = os.listdir( agentDir ) for agent in agentList: if os.path.splitext( agent )[1] == ".py": agentFile = os.path.join( agentDir, agent ) with open( agentFile, 'r' ) as afile: body = afile.read() if body.find( 'AgentModule' ) != -1 or body.find( 'OptimizerModule' ) != -1: if not agents.has_key( system ): agents[system] = [] agents[system].append( agent.replace( '.py', '' ) ) except OSError: pass try: serviceDir = os.path.join( rootPath, extension, sys, 'Service' ) serviceList = os.listdir( serviceDir ) for service in serviceList: if service.find( 'Handler' ) != -1 and os.path.splitext( service )[1] == '.py': if not services.has_key( system ): services[system] = [] if system == 'Configuration' and service == 'ConfigurationHandler.py': service = 'ServerHandler.py' services[system].append( service.replace( '.py', '' ).replace( 'Handler', '' ) ) except OSError: pass try: executorDir = os.path.join( rootPath, extension, sys, 'Executor' ) executorList = os.listdir( executorDir ) for executor in executorList: if os.path.splitext( executor )[1] == ".py": executorFile = os.path.join( executorDir, executor ) with open( 
executorFile, 'r' ) as afile:
              body = afile.read()
            if body.find( 'OptimizerExecutor' ) != -1:
              if not executors.has_key( system ):
                executors[system] = []
              executors[system].append( executor.replace( '.py', '' ) )
      except OSError:
        pass
      # Rest of component types
      for cType in remainingTypes:
        try:
          remainDir = os.path.join( rootPath, extension, sys, cType.title() )
          remainList = os.listdir( remainDir )
          for remainder in remainList:
            if os.path.splitext( remainder )[1] == ".py":
              if not remainders[ cType ].has_key( system ):
                remainders[ cType ][system] = []
              remainders[ cType ][system].append( remainder.replace( '.py', '' ) )
        except OSError:
          pass
  resultDict['Services'] = services
  resultDict['Agents'] = agents
  resultDict['Executors'] = executors
  for cType in remainingTypes:
    resultDict[ resultIndexes[ cType ] ] = remainders[ cType ]
  return S_OK( resultDict )

def getInstalledComponents():
  """ Get the list of all the components ( services and agents ) installed on the system in the runit directory """
  resultDict = {}
  resultIndexes = {}
  for cType in COMPONENT_TYPES:
    result = _getSectionName( cType )
    if not result[ 'OK' ]:
      return result
    resultIndexes[ cType ] = result[ 'Value' ]
    resultDict[ resultIndexes[ cType ] ] = {}
  systemList = os.listdir( runitDir )
  for system in systemList:
    systemDir = os.path.join( runitDir, system )
    components = os.listdir( systemDir )
    for component in components:
      try:
        runFile = os.path.join( systemDir, component, 'run' )
        rfile = open( runFile, 'r' )
        body = rfile.read()
        rfile.close()
        for cType in COMPONENT_TYPES:
          if body.find( 'dirac-%s' % ( cType ) ) != -1:
            if not resultDict[ resultIndexes[ cType ] ].has_key( system ):
              resultDict[ resultIndexes[ cType ] ][system] = []
            resultDict[ resultIndexes[ cType ] ][system].append( component )
      except IOError:
        pass
  return S_OK( resultDict )

def getSetupComponents():
  """ Get the list of all the components ( services and agents ) set up for running with runsvdir in startup directory """
  resultDict = {}
  resultIndexes = {}
  for cType in COMPONENT_TYPES:
    result = _getSectionName( cType )
    if not result[ 'OK' ]:
      return result
    resultIndexes[ cType ] = result[ 'Value' ]
    resultDict[ resultIndexes[ cType ] ] = {}
  if not os.path.isdir( startDir ):
    return S_ERROR( 'Startup Directory does not exist: %s' % startDir )
  componentList = os.listdir( startDir )
  for component in componentList:
    try:
      runFile = os.path.join( startDir, component, 'run' )
      rfile = open( runFile, 'r' )
      body = rfile.read()
      rfile.close()
      for cType in COMPONENT_TYPES:
        if body.find( 'dirac-%s' % ( cType ) ) != -1:
          system, compT = component.split( '_' )[0:2]
          if not resultDict[ resultIndexes[ cType ] ].has_key( system ):
            resultDict[ resultIndexes[ cType ] ][system] = []
          resultDict[ resultIndexes[ cType ] ][system].append( compT )
    except IOError:
      pass
  return S_OK( resultDict )

def getStartupComponentStatus( componentTupleList ):
  """ Get the status of the components set up for running with runsvdir in the startup directory """
  try:
    if componentTupleList:
      cList = []
      for componentTuple in componentTupleList:
        cList.extend( glob.glob( os.path.join( startDir, '_'.join( componentTuple ) ) ) )
    else:
      cList = glob.glob( os.path.join( startDir, '*' ) )
  except Exception:
    error = 'Failed to parse List of Components'
    gLogger.exception( error )
    if exitOnError:
      DIRAC.exit( -1 )
    return S_ERROR( error )
  result = execCommand( 0, ['runsvstat'] + cList )
  if not result['OK']:
    return result
  output = result['Value'][1].strip().split( '\n' )
  componentDict = {}
  for line in output:
    if not line:
      continue
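# Illustration of the parsing that follows (hypothetical runsvstat line):
#   '/opt/dirac/startup/Framework_SystemAdministrator: run (pid 12345) 678 seconds'
# Assuming that format, the regexes used below behave like:
#   import re
#   routput = ' run (pid 12345) 678 seconds'
#   re.search( 'pid ([0-9]+)', routput ).group( 1 )      # -> '12345'
#   re.search( '([0-9]+) seconds', routput ).group( 1 )  # -> '678'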
cname, routput = line.split( ':' ) cname = cname.replace( '%s/' % startDir, '' ) run = False reResult = re.search( '^ run', routput ) if reResult: run = True down = False reResult = re.search( '^ down', routput ) if reResult: down = True reResult = re.search( '([0-9]+) seconds', routput ) timeup = 0 if reResult: timeup = reResult.group( 1 ) reResult = re.search( 'pid ([0-9]+)', routput ) pid = 0 if reResult: pid = reResult.group( 1 ) runsv = "Not running" if run or down: runsv = "Running" reResult = re.search( 'runsv not running', routput ) if reResult: runsv = "Not running" runDict = {} runDict['Timeup'] = timeup runDict['PID'] = pid runDict['RunitStatus'] = "Unknown" if run: runDict['RunitStatus'] = "Run" if down: runDict['RunitStatus'] = "Down" if runsv == "Not running": runDict['RunitStatus'] = "NoRunitControl" componentDict[cname] = runDict return S_OK( componentDict ) def getComponentModule( gConfig, system, component, compType ): """ Get the component software module """ setup = CSGlobals.getSetup() instance = gConfig.getValue( cfgPath( 'DIRAC', 'Setups', setup, system ), '' ) if not instance: return S_OK( component ) module = gConfig.getValue( cfgPath( 'Systems', system, instance, compType, component, 'Module' ), '' ) if not module: module = component return S_OK( module ) def getOverallStatus( extensions ): """ Get the list of all the components ( services and agents ) set up for running with runsvdir in startup directory """ result = getSoftwareComponents( extensions ) if not result['OK']: return result softDict = result['Value'] result = getSetupComponents() if not result['OK']: return result setupDict = result['Value'] result = getInstalledComponents() if not result['OK']: return result installedDict = result['Value'] result = getStartupComponentStatus( [] ) if not result['OK']: return result runitDict = result['Value'] # Collect the info now resultDict = {} resultIndexes = {} for cType in COMPONENT_TYPES: result = _getSectionName( cType ) if not result[ 'OK' ]: return result resultIndexes[ cType ] = result[ 'Value' ] resultDict[ resultIndexes[ cType ] ] = {} for compType in resultIndexes.values(): if softDict.has_key( 'Services' ): for system in softDict[compType]: resultDict[compType][system] = {} for component in softDict[compType][system]: if system == 'Configuration' and component == 'Configuration': # Fix to avoid missing CS due to different between Service name and Handler name component = 'Server' resultDict[compType][system][component] = {} resultDict[compType][system][component]['Setup'] = False resultDict[compType][system][component]['Installed'] = False resultDict[compType][system][component]['RunitStatus'] = 'Unknown' resultDict[compType][system][component]['Timeup'] = 0 resultDict[compType][system][component]['PID'] = 0 # TODO: why do we need a try here? 
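# Sketch of what the try/except below guards against (equivalent lookup,
# assuming plain dicts): setupDict may lack the compType or system key entirely,
# so a KeyError-safe form would read
#   component in setupDict.get( compType, {} ).get( system, [] )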
try: if component in setupDict[compType][system]: resultDict[compType][system][component]['Setup'] = True except Exception: pass try: if component in installedDict[compType][system]: resultDict[compType][system][component]['Installed'] = True except Exception: pass try: compDir = system + '_' + component if runitDict.has_key( compDir ): resultDict[compType][system][component]['RunitStatus'] = runitDict[compDir]['RunitStatus'] resultDict[compType][system][component]['Timeup'] = runitDict[compDir]['Timeup'] resultDict[compType][system][component]['PID'] = runitDict[compDir]['PID'] except Exception: #print str(x) pass # Installed components can be not the same as in the software list if installedDict.has_key( 'Services' ): for system in installedDict[compType]: for component in installedDict[compType][system]: if compType in resultDict: if system in resultDict[compType]: if component in resultDict[compType][system]: continue resultDict[compType][system][component] = {} resultDict[compType][system][component]['Setup'] = False resultDict[compType][system][component]['Installed'] = True resultDict[compType][system][component]['RunitStatus'] = 'Unknown' resultDict[compType][system][component]['Timeup'] = 0 resultDict[compType][system][component]['PID'] = 0 # TODO: why do we need a try here? try: if component in setupDict[compType][system]: resultDict[compType][system][component]['Setup'] = True except Exception: pass try: compDir = system + '_' + component if runitDict.has_key( compDir ): resultDict[compType][system][component]['RunitStatus'] = runitDict[compDir]['RunitStatus'] resultDict[compType][system][component]['Timeup'] = runitDict[compDir]['Timeup'] resultDict[compType][system][component]['PID'] = runitDict[compDir]['PID'] except Exception: #print str(x) pass return S_OK( resultDict ) def checkComponentModule( componentType, system, module ): """ Check existence of the given module and if it inherits from the proper class """ if componentType == 'agent': loader = ModuleLoader( "Agent", PathFinder.getAgentSection, AgentModule ) elif componentType == 'service': loader = ModuleLoader( "Service", PathFinder.getServiceSection, RequestHandler, moduleSuffix = "Handler" ) elif componentType == 'executor': loader = ModuleLoader( "Executor", PathFinder.getExecutorSection, ExecutorModule ) else: return S_ERROR( 'Unknown component type %s' % componentType ) return loader.loadModule( "%s/%s" % ( system, module ) ) def checkComponentSoftware( componentType, system, component, extensions ): """ Check the component software """ result = getSoftwareComponents( extensions ) if not result['OK']: return result softComp = result[ 'Value' ] result = _getSectionName( componentType ) if not result[ 'OK' ]: return result try: softDict = softComp[ result[ 'Value' ] ] except KeyError, e: return S_ERROR( 'Unknown component type %s' % componentType ) if system in softDict and component in softDict[system]: return S_OK() return S_ERROR( 'Unknown Component %s/%s' % ( system, component ) ) def runsvctrlComponent( system, component, mode ): """ Execute runsvctrl and check status of the specified component """ if not mode in ['u', 'd', 'o', 'p', 'c', 'h', 'a', 'i', 'q', '1', '2', 't', 'k', 'x', 'e']: return S_ERROR( 'Unknown runsvctrl mode "%s"' % mode ) startCompDirs = glob.glob( os.path.join( startDir, '%s_%s' % ( system, component ) ) ) # Make sure that the Configuration server restarts first and the SystemAdmin restarts last tmpList = list( startCompDirs ) for comp in tmpList: if "Framework_SystemAdministrator" in 
comp: startCompDirs.append( startCompDirs.pop( startCompDirs.index( comp ) ) ) if "Configuration_Server" in comp: startCompDirs.insert( 0, startCompDirs.pop( startCompDirs.index( comp ) ) ) startCompList = [ [k] for k in startCompDirs] for startComp in startCompList: result = execCommand( 0, ['runsvctrl', mode] + startComp ) if not result['OK']: return result time.sleep( 1 ) # Check the runsv status if system == '*' or component == '*': time.sleep( 5 ) # Final check result = getStartupComponentStatus( [( system, component )] ) if not result['OK']: return S_ERROR( 'Failed to start the component' ) return result def getLogTail( system, component, length = 100 ): """ Get the tail of the component log file """ retDict = {} for startCompDir in glob.glob( os.path.join( startDir, '%s_%s' % ( system, component ) ) ): compName = os.path.basename( startCompDir ) logFileName = os.path.join( startCompDir, 'log', 'current' ) if not os.path.exists( logFileName ): retDict[compName] = 'No log file found' else: logFile = open( logFileName, 'r' ) lines = [ line.strip() for line in logFile.readlines() ] logFile.close() if len( lines ) < length: retDict[compName] = '\n'.join( lines ) else: retDict[compName] = '\n'.join( lines[-length:] ) return S_OK( retDict ) def setupSite( scriptCfg, cfg = None ): """ Setup a new site using the options defined """ # First we need to find out what needs to be installed # by default use dirac.cfg, but if a cfg is given use it and # merge it into the dirac.cfg diracCfg = CFG() installCfg = None if cfg: try: installCfg = CFG() installCfg.loadFromFile( cfg ) for section in ['DIRAC', 'LocalSite', cfgInstallSection]: if installCfg.isSection( section ): diracCfg.createNewSection( section, contents = installCfg[section] ) if instancePath != basePath: if not diracCfg.isSection( 'LocalSite' ): diracCfg.createNewSection( 'LocalSite' ) diracCfg.setOption( cfgPath( 'LocalSite', 'InstancePath' ), instancePath ) _addCfgToDiracCfg( diracCfg, verbose = True ) except Exception: error = 'Failed to load %s' % cfg gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) # Now get the necessary info from localCfg setupSystems = localCfg.getOption( cfgInstallPath( 'Systems' ), ['Configuration', 'Framework'] ) installMySQLFlag = localCfg.getOption( cfgInstallPath( 'InstallMySQL' ), False ) setupDatabases = localCfg.getOption( cfgInstallPath( 'Databases' ), [] ) setupServices = [ k.split( '/' ) for k in localCfg.getOption( cfgInstallPath( 'Services' ), [] ) ] setupAgents = [ k.split( '/' ) for k in localCfg.getOption( cfgInstallPath( 'Agents' ), [] ) ] setupExecutors = [ k.split( '/' ) for k in localCfg.getOption( cfgInstallPath( 'Executors' ), [] ) ] setupWeb = localCfg.getOption( cfgInstallPath( 'WebPortal' ), False ) setupWebApp = localCfg.getOption( cfgInstallPath( 'WebApp' ), False ) setupConfigurationMaster = localCfg.getOption( cfgInstallPath( 'ConfigurationMaster' ), False ) setupPrivateConfiguration = localCfg.getOption( cfgInstallPath( 'PrivateConfiguration' ), False ) setupConfigurationName = localCfg.getOption( cfgInstallPath( 'ConfigurationName' ), setup ) setupAddConfiguration = localCfg.getOption( cfgInstallPath( 'AddConfiguration' ), True ) for serviceTuple in setupServices: error = '' if len( serviceTuple ) != 2: error = 'Wrong service specification: system/service' # elif serviceTuple[0] not in setupSystems: # error = 'System %s not available' % serviceTuple[0] if error: if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) 
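# Illustrative LocalInstallation snippet (hypothetical) matching the
# 'system/component' split performed above:
#   LocalInstallation
#   {
#     Services = Configuration/Server, Framework/SystemAdministrator
#     Agents = Framework/CAUpdateAgent
#   }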
serviceSysInstance = serviceTuple[0] if not serviceSysInstance in setupSystems: setupSystems.append( serviceSysInstance ) for agentTuple in setupAgents: error = '' if len( agentTuple ) != 2: error = 'Wrong agent specification: system/agent' # elif agentTuple[0] not in setupSystems: # error = 'System %s not available' % agentTuple[0] if error: if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) agentSysInstance = agentTuple[0] if not agentSysInstance in setupSystems: setupSystems.append( agentSysInstance ) for executorTuple in setupExecutors: error = '' if len( executorTuple ) != 2: error = 'Wrong executor specification: system/executor' if error: if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) executorSysInstance = executorTuple[0] if not executorSysInstance in setupSystems: setupSystems.append( executorSysInstance ) # And to find out the available extensions result = getExtensions() if not result['OK']: return result extensions = [ k.replace( 'DIRAC', '' ) for k in result['Value']] # Make sure the necessary directories are there if basePath != instancePath: if not os.path.exists( instancePath ): try: os.makedirs( instancePath ) except Exception: error = 'Can not create directory for instance %s' % instancePath if exitOnError: gLogger.exception( error ) DIRAC.exit( -1 ) return S_ERROR( error ) if not os.path.isdir( instancePath ): error = 'Instance directory %s is not valid' % instancePath if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) instanceEtcDir = os.path.join( instancePath, 'etc' ) etcDir = os.path.dirname( cfgFile ) if not os.path.exists( instanceEtcDir ): try: os.symlink( etcDir, instanceEtcDir ) except Exception: error = 'Can not create link to configuration %s' % instanceEtcDir if exitOnError: gLogger.exception( error ) DIRAC.exit( -1 ) return S_ERROR( error ) if os.path.realpath( instanceEtcDir ) != os.path.realpath( etcDir ): error = 'Instance etc (%s) is not the same as DIRAC etc (%s)' % ( instanceEtcDir, etcDir ) if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # if any server or agent needs to be install we need the startup directory and runsvdir running if setupServices or setupAgents or setupExecutors or setupWeb: if not os.path.exists( startDir ): try: os.makedirs( startDir ) except Exception: error = 'Can not create %s' % startDir if exitOnError: gLogger.exception( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # And need to make sure runsvdir is running result = execCommand( 0, ['ps', '-ef'] ) if not result['OK']: if exitOnError: gLogger.error( 'Failed to verify runsvdir running', result['Message'] ) DIRAC.exit( -1 ) return S_ERROR( result['Message'] ) processList = result['Value'][1].split( '\n' ) cmd = 'runsvdir %s' % startDir cmdFound = False for process in processList: if process.find( cmd ) != -1: cmdFound = True if not cmdFound: gLogger.notice( 'Starting runsvdir ...' 
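# Note (illustration only): runsvdir scans startDir and spawns one runsv
# supervisor per subdirectory, so a manual equivalent of the command started
# below would be
#   runsvdir /opt/dirac/startup 'log: DIRAC runsv' &
# with /opt/dirac/startup standing in for a hypothetical startDir.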
) os.system( "runsvdir %s 'log: DIRAC runsv' &" % startDir ) if ['Configuration', 'Server'] in setupServices and setupConfigurationMaster: # This server hosts the Master of the CS from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData gLogger.notice( 'Installing Master Configuration Server' ) cfg = __getCfg( cfgPath( 'DIRAC', 'Setups', setup ), 'Configuration', instance ) _addCfgToDiracCfg( cfg ) cfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Master' , 'yes' ) cfg.setOption( cfgPath( 'DIRAC', 'Configuration', 'Name' ) , setupConfigurationName ) serversCfgPath = cfgPath( 'DIRAC', 'Configuration', 'Servers' ) if not localCfg.getOption( serversCfgPath , [] ): serverUrl = 'dips://%s:9135/Configuration/Server' % host cfg.setOption( serversCfgPath, serverUrl ) gConfigurationData.setOptionInCFG( serversCfgPath, serverUrl ) instanceOptionPath = cfgPath( 'DIRAC', 'Setups', setup ) instanceCfg = __getCfg( instanceOptionPath, 'Configuration', instance ) cfg = cfg.mergeWith( instanceCfg ) _addCfgToDiracCfg( cfg ) result = getComponentCfg( 'service', 'Configuration', 'Server', instance, extensions, addDefaultOptions = True ) if not result['OK']: if exitOnError: DIRAC.exit( -1 ) else: return result compCfg = result['Value'] cfg = cfg.mergeWith( compCfg ) gConfigurationData.mergeWithLocal( cfg ) addDefaultOptionsToComponentCfg( 'service', 'Configuration', 'Server', [] ) if installCfg: centralCfg = _getCentralCfg( installCfg ) else: centralCfg = _getCentralCfg( localCfg ) _addCfgToLocalCS( centralCfg ) setupComponent( 'service', 'Configuration', 'Server', [], checkModule = False ) MonitoringUtilities.monitorInstallation( 'service', 'Configuration', 'Server' ) runsvctrlComponent( 'Configuration', 'Server', 't' ) while ['Configuration', 'Server'] in setupServices: setupServices.remove( ['Configuration', 'Server'] ) time.sleep( 5 ) # Now need to check if there is valid CS to register the info result = scriptCfg.enableCS() if not result['OK']: if exitOnError: DIRAC.exit( -1 ) return result cfgClient = CSAPI() if not cfgClient.initialize(): error = 'Configuration Server not defined' if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # We need to make sure components are connecting to the Master CS, that is the only one being update from DIRAC import gConfig localServers = localCfg.getOption( cfgPath( 'DIRAC', 'Configuration', 'Servers' ) ) masterServer = gConfig.getValue( cfgPath( 'DIRAC', 'Configuration', 'MasterServer' ), '' ) initialCfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Servers' , localServers ) masterCfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Servers' , masterServer ) _addCfgToDiracCfg( masterCfg ) # 1.- Setup the instances in the CS # If the Configuration Server used is not the Master, it can take some time for this # info to be propagated, this may cause the later setup to fail if setupAddConfiguration: gLogger.notice( 'Registering System instances' ) for system in setupSystems: addSystemInstance( system, instance, setup, True ) for system, service in setupServices: if not addDefaultOptionsToCS( None, 'service', system, service, extensions, overwrite = True )['OK']: # If we are not allowed to write to the central CS, add the configuration to the local file addDefaultOptionsToComponentCfg( 'service', system, service, extensions ) for system, agent in setupAgents: if not addDefaultOptionsToCS( None, 'agent', system, agent, extensions, overwrite = True )['OK']: # If we are not allowed to write to the central CS, add 
        # the configuration to the local file
        addDefaultOptionsToComponentCfg( 'agent', system, agent, extensions )
    for system, executor in setupExecutors:
      if not addDefaultOptionsToCS( None, 'executor', system, executor, extensions, overwrite = True )['OK']:
        # If we are not allowed to write to the central CS, add the configuration to the local file
        addDefaultOptionsToComponentCfg( 'executor', system, executor, extensions )
  else:
    gLogger.warn( 'Configuration parameters definition is not requested' )
  if ['Configuration', 'Server'] in setupServices and setupPrivateConfiguration:
    cfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'AutoPublish', 'no' )
    _addCfgToDiracCfg( cfg )
  # 2.- Check if MySQL is to be installed
  if installMySQLFlag:
    gLogger.notice( 'Installing MySQL' )
    getMySQLPasswords()
    installMySQL()
  # 3.- Install requested Databases
  # if MySQL is not installed locally, we assume a host is given
  if setupDatabases:
    result = getDatabases()
    if not result['OK']:
      if exitOnError:
        gLogger.error( 'Failed to get databases', result['Message'] )
        DIRAC.exit( -1 )
      return result
    installedDatabases = result['Value']
    for dbName in setupDatabases:
      if dbName not in installedDatabases:
        result = installDatabase( dbName, monitorFlag = False )
        if not result['OK']:
          gLogger.error( result['Message'] )
          DIRAC.exit( -1 )
        extension, system = result['Value']
        gLogger.notice( 'Database %s from %s/%s installed' % ( dbName, extension, system ) )
        result = addDatabaseOptionsToCS( None, system, dbName, overwrite = True )
        if not result['OK']:
          gLogger.error( 'Database %s CS registration failed: %s' % ( dbName, result['Message'] ) )
      else:
        gLogger.notice( 'Database %s already installed' % dbName )
    if mysqlPassword:
      if not _addMySQLToDiracCfg():
        error = 'Failed to add MySQL user password to local configuration'
        if exitOnError:
          gLogger.error( error )
          DIRAC.exit( -1 )
        return S_ERROR( error )
  # 4.- Then install the requested services
  for system, service in setupServices:
    result = setupComponent( 'service', system, service, extensions, monitorFlag = False )
    if not result['OK']:
      gLogger.error( result['Message'] )
  # 5.- Now the agents
  for system, agent in setupAgents:
    result = setupComponent( 'agent', system, agent, extensions, monitorFlag = False )
    if not result['OK']:
      gLogger.error( result['Message'] )
  # 6.- Now the executors
  for system, executor in setupExecutors:
    result = setupComponent( 'executor', system, executor, extensions, monitorFlag = False )
    if not result['OK']:
      gLogger.error( result['Message'] )
  # 7.- And finally the Portal
  if setupWeb:
    if setupWebApp:
      setupNewPortal()
    else:
      setupPortal()
  if localServers != masterServer:
    _addCfgToDiracCfg( initialCfg )
    for system, service in setupServices:
      runsvctrlComponent( system, service, 't' )
    for system, agent in setupAgents:
      runsvctrlComponent( system, agent, 't' )
    for system, executor in setupExecutors:
      runsvctrlComponent( system, executor, 't' )
  return S_OK()

def _getSectionName( compType ):
  """ Returns the section name for a component in the CS
      For instance, the section for a service is Services, whereas the section for an agent is Agents
  """
  return S_OK( '%ss' % ( compType.title() ) )

def _createRunitLog( runitCompDir ):
  controlDir = os.path.join( runitCompDir, 'control' )
  os.makedirs( controlDir )
  logDir = os.path.join( runitCompDir, 'log' )
  os.makedirs( logDir )
  logConfigFile = os.path.join( logDir, 'config' )
  fd = open( logConfigFile, 'w' )
  fd.write( """s10000000
n20
""" )
  fd.close()
  logRunFile = os.path.join( logDir, 'run' )
  fd = open( logRunFile, 'w' )
  fd.write( """#!/bin/bash
#
rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec svlogd . """ % { 'bashrc' : os.path.join( instancePath, 'bashrc' ) } ) fd.close() os.chmod( logRunFile, gDefaultPerms ) def installComponent( componentType, system, component, extensions, componentModule = '', checkModule = True ): """ Install runit directory for the specified component """ # Check if the component is already installed runitCompDir = os.path.join( runitDir, system, component ) if os.path.exists( runitCompDir ): msg = "%s %s_%s already installed" % ( componentType, system, component ) gLogger.notice( msg ) return S_OK( runitCompDir ) # Check that the software for the component is installed # Any "Load" or "Module" option in the configuration defining what modules the given "component" # needs to load will be taken care of by checkComponentModule. if checkModule: cModule = componentModule if not cModule: cModule = component result = checkComponentModule( componentType, system, cModule ) if not result['OK']: if not checkComponentSoftware( componentType, system, cModule, extensions )['OK'] and componentType != 'executor': error = 'Software for %s %s/%s is not installed' % ( componentType, system, component ) if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) gLogger.notice( 'Installing %s %s/%s' % ( componentType, system, component ) ) # Retrieve bash variables to be set result = gConfig.getOption( 'DIRAC/Setups/%s/%s' % ( CSGlobals.getSetup(), system ) ) if not result[ 'OK' ]: return result instance = result[ 'Value' ] specialOptions = {} if componentModule: specialOptions['Module'] = componentModule result = getComponentCfg( componentType, system, component, instance, extensions, specialOptions = specialOptions ) if not result[ 'OK' ]: return result compCfg = result[ 'Value' ] result = _getSectionName( componentType ) if not result[ 'OK' ]: return result section = result[ 'Value' ] bashVars = '' if compCfg.isSection( 'Systems/%s/%s/%s/%s/Environment' % ( system, instance, section, component ) ): dictionary = compCfg.getAsDict() bashSection = dictionary[ 'Systems' ][ system ][ instance ][ section ][ component ][ 'BashVariables' ] for var in bashSection: bashVars = '%s\nexport %s=%s' % ( bashVars, var, bashSection[ var ] ) # Now do the actual installation try: componentCfg = os.path.join( linkedRootPath, 'etc', '%s_%s.cfg' % ( system, component ) ) if not os.path.exists( componentCfg ): fd = open( componentCfg, 'w' ) fd.close() _createRunitLog( runitCompDir ) runFile = os.path.join( runitCompDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # [ "%(componentType)s" = "agent" ] && renice 20 -p $$ #%(bashVariables)s # exec python $DIRAC/DIRAC/Core/scripts/dirac-%(componentType)s.py %(system)s/%(component)s %(componentCfg)s < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), 'bashVariables': bashVars, 'componentType': componentType, 'system' : system, 'component': component, 'componentCfg': componentCfg } ) fd.close() os.chmod( runFile, gDefaultPerms ) if componentType.lower() == 'agent': stopFile = os.path.join( runitCompDir, 'control', 't' ) fd = open( stopFile, 'w' ) fd.write( """#!/bin/bash echo %(controlDir)s/%(system)s/%(component)s/stop_agent touch %(controlDir)s/%(system)s/%(component)s/stop_agent """ % {'controlDir': controlDir, 'system' : system, 'component': component } ) fd.close() os.chmod( stopFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for %s %s/%s' % ( 
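# For a hypothetical agent WorkloadManagement/JobAgent, the run file written
# above would expand to roughly (illustration, paths invented):
#   #!/bin/bash
#   rcfile=/opt/dirac/bashrc
#   [ -e $rcfile ] && source $rcfile
#   exec 2>&1
#   [ "agent" = "agent" ] && renice 20 -p $$
#   exec python $DIRAC/DIRAC/Core/scripts/dirac-agent.py WorkloadManagement/JobAgent /opt/dirac/pro/etc/WorkloadManagement_JobAgent.cfg < /dev/null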
componentType, system, component ) gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( result['Value'][1] ) return S_OK( runitCompDir ) def setupComponent( componentType, system, component, extensions, componentModule = '', checkModule = True, monitorFlag = True ): """ Install and create link in startup """ result = installComponent( componentType, system, component, extensions, componentModule, checkModule ) if not result['OK']: return result # Create the startup entry now runitCompDir = result['Value'] startCompDir = os.path.join( startDir, '%s_%s' % ( system, component ) ) if not os.path.exists( startDir ): os.makedirs( startDir ) if not os.path.lexists( startCompDir ): gLogger.notice( 'Creating startup link at', startCompDir ) os.symlink( runitCompDir, startCompDir ) time.sleep( 10 ) # Check the runsv status start = time.time() while ( time.time() - 20 ) < start: result = getStartupComponentStatus( [ ( system, component )] ) if not result['OK']: continue if result['Value'] and result['Value']['%s_%s' % ( system, component )]['RunitStatus'] == "Run": break time.sleep( 1 ) # Final check result = getStartupComponentStatus( [( system, component )] ) if not result['OK']: return S_ERROR( 'Failed to start the component %s_%s' % ( system, component ) ) resDict = {} resDict['ComponentType'] = componentType resDict['RunitStatus'] = result['Value']['%s_%s' % ( system, component )]['RunitStatus'] return S_OK( resDict ) def unsetupComponent( system, component ): """ Remove link from startup """ for startCompDir in glob.glob( os.path.join( startDir, '%s_%s' % ( system, component ) ) ): try: os.unlink( startCompDir ) except Exception: gLogger.exception() return S_OK() def uninstallComponent( system, component, removeLogs ): """ Remove startup and runit directories """ result = runsvctrlComponent( system, component, 'd' ) if not result['OK']: pass result = unsetupComponent( system, component ) if removeLogs: for runitCompDir in glob.glob( os.path.join( runitDir, system, component ) ): try: shutil.rmtree( runitCompDir ) except Exception: gLogger.exception() result = removeComponentOptionsFromCS( system, component ) if not result [ 'OK' ]: return result return S_OK() def installPortal(): """ Install runit directories for the Web Portal """ # Check that the software for the Web Portal is installed error = '' webDir = os.path.join( linkedRootPath, 'Web' ) if not os.path.exists( webDir ): error = 'Web extension not installed at %s' % webDir if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # First the lighthttpd server # Check if the component is already installed runitHttpdDir = os.path.join( runitDir, 'Web', 'httpd' ) runitPasterDir = os.path.join( runitDir, 'Web', 'paster' ) if os.path.exists( runitHttpdDir ): msg = "lighthttpd already installed" gLogger.notice( msg ) else: gLogger.notice( 'Installing Lighttpd' ) # Now do the actual installation try: _createRunitLog( runitHttpdDir ) runFile = os.path.join( runitHttpdDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # exec lighttpdSvc.sh < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), } ) fd.close() os.chmod( runFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for lighttpd' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( 
result['Value'][1] )
  # Second the Web portal
  # Check if the component is already installed
  if os.path.exists( runitPasterDir ):
    msg = "Web Portal already installed"
    gLogger.notice( msg )
  else:
    gLogger.notice( 'Installing Web Portal' )
    # Now do the actual installation
    try:
      _createRunitLog( runitPasterDir )
      runFile = os.path.join( runitPasterDir, 'run' )
      fd = open( runFile, 'w' )
      fd.write( """#!/bin/bash
rcfile=%(bashrc)s
[ -e $rcfile ] && source $rcfile
#
exec 2>&1
#
cd %(DIRAC)s/Web
exec paster serve --reload production.ini < /dev/null
""" % {'bashrc': os.path.join( instancePath, 'bashrc' ),
       'DIRAC': linkedRootPath} )
      fd.close()
      os.chmod( runFile, gDefaultPerms )
    except Exception:
      error = 'Failed to prepare setup for Web Portal'
      gLogger.exception( error )
      if exitOnError:
        DIRAC.exit( -1 )
      return S_ERROR( error )
    result = execCommand( 5, [runFile] )
    gLogger.notice( result['Value'][1] )
  return S_OK( [runitHttpdDir, runitPasterDir] )

def setupPortal():
  """ Install and create link in startup """
  result = installPortal()
  if not result['OK']:
    return result
  # Create the startup entries now
  runitCompDir = result['Value']
  startCompDir = [ os.path.join( startDir, 'Web_httpd' ),
                   os.path.join( startDir, 'Web_paster' ) ]
  if not os.path.exists( startDir ):
    os.makedirs( startDir )
  for i in range( 2 ):
    if not os.path.lexists( startCompDir[i] ):
      gLogger.notice( 'Creating startup link at', startCompDir[i] )
      os.symlink( runitCompDir[i], startCompDir[i] )
      time.sleep( 1 )
  time.sleep( 5 )
  # Check the runsv status
  start = time.time()
  while ( time.time() - 10 ) < start:
    result = getStartupComponentStatus( [ ( 'Web', 'httpd' ), ( 'Web', 'paster' ) ] )
    if not result['OK']:
      return S_ERROR( 'Failed to start the Portal' )
    if result['Value'] and \
       result['Value']['%s_%s' % ( 'Web', 'httpd' )]['RunitStatus'] == "Run" and \
       result['Value']['%s_%s' % ( 'Web', 'paster' )]['RunitStatus'] == "Run" :
      break
    time.sleep( 1 )
  # Final check
  return getStartupComponentStatus( [ ( 'Web', 'httpd' ), ( 'Web', 'paster' ) ] )

def setupNewPortal():
  """ Install and create link in startup """
  result = installNewPortal()
  if not result['OK']:
    return result
  # Create the startup entries now
  runitCompDir = result['Value']
  startCompDir = os.path.join( startDir, 'Web_WebApp' )
  if not os.path.exists( startDir ):
    os.makedirs( startDir )
  if not os.path.lexists( startCompDir ):
    gLogger.notice( 'Creating startup link at', startCompDir )
    os.symlink( runitCompDir, startCompDir )
  time.sleep( 5 )
  # Check the runsv status
  start = time.time()
  while ( time.time() - 10 ) < start:
    result = getStartupComponentStatus( [( 'Web', 'WebApp' )] )
    if not result['OK']:
      return S_ERROR( 'Failed to start the Portal' )
    if result['Value'] and \
       result['Value']['%s_%s' % ( 'Web', 'WebApp' )]['RunitStatus'] == "Run":
      break
    time.sleep( 1 )
  # Final check
  return getStartupComponentStatus( [ ('Web', 'WebApp') ] )

def installNewPortal():
  """ Install runit directories for the Web Portal """
  result = execCommand( False, ["pip", "install", "tornado"] )
  if not result['OK']:
    error = "Tornado can not be installed: %s" % result['Value']
    gLogger.error( error )
    DIRAC.exit( -1 )
    return S_ERROR( error )
  else:
    gLogger.notice( "Tornado is installed successfully!" )
  # Check that the software for the Web Portal is installed
  error = ''
  webDir = os.path.join( linkedRootPath, 'WebAppDIRAC' )
  if not os.path.exists( webDir ):
    error = 'WebApp extension not installed at %s' % webDir
    if exitOnError:
      gLogger.error( error )
      DIRAC.exit( -1 )
    return S_ERROR( error )
  # Compile the JS code
  prodMode = ""
  webappCompileScript = 
os.path.join( linkedRootPath, "WebAppDIRAC/scripts", "dirac-webapp-compile.py" ) if os.path.isfile( webappCompileScript ): os.chmod( webappCompileScript , gDefaultPerms ) gLogger.notice( "Executing %s..." % webappCompileScript ) if os.system( "python '%s' > '%s.out' 2> '%s.err'" % ( webappCompileScript, webappCompileScript, webappCompileScript ) ): gLogger.error( "Compile script %s failed. Check %s.err" % ( webappCompileScript, webappCompileScript ) ) else: prodMode = "-p" # Check if the component is already installed runitWebAppDir = os.path.join( runitDir, 'Web', 'WebApp' ) # Check if the component is already installed if os.path.exists( runitWebAppDir ): msg = "Web Portal already installed" gLogger.notice( msg ) else: gLogger.notice( 'Installing Web Portal' ) # Now do the actual installation try: _createRunitLog( runitWebAppDir ) runFile = os.path.join( runitWebAppDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # exec python %(DIRAC)s/WebAppDIRAC/scripts/dirac-webapp-run.py %(prodMode)s < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), 'DIRAC': linkedRootPath, 'prodMode':prodMode} ) fd.close() os.chmod( runFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for Web Portal' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( result['Value'][1] ) return S_OK( runitWebAppDir ) def fixMySQLScripts( startupScript = mysqlStartupScript ): """ Edit MySQL scripts to point to desired locations for db and my.cnf """ gLogger.verbose( 'Updating:', startupScript ) try: fd = open( startupScript, 'r' ) orgLines = fd.readlines() fd.close() fd = open( startupScript, 'w' ) for line in orgLines: if line.find( 'export HOME' ) == 0: continue if line.find( 'datadir=' ) == 0: line = 'datadir=%s\n' % mysqlDbDir gLogger.debug( line ) line += 'export HOME=%s\n' % mysqlDir if line.find( 'basedir=' ) == 0: platform = getPlatformString() line = 'basedir=%s\n' % os.path.join( rootPath, platform ) if line.find( 'extra_args=' ) == 0: line = 'extra_args="-n"\n' if line.find( '$bindir/mysqld_safe --' ) >= 0 and not ' --defaults-file' in line: line = line.replace( 'mysqld_safe', 'mysqld_safe --defaults-file=$HOME/.my.cnf' ) fd.write( line ) fd.close() except Exception: error = 'Failed to Update MySQL startup script' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) return S_OK() def mysqlInstalled( doNotExit = False ): """ Check if MySQL is already installed """ if os.path.exists( mysqlDbDir ) or os.path.exists( mysqlLogDir ): return S_OK() if doNotExit: return S_ERROR() error = 'MySQL not properly Installed' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) def getMySQLPasswords(): """ Get MySQL passwords from local configuration or prompt """ import getpass global mysqlRootPwd, mysqlPassword if not mysqlRootPwd: mysqlRootPwd = getpass.getpass( 'MySQL root password: ' ) if not mysqlPassword: # Take it if it is already defined mysqlPassword = localCfg.getOption( '/Systems/Databases/Password', '' ) if not mysqlPassword: mysqlPassword = getpass.getpass( 'MySQL Dirac password: ' ) return S_OK() def setMySQLPasswords( root = '', dirac = '' ): """ Set MySQL passwords """ global mysqlRootPwd, mysqlPassword if root: mysqlRootPwd = root if dirac: mysqlPassword = dirac return S_OK() def startMySQL(): """ Start MySQL server """ result = mysqlInstalled() if not 
result['OK']: return result return execCommand( 0, [mysqlStartupScript, 'start'] ) def stopMySQL(): """ Stop MySQL server """ result = mysqlInstalled() if not result['OK']: return result return execCommand( 0, [mysqlStartupScript, 'stop'] ) def installMySQL(): """ Attempt an installation of MySQL mode: -Master -Slave -None """ fixMySQLScripts() if mysqlInstalled( doNotExit = True )['OK']: gLogger.notice( 'MySQL already installed' ) return S_OK() if mysqlMode.lower() not in [ '', 'master', 'slave' ]: error = 'Unknown MySQL server Mode' if exitOnError: gLogger.fatal( error, mysqlMode ) DIRAC.exit( -1 ) gLogger.error( error, mysqlMode ) return S_ERROR( error ) if mysqlHost: gLogger.notice( 'Installing MySQL server at', mysqlHost ) if mysqlMode: gLogger.notice( 'This is a MySQL %s server' % mysqlMode ) try: os.makedirs( mysqlDbDir ) os.makedirs( mysqlLogDir ) except Exception: error = 'Cannot create MySQL dirs' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) try: fd = open( mysqlMyOrg, 'r' ) myOrg = fd.readlines() fd.close() fd = open( mysqlMyCnf, 'w' ) for line in myOrg: if line.find( '[mysqld]' ) == 0: line += '\n'.join( [ 'innodb_file_per_table', '' ] ) elif line.find( 'innodb_log_arch_dir' ) == 0: line = '' elif line.find( 'innodb_data_file_path' ) == 0: line = line.replace( '2000M', '200M' ) elif line.find( 'server-id' ) == 0 and mysqlMode.lower() == 'master': # MySQL Configuration for Master Server line = '\n'.join( ['server-id = 1', '# DIRAC Master-Server', 'sync-binlog = 1', 'replicate-ignore-table = mysql.MonitorData', '# replicate-ignore-db=db_name', 'log-bin = mysql-bin', 'log-slave-updates', '' ] ) elif line.find( 'server-id' ) == 0 and mysqlMode.lower() == 'slave': # MySQL Configuration for Slave Server line = '\n'.join( ['server-id = %s' % int( time.time() ), '# DIRAC Slave-Server', 'sync-binlog = 1', 'replicate-ignore-table = mysql.MonitorData', '# replicate-ignore-db=db_name', 'log-bin = mysql-bin', 'log-slave-updates', '' ] ) elif line.find( '/opt/dirac/mysql' ) > -1: line = line.replace( '/opt/dirac/mysql', mysqlDir ) if mysqlSmallMem: if line.find( 'innodb_buffer_pool_size' ) == 0: line = 'innodb_buffer_pool_size = 200M\n' elif mysqlLargeMem: if line.find( 'innodb_buffer_pool_size' ) == 0: line = 'innodb_buffer_pool_size = 10G\n' fd.write( line ) fd.close() except Exception: error = 'Cannot create my.cnf' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) gLogger.notice( 'Initializing MySQL...' ) result = execCommand( 0, ['mysql_install_db', '--defaults-file=%s' % mysqlMyCnf, '--datadir=%s' % mysqlDbDir ] ) if not result['OK']: return result gLogger.notice( 'Starting MySQL...'
) result = startMySQL() if not result['OK']: return result gLogger.notice( 'Setting MySQL root password' ) result = execCommand( 0, ['mysqladmin', '-u', mysqlRootUser, 'password', mysqlRootPwd] ) if not result['OK']: return result # MySQL tends to define root@host user rather than root@host.domain hostName = mysqlHost.split('.')[0] result = execMySQL( "UPDATE user SET Host='%s' WHERE Host='%s'" % ( mysqlHost, hostName ), localhost=True ) if not result['OK']: return result result = execMySQL( "FLUSH PRIVILEGES" ) if not result['OK']: return result if mysqlHost and socket.gethostbyname( mysqlHost ) != '127.0.0.1' : result = execCommand( 0, ['mysqladmin', '-u', mysqlRootUser, '-h', mysqlHost, 'password', mysqlRootPwd] ) if not result['OK']: return result result = execMySQL( "DELETE from user WHERE Password=''", localhost=True ) if not _addMySQLToDiracCfg(): return S_ERROR( 'Failed to add MySQL user password to local configuration' ) return S_OK() def getMySQLStatus(): """ Get the status of the MySQL database installation """ result = execCommand( 0, ['mysqladmin', 'status' ] ) if not result['OK']: return result output = result['Value'][1] _d1, uptime, nthreads, nquestions, nslow, nopens, nflash, nopen, nqpersec = output.split( ':' ) resDict = {} resDict['UpTime'] = uptime.strip().split()[0] resDict['NumberOfThreads'] = nthreads.strip().split()[0] resDict['NumberOfQuestions'] = nquestions.strip().split()[0] resDict['NumberOfSlowQueries'] = nslow.strip().split()[0] resDict['NumberOfOpens'] = nopens.strip().split()[0] resDict['OpenTables'] = nopen.strip().split()[0] resDict['FlushTables'] = nflash.strip().split()[0] resDict['QueriesPerSecond'] = nqpersec.strip().split()[0] return S_OK( resDict ) def getAvailableDatabases( extensions ): dbDict = {} for extension in extensions + ['']: databases = glob.glob( os.path.join( rootPath, ('%sDIRAC' % extension).replace( 'DIRACDIRAC', 'DIRAC' ), '*', 'DB', '*.sql' ) ) for dbPath in databases: dbName = os.path.basename( dbPath ).replace( '.sql', '' ) dbDict[dbName] = {} dbDict[dbName]['Extension'] = extension dbDict[dbName]['System'] = dbPath.split( '/' )[-3].replace( 'System', '' ) return S_OK( dbDict ) def getDatabases(): """ Get the list of installed databases """ result = execMySQL( 'SHOW DATABASES' ) if not result['OK']: return result dbList = [] for dbName in result['Value']: if not dbName[0] in ['Database', 'information_schema', 'mysql', 'test']: dbList.append( dbName[0] ) return S_OK( dbList ) def installDatabase( dbName, monitorFlag = True ): """ Install requested DB in MySQL server """ global mysqlRootPwd, mysqlPassword # Create entry in the static monitoring DB result = getAvailableDatabases( CSGlobals.getCSExtensions() ) if not result[ 'OK' ]: return result dbSystem = result[ 'Value' ][ dbName ][ 'System' ] if not mysqlRootPwd: rootPwdPath = cfgInstallPath( 'Database', 'RootPwd' ) return S_ERROR( 'Missing %s in %s' % ( rootPwdPath, cfgFile ) ) if not mysqlPassword: mysqlPassword = localCfg.getOption( cfgPath( 'Systems', 'Databases', 'Password' ), mysqlPassword ) if not mysqlPassword: mysqlPwdPath = cfgPath( 'Systems', 'Databases', 'Password' ) return S_ERROR( 'Missing %s in %s' % ( mysqlPwdPath, cfgFile ) ) gLogger.notice( 'Installing', dbName ) dbFile = glob.glob( os.path.join( rootPath, '*', '*', 'DB', '%s.sql' % dbName ) ) if not dbFile: error = 'Database %s not found' % dbName gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) dbFile = dbFile[0] # just check result = execMySQL( 'SHOW STATUS' ) if not 
result['OK']: error = 'Could not connect to MySQL server' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) # now creating the Database result = execMySQL( 'CREATE DATABASE `%s`' % dbName ) if not result['OK']: gLogger.error( 'Failed to create database', result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return result perms = "SELECT,INSERT,LOCK TABLES,UPDATE,DELETE,CREATE,DROP,ALTER," \ "CREATE VIEW,SHOW VIEW,INDEX,TRIGGER,ALTER ROUTINE,CREATE ROUTINE" for cmd in ["GRANT %s ON `%s`.* TO '%s'@'localhost' IDENTIFIED BY '%s'" % ( perms, dbName, mysqlUser, mysqlPassword ), "GRANT %s ON `%s`.* TO '%s'@'%s' IDENTIFIED BY '%s'" % ( perms, dbName, mysqlUser, mysqlHost, mysqlPassword ), "GRANT %s ON `%s`.* TO '%s'@'%%' IDENTIFIED BY '%s'" % ( perms, dbName, mysqlUser, mysqlPassword ) ]: result = execMySQL( cmd ) if not result['OK']: error = "Error executing '%s'" % cmd gLogger.error( error, result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execMySQL( 'FLUSH PRIVILEGES' ) if not result['OK']: gLogger.error( 'Failed to flush privileges', result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return result # first getting the lines to be executed, and then execute them try: cmdLines = _createMySQLCMDLines( dbFile ) # We need to run one SQL cmd at once, mysql is much happier that way. # Create a string of commands, ignoring comment lines sqlString = '\n'.join( x for x in cmdLines if not x.startswith( "--" ) ) # Now run each command (They are separated by ;) # Ignore any empty ones cmds = [ x.strip() for x in sqlString.split( ";" ) if x.strip() ] for cmd in cmds: result = execMySQL( cmd, dbName ) if not result['OK']: error = 'Failed to initialize Database' gLogger.notice( cmd ) gLogger.error( error, result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) except Exception, e: gLogger.error( str( e ) ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( str( e ) ) return S_OK( dbFile.split( '/' )[-4:-2] ) def uninstallDatabase( gConfig, dbName ): """ Remove a database from DIRAC """ result = getAvailableDatabases( CSGlobals.getCSExtensions() ) if not result[ 'OK' ]: return result dbSystem = result[ 'Value' ][ dbName ][ 'System' ] result = removeDatabaseOptionsFromCS( gConfig, dbSystem, dbName ) if not result [ 'OK' ]: return result return S_OK( 'DB successfully uninstalled' ) def _createMySQLCMDLines( dbFile ): """ Creates a list of MySQL commands to be executed, inspecting the dbFile(s) """ cmdLines = [] fd = open( dbFile ) dbLines = fd.readlines() fd.close() for line in dbLines: # Should we first source an SQL file (is this sql file an extension)?
if line.lower().startswith('source'): sourcedDBFileName = line.split( ' ' )[1].replace( '\n', '' ) gLogger.info( "Found file to source: %s" % sourcedDBFileName ) sourcedDBFile = os.path.join( rootPath, sourcedDBFileName ) fdSourced = open( sourcedDBFile ) dbLinesSourced = fdSourced.readlines() fdSourced.close() for lineSourced in dbLinesSourced: if lineSourced.strip(): cmdLines.append( lineSourced.strip() ) # Creating/adding cmdLines else: if line.strip(): cmdLines.append( line.strip() ) return cmdLines def execMySQL( cmd, dbName = 'mysql', localhost=False ): """ Execute MySQL Command """ global db from DIRAC.Core.Utilities.MySQL import MySQL if not mysqlRootPwd: return S_ERROR( 'MySQL root password is not defined' ) if dbName not in db: dbHost = mysqlHost if localhost: dbHost = 'localhost' db[dbName] = MySQL( dbHost, mysqlRootUser, mysqlRootPwd, dbName, mysqlPort ) if not db[dbName]._connected: error = 'Could not connect to MySQL server' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) return db[dbName]._query( cmd ) def _addMySQLToDiracCfg(): """ Add the database access info to the local configuration """ if not mysqlPassword: return S_ERROR( 'Missing %s in %s' % ( cfgInstallPath( 'Database', 'Password' ), cfgFile ) ) sectionPath = cfgPath( 'Systems', 'Databases' ) cfg = __getCfg( sectionPath, 'User', mysqlUser ) cfg.setOption( cfgPath( sectionPath, 'Password' ), mysqlPassword ) return _addCfgToDiracCfg( cfg ) def configureCE( ceName = '', ceType = '', cfg = None, currentSectionPath = '' ): """ Produce new dirac.cfg including configuration for new CE """ from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory from DIRAC import gConfig cesCfg = ResourcesDefaults.getComputingElementDefaults( ceName, ceType, cfg, currentSectionPath ) ceNameList = cesCfg.listSections() if not ceNameList: error = 'No CE Name provided' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) for ceName in ceNameList: if 'CEType' not in cesCfg[ceName]: error = 'Missing Type for CE "%s"' % ceName gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) localsiteCfg = localCfg['LocalSite'] # Replace Configuration under LocalSite with new Configuration for ceName in ceNameList: if localsiteCfg.existsKey( ceName ): gLogger.notice( ' Removing existing CE:', ceName ) localsiteCfg.deleteKey( ceName ) gLogger.notice( 'Configuring CE:', ceName ) localsiteCfg.createNewSection( ceName, contents = cesCfg[ceName] ) # Apply configuration and try to instantiate the CEs gConfig.loadCFG( localCfg ) for ceName in ceNameList: ceFactory = ComputingElementFactory() try: ceInstance = ceFactory.getCE( ceType, ceName ) except Exception: error = 'Failed to instantiate CE' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) if not ceInstance['OK']: error = 'Failed to instantiate CE: %s' % ceInstance['Message'] gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) # Everything is OK, we can save the new cfg localCfg.writeToFile( cfgFile ) gLogger.always( 'LocalSite section in %s has been updated with new configuration:' % os.path.basename( cfgFile ) ) gLogger.always( str( localCfg['LocalSite'] ) ) return S_OK( ceNameList ) def configureLocalDirector( ceNameList = '' ): """ Install a Local DIRAC TaskQueueDirector, basically write the proper configuration file """ if ceNameList: result = setupComponent( 'agent', 'WorkloadManagement', 'TaskQueueDirector', [] ) if not
result['OK']: return result result = MonitoringUtilities.monitorInstallation( 'agent', 'WorkloadManagement', 'TaskQueueDirector' ) if not result[ 'OK' ]: return result # Now write a local Configuration for the Director directorCfg = CFG() directorCfg.addKey( 'SubmitPools', 'DIRAC', 'Added by InstallTools' ) directorCfg.addKey( 'DefaultSubmitPools', 'DIRAC', 'Added by InstallTools' ) directorCfg.addKey( 'ComputingElements', ', '.join( ceNameList ), 'Added by InstallTools' ) result = addCfgToComponentCfg( 'agent', 'WorkloadManagement', 'TaskQueueDirector', directorCfg ) if not result['OK']: return result return runsvctrlComponent( 'WorkloadManagement', 'TaskQueueDirector', 't' ) def execCommand( timeout, cmd ): """ Execute command tuple and handle Error cases """ result = systemCall( timeout, cmd ) if not result['OK']: if timeout and result['Message'].find( 'Timeout' ) == 0: return result gLogger.error( 'Failed to execute', '%s: %s' % ( cmd[0], result['Message'] ) ) if exitOnError: DIRAC.exit( -1 ) return result if result['Value'][0]: error = 'Failed to execute' gLogger.error( error, cmd[0] ) gLogger.error( 'Exit code:' , ( '%s\n' % result['Value'][0] ) + '\n'.join( result['Value'][1:] ) ) if exitOnError: DIRAC.exit( -1 ) error = S_ERROR( error ) error['Value'] = result['Value'] return error gLogger.verbose( result['Value'][1] ) return result
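Every helper in the file above signals success and failure through DIRAC's standard result dictionaries rather than exceptions. A minimal sketch of that convention follows; the two functions are stand-ins that only mirror the dictionary shape (the real S_OK/S_ERROR live in DIRAC.Core.Utilities.ReturnValues):

# Hedged sketch of the S_OK / S_ERROR result convention used throughout
# InstallTools.py; these stand-ins only reproduce the dictionary shape.
def S_OK( value = None ):
  return { 'OK': True, 'Value': value }

def S_ERROR( message = '' ):
  return { 'OK': False, 'Message': message }

result = S_OK( [ 'runit/Web/httpd', 'runit/Web/paster' ] )
if not result['OK']:
  print 'failed: %s' % result['Message']   # failures carry 'Message'
else:
  print 'installed: %s' % result['Value']  # successes carry 'Value'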
vmendez/DIRAC
Core/Utilities/InstallTools.py
Python
gpl-3.0
97,592
[ "DIRAC" ]
0006e62952783f8f2d5851554feefb0b0ad0850e778aab1f7b4e00ced141b020
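Before the next file: the portal installers above render runit 'run' scripts from a template string and mark them executable. A minimal, self-contained sketch of that pattern is below; instancePath and linkedRootPath are hypothetical stand-ins for the module globals used in InstallTools.py:

# Hedged sketch of the runit 'run' script generation done by
# installPortal/installNewPortal above; all paths are hypothetical.
import os
import stat

instancePath = '/opt/dirac'        # stand-in for the module global
linkedRootPath = '/opt/dirac/pro'  # stand-in for the module global

runitWebAppDir = os.path.join( instancePath, 'runit', 'Web', 'WebApp' )
if not os.path.isdir( runitWebAppDir ):
  os.makedirs( runitWebAppDir )

runFile = os.path.join( runitWebAppDir, 'run' )
fd = open( runFile, 'w' )
fd.write( """#!/bin/bash
rcfile=%(bashrc)s
[ -e $rcfile ] && source $rcfile
exec python %(DIRAC)s/WebAppDIRAC/scripts/dirac-webapp-run.py -p < /dev/null
""" % { 'bashrc': os.path.join( instancePath, 'bashrc' ),
        'DIRAC': linkedRootPath } )
fd.close()
# runit only supervises the service if the 'run' file is executable
os.chmod( runFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP )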
# -*- coding: utf-8 -*- """ jinja2.compiler ~~~~~~~~~~~~~~~ Compiles nodes into python code. :copyright: (c) 2009 by the Jinja Team. :license: BSD. """ from cStringIO import StringIO from itertools import chain from copy import deepcopy from jinja2 import nodes from jinja2.visitor import NodeVisitor, NodeTransformer from jinja2.exceptions import TemplateAssertionError from jinja2.utils import Markup, concat, escape, is_python_keyword, next operators = { 'eq': '==', 'ne': '!=', 'gt': '>', 'gteq': '>=', 'lt': '<', 'lteq': '<=', 'in': 'in', 'notin': 'not in' } try: exec '(0 if 0 else 0)' except SyntaxError: have_condexpr = False else: have_condexpr = True def generate(node, environment, name, filename, stream=None): """Generate the python source for a node tree.""" if not isinstance(node, nodes.Template): raise TypeError('Can\'t compile non template nodes') generator = CodeGenerator(environment, name, filename, stream) generator.visit(node) if stream is None: return generator.stream.getvalue() def has_safe_repr(value): """Does the node have a safe representation?""" if value is None or value is NotImplemented or value is Ellipsis: return True if isinstance(value, (bool, int, long, float, complex, basestring, xrange, Markup)): return True if isinstance(value, (tuple, list, set, frozenset)): for item in value: if not has_safe_repr(item): return False return True elif isinstance(value, dict): for key, value in value.iteritems(): if not has_safe_repr(key): return False if not has_safe_repr(value): return False return True return False def find_undeclared(nodes, names): """Check if the names passed are accessed undeclared. The return value is a set of all the undeclared names from the sequence of names found. """ visitor = UndeclaredNameVisitor(names) try: for node in nodes: visitor.visit(node) except VisitorExit: pass return visitor.undeclared class Identifiers(object): """Tracks the status of identifiers in frames.""" def __init__(self): # variables that are known to be declared (probably from outer # frames or because they are special for the frame) self.declared = set() # undeclared variables from outer scopes self.outer_undeclared = set() # names that are accessed without being explicitly declared by # this one or any of the outer scopes. Names can appear both in # declared and undeclared. self.undeclared = set() # names that are declared locally self.declared_locally = set() # names that are declared by parameters self.declared_parameter = set() def add_special(self, name): """Register a special name like `loop`.""" self.undeclared.discard(name) self.declared.add(name) def is_declared(self, name, local_only=False): """Check if a name is declared in this or an outer scope.""" if name in self.declared_locally or name in self.declared_parameter: return True if local_only: return False return name in self.declared def copy(self): return deepcopy(self) class Frame(object): """Holds compile time information for us.""" def __init__(self, parent=None): self.identifiers = Identifiers() # a toplevel frame is the root + soft frames such as if conditions. self.toplevel = False # the root frame is basically just the outermost frame, so no if # conditions. This information is used to optimize inheritance # situations. self.rootlevel = False # in some dynamic inheritance situations the compiler needs to add # write tests around output statements. self.require_output_check = parent and parent.require_output_check # inside some tags we are using a buffer rather than yield statements. 
# this for example affects {% filter %} or {% macro %}. If a frame # is buffered this variable points to the name of the list used as # buffer. self.buffer = None # the name of the block we're in, otherwise None. self.block = parent and parent.block or None # a set of actually assigned names self.assigned_names = set() # the parent of this frame self.parent = parent if parent is not None: self.identifiers.declared.update( parent.identifiers.declared | parent.identifiers.declared_parameter | parent.assigned_names ) self.identifiers.outer_undeclared.update( parent.identifiers.undeclared - self.identifiers.declared ) self.buffer = parent.buffer def copy(self): """Create a copy of the current one.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.identifiers = object.__new__(self.identifiers.__class__) rv.identifiers.__dict__.update(self.identifiers.__dict__) return rv def inspect(self, nodes, hard_scope=False): """Walk the node and check for identifiers. If the scope is hard (eg: enforce on a python level) overrides from outer scopes are tracked differently. """ visitor = FrameIdentifierVisitor(self.identifiers, hard_scope) for node in nodes: visitor.visit(node) def find_shadowed(self, extra=()): """Find all the shadowed names. extra is an iterable of variables that may be defined with `add_special` which may occour scoped. """ i = self.identifiers return (i.declared | i.outer_undeclared) & \ (i.declared_locally | i.declared_parameter) | \ set(x for x in extra if i.is_declared(x)) def inner(self): """Return an inner frame.""" return Frame(self) def soft(self): """Return a soft frame. A soft frame may not be modified as standalone thing as it shares the resources with the frame it was created of, but it's not a rootlevel frame any longer. """ rv = self.copy() rv.rootlevel = False return rv __copy__ = copy class VisitorExit(RuntimeError): """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" class DependencyFinderVisitor(NodeVisitor): """A visitor that collects filter and test calls.""" def __init__(self): self.filters = set() self.tests = set() def visit_Filter(self, node): self.generic_visit(node) self.filters.add(node.name) def visit_Test(self, node): self.generic_visit(node) self.tests.add(node.name) def visit_Block(self, node): """Stop visiting at blocks.""" class UndeclaredNameVisitor(NodeVisitor): """A visitor that checks if a name is accessed without being declared. This is different from the frame visitor as it will not stop at closure frames. 
""" def __init__(self, names): self.names = set(names) self.undeclared = set() def visit_Name(self, node): if node.ctx == 'load' and node.name in self.names: self.undeclared.add(node.name) if self.undeclared == self.names: raise VisitorExit() else: self.names.discard(node.name) def visit_Block(self, node): """Stop visiting a blocks.""" class FrameIdentifierVisitor(NodeVisitor): """A visitor for `Frame.inspect`.""" def __init__(self, identifiers, hard_scope): self.identifiers = identifiers self.hard_scope = hard_scope def visit_Name(self, node): """All assignments to names go through this function.""" if node.ctx == 'store': self.identifiers.declared_locally.add(node.name) elif node.ctx == 'param': self.identifiers.declared_parameter.add(node.name) elif node.ctx == 'load' and not \ self.identifiers.is_declared(node.name, self.hard_scope): self.identifiers.undeclared.add(node.name) def visit_If(self, node): self.visit(node.test) real_identifiers = self.identifiers old_names = real_identifiers.declared | \ real_identifiers.declared_locally | \ real_identifiers.declared_parameter def inner_visit(nodes): if not nodes: return set() self.identifiers = real_identifiers.copy() for subnode in nodes: self.visit(subnode) rv = self.identifiers.declared_locally - old_names # we have to remember the undeclared variables of this branch # because we will have to pull them. real_identifiers.undeclared.update(self.identifiers.undeclared) self.identifiers = real_identifiers return rv body = inner_visit(node.body) else_ = inner_visit(node.else_ or ()) # the differences between the two branches are also pulled as # undeclared variables real_identifiers.undeclared.update(body.symmetric_difference(else_)) # remember those that are declared. real_identifiers.declared_locally.update(body | else_) def visit_Macro(self, node): self.identifiers.declared_locally.add(node.name) def visit_Import(self, node): self.generic_visit(node) self.identifiers.declared_locally.add(node.target) def visit_FromImport(self, node): self.generic_visit(node) for name in node.names: if isinstance(name, tuple): self.identifiers.declared_locally.add(name[1]) else: self.identifiers.declared_locally.add(name) def visit_Assign(self, node): """Visit assignments in the correct order.""" self.visit(node.node) self.visit(node.target) def visit_For(self, node): """Visiting stops at for blocks. However the block sequence is visited as part of the outer scope. """ self.visit(node.iter) def visit_CallBlock(self, node): for child in node.iter_child_nodes(exclude=('body',)): self.visit(child) def visit_FilterBlock(self, node): self.visit(node.filter) def visit_Scope(self, node): """Stop visiting at scopes.""" def visit_Block(self, node): """Stop visiting at blocks.""" class CompilerExit(Exception): """Raised if the compiler encountered a situation where it just doesn't make sense to further process the code. Any block that raises such an exception is not further processed. """ class CodeGenerator(NodeVisitor): def __init__(self, environment, name, filename, stream=None): if stream is None: stream = StringIO() self.environment = environment self.name = name self.filename = filename self.stream = stream self.created_block_context = False # aliases for imports self.import_aliases = {} # a registry for all blocks. Because blocks are moved out # into the global python scope they are registered here self.blocks = {} # the number of extends statements so far self.extends_so_far = 0 # some templates have a rootlevel extends. 
In this case we # can safely assume that we're a child template and do some # more optimizations. self.has_known_extends = False # the current line number self.code_lineno = 1 # registry of all filters and tests (global, not block local) self.tests = {} self.filters = {} # the debug information self.debug_info = [] self._write_debug_info = None # the number of new lines before the next write() self._new_lines = 0 # the line number of the last written statement self._last_line = 0 # true if nothing was written so far. self._first_write = True # used by the `temporary_identifier` method to get new # unique, temporary identifier self._last_identifier = 0 # the current indentation self._indentation = 0 # -- Various compilation helpers def fail(self, msg, lineno): """Fail with a `TemplateAssertionError`.""" raise TemplateAssertionError(msg, lineno, self.name, self.filename) def temporary_identifier(self): """Get a new unique identifier.""" self._last_identifier += 1 return 't_%d' % self._last_identifier def buffer(self, frame): """Enable buffering for the frame from that point onwards.""" frame.buffer = self.temporary_identifier() self.writeline('%s = []' % frame.buffer) def return_buffer_contents(self, frame): """Return the buffer contents of the frame.""" if self.environment.autoescape: self.writeline('return Markup(concat(%s))' % frame.buffer) else: self.writeline('return concat(%s)' % frame.buffer) def indent(self): """Indent by one.""" self._indentation += 1 def outdent(self, step=1): """Outdent by step.""" self._indentation -= step def start_write(self, frame, node=None): """Yield or write into the frame buffer.""" if frame.buffer is None: self.writeline('yield ', node) else: self.writeline('%s.append(' % frame.buffer, node) def end_write(self, frame): """End the writing process started by `start_write`.""" if frame.buffer is not None: self.write(')') def simple_write(self, s, frame, node=None): """Simple shortcut for start_write + write + end_write.""" self.start_write(frame, node) self.write(s) self.end_write(frame) def blockvisit(self, nodes, frame): """Visit a list of nodes as block in a frame. If the current frame is no buffer a dummy ``if 0: yield None`` is written automatically unless the force_generator parameter is set to False. """ if frame.buffer is None: self.writeline('if 0: yield None') else: self.writeline('pass') try: for node in nodes: self.visit(node, frame) except CompilerExit: pass def write(self, x): """Write a string into the output stream.""" if self._new_lines: if not self._first_write: self.stream.write('\n' * self._new_lines) self.code_lineno += self._new_lines if self._write_debug_info is not None: self.debug_info.append((self._write_debug_info, self.code_lineno)) self._write_debug_info = None self._first_write = False self.stream.write(' ' * self._indentation) self._new_lines = 0 self.stream.write(x) def writeline(self, x, node=None, extra=0): """Combination of newline and write.""" self.newline(node, extra) self.write(x) def newline(self, node=None, extra=0): """Add one or more newlines before the next write.""" self._new_lines = max(self._new_lines, 1 + extra) if node is not None and node.lineno != self._last_line: self._write_debug_info = node.lineno self._last_line = node.lineno def signature(self, node, frame, extra_kwargs=None): """Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occour. 
The extra keyword arguments should be given as python dict. """ # if any of the given keyword arguments is a python keyword # we have to make sure that no invalid call is created. kwarg_workaround = False for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): if is_python_keyword(kwarg): kwarg_workaround = True break for arg in node.args: self.write(', ') self.visit(arg, frame) if not kwarg_workaround: for kwarg in node.kwargs: self.write(', ') self.visit(kwarg, frame) if extra_kwargs is not None: for key, value in extra_kwargs.iteritems(): self.write(', %s=%s' % (key, value)) if node.dyn_args: self.write(', *') self.visit(node.dyn_args, frame) if kwarg_workaround: if node.dyn_kwargs is not None: self.write(', **dict({') else: self.write(', **{') for kwarg in node.kwargs: self.write('%r: ' % kwarg.key) self.visit(kwarg.value, frame) self.write(', ') if extra_kwargs is not None: for key, value in extra_kwargs.iteritems(): self.write('%r: %s, ' % (key, value)) if node.dyn_kwargs is not None: self.write('}, **') self.visit(node.dyn_kwargs, frame) self.write(')') else: self.write('}') elif node.dyn_kwargs is not None: self.write(', **') self.visit(node.dyn_kwargs, frame) def pull_locals(self, frame): """Pull all the references identifiers into the local scope.""" for name in frame.identifiers.undeclared: self.writeline('l_%s = context.resolve(%r)' % (name, name)) def pull_dependencies(self, nodes): """Pull all the dependencies.""" visitor = DependencyFinderVisitor() for node in nodes: visitor.visit(node) for dependency in 'filters', 'tests': mapping = getattr(self, dependency) for name in getattr(visitor, dependency): if name not in mapping: mapping[name] = self.temporary_identifier() self.writeline('%s = environment.%s[%r]' % (mapping[name], dependency, name)) def unoptimize_scope(self, frame): """Disable Python optimizations for the frame.""" # XXX: this is not that nice but it has no real overhead. It # mainly works because python finds the locals before dead code # is removed. If that breaks we have to add a dummy function # that just accepts the arguments and does nothing. if frame.identifiers.declared: self.writeline('if 0: dummy(%s)' % ', '.join( 'l_' + name for name in frame.identifiers.declared)) def push_scope(self, frame, extra_vars=()): """This function returns all the shadowed variables in a dict in the form name: alias and will write the required assignments into the current scope. No indentation takes place. This also predefines locally declared variables from the loop body because under some circumstances it may be the case that `extra_vars` is passed to `Frame.find_shadowed`. 
""" aliases = {} for name in frame.find_shadowed(extra_vars): aliases[name] = ident = self.temporary_identifier() self.writeline('%s = l_%s' % (ident, name)) to_declare = set() for name in frame.identifiers.declared_locally: if name not in aliases: to_declare.add('l_' + name) if to_declare: self.writeline(' = '.join(to_declare) + ' = missing') return aliases def pop_scope(self, aliases, frame): """Restore all aliases and delete unused variables.""" for name, alias in aliases.iteritems(): self.writeline('l_%s = %s' % (name, alias)) to_delete = set() for name in frame.identifiers.declared_locally: if name not in aliases: to_delete.add('l_' + name) if to_delete: # we cannot use the del statement here because enclosed # scopes can trigger a SyntaxError: # a = 42; b = lambda: a; del a self.writeline(' = '.join(to_delete) + ' = missing') def function_scoping(self, node, frame, children=None, find_special=True): """In Jinja a few statements require the help of anonymous functions. Those are currently macros and call blocks and in the future also recursive loops. As there is currently technical limitation that doesn't allow reading and writing a variable in a scope where the initial value is coming from an outer scope, this function tries to fall back with a common error message. Additionally the frame passed is modified so that the argumetns are collected and callers are looked up. This will return the modified frame. """ # we have to iterate twice over it, make sure that works if children is None: children = node.iter_child_nodes() children = list(children) func_frame = frame.inner() func_frame.inspect(children, hard_scope=True) # variables that are undeclared (accessed before declaration) and # declared locally *and* part of an outside scope raise a template # assertion error. Reason: we can't generate reasonable code from # it without aliasing all the variables. # this could be fixed in Python 3 where we have the nonlocal # keyword or if we switch to bytecode generation overriden_closure_vars = ( func_frame.identifiers.undeclared & func_frame.identifiers.declared & (func_frame.identifiers.declared_locally | func_frame.identifiers.declared_parameter) ) if overriden_closure_vars: self.fail('It\'s not possible to set and access variables ' 'derived from an outer scope! (affects: %s)' % ', '.join(sorted(overriden_closure_vars)), node.lineno) # remove variables from a closure from the frame's undeclared # identifiers. 
func_frame.identifiers.undeclared -= ( func_frame.identifiers.undeclared & func_frame.identifiers.declared ) # no special variables for this scope, abort early if not find_special: return func_frame func_frame.accesses_kwargs = False func_frame.accesses_varargs = False func_frame.accesses_caller = False func_frame.arguments = args = ['l_' + x.name for x in node.args] undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs')) if 'caller' in undeclared: func_frame.accesses_caller = True func_frame.identifiers.add_special('caller') args.append('l_caller') if 'kwargs' in undeclared: func_frame.accesses_kwargs = True func_frame.identifiers.add_special('kwargs') args.append('l_kwargs') if 'varargs' in undeclared: func_frame.accesses_varargs = True func_frame.identifiers.add_special('varargs') args.append('l_varargs') return func_frame def macro_body(self, node, frame, children=None): """Dump the function def of a macro or call block.""" frame = self.function_scoping(node, frame, children) # macros are delayed, they never require output checks frame.require_output_check = False args = frame.arguments # XXX: this is an ugly fix for the loop nesting bug # (tests.test_old_bugs.test_loop_call_bug). This works around # a identifier nesting problem we have in general. It's just more # likely to happen in loops which is why we work around it. The # real solution would be "nonlocal" all the identifiers that are # leaking into a new python frame and might be used both unassigned # and assigned. if 'loop' in frame.identifiers.declared: args = args + ['l_loop=l_loop'] self.writeline('def macro(%s):' % ', '.join(args), node) self.indent() self.buffer(frame) self.pull_locals(frame) self.blockvisit(node.body, frame) self.return_buffer_contents(frame) self.outdent() return frame def macro_def(self, node, frame): """Dump the macro definition for the def created by macro_body.""" arg_tuple = ', '.join(repr(x.name) for x in node.args) name = getattr(node, 'name', None) if len(node.args) == 1: arg_tuple += ',' self.write('Macro(environment, macro, %r, (%s), (' % (name, arg_tuple)) for arg in node.defaults: self.visit(arg, frame) self.write(', ') self.write('), %r, %r, %r)' % ( bool(frame.accesses_kwargs), bool(frame.accesses_varargs), bool(frame.accesses_caller) )) def position(self, node): """Return a human readable position for the node.""" rv = 'line %d' % node.lineno if self.name is not None: rv += ' in ' + repr(self.name) return rv # -- Statement Visitors def visit_Template(self, node, frame=None): assert frame is None, 'no root frame allowed' from jinja2.runtime import __all__ as exported self.writeline('from __future__ import division') self.writeline('from jinja2.runtime import ' + ', '.join(exported)) # do we have an extends tag at all? If not, we can save some # overhead by just not processing any inheritance code. have_extends = node.find(nodes.Extends) is not None # find all blocks for block in node.find_all(nodes.Block): if block.name in self.blocks: self.fail('block %r defined twice' % block.name, block.lineno) self.blocks[block.name] = block # find all imports and import them for import_ in node.find_all(nodes.ImportedName): if import_.importname not in self.import_aliases: imp = import_.importname self.import_aliases[imp] = alias = self.temporary_identifier() if '.' 
in imp: module, obj = imp.rsplit('.', 1) self.writeline('from %s import %s as %s' % (module, obj, alias)) else: self.writeline('import %s as %s' % (imp, alias)) # add the load name self.writeline('name = %r' % self.name) # generate the root render function. self.writeline('def root(context, environment=environment):', extra=1) # process the root frame = Frame() frame.inspect(node.body) frame.toplevel = frame.rootlevel = True frame.require_output_check = have_extends and not self.has_known_extends self.indent() if have_extends: self.writeline('parent_template = None') if 'self' in find_undeclared(node.body, ('self',)): frame.identifiers.add_special('self') self.writeline('l_self = TemplateReference(context)') self.pull_locals(frame) self.pull_dependencies(node.body) self.blockvisit(node.body, frame) self.outdent() # make sure that the parent root is called. if have_extends: if not self.has_known_extends: self.indent() self.writeline('if parent_template is not None:') self.indent() self.writeline('for event in parent_template.' 'root_render_func(context):') self.indent() self.writeline('yield event') self.outdent(2 + (not self.has_known_extends)) # at this point we now have the blocks collected and can visit them too. for name, block in self.blocks.iteritems(): block_frame = Frame() block_frame.inspect(block.body) block_frame.block = name self.writeline('def block_%s(context, environment=environment):' % name, block, 1) self.indent() undeclared = find_undeclared(block.body, ('self', 'super')) if 'self' in undeclared: block_frame.identifiers.add_special('self') self.writeline('l_self = TemplateReference(context)') if 'super' in undeclared: block_frame.identifiers.add_special('super') self.writeline('l_super = context.super(%r, ' 'block_%s)' % (name, name)) self.pull_locals(block_frame) self.pull_dependencies(block.body) self.blockvisit(block.body, block_frame) self.outdent() self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x) for x in self.blocks), extra=1) # add a function that returns the debug info self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x in self.debug_info)) def visit_Block(self, node, frame): """Call a block and register it for the template.""" level = 1 if frame.toplevel: # if we know that we are a child template, there is no need to # check if we are one if self.has_known_extends: return if self.extends_so_far > 0: self.writeline('if parent_template is None:') self.indent() level += 1 context = node.scoped and 'context.derived(locals())' or 'context' self.writeline('for event in context.blocks[%r][0](%s):' % ( node.name, context), node) self.indent() self.simple_write('event', frame) self.outdent(level) def visit_Extends(self, node, frame): """Calls the extender.""" if not frame.toplevel: self.fail('cannot use extend from a non top-level scope', node.lineno) # if the number of extends statements in general is zero so # far, we don't have to add a check if something extended # the template before this one. if self.extends_so_far > 0: # if we have a known extends we just add a template runtime # error into the generated code. We could catch that at compile # time too, but i welcome it not to confuse users by throwing the # same error at different times just "because we can". 
if not self.has_known_extends: self.writeline('if parent_template is not None:') self.indent() self.writeline('raise TemplateRuntimeError(%r)' % 'extended multiple times') self.outdent() # if we have a known extends already we don't need that code here # as we know that the template execution will end here. if self.has_known_extends: raise CompilerExit() self.writeline('parent_template = environment.get_template(', node) self.visit(node.template, frame) self.write(', %r)' % self.name) self.writeline('for name, parent_block in parent_template.' 'blocks.iteritems():') self.indent() self.writeline('context.blocks.setdefault(name, []).' 'append(parent_block)') self.outdent() # if this extends statement was in the root level we can take # advantage of that information and simplify the generated code # in the top level from this point onwards if frame.rootlevel: self.has_known_extends = True # and now we have one more self.extends_so_far += 1 def visit_Include(self, node, frame): """Handles includes.""" if node.with_context: self.unoptimize_scope(frame) if node.ignore_missing: self.writeline('try:') self.indent() self.writeline('template = environment.get_template(', node) self.visit(node.template, frame) self.write(', %r)' % self.name) if node.ignore_missing: self.outdent() self.writeline('except TemplateNotFound:') self.indent() self.writeline('pass') self.outdent() self.writeline('else:') self.indent() if node.with_context: self.writeline('for event in template.root_render_func(' 'template.new_context(context.parent, True, ' 'locals())):') else: self.writeline('for event in template.module._body_stream:') self.indent() self.simple_write('event', frame) self.outdent() if node.ignore_missing: self.outdent() def visit_Import(self, node, frame): """Visit regular imports.""" if node.with_context: self.unoptimize_scope(frame) self.writeline('l_%s = ' % node.target, node) if frame.toplevel: self.write('context.vars[%r] = ' % node.target) self.write('environment.get_template(') self.visit(node.template, frame) self.write(', %r).' % self.name) if node.with_context: self.write('make_module(context.parent, True, locals())') else: self.write('module') if frame.toplevel and not node.target.startswith('_'): self.writeline('context.exported_vars.discard(%r)' % node.target) frame.assigned_names.add(node.target) def visit_FromImport(self, node, frame): """Visit named imports.""" self.newline(node) self.write('included_template = environment.get_template(') self.visit(node.template, frame) self.write(', %r).' 
% self.name) if node.with_context: self.write('make_module(context.parent, True)') else: self.write('module') var_names = [] discarded_names = [] for name in node.names: if isinstance(name, tuple): name, alias = name else: alias = name self.writeline('l_%s = getattr(included_template, ' '%r, missing)' % (alias, name)) self.writeline('if l_%s is missing:' % alias) self.indent() self.writeline('l_%s = environment.undefined(%r %% ' 'included_template.__name__, ' 'name=%r)' % (alias, 'the template %%r (imported on %s) does ' 'not export the requested name %s' % ( self.position(node), repr(name) ), name)) self.outdent() if frame.toplevel: var_names.append(alias) if not alias.startswith('_'): discarded_names.append(alias) frame.assigned_names.add(alias) if var_names: if len(var_names) == 1: name = var_names[0] self.writeline('context.vars[%r] = l_%s' % (name, name)) else: self.writeline('context.vars.update({%s})' % ', '.join( '%r: l_%s' % (name, name) for name in var_names )) if discarded_names: if len(discarded_names) == 1: self.writeline('context.exported_vars.discard(%r)' % discarded_names[0]) else: self.writeline('context.exported_vars.difference_' 'update((%s))' % ', '.join(map(repr, discarded_names))) def visit_For(self, node, frame): # when calculating the nodes for the inner frame we have to exclude # the iterator contents from it children = node.iter_child_nodes(exclude=('iter',)) if node.recursive: loop_frame = self.function_scoping(node, frame, children, find_special=False) else: loop_frame = frame.inner() loop_frame.inspect(children) # try to figure out if we have an extended loop. An extended loop # is necessary if the loop is in recursive mode if the special loop # variable is accessed in the body. extended_loop = node.recursive or 'loop' in \ find_undeclared(node.iter_child_nodes( only=('body',)), ('loop',)) # if we don't have an recursive loop we have to find the shadowed # variables at that point. Because loops can be nested but the loop # variable is a special one we have to enforce aliasing for it. if not node.recursive: aliases = self.push_scope(loop_frame, ('loop',)) # otherwise we set up a buffer and add a function def else: self.writeline('def loop(reciter, loop_render_func):', node) self.indent() self.buffer(loop_frame) aliases = {} # make sure the loop variable is a special one and raise a template # assertion error if a loop tries to write to loop if extended_loop: loop_frame.identifiers.add_special('loop') for name in node.find_all(nodes.Name): if name.ctx == 'store' and name.name == 'loop': self.fail('Can\'t assign to special loop variable ' 'in for-loop target', name.lineno) self.pull_locals(loop_frame) if node.else_: iteration_indicator = self.temporary_identifier() self.writeline('%s = 1' % iteration_indicator) # Create a fake parent loop if the else or test section of a # loop is accessing the special loop variable and no parent loop # exists. if 'loop' not in aliases and 'loop' in find_undeclared( node.iter_child_nodes(only=('else_', 'test')), ('loop',)): self.writeline("l_loop = environment.undefined(%r, name='loop')" % ("'loop' is undefined. the filter section of a loop as well " "as the else block doesn't have access to the special 'loop'" " variable of the current loop. Because there is no parent " "loop it's undefined. 
Happened in loop on %s" % self.position(node))) self.writeline('for ', node) self.visit(node.target, loop_frame) self.write(extended_loop and ', l_loop in LoopContext(' or ' in ') # if we have an extened loop and a node test, we filter in the # "outer frame". if extended_loop and node.test is not None: self.write('(') self.visit(node.target, loop_frame) self.write(' for ') self.visit(node.target, loop_frame) self.write(' in ') if node.recursive: self.write('reciter') else: self.visit(node.iter, loop_frame) self.write(' if (') test_frame = loop_frame.copy() self.visit(node.test, test_frame) self.write('))') elif node.recursive: self.write('reciter') else: self.visit(node.iter, loop_frame) if node.recursive: self.write(', recurse=loop_render_func):') else: self.write(extended_loop and '):' or ':') # tests in not extended loops become a continue if not extended_loop and node.test is not None: self.indent() self.writeline('if not ') self.visit(node.test, loop_frame) self.write(':') self.indent() self.writeline('continue') self.outdent(2) self.indent() self.blockvisit(node.body, loop_frame) if node.else_: self.writeline('%s = 0' % iteration_indicator) self.outdent() if node.else_: self.writeline('if %s:' % iteration_indicator) self.indent() self.blockvisit(node.else_, loop_frame) self.outdent() # reset the aliases if there are any. if not node.recursive: self.pop_scope(aliases, loop_frame) # if the node was recursive we have to return the buffer contents # and start the iteration code if node.recursive: self.return_buffer_contents(loop_frame) self.outdent() self.start_write(frame, node) self.write('loop(') self.visit(node.iter, frame) self.write(', loop)') self.end_write(frame) def visit_If(self, node, frame): if_frame = frame.soft() self.writeline('if ', node) self.visit(node.test, if_frame) self.write(':') self.indent() self.blockvisit(node.body, if_frame) self.outdent() if node.else_: self.writeline('else:') self.indent() self.blockvisit(node.else_, if_frame) self.outdent() def visit_Macro(self, node, frame): macro_frame = self.macro_body(node, frame) self.newline() if frame.toplevel: if not node.name.startswith('_'): self.write('context.exported_vars.add(%r)' % node.name) self.writeline('context.vars[%r] = ' % node.name) self.write('l_%s = ' % node.name) self.macro_def(node, macro_frame) frame.assigned_names.add(node.name) def visit_CallBlock(self, node, frame): children = node.iter_child_nodes(exclude=('call',)) call_frame = self.macro_body(node, frame, children) self.writeline('caller = ') self.macro_def(node, call_frame) self.start_write(frame, node) self.visit_Call(node.call, call_frame, forward_caller=True) self.end_write(frame) def visit_FilterBlock(self, node, frame): filter_frame = frame.inner() filter_frame.inspect(node.iter_child_nodes()) aliases = self.push_scope(filter_frame) self.pull_locals(filter_frame) self.buffer(filter_frame) self.blockvisit(node.body, filter_frame) self.start_write(frame, node) self.visit_Filter(node.filter, filter_frame) self.end_write(frame) self.pop_scope(aliases, filter_frame) def visit_ExprStmt(self, node, frame): self.newline(node) self.visit(node.node, frame) def visit_Output(self, node, frame): # if we have a known extends statement, we don't output anything # if we are in a require_output_check section if self.has_known_extends and frame.require_output_check: return if self.environment.finalize: finalize = lambda x: unicode(self.environment.finalize(x)) else: finalize = unicode self.newline(node) # if we are inside a frame that requires output 
checking, we do so outdent_later = False if frame.require_output_check: self.writeline('if parent_template is None:') self.indent() outdent_later = True # try to evaluate as many chunks as possible into a static # string at compile time. body = [] for child in node.nodes: try: const = child.as_const() except nodes.Impossible: body.append(child) continue try: if self.environment.autoescape: if hasattr(const, '__html__'): const = const.__html__() else: const = escape(const) const = finalize(const) except: # if something goes wrong here we evaluate the node # at runtime for easier debugging body.append(child) continue if body and isinstance(body[-1], list): body[-1].append(const) else: body.append([const]) # if we have less than 3 nodes or a buffer we yield or extend/append if len(body) < 3 or frame.buffer is not None: if frame.buffer is not None: # for one item we append, for more we extend if len(body) == 1: self.writeline('%s.append(' % frame.buffer) else: self.writeline('%s.extend((' % frame.buffer) self.indent() for item in body: if isinstance(item, list): val = repr(concat(item)) if frame.buffer is None: self.writeline('yield ' + val) else: self.writeline(val + ', ') else: if frame.buffer is None: self.writeline('yield ', item) else: self.newline(item) close = 1 if self.environment.autoescape: self.write('escape(') else: self.write('to_string(') if self.environment.finalize is not None: self.write('environment.finalize(') close += 1 self.visit(item, frame) self.write(')' * close) if frame.buffer is not None: self.write(', ') if frame.buffer is not None: # close the open parentheses self.outdent() self.writeline(len(body) == 1 and ')' or '))') # otherwise we create a format string as this is faster in that case else: format = [] arguments = [] for item in body: if isinstance(item, list): format.append(concat(item).replace('%', '%%')) else: format.append('%s') arguments.append(item) self.writeline('yield ') self.write(repr(concat(format)) + ' % (') idx = -1 self.indent() for argument in arguments: self.newline(argument) close = 0 if self.environment.autoescape: self.write('escape(') close += 1 if self.environment.finalize is not None: self.write('environment.finalize(') close += 1 self.visit(argument, frame) self.write(')' * close + ', ') self.outdent() self.writeline(')') if outdent_later: self.outdent() def visit_Assign(self, node, frame): self.newline(node) # toplevel assignments however go into the local namespace and # the current template's context. We create a copy of the frame # here and add a set so that the Name visitor can add the assigned # names here. if frame.toplevel: assignment_frame = frame.copy() assignment_frame.toplevel_assignments = set() else: assignment_frame = frame self.visit(node.target, assignment_frame) self.write(' = ') self.visit(node.node, frame) # make sure toplevel assignments are added to the context. 
if frame.toplevel: public_names = [x for x in assignment_frame.toplevel_assignments if not x.startswith('_')] if len(assignment_frame.toplevel_assignments) == 1: name = next(iter(assignment_frame.toplevel_assignments)) self.writeline('context.vars[%r] = l_%s' % (name, name)) else: self.writeline('context.vars.update({') for idx, name in enumerate(assignment_frame.toplevel_assignments): if idx: self.write(', ') self.write('%r: l_%s' % (name, name)) self.write('})') if public_names: if len(public_names) == 1: self.writeline('context.exported_vars.add(%r)' % public_names[0]) else: self.writeline('context.exported_vars.update((%s))' % ', '.join(map(repr, public_names))) # -- Expression Visitors def visit_Name(self, node, frame): if node.ctx == 'store' and frame.toplevel: frame.toplevel_assignments.add(node.name) self.write('l_' + node.name) frame.assigned_names.add(node.name) def visit_Const(self, node, frame): val = node.value if isinstance(val, float): self.write(str(val)) else: self.write(repr(val)) def visit_TemplateData(self, node, frame): self.write(repr(node.as_const())) def visit_Tuple(self, node, frame): self.write('(') idx = -1 for idx, item in enumerate(node.items): if idx: self.write(', ') self.visit(item, frame) self.write(idx == 0 and ',)' or ')') def visit_List(self, node, frame): self.write('[') for idx, item in enumerate(node.items): if idx: self.write(', ') self.visit(item, frame) self.write(']') def visit_Dict(self, node, frame): self.write('{') for idx, item in enumerate(node.items): if idx: self.write(', ') self.visit(item.key, frame) self.write(': ') self.visit(item.value, frame) self.write('}') def binop(operator): def visitor(self, node, frame): self.write('(') self.visit(node.left, frame) self.write(' %s ' % operator) self.visit(node.right, frame) self.write(')') return visitor def uaop(operator): def visitor(self, node, frame): self.write('(' + operator) self.visit(node.node, frame) self.write(')') return visitor visit_Add = binop('+') visit_Sub = binop('-') visit_Mul = binop('*') visit_Div = binop('/') visit_FloorDiv = binop('//') visit_Pow = binop('**') visit_Mod = binop('%') visit_And = binop('and') visit_Or = binop('or') visit_Pos = uaop('+') visit_Neg = uaop('-') visit_Not = uaop('not ') del binop, uaop def visit_Concat(self, node, frame): self.write('%s((' % (self.environment.autoescape and 'markup_join' or 'unicode_join')) for arg in node.nodes: self.visit(arg, frame) self.write(', ') self.write('))') def visit_Compare(self, node, frame): self.visit(node.expr, frame) for op in node.ops: self.visit(op, frame) def visit_Operand(self, node, frame): self.write(' %s ' % operators[node.op]) self.visit(node.expr, frame) def visit_Getattr(self, node, frame): self.write('environment.getattr(') self.visit(node.node, frame) self.write(', %r)' % node.attr) def visit_Getitem(self, node, frame): # slices bypass the environment getitem method. 
if isinstance(node.arg, nodes.Slice): self.visit(node.node, frame) self.write('[') self.visit(node.arg, frame) self.write(']') else: self.write('environment.getitem(') self.visit(node.node, frame) self.write(', ') self.visit(node.arg, frame) self.write(')') def visit_Slice(self, node, frame): if node.start is not None: self.visit(node.start, frame) self.write(':') if node.stop is not None: self.visit(node.stop, frame) if node.step is not None: self.write(':') self.visit(node.step, frame) def visit_Filter(self, node, frame): self.write(self.filters[node.name] + '(') func = self.environment.filters.get(node.name) if func is None: self.fail('no filter named %r' % node.name, node.lineno) if getattr(func, 'contextfilter', False): self.write('context, ') elif getattr(func, 'environmentfilter', False): self.write('environment, ') # if the filter node is None we are inside a filter block # and want to write to the current buffer if node.node is not None: self.visit(node.node, frame) elif self.environment.autoescape: self.write('Markup(concat(%s))' % frame.buffer) else: self.write('concat(%s)' % frame.buffer) self.signature(node, frame) self.write(')') def visit_Test(self, node, frame): self.write(self.tests[node.name] + '(') if node.name not in self.environment.tests: self.fail('no test named %r' % node.name, node.lineno) self.visit(node.node, frame) self.signature(node, frame) self.write(')') def visit_CondExpr(self, node, frame): def write_expr2(): if node.expr2 is not None: return self.visit(node.expr2, frame) self.write('environment.undefined(%r)' % ('the inline if-' 'expression on %s evaluated to false and ' 'no else section was defined.' % self.position(node))) if not have_condexpr: self.write('((') self.visit(node.test, frame) self.write(') and (') self.visit(node.expr1, frame) self.write(',) or (') write_expr2() self.write(',))[0]') else: self.write('(') self.visit(node.expr1, frame) self.write(' if ') self.visit(node.test, frame) self.write(' else ') write_expr2() self.write(')') def visit_Call(self, node, frame, forward_caller=False): if self.environment.sandboxed: self.write('environment.call(context, ') else: self.write('context.call(') self.visit(node.node, frame) extra_kwargs = forward_caller and {'caller': 'caller'} or None self.signature(node, frame, extra_kwargs) self.write(')') def visit_Keyword(self, node, frame): self.write(node.key + '=') self.visit(node.value, frame) # -- Unused nodes for extensions def visit_MarkSafe(self, node, frame): self.write('Markup(') self.visit(node.expr, frame) self.write(')') def visit_EnvironmentAttribute(self, node, frame): self.write('environment.' + node.name) def visit_ExtensionAttribute(self, node, frame): self.write('environment.extensions[%r].%s' % (node.identifier, node.name)) def visit_ImportedName(self, node, frame): self.write(self.import_aliases[node.importname]) def visit_InternalName(self, node, frame): self.write(node.name) def visit_ContextReference(self, node, frame): self.write('context') def visit_Continue(self, node, frame): self.writeline('continue', node) def visit_Break(self, node, frame): self.writeline('break', node) def visit_Scope(self, node, frame): scope_frame = frame.inner() scope_frame.inspect(node.iter_child_nodes()) aliases = self.push_scope(scope_frame) self.pull_locals(scope_frame) self.blockvisit(node.body, scope_frame) self.pop_scope(aliases, scope_frame)
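A minimal usage sketch for this compiler module: parse a template into an abstract syntax tree with Environment.parse, then hand it to the generate() function defined at the top of this file to inspect the Python source the CodeGenerator emits. The template string here is arbitrary:

# Hedged sketch: dump the generated Python source for a small template.
from jinja2 import Environment
from jinja2.compiler import generate

env = Environment()
ast = env.parse(u'Hello {{ name }}!')                    # a nodes.Template tree
source = generate(ast, env, '<template>', '<template>')  # returns a string when no stream is given
print source  # shows the compiled root() render function, blocks dict and debug_info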
yesudeep/tisshrmlr
app/jinja2/jinja2/compiler.py
Python
mit
57,185
[ "VisIt" ]
1290584dc50192b9a2b7767fdf1a613f3c9cd00a5671b2a258a80de1bf7e8a44
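The visitor methods above each emit a fragment of Python source; Jinja2 exposes the assembled result through Environment.compile(source, raw=True). A minimal sketch (the template string is an arbitrary example, not taken from the file above):

from jinja2 import Environment

env = Environment()
# raw=True returns the generated Python module as a string instead of a
# code object, so the output of visitors like visit_Getattr and
# visit_CondExpr can be inspected directly.
generated = env.compile("{{ user.name if user else 'anonymous' }}", raw=True)
print(generated)  # contains calls like environment.getattr(...) and the
                  # inline-if expansion produced by visit_CondExpr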
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, print_function, unicode_literals, \ absolute_import from scipy.linalg import polar import numpy as np import itertools import warnings import collections from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.core.operations import SymmOp from pymatgen.core.lattice import Lattice """ This module provides a base class for tensor-like objects and methods for basic tensor manipulation. It also provides a class, SquareTensor, that provides basic methods for creating and manipulating rank 2 tensors """ __author__ = "Maarten de Jong" __copyright__ = "Copyright 2012, The Materials Project" __credits__ = ("Joseph Montoya, Shyam Dwaraknath, Wei Chen, " "Mark Asta, Anubhav Jain, Terence Lew") __version__ = "1.0" __maintainer__ = "Joseph Montoya" __email__ = "montoyjh@lbl.gov" __status__ = "Development" __date__ = "March 22, 2012" voigt_map = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)] reverse_voigt_map = np.array([[0, 5, 4], [5, 1, 3], [4, 3, 2]]) class Tensor(np.ndarray): """ Base class for doing useful general operations on Nth order tensors, without restrictions on the type (stress, elastic, strain, piezo, etc.) """ def __new__(cls, input_array, vscale=None, check_rank=None): """ Create a Tensor object. Note that the constructor uses __new__ rather than __init__ according to the standard method of subclassing numpy ndarrays. Args: input_array: (array-like with shape 3^N): array-like representing a tensor quantity in standard (i. e. non-voigt) notation vscale: (N x M array-like): a matrix corresponding to the coefficients of the voigt-notation tensor """ obj = np.asarray(input_array).view(cls) obj.rank = len(obj.shape) if check_rank and check_rank != obj.rank: raise ValueError("{} input must be rank {}".format( obj.__class__.__name__, check_rank)) vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank // 2)) obj._vscale = np.ones(vshape) if vscale is not None: obj._vscale = vscale if obj._vscale.shape != vshape: raise ValueError("Voigt scaling matrix must be the shape of the " "voigt notation matrix or vector.") if not all([i == 3 for i in obj.shape]): raise ValueError("Pymatgen only supports 3-dimensional tensors, " "and default tensor constructor uses standard " "notation. To construct from voigt notation, use" " {}.from_voigt".format(obj.__class__.__name__)) return obj def __array_finalize__(self, obj): if obj is None: return self.rank = getattr(obj, 'rank', None) self._vscale = getattr(obj, '_vscale', None) self._vdict = getattr(obj, '_vdict', None) def __array_wrap__(self, obj): """ Overrides __array_wrap__ methods in ndarray superclass to avoid errors associated with functions that return scalar values """ if len(obj.shape) == 0: return obj[()] else: return np.ndarray.__array_wrap__(self, obj) def __hash__(self): """ define a hash function, since numpy arrays have their own __eq__ method """ return hash(self.tostring()) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.__str__()) def zeroed(self, tol=1e-3): """ returns the matrix with all entries below a certain threshold (i.e. tol) set to zero """ new_tensor = self.copy() new_tensor[abs(new_tensor) < tol] = 0 return new_tensor def transform(self, symm_op): """ Applies a transformation (via a symmetry operation) to a tensor. 
Args: symm_op (SymmOp): a symmetry operation to apply to the tensor """ return self.__class__(symm_op.transform_tensor(self)) def rotate(self, matrix, tol=1e-3): """ Applies a rotation directly, and tests input matrix to ensure a valid rotation. Args: matrix (3x3 array-like): rotation matrix to be applied to tensor tol (float): tolerance for testing rotation matrix validity """ matrix = SquareTensor(matrix) if not matrix.is_rotation(tol): raise ValueError("Rotation matrix is not valid.") sop = SymmOp.from_rotation_and_translation(matrix, [0., 0., 0.]) return self.transform(sop) @property def symmetrized(self): """ Returns a generally symmetrized tensor, calculated by taking the sum of the tensor and its transpose with respect to all possible permutations of indices """ perms = list(itertools.permutations(range(self.rank))) return sum([np.transpose(self, ind) for ind in perms]) / len(perms) @property def voigt_symmetrized(self): """ Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices """ if not (self.rank % 2 == 0 and self.rank > 2): raise ValueError("V-symmetrization requires rank even and > 2") v = self.voigt perms = list(itertools.permutations(range(len(v.shape)))) new_v = sum([np.transpose(v, ind) for ind in perms]) / len(perms) return self.__class__.from_voigt(new_v) def is_symmetric(self, tol=1e-5): """ Tests whether a tensor is symmetric or not based on the residual with its symmetric part, from self.symmetrized Args: tol (float): tolerance to test for symmetry """ return (self - self.symmetrized < tol).all() def fit_to_structure(self, structure, symprec=0.1): """ Returns a tensor that is invariant with respect to symmetry operations corresponding to a structure Args: structure (Structure): structure from which to generate symmetry operations symprec (float): symmetry tolerance for the Spacegroup Analyzer used to generate the symmetry operations """ sga = SpacegroupAnalyzer(structure, symprec) symm_ops = sga.get_symmetry_operations(cartesian=True) return sum([self.transform(symm_op) for symm_op in symm_ops]) / len(symm_ops) def is_fit_to_structure(self, structure, tol=1e-2): """ Tests whether a tensor is invariant with respect to the symmetry operations of a particular structure by testing whether the residual of the symmetric portion is below a tolerance Args: structure (Structure): structure to be fit to tol (float): tolerance for symmetry testing """ return (self - self.fit_to_structure(structure) < tol).all() @property def voigt(self): """ Returns the tensor in Voigt notation """ v_matrix = np.zeros(self._vscale.shape, dtype=self.dtype) this_voigt_map = self.get_voigt_dict(self.rank) for ind in this_voigt_map: v_matrix[this_voigt_map[ind]] = self[ind] if not self.is_voigt_symmetric(): warnings.warn("Tensor is not symmetric, information may " "be lost in voigt conversion.") return v_matrix * self._vscale def is_voigt_symmetric(self, tol=1e-6): """ Tests symmetry of tensor to that necessary for voigt-conversion by grouping indices into pairs and constructing a sequence of possible permutations to be used in a tensor transpose """ transpose_pieces = [[[0 for i in range(self.rank % 2)]]] transpose_pieces += [[range(j, j + 2)] for j in range(self.rank % 2, self.rank, 2)] for n in range(self.rank % 2, len(transpose_pieces)): if len(transpose_pieces[n][0]) == 2: transpose_pieces[n] += [transpose_pieces[n][0][::-1]] for trans_seq in itertools.product(*transpose_pieces): trans_seq = list(itertools.chain(*trans_seq)) if 
(self - self.transpose(trans_seq) > tol).any(): return False return True @staticmethod def get_voigt_dict(rank): """ Returns a dictionary that maps indices in the tensor to those in a voigt representation based on input rank Args: rank (int): Tensor rank to generate the voigt map """ vdict = {} for ind in itertools.product(*[range(3)] * rank): v_ind = ind[:rank % 2] for j in range(rank // 2): pos = rank % 2 + 2 * j v_ind += (reverse_voigt_map[ind[pos:pos + 2]],) vdict[ind] = v_ind return vdict @classmethod def from_voigt(cls, voigt_input): """ Constructor based on the voigt notation vector or matrix. Args: voigt_input (array-like): voigt input for a given tensor """ voigt_input = np.array(voigt_input) rank = sum(voigt_input.shape) // 3 t = cls(np.zeros([3] * rank)) if voigt_input.shape != t._vscale.shape: raise ValueError("Invalid shape for voigt matrix") voigt_input = voigt_input / t._vscale this_voigt_map = t.get_voigt_dict(rank) for ind in this_voigt_map: t[ind] = voigt_input[this_voigt_map[ind]] return cls(t) def convert_to_ieee(self, structure): """ Given a structure associated with a tensor, attempts a calculation of the tensor in IEEE format according to the 1987 IEEE standards. Args: structure (Structure): a structure associated with the tensor to be converted to the IEEE standard """ def get_uvec(v): """ Gets a unit vector parallel to input vector""" l = np.linalg.norm(v) if l < 1e-8: return v return v / l # Check conventional setting: sga = SpacegroupAnalyzer(structure) dataset = sga.get_symmetry_dataset() trans_mat = dataset['transformation_matrix'] conv_latt = Lattice(np.transpose(np.dot(np.transpose( structure.lattice.matrix), np.linalg.inv(trans_mat)))) xtal_sys = sga.get_crystal_system() vecs = conv_latt.matrix lengths = np.array(conv_latt.abc) angles = np.array(conv_latt.angles) rotation = np.zeros((3, 3)) # IEEE rules: a,b,c || x1,x2,x3 if xtal_sys == "cubic": rotation = [vecs[i] / lengths[i] for i in range(3)] # IEEE rules: a=b in length; c,a || x3, x1 elif xtal_sys == "tetragonal": rotation = np.array([vec / mag for (mag, vec) in sorted(zip(lengths, vecs), key=lambda x: x[0])]) if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]): rotation[0], rotation[2] = rotation[2], rotation[0].copy() rotation[1] = get_uvec(np.cross(rotation[2], rotation[0])) # IEEE rules: c<a<b; c,a || x3,x1 elif xtal_sys == "orthorhombic": rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))] rotation = np.roll(rotation, 2, axis=0) # IEEE rules: c,a || x3,x1, c is threefold axis # Note this also includes rhombohedral crystal systems elif xtal_sys in ("trigonal", "hexagonal"): # find threefold axis: tf_index = np.argmin(abs(angles - 120.)) non_tf_mask = np.logical_not(angles == angles[tf_index]) rotation[2] = get_uvec(vecs[tf_index]) rotation[0] = get_uvec(vecs[non_tf_mask][0]) rotation[1] = get_uvec(np.cross(rotation[2], rotation[0])) # IEEE rules: b,c || x2,x3; alpha=beta=90, c<a elif xtal_sys == "monoclinic": # Find unique axis u_index = np.argmax(abs(angles - 90.)) n_umask = np.logical_not(angles == angles[u_index]) rotation[1] = get_uvec(vecs[u_index]) # Shorter of remaining lattice vectors for c axis c = [vec / mag for (mag, vec) in sorted(zip(lengths[n_umask], vecs[n_umask]))][0] rotation[2] = np.array(c) rotation[0] = np.cross(rotation[1], rotation[2]) # IEEE rules: c || x3 elif xtal_sys == "triclinic": rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))] rotation = np.roll(rotation, 2, axis=0) rotation[1] = get_uvec(np.cross(rotation[2], rotation[1])) 
rotation[0] = np.cross(rotation[1], rotation[2]) return self.rotate(rotation, tol=1e-2) class TensorCollection(collections.Sequence): """ A sequence of tensors that can be used for fitting data or for having a tensor expansion """ def __init__(self, tensor_list, base_class=Tensor): self.tensors = [base_class(t) if not isinstance(t, base_class) else t for t in tensor_list] def __len__(self): return len(self.tensors) def __getitem__(self, ind): return self.tensors[ind] def __iter__(self): return self.tensors.__iter__() def zeroed(self, tol=1e-3): return self.__class__([t.zeroed(tol) for t in self]) def transform(self, symm_op): return self.__class__([t.transform(symm_op) for t in self]) def rotate(self, matrix, tol=1e-3): return self.__class__([t.rotate(matrix, tol) for t in self]) @property def symmetrized(self): return self.__class__([t.symmetrized for t in self]) def is_symmetric(self, tol=1e-5): return all([t.is_symmetric(tol) for t in self]) def fit_to_structure(self, structure, symprec=0.1): return self.__class__([t.fit_to_structure(structure, symprec) for t in self]) @property def voigt(self): return [t.voigt for t in self] def is_voigt_symmetric(self, tol=1e-6): return all([t.is_voigt_symmetric(tol) for t in self]) @classmethod def from_voigt(cls, voigt_input_list, base_class=Tensor): return cls([base_class.from_voigt(v) for v in voigt_input_list]) def convert_to_ieee(self, structure): return self.__class__([t.convert_to_ieee(structure) for t in self]) class SquareTensor(Tensor): """ Base class for doing useful general operations on second rank tensors (stress, strain etc.). """ def __new__(cls, input_array, vscale=None): """ Create a SquareTensor object. Note that the constructor uses __new__ rather than __init__ according to the standard method of subclassing numpy ndarrays. Error is thrown when the class is initialized with non-square matrix. Args: input_array (3x3 array-like): the 3x3 array-like representing the content of the tensor vscale (6x1 array-like): 6x1 array-like scaling the voigt-notation vector with the tensor entries """ obj = super(SquareTensor, cls).__new__(cls, input_array, vscale, check_rank=2) return obj.view(cls) @property def trans(self): """ shorthand for transpose on SquareTensor """ return SquareTensor(np.transpose(self)) @property def inv(self): """ shorthand for matrix inverse on SquareTensor """ if self.det == 0: raise ValueError("SquareTensor is non-invertible") return SquareTensor(np.linalg.inv(self)) @property def det(self): """ shorthand for the determinant of the SquareTensor """ return np.linalg.det(self) def is_rotation(self, tol=1e-3, include_improper=True): """ Test to see if tensor is a valid rotation matrix, performs a test to check whether the inverse is equal to the transpose and if the determinant is equal to one within the specified tolerance Args: tol (float): tolerance to both tests of whether the the determinant is one and the inverse is equal to the transpose include_improper (bool): whether to include improper rotations in the determination of validity """ det = np.abs(np.linalg.det(self)) if include_improper: det = np.abs(det) return (np.abs(self.inv - self.trans) < tol).all() \ and (np.abs(det - 1.) 
< tol) def get_scaled(self, scale_factor): """ Scales the tensor by a certain multiplicative scale factor Args: scale_factor (float): scalar multiplier to be applied to the SquareTensor object """ return SquareTensor(self * scale_factor) @property def principal_invariants(self): """ Returns a list of principal invariants for the tensor, which are the values of the coefficients of the characteristic polynomial for the matrix """ return np.poly(self)[1:] * np.array([-1, 1, -1]) def polar_decomposition(self, side='right'): """ calculates matrices for polar decomposition """ return polar(self, side=side) def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs): """ Function that converts a list of tensors corresponding to a structure and returns a dictionary consisting of unique tensor keys with symmop values corresponding to transformations that will result in derivative tensors from the original list Args: tensors (list of tensors): list of Tensor objects to test for symmetrically-equivalent duplicates structure (Structure): structure from which to get symmetry tol (float): tolerance for tensor equivalence kwargs: keyword arguments for the SpacegroupAnalyzer returns: dictionary consisting of unique tensors with symmetry operations corresponding to those which will reconstruct the remaining tensors as values """ sga = SpacegroupAnalyzer(structure, **kwargs) symmops = sga.get_symmetry_operations(cartesian=True) unique_tdict = {} for tensor in tensors: is_unique = True for unique_tensor, symmop in itertools.product(unique_tdict, symmops): if (np.abs(unique_tensor.transform(symmop) - tensor) < tol).all(): unique_tdict[unique_tensor].append(symmop) is_unique = False break if is_unique: unique_tdict[tensor] = [] return unique_tdict
tallakahath/pymatgen
pymatgen/analysis/elasticity/tensors.py
Python
mit
19,491
[ "CRYSTAL", "pymatgen" ]
91380a1179dafd8f75102c3c98bf126036fb5b2f47b6bcd02f93020ae1e40ce9
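A numpy-only sketch of the voigt_map defined above: for a symmetric rank-2 tensor the six index pairs (0,0), (1,1), (2,2), (1,2), (0,2), (0,1) capture all independent entries, which is what Tensor.voigt relies on.

import numpy as np

voigt_map = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
stress = np.array([[1.0, 0.5, 0.0],
                   [0.5, 2.0, 0.3],
                   [0.0, 0.3, 3.0]])  # symmetric, so no information is lost
voigt = np.array([stress[ij] for ij in voigt_map])
print(voigt)  # [1.  2.  3.  0.3 0.  0.5]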
# Copyright (C) 2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest as ut import importlib_wrapper def disable_GUI(code): # integrate without visualizer breakpoint = "visualizer.run(1)" assert breakpoint in code code = code.replace(breakpoint, "steps=1\nsystem.integrator.run(steps)", 1) return code sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import( "@SAMPLES_DIR@/visualization_mmm2d.py", substitutions=disable_GUI, steps=100) @skipIfMissingFeatures class Sample(ut.TestCase): system = sample.system if __name__ == "__main__": ut.main()
psci2195/espresso-ffans
testsuite/scripts/samples/test_visualization_mmm2d.py
Python
gpl-3.0
1,258
[ "ESPResSo" ]
c70fdc911d9b0113a16f9c0c03fe37a075ad7b2c090b39088c6022c08490d467
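The disable_GUI substitution above is a plain source-level rewrite applied before the sample is imported; a minimal standalone sketch of the same idea (the sample code string here is hypothetical):

sample_code = "system = setup()\nvisualizer.run(1)\n"

breakpoint = "visualizer.run(1)"
assert breakpoint in sample_code
patched = sample_code.replace(
    breakpoint, "steps=1\nsystem.integrator.run(steps)", 1)

assert "visualizer.run" not in patched  # the GUI call is gone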
#!/usr/bin/env python import os.path from zipfile import ZipFile from django.conf import settings from django.test import TransactionTestCase from panda.models import TaskStatus from panda.tasks import ExportSearchTask from panda.tests import utils class TestExportSearch(TransactionTestCase): fixtures = ['init_panda.json', 'test_users.json'] def setUp(self): settings.CELERY_ALWAYS_EAGER = True utils.setup_test_solr() self.user = utils.get_panda_user() self.dataset = utils.get_test_dataset(self.user) self.dataset2 = utils.get_test_dataset(self.user) self.upload = utils.get_test_data_upload(self.user, self.dataset) def test_export_query_csv(self): self.dataset.import_data(self.user, self.upload) self.dataset2.import_data(self.user, self.upload) task_type = ExportSearchTask task = TaskStatus.objects.create(task_name=task_type.name, creator=self.user) task_type.apply_async( args=['tribune', task.id], kwargs={ 'filename': 'test' }, task_id=task.id ) # Refresh from database task = TaskStatus.objects.get(id=task.id) self.assertEqual(task.status, 'SUCCESS') self.assertNotEqual(task.start, None) self.assertNotEqual(task.end, None) self.assertEqual(task.traceback, None) self.assertEqual(os.path.exists(os.path.join(settings.EXPORT_ROOT, 'test.zip')), True) self.assertEqual(os.path.exists(os.path.join(settings.EXPORT_ROOT, 'test')), False) zipfile = ZipFile(os.path.join(settings.EXPORT_ROOT, 'test.zip')) expected_filenames = ['contributors.csv', 'contributors-2.csv'] self.assertEqual(set(zipfile.namelist()), set(expected_filenames)) for filename in expected_filenames: with zipfile.open(filename) as f: self.assertEqual('id,first_name,last_name,employer\n', f.next()) self.assertEqual('1,Brian,Boyer,Chicago Tribune\n', f.next()) self.assertEqual('2,Joseph,Germuska,Chicago Tribune\n', f.next()) with self.assertRaises(StopIteration): f.next() os.remove(os.path.join(settings.EXPORT_ROOT, 'test.zip')) def test_export_query_no_results(self): self.dataset.import_data(self.user, self.upload) self.dataset2.import_data(self.user, self.upload) task_type = ExportSearchTask task = TaskStatus.objects.create(task_name=task_type.name, creator=self.user) task_type.apply_async( args=['foobar', task.id], kwargs={ 'filename': 'test' }, task_id=task.id ) # Refresh from database task = TaskStatus.objects.get(id=task.id) self.assertEqual(task.status, 'SUCCESS') self.assertNotEqual(task.start, None) self.assertNotEqual(task.end, None) self.assertEqual(task.traceback, None) self.assertEqual(os.path.exists(os.path.join(settings.EXPORT_ROOT, 'test.zip')), True) self.assertEqual(os.path.exists(os.path.join(settings.EXPORT_ROOT, 'test')), False) zipfile = ZipFile(os.path.join(settings.EXPORT_ROOT, 'test.zip')) self.assertEqual(set(zipfile.namelist()), set()) os.remove(os.path.join(settings.EXPORT_ROOT, 'test.zip'))
NUKnightLab/panda
panda/tests/test_export_search.py
Python
mit
3,391
[ "Brian" ]
e3411283128c3746cb20805dee7cab1e10efbcafdeba06e3f6b6de1cfce031a0
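The test above verifies exported CSVs inside a zip archive; a Python 3 sketch of the same round-trip idiom (the original test is Python 2, hence f.next()):

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("contributors.csv", "id,first_name\n1,Brian\n")
with zipfile.ZipFile(buf) as zf:
    with zf.open("contributors.csv") as f:
        lines = f.read().decode().splitlines()
assert lines[0] == "id,first_name"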
# Copyright (C) 2012,2013
#     Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
#     Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

r"""
*******************************************
**espressopp.standard_system.LennardJones**
*******************************************

.. function:: espressopp.standard_system.LennardJones(num_particles, box, rc, skin, dt, epsilon, sigma, shift, temperature, xyzfilename, xyzrfilename)

    :param num_particles:
    :param box: (default: (0,0,0))
    :param rc: (default: 1.12246)
    :param skin: (default: 0.3)
    :param dt: (default: 0.005)
    :param epsilon: (default: 1.0)
    :param sigma: (default: 1.0)
    :param shift: (default: 'auto')
    :param temperature: (default: None)
    :param xyzfilename: (default: None)
    :param xyzrfilename: (default: None)
    :type num_particles:
    :type box:
    :type rc: real
    :type skin: real
    :type dt: real
    :type epsilon: real
    :type sigma: real
    :type shift:
    :type temperature:
    :type xyzfilename:
    :type xyzrfilename:

    Returns a random Lennard-Jones system and its integrator;
    if temperature is not None, a Langevin thermostat is set to that
    temperature (gamma is 1.0).
"""

import sys
import espressopp
import mpi4py.MPI as MPI

def LennardJones(num_particles, box=(0,0,0), rc=1.12246, skin=0.3, dt=0.005, epsilon=1.0, sigma=1.0, shift='auto', temperature=None, xyzfilename=None, xyzrfilename=None):

    if xyzfilename and xyzrfilename:
        print "ERROR: only one of xyzfilename (only xyz data) or xyzrfilename (additional particle radius data) can be provided."
        sys.exit(1)

    if xyzrfilename:
        pidf, typef, xposf, yposf, zposf, xvelf, yvelf, zvelf, Lxf, Lyf, Lzf, radiusf = espressopp.tools.readxyzr(xyzrfilename)
        box = (Lxf, Lyf, Lzf)
        num_particles = len(pidf)
    elif xyzfilename:
        pidf, typef, xposf, yposf, zposf, xvelf, yvelf, zvelf, Lxf, Lyf, Lzf = espressopp.tools.readxyz(xyzfilename)
        box = (Lxf, Lyf, Lzf)
        num_particles = len(pidf)
    else:
        if box[0]<=0 or box[1]<=0 or box[2]<=0:
            print "WARNING: no valid box size specified, box size set to (100,100,100) !"
            box = (100, 100, 100)
system = espressopp.System() system.rng = espressopp.esutil.RNG() system.bc = espressopp.bc.OrthorhombicBC(system.rng, box) system.skin = skin nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size) cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin) system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid) interaction = espressopp.interaction.VerletListLennardJones(espressopp.VerletList(system, cutoff=rc)) interaction.setPotential(type1=0, type2=0, potential=espressopp.interaction.LennardJones(epsilon, sigma, rc, shift)) system.addInteraction(interaction) integrator = espressopp.integrator.VelocityVerlet(system) integrator.dt = dt if (temperature != None): thermostat = espressopp.integrator.LangevinThermostat(system) thermostat.gamma = 1.0 thermostat.temperature = temperature integrator.addExtension(thermostat) mass = 1.0 if xyzrfilename: new_particles = [] props = ['id', 'type', 'mass', 'pos', 'v', 'radius'] for idx in range(num_particles): part = [ pidf[idx], typef[idx], mass, espressopp.Real3D(xposf[idx],yposf[idx],zposf[idx]), espressopp.Real3D(xvelf[idx],yvelf[idx],zvelf[idx]), radiusf[idx] ] new_particles.append(part) if idx % 1000 == 0: system.storage.addParticles(new_particles, *props) system.storage.decompose() new_particles = [] system.storage.addParticles(new_particles, *props) system.storage.decompose() elif xyzfilename: new_particles = [] props = ['id', 'type', 'mass', 'pos', 'v'] for idx in range(num_particles): part = [ pidf[idx], typef[idx], mass, espressopp.Real3D(xposf[idx],yposf[idx],zposf[idx]), espressopp.Real3D(xvelf[idx],yvelf[idx],zvelf[idx])] new_particles.append(part) if idx % 1000 == 0: system.storage.addParticles(new_particles, *props) system.storage.decompose() new_particles = [] system.storage.addParticles(new_particles, *props) system.storage.decompose() else: props = ['id', 'type', 'mass', 'pos', 'v'] new_particles = [] pid = 1 while pid <= num_particles: type = 0 mass = 1.0 pos = system.bc.getRandomPos() vel = espressopp.Real3D(0.0, 0.0, 0.0) part = [pid, type, mass, pos, vel] new_particles.append(part) if pid % 1000 == 0: system.storage.addParticles(new_particles, *props) system.storage.decompose() new_particles = [] pid += 1 system.storage.addParticles(new_particles, *props) system.storage.decompose() return system, integrator
capoe/espressopp.soap
src/standard_system/LennardJones.py
Python
gpl-3.0
5,615
[ "ESPResSo" ]
1f03be7a56999ad0647e745d0b9d087fee4dacf8efcf380a4eca4e3239e1383b
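A hedged usage sketch for the helper above (assumes a working espressopp build with MPI; the energy query at the end is a common espressopp idiom, not shown in the file itself):

import espressopp

# random LJ system of 1000 particles with a Langevin thermostat at T=1.0
system, integrator = espressopp.standard_system.LennardJones(
    1000, box=(10, 10, 10), temperature=1.0)
integrator.run(100)
print("energy: %s" % system.getInteraction(0).computeEnergy())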
# $Id$ # # Copyright (C) 2004-2006 Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # from __future__ import print_function from rdkit import Chem from rdkit.Chem import rdMolDescriptors import math def ExplainAtomCode(code, branchSubtract=0): """ **Arguments**: - the code to be considered - branchSubtract: (optional) the constant that was subtracted off the number of neighbors before integrating it into the code. This is used by the topological torsions code. >>> m = Chem.MolFromSmiles('C=CC(=O)O') >>> code = GetAtomCode(m.GetAtomWithIdx(0)) >>> ExplainAtomCode(code) ('C', 1, 1) >>> code = GetAtomCode(m.GetAtomWithIdx(1)) >>> ExplainAtomCode(code) ('C', 2, 1) >>> code = GetAtomCode(m.GetAtomWithIdx(2)) >>> ExplainAtomCode(code) ('C', 3, 1) >>> code = GetAtomCode(m.GetAtomWithIdx(3)) >>> ExplainAtomCode(code) ('O', 1, 1) >>> code = GetAtomCode(m.GetAtomWithIdx(4)) >>> ExplainAtomCode(code) ('O', 1, 0) """ typeMask = (1 << rdMolDescriptors.AtomPairsParameters.numTypeBits) - 1 branchMask = (1 << rdMolDescriptors.AtomPairsParameters.numBranchBits) - 1 piMask = (1 << rdMolDescriptors.AtomPairsParameters.numPiBits) - 1 nBranch = int(code & branchMask) code = code >> rdMolDescriptors.AtomPairsParameters.numBranchBits nPi = int(code & piMask) code = code >> rdMolDescriptors.AtomPairsParameters.numPiBits typeIdx = int(code & typeMask) if typeIdx < len(rdMolDescriptors.AtomPairsParameters.atomTypes): atomNum = rdMolDescriptors.AtomPairsParameters.atomTypes[typeIdx] atomSymbol = Chem.GetPeriodicTable().GetElementSymbol(atomNum) else: atomSymbol = 'X' return (atomSymbol, nBranch, nPi) GetAtomCode = rdMolDescriptors.GetAtomPairAtomCode def NumPiElectrons(atom): """ Returns the number of electrons an atom is using for pi bonding >>> m = Chem.MolFromSmiles('C=C') >>> NumPiElectrons(m.GetAtomWithIdx(0)) 1 >>> m = Chem.MolFromSmiles('C#CC') >>> NumPiElectrons(m.GetAtomWithIdx(0)) 2 >>> NumPiElectrons(m.GetAtomWithIdx(1)) 2 >>> m = Chem.MolFromSmiles('O=C=CC') >>> NumPiElectrons(m.GetAtomWithIdx(0)) 1 >>> NumPiElectrons(m.GetAtomWithIdx(1)) 2 >>> NumPiElectrons(m.GetAtomWithIdx(2)) 1 >>> NumPiElectrons(m.GetAtomWithIdx(3)) 0 >>> m = Chem.MolFromSmiles('c1ccccc1') >>> NumPiElectrons(m.GetAtomWithIdx(0)) 1 FIX: this behaves oddly in these cases: >>> m = Chem.MolFromSmiles('S(=O)(=O)') >>> NumPiElectrons(m.GetAtomWithIdx(0)) 2 >>> m = Chem.MolFromSmiles('S(=O)(=O)(O)O') >>> NumPiElectrons(m.GetAtomWithIdx(0)) 0 In the second case, the S atom is tagged as sp3 hybridized. 
""" res = 0 if atom.GetIsAromatic(): res = 1 elif atom.GetHybridization() != Chem.HybridizationType.SP3: # the number of pi electrons is just the number of # unsaturations (valence - degree): res = atom.GetExplicitValence() - atom.GetNumExplicitHs() if res < atom.GetDegree(): raise ValueError("explicit valence exceeds atom degree") res -= atom.GetDegree() return res def BitsInCommon(v1, v2): """ Returns the number of bits in common between two vectors **Arguments**: - two vectors (sequences of bit ids) **Returns**: an integer **Notes** - the vectors must be sorted - duplicate bit IDs are counted more than once >>> BitsInCommon( (1,2,3,4,10), (2,4,6) ) 2 Here's how duplicates are handled: >>> BitsInCommon( (1,2,2,3,4), (2,2,4,5,6) ) 3 """ res = 0 v2Pos = 0 nV2 = len(v2) for val in v1: while v2Pos < nV2 and v2[v2Pos] < val: v2Pos += 1 if v2Pos >= nV2: break if v2[v2Pos] == val: res += 1 v2Pos += 1 return res def DiceSimilarity(v1, v2, bounds=None): """ Implements the DICE similarity metric. This is the recommended metric in both the Topological torsions and Atom pairs papers. **Arguments**: - two vectors (sequences of bit ids) **Returns**: a float. **Notes** - the vectors must be sorted >>> DiceSimilarity( (1,2,3), (1,2,3) ) 1.0 >>> DiceSimilarity( (1,2,3), (5,6) ) 0.0 >>> DiceSimilarity( (1,2,3,4), (1,3,5,7) ) 0.5 >>> DiceSimilarity( (1,2,3,4,5,6), (1,3) ) 0.5 Note that duplicate bit IDs count multiple times: >>> DiceSimilarity( (1,1,3,4,5,6), (1,1) ) 0.5 but only if they are duplicated in both vectors: >>> DiceSimilarity( (1,1,3,4,5,6), (1,) )==2./7 True edge case >>> DiceSimilarity( (), () ) 0.0 and bounds check >>> DiceSimilarity( (1,1,3,4), (1,1)) 0.666... >>> DiceSimilarity( (1,1,3,4), (1,1), bounds=0.3) 0.666... >>> DiceSimilarity( (1,1,3,4), (1,1), bounds=0.33) 0.666... >>> DiceSimilarity( (1,1,3,4,5,6), (1,1), bounds=0.34) 0.0 """ denom = 1.0 * (len(v1) + len(v2)) if not denom: res = 0.0 else: if bounds and (min(len(v1), len(v2)) / denom) < bounds: numer = 0.0 else: numer = 2.0 * BitsInCommon(v1, v2) res = numer / denom return res def Dot(v1, v2): """ Returns the Dot product between two vectors: **Arguments**: - two vectors (sequences of bit ids) **Returns**: an integer **Notes** - the vectors must be sorted - duplicate bit IDs are counted more than once >>> Dot( (1,2,3,4,10), (2,4,6) ) 2 Here's how duplicates are handled: >>> Dot( (1,2,2,3,4), (2,2,4,5,6) ) 5 >>> Dot( (1,2,2,3,4), (2,4,5,6) ) 2 >>> Dot( (1,2,2,3,4), (5,6) ) 0 >>> Dot( (), (5,6) ) 0 """ res = 0 nV1 = len(v1) nV2 = len(v2) i = 0 j = 0 while i < nV1: v1Val = v1[i] v1Count = 1 i += 1 while i < nV1 and v1[i] == v1Val: v1Count += 1 i += 1 while j < nV2 and v2[j] < v1Val: j += 1 if j < nV2 and v2[j] == v1Val: v2Count = 1 j += 1 while j < nV2 and v2[j] == v1Val: v2Count += 1 j += 1 commonCount = min(v1Count, v2Count) res += commonCount * commonCount elif j >= nV2: break return res def CosineSimilarity(v1, v2): """ Implements the Cosine similarity metric. This is the recommended metric in the LaSSI paper **Arguments**: - two vectors (sequences of bit ids) **Returns**: a float. 
**Notes** - the vectors must be sorted >>> print('%.3f'%CosineSimilarity( (1,2,3,4,10), (2,4,6) )) 0.516 >>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), (2,2,4,5,6) )) 0.714 >>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), (1,2,2,3,4) )) 1.000 >>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), (5,6,7) )) 0.000 >>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), () )) 0.000 """ d1 = Dot(v1, v1) d2 = Dot(v2, v2) denom = math.sqrt(d1 * d2) if not denom: res = 0.0 else: numer = Dot(v1, v2) res = numer / denom return res # ------------------------------------ # # doctest boilerplate # def _runDoctests(verbose=None): # pragma: nocover import sys import doctest failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose) sys.exit(failed) if __name__ == '__main__': # pragma: nocover _runDoctests()
rvianello/rdkit
rdkit/Chem/AtomPairs/Utils.py
Python
bsd-3-clause
7,283
[ "RDKit" ]
e9b2d646742275c4551612db2a994f692f2c7bd9b5f55b5c557bd593dd4216ee
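A short usage sketch for the helpers above, grounded in the module's own doctests (assumes RDKit is installed):

from rdkit import Chem
from rdkit.Chem.AtomPairs import Utils

m = Chem.MolFromSmiles('C=CC(=O)O')
code = Utils.GetAtomCode(m.GetAtomWithIdx(1))
print(Utils.ExplainAtomCode(code))  # ('C', 2, 1): carbon, 2 branches, 1 pi
print(Utils.DiceSimilarity((1, 2, 3, 4), (1, 3, 5, 7)))  # 0.5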
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import collections import abc import numpy as np from pymatgen.core.spectrum import Spectrum from pymatgen.util.plotting import add_fig_kwargs """ This module implements core classes for calculation of diffraction patterns. """ __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "ongsp@ucsd.edu" __date__ = "5/22/14" class DiffractionPattern(Spectrum): """ A representation of a diffraction pattern """ XLABEL = "$2\\Theta$" YLABEL = "Intensity" def __init__(self, x, y, hkls, d_hkls): """ Args: x: Two theta angles. y: Intensities hkls: [{"hkl": (h, k, l), "multiplicity": mult}], where {"hkl": (h, k, l), "multiplicity": mult} is a dict of Miller indices for all diffracted lattice facets contributing to each intensity. d_hkls: List of interplanar spacings. """ super(DiffractionPattern, self).__init__(x, y, hkls, d_hkls) self.hkls = hkls self.d_hkls = d_hkls class AbstractDiffractionPatternCalculator(abc.ABC): """ Abstract base class for computing the diffraction pattern of a crystal. """ # Tolerance in which to treat two peaks as having the same two theta. TWO_THETA_TOL = 1e-5 # Tolerance in which to treat a peak as effectively 0 if the scaled # intensity is less than this number. Since the max intensity is 100, # this means the peak must be less than 1e-5 of the peak intensity to be # considered as zero. This deals with numerical issues where systematic # absences do not cancel exactly to zero. SCALED_INTENSITY_TOL = 1e-3 @abc.abstractmethod def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)): """ Calculates the diffraction pattern for a structure. Args: structure (Structure): Input structure scaled (bool): Whether to return scaled intensities. The maximum peak is set to a value of 100. Defaults to True. Use False if you need the absolute values to combine XRD plots. two_theta_range ([float of length 2]): Tuple for range of two_thetas to calculate in degrees. Defaults to (0, 90). Set to None if you want all diffracted beams within the limiting sphere of radius 2 / wavelength. Returns: (DiffractionPattern) """ pass def get_plot(self, structure, two_theta_range=(0, 90), annotate_peaks=True, ax=None, with_labels=True, fontsize=16): """ Returns the diffraction plot as a matplotlib.pyplot. Args: structure: Input structure two_theta_range ([float of length 2]): Tuple for range of two_thetas to calculate in degrees. Defaults to (0, 90). Set to None if you want all diffracted beams within the limiting sphere of radius 2 / wavelength. annotate_peaks: Whether to annotate the peaks with plane information. ax: matplotlib :class:`Axes` or None if a new figure should be created. with_labels: True to add xlabels and ylabels to the plot. fontsize: (int) fontsize for peak labels. Returns: (matplotlib.pyplot) """ if ax is None: from pymatgen.util.plotting import pretty_plot plt = pretty_plot(16, 10) ax = plt.gca() else: # This to maintain the type of the return value. 
            import matplotlib.pyplot as plt

        xrd = self.get_pattern(structure, two_theta_range=two_theta_range)

        for two_theta, i, hkls, d_hkl in zip(xrd.x, xrd.y, xrd.hkls,
                                             xrd.d_hkls):
            if two_theta_range[0] <= two_theta <= two_theta_range[1]:
                label = ", ".join([str(hkl["hkl"]) for hkl in hkls])
                ax.plot([two_theta, two_theta], [0, i], color='k',
                        linewidth=3, label=label)
                if annotate_peaks:
                    ax.annotate(label, xy=[two_theta, i],
                                xytext=[two_theta, i], fontsize=fontsize)

        if with_labels:
            ax.set_xlabel(r"$2\theta$ ($^\circ$)")
            ax.set_ylabel("Intensities (scaled)")

        if hasattr(ax, "tight_layout"):
            ax.tight_layout()

        return plt

    def show_plot(self, structure, **kwargs):
        """
        Shows the diffraction plot.

        Args:
            structure (Structure): Input structure
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks (bool): Whether to annotate the peaks with plane
                information.
        """
        self.get_plot(structure, **kwargs).show()

    @add_fig_kwargs
    def plot_structures(self, structures, fontsize=6, **kwargs):
        """
        Plot diffraction patterns for multiple structures on the same figure.

        Args:
            structures (Structure): List of structures
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks (bool): Whether to annotate the peaks with plane
                information.
            fontsize: (int) fontsize for peak labels.
        """
        import matplotlib.pyplot as plt

        nrows = len(structures)
        fig, axes = plt.subplots(nrows=nrows, ncols=1, sharex=True,
                                 squeeze=False)

        for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)):
            self.get_plot(structure, fontsize=fontsize, ax=ax,
                          with_labels=i == nrows - 1, **kwargs)
            spg_symbol, spg_number = structure.get_space_group_info()
            ax.set_title("{} {} ({}) ".format(structure.formula, spg_symbol,
                                              spg_number))

        return fig


def get_unique_families(hkls):
    """
    Returns unique families of Miller indices. Families must be permutations
    of each other.

    Args:
        hkls ([h, k, l]): List of Miller indices.

    Returns:
        {hkl: multiplicity}: A dict with unique hkl and multiplicity.
    """
    # TODO: Definitely can be sped up.
    def is_perm(hkl1, hkl2):
        h1 = np.abs(hkl1)
        h2 = np.abs(hkl2)
        return all([i == j for i, j in zip(sorted(h1), sorted(h2))])

    unique = collections.defaultdict(list)
    for hkl1 in hkls:
        found = False
        for hkl2 in unique.keys():
            if is_perm(hkl1, hkl2):
                found = True
                unique[hkl2].append(hkl1)
                break
        if not found:
            unique[hkl1].append(hkl1)

    pretty_unique = {}
    for k, v in unique.items():
        pretty_unique[sorted(v)[-1]] = len(v)

    return pretty_unique
montoyjh/pymatgen
pymatgen/analysis/diffraction/core.py
Python
mit
7,574
[ "CRYSTAL", "pymatgen" ]
1482a1f237fa5a1e83642ac14ea6e97cb3f5300e4e7d466bdb289e231ca37bda
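A hedged usage sketch: the abstract calculator above is implemented by, e.g., XRDCalculator; compute a pattern for a simple cubic structure (assumes pymatgen is installed; the CsCl structure is an arbitrary example):

from pymatgen.core import Lattice, Structure
from pymatgen.analysis.diffraction.xrd import XRDCalculator

structure = Structure(Lattice.cubic(4.2), ["Cs", "Cl"],
                      [[0, 0, 0], [0.5, 0.5, 0.5]])
pattern = XRDCalculator().get_pattern(structure, two_theta_range=(0, 90))
# each pattern.hkls entry is a list of {"hkl": ..., "multiplicity": ...} dicts,
# per the DiffractionPattern docstring above
for x, y, hkls in zip(pattern.x[:3], pattern.y[:3], pattern.hkls[:3]):
    print(round(x, 2), round(y, 1), hkls[0]["hkl"])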
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDAnalysis --- http://www.mdanalysis.org # Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # from __future__ import absolute_import import os def test_failure(): """Fail if the MDA_FAILURE_TEST environment variable is set. """ # Have a file open to trigger an output from the open_files plugin. f = open('./failure.txt', 'w') if u'MDA_FAILURE_TEST' in os.environ: assert False
kain88-de/mdanalysis
testsuite/MDAnalysisTests/test_failure.py
Python
gpl-2.0
1,352
[ "MDAnalysis" ]
d12c782529e5dbc501776165c48f078ab1714975d68cd030ece88ac05b3a2c41
#!/usr/bin/env python __author__ = 'Brian Jimenez <brian.jimenez@bsc.es>' from Bio import Entrez import xml.etree.ElementTree as ET import sys # Needed in order to let Pubmed to notify you instead of blocking your IP crawler_contact = 'your_email@here' def search(query): """Searches the query in Entrez""" Entrez.email = crawler_contact handle = Entrez.esearch(db='pubmed', sort='date', retmode='xml', term=query) results = Entrez.read(handle, validate=False) return results def fetch_article_details(id_list): """Fetches the details of the articles given its pubmed ids""" ids = ','.join(id_list) Entrez.email = crawler_contact handle = Entrez.efetch(db='pubmed', retmode='xml', id=ids) lines = handle.readlines() return ''.join(lines) class Article(object): """Represents an article""" def __init__(self, pmid='', title='', authors=None, published='', abstract='', journal='', iso_journal='', doi=''): self.pmid = pmid self.title = title if authors: self.authors = authors else: self.authors = [] self.published = published self.abstract = abstract self.journal = journal self.iso_journal = iso_journal self.doi = doi def __str__(self): return '<pmid="%s", title="%s", authors="%s", journal="%s", doi="%s">' % (self.pmid, self.title, self.authors, self.journal, self.doi) def parse_articles(xml_file_name): """Parses the XML found in xml_file_name It expects to find a set of PubmedArticle as a root node """ tree = ET.parse(xml_file_name) root = tree.getroot() articles = [] for pubmed_article in root.findall('PubmedArticle'): try: article = Article() for citation in pubmed_article.findall('MedlineCitation'): article.pmid = citation.find('PMID').text medline_article = citation.find('Article') article.title = medline_article.find('ArticleTitle').text journal = medline_article.find('Journal') article.journal = journal.find('Title').text article.iso_journal = journal.find('ISOAbbreviation').text article.abstract = medline_article.find('Abstract').find('AbstractText').text date_created = citation.find('DateCreated') year = date_created.find('Year').text month = date_created.find('Month').text day = date_created.find('Day').text article.published = '%s.%s.%s' % (year, month, day) authors_list = medline_article.find('AuthorList') for author in authors_list.findall('Author'): last_name = author.find('LastName').text initials = author.find('Initials').text article.authors.append('%s,%s.' % (last_name, initials)) pubmed_data = pubmed_article.find('PubmedData') article_id_list = pubmed_data.find('ArticleIdList') for article_id in article_id_list.findall('ArticleId'): type = article_id.get('IdType') if type == 'doi': article.doi = article_id.text articles.append(article) except Exception, e: print 'Error: can not parse article. 
Reason: %s' % str(e) return articles def usage(): """Prints the usage of this script""" print 'Usage: %s action[fetch|parse]' % sys.argv[0] if __name__ == '__main__': # Here more users can be defined with their alternative names in case they have users = { 'bjimenez': ['jimenez-garcia b', 'jimenez, brian', 'jimenez-garci b'] } if len(sys.argv[1:]) != 1: usage() raise SystemExit('Wrong command line') action = sys.argv[1] if action == 'fetch': # Fetch the articles from Entrez for user, names in users.iteritems(): for id, name in enumerate(names): print 'Looking for %s' % name results = search(name) id_list = results['IdList'] print id_list print '%d articles found for %s' % (len(id_list), name) papers = fetch_article_details(id_list) output = open('%s_%d.xml' % (user, id), 'w') output.write(papers) elif action == 'parse': # Parses and shows the articles for user, names in users.iteritems(): for id, name in enumerate(names): articles = parse_articles('%s_%d.xml' % (user, id)) for article in articles: print article else: raise SystemExit('Wrong action')
brianjimenez/pub_crawler
papers_crawler.py
Python
gpl-2.0
4,934
[ "Brian" ]
006273a792150f1377eb2e2111f89a2db8aba4b129216cf895f9022a9f9cbf53
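A minimal Biopython sketch of the search step above (assumes network access and a real contact email, which PubMed requires; the query string is one of the author aliases from the script):

from Bio import Entrez

Entrez.email = "your_email@here"  # placeholder kept from the script above
handle = Entrez.esearch(db="pubmed", sort="date", retmode="xml",
                        term="jimenez-garcia b")
results = Entrez.read(handle)
print(results["Count"], results["IdList"][:5])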
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides the docker CLI access to the Google Container Registry. Sets docker up to authenticate with the Google Container Registry, and passes all flags after -- to the docker CLI. """ import argparse from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import base from googlecloudsdk.calliope import exceptions from googlecloudsdk.core import log from googlecloudsdk.core.docker import constants from googlecloudsdk.core.docker import docker # By default, we'll set up authentication for these registries. # If the user changes the --server argument to something not in this list, # we'll just give them a warning that they're using an unexpected server. _DEFAULT_REGISTRIES = constants.ALL_SUPPORTED_REGISTRIES @base.ReleaseTracks(base.ReleaseTrack.GA) class Docker(base.Command): """Provides the docker CLI access to the Google Container Registry.""" detailed_help = { 'DESCRIPTION': """\ The docker sub-command of gcloud wraps docker commands, so that gcloud can inject the appropriate fresh authentication token into requests that interact with the docker registry. As commands are simply passed through to docker, see [](http://docs.docker.com/reference/commandline/cli/) for a full reference of command-line options that can be supplied after the --. For more information please visit [](https://gcr.io/) """, 'EXAMPLES': """\ Pull the image '{registry}/google-containers/pause:1.0' from the docker registry: $ {{command}} -- pull {registry}/google-containers/pause:1.0 Push the image '{registry}/example-org/example-image:latest' to our private docker registry. $ {{command}} -- push {registry}/example-org/example-image:latest Configure authentication, then simply use docker: $ {{command}} --authorize-only $ docker push {registry}/example-org/example-image:latest """.format(registry=constants.DEFAULT_REGISTRY), } @staticmethod def Args(parser): parser.add_argument( '--server', '-s', type=arg_parsers.ArgList(min_length=1), metavar='SERVER', help='The address of the Google Cloud Registry.', required=False, default=_DEFAULT_REGISTRIES) # TODO(user): This should evolve into something that launches an # auth daemon process, or utilizes a more permanent credential. parser.add_argument( '--authorize-only', '-a', help='Configure docker authorization only, do not launch the ' 'docker command-line.', action='store_true') parser.add_argument( '--docker-host', help='The URL to connect to Docker Daemon. Format: tcp://host:port or ' 'unix:///path/to/socket.') parser.add_argument( 'docker_args', nargs=argparse.REMAINDER, default=[], help='Arguments to pass to docker.') def Run(self, args): """Executes the given docker command, after refreshing our credentials. Args: args: An argparse.Namespace that contains the values for the arguments specified in the .Args() method. Raises: exceptions.ExitCodeNoError: The docker command execution failed. 
""" force_refresh = True for server in args.server: if server not in _DEFAULT_REGISTRIES: log.warn('Authenticating to a non-default server: {server}.'.format( server=server)) docker.UpdateDockerCredentials(server, refresh=force_refresh) # Only force a refresh for the first server we authorize force_refresh = False if args.authorize_only: # NOTE: We don't know at this point how long the access token we have # placed in the docker configuration will last. More information needs # to be exposed from all credential kinds in order for us to have an # accurate awareness of lifetime here. log.err.Print('Short-lived access for {server} configured.'.format( server=args.server)) return # TODO(user): reconcile with the 'gcloud app' docker stuff, # which should be using a gcloud config property. docker_args = (args.docker_args if not args.docker_host else ['-H', args.docker_host] + args.docker_args) result = docker.Execute(docker_args) # Explicitly avoid displaying an error message that might # distract from the docker error message already displayed. if result: raise exceptions.ExitCodeNoError(exit_code=result) return
KaranToor/MA450
google-cloud-sdk/lib/surface/docker.py
Python
apache-2.0
5,178
[ "VisIt" ]
b17e45c0479e83a278d39839fbffd09e92e052eb35fbb831b3a9ec9894335f7e
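A pure-Python sketch of the refresh-once pattern in Run() above: only the first server forces a token refresh, and later servers reuse the fresh credential (the update callable here stands in for docker.UpdateDockerCredentials):

def configure(servers, update):
    force_refresh = True
    for server in servers:
        update(server, refresh=force_refresh)
        force_refresh = False  # only refresh for the first server

calls = []
configure(["gcr.io", "us.gcr.io"],
          lambda s, refresh: calls.append((s, refresh)))
print(calls)  # [('gcr.io', True), ('us.gcr.io', False)]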
import sys import os.path #sys.path.insert(0, '/home/andy/theano/tool_examples/theano-lstm-0.0.15') from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss from utilities import * import dill import argparse #import cPickle import pickle import numpy from collections import OrderedDict import theano, theano.tensor as T import turing_model from theano_toolkit.parameters import Parameters from theano.compile.nanguardmode import NanGuardMode DESCRIPTION = """ Recurrent neural network based statistical language modelling toolkit (based on LSTM algorithm) Implemented by Daniel Soutner, Department of Cybernetics, University of West Bohemia, Plzen, Czech rep. dsoutner@kky.zcu.cz, 2013 """ def parse_args(parser): parser.add_argument('--train', nargs=1, action="store", metavar="FILE", help='training file !') parser.add_argument('--valid', nargs=1, action="store", metavar="FILE", help='valid file !') parser.add_argument('--test', nargs=1, action="store", metavar="FILE", help='testing file for ppl!') parser.add_argument('--neuron-type', action="store", dest='celltype', help='type of hidden neurons, RNN/LSTM, default: RNN', type=str, default='RNN') parser.add_argument('--train-method', action="store", dest='train_method', help='training method LSTM/TURING/ALL, default: ALL', type=str, default='ALL') parser.add_argument('--projection-size', action="store", dest='n_projection', help='Number of neurons in projection layer, default: 100', type=int, default=100) parser.add_argument('--hidden-size', action="store", dest='n_hidden', help='Number of neurons in hidden layer, default: 100', type=int, default=100) parser.add_argument('--stack', action="store", dest='n_stack', help='Number of hidden neurons, default: 1 ', type=int, default=1) parser.add_argument('--learning-rate', action="store", dest='lr', help='learing rate at begining, default: 0.01 ', type=float, default=0.01) parser.add_argument('--improvement-rate', action="store", dest='improvement_rate', help='relative improvement for early stopping on ppl , default: 0.005 ', type=float, default=0.005) parser.add_argument('--minibatch-size', action="store", dest='minibatch_size', help='minibatch size for training, default: 100', type=int, default=100) parser.add_argument('--max-epoch', action="store", dest='max_epoch', help='maximum number of epoch if not early stopping, default: 1000', type=int, default=1000) parser.add_argument('--early-stop', action="store", dest='early_stop', help='1 for early-stopping, 0 for not', type=int, default=1) parser.add_argument('--save-net', action="store", dest="save_net", default=None, metavar="FILE", help="Save RNN to file") parser.add_argument('--load-net', action="store", dest="load_net", default=None, metavar="FILE", help="Load RNN from file") return parser.parse_args() def build_vocab(data_file_str): lines = [] data_file = open(data_file_str) for line in data_file: tokens = line.replace('\n','.') lines.append(tokens) data_file.close() vocab = Vocab() for line in lines: vocab.add_words(line.split(" ")) return vocab def load_data(data_file_str, vocab, data_type): lines = [] data_file = open(data_file_str) for line in data_file: tokens = line.replace('\n','.') # abandom too long sent in training set., too long sent will take too many time and decrease preformance tokens_for_count = line.replace('\n','').split(' ') if len(tokens_for_count) > 50 and data_type == 'train': continue lines.append(tokens) data_file.close() # transform into big numerical matrix of sentences: 
    numerical_lines = []
    for line in lines:
        numerical_lines.append(vocab(line))
    numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
    return numerical_lines, numerical_lengths

def softmax(x):
    """
    Wrapper for softmax, helps with pickling, and removing
    one extra dimension that Theano adds during its
    exponential normalization.
    """
    return T.nnet.softmax(x.T)

def has_hidden(layer):
    """
    Whether a layer has a trainable initial hidden state.
    """
    return hasattr(layer, 'initial_hidden_state')

def matrixify(vector, n):
    return T.repeat(T.shape_padleft(vector), n, axis=0)

def initial_state(layer, dimensions=None):
    """
    Initializes the recurrence relation with an initial hidden state
    if needed, else replaces with a "None" to tell Theano that
    the network **will** return something, but it does not need
    to send it to the next step of the recurrence
    """
    if dimensions is None:
        return layer.initial_hidden_state if has_hidden(layer) else None
    else:
        return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None

def initial_state_with_taps(layer, dimensions=None):
    """Optionally wrap tensor variable into a dict with taps=[-1]"""
    state = initial_state(layer, dimensions)
    if state is not None:
        return dict(initial=state, taps=[-1])
    else:
        return None

class Model:
    """
    Simple predictive model for forecasting words from
    sequence using LSTMs. Choose how many LSTMs to stack,
    what size their memory should be, and how many
    words can be predicted.
    """
    def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
        # core layer in RNN/LSTM
        self.model = StackedCells(input_size, celltype=celltype, layers=[hidden_size] * stack_size)
        # add an embedding
        self.model.layers.insert(0, Embedding(vocab_size, input_size))
        # add a classifier:
        self.model.layers.append(Layer(hidden_size, vocab_size, activation=softmax))
        self.turing_params = Parameters()
        # init turing machine model
        self.turing_updates, self.turing_predict = turing_model.build(self.turing_params, hidden_size, vocab_size)
        # inputs are matrices of indices,
        # each row is a sentence, each column a timestep
        self._stop_word = theano.shared(np.int32(999999999), name="stop word")
        self.for_how_long = T.ivector()
        self.input_mat = T.imatrix()
        self.priming_word = T.iscalar()
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        # create symbolic variables for prediction:
        # change by darong
        # issue : what is greedy
        self.lstm_predictions = self.create_lstm_prediction()
        self.final_predictions = self.create_final_prediction()
        # create symbolic variable for greedy search:
        self.greedy_predictions = self.create_lstm_prediction(greedy=True)
        # create gradient training functions:
        self.create_cost_fun()  # create 2 cost func (lstm, final)
        self.lstm_lr = 0.01
        self.turing_lr = 0.01
        self.all_lr = 0.01
        self.create_training_function()  # create 3 functions (lstm, turing, all)
        self.create_predict_function()   # create 2 predictions (lstm, final)
        # create ppl
        self.lstm_ppl = self.create_lstm_ppl()
        self.final_ppl = self.create_final_ppl()
        self.create_ppl_function()

    def save(self, save_file, vocab):
        pickle.dump(self.model, open(save_file, "wb"))  # pickle is for lambda function, cPickle cannot
        pickle.dump(vocab, open(save_file + '.vocab', "wb"))  # pickle is for lambda function, cPickle cannot

    def save_turing(self, save_file):
        self.turing_params.save(save_file + '.turing')

    def load(self, load_file, lr):
        self.model = pickle.load(open(load_file, "rb"))
        if os.path.isfile(load_file + '.turing'):
            self.turing_params.load(load_file + '.turing')
        else:
            print "no turing model!!!! pretrain with lstm param"
            self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T  # not sure
            self.turing_params['W_read_hidden'] = self.model.layers[-1].params[0].get_value().T
            self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
        # need to compile again for calculating predictions after loading lstm
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        self.lstm_predictions = self.create_lstm_prediction()
        self.final_predictions = self.create_final_prediction()
        self.greedy_predictions = self.create_lstm_prediction(greedy=True)  # can change to final
        self.create_cost_fun()  # create 2 cost func (lstm, final)
        self.lstm_lr = lr
        self.turing_lr = lr  # change this
        self.all_lr = lr
        self.create_training_function()  # create 3 functions (lstm, turing, all)
        self.create_predict_function()   # create 2 predictions (lstm, final)
        self.lstm_ppl = self.create_lstm_ppl()
        self.final_ppl = self.create_final_ppl()
        self.create_ppl_function()
        # print "done compile"

    def stop_on(self, idx):
        self._stop_word.set_value(idx)

    @property
    def params(self):
        return self.model.params

    def create_lstm_prediction(self, greedy=False):
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens=new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx, self._stop_word))
            else:
                return new_states[1:]

        # in the sequence forecasting scenario we take everything
        # up to the before-last step, and predict subsequent
        # steps, ergo 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:
        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step, n_steps=200, outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step, sequences=[inputs.T], outputs_info=outputs_info)

        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        return result[-1].transpose((2, 0, 1))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension

    def create_final_prediction(self, greedy=False):
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens=new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx, self._stop_word))
            else:
                return new_states[1:]

        # in the sequence forecasting scenario we take everything
        # up to the before-last step, and predict subsequent
        # steps, ergo 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:
        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step, n_steps=200, outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step, sequences=[inputs.T], outputs_info=outputs_info)

        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        hidden_size = result[-2].shape[2] / 2
        turing_result = self.turing_predict(result[-2][:, :, hidden_size:])
        # the last layer does a transpose before computing
        return turing_result.transpose((1, 0, 2))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension

    def create_cost_fun(self):
        # create a cost function that
        # takes each prediction at every timestep
        # and guesses next timestep's value:
        what_to_predict = self.input_mat[:, 1:]
        # because some sentences are shorter, we
        # place masks where the sentences end:
        # (for_how_long is zero indexed, e.g. an example going from `[2,3)`
        # has this value set to 0; here we subtract by 1):
        for_how_long = self.for_how_long - 1
        # all sentences start at T=0:
        starting_when = T.zeros_like(self.for_how_long)
        self.lstm_cost = masked_loss(self.lstm_predictions, what_to_predict,
                                     for_how_long, starting_when).sum()
        self.final_cost = masked_loss(self.final_predictions, what_to_predict,
                                      for_how_long, starting_when).sum()

    def create_predict_function(self):
        self.lstm_pred_fun = theano.function(
            inputs=[self.input_mat],
            outputs=self.lstm_predictions,
            allow_input_downcast=True
        )
        self.final_pred_fun = theano.function(
            inputs=[self.input_mat],
            outputs=self.final_predictions,
            allow_input_downcast=True
        )
        self.greedy_fun = theano.function(
            inputs=[self.priming_word],
            outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
            allow_input_downcast=True
        )

    def create_training_function(self):
        updates, _, _, _, _ = create_optimization_updates(self.lstm_cost, self.params, method="SGD", lr=self.lstm_lr)
        # updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
        self.lstm_update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.lstm_cost,
            updates=updates,
            allow_input_downcast=True)

        updates_turing = self.turing_updates(self.final_cost, lr=self.turing_lr)
        # updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
        self.turing_update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.final_cost,
            updates=updates_turing,
            mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
            allow_input_downcast=True)

        all_updates_lstm, _, _, _, _ = create_optimization_updates(self.final_cost, self.params, method="SGD", lr=self.all_lr, part=True)
        all_updates_turing_temp = self.turing_updates(self.final_cost, lr=self.all_lr)
        updates_all = all_updates_lstm
        for pair in all_updates_turing_temp:
            updates_all[pair[0]] = pair[1]
        self.all_update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.final_cost,
            updates=updates_all,
            allow_input_downcast=True)

    def create_lstm_ppl(self):
        def timestep(predictions, label, len_example, total_len_example):
            label_binary = T.gt(label[0:len_example - 1], 0)
            oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
            a = total_len_example
            return T.sum(T.log(1. / predictions[T.arange(len_example - 1), label[0:len_example - 1]]) * label_binary), oov_count

        result, _ = theano.scan(fn=timestep,
                                sequences=[self.lstm_predictions,
                                           self.input_mat[:, 1:],
                                           self.for_how_long],
                                non_sequences=T.sum(self.for_how_long))
        oov_count_total = T.sum(result[1])
        return T.exp(T.sum(result[0]).astype(theano.config.floatX) /
                     (T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)

    def create_final_ppl(self):
        def timestep(predictions, label, len_example, total_len_example):
            label_binary = T.gt(label[0:len_example - 1], 0)
            oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
            a = total_len_example
            return T.sum(T.log(1. / predictions[T.arange(len_example - 1), label[0:len_example - 1]]) * label_binary), oov_count

        result, _ = theano.scan(fn=timestep,
                                sequences=[self.final_predictions,
                                           self.input_mat[:, 1:],
                                           self.for_how_long],
                                non_sequences=T.sum(self.for_how_long))
        oov_count_total = T.sum(result[1])
        return T.exp(T.sum(result[0]).astype(theano.config.floatX) /
                     (T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)

    def create_ppl_function(self):
        self.lstm_ppl_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.lstm_ppl,
            allow_input_downcast=True)
        self.final_ppl_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.final_ppl,
            allow_input_downcast=True)

    def __call__(self, x):
        return self.lstm_pred_fun(x)  # was self.pred_fun, which is never defined

def get_minibatch(full_data, full_lengths, minibatch_size, minibatch_idx):
    lengths = []
    for j in range(minibatch_size):
        lengths.append(full_lengths[minibatch_size * minibatch_idx + j])
    width = max(full_lengths)
    # width = max(full_data[minibatch_size * minibatch_idx: minibatch_size * (minibatch_idx+1), :])
    height = minibatch_size
    minibatch_data = full_data[minibatch_size * minibatch_idx: minibatch_size * (minibatch_idx + 1), :]
    return minibatch_data, lengths

def training(args, vocab, train_data, train_lengths, valid_data, valid_lengths):
    # training information
    print 'training information'
    print '-------------------------------------------------------'
    print 'method: %s' % args.train_method
    print 'vocab size: %d' % len(vocab)
    print 'sentences in training file: %d' % len(train_lengths)
    print 'max length in training file: %d' % max(train_lengths)
    print 'train file: %s' % args.train[0]
    print 'valid file: %s' % args.valid[0]
    print 'type: %s' % args.celltype
    print 'project: %d' % args.n_projection
    print 'hidden: %d' % args.n_hidden
    print 'stack: %d' % args.n_stack
    print 'learning rate: %f' % args.lr
    print 'minibatch size: %d' % args.minibatch_size
    print 'max epoch: %d' % args.max_epoch
    print 'improvement rate: %f' % args.improvement_rate
    print 'save file: %s' % args.save_net
    print 'load_model: %s' % args.load_net
    print 'early-stop: %r' % args.early_stop
    print '-------------------------------------------------------'

    if args.celltype == 'LSTM':
        celltype = LSTM
    elif args.celltype == 'RNN':
        celltype = RNN

    print 'start initializing model'
    # construct model & theano functions:
    model = Model(
        input_size=args.n_projection,
        hidden_size=args.n_hidden,
        vocab_size=len(vocab),
        stack_size=args.n_stack,  # make this bigger, but makes compilation slow
        celltype=celltype         # use RNN or LSTM
    )
    if args.lr:
        model.lstm_lr = args.lr
        model.turing_lr = args.lr
        model.all_lr = args.lr
    model.stop_on(vocab.word2index["."])
    if args.load_net:
        if args.lr:
            model.load(args.load_net, args.lr)  # 0 is useless
        else:
            model.load(args.load_net, 0)

    # train:
    # select correct train and prediction method according to train_method (LSTM/TURING/ALL)
    if args.train_method == 'LSTM':
        update_fun = model.lstm_update_fun
        ppl_fun = model.lstm_ppl_fun
        lr = model.lstm_lr
        print 'update lstm learning rate : %f' % model.lstm_lr
    elif args.train_method == 'TURING':
        update_fun = model.turing_update_fun
        ppl_fun = model.final_ppl_fun
        lr = model.turing_lr
        print 'update turing learning rate : %f' % model.turing_lr
    else:
        update_fun = model.all_update_fun
        ppl_fun = model.final_ppl_fun
        lr = model.all_lr
        print 'update all learning rate : %f' % model.all_lr

    stop_count = 0    # for stopping training
    change_count = 0  # for changing learning rate
    print 'start training'
    min_valid_ppl = float('inf')
    for epoch in range(args.max_epoch):
        print "\nepoch %d" % epoch
        # minibatch part
        minibatch_size = args.minibatch_size  # how many examples in a minibatch
        n_train_batches = len(train_lengths) / minibatch_size
        train_ppl = 0
        for minibatch_idx in range(n_train_batches):
            minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, minibatch_size, minibatch_idx)
            error = update_fun(minibatch_train_data, list(lengths))
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
            sys.stdout.write('\n%d minibatch idx / %d total minibatch, ppl: %f ' % (minibatch_idx + 1, n_train_batches, minibatch_train_ppl))
            sys.stdout.flush()  # important
        # rest minibatch if it exists
        if (minibatch_idx + 1) * minibatch_size != len(train_lengths):
            minibatch_idx = minibatch_idx + 1
            n_rest_example = len(train_lengths) - minibatch_size * minibatch_idx
            minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, n_rest_example, minibatch_idx)
            error = update_fun(minibatch_train_data, list(lengths))
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
        train_ppl = train_ppl / sum(train_lengths)
        # print 'done training'

        # valid ppl
        minibatch_size = min(20, len(valid_lengths))
        valid_ppl = 0
        n_valid_batches = len(valid_lengths) / minibatch_size
        for minibatch_idx in range(n_valid_batches):
            minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, minibatch_size, minibatch_idx)
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        # last minibatch
        if (minibatch_idx + 1) * minibatch_size != len(valid_lengths):
            minibatch_idx = minibatch_idx + 1
            n_rest_example = len(valid_lengths) - minibatch_size * minibatch_idx
            minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, n_rest_example, minibatch_idx)
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        valid_ppl = valid_ppl / sum(valid_lengths)

        print "\ntrain ppl: %f, valid ppl: %f" % (train_ppl, valid_ppl)
        if valid_ppl < min_valid_ppl:
            min_valid_ppl = valid_ppl
            model.save(args.save_net, vocab)
            if args.train_method != 'LSTM':
                model.save_turing(args.save_net)
            stop_count = 0
            change_count = 0
            print "save best model"
            continue
        if args.early_stop:
            if (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate:
                if stop_count > 2 or lr < 1e-6:
                    print 'stop training'
                    break
                stop_count = stop_count + 1
            elif (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate * 0.5:
                # if change_count > 2:
                print 'change learning rate from %f to %f' % (lr, lr / 2)
                model.lstm_lr = model.lstm_lr / 2.
                model.turing_lr = model.turing_lr / 2.
                model.all_lr = model.all_lr / 2.
                if args.train_method == 'LSTM':
                    lr = model.lstm_lr
                elif args.train_method == 'TURING':
                    lr = model.turing_lr
                else:
                    lr = model.all_lr
                # change_count = change_count + 1

def testing(args, test_data, test_lengths):
    model_load = Model(
        input_size=1,
        hidden_size=1,
        vocab_size=1,
        stack_size=1,  # make this bigger, but makes compilation slow
        celltype=RNN   # use RNN or LSTM
    )
    model_load.stop_on(vocab.word2index["."])
    if args.train_method != 'LSTM':
        if not os.path.isfile(args.load_net + '.turing'):
            print "there is no trained turing file so we can't test by turing model!!"
            sys.exit()
    model_load.load(args.load_net, 0)

    # test ppl
    # select correct train and prediction method according to train_method (LSTM/TURING/ALL)
    if args.train_method == 'LSTM':
        ppl_fun = model_load.lstm_ppl_fun
    else:
        ppl_fun = model_load.final_ppl_fun

    minibatch_size = 1
    n_test_batches = len(test_lengths)
    for minibatch_idx in range(n_test_batches):
        minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, minibatch_size, minibatch_idx)
        minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
        print minibatch_test_ppl

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    args = parse_args(parser)
    # if no args are passed
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()
    if args.train:
        vocab = build_vocab(args.train[0])
        train_data, train_lengths = load_data(args.train[0], vocab, 'train')
        valid_data, valid_lengths = load_data(args.valid[0], vocab, 'valid')
        training(args, vocab, train_data, train_lengths, valid_data, valid_lengths)
    elif args.test:
        vocab = pickle.load(open(args.load_net + '.vocab', "rb"))
        test_data, test_lengths = load_data(args.test[0], vocab, 'test')
        testing(args, test_data, test_lengths)
darongliu/Lstm_Turing_LM
lstm-neural-turing-machines-lm/analysis/v1-one-weight/lm_v4.py
Python
mit
24,931
[ "NEURON" ]
0da1cdb81ca80d903c129278409e9ed254fe8b216c912da66285f7543edbc535
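The two `create_*_ppl` methods in the file above implement the same OOV-masked perplexity. The following is a minimal NumPy sketch of that formula, written here for illustration and not part of the repository: tokens whose index is 0 are treated as out-of-vocabulary and are excluded from both the log-probability sum and the token count.

import numpy as np

def masked_ppl(predictions, labels, len_example):
    # predictions: (timesteps, vocab) softmax rows; labels: next-word indices
    labels = labels[:len_example - 1]
    mask = (labels > 0).astype(float)                 # index 0 marks OOV
    p = predictions[np.arange(len_example - 1), labels]
    log_inv_p = np.log(1. / p) * mask                 # same term as in timestep()
    return np.exp(log_inv_p.sum() / mask.sum())       # normalize by non-OOV count

rng = np.random.RandomState(0)
probs = rng.dirichlet(np.ones(10), size=5)            # 5 timesteps, vocab of 10
labels = np.array([3, 0, 7, 2, 1])                    # the 0 is skipped as OOV
print(masked_ppl(probs, labels, len_example=6))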
# rc19.py ---
#
# Filename: rc19.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Sat May 24 14:10:22 2014 (+0530)
# Version:
# Last-Updated:
#           By:
#     Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#

# Commentary:
#
#
#
#

# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#

# Code:

"""Cell morphology and passive properties from Branco et al 2010."""

__author__ = 'Subhasis Ray'

import sys
sys.path.append('/home/subha/src/moose_async13/python')
import moose
from moose import utils as mutils
from synapse import *
from matplotlib import pyplot as plt
import numpy as np

from settings import *
from nachannel import *
from kchannel import *
from cachannel import *
from hchannel import *

# locations of the synapses
# synapse_locations = [#(9, 2), # this one seems garbage : loc should be within 0-1
#     (13, 1),
#     (13, 0.875),
#     (13, 0.75),
#     (13, 0.625),
#     (13, 0.5),
#     (13, 0.375),
#     (13, 0.25),
#     (13, 0.125),
#     (13, 0)]

# This has been generated and edited from dumping the locations in the
# neuron model
synloc = [
    'dend_13_0',
    'dend_13_1',
    'dend_13_2',
    'dend_13_3',
    'dend_13_4',
    'dend_13_5',
    'dend_13_6',
    'dend_13_7',
    'dend_13_8',]

# synapse stimulation order
# stim_order = [# [2, 9], # What is this?
#     [0, 1, 2, 3, 4, 5, 6, 7, 8],
#     [8, 7, 6, 5, 4, 3, 2, 1, 0]]
stim_order = [[8]]

color = {0: 'darkcyan', 1: 'darkmagenta'}

library = moose.Neutral('/library')
model_container = moose.Neutral('/model')
data_container = moose.Neutral('/data')

simdt = 5e-6

def make_prototype(passive=True):
    path = '%s/rc19' % (library.path)
    pfile = 'rc19.p'
    try:
        return moose.element(path)
    except ValueError:
        pass
    if not passive:
        make_na()
        make_kv()
        make_km()
        make_kca()
        make_cat()
        make_cahva()
        make_h()
    try:
        proto = moose.element(path)
    except ValueError:
        print 'Loading model %s to %s' % (pfile, path)
        proto = moose.loadModel(pfile, path, 'ee')  # hsolve is not functional yet
    for comp in proto[0].children:
        comp.initVm = -75e-3
    for chan in moose.wildcardFind('%s/##[ISA=HHChannel]' % (path)):  # the '%' format argument was missing
        chan.Gbar *= tadj
    return proto

def setup_model(model_path, synapse_locations, passive=False, solver='hsolve'):
    """Set up a single cell model under `model_path` with synapses
    created in the compartments listed in `synapse_locations`.

    `model_path` - location where the model should be created.

    `synapse_locations`: compartment names for the synapses.

    """
    cell = moose.copy(make_prototype(passive), model_path)
    if solver.lower() == 'hsolve':
        hsolve = moose.HSolve('%s/solve' % (cell.path))
        hsolve.dt = simdt
        hsolve.target = cell.path
    syninfo_list = []
    for compname in synapse_locations:
        comppath = '%s/%s' % (cell.path, compname)
        print '1111 Creating synapse in', comppath
        compartment = moose.element(comppath)
        syninfo = make_synapse(compartment)
        syninfo_list.append(syninfo)
        # connect pulse stimulus
        stim_path = '%s/%s/stim' % (cell.path, compname)
        print '2222 Creating stimulus in', stim_path
        stim = moose.PulseGen(stim_path)
        moose.connect(stim, 'output', syninfo['spike'], 'Vm')
        syninfo['stimulus'] = stim
    return {'neuron': cell,
            'syninfo': syninfo_list}

def setup_recording(data_path, neuron, syninfo_list):
    """Record Vm from soma and synaptic conductances from synapses in
    syninfo_list
    """
    neuron_path = neuron.path
    data_container = moose.Neutral(data_path)
    soma_vm = moose.Table('%s/Vm_soma' % (data_path))
    soma_path = '%s/soma_1' % (neuron_path)
    print '5555 Soma path', soma_path
    soma = moose.element(soma_path)
    moose.connect(soma_vm, 'requestOut', soma, 'getVm')
    ampa_data = moose.Neutral('%s/G_AMPA' % (data_path))
    nmda_data = moose.Neutral('%s/G_NMDA' % (data_path))
    ampa_gk = []
    nmda_gk = []
    # Record synaptic conductances
    for syninfo in syninfo_list:
        compname = syninfo['spike'].parent.name
        tab = moose.Table('%s/Gk_nmda_%s' % (nmda_data.path, compname))
        moose.connect(tab, 'requestOut', syninfo['nmda'], 'getGk')
        nmda_gk.append(tab)
        tab = moose.Table('%s/Gk_ampa_%s' % (ampa_data.path, compname))
        moose.connect(tab, 'requestOut', syninfo['ampa'], 'getGk')
        ampa_gk.append(tab)
    return {'ampa_gk': ampa_gk,
            'nmda_gk': nmda_gk,
            'soma_vm': soma_vm,
            'data': data_container}

def setup_experiment(name, stim_order, onset, interval, passive=False, solver='hsolve'):
    """Setup an experiment with specified stimulation order.

    `stim_order` is a series of integers specifying the compartment
    numbers along dendritic branch dend_13.

    `onset` is time of onset of stimulation protocol.

    `interval` is the interval between stimulation time of successive
    synapses."""
    model_container = moose.Neutral('/model/%s' % (name))
    model_info = setup_model(model_container.path, synloc, passive=passive, solver=solver)
    data_container = moose.Neutral('/data/%s' % (name))
    data_info = setup_recording(data_container.path, model_info['neuron'], model_info['syninfo'])
    for ii, dend_no in enumerate(stim_order):
        comp_path = '%s/%s' % (model_info['neuron'].path, synloc[dend_no])
        stim = moose.PulseGen('%s/stim' % (comp_path))
        stim.delay[0] = onset + ii * interval
        stim.width[0] = 1e9  # The spike generator is edge triggered. A single level change will suffice.
        stim.level[0] = 1.0
    print 'Experiment %s has been setup.' % (name)
    print 'Stimulus order:', [synloc[ii] for ii in stim_order]
    print 'Stimulus onset:', onset
    print 'Inter stimulus interval:', interval
    return (data_info, model_info)

tstop = 200e-3
tonset = 50e-3
intervals = [1e-3]  # [ii * 1e-3 for ii in range(1, 11)]

def run_sim_parallel(passive=True, solver='hsolve'):
    data_info_list = []
    model_info_list = []
    for jj, ti in enumerate(intervals):
        for ii, st in enumerate(stim_order):
            experiment_name = 'expt_%d_%d' % (jj, ii)
            dinfo, minfo = setup_experiment(experiment_name, st, tonset, ti,
                                            passive=passive, solver=solver)
            data_info_list.append(dinfo)
            model_info_list.append(minfo)
    mutils.setDefaultDt(elecdt=simdt)
    mutils.assignDefaultTicks()
    moose.reinit()
    moose.start(tstop)
    print '$$$$$$$$$$$', moose.element('/clock').currentTime
    fig = plt.figure()
    axes_vm = fig.add_subplot(111)
    # axes_vm_out = fig.add_subplot(121)
    # axes_vm_in = fig.add_subplot(122, sharex=axes_vm_out, sharey=axes_vm_out)
    ################
    # axes_vm = fig.add_subplot(311)
    # axes_nmda = fig.add_subplot(312)
    # axes_ampa = fig.add_subplot(313)
    for jj, ti in enumerate(intervals):
        for ii, st in enumerate(stim_order):
            dinfo = data_info_list[jj * len(stim_order) + ii]
            print 'Interval=', ti, 'Stim order=', st
            print 'dinfo:', dinfo
            print dinfo['soma_vm']
            print dinfo['soma_vm'].vector
            v = dinfo['soma_vm'].vector
            t = np.linspace(0, tstop, len(v))
            print 'num points=', len(t), 't0=', t[0], 't_last=', t[-1], 'v0=', v[0], 'v_last=', v[-1]
            axes_vm.plot(t, v)
            # if ii % 2 == 0:
            #     axes_vm_in.plot(t, v, color=color[ii])
            # else:
            #     axes_vm_out.plot(t, v, color=color[ii])
            # for tab in dinfo['nmda_gk']:
            #     axes_nmda.plot(np.linspace(0, tstop, len(tab.vector)), tab.vector, color=color[ii])
            # # axes_nmda.legend()
            # for tab in dinfo['ampa_gk']:
            #     axes_ampa.plot(np.linspace(0, tstop, len(tab.vector)), tab.vector,
            #                    label='%s/%s' % (dinfo['data'].name, tab.name), color=color[ii])
    # axes_vm.legend([plt.Line2D([0], [0], color=color[ii]) for ii in range(len(stim_order))],
    #                [str(st) for st in stim_order])
    # axes_vm.legend()
    # axes_nmda.legend()
    # axes_ampa.legend()
    plt.show()

if __name__ == '__main__':
    if len(sys.argv) > 1:
        passive = sys.argv[1].lower() == 'passive'
    else:
        passive = False
    if len(sys.argv) > 2:
        solver = sys.argv[2].lower()
    else:
        solver = 'hsolve'
    print 'running simulation using: model with solver %s. Model is passive? %s' % (solver, passive)
    run_sim_parallel(passive=passive, solver=solver)

#
# rc19.py ends here
dilawar/moose-full
moose-core/tests/python/mus/rc19.py
Python
gpl-2.0
9,829
[ "MOOSE", "NEURON" ]
6fc5de66b9a833c1c056908ed9b6c74b6d5bd321d91bba43c5e8cc541726f2d1
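As a side note, `run_sim_parallel()` above rebuilds the time axis with `np.linspace` because a MOOSE table only stores sampled values. A self-contained sketch of that plotting step, using a synthetic stand-in for the recorded vector so no simulator is needed:

import numpy as np
from matplotlib import pyplot as plt

tstop = 200e-3
vm = -75e-3 + 10e-3 * np.exp(-np.linspace(0., 5., 400))  # stand-in for dinfo['soma_vm'].vector
t = np.linspace(0, tstop, len(vm))                       # same reconstruction as in run_sim_parallel
plt.plot(t, vm)
plt.xlabel('time (s)')
plt.ylabel('Vm (V)')
plt.show()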
import wx
import wx.html
import os
import sys
import webbrowser
import warnings
import pickle  # used by f_data below (was missing from the import list)
import wx.lib.agw.persist as PM

if getattr(sys, 'frozen', False):
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))))
elif __file__:
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from pyverilog.utils.verror import *
from pyverilog_toolbox.verify_tool.regmap_analyzer import *
from pyverilog_toolbox.verify_tool.combloop_finder import *
from pyverilog_toolbox.verify_tool.bindlibrary import *
from pyverilog_toolbox.verify_tool.cnt_analyzer import *
from pyverilog_toolbox.verify_tool.codeclone_finder import CodeCloneFinder
from pyverilog_toolbox.verify_tool.unreferenced_finder import UnreferencedFinder
from pyverilog_toolbox.verify_tool.metrics_calculator import MetricsCalculator
from pyverilog_toolbox.verify_tool.combloop_finder import CombLoopFinder
from pyverilog_toolbox.verify_tool.bindlibrary import CombLoopException
from pyverilog_toolbox.verify_tool.cnt_analyzer import CntAnalyzer
#from pyverilog_toolbox.gui.output_display import OutputDisplay

class GuiMain(wx.Frame):
    debug = False

    def OnClose(self, event):
        self._persistMgr.SaveAndUnregister()
        self.vfile_data.dump()
        event.Skip()

    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, "Pyv_guitools", size=(450, 550))
        # initialize status bar
        self.CreateStatusBar()
        self.SetStatusText("")
        self.GetStatusBar().SetBackgroundColour(None)
        # initialize menu bar
        self.Bind(wx.EVT_MENU, self.selectMenu)
        self.SetMenuBar(Menu())
        # build body
        self.commands = ("exec dataflow analyzer",
                         "exec controlflow analyzer",
                         "calculate code metrics",
                         "find combinational loop",
                         "find unused variables",
                         "find code clone",
                         "analyze counter",
                         "analyze register map")
        root_panel = wx.Panel(self, wx.ID_ANY)
        root_layout = wx.BoxSizer(wx.VERTICAL)
        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "TOP MODULE NAME:"), border=5)
        self.top_name_panel = TextPanel(root_panel)
        root_layout.Add(self.top_name_panel, 0, wx.GROW | wx.ALL, border=5)
        filebutton_panel = CommandButtonPanel(root_panel, "Verilog file select", self.click_fs_button)
        self.selected_file_panel = wx.StaticText(root_panel, wx.ID_ANY, "")
        root_layout.Add(filebutton_panel, 0, wx.GROW | wx.ALL, border=5)
        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "Selecting verilog file:"), border=5)
        root_layout.Add(self.selected_file_panel, border=5)
        self.radiobutton_panel = RadioPanel(root_panel, self.commands)
        root_layout.Add(self.radiobutton_panel, 0, wx.GROW | wx.ALL, border=10)
        exebutton_panel = CommandButtonPanel(root_panel, "EXECUTE!", self.click_exe_button)
        root_layout.Add(exebutton_panel, 0, wx.GROW | wx.LEFT | wx.RIGHT, border=5)
        root_panel.SetSizer(root_layout)
        root_layout.Fit(root_panel)
        self.dirname = ''
        # for persistence
        self.SetName('gui_main.dump')
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self._persistMgr = PM.PersistenceManager.Get()
        wx.CallAfter(self.RegisterControls)
        self.vfile_data = self.f_data()

    def RegisterControls(self):
        self.Freeze()
        self.Register()
        self.Thaw()

    def Register(self, children=None):
        if children is None:
            self._persistMgr.RegisterAndRestore(self)
            children = self.GetChildren()
        for child in children:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                name = child.GetName()
                if name not in PM.BAD_DEFAULT_NAMES and 'widget' not in name and \
                   'wxSpinButton' not in name:
                    self._persistMgr.RegisterAndRestore(child)
                if child.GetChildren():
                    self.Register(child.GetChildren())

    def click_fs_button(self, event):
        f_dlg = wx.FileDialog(self, "Select verilog file(s)", self.dirname, "", "*.*", wx.FD_MULTIPLE)
        self.SetStatusText("Selecting verilog file(s)...")
        if f_dlg.ShowModal() == wx.ID_OK:
            self.vfile_data.set_files(f_dlg.GetFilenames(), f_dlg.GetDirectory(), self.selected_file_panel)
        self.SetStatusText("")
        f_dlg.Destroy()

    def selectMenu(self, event):
        if event.GetId() == wx.ID_ABOUT:
            webbrowser.open('https://github.com/fukatani/Pyverilog_toolbox/blob/master/Readme.md')
        elif event.GetId() == wx.ID_EXIT:
            self.Destroy()

    def click_exe_button(self, event):
        now_command = self.radiobutton_panel.get_selected_item()
        if self.debug:
            print(now_command)
        if not hasattr(self.vfile_data, 'selected_vfiles'):
            self.ShowErrorMessage('Please select verilog files before execution.')
            return
        log_file_name = 'log.html'
        self.SetStatusText("Analyzing...")
        try:
            if now_command == 'exec dataflow analyzer':
                df = dataflow_facade(self.vfile_data.selected_full_path,
                                     topmodule=self.top_name_panel.get_text())
                df.html_name = log_file_name
                df.print_dataflow()
            elif now_command == 'exec controlflow analyzer':
                df = dataflow_facade(self.vfile_data.selected_full_path,
                                     topmodule=self.top_name_panel.get_text())
                df.html_name = log_file_name
                df.print_controlflow()
            elif now_command == 'calculate code metrics':
                mc = MetricsCalculator(self.vfile_data.selected_full_path,
                                       topmodule=self.top_name_panel.get_text())
                mc.html_name = log_file_name
                mc.synth_profile()
                mc.show()
            elif now_command == 'find combinational loop':
                cf = CombLoopFinder(self.vfile_data.selected_full_path,
                                    topmodule=self.top_name_panel.get_text())
                cf.html_name = log_file_name
                cf.search_combloop()
            elif now_command == 'find unused variables':
                uf = UnreferencedFinder(self.vfile_data.selected_full_path,
                                        topmodule=self.top_name_panel.get_text())
                uf.html_name = log_file_name
                uf.search_unreferenced()
            elif now_command == 'find code clone':
                cf = CodeCloneFinder(self.vfile_data.selected_full_path,
                                     topmodule=self.top_name_panel.get_text())
                cf.html_name = log_file_name
                cf.show()
            elif now_command == 'analyze counter':
                ca = CntAnalyzer(self.vfile_data.selected_full_path,
                                 topmodule=self.top_name_panel.get_text())
                ca.html_name = log_file_name
                ca.show()
            elif now_command == "analyze register map":
                RegMapConfig(self.vfile_data.selected_full_path,
                             topmodule=self.top_name_panel.get_text()).Show()
                return
            else:
                self.ShowErrorMessage('unimplemented function')
                return
            OutputDisplay(log_file_name).Show()
            self.SetStatusText("")
        except (DefinitionError, FormatError, ImplementationError, CombLoopException) as e:
            self.ShowErrorMessage(e.message)
        except IOError as e:
            if e.filename == 'preprocess.output':
                self.ShowErrorMessage(e.filename + ' is not found.' +
                                      '\n(Please make sure Icarus verilog is installed)')
            else:
                self.ShowErrorMessage(e.filename + ' is not found.')

    def ShowErrorMessage(self, message):
        wx.MessageBox(message, 'Error!', wx.ICON_ERROR)

    class f_data(object):
        """
        [CLASSES] Selected verilog file data.
        Registered by pickle.
        """
        def __init__(self):
            self.dump_enable = False
            if self.dump_enable:
                try:
                    with open("pyv.dump", "r") as f:
                        (self.selected_vfiles, self.selected_full_path) = pickle.load(f)
                        #if hasattr(self, 'selected_full_path'):
                except (IOError, EOFError):
                    pass

        def __get_state__(self):
            return self.selected_vfiles, self.selected_full_path

        def set_label(self, file_panel):
            if len(self.selected_vfiles) > 1:
                file_panel.SetLabel(self.selected_vfiles[0] + ', ...')
            else:
                file_panel.SetLabel(self.selected_vfiles[0])

        def set_files(self, filenames, directory, file_panel):
            self.selected_vfiles = filenames
            self.selected_full_path = [directory + "\\" + vfile for vfile in self.selected_vfiles]
            self.set_label(file_panel)

        def dump(self):
            if self.dump_enable:
                with open("pyv.dump", "w") as f:
                    pickle.dump(self, f)

class Menu(wx.MenuBar):
    def __init__(self):
        wx.MenuBar.__init__(self)
        menu_menu = wx.Menu()
        menu_menu.Append(wx.ID_ABOUT, "display usage(visit online github page)",
                         "https://github.com/fukatani/Pyverilog_toolbox")
        menu_menu.Append(wx.ID_EXIT, "exit", "exit pyv_gui")
        self.Append(menu_menu, "menu")

class TextPanel(wx.Panel):
    def __init__(self, parent, initial="TOP"):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.disp_text = wx.TextCtrl(self, wx.ID_ANY, initial, style=wx.TE_RIGHT)
        self.disp_text.SetName(initial + ".dump")
        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(self.disp_text, 1)
        self.SetSizer(layout)

    def get_text(self):
        return self.disp_text.GetValue()

class CommandButtonPanel(wx.Panel):
    def __init__(self, parent, disp_text, click_event):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        button = wx.Button(self, wx.ID_ANY, disp_text)
        button.Bind(wx.EVT_BUTTON, click_event)
        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(button, flag=wx.GROW)
        self.SetSizer(layout)

class RadioPanel(wx.Panel):
    def __init__(self, parent, button_array):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.radiobox = wx.RadioBox(self, wx.ID_ANY, choices=button_array, style=wx.RA_VERTICAL)
        self.radiobox.SetName("command_select.dump")
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.radiobox, 1, flag=wx.GROW)
        self.SetSizer(layout)

    def get_selected_item(self):
        return self.radiobox.GetStringSelection()

class RegMapConfig(wx.Frame):
    def __init__(self, full_path, topmodule):
        wx.Frame.__init__(self, None, wx.ID_ANY, "Analyze register map", size=(300, 400))
        self.full_path = full_path
        self.topmodule = topmodule
        self.__persistMgr = PM.PersistenceManager.Get()
        root_panel = wx.Panel(self, wx.ID_ANY)
        root_layout = wx.BoxSizer(wx.VERTICAL)
        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "WRITE FLAG SIGNAL:"), border=5)
        self.write_flag_panel = TextPanel(root_panel, "TOP.WRITE")
        root_layout.Add(self.write_flag_panel, 0, wx.GROW | wx.ALL, border=5)
##        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "READ FLAG SIGNAL:"), border=5)
##        self.read_flag_panel = TextPanel(root_panel, "TOP.READ")
##        root_layout.Add(self.read_flag_panel, 0, wx.GROW|wx.ALL, border=5)
        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "ADDRESS SIGNAL"), border=5)
        self.address_panel = TextPanel(root_panel, "TOP.ADR")
        root_layout.Add(self.address_panel, 0, wx.GROW | wx.ALL, border=5)
        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "WRITE DATA SIGNAL"), border=5)
        self.write_data_panel = TextPanel(root_panel, "TOP.W_DATA")
        root_layout.Add(self.write_data_panel, 0, wx.GROW | wx.ALL, border=5)
        root_layout.Add(wx.StaticText(root_panel, wx.ID_ANY, "READ DATA SIGNAL"), border=5)
        self.read_data_panel = TextPanel(root_panel, "TOP.R_DATA")
        root_layout.Add(self.read_data_panel, 0, wx.GROW | wx.ALL, border=5)
        exebutton_panel = CommandButtonPanel(root_panel, "EXECUTE!", self.click_exe_button)
        root_layout.Add(exebutton_panel, 0, wx.GROW | wx.LEFT | wx.RIGHT, border=5)
        root_panel.SetSizer(root_layout)
        root_layout.Fit(root_panel)
        # for persistence
        self.SetName('regmap_config.dump')
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self._persistMgr = PM.PersistenceManager.Get()
        wx.CallAfter(self.RegisterControls)

    def click_exe_button(self, event):
        with open("setup.txt", "w") as setup_file:
            setup_file.write("READ_FLAG:" + "None" + "\n")
            setup_file.write("WRITE_FLAG:" + self.write_flag_panel.get_text() + "\n")
            setup_file.write("ADDRESS:" + self.address_panel.get_text() + "\n")
            setup_file.write("WRITE_DATA:" + self.write_data_panel.get_text() + "\n")
            setup_file.write("READ_DATA:" + self.read_data_panel.get_text() + "\n")
        ra = RegMapAnalyzer(self.full_path, "setup.txt", self.topmodule, "out.csv")
        ra.getRegMaps()
        ra.csv2html("out.csv")
        OutputDisplay("log.html").Show()

    def RegisterControls(self):
        self.Freeze()
        self.Register()
        self.Thaw()

    def Register(self, children=None):
        if children is None:
            self._persistMgr.RegisterAndRestore(self)
            children = self.GetChildren()
        for child in children:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                name = child.GetName()
                if name not in PM.BAD_DEFAULT_NAMES and 'widget' not in name and \
                   'wxSpinButton' not in name:
                    self._persistMgr.RegisterAndRestore(child)
                if child.GetChildren():
                    self.Register(child.GetChildren())

    def OnClose(self, event):
        self._persistMgr.SaveAndUnregister()
        event.Skip()

class OutputDisplay(wx.Frame):
    def __init__(self, log_file_name):
        wx.Frame.__init__(self, None, wx.ID_ANY, "Output report", size=(900, 700))
        log = open(log_file_name, 'r')
        log_disp_panel = wx.html.HtmlWindow(self)
        if "gtk2" in wx.PlatformInfo:
            log_disp_panel.SetStandardFonts()
        log_disp_panel.SetPage("".join(log.readlines()))

if __name__ == "__main__":
    build_flag = False
    application = wx.App(redirect=build_flag)  # false in debugging
    frame = GuiMain()
    frame.Show()
    application.MainLoop()
fukatani/Pyverilog_toolbox
pyverilog_toolbox/gui/gui_main.py
Python
apache-2.0
14,752
[ "VisIt" ]
266a48885d9f83e8cf3b0805f91be0fc16fd0881f8d397753082ca79d882be05
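For reference, `RegMapConfig.click_exe_button()` above hands its settings to `RegMapAnalyzer` through a plain `setup.txt` file. A standalone sketch of that format, using the dialog's default signal names as placeholders:

signals = [
    ("READ_FLAG", "None"),
    ("WRITE_FLAG", "TOP.WRITE"),
    ("ADDRESS", "TOP.ADR"),
    ("WRITE_DATA", "TOP.W_DATA"),
    ("READ_DATA", "TOP.R_DATA"),
]
with open("setup.txt", "w") as setup_file:
    for key, value in signals:
        setup_file.write("%s:%s\n" % (key, value))  # one "KEY:signal" pair per line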
import json
import os
import re
import time
import ast

from . import python_minifier

class Visitor(ast.NodeVisitor):
    """Used to list all the modules imported by a script."""

    def __init__(self, lib_path, package):
        self.imports = set()
        self.lib_path = lib_path
        self.package = package

    def visit_Import(self, node):
        for alias in node.names:
            self.imports.add(alias.name)

    def visit_ImportFrom(self, node):
        if node.level > 0:
            package = self.package[:]
            level = node.level - 1
            while level:
                package.pop()
                level -= 1
            module = ".".join(package)
            if node.module:
                module += "." + node.module
        else:
            module = node.module
        self.imports.add(module)
        for alias in node.names:
            if alias.name == "*":
                continue
            else:
                # Only keep "from X import Y" if X.Y is a module, not if Y
                # is a variable defined in X
                path = os.path.join(self.lib_path, *module.split("."),
                                    alias.name + ".py")
                if os.path.exists(path):
                    self.imports.add(module + "." + alias.name)

def make(package_name, package_path, exclude_dirs=None):
    if not package_name:
        raise ValueError("package name is not specified")
    print("Generating package {}".format(package_name))
    VFS = {"$timestamp": int(1000 * time.time())}
    has_init = os.path.exists(os.path.join(package_path, "__init__.py"))
    nb = 0
    if exclude_dirs is None:
        exclude_dirs = []
    for dirpath, dirnames, filenames in os.walk(package_path):
        flag = False
        root_elts = dirpath.split(os.sep)
        for exclude in exclude_dirs:
            if exclude in root_elts:
                flag = True
                break
        if flag:
            continue  # skip excluded directories (the original "continue" only affected the inner loop)
        if '__pycache__' in dirnames:
            dirnames.remove("__pycache__")
        if dirpath == package_path:
            package = []
        else:
            package = dirpath[len(package_path) + 1:].split(os.sep)
        if has_init:
            package.insert(0, package_name)
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext != '.py':
                continue
            is_package = name.endswith('__init__')
            if is_package:
                mod_name = '.'.join(package)
            else:
                mod_name = '.'.join(package + [name])
            nb += 1
            absname = os.path.join(dirpath, filename)
            with open(absname, encoding='utf-8') as f:
                data = f.read()
            data = python_minifier.minify(data, preserve_lines=True)
            path_elts = package[:]
            if os.path.basename(filename) != "__init__.py":
                path_elts.append(os.path.basename(filename)[:-3])
            fqname = ".".join(path_elts)
            with open(absname, encoding="utf-8") as f:
                tree = ast.parse(f.read())
            visitor = Visitor(package_path, package)
            visitor.visit(tree)
            imports = sorted(list(visitor.imports))
            if is_package:
                VFS[mod_name] = [ext, data, imports, 1]
            else:
                VFS[mod_name] = [ext, data, imports]
            print("adding {} package {}".format(mod_name, is_package))
    if nb == 0:
        print("No Python file found in current directory")
    else:
        print('{} files'.format(nb))
    with open(os.path.join(package_path, package_name + ".brython.js"), "w",
              encoding="utf-8") as out:
        out.write('__BRYTHON__.use_VFS = true;\n')
        out.write('var scripts = {}\n'.format(json.dumps(VFS)))
        out.write('__BRYTHON__.update_VFS(scripts)\n')

if __name__ == "__main__":
    import sys
    package_name = sys.argv[1] if len(sys.argv) > 1 else ""
    src_dir = sys.argv[2] if len(sys.argv) > 2 else os.getcwd()
    make(package_name, src_dir)
kikocorreoso/brython
setup/brython/make_package.py
Python
bsd-3-clause
4,055
[ "VisIt" ]
8d9d49cd44fc18d10ba2c401ee0041f65bfde84a05ec4d62c13fac5e5f10c685
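A quick way to see what the `Visitor` class above collects is to run it on an in-memory module. This sketch assumes the `Visitor` definition from make_package.py is in scope; the `lib_path`/`package` arguments are placeholders, so relative imports and the module-vs-variable check on "from X import Y" are not exercised here.

import ast

src = "import os\nimport json\nfrom collections import OrderedDict\n"
visitor = Visitor(lib_path=".", package=[])
visitor.visit(ast.parse(src))
print(sorted(visitor.imports))  # expected: ['collections', 'json', 'os']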
""" Views for the verification flow """ import datetime import decimal import json import logging import urllib import analytics from django.conf import settings from django.contrib.auth.decorators import login_required from django.contrib.staticfiles.storage import staticfiles_storage from django.core.mail import send_mail from django.db import transaction from django.http import Http404, HttpResponse, HttpResponseBadRequest from django.shortcuts import redirect from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.translation import ugettext as _ from django.utils.translation import ugettext_lazy from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_POST from django.views.generic.base import View from edx_rest_api_client.exceptions import SlumberBaseException from eventtracking import tracker from ipware.ip import get_ip from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from pytz import UTC from course_modes.models import CourseMode from edxmako.shortcuts import render_to_response, render_to_string from lms.djangoapps.commerce.utils import EcommerceService, is_account_activation_requirement_disabled from lms.djangoapps.verify_student.image import InvalidImageData, decode_image_data from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification, VerificationDeadline from lms.djangoapps.verify_student.services import IDVerificationService from lms.djangoapps.verify_student.ssencrypt import has_valid_signature from lms.djangoapps.verify_student.tasks import send_verification_status_email from lms.djangoapps.verify_student.utils import is_verification_expiring_soon from openedx.core.djangoapps.commerce.utils import ecommerce_api_client from openedx.core.djangoapps.embargo import api as embargo_api from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH from openedx.core.djangoapps.user_api.accounts.api import update_account_settings from openedx.core.djangoapps.user_api.errors import AccountValidationError, UserNotFound from openedx.core.lib.log_utils import audit_log from shoppingcart.models import CertificateItem, Order from shoppingcart.processors import get_purchase_endpoint, get_signed_purchase_params from student.models import CourseEnrollment from util.db import outer_atomic from util.json_request import JsonResponse from xmodule.modulestore.django import modulestore log = logging.getLogger(__name__) class PayAndVerifyView(View): """ View for the "verify and pay" flow. This view is somewhat complicated, because the user can enter it from a number of different places: * From the "choose your track" page. * After completing payment. * From the dashboard in order to complete verification. * From the dashboard in order to upgrade to a verified track. The page will display different steps and requirements depending on: * Whether the user has submitted a photo verification recently. * Whether the user has paid for the course. * How the user reached the page (mostly affects messaging) We are also super-paranoid about how users reach this page. If they somehow aren't enrolled, or the course doesn't exist, or they've unenrolled, or they've already paid/verified, ... then we try to redirect them to the page with the most appropriate messaging (including the dashboard). 
Note that this page does NOT handle re-verification (photo verification that was denied or had an error); that is handled by the "reverify" view. """ # Step definitions # # These represent the numbered steps a user sees in # the verify / payment flow. # # Steps can either be: # - displayed or hidden # - complete or incomplete # # For example, when a user enters the verification/payment # flow for the first time, the user will see steps # for both payment and verification. As the user # completes these steps (for example, submitting a photo) # the steps will be marked "complete". # # If a user has already verified for another course, # then the verification steps will be hidden, # since the user has already completed them. # # If a user re-enters the flow from another application # (for example, after completing payment through # a third-party payment processor), then the user # will resume the flow at an intermediate step. # INTRO_STEP = 'intro-step' MAKE_PAYMENT_STEP = 'make-payment-step' PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step' FACE_PHOTO_STEP = 'face-photo-step' ID_PHOTO_STEP = 'id-photo-step' REVIEW_PHOTOS_STEP = 'review-photos-step' ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step' ALL_STEPS = [ INTRO_STEP, MAKE_PAYMENT_STEP, PAYMENT_CONFIRMATION_STEP, FACE_PHOTO_STEP, ID_PHOTO_STEP, REVIEW_PHOTOS_STEP, ENROLLMENT_CONFIRMATION_STEP ] PAYMENT_STEPS = [ MAKE_PAYMENT_STEP, PAYMENT_CONFIRMATION_STEP ] VERIFICATION_STEPS = [ FACE_PHOTO_STEP, ID_PHOTO_STEP, REVIEW_PHOTOS_STEP, ENROLLMENT_CONFIRMATION_STEP ] # These steps can be skipped using the ?skip-first-step GET param SKIP_STEPS = [ INTRO_STEP, ] STEP_TITLES = { INTRO_STEP: ugettext_lazy("Intro"), MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"), PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"), FACE_PHOTO_STEP: ugettext_lazy("Take photo"), ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"), REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"), ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"), } # Messages # # Depending on how the user entered reached the page, # we will display different text messaging. # For example, we show users who are upgrading # slightly different copy than users who are verifying # for the first time. # FIRST_TIME_VERIFY_MSG = 'first-time-verify' VERIFY_NOW_MSG = 'verify-now' VERIFY_LATER_MSG = 'verify-later' UPGRADE_MSG = 'upgrade' PAYMENT_CONFIRMATION_MSG = 'payment-confirmation' # Requirements # # These explain to the user what he or she # will need to successfully pay and/or verify. # # These are determined by the steps displayed # to the user; for example, if the user does not # need to complete the verification steps, # then the photo ID and webcam requirements are hidden. 
# ACCOUNT_ACTIVATION_REQ = "account-activation-required" PHOTO_ID_REQ = "photo-id-required" WEBCAM_REQ = "webcam-required" STEP_REQUIREMENTS = { ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ], FACE_PHOTO_STEP: [WEBCAM_REQ], } # Deadline types VERIFICATION_DEADLINE = "verification" UPGRADE_DEADLINE = "upgrade" def _get_user_active_status(self, user): """ Returns the user's active status to the caller Overrides the actual value if account activation has been disabled via waffle switch Arguments: user (User): Current user involved in the onboarding/verification flow """ return user.is_active or is_account_activation_requirement_disabled() @method_decorator(login_required) def get( self, request, course_id, always_show_payment=False, current_step=None, message=FIRST_TIME_VERIFY_MSG ): """ Render the payment and verification flow. Arguments: request (HttpRequest): The request object. course_id (unicode): The ID of the course the user is trying to enroll in. Keyword Arguments: always_show_payment (bool): If True, show the payment steps even if the user has already paid. This is useful for users returning to the flow after paying. current_step (string): The current step in the flow. message (string): The messaging to display. Returns: HttpResponse Raises: Http404: The course does not exist or does not have a verified mode. """ # Parse the course key # The URL regex should guarantee that the key format is valid. course_key = CourseKey.from_string(course_id) course = modulestore().get_course(course_key) # Verify that the course exists if course is None: log.warn(u"Could not find course with ID %s.", course_id) raise Http404 # Check whether the user has access to this course # based on country access rules. redirect_url = embargo_api.redirect_if_blocked( course_key, user=request.user, ip_address=get_ip(request), url=request.path ) if redirect_url: return redirect(redirect_url) # If the verification deadline has passed # then show the user a message that he/she can't verify. # # We're making the assumptions (enforced in Django admin) that: # # 1) Only verified modes have verification deadlines. # # 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you # let someone upgrade into a verified track if they can't complete verification? # verification_deadline = VerificationDeadline.deadline_for_course(course.id) response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline) if response is not None: log.info(u"Verification deadline for '%s' has passed.", course.id) return response # Retrieve the relevant course mode for the payment/verification flow. # # WARNING: this is technical debt! A much better way to do this would be to # separate out the payment flow and use the product SKU to figure out what # the user is trying to purchase. # # Nonetheless, for the time being we continue to make the really ugly assumption # that at some point there was a paid course mode we can query for the price. relevant_course_mode = self._get_paid_mode(course_key) # If we can find a relevant course mode, then log that we're entering the flow # Otherwise, this course does not support payment/verification, so respond with a 404. 
if relevant_course_mode is not None: if CourseMode.is_verified_mode(relevant_course_mode): log.info( u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.", request.user.id, course_id, current_step ) else: log.info( u"Entering payment flow for user '%s', course '%s', with current step '%s'", request.user.id, course_id, current_step ) else: # Otherwise, there has never been a verified/paid mode, # so return a page not found response. log.warn( u"No paid/verified course mode found for course '%s' for verification/payment flow request", course_id ) raise Http404 # If the user is trying to *pay* and the upgrade deadline has passed, # then they shouldn't be able to enter the flow. # # NOTE: This should match the availability dates used by the E-Commerce service # to determine whether a user can purchase a product. The idea is that if the service # won't fulfill the order, we shouldn't even let the user get into the payment flow. # user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG] if user_is_trying_to_pay: upgrade_deadline = relevant_course_mode.expiration_datetime response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline) if response is not None: log.info(u"Upgrade deadline for '%s' has passed.", course.id) return response # Check whether the user has verified, paid, and enrolled. # A user is considered "paid" if he or she has an enrollment # with a paid course mode (such as "verified"). # For this reason, every paid user is enrolled, but not # every enrolled user is paid. # If the course mode is not verified(i.e only paid) then already_verified is always True already_verified = ( self._check_already_verified(request.user) if CourseMode.is_verified_mode(relevant_course_mode) else True ) already_paid, is_enrolled = self._check_enrollment(request.user, course_key) # Redirect the user to a more appropriate page if the # messaging won't make sense based on the user's # enrollment / payment / verification status. sku_to_use = relevant_course_mode.sku purchase_workflow = request.GET.get('purchase_workflow', 'single') if purchase_workflow == 'bulk' and relevant_course_mode.bulk_sku: sku_to_use = relevant_course_mode.bulk_sku redirect_response = self._redirect_if_necessary( message, already_verified, already_paid, is_enrolled, course_key, user_is_trying_to_pay, request.user, sku_to_use ) if redirect_response is not None: return redirect_response display_steps = self._display_steps( always_show_payment, already_verified, already_paid, relevant_course_mode ) # Override the actual value if account activation has been disabled # Also see the reference to this parameter in context dictionary further down user_is_active = self._get_user_active_status(request.user) requirements = self._requirements(display_steps, user_is_active) if current_step is None: current_step = display_steps[0]['name'] # Allow the caller to skip the first page # This is useful if we want the user to be able to # use the "back" button to return to the previous step. 
# This parameter should only work for known skip-able steps if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS: display_step_names = [step['name'] for step in display_steps] current_step_idx = display_step_names.index(current_step) if (current_step_idx + 1) < len(display_steps): current_step = display_steps[current_step_idx + 1]['name'] courseware_url = "" if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC): courseware_url = reverse( 'course_root', kwargs={'course_id': unicode(course_key)} ) full_name = ( request.user.profile.name if request.user.profile.name else "" ) # If the user set a contribution amount on another page, # use that amount to pre-fill the price selection form. contribution_amount = request.session.get( 'donation_for_course', {} ).get(unicode(course_key), '') # Remember whether the user is upgrading # so we can fire an analytics event upon payment. request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG) # Determine the photo verification status verification_good_until = self._verification_valid_until(request.user) # get available payment processors if relevant_course_mode.sku: # transaction will be conducted via ecommerce service processors = ecommerce_api_client(request.user).payment.processors.get() else: # transaction will be conducted using legacy shopping cart processors = [settings.CC_PROCESSOR_NAME] # Render the top-level page context = { 'contribution_amount': contribution_amount, 'course': course, 'course_key': unicode(course_key), 'checkpoint_location': request.GET.get('checkpoint'), 'course_mode': relevant_course_mode, 'courseware_url': courseware_url, 'current_step': current_step, 'disable_courseware_js': True, 'display_steps': display_steps, 'is_active': json.dumps(user_is_active), 'user_email': request.user.email, 'message_key': message, 'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME), 'processors': processors, 'requirements': requirements, 'user_full_name': full_name, 'verification_deadline': verification_deadline or "", 'already_verified': already_verified, 'verification_good_until': verification_good_until, 'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"), 'nav_hidden': True, 'is_ab_testing': 'begin-flow' in request.path, } return render_to_response("verify_student/pay_and_verify.html", context) def add_utm_params_to_url(self, url): # utm_params is [(u'utm_content', u'course-v1:IDBx IDB20.1x 1T2017'),... utm_params = [item for item in self.request.GET.items() if 'utm_' in item[0]] # utm_params is utm_content=course-v1%3AIDBx+IDB20.1x+1T2017&... utm_params = urllib.urlencode(utm_params, True) # utm_params is utm_content=course-v1:IDBx+IDB20.1x+1T2017&... # (course-keys do not have url encoding) utm_params = urllib.unquote(utm_params) if utm_params: if '?' in url: url = url + '&' + utm_params else: url = url + '?' + utm_params return url def _redirect_if_necessary( self, message, already_verified, already_paid, is_enrolled, course_key, user_is_trying_to_pay, user, sku ): """Redirect the user to a more appropriate page if necessary. In some cases, a user may visit this page with verification / enrollment / payment state that we don't anticipate. For example, a user may unenroll from the course after paying for it, then visit the "verify now" page to complete verification. When this happens, we try to redirect the user to the most appropriate page. Arguments: message (string): The messaging of the page. 
Should be a key in `MESSAGES`. already_verified (bool): Whether the user has submitted a verification request recently. already_paid (bool): Whether the user is enrolled in a paid course mode. is_enrolled (bool): Whether the user has an active enrollment in the course. course_key (CourseKey): The key for the course. Returns: HttpResponse or None """ url = None course_kwargs = {'course_id': unicode(course_key)} if already_verified and already_paid: # If they've already paid and verified, there's nothing else to do, # so redirect them to the dashboard. if message != self.PAYMENT_CONFIRMATION_MSG: url = reverse('dashboard') elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]: if is_enrolled: # If the user is already enrolled but hasn't yet paid, # then the "upgrade" messaging is more appropriate. if not already_paid: url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs) else: # If the user is NOT enrolled, then send him/her # to the first time verification page. url = reverse('verify_student_start_flow', kwargs=course_kwargs) elif message == self.UPGRADE_MSG: if is_enrolled: if already_paid: # If the student has paid, but not verified, redirect to the verification flow. url = reverse('verify_student_verify_now', kwargs=course_kwargs) else: url = reverse('verify_student_start_flow', kwargs=course_kwargs) if user_is_trying_to_pay and self._get_user_active_status(user) and not already_paid: # If the user is trying to pay, has activated their account, and the ecommerce service # is enabled redirect him to the ecommerce checkout page. ecommerce_service = EcommerceService() if ecommerce_service.is_enabled(user): url = ecommerce_service.get_checkout_page_url( sku, catalog=self.request.GET.get('catalog') ) # Redirect if necessary, otherwise implicitly return None if url is not None: url = self.add_utm_params_to_url(url) return redirect(url) def _get_paid_mode(self, course_key): """ Retrieve the paid course mode for a course. The returned course mode may or may not be expired. Unexpired modes are preferred to expired modes. Arguments: course_key (CourseKey): The location of the course. Returns: CourseMode tuple """ # Retrieve all the modes at once to reduce the number of database queries all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key]) # Retrieve the first mode that matches the following criteria: # * Unexpired # * Price > 0 # * Not credit for mode in unexpired_modes[course_key]: if mode.min_price > 0 and not CourseMode.is_credit_mode(mode): return mode # Otherwise, find the first non credit expired paid mode for mode in all_modes[course_key]: if mode.min_price > 0 and not CourseMode.is_credit_mode(mode): return mode # Otherwise, return None and so the view knows to respond with a 404. return None def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode): """Determine which steps to display to the user. Includes all steps by default, but removes steps if the user has already completed them. Arguments: always_show_payment (bool): If True, display the payment steps even if the user has already paid. already_verified (bool): Whether the user has submitted a verification request recently. already_paid (bool): Whether the user is enrolled in a paid course mode. 
Returns: list """ display_steps = self.ALL_STEPS remove_steps = set() if already_verified or not CourseMode.is_verified_mode(course_mode): remove_steps |= set(self.VERIFICATION_STEPS) if already_paid and not always_show_payment: remove_steps |= set(self.PAYMENT_STEPS) else: # The "make payment" step doubles as an intro step, # so if we're showing the payment step, hide the intro step. remove_steps |= set([self.INTRO_STEP]) return [ { 'name': step, 'title': unicode(self.STEP_TITLES[step]), } for step in display_steps if step not in remove_steps ] def _requirements(self, display_steps, is_active): """Determine which requirements to show the user. For example, if the user needs to submit a photo verification, tell the user that she will need a photo ID and a webcam. Arguments: display_steps (list): The steps to display to the user. is_active (bool): If False, adds a requirement to activate the user account. Returns: dict: Keys are requirement names, values are booleans indicating whether to show the requirement. """ all_requirements = { self.ACCOUNT_ACTIVATION_REQ: not is_active, self.PHOTO_ID_REQ: False, self.WEBCAM_REQ: False, } # Remove the account activation requirement if disabled via waffle if is_account_activation_requirement_disabled(): all_requirements.pop(self.ACCOUNT_ACTIVATION_REQ) display_steps = set(step['name'] for step in display_steps) for step, step_requirements in self.STEP_REQUIREMENTS.iteritems(): if step in display_steps: for requirement in step_requirements: all_requirements[requirement] = True return all_requirements def _verification_valid_until(self, user, date_format="%m/%d/%Y"): """ Check whether the user has a valid or pending verification. Arguments: user: date_format: optional parameter for formatting datetime object to string in response Returns: datetime object in string format """ expiration_datetime = IDVerificationService.get_expiration_datetime( user, ['submitted', 'approved', 'must_retry'] ) # return 'expiration_datetime' of latest photo verification if found, # otherwise implicitly return '' if expiration_datetime: return expiration_datetime.strftime(date_format) return '' def _check_already_verified(self, user): """Check whether the user has a valid or pending verification. Note that this includes cases in which the user's verification has not been accepted (either because it hasn't been processed, or there was an error). This should return True if the user has done their part: submitted photos within the expiration period. """ return IDVerificationService.user_has_valid_or_pending(user) def _check_enrollment(self, user, course_key): """Check whether the user has an active enrollment and has paid. If a user is enrolled in a paid course mode, we assume that the user has paid. Arguments: user (User): The user to check. course_key (CourseKey): The key of the course to check. Returns: Tuple `(has_paid, is_active)` indicating whether the user has paid and whether the user has an active account. """ enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key) has_paid = False if enrollment_mode is not None and is_active: all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True) course_mode = all_modes.get(enrollment_mode) has_paid = (course_mode and course_mode.min_price > 0) return (has_paid, bool(is_active)) def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime): """ Respond with some error messaging if the deadline has passed. 
Arguments: course (Course): The course the user is trying to enroll in. deadline_name (str): One of the deadline constants. deadline_datetime (datetime): The deadline. Returns: HttpResponse or None """ if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]: log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name) return None deadline_passed = ( deadline_datetime is not None and deadline_datetime < datetime.datetime.now(UTC) ) if deadline_passed: context = { 'course': course, 'deadline_name': deadline_name, 'deadline': deadline_datetime } return render_to_response("verify_student/missed_deadline.html", context) def checkout_with_ecommerce_service(user, course_key, course_mode, processor): """ Create a new basket and trigger immediate checkout, using the E-Commerce API. """ course_id = unicode(course_key) try: api = ecommerce_api_client(user) # Make an API call to create the order and retrieve the results result = api.baskets.post({ 'products': [{'sku': course_mode.sku}], 'checkout': True, 'payment_processor_name': processor }) # Pass the payment parameters directly from the API response. return result.get('payment_data') except SlumberBaseException: params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id} log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params) raise finally: audit_log( 'checkout_requested', course_id=course_id, mode=course_mode.slug, processor_name=processor, user_id=user.id ) def checkout_with_shoppingcart(request, user, course_key, course_mode, amount): """ Create an order and trigger checkout using shoppingcart.""" cart = Order.get_cart_for_user(user) cart.clear() enrollment_mode = course_mode.slug CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode) # Change the order's status so that we don't accidentally modify it later. # We need to do this to ensure that the parameters we send to the payment system # match what we store in the database. # (Ordinarily we would do this client-side when the user submits the form, but since # the JavaScript on this page does that immediately, we make the change here instead. # This avoids a second AJAX call and some additional complication of the JavaScript.) # If a user later re-enters the verification / payment flow, she will create a new order. cart.start_purchase() callback_url = request.build_absolute_uri( reverse("shoppingcart.views.postpay_callback") ) payment_data = { 'payment_processor_name': settings.CC_PROCESSOR_NAME, 'payment_page_url': get_purchase_endpoint(), 'payment_form_data': get_signed_purchase_params( cart, callback_url=callback_url, extra_data=[unicode(course_key), course_mode.slug] ), } return payment_data @require_POST @login_required def create_order(request): """ This endpoint is named 'create_order' for backward compatibility, but its actual use is to add a single product to the user's cart and request immediate checkout. 
""" course_id = request.POST['course_id'] course_id = CourseKey.from_string(course_id) donation_for_course = request.session.get('donation_for_course', {}) contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0)) try: amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN) except decimal.InvalidOperation: return HttpResponseBadRequest(_("Selected price is not valid number.")) current_mode = None sku = request.POST.get('sku', None) if sku: try: current_mode = CourseMode.objects.get(sku=sku) except CourseMode.DoesNotExist: log.exception(u'Failed to find CourseMode with SKU [%s].', sku) if not current_mode: # Check if there are more than 1 paid(mode with min_price>0 e.g verified/professional/no-id-professional) modes # for course exist then choose the first one paid_modes = CourseMode.paid_modes_for_course(course_id) if paid_modes: if len(paid_modes) > 1: log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id) current_mode = paid_modes[0] # Make sure this course has a paid mode if not current_mode: log.warn(u"Create order requested for course '%s' without a paid mode.", course_id) return HttpResponseBadRequest(_("This course doesn't support paid certificates")) if CourseMode.is_professional_mode(current_mode): amount = current_mode.min_price if amount < current_mode.min_price: return HttpResponseBadRequest(_("No selected price or selected price is below minimum.")) if current_mode.sku: # if request.POST doesn't contain 'processor' then the service's default payment processor will be used. payment_data = checkout_with_ecommerce_service( request.user, course_id, current_mode, request.POST.get('processor') ) else: payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount) if 'processor' not in request.POST: # (XCOM-214) To be removed after release. # the absence of this key in the POST payload indicates that the request was initiated from # a stale js client, which expects a response containing only the 'payment_form_data' part of # the payment data result. payment_data = payment_data['payment_form_data'] return HttpResponse(json.dumps(payment_data), content_type="application/json") class SubmitPhotosView(View): """ End-point for submitting photos for verification. """ @method_decorator(transaction.non_atomic_requests) def dispatch(self, request, *args, **kwargs): return super(SubmitPhotosView, self).dispatch(request, *args, **kwargs) @method_decorator(login_required) @method_decorator(outer_atomic(read_committed=True)) def post(self, request): """ Submit photos for verification. This end-point is used for the following cases: * Initial verification through the pay-and-verify flow. * Initial verification initiated from a checkpoint within a course. * Re-verification initiated from a checkpoint within a course. POST Parameters: face_image (str): base64-encoded image data of the user's face. photo_id_image (str): base64-encoded image data of the user's photo ID. full_name (str): The user's full name, if the user is requesting a name change as well. course_key (str): Identifier for the course, if initiated from a checkpoint. checkpoint (str): Location of the checkpoint in the course. """ # If the user already has an initial verification attempt, we can re-use the photo ID # the user submitted with the initial attempt. 
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user) # Validate the POST parameters params, response = self._validate_parameters(request, bool(initial_verification)) if response is not None: return response # If necessary, update the user's full name if "full_name" in params: response = self._update_full_name(request.user, params["full_name"]) if response is not None: return response # Retrieve the image data # Validation ensures that we'll have a face image, but we may not have # a photo ID image if this is a reverification. face_image, photo_id_image, response = self._decode_image_data( params["face_image"], params.get("photo_id_image") ) # If we have a photo_id we do not want use the initial verification image. if photo_id_image is not None: initial_verification = None if response is not None: return response # Submit the attempt attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification) self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"}) self._send_confirmation_email(request.user) return JsonResponse({}) def _validate_parameters(self, request, has_initial_verification): """ Check that the POST parameters are valid. Arguments: request (HttpRequest): The request object. has_initial_verification (bool): Whether the user has an initial verification attempt. Returns: HttpResponse or None """ # Pull out the parameters we care about. params = { param_name: request.POST[param_name] for param_name in [ "face_image", "photo_id_image", "course_key", "full_name" ] if param_name in request.POST } # If the user already has an initial verification attempt, then we don't # require the user to submit a photo ID image, since we can re-use the photo ID # image from the initial attempt. # If we don't have an initial verification OR a photo ID image, something has gone # terribly wrong in the JavaScript. Log this as an error so we can track it down. if "photo_id_image" not in params and not has_initial_verification: log.error( ( "User %s does not have an initial verification attempt " "and no photo ID image data was provided. " "This most likely means that the JavaScript client is not " "correctly constructing the request to submit photos." ), request.user.id ) return None, HttpResponseBadRequest( _("Photo ID image is required if the user does not have an initial verification attempt.") ) # The face image is always required. if "face_image" not in params: msg = _("Missing required parameter face_image") return None, HttpResponseBadRequest(msg) # If provided, parse the course key and checkpoint location if "course_key" in params: try: params["course_key"] = CourseKey.from_string(params["course_key"]) except InvalidKeyError: return None, HttpResponseBadRequest(_("Invalid course key")) return params, None def _update_full_name(self, user, full_name): """ Update the user's full name. Arguments: user (User): The user to update. full_name (unicode): The user's updated full name. Returns: HttpResponse or None """ try: update_account_settings(user, {"name": full_name}) except UserNotFound: return HttpResponseBadRequest(_("No profile found for user")) except AccountValidationError: msg = _( "Name must be at least {min_length} characters long." ).format(min_length=NAME_MIN_LENGTH) return HttpResponseBadRequest(msg) def _decode_image_data(self, face_data, photo_id_data=None): """ Decode image data sent with the request. Arguments: face_data (str): base64-encoded face image data. 
Keyword Arguments: photo_id_data (str): base64-encoded photo ID image data. Returns: tuple of (str, str, HttpResponse) """ try: # Decode face image data (used for both an initial and re-verification) face_image = decode_image_data(face_data) # Decode the photo ID image data if it's provided photo_id_image = ( decode_image_data(photo_id_data) if photo_id_data is not None else None ) return face_image, photo_id_image, None except InvalidImageData: msg = _("Image data is not valid.") return None, None, HttpResponseBadRequest(msg) def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None): """ Submit a verification attempt. Arguments: user (User): The user making the attempt. face_image (str): Decoded face image data. Keyword Arguments: photo_id_image (str or None): Decoded photo ID image data. initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt. """ attempt = SoftwareSecurePhotoVerification(user=user) # We will always have face image data, so upload the face image attempt.upload_face_image(face_image) # If an ID photo wasn't submitted, re-use the ID photo from the initial attempt. # Earlier validation rules ensure that at least one of these is available. if photo_id_image is not None: attempt.upload_photo_id_image(photo_id_image) elif initial_verification is None: # Earlier validation should ensure that we never get here. log.error( "Neither a photo ID image or initial verification attempt provided. " "Parameter validation in the view should prevent this from happening!" ) # Submit the attempt attempt.mark_ready() attempt.submit(copy_id_photo_from=initial_verification) return attempt def _send_confirmation_email(self, user): """ Send an email confirming that the user submitted photos for initial verification. """ context = { 'full_name': user.profile.name, 'platform_name': configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME) } subject = _("{platform_name} ID Verification Photos Received").format(platform_name=context['platform_name']) message = render_to_string('emails/photo_submission_confirmation.txt', context) from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL) to_address = user.email try: send_mail(subject, message, from_address, [to_address], fail_silently=False) except: # pylint: disable=bare-except # We catch all exceptions and log them. # It would be much, much worse to roll back the transaction due to an uncaught # exception than to skip sending the notification email. log.exception("Could not send notification email for initial verification for user %s", user.id) def _fire_event(self, user, event_name, parameters): """ Fire an analytics event. Arguments: user (User): The user who submitted photos. event_name (str): Name of the analytics event. parameters (dict): Event parameters. Returns: None """ if settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() context = { 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } } analytics.track(user.id, event_name, parameters, context=context) @require_POST @csrf_exempt # SS does its own message signing, and their API won't have a cookie value def results_callback(request): """ Software Secure will call this callback to tell us whether a user is verified to be who they said they are. 
""" body = request.body try: body_dict = json.loads(body) except ValueError: log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body)) return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body)) if not isinstance(body_dict, dict): log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body)) return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body)) headers = { "Authorization": request.META.get("HTTP_AUTHORIZATION", ""), "Date": request.META.get("HTTP_DATE", "") } has_valid_signature( "POST", headers, body_dict, settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"], settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"] ) _response, access_key_and_sig = headers["Authorization"].split(" ") access_key = access_key_and_sig.split(":")[0] # This is what we should be doing... #if not sig_valid: # return HttpResponseBadRequest("Signature is invalid") # This is what we're doing until we can figure out why we disagree on sigs if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]: return HttpResponseBadRequest("Access key invalid") receipt_id = body_dict.get("EdX-ID") result = body_dict.get("Result") reason = body_dict.get("Reason", "") error_code = body_dict.get("MessageType", "") try: attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id) except SoftwareSecurePhotoVerification.DoesNotExist: log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id) return HttpResponseBadRequest("edX ID {} not found".format(receipt_id)) user = attempt.user verification_status_email_vars = { 'platform_name': settings.PLATFORM_NAME, } if result == "PASS": log.debug("Approving verification for %s", receipt_id) attempt.approve() status = "approved" expiry_date = datetime.date.today() + datetime.timedelta( days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"] ) verification_status_email_vars['expiry_date'] = expiry_date.strftime("%m/%d/%Y") verification_status_email_vars['full_name'] = user.profile.name subject = _("Your {platform_name} ID Verification Approved").format( platform_name=settings.PLATFORM_NAME ) context = { 'subject': subject, 'template': 'emails/passed_verification_email.txt', 'email': user.email, 'email_vars': verification_status_email_vars } send_verification_status_email.delay(context) elif result == "FAIL": log.debug("Denying verification for %s", receipt_id) attempt.deny(json.dumps(reason), error_code=error_code) status = "denied" reverify_url = '{}{}'.format(settings.LMS_ROOT_URL, reverse("verify_student_reverify")) verification_status_email_vars['reasons'] = reason verification_status_email_vars['reverify_url'] = reverify_url verification_status_email_vars['faq_url'] = settings.ID_VERIFICATION_SUPPORT_LINK subject = _("Your {platform_name} Verification Has Been Denied").format( platform_name=settings.PLATFORM_NAME ) context = { 'subject': subject, 'template': 'emails/failed_verification_email.txt', 'email': user.email, 'email_vars': verification_status_email_vars } send_verification_status_email.delay(context) elif result == "SYSTEM FAIL": log.debug("System failure for %s -- resetting to must_retry", receipt_id) attempt.system_error(json.dumps(reason), error_code=error_code) status = "error" log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason) else: log.error("Software Secure returned unknown result %s", result) return HttpResponseBadRequest( "Result {} not understood. 
Known results: PASS, FAIL, SYSTEM FAIL".format(result) ) return HttpResponse("OK!") class ReverifyView(View): """ Reverification occurs when a user's initial verification is denied or expires. When this happens, users can re-submit photos through the re-verification flow. Unlike in-course reverification, this flow requires users to submit *both* face and ID photos. In contrast, during in-course reverification, students submit only face photos, which are matched against the ID photo the user submitted during initial verification. """ @method_decorator(login_required) def get(self, request): """ Render the reverification flow. Most of the work is done client-side by composing the same Backbone views used in the initial verification flow. """ verification_status = IDVerificationService.user_status(request.user) expiration_datetime = IDVerificationService.get_expiration_datetime(request.user, ['approved']) can_reverify = False if expiration_datetime: if is_verification_expiring_soon(expiration_datetime): # The user has an active verification, but the verification # is set to expire within "EXPIRING_SOON_WINDOW" days (default is 4 weeks). # In this case user can resubmit photos for reverification. can_reverify = True # If the user has no initial verification or if the verification # process is still ongoing 'pending' or expired then allow the user to # submit the photo verification. # A photo verification is marked as 'pending' if its status is either # 'submitted' or 'must_retry'. if verification_status['status'] in ["none", "must_reverify", "expired", "pending"] or can_reverify: context = { "user_full_name": request.user.profile.name, "platform_name": configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME), "capture_sound": staticfiles_storage.url("audio/camera_capture.wav"), } return render_to_response("verify_student/reverify.html", context) else: context = { "status": verification_status['status'] } return render_to_response("verify_student/reverify_not_allowed.html", context)
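The contribution handling in create_order() above truncates user-supplied amounts rather than rounding them. A minimal, self-contained sketch of that decimal behaviour (the function name and sample values are hypothetical, not part of the original module):

import decimal

def normalize_contribution(contribution):
    """Truncate an amount to cents with ROUND_DOWN, as create_order() does."""
    try:
        return decimal.Decimal(contribution).quantize(
            decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
    except decimal.InvalidOperation:
        return None  # the view responds with HttpResponseBadRequest in this case

assert normalize_contribution("49.999") == decimal.Decimal("49.99")  # truncated, not rounded
assert normalize_contribution("not a number") is None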
ahmedaljazzar/edx-platform
lms/djangoapps/verify_student/views.py
Python
agpl-3.0
50,989
[ "VisIt" ]
d9c58541cdbc7071508b648ae9b85e74272566c2470b28b05623d170936d9ec9
from setuptools import setup, find_packages

setup(name='latimes-mappingla-geopy',
      version='0.93-latimes',
      description='Python Geocoding Toolbox',
      author='Ben Welsh from original work by Brian Beck',
      author_email='Benjamin.Welsh@latimes.com',
      url='http://github.com/datadesk/latimes-mappingla-geopy',
      download_url='http://github.com/datadesk/latimes-mappingla-geopy.git',
      packages=find_packages(),
      license='MIT',
      keywords='geocode geocoding gis geographical maps earth distance',
      classifiers=["Development Status :: 3 - Alpha",
                   "Intended Audience :: Developers",
                   "Intended Audience :: Science/Research",
                   "License :: OSI Approved :: MIT License",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python",
                   "Topic :: Scientific/Engineering :: GIS",
                   "Topic :: Software Development :: Libraries :: Python Modules"
                   ],
      )
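One hedged way to verify this metadata once the package is installed; this assumes the distribution name declared above is present in the current environment (illustrative only):

import pkg_resources  # ships with setuptools

dist = pkg_resources.get_distribution('latimes-mappingla-geopy')
print(dist.project_name, dist.version)  # expected: latimes-mappingla-geopy 0.93-latimes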
datadesk/latimes-mappingla-geopy
setup.py
Python
mit
1,045
[ "Brian" ]
119ca86c7627f01ff31d11837322da9711fc41045aa161efa11d62d81493e9c3
# -*- coding: utf-8 -*- # # test documentation build configuration file, created by # sphinx-quickstart on Fri Feb 7 11:33:27 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import CGATPipelines.Pipeline as P import CGATPipelines ################################################################ # Options related to CGAT pipelines # path were documentation source resides. # Use environment variable SPHINX_DOCSDIR. # If unset, take the location of CGATPipelines docsdir = os.environ.get("SPHINX_DOCSDIR", os.path.join(os.path.dirname(CGATPipelines.__file__), 'pipeline_docs')) if not os.path.exists(docsdir): raise ValueError("documentation directory '%s' not found" % docsdir) themedir = os.path.join(os.path.dirname(CGATPipelines.__file__), 'pipeline_docs', 'themes') logopath = os.path.join(themedir, "cgat_logo.png") ################################################################ # Import pipeline configuration from pipeline.ini in the current # directory and the common one. # PATH were code for pipelines is stored pipelinesdir = os.path.dirname(CGATPipelines.__file__) # The default configuration file - 'inifile' is read by # sphinx-report. inifile = os.path.join(os.path.dirname(CGATPipelines.__file__), 'configuration', 'pipeline.ini') PARAMS = P.getParameters([inifile, "pipeline.ini"]) # Definition now part of CGATReport # def setup(app): # app.add_config_value('PARAMS', {}, True) ################################################################ ################################################################ ################################################################ # The pipeline assumes that sphinxreport is called within the # working directory. If the report is in a separate build directory, # change the paths below. # # directory with export directory from pipeline # This should be a directory in the build directory - you can # link from here to a directory outside the build tree, though. exportdir = os.path.abspath(PARAMS['exportdir']) datadir = os.path.abspath(PARAMS['datadir']) ################################################################ ################################################################ ################################################################ # sphinx options ################################################################ # General information about the project. project = PARAMS['projectname'] copyright = PARAMS['copyright'] # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = PARAMS['version'] # The full version, including alpha/beta/rc tags. release = PARAMS['release'] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path = [os.path.abspath('.'), pipelinesdir, os.path.abspath('%s/trackers' % docsdir)] + sys.path # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.intersphinx', 'CGATReport.report_directive', 'sphinx.ext.inheritance_diagram', 'CGATReport.errors_directive', 'CGATReport.warnings_directive', 'CGATReport.roles'] if P.CONFIG.has_section('intersphinx'): intersphinx_mapping = dict( [(x, (os.path.abspath(y), None)) for x, y in P.CONFIG.items('intersphinx')]) # Included at the end of each rst file rst_epilog = ''' .. _CGAT: http://www.cgat.org .. _CGAT Training Programme: http://www.cgat.org .. _CGAT Pipelines: https://www.cgat.org/downloads/public/cgat/documentation/Pipelines.html#pipelines .. _CGAT Scripts: https://www.cgat.org/downloads/public/cgat/documentation/cgat.html#cgat .. _pysam: http://code.google.com/p/pysam/ .. _samtools: http://samtools.sourceforge.net/ .. _tabix: http://samtools.sourceforge.net/tabix.shtml/ .. _Galaxy: https://main.g2.bx.psu.edu/ .. _cython: http://cython.org/ .. _python: http://python.org/ .. _pyximport: http://www.prescod.net/pyximport/ .. _sphinx: http://sphinx-doc.org/ .. _ruffus: http://www.ruffus.org.uk/ .. _sphinxreport: http://code.google.com/p/sphinx-report/ .. _sqlite: http://www.sqlite.org/ .. _make: http://www.gnu.org/software/make .. _UCSC: http://genome.ucsc.edu .. _ENSEMBL: http://www.ensembl.org .. _GO: http://www.geneontology.org .. _gwascatalog: http://www.genome.gov/gwastudies/ .. _distlid: http://distild.jensenlab.org/ .. _mysql: https://mariadb.org/ .. _postgres: http://www.postgresql.org/ .. _bedtools: http://bedtools.readthedocs.org/en/latest/ .. _UCSC Tools: http://genome.ucsc.edu/admin/git.html .. _git: http://git-scm.com/ .. _sge: http://wikis.sun.com/display/GridEngine/Home .. _alignlib: https://github.com/AndreasHeger/alignlib ''' # Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory. templates_path = [os.path.relpath('%s/_templates' % docsdir)] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. project = 'test' copyright = '2014, %CGAT%' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] exclude_patterns = ["**/.*.rst"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'cgat' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [themedir] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = logopath # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'testdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'test.tex', 'test Documentation', '\\%CGAT\\%', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'test', 'test Documentation', ['%CGAT%'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'test', 'test Documentation', '%CGAT%', 'test', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False
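For reference, the intersphinx_mapping comprehension near the top of this file turns each item of the pipeline.ini [intersphinx] section into the (absolute_path, None) tuple that Sphinx expects. A standalone sketch of that transformation with hypothetical config items:

import os

# hypothetical items, in the shape P.CONFIG.items('intersphinx') would return
items = [('cgat', 'doc/cgat'), ('pipelines', 'doc/pipelines')]
intersphinx_mapping_example = dict(
    (name, (os.path.abspath(path), None)) for name, path in items)
print(intersphinx_mapping_example)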
CGATOxford/CGATPipelines
CGATPipelines/pipeline_template_data/conf.py
Python
mit
13,166
[ "pysam" ]
3660419f318a47d77cae7765365826aaf2dd97a961c95bfe245cfb153fb63a6e
# imports
import pandas as pd  # import pandas for easy data manipulation using data frames
import numpy as np  # import numpy for numeric calculations on matrices
import time  # for timers
import os

# import h2o to check calculations
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator  # import GLM models
from h2o.grid.grid_search import H2OGridSearch  # grid search

# start h2o
h2o.init()  # max_mem_size='12G'
# h2o.remove_all()
h2o.show_progress()  # turn on progress bars

from feature_combiner import feature_combiner
from target_encoder import target_encoder
from get_type_lists import get_type_lists

# data-related constants
IN_FILE_PATH = os.path.dirname(__file__) + '/data/train.csv'
Y = 'y'
ID_VAR = 'ID'
DROPS = [ID_VAR]

# KAGGLE UNLABELED TEST DATA
IN_FILE_PATH_TEST = os.path.dirname(__file__) + '/data/test.csv'
test = h2o.import_file(IN_FILE_PATH_TEST)

# model-related constants
LEARN_RATE = 0.005  # how much each gradient descent step impacts parameters
CONV = 1e-10  # desired precision in parameters
MAX_ITERS = 10000  # maximum number of gradient descent steps to allow

# numeric columns
train = h2o.import_file(IN_FILE_PATH)
train = train.drop(DROPS)
X = train.col_names
# train.describe()
# exit()

original_numerics, categoricals = get_type_lists(frame=train, rejects=[ID_VAR, Y])

# These three have test variables that don't occur in the train dataset
print("Encoding numeric variables...")
for i, var in enumerate(categoricals):
    total = len(categoricals)
    print('Encoding: ' + var + ' (' + str(i+1) + '/' + str(total) + ') ...')
    tr_enc, ts_enc = target_encoder(train, test, var, Y)
    train = train.cbind(tr_enc)
    test = test.cbind(ts_enc)
print('Done.')

# run again after encoding
encoded_numerics, categoricals = get_type_lists(frame=train,
                                                rejects=[ID_VAR, Y, 'X2', 'X0', 'X5'])

# create interaction variables
train, test = feature_combiner(train, test, encoded_numerics)

# run again after interactions
encoded_combined_numerics, categoricals = get_type_lists(frame=train,
                                                         rejects=[ID_VAR, Y, 'X2', 'X0', 'X5'])

# check number of created variables is correct
# 1 id column, 1 target column, 79 original + encoded numeric columns, 43 original categorical variables
# sum(range(1, 79)) combined variables
print(train.shape == (1460, sum(range(1, 79), (79 + 43 + 1 + 1))))
print(test.shape == (1459, sum(range(1, 79), (79 + 43 + 1))))

# split data
print('Splitting Data...')
base_train, base_valid, stack_train, stack_valid = train.split_frame([0.3, 0.2, 0.3],
                                                                     seed=654251)
print(base_train.shape)
print(base_valid.shape)
print(stack_train.shape)
print(stack_valid.shape)
print('Data split.')


def glm_grid(X, y, train, valid):
    """ Wrapper function for penalized GLM with alpha and lambda search.

    :param X: List of inputs.
    :param y: Name of target variable.
    :param train: Name of training H2OFrame.
    :param valid: Name of validation H2OFrame.
    :return: Best H2OModel from H2OGeneralizedLinearEstimator
    """

    alpha_opts = [0.01, 0.25, 0.5, 0.99]  # always keep some L2
    hyper_parameters = {"alpha": alpha_opts}

    # initialize grid search
    grid = H2OGridSearch(
        H2OGeneralizedLinearEstimator(
            family="gaussian",
            lambda_search=True,
            seed=12345),
        hyper_params=hyper_parameters)

    # train grid
    grid.train(y=y, x=X, training_frame=train, validation_frame=valid)

    # show grid search results
    print(grid.show())

    best = grid.get_grid()[0]
    print(best)

    # plot top frame values
    yhat_frame = valid.cbind(best.predict(valid))
    print(yhat_frame[0:10, [y, 'predict']])

    # plot sorted predictions
    yhat_frame_df = yhat_frame[[y, 'predict']].as_data_frame()
    yhat_frame_df.sort_values(by='predict', inplace=True)
    yhat_frame_df.reset_index(inplace=True, drop=True)
    _ = yhat_frame_df.plot(title='Ranked Predictions Plot')

    # select best model
    return best


# TRAIN all the models
glm0 = glm_grid(original_numerics, Y, base_train, base_valid)
glm1 = glm_grid(encoded_numerics, Y, base_train, base_valid)
glm2 = glm_grid(encoded_combined_numerics, Y, base_train, base_valid)
print('Model training done.')

stack_train = stack_train.cbind(glm0.predict(stack_train))
stack_valid = stack_valid.cbind(glm0.predict(stack_valid))
stack_train = stack_train.cbind(glm1.predict(stack_train))
stack_valid = stack_valid.cbind(glm1.predict(stack_valid))
stack_train = stack_train.cbind(glm2.predict(stack_train))
stack_valid = stack_valid.cbind(glm2.predict(stack_valid))

test = test.cbind(glm0.predict(test))
test = test.cbind(glm1.predict(test))
test = test.cbind(glm2.predict(test))

glm_stack_model = glm_grid(encoded_combined_numerics + ['predict', 'predict0', 'predict1'],
                           Y, stack_train, stack_valid)

# Score test data
sub = test[ID_VAR].cbind(glm_stack_model.predict(test))
sub['predict'] = sub['predict'].exp()
print(sub.head())

# create time stamp
import re
import time
time_stamp = re.sub('[: ]', '_', time.asctime())

# save file for submission
sub.columns = [ID_VAR, Y]
sub_fname = os.path.dirname(__file__) + '/data/submissions/submission_' + str(time_stamp) + '.csv'
h2o.download_csv(sub, sub_fname)

# shutdown h2o
# h2o.cluster().shutdown(prompt=False)
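target_encoder is imported from a local module that is not shown here, so the following pandas sketch is only an assumption about what a mean target encoding of this kind does; frame and column names are hypothetical:

import pandas as pd

train_df = pd.DataFrame({'X0': ['a', 'a', 'b'], 'y': [1.0, 3.0, 5.0]})
test_df = pd.DataFrame({'X0': ['a', 'b', 'c']})

means = train_df.groupby('X0')['y'].mean()     # per-category target mean on train
train_df['X0_te'] = train_df['X0'].map(means)  # a -> 2.0, b -> 5.0
test_df['X0_te'] = test_df['X0'].map(means)    # unseen category 'c' -> NaN
print(test_df)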
kcrandall/Kaggle_Mercedes_Manufacturing
h2o/main.py
Python
mit
5,401
[ "Gaussian" ]
05cb7c8399c3ad0b986dd1570b70102c8e49d82117c41e4f55289db2f0cf0e8f
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module provides """ from __future__ import unicode_literals, division, print_function from collections import namedtuple, OrderedDict from monty.string import is_string from monty.json import MSONable #, MontyEncoder from monty.functools import lazy_property from pymatgen.core.libxcfunc import LibxcFunc __author__ = "Matteo Giantomassi" __copyright__ = "Copyright 2016, The Materials Project" __version__ = "3.0.0" # The libxc version used to generate this file! __maintainer__ = "Matteo Giantomassi" __email__ = "gmatteo@gmail.com" __status__ = "Production" __date__ = "May 16, 2016" class XcFunc(MSONable): """ This object stores information about the XC correlation functional. Client code usually creates the object by calling the class methods: - from_name - from_type_name or code-specific methods such as: - from_abinit_ixc Ax XcFunc instance is hashable and can therefore be used as key in dictionaries. The implementation is based on the libxc conventions and is inspired to the XML specification for atomic PAW datasets documented at: https://wiki.fysik.dtu.dk/gpaw/setups/pawxml.html For convenience, part of the pawxml documentation is reported here. The xc_functional element defines the exchange-correlation functional used for generating the dataset. It has the two attributes type and name. The type attribute can be LDA, GGA, MGGA or HYB. The name attribute designates the exchange-correlation functional and can be specified in the following ways: [1] Taking the names from the LibXC library. The correlation and exchange names are stripped from their XC_ part and combined with a + sign. Here is an example for an LDA functional: <xc_functional type="LDA", name="LDA_X+LDA_C_PW"/> and this is what PBE will look like: <xc_functional type="GGA", name="GGA_X_PBE+GGA_C_PBE"/> [2] Using one of the following pre-defined aliases: type name LibXC equivalent Reference LDA PW LDA_X+LDA_C_PW LDA exchange; Perdew, Wang, PRB 45, 13244 (1992) GGA PW91 GGA_X_PW91+GGA_C_PW91 Perdew et al PRB 46, 6671 (1992) GGA PBE GGA_X_PBE+GGA_C_PBE Perdew, Burke, Ernzerhof, PRL 77, 3865 (1996) GGA RPBE GGA_X_RPBE+GGA_C_PBE Hammer, Hansen, Nørskov, PRB 59, 7413 (1999) GGA revPBE GGA_X_PBE_R+GGA_C_PBE Zhang, Yang, PRL 80, 890 (1998) GGA PBEsol GGA_X_PBE_SOL+GGA_C_PBE_SOL Perdew et al, PRL 100, 136406 (2008) GGA AM05 GGA_X_AM05+GGA_C_AM05 Armiento, Mattsson, PRB 72, 085108 (2005) GGA BLYP GGA_X_B88+GGA_C_LYP Becke, PRA 38, 3098 (1988); Lee, Yang, Parr, PRB 37, 785 """ type_name = namedtuple("type_name", "type, name") xcf = LibxcFunc defined_aliases = OrderedDict([ # (x, c) --> type_name # LDAs ((xcf.LDA_X, xcf.LDA_C_PW), type_name("LDA", "PW")), # ixc 7 ((xcf.LDA_X, xcf.LDA_C_PW_MOD), type_name("LDA", "PW_MOD")), ((xcf.LDA_X, xcf.LDA_C_PZ), type_name("LDA", "PZ")), # ixc 2 ((xcf.LDA_X, xcf.LDA_C_WIGNER), type_name("LDA", "W")), # ixc 4 ((xcf.LDA_X, xcf.LDA_C_HL), type_name("LDA", "HL")), # ixc 5 ((xcf.LDA_X, xcf.LDA_C_GL), type_name("LDA", "GL")), ((xcf.LDA_X, xcf.LDA_C_VWN), type_name("LDA", "VWN")), # GGAs ((xcf.GGA_X_PW91, xcf.GGA_C_PW91), type_name("GGA", "PW91")), ((xcf.GGA_X_PBE, xcf.GGA_C_PBE), type_name("GGA", "PBE")), ((xcf.GGA_X_RPBE, xcf.GGA_C_PBE), type_name("GGA", "RPBE")), # ixc 15 ((xcf.GGA_X_PBE_R, xcf.GGA_C_PBE), type_name("GGA", "revPBE")), # ixc 14 ((xcf.GGA_X_PBE_SOL, xcf.GGA_C_PBE_SOL), type_name("GGA", "PBEsol")), ((xcf.GGA_X_AM05, xcf.GGA_C_AM05), type_name("GGA", "AM05")), ((xcf.GGA_X_B88, 
xcf.GGA_C_LYP), type_name("GGA", "BLYP")), ]) del type_name # Correspondence between Abinit ixc notation and libxc notation. # see: http://www.abinit.org/doc/helpfiles/for-v7.8/input_variables/varbas.html#ixc # and 42_libpaw/m_pawpsp.F90 for the implementation. # Fortunately, all the other cases are handled with libxc. abinitixc_to_libxc = { 1: dict(xc=xcf.LDA_XC_TETER93), 2: dict(x=xcf.LDA_X, c=xcf.LDA_C_PZ), # PZ 001009 4: dict(x=xcf.LDA_X, c=xcf.LDA_C_WIGNER), # W 5: dict(x=xcf.LDA_X, c=xcf.LDA_C_HL), # HL 7: dict(x=xcf.LDA_X, c=xcf.LDA_C_PW), # PW 001012 11: dict(x=xcf.GGA_X_PBE, c=xcf.GGA_C_PBE), # PBE 14: dict(x=xcf.GGA_X_PBE_R, c=xcf.GGA_C_PBE), # revPBE 15: dict(x=xcf.GGA_X_RPBE, c=xcf.GGA_C_PBE), # RPBE } del xcf @classmethod def aliases(cls): """List of registered names.""" return [nt.name for nt in cls.defined_aliases.values()] @classmethod def asxc(cls, obj): """Convert object into Xcfunc.""" if isinstance(obj, cls): return obj if is_string(obj): return cls.from_name(obj) raise TypeError("Don't know how to convert <%s:%s> to Xcfunc" % (type(obj), str(obj))) @classmethod def from_abinit_ixc(cls, ixc): """Build the object from Abinit ixc (integer)""" ixc = int(ixc) if ixc >= 0: return cls(**cls.abinitixc_to_libxc[ixc]) else: # libxc notation employed in Abinit: a six-digit number in the form XXXCCC or CCCXXX #ixc = str(ixc) #assert len(ixc[1:]) == 6 #first, last = ixc[1:4], ixc[4:] ixc = abs(ixc) first = ixc // 1000 last = ixc - first * 1000 x, c = LibxcFunc(int(first)), LibxcFunc(int(last)) if not x.is_x_kind: x, c = c, x # Swap assert x.is_x_kind and c.is_c_kind return cls(x=x, c=c) @classmethod def from_name(cls, name): """Build the object from one of the registered names""" return cls.from_type_name(None, name) @classmethod def from_type_name(cls, typ, name): """Build the object from (type, name).""" # Try aliases first. for k, nt in cls.defined_aliases.items(): if typ is not None and typ != nt.type: continue if name == nt.name: if len(k) == 1: return cls(xc=k) if len(k) == 2: return cls(x=k[0], c=k[1]) raise ValueError("Wrong key: %s" % k) # At this point, we should have something in the form # name="GGA_X_PBE+GGA_C_PBE" or name=""LDA_XC_TETER93" if "+" in name: assert typ is None x, c = (s.strip() for s in name.split("+")) x, c = LibxcFunc[x], LibxcFunc[c] return cls(x=x, c=c) else: assert typ is None xc = LibxcFunc[name] return cls(xc=xc) if typ is None: raise ValueError("Cannot find name=%s in defined_aliases" % name) else: raise ValueError("Cannot find type=%s, name=%s in defined_aliases" % (typ, name)) @classmethod def from_dict(cls, d): """ Makes XcFunc obey the general json interface used in pymatgen for easier serialization. """ return cls(xc=d.get("xc"), x=d.get("x"), c=d.get("c")) def as_dict(self): """ Makes XcFunc obey the general json interface used in pymatgen for easier serialization. """ d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__} # print("in as_dict", type(self.x), type(self.c), type(self.xc)) if self.x is not None: d["x"] = self.x.as_dict() if self.c is not None: d["c"] = self.c.as_dict() if self.xc is not None: d["xc"] = self.xc.as_dict() return d # def to_json(self): # """ # Returns a json string representation of the MSONable object. # """ # return json.dumps(self.as_dict()) #, cls=MontyEncoder) def __init__(self, xc=None, x=None, c=None): """ Args: xc: LibxcFunc for XC functional. x, c: LibxcFunc for exchange and correlation part. Mutually exclusive with xc. 
""" # Consistency check if xc is None: if x is None or c is None: raise ValueError("x or c must be specified when xc is None") else: if x is not None or c is not None: raise ValueError("x and c should be None when xc is specified") self.xc, self.x, self.c = xc, x, c @lazy_property def type(self): """The type of the functional.""" if self.xc in self.defined_aliases: return self.defined_aliases[self.xc].type xc = (self.x, self.c) if xc in self.defined_aliases: return self.defined_aliases[xc].type # If self is not in defined_aliases, use LibxcFunc family if self.xc is not None: return self.xc.family return "+".join([self.x.family, self.c.family]) @lazy_property def name(self): """ The name of the functional. If the functional is not found in the aliases, the string has the form X_NAME+C_NAME """ if self.xc in self.defined_aliases: return self.defined_aliases[self.xc].name xc = (self.x, self.c) if xc in self.defined_aliases: return self.defined_aliases[xc].name if self.xc is not None: return self.xc.name return "+".join([self.x.name, self.c.name]) def __repr__(self): return "%s" % self.name def __hash__(self): return hash(self.name) def __eq__(self, other): if other is None: return False if isinstance(other, XcFunc): return self.name == other.name # assume other is a string return self.name == other def __ne__(self, other): return not self == other # @property # def refs(self): #def info_dict() # if self.xc is not None: # return {"xc", self.xc.info_dict} # else: # return {"x", self.x.info_dict, "c", self.c.info_dict}
xhqu1981/pymatgen
pymatgen/core/xcfunc.py
Python
mit
10,353
[ "ABINIT", "GPAW", "pymatgen" ]
77ee86c2806850f5ea68927905130300bd0933e553c173ba57952eb74a209cfb
"""Definitions of Celery tasks in Askbot in this module there are two types of functions: * those wrapped with a @task decorator and a ``_celery_task`` suffix - celery tasks * those with the same base name, but without the decorator and the name suffix the actual work units run by the task Celery tasks are special functions in a way that they require all the parameters be serializable - so instead of ORM objects we pass object id's and instead of query sets - lists of ORM object id's. That is the reason for having two types of methods here: * the base methods (those without the decorator and the ``_celery_task`` in the end of the name are work units that are called from the celery tasks. * celery tasks - shells that reconstitute the necessary ORM objects and call the base methods """ import sys import traceback import logging import uuid from django.contrib.contenttypes.models import ContentType from django.template import Context from django.template.loader import get_template from django.utils.translation import ugettext as _ from django.utils.translation import activate as activate_language from django.utils import simplejson from celery.decorators import task from askbot.conf import settings as askbot_settings from askbot import const from askbot import mail from askbot.models import Post, Thread, User, ReplyAddress from askbot.models.badges import award_badges_signal from askbot.models import get_reply_to_addresses, format_instant_notification_email from askbot import exceptions as askbot_exceptions from askbot.utils.twitter import Twitter # TODO: Make exceptions raised inside record_post_update_celery_task() ... # ... propagate upwards to test runner, if only CELERY_ALWAYS_EAGER = True # (i.e. if Celery tasks are not deferred but executed straight away) @task(ignore_result=True) def tweet_new_post_task(post_id): try: twitter = Twitter() except: return post = Post.objects.get(id=post_id) is_mod = post.author.is_administrator_or_moderator() if is_mod or post.author.reputation > askbot_settings.MIN_REP_TO_TWEET_ON_OTHERS_ACCOUNTS: tweeters = User.objects.filter(social_sharing_mode=const.SHARE_EVERYTHING) tweeters = tweeters.exclude(id=post.author.id) access_tokens = tweeters.values_list('twitter_access_token', flat=True) else: access_tokens = list() tweet_text = post.as_tweet() for raw_token in access_tokens: token = simplejson.loads(raw_token) twitter.tweet(tweet_text, access_token=token) if post.author.social_sharing_mode != const.SHARE_NOTHING: token = simplejson.loads(post.author.twitter_access_token) twitter.tweet(tweet_text, access_token=token) @task(ignore_result = True) def notify_author_of_published_revision_celery_task(revision): #todo: move this to ``askbot.mail`` module #for answerable email only for now, because #we don't yet have the template for the read-only notification data = { 'site_name': askbot_settings.APP_SHORT_NAME, 'post': revision.post } headers = None if askbot_settings.REPLY_BY_EMAIL: #generate two reply codes (one for edit and one for addition) #to format an answerable email or not answerable email reply_options = { 'user': revision.author, 'post': revision.post, 'reply_action': 'append_content' } append_content_address = ReplyAddress.objects.create_new( **reply_options ).as_email_address() reply_options['reply_action'] = 'replace_content' replace_content_address = ReplyAddress.objects.create_new( **reply_options ).as_email_address() #populate template context variables reply_code = append_content_address + ',' + replace_content_address if 
revision.post.post_type == 'question': mailto_link_subject = revision.post.thread.title else: mailto_link_subject = _('make an edit by email') #todo: possibly add more mailto thread headers to organize messages prompt = _('To add to your post EDIT ABOVE THIS LINE') reply_separator_line = const.SIMPLE_REPLY_SEPARATOR_TEMPLATE % prompt data['reply_code'] = reply_code data['author_email_signature'] = revision.author.email_signature data['replace_content_address'] = replace_content_address data['reply_separator_line'] = reply_separator_line data['mailto_link_subject'] = mailto_link_subject headers = {'Reply-To': append_content_address} #load the template activate_language(revision.post.language_code) template = get_template('email/notify_author_about_approved_post.html') #todo: possibly add headers to organize messages in threads #send the message mail.send_mail( subject_line = _('Your post at %(site_name)s is now published') % data, body_text = template.render(Context(data)), recipient_list = [revision.author.email,], related_object = revision, activity_type = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT, headers = headers ) @task(ignore_result = True) def record_post_update_celery_task( post_id, post_content_type_id, newly_mentioned_user_id_list=None, updated_by_id=None, suppress_email=False, timestamp=None, created=False, diff=None, ): #reconstitute objects from the database updated_by = User.objects.get(id=updated_by_id) post_content_type = ContentType.objects.get(id=post_content_type_id) post = post_content_type.get_object_for_this_type(id=post_id) newly_mentioned_users = User.objects.filter( id__in=newly_mentioned_user_id_list ) try: notify_sets = post.get_notify_sets( mentioned_users=newly_mentioned_users, exclude_list=[updated_by,] ) #todo: take into account created == True case #update_object is not used (activity_type, update_object) = post.get_updated_activity_data(created) post.issue_update_notifications( updated_by=updated_by, notify_sets=notify_sets, activity_type=activity_type, suppress_email=suppress_email, timestamp=timestamp, diff=diff ) except Exception: # HACK: exceptions from Celery job don't propagate upwards # to the Django test runner # so at least let's print tracebacks print >>sys.stderr, unicode(traceback.format_exc()).encode('utf-8') raise @task(ignore_result = True) def record_question_visit( question_post = None, user_id = None, update_view_count = False): """celery task which records question visit by a person updates view counter, if necessary, and awards the badges associated with the question visit """ #1) maybe update the view count #question_post = Post.objects.filter( # id = question_post_id #).select_related('thread')[0] if update_view_count: question_post.thread.increase_view_count() #we do not track visits per anon user if user_id is None: return user = User.objects.get(id=user_id) #2) question view count per user and clear response displays #user = User.objects.get(id = user_id) if user.is_authenticated(): #get response notifications user.visit_question(question_post) #3) send award badges signal for any badges #that are awarded for question views award_badges_signal.send(None, event = 'view_question', actor = user, context_object = question_post, ) @task() def send_instant_notifications_about_activity_in_post( update_activity = None, post = None, recipients = None, ): #reload object from the database post = Post.objects.get(id=post.id) if post.is_approved() is False: return if recipients is None: return acceptable_types = 
const.RESPONSE_ACTIVITY_TYPES_FOR_INSTANT_NOTIFICATIONS if update_activity.activity_type not in acceptable_types: return #calculate some variables used in the loop below update_type_map = const.RESPONSE_ACTIVITY_TYPE_MAP_FOR_TEMPLATES update_type = update_type_map[update_activity.activity_type] origin_post = post.get_origin_post() headers = mail.thread_headers( post, origin_post, update_activity.activity_type ) logger = logging.getLogger() if logger.getEffectiveLevel() <= logging.DEBUG: log_id = uuid.uuid1() message = 'email-alert %s, logId=%s' % (post.get_absolute_url(), log_id) logger.debug(message) else: log_id = None for user in recipients: if user.is_blocked(): continue reply_address, alt_reply_address = get_reply_to_addresses(user, post) activate_language(post.language_code) subject_line, body_text = format_instant_notification_email( to_user = user, from_user = update_activity.user, post = post, reply_address = reply_address, alt_reply_address = alt_reply_address, update_type = update_type, template = get_template('email/instant_notification.html') ) headers['Reply-To'] = reply_address try: mail.send_mail( subject_line=subject_line, body_text=body_text, recipient_list=[user.email], related_object=origin_post, activity_type=const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT, headers=headers, raise_on_failure=True ) except askbot_exceptions.EmailNotSent, error: logger.debug( '%s, error=%s, logId=%s' % (user.email, error, log_id) ) else: logger.debug('success %s, logId=%s' % (user.email, log_id))
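The module docstring describes the convention of celery shells that take serializable ids and work units that take ORM objects; a minimal hypothetical pair following that pattern (these names are illustrative and not part of askbot):

def notify_user(user, post):
    """Work unit: operates on reconstituted ORM objects."""
    pass  # the actual notification logic would live here


@task(ignore_result=True)
def notify_user_celery_task(user_id, post_id):
    """Celery shell: accepts primitive ids and rehydrates the objects."""
    notify_user(User.objects.get(id=user_id), Post.objects.get(id=post_id))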
coffenbacher/askbot-devel
askbot/tasks.py
Python
gpl-3.0
10,807
[ "VisIt" ]
f7614d5c3850a770b5115f0789b8e845934dd19f3f0fcd873f9dbec210f9e144
#
# The BAO likelihoods.
#
from TabulatedBAOLikelihood import *
from TabulatedBAODVLikelihood import *
from GaussBAODVLikelihood import *
from LCDMCosmology import *
from scipy import *


class DR11LOWZ(GaussBAODVLikelihood):
    def __init__(self):
        obh2 = 0.0224
        Om = 0.274
        h = 0.7
        mnu = 0
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        GaussBAODVLikelihood.__init__(
            self, "DR11LOWZ", 0.32, 1264.0, 25.0, fidTheory)


class DR11CMASS(TabulatedBAOLikelihood):
    def __init__(self):
        # fiducial cosmology for LOWZ/CMASS data.
        # see Anderson et al, page 28
        obh2 = 0.0224
        Om = 0.274
        h = 0.7
        mnu = 0
        # rd=149.28
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        # a negative col means the column holds probability rather than chi2
        TabulatedBAOLikelihood.__init__(self, "DR11CMASS",
                                        'data/sdss_DR11CMASS_consensus.dat',
                                        -2, fidTheory, 0.57)


class DR11LyaAuto(TabulatedBAOLikelihood):
    def __init__(self):
        # fiducial cosmology for Lya data.
        # see e.g. Busca's email on 12/3/13
        obh2 = 0.0227
        Om = 0.27
        h = 0.7
        mnu = 0.06
        # rd=149.77
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        # File from 5/16 from Nicolas.
        TabulatedBAOLikelihood.__init__(self, "DR11LyaAuto",
                                        'data/chi2_surface_dr11_baseline_fit.txt',
                                        4, fidTheory, 2.34)


class DR11LyaCross(TabulatedBAOLikelihood):
    def __init__(self):
        obh2 = 0.0227
        Om = 0.27
        h = 0.7
        mnu = 0
        # rd=149.77
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        TabulatedBAOLikelihood.__init__(self, "DR11LyaCross",
                                        'data/lyabaocross.scan', 2,
                                        fidTheory, 2.36)


class DR14LyaAuto(TabulatedBAOLikelihood):
    def __init__(self):
        # fiducial cosmology for Lya data.
        # Taken from https://github.com/igmhub/picca/blob/master/data/deSainteAgatheetal2019/auto_alone_stdFit/auto_alone_stdFit..ap.at.scan.dat
        # fiducial model -- see Table 2 of Victoria's paper
        obh2 = 0.02222
        h = 0.6731
        Om = 0.1426/h**2
        mnu = 0.06
        # rd=147.33
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        TabulatedBAOLikelihood.__init__(self, "DR14LyaAuto",
                                        'data/deSainteAgatheetal2019_ap_at_scan.dat',
                                        2, fidTheory, 2.34,
                                        aperp_col=1, apar_col=0, skiprows=1)


class DR14LyaCross(TabulatedBAOLikelihood):
    def __init__(self):
        # fiducial cosmology for Lya data.
        # Taken from https://github.com/igmhub/picca/tree/master/data/Blomqvistetal2019/cross_alone_stdFit
        # fiducial model -- double check
        obh2 = 0.02222
        h = 0.6731
        Om = 0.1426/h**2
        mnu = 0.06
        # rd=147.33
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        TabulatedBAOLikelihood.__init__(self, "DR14LyaCross",
                                        'data/Blomqvistetal2019_ap_at_scan.dat',
                                        2, fidTheory, 2.34,
                                        aperp_col=1, apar_col=0, skiprows=1)


# Data extracted from http://arxiv.org/pdf/1106.3366.pdf
class SixdFGS(GaussBAODVLikelihood):
    def __init__(self):
        obh2 = 0.02227
        Om = 0.27
        h = 0.7
        mnu = 0
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        GaussBAODVLikelihood.__init__(
            self, "SixdFGS", 0.106, 456.0, 27.0, fidTheory, maxchi2=4)


# SDSS Main Galaxy Sample BAO
class SDSSMGS(TabulatedBAODVLikelihood):
    def __init__(self):
        obh2 = 0.021547
        Om = 0.31
        h = 0.67
        mnu = 0
        fidTheory = LCDMCosmology(obh2, Om, h, mnu)
        TabulatedBAODVLikelihood.__init__(
            self, "MGS", "data/chidavexi8stavePk5staverec.dat", fidTheory, 0.15)
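A hedged usage sketch: each likelihood class above wires in its own fiducial cosmology, so construction takes no arguments. The base-class API beyond __init__ is not shown in this file, so nothing further is assumed here:

if __name__ == "__main__":
    lowz = DR11LOWZ()    # Gaussian D_V measurement at z_eff = 0.32
    cmass = DR11CMASS()  # tabulated chi2/probability surface at z_eff = 0.57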
slosar/april
py/BAOLikelihoods.py
Python
gpl-2.0
3,883
[ "Galaxy" ]
b327cccfc1e920370b2883f3c6c711f4cfdc54b114e0363e1ea2750712fc211e
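A standalone sketch, independent of the SimpleMC classes above, of the Gaussian chi^2 these BAO likelihoods encode: DR11LOWZ, for example, constrains D_V(z=0.32) = 1264 +/- 25 Mpc relative to the fiducial sound horizon. `dv_model` is a hypothetical stand-in for the theory prediction rescaled by rd_fid/rd.
def gauss_bao_chi2(dv_model, dv_data=1264.0, dv_err=25.0, maxchi2=None):
    # Plain Gaussian chi^2 for a single D_V measurement.
    chi2 = ((dv_model - dv_data) / dv_err) ** 2
    # Some likelihoods (e.g. SixdFGS above, maxchi2=4) cap the chi^2
    # to tame strongly non-Gaussian tails.
    if maxchi2 is not None:
        chi2 = min(chi2, maxchi2)
    return chi2

print(gauss_bao_chi2(1270.0))  # (6/25)^2 = 0.0576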
import os import sys import re import requests import urllib import json from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from Bio.Alphabet import IUPAC # Biopython, used by getGenomeObjectProteins and getGenomeObjectGeneDna below from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) Debug = False #shared across functions defined here LOG = sys.stderr PatricUser = None Base_url = "https://www.patricbrc.org/api/" # module-level API endpoint used by the queries below; matches createTSVGet's default api_url def createTSVGet(api_url=None): if api_url == None: api_url="https://www.patricbrc.org/api/" Session = requests.Session() Session.headers.update({ 'accept': "text/tsv" }) Session.headers.update({ "Content-Type": "application/rqlquery+x-www-form-urlencoded" }) if not authenticateByEnv(Session): authenticateByFile(None, Session) return Session def authenticateByFile(tokenFile=None, Session=None): if not tokenFile: tokenFile = os.path.join(os.environ.get('HOME'), ".patric_token") if os.path.exists(tokenFile): LOG.write("reading auth key from file %s\n"%tokenFile) with open(tokenFile) as F: tokenString = F.read().rstrip() authenticateByString(tokenString, Session) return True return False def authenticateByEnv(Session): if os.environ.has_key("KB_AUTH_TOKEN"): LOG.write("reading auth key from environment\n") authenticateByString(os.environ.get('KB_AUTH_TOKEN'), Session) return True else: return authenticateByFile(None, Session) def authenticateByString(tokenString, Session): Session.headers.update({ 'Authorization' : tokenString }) if "Authorization" in Session.headers: global PatricUser PatricUser = Session.headers["Authorization"].split(r"|")[3].split("=")[1] LOG.write("Patric user = %s\n"%PatricUser) def getGenomeIdsNamesByName(name, limit='10', Session=None): query = "eq(genome_name,%s)"%name query += "&select(genome_id,genome_name)" query += "&limit(%s)"%limit ret = Session.get(Base_url+"genome/", params=query) if Debug: LOG.write(ret.url+"\n") return(ret.text.replace('"', '')) def getGenomeGroupIds(genomeGroupName, Session): LOG.write("getGenomeGroupIds(%s), PatricUser=%s\n"%(genomeGroupName, PatricUser)) genomeGroupSpecifier = PatricUser+"/home/Genome Groups/"+genomeGroupName genomeGroupSpecifier = "/"+urllib.quote(genomeGroupSpecifier) genomeGroupSpecifier = genomeGroupSpecifier.replace("/", "%2f") query = "in(genome_id,GenomeGroup("+genomeGroupSpecifier+"))" query += "&select(genome_id)" query += "&limit(10000)" if Debug: LOG.write("requesting group %s for user %s\n"%(genomeGroupName, PatricUser)) LOG.write("query = %s\n"%(query)) ret = Session.get(Base_url+"genome/", params=query) if Debug: LOG.write(ret.url+"\n") return(ret.text.replace('"', '').split("\n"))[1:-1] def getNamesForGenomeIds(genomeIds, Session): # return getDataForGenomes(genomeIdSet, ["genome_id", "genome_name"]) retval = {} for genome in genomeIds: retval[genome] = "" query="in(genome_id,("+",".join(genomeIds)+"))&select(genome_id,genome_name)" response = Session.get(Base_url+"genome/", params=query) #, if Debug: LOG.write(" response URL: %s\n"%response.url) LOG.write(" len(response.text)= %d\n"%len(response.text)) if not response.ok: LOG.write("Error code %d returned by %s in getNamesForGenomeIds\n"%(response.status_code, response.url)) for line in response.text.split("\n"): line = line.replace('"','') row = line.split("\t", 1) if len(row) >= 2: genome, name = row retval[genome] = name return retval def getNamesForGenomeIdsByN(genomeIds, n=5, Session=None): """ For some reason, grabbing them in bulk misses some, so grab N at a time. 
""" retval = {} i = 0 genomeIds = list(genomeIds) while i < len(genomeIds): subset = genomeIds[i:i+n] retval.update(getNamesForGenomeIds(subset)) i += n return retval def getGenomeIdByFieldValue(queryField, queryValue, Session): query = "eq(%s,%s)"%(queryField, queryValue) query += "&select(genome_id)" req = Session.get(Base_url+"genome/", params=query) if Debug: LOG.write("getGenomeIdsByQuery: "+req.url+"\n") LOG.write(req.text+"\n") data = req.text.split("\n") genomeId = "" if len(data) > 1: genomeId = data[1] genomeId = genomeId.replace('\"', '') return genomeId def getDataForGenomes(genomeIdSet, fieldNames, Session): query = "in(genome_id,(%s))"%",".join(genomeIdSet) if fieldNames: query += "&select(%s)"%",".join(fieldNames) query += "&limit(%s)"%len(genomeIdSet) response = Session.get(Base_url+"genome/", params=query) if Debug: LOG.write("getDataForGenomes:\nurl="+response.url+"\nquery="+query+"\n") if not response.ok: LOG.write("Error code %d returned by %s in getDataForGenomes\n"%(response.status_code, response.url)) LOG.write("length of query was %d\n"%len(query)) LOG.write("url="+req.url+"\nquery="+query+"\n") raise Exception(errorMessage) data = response.text.replace('"','') #get rid of quotes rows = data.split("\n")[:-1] # leave off empty last element retval = [] for row in rows: fields = row.split("\t") #if len(fields) != len(fieldNames): # continue retval.append(fields) return(retval) def getProteinFastaForPatricIds(patricIds, Session): query="in(patric_id,("+",".join(map(urllib.quote, patricIds))+"))" query += "&limit(%d)"%len(patricIds) response=Session.get(Base_url+"genome_feature/", params=query, headers={'Accept': 'application/protein+fasta'}) if False and Debug: LOG.write("getProteinFastaForByPatricIds:\nurl="+response.url+"\nquery="+query+"\n") if not response.ok: LOG.write("Error code %d returned by %s in getProteinFastaForPatricIds\n"%(response.status_code, Base_url)) errorMessage= "Error code %d returned by %s in getGenomeFeaturesByPatricIds\nlength of query was %d\n"%(response.status_code, Base_url, len(query)) LOG.write(errorMessage) LOG.flush() raise Exception(errorMessage) idsFixedFasta="" for line in response.text.split("\n"): if line.startswith(">"): parts = line.split("|") if len(parts) > 2: line = "|".join(parts[:2]) idsFixedFasta += line+"\n" return idsFixedFasta def getDnaFastaForPatricIds(patricIds, Session): query="in(patric_id,("+",".join(map(urllib.quote, patricIds))+"))" query += "&limit(%d)"%len(patricIds) response=Session.get(Base_url+"genome_feature/", params=query, headers={'Accept': 'application/dna+fasta'}) if False and Debug: LOG.write("getDnaFastaForByPatricIds:\nurl="+response.url+"\nquery="+query+"\n") if not response.ok: LOG.write("Error code %d returned by %s in getDnaFastaForPatricIds\n"%(response.status_code, Base_url)) errorMessage= "Error code %d returned by %s in getGenomeFeaturesByPatricIds\nlength of query was %d\n"%(response.status_code, Base_url, len(query)) LOG.write(errorMessage) LOG.flush() raise Exception(errorMessage) idsFixedFasta="" for line in response.text.split("\n"): if line.startswith(">"): parts = line.split("|") if len(parts) > 2: line = "|".join(parts[:2]) idsFixedFasta += line+"\n" return idsFixedFasta def getProteinsFastaForGenomeId(genomeId, Session): query="in(genome_id,("+genomeId+"))" query += "&limit(25000)" response=Session.get(Base_url+"genome_feature/", params=query, headers={'Accept': 'application/protein+fasta'}) if Debug: LOG.write("getProteinsFastaForGenomeId:\nurl="+response.url+"\nquery="+query+"\n") 
if not response.ok: LOG.write("Error code %d returned by %s in getProteinsFastaForGenomeId\n"%(response.status_code, Base_url)) errorMessage= "Error code %d returned by %s in getProteinsFastaForGenomeId\nlength of query was %d\n"%(response.status_code, Base_url, len(query)) LOG.write(errorMessage) LOG.flush() raise Exception(errorMessage) idsFixedFasta="" for line in response.text.split("\n"): if line.startswith(">"): parts = line.split("|") if len(parts) > 2: line = "|".join(parts[:2]) idsFixedFasta += line+"\n" return idsFixedFasta def getProductsForPgfams(pgfams, Session): retval = {} for pgfam in pgfams: retval[pgfam] = "" query="in(family_id,("+",".join(pgfams)+"))&select(family_id,family_product)" response = Session.get(Base_url+"protein_family_ref/", params=query) #, if Debug: LOG.write(" response URL: %s\n"%response.url) LOG.write(" len(response.text)= %d\n"%len(response.text)) if not response.ok: LOG.write("Error code %d returned by %s in getProductsForPgfams\n"%(response.status_code, response.url)) for line in response.text.split("\n"): line = line.replace('"','') row = line.split("\t", 1) if len(row) >= 2: pgfam, product = row retval[pgfam] = product return retval def getProductsForPgfamsByN(pgfams, n=5, Session=None): """ For some reason, grabbing them in bulk misses some, so grab N at a time. """ retval = {} i = 0 pgfams = list(pgfams) while i < len(pgfams): subset = pgfams[i:i+n] retval.update(getProductsForPgfams(subset, Session)) i += n return retval def getPatricGenesPgfamsForGenomeSet(genomeIdSet, Session): if Debug: LOG.write("getPatricGenesPgfamsForGenomeSet() called for %d genomes\n"%len(genomeIdSet)) LOG.write(" Session headers=\n"+str(Session.headers)+"\n") retval = [] # one genome at a time, so using 'get' should be fine for genomeId in genomeIdSet: query="and(%s,%s,%s)"%("eq(genome_id,(%s))"%genomeId, "eq(feature_type,CDS)", "eq(pgfam_id,PGF*)") query += "&select(genome_id,patric_id,pgfam_id)" query += "&limit(25000)" response = Session.get(Base_url+"genome_feature/", params=query) #, """ req = requests.Request('POST', Base_url+"genome_feature/", data=query) prepared = Session.prepare_request(req) #req.prepare() response=Session.send(prepared, verify=False) """ if Debug: LOG.write(" response URL: %s\n"%response.url) LOG.write(" len(response.text)= %d\n"%len(response.text)) curLen = len(retval) for line in response.text.split("\n"): line = line.replace('"','') row = line.split("\t") if len(row) != 3: continue if not row[2].startswith("PGF"): continue retval.append(row) if Debug: LOG.write(" got %d pgfams for that genome\n"%(len(retval)-curLen)) return(retval) def getPgfamGenomeMatrix(genomeIdSet, Session=None, ggpMat=None): """ Given list of genome ids: tabulate genes per genome per pgfam (formats data from getPatricGenesPgfamsForGenomeSet as table) """ genomeGenePgfamList = getPatricGenesPgfamsForGenomeSet(genomeIdSet, Session) if not ggpMat: # if a real value was passed, extend it ggpMat = {} # genome-gene-pgfam matrix (really just a dictionary) for row in genomeGenePgfamList: genome, gene, pgfam = row if pgfam not in ggpMat: ggpMat[pgfam] = {} if genome not in ggpMat[pgfam]: ggpMat[pgfam][genome] = [] ggpMat[pgfam][genome].append(gene) return ggpMat def getPgfamCountMatrix(genomeIdSet, Session=None, ggpMat=None): """ Given list of genome ids: tabulate counts per genome per pgfam (formats data from getPatricGenesPgfamsForGenomeSet as table) """ genomeGenePgfamList = getPatricGenesPgfamsForGenomeSet(genomeIdSet, Session) if not ggpMat: # if a real value was passed, extend it ggpMat = {} # genome-gene-pgfam matrix 
(really just a dictionary) for row in genomeGenePgfamList: genome, gene, pgfam = row if pgfam not in ggpMat: ggpMat[pgfam] = {} if genome not in ggpMat[pgfam]: ggpMat[pgfam][genome] = 0 ggpMat[pgfam][genome] += 1 return ggpMat def writePgfamGeneMatrix(ggpMat, fileHandle): """ write out pgfamGeneMatrix to file handle data is list of genes per pgfam per genome rows are pgfams cols are genomes column headers identify genomes genes are comma-separated """ # first collect set of all genomes genomeSet = set() for pgfam in ggpMat: genomeSet.update(set(ggpMat[pgfam].keys())) genomes = sorted(genomeSet) fileHandle.write("PGFam\t"+"\t".join(genomes)+"\n") for pgfam in ggpMat: fileHandle.write(pgfam) for genome in genomes: gene = "" if genome in ggpMat[pgfam]: gene = ",".join(ggpMat[pgfam][genome]) fileHandle.write("\t"+gene) fileHandle.write("\n") def writePgfamCountMatrix(ggpMat, fileHandle): """ write out matrix of counts per pgfam per genome to file handle data is count of genes per pgfam per genome (integers) rows are pgfams cols are genomes column headers identify genomes """ # first collect set of all genomes genomeSet = set() for pgfam in ggpMat: genomeSet.update(set(ggpMat[pgfam].keys())) genomes = sorted(genomeSet) fileHandle.write("PGFam\t"+"\t".join(genomes)+"\n") for pgfam in ggpMat: fileHandle.write(pgfam) for genome in genomes: count = 0 if genome in ggpMat[pgfam]: value = ggpMat[pgfam][genome] # accept either integer counts or per-genome gene lists count = value if isinstance(value, int) else len(value) fileHandle.write("\t%d"%count) fileHandle.write("\n") def readPgfamGeneMatrix(fileHandle): """ read pgfamGeneMatrix from file handle Data are list of genes (comma-delimited) per genome per pgfam rows are pgfams, cols are genomes, column headers identify genomes """ # genome ids are headers in first line header = fileHandle.readline().rstrip() genomes = header.split("\t")[1:] # first entry is placeholder for pgfam rownames pgMat = {} # genome-gene-pgfam matrix (really just a dictionary) for row in fileHandle: fields = row.rstrip().split("\t") pgfam = fields[0] pgMat[pgfam] = {} data = fields[1:] for i, genome in enumerate(genomes): if len(data[i]): pgMat[pgfam][genome] = data[i] return pgMat def readPgfamCountMatrix(fileHandle): """ read pgfamCountMatrix from file handle rows are pgfams cols are genomes data are integer counts of that pgfam in that genome column headers identify genomes """ # genome ids are headers in first line header = fileHandle.readline().rstrip() genomes = header.split("\t")[1:] # first entry is placeholder for pgfam rownames pcMat = {} # pgfam count matrix (really just a dictionary) for row in fileHandle: fields = row.rstrip().split("\t") pgfam = fields[0] pcMat[pgfam] = {} data = fields[1:] for i, genome in enumerate(genomes): pcMat[pgfam][genome] = int(float(data[i])) return pcMat def getPatricGenesPgfamsForGenomeObject(genomeObject): # parse a PATRIC genome object (read from json format) for PGFams retval = [] # a list of tuples of (genomeId, geneId, pgfamId) genomeId = genomeObject['id'] for feature in genomeObject['features']: if 'family_assignments' in feature: for familyAssignment in feature['family_assignments']: if familyAssignment[0] == 'PGFAM': retval.append((genomeId, feature['id'], familyAssignment[1])) return retval def getGenomeObjectProteins(genomeObject): # return dictionary of patricId -> BioPython.SeqRecord genomeId = genomeObject['id'] retval = {} for feature in genomeObject['features']: patricId, product, aa_sequence = '', '', '' patricId = feature['id'] if "protein_translation" in feature: aa_sequence = feature["protein_translation"] 
if 'function' in feature: product = feature['function'] simpleSeq = Seq(aa_sequence, IUPAC.extended_protein) seqRecord = SeqRecord(simpleSeq, id=patricId, description=product) seqRecord.annotations["genome_id"] = genomeId retval[patricId] = seqRecord return retval def getGenomeObjectGeneDna(genomeObject, patricIds=None): # return dictionary of patricId -> BioPython.SeqRecord, optionally restricted to the ids in patricIds genomeId = genomeObject['id'] contigSeq = {} for contig in genomeObject['contigs']: contigSeq[contig['id']] = contig['dna'] retval = {} # dict of SeqRecords for feature in genomeObject['features']: geneId = feature['id'] if patricIds is not None and geneId not in patricIds: continue product = '' if 'function' in feature: product = feature['function'] if not 'location' in feature: continue contig, start, ori, length = feature['location'][0] # this should be an array of (contig, start, orientation, length) start = int(float(start)) length = int(float(length)) if ori == '+': start -= 1 simpleSeq = Seq(contigSeq[contig][start:start+length], IUPAC.ambiguous_dna) if ori == '-': simpleSeq = Seq(contigSeq[contig][start-length:start], IUPAC.ambiguous_dna) simpleSeq = simpleSeq.reverse_complement() seqRecord = SeqRecord(simpleSeq, id=geneId, description=product) seqRecord.annotations["genome_id"] = genomeId retval[geneId] = seqRecord return retval
PATRIC3/p3diffexp
lib/diffexp_api.py
Python
mit
17,928
[ "Biopython" ]
3ce8931a4a24db496bf91fd367ffff2355cf5a7f6807b4cdf2f6c73343be0daa
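A generic sketch of the "grab N at a time" workaround used by getNamesForGenomeIdsByN and getProductsForPgfamsByN above, with a hypothetical in-memory `fetch` callable standing in for a PATRIC query.
def fetch_in_chunks(ids, fetch, n=5):
    # Apply a batch-fetching callable to small slices of ids and merge the dicts.
    ids = list(ids)
    result = {}
    for i in range(0, len(ids), n):
        result.update(fetch(ids[i:i + n]))
    return result

# Toy fetcher: a dict lookup instead of a Session.get against the API.
lookup = {"g1": "Genome one", "g2": "Genome two", "g3": "Genome three"}
print(fetch_in_chunks(["g1", "g2", "g3"],
                      lambda batch: {g: lookup[g] for g in batch}, n=2))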
import SimpleITK as sitk import matplotlib.pyplot as plt import ipywidgets as widgets from IPython.display import display import numpy as np from matplotlib.widgets import RectangleSelector import matplotlib.patches as patches import matplotlib.cm as cm from matplotlib.ticker import MaxNLocator import copy class RegistrationPointDataAquisition(object): """ This class provides a GUI for localizing corresponding points in two images, and for evaluating registration results using a linked cursor approach: the user clicks in one image and the corresponding point is added to the other image. """ def __init__( self, fixed_image, moving_image, fixed_window_level=None, moving_window_level=None, figure_size=(10, 8), known_transformation=None, ): self.fixed_image = fixed_image ( self.fixed_npa, self.fixed_min_intensity, self.fixed_max_intensity, ) = self.get_window_level_numpy_array(self.fixed_image, fixed_window_level) self.moving_image = moving_image ( self.moving_npa, self.moving_min_intensity, self.moving_max_intensity, ) = self.get_window_level_numpy_array(self.moving_image, moving_window_level) self.fixed_point_indexes = [] self.moving_point_indexes = [] self.click_history = ( [] ) # Keep a history of user point localizations, enabling undo of last localization. self.known_transformation = known_transformation # If the transformation is valid (not None) then corresponding points are automatically added. self.text_and_marker_color = "red" ui = self.create_ui() display(ui) # Create a figure with two axes for the fixed and moving images. self.fig, axes = plt.subplots(1, 2, figsize=figure_size) # self.fig.canvas.set_window_title('Registration Points Acquisition') self.fixed_axes = axes[0] self.moving_axes = axes[1] # Connect the mouse button press to the canvas (__call__ method is the invoked callback). self.fig.canvas.mpl_connect("button_press_event", self) # Display the data and the controls, first time we display the images is outside the "update_display" method # as that method relies on the previous zoom factor which doesn't exist yet. self.fixed_axes.imshow( self.fixed_npa[self.fixed_slider.value, :, :] if self.fixed_slider else self.fixed_npa, cmap=plt.cm.Greys_r, vmin=self.fixed_min_intensity, vmax=self.fixed_max_intensity, ) self.moving_axes.imshow( self.moving_npa[self.moving_slider.value, :, :] if self.moving_slider else self.moving_npa, cmap=plt.cm.Greys_r, vmin=self.moving_min_intensity, vmax=self.moving_max_intensity, ) self.update_display() def create_ui(self): # Create the active UI components. Height and width are specified in 'em' units. This is # a html size specification, size relative to current font size. self.viewing_checkbox = widgets.RadioButtons( description="Interaction mode:", options=["edit", "view"], value="edit" ) self.clearlast_button = widgets.Button( description="Clear Last", width="7em", height="3em" ) self.clearlast_button.on_click(self.clear_last) self.clearall_button = widgets.Button( description="Clear All", width="7em", height="3em" ) self.clearall_button.on_click(self.clear_all) # Sliders are only created if a 3D image, otherwise no need. 
self.fixed_slider = self.moving_slider = None if self.fixed_npa.ndim == 3: self.fixed_slider = widgets.IntSlider( description="fixed image z slice:", min=0, max=self.fixed_npa.shape[0] - 1, step=1, value=int((self.fixed_npa.shape[0] - 1) / 2), width="20em", ) self.fixed_slider.observe(self.on_slice_slider_value_change, names="value") self.moving_slider = widgets.IntSlider( description="moving image z slice:", min=0, max=self.moving_npa.shape[0] - 1, step=1, value=int((self.moving_npa.shape[0] - 1) / 2), width="19em", ) self.moving_slider.observe(self.on_slice_slider_value_change, names="value") bx0 = widgets.Box( padding=7, children=[self.fixed_slider, self.moving_slider] ) # Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done # using the box widget and padding so that the visible UI components are spaced nicely. bx1 = widgets.Box(padding=7, children=[self.viewing_checkbox]) bx2 = widgets.Box(padding=15, children=[self.clearlast_button]) bx3 = widgets.Box(padding=15, children=[self.clearall_button]) return ( widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3]), bx0]) if self.fixed_npa.ndim == 3 else widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3])]) ) def get_window_level_numpy_array(self, image, window_level): """ Get the numpy array representation of the image and the min and max of the intensities used for display. """ npa = sitk.GetArrayViewFromImage(image) if not window_level: return npa, npa.min(), npa.max() else: return ( npa, window_level[1] - window_level[0] / 2.0, window_level[1] + window_level[0] / 2.0, ) def on_slice_slider_value_change(self, change): self.update_display() def update_display(self): """ Display the two images based on the slider values, if relevant, and the points which are on the displayed slices. """ # We want to keep the zoom factor which was set prior to display, so we log it before # clearing the axes. fixed_xlim = self.fixed_axes.get_xlim() fixed_ylim = self.fixed_axes.get_ylim() moving_xlim = self.moving_axes.get_xlim() moving_ylim = self.moving_axes.get_ylim() # Draw the fixed image in the first subplot and the localized points. self.fixed_axes.clear() self.fixed_axes.imshow( self.fixed_npa[self.fixed_slider.value, :, :] if self.fixed_slider else self.fixed_npa, cmap=plt.cm.Greys_r, vmin=self.fixed_min_intensity, vmax=self.fixed_max_intensity, ) # Positioning the text is a bit tricky, we position relative to the data coordinate system, but we # want to specify the shift in pixels as we are dealing with display. We therefore (a) get the data # point in the display coordinate system in pixel units (b) modify the point using pixel offset and # transform back to the data coordinate system for display. text_x_offset = -10 text_y_offset = -10 for i, pnt in enumerate(self.fixed_point_indexes): if ( self.fixed_slider and int(pnt[2] + 0.5) == self.fixed_slider.value ) or not self.fixed_slider: self.fixed_axes.scatter( pnt[0], pnt[1], s=90, marker="+", color=self.text_and_marker_color ) # Get point in pixels. text_in_data_coords = self.fixed_axes.transData.transform( [pnt[0], pnt[1]] ) # Offset in pixels and get in data coordinates. 
text_in_data_coords = self.fixed_axes.transData.inverted().transform( ( text_in_data_coords[0] + text_x_offset, text_in_data_coords[1] + text_y_offset, ) ) self.fixed_axes.text( text_in_data_coords[0], text_in_data_coords[1], str(i), color=self.text_and_marker_color, ) self.fixed_axes.set_title( f"fixed image - localized {len(self.fixed_point_indexes)} points" ) self.fixed_axes.set_axis_off() # Draw the moving image in the second subplot and the localized points. self.moving_axes.clear() self.moving_axes.imshow( self.moving_npa[self.moving_slider.value, :, :] if self.moving_slider else self.moving_npa, cmap=plt.cm.Greys_r, vmin=self.moving_min_intensity, vmax=self.moving_max_intensity, ) for i, pnt in enumerate(self.moving_point_indexes): if ( self.moving_slider and int(pnt[2] + 0.5) == self.moving_slider.value ) or not self.moving_slider: self.moving_axes.scatter( pnt[0], pnt[1], s=90, marker="+", color=self.text_and_marker_color ) text_in_data_coords = self.moving_axes.transData.transform( [pnt[0], pnt[1]] ) text_in_data_coords = self.moving_axes.transData.inverted().transform( ( text_in_data_coords[0] + text_x_offset, text_in_data_coords[1] + text_y_offset, ) ) self.moving_axes.text( text_in_data_coords[0], text_in_data_coords[1], str(i), color=self.text_and_marker_color, ) self.moving_axes.set_title( f"moving image - localized {len(self.moving_point_indexes)} points" ) self.moving_axes.set_axis_off() # Set the zoom factor back to what it was before we cleared the axes, and rendered our data. self.fixed_axes.set_xlim(fixed_xlim) self.fixed_axes.set_ylim(fixed_ylim) self.moving_axes.set_xlim(moving_xlim) self.moving_axes.set_ylim(moving_ylim) self.fig.canvas.draw_idle() def clear_all(self, button): """ Get rid of all the data. """ del self.fixed_point_indexes[:] del self.moving_point_indexes[:] del self.click_history[:] self.update_display() def clear_last(self, button): """ Remove last point or point-pair addition (depends on whether the interface is used for localizing point pairs or evaluation of registration). """ if self.click_history: if self.known_transformation: self.click_history.pop().pop() self.click_history.pop().pop() self.update_display() def get_points(self): """ Get the points in the image coordinate systems. """ if len(self.fixed_point_indexes) != len(self.moving_point_indexes): raise Exception( "Number of localized points in fixed and moving images does not match." ) fixed_point_list = [ self.fixed_image.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in self.fixed_point_indexes ] moving_point_list = [ self.moving_image.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in self.moving_point_indexes ] return fixed_point_list, moving_point_list def __call__(self, event): """ Callback invoked when the user clicks inside the figure. """ # We add points only in 'edit' mode. If the spatial transformation between the two images is known, self.known_transformation was set, # then every button_press_event will generate a point in each of the images. Finally, we enforce that all points have a corresponding # point in the other image by not allowing the user to add multiple points in the same image, they have to add points by switching between # the two images. 
if self.viewing_checkbox.value == "edit": if event.inaxes == self.fixed_axes: if len(self.fixed_point_indexes) - len(self.moving_point_indexes) <= 0: self.fixed_point_indexes.append( (event.xdata, event.ydata, self.fixed_slider.value) if self.fixed_slider else (event.xdata, event.ydata) ) self.click_history.append(self.fixed_point_indexes) if self.known_transformation: moving_point_physical = self.known_transformation.TransformPoint( self.fixed_image.TransformContinuousIndexToPhysicalPoint( self.fixed_point_indexes[-1] ) ) moving_point_indexes = ( self.moving_image.TransformPhysicalPointToContinuousIndex( moving_point_physical ) ) self.moving_point_indexes.append(moving_point_indexes) self.click_history.append(self.moving_point_indexes) if self.moving_slider: z_index = int(moving_point_indexes[2] + 0.5) if ( self.moving_slider.max >= z_index and self.moving_slider.min <= z_index ): self.moving_slider.value = z_index self.update_display() if event.inaxes == self.moving_axes: if len(self.moving_point_indexes) - len(self.fixed_point_indexes) <= 0: self.moving_point_indexes.append( (event.xdata, event.ydata, self.moving_slider.value) if self.moving_slider else (event.xdata, event.ydata) ) self.click_history.append(self.moving_point_indexes) if self.known_transformation: inverse_transform = self.known_transformation.GetInverse() fixed_point_physical = inverse_transform.TransformPoint( self.moving_image.TransformContinuousIndexToPhysicalPoint( self.moving_point_indexes[-1] ) ) fixed_point_indexes = ( self.fixed_image.TransformPhysicalPointToContinuousIndex( fixed_point_physical ) ) self.fixed_point_indexes.append(fixed_point_indexes) self.click_history.append(self.fixed_point_indexes) if self.fixed_slider: z_index = int(fixed_point_indexes[2] + 0.5) if ( self.fixed_slider.max >= z_index and self.fixed_slider.min <= z_index ): self.fixed_slider.value = z_index self.update_display() class PointDataAquisition(object): def __init__(self, image, window_level=None, figure_size=(10, 8)): self.image = image ( self.npa, self.min_intensity, self.max_intensity, ) = self.get_window_level_numpy_array(self.image, window_level) self.point_indexes = [] ui = self.create_ui() display(ui) # Create a figure. self.fig, self.axes = plt.subplots(1, 1, figsize=figure_size) # Connect the mouse button press to the canvas (__call__ method is the invoked callback). self.fig.canvas.mpl_connect("button_press_event", self) # Display the data and the controls, first time we display the image is outside the "update_display" method # as that method relies on the previous zoom factor which doesn't exist yet. self.axes.imshow( self.npa[self.slice_slider.value, :, :] if self.slice_slider else self.npa, cmap=plt.cm.Greys_r, vmin=self.min_intensity, vmax=self.max_intensity, ) self.update_display() def create_ui(self): # Create the active UI components. Height and width are specified in 'em' units. This is # a html size specification, size relative to current font size. self.viewing_checkbox = widgets.RadioButtons( description="Interaction mode:", options=["edit", "view"], value="edit" ) self.clearlast_button = widgets.Button( description="Clear Last", width="7em", height="3em" ) self.clearlast_button.on_click(self.clear_last) self.clearall_button = widgets.Button( description="Clear All", width="7em", height="3em" ) self.clearall_button.on_click(self.clear_all) # Slider is only created if a 3D image, otherwise no need. 
self.slice_slider = None if self.npa.ndim == 3: self.slice_slider = widgets.IntSlider( description="image z slice:", min=0, max=self.npa.shape[0] - 1, step=1, value=int((self.npa.shape[0] - 1) / 2), width="20em", ) self.slice_slider.observe(self.on_slice_slider_value_change, names="value") bx0 = widgets.Box(padding=7, children=[self.slice_slider]) # Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done # using the box widget and padding so that the visible UI components are spaced nicely. bx1 = widgets.Box(padding=7, children=[self.viewing_checkbox]) bx2 = widgets.Box(padding=15, children=[self.clearlast_button]) bx3 = widgets.Box(padding=15, children=[self.clearall_button]) return ( widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3]), bx0]) if self.slice_slider else widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3])]) ) def get_window_level_numpy_array(self, image, window_level): npa = sitk.GetArrayViewFromImage(image) if not window_level: return npa, npa.min(), npa.max() else: return ( npa, window_level[1] - window_level[0] / 2.0, window_level[1] + window_level[0] / 2.0, ) def on_slice_slider_value_change(self, change): self.update_display() def update_display(self): # We want to keep the zoom factor which was set prior to display, so we log it before # clearing the axes. xlim = self.axes.get_xlim() ylim = self.axes.get_ylim() # Draw the image and localized points. self.axes.clear() self.axes.imshow( self.npa[self.slice_slider.value, :, :] if self.slice_slider else self.npa, cmap=plt.cm.Greys_r, vmin=self.min_intensity, vmax=self.max_intensity, ) # Positioning the text is a bit tricky, we position relative to the data coordinate system, but we # want to specify the shift in pixels as we are dealing with display. We therefore (a) get the data # point in the display coordinate system in pixel units (b) modify the point using pixel offset and # transform back to the data coordinate system for display. text_x_offset = -10 text_y_offset = -10 for i, pnt in enumerate(self.point_indexes): if ( self.slice_slider and int(pnt[2] + 0.5) == self.slice_slider.value ) or not self.slice_slider: self.axes.scatter(pnt[0], pnt[1], s=90, marker="+", color="yellow") # Get point in pixels. text_in_data_coords = self.axes.transData.transform([pnt[0], pnt[1]]) # Offset in pixels and get in data coordinates. text_in_data_coords = self.axes.transData.inverted().transform( ( text_in_data_coords[0] + text_x_offset, text_in_data_coords[1] + text_y_offset, ) ) self.axes.text( text_in_data_coords[0], text_in_data_coords[1], str(i), color="yellow", ) self.axes.set_title(f"localized {len(self.point_indexes)} points") self.axes.set_axis_off() # Set the zoom factor back to what it was before we cleared the axes, and rendered our data. self.axes.set_xlim(xlim) self.axes.set_ylim(ylim) self.fig.canvas.draw_idle() def add_point_indexes(self, point_index_data): self.validate_points(point_index_data) self.point_indexes.append(list(point_index_data)) self.update_display() def set_point_indexes(self, point_index_data): self.validate_points(point_index_data) del self.point_indexes[:] self.point_indexes = list(point_index_data) self.update_display() def validate_points(self, point_index_data): for p in point_index_data: if self.npa.ndim != len(p): raise ValueError( "Given point (" + ", ".join(map(str, p)) + ") dimension does not match image dimension." 
) outside_2d_bounds = ( p[0] >= self.npa.shape[2] or p[0] < 0 or p[1] >= self.npa.shape[1] or p[1] < 0 ) outside_bounds = outside_2d_bounds or ( False if self.npa.ndim == 2 else p[2] >= self.npa.shape[0] or p[2] < 0 ) if outside_bounds: raise ValueError( "Given point (" + ", ".join(map(str, p)) + ") is outside the image bounds." ) def clear_all(self, button): del self.point_indexes[:] self.update_display() def clear_last(self, button): if self.point_indexes: self.point_indexes.pop() self.update_display() def get_points(self): return [ self.image.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in self.point_indexes ] def get_point_indexes(self): """ Return the point indexes, not the continuous index we keep. """ # Round and then cast to int; rounding alone would return a float return [tuple(map(lambda x: int(round(x)), pnt)) for pnt in self.point_indexes] def __call__(self, event): if self.viewing_checkbox.value == "edit": if event.inaxes == self.axes: self.point_indexes.append( (event.xdata, event.ydata, self.slice_slider.value) if self.slice_slider else (event.xdata, event.ydata) ) self.update_display() def multi_image_display2D( image_list, title_list=None, window_level_list=None, figure_size=(10, 8), horizontal=True, ): if title_list: if len(image_list) != len(title_list): raise ValueError("Title list and image list lengths do not match") else: title_list = [""] * len(image_list) # Create a figure. col_num, row_num = (len(image_list), 1) if horizontal else (1, len(image_list)) fig, axes = plt.subplots(row_num, col_num, figsize=figure_size) if len(image_list) == 1: axes = [axes] # Get images as numpy arrays for display and the window level settings npa_list = list(map(sitk.GetArrayViewFromImage, image_list)) if not window_level_list: min_intensity_list = list(map(np.min, npa_list)) max_intensity_list = list(map(np.max, npa_list)) else: min_intensity_list = list(map(lambda x: x[1] - x[0] / 2.0, window_level_list)) max_intensity_list = list(map(lambda x: x[1] + x[0] / 2.0, window_level_list)) # Draw the image(s) for ax, npa, title, min_intensity, max_intensity in zip( axes, npa_list, title_list, min_intensity_list, max_intensity_list ): ax.imshow(npa, cmap=plt.cm.Greys_r, vmin=min_intensity, vmax=max_intensity) ax.set_title(title) ax.set_axis_off() fig.tight_layout() return (fig, axes) class MultiImageDisplay(object): """ This class provides a GUI for displaying 3D images. It supports display of multiple images in the same UI. The image slices are selected according to the axis specified by the user. Each image can have a title and a slider to scroll through the stack. The images can also share a single slider if they have the same number of slices along the given axis. Images are either grayscale or color. The intensity range used for display (window-level) can be specified by the user as input to the constructor or set via the displayed slider. For color images the intensity control slider will be disabled. This allows us to display both color and grayscale images in the same figure with a consistent look to the controls. The range of the intensity slider is set to be from top/bottom 2% of intensities (accommodating for outliers). Images are displayed either in horizontal or vertical layout, depending on the user's choice. 
""" def __init__( self, image_list, axis=0, shared_slider=False, title_list=None, window_level_list=None, intensity_slider_range_percentile=[2, 98], figure_size=(10, 8), horizontal=True, ): self.npa_list, wl_range, wl_init = self.get_window_level_numpy_array( image_list, window_level_list, intensity_slider_range_percentile ) if title_list: if len(image_list) != len(title_list): raise ValueError("Title list and image list lengths do not match") self.title_list = list(title_list) else: self.title_list = [""] * len(image_list) # Our dynamic slice, based on the axis the user specifies self.slc = [slice(None)] * 3 self.axis = axis ui = self.create_ui(shared_slider, wl_range, wl_init) display(ui) # Create a figure. col_num, row_num = (len(image_list), 1) if horizontal else (1, len(image_list)) self.fig, self.axes = plt.subplots(row_num, col_num, figsize=figure_size) if len(image_list) == 1: self.axes = [self.axes] # Display the data and the controls, first time we display the image is outside the "update_display" method # as that method relies on the previous zoom factor which doesn't exist yet. for ax, npa, slider, wl_slider in zip( self.axes, self.npa_list, self.slider_list, self.wl_list ): self.slc[self.axis] = slice(slider.value, slider.value + 1) # Need to use squeeze to collapse degenerate dimension (e.g. RGB image size 124 124 1 3) ax.imshow( np.squeeze(npa[tuple(self.slc)]), cmap=plt.cm.Greys_r, vmin=wl_slider.value[0], vmax=wl_slider.value[1], ) self.update_display() plt.tight_layout() def create_ui(self, shared_slider, wl_range, wl_init): # Create the active UI components. Height and width are specified in 'em' units. This is # a html size specification, size relative to current font size. if shared_slider: # Validate that all the images have the same size along the axis which we scroll through sz = self.npa_list[0].shape[self.axis] for npa in self.npa_list: if npa.shape[self.axis] != sz: raise ValueError( "Not all images have the same size along the specified axis, cannot share slider." ) slider = widgets.IntSlider( description="image slice:", min=0, max=sz - 1, step=1, value=int((sz - 1) / 2), width="20em", ) slider.observe(self.on_slice_slider_value_change, names="value") self.slider_list = [slider] * len(self.npa_list) slicer_box = widgets.Box(padding=7, children=[slider]) else: self.slider_list = [] for npa in self.npa_list: slider = widgets.IntSlider( description="image slice:", min=0, max=npa.shape[self.axis] - 1, step=1, value=int((npa.shape[self.axis] - 1) / 2), width="20em", ) slider.observe(self.on_slice_slider_value_change, names="value") self.slider_list.append(slider) slicer_box = widgets.Box(padding=7, children=self.slider_list) self.wl_list = [] # Each image has a window-level slider, but it is disabled if the image # is a color image len(npa.shape)==4 . This allows us to display both # color and grayscale images in the same UI while retaining a reasonable # layout for the sliders. 
for r_values, i_values, npa in zip(wl_range, wl_init, self.npa_list): wl_range_slider = widgets.IntRangeSlider( description="intensity:", min=r_values[0], max=r_values[1], step=1, value=[i_values[0], i_values[1]], width="20em", disabled=len(npa.shape) == 4, ) wl_range_slider.observe(self.on_wl_slider_value_change, names="value") self.wl_list.append(wl_range_slider) wl_box = widgets.Box(padding=7, children=self.wl_list) return widgets.VBox(children=[slicer_box, wl_box]) def get_window_level_numpy_array( self, image_list, window_level_list, intensity_slider_range_percentile ): # Using GetArray and not GetArrayView because we don't keep references # to the original images. If they are deleted outside, the view would become # invalid, so we use a copy which guarantees that the GUI is consistent. npa_list = list(map(sitk.GetArrayFromImage, image_list)) wl_range = [] wl_init = [] # We need to iterate over the images because they can be a mix of # grayscale and color images. If they are color we set the wl_range # to [0,255] and set wl_init to the same range, ignoring the window_level_list # entry. for i, npa in enumerate(npa_list): if len(npa.shape) == 4: # color image wl_range.append((0, 255)) wl_init.append((0, 255)) # ignore any window_level_list entry else: # We don't necessarily take the minimum/maximum values, just in case there are outliers; # the user can specify how much to take off from top and bottom. min_max = np.percentile( npa.flatten(), intensity_slider_range_percentile ) wl_range.append((min_max[0], min_max[1])) if not window_level_list: # No list was given. wl_init.append(wl_range[-1]) else: wl = window_level_list[i] if wl: wl_init.append((wl[1] - wl[0] / 2.0, wl[1] + wl[0] / 2.0)) else: # We have a list, but for this image the entry was left empty: [] wl_init.append(wl_range[-1]) return (npa_list, wl_range, wl_init) def on_slice_slider_value_change(self, change): self.update_display() def on_wl_slider_value_change(self, change): self.update_display() def update_display(self): # Draw the image(s) for ax, npa, title, slider, wl_slider in zip( self.axes, self.npa_list, self.title_list, self.slider_list, self.wl_list ): # We want to keep the zoom factor which was set prior to display, so we log it before # clearing the axes. xlim = ax.get_xlim() ylim = ax.get_ylim() self.slc[self.axis] = slice(slider.value, slider.value + 1) ax.clear() # Need to use squeeze to collapse degenerate dimension (e.g. RGB image size 124 124 1 3) ax.imshow( np.squeeze(npa[tuple(self.slc)]), cmap=plt.cm.Greys_r, vmin=wl_slider.value[0], vmax=wl_slider.value[1], ) ax.set_title(title) ax.set_axis_off() # Set the zoom factor back to what it was before we cleared the axes, and rendered our data. ax.set_xlim(xlim) ax.set_ylim(ylim) self.fig.canvas.draw_idle() class ROIDataAquisition(object): """ This class provides a GUI for selecting box-shaped Regions Of Interest (ROIs). Each ROI is represented as a tuple: ((min_x,max_x),(min_y,max_y), and possibly (min_z,max_z)) if dealing with a 3D image. When using the zoom/pan tool from the toolbar ROI selection is disabled. Once you click again on the zoom/pan button zooming/panning will be disabled and ROI selection is enabled. Note that when you are marking the ROI on a slice that is outside the Z-range selected by the range slider, once you are done selecting the ROI, you will see no change on the current slice. This is the correct behavior, though initially you may be surprised by it. 
""" def __init__(self, image, window_level=None, figure_size=(10, 8)): self.image = image ( self.npa, self.min_intensity, self.max_intensity, ) = self.get_window_level_numpy_array(self.image, window_level) self.rois = [] # ROI display settings self.roi_display_properties = dict( facecolor="red", edgecolor="black", alpha=0.2, fill=True ) ui = self.create_ui() display(ui) # Create a figure. self.fig, self.axes = plt.subplots(1, 1, figsize=figure_size) # Connect the mouse button press to the canvas (__call__ method is the invoked callback). self.fig.canvas.mpl_connect("button_press_event", self) self.roi_selector = RectangleSelector( self.axes, lambda eclick, erelease: None, drawtype="box", useblit=True, button=[1, 3], # Left, right buttons only. minspanx=5, minspany=5, # Ignore motion smaller than 5 pixels. spancoords="pixels", interactive=True, rectprops=self.roi_display_properties, ) self.roi_selector.set_visible(False) # Display the data and the controls, first time we display the image is outside the "update_display" method # as that method relies on the existance of a previous image which is removed from the figure. self.axes.imshow( self.npa[self.slice_slider.value, :, :] if self.slice_slider else self.npa, cmap=plt.cm.Greys_r, vmin=self.min_intensity, vmax=self.max_intensity, ) self.update_display() def create_ui(self): # Create the active UI components. Height and width are specified in 'em' units. This is # a html size specification, size relative to current font size. self.addroi_button = widgets.Button( description="Add ROI", width="7em", height="3em" ) self.addroi_button.on_click(self.add_roi) self.clearlast_button = widgets.Button( description="Clear Last", width="7em", height="3em" ) self.clearlast_button.on_click(self.clear_last) self.clearall_button = widgets.Button( description="Clear All", width="7em", height="3em" ) self.clearall_button.on_click(self.clear_all) # Create sliders only if 3D image self.slice_slider = self.roi_range_slider = None if self.npa.ndim == 3: self.roi_range_slider = widgets.IntRangeSlider( description="ROI z range:", min=0, max=self.npa.shape[0] - 1, step=1, value=[0, self.npa.shape[0] - 1], width="20em", ) bx4 = widgets.Box(padding=15, children=[self.roi_range_slider]) self.slice_slider = widgets.IntSlider( description="image z slice:", min=0, max=self.npa.shape[0] - 1, step=1, value=int((self.npa.shape[0] - 1) / 2), width="20em", ) self.slice_slider.observe(self.on_slice_slider_value_change, names="value") bx0 = widgets.Box(padding=7, children=[self.slice_slider]) # Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done # using the box widget and padding so that the visible UI components are spaced nicely. 
bx1 = widgets.Box(padding=7, children=[self.addroi_button]) bx2 = widgets.Box(padding=15, children=[self.clearlast_button]) bx3 = widgets.Box(padding=15, children=[self.clearall_button]) return ( widgets.HBox( children=[ widgets.HBox(children=[bx1, bx2, bx3]), widgets.VBox(children=[bx0, bx4]), ] ) if self.npa.ndim == 3 else widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3])]) ) def on_slice_slider_value_change(self, change): self.update_display() def get_window_level_numpy_array(self, image, window_level): npa = sitk.GetArrayViewFromImage(image) # We don't take the minimum/maximum values, just in case there are outliers (top/bottom 2%) if not window_level: min_max = np.percentile(npa.flatten(), [2, 98]) return npa, min_max[0], min_max[1] else: return ( npa, window_level[1] - window_level[0] / 2.0, window_level[1] + window_level[0] / 2.0, ) def update_display(self): # Draw the image and ROIs. # imshow adds an image to the axes, so we also remove the previous one. self.axes.imshow( self.npa[self.slice_slider.value, :, :] if self.slice_slider else self.npa, cmap=plt.cm.Greys_r, vmin=self.min_intensity, vmax=self.max_intensity, ) self.axes.images[0].remove() # Iterate over all of the ROIs and only display/undisplay those that are relevant. if self.slice_slider: for roi_data in self.rois: if ( self.slice_slider.value >= roi_data[3][0] and self.slice_slider.value <= roi_data[3][1] ): roi_data[0].set_visible(True) else: roi_data[0].set_visible(False) self.axes.set_title(f"selected {len(self.rois)} ROIs") self.axes.set_axis_off() self.fig.canvas.draw_idle() def add_roi_data(self, roi_data): """ Add regions of interest to this GUI. Input is an iterable containing tuples where each tuple contains either two or three tuples (min_x,max_x),(min_y,max_y), (min_z,max_z), depending on the image dimensionality. The ROI is the box defined by these integer values and includes both min/max values. """ self.validate_rois(roi_data) for roi in roi_data: self.rois.append( ( patches.Rectangle( (roi[0][0], roi[1][0]), roi[0][1] - roi[0][0], roi[1][1] - roi[1][0], **self.roi_display_properties, ), roi[0], roi[1], roi[2] if self.npa.ndim == 3 else None, ) ) self.axes.add_patch(self.rois[-1][0]) self.update_display() def set_rois(self, roi_data): """ Clear any existing ROIs and set the display to the given ones. Input is an iterable containing tuples where each tuple contains two or three tuples (min_x,max_x),(min_y,max_y), (min_z,max_z) depending on the image dimensionality. The ROI is the box defined by these integer values and includes both min/max values. """ self.clear_all_data() self.add_roi_data(roi_data) def validate_rois(self, roi_data): for roi in roi_data: for i, bounds in enumerate(roi, 1): if bounds[0] > bounds[1]: raise ValueError( "First element in each tuple is expected to be smaller than second element, error in ROI (" + ", ".join(map(str, roi)) + ")." ) # Note that SimpleITK uses x-y-z specification vs. numpy's z-y-x if not ( bounds[0] >= 0 and bounds[1] < self.npa.shape[self.npa.ndim - i] ): raise ValueError( "Given ROI (" + ", ".join(map(str, roi)) + ") is outside the image bounds." ) def add_roi(self, button): if self.roi_selector.visible: self.roi_selector.set_visible(False) # Extent is in sub-pixel coordinates, we need it in pixels/voxels. roi_extent = [int(round(coord)) for coord in self.roi_selector.extents] # We keep the patch for display and the x,y,z ranges of the ROI. 
self.rois.append( ( patches.Rectangle( (roi_extent[0], roi_extent[2]), roi_extent[1] - roi_extent[0], roi_extent[3] - roi_extent[2], **self.roi_display_properties, ), (roi_extent[0], roi_extent[1]), (roi_extent[2], roi_extent[3]), self.roi_range_slider.value if self.roi_range_slider else None, ) ) self.axes.add_patch(self.rois[-1][0]) self.update_display() def clear_all_data(self): for roi_data in self.rois: roi_data[0].remove() del self.rois[:] def clear_all(self, button): self.clear_all_data() self.update_display() def clear_last(self, button): if self.rois: self.rois[-1][0].remove() self.rois.pop() self.update_display() def get_rois(self): """ Return a list of tuples representing the ROIs. Each tuple contains either two or three tuples (min_x,max_x), (min_y,max_y), (min_z,max_z) depending on image dimensionality. The ROI is the box defined by these integer values and includes both min/max values. """ return [ (roi_data[1], roi_data[2], roi_data[3]) if self.npa.ndim == 3 else (roi_data[1], roi_data[2]) for roi_data in self.rois ] def __call__(self, event): # When zoom or pan is active we # ignore the button press; once the user deactivates the zoom/pan we allow them # to select the ROI. # Details in this stack overflow discussion: # http://stackoverflow.com/questions/20711148/ignore-matplotlib-cursor-widget-when-toolbar-widget-selected if self.fig.canvas.manager.toolbar.mode == "": self.roi_selector.set_visible(True) self.addroi_button.disabled = False self.update_display() class PairedPointDataManipulation(object): """ This class provides a GUI for paired point creation, to illustrate the use of the SimpleITK LandmarkBasedTransformInitializer. The GUI displays a region of size [0,data_scale]X[0,data_scale] in which the user can select points. The user can then translate and rotate the point configuration. The points are either fiducials (used in registration) or targets (not used in registration). The transformation estimated by the component can be either rigid or affine. The UI allows the user to add noise, bias,... to the moving fiducials and perform registration. """ def __init__( self, transform=sitk.Euler2DTransform(), data_scale=100.0, figure_size=(8, 6) ): self.figure_size = figure_size # Properties of the glyphs used to display the fixed/moving fiducials/targets and rotation centroid. self.FIXED_FIDUCIAL_CONFIG = { "marker": "x", "markersize": 6, "color": "#ff8888", } self.FIXED_TARGET_CONFIG = { "marker": "s", "markerfacecolor": "none", "markersize": 6, "color": "#88ff88", } self.MOVING_FIDUCIAL_CONFIG = { "marker": "+", "markersize": 8, "color": "#ffdddd", } self.MOVING_TARGET_CONFIG = { "marker": "o", "markerfacecolor": "none", "markersize": 8, "color": "#ddffdd", } self.CENTROID_CONFIG = {"marker": ".", "markersize": 4, "color": "#58d8ff"} # The fixed fiducials/targets do not have noise added to them. self.fixed_fiducials = [] self.fixed_targets = [] # The moving fiducials will have noise, bias added to their coordinates. Noise # model is zero mean Gaussian, isotropic and homogeneous. self.moving_fiducials = [] self.moving_targets = [] # This list will contain the FLE vectors, not the FLE. Allows us to # accumulate the error as the user adds errors multiple times. self.FLE = [] # Centroid of the moving fiducials and targets. 
self.centroid = [] # Fiducials can only be added before any noise/bias is added to the existing fiducials self.can_add_fiducials = True # The transformation type that will be used by the LandmarkBasedTransformInitializer # this component supports, Rigid2DTransform, AffineTransform, BSplineTransform self.transform = transform # The point data will be in [0,data_scale]x[0,data_scale] self.scale = data_scale self.ui = self.create_ui() display(self.ui) # Create a figure. self.fig, self.axes = plt.subplots( nrows=1, ncols=1, sharex=True, sharey=True, figsize=self.figure_size ) self.fig.canvas.mpl_connect("button_press_event", self.on_press) self.fig.canvas.mpl_connect("motion_notify_event", self.on_motion) self.fig.canvas.mpl_connect("button_release_event", self.on_release) self.update_display() def create_ui(self): # Create the active UI components. Height and width are specified in 'em' units. This is # a html size specification, size relative to current font size. self.viewing_checkbox = widgets.RadioButtons( description="Mode:", options=["edit", "translate", "rotate"], value="edit" ) self.viewing_checkbox.observe(self.update_centroid_and_display) self.noise_button = widgets.Button( description="Add Noise", width="7em", height="3em" ) self.noise_button.on_click(self.noise) self.outlier_button = widgets.Button( description="Add Outlier", width="7em", height="3em" ) self.outlier_button.on_click(self.outlier) self.bias1_button = widgets.Button( description="Bias (FRE<TRE)", width="7em", height="3em" ) self.bias1_button.on_click(self.bias_1) self.bias2_button = widgets.Button( description="Bias (FRE>TRE)", width="7em", height="3em" ) self.bias2_button.on_click(self.bias_2) self.clear_fiducial_button = widgets.Button( description="Clear Fiducials", width="7em", height="3em" ) self.clear_fiducial_button.on_click(self.clear_fiducials) self.clear_target_button = widgets.Button( description="Clear Targets", width="7em", height="3em" ) self.clear_target_button.on_click(self.clear_targets) self.reset_button = widgets.Button( description="Reset", width="7em", height="3em" ) self.reset_button.on_click(self.reset) self.register_button = widgets.Button( description="Register", width="7em", height="3em" ) self.register_button.on_click(self.register) # Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done # using the box widget and padding so that the visible UI components are spaced nicely. bx0 = widgets.Box(padding=2, children=[self.viewing_checkbox]) bx1 = widgets.Box(padding=15, children=[self.noise_button]) bx2 = widgets.Box(padding=15, children=[self.outlier_button]) bx3 = widgets.Box(padding=15, children=[self.bias1_button]) bx4 = widgets.Box(padding=15, children=[self.bias2_button]) bx5 = widgets.Box(padding=15, children=[self.clear_fiducial_button]) bx6 = widgets.Box(padding=15, children=[self.clear_target_button]) bx7 = widgets.Box(padding=15, children=[self.reset_button]) bx8 = widgets.Box(padding=15, children=[self.register_button]) return widgets.HBox( children=[ bx0, widgets.VBox( children=[ widgets.HBox([bx1, bx2, bx3, bx4]), widgets.HBox(children=[bx5, bx6, bx7, bx8]), ] ), ] ) def update_display(self): self.axes.clear() # Draw the fixed and moving fiducials and targets using the glyph specifications defined in # the class constructor. 
self.moving_fiducials_glyphs = [] self.moving_targets_glyphs = [] for fixed_fiducial, moving_fiducial in zip( self.fixed_fiducials, self.moving_fiducials ): self.axes.plot( fixed_fiducial[0], fixed_fiducial[1], **(self.FIXED_FIDUCIAL_CONFIG) ) self.moving_fiducials_glyphs += self.axes.plot( moving_fiducial[0], moving_fiducial[1], **(self.MOVING_FIDUCIAL_CONFIG) ) for fixed_target, moving_target in zip(self.fixed_targets, self.moving_targets): self.axes.plot( fixed_target[0], fixed_target[1], **(self.FIXED_TARGET_CONFIG) ) self.moving_targets_glyphs += self.axes.plot( moving_target[0], moving_target[1], **(self.MOVING_TARGET_CONFIG) ) if self.centroid: self.axes.plot(self.centroid[0], self.centroid[1], **(self.CENTROID_CONFIG)) self.axes.set_title("Registration Error Demonstration") self.axes.get_xaxis().set_visible(False) self.axes.get_yaxis().set_visible(False) self.axes.set_facecolor((0, 0, 0)) # Set the data range back to what it was before we cleared the axes, and rendered our data. self.axes.set_xlim([0, self.scale]) self.axes.set_ylim([0, self.scale]) self.fig.canvas.draw_idle() def update_centroid_and_display(self, button): self.update_centroid() self.update_display() def update_centroid(self): if self.viewing_checkbox.value == "rotate" and ( self.moving_targets or self.moving_fiducials ): n = len(self.moving_fiducials) + len(self.moving_targets) x, y = zip(*(self.moving_fiducials + self.moving_targets)) self.centroid = [sum(x) / n, sum(y) / n] else: self.centroid = [] def noise(self, button): if self.moving_fiducials: self.can_add_fiducials = False for fiducial, fle in zip(self.moving_fiducials, self.FLE): dx = float(np.random.normal(scale=0.02 * self.scale)) dy = float(np.random.normal(scale=0.02 * self.scale)) fiducial[0] += dx fiducial[1] += dy fle[0] += dx fle[1] += dy self.update_display() def outlier(self, button): if self.moving_fiducials: self.can_add_fiducials = False index = np.random.randint(low=0, high=len(self.moving_fiducials)) new_x = max( min(self.moving_fiducials[index][0] + 0.1 * self.scale, self.scale), 0 ) new_y = max( min(self.moving_fiducials[index][1] + 0.1 * self.scale, self.scale), 0 ) self.FLE[index][0] += new_x - self.moving_fiducials[index][0] self.FLE[index][1] += new_y - self.moving_fiducials[index][1] self.moving_fiducials[index][0] = new_x self.moving_fiducials[index][1] = new_y self.update_display() def bias_1(self, button): if self.moving_fiducials: self.can_add_fiducials = False for fiducial, fle in zip(self.moving_fiducials, self.FLE): fiducial[0] += 0.015 * self.scale fiducial[1] += 0.015 * self.scale fle[0] += 0.015 * self.scale fle[1] += 0.015 * self.scale self.update_display() def bias_2(self, button): if self.moving_fiducials: self.can_add_fiducials = False pol = 1 for fiducial, fle in zip(self.moving_fiducials, self.FLE): fiducial[0] += 0.015 * pol * self.scale fiducial[1] += 0.015 * pol * self.scale fle[0] += 0.015 * pol * self.scale fle[1] += 0.015 * pol * self.scale pol *= -1 self.update_display() def clear_fiducials(self, button): self.fixed_fiducials = [] self.moving_fiducials = [] self.FLE = [] self.can_add_fiducials = True self.update_centroid() self.update_display() def clear_targets(self, button): self.fixed_targets = [] self.moving_targets = [] self.update_centroid() self.update_display() def reset(self, button): self.moving_fiducials = copy.deepcopy(self.fixed_fiducials) self.moving_targets = copy.deepcopy(self.fixed_targets) self.FLE = [[0.0, 0.0] for _ in self.moving_fiducials] # one independent [dx, dy] per fiducial (a multiplied list would alias a single shared entry) self.can_add_fiducials = True self.update_centroid() 
self.update_display() def register(self, button): fixed_points = [c for p in self.fixed_fiducials for c in p] moving_points = [c for p in self.moving_fiducials for c in p] transform = sitk.LandmarkBasedTransformInitializer( self.transform, fixed_points, moving_points ) # For display purposes we want to transform the moving points to the # fixed ones, so using the inverse transformation inverse_transform = transform.GetInverse() for pnt in self.moving_fiducials + self.moving_targets: transformed_pnt = inverse_transform.TransformPoint(pnt) pnt[0] = transformed_pnt[0] pnt[1] = transformed_pnt[1] self.update_centroid() self.update_display() def on_press(self, event): if self.viewing_checkbox.value == "edit": if self.can_add_fiducials: if event.button == 1: # left click if event.inaxes == self.axes: self.fixed_fiducials.append([event.xdata, event.ydata]) self.moving_fiducials.append([event.xdata, event.ydata]) self.FLE.append([0.0, 0.0]) self.update_display() elif event.button == 3: # right click if event.inaxes == self.axes: self.fixed_targets.append([event.xdata, event.ydata]) self.moving_targets.append([event.xdata, event.ydata]) self.update_display() elif event.button == 1: # left click if event.inaxes == self.axes: if self.viewing_checkbox.value == "translate": self.previousx = event.xdata self.previousy = event.ydata elif self.viewing_checkbox.value == "rotate" and self.centroid: self.previous = [ event.xdata - self.centroid[0], event.ydata - self.centroid[1], ] def on_motion(self, event): if event.button == 1: # left click if self.viewing_checkbox.value == "translate": dx = event.xdata - self.previousx dy = event.ydata - self.previousy for glyph in self.moving_fiducials_glyphs + self.moving_targets_glyphs: glyph.set_data(glyph.get_xdata() + dx, glyph.get_ydata() + dy) self.previousx = event.xdata self.previousy = event.ydata self.fig.canvas.draw_idle() self.fig.canvas.flush_events() elif self.viewing_checkbox.value == "rotate" and self.centroid: ox = self.centroid[0] oy = self.centroid[1] v1 = self.previous v2 = [event.xdata - ox, event.ydata - oy] cosang = v1[0] * v2[0] + v1[1] * v2[1] sinang = v1[0] * v2[1] - v1[1] * v2[0] angle = np.arctan2(sinang, cosang) for glyph in self.moving_fiducials_glyphs + self.moving_targets_glyphs: px = glyph.get_xdata() py = glyph.get_ydata() glyph.set_data( ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy), oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy), ) self.previous = v2 self.fig.canvas.draw_idle() self.fig.canvas.flush_events() def on_release(self, event): if event.button == 1: # left click if ( self.viewing_checkbox.value == "translate" or self.viewing_checkbox.value == "rotate" ): # Update the actual data using the glyphs (modified during translation/rotation) for glyph, fiducial in zip( self.moving_fiducials_glyphs, self.moving_fiducials ): fiducial[0] = float(glyph.get_xdata()) fiducial[1] = float(glyph.get_ydata()) for glyph, target in zip( self.moving_targets_glyphs, self.moving_targets ): target[0] = float(glyph.get_xdata()) target[1] = float(glyph.get_ydata()) def get_fixed_fiducials(self): return self.fixed_fiducials def get_fixed_targets(self): return self.fixed_targets def get_moving_fiducials(self): return self.moving_fiducials def get_moving_targets(self): return self.moving_targets def get_FLE(self): return [np.sqrt(fle_vec[0] ** 2 + fle_vec[1] ** 2) for fle_vec in self.FLE] def get_all_data(self): return ( self.fixed_fiducials, self.fixed_targets, self.moving_fiducials, self.moving_targets, self.get_FLE(), ) def 
set_fiducials(self, fiducials): self.set_points(fiducials) self.FLE = [[0.0, 0.0] for _ in self.moving_fiducials] def set_targets(self, targets): self.set_points(targets, are_fiducials=False) def set_points(self, points, are_fiducials=True): # Validate the points are inside the expected range. all_coords = [coord for pnt in points for coord in pnt] if min(all_coords) < 0 or max(all_coords) > self.scale: raise ValueError( f"One of the points is outside the image bounds, [0,{self.scale}]X[0,{self.scale}]." ) # Copy the data in and coerce points to lists. The coercion to list # allows us to accept tuples, as internally we need the points to be mutable. fill_lists = ( [self.fixed_fiducials, self.moving_fiducials] if are_fiducials else [self.fixed_targets, self.moving_targets] ) for p in points: fill_lists[0].append(list(p)) fill_lists[1].append(list(p)) self.update_display() def display_errors( fixed_fiducials, fixed_targets, FLE_errors, FRE_errors, TRE_errors, min_err=None, max_err=None, title="Registration Errors", ): if not min_err: min_err = min(FRE_errors[2], TRE_errors[2]) if not max_err: max_err = max(FRE_errors[3], TRE_errors[3]) print( "Mean FLE %.6f\t STD FLE %.6f\t Min FLE %.6f\t Max FLE %.6f" % FLE_errors[0:4] ) print( "Mean FRE %.6f\t STD FRE %.6f\t Min FRE %.6f\t Max FRE %.6f" % FRE_errors[0:4] ) print( "Mean TRE %.6f\t STD TRE %.6f\t Min TRE %.6f\t Max TRE %.6f" % TRE_errors[0:4] ) plt.figure(figsize=(9, 3.5), num=title) ax1 = plt.subplot(1, 2, 1) ax1.set_title("Registration Errors Distributions") ax1.yaxis.set_major_locator(MaxNLocator(integer=True)) bins = np.linspace( min(FLE_errors[2], FRE_errors[2], TRE_errors[2]), max(FLE_errors[3], FRE_errors[3], TRE_errors[3]), 20, ) plt.hist(FLE_errors[4], bins, alpha=0.3, label="FLE") plt.hist(FRE_errors[4], bins, alpha=0.3, label="FRE") plt.hist(TRE_errors[4], bins, alpha=0.3, label="TRE") plt.legend(loc="upper right") ax2 = plt.subplot(1, 2, 2) ax2.get_xaxis().set_visible(False) ax2.get_yaxis().set_visible(False) ax2.set_facecolor((0.8, 0.8, 0.8)) ax2.set_title("Spatial Variability of Registration Errors") collection = ax2.scatter( list(np.array(fixed_fiducials).T)[0], list(np.array(fixed_fiducials).T)[1], marker="o", c=FRE_errors[4], vmin=min_err, vmax=max_err, cmap=cm.hot, ) ax2.scatter( list(np.array(fixed_targets).T)[0], list(np.array(fixed_targets).T)[1], marker="s", c=TRE_errors[4], vmin=min_err, vmax=max_err, cmap=cm.hot, ) plt.colorbar(collection, shrink=0.8) plt.show()
InsightSoftwareConsortium/SimpleITK-Notebooks
Python/gui.py
Python
apache-2.0
64,301
[ "Gaussian" ]
9a0837e2e5c01e27f212ff11fd788f0d8ecd66110c089c6aae582c2f80d5a6e3
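The register() callback above flattens each (x, y) pair before handing the point sets to SimpleITK. A minimal standalone sketch of the same flow, with made-up coordinates and sitk.Euler2DTransform() standing in for whatever self.transform holds (both are assumptions, not values from the notebook):

import math
import SimpleITK as sitk

fixed  = [(10.0, 10.0), (90.0, 10.0), (50.0, 80.0)]   # invented fiducials
moving = [(12.0, 11.0), (91.0, 12.0), (52.0, 79.0)]

# LandmarkBasedTransformInitializer expects flat coordinate lists,
# [x0, y0, x1, y1, ...] -- the same comprehension register() uses.
flatten = lambda pts: [c for p in pts for c in p]

tx = sitk.LandmarkBasedTransformInitializer(
    sitk.Euler2DTransform(), flatten(fixed), flatten(moving)
)

# As in the GUI, the inverse maps moving points back into fixed space;
# the residual distances are the fiducial registration errors (FRE).
inv = tx.GetInverse()
fre = [math.dist(f, inv.TransformPoint(m)) for f, m in zip(fixed, moving)]
print(["%.3f" % e for e in fre])

Note, separately, that reset() rebuilds self.FLE as [[0.0, 0.0]] * n; list multiplication repeats one inner list n times, so the in-place updates in noise() (fle[0] += dx) hit every entry at once. A comprehension such as [[0.0, 0.0] for _ in ...] keeps the entries independent.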
""" This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit. The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well as testing instructions are located at http://amzn.to/1LzFrj6 For additional samples, visit the Alexa Skills Kit Getting Started guide at http://amzn.to/1LGWsLG """ from __future__ import print_function import urllib.request # --------------- Helpers that build all of the responses ---------------------- def build_speechlet_response(title, output, reprompt_text, should_end_session, debug): return { 'outputSpeech': { 'type': 'PlainText', 'text': output }, 'card': { 'type': 'Simple', 'title': "SessionSpeechlet - " + title, 'content': "SessionSpeechlet - " + output }, 'reprompt': { 'outputSpeech': { 'type': 'PlainText', 'text': reprompt_text } }, 'shouldEndSession': should_end_session, 'debug': debug } def build_response(session_attributes, speechlet_response): return { 'version': '1.0', 'sessionAttributes': session_attributes, 'response': speechlet_response } # --------------- Functions that control the skill's behavior ------------------ def get_welcome_response(): """ If we wanted to initialize the session to have some attributes we could add those here """ session_attributes = {} card_title = "Welcome" speech_output = "I will " + \ "start transcribing" + \ ". You can ask me to stop transcribing anytime. " reprompt_text = " " debug = "starting reading" captioning = urllib.request.urlopen("https://1d496ef7.ngrok.io/start").read() #print(captioning) print(speech_output) should_end_session = False return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session, debug)) def handle_session_end_request(): card_title = "Session Ended" speech_output = "Thank you for trying the Alexa Skills for transcribing. " \ "Bye! " # Setting this to true ends the session and exits the skill. should_end_session = True debug = " " return build_response({}, build_speechlet_response( card_title, speech_output, None, should_end_session, debug)) def create_transcribe_attribute(startTrans): return {"startTrans": startTrans} def set_transcribe_in_session(intent, session): card_title = intent['name'] session_attributes = {} should_end_session = False speech_output = " " reprompt_text = " " debug = "starting reading" #print(captioning) print(speech_output) should_end_session = True return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session, debug)) def get_transcribe_from_session(intent, session): session_attributes = {} reprompt_text = None if session.get('attributes', {}) and "startTrans" in session.get('attributes', {}): startTrans = session['attributes']['startTrans'] speech_output = " " should_end_session = True else: speech_output = "I'm not sure what you mean. " \ "Please try again." should_end_session = False debug = " " # Setting reprompt_text to None signifies that we do not want to reprompt # the user. If the user does not respond or says something that is not # understood, the session will end. 
return build_response(session_attributes, build_speechlet_response( intent['name'], speech_output, reprompt_text, should_end_session, debug)) # --------------- Events ------------------ (Alexa is called) def on_session_started(session_started_request, session): """ Called when the session starts """ print("on_session_started requestId=" + session_started_request['requestId'] + ", sessionId=" + session['sessionId']) def on_launch(launch_request, session): """ Called when the user launches the skill without specifying what they want """ print("on_launch requestId=" + launch_request['requestId'] + ", sessionId=" + session['sessionId']) # Dispatch to your skill's launch return get_welcome_response() def on_intent(intent_request, session): """ Called when the user specifies an intent for this skill """ print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId']) intent = intent_request['intent'] intent_name = intent_request['intent']['name'] # Dispatch to your skill's intent handlers if intent_name == "startTransIsIntent": return set_transcribe_in_session(intent, session) elif intent_name == "AMAZON.HelpIntent": return get_welcome_response() elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent": return handle_session_end_request() else: raise ValueError("Invalid intent") def on_session_ended(session_ended_request, session): """ Called when the user ends the session. Is not called when the skill returns should_end_session=true """ print("on_session_ended requestId=" + session_ended_request['requestId'] + ", sessionId=" + session['sessionId']) # add cleanup logic here # --------------- Main handler ------------------ def lambda_handler(event, context): """ Route the incoming request based on type (LaunchRequest, IntentRequest, etc.) The JSON body of the request is provided in the event parameter. """ print("event.session.application.applicationId=" + event['session']['application']['applicationId']) """ Uncomment this if statement and populate with your skill's application ID to prevent someone else from configuring a skill that sends requests to this function. """ # if (event['session']['application']['applicationId'] != # "amzn1.echo-sdk-ams.app.[unique-value-here]"): # raise ValueError("Invalid Application ID") if event['session']['new']: on_session_started({'requestId': event['request']['requestId']}, event['session']) if event['request']['type'] == "LaunchRequest": return on_launch(event['request'], event['session']) elif event['request']['type'] == "IntentRequest": return on_intent(event['request'], event['session']) elif event['request']['type'] == "SessionEndedRequest": return on_session_ended(event['request'], event['session'])
ItsNotABugItsAFeature/transcribe
alexa/startTranscribe.py
Python
mit
6,780
[ "VisIt" ]
b808fff5a3116ee16b4bbcd25c9217f64ae998adb26f8d830e289a7caf7e1a8c
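Since lambda_handler() only inspects a handful of event keys, the routing above can be smoke-tested offline with a synthetic LaunchRequest. The IDs below are placeholders, and urllib.request.urlopen is stubbed out because get_welcome_response() calls the (ephemeral) ngrok endpoint:

from unittest.mock import patch

event = {
    "session": {
        "new": True,
        "sessionId": "session-0001",                                  # placeholder
        "application": {"applicationId": "amzn1.echo-sdk-ams.app.example"},
        "attributes": {},
    },
    "request": {"type": "LaunchRequest", "requestId": "request-0001"},
}

# LaunchRequest -> on_session_started() -> on_launch() -> get_welcome_response()
with patch("urllib.request.urlopen") as fake_open:
    fake_open.return_value.read.return_value = b""
    print(lambda_handler(event, context=None))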
# # Copyright (C) 2007, Mark Lee # #http://rl-glue-ext.googlecode.com/ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # $Revision: 446 $ # $Date: 2009-01-23 04:20:21 +0100 (Fri, 23 Jan 2009) $ # $Author: brian@tannerpages.com $ # $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/types.py $ import copy class RL_Abstract_Type: def __init__(self,numInts=None,numDoubles=None,numChars=None): self.intArray = [] self.doubleArray = [] self.charArray = [] if numInts != None: self.intArray = [0]*numInts if numDoubles != None: self.doubleArray = [0.0]*numDoubles if numChars != None: self.charArray = ['']*numChars def sameAs(self,otherAbstractType): return self.intArray==otherAbstractType.intArray and self.doubleArray==otherAbstractType.doubleArray and self.charArray==otherAbstractType.charArray #this coolness added by btanner sept 30/2008 #it allows the subclasses to be used like myAction=Action.fromAbstractType(someAbstractType) @classmethod def fromAbstractType(cls, theAbstractType): retStruct=cls() retStruct.intArray=copy.deepcopy(theAbstractType.intArray) retStruct.doubleArray=copy.deepcopy(theAbstractType.doubleArray) retStruct.charArray=copy.deepcopy(theAbstractType.charArray) return retStruct class Action(RL_Abstract_Type): def __init__(self,numInts=None,numDoubles=None,numChars=None): RL_Abstract_Type.__init__(self,numInts,numDoubles,numChars) class Observation(RL_Abstract_Type): def __init__(self,numInts=None,numDoubles=None,numChars=None): RL_Abstract_Type.__init__(self,numInts,numDoubles,numChars) class Observation_action: def __init__(self,theObservation=None,theAction=None): if theObservation != None: self.o = theObservation else: self.o = Observation() if theAction != None: self.a = theAction else: self.a = Action() class Reward_observation_terminal: def __init__(self,reward=None, theObservation=None, terminal=None): if reward != None: self.r = reward else: self.r = 0.0 if theObservation != None: self.o = theObservation else: self.o = Observation() if terminal != None: self.terminal = terminal else: self.terminal = False class Reward_observation_action_terminal: def __init__(self,reward=None, theObservation=None, theAction=None, terminal=None): if reward != None: self.r = reward else: self.r = 0.0 if theObservation != None: self.o = theObservation else: self.o = Observation() if theAction != None: self.a = theAction else: self.a = Action() if terminal != None: self.terminal = terminal else: self.terminal = False
steckdenis/rlglue-py3
rlglue/types.py
Python
apache-2.0
4,169
[ "Brian" ]
7081833662a347c06898c6716b4bbab0854e511cbdc93aac605d6f767ed19c77
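The fromAbstractType classmethod noted in the comments gives every subclass a copy-constructor from any other RL_Abstract_Type; a short usage sketch against the classes above:

obs = Observation(numInts=2, numDoubles=1)
obs.intArray = [3, 7]
obs.doubleArray = [0.5]

# Deep-copies intArray/doubleArray/charArray into a fresh Action.
act = Action.fromAbstractType(obs)
assert act.sameAs(obs)

# The copies are independent: mutating one side breaks the equality.
act.intArray[0] = 99
assert not act.sameAs(obs)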
""" This package provides the packages and modules to perform IO from various input file formats and pymatgen objects. """
Dioptas/pymatgen
pymatgen/io/__init__.py
Python
mit
123
[ "pymatgen" ]
12e40db9cf06391ac34a9aaf3435b6f7a19f0e646dbe7aad13a7bcbb4f1cc53c
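For orientation (checked against current pymatgen, which may postdate this fork), the subpackage is what backs format round-tripping on core objects:

from pymatgen.core import Structure

# from_file() dispatches to the matching pymatgen.io parser by file name,
# and to() writes through the corresponding io module.
structure = Structure.from_file("POSCAR")    # parsed via pymatgen.io.vasp
structure.to(filename="structure.cif")       # written via pymatgen.io.cif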
#!/usr/bin/env python """ Miscellaneous computations. Authors: - Arno Klein, 2012-2013 (arno@mindboggle.info) http://binarybottle.com - Yrjo Hame, 2012 (yrjo.hame@gmail.com) Copyright 2013, Mindboggle team (http://mindboggle.info), Apache v2.0 License """ def sigmoid(values, gain, shift): """ Map values with sigmoid function to range [0,1]. Y(t) = 1/(1 + exp(-gain*(values - shift)) """ import numpy as np tiny = 0.000000001 # Make sure argument is a numpy array if type(values) != np.ndarray: values = np.array(values) return 1.0 / (1.0 + np.exp(-gain * (values - shift)) + tiny) def find_segment_endpoints(indices, neighbor_lists, likelihoods, step=1): """ Find endpoints in a region of connected vertices. These points are intended to serve as endpoints of fundus curves running along high-likelihood paths within a region (fold). This algorithm iteratively propagates from an initial high-likelihood seed toward the boundary of a region within thresholded subregions of decreasing likelihood. The first boundary point that is reached for each segmented branch serves as an endpoint. Note :: This algorithm suffers from the following problems: 1. If multiple boundary points are reached simultaneously, the choice of highest likelihood among them might not be appropriate. 2. The boundary may be reached before any other branches are discovered, especially if other branches are separated by low likelihood (shallow) vertices. 3. Segmentation may reach the top of a fold's wall before reaching the tip of its branch. Steps :: Initialize: R: Region/remaining vertices to segment (initially fold vertices) P: Previous segment (initially the maximum likelihood point) X: Excluded segment N: New/next segment (initially P) E: indices to endpoint vertices (initially empty) For each decreasing threshold, run recursive function creep(): Propagate P into R, and call these new vertices N. Propagate X into P, R, and N. Optionally remove points from N and R that are also in the expanded X. Remove P and N from R. Reassign P to X. If N is empty: Choose highest likelihood point in P as endpoint. Return endpoints E and remaining vertices R. else: Identify N_i different segments of N. For each segment N_i: If N_i large enough or if max(i)==1: Call recursive function creep() with new arguments. Return endpoints E and R, P, X, and N. 
Parameters ---------- indices : list of integers indices of the vertices to segment (such as a fold in a surface mesh) neighbor_lists : list of lists of integers indices to neighboring vertices for each vertex likelihoods : numpy array of floats fundus likelihood values for all vertices step : integer number of segmentation steps before assessing segments Returns ------- indices_endpoints : list of integers indices of surface mesh vertices that are endpoints Examples -------- >>> # Setup: >>> import os >>> import numpy as np >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars >>> from mindboggle.utils.mesh import find_neighbors_from_file >>> from mindboggle.features.fundi import find_endpoints >>> from mindboggle.utils.plots import plot_vtk >>> path = os.environ['MINDBOGGLE_DATA'] >>> likelihood_file = os.path.join(path, 'arno', 'features', 'likelihoods.vtk') >>> likelihoods, name = read_scalars(likelihood_file, True, True) >>> vtk_file = os.path.join(path, 'arno', 'freesurfer', 'lh.pial.vtk') >>> neighbor_lists = find_neighbors_from_file(vtk_file) >>> step = 1 >>> min_size = 50 >>> # >>> #----------------------------------------------------------------------- >>> # Find endpoints on a single fold: >>> #----------------------------------------------------------------------- >>> fold_file = os.path.join(path, 'arno', 'features', 'fold11.vtk') >>> fold, name = read_scalars(fold_file) >>> indices = [i for i,x in enumerate(fold) if x != -1] >>> indices_endpoints = find_endpoints(indices, neighbor_lists, >>> likelihoods, step) >>> # Write results to VTK file and view: >>> likelihoods[indices_endpoints] = max(likelihoods) + 0.1 >>> rewrite_scalars(likelihood_file, 'find_endpoints.vtk', >>> likelihoods, 'endpoints_on_likelihoods_in_fold', fold) >>> plot_vtk('find_endpoints.vtk') >>> # >>> #----------------------------------------------------------------------- >>> # Find endpoints on every fold in a hemisphere: >>> #----------------------------------------------------------------------- >>> plot_each_fold = False >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk') >>> folds, name = read_scalars(folds_file) >>> fold_numbers = [x for x in np.unique(folds) if x != -1] >>> nfolds = len(fold_numbers) >>> endpoints = [] >>> for ifold, fold_number in enumerate(fold_numbers): >>> print('Fold {0} ({1} of {2})'.format(int(fold_number), ifold+1, nfolds)) >>> indices = [i for i,x in enumerate(folds) if x == fold_number] >>> if len(indices) > min_size: >>> indices_endpoints = find_endpoints(indices, neighbor_lists, likelihoods, step) >>> endpoints.extend(indices_endpoints) >>> # Plot each fold: >>> if plot_each_fold: >>> fold = -1 * np.ones(len(likelihoods)) >>> fold[indices] = 1 >>> likelihoods[indices_endpoints] = max(likelihoods) + 0.1 >>> rewrite_scalars(likelihood_file, 'find_endpoints.vtk', >>> likelihoods, 'endpoints_on_likelihoods_in_fold', fold) >>> plot_vtk('find_endpoints.vtk') >>> E = -1 * np.ones(len(likelihoods)) >>> E[endpoints] = 1 >>> # >>> # Write results to VTK file and view: >>> rewrite_scalars(folds_file, 'find_endpoints.vtk', >>> E, 'endpoints_on_folds', folds) >>> plot_vtk('find_endpoints.vtk') """ import numpy as np from mindboggle.labels.labels import extract_borders # Make sure arguments are numpy arrays if isinstance(likelihoods, list): likelihoods = np.array(likelihoods) # Parameters: min_size = 1 xstep = 1 # Threshold parameters: use_thresholds = True threshold_factor = 0.9 min_threshold = 0.1 # Recursive function for segmenting and finding 
endpoints: def creep(R, P, X, E, L, B, step, neighbor_lists, min_size=1): """ Recursively segment a mesh, creeping toward its edges to find endpoints. Steps :: Propagate P into R, and call these new vertices N. Propagate X into P, R, and N. Remove points from N and R that are also in the expanded X. Remove P and N from R. Reassign P to X. If N is empty: Choose highest likelihood point in P as endpoint. Return endpoints E and remaining vertices R. else: Identify N_i different segments of N. For each segment N_i: If N_i large enough or if max(i)==1: Call recursive function creep() with new arguments. Return E, R, P, X, and N. Parameters ---------- R : list of integers indices of vertices to segment (such as a fold) P : list of integers indices of previous segment vertices X : list of integers indices of segmented vertices to exclude from endpoint selection E: list of integers indices to endpoint vertices L : numpy array of floats likelihood values for all vertices step : integer number of segmentation steps before assessing segments neighbor_lists : list of lists of integers indices to neighboring vertices for each vertex min_size : integer minimum number of vertices for an endpoint segment Returns ------- R : list of integers remaining vertices to segment P : list of integers previous segment X : list of integers excluded segment E: list of integers endpoints """ import numpy as np from mindboggle.utils.segment import segment # Expand X and exclude endpoint selection?: rmX = False #----------------------------------------------------------------------- # Propagate P into R, and call these new vertices N: #----------------------------------------------------------------------- PintoR = segment(R, neighbor_lists, min_region_size=1, seed_lists=[P], keep_seeding=False, spread_within_labels=False, labels=[], label_lists=[], values=[], max_steps=step) PN = [i for i,x in enumerate(PintoR) if x != -1] # Remove P (seeds) from N: N = list(frozenset(PN).difference(P)) #print(' {0} vertices in the new segment'.format(len(N))) #----------------------------------------------------------------------- # Propagate X into R (including P and N): #----------------------------------------------------------------------- if rmX: if X: RPN = R[:] RPN.extend(PN) XintoR = segment(RPN, neighbor_lists, min_region_size=1, seed_lists=[X], keep_seeding=False, spread_within_labels=False, labels=[], label_lists=[], values=[], max_steps=xstep) X = [i for i,x in enumerate(XintoR) if x != -1] print(' {0} vertices spread from previously segmented'.format(len(X))) # Remove points from N and R that are also in the expanded X: N = list(frozenset(N).difference(X)) R = list(frozenset(R).difference(X)) # Reassign P to X: X.extend(P) # Remove P and N from R: R = list(frozenset(R).difference(P)) R = list(frozenset(R).difference(N)) #----------------------------------------------------------------------- # If N is empty, return endpoints: #----------------------------------------------------------------------- BandN = list(frozenset(B).intersection(N)) if not N: pass elif BandN: # Choose highest likelihood point in P as endpoint: E.append(BandN[np.argmax(L[BandN])]) #----------------------------------------------------------------------- # If N is not empty, assign as P and continue segmenting recursively: #----------------------------------------------------------------------- else: # Identify N_i different segments of N: N_segments = segment(N, neighbor_lists, min_region_size=1) unique_N = [x for x in np.unique(N_segments) if x!=-1] 
n_segments = len(unique_N) # For each segment N_i: for n in unique_N: N_i = [i for i,x in enumerate(N_segments) if x==n] # If N_i large enough or if max(i)==1: if len(N_i) >= min_size or n_segments==1: # Call creep() with new arguments: R, P, X, E = creep(R, N_i, X, E, L, B, step, neighbor_lists, min_size) # Return endpoints E and remaining vertices R: return R, P, X, E # Extract boundary: D = np.ones(len(likelihoods)) D[indices] = 2 B, foo1, foo2 = extract_borders(range(len(likelihoods)), D, neighbor_lists) # Initialize R, X, and E: R = [] X = [] E = [] indices_endpoints = [] # Initialize P and threshold with the maximum likelihood point: L = likelihoods Imax = indices[np.argmax(L[indices])] P = [Imax] threshold = L[Imax] # Include new vertices with lower likelihood values: if use_thresholds: # Iterate endpoint extraction until all vertices have been segmented: continue_loop = True while continue_loop: prev_threshold = threshold # If threshold above minimum, update R based on the threshold: if threshold > min_threshold: #if X: threshold = threshold_factor * np.mean(L[X]) threshold = threshold_factor * threshold T = [x for x in indices if L[x] >= threshold if L[x] < prev_threshold] if not T: decrease_threshold = True while decrease_threshold: threshold *= threshold_factor T = [x for x in indices if L[x] >= threshold if L[x] < prev_threshold] if T or threshold < min_threshold: decrease_threshold = False R.extend(T) # If threshold below minimum, update and exit: else: T = [x for x in indices if L[x] < prev_threshold] R.extend(T) continue_loop = False # Run recursive function creep() to return endpoints: R, P, X, E = creep(R, P, X, E, L, B, step, neighbor_lists, min_size) E = np.unique(E).tolist() # Print message: if len(R) == 1: str1 = 'vertex' else: str1 = 'vertices' if len(E) == 1: str2 = 'endpoint' else: str2 = 'endpoints' print(' {0} remaining {1}, {2} {3} (threshold: {4:0.3f})'. format(len(R), str1, len(E), str2, threshold)) # Don't use thresholds -- include all vertices: else: R = indices print(' Segment {0} vertices'.format(len(R))) # Run recursive function creep() to return endpoints: R, P, X, E = creep(R, P, X, E, L, B, step, neighbor_lists, min_size) indices_endpoints = E return indices_endpoints def track(R, P, T, L, B, neighbor_lists): """ Recursively run tracks along a mesh, through vertices of high likelihood. At each vertex, continue, branch, or terminate. Steps :: R is the set of remaining (untracked) vertices. Find the neighborhood N for point P in R. Remove N from R. For each neighborhood vertex N_i: Remove N_i from N. Find the neighbors for N_i also in N. If N_i has the maximum value in its restricted neighborhood: Call recursive function track() with N_i as P if N_i not in B. 
Parameters ---------- R : list of integers indices of vertices (such as a fold in a surface mesh) P : integer index to vertex T : list of lists of pairs of integers index pairs are track segments L : numpy array of floats likelihood values for all vertices B : list of integers indices of boundary vertices for R neighbor_lists : list of lists of integers indices to neighboring vertices for each vertex Returns ------- R : list of integers remaining vertices T : list of lists of pairs of integers track segments """ import numpy as np # Find the neighborhood N for point P in R: N = neighbor_lists[P] N = list(frozenset(N).intersection(R)) print('N', N) if N: # Remove N from R: R = list(frozenset(R).difference(N)) # For each neighborhood vertex N_i: Imax = np.argmax(L[N]) print(Imax) for N_i in [N[Imax]]: # Find the neighbors of N_i also in N: N2 = list(frozenset(neighbor_lists[N_i]).intersection(N)) print('N2', N2) if N2: # If N_i has the maximum value in its restricted neighborhood: if L[N_i] >= max(L[N2]): # Add track segment: T.append([P, N_i]) print('T', T) # Call recursive function track() with N_i as P if N_i not in B: if N_i not in B: R, T = track(R, N_i, T, L, B, neighbor_lists) return R, T #----------------------------------------------------------------------------- # Shrink segments #----------------------------------------------------------------------------- def shrink_segments(regions, segments, depths, shrink_factor=0.25, only_multiple_segments=False): """ Shrink segments in a segmented surface mesh by a fraction of its maximum depth, for all segments or for segments in regions with multiple segments. Parameters ---------- regions : list or array of integers region IDs for all vertices, indicating inclusion in a region (default -1) segments : numpy array of integers segment IDs for all vertices, indicating inclusion in a segment (default -1) depths : numpy array of floats depth values for all vertices (default -1) shrink_factor : float shrink each region of connected vertices to this fraction of its maximum depth only_multiple_segments : Boolean shrink only segments in regions with multiple segments (otherwise shrink all segments) Returns ------- shrunken_segments : list of integers shrunken segment numbers for all vertices (default -1) -- non-shrunken segments are removed Examples -------- >>> # Segment folds with watershed(), then shrink these segments: >>> import os >>> import numpy as np >>> from mindboggle.utils.mesh import find_neighbors >>> from mindboggle.utils.segment import watershed >>> from mindboggle.x.misc import shrink_segments >>> from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars >>> path = os.environ['MINDBOGGLE_DATA'] >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk') >>> folds, name = read_scalars(folds_file) >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk') >>> faces, lines, indices, points, npoints, depths, name, input_vtk = read_vtk(depth_file, >>> return_first=True, return_array=True) >>> indices = np.where(depths > 0.11)[0] # high to speed up >>> neighbor_lists = find_neighbors(faces, npoints) >>> segments = watershed(depths, points, indices, neighbor_lists, min_size=1, >>> depth_factor=0.25, depth_ratio=0.1, tolerance=0.01)[0] >>> # >>> shrink_factor = 0.25 >>> shrunken_segments = shrink_segments(folds, segments, depths, >>> shrink_factor, only_multiple_segments=True) >>> # >>> # Write results to vtk file and view: >>> rewrite_scalars(depth_file, 'shrink_segments.vtk', >>> 
shrunken_segments, 'shrunken_segments', shrunken_segments) >>> from mindboggle.utils.plots import plot_vtk >>> plot_vtk('shrink_segments.vtk') """ import numpy as np print('Shrink segments') remove_fraction = 1 - shrink_factor shrunken_segments = -1 * np.ones(len(depths)) # Make sure arguments are numpy arrays if not isinstance(segments, np.ndarray): segments = np.array(segments) if not isinstance(depths, np.ndarray): depths = np.array(depths) # Shrink only segments in regions with multiple segments if only_multiple_segments: print(' Shrink each segment to {0:.2f} of its depth for regions with ' 'multiple segments'.format(shrink_factor)) # For each region unique_regions = [x for x in np.unique(regions) if x > -1] for n_region in unique_regions: # Check to see if there are multiple segments in the region indices_region = [i for i,x in enumerate(regions) if x == n_region] segments_in_region = [x for x in np.unique(segments[indices_region]) if x > -1] if len(segments_in_region) > 1: # Shrink each segment in the region for n_segment in segments_in_region: indices_segment = [i for i,x in enumerate(segments) if x == n_segment] indices_segment = list(frozenset(indices_segment).intersection(indices_region)) depth_threshold = remove_fraction * np.max(depths[indices_segment]) indices_segment = [x for x in indices_segment if depths[x] > depth_threshold] shrunken_segments[indices_segment] = n_segment # Shrink all segments else: print(' Shrink each segment to {0:.2f} of its depth'.format(shrink_factor)) unique_segments = [x for x in np.unique(segments) if x != -1] for n_segment in unique_segments: indices_segment = [i for i,x in enumerate(segments) if x == n_segment] depth_threshold = remove_fraction * np.max(depths[indices_segment]) indices_segment = [x for x in indices_segment if depths[x] > depth_threshold] shrunken_segments[indices_segment] = n_segment return shrunken_segments
binarybottle/mindboggle_sidelined
misc.py
Python
apache-2.0
22,033
[ "VTK" ]
705c6a0654f1dbcc8e0ad8344bf9e844bcd8b514656bc3b544488e2d8921eee5
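shrink_segments() is easy to sanity-check on synthetic arrays (toy data below, not Mindboggle output): with shrink_factor=0.25, each segment keeps only the vertices deeper than (1 - 0.25) of its maximum depth.

import numpy as np

regions  = np.array([0, 0, 0, 0, 0, 0])               # one region...
segments = np.array([1, 1, 1, 2, 2, 2])               # ...holding two segments
depths   = np.array([1.0, 2.0, 4.0, 1.0, 3.0, 4.0])

shrunken = shrink_segments(regions, segments, depths,
                           shrink_factor=0.25,
                           only_multiple_segments=True)

# Per-segment threshold is 0.75 * 4.0 = 3.0, so only the two
# depth-4.0 vertices keep their segment labels:
print(shrunken)   # [-1. -1.  1. -1. -1.  2.]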
from django.shortcuts import render from django.http import HttpResponse from django.template.loader import get_template from django.shortcuts import render_to_response from datetime import datetime, timedelta import httplib, urllib import xml.etree.ElementTree as ET # Create your views here. def principal(request): members = [ {"name": "Andres del Rio", "id": 12109014, "hours": 0, "number": '316 5536752'}, {"name": "Yesid Ortiz", "id": 11679382, "hours": 0, "number": '316 3458730'}, {"name": "Luis Salinas", "id": 12011890, "hours": 0, "number": '301 3365150'}, {"name": "Alejandro Figueroa", "id": 12412894, "hours": 0, "number": 'NA'}, {"name": "Juan Fernando Rojas", "id": 12414671, "hours": 0, "number": 'NA'}, {"name": "Ernesto Guitierrez", "id": 12433433, "hours": 0, "number": 'NA'}, ] holidays = [ datetime.strptime('20170109', '%Y%m%d'), #Epiphany datetime.strptime('20170320', '%Y%m%d'), #St Josephs Day datetime.strptime('20170413', '%Y%m%d'), #Maundy Thursday datetime.strptime('20170414', '%Y%m%d'), #Good Friday datetime.strptime('20170501', '%Y%m%d'), #Labour Day datetime.strptime('20170529', '%Y%m%d'), #Ascension Day datetime.strptime('20170619', '%Y%m%d'), #Corpus Christi datetime.strptime('20170626', '%Y%m%d'), #Sacred Heart datetime.strptime('20170703', '%Y%m%d'), #Saint Peter and Saint Paul datetime.strptime('20170720', '%Y%m%d'), #Declaration of Independenc datetime.strptime('20170807', '%Y%m%d'), #Battle of Boyaca datetime.strptime('20170815', '%Y%m%d'), #Assumption Day datetime.strptime('20171016', '%Y%m%d'), #Columbus Day datetime.strptime('20171106', '%Y%m%d'), #All Saints Day datetime.strptime('20171113', '%Y%m%d'), #Independece of Cartagena datetime.strptime('20171208', '%Y%m%d'), #Immaculate Conception datetime.strptime('20171225', '%Y%m%d'), #Christmas ] templ = get_template("index.html") #careful differentiating between HTTPConnection and HTTPSConnection conn = httplib.HTTPSConnection("zemogatime.basecamphq.com") conn.connect() headers = {"Authorization": "ffd9b9cd58b4227a10ea2fa3f89dc3a547e567e1=", } current_time = datetime.now() #Check the report for the day before the_date = current_time + timedelta(days=-1) #Check if the date is monday. From the documentation: Return the day of the week as an integer, where Monday is 0 and Sunday is 6. if current_time.weekday() == 0: the_date = current_time + timedelta(days=-3) #Check if the current date is a holiday for holiday in holidays: next_day_after_holiday = holiday + timedelta(days=+1) #If today is the same day after a holiday if current_time.strftime('%Y%m%d') == next_day_after_holiday.strftime('%Y%m%d'): if holiday.weekday() == 0: the_date = current_time + timedelta(days=-4) #substract -4 days if the holiday was on monday elif holiday.weekday() == 6: the_date = current_time + timedelta(days=-3) #If sunday substract to check for friday's date else: the_date = current_time + timedelta(days=-2) #substract 2 days (-1 day of the holiday -1 day for the date of the review) timeFormated = the_date.strftime('%Y%m%d') #<yyyy><mm><dd> #params = urllib.urlencode({'from': timeFormated}) path = '/time_entries/report.xml?from=' + timeFormated print path conn.request('GET', path, {}, headers) response = conn.getresponse() print "STATUS: " + str(response.status) + " - REASON: " + response.reason if response.status == httplib.OK: print "SUCCESS: Request succesfull!" 
#print response.read() xml = ET.fromstring(response.read()) for time_entry in xml.findall('time-entry'): name = time_entry.find('person-name').text person_id = time_entry.find('person-id').text hours = time_entry.find('hours').text #print(name, person_id, hours) for person in members: if str(person['id']) == person_id: #print (name, person_id, hours) new_hours = person['hours'] + float(hours) person['hours'] = new_hours else: print "ERROR: Something went wrong with the request" #Useful to have just in case #user_names = (members['name'] for person in members) #Get all the user names from a list of dictionaries html = templ.render({"fecha": the_date, "lista": members}) return HttpResponse(html)
NullSleep/Super-Fun-Time
main_content/views.py
Python
gpl-3.0
4,790
[ "COLUMBUS" ]
c718e012be2b3af036f26ec55be0878a4b0fc3b77ee723ef21946caacd9eaf54
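The parsing loop in principal() assumes each time-entry element carries person-name, person-id and hours children; the tag names below come from that code, while the time-entries root is an assumption about Basecamp's report format:

import xml.etree.ElementTree as ET

sample = """<time-entries>
  <time-entry>
    <person-name>Andres del Rio</person-name>
    <person-id>12109014</person-id>
    <hours>2.5</hours>
  </time-entry>
</time-entries>"""

for entry in ET.fromstring(sample).findall('time-entry'):
    # Mirrors the accumulation in the view: match members on person-id,
    # then add float(hours) to that member's running total.
    print("%s (%s): %s hours" % (entry.find('person-name').text,
                                 entry.find('person-id').text,
                                 entry.find('hours').text))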
''' Plugin for CudaText editor Authors: Andrey Kvichansky (kvichans on github.com) Version: '0.9.6 2016-12-06' ToDo: (see end of file) ''' import re, colorsys import cudatext as app #from cudatext import ed #import cudatext_cmd as cmds import cudax_lib as apx from .cd_plug_lib import * _ = get_translation(__file__) # I18N pass; # Logging pass; LOG = (-2==-2) # Do or dont logging. get_hist_ = lambda k,v: get_hist(k, v, module_name='palettes') set_hist_ = lambda k,v: set_hist(k, v, module_name='palettes') Rc = lambda c: (c&0x0000FF) Gc = lambda c: (c&0x00FF00) >> 8 Bc = lambda c: (c&0xFF0000) >> 16 int_to_rgb = lambda clr: ( 255&clr , 255&(clr>>8) , 255&(clr>>16)) int_to_rgb01= lambda clr: ((255&clr)/255 , (255&(clr>>8))/255 , (255&(clr>>16))/255) rgb_to_int = lambda r,g,b: r | (g <<8) | (b <<16) rgb01_to_int= lambda r,g,b: int(255*r) | (int(255*g)<<8) | (int(255*b)<<16) clr_h2i = apx.html_color_to_int BLUE = 0xff0000 YELLOW = 0x00ffff COLOR_NAMES={} PLTYPES = [ '60 colors: 3*20' , '142 colors: 7-hexagon' , '216 web-colors: 9-hexagon' , '216 web-colors: dragon' , '216 web-colors: candles' # , '343 colors: 18*19' , '3221 colors: 7-hexagon, dialog' , '146 named colors' , '420 named colors: 12*35' , '1431 named colors: 27*53' ] def dlg_color_palette(caption, old_color=None, palette_type=None, i18n={}): """ Show dlg to choose new color or view old one. Params caption (str) Title for dlg. old_color (int) Old color as RGB-int. None if no. palette_type (str) Palette name i18n (dict) Caption for control. Wait keys 'cancel', 'named', 'nearby', 'nocolor' Return (int) Selected color COLOR_NONE (int) If "No color" None If "Cancel" """ pass; #LOG and log('caption, old_color, palette_type={}',(caption, old_color, palette_type)) pass; sltr = 0 pass; #sltr = 37 # for 7 pass; #sltr = 43 # for 6 pass; rc4exch_src = None new_color = None active_plts = get_hist_('active_palettes', apx.get_opt('active_palettes', '|'.join(PLTYPES))).split('|') # active_plts = apx.get_opt('active_palettes', '|'.join(PLTYPES)).split('|') if not active_plts: # All if never Config active_plts = PLTYPES[:] else: active_plts = [plt for plt in active_plts if plt in PLTYPES] if palette_type in PLTYPES and \ palette_type not in active_plts: # Add to list if in params active_plts += [palette_type] if not palette_type: # Use last or first if not in params palette_type = get_hist_('last_palette_type', apx.get_opt('last_palette_type', active_plts[0])) # palette_type = apx.get_opt('last_palette_type', active_plts[0]) palette_type = palette_type if palette_type in active_plts else active_plts[0] grey_clr_for_plt = get_hist_('last_palette_grey_level', apx.get_opt('last_palette_grey_level', 0)) # grey_clr_for_plt = apx.get_opt('last_palette_grey_level', 0) view_more = get_hist_('palette_more', apx.get_opt('palette_more', False)) # view_more = apx.get_opt('palette_more', False) cnRGBs = [(int_to_rgb(c), c, s) for (c, s) in COLOR_NAMES.items()] brd_c = clr_h2i('#b6feff') MIN_PLT_WIDTH = 555 C_NMED = i18n.get('named' ,_('M&ark named')) C_NRBY = i18n.get('nearby' ,_('N&earby')) C_NOTH = i18n.get('nocolor' ,_('&No color')) C_CANC = i18n.get('cancel' ,_('Cancel')) C_CNFG = i18n.get('config' ,_('Config...')) H_TITL = i18n.get('help_hint' ,_(' (Shift+Click to preview. Ctrl+Click to copy data)')) H_MORE = i18n.get('more_hint' ,_('Show/Hide advanced options')) H_NMED = i18n.get('named_hint' ,_('Mark named colors with "!"')) H_NRBY = i18n.get('nearby_hint',_('Assign names to some colors, which are "near" named colors. 
Marks show distance to these near colors:' '\r"!" if distance is very low,' '\r"." if distance is small, ' '\r".." all others' )) H_NFLT = i18n.get('inname_hint',_('Point colors which name includes the string')) H_NEWC = i18n.get('new_c_hint' ,_('New color')) H_OLDC = i18n.get('old_c_hint' ,_('Old color')) def clr_data(clr, vw_nrby, nflt): R, G, B = int_to_rgb(clr) H, S, V = list(int(255*c) for c in colorsys.rgb_to_hsv(R/255, G/255, B/255)) nmd = clr in COLOR_NAMES nm = COLOR_NAMES.get(clr, '') sure,ma = '', 0 cn = clr if nm else 0 if not nm and vw_nrby: d,ma, \ nm,cn = min( (abs(R-cnR)+abs(G-cnG)+abs(B-cnB) , max(abs(R-cnR),abs(G-cnG),abs(B-cnB)) , sn, c) for ((cnR, cnG, cnB), c, sn) in cnRGBs) sure = sure_by_ma(ma) nm = nm if sure else '' fltd = nflt and nm and nflt in nm.upper() # flt_c = YELLOW # pass #;flt_s = '' # if not fltd:pass # elif (R+G+B)>230*3: # flt_c = BLUE #;flt_s = 'L' # elif V>220 and not abs(H-170) < 50: # flt_c = BLUE #;flt_s = 'Lb' # elif B>=R and B>=G: # pass #;flt_s = 'B' # elif R>220: # flt_c = BLUE #;flt_s = 'R' # elif G>220: # flt_c = BLUE #;flt_s = 'G' # elif R+G>190*2: # flt_c = BLUE #;flt_s = 'RG' hint = f('{h}\rRGB:{R},{G},{B}\rHSV:{H},{S},{V}{n}' , h=apx.int_to_html_color(clr).upper() , R=R, G=G, B=B , H=H, S=S, V=V , n='\r'+nm+('' if nmd else f('\r({})',apx.int_to_html_color(cn).upper())) if nm else '') return (R,G,B, H,S,V ,nmd,nm,sure,ma,cn ,fltd #,flt_c #,flt_s ,hint) fid = 'pltp' vals = dict(pltp=active_plts.index(palette_type) ,nmed=False ,nrby=False ,nflt='' ) pre_plt = palette_type pre_grey = grey_clr_for_plt while True: C_MORE = '&<<' if view_more else '&>>' if pre_plt != vals.get('pltp', pre_plt) or pre_grey != grey_clr_for_plt: clrs, \ w,h, \ sp_clrs, \ sp_w,sp_h = _dlg_color_palette_clrs(active_plts[vals['pltp']], grey_clr_for_plt) pre_plt = vals['pltp'] pre_grey = grey_clr_for_plt if view_more: vw_nmed = vals['nmed'] # Point of named color vw_nrby = vals['nrby'] and vw_nmed # Point color names with nearby nflt = vals['nflt'].upper() # Filter value else: vw_nmed = False vw_nrby = False nflt = '' pass; #LOG and log('nflt={}',(nflt)) max_cnt = max(len(r) for ir,r in enumerate( clrs)) sp_max_cnt = max(len(r) for ir,r in enumerate(sp_clrs)) if sp_clrs else 0 plt_w = max( w * max_cnt ,sp_w * sp_max_cnt ,MIN_PLT_WIDTH) sure_by_ma = lambda ma:'!' if ma<=3 else \ '.' if ma<=9 else \ '..' cnts = [] # Main plt pass; #LOG and log('?? main plt (==',()) for irow,crow in enumerate(clrs): shft = (plt_w - w *len(crow)) // 2 for icol,clr in enumerate(crow): if clr is None: continue (R,G,B, H,S,V ,nmd,nm,sure,ma,cn ,fltd #,flt_c #,flt_s ,hint)= clr_data(clr, vw_nrby, nflt) if nflt and not fltd: continue#for fg_c = (0x000000 if (R+G+B)/3>128 or G>240 else 0xffffff) cnts += [dict(tp='clr' ,cid =f('c{:2}{:2}',irow,icol) ,t =10+irow*h ,h=h+1 ,l =shft+10+icol*w ,w=w+1 ,props =f('1,{bg},{fg},{bord_c}', bg=clr, fg=fg_c, bord_c= brd_c ) ,cap =('!' if vw_nmed and nmd else sure # +flt_s #+ str(ma) +(f('{}{}{}', R//sltr, G//sltr, B//sltr) if sltr else '') ) ,hint =hint # +(f('\rrc={},{}',irow,icol) if sltr else '') ,rc =(irow,icol) ,c = clr ,act ='1' )] plt_h = h * len(clrs) pass; #LOG and log('ok main plt',()) pass; #LOG and log('?? 
spec plt',()) # Spec plt for irow,crow in enumerate(sp_clrs): shft = (plt_w - sp_w *len(crow)) // 2 for icol,clr in enumerate(crow): if clr is None: continue (R,G,B, H,S,V ,nmd,nm,sure,ma,cn ,fltd #,flt_c #,flt_s ,hint)= clr_data(clr, vw_nrby, nflt) fg_c = (0x000000 if (R+G+B)/3>128 or G>240 else 0xffffff) cnts += [dict(tp='clr' ,cid =f('s{}', clr) ,t =plt_h+10+irow*sp_h ,h=sp_h+1 ,l =shft+10+icol*sp_w ,w=sp_w+1 ,props =f('1,{bg},{fg},{bord_c}', bg=clr, fg=fg_c, bord_c=brd_c) ,cap= '^' if clr==grey_clr_for_plt else '' ,hint =hint ,act= '1' )] sp_plt_h = sp_h * len(sp_clrs) pass; #LOG and log('ok spec plt',()) plt_h = plt_h + sp_plt_h if old_color is not None: (old_R,old_G,old_B, old_H,old_S,old_V ,old_nmd,old_nm,old_sure,old_ma,old_cn ,old_fltd #,old_flt_c #,flt_s ,old_hint) = clr_data(old_color, vw_nrby, nflt) idold = 'c'+str(old_color) if new_color is not None: (new_R,new_G,new_B, new_H,new_S,new_V ,new_nmd,new_nm,new_sure,new_ma,new_cn ,new_fltd #,new_flt_c #,flt_s ,new_hint) = clr_data(new_color, vw_nrby, nflt) idnew = 'c'+str(new_color) if view_more: cnts+= [dict(cid='aflt' ,tp='bt' ,tid='----' ,l= 0 ,w=0 ,cap='' ,props='1' )] #default cnts+= [dict(cid='pltp' ,tp='cb-ro',tid='noth' ,l= 10 ,w=385,items=active_plts+[C_CNFG] ,act='1' )] cnts+= [dict(cid='nflt' ,tp='ed' ,tid='----' ,l= 10 ,w=100 ,hint=H_NFLT )] cnts+= [dict(cid='nmed' ,tp='ch' ,tid='----' ,l=130 ,w=150,cap=C_NMED ,hint=H_NMED ,act='1' )] cnts+= [dict(cid='nrby' ,tp='ch' ,tid='----' ,l=260 ,w=150,cap=C_NRBY ,hint=H_NRBY ,act='1',en=vals['nmed'])] if new_color is not None: cnts+= [dict(cid=idnew ,tp='clr' ,t=10+plt_h+ 7 ,l=10+plt_w-165 ,w= 50, h=30,cap='' ,props=f('1,{bg},0,{bc}',bg=new_color,bc=brd_c) ,hint=H_NEWC+'\r'+new_hint ,c=new_color,act='1' )] if old_color is not None: cnts+= [dict(cid=idold ,tp='clr' ,t=10+plt_h+36 ,l=10+plt_w-165 ,w= 50, h=30,cap='' ,props=f('1,{bg},0,{bc}',bg=old_color,bc=brd_c) ,hint=H_OLDC+'\r'+old_hint ,c=old_color,act='1' )] cnts+= [dict(cid='more' ,tp='bt' ,tid='----' ,l=10+plt_w-215 ,w= 45,cap=C_MORE ,hint=H_MORE )] cnts+= [dict(cid='noth' ,tp='bt' ,t=10+plt_h+10 ,l=10+plt_w-110 ,w=110,cap=C_NOTH )] cnts+= [dict(cid='----' ,tp='bt' ,t=10+plt_h+40 ,l=10+plt_w-110 ,w=110,cap=C_CANC )] else: cnts+= [dict(cid='more' ,tp='bt' ,tid='----' ,l=10 ,w= 45,cap=C_MORE ,hint=H_MORE )] if old_color is not None: cnts+= [dict(cid=idold ,tp='clr' ,t=10+plt_h+ 7 ,l=10+plt_w-275 ,w= 50, h=30,cap='' ,props=f('1,{bg},0,{bc}',bg=old_color,bc=brd_c) ,hint=H_OLDC+'\r'+old_hint ,c=old_color,act='1' )] cnts+= [dict(cid='noth' ,tp='bt' ,t=10+plt_h+10 ,l=10+plt_w-220 ,w=110,cap=C_NOTH )] cnts+= [dict(cid='----' ,tp='bt' ,t=10+plt_h+10 ,l=10+plt_w-110 ,w=110,cap=C_CANC )] dlg_w = 10 + plt_w + 10 dlg_h = 5 + plt_h + 15 + 30 + (30 if view_more else 0) #+ 5 pass; #LOG and log('?? 
dlg_wrapper ==)',()) aid,vals,*_t = dlg_wrapper(caption + (H_TITL if view_more else '') ,dlg_w, dlg_h, cnts ,vals if view_more else {} ,focus_cid=fid) pass; #LOG and log('aid,vals={}',(aid,vals)) if not aid or aid=='----': return None if aid=='more': view_more = not view_more set_hist_( 'palette_more', view_more) # apx.set_opt('palette_more', view_more) if view_more: fid = 'pltp' vals = dict(pltp=active_plts.index(palette_type) ,nmed=False ,nrby=False ,nflt='' ) continue#while scam = app.app_proc(app.PROC_GET_KEYSTATE, '') if app.app_api_version()>='1.0.143' else '' if sltr and \ aid=='noth' and sltr and scam=='sc': # Show 0-6 main plt pass; #LOG and log('clrs={}',(clrs)) pass; plt_s = '\n'.join( ' '.join( (f('{}{}{}',Rc(cl)//sltr, Gc(cl)//sltr, Bc(cl)//sltr) if cl is not None else 'NNN') for cl in r )+' ' for r in clrs ) + '\n' pass; plt_s = re.sub(r'(\S\S\S \S\S\S \S\S\S )', r'\1 ', plt_s) pass; app.app_proc(app.PROC_SET_CLIP, plt_s) pass; dlg_wrapper('Plt', 5+700+5, 5+320+5, [dict(cid='plt' ,tp='me',t=5,h=600 ,l=5,w=700, props='1,1,1')], dict(plt=plt_s)) pass; continue#while if aid=='noth': return app.COLOR_NONE if aid[0]=='c': cnt = [cnt for cnt in cnts if aid==cnt['cid']][0] new_color = cnt['c'] if scam=='': return new_color if sltr: pass; rc4exch = cnt['rc'] if scam=='c': app.app_proc(app.PROC_SET_CLIP, cnt['hint'].replace('\r', '\n')) if scam=='a' and sltr and rc4exch_src is not None and rc4exch!=rc4exch_src: pass; #LOG and log('?? clrs={}',clrs) pass; clrs[rc4exch_src[0]][rc4exch_src[1]] , \ clrs[rc4exch [0]][rc4exch [1]] = clrs[rc4exch [0]][rc4exch [1]] , \ clrs[rc4exch_src[0]][rc4exch_src[1]] pass; LOG and log('exch! {} with {}',rc4exch,rc4exch_src) pass; #LOG and log('!! clrs={}',clrs) # continue#while if scam=='s': if sltr: pass; rc4exch_src = rc4exch pass; LOG and log('rc4exch_src={}',(rc4exch_src)) continue#while fid = 'nflt' if aid in ('nmed', 'nrby', 'aflt') else 'pltp' pass; #LOG and log('aid, fid={}',(aid, fid)) if aid[0]=='s': # Special color clr = int(aid[1:]) R, G, B = int_to_rgb(clr) #255&clr, 255&(clr>>8), 255&(clr>>16) if R==G==B: grey_clr_for_plt = clr set_hist_( 'last_palette_grey_level', clr) # apx.set_opt('last_palette_grey_level', clr) continue#while if aid=='pltp' and vals['pltp']==len(active_plts): # Config old_plt = get_hist_('last_palette_type', apx.get_opt('last_palette_type', '')) # old_plt = apx.get_opt('last_palette_type', '') sels = [to01(plt in active_plts) for plt in PLTYPES] ap_vals = dict(ps=(0,sels)) while True: ap_aid, \ ap_vals,\ *_t = dlg_wrapper(_('Select active palettes'), 5+200+5, 5+400+5+24+5, [dict(cid='ps' ,tp='ch-lbx',t=5,h=400 ,l=5 ,w=200 ,items=PLTYPES ) # ,dict(cid='!' 
,tp='bt' ,t=5+400+5 ,l= 200-140 ,w=70 ,cap=_('OK'),props='1' ) # default ,dict(cid='-' ,tp='bt' ,t=5+400+5 ,l=5+200- 70 ,w=70 ,cap=_('Cancel') ) # ], ap_vals, focus_cid='ps') if ap_aid is None or ap_aid=='-': break#while ap sels = ap_vals['ps'][1] if not any(sl=='1' for sl in sels): app.msg_box(_('Select some palettes'), app.MB_OK) continue#while ap active_plts = [pl for ip,pl in enumerate(PLTYPES) if sels[ip]=='1'] set_hist_( 'active_palettes', '|'.join(active_plts)) # apx.set_opt('active_palettes', '|'.join(active_plts)) break#while ap vals['pltp'] = active_plts.index(old_plt) if old_plt in active_plts else 0 continue#while if aid=='pltp': set_hist_( 'last_palette_type', active_plts[vals['pltp']]) # apx.set_opt('last_palette_type', active_plts[vals['pltp']]) #do while #def dlg_color_palette def _dlg_color_palette_clrs(palette_type, grey_clr_for_plt=0): R1 = 0x000033 G1 = 0x003300 B1 = 0x330000 inversedRGB = True clrs = () w,h = 21,21 sp_clrs = () sp_w,sp_h = 21,21 def inverse_RGB(clrs): return list(list( (c & 0x0000ff)<<16 | (c & 0x00ff00) | (c & 0xff0000)>>16 if c is not None else c for c in row) for row in clrs) if False:pass elif palette_type=='343 colors: 18*19': # 9-hexagon: 9*2 + 10*2 + 11*2 + 12*2 + 13*2 + 14*2 + 15*2 + 16*2 + 17 = 217 # 10-hexagon: 10*2 + 11*2 + 12*2 + 13*2 + 14*2 + 15*2 + 16*2 + 17*2 + 18*2 + 19 = 271 # 11-hexagon: 11*2 + 12*2 + 13*2 + 14*2 + 15*2 + 16*2 + 17*2 + 18*2 + 19*2 + 20*2 + 21= 331 # 343 = 7*7*7 = 8*27 + 127 = 331 + 12 = 7 + 7*48 = 7 + 7*6*8 = 7 + 16*21 = 16*22 - 9 = 7 + 12*28 = 7 + 6*7*8 = 12*29 - 5 = 18*19 + 1 RPrts = (0x000000,0x00002a,0x000055,0x00007f,0x0000aa,0x0000d4,0x0000ff) GPrts = (0x000000,0x002a00,0x005500,0x007f00,0x00aa00,0x00d400,0x00ff00) BPrts = (0x000000,0x2a0000,0x550000,0x7f0000,0xaa0000,0xd40000,0xff0000) _rest = """ /== B =/= =\= GB ==\ ==/ G /== \== RG =\= =/= R ==/ ==\ R B \== """ clls_t = '((0x' + """ 016 006 106 056 066 065 061 060 050 560 660 650 610 600 601 605 606 506 105 005 015 046 166 064 062 160 040 561 661 550 510 500 620 626 616 406 014 004 104 045 266 054 052 051 041 540 662 640 602 612 611 604 515 505 116 003 126 156 165 164 162 150 161 641 663 651 621 511 502 614 516 504 025 002 206 146 055 155 151 140 260 652 664 460 501 520 400 625 526 615 101 001 010 430 440 450 530 200 210 220 665 240 250 063 300 310 320 330 401 411 421 431 441 451 461 201 211 221 231 241 251 261 301 311 321 331 402 412 422 432 442 452 462 202 212 536 232 242 252 262 302 312 322 332 403 413 423 433 443 453 463 203 213 223 233 243 253 263 303 313 323 562 404 414 424 434 565 454 464 204 214 224 234 244 254 264 304 314 324 334 405 415 425 435 445 455 465 205 215 225 235 245 255 265 305 315 325 335 603 416 426 436 446 456 466 630 216 226 236 246 256 036 306 316 326 336 535 420 020 030 100 110 120 130 410 011 021 031 503 121 654 131 141 136 026 012 022 032 042 102 112 122 132 142 152 163 340 350 360 343 353 363 115 013 023 033 043 053 103 113 123 133 143 153 341 351 361 344 354 364 024 034 044 114 124 134 144 154 035 125 135 145 342 352 362 345 355 365 521 531 541 631 346 356 366 632 642 512 522 532 542 552 622 643 653 551 513 523 533 543 553 563 613 623 633 514 524 534 544 554 564 624 634 644 635 525 545 000 000 000 000 655 230 645 546 556 566 636 646 656 000 000 666 555 444 333 222 111 000 NNN NNN NNN NNN NNN NNN NNN NNN NNN NNN NNN """.strip('\n').replace(' ', ' ').replace(' \n', ')\n,(0x').replace(' ', ',0x').replace('0xNNN', 'None')[:-3] + '))' pass; #LOG and log('clls_t={}',(clls_t)) clls16 = eval(clls_t) pass; #LOG and 
log('clls16={}',(clls16)) # clls16 = list( list(clls16[ir][ic] for ir in range(len(clls16))) for ic in range(len(clls16[0])) ) # Transposition clrs = list(list(( RPrts[(cll&0xF00)>>8] | GPrts[(cll&0x0F0)>>4] | BPrts[cll&0x00F] if cll is not None else None) for cll in clls_row) for clls_row in clls16) pass; #LOG and log('clrs={}',(clrs)) w,h = 31,31 inversedRGB = False elif palette_type=='3221 colors: 7-hexagon, dialog': # 6*6 + 35 * (6+7+8+9+10+11+10+9+8+7+6) = 36 + 35 *91 = 3221 inversedRGB= False clrs = ( (0x00ffff,0x00d4ff,0x00aaff,0x007fff,0x0055ff,0x002aff,0x0000ff) # 0 , (0x00ffd4,None ,None ,None ,None ,None ,None ,0x2a00ff) # 1 , (0x00ffaa,None ,None ,None ,None ,None ,None ,None ,0x5500ff) # 2 , (0x00ff7f,None ,None ,None ,None ,None ,None ,None ,None ,0x7f00ff) # 3 , (0x00ff55,None ,None ,None ,None ,None ,None ,None ,None ,None ,0xaa00ff) # 4 , (0x00ff2a,None ,None ,None ,None ,None ,None ,None ,None ,None ,None ,0xd400ff) # 5 , (0x00ff00,None ,None ,None ,None ,None ,None ,None ,None ,None ,None ,None ,0xff00ff) # 6 , (0x2aff00,None ,None ,None ,None ,None ,None ,None ,None ,None ,None ,0xff00d4) # 7 , (0x55ff00,None ,None ,None ,None ,None ,None ,None ,None ,None ,0xff00aa) # 8 , (0x7fff00,None ,None ,None ,None ,None ,None ,None ,None ,0xff007f) # 9 , (0xaaff00,None ,None ,None ,None ,None ,None ,None ,0xff0055) # 10 , (0xd4ff00,None ,None ,None ,None ,None ,None ,0xff002a) # 11 , (0xffff00,0xffd400,0xffaa00,0xff7f00,0xff5500,0xff2a00,0xff0000) # 12 , () ) w,h = 29,29 sp_clrs = ( (0xffffff, 0xf7f7f7, 0xf0f0f0, 0xe8e8e8, 0xe1e1e1, 0xd9d9d9, 0xd2d2d2, 0xcacaca, 0xc3c3c3, 0xbbbbbb, 0xb4b4b4, 0xacacac, 0xa5a5a5, 0x9d9d9d, 0x969696, 0x8e8e8e, 0x878787, 0x7f7f7f, 0x787878, 0x707070, 0x696969, 0x616161, 0x5a5a5a, 0x525252, 0x4b4b4b, 0x434343, 0x3c3c3c, 0x343434, 0x2d2d2d, 0x252525, 0x1e1e1e, 0x161616, 0x0f0f0f, 0x070707, 0x000000), ) sp_w,sp_h = 11,23 clrs = inverse_RGB(clrs) sp_clrs = inverse_RGB(sp_clrs) # Center clrs[6][6] = grey_clr_for_plt def interpolate(r1,c1, r2,c2): steps = max(abs(r2-r1), abs(c2-c1)) rs,cs = int((r2-r1)/steps), int((c2-c1)/steps) R1,G1,B1= int_to_rgb(clrs[r1][c1]) R2,G2,B2= int_to_rgb(clrs[r2][c2]) Rs,Gs,Bs= (R2-R1)/steps, (G2-G1)/steps, (B2-B1)/steps for i in range(1, steps): clrs[r1+rs*i][c1+cs*i] = rgb_to_int(int(R1 + Rs*i), int(G1 + Gs*i), int(B1 + Bs*i)) # Vertex-Center interpolate( 0, 0, 6, 6) interpolate( 0, 6, 6, 6) interpolate( 6, 0, 6, 6) interpolate( 6,12, 6, 6) interpolate(12, 0, 6, 6) interpolate(12, 6, 6, 6) interpolate(12, 6, 6, 6) # Filling sectors pass; interpolate( 1, 1, 1, 6) pass; interpolate( 2, 2, 2, 6) pass; interpolate( 3, 3, 3, 6) pass; interpolate( 4, 4, 4, 6) interpolate( 2, 0, 2, 2); interpolate( 2, 6, 2, 8) interpolate( 3, 0, 3, 3); interpolate( 3, 6, 3, 9) interpolate( 4, 0, 4, 4); interpolate( 4, 6, 4,10) interpolate( 5, 0, 5, 5); interpolate( 5, 6, 5,11) interpolate( 7, 0, 7, 5); interpolate( 7, 6, 7,11) interpolate( 8, 0, 8, 4); interpolate( 8, 6, 8,10) interpolate( 9, 0, 9, 3); interpolate( 9, 6, 9, 9) interpolate(10, 0,10, 2); interpolate(10, 6,10, 8) pass; interpolate( 8, 4, 8, 6) pass; interpolate( 9, 3, 9, 6) pass; interpolate(10, 2,10, 6) pass; interpolate(11, 1,11, 6) elif palette_type=='60 colors: 3*20': clrs = ( (0xff8080,0xffa080,0xffc080,0xffe080,0xffff80,0xe0ff80,0xc0ff80,0xa0ff80,0x80ff80,0x80ffa0,0x80ffc0,0x80ffe0,0x80ffff,0x80e0ff,0x80c0ff,0x80a0ff,0x8080ff,0xa080ff,0xc080ff,0xe080ff) , 
(0xff0000,0xff4000,0xff8000,0xffc000,0xffff00,0xc0ff00,0x80ff00,0x40ff00,0x01ff00,0x01ff40,0x01ff80,0x01ffc0,0x01ffff,0x00c0ff,0x0080ff,0x0040ff,0x0000ff,0x4000ff,0x8000ff,0xc000ff) , (0x800000,0x802000,0x804000,0x806000,0x808000,0x608000,0x408000,0x208000,0x008000,0x008020,0x008040,0x008060,0x008080,0x006080,0x004080,0x002080,0x000080,0x200080,0x400080,0x600080) ) w,h = 31,31 elif palette_type=='146 named colors': # http://colorscheme.ru/html-colors.html clrs = ( (0xCD5C5C,0xF08080,0xFA8072,0xE9967A,0xFFA07A,0xDC143C,0xFF0000,0xB22222,0x8B0000,0xFFC0CB,0xFFB6C1,0xFF69B4,0xFF1493,0xC71585,0xDB7093) ,(0xFFA07A,0xFF7F50,0xFF6347,0xFF4500,0xFF8C00,0xFFA500,0xFFD700,0xFFFF00,0xFFFFE0,0xFFFACD,0xFAFAD2,0xFFEFD5,0xFFE4B5,0xFFDAB9,0xEEE8AA,0xF0E68C,0xBDB76B) ,(0xE6E6FA,0xD8BFD8,0xDDA0DD,0xEE82EE,0xDA70D6,0xFF00FF,0xBA55D3,0x9370DB,0x8A2BE2,0x9400D3,0x9932CC,0x8B008B,0x800080,0x4B0082,0x6A5ACD,0x483D8B) ,(0xFFF8DC,0xFFEBCD,0xFFE4C4,0xFFDEAD,0xF5DEB3,0xDEB887,0xD2B48C,0xBC8F8F,0xF4A460,0xDAA520,0xB8860B,0xCD853F,0xD2691E,0x8B4513,0xA0522D,0xA52A2A,0x800000) ,(0xC0C0C0,0xFF00FF,0x800080,0xFF0000,0x800000,0x808000,0x00FF00,0x008000,0x008080) ,(0xADFF2F,0x7FFF00,0x7CFC00,0x32CD32,0x98FB98,0x90EE90,0x00FA9A,0x00FF7F,0x3CB371,0x2E8B57,0x228B22,0x008000,0x006400,0x9ACD32,0x6B8E23,0x808000,0x556B2F,0x66CDAA,0x8FBC8F,0x20B2AA,0x008B8B) ,(0x00FFFF,0xE0FFFF,0xAFEEEE,0x7FFFD4,0x40E0D0,0x48D1CC,0x00CED1,0x5F9EA0,0x4682B4,0xB0C4DE,0xB0E0E6,0xADD8E6,0x87CEEB,0x87CEFA,0x00BFFF,0x1E90FF,0x6495ED,0x7B68EE,0x4169E1,0x0000FF,0x0000CD,0x00008B,0x000080,0x191970) ,(0xFFFFFF,0xFFFAFA,0xF0FFF0,0xF5FFFA,0xF0FFFF,0xF0F8FF,0xF8F8FF,0xF5F5F5,0xFFF5EE,0xF5F5DC,0xFDF5E6,0xFFFAF0,0xFFFFF0,0xFAEBD7,0xFAF0E6,0xFFF0F5,0xFFE4E1) ,(0xDCDCDC,0xD3D3D3,0xC0C0C0,0xA9A9A9,0x808080,0x696969,0x778899,0x708090,0x2F4F4F,0x000000) ) w,h = 31,31 elif palette_type=='420 named colors: 12*35': # 420 = 6*7*10 = 12*35 clrs = ( (0x000000,0x1C1C1C,0x363636,0x4F4F4F,0x696969,0x800000,0x800080,0x808000,0x808080,0x8B0000,0x8B008B,0x8B0A50) ,(0x8B1A1A,0x8B1C62,0x8B2252,0x8B2323,0x8B2500,0x8B3626,0x8B3A3A,0x8B3A62,0x8B3E2F,0x8B4500,0x8B4513,0x8B4726) ,(0x8B475D,0x8B4789,0x8B4C39,0x8B5742,0x8B5A00,0x8B5A2B,0x8B5F65,0x8B636C,0x8B658B,0x8B668B,0x8B6914,0x8B6969) ,(0x8B7355,0x8B7500,0x8B7765,0x8B795E,0x8B7B8B,0x8B7D6B,0x8B7D7B,0x8B7E66,0x8B814C,0x8B8378,0x8B8386,0x8B864E) ,(0x8B8682,0x8B8878,0x8B8970,0x8B8989,0x8B8B00,0x8B8B7A,0x8B8B83,0x9C9C9C,0xA0522D,0xA52A2A,0xA9A9A9,0xB03060) ,(0xB22222,0xB5B5B5,0xB8860B,0xBC8F8F,0xBDB76B,0xBEBEBE,0xC0C0C0,0xC71585,0xCD0000,0xCD00CD,0xCD1076,0xCD2626) ,(0xCD2990,0xCD3278,0xCD3333,0xCD3700,0xCD4F39,0xCD5555,0xCD5B45,0xCD5C5C,0xCD6090,0xCD6600,0xCD661D,0xCD6839) ,(0xCD6889,0xCD69C9,0xCD7054,0xCD8162,0xCD8500,0xCD853F,0xCD8C95,0xCD919E,0xCD950C,0xCD96CD,0xCD9B1D,0xCD9B9B) ,(0xCDAA7D,0xCDAD00,0xCDAF95,0xCDB38B,0xCDB5CD,0xCDB79E,0xCDB7B5,0xCDBA96,0xCDBE70,0xCDC0B0,0xCDC1C5,0xCDC5BF) ,(0xCDC673,0xCDC8B1,0xCDC9A5,0xCDC9C9,0xCDCD00,0xCDCDB4,0xCDCDC1,0xCFCFCF,0xD02090,0xD2691E,0xD2B48C,0xD3D3D3) ,(0xD8BFD8,0xDA70D6,0xDAA520,0xDB7093,0xDC143C,0xDCDCDC,0xDDA0DD,0xDEB887,0xE8E8E8,0xE9967A,0xEE0000,0xEE00EE) ,(0xEE1289,0xEE2C2C,0xEE30A7,0xEE3A8C,0xEE3B3B,0xEE4000,0xEE5C42,0xEE6363,0xEE6A50,0xEE6AA7,0xEE7600,0xEE7621) ,(0xEE7942,0xEE799F,0xEE7AE9,0xEE8262,0xEE82EE,0xEE9572,0xEE9A00,0xEE9A49,0xEEA2AD,0xEEA9B8,0xEEAD0E,0xEEAEEE) ,(0xEEB422,0xEEB4B4,0xEEC591,0xEEC900,0xEECBAD,0xEECFA1,0xEED2EE,0xEED5B7,0xEED5D2,0xEED8AE,0xEEDC82,0xEEDC82) 
,(0xEEDFCC,0xEEE0E5,0xEEE5DE,0xEEE685,0xEEE8AA,0xEEE8CD,0xEEE9BF,0xEEE9E9,0xEEEE00,0xEEEED1,0xEEEEE0,0xF08080) ,(0xF0E68C,0xF4A460,0xF5DEB3,0xF5F5DC,0xF5F5F5,0xFA8072,0xFAEBD7,0xFAF0E6,0xFAFAD2,0xFDF5E6,0xFF0000,0xFF00FF) ,(0xFF1493,0xFF3030,0xFF34B3,0xFF3E96,0xFF4040,0xFF4500,0xFF6347,0xFF69B4,0xFF6A6A,0xFF6EB4,0xFF7256,0xFF7F00) ,(0xFF7F24,0xFF7F50,0xFF8247,0xFF82AB,0xFF83FA,0xFF8C00,0xFF8C69,0xFFA07A,0xFFA500,0xFFA54F,0xFFAEB9,0xFFB5C5) ,(0xFFB6C1,0xFFB90F,0xFFBBFF,0xFFC0CB,0xFFC125,0xFFC1C1,0xFFD39B,0xFFD700,0xFFDAB9,0xFFDEAD,0xFFE1FF,0xFFE4B5) ,(0xFFE4C4,0xFFE4E1,0xFFE7BA,0xFFEBCD,0xFFEC8B,0xFFEFD5,0xFFEFDB,0xFFF0F5,0xFFF5EE,0xFFF68F,0xFFF8DC,0xFFFACD) ,(0xFFFAF0,0xFFFAFA,0xFFFF00,0xFFFFE0,0xFFFFF0,0xFFFFFF,0x2F4F4F,0x006400,0x556B2F,0x008000,0x008080,0x008B00) ,(0x008B45,0x008B8B,0x228B22,0x2E8B57,0x458B00,0x458B74,0x528B8B,0x548B54,0x668B8B,0x698B22,0x698B69,0x6E8B3D) ,(0x7A8B8B,0x838B83,0x838B8B,0x6B8E23,0x20B2AA,0x3CB371,0x8FBC8F,0x00CD00,0x00CD66,0x00CDCD,0x32CD32,0x43CD80) ,(0x66CD00,0x66CDAA,0x79CDCD,0x7CCD7C,0x96CDCD,0x9ACD32,0x9BCD9B,0xA2CD5A,0xB4CDCD,0xC1CDC1,0xC1CDCD,0x48D1CC) ,(0x40E0D0,0x00EE00,0x00EE76,0x00EEEE,0x4EEE94,0x76EE00,0x76EEC6,0x8DEEEE,0x90EE90,0xAEEEEE,0xB3EE3A,0xB4EEB4) ,(0xBCEE68,0xD1EEEE,0xE0EEE0,0xE0EEEE,0x00FA9A,0x98FB98,0x7CFC00,0x00FF00,0x00FF7F,0x00FFFF,0x54FF9F,0x7FFF00) ,(0x7FFFD4,0x97FFFF,0x9AFF9A,0xADFF2F,0xBBFFFF,0xC0FF3E,0xC1FFC1,0xCAFF70,0xE0FFFF,0xF0FFF0,0xF0FFFF,0xF5FFFA) ,(0x191970,0x000080,0x4B0082,0x00008B,0x00688B,0x00868B,0x104E8B,0x27408B,0x36648B,0x473C8B,0x483D8B,0x4A708B) ,(0x53868B,0x551A8B,0x5D478B,0x607B8B,0x68228B,0x68838B,0x6C7B8B,0x6E7B8B,0x7A378B,0x708090,0x778899,0x5F9EA0) ,(0x4682B4,0x9932CC,0x0000CD,0x009ACD,0x00C5CD,0x1874CD,0x3A5FCD,0x4F94CD,0x6959CD,0x6A5ACD,0x6CA6CD,0x7AC5CD) ,(0x7D26CD,0x8968CD,0x8DB6CD,0x9A32CD,0x9AC0CD,0x9FB6CD,0xA2B5CD,0xB452CD,0x00CED1,0x9400D3,0xBA55D3,0x9370DB) ,(0xB0C4DE,0x4169E1,0x8A2BE2,0xADD8E6,0xB0E0E6,0x87CEEB,0x6495ED,0x0000EE,0x00B2EE,0x00E5EE,0x1C86EE,0x436EEE) ,(0x5CACEE,0x7A67EE,0x7B68EE,0x7EC0EE,0x8EE5EE,0x912CEE,0x9F79EE,0xA4D3EE,0xB23AEE,0xB2DFEE,0xB9D3EE,0xBCD2EE) ,(0xD15FEE,0xA020F0,0x87CEFA,0xE6E6FA,0x0000FF,0x00BFFF,0x00F5FF,0x1E90FF,0x4876FF,0x63B8FF,0x836FFF,0x8470FF) ,(0x87CEFF,0x98F5FF,0x9B30FF,0xAB82FF,0xB0E2FF,0xBF3EFF,0xBFEFFF,0xC6E2FF,0xCAE1FF,0xE066FF,0xF0F8FF,0xF8F8FF) ) clrs = list( list(clrs[ir][ic] for ir in range(len(clrs))) for ic in range(len(clrs[0])) ) # Transposition w,h = 25,25 elif palette_type=='1431 named colors: 27*53': clrs = ( (0x100C08,0x1A1110,0x1B1B1B,0x1C1C1C,0x242124,0x2C1608,0x343434,0x363636,0x3B331C,0x3C1414,0x3C341F,0x3D0C02,0x3D2B1F,0x43302E,0x480607,0x483C32,0x4B3621,0x4E1609,0x4F3A3C,0x4F4F4F,0x50404D,0x51484F,0x534B4F,0x543D37,0x555555,0x560319,0x59260B) ,(0x592720,0x5B3256,0x5D3954,0x614051,0x635147,0x644117,0x645452,0x65000B,0x654321,0x66023C,0x663854,0x664228,0x66424D,0x664C28,0x665D1E,0x673147,0x674846,0x674C47,0x676767,0x682860,0x696969,0x6B4423,0x6C2E1F,0x6C541E,0x6F4E37,0x701C1C,0x702670) ,(0x702963,0x703642,0x704214,0x704241,0x722F37,0x737000,0x757575,0x78184A,0x79443B,0x796878,0x7B1113,0x7B3F00,0x7C0A02,0x7C1C05,0x7C4848,0x7E5E60,0x7F1734,0x800000,0x800020,0x800080,0x801818,0x80461B,0x807532,0x808000,0x808080,0x811453,0x81613C) ,(0x820000,0x826644,0x832A0D,0x836953,0x841B2D,0x843F5B,0x848482,0x850101,0x856D4D,0x85754E,0x860111,0x867E36,0x872657,0x873260,0x880085,0x882D17,0x885818,0x88654E,0x893843,0x893F45,0x8A3324,0x8A496B,0x8A795D,0x8A7F80,0x8B0000,0x8B008B,0x8B0A50) 
,(0x8B1A1A,0x8B1C62,0x8B2252,0x8B2323,0x8B2500,0x8B3626,0x8B3A3A,0x8B3A62,0x8B3E2F,0x8B4500,0x8B4513,0x8B4726,0x8B475D,0x8B4789,0x8B4C39,0x8B5742,0x8B5A00,0x8B5A2B,0x8B5f4D,0x8B5F65,0x8B636C,0x8B658B,0x8B668B,0x8B6914,0x8B6969,0x8B7355,0x8B7500) ,(0x8B7765,0x8B795E,0x8B7B8B,0x8B7D6B,0x8B7D7B,0x8B7E66,0x8B814C,0x8B8378,0x8B8386,0x8B8589,0x8B864E,0x8B8682,0x8B8878,0x8B8970,0x8B8989,0x8B8B00,0x8B8B7A,0x8B8B83,0x8D4E85,0x8E3A59,0x8E4585,0x905D5D,0x914E75,0x915C83,0x915F6D,0x918151,0x92000A) ,(0x922724,0x933D41,0x954535,0x960018,0x964B00,0x965A3E,0x967117,0x980036,0x986960,0x987456,0x987654,0x98777B,0x98817B,0x989898,0x990000,0x996515,0x996600,0x996666,0x997A8D,0x9B111E,0x9B7653,0x9B870C,0x9C2542,0x9C7C38,0x9C9C9C,0x9D2933,0x9E1316) ,(0x9E5E6F,0x9F1D35,0x9F2B68,0x9F4576,0x9F8170,0xA0522D,0xA0785A,0xA17A74,0xA2006D,0xA40000,0xA45A52,0xA50B5E,0xA52A2A,0xA55353,0xA57164,0xA63A79,0xA67B5B,0xA6A6A6,0xA75502,0xA81C07,0xA83731,0xA8516E,0xA9203E,0xA95C68,0xA99A86,0xA9A9A9,0xAA381E) ,(0xAA4069,0xAA98A9,0xAB274F,0xAB4B52,0xAB4E52,0xAC1E44,0xACACAC,0xAD4379,0xAD6F69,0xAE0C00,0xAE2029,0xAE98AA,0xAF002A,0xAF4035,0xAF6E4D,0xB03060,0xB05C52,0xB06500,0xB22222,0xB31B1B,0xB3446C,0xB38B6D,0xB48395,0xB53389,0xB5651D,0xB57281,0xB5A642) ,(0xB5B5B5,0xB7410E,0xB768A2,0xB76E79,0xB784A7,0xB78727,0xB86D29,0xB87333,0xB8860B,0xB94E48,0xBA160C,0xBA8759,0xBB3385,0xBB6528,0xBBB477,0xBC8F8F,0xBC987E,0xBCB88A,0xBD33A4,0xBDB76B,0xBE0032,0xBE4F62,0xBEBEBE,0xBF4F51,0xBFAFB2,0xC0362C,0xC04000) ,(0xC08081,0xC09999,0xC0C0C0,0xC154C1,0xC19A6B,0xC21E56,0xC23B22,0xC2B280,0xC30B4E,0xC32148,0xC39953,0xC3B091,0xC40233,0xC41E3A,0xC46210,0xC4AEAD,0xC53151,0xC54B8C,0xC5B358,0xC71585,0xC72C48,0xC74375,0xC80815,0xC84186,0xC8A2C8,0xC8AD7F,0xC90016) ,(0xC95A49,0xC9C0BB,0xCA1F7B,0xCA2C92,0xCB410B,0xCB4154,0xCB6D51,0xCB99C9,0xCBA135,0xCC0000,0xCC0033,0xCC00CC,0xCC3333,0xCC3336,0xCC338B,0xCC33CC,0xCC397B,0xCC474B,0xCC4E5C,0xCC5500,0xCC6666,0xCC7722,0xCC8899,0xCC9900,0xcc9966,0xCC99CC,0xCCA01D) ,(0xCD0000,0xCD00CD,0xCD1076,0xCD2626,0xCD2990,0xCD3278,0xCD3333,0xCD3700,0xCD4F39,0xCD5555,0xCD5700,0xCD5B45,0xCD5C5C,0xCD607E,0xCD6090,0xCD6600,0xCD661D,0xCD6839,0xCD6889,0xCD69C9,0xCD7054,0xCD7F32,0xCD8162,0xCD8500,0xCD853F,0xCD8C95,0xCD919E) ,(0xCD950C,0xCD9575,0xCD96CD,0xCD9B1D,0xCD9B9B,0xCDAA7D,0xCDAD00,0xCDAF95,0xCDB38B,0xCDB5CD,0xCDB79E,0xCDB7B5,0xCDBA96,0xCDBE70,0xCDC0B0,0xCDC1C5,0xCDC5BF,0xCDC673,0xCDC8B1,0xCDC9A5,0xCDC9C9,0xCDCD00,0xCDCDB4,0xCDCDC1,0xCE2029,0xCE4676,0xCF1020) ,(0xCF3476,0xCF6BA9,0xCF71AF,0xCFB53B,0xCFCFC4,0xCFCFCF,0xD02090,0xD0417E,0xD10047,0xD10056,0xD1BEA8,0xD2691E,0xD2B48C,0xD3003F,0xD3212D,0xD39BCB,0xD3D3D3,0xD40000,0xD470A2,0xD473D4,0xD4AF37,0xD65282,0xD68A59,0xD70040,0xD70A53,0xD71868,0xD73B3E) ,(0xD74894,0xD7837F,0xD8B2D1,0xD8BFD8,0xD9004C,0xD92121,0xD9381E,0xD9603B,0xD982B5,0xD98695,0xD99058,0xD998A0,0xDA1D81,0xDA2C43,0xDA3287,0xDA614E,0xDA70D6,0xDA8A67,0xDA9100,0xDAA520,0xDB7093,0xDBD7D2,0xDC143C,0xDCDCDC,0xDDA0DD,0xDDADAF,0xDE3163) ,(0xDE5285,0xDE5D83,0xDE6FA1,0xDEA5A4,0xDEAA88,0xDEB887,0xDF6124,0xE0115F,0xE0218A,0xE03C31,0xE08D3C,0xE12C2C,0xE18E96,0xE1A95F,0xE1AD21,0xE2062C,0xE25098,0xE25822,0xE2725B,0xE30022,0xE30B5D,0xE3256B,0xE32636,0xE34234,0xE3A857,0xE3AB57,0xE3DAC9) ,(0xE40078,0xE4007C,0xE4717A,0xE48400,0xE49B0F,0xE4D00A,0xE4D96F,0xE51A4C,0xE52B50,0xE56024,0xE58E73,0xE5AA70,0xE5B73B,0xE5CCC9,0xE5E4E2,0xE60026,0xE62020,0xE63E62,0xE66771,0xE68FAC,0xE6A8D7,0xE6BE8A,0xE6E200,0xE75480,0xE79FC4,0xE7ACCF,0xE8000D) 
,(0xE86100,0xE88E5A,0xE8CCD7,0xE8E8E8,0xE936A7,0xE9692C,0xE97451,0xE9967A,0xE9D66B,0xEA3C53,0xEAA221,0xEAE0C8,0xEB4C42,0xEC3B83,0xEC5800,0xECB176,0xECD540,0xECEBBD,0xED1C24,0xED2939,0xED872D,0xED9121,0xEDC9AF,0xEE0000,0xEE00EE,0xEE1289,0xEE204D) ,(0xEE2C2C,0xEE30A7,0xEE3A8C,0xEE3B3B,0xEE4000,0xEE5C42,0xEE6363,0xEE6A50,0xEE6AA7,0xEE7600,0xEE7621,0xEE7942,0xEE799F,0xEE7AE9,0xEE8262,0xEE82EE,0xEE9572,0xEE9A00,0xEE9A49,0xEEA2AD,0xEEA9B8,0xEEAD0E,0xEEAEEE,0xEEB422,0xEEB4B4,0xEEC591,0xEEC900) ,(0xEECBAD,0xEECFA1,0xEED202,0xEED2EE,0xEED5B7,0xEED5D2,0xEED8AE,0xEEDC82,0xEEDFCC,0xEEE0E5,0xEEE5DE,0xEEE600,0xEEE685,0xEEE8AA,0xEEE8CD,0xEEE9BF,0xEEE9E9,0xEEEE00,0xEEEED1,0xEEEEE0,0xEF3038,0xEF98AA,0xEFBBCC,0xEFCC00,0xEFDECD,0xEFDFBB,0xF07427) ,(0xF08080,0xF0DC82,0xF0E130,0xF0E68C,0xF0EAD6,0xF19CBB,0xF1DDCF,0xF2003C,0xF28500,0xF2BA49,0xF2BDCD,0xF2F0E6,0xF2F27A,0xF37A48,0xF38FA9,0xF3E5AB,0xF400A1,0xF49AC2,0xF4A460,0xF4C2C2,0xF4C430,0xF4CA16,0xF4F0EC,0xF56991,0xF56FA1,0xF58025,0xF5C71A) ,(0xF5DEB3,0xF5E050,0xF5F5DC,0xF5F5F5,0xF64A8A,0xF6ADC6,0xF6EABE,0xF70D1A,0xF75394,0xF77F00,0xF77FBE,0xF78FA7,0xF7BFBE,0xF7E7CE,0xF7E98E,0xF88379,0xF8B878,0xF8D568,0xF8DE7E,0xF9429E,0xF94D00,0xF984E5,0xF984EF,0xFA5B3D,0xFA6E79,0xFA8072,0xFAD6A5) ,(0xFADA5E,0xFADADD,0xFADFAD,0xFAE7B5,0xFAEBD7,0xFAF0BE,0xFAF0E6,0xFAFA37,0xFAFAD2,0xFB4D46,0xFB4F14,0xFB607F,0xFB9902,0xFBA0E3,0xFBAB60,0xFBAED2,0xFBCCE7,0xFBCEB1,0xFBEC5D,0xFC0FC0,0xFC5A8D,0xFC6C85,0xFC89AC,0xFC8EAC,0xFCC200,0xFCE883,0xFCF75E) ,(0xFD0E35,0xFD3A4A,0xFD3F92,0xFD5240,0xFD5800,0xFD5E53,0xFD6C9E,0xFD7C6E,0xFDBCB4,0xFDD5B1,0xFDD9B5,0xFDDDE6,0xFDEE00,0xFDF5E6,0xFDFD96,0xFE2712,0xFE28A2,0xFE4164,0xFE4EDA,0xFE5A1D,0xFE6F5E,0xFEDF00,0xFEFE33,0xFEFEFA,0xFF0000,0xFF0028,0xFF0038) ,(0xFF003F,0xFF004F,0xFF006C,0xFF007C,0xFF007F,0xFF0090,0xFF00FF,0xFF033E,0xFF0800,0xFF1493,0xFF1DCE,0xFF2052,0xFF2400,0xFF2800,0xFF3030,0xFF33CC,0xFF34B3,0xFF355E,0xFF3800,0xFF3855,0xFF3E96,0xFF4040,0xFF404C,0xFF43A4,0xFF4466,0xFF4500,0xFF4681) ,(0xFF496C,0xFF4F00,0xFF5349,0xFF5470,0xFF55A3,0xFF5800,0xFF5A36,0xFF5CCD,0xFF5F00,0xFF6347,0xFF66CC,0xFF6700,0xFF6961,0xFF69B4,0xFF6A6A,0xFF6D3A,0xFF6E4A,0xFF6EB4,0xFF6FFF,0xFF7256,0xFF7518,0xFF77FF,0xFF7800,0xFF7A00,0xFF7E00,0xFF7F00,0xFF7F24) ,(0xFF7F50,0xFF8243,0xFF8247,0xFF82AB,0xFF83FA,0xFF85CF,0xFF878D,0xFF8C00,0xFF8C69,0xFF91A4,0xFF91AF,0xFF9900,0xFF9933,0xFF9966,0xFF9999,0xFF99CC,0xFF9F00,0xFFA000,0xFFA07A,0xFFA089,0xFFA343,0xFFA500,0xFFA54F,0xFFA6C9,0xFFA700,0xFFA812,0xFFAA1D) ,(0xFFAE42,0xFFAEB9,0xFFB077,0xFFB300,0xFFB347,0xFFB3DE,0xFFB5C5,0xFFB6C1,0xFFB7C5,0xFFB90F,0xFFBA00,0xFFBBFF,0xFFBCD9,0xFFBD88,0xFFBF00,0xFFC0CB,0xFFC125,0xFFC1C1,0xFFC1CC,0xFFC40C,0xFFC87C,0xFFCBA4,0xFFCC00,0xFFCC33,0xFFCC99,0xFFCFF1,0xFFD300) ,(0xFFD39B,0xFFD700,0xFFD800,0xFFDAB9,0xFFDAE9,0xFFDB00,0xFFDB58,0xFFDDCA,0xFFDDF4,0xFFDEAD,0xFFDF00,0xFFDF46,0xFFDFBF,0xFFE135,0xFFE1FF,0xFFE302,0xFFE4B5,0xFFE4C4,0xFFE4CD,0xFFE4E1,0xFFE5B4,0xFFE7BA,0xFFEB00,0xFFEBCD,0xFFEC8B,0xFFEF00,0xFFEFD5) ,(0xFFEFDB,0xFFF000,0xFFF0F5,0xFFF44F,0xFFF5EE,0xFFF600,0xFFF68F,0xFFF700,0xFFF8DC,0xFFF8E7,0xFFFACD,0xFFFAF0,0xFFFAFA,0xFFFDD0,0xFFFF00,0xFFFF31,0xFFFF33,0xFFFF66,0xFFFF99,0xFFFFBF,0xFFFFE0,0xFFFFF0,0x1A2421,0x232B2B,0x013220,0x123524,0x1C352D) ,(0x253529,0x3B3C36,0x004040,0x004225,0x004242,0x354230,0x014421,0x18453B,0x004B49,0x444C38,0x1B4D3E,0x1E4D2B,0x2F4F4F,0x4B5320,0x00563F,0x195905,0x465945,0x4A5D23,0x4D5D53,0x555D50,0x355E3B,0x306030,0x006400,0x006600,0x056608,0x006A4E,0x006B3C) 
,(0x556B2F,0x00703C,0x177245,0x007474,0x727472,0x00755E,0x087830,0x317873,0x01796F,0x49796B,0x4F7942,0x3B7A57,0x0E7C61,0x507D2A,0x007F5C,0x007F66,0x008000,0x008080,0x2A8000,0x00827F,0x40826D,0x568203,0x738276,0x2F847C,0x738678,0x78866B,0x138808) ,(0x56887D,0x008B00,0x008B45,0x008B8B,0x228B22,0x2E8B57,0x458B00,0x458B74,0x528B8B,0x548B54,0x668B8B,0x698B22,0x698B69,0x6E8B3D,0x7A8B8B,0x838B83,0x838B8B,0x4D8C57,0x5E8C31,0x6B8E23,0x828E84,0x009000,0x059033,0x009150,0x319177,0x4C9141,0x679267) ,(0x299617,0x8F9779,0x009966,0x669999,0x6F9940,0x8A9A5B,0x009B7D,0x009E60,0x009F6B,0x8DA399,0x5DA493,0x00A550,0x00A693,0x39A78E,0x5FA778,0x00A86B,0x00A877,0x87A96B,0x9FA91F,0x00AB66,0x29AB87,0x1CAC78,0x00AD43,0x6EAEA1,0x3AB09E,0x66B032,0x20B2AA) ,(0x34B233,0x3CB371,0x43B3AE,0x3EB489,0x7BB661,0x8DB600,0x9AB973,0x0ABAB5,0x30BA8F,0xA9BA9D,0x4CBB17,0x85BB65,0x71BC78,0x8FBC8F,0xB2BEB5,0x30BFBF,0x48BF91,0xACBF60,0xB0BF1A,0x03C03C,0xA3C1AD,0x9DC209,0x74C365,0x00C4B0,0x93C572,0xA4C639,0x50C878) ,(0x96C8A2,0x46CB18,0x00CC33,0x00CC99,0x00CCCC,0x00CD00,0x00CD66,0x00CDCD,0x32CD32,0x43CD80,0x66CD00,0x66CDAA,0x79CDCD,0x7CCD7C,0x96CDCD,0x9ACD32,0x9BCD9B,0xA2CD5A,0xB4CDCD,0xC1CDC1,0xC1CDCD,0x3CD070,0x48D1CC,0x8FD400,0xA0D6B4,0xA6D608,0x44D7A8) ,(0x88D8C0,0x8DD9CC,0x0BDA51,0xBDDA57,0xC9DC87,0x66DDAA,0x77DD77,0x84DE02,0x96DED1,0xADDFAD,0x40E0D0,0xCAE00D,0xACE1AF,0x9FE2BF,0xD1E231,0xDDE26A,0xA8E4A0,0x8EE53F,0x99E6B3,0xD9E650,0x08E8DE,0x64E986,0xB2EC5D,0x00EE00,0x00EE76,0x00EEEE,0x4EEE94) ,(0x76EE00,0x76EEC6,0x8DEEEE,0x90EE90,0xAFEEEE,0xB3EE3A,0xB4EEB4,0xBCEE68,0xD1EEEE,0xE0EEE0,0xE0EEEE,0xAAF0D1,0xD0F0C0,0xA7F432,0xE8F48C,0xE3F988,0x00FA9A,0x98FB98,0x7CFC00,0xA7FC00,0x9EFD38,0x00FF00,0x00FF7F,0x00FFEF,0x00FFFF,0x39FF14,0x3FFF00) ,(0x4AFF00,0x54FF9F,0x66FF00,0x66FF66,0x7FFF00,0x7FFFD4,0x87FF2A,0x97FFFF,0x98FF98,0x9AFF9A,0xADFF2F,0xB2FFFF,0xBBFFFF,0xBFFF00,0xC0FF3E,0xC1FFC1,0xC9FFE5,0xCAFF70,0xCCFF00,0xCEFF00,0xD0FF14,0xDFFF00,0xE0FFFF,0xE3FF00,0xE9FFDB,0xF0FFF0,0xF0FFFF) ,(0xF5FFFA,0xFDFF00,0xFDFFF5,0x010203,0x010B13,0x1F262A,0x301934,0x210837,0x2a3439,0x353839,0x001C3D,0x1C2841,0x002147,0x264348,0x3B444B,0x414A4C,0x32174D,0x36454F,0x1D2951,0x003153,0x004953,0x563C5C,0x002E63,0x002366,0x003366,0x330066,0x333366) ,(0x00416A,0x4F666A,0x602F6B,0x4A646C,0x4C516D,0x54626F,0x191970,0x536872,0x062A78,0x536878,0x23297A,0x32127A,0x58427C,0x36747D,0x00147E,0x08457E,0x000080,0x126180,0x4E5180,0x522D80,0x6E7F80,0x733380,0x4B0082,0x4C2882,0x6C3082,0x391285,0x002387) ,(0x2E2D88,0x367588,0x512888,0x856088,0x000F89,0x00008B,0x00688B,0x00868B,0x104E8B,0x27408B,0x36648B,0x473C8B,0x483D8B,0x4A708B,0x53868B,0x551A8B,0x5D478B,0x5F8A8B,0x607B8B,0x68228B,0x68838B,0x6C7B8B,0x6E7B8B,0x7A378B,0x0A7E8C,0x86608E,0x00308F) ,(0x708090,0x091F92,0x0F4D92,0x553592,0x006994,0x2E5894,0x002395,0x436B95,0x536895,0x035096,0x734F96,0x777696,0x838996,0x004F98,0x009698,0x003399,0x333399,0x663399,0x666699,0x778899,0x00009C,0x26619C,0x28589C,0x69359C,0x5F9EA0,0x0067A5,0x007AA5) ,(0x778BA5,0x1034A6,0x002FA7,0x007BA7,0x5072A7,0x545AA7,0x0014A8,0x0018A8,0x0038A8,0x5D8AA8,0x6F2DA8,0x7851A9,0x0033AA,0x979AAA,0x0047AB,0x7C98AB,0x8C92AC,0x9A4EAE,0x0093AF,0x8601AF,0x006DB0,0x91A3B0,0x324AB2,0x5946B2,0xAB92B3,0x1164B4,0x4682B4) ,(0x4E82B4,0x4F42B5,0xB39EB5,0x0095B6,0x2243B6,0x6082B6,0x9678B6,0x967BB6,0x9C51B6,0x8BA8B7,0x0070B8,0x007BB8,0x0048BA,0x0D98BA,0x0F52BA,0x5D89BA,0x0072BB,0x1C39BB,0x9955BB,0xAA00BB,0x0087BD,0x1560BD,0x0077BE,0x2A52BE,0x8B72BE,0xB284BE,0x4682BF) 
,(0x746CC0,0x72A0C1,0x188BC2,0x73A9C2,0xBFC1C2,0x6D9BC3,0x8878C3,0x9F00C5,0x214FC6,0x1CA9C9,0x779ECB,0x360CCC,0x47ABCC,0x6699CC,0x9932CC,0x9966CC,0x0000CD,0x009ACD,0x00C5CD,0x1874CD,0x21ABCD,0x3A5FCD,0x4F94CD,0x6959CD,0x6A5ACD,0x6CA6CD,0x7AC5CD) ,(0x7D26CD,0x8968CD,0x8DB6CD,0x9A32CD,0x9AC0CD,0x9FB6CD,0xA2B5CD,0xB452CD,0x8806CE,0x0073CF,0x446CCF,0x4BC7CF,0x5A4FCF,0x92A1CF,0xA76BCF,0xAEC6CF,0x0892D0,0x4997D0,0xA2A2D0,0xA2ADD0,0xC4C3D0,0x00CED1,0x1974D2,0x71A6D2,0xB666D2,0x56A0D3,0x9400D3) ,(0xBA55D3,0x1DACD6,0x8CBED6,0x966FD6,0x7C9ED9,0xB19CD9,0x9370DB,0x6050DC,0x6CA0DC,0xB57EDC,0xC9A0DC,0xD6CADD,0x3E8EDE,0xB0C4DE,0xCDA4DE,0x88ACE0,0x4169E1,0x273BE2,0x8A2BE2,0x9BC4E2,0xC4D8E2,0xB80CE3,0xBF94E4,0x5B92E5,0x7ED4E6,0xACACE6,0xADD8E6) ,(0xB0E0E6,0xBCD4E6,0x318CE7,0x45B1E8,0x7CB9E8,0xD19FE8,0x93CCEA,0x00B7EB,0x80DAEB,0x87CEEB,0x9457EB,0x5DADEC,0x24A0ED,0x6495ED,0xA4DDED,0x0000EE,0x00AAEE,0x00B2EE,0x00E5EE,0x1C86EE,0x436EEE,0x5CACEE,0x7A67EE,0x7B68EE,0x7EC0EE,0x8EE5EE,0x912CEE) ,(0x9F79EE,0xA4D3EE,0xACE5EE,0xB23AEE,0xB2DFEE,0xB9D3EE,0xBCD2EE,0xD15FEE,0xABCDEF,0xCEC8EF,0xD891EF,0x1C1CF0,0x89CFF0,0xA020F0,0x8AB9F1,0xA1CAF1,0xDBE9F4,0xF2F3F4,0x4166F5,0x4F86F7,0x87D3F8,0xA4F4F9,0x5218FA,0x87CEFA,0xE6E6FA,0xE6E8FA,0x00B9FB) ,(0x73C2FB,0x74BBFB,0x0FC0FC,0x15F2FD,0xFC74FD,0x0247FE,0x1F75FE,0x77B5FE,0xF1A7FE,0x0000FF,0x0070FF,0x007FFF,0x00BFFF,0x00CCFF,0x00F5FF,0x1E90FF,0x3399FF,0x3F00FF,0x4876FF,0x63B8FF,0x6666FF,0x6F00FF,0x7DF9FF,0x7F00FF,0x836FFF,0x8470FF,0x87CEFF) ,(0x8F00FF,0x98F5FF,0x9B30FF,0x9F00FF,0xA0E6FF,0xA6E7FF,0xAB82FF,0xB0E2FF,0xB9F2FF,0xBF00FF,0xBF3EFF,0xBFEFFF,0xC6E2FF,0xCAE1FF,0xCC00FF,0xCC99FF,0xCCCCFF,0xDCD0FF,0xDF00FF,0xDF73FF,0xE066FF,0xE0B0FF,0xE7FEFF,0xF0F8FF,0xF4BBFF,0xF8F4FF,0xF8F8FF) ) clrs = list( list(clrs[ir][ic] for ir in range(len(clrs))) for ic in range(len(clrs[0])) ) # Transposition w,h = 21,21 elif palette_type=='142 colors: 7-hexagon': clrs = ( (0xeaeaea,0xdddddd,0xc0c0c0,0xb2b2b2,0x969696,0x808080,0x777777,0x5f5f5f,0x4d4d4d,0x333333,0x292929,0x1c1c1c,0x111111,0x080808,0x000000) , () , (0x003366,0x336699,0x3366cc,0x003399,0x000099,0x0000cc,0x000066) , (0x006666,0x006699,0x0099cc,0x0066cc,0x0033cc,0x0000ff,0x3333ff,0x333399) , (0x008080,0x009999,0x33cccc,0x00ccff,0x0099ff,0x0066ff,0x3366ff,0x3333cc,0x666699) , (0x339966,0x00cc99,0x01ffcc,0x01ffff,0x33ccff,0x3399ff,0x6699ff,0x6666ff,0x6600ff,0x6600cc) , (0x339933,0x00cc66,0x01ff99,0x66ffcc,0x66ffff,0x66ccff,0x99ccff,0x9999ff,0x9966ff,0x9933ff,0x9900ff) , (0x006600,0x00cc00,0x01ff00,0x66ff99,0x99ffcc,0xccffff,0xccecff,0xccccff,0xcc99ff,0xcc66ff,0xcc00ff,0x9900cc) , (0x003300,0x008000,0x33cc33,0x66ff66,0x99ff99,0xccffcc,0xffffff,0xffccff,0xff99ff,0xff66ff,0xff00ff,0xcc00cc,0x660066) , (0x336600,0x009900,0x66ff33,0x99ff66,0xccff99,0xffffcc,0xffcccc,0xff99cc,0xff66cc,0xff33cc,0xcc0099,0x800080) , (0x333300,0x669900,0x99ff33,0xccff66,0xffff99,0xffcc99,0xff9999,0xff6699,0xff3399,0xcc3399,0x990099) , (0x666633,0x99cc00,0xccff33,0xffff66,0xffcc66,0xff9966,0xff7c80,0xff0066,0xd60093,0x993366) , (0x808000,0xcccc00,0xffff00,0xffcc00,0xff9933,0xff6600,0xff5050,0xcc0066,0x660033) , (0x996633,0xcc9900,0xff9900,0xcc6600,0xff3300,0xff0000,0xcc0000,0x990033) , (0x663300,0x996600,0xcc3300,0x993300,0x990000,0x800000,0xa50021) ) w,h = 31,31 elif palette_type=='216 web-colors: dragon': clls = ( ((5,5,5),(4,4,4),(3,3,3),(2,2,2),(1,1,1),(0,0,0)) , () , ((5,4,0),(5,3,0),(5,2,0),(5,1,0)) , ((3,4,0),None ,None ,None ,None ,(4,3,0),(5,4,1),(5,4,2),(5,3,2),(5,2,1),(4,1,0),None ,None ,None ,None ,(4,0,1)) , 
((4,5,0),(4,5,1),(1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(4,3,1),(4,2,1),(1,0,0),(2,1,0),(3,0,0),(4,0,0),(5,0,0),(5,1,2),(5,0,1)) , ((3,5,0),(4,5,2),(3,4,1),(2,2,1),(3,3,1),(4,4,1),(5,5,1),(3,2,0),(3,1,0),(2,1,1),(3,1,1),(4,1,1),(5,1,1),(4,1,2),(5,2,3),(5,0,2)) , ((2,5,0),(3,5,2),(2,4,1),(2,3,0),(3,3,2),(4,4,2),(5,5,2),(3,2,1),(2,1,0),(3,2,2),(4,2,2),(5,2,2),(3,0,1),(4,1,3),(5,2,4),(5,0,3)) , ((1,5,0),(2,5,1),(1,3,0),(2,4,0),(3,5,1),(4,4,3),(5,5,3),(4,3,2),(4,2,0),(4,3,3),(5,3,3),(5,1,3),(4,0,2),(3,0,2),(5,1,4),(5,0,4)) , ((0,4,0),(1,4,0),(1,2,0),(2,3,1),(3,4,2),(4,5,3),(5,5,4),(5,4,3),(5,3,1),(5,4,4),(5,3,4),(4,2,3),(3,1,2),(2,0,1),(4,0,3),(1,0,1)) , ((1,4,1),(2,4,2),(0,5,0),(1,5,1),(2,5,2),(3,5,3),(4,5,4),None ,None ,None ,(4,3,4),(3,2,3),(3,1,3),(3,0,3),(2,1,2),(2,0,2)) , ((0,2,0),(1,2,1),(4,3,0),(1,3,1),(2,3,2),(3,4,3),None ,None ,None ,(5,4,5),(5,3,5),(5,2,5),(5,1,5),(5,0,5),(4,2,4),(4,1,4)) , ((0,1,0),(0,4,1),(0,2,1),(1,3,2),(2,4,3),(3,5,4),(4,5,5),(1,3,5),(3,4,5),(4,4,5),(4,3,5),(3,2,4),(2,1,3),(1,0,2),(3,0,4),(4,0,4)) , ((0,5,1),(1,5,2),(0,3,1),(0,4,2),(1,5,3),(3,5,5),(3,4,4),(0,2,4),(2,3,4),(3,3,5),(3,3,4),(3,1,5),(2,0,4),(2,0,3),(4,1,5),(4,0,5)) , ((0,5,2),(2,5,3),(1,4,2),(0,3,2),(2,5,5),(2,4,4),(2,3,3),(0,1,2),(1,2,3),(2,2,5),(2,2,4),(2,2,3),(1,0,3),(3,1,4),(4,2,5),(3,0,5)) , ((0,5,3),(2,5,4),(1,4,3),(1,5,5),(1,4,4),(1,3,3),(1,2,2),(0,2,3),(0,1,3),(1,1,5),(1,1,4),(1,1,3),(1,1,2),(2,1,4),(0,2,5),(2,0,5)) , ((0,5,4),(1,5,4),(0,5,5),(0,4,4),(0,3,3),(0,2,2),(0,1,1),(1,3,4),(1,2,4),(0,0,5),(0,0,4),(0,0,3),(0,0,2),(0,0,1),(2,1,5),(1,0,5)) , ((0,4,3),None ,None ,None ,None ,(0,3,4),(1,4,5),(2,4,5),(2,3,5),(1,2,5),(0,1,4),None ,None ,None ,None ,(1,0,4)) , ((0,4,5),(0,3,5),(0,2,5),(0,1,5)) ) clrs = list(list(( R1*cll[0]|G1*cll[1]|B1*cll[2] if cll else None) for cll in clls_row) for clls_row in clls) w,h = 27,27 elif palette_type=='216 web-colors: 9-hexagon': clls = ( ((5,5,5),(4,4,4),(3,3,3),(2,2,2),(1,1,1),(0,0,0)) , () , ((3,5,0),(4,5,2),(4,5,1),(3,4,0),(4,3,0),(5,4,1),(5,4,0),(5,3,0),(5,2,0)) , ((2,5,0),(3,5,1),(4,5,2),(3,4,1),(2,3,0),(3,2,0),(4,3,1),(5,4,2),(5,3,1),(5,1,0)) , ((1,5,0),(3,5,2),(2,4,0),(1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(4,2,0),(5,3,2),(5,2,1)) , ((2,5,1),(2,4,1),(0,5,0),(1,2,0),(2,2,1),(3,3,1),(4,4,1),(5,5,1),(2,1,0),(1,0,0),(4,2,1),(4,1,0)) , ((1,4,0),(1,3,0),(0,4,0),(1,5,1),(2,3,1),(3,3,2),(4,4,2),(5,5,2),(3,2,1),(2,1,1),(2,0,0),(3,1,0),(4,0,1)) , ((0,4,1),(0,3,1),(0,3,0),(1,4,1),(2,5,2),(3,4,2),(4,4,3),(5,5,3),(4,3,2),(3,2,2),(3,1,1),(3,0,0),(3,0,1),(5,1,2)) , ((1,5,2),(1,4,2),(0,2,0),(1,3,1),(2,4,2),(3,5,3),(4,5,3),(5,5,4),(5,4,3),(4,3,3),(4,2,2),(4,1,1),(4,0,0),(4,1,2),(5,0,1)) , ((0,5,1),(2,5,3),(0,1,0),(1,2,1),(2,3,2),(3,4,3),(4,5,4),None ,None ,(5,4,4),(5,3,3),(5,2,2),(5,1,1),(5,0,0),(5,2,3),(5,0,2)) , ((0,5,2),(1,5,3),(0,4,2),(0,2,1),(1,3,2),(2,4,3),(3,5,4),None ,None ,None ,(5,3,4),(4,2,3),(3,1,2),(2,0,1),(4,0,2),(5,1,3),(5,0,3)) , ((0,5,3),(2,5,4),(0,5,5),(1,5,5),(2,5,5),(3,5,5),(4,5,5),None ,None ,(5,4,5),(4,3,4),(3,2,3),(2,1,2),(1,0,1),(5,2,4),(5,0,4)) , ((0,5,4),(1,4,3),(0,4,4),(1,4,4),(2,4,4),(3,4,4),(3,4,5),(4,4,5),(4,3,5),(5,3,5),(4,2,4),(3,1,3),(2,0,2),(4,1,3),(5,1,4)) , ((1,5,4),(0,3,2),(0,3,3),(1,3,3),(2,3,3),(2,3,4),(3,3,5),(3,3,4),(3,2,4),(5,2,5),(4,1,4),(3,0,3),(3,0,2),(4,0,3)) , ((0,4,3),(0,2,3),(0,2,2),(1,2,2),(1,2,3),(2,2,5),(2,2,4),(0,0,3),(2,1,3),(5,1,5),(4,0,4),(2,0,3),(3,0,4)) , ((0,3,4),(1,3,4),(0,1,1),(0,1,2),(1,1,5),(1,1,4),(1,1,3),(1,1,2),(1,0,2),(5,0,5),(3,1,4),(4,1,5)) , 
((1,4,5),(2,4,5),(0,2,4),(0,0,5),(0,0,4),(0,0,3),(0,0,2),(0,0,1),(2,0,4),(4,2,5),(4,0,5)) , ((0,4,5),(1,3,5),(2,3,5),(1,2,4),(0,1,3),(1,0,3),(2,1,4),(3,2,5),(3,1,5),(3,0,5)) , ((0,3,5),(0,2,5),(0,1,5),(1,2,5),(0,1,4),(1,0,4),(2,1,5),(1,0,5),(2,0,5)) ) clrs = list(list(( R1*cll[0]|G1*cll[1]|B1*cll[2] if cll else None) for cll in clls_row) for clls_row in clls) w,h = 27,27 elif palette_type in ('216web:4*v-candle', '216 web-colors: candles'): clls = (( ),(None ,(0+0,0+0,0+0),None ,None,None ,(5-0,0+0,0+0),None ,None,None ,(0+0,5-0,0+0),None ,None,None ,(0+0,0+0,5-0),None , ),((0+1,0+1,0+0),(0+1,0+0,0+1),(0+0,0+1,0+1),None,(5-1,0+0,0+0),(5-0,0+1,0+0),(5-0,0+0,0+1),None,(0+1,5-1,0+0),(0+1,5-0,0+1),(0+0,5-1,0+1),None,(0+1,0+1,5-0),(0+1,0+0,5-1),(0+0,0+1,5-1), ),((0+1,0+0,0+0),(0+0,0+1,0+0),(0+0,0+0,0+1),None,(5-1,0+1,0+0),(5-1,0+0,0+1),(5-0,0+1,0+1),None,(0+1,5-0,0+0),(0+0,5-1,0+0),(0+0,5-0,0+1),None,(0+1,0+0,5-0),(0+0,0+1,5-0),(0+0,0+0,5-1), ),((0+2,0+0,0+0),(0+0,0+2,0+0),(0+0,0+0,0+2),None,(5-2,0+0,0+0),(5-0,0+2,0+0),(5-0,0+0,0+2),None,(0+2,5-0,0+0),(0+0,5-2,0+0),(0+0,5-0,0+2),None,(0+2,0+0,5-0),(0+0,0+2,5-0),(0+0,0+0,5-2), ),((0+2,0+0,0+1),(0+0,0+2,0+1),(0+1,0+0,0+2),None,(5-2,0+0,0+1),(5-0,0+2,0+1),(5-1,0+0,0+2),None,(0+2,5-0,0+1),(0+0,5-2,0+1),(0+1,5-0,0+2),None,(0+2,0+0,5-1),(0+0,0+2,5-1),(0+1,0+0,5-2), ),(None ,(0+1,0+1,0+1),None ,None,None ,(5-1,0+1,0+1),None ,None,None ,(0+1,5-1,0+1),None ,None,None ,(0+1,0+1,5-1),None , ),((0+2,0+1,0+0),(0+1,0+2,0+0),(0+0,0+1,0+2),None,(5-2,0+1,0+0),(5-1,0+2,0+0),(5-0,0+1,0+2),None,(0+2,5-1,0+0),(0+1,5-2,0+0),(0+0,5-1,0+2),None,(0+2,0+1,5-0),(0+1,0+2,5-0),(0+0,0+1,5-2), ),((0+2,0+1,0+1),(0+1,0+2,0+1),(0+1,0+1,0+2),None,(5-2,0+1,0+1),(5-1,0+2,0+1),(5-1,0+1,0+2),None,(0+2,5-1,0+1),(0+1,5-2,0+1),(0+1,5-1,0+2),None,(0+2,0+1,5-1),(0+1,0+2,5-1),(0+1,0+1,5-2), ),((0+2,0+2,0+0),(0+2,0+0,0+2),(0+0,0+2,0+2),None,(5-2,0+2,0+0),(5-2,0+0,0+2),(5-0,0+2,0+2),None,(0+2,5-2,0+0),(0+2,5-0,0+2),(0+0,5-2,0+2),None,(0+2,0+2,5-0),(0+2,0+0,5-2),(0+0,0+2,5-2), ),((0+2,0+2,0+1),(0+2,0+1,0+2),(0+1,0+2,0+2),None,(5-2,0+2,0+1),(5-2,0+1,0+2),(5-1,0+2,0+2),None,(0+2,5-2,0+1),(0+2,5-1,0+2),(0+1,5-2,0+2),None,(0+2,0+2,5-1),(0+2,0+1,5-2),(0+1,0+2,5-2), ),(None ,(0+2,0+2,0+2),None ,None,None ,(5-2,0+2,0+2),None ,None,None ,(0+2,5-2,0+2),None ,None,None ,(0+2,0+2,5-2),None , ),(None ,(5-2,5-2,5-2),None ,None,None ,(0+2,5-2,5-2),None ,None,None ,(5-2,0+2,5-2),None ,None,None ,(5-2,5-2,0+2),None , ),((5-2,5-2,5-1),(5-2,5-1,5-2),(5-1,5-2,5-2),None,(0+2,5-2,5-1),(0+2,5-1,5-2),(0+1,5-2,5-2),None,(5-2,0+2,5-1),(5-2,0+1,5-2),(5-1,0+2,5-2),None,(5-2,5-2,0+1),(5-2,5-1,0+2),(5-1,5-2,0+2), ),((5-2,5-2,5-0),(5-2,5-0,5-2),(5-0,5-2,5-2),None,(0+2,5-2,5-0),(0+2,5-0,5-2),(0+0,5-2,5-2),None,(5-2,0+2,5-0),(5-2,0+0,5-2),(5-0,0+2,5-2),None,(5-2,5-2,0+0),(5-2,5-0,0+2),(5-0,5-2,0+2), ),((5-2,5-1,5-1),(5-1,5-2,5-1),(5-1,5-1,5-2),None,(0+2,5-1,5-1),(0+1,5-2,5-1),(0+1,5-1,5-2),None,(5-2,0+1,5-1),(5-1,0+2,5-1),(5-1,0+1,5-2),None,(5-2,5-1,0+1),(5-1,5-2,0+1),(5-1,5-1,0+2), ),((5-2,5-1,5-0),(5-1,5-2,5-0),(5-0,5-1,5-2),None,(0+2,5-1,5-0),(0+1,5-2,5-0),(0+0,5-1,5-2),None,(5-2,0+1,5-0),(5-1,0+2,5-0),(5-0,0+1,5-2),None,(5-2,5-1,0+0),(5-1,5-2,0+0),(5-0,5-1,0+2), ),(None ,(5-1,5-1,5-1),None ,None,None ,(0+1,5-1,5-1),None ,None,None ,(5-1,0+1,5-1),None ,None,None ,(5-1,5-1,0+1),None , ),((5-2,5-0,5-1),(5-0,5-2,5-1),(5-1,5-0,5-2),None,(0+2,5-0,5-1),(0+0,5-2,5-1),(0+1,5-0,5-2),None,(5-2,0+0,5-1),(5-0,0+2,5-1),(5-1,0+0,5-2),None,(5-2,5-0,0+1),(5-0,5-2,0+1),(5-1,5-0,0+2), 
),((5-2,5-0,5-0),(5-0,5-2,5-0),(5-0,5-0,5-2),None,(0+2,5-0,5-0),(0+0,5-2,5-0),(0+0,5-0,5-2),None,(5-2,0+0,5-0),(5-0,0+2,5-0),(5-0,0+0,5-2),None,(5-2,5-0,0+0),(5-0,5-2,0+0),(5-0,5-0,0+2), ),((5-1,5-1,5-0),(5-1,5-0,5-1),(5-0,5-1,5-1),None,(0+1,5-0,5-0),(0+0,5-1,5-0),(0+0,5-0,5-1),None,(5-1,0+0,5-0),(5-0,0+1,5-0),(5-0,0+0,5-1),None,(5-1,5-1,0+0),(5-1,5-0,0+1),(5-0,5-1,0+1), ),((5-1,5-0,5-0),(5-0,5-1,5-0),(5-0,5-0,5-1),None,(0+1,5-1,5-0),(0+1,5-0,5-1),(0+0,5-1,5-1),None,(5-1,0+1,5-0),(5-1,0+0,5-1),(5-0,0+1,5-1),None,(5-1,5-0,0+0),(5-0,5-1,0+0),(5-0,5-0,0+1), ),(None ,(5-0,5-0,5-0),None ,None,None ,(0+0,5-0,5-0),None ,None,None ,(5-0,0+0,5-0),None ,None,None ,(5-0,5-0,0+0),None , ) )[1:] if palette_type=='216 web-colors: candles': clls = list( list(clls[ir][ic] for ir in range(len(clls))) for ic in range(len(clls[0])) ) # Transposition clrs = list(list(( R1*cll[0]|G1*cll[1]|B1*cll[2] if cll else None) for cll in clls_row) for clls_row in clls) w,h = 27,27 # elif palette_type=='???:hsv': # inversedRGB= False # clls = (( #),((200,255,255),(190,255,255),(180,255,255),(170,255,255),(160,255,255),(150,255,255),(140,255,255),(130,255,255),(120,255,255),(110,255,255),(100,255,255),( 90,255,255),( 80,255,255),( 70,255,255),( 60,255,255),( 50,255,255),( 40,255,255),( 30,255,255),( 20,255,255),( 10,255,255), #),((200,255,255),(190,255,255),(180,255,255),(170,255,255),(160,255,255),(150,255,255),(140,255,255),(130,255,255),(120,255,255),(110,255,255),(100,255,255),( 90,255,255),( 80,255,255),( 70,255,255),( 60,255,255),( 50,255,255),( 40,255,255),( 30,255,255),( 20,255,255),( 10,255,255), #) )[1:] # clrs = list(list(( rgb01_to_int(*colorsys.hsv_to_rgb(cll[0]/255,cll[1]/255,cll[2]/255)) if cll else None) for cll in clls_row) for clls_row in clls) ## clrs = list(list(( rgb(* hsv_to_rgb(cll[0]/255,cll[1]/255,cll[2]/255)) if cll else None) for cll in clls_row) for clls_row in clls) # w,h = 27,27 # pass; LOG and log('clrs[0][0]={}',(clrs[0][0])) # elif palette_type=='216web:8*27rand': # # 0123456789abcdef # # 0 3 6 9 c f # bs1 = list(i*R1 for i in range(6)) # bs2 = list(i*G1 for i in range(6)) # bs3 = list(i*B1 for i in range(6)) ## import itertools ## cube_3 = itertools.product(bs1, bs2, bs3) # cube_cls = ( # (0,0,0) # # ,(1,0,0) ,(0,0,1) # ,(0,1,0) # # ,(2,0,0) ,(1,0,1) ,(0,0,2) # ,(1,1,0) ,(0,1,1) # ,(0,2,0) # # ,(3,0,0) ,(2,0,1) ,(1,0,2) ,(0,0,3) # ,(2,1,0) ,(1,1,1) ,(0,2,1) # ,(1,2,0) ,(0,1,2) # ,(0,3,0) # # ,(4,0,0) ,(3,0,1) ,(2,0,2) ,(1,0,3) ,(0,0,4) # ,(3,1,0) ,(2,1,1) ,(1,1,2) ,(0,1,3) # ,(2,2,0) ,(1,2,1) ,(0,2,2) # ,(1,3,0) ,(0,3,1) # ,(0,4,0) # # ,(5,0,0) ,(4,0,1) ,(3,0,2) ,(2,0,3) ,(1,0,4) ,(0,0,5) # ,(4,1,0) ,(3,1,1) ,(2,1,2) ,(1,1,3) ,(0,1,4) # ,(3,2,0) ,(2,2,1) ,(1,2,2) ,(0,2,3) # ,(2,3,0) ,(1,3,1) ,(0,3,2) # ,(1,4,0) ,(0,4,1) # ,(0,5,0) # # ,(5,0,1) ,(4,0,2) ,(3,0,3) ,(2,0,4) ,(1,0,5) # ,(5,1,0) ,(4,1,1) ,(3,1,2) ,(2,1,3) ,(1,1,4) ,(0,1,5) # ,(4,2,0) ,(3,2,1) ,(2,2,2) ,(1,2,3) ,(0,2,4) # ,(3,3,0) ,(2,3,1) ,(1,3,2) ,(0,3,3) # ,(2,4,0) ,(1,4,1) ,(0,4,2) # ,(1,5,0) ,(0,5,1) # # ,(5,0,2) ,(4,0,3) ,(3,0,4) ,(2,0,5) # ,(5,1,1) ,(4,1,2) ,(3,1,3) ,(2,1,4) ,(1,1,5) # ,(5,2,0) ,(4,2,1) ,(3,2,2) ,(2,2,3) ,(1,2,4) ,(0,2,5) # ,(4,3,0) ,(3,3,1) ,(2,3,2) ,(1,3,3) ,(0,3,4) # ,(3,4,0) ,(2,4,1) ,(1,4,2) ,(0,4,3) # ,(2,5,0) ,(1,5,1) ,(0,5,2) # # # ,(5,0,3) ,(4,0,4) ,(3,0,5) # ,(5,1,2) ,(4,1,3) ,(3,1,4) ,(2,1,5) # ,(5,2,1) ,(4,2,2) ,(3,2,3) ,(2,2,4) ,(1,2,5) # ,(5,3,0) ,(4,3,1) ,(3,3,2) ,(2,3,3) ,(1,3,4) ,(0,3,5) # ,(4,4,0) ,(3,4,1) ,(2,4,2) ,(1,4,3) ,(0,4,4) # ,(3,5,0) ,(2,5,1) ,(1,5,2) ,(0,5,3) # # ,(5,0,4) 
#                                   ,(4,0,5)
#        ,(5,1,3) ,(4,1,4) ,(3,1,5)
#        ,(5,2,2) ,(4,2,3) ,(3,2,4) ,(2,2,5)
#        ,(5,3,1) ,(4,3,2) ,(3,3,3) ,(2,3,4) ,(1,3,5)
#        ,(5,4,0) ,(4,4,1) ,(3,4,2) ,(2,4,3) ,(1,4,4) ,(0,4,5)
#        ,(4,5,0) ,(3,5,1) ,(2,5,2) ,(1,5,3) ,(0,5,4)
#
#        ,(5,0,5)
#        ,(5,1,4) ,(4,1,5)
#        ,(5,2,3) ,(4,2,4) ,(3,2,5)
#        ,(5,3,2) ,(4,3,3) ,(3,3,4) ,(2,3,5)
#        ,(5,4,1) ,(4,4,2) ,(3,4,3) ,(2,4,4) ,(1,4,5)
#        ,(5,5,0) ,(4,5,1) ,(3,5,2) ,(2,5,3) ,(1,5,4) ,(0,5,5)
#
#        ,(5,1,5)
#        ,(5,2,4) ,(4,2,5)
#        ,(5,3,3) ,(4,3,4) ,(3,3,5)
#        ,(5,4,2) ,(4,4,3) ,(3,4,4) ,(2,4,5)
#        ,(5,5,1) ,(4,5,2) ,(3,5,3) ,(2,5,4) ,(1,5,5)
#
#        ,(5,2,5)
#        ,(5,3,4) ,(4,3,5)
#        ,(5,4,3) ,(4,4,4) ,(3,4,5)
#        ,(5,5,2) ,(4,5,3) ,(3,5,4) ,(2,5,5)
#
#        ,(5,3,5)
#        ,(5,4,4) ,(4,4,5)
#        ,(5,5,3) ,(4,5,4) ,(3,5,5)
#
#        ,(5,4,5)
#        ,(5,5,4) ,(4,5,5)
#
#        ,(5,5,5)
#        )
#       pass;   #cube_cls_a = ((i,j,k) for i in range(6) for j in range(6) for k in range(6))
#       pass;   #diff = list(cl for cl in cube_cls_a if cl not in cube_cls)
#       pass;   #LOG and log('diff={}',(diff))
#       pass;   #return
#       cube_3  = ((bs1[cl[0]], bs2[cl[1]], bs3[cl[2]]) for cl in cube_cls)
#       cube    = list(c[0]|c[1]|c[2] for c in cube_3)
#       pass;   #LOG and log('cube={}',(cube))
#       clrs    = []
#       for r in range(12):
#           clrs+= [cube[r*18:r*18+18]]
##      for r in range(8):
##          clrs+= [cube[r*27:r*27+27]]
#       pass;   #LOG and log('clrs={}',(clrs))
#       pass;   #return
##      clrs = (cb for i, c in enumerate(cube))
#       w,h = 25,25
    #LOG and log('clrs={}',(clrs))
    if inversedRGB:
        clrs = inverse_RGB(clrs)
    return clrs,w,h, sp_clrs,sp_w,sp_h
   #def _dlg_color_palette_clrs

COLOR_NAMES[clr_h2i('#000000')]=_('Black')
COLOR_NAMES[clr_h2i('#000080')]=_('Navy')
COLOR_NAMES[clr_h2i('#00008B')]=_('Dark blue')
COLOR_NAMES[clr_h2i('#00009C')]=_('Duke blue')
COLOR_NAMES[clr_h2i('#0000CD')]=_('Medium blue')
COLOR_NAMES[clr_h2i('#0000EE')]=_('Blue')
COLOR_NAMES[clr_h2i('#0000FF')]=_('Blue')
COLOR_NAMES[clr_h2i('#000F89')]=_('Phthalo blue')
COLOR_NAMES[clr_h2i('#00147E')]=_('Dark imperial blue')
COLOR_NAMES[clr_h2i('#0014A8')]=_('Zaffre')
COLOR_NAMES[clr_h2i('#0018A8')]=_('Blue')
COLOR_NAMES[clr_h2i('#001C3D')]=_('Maastricht blue')
COLOR_NAMES[clr_h2i('#002147')]=_('Oxford blue')
COLOR_NAMES[clr_h2i('#002366')]=_('Royal blue')
COLOR_NAMES[clr_h2i('#002387')]=_('Resolution blue')
COLOR_NAMES[clr_h2i('#002395')]=_('Imperial blue')
COLOR_NAMES[clr_h2i('#002E63')]=_('Cool black')
COLOR_NAMES[clr_h2i('#002FA7')]=_('International Klein blue')
COLOR_NAMES[clr_h2i('#00308F')]=_('Air Force blue')
COLOR_NAMES[clr_h2i('#003153')]=_('Prussian blue')
COLOR_NAMES[clr_h2i('#003366')]=_('Dark midnight blue')
COLOR_NAMES[clr_h2i('#003399')]=_('Smalt, Dark powder blue')
COLOR_NAMES[clr_h2i('#0033AA')]=_('UA blue')
COLOR_NAMES[clr_h2i('#0038A8')]=_('Royal azure')
COLOR_NAMES[clr_h2i('#004040')]=_('Rich black')
COLOR_NAMES[clr_h2i('#00416A')]=_('Dark imperial blue')
COLOR_NAMES[clr_h2i('#004225')]=_('British racing green')
COLOR_NAMES[clr_h2i('#004242')]=_('Warm black')
COLOR_NAMES[clr_h2i('#0047AB')]=_('Cobalt blue')
COLOR_NAMES[clr_h2i('#0048BA')]=_('Absolute zero')
COLOR_NAMES[clr_h2i('#004953')]=_('Midnight green, Eagle green')
COLOR_NAMES[clr_h2i('#004B49')]=_('Deep jungle green')
COLOR_NAMES[clr_h2i('#004F98')]=_('USAFA blue')
COLOR_NAMES[clr_h2i('#00563F')]=_('Sacramento state green')
COLOR_NAMES[clr_h2i('#006400')]=_('Dark green')
COLOR_NAMES[clr_h2i('#006600')]=_('Pakistan green')
COLOR_NAMES[clr_h2i('#0067A5')]=_('Sapphire blue')
COLOR_NAMES[clr_h2i('#00688B')]=_('Deep sky blue')
COLOR_NAMES[clr_h2i('#006994')]=_('Sea blue')
COLOR_NAMES[clr_h2i('#006A4E')]=_('Bottle green')
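# ---------------------------------------------------------------------------
# Aside: a minimal, self-contained sketch of the three idioms the palette
# builder above relies on -- per-channel composition of web-safe levels
# (R1*cll[0]|G1*cll[1]|B1*cll[2]), grid transposition, and linear color
# interpolation between two cells. The channel units U_R/U_G/U_B, the byte
# order, and the 3x3 demo grid are hypothetical stand-ins (the real code
# uses its own R1/G1/B1 and parts tables); this is an illustration, not the
# plugin's API.
def _palette_idioms_demo():
    U_R, U_G, U_B = 0x330000, 0x003300, 0x000033    # assumed per-level channel units, levels 0..5
    # compose a web-safe color from (r,g,b) level triples
    compose = lambda cl: U_R*cl[0] | U_G*cl[1] | U_B*cl[2]
    grid = [[compose((r, c, 0)) for c in range(3)] for r in range(3)]
    # transposition, as in list(list(clrs[ir][ic] ...)); zip(*grid) is
    # equivalent for rectangular grids (zip would silently truncate ragged rows)
    transposed = [list(row) for row in zip(*grid)]
    assert transposed[0][2] == grid[2][0]
    # channel-by-channel linear interpolation between two colors
    def lerp(c1, c2, t):
        rgb1 = [(c1 >> s) & 0xFF for s in (16, 8, 0)]
        rgb2 = [(c2 >> s) & 0xFF for s in (16, 8, 0)]
        return sum(int(a + (b - a)*t) << s for a, b, s in zip(rgb1, rgb2, (16, 8, 0)))
    mid = lerp(grid[0][0], grid[2][2], 0.5)         # halfway color on the diagonal
    return grid, transposed, mid

if __name__ == '__main__':
    print(_palette_idioms_demo())
# ---------------------------------------------------------------------------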
COLOR_NAMES[clr_h2i('#006B3C')]=_('Cadmium green')
COLOR_NAMES[clr_h2i('#006DB0')]=_('Honolulu blue')
COLOR_NAMES[clr_h2i('#00703C')]=_('Dartmouth green')
COLOR_NAMES[clr_h2i('#0070B8')]=_('Spanish blue')
COLOR_NAMES[clr_h2i('#0070FF')]=_('Brandeis blue')
COLOR_NAMES[clr_h2i('#0072BB')]=_('French blue')
COLOR_NAMES[clr_h2i('#0073CF')]=_('True blue')
COLOR_NAMES[clr_h2i('#007474')]=_('Skobeloff')
COLOR_NAMES[clr_h2i('#00755E')]=_('Tropical rain forest')
COLOR_NAMES[clr_h2i('#0077BE')]=_('Ocean boat blue')
COLOR_NAMES[clr_h2i('#007AA5')]=_('CG blue')
COLOR_NAMES[clr_h2i('#007BA7')]=_('Celadon blue')
COLOR_NAMES[clr_h2i('#007BB8')]=_('Star command blue')
COLOR_NAMES[clr_h2i('#007F5C')]=_('Spanish viridian')
COLOR_NAMES[clr_h2i('#007F66')]=_('Generic viridian')
COLOR_NAMES[clr_h2i('#007FFF')]=_('Azure')
COLOR_NAMES[clr_h2i('#008000')]=_('Green')
COLOR_NAMES[clr_h2i('#008080')]=_('Teal')
COLOR_NAMES[clr_h2i('#00827F')]=_('Teal green')
COLOR_NAMES[clr_h2i('#00868B')]=_('Turquoise')
COLOR_NAMES[clr_h2i('#0087BD')]=_('Blue')
COLOR_NAMES[clr_h2i('#008B00')]=_('Green')
COLOR_NAMES[clr_h2i('#008B45')]=_('Spring green')
COLOR_NAMES[clr_h2i('#008B8B')]=_('Dark cyan')
COLOR_NAMES[clr_h2i('#009000')]=_('Islamic green')
COLOR_NAMES[clr_h2i('#009150')]=_('Spanish green')
COLOR_NAMES[clr_h2i('#0093AF')]=_('Blue')
COLOR_NAMES[clr_h2i('#0095B6')]=_('Bondi blue')
COLOR_NAMES[clr_h2i('#009698')]=_('Viridian green')
COLOR_NAMES[clr_h2i('#009966')]=_('Green-cyan')
COLOR_NAMES[clr_h2i('#009ACD')]=_('Deep sky blue')
COLOR_NAMES[clr_h2i('#009B7D')]=_('Paolo Veronese green')
COLOR_NAMES[clr_h2i('#009E60')]=_('Shamrock green')
COLOR_NAMES[clr_h2i('#009F6B')]=_('Green')
COLOR_NAMES[clr_h2i('#00A550')]=_('Green')
COLOR_NAMES[clr_h2i('#00A693')]=_('Persian green')
COLOR_NAMES[clr_h2i('#00A86B')]=_('Jade')
COLOR_NAMES[clr_h2i('#00A877')]=_('Green')
COLOR_NAMES[clr_h2i('#00AAEE')]=_('Vivid cerulean')
COLOR_NAMES[clr_h2i('#00AB66')]=_('GO green')
COLOR_NAMES[clr_h2i('#00AD43')]=_('Green')
COLOR_NAMES[clr_h2i('#00B2EE')]=_('Deep sky blue')
COLOR_NAMES[clr_h2i('#00B7EB')]=_('Cyan')
COLOR_NAMES[clr_h2i('#00B9FB')]=_('Blue bolt')
COLOR_NAMES[clr_h2i('#00BFFF')]=_('Deep sky blue')
COLOR_NAMES[clr_h2i('#00C4B0')]=_('Amazonite')
COLOR_NAMES[clr_h2i('#00C5CD')]=_('Turquoise')
COLOR_NAMES[clr_h2i('#00CC33')]=_('Vivid malachite')
COLOR_NAMES[clr_h2i('#00CC99')]=_('Caribbean green')
COLOR_NAMES[clr_h2i('#00CCCC')]=_('Robin egg blue')
COLOR_NAMES[clr_h2i('#00CCFF')]=_('Vivid sky blue')
COLOR_NAMES[clr_h2i('#00CD00')]=_('Green')
COLOR_NAMES[clr_h2i('#00CD66')]=_('Spring green')
COLOR_NAMES[clr_h2i('#00CDCD')]=_('Cyan')
COLOR_NAMES[clr_h2i('#00CED1')]=_('Dark turquoise')
COLOR_NAMES[clr_h2i('#00E5EE')]=_('Turquoise')
COLOR_NAMES[clr_h2i('#00EE00')]=_('Green')
COLOR_NAMES[clr_h2i('#00EE76')]=_('Spring green')
COLOR_NAMES[clr_h2i('#00EEEE')]=_('Cyan')
COLOR_NAMES[clr_h2i('#00F5FF')]=_('Turquoise')
COLOR_NAMES[clr_h2i('#00FA9A')]=_('Medium spring green')
COLOR_NAMES[clr_h2i('#00FF00')]=_('Lime green')
COLOR_NAMES[clr_h2i('#00FF7F')]=_('Spring green')
COLOR_NAMES[clr_h2i('#00FFEF')]=_('Turquoise blue')
COLOR_NAMES[clr_h2i('#00FFFF')]=_('Cyan, Spanish sky blue')
COLOR_NAMES[clr_h2i('#010203')]=_('Rich black')
COLOR_NAMES[clr_h2i('#010B13')]=_('Rich black')
COLOR_NAMES[clr_h2i('#013220')]=_('Dark green')
COLOR_NAMES[clr_h2i('#014421')]=_('Forest green (traditional)')
COLOR_NAMES[clr_h2i('#01796F')]=_('Pine green')
COLOR_NAMES[clr_h2i('#0247FE')]=_('Blue')
COLOR_NAMES[clr_h2i('#035096')]=_('Medium electric blue')
COLOR_NAMES[clr_h2i('#03C03C')]=_('Dark pastel green')
COLOR_NAMES[clr_h2i('#056608')]=_('Deep green')
COLOR_NAMES[clr_h2i('#059033')]=_('North Texas green')
COLOR_NAMES[clr_h2i('#062A78')]=_('Catalina blue')
COLOR_NAMES[clr_h2i('#08457E')]=_('Dark cerulean')
COLOR_NAMES[clr_h2i('#087830')]=_('La Salle green')
COLOR_NAMES[clr_h2i('#0892D0')]=_('Rich electric blue')
COLOR_NAMES[clr_h2i('#08E8DE')]=_('Bright turquoise')
COLOR_NAMES[clr_h2i('#091F92')]=_('Indigo dye')
COLOR_NAMES[clr_h2i('#0A7E8C')]=_('Metallic seaweed')
COLOR_NAMES[clr_h2i('#0ABAB5')]=_('Tiffany blue')
COLOR_NAMES[clr_h2i('#0BDA51')]=_('Malachite')
COLOR_NAMES[clr_h2i('#0D98BA')]=_('Blue-green')
COLOR_NAMES[clr_h2i('#0E7C61')]=_('Deep green-cyan turquoise')
COLOR_NAMES[clr_h2i('#0F4D92')]=_('Yale blue')
COLOR_NAMES[clr_h2i('#0F52BA')]=_('Sapphire')
COLOR_NAMES[clr_h2i('#0FC0FC')]=_('Spiro Disco Ball')
COLOR_NAMES[clr_h2i('#100C08')]=_('Smoky black')
COLOR_NAMES[clr_h2i('#1034A6')]=_('Egyptian blue')
COLOR_NAMES[clr_h2i('#104E8B')]=_('Dodger blue')
COLOR_NAMES[clr_h2i('#1164B4')]=_('Green-blue')
COLOR_NAMES[clr_h2i('#123524')]=_('Phthalo green')
COLOR_NAMES[clr_h2i('#126180')]=_('Blue sapphire')
COLOR_NAMES[clr_h2i('#138808')]=_('India green')
COLOR_NAMES[clr_h2i('#1560BD')]=_('Denim')
COLOR_NAMES[clr_h2i('#15F2FD')]=_('Vomit+indigo+Lopen+Gabriel')
COLOR_NAMES[clr_h2i('#177245')]=_('Dark spring green')
COLOR_NAMES[clr_h2i('#18453B')]=_('MSU green')
COLOR_NAMES[clr_h2i('#1874CD')]=_('Dodger blue')
COLOR_NAMES[clr_h2i('#188BC2')]=_('Cyan cornflower blue')
COLOR_NAMES[clr_h2i('#191970')]=_('Midnight blue')
COLOR_NAMES[clr_h2i('#195905')]=_('Lincoln green')
COLOR_NAMES[clr_h2i('#1974D2')]=_('Bright navy blue')
COLOR_NAMES[clr_h2i('#1A1110')]=_('Licorice')
COLOR_NAMES[clr_h2i('#1A2421')]=_('Dark jungle green')
COLOR_NAMES[clr_h2i('#1B1B1B')]=_('Eerie black')
COLOR_NAMES[clr_h2i('#1B4D3E')]=_('English green')
COLOR_NAMES[clr_h2i('#1C1C1C')]=_('Grey')
COLOR_NAMES[clr_h2i('#1C1CF0')]=_('Bluebonnet')
COLOR_NAMES[clr_h2i('#1C2841')]=_('Yankees blue')
COLOR_NAMES[clr_h2i('#1C352D')]=_('Medium jungle green')
COLOR_NAMES[clr_h2i('#1C39BB')]=_('Persian blue')
COLOR_NAMES[clr_h2i('#1C86EE')]=_('Dodger blue')
COLOR_NAMES[clr_h2i('#1CA9C9')]=_('Pacific blue')
COLOR_NAMES[clr_h2i('#1CAC78')]=_('Green')
COLOR_NAMES[clr_h2i('#1D2951')]=_('Space cadet')
COLOR_NAMES[clr_h2i('#1DACD6')]=_('Battery charged blue')
COLOR_NAMES[clr_h2i('#1E4D2B')]=_('Cal Poly Pomona green')
COLOR_NAMES[clr_h2i('#1E90FF')]=_('Dodger blue')
COLOR_NAMES[clr_h2i('#1F262A')]=_('Dark gunmetal')
COLOR_NAMES[clr_h2i('#1F75FE')]=_('Blue')
COLOR_NAMES[clr_h2i('#20B2AA')]=_('Light sea green')
COLOR_NAMES[clr_h2i('#210837')]=_('Middle Red purple')
COLOR_NAMES[clr_h2i('#214FC6')]=_('New car')
COLOR_NAMES[clr_h2i('#21ABCD')]=_('Ball blue')
COLOR_NAMES[clr_h2i('#2243B6')]=_('Denim blue')
COLOR_NAMES[clr_h2i('#228B22')]=_('Forest green')
COLOR_NAMES[clr_h2i('#23297A')]=_('St. Patrick\'s blue')
COLOR_NAMES[clr_h2i('#232B2B')]=_('Charleston green')
COLOR_NAMES[clr_h2i('#242124')]=_('Raisin black')
COLOR_NAMES[clr_h2i('#24A0ED')]=_('Button blue')
COLOR_NAMES[clr_h2i('#253529')]=_('Black leather jacket')
COLOR_NAMES[clr_h2i('#264348')]=_('Japanese indigo')
COLOR_NAMES[clr_h2i('#26619C')]=_('Lapis lazuli')
COLOR_NAMES[clr_h2i('#273BE2')]=_('Palatinate blue')
COLOR_NAMES[clr_h2i('#27408B')]=_('Royal blue')
COLOR_NAMES[clr_h2i('#28589C')]=_('Cyan cobalt blue')
COLOR_NAMES[clr_h2i('#299617')]=_('Slimy green')
COLOR_NAMES[clr_h2i('#29AB87')]=_('Jungle green')
COLOR_NAMES[clr_h2i('#2a3439')]=_('Gunmetal')
COLOR_NAMES[clr_h2i('#2A52BE')]=_('Cerulean blue')
COLOR_NAMES[clr_h2i('#2A8000')]=_('Napier green')
COLOR_NAMES[clr_h2i('#2C1608')]=_('Zinnwaldite brown')
COLOR_NAMES[clr_h2i('#2E2D88')]=_('Cosmic cobalt')
COLOR_NAMES[clr_h2i('#2E5894')]=_('B\'dazzled blue')
COLOR_NAMES[clr_h2i('#2E8B57')]=_('Sea green')
COLOR_NAMES[clr_h2i('#2F4F4F')]=_('Dark slate gray')
COLOR_NAMES[clr_h2i('#2F847C')]=_('Celadon green')
COLOR_NAMES[clr_h2i('#301934')]=_('Dark purple')
COLOR_NAMES[clr_h2i('#306030')]=_('Mughal green')
COLOR_NAMES[clr_h2i('#30BA8F')]=_('Mountain Meadow')
COLOR_NAMES[clr_h2i('#30BFBF')]=_('Maximum blue green')
COLOR_NAMES[clr_h2i('#317873')]=_('Myrtle green')
COLOR_NAMES[clr_h2i('#318CE7')]=_('Bleu de France')
COLOR_NAMES[clr_h2i('#319177')]=_('Illuminating emerald')
COLOR_NAMES[clr_h2i('#32127A')]=_('Persian indigo')
COLOR_NAMES[clr_h2i('#32174D')]=_('Russian violet')
COLOR_NAMES[clr_h2i('#324AB2')]=_('Violet-blue')
COLOR_NAMES[clr_h2i('#32CD32')]=_('Lime green')
COLOR_NAMES[clr_h2i('#330066')]=_('Deep violet')
COLOR_NAMES[clr_h2i('#333366')]=_('Deep koamaru')
COLOR_NAMES[clr_h2i('#333399')]=_('Blue')
COLOR_NAMES[clr_h2i('#3399FF')]=_('Brilliant azure')
COLOR_NAMES[clr_h2i('#343434')]=_('Jet')
COLOR_NAMES[clr_h2i('#34B233')]=_('Wageningen green')
COLOR_NAMES[clr_h2i('#353839')]=_('Onyx')
COLOR_NAMES[clr_h2i('#354230')]=_('Kombu green')
COLOR_NAMES[clr_h2i('#355E3B')]=_('Deep moss green')
COLOR_NAMES[clr_h2i('#360CCC')]=_('Interdimensional blue')
COLOR_NAMES[clr_h2i('#363636')]=_('Grey')
COLOR_NAMES[clr_h2i('#36454F')]=_('Charcoal')
COLOR_NAMES[clr_h2i('#36648B')]=_('Steel blue')
COLOR_NAMES[clr_h2i('#36747D')]=_('Ming')
COLOR_NAMES[clr_h2i('#367588')]=_('Teal blue')
COLOR_NAMES[clr_h2i('#391285')]=_('Pixie powder')
COLOR_NAMES[clr_h2i('#39A78E')]=_('Zomp')
COLOR_NAMES[clr_h2i('#39FF14')]=_('Neon green')
COLOR_NAMES[clr_h2i('#3A5FCD')]=_('Royal blue')
COLOR_NAMES[clr_h2i('#3AB09E')]=_('Keppel')
COLOR_NAMES[clr_h2i('#3B331C')]=_('Pullman green')
COLOR_NAMES[clr_h2i('#3B3C36')]=_('Black olive')
COLOR_NAMES[clr_h2i('#3B444B')]=_('Arsenic')
COLOR_NAMES[clr_h2i('#3B7A57')]=_('Amazon')
COLOR_NAMES[clr_h2i('#3C1414')]=_('Dark sienna')
COLOR_NAMES[clr_h2i('#3C341F')]=_('Olive Drab #7')
COLOR_NAMES[clr_h2i('#3CB371')]=_('Medium sea green')
COLOR_NAMES[clr_h2i('#3CD070')]=_('UFO green')
COLOR_NAMES[clr_h2i('#3D0C02')]=_('Black bean')
COLOR_NAMES[clr_h2i('#3D2B1F')]=_('Bistre')
COLOR_NAMES[clr_h2i('#3E8EDE')]=_('Tufts blue')
COLOR_NAMES[clr_h2i('#3EB489')]=_('Mint')
COLOR_NAMES[clr_h2i('#3F00FF')]=_('Ultramarine')
COLOR_NAMES[clr_h2i('#3FFF00')]=_('Harlequin')
COLOR_NAMES[clr_h2i('#40826D')]=_('Deep aquamarine')
COLOR_NAMES[clr_h2i('#40E0D0')]=_('Turquoise')
COLOR_NAMES[clr_h2i('#414A4C')]=_('Outer space')
COLOR_NAMES[clr_h2i('#4166F5')]=_('Ultramarine blue')
COLOR_NAMES[clr_h2i('#4169E1')]=_('Royal blue')
COLOR_NAMES[clr_h2i('#43302E')]=_('Old burgundy')
COLOR_NAMES[clr_h2i('#436B95')]=_('Queen blue')
COLOR_NAMES[clr_h2i('#436EEE')]=_('Royal blue')
COLOR_NAMES[clr_h2i('#43B3AE')]=_('Verdigris')
COLOR_NAMES[clr_h2i('#43CD80')]=_('Sea green')
COLOR_NAMES[clr_h2i('#444C38')]=_('Rifle green')
COLOR_NAMES[clr_h2i('#446CCF')]=_('Han blue')
COLOR_NAMES[clr_h2i('#44D7A8')]=_('Eucalyptus')
COLOR_NAMES[clr_h2i('#458B00')]=_('Chartreuse')
COLOR_NAMES[clr_h2i('#458B74')]=_('Aquamarine')
COLOR_NAMES[clr_h2i('#45B1E8')]=_('Picton blue')
COLOR_NAMES[clr_h2i('#465945')]=_('Gray-asparagus')
COLOR_NAMES[clr_h2i('#4682B4')]=_('Steel blue')
COLOR_NAMES[clr_h2i('#4682BF')]=_('Cyan-blue azure')
COLOR_NAMES[clr_h2i('#46CB18')]=_('Harlequin green')
COLOR_NAMES[clr_h2i('#473C8B')]=_('Slate blue')
COLOR_NAMES[clr_h2i('#47ABCC')]=_('Maximum blue')
COLOR_NAMES[clr_h2i('#480607')]=_('Bulgarian rose')
COLOR_NAMES[clr_h2i('#483C32')]=_('Taupe')
COLOR_NAMES[clr_h2i('#483D8B')]=_('Dark slate blue')
COLOR_NAMES[clr_h2i('#4876FF')]=_('Royal blue')
COLOR_NAMES[clr_h2i('#48BF91')]=_('Ocean green')
COLOR_NAMES[clr_h2i('#48D1CC')]=_('Medium turquoise')
COLOR_NAMES[clr_h2i('#49796B')]=_('Hooker\'s green')
COLOR_NAMES[clr_h2i('#4997D0')]=_('Celestial blue')
COLOR_NAMES[clr_h2i('#4A5D23')]=_('Dark moss green')
COLOR_NAMES[clr_h2i('#4A646C')]=_('Deep space sparkle')
COLOR_NAMES[clr_h2i('#4A708B')]=_('Sky blue')
COLOR_NAMES[clr_h2i('#4AFF00')]=_('Chlorophyll green')
COLOR_NAMES[clr_h2i('#4B0082')]=_('Indigo')
COLOR_NAMES[clr_h2i('#4B3621')]=_('Cafe noir')
COLOR_NAMES[clr_h2i('#4B5320')]=_('Army green')
COLOR_NAMES[clr_h2i('#4BC7CF')]=_('Sea serpent')
COLOR_NAMES[clr_h2i('#4C2882')]=_('Spanish violet')
COLOR_NAMES[clr_h2i('#4C516D')]=_('Independence')
COLOR_NAMES[clr_h2i('#4C9141')]=_('May green')
COLOR_NAMES[clr_h2i('#4CBB17')]=_('Kelly green')
COLOR_NAMES[clr_h2i('#4D5D53')]=_('Feldgrau')
COLOR_NAMES[clr_h2i('#4D8C57')]=_('Middle green')
COLOR_NAMES[clr_h2i('#4E1609')]=_('French puce')
COLOR_NAMES[clr_h2i('#4E5180')]=_('Purple navy')
COLOR_NAMES[clr_h2i('#4E82B4')]=_('Cyan azure')
COLOR_NAMES[clr_h2i('#4EEE94')]=_('Sea green')
COLOR_NAMES[clr_h2i('#4F3A3C')]=_('Dark puce')
COLOR_NAMES[clr_h2i('#4F42B5')]=_('Ocean blue')
COLOR_NAMES[clr_h2i('#4F4F4F')]=_('Grey')
COLOR_NAMES[clr_h2i('#4F666A')]=_('Stormcloud')
COLOR_NAMES[clr_h2i('#4F7942')]=_('Fern green')
COLOR_NAMES[clr_h2i('#4F86F7')]=_('Blueberry')
COLOR_NAMES[clr_h2i('#4F94CD')]=_('Steel blue')
COLOR_NAMES[clr_h2i('#50404D')]=_('Purple taupe')
COLOR_NAMES[clr_h2i('#5072A7')]=_('Blue yonder')
COLOR_NAMES[clr_h2i('#507D2A')]=_('Sap green')
COLOR_NAMES[clr_h2i('#50C878')]=_('Emerald')
COLOR_NAMES[clr_h2i('#512888')]=_('KSU purple')
COLOR_NAMES[clr_h2i('#51484F')]=_('Quartz')
COLOR_NAMES[clr_h2i('#5218FA')]=_('Han purple')
COLOR_NAMES[clr_h2i('#522D80')]=_('Regalia')
COLOR_NAMES[clr_h2i('#528B8B')]=_('Dark slate gray')
COLOR_NAMES[clr_h2i('#534B4F')]=_('Dark liver')
COLOR_NAMES[clr_h2i('#536872')]=_('Cadet')
COLOR_NAMES[clr_h2i('#536878')]=_('Dark electric blue')
COLOR_NAMES[clr_h2i('#536895')]=_('UCLA blue')
COLOR_NAMES[clr_h2i('#53868B')]=_('Cadet blue')
COLOR_NAMES[clr_h2i('#543D37')]=_('Dark liver (horses)')
COLOR_NAMES[clr_h2i('#545AA7')]=_('Liberty')
COLOR_NAMES[clr_h2i('#54626F')]=_('Black Coral')
COLOR_NAMES[clr_h2i('#548B54')]=_('Pale green')
COLOR_NAMES[clr_h2i('#54FF9F')]=_('Sea green')
COLOR_NAMES[clr_h2i('#551A8B')]=_('Purple')
COLOR_NAMES[clr_h2i('#553592')]=_('Blue-magenta violet')
COLOR_NAMES[clr_h2i('#555555')]=_('Davy\'s grey')
COLOR_NAMES[clr_h2i('#555D50')]=_('Ebony')
COLOR_NAMES[clr_h2i('#556B2F')]=_('Dark olive green')
COLOR_NAMES[clr_h2i('#560319')]=_('Dark scarlet')
COLOR_NAMES[clr_h2i('#563C5C')]=_('Pineapple')
COLOR_NAMES[clr_h2i('#568203')]=_('Avocado')
COLOR_NAMES[clr_h2i('#56887D')]=_('Wintergreen Dream')
COLOR_NAMES[clr_h2i('#56A0D3')]=_('Carolina blue')
COLOR_NAMES[clr_h2i('#58427C')]=_('Cyber grape')
COLOR_NAMES[clr_h2i('#59260B')]=_('Seal brown')
COLOR_NAMES[clr_h2i('#592720')]=_('Caput mortuum')
COLOR_NAMES[clr_h2i('#5946B2')]=_('Plump purple')
COLOR_NAMES[clr_h2i('#5A4FCF')]=_('Iris')
COLOR_NAMES[clr_h2i('#5B3256')]=_('Japanese violet')
COLOR_NAMES[clr_h2i('#5B92E5')]=_('United nations blue')
COLOR_NAMES[clr_h2i('#5CACEE')]=_('Steel blue')
COLOR_NAMES[clr_h2i('#5D3954')]=_('Dark byzantium')
COLOR_NAMES[clr_h2i('#5D478B')]=_('Medium purple')
COLOR_NAMES[clr_h2i('#5D89BA')]=_('Silver lake blue')
COLOR_NAMES[clr_h2i('#5D8AA8')]=_('Air Force blue')
COLOR_NAMES[clr_h2i('#5DA493')]=_('Polished Pine')
COLOR_NAMES[clr_h2i('#5DADEC')]=_('Blue jeans')
COLOR_NAMES[clr_h2i('#5E8C31')]=_('Maximum green')
COLOR_NAMES[clr_h2i('#5F8A8B')]=_('Steel teal')
COLOR_NAMES[clr_h2i('#5F9EA0')]=_('Cadet blue')
COLOR_NAMES[clr_h2i('#5FA778')]=_('Shiny shamrock')
COLOR_NAMES[clr_h2i('#602F6B')]=_('Imperial')
COLOR_NAMES[clr_h2i('#6050DC')]=_('Majorelle blue')
COLOR_NAMES[clr_h2i('#607B8B')]=_('Light sky blue')
COLOR_NAMES[clr_h2i('#6082B6')]=_('Glaucous')
COLOR_NAMES[clr_h2i('#614051')]=_('Eggplant')
COLOR_NAMES[clr_h2i('#635147')]=_('Umber')
COLOR_NAMES[clr_h2i('#63B8FF')]=_('Steel blue')
COLOR_NAMES[clr_h2i('#644117')]=_('Pullman brown')
COLOR_NAMES[clr_h2i('#645452')]=_('Wenge')
COLOR_NAMES[clr_h2i('#6495ED')]=_('Cornflower blue')
COLOR_NAMES[clr_h2i('#64E986')]=_('Very light malachite green')
COLOR_NAMES[clr_h2i('#65000B')]=_('Rosewood')
COLOR_NAMES[clr_h2i('#654321')]=_('Dark brown')
COLOR_NAMES[clr_h2i('#66023C')]=_('Imperial purple')
COLOR_NAMES[clr_h2i('#663399')]=_('Rebecca purple')
COLOR_NAMES[clr_h2i('#663854')]=_('Halaya ube')
COLOR_NAMES[clr_h2i('#664228')]=_('Van dyke brown')
COLOR_NAMES[clr_h2i('#66424D')]=_('Deep tuscan red')
COLOR_NAMES[clr_h2i('#664C28')]=_('Donkey brown')
COLOR_NAMES[clr_h2i('#665D1E')]=_('Antique bronze')
COLOR_NAMES[clr_h2i('#666699')]=_('Dark blue-gray')
COLOR_NAMES[clr_h2i('#6666FF')]=_('Very light blue')
COLOR_NAMES[clr_h2i('#668B8B')]=_('Pale turquoise')
COLOR_NAMES[clr_h2i('#669999')]=_('Desaturated cyan')
COLOR_NAMES[clr_h2i('#6699CC')]=_('Livid')
COLOR_NAMES[clr_h2i('#66B032')]=_('Green')
COLOR_NAMES[clr_h2i('#66CD00')]=_('Chartreuse')
COLOR_NAMES[clr_h2i('#66CDAA')]=_('Medium aquamarine')
COLOR_NAMES[clr_h2i('#66DDAA')]=_('Medium aquamarine')
COLOR_NAMES[clr_h2i('#66FF00')]=_('Bright green')
COLOR_NAMES[clr_h2i('#66FF66')]=_('Screamin\' green')
COLOR_NAMES[clr_h2i('#673147')]=_('Wine dregs')
COLOR_NAMES[clr_h2i('#674846')]=_('Rose ebony')
COLOR_NAMES[clr_h2i('#674C47')]=_('Medium taupe')
COLOR_NAMES[clr_h2i('#676767')]=_('Granite Gray')
COLOR_NAMES[clr_h2i('#679267')]=_('Russian green')
COLOR_NAMES[clr_h2i('#68228B')]=_('Dark orchid')
COLOR_NAMES[clr_h2i('#682860')]=_('Palatinate purple')
COLOR_NAMES[clr_h2i('#68838B')]=_('Light blue')
COLOR_NAMES[clr_h2i('#69359C')]=_('Purple heart')
COLOR_NAMES[clr_h2i('#6959CD')]=_('Slate blue')
COLOR_NAMES[clr_h2i('#696969')]=_('Dim gray')
COLOR_NAMES[clr_h2i('#698B22')]=_('Olive drab')
COLOR_NAMES[clr_h2i('#698B69')]=_('Dark sea green')
COLOR_NAMES[clr_h2i('#6A5ACD')]=_('Slate blue')
COLOR_NAMES[clr_h2i('#6B4423')]=_('Kobicha')
COLOR_NAMES[clr_h2i('#6B8E23')]=_('Olive drab')
COLOR_NAMES[clr_h2i('#6C2E1F')]=_('Liver (organ)')
COLOR_NAMES[clr_h2i('#6C3082')]=_('Eminence')
COLOR_NAMES[clr_h2i('#6C541E')]=_('Field drab')
COLOR_NAMES[clr_h2i('#6C7B8B')]=_('Slate gray')
COLOR_NAMES[clr_h2i('#6CA0DC')]=_('Little boy blue')
COLOR_NAMES[clr_h2i('#6CA6CD')]=_('Sky blue')
COLOR_NAMES[clr_h2i('#6D9BC3')]=_('Cerulean frost')
COLOR_NAMES[clr_h2i('#6E7B8B')]=_('Light steel blue')
COLOR_NAMES[clr_h2i('#6E7F80')]=_('Auro metal saurus')
COLOR_NAMES[clr_h2i('#6E8B3D')]=_('Dark olive green')
COLOR_NAMES[clr_h2i('#6EAEA1')]=_('Green Sheen')
COLOR_NAMES[clr_h2i('#6F00FF')]=_('Electric indigo')
COLOR_NAMES[clr_h2i('#6F2DA8')]=_('Grape')
COLOR_NAMES[clr_h2i('#6F4E37')]=_('Tuscan brown')
COLOR_NAMES[clr_h2i('#6F9940')]=_('Palm Leaf')
COLOR_NAMES[clr_h2i('#701C1C')]=_('Persian plum')
COLOR_NAMES[clr_h2i('#702670')]=_('Midnight')
COLOR_NAMES[clr_h2i('#702963')]=_('Byzantium')
COLOR_NAMES[clr_h2i('#703642')]=_('Catawba')
COLOR_NAMES[clr_h2i('#704214')]=_('Sepia')
COLOR_NAMES[clr_h2i('#704241')]=_('Roast coffee')
COLOR_NAMES[clr_h2i('#708090')]=_('Slate gray')
COLOR_NAMES[clr_h2i('#71A6D2')]=_('Iceberg')
COLOR_NAMES[clr_h2i('#71BC78')]=_('Iguana green')
COLOR_NAMES[clr_h2i('#722F37')]=_('Puce red')
COLOR_NAMES[clr_h2i('#727472')]=_('Nickel')
COLOR_NAMES[clr_h2i('#72A0C1')]=_('Air superiority blue')
COLOR_NAMES[clr_h2i('#733380')]=_('Maximum purple')
COLOR_NAMES[clr_h2i('#734F96')]=_('Dark lavender')
COLOR_NAMES[clr_h2i('#737000')]=_('Bronze yellow')
COLOR_NAMES[clr_h2i('#738276')]=_('Smoke')
COLOR_NAMES[clr_h2i('#738678')]=_('Xanadu')
COLOR_NAMES[clr_h2i('#73A9C2')]=_('Moonstone blue')
COLOR_NAMES[clr_h2i('#73C2FB')]=_('Maya blue')
COLOR_NAMES[clr_h2i('#746CC0')]=_('Toolbox')
COLOR_NAMES[clr_h2i('#74BBFB')]=_('Very light azure')
COLOR_NAMES[clr_h2i('#74C365')]=_('Mantis')
COLOR_NAMES[clr_h2i('#757575')]=_('Sonic silver')
COLOR_NAMES[clr_h2i('#76EE00')]=_('Chartreuse')
COLOR_NAMES[clr_h2i('#76EEC6')]=_('Aquamarine')
COLOR_NAMES[clr_h2i('#777696')]=_('Rhythm')
COLOR_NAMES[clr_h2i('#778899')]=_('Light slate gray')
COLOR_NAMES[clr_h2i('#778BA5')]=_('Shadow blue')
COLOR_NAMES[clr_h2i('#779ECB')]=_('Dark pastel blue')
COLOR_NAMES[clr_h2i('#77B5FE')]=_('French sky blue')
COLOR_NAMES[clr_h2i('#77DD77')]=_('Pastel green')
COLOR_NAMES[clr_h2i('#78184A')]=_('Pansy purple')
COLOR_NAMES[clr_h2i('#7851A9')]=_('Royal purple')
COLOR_NAMES[clr_h2i('#78866B')]=_('Camouflage green')
COLOR_NAMES[clr_h2i('#79443B')]=_('Medium tuscan red')
COLOR_NAMES[clr_h2i('#796878')]=_('Old lavender')
COLOR_NAMES[clr_h2i('#79CDCD')]=_('Dark slate gray')
COLOR_NAMES[clr_h2i('#7A378B')]=_('Medium orchid')
COLOR_NAMES[clr_h2i('#7A67EE')]=_('Slate blue')
COLOR_NAMES[clr_h2i('#7A8B8B')]=_('Light cyan')
COLOR_NAMES[clr_h2i('#7AC5CD')]=_('Cadet blue')
COLOR_NAMES[clr_h2i('#7B1113')]=_('UP maroon')
COLOR_NAMES[clr_h2i('#7B3F00')]=_('Chocolate (traditional)')
COLOR_NAMES[clr_h2i('#7B68EE')]=_('Medium slate blue')
COLOR_NAMES[clr_h2i('#7BB661')]=_('Bud green')
COLOR_NAMES[clr_h2i('#7C0A02')]=_('Barn red')
COLOR_NAMES[clr_h2i('#7C1C05')]=_('Kenyan copper')
COLOR_NAMES[clr_h2i('#7C4848')]=_('Tuscan red')
COLOR_NAMES[clr_h2i('#7C98AB')]=_('Weldon blue')
COLOR_NAMES[clr_h2i('#7C9ED9')]=_('Vista blue')
COLOR_NAMES[clr_h2i('#7CB9E8')]=_('Aero')
COLOR_NAMES[clr_h2i('#7CCD7C')]=_('Pale green')
COLOR_NAMES[clr_h2i('#7CFC00')]=_('Lawn green')
COLOR_NAMES[clr_h2i('#7D26CD')]=_('Purple')
COLOR_NAMES[clr_h2i('#7DF9FF')]=_('Electric blue')
COLOR_NAMES[clr_h2i('#7E5E60')]=_('Deep taupe')
COLOR_NAMES[clr_h2i('#7EC0EE')]=_('Sky blue')
COLOR_NAMES[clr_h2i('#7ED4E6')]=_('Middle blue')
COLOR_NAMES[clr_h2i('#7F00FF')]=_('Violet')
COLOR_NAMES[clr_h2i('#7F1734')]=_('Claret')
COLOR_NAMES[clr_h2i('#7FFF00')]=_('Chartreuse')
COLOR_NAMES[clr_h2i('#7FFFD4')]=_('Aquamarine')
COLOR_NAMES[clr_h2i('#800000')]=_('Maroon')
COLOR_NAMES[clr_h2i('#800020')]=_('Burgundy')
COLOR_NAMES[clr_h2i('#800080')]=_('Patriarch, purple')
COLOR_NAMES[clr_h2i('#801818')]=_('Falu red')
COLOR_NAMES[clr_h2i('#80461B')]=_('Russet')
COLOR_NAMES[clr_h2i('#807532')]=_('Spanish bistre')
COLOR_NAMES[clr_h2i('#808000')]=_('Olive')
COLOR_NAMES[clr_h2i('#808080')]=_('Trolley grey')
COLOR_NAMES[clr_h2i('#80DAEB')]=_('Medium sky blue')
COLOR_NAMES[clr_h2i('#811453')]=_('French plum')
COLOR_NAMES[clr_h2i('#81613C')]=_('Coyote brown')
COLOR_NAMES[clr_h2i('#820000')]=_('Deep maroon')
COLOR_NAMES[clr_h2i('#826644')]=_('Raw umber')
COLOR_NAMES[clr_h2i('#828E84')]=_('Dolphin gray')
COLOR_NAMES[clr_h2i('#832A0D')]=_('Smokey topaz')
COLOR_NAMES[clr_h2i('#836953')]=_('Pastel brown')
COLOR_NAMES[clr_h2i('#836FFF')]=_('Slate blue')
COLOR_NAMES[clr_h2i('#838996')]=_('Roman silver')
COLOR_NAMES[clr_h2i('#838B83')]=_('Honeydew')
COLOR_NAMES[clr_h2i('#838B8B')]=_('Azure')
COLOR_NAMES[clr_h2i('#841B2D')]=_('Antique ruby')
COLOR_NAMES[clr_h2i('#843F5B')]=_('Deep ruby')
COLOR_NAMES[clr_h2i('#8470FF')]=_('Light slate blue')
COLOR_NAMES[clr_h2i('#848482')]=_('Old silver')
COLOR_NAMES[clr_h2i('#84DE02')]=_('Alien armpit')
COLOR_NAMES[clr_h2i('#850101')]=_('Deep red')
COLOR_NAMES[clr_h2i('#856088')]=_('Chinese violet')
COLOR_NAMES[clr_h2i('#856D4D')]=_('French bistre')
COLOR_NAMES[clr_h2i('#85754E')]=_('Gold Fusion')
COLOR_NAMES[clr_h2i('#85BB65')]=_('Dollar bill')
COLOR_NAMES[clr_h2i('#860111')]=_('Red devil')
COLOR_NAMES[clr_h2i('#8601AF')]=_('Violet')
COLOR_NAMES[clr_h2i('#86608E')]=_('French lilac')
COLOR_NAMES[clr_h2i('#867E36')]=_('Old moss green')
COLOR_NAMES[clr_h2i('#872657')]=_('Dark raspberry')
COLOR_NAMES[clr_h2i('#873260')]=_('Boysenberry')
COLOR_NAMES[clr_h2i('#87A96B')]=_('Asparagus')
COLOR_NAMES[clr_h2i('#87CEEB')]=_('Sky blue')
COLOR_NAMES[clr_h2i('#87CEFA')]=_('Light sky blue')
COLOR_NAMES[clr_h2i('#87CEFF')]=_('Sky blue')
COLOR_NAMES[clr_h2i('#87D3F8')]=_('Pale cyan')
COLOR_NAMES[clr_h2i('#87FF2A')]=_('Spring Frost')
COLOR_NAMES[clr_h2i('#880085')]=_('Mardi gras')
COLOR_NAMES[clr_h2i('#8806CE')]=_('French violet')
COLOR_NAMES[clr_h2i('#882D17')]=_('Sienna')
COLOR_NAMES[clr_h2i('#885818')]=_('Grizzly')
COLOR_NAMES[clr_h2i('#88654E')]=_('Dark brown-tangelo')
COLOR_NAMES[clr_h2i('#8878C3')]=_('Ube')
COLOR_NAMES[clr_h2i('#88ACE0')]=_('Light cobalt blue')
COLOR_NAMES[clr_h2i('#88D8C0')]=_('Pearl aqua')
COLOR_NAMES[clr_h2i('#893843')]=_('Solid pink')
COLOR_NAMES[clr_h2i('#893F45')]=_('Cordovan')
COLOR_NAMES[clr_h2i('#8968CD')]=_('Medium purple')
COLOR_NAMES[clr_h2i('#89CFF0')]=_('Baby blue')
COLOR_NAMES[clr_h2i('#8A2BE2')]=_('Blue-violet')
COLOR_NAMES[clr_h2i('#8A3324')]=_('Burnt umber')
COLOR_NAMES[clr_h2i('#8A496B')]=_('Twilight lavender')
COLOR_NAMES[clr_h2i('#8A795D')]=_('Shadow')
COLOR_NAMES[clr_h2i('#8A7F80')]=_('Rocket metallic')
COLOR_NAMES[clr_h2i('#8A9A5B')]=_('Turtle green')
COLOR_NAMES[clr_h2i('#8AB9F1')]=_('Jordy blue')
COLOR_NAMES[clr_h2i('#8B0000')]=_('Dark red')
COLOR_NAMES[clr_h2i('#8B008B')]=_('Dark magenta')
COLOR_NAMES[clr_h2i('#8B0A50')]=_('Deep pink')
COLOR_NAMES[clr_h2i('#8B1A1A')]=_('Firebrick')
COLOR_NAMES[clr_h2i('#8B1C62')]=_('Maroon')
COLOR_NAMES[clr_h2i('#8B2252')]=_('Violet red')
COLOR_NAMES[clr_h2i('#8B2323')]=_('Brown')
COLOR_NAMES[clr_h2i('#8B2500')]=_('Orange red')
COLOR_NAMES[clr_h2i('#8B3626')]=_('Tomato')
COLOR_NAMES[clr_h2i('#8B3A3A')]=_('Indian red')
COLOR_NAMES[clr_h2i('#8B3A62')]=_('Hot pink')
COLOR_NAMES[clr_h2i('#8B3E2F')]=_('Coral')
COLOR_NAMES[clr_h2i('#8B4500')]=_('Dark orange')
COLOR_NAMES[clr_h2i('#8B4513')]=_('Saddle brown')
COLOR_NAMES[clr_h2i('#8B4726')]=_('Sienna')
COLOR_NAMES[clr_h2i('#8B475D')]=_('Pale violet red')
COLOR_NAMES[clr_h2i('#8B4789')]=_('Orchid')
COLOR_NAMES[clr_h2i('#8B4C39')]=_('Salmon')
COLOR_NAMES[clr_h2i('#8B5742')]=_('Light salmon')
COLOR_NAMES[clr_h2i('#8B5A00')]=_('Orange')
COLOR_NAMES[clr_h2i('#8B5A2B')]=_('Tan')
COLOR_NAMES[clr_h2i('#8B5f4D')]=_('Spicy mix')
COLOR_NAMES[clr_h2i('#8B5F65')]=_('Light pink')
COLOR_NAMES[clr_h2i('#8B636C')]=_('Pink')
COLOR_NAMES[clr_h2i('#8B658B')]=_('Dark goldenrod')
COLOR_NAMES[clr_h2i('#8B668B')]=_('Plum')
COLOR_NAMES[clr_h2i('#8B6914')]=_('Goldenrod')
COLOR_NAMES[clr_h2i('#8B6969')]=_('Rosy brown')
COLOR_NAMES[clr_h2i('#8B72BE')]=_('Middle blue purple')
COLOR_NAMES[clr_h2i('#8B7355')]=_('Burlywood')
COLOR_NAMES[clr_h2i('#8B7500')]=_('Gold')
COLOR_NAMES[clr_h2i('#8B7765')]=_('Peach puff')
COLOR_NAMES[clr_h2i('#8B795E')]=_('Navajo white')
COLOR_NAMES[clr_h2i('#8B7B8B')]=_('Thistle')
COLOR_NAMES[clr_h2i('#8B7D6B')]=_('Bisque')
COLOR_NAMES[clr_h2i('#8B7D7B')]=_('Misty rose')
COLOR_NAMES[clr_h2i('#8B7E66')]=_('Wheat')
COLOR_NAMES[clr_h2i('#8B814C')]=_('Light goldenrod')
COLOR_NAMES[clr_h2i('#8B8378')]=_('Antique white')
COLOR_NAMES[clr_h2i('#8B8386')]=_('Lavender blush')
COLOR_NAMES[clr_h2i('#8B8589')]=_('Taupe gray')
COLOR_NAMES[clr_h2i('#8B864E')]=_('Khaki')
COLOR_NAMES[clr_h2i('#8B8682')]=_('Seashell')
COLOR_NAMES[clr_h2i('#8B8878')]=_('Cornsilk')
COLOR_NAMES[clr_h2i('#8B8970')]=_('Lemon chiffon')
COLOR_NAMES[clr_h2i('#8B8989')]=_('Snow')
COLOR_NAMES[clr_h2i('#8B8B00')]=_('Yellow')
COLOR_NAMES[clr_h2i('#8B8B7A')]=_('Light yellow')
COLOR_NAMES[clr_h2i('#8B8B83')]=_('Ivory')
COLOR_NAMES[clr_h2i('#8BA8B7')]=_('Pewter blue')
COLOR_NAMES[clr_h2i('#8C92AC')]=_('Cool grey, Gray-blue')
COLOR_NAMES[clr_h2i('#8CBED6')]=_('Dark sky blue')
COLOR_NAMES[clr_h2i('#8D4E85')]=_('Razzmic berry')
COLOR_NAMES[clr_h2i('#8DA399')]=_('Morning blue')
COLOR_NAMES[clr_h2i('#8DB600')]=_('Apple green')
COLOR_NAMES[clr_h2i('#8DB6CD')]=_('Light sky blue')
COLOR_NAMES[clr_h2i('#8DD9CC')]=_('Middle blue green')
COLOR_NAMES[clr_h2i('#8DEEEE')]=_('Dark slate gray')
COLOR_NAMES[clr_h2i('#8E3A59')]=_('Quinacridone magenta')
COLOR_NAMES[clr_h2i('#8E4585')]=_('Plum')
COLOR_NAMES[clr_h2i('#8EE53F')]=_('Kiwi')
COLOR_NAMES[clr_h2i('#8EE5EE')]=_('Cadet blue')
COLOR_NAMES[clr_h2i('#8F00FF')]=_('Violet')
COLOR_NAMES[clr_h2i('#8F9779')]=_('Artichoke')
COLOR_NAMES[clr_h2i('#8FBC8F')]=_('Dark sea green')
COLOR_NAMES[clr_h2i('#8FD400')]=_('Sheen green')
COLOR_NAMES[clr_h2i('#905D5D')]=_('Rose taupe')
COLOR_NAMES[clr_h2i('#90EE90')]=_('Light green')
COLOR_NAMES[clr_h2i('#912CEE')]=_('Purple')
COLOR_NAMES[clr_h2i('#914E75')]=_('Sugar plum')
COLOR_NAMES[clr_h2i('#915C83')]=_('Antique fuchsia')
COLOR_NAMES[clr_h2i('#915F6D')]=_('Mauve taupe')
COLOR_NAMES[clr_h2i('#918151')]=_('Dark tan')
COLOR_NAMES[clr_h2i('#91A3B0')]=_('Cadet grey')
COLOR_NAMES[clr_h2i('#92000A')]=_('Sangria')
COLOR_NAMES[clr_h2i('#922724')]=_('Vivid auburn')
COLOR_NAMES[clr_h2i('#92A1CF')]=_('Ceil')
COLOR_NAMES[clr_h2i('#933D41')]=_('Smoky Topaz')
COLOR_NAMES[clr_h2i('#9370DB')]=_('Medium purple')
COLOR_NAMES[clr_h2i('#93C572')]=_('Pistachio')
COLOR_NAMES[clr_h2i('#93CCEA')]=_('Light cornflower blue')
COLOR_NAMES[clr_h2i('#9400D3')]=_('Dark violet')
COLOR_NAMES[clr_h2i('#9457EB')]=_('Lavender indigo, Navy purple')
COLOR_NAMES[clr_h2i('#954535')]=_('Chestnut')
COLOR_NAMES[clr_h2i('#960018')]=_('Carmine, Heidelberg red')
COLOR_NAMES[clr_h2i('#964B00')]=_('Brown (traditional)')
COLOR_NAMES[clr_h2i('#965A3E')]=_('Coconut')
COLOR_NAMES[clr_h2i('#966FD6')]=_('Dark pastel purple')
COLOR_NAMES[clr_h2i('#967117')]=_('Sandy taupe')
COLOR_NAMES[clr_h2i('#9678B6')]=_('Purple mountain majesty')
COLOR_NAMES[clr_h2i('#967BB6')]=_('Lavender purple')
COLOR_NAMES[clr_h2i('#96C8A2')]=_('Eton blue')
COLOR_NAMES[clr_h2i('#96CDCD')]=_('Pale turquoise')
COLOR_NAMES[clr_h2i('#96DED1')]=_('Pale robin egg blue')
COLOR_NAMES[clr_h2i('#979AAA')]=_('Manatee')
COLOR_NAMES[clr_h2i('#97FFFF')]=_('Dark slate gray')
COLOR_NAMES[clr_h2i('#980036')]=_('Pink raspberry')
COLOR_NAMES[clr_h2i('#986960')]=_('Dark chestnut')
COLOR_NAMES[clr_h2i('#987456')]=_('Liver chestnut')
COLOR_NAMES[clr_h2i('#987654')]=_('Pale brown')
COLOR_NAMES[clr_h2i('#98777B')]=_('Bazaar')
COLOR_NAMES[clr_h2i('#98817B')]=_('Cinereous')
COLOR_NAMES[clr_h2i('#989898')]=_('Spanish gray')
COLOR_NAMES[clr_h2i('#98F5FF')]=_('Cadet blue')
COLOR_NAMES[clr_h2i('#98FB98')]=_('Pale green')
COLOR_NAMES[clr_h2i('#98FF98')]=_('Mint green')
COLOR_NAMES[clr_h2i('#990000')]=_('Crimson red')
COLOR_NAMES[clr_h2i('#9932CC')]=_('Dark orchid')
COLOR_NAMES[clr_h2i('#9955BB')]=_('Deep lilac')
COLOR_NAMES[clr_h2i('#996515')]=_('Golden brown')
COLOR_NAMES[clr_h2i('#996600')]=_('Gamboge orange (brown)')
COLOR_NAMES[clr_h2i('#996666')]=_('Copper rose')
COLOR_NAMES[clr_h2i('#9966CC')]=_('Amethyst')
COLOR_NAMES[clr_h2i('#997A8D')]=_('Mountbatten pink')
COLOR_NAMES[clr_h2i('#99E6B3')]=_('Teal deer')
COLOR_NAMES[clr_h2i('#9A32CD')]=_('Dark orchid')
COLOR_NAMES[clr_h2i('#9A4EAE')]=_('Purpureus')
COLOR_NAMES[clr_h2i('#9AB973')]=_('Olivine')
COLOR_NAMES[clr_h2i('#9AC0CD')]=_('Light blue')
COLOR_NAMES[clr_h2i('#9ACD32')]=_('Yellow-green')
COLOR_NAMES[clr_h2i('#9AFF9A')]=_('Pale green')
COLOR_NAMES[clr_h2i('#9B111E')]=_('Ruby red')
COLOR_NAMES[clr_h2i('#9B30FF')]=_('Purple')
COLOR_NAMES[clr_h2i('#9B7653')]=_('Dirt')
COLOR_NAMES[clr_h2i('#9B870C')]=_('Dark yellow')
COLOR_NAMES[clr_h2i('#9BC4E2')]=_('Pale cerulean')
COLOR_NAMES[clr_h2i('#9BCD9B')]=_('Dark sea green')
COLOR_NAMES[clr_h2i('#9C2542')]=_('Big dip o\'ruby')
COLOR_NAMES[clr_h2i('#9C51B6')]=_('Purple Plum')
COLOR_NAMES[clr_h2i('#9C7C38')]=_('Metallic sunburst')
COLOR_NAMES[clr_h2i('#9C9C9C')]=_('Grey')
COLOR_NAMES[clr_h2i('#9D2933')]=_('Japanese carmine')
COLOR_NAMES[clr_h2i('#9DC209')]=_('Limerick')
COLOR_NAMES[clr_h2i('#9E1316')]=_('Spartan crimson')
COLOR_NAMES[clr_h2i('#9E5E6F')]=_('Rose dust')
COLOR_NAMES[clr_h2i('#9EFD38')]=_('French lime')
COLOR_NAMES[clr_h2i('#9F00C5')]=_('Purple')
COLOR_NAMES[clr_h2i('#9F00FF')]=_('Vivid violet')
COLOR_NAMES[clr_h2i('#9F1D35')]=_('Vivid burgundy')
COLOR_NAMES[clr_h2i('#9F2B68')]=_('Amaranth deep purple')
COLOR_NAMES[clr_h2i('#9F4576')]=_('Magenta haze')
COLOR_NAMES[clr_h2i('#9F79EE')]=_('Medium purple')
COLOR_NAMES[clr_h2i('#9F8170')]=_('Beaver')
COLOR_NAMES[clr_h2i('#9FA91F')]=_('Citron')
COLOR_NAMES[clr_h2i('#9FB6CD')]=_('Slate gray')
COLOR_NAMES[clr_h2i('#9FE2BF')]=_('Sea Foam green')
COLOR_NAMES[clr_h2i('#A020F0')]=_('Purple, Veronica')
COLOR_NAMES[clr_h2i('#A0522D')]=_('Sienna')
COLOR_NAMES[clr_h2i('#A0785A')]=_('Chamoisee')
COLOR_NAMES[clr_h2i('#A0D6B4')]=_('Turquoise green')
COLOR_NAMES[clr_h2i('#A0E6FF')]=_('Winter wizard')
COLOR_NAMES[clr_h2i('#A17A74')]=_('Burnished brown')
COLOR_NAMES[clr_h2i('#A1CAF1')]=_('Baby blue eyes')
COLOR_NAMES[clr_h2i('#A2006D')]=_('Flirt') COLOR_NAMES[clr_h2i('#A2A2D0')]=_('Blue bell') COLOR_NAMES[clr_h2i('#A2ADD0')]=_('Wild blue yonder') COLOR_NAMES[clr_h2i('#A2B5CD')]=_('Light steel blue') COLOR_NAMES[clr_h2i('#A2CD5A')]=_('Dark olive green') COLOR_NAMES[clr_h2i('#A3C1AD')]=_('Cambridge blue') COLOR_NAMES[clr_h2i('#A40000')]=_('Dark candy apple red') COLOR_NAMES[clr_h2i('#A45A52')]=_('Redwood') COLOR_NAMES[clr_h2i('#A4C639')]=_('Android green') COLOR_NAMES[clr_h2i('#A4D3EE')]=_('Light sky blue') COLOR_NAMES[clr_h2i('#A4DDED')]=_('Non-photo blue') COLOR_NAMES[clr_h2i('#A4F4F9')]=_('Waterspout') COLOR_NAMES[clr_h2i('#A50B5E')]=_('Jazzberry jam') COLOR_NAMES[clr_h2i('#A52A2A')]=_('Auburn, brown') COLOR_NAMES[clr_h2i('#A55353')]=_('Middle red purple') COLOR_NAMES[clr_h2i('#A57164')]=_('Blast-off bronze') COLOR_NAMES[clr_h2i('#A63A79')]=_('Maximum red purple') COLOR_NAMES[clr_h2i('#A67B5B')]=_('French beige, Tuscan tan') COLOR_NAMES[clr_h2i('#A6A6A6')]=_('Quick silver') COLOR_NAMES[clr_h2i('#A6D608')]=_('Vivid lime green') COLOR_NAMES[clr_h2i('#A6E7FF')]=_('Fresh air') COLOR_NAMES[clr_h2i('#A75502')]=_('Windsor tan') COLOR_NAMES[clr_h2i('#A76BCF')]=_('Rich lavender') COLOR_NAMES[clr_h2i('#A7F432')]=_('Green lizard') COLOR_NAMES[clr_h2i('#A7FC00')]=_('Spring bud') COLOR_NAMES[clr_h2i('#A81C07')]=_('Rufous') COLOR_NAMES[clr_h2i('#A83731')]=_('Sweet brown') COLOR_NAMES[clr_h2i('#A8516E')]=_('China rose') COLOR_NAMES[clr_h2i('#A8E4A0')]=_('Granny Smith apple') COLOR_NAMES[clr_h2i('#A9203E')]=_('Deep carmine') COLOR_NAMES[clr_h2i('#A95C68')]=_('Deep puce') COLOR_NAMES[clr_h2i('#A99A86')]=_('Grullo') COLOR_NAMES[clr_h2i('#A9A9A9')]=_('Dark medium gray') COLOR_NAMES[clr_h2i('#A9BA9D')]=_('Laurel green') COLOR_NAMES[clr_h2i('#AA00BB')]=_('Heliotrope magenta') COLOR_NAMES[clr_h2i('#AA381E')]=_('Chinese red') COLOR_NAMES[clr_h2i('#AA4069')]=_('Medium ruby') COLOR_NAMES[clr_h2i('#AA98A9')]=_('Heliotrope gray, Rose quartz') COLOR_NAMES[clr_h2i('#AAF0D1')]=_('Magic mint') COLOR_NAMES[clr_h2i('#AB274F')]=_('Amaranth purple') COLOR_NAMES[clr_h2i('#AB4B52')]=_('English red') COLOR_NAMES[clr_h2i('#AB4E52')]=_('Rose vale') COLOR_NAMES[clr_h2i('#AB82FF')]=_('Medium purple') COLOR_NAMES[clr_h2i('#AB92B3')]=_('Glossy grape') COLOR_NAMES[clr_h2i('#ABCDEF')]=_('Pale cornflower blue') COLOR_NAMES[clr_h2i('#AC1E44')]=_('French wine') COLOR_NAMES[clr_h2i('#ACACAC')]=_('Silver chalice') COLOR_NAMES[clr_h2i('#ACACE6')]=_('Maximum blue purple') COLOR_NAMES[clr_h2i('#ACBF60')]=_('Middle green yellow') COLOR_NAMES[clr_h2i('#ACE1AF')]=_('Celadon') COLOR_NAMES[clr_h2i('#ACE5EE')]=_('Blizzard blue, Blue Lagoon') COLOR_NAMES[clr_h2i('#AD4379')]=_('Mystic maroon') COLOR_NAMES[clr_h2i('#AD6F69')]=_('Copper penny') COLOR_NAMES[clr_h2i('#ADD8E6')]=_('Light blue') COLOR_NAMES[clr_h2i('#ADDFAD')]=_('Light moss green') COLOR_NAMES[clr_h2i('#ADFF2F')]=_('Green-yellow') COLOR_NAMES[clr_h2i('#AE0C00')]=_('Mordant red 19') COLOR_NAMES[clr_h2i('#AE2029')]=_('Upsdell red') COLOR_NAMES[clr_h2i('#AE98AA')]=_('Lilac luster') COLOR_NAMES[clr_h2i('#AEC6CF')]=_('Pastel blue') COLOR_NAMES[clr_h2i('#AF002A')]=_('Alabama crimson') COLOR_NAMES[clr_h2i('#AF4035')]=_('Pale carmine') COLOR_NAMES[clr_h2i('#AF6E4D')]=_('Brown sugar') COLOR_NAMES[clr_h2i('#AFEEEE')]=_('Pale blue') COLOR_NAMES[clr_h2i('#B03060')]=_('Rich maroon') COLOR_NAMES[clr_h2i('#B05C52')]=_('Giant\'s club') COLOR_NAMES[clr_h2i('#B06500')]=_('Ginger') COLOR_NAMES[clr_h2i('#B0BF1A')]=_('Acid green') COLOR_NAMES[clr_h2i('#B0C4DE')]=_('Light steel blue') 
COLOR_NAMES[clr_h2i('#B0E0E6')]=_('Powder blue') COLOR_NAMES[clr_h2i('#B0E2FF')]=_('Light sky blue') COLOR_NAMES[clr_h2i('#B19CD9')]=_('Light pastel purple') COLOR_NAMES[clr_h2i('#B22222')]=_('Firebrick') COLOR_NAMES[clr_h2i('#B23AEE')]=_('Dark orchid') COLOR_NAMES[clr_h2i('#B284BE')]=_('African violet') COLOR_NAMES[clr_h2i('#B2BEB5')]=_('Ash grey') COLOR_NAMES[clr_h2i('#B2DFEE')]=_('Light blue') COLOR_NAMES[clr_h2i('#B2EC5D')]=_('Inchworm') COLOR_NAMES[clr_h2i('#B2FFFF')]=_('Celeste, Italian sky blue') COLOR_NAMES[clr_h2i('#B31B1B')]=_('Carnelian, Cornell red') COLOR_NAMES[clr_h2i('#B3446C')]=_('Irresistible, Raspberry rose') COLOR_NAMES[clr_h2i('#B38B6D')]=_('Light taupe') COLOR_NAMES[clr_h2i('#B39EB5')]=_('Pastel purple') COLOR_NAMES[clr_h2i('#B3EE3A')]=_('Olive drab') COLOR_NAMES[clr_h2i('#B452CD')]=_('Medium orchid') COLOR_NAMES[clr_h2i('#B48395')]=_('English lavender') COLOR_NAMES[clr_h2i('#B4CDCD')]=_('Light cyan') COLOR_NAMES[clr_h2i('#B4EEB4')]=_('Dark sea green') COLOR_NAMES[clr_h2i('#B53389')]=_('Fandango') COLOR_NAMES[clr_h2i('#B5651D')]=_('Light brown') COLOR_NAMES[clr_h2i('#B57281')]=_('Turkish rose') COLOR_NAMES[clr_h2i('#B57EDC')]=_('Lavender (floral)') COLOR_NAMES[clr_h2i('#B5A642')]=_('Brass') COLOR_NAMES[clr_h2i('#B5B5B5')]=_('Grey') COLOR_NAMES[clr_h2i('#B666D2')]=_('Rich lilac') COLOR_NAMES[clr_h2i('#B7410E')]=_('Rust') COLOR_NAMES[clr_h2i('#B768A2')]=_('Pearly purple') COLOR_NAMES[clr_h2i('#B76E79')]=_('Rose gold') COLOR_NAMES[clr_h2i('#B784A7')]=_('Opera mauve') COLOR_NAMES[clr_h2i('#B78727')]=_('University of California Gold') COLOR_NAMES[clr_h2i('#B80CE3')]=_('Vivid mulberry') COLOR_NAMES[clr_h2i('#B86D29')]=_('Liver (dogs)') COLOR_NAMES[clr_h2i('#B87333')]=_('Copper') COLOR_NAMES[clr_h2i('#B8860B')]=_('Dark goldenrod') COLOR_NAMES[clr_h2i('#B94E48')]=_('Deep chestnut') COLOR_NAMES[clr_h2i('#B9D3EE')]=_('Slate gray') COLOR_NAMES[clr_h2i('#B9F2FF')]=_('Diamond') COLOR_NAMES[clr_h2i('#BA160C')]=_('International orange') COLOR_NAMES[clr_h2i('#BA55D3')]=_('Medium orchid') COLOR_NAMES[clr_h2i('#BA8759')]=_('Deer') COLOR_NAMES[clr_h2i('#BB3385')]=_('Medium red-violet') COLOR_NAMES[clr_h2i('#BB6528')]=_('Ruddy brown') COLOR_NAMES[clr_h2i('#BBB477')]=_('Misty moss') COLOR_NAMES[clr_h2i('#BBFFFF')]=_('Pale turquoise') COLOR_NAMES[clr_h2i('#BC8F8F')]=_('Rosy brown') COLOR_NAMES[clr_h2i('#BC987E')]=_('Pale taupe') COLOR_NAMES[clr_h2i('#BCB88A')]=_('Sage') COLOR_NAMES[clr_h2i('#BCD2EE')]=_('Light steel blue') COLOR_NAMES[clr_h2i('#BCD4E6')]=_('Pale aqua') COLOR_NAMES[clr_h2i('#BCEE68')]=_('Dark olive green') COLOR_NAMES[clr_h2i('#BD33A4')]=_('Byzantine') COLOR_NAMES[clr_h2i('#BDB76B')]=_('Dark khaki') COLOR_NAMES[clr_h2i('#BDDA57')]=_('June bud') COLOR_NAMES[clr_h2i('#BE0032')]=_('Crimson glory') COLOR_NAMES[clr_h2i('#BE4F62')]=_('Popstar') COLOR_NAMES[clr_h2i('#BEBEBE')]=_('Gray') COLOR_NAMES[clr_h2i('#BF00FF')]=_('Electric purple') COLOR_NAMES[clr_h2i('#BF3EFF')]=_('Dark orchid') COLOR_NAMES[clr_h2i('#BF4F51')]=_('Bittersweet shimmer') COLOR_NAMES[clr_h2i('#BF94E4')]=_('Bright lavender') COLOR_NAMES[clr_h2i('#BFAFB2')]=_('Black shadows') COLOR_NAMES[clr_h2i('#BFC1C2')]=_('Silver sand') COLOR_NAMES[clr_h2i('#BFEFFF')]=_('Light blue') COLOR_NAMES[clr_h2i('#BFFF00')]=_('Bitter lime') COLOR_NAMES[clr_h2i('#C0362C')]=_('International orange (Golden Gate Bridge)') COLOR_NAMES[clr_h2i('#C04000')]=_('Mahogany') COLOR_NAMES[clr_h2i('#C08081')]=_('Old rose') COLOR_NAMES[clr_h2i('#C09999')]=_('Tuscany') COLOR_NAMES[clr_h2i('#C0C0C0')]=_('Silver') 
COLOR_NAMES[clr_h2i('#C0FF3E')]=_('Olive drab') COLOR_NAMES[clr_h2i('#C154C1')]=_('Deep fuchsia') COLOR_NAMES[clr_h2i('#C19A6B')]=_('Camel, Desert, Wood brown') COLOR_NAMES[clr_h2i('#C1CDC1')]=_('Honeydew') COLOR_NAMES[clr_h2i('#C1CDCD')]=_('Azure') COLOR_NAMES[clr_h2i('#C1FFC1')]=_('Dark sea green') COLOR_NAMES[clr_h2i('#C21E56')]=_('Rose red') COLOR_NAMES[clr_h2i('#C23B22')]=_('Dark pastel red') COLOR_NAMES[clr_h2i('#C2B280')]=_('Sand') COLOR_NAMES[clr_h2i('#C30B4E')]=_('Pictorial carmine') COLOR_NAMES[clr_h2i('#C32148')]=_('Bright maroon') COLOR_NAMES[clr_h2i('#C39953')]=_('Aztec gold') COLOR_NAMES[clr_h2i('#C3B091')]=_('Khaki') COLOR_NAMES[clr_h2i('#C40233')]=_('Red') COLOR_NAMES[clr_h2i('#C41E3A')]=_('Cardinal') COLOR_NAMES[clr_h2i('#C46210')]=_('Alloy orange') COLOR_NAMES[clr_h2i('#C4AEAD')]=_('Silver pink') COLOR_NAMES[clr_h2i('#C4C3D0')]=_('Lavender gray') COLOR_NAMES[clr_h2i('#C4D8E2')]=_('Columbia blue') COLOR_NAMES[clr_h2i('#C53151')]=_('Dingy dungeon') COLOR_NAMES[clr_h2i('#C54B8C')]=_('Mulberry') COLOR_NAMES[clr_h2i('#C5B358')]=_('Vegas gold') COLOR_NAMES[clr_h2i('#C6E2FF')]=_('Slate gray') COLOR_NAMES[clr_h2i('#C71585')]=_('Medium violet-red') COLOR_NAMES[clr_h2i('#C72C48')]=_('French raspberry') COLOR_NAMES[clr_h2i('#C74375')]=_('Fuchsia rose') COLOR_NAMES[clr_h2i('#C80815')]=_('Venetian red') COLOR_NAMES[clr_h2i('#C84186')]=_('Smitten') COLOR_NAMES[clr_h2i('#C8A2C8')]=_('Lilac') COLOR_NAMES[clr_h2i('#C8AD7F')]=_('Light french beige') COLOR_NAMES[clr_h2i('#C90016')]=_('Harvard crimson') COLOR_NAMES[clr_h2i('#C95A49')]=_('Cedar Chest') COLOR_NAMES[clr_h2i('#C9A0DC')]=_('Wisteria') COLOR_NAMES[clr_h2i('#C9C0BB')]=_('Pale silver') COLOR_NAMES[clr_h2i('#C9DC87')]=_('Medium spring bud') COLOR_NAMES[clr_h2i('#C9FFE5')]=_('Aero blue') COLOR_NAMES[clr_h2i('#CA1F7B')]=_('Magenta (dye)') COLOR_NAMES[clr_h2i('#CA2C92')]=_('Royal fuchsia') COLOR_NAMES[clr_h2i('#CAE00D')]=_('Bitter lemon') COLOR_NAMES[clr_h2i('#CAE1FF')]=_('Light steel blue') COLOR_NAMES[clr_h2i('#CAFF70')]=_('Dark olive green') COLOR_NAMES[clr_h2i('#CB410B')]=_('Sinopia') COLOR_NAMES[clr_h2i('#CB4154')]=_('Brick red') COLOR_NAMES[clr_h2i('#CB6D51')]=_('Copper red') COLOR_NAMES[clr_h2i('#CB99C9')]=_('Pastel violet') COLOR_NAMES[clr_h2i('#CBA135')]=_('Satin sheen gold') COLOR_NAMES[clr_h2i('#CC0000')]=_('Boston university red') COLOR_NAMES[clr_h2i('#CC0033')]=_('Vivid crimson') COLOR_NAMES[clr_h2i('#CC00CC')]=_('Deep magenta') COLOR_NAMES[clr_h2i('#CC00FF')]=_('Vivid orchid') COLOR_NAMES[clr_h2i('#CC3333')]=_('Persian red') COLOR_NAMES[clr_h2i('#CC3336')]=_('Madder lake') COLOR_NAMES[clr_h2i('#CC338B')]=_('Magenta-pink') COLOR_NAMES[clr_h2i('#CC33CC')]=_('Steel pink') COLOR_NAMES[clr_h2i('#CC397B')]=_('Fuchsia purple') COLOR_NAMES[clr_h2i('#CC474B')]=_('English vermillion') COLOR_NAMES[clr_h2i('#CC4E5C')]=_('Dark terra cotta') COLOR_NAMES[clr_h2i('#CC5500')]=_('Burnt orange') COLOR_NAMES[clr_h2i('#CC6666')]=_('Fuzzy Wuzzy') COLOR_NAMES[clr_h2i('#CC7722')]=_('Ochre') COLOR_NAMES[clr_h2i('#CC8899')]=_('Puce') COLOR_NAMES[clr_h2i('#CC9900')]=_('Vivid amber') COLOR_NAMES[clr_h2i('#cc9966')]=_('Brown yellow') COLOR_NAMES[clr_h2i('#CC99CC')]=_('Light grayish magenta') COLOR_NAMES[clr_h2i('#CC99FF')]=_('Pale violet') COLOR_NAMES[clr_h2i('#CCA01D')]=_('Lemon curry') COLOR_NAMES[clr_h2i('#CCCCFF')]=_('Lavender blue, Periwinkle') COLOR_NAMES[clr_h2i('#CCFF00')]=_('Fluorescent yellow') COLOR_NAMES[clr_h2i('#CD0000')]=_('Red') COLOR_NAMES[clr_h2i('#CD00CD')]=_('Magenta') COLOR_NAMES[clr_h2i('#CD1076')]=_('Deep pink') 
COLOR_NAMES[clr_h2i('#CD2626')]=_('Firebrick') COLOR_NAMES[clr_h2i('#CD2990')]=_('Maroon') COLOR_NAMES[clr_h2i('#CD3278')]=_('Violet red') COLOR_NAMES[clr_h2i('#CD3333')]=_('Brown') COLOR_NAMES[clr_h2i('#CD3700')]=_('Orange red') COLOR_NAMES[clr_h2i('#CD4F39')]=_('Tomato') COLOR_NAMES[clr_h2i('#CD5555')]=_('Indian red') COLOR_NAMES[clr_h2i('#CD5700')]=_('Tenne (tawny)') COLOR_NAMES[clr_h2i('#CD5B45')]=_('Dark coral') COLOR_NAMES[clr_h2i('#CD5C5C')]=_('Indian red') COLOR_NAMES[clr_h2i('#CD607E')]=_('Cinnamon satin') COLOR_NAMES[clr_h2i('#CD6090')]=_('Hot pink') COLOR_NAMES[clr_h2i('#CD6600')]=_('Dark orange') COLOR_NAMES[clr_h2i('#CD661D')]=_('Chocolate') COLOR_NAMES[clr_h2i('#CD6839')]=_('Sienna') COLOR_NAMES[clr_h2i('#CD6889')]=_('Pale violet red') COLOR_NAMES[clr_h2i('#CD69C9')]=_('Orchid') COLOR_NAMES[clr_h2i('#CD7054')]=_('Salmon') COLOR_NAMES[clr_h2i('#CD7F32')]=_('Bronze') COLOR_NAMES[clr_h2i('#CD8162')]=_('Light salmon') COLOR_NAMES[clr_h2i('#CD8500')]=_('Orange') COLOR_NAMES[clr_h2i('#CD853F')]=_('Peru') COLOR_NAMES[clr_h2i('#CD8C95')]=_('Light pink') COLOR_NAMES[clr_h2i('#CD919E')]=_('Pink') COLOR_NAMES[clr_h2i('#CD950C')]=_('Dark goldenrod') COLOR_NAMES[clr_h2i('#CD9575')]=_('Antique brass') COLOR_NAMES[clr_h2i('#CD96CD')]=_('Plum') COLOR_NAMES[clr_h2i('#CD9B1D')]=_('Goldenrod') COLOR_NAMES[clr_h2i('#CD9B9B')]=_('Rosy brown') COLOR_NAMES[clr_h2i('#CDA4DE')]=_('Tropical violet') COLOR_NAMES[clr_h2i('#CDAA7D')]=_('Burlywood') COLOR_NAMES[clr_h2i('#CDAD00')]=_('Gold') COLOR_NAMES[clr_h2i('#CDAF95')]=_('Peach puff') COLOR_NAMES[clr_h2i('#CDB38B')]=_('Navajo white') COLOR_NAMES[clr_h2i('#CDB5CD')]=_('Thistle') COLOR_NAMES[clr_h2i('#CDB79E')]=_('Bisque') COLOR_NAMES[clr_h2i('#CDB7B5')]=_('Misty rose') COLOR_NAMES[clr_h2i('#CDBA96')]=_('Wheat') COLOR_NAMES[clr_h2i('#CDBE70')]=_('Light goldenrod') COLOR_NAMES[clr_h2i('#CDC0B0')]=_('Antique white') COLOR_NAMES[clr_h2i('#CDC1C5')]=_('Lavender blush') COLOR_NAMES[clr_h2i('#CDC5BF')]=_('Seashell') COLOR_NAMES[clr_h2i('#CDC673')]=_('Khaki') COLOR_NAMES[clr_h2i('#CDC8B1')]=_('Cornsilk') COLOR_NAMES[clr_h2i('#CDC9A5')]=_('Lemon chiffon') COLOR_NAMES[clr_h2i('#CDC9C9')]=_('Snow') COLOR_NAMES[clr_h2i('#CDCD00')]=_('Yellow') COLOR_NAMES[clr_h2i('#CDCDB4')]=_('Light yellow') COLOR_NAMES[clr_h2i('#CDCDC1')]=_('Ivory') COLOR_NAMES[clr_h2i('#CE2029')]=_('Fire engine red') COLOR_NAMES[clr_h2i('#CE4676')]=_('Ruber') COLOR_NAMES[clr_h2i('#CEC8EF')]=_('Soap') COLOR_NAMES[clr_h2i('#CEFF00')]=_('Volt') COLOR_NAMES[clr_h2i('#CF1020')]=_('Lava') COLOR_NAMES[clr_h2i('#CF3476')]=_('Telemagenta') COLOR_NAMES[clr_h2i('#CF6BA9')]=_('Super pink') COLOR_NAMES[clr_h2i('#CF71AF')]=_('Sky magenta') COLOR_NAMES[clr_h2i('#CFB53B')]=_('Old gold') COLOR_NAMES[clr_h2i('#CFCFC4')]=_('Pastel gray') COLOR_NAMES[clr_h2i('#CFCFCF')]=_('Gray') COLOR_NAMES[clr_h2i('#D02090')]=_('Violet red') COLOR_NAMES[clr_h2i('#D0417E')]=_('Magenta') COLOR_NAMES[clr_h2i('#D0F0C0')]=_('Tea green') COLOR_NAMES[clr_h2i('#D0FF14')]=_('Arctic lime') COLOR_NAMES[clr_h2i('#D10047')]=_('Spanish carmine') COLOR_NAMES[clr_h2i('#D10056')]=_('Rubine red') COLOR_NAMES[clr_h2i('#D15FEE')]=_('Medium orchid') COLOR_NAMES[clr_h2i('#D19FE8')]=_('Bright ube') COLOR_NAMES[clr_h2i('#D1BEA8')]=_('Dark vanilla') COLOR_NAMES[clr_h2i('#D1E231')]=_('Pear') COLOR_NAMES[clr_h2i('#D1EEEE')]=_('Light cyan') COLOR_NAMES[clr_h2i('#D2691E')]=_('Chocolate, Cocoa brown') COLOR_NAMES[clr_h2i('#D2B48C')]=_('Tan') COLOR_NAMES[clr_h2i('#D3003F')]=_('Utah Crimson') COLOR_NAMES[clr_h2i('#D3212D')]=_('Amaranth red') 
COLOR_NAMES[clr_h2i('#D39BCB')]=_('Light medium orchid') COLOR_NAMES[clr_h2i('#D3D3D3')]=_('Light gray') COLOR_NAMES[clr_h2i('#D40000')]=_('Rosso corsa') COLOR_NAMES[clr_h2i('#D470A2')]=_('Wild orchid') COLOR_NAMES[clr_h2i('#D473D4')]=_('Deep mauve') COLOR_NAMES[clr_h2i('#D4AF37')]=_('Gold (metallic)') COLOR_NAMES[clr_h2i('#D65282')]=_('Mystic') COLOR_NAMES[clr_h2i('#D68A59')]=_('Raw sienna') COLOR_NAMES[clr_h2i('#D6CADD')]=_('Languid lavender') COLOR_NAMES[clr_h2i('#D70040')]=_('Rich carmine') COLOR_NAMES[clr_h2i('#D70A53')]=_('Debian red') COLOR_NAMES[clr_h2i('#D71868')]=_('Dogwood rose') COLOR_NAMES[clr_h2i('#D73B3E')]=_('Jasper') COLOR_NAMES[clr_h2i('#D74894')]=_('Pink') COLOR_NAMES[clr_h2i('#D7837F')]=_('New York pink') COLOR_NAMES[clr_h2i('#D891EF')]=_('Bright lilac') COLOR_NAMES[clr_h2i('#D8B2D1')]=_('Pink lavender') COLOR_NAMES[clr_h2i('#D8BFD8')]=_('Thistle') COLOR_NAMES[clr_h2i('#D9004C')]=_('UA red') COLOR_NAMES[clr_h2i('#D92121')]=_('Maximum red') COLOR_NAMES[clr_h2i('#D9381E')]=_('Vermilion') COLOR_NAMES[clr_h2i('#D9603B')]=_('Medium vermilion') COLOR_NAMES[clr_h2i('#D982B5')]=_('Middle purple') COLOR_NAMES[clr_h2i('#D98695')]=_('Shimmering blush') COLOR_NAMES[clr_h2i('#D99058')]=_('Persian orange') COLOR_NAMES[clr_h2i('#D998A0')]=_('Parrot pink') COLOR_NAMES[clr_h2i('#D9E650')]=_('Maximum green yellow') COLOR_NAMES[clr_h2i('#DA1D81')]=_('Vivid cerise') COLOR_NAMES[clr_h2i('#DA2C43')]=_('Rusty red') COLOR_NAMES[clr_h2i('#DA3287')]=_('Deep cerise') COLOR_NAMES[clr_h2i('#DA614E')]=_('Jelly bean') COLOR_NAMES[clr_h2i('#DA70D6')]=_('Orchid') COLOR_NAMES[clr_h2i('#DA8A67')]=_('Pale copper') COLOR_NAMES[clr_h2i('#DA9100')]=_('Harvest gold') COLOR_NAMES[clr_h2i('#DAA520')]=_('Goldenrod') COLOR_NAMES[clr_h2i('#DB7093')]=_('Pale red-violet') COLOR_NAMES[clr_h2i('#DBD7D2')]=_('Timberwolf') COLOR_NAMES[clr_h2i('#DBE9F4')]=_('Azureish white') COLOR_NAMES[clr_h2i('#DC143C')]=_('Crimson') COLOR_NAMES[clr_h2i('#DCD0FF')]=_('Pale lavender') COLOR_NAMES[clr_h2i('#DCDCDC')]=_('Gainsboro') COLOR_NAMES[clr_h2i('#DDA0DD')]=_('Medium lavender magenta, Pale plum') COLOR_NAMES[clr_h2i('#DDADAF')]=_('Pale chestnut') COLOR_NAMES[clr_h2i('#DDE26A')]=_('Booger buster') COLOR_NAMES[clr_h2i('#DE3163')]=_('Cherry') COLOR_NAMES[clr_h2i('#DE5285')]=_('Fandango pink') COLOR_NAMES[clr_h2i('#DE5D83')]=_('Blush') COLOR_NAMES[clr_h2i('#DE6FA1')]=_('China pink, Liseran purple') COLOR_NAMES[clr_h2i('#DEA5A4')]=_('Pastel pink') COLOR_NAMES[clr_h2i('#DEAA88')]=_('Tumbleweed') COLOR_NAMES[clr_h2i('#DEB887')]=_('Burlywood') COLOR_NAMES[clr_h2i('#DF00FF')]=_('Phlox, Psychedelic purple') COLOR_NAMES[clr_h2i('#DF6124')]=_('Vivid red-tangelo') COLOR_NAMES[clr_h2i('#DF73FF')]=_('Heliotrope') COLOR_NAMES[clr_h2i('#DFFF00')]=_('Chartreuse (traditional)') COLOR_NAMES[clr_h2i('#E0115F')]=_('Ruby') COLOR_NAMES[clr_h2i('#E0218A')]=_('Barbie pink') COLOR_NAMES[clr_h2i('#E03C31')]=_('CG red') COLOR_NAMES[clr_h2i('#E066FF')]=_('Medium orchid') COLOR_NAMES[clr_h2i('#E08D3C')]=_('Tiger\'s eye') COLOR_NAMES[clr_h2i('#E0B0FF')]=_('Mauve') COLOR_NAMES[clr_h2i('#E0EEE0')]=_('Honeydew') COLOR_NAMES[clr_h2i('#E0EEEE')]=_('Azure') COLOR_NAMES[clr_h2i('#E0FFFF')]=_('Light cyan') COLOR_NAMES[clr_h2i('#E12C2C')]=_('Permanent geranium lake') COLOR_NAMES[clr_h2i('#E18E96')]=_('Ruddy pink') COLOR_NAMES[clr_h2i('#E1A95F')]=_('Earth yellow') COLOR_NAMES[clr_h2i('#E1AD21')]=_('Urobilin') COLOR_NAMES[clr_h2i('#E2062C')]=_('Medium candy apple red') COLOR_NAMES[clr_h2i('#E25098')]=_('Raspberry pink') COLOR_NAMES[clr_h2i('#E25822')]=_('Flame') 
COLOR_NAMES[clr_h2i('#E2725B')]=_('Terra cotta') COLOR_NAMES[clr_h2i('#E30022')]=_('Cadmium red') COLOR_NAMES[clr_h2i('#E30B5D')]=_('Raspberry') COLOR_NAMES[clr_h2i('#E3256B')]=_('Razzmatazz') COLOR_NAMES[clr_h2i('#E32636')]=_('Alizarin crimson, Rose madder') COLOR_NAMES[clr_h2i('#E34234')]=_('Cinnabar, Vermilion') COLOR_NAMES[clr_h2i('#E3A857')]=_('Indian yellow') COLOR_NAMES[clr_h2i('#E3AB57')]=_('Sunray') COLOR_NAMES[clr_h2i('#E3DAC9')]=_('Bone') COLOR_NAMES[clr_h2i('#E3F988')]=_('Mindaro') COLOR_NAMES[clr_h2i('#E3FF00')]=_('Lemon lime') COLOR_NAMES[clr_h2i('#E40078')]=_('Red-purple') COLOR_NAMES[clr_h2i('#E4007C')]=_('Mexican pink') COLOR_NAMES[clr_h2i('#E4717A')]=_('Tango pink') COLOR_NAMES[clr_h2i('#E48400')]=_('Fulvous') COLOR_NAMES[clr_h2i('#E49B0F')]=_('Gamboge') COLOR_NAMES[clr_h2i('#E4D00A')]=_('Citrine') COLOR_NAMES[clr_h2i('#E4D96F')]=_('Straw') COLOR_NAMES[clr_h2i('#E51A4C')]=_('Spanish crimson') COLOR_NAMES[clr_h2i('#E52B50')]=_('Amaranth') COLOR_NAMES[clr_h2i('#E56024')]=_('Vivid vermilion') COLOR_NAMES[clr_h2i('#E58E73')]=_('Middle red') COLOR_NAMES[clr_h2i('#E5AA70')]=_('Fawn') COLOR_NAMES[clr_h2i('#E5B73B')]=_('Meat brown') COLOR_NAMES[clr_h2i('#E5CCC9')]=_('Dust storm') COLOR_NAMES[clr_h2i('#E5E4E2')]=_('Platinum') COLOR_NAMES[clr_h2i('#E60026')]=_('Spanish red') COLOR_NAMES[clr_h2i('#E62020')]=_('Lust') COLOR_NAMES[clr_h2i('#E63E62')]=_('Paradise pink') COLOR_NAMES[clr_h2i('#E66771')]=_('Light carmine pink') COLOR_NAMES[clr_h2i('#E68FAC')]=_('Light Thulian pink') COLOR_NAMES[clr_h2i('#E6A8D7')]=_('Light orchid') COLOR_NAMES[clr_h2i('#E6BE8A')]=_('Pale gold') COLOR_NAMES[clr_h2i('#E6E200')]=_('Peridot') COLOR_NAMES[clr_h2i('#E6E6FA')]=_('Lavender mist') COLOR_NAMES[clr_h2i('#E6E8FA')]=_('Glitter') COLOR_NAMES[clr_h2i('#E75480')]=_('Dark pink') COLOR_NAMES[clr_h2i('#E79FC4')]=_('Kobi') COLOR_NAMES[clr_h2i('#E7ACCF')]=_('Pink pearl') COLOR_NAMES[clr_h2i('#E7FEFF')]=_('Bubbles') COLOR_NAMES[clr_h2i('#E8000D')]=_('KU crimson') COLOR_NAMES[clr_h2i('#E86100')]=_('Spanish orange') COLOR_NAMES[clr_h2i('#E88E5A')]=_('Big foot feet') COLOR_NAMES[clr_h2i('#E8CCD7')]=_('Queen pink') COLOR_NAMES[clr_h2i('#E8E8E8')]=_('Grey') COLOR_NAMES[clr_h2i('#E8F48C')]=_('Key Lime') COLOR_NAMES[clr_h2i('#E936A7')]=_('Frostbite') COLOR_NAMES[clr_h2i('#E9692C')]=_('Deep carrot orange') COLOR_NAMES[clr_h2i('#E97451')]=_('Burnt sienna, Light red ochre') COLOR_NAMES[clr_h2i('#E9967A')]=_('Dark salmon') COLOR_NAMES[clr_h2i('#E9D66B')]=_('Arylide yellow') COLOR_NAMES[clr_h2i('#E9FFDB')]=_('Nyanza') COLOR_NAMES[clr_h2i('#EA3C53')]=_('Desire') COLOR_NAMES[clr_h2i('#EAA221')]=_('Marigold') COLOR_NAMES[clr_h2i('#EAE0C8')]=_('Pearl') COLOR_NAMES[clr_h2i('#EB4C42')]=_('Carmine pink') COLOR_NAMES[clr_h2i('#EC3B83')]=_('Cerise pink') COLOR_NAMES[clr_h2i('#EC5800')]=_('Persimmon') COLOR_NAMES[clr_h2i('#ECB176')]=_('Middle yellow red') COLOR_NAMES[clr_h2i('#ECD540')]=_('Sandstorm') COLOR_NAMES[clr_h2i('#ECEBBD')]=_('Pale spring bud') COLOR_NAMES[clr_h2i('#ED1C24')]=_('Red') COLOR_NAMES[clr_h2i('#ED2939')]=_('Imperial red') COLOR_NAMES[clr_h2i('#ED872D')]=_('Cadmium orange') COLOR_NAMES[clr_h2i('#ED9121')]=_('Carrot orange') COLOR_NAMES[clr_h2i('#EDC9AF')]=_('Desert sand') COLOR_NAMES[clr_h2i('#EE0000')]=_('Red') COLOR_NAMES[clr_h2i('#EE00EE')]=_('Magenta') COLOR_NAMES[clr_h2i('#EE1289')]=_('Deep pink') COLOR_NAMES[clr_h2i('#EE204D')]=_('Red') COLOR_NAMES[clr_h2i('#EE2C2C')]=_('Firebrick') COLOR_NAMES[clr_h2i('#EE30A7')]=_('Maroon') COLOR_NAMES[clr_h2i('#EE3A8C')]=_('Violet red') 
COLOR_NAMES[clr_h2i('#EE3B3B')]=_('Brown') COLOR_NAMES[clr_h2i('#EE4000')]=_('Orange red') COLOR_NAMES[clr_h2i('#EE5C42')]=_('Tomato') COLOR_NAMES[clr_h2i('#EE6363')]=_('Indian red') COLOR_NAMES[clr_h2i('#EE6A50')]=_('Coral') COLOR_NAMES[clr_h2i('#EE6AA7')]=_('Hot pink') COLOR_NAMES[clr_h2i('#EE7600')]=_('Dark orange') COLOR_NAMES[clr_h2i('#EE7621')]=_('Chocolate') COLOR_NAMES[clr_h2i('#EE7942')]=_('Sienna') COLOR_NAMES[clr_h2i('#EE799F')]=_('Pale violet red') COLOR_NAMES[clr_h2i('#EE7AE9')]=_('Orchid') COLOR_NAMES[clr_h2i('#EE8262')]=_('Salmon') COLOR_NAMES[clr_h2i('#EE82EE')]=_('Lavender magenta, Violet') COLOR_NAMES[clr_h2i('#EE9572')]=_('Light salmon') COLOR_NAMES[clr_h2i('#EE9A00')]=_('Orange') COLOR_NAMES[clr_h2i('#EE9A49')]=_('Tan') COLOR_NAMES[clr_h2i('#EEA2AD')]=_('Light pink') COLOR_NAMES[clr_h2i('#EEA9B8')]=_('Pink') COLOR_NAMES[clr_h2i('#EEAD0E')]=_('Dark goldenrod') COLOR_NAMES[clr_h2i('#EEAEEE')]=_('Plum') COLOR_NAMES[clr_h2i('#EEB422')]=_('Goldenrod') COLOR_NAMES[clr_h2i('#EEB4B4')]=_('Rosy brown') COLOR_NAMES[clr_h2i('#EEC591')]=_('Burlywood') COLOR_NAMES[clr_h2i('#EEC900')]=_('Gold') COLOR_NAMES[clr_h2i('#EECBAD')]=_('Peach puff') COLOR_NAMES[clr_h2i('#EECFA1')]=_('Navajo white') COLOR_NAMES[clr_h2i('#EED202')]=_('Safety yellow') COLOR_NAMES[clr_h2i('#EED2EE')]=_('Thistle') COLOR_NAMES[clr_h2i('#EED5B7')]=_('Bisque') COLOR_NAMES[clr_h2i('#EED5D2')]=_('Misty rose') COLOR_NAMES[clr_h2i('#EED8AE')]=_('Wheat') COLOR_NAMES[clr_h2i('#EEDC82')]=_('Flax, Light goldenrod') COLOR_NAMES[clr_h2i('#EEDFCC')]=_('Antique white') COLOR_NAMES[clr_h2i('#EEE0E5')]=_('Lavender blush') COLOR_NAMES[clr_h2i('#EEE5DE')]=_('Seashell') COLOR_NAMES[clr_h2i('#EEE600')]=_('Titanium yellow') COLOR_NAMES[clr_h2i('#EEE685')]=_('Khaki') COLOR_NAMES[clr_h2i('#EEE8AA')]=_('Pale goldenrod') COLOR_NAMES[clr_h2i('#EEE8CD')]=_('Cornsilk') COLOR_NAMES[clr_h2i('#EEE9BF')]=_('Lemon chiffon') COLOR_NAMES[clr_h2i('#EEE9E9')]=_('Snow') COLOR_NAMES[clr_h2i('#EEEE00')]=_('Yellow') COLOR_NAMES[clr_h2i('#EEEED1')]=_('Light yellow') COLOR_NAMES[clr_h2i('#EEEEE0')]=_('Ivory') COLOR_NAMES[clr_h2i('#EF3038')]=_('Deep carmine pink') COLOR_NAMES[clr_h2i('#EF98AA')]=_('Mauvelous') COLOR_NAMES[clr_h2i('#EFBBCC')]=_('Cameo pink') COLOR_NAMES[clr_h2i('#EFCC00')]=_('Yellow') COLOR_NAMES[clr_h2i('#EFDECD')]=_('Almond') COLOR_NAMES[clr_h2i('#EFDFBB')]=_('Dutch white') COLOR_NAMES[clr_h2i('#F07427')]=_('Vivid tangelo') COLOR_NAMES[clr_h2i('#F08080')]=_('Light coral') COLOR_NAMES[clr_h2i('#F0DC82')]=_('Buff') COLOR_NAMES[clr_h2i('#F0E130')]=_('Dandelion') COLOR_NAMES[clr_h2i('#F0E68C')]=_('Light khaki') COLOR_NAMES[clr_h2i('#F0EAD6')]=_('Eggshell') COLOR_NAMES[clr_h2i('#F0F8FF')]=_('Alice blue') COLOR_NAMES[clr_h2i('#F0FFF0')]=_('Honeydew') COLOR_NAMES[clr_h2i('#F0FFFF')]=_('Azure mist') COLOR_NAMES[clr_h2i('#F19CBB')]=_('Amaranth pink') COLOR_NAMES[clr_h2i('#F1A7FE')]=_('Rich brilliant lavender') COLOR_NAMES[clr_h2i('#F1DDCF')]=_('Champagne pink') COLOR_NAMES[clr_h2i('#F2003C')]=_('Red') COLOR_NAMES[clr_h2i('#F28500')]=_('Tangerine') COLOR_NAMES[clr_h2i('#F2BA49')]=_('Maximum yellow red') COLOR_NAMES[clr_h2i('#F2BDCD')]=_('Orchid pink') COLOR_NAMES[clr_h2i('#F2F0E6')]=_('Alabaster') COLOR_NAMES[clr_h2i('#F2F27A')]=_('Sunny') COLOR_NAMES[clr_h2i('#F2F3F4')]=_('Anti-flash white') COLOR_NAMES[clr_h2i('#F37A48')]=_('Mandarin') COLOR_NAMES[clr_h2i('#F38FA9')]=_('Vanilla ice') COLOR_NAMES[clr_h2i('#F3E5AB')]=_('Medium champagne, Vanilla') COLOR_NAMES[clr_h2i('#F400A1')]=_('Fashion fuchsia, Hollywood cerise')
COLOR_NAMES[clr_h2i('#F49AC2')]=_('Pastel magenta') COLOR_NAMES[clr_h2i('#F4A460')]=_('Sandy brown') COLOR_NAMES[clr_h2i('#F4BBFF')]=_('Brilliant lavender') COLOR_NAMES[clr_h2i('#F4C2C2')]=_('Baby pink, Tea rose') COLOR_NAMES[clr_h2i('#F4C430')]=_('Saffron') COLOR_NAMES[clr_h2i('#F4CA16')]=_('Jonquil') COLOR_NAMES[clr_h2i('#F4F0EC')]=_('Isabelline') COLOR_NAMES[clr_h2i('#F56991')]=_('Light crimson') COLOR_NAMES[clr_h2i('#F56FA1')]=_('Cyclamen') COLOR_NAMES[clr_h2i('#F58025')]=_('Princeton orange') COLOR_NAMES[clr_h2i('#F5C71A')]=_('Deep lemon') COLOR_NAMES[clr_h2i('#F5DEB3')]=_('Wheat') COLOR_NAMES[clr_h2i('#F5E050')]=_('Minion yellow') COLOR_NAMES[clr_h2i('#F5F5DC')]=_('Beige') COLOR_NAMES[clr_h2i('#F5F5F5')]=_('White smoke') COLOR_NAMES[clr_h2i('#F5FFFA')]=_('Mint cream') COLOR_NAMES[clr_h2i('#F64A8A')]=_('French rose') COLOR_NAMES[clr_h2i('#F6ADC6')]=_('Nadeshiko pink') COLOR_NAMES[clr_h2i('#F6EABE')]=_('Lemon meringue') COLOR_NAMES[clr_h2i('#F70D1A')]=_('Vivid red') COLOR_NAMES[clr_h2i('#F75394')]=_('Violet-red') COLOR_NAMES[clr_h2i('#F77F00')]=_('University of Tennessee orange') COLOR_NAMES[clr_h2i('#F77FBE')]=_('Persian pink') COLOR_NAMES[clr_h2i('#F78FA7')]=_('Pink sherbet') COLOR_NAMES[clr_h2i('#F7BFBE')]=_('Spanish pink') COLOR_NAMES[clr_h2i('#F7E7CE')]=_('Champagne') COLOR_NAMES[clr_h2i('#F7E98E')]=_('Flavescent') COLOR_NAMES[clr_h2i('#F88379')]=_('Coral pink, Tea rose') COLOR_NAMES[clr_h2i('#F8B878')]=_('Mellow apricot') COLOR_NAMES[clr_h2i('#F8D568')]=_('Orange-yellow') COLOR_NAMES[clr_h2i('#F8DE7E')]=_('Jasmine, Mellow yellow') COLOR_NAMES[clr_h2i('#F8F4FF')]=_('Magnolia') COLOR_NAMES[clr_h2i('#F8F8FF')]=_('Ghost white') COLOR_NAMES[clr_h2i('#F9429E')]=_('Rose bonbon') COLOR_NAMES[clr_h2i('#F94D00')]=_('Tangelo') COLOR_NAMES[clr_h2i('#F984E5')]=_('Pale magenta') COLOR_NAMES[clr_h2i('#F984EF')]=_('Light fuchsia pink') COLOR_NAMES[clr_h2i('#FA5B3D')]=_('Orange soda') COLOR_NAMES[clr_h2i('#FA6E79')]=_('Begonia') COLOR_NAMES[clr_h2i('#FA8072')]=_('Salmon') COLOR_NAMES[clr_h2i('#FAD6A5')]=_('Deep champagne, Sunset, Tuscan') COLOR_NAMES[clr_h2i('#FADA5E')]=_('Royal yellow') COLOR_NAMES[clr_h2i('#FADADD')]=_('Pale pink') COLOR_NAMES[clr_h2i('#FADFAD')]=_('Peach-yellow') COLOR_NAMES[clr_h2i('#FAE7B5')]=_('Banana mania') COLOR_NAMES[clr_h2i('#FAEBD7')]=_('Antique white, Moccasin') COLOR_NAMES[clr_h2i('#FAF0BE')]=_('Blond') COLOR_NAMES[clr_h2i('#FAF0E6')]=_('Linen') COLOR_NAMES[clr_h2i('#FAFA37')]=_('Maximum yellow') COLOR_NAMES[clr_h2i('#FAFAD2')]=_('Light goldenrod yellow') COLOR_NAMES[clr_h2i('#FB4D46')]=_('Tart orange') COLOR_NAMES[clr_h2i('#FB4F14')]=_('Orioles orange') COLOR_NAMES[clr_h2i('#FB607F')]=_('Brink pink') COLOR_NAMES[clr_h2i('#FB9902')]=_('Orange') COLOR_NAMES[clr_h2i('#FBA0E3')]=_('Lavender rose') COLOR_NAMES[clr_h2i('#FBAB60')]=_('Rajah') COLOR_NAMES[clr_h2i('#FBAED2')]=_('Lavender pink') COLOR_NAMES[clr_h2i('#FBCCE7')]=_('Classic rose') COLOR_NAMES[clr_h2i('#FBCEB1')]=_('Apricot') COLOR_NAMES[clr_h2i('#FBEC5D')]=_('Corn') COLOR_NAMES[clr_h2i('#FC0FC0')]=_('Shocking pink') COLOR_NAMES[clr_h2i('#FC5A8D')]=_('Strawberry') COLOR_NAMES[clr_h2i('#FC6C85')]=_('Ultra red, Wild watermelon') COLOR_NAMES[clr_h2i('#FC74FD')]=_('Pink Flamingo') COLOR_NAMES[clr_h2i('#FC89AC')]=_('Tickle me pink') COLOR_NAMES[clr_h2i('#FC8EAC')]=_('Flamingo pink') COLOR_NAMES[clr_h2i('#FCC200')]=_('Golden poppy') COLOR_NAMES[clr_h2i('#FCE883')]=_('Yellow') COLOR_NAMES[clr_h2i('#FCF75E')]=_('Icterine') COLOR_NAMES[clr_h2i('#FD0E35')]=_('Scarlet, Tractor red') COLOR_NAMES[clr_h2i('#FD3A4A')]=_('Red Salsa')
COLOR_NAMES[clr_h2i('#FD3F92')]=_('French fuchsia') COLOR_NAMES[clr_h2i('#FD5240')]=_('Ogre odor') COLOR_NAMES[clr_h2i('#FD5800')]=_('Willpower orange') COLOR_NAMES[clr_h2i('#FD5E53')]=_('Sunset orange') COLOR_NAMES[clr_h2i('#FD6C9E')]=_('French pink') COLOR_NAMES[clr_h2i('#FD7C6E')]=_('Coral reef') COLOR_NAMES[clr_h2i('#FDBCB4')]=_('Melon') COLOR_NAMES[clr_h2i('#FDD5B1')]=_('Feldspar, Light apricot') COLOR_NAMES[clr_h2i('#FDD9B5')]=_('Sandy tan') COLOR_NAMES[clr_h2i('#FDDDE6')]=_('Piggy pink') COLOR_NAMES[clr_h2i('#FDEE00')]=_('Aureolin') COLOR_NAMES[clr_h2i('#FDF5E6')]=_('Old lace') COLOR_NAMES[clr_h2i('#FDFD96')]=_('Pastel yellow') COLOR_NAMES[clr_h2i('#FDFF00')]=_('Lemon glacier') COLOR_NAMES[clr_h2i('#FDFFF5')]=_('Milk') COLOR_NAMES[clr_h2i('#FE2712')]=_('Red') COLOR_NAMES[clr_h2i('#FE28A2')]=_('Persian rose') COLOR_NAMES[clr_h2i('#FE4164')]=_('Neon fuchsia') COLOR_NAMES[clr_h2i('#FE4EDA')]=_('Purple pizzazz') COLOR_NAMES[clr_h2i('#FE5A1D')]=_('Giants orange') COLOR_NAMES[clr_h2i('#FE6F5E')]=_('Bittersweet') COLOR_NAMES[clr_h2i('#FEDF00')]=_('Yellow') COLOR_NAMES[clr_h2i('#FEFE33')]=_('Yellow') COLOR_NAMES[clr_h2i('#FEFEFA')]=_('Baby powder') COLOR_NAMES[clr_h2i('#FF0000')]=_('Red') COLOR_NAMES[clr_h2i('#FF0028')]=_('Ruddy') COLOR_NAMES[clr_h2i('#FF0038')]=_('Carmine red') COLOR_NAMES[clr_h2i('#FF003F')]=_('Electric crimson') COLOR_NAMES[clr_h2i('#FF004F')]=_('Folly') COLOR_NAMES[clr_h2i('#FF006C')]=_('Vivid raspberry') COLOR_NAMES[clr_h2i('#FF007C')]=_('Winter sky') COLOR_NAMES[clr_h2i('#FF007F')]=_('Bright pink, Rose') COLOR_NAMES[clr_h2i('#FF0090')]=_('Magenta') COLOR_NAMES[clr_h2i('#FF00FF')]=_('Fuchsia, Magenta') COLOR_NAMES[clr_h2i('#FF033E')]=_('American rose') COLOR_NAMES[clr_h2i('#FF0800')]=_('Candy apple red') COLOR_NAMES[clr_h2i('#FF1493')]=_('Deep pink') COLOR_NAMES[clr_h2i('#FF1DCE')]=_('Hot magenta') COLOR_NAMES[clr_h2i('#FF2052')]=_('Awesome') COLOR_NAMES[clr_h2i('#FF2400')]=_('Scarlet') COLOR_NAMES[clr_h2i('#FF2800')]=_('Ferrari red') COLOR_NAMES[clr_h2i('#FF3030')]=_('Firebrick') COLOR_NAMES[clr_h2i('#FF33CC')]=_('Razzle dazzle rose') COLOR_NAMES[clr_h2i('#FF34B3')]=_('Maroon') COLOR_NAMES[clr_h2i('#FF355E')]=_('Radical red') COLOR_NAMES[clr_h2i('#FF3800')]=_('Coquelicot') COLOR_NAMES[clr_h2i('#FF3855')]=_('Sizzling red') COLOR_NAMES[clr_h2i('#FF3E96')]=_('Violet red') COLOR_NAMES[clr_h2i('#FF4040')]=_('Brown, Coral red') COLOR_NAMES[clr_h2i('#FF404C')]=_('Sunburnt cyclops') COLOR_NAMES[clr_h2i('#FF43A4')]=_('Wild strawberry') COLOR_NAMES[clr_h2i('#FF4466')]=_('Magic potion') COLOR_NAMES[clr_h2i('#FF4500')]=_('Orange-red') COLOR_NAMES[clr_h2i('#FF4681')]=_('Sasquatch socks') COLOR_NAMES[clr_h2i('#FF496C')]=_('Infra red') COLOR_NAMES[clr_h2i('#FF4F00')]=_('International orange (aerospace)') COLOR_NAMES[clr_h2i('#FF5349')]=_('Red-orange') COLOR_NAMES[clr_h2i('#FF5470')]=_('Fiery Rose') COLOR_NAMES[clr_h2i('#FF55A3')]=_('Brilliant rose') COLOR_NAMES[clr_h2i('#FF5800')]=_('Orange') COLOR_NAMES[clr_h2i('#FF5A36')]=_('Portland orange') COLOR_NAMES[clr_h2i('#FF5CCD')]=_('Light deep pink') COLOR_NAMES[clr_h2i('#FF5F00')]=_('Vivid orange') COLOR_NAMES[clr_h2i('#FF6347')]=_('Tomato') COLOR_NAMES[clr_h2i('#FF66CC')]=_('Rose pink') COLOR_NAMES[clr_h2i('#FF6700')]=_('Safety orange') COLOR_NAMES[clr_h2i('#FF6961')]=_('Pastel red') COLOR_NAMES[clr_h2i('#FF69B4')]=_('Hot pink') COLOR_NAMES[clr_h2i('#FF6A6A')]=_('Indian red') COLOR_NAMES[clr_h2i('#FF6D3A')]=_('Smashed pumpkin') COLOR_NAMES[clr_h2i('#FF6E4A')]=_('Outrageous orange') COLOR_NAMES[clr_h2i('#FF6EB4')]=_('Hot pink')
COLOR_NAMES[clr_h2i('#FF6FFF')]=_('Ultra pink') COLOR_NAMES[clr_h2i('#FF7256')]=_('Coral') COLOR_NAMES[clr_h2i('#FF7518')]=_('Pumpkin') COLOR_NAMES[clr_h2i('#FF77FF')]=_('Fuchsia pink') COLOR_NAMES[clr_h2i('#FF7800')]=_('Safety orange') COLOR_NAMES[clr_h2i('#FF7A00')]=_('Heat wave') COLOR_NAMES[clr_h2i('#FF7E00')]=_('Amber') COLOR_NAMES[clr_h2i('#FF7F00')]=_('Dark orange') COLOR_NAMES[clr_h2i('#FF7F24')]=_('Chocolate') COLOR_NAMES[clr_h2i('#FF7F50')]=_('Coral') COLOR_NAMES[clr_h2i('#FF8243')]=_('Mango tango') COLOR_NAMES[clr_h2i('#FF8247')]=_('Sienna') COLOR_NAMES[clr_h2i('#FF82AB')]=_('Pale violet red') COLOR_NAMES[clr_h2i('#FF83FA')]=_('Orchid') COLOR_NAMES[clr_h2i('#FF85CF')]=_('Princess perfume') COLOR_NAMES[clr_h2i('#FF878D')]=_('Tulip') COLOR_NAMES[clr_h2i('#FF8C00')]=_('Dark orange') COLOR_NAMES[clr_h2i('#FF8C69')]=_('Salmon') COLOR_NAMES[clr_h2i('#FF91A4')]=_('Salmon pink') COLOR_NAMES[clr_h2i('#FF91AF')]=_('Baker-Miller pink, Schauss pink') COLOR_NAMES[clr_h2i('#FF9900')]=_('Vivid gamboge') COLOR_NAMES[clr_h2i('#FF9933')]=_('Deep saffron') COLOR_NAMES[clr_h2i('#FF9966')]=_('Atomic tangerine') COLOR_NAMES[clr_h2i('#FF9999')]=_('Light salmon pink') COLOR_NAMES[clr_h2i('#FF99CC')]=_('Pale magenta-pink') COLOR_NAMES[clr_h2i('#FF9F00')]=_('Orange peel') COLOR_NAMES[clr_h2i('#FFA000')]=_('Vivid orange peel') COLOR_NAMES[clr_h2i('#FFA07A')]=_('Light salmon') COLOR_NAMES[clr_h2i('#FFA089')]=_('Vivid tangerine') COLOR_NAMES[clr_h2i('#FFA343')]=_('Neon Carrot') COLOR_NAMES[clr_h2i('#FFA500')]=_('Orange') COLOR_NAMES[clr_h2i('#FFA54F')]=_('Tan') COLOR_NAMES[clr_h2i('#FFA6C9')]=_('Carnation pink') COLOR_NAMES[clr_h2i('#FFA700')]=_('Chrome yellow') COLOR_NAMES[clr_h2i('#FFA812')]=_('Dark tangerine') COLOR_NAMES[clr_h2i('#FFAA1D')]=_('Bright yellow') COLOR_NAMES[clr_h2i('#FFAE42')]=_('Yellow orange') COLOR_NAMES[clr_h2i('#FFAEB9')]=_('Light pink') COLOR_NAMES[clr_h2i('#FFB077')]=_('Very light tangelo') COLOR_NAMES[clr_h2i('#FFB300')]=_('UCLA Gold') COLOR_NAMES[clr_h2i('#FFB347')]=_('Pastel orange') COLOR_NAMES[clr_h2i('#FFB3DE')]=_('Light hot pink') COLOR_NAMES[clr_h2i('#FFB5C5')]=_('Pink') COLOR_NAMES[clr_h2i('#FFB6C1')]=_('Light pink') COLOR_NAMES[clr_h2i('#FFB7C5')]=_('Cherry blossom pink') COLOR_NAMES[clr_h2i('#FFB90F')]=_('Dark goldenrod') COLOR_NAMES[clr_h2i('#FFBA00')]=_('Selective yellow') COLOR_NAMES[clr_h2i('#FFBBFF')]=_('Plum') COLOR_NAMES[clr_h2i('#FFBCD9')]=_('Cotton candy') COLOR_NAMES[clr_h2i('#FFBD88')]=_('Macaroni and cheese') COLOR_NAMES[clr_h2i('#FFBF00')]=_('Amber, Fluorescent orange') COLOR_NAMES[clr_h2i('#FFC0CB')]=_('Pink') COLOR_NAMES[clr_h2i('#FFC125')]=_('Goldenrod') COLOR_NAMES[clr_h2i('#FFC1C1')]=_('Rosy brown') COLOR_NAMES[clr_h2i('#FFC1CC')]=_('Bubble gum') COLOR_NAMES[clr_h2i('#FFC40C')]=_('Mikado yellow') COLOR_NAMES[clr_h2i('#FFC87C')]=_('Topaz') COLOR_NAMES[clr_h2i('#FFCBA4')]=_('Deep peach') COLOR_NAMES[clr_h2i('#FFCC00')]=_('Tangerine yellow') COLOR_NAMES[clr_h2i('#FFCC33')]=_('Sunglow') COLOR_NAMES[clr_h2i('#FFCC99')]=_('Peach-orange') COLOR_NAMES[clr_h2i('#FFCFF1')]=_('Shampoo') COLOR_NAMES[clr_h2i('#FFD300')]=_('Cyber yellow') COLOR_NAMES[clr_h2i('#FFD39B')]=_('Burlywood') COLOR_NAMES[clr_h2i('#FFD700')]=_('Gold') COLOR_NAMES[clr_h2i('#FFD800')]=_('School bus yellow') COLOR_NAMES[clr_h2i('#FFDAB9')]=_('Peach puff') COLOR_NAMES[clr_h2i('#FFDAE9')]=_('Mimi pink') COLOR_NAMES[clr_h2i('#FFDB00')]=_('Sizzling sunrise') COLOR_NAMES[clr_h2i('#FFDB58')]=_('Mustard') COLOR_NAMES[clr_h2i('#FFDDCA')]=_('Unbleached silk') COLOR_NAMES[clr_h2i('#FFDDF4')]=_('Pink lace') 
COLOR_NAMES[clr_h2i('#FFDEAD')]=_('Navajo white') COLOR_NAMES[clr_h2i('#FFDF00')]=_('Golden yellow') COLOR_NAMES[clr_h2i('#FFDF46')]=_('Gargoyle gas') COLOR_NAMES[clr_h2i('#FFDFBF')]=_('Very pale orange') COLOR_NAMES[clr_h2i('#FFE135')]=_('Banana yellow') COLOR_NAMES[clr_h2i('#FFE1FF')]=_('Thistle') COLOR_NAMES[clr_h2i('#FFE302')]=_('Vivid yellow') COLOR_NAMES[clr_h2i('#FFE4B5')]=_('Moccasin') COLOR_NAMES[clr_h2i('#FFE4C4')]=_('Bisque') COLOR_NAMES[clr_h2i('#FFE4CD')]=_('Lumber') COLOR_NAMES[clr_h2i('#FFE4E1')]=_('Misty rose') COLOR_NAMES[clr_h2i('#FFE5B4')]=_('Peach') COLOR_NAMES[clr_h2i('#FFE7BA')]=_('Wheat') COLOR_NAMES[clr_h2i('#FFEB00')]=_('Middle yellow') COLOR_NAMES[clr_h2i('#FFEBCD')]=_('Blanched almond') COLOR_NAMES[clr_h2i('#FFEC8B')]=_('Light goldenrod') COLOR_NAMES[clr_h2i('#FFEF00')]=_('Canary yellow') COLOR_NAMES[clr_h2i('#FFEFD5')]=_('Papaya whip') COLOR_NAMES[clr_h2i('#FFEFDB')]=_('Antique white') COLOR_NAMES[clr_h2i('#FFF000')]=_('Yellow rose') COLOR_NAMES[clr_h2i('#FFF0F5')]=_('Lavender blush') COLOR_NAMES[clr_h2i('#FFF44F')]=_('Lemon yellow') COLOR_NAMES[clr_h2i('#FFF5EE')]=_('Seashell') COLOR_NAMES[clr_h2i('#FFF600')]=_('Cadmium yellow') COLOR_NAMES[clr_h2i('#FFF68F')]=_('Khaki') COLOR_NAMES[clr_h2i('#FFF700')]=_('Lemon, Yellow sunshine') COLOR_NAMES[clr_h2i('#FFF8DC')]=_('Cornsilk') COLOR_NAMES[clr_h2i('#FFF8E7')]=_('Cosmic latte') COLOR_NAMES[clr_h2i('#FFFACD')]=_('Lemon chiffon') COLOR_NAMES[clr_h2i('#FFFAF0')]=_('Floral white') COLOR_NAMES[clr_h2i('#FFFAFA')]=_('Snow') COLOR_NAMES[clr_h2i('#FFFDD0')]=_('Cream') COLOR_NAMES[clr_h2i('#FFFF00')]=_('Yellow') COLOR_NAMES[clr_h2i('#FFFF31')]=_('Daffodil') COLOR_NAMES[clr_h2i('#FFFF33')]=_('Electric yellow') COLOR_NAMES[clr_h2i('#FFFF66')]=_('Unmellow yellow') COLOR_NAMES[clr_h2i('#FFFF99')]=_('Canary') COLOR_NAMES[clr_h2i('#FFFFBF')]=_('Very pale yellow') COLOR_NAMES[clr_h2i('#FFFFE0')]=_('Light yellow') COLOR_NAMES[clr_h2i('#FFFFF0')]=_('Ivory') COLOR_NAMES[clr_h2i('#FFFFFF')]=_('White') COLOR_NAMES[clr_h2i('#EEDD82')]=_('Light goldenrod') COLOR_NAMES[clr_h2i('#AEEEEE')]=_('Pale turquoise')
vhanla/CudaText
app/py/cuda_palette/__init__.py
Python
mpl-2.0
138,324
[ "Amber", "BLAST" ]
62c8bcab63582c6f68e28eb6dd1f725246b6e3b410c1c55268114894d153781f
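The palette above keys COLOR_NAMES by an integer produced by clr_h2i, a helper that is not shown in this excerpt. Below is a minimal sketch, assuming clr_h2i simply parses an '#RRGGBB' string into an integer (the real CudaText helper may use a different byte order); the nearest_color_name lookup is a hypothetical addition for illustration, not part of the plugin.

COLOR_NAMES = {}
_ = lambda s: s  # stand-in for the localization wrapper used in the source

def clr_h2i(hex_str):
    # Assumed behavior: parse '#RRGGBB' into a single integer key.
    return int(hex_str.lstrip('#'), 16)

def nearest_color_name(hex_str):
    # Hypothetical helper: return the palette name closest by squared RGB distance.
    target = clr_h2i(hex_str)
    tr, tg, tb = (target >> 16) & 0xFF, (target >> 8) & 0xFF, target & 0xFF
    def dist(key):
        r, g, b = (key >> 16) & 0xFF, (key >> 8) & 0xFF, key & 0xFF
        return (r - tr) ** 2 + (g - tg) ** 2 + (b - tb) ** 2
    return COLOR_NAMES[min(COLOR_NAMES, key=dist)]

COLOR_NAMES[clr_h2i('#FF0000')] = _('Red')
COLOR_NAMES[clr_h2i('#FFFFFF')] = _('White')
print(nearest_color_name('#FA0505'))  # -> 'Red'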
"""Testing for Gaussian process classification """ # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD 3 clause import warnings import numpy as np from scipy.optimize import approx_fprime import pytest from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, WhiteKernel from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel from sklearn.exceptions import ConvergenceWarning from sklearn.utils._testing import assert_almost_equal, assert_array_equal def f(x): return np.sin(x) X = np.atleast_2d(np.linspace(0, 10, 30)).T X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T y = np.array(f(X).ravel() > 0, dtype=int) fX = f(X).ravel() y_mc = np.empty(y.shape, dtype=int) # multi-class y_mc[fX < -0.35] = 0 y_mc[(fX >= -0.35) & (fX < 0.35)] = 1 y_mc[fX > 0.35] = 2 fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") kernels = [ RBF(length_scale=0.1), fixed_kernel, RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), ] non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel] @pytest.mark.parametrize("kernel", kernels) def test_predict_consistent(kernel): # Check binary predict decision has also predicted probability above 0.5. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) def test_predict_consistent_structured(): # Check binary predict decision has also predicted probability above 0.5. X = ["A", "AB", "B"] y = np.array([True, False, True]) kernel = MiniSeqKernel(baseline_similarity_bounds="fixed") gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) @pytest.mark.parametrize("kernel", non_fixed_kernels) def test_lml_improving(kernel): # Test that hyperparameter-tuning improves log-marginal likelihood. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood( kernel.theta ) @pytest.mark.parametrize("kernel", kernels) def test_lml_precomputed(kernel): # Test that lml of optimized kernel is stored correctly. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_almost_equal( gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7 ) @pytest.mark.parametrize("kernel", kernels) def test_lml_without_cloning_kernel(kernel): # Test that clone_kernel=False has side-effects of kernel.theta. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64) gpc.log_marginal_likelihood(input_theta, clone_kernel=False) assert_almost_equal(gpc.kernel_.theta, input_theta, 7) @pytest.mark.parametrize("kernel", non_fixed_kernels) def test_converged_to_local_maximum(kernel): # Test that we are in local maximum after hyperparameter-optimization. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True) assert np.all( (np.abs(lml_gradient) < 1e-4) | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]) ) @pytest.mark.parametrize("kernel", kernels) def test_lml_gradient(kernel): # Compare analytic and numeric gradient of log marginal likelihood. 
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True) lml_gradient_approx = approx_fprime( kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10 ) assert_almost_equal(lml_gradient, lml_gradient_approx, 3) def test_random_starts(): # Test that an increasing number of random-starts of GP fitting only # increases the log marginal likelihood of the chosen theta. n_samples, n_features = 25, 2 rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0 kernel = C(1.0, (1e-2, 1e2)) * RBF( length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features ) last_lml = -np.inf for n_restarts_optimizer in range(5): gp = GaussianProcessClassifier( kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, random_state=0 ).fit(X, y) lml = gp.log_marginal_likelihood(gp.kernel_.theta) assert lml > last_lml - np.finfo(np.float32).eps last_lml = lml @pytest.mark.parametrize("kernel", non_fixed_kernels) def test_custom_optimizer(kernel): # Test that GPC can use externally defined optimizers. # Define a dummy optimizer that simply tests 10 random hyperparameters def optimizer(obj_func, initial_theta, bounds): rng = np.random.RandomState(0) theta_opt, func_min = initial_theta, obj_func( initial_theta, eval_gradient=False ) for _ in range(10): theta = np.atleast_1d( rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1])) ) f = obj_func(theta, eval_gradient=False) if f < func_min: theta_opt, func_min = theta, f return theta_opt, func_min gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer) gpc.fit(X, y_mc) # Checks that optimizer improved marginal likelihood assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood( kernel.theta ) @pytest.mark.parametrize("kernel", kernels) def test_multi_class(kernel): # Test GPC for multi-class classification problems. gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y_mc) y_prob = gpc.predict_proba(X2) assert_almost_equal(y_prob.sum(1), 1) y_pred = gpc.predict(X2) assert_array_equal(np.argmax(y_prob, 1), y_pred) @pytest.mark.parametrize("kernel", kernels) def test_multi_class_n_jobs(kernel): # Test that multi-class GPC produces identical results with n_jobs>1. gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y_mc) gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2) gpc_2.fit(X, y_mc) y_prob = gpc.predict_proba(X2) y_prob_2 = gpc_2.predict_proba(X2) assert_almost_equal(y_prob, y_prob_2) def test_warning_bounds(): kernel = RBF(length_scale_bounds=[1e-5, 1e-3]) gpc = GaussianProcessClassifier(kernel=kernel) warning_message = ( "The optimal value found for dimension 0 of parameter " "length_scale is close to the specified upper bound " "0.001. Increasing the bound and calling fit again may " "find a better value." 
) with pytest.warns(ConvergenceWarning, match=warning_message): gpc.fit(X, y) kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF( length_scale_bounds=[1e3, 1e5] ) gpc_sum = GaussianProcessClassifier(kernel=kernel_sum) with pytest.warns(None) as record: with warnings.catch_warnings(): # scipy 1.3.0 uses tostring which is deprecated in numpy warnings.filterwarnings("ignore", "tostring", DeprecationWarning) gpc_sum.fit(X, y) assert len(record) == 2 assert ( record[0].message.args[0] == "The optimal value found for " "dimension 0 of parameter " "k1__noise_level is close to the " "specified upper bound 0.001. " "Increasing the bound and calling " "fit again may find a better value." ) assert ( record[1].message.args[0] == "The optimal value found for " "dimension 0 of parameter " "k2__length_scale is close to the " "specified lower bound 1000.0. " "Decreasing the bound and calling " "fit again may find a better value." ) X_tile = np.tile(X, 2) kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2]) gpc_dims = GaussianProcessClassifier(kernel=kernel_dims) with pytest.warns(None) as record: with warnings.catch_warnings(): # scipy 1.3.0 uses tostring which is deprecated in numpy warnings.filterwarnings("ignore", "tostring", DeprecationWarning) gpc_dims.fit(X_tile, y) assert len(record) == 2 assert ( record[0].message.args[0] == "The optimal value found for " "dimension 0 of parameter " "length_scale is close to the " "specified upper bound 100.0. " "Increasing the bound and calling " "fit again may find a better value." ) assert ( record[1].message.args[0] == "The optimal value found for " "dimension 1 of parameter " "length_scale is close to the " "specified upper bound 100.0. " "Increasing the bound and calling " "fit again may find a better value." )
sergeyf/scikit-learn
sklearn/gaussian_process/tests/test_gpc.py
Python
bsd-3-clause
9,224
[ "Gaussian" ]
231847642811263b642454dbc73d5776a056ac63fc8f224514315e084a1b1bc5
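The test file above repeatedly checks one invariant of GaussianProcessClassifier: the hard predict() decision must agree with thresholding predict_proba() at 0.5. A minimal standalone sketch of that invariant, on the same sin-based toy data the tests construct:

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.atleast_2d(np.linspace(0, 10, 30)).T   # 30 samples, 1 feature
y = (np.sin(X).ravel() > 0).astype(int)       # binary labels from the sign of sin(x)

gpc = GaussianProcessClassifier(kernel=RBF(length_scale=1.0)).fit(X, y)
proba = gpc.predict_proba(X)                  # shape (30, 2); rows sum to 1
pred = gpc.predict(X)
# The consistency property test_predict_consistent asserts:
assert np.array_equal(pred, (proba[:, 1] >= 0.5).astype(int))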
#!/usr/bin/env python """Module to keep track of paths and versions of other software used within the workflow at various intervals.""" import Bio import argparse import getpass import logging import os from subprocess import Popen, PIPE, check_call import sys __author__ = "Tim te Beek" __copyright__ = "Copyright 2011, Netherlands Bioinformatics Centre" __license__ = "MIT" SOFTWARE_DIR = os.path.dirname(os.path.abspath(__file__)) + '/' if not os.path.isdir(SOFTWARE_DIR): logging.error('Software directory is missing: %s', SOFTWARE_DIR) # Blast # ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST MAKEBLASTDB = SOFTWARE_DIR + 'makeblastdb' BLASTP = SOFTWARE_DIR + 'blastp' BLASTN = SOFTWARE_DIR + 'blastn' # Life Science Grid Portal # https://apps.grid.sara.nl/applications/makeblastdb/ LSGP_MAKEBLASTDB = SOFTWARE_DIR + 'makeblastdb/2.2.26' LSGP_BLASTN = SOFTWARE_DIR + 'blastn/2.2.26' LSGP_BLASTP = SOFTWARE_DIR + 'blastp/2.2.27' # OrthoMCL # http://www.orthomcl.org/common/downloads/software/v2.0/ ORTHOMCL_INSTALL_SCHEMA = SOFTWARE_DIR + 'orthomclInstallSchema' ORTHOMCL_ADJUST_FASTA = SOFTWARE_DIR + 'orthomclAdjustFasta' ORTHOMCL_FILTER_FASTA = SOFTWARE_DIR + 'orthomclFilterFasta' ORTHOMCL_BLAST_PARSER = SOFTWARE_DIR + 'orthomclBlastParser' ORTHOMCL_LOAD_BLAST = SOFTWARE_DIR + 'orthomclLoadBlast' ORTHOMCL_PAIRS = SOFTWARE_DIR + 'orthomclPairs' ORTHOMCL_DUMP_PAIRS_FILES = SOFTWARE_DIR + 'orthomclDumpPairsFiles' ORTHOMCL_MCL_TO_GROUPS = SOFTWARE_DIR + 'orthomclMclToGroups' # http://micans.org/mcl/ MCL = SOFTWARE_DIR + 'mcl' # Align & Trim # http://pc16141.mncn.csic.es/cgi-bin/translatorx_vLocal.pl TRANSLATORX = SOFTWARE_DIR + 'translatorx' # Concatemer tree # http://evolution.genetics.washington.edu/phylip.html PHYLIP = SOFTWARE_DIR + 'phylip' DNADIST = PHYLIP + ' ' + 'dnadist' NEIGHBOR = PHYLIP + ' ' + 'neighbor' # Recombination # http://www.maths.otago.ac.nz/~dbryant/software.html PHIPACK = SOFTWARE_DIR + 'Phi' # Calculation # http://abacus.gene.ucl.ac.uk/software/paml.html #PAML_DIR = SOFTWARE_DIR + 'paml4.7/' #CODEML = PAML_DIR + 'bin/codeml' CODEML = SOFTWARE_DIR + 'codeml' def _call_program(*command): """Execute command and return the standard output returned by the program. Standard error is caught and ignored.""" logging.debug(' '.join(command)) process = Popen(command, stdout=PIPE, stderr=PIPE) process.wait() return process.communicate()[0].strip() def _grep_version(path, pattern='version'): """Grep for the pattern `version` case insensitively in files specified on path and return the first line.""" stdout = _call_program('grep', '-ri', pattern, path) return stdout.split('\n')[0] def _parse_args(): ''' Parse required arguments. 
''' parser = argparse.ArgumentParser() parser.add_argument('target', help='Target output file for version numbers', type=lambda path: logging.FileHandler(path, mode='w')) args = parser.parse_args() # Directly configure logging through args logging.basicConfig(level=logging.INFO, stream=sys.stdout) args.target.setFormatter(logging.Formatter()) logging.root.addHandler(args.target) # Return any other args return args def _check_package(pkg_name): command = ['which', pkg_name] logging.info('Executing: %s', ' '.join(command)) check_call(command, stdout=None) def main(): """Method intended to be run when __name__ == '__main__'.""" # BioPython logging.info('BioPython\t%s', Bio.__version__) # Blast _check_package(MAKEBLASTDB) _check_package(BLASTP) _check_package(BLASTN) # Life Science Grid Portal #logging.info('LSGP %s', LSGP_MAKEBLASTDB.replace('/', '\t')) #logging.info('LSGP %s', LSGP_BLASTP.replace('/', '\t')) #logging.info('LSGP %s', LSGP_BLASTN.replace('/', '\t')) # OrthoMCL & mcl _check_package(ORTHOMCL_INSTALL_SCHEMA) _check_package(ORTHOMCL_ADJUST_FASTA) _check_package(ORTHOMCL_FILTER_FASTA) _check_package(ORTHOMCL_BLAST_PARSER) _check_package(ORTHOMCL_LOAD_BLAST) _check_package(ORTHOMCL_PAIRS) _check_package(ORTHOMCL_DUMP_PAIRS_FILES) _check_package(ORTHOMCL_MCL_TO_GROUPS) _check_package(MCL) # TranslatorX calls muscle internally _check_package(TRANSLATORX) #logging.info('Muscle\t%s', _call_program('muscle', '-version')) # PHYLIP dnadist & neighbor _check_package(PHYLIP) _check_package(DNADIST) _check_package(NEIGHBOR) # PHIPACK _check_package(PHIPACK) # PAML codeml _check_package(CODEML) if __name__ == '__main__': # Parse arguments to setup logging; not in main for testing _parse_args() # Log software versions main()
ODoSE/odose.nl
versions.py
Python
mit
4,752
[ "BLAST", "Biopython" ]
31023b539177f1691df28ed7033201dbea30ac39e3fb546f9693f3a800b90b18
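versions.py shells out with subprocess.Popen and captures the child's standard output in _call_program. One design note: communicate() already waits for the child and drains both pipes, so the module's extra process.wait() before communicate() can deadlock if a child fills its stderr buffer. A minimal sketch of the safer idiom follows; the command run here is illustrative, not taken from the module:

from subprocess import Popen, PIPE

def call_program(*command):
    # communicate() reads both pipes to EOF and then reaps the child,
    # so no separate wait() call is needed beforehand.
    process = Popen(command, stdout=PIPE, stderr=PIPE)
    stdout, _stderr = process.communicate()
    return stdout.decode().strip()

print(call_program('python', '--version'))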
import time import pytest import numpy as np import multiprocessing import psi4 # Test below is fine on its own but erratic through pytest. Most likely # to succeed as first test collected, so here it lies. @pytest.mark.xfail(True, reason='threading treatment suspect', run=True) def test_threaded_blas(): threads = multiprocessing.cpu_count() threads = int(threads / 2) times = {} size = [200, 500, 2000, 5000] threads = [1, threads] for th in threads: psi4.set_num_threads(th) for sz in size: nruns = max(1, int(1.e10 / (sz ** 3))) a = psi4.core.Matrix(sz, sz) b = psi4.core.Matrix(sz, sz) c = psi4.core.Matrix(sz, sz) tp4 = time.time() for n in range(nruns): c.gemm(False, False, 1.0, a, b, 0.0) retp4 = (time.time() - tp4) / nruns tnp = time.time() for n in range(nruns): np.dot(a, b, out=np.asarray(c)) retnp = (time.time() - tnp) / nruns print("Time for threads %2d, size %5d: Psi4: %12.6f NumPy: %12.6f" % (th, sz, retp4, retnp)) if sz == 5000: times["p4-n{}".format(th)] = retp4 times["np-n{}".format(th)] = retnp assert psi4.get_num_threads() == th rat1 = times["np-n" + str(threads[-1])] / times["p4-n" + str(threads[-1])] rat2 = times["p4-n" + str(threads[0])] / times["p4-n" + str(threads[-1])] print(" NumPy@n%d : Psi4@n%d ratio (want ~1): %.2f" % (threads[-1], threads[-1], rat1)) print(" Psi4@n%d : Psi4@n%d ratio (want ~%d): %.2f" % (threads[0], threads[-1], threads[-1], rat2)) assert pytest.approx(rat1, 0.2) == 1.0 assert pytest.approx(rat2, 0.8) == threads[-1]
rmcgibbo/psi4public
tests/pytest/test_aaa_profiling.py
Python
lgpl-3.0
1,787
[ "Psi4" ]
3a8a43cd77ba4e0988a496967537aaeb971ad3a3115bee4b89e9f982cc5e8e5f
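The profiling test above times psi4's threaded GEMM against numpy.dot on the same buffers. Below is a trimmed, hypothetical helper showing just that timing core, reusing only the psi4 API already visible in the test (set_num_threads, core.Matrix, Matrix.gemm):

import time
import numpy as np
import psi4

def time_gemm(size=500, nruns=10, threads=2):
    psi4.set_num_threads(threads)
    a = psi4.core.Matrix(size, size)
    b = psi4.core.Matrix(size, size)
    c = psi4.core.Matrix(size, size)
    t0 = time.time()
    for _ in range(nruns):
        c.gemm(False, False, 1.0, a, b, 0.0)  # C = 1.0*A*B + 0.0*C via threaded BLAS
    t_psi4 = (time.time() - t0) / nruns
    t0 = time.time()
    for _ in range(nruns):
        np.dot(np.asarray(a), np.asarray(b), out=np.asarray(c))
    t_numpy = (time.time() - t0) / nruns
    return t_psi4, t_numpy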
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Oliver J. Backhouse <olbackhouse@gmail.com> # George H. Booth <george.booth@kcl.ac.uk> # ''' Auxiliary second-order Green's function perturbation theory for unrestricted references ''' import time import numpy as np from pyscf import lib from pyscf.lib import logger from pyscf import __config__ from pyscf import ao2mo from pyscf.scf import _vhf from pyscf.agf2 import ragf2, _agf2, mpi_helper from pyscf.agf2 import aux_space as aux from pyscf.agf2.chempot import binsearch_chempot, minimize_chempot from pyscf.mp.ump2 import get_frozen_mask as _get_frozen_mask BLKMIN = getattr(__config__, 'agf2_blkmin', 1) def build_se_part(agf2, eri, gf_occ, gf_vir, os_factor=1.0, ss_factor=1.0): ''' Builds either the auxiliaries of the occupied self-energy, or virtual if :attr:`gf_occ` and :attr:`gf_vir` are swapped, for a single spin. Args: eri : _ChemistsERIs Electronic repulsion integrals gf_occ : tuple of GreensFunction Occupied Green's function for each spin gf_vir : tuple of GreensFunction Virtual Green's function for each spin Kwargs: os_factor : float Opposite-spin factor for spin-component-scaled (SCS) calculations. Default 1.0 ss_factor : float Same-spin factor for spin-component-scaled (SCS) calculations. 
Default 1.0 Returns: :class:`SelfEnergy` ''' cput0 = (logger.process_clock(), logger.perf_counter()) log = logger.Logger(agf2.stdout, agf2.verbose) assert type(gf_occ[0]) is aux.GreensFunction assert type(gf_occ[1]) is aux.GreensFunction assert type(gf_vir[0]) is aux.GreensFunction assert type(gf_vir[1]) is aux.GreensFunction nmo = eri.nmo noa, nob = gf_occ[0].naux, gf_occ[1].naux nva, nvb = gf_vir[0].naux, gf_vir[1].naux tol = agf2.weight_tol facs = dict(os_factor=os_factor, ss_factor=ss_factor) ci_a, ei_a = gf_occ[0].coupling, gf_occ[0].energy ci_b, ei_b = gf_occ[1].coupling, gf_occ[1].energy ca_a, ea_a = gf_vir[0].coupling, gf_vir[0].energy ca_b, ea_b = gf_vir[1].coupling, gf_vir[1].energy mem_incore = (nmo[0]*noa*(noa*nva+nob*nvb)) * 8/1e6 mem_now = lib.current_memory()[0] if (mem_incore+mem_now < agf2.max_memory) or agf2.incore_complete: qeri = _make_qmo_eris_incore(agf2, eri, (ci_a, ci_a, ca_a), (ci_b, ci_b, ca_b), spin=0) else: qeri = _make_qmo_eris_outcore(agf2, eri, (ci_a, ci_a, ca_a), (ci_b, ci_b, ca_b), spin=0) if isinstance(qeri[0], np.ndarray): vv, vev = _agf2.build_mats_uagf2_incore(qeri, (ei_a, ei_b), (ea_a, ea_b), **facs) else: vv, vev = _agf2.build_mats_uagf2_outcore(qeri, (ei_a, ei_b), (ea_a, ea_b), **facs) e, c = _agf2.cholesky_build(vv, vev) se_a = aux.SelfEnergy(e, c, chempot=gf_occ[0].chempot) se_a.remove_uncoupled(tol=tol) if not (agf2.frozen is None or agf2.frozen == 0): mask = get_frozen_mask(agf2) coupling = np.zeros((nmo[0], se_a.naux)) coupling[mask[0]] = se_a.coupling se_a = aux.SelfEnergy(se_a.energy, coupling, chempot=se_a.chempot) cput0 = log.timer('se part (alpha)', *cput0) mem_incore = (nmo[1]*nob*(nob*nvb+noa*nva)) * 8/1e6 mem_now = lib.current_memory()[0] if (mem_incore+mem_now < agf2.max_memory) or agf2.incore_complete: qeri = _make_qmo_eris_incore(agf2, eri, (ci_a, ci_a, ca_a), (ci_b, ci_b, ca_b), spin=1) else: qeri = _make_qmo_eris_outcore(agf2, eri, (ci_a, ci_a, ca_a), (ci_b, ci_b, ca_b), spin=1) if isinstance(qeri[0], np.ndarray): vv, vev = _agf2.build_mats_uagf2_incore(qeri, (ei_b, ei_a), (ea_b, ea_a), **facs) else: vv, vev = _agf2.build_mats_uagf2_outcore(qeri, (ei_b, ei_a), (ea_b, ea_a), **facs) e, c = _agf2.cholesky_build(vv, vev) se_b = aux.SelfEnergy(e, c, chempot=gf_occ[1].chempot) se_b.remove_uncoupled(tol=tol) if not (agf2.frozen is None or agf2.frozen == 0): mask = get_frozen_mask(agf2) coupling = np.zeros((nmo[1], se_b.naux)) coupling[mask[1]] = se_b.coupling se_b = aux.SelfEnergy(se_b.energy, coupling, chempot=se_b.chempot) cput0 = log.timer('se part (beta)', *cput0) return (se_a, se_b) def get_fock(agf2, eri, gf=None, rdm1=None): ''' Computes the physical space Fock matrix in MO basis. If :attr:`rdm1` is not supplied, it is built from :attr:`gf`, which defaults to the mean-field Green's function. Args: eri : _ChemistsERIs Electronic repulsion integrals Kwargs: gf : GreensFunction Auxiliaries of the Green's function rdm1 : 2D array Reduced density matrix Returns: ndarray of physical space Fock matrix ''' if rdm1 is None: rdm1 = agf2.make_rdm1(gf) vj_aa, vk_aa = agf2.get_jk(eri.eri_aa, rdm1=rdm1[0]) vj_bb, vk_bb = agf2.get_jk(eri.eri_bb, rdm1=rdm1[1]) vj_ab = agf2.get_jk(eri.eri_ab, rdm1=rdm1[1], with_k=False)[0] vj_ba = agf2.get_jk(eri.eri_ba, rdm1=rdm1[0], with_k=False)[0] fock_a = eri.h1e[0] + vj_aa + vj_ab - vk_aa fock_b = eri.h1e[1] + vj_bb + vj_ba - vk_bb fock = (fock_a, fock_b) return fock def fock_loop(agf2, eri, gf, se): ''' Self-consistent loop for the density matrix via the HF self- consistent field. 
Args: eri : _ChemistsERIs Electronic repulsion integrals gf : tuple of GreensFunction Auxiliaries of the Green's function for each spin se : tuple of SelfEnergy Auxiliaries of the self-energy for each spin Returns: :class:`SelfEnergy`, :class:`GreensFunction` and a boolean indicating whether convergence was successful. ''' assert type(gf[0]) is aux.GreensFunction assert type(gf[1]) is aux.GreensFunction assert type(se[0]) is aux.SelfEnergy assert type(se[1]) is aux.SelfEnergy cput0 = cput1 = (logger.process_clock(), logger.perf_counter()) log = logger.Logger(agf2.stdout, agf2.verbose) diis = lib.diis.DIIS(agf2) diis.space = agf2.fock_diis_space diis.min_space = agf2.fock_diis_min_space focka, fockb = agf2.get_fock(eri, gf) sea, seb = se gfa, gfb = gf nalph, nbeta = agf2.nocc nmoa, nmob = eri.nmo nauxa, nauxb = sea.naux, seb.naux nqmoa, nqmob = nauxa+nmoa, nauxb+nmob bufa, bufb = np.zeros((nqmoa, nqmoa)), np.zeros((nqmob, nqmob)) rdm1a_prev = 0 rdm1b_prev = 0 converged = False opts = dict(tol=agf2.conv_tol_nelec, maxiter=agf2.max_cycle_inner) for niter1 in range(1, agf2.max_cycle_outer+1): sea, opt = minimize_chempot(sea, focka, nalph, x0=sea.chempot, occupancy=1, **opts) seb, opt = minimize_chempot(seb, fockb, nbeta, x0=seb.chempot, occupancy=1, **opts) for niter2 in range(1, agf2.max_cycle_inner+1): wa, va = sea.eig(focka, chempot=0.0, out=bufa) wb, vb = seb.eig(fockb, chempot=0.0, out=bufb) sea.chempot, nerra = \ binsearch_chempot((wa, va), nmoa, nalph, occupancy=1) seb.chempot, nerrb = \ binsearch_chempot((wb, vb), nmob, nbeta, occupancy=1) nerr = max(nerra, nerrb) wa, va = sea.eig(focka, out=bufa) wb, vb = seb.eig(fockb, out=bufb) gfa = aux.GreensFunction(wa, va[:nmoa], chempot=sea.chempot) gfb = aux.GreensFunction(wb, vb[:nmob], chempot=seb.chempot) gf = (gfa, gfb) focka, fockb = agf2.get_fock(eri, gf) rdm1a, rdm1b = agf2.make_rdm1(gf) focka, fockb = diis.update(np.array((focka, fockb)), xerr=None) if niter2 > 1: derra = np.max(np.absolute(rdm1a - rdm1a_prev)) derrb = np.max(np.absolute(rdm1b - rdm1b_prev)) derr = max(derra, derrb) if derr < agf2.conv_tol_rdm1: break rdm1a_prev = rdm1a.copy() rdm1b_prev = rdm1b.copy() log.debug1('fock loop %d cycles = %d dN = %.3g |ddm| = %.3g', niter1, niter2, nerr, derr) cput1 = log.timer_debug1('fock loop %d'%niter1, *cput1) if derr < agf2.conv_tol_rdm1 and abs(nerr) < agf2.conv_tol_nelec: converged = True break se = (sea, seb) log.info('fock converged = %s' % converged) log.info(' alpha: chempot = %.9g dN = %.3g |ddm| = %.3g', sea.chempot, nerra, derra) log.info(' beta: chempot = %.9g dN = %.3g |ddm| = %.3g', seb.chempot, nerrb, derrb) log.timer('fock loop', *cput0) return gf, se, converged def energy_1body(agf2, eri, gf): ''' Calculates the one-body energy according to the UHF form. Args: eri : _ChemistsERIs Electronic repulsion integrals gf : tuple of GreensFunction Auxiliaries of the Green's function for each spin Returns: One-body energy ''' assert type(gf[0]) is aux.GreensFunction assert type(gf[1]) is aux.GreensFunction rdm1 = agf2.make_rdm1(gf) fock = agf2.get_fock(eri, gf) e1b_a = 0.5 * np.sum(rdm1[0] * (eri.h1e[0] + fock[0])) e1b_b = 0.5 * np.sum(rdm1[1] * (eri.h1e[1] + fock[1])) e1b = e1b_a + e1b_b e1b += agf2.energy_nuc() return e1b def energy_2body(agf2, gf, se): ''' Calculates the two-body energy using analytically integrated Galitskii-Migdal formula. The formula is symmetric and only one side needs to be calculated. 
Args: gf : tuple of GreensFunction Auxiliaries of the Green's function for each spin se : tuple of SelfEnergy Auxiliaries of the self-energy for each spin Returns: Two-body energy ''' e2b_a = ragf2.energy_2body(agf2, gf[0], se[0]) e2b_b = ragf2.energy_2body(agf2, gf[1], se[1]) e2b = (e2b_a + e2b_b) * 0.5 return e2b def energy_mp2(agf2, gf, se): ''' Calculates the two-body energy using analytically integrated Galitskii-Migdal formula for an MP2 self-energy. Per the definition of one- and two-body partitioning in the Dyson equation, this result is half of :func:`energy_2body`. Args: gf : tuple of GreensFunction Auxiliaries of the Green's function for each spin se : tuple of SelfEnergy Auxiliaries of the self-energy for each spin Returns: MP2 energy ''' emp2_a = ragf2.energy_mp2(agf2, gf[0], se[0]) emp2_b = ragf2.energy_mp2(agf2, gf[1], se[1]) emp2 = (emp2_a + emp2_b) * 0.5 return emp2 class UAGF2(ragf2.RAGF2): ''' Unrestricted AGF2 with canonical HF reference Attributes: verbose : int Print level. Default value equals to :class:`Mole.verbose` max_memory : float or int Allowed memory in MB. Default value equals to :class:`Mole.max_memory` incore_complete : bool Avoid all I/O. Default is False. conv_tol : float Convergence threshold for AGF2 energy. Default value is 1e-7 conv_tol_rdm1 : float Convergence threshold for first-order reduced density matrix. Default value is 1e-8. conv_tol_nelec : float Convergence threshold for the number of electrons. Default value is 1e-6. max_cycle : int Maximum number of AGF2 iterations. Default value is 50. max_cycle_outer : int Maximum number of outer Fock loop iterations. Default value is 20. max_cycle_inner : int Maximum number of inner Fock loop iterations. Default value is 50. weight_tol : float Threshold in spectral weight of auxiliaries to be considered zero. Default 1e-11. diis : bool or lib.diis.DIIS Whether to use DIIS, can also be a lib.diis.DIIS object. Default value is True. diis_space : int DIIS space size. Default value is 8. diis_min_space : int Minimum space of DIIS. Default value is 1. fock_diis_space : int DIIS space size for Fock loop iterations. Default value is 6. fock_diis_min_space : Minimum space of DIIS. Default value is 1. os_factor : float Opposite-spin factor for spin-component-scaled (SCS) calculations. Default 1.0 ss_factor : float Same-spin factor for spin-component-scaled (SCS) calculations. Default 1.0 damping : float Damping factor for the self-energy. Default value is 0.0 Saved results e_corr : float AGF2 correlation energy e_tot : float Total energy (HF + correlation) e_1b : float One-body part of :attr:`e_tot` e_2b : float Two-body part of :attr:`e_tot` e_init : float Initial correlation energy (truncated MP2) converged : bool Whether convergence was successful se : tuple of SelfEnergy Auxiliaries of the self-energy for each spin gf : tuple of GreensFunction Auxiliaries of the Green's function for each spin ''' energy_1body = energy_1body energy_2body = energy_2body fock_loop = fock_loop build_se_part = build_se_part def ao2mo(self, mo_coeff=None): ''' Get the electronic repulsion integrals in MO basis. ''' nmo = max(self.nmo) mem_incore = ((nmo*(nmo+1)//2)**2) * 8/1e6 mem_now = lib.current_memory()[0] if (self._scf._eri is not None and (mem_incore+mem_now < self.max_memory or self.incore_complete)): eri = _make_mo_eris_incore(self, mo_coeff) else: logger.warn(self, 'MO eris are outcore - this may be very ' 'slow for agf2.
Increasing max_memory or ' 'using density fitting is recommended.') eri = _make_mo_eris_outcore(self, mo_coeff) return eri def make_rdm1(self, gf=None): ''' Compute the one-body reduced density matrix in MO basis. Kwargs: gf : tuple of GreensFunction Auxiliaries of the Green's functions for each spin Returns: tuple of ndarray of density matrices ''' if gf is None: gf = self.gf if gf is None: gf = self.init_gf() rdm1_a = gf[0].make_rdm1(occupancy=1) rdm1_b = gf[1].make_rdm1(occupancy=1) return (rdm1_a, rdm1_b) def get_fock(self, eri=None, gf=None, rdm1=None): ''' Computes the physical space Fock matrix in MO basis. ''' if eri is None: eri = self.ao2mo() if gf is None: gf = self.gf return get_fock(self, eri, gf=gf, rdm1=rdm1) def energy_mp2(self, mo_energy=None, se=None): if mo_energy is None: mo_energy = self.mo_energy if se is None: se = self.build_se(gf=self.gf) self.e_init = energy_mp2(self, mo_energy, se) return self.e_init def init_gf(self, frozen=False): ''' Builds the Hartree-Fock Green's function. Returns: tuple of :class:`GreensFunction`, tuple of :class:`SelfEnergy` ''' nmoa, nmob = self.nmo nocca, noccb = self.nocc energy = self.mo_energy coupling = (np.eye(nmoa), np.eye(nmob)) focka = np.diag(energy[0]) fockb = np.diag(energy[1]) cpt_a = binsearch_chempot(focka, nmoa, nocca, occupancy=1)[0] cpt_b = binsearch_chempot(fockb, nmob, noccb, occupancy=1)[0] if frozen: mask = get_frozen_mask(self) energy = (energy[0][mask[0]], energy[1][mask[1]]) coupling = (coupling[0][:,mask[0]], coupling[1][:,mask[1]]) gf_a = aux.GreensFunction(energy[0], coupling[0], chempot=cpt_a) gf_b = aux.GreensFunction(energy[1], coupling[1], chempot=cpt_b) gf = (gf_a, gf_b) return gf def build_gf(self, eri=None, gf=None, se=None): ''' Builds the auxiliaries of the Green's functions by solving the Dyson equation for each spin. Kwargs: eri : _ChemistsERIs Electronic repulsion integrals gf : tuple of GreensFunction Auxiliaries of the Green's function for each spin se : tuple of SelfEnergy Auxiliaries of the self-energy for each spin Returns: tuple of :class:`GreensFunction` ''' if eri is None: eri = self.ao2mo() if gf is None: gf = self.gf if gf is None: gf = self.init_gf() if se is None: se = self.build_se(eri, gf) focka, fockb = self.get_fock(eri, gf) gf_a = se[0].get_greens_function(focka) gf_b = se[1].get_greens_function(fockb) return (gf_a, gf_b) def build_se(self, eri=None, gf=None, os_factor=None, ss_factor=None, se_prev=None): ''' Builds the auxiliaries of the self-energy. Args: eri : _ChemistsERIs Electronic repulsion integrals gf : tuple of GreensFunction Auxiliaries of the Green's function Kwargs: os_factor : float Opposite-spin factor for spin-component-scaled (SCS) calculations. Default 1.0 ss_factor : float Same-spin factor for spin-component-scaled (SCS) calculations. Default 1.0 se_prev : SelfEnergy Previous self-energy for damping.
Default value is None Returns tuple of :class:`SelfEnergy` ''' if eri is None: eri = self.ao2mo() if gf is None: gf = self.gf if gf is None: gf = self.init_gf() if os_factor is None: os_factor = self.os_factor if ss_factor is None: ss_factor = self.ss_factor facs = dict(os_factor=os_factor, ss_factor=ss_factor) gf_occ = (gf[0].get_occupied(), gf[1].get_occupied()) gf_vir = (gf[0].get_virtual(), gf[1].get_virtual()) se_occ = self.build_se_part(eri, gf_occ, gf_vir, **facs) se_vir = self.build_se_part(eri, gf_vir, gf_occ, **facs) se_a = aux.combine(se_occ[0], se_vir[0]) se_b = aux.combine(se_occ[1], se_vir[1]) if se_prev is not None and self.damping != 0.0: se_a_prev, se_b_prev = se_prev se_a.coupling *= np.sqrt(1.0-self.damping) se_b.coupling *= np.sqrt(1.0-self.damping) se_a_prev.coupling *= np.sqrt(self.damping) se_b_prev.coupling *= np.sqrt(self.damping) se_a = aux.combine(se_a, se_a_prev) se_b = aux.combine(se_b, se_b_prev) se_a = se_a.compress(n=(None,0)) se_b = se_b.compress(n=(None,0)) return (se_a, se_b) def run_diis(self, se, diis=None): ''' Runs the direct inversion of the iterative subspace for the self-energy. Args: se : SelfEnergy Auxiliaries of the self-energy diis : lib.diis.DIIS DIIS object Returns: tuple of :class:`SelfEnergy` ''' if diis is None: return se se_occ_a, se_occ_b = (se[0].get_occupied(), se[1].get_occupied()) se_vir_a, se_vir_b = (se[0].get_virtual(), se[1].get_virtual()) vv_occ_a = np.dot(se_occ_a.coupling, se_occ_a.coupling.T) vv_occ_b = np.dot(se_occ_b.coupling, se_occ_b.coupling.T) vv_vir_a = np.dot(se_vir_a.coupling, se_vir_a.coupling.T) vv_vir_b = np.dot(se_vir_b.coupling, se_vir_b.coupling.T) vev_occ_a = np.dot(se_occ_a.coupling * se_occ_a.energy[None], se_occ_a.coupling.T) vev_occ_b = np.dot(se_occ_b.coupling * se_occ_b.energy[None], se_occ_b.coupling.T) vev_vir_a = np.dot(se_vir_a.coupling * se_vir_a.energy[None], se_vir_a.coupling.T) vev_vir_b = np.dot(se_vir_b.coupling * se_vir_b.energy[None], se_vir_b.coupling.T) dat = np.array([vv_occ_a, vv_vir_a, vev_occ_a, vev_vir_a, vv_occ_b, vv_vir_b, vev_occ_b, vev_vir_b]) dat = diis.update(dat) vv_occ_a, vv_vir_a, vev_occ_a, vev_vir_a, \ vv_occ_b, vv_vir_b, vev_occ_b, vev_vir_b = dat se_occ_a = aux.SelfEnergy(*_agf2.cholesky_build(vv_occ_a, vev_occ_a), chempot=se[0].chempot) se_vir_a = aux.SelfEnergy(*_agf2.cholesky_build(vv_vir_a, vev_vir_a), chempot=se[0].chempot) se_occ_b = aux.SelfEnergy(*_agf2.cholesky_build(vv_occ_b, vev_occ_b), chempot=se[1].chempot) se_vir_b = aux.SelfEnergy(*_agf2.cholesky_build(vv_vir_b, vev_vir_b), chempot=se[1].chempot) se = (aux.combine(se_occ_a, se_vir_a), aux.combine(se_occ_b, se_vir_b)) return se def density_fit(self, auxbasis=None, with_df=None): from pyscf.agf2 import dfuagf2 myagf2 = dfuagf2.DFUAGF2(self._scf) myagf2.__dict__.update(self.__dict__) if with_df is not None: myagf2.with_df = with_df if auxbasis is not None and myagf2.with_df.auxbasis != auxbasis: import copy myagf2.with_df = copy.copy(myagf2.with_df) myagf2.with_df.auxbasis = auxbasis return myagf2 def get_ip(self, gf, nroots=5): gf_occ = (gf[0].get_occupied(), gf[1].get_occupied()) spin = np.array([0,]*gf_occ[0].naux + [1,]*gf_occ[1].naux) e_ip = np.concatenate([gf_occ[0].energy, gf_occ[1].energy], axis=0) v_ip = np.concatenate([gf_occ[0].coupling, gf_occ[1].coupling], axis=1) mask = np.argsort(e_ip) spin = list(spin[mask][-nroots:])[::-1] e_ip = list(-e_ip[mask][-nroots:])[::-1] v_ip = list(v_ip[:,mask][:,-nroots:].T)[::-1] return e_ip, v_ip, spin def ipagf2(self, nroots=5): e_ip, v_ip, spin = 
self.get_ip(self.gf, nroots=nroots) for n, en, vn, sn in zip(range(nroots), e_ip, v_ip, spin): qpwt = np.linalg.norm(vn)**2 tag = ['alpha', 'beta'][sn] logger.note(self, 'IP energy level %d E = %.16g QP weight = %0.6g (%s)', n, en, qpwt, tag) if nroots == 1: return e_ip[0], v_ip[0] else: return e_ip, v_ip def get_ea(self, gf, nroots=5): gf_vir = (gf[0].get_virtual(), gf[1].get_virtual()) spin = np.array([0,]*gf_vir[0].naux + [1,]*gf_vir[1].naux) e_ea = np.concatenate([gf_vir[0].energy, gf_vir[1].energy], axis=0) v_ea = np.concatenate([gf_vir[0].coupling, gf_vir[1].coupling], axis=1) mask = np.argsort(e_ea) spin = list(spin[mask][:nroots]) e_ea = list(e_ea[mask][:nroots]) v_ea = list(v_ea[:,mask][:,:nroots].T) return e_ea, v_ea, spin def eaagf2(self, nroots=5): e_ea, v_ea, spin = self.get_ea(self.gf, nroots=nroots) for n, en, vn, sn in zip(range(nroots), e_ea, v_ea, spin): qpwt = np.linalg.norm(vn)**2 tag = ['alpha', 'beta'][sn] logger.note(self, 'EA energy level %d E = %.16g QP weight = %0.6g (%s)', n, en, qpwt, tag) if nroots == 1: return e_ea[0], v_ea[0] else: return e_ea, v_ea @property def nocc(self): if self._nocc is None: self._nocc = (np.sum(self.mo_occ[0] > 0), np.sum(self.mo_occ[1] > 0)) return self._nocc @nocc.setter def nocc(self, val): self._nocc = val @property def nmo(self): if self._nmo is None: self._nmo = (self.mo_occ[0].size, self.mo_occ[1].size) return self._nmo @nmo.setter def nmo(self, val): self._nmo = val @property def qmo_energy(self): return (self.gf[0].energy, self.gf[1].energy) @property def qmo_coeff(self): ''' Gives the couplings in AO basis ''' return (np.dot(self.mo_coeff[0], self.gf[0].coupling), np.dot(self.mo_coeff[1], self.gf[1].coupling)) @property def qmo_occ(self): coeff_a = self.gf[0].get_occupied().coupling coeff_b = self.gf[1].get_occupied().coupling occ_a = np.linalg.norm(coeff_a, axis=0) ** 2 occ_b = np.linalg.norm(coeff_b, axis=0) ** 2 vir_a = np.zeros_like(self.gf[0].get_virtual().energy) vir_b = np.zeros_like(self.gf[1].get_virtual().energy) qmo_occ_a = np.concatenate([occ_a, vir_a]) qmo_occ_b = np.concatenate([occ_b, vir_b]) return qmo_occ_a, qmo_occ_b def get_frozen_mask(agf2): with lib.temporary_env(agf2, _nocc=None, _nmo=None): return _get_frozen_mask(agf2) class _ChemistsERIs: ''' (pq|rs) MO integrals stored in s4 symmetry, we only need QMO integrals in low-symmetry tensors and s4 is highest supported by _vhf ''' def __init__(self, mol=None): self.mol = mol self.mo_coeff = None self.nocc = None self.nmo = None self.fock = None self.h1e = None self.eri = None self.e_hf = None def _common_init_(self, agf2, mo_coeff=None): if mo_coeff is None: mo_coeff = agf2.mo_coeff self.mo_coeff = mo_coeff dm = agf2._scf.make_rdm1(agf2.mo_coeff, agf2.mo_occ) h1e_ao = agf2._scf.get_hcore() vhf = agf2._scf.get_veff(agf2.mol, dm) fock_ao = agf2._scf.get_fock(vhf=vhf, dm=dm) self.h1e = (np.dot(np.dot(mo_coeff[0].conj().T, h1e_ao), mo_coeff[0]), np.dot(np.dot(mo_coeff[1].conj().T, h1e_ao), mo_coeff[1])) self.fock = (np.dot(np.dot(mo_coeff[0].conj().T, fock_ao[0]), mo_coeff[0]), np.dot(np.dot(mo_coeff[1].conj().T, fock_ao[1]), mo_coeff[1])) self.h1e = (mpi_helper.bcast(self.h1e[0]), mpi_helper.bcast(self.h1e[1])) self.fock = (mpi_helper.bcast(self.fock[0]), mpi_helper.bcast(self.fock[1])) self.e_hf = mpi_helper.bcast(agf2._scf.e_tot) self.nmo = agf2.nmo nocca, noccb = self.nocc = agf2.nocc self.mol = agf2.mol mo_e = (self.fock[0].diagonal(), self.fock[1].diagonal()) gap_a = abs(mo_e[0][:nocca,None] - mo_e[0][None,nocca:]).min() gap_b = abs(mo_e[1][:noccb,None] - 
mo_e[1][None,noccb:]).min() gap = min(gap_a, gap_b) if gap < 1e-5: logger.warn(agf2, 'HOMO-LUMO gap %s may be too small for AGF2', gap) return self def _make_mo_eris_incore(agf2, mo_coeff=None): ''' Returns _ChemistsERIs ''' cput0 = (logger.process_clock(), logger.perf_counter()) log = logger.Logger(agf2.stdout, agf2.verbose) eris = _ChemistsERIs() eris._common_init_(agf2, mo_coeff) moa, mob = eris.mo_coeff nmoa, nmob = eris.nmo eri_aa = ao2mo.incore.full(agf2._scf._eri, moa, verbose=log) eri_bb = ao2mo.incore.full(agf2._scf._eri, mob, verbose=log) eri_aa = ao2mo.addons.restore('s4', eri_aa, nmoa) eri_bb = ao2mo.addons.restore('s4', eri_bb, nmob) eri_ab = ao2mo.incore.general(agf2._scf._eri, (moa,moa,mob,mob), verbose=log) assert eri_ab.shape == (nmoa*(nmob+1)//2, nmob*(nmob+1)//2) eri_ba = np.transpose(eri_ab) eris.eri_aa = eri_aa eris.eri_ab = eri_ab eris.eri_ba = eri_ba eris.eri_bb = eri_bb eris.eri = ((eri_aa, eri_ab), (eri_ba, eri_bb)) log.timer('MO integral transformation', *cput0) return eris def _make_mo_eris_outcore(agf2, mo_coeff=None): ''' Returns _ChemistsERIs ''' log = logger.Logger(agf2.stdout, agf2.verbose) eris = _ChemistsERIs() eris._common_init_(agf2, mo_coeff) mol = agf2.mol moa = np.asarray(eris.mo_coeff[0], order='F') mob = np.asarray(eris.mo_coeff[1], order='F') nmoa, nmob = eris.nmo eris.feri = lib.H5TmpFile() ao2mo.outcore.full(mol, moa, eris.feri, dataname='mo/aa') ao2mo.outcore.full(mol, mob, eris.feri, dataname='mo/bb') ao2mo.outcore.general(mol, (moa,moa,mob,mob), eris.feri, dataname='mo/ab', verbose=log) ao2mo.outcore.general(mol, (mob,mob,moa,moa), eris.feri, dataname='mo/ba', verbose=log) eris.eri_aa = eris.feri['mo/aa'] eris.eri_ab = eris.feri['mo/ab'] eris.eri_ba = eris.feri['mo/ba'] eris.eri_bb = eris.feri['mo/bb'] eris.eri = ((eris.eri_aa, eris.eri_ab), (eris.eri_ba, eris.eri_bb)) return eris def _make_qmo_eris_incore(agf2, eri, coeffs_a, coeffs_b, spin=None): ''' Returns nested tuple of ndarray spin = None: ((aaaa, aabb), (bbaa, bbbb)) spin = 0: (aaaa, aabb) spin = 1: (bbbb, bbaa) ''' cput0 = (logger.process_clock(), logger.perf_counter()) log = logger.Logger(agf2.stdout, agf2.verbose) nmo = eri.nmo nmoa, nmob = nmo cxa = np.eye(nmoa) cxb = np.eye(nmob) if not (agf2.frozen is None or agf2.frozen == 0): mask = get_frozen_mask(agf2) cxa = cxa[:,mask[0]] cxb = cxb[:,mask[1]] # npaira, npairb = nmoa*(nmoa+1)//2, nmob*(nmob+1)//2 cia, cja, caa = coeffs_a cib, cjb, cab = coeffs_b nia, nja, naa = [x.shape[1] for x in coeffs_a] nib, njb, nab = [x.shape[1] for x in coeffs_b] if spin is None or spin == 0: c_aa = (cxa, cia, cja, caa) c_ab = (cxa, cia, cjb, cab) qeri_aa = ao2mo.incore.general(eri.eri_aa, c_aa, compact=False, verbose=log) qeri_ab = ao2mo.incore.general(eri.eri_ab, c_ab, compact=False, verbose=log) qeri_aa = qeri_aa.reshape(cxa.shape[1], nia, nja, naa) qeri_ab = qeri_ab.reshape(cxa.shape[1], nia, njb, nab) if spin is None or spin == 1: c_bb = (cxb, cib, cjb, cab) c_ba = (cxb, cib, cja, caa) qeri_bb = ao2mo.incore.general(eri.eri_bb, c_bb, compact=False, verbose=log) qeri_ba = ao2mo.incore.general(eri.eri_ba, c_ba, compact=False, verbose=log) qeri_bb = qeri_bb.reshape(cxb.shape[1], nib, njb, nab) qeri_ba = qeri_ba.reshape(cxb.shape[1], nib, nja, naa) if spin is None: qeri = ((qeri_aa, qeri_ab), (qeri_ba, qeri_bb)) elif spin == 0: qeri = (qeri_aa, qeri_ab) elif spin == 1: qeri = (qeri_bb, qeri_ba) log.timer('QMO integral transformation', *cput0) return qeri def _make_qmo_eris_outcore(agf2, eri, coeffs_a, coeffs_b, spin=None): ''' Returns nested tuple of 
H5 dataset spin = None: ((aaaa, aabb), (bbaa, bbbb)) spin = 0: (aaaa, aabb) spin = 1: (bbbb, bbaa) ''' cput0 = (logger.process_clock(), logger.perf_counter()) log = logger.Logger(agf2.stdout, agf2.verbose) nmo = eri.nmo nmoa, nmob = nmo mask = get_frozen_mask(agf2) frozena = np.sum(~mask[0]) frozenb = np.sum(~mask[1]) # npaira, npairb = nmoa*(nmoa+1)//2, nmob*(nmob+1)//2 cia, cja, caa = coeffs_a cib, cjb, cab = coeffs_b nia, nja, naa = [x.shape[1] for x in coeffs_a] nib, njb, nab = [x.shape[1] for x in coeffs_b] # possible to have incore MO, outcore QMO if getattr(eri, 'feri', None) is None: eri.feri = lib.H5TmpFile() else: for key in ['aa', 'ab', 'ba', 'bb']: if 'qmo/%s'%key in eri.feri: del eri.feri['qmo/%s'%key] if spin is None or spin == 0: eri.feri.create_dataset('qmo/aa', (nmoa-frozena, nia, nja, naa), 'f8') eri.feri.create_dataset('qmo/ab', (nmoa-frozena, nia, njb, nab), 'f8') blksize = _agf2.get_blksize(agf2.max_memory, (nmoa**3, nmoa*nja*naa), (nmoa*nmob**2, nmoa*njb*nab)) blksize = min(nmoa, max(BLKMIN, blksize)) log.debug1('blksize (uagf2._make_qmo_eris_outcore) = %d', blksize) tril2sq = lib.square_mat_in_trilu_indices(nmoa) q1 = 0 for p0, p1 in lib.prange(0, nmoa, blksize): if not np.any(mask[0][p0:p1]): # block is fully frozen continue inds = np.arange(p0, p1)[mask[0][p0:p1]] q0, q1 = q1, q1 + len(inds) idx = list(np.concatenate(tril2sq[inds])) # aa buf = eri.eri_aa[idx] # (blk, nmoa, npaira) buf = buf.reshape((q1-q0)*nmoa, -1) # (blk*nmoa, npaira) jasym_aa, nja_aa, cja_aa, sja_aa = ao2mo.incore._conc_mos(cja, caa) buf = ao2mo._ao2mo.nr_e2(buf, cja_aa, sja_aa, 's2kl', 's1') buf = buf.reshape(q1-q0, nmoa, nja, naa) buf = lib.einsum('xpja,pi->xija', buf, cia) eri.feri['qmo/aa'][q0:q1] = np.asarray(buf, order='C') # ab buf = eri.eri_ab[idx] # (blk, nmoa, npairb) buf = buf.reshape((q1-q0)*nmob, -1) # (blk*nmoa, npairb) jasym_ab, nja_ab, cja_ab, sja_ab = ao2mo.incore._conc_mos(cjb, cab) buf = ao2mo._ao2mo.nr_e2(buf, cja_ab, sja_ab, 's2kl', 's1') buf = buf.reshape(q1-q0, nmoa, njb, nab) buf = lib.einsum('xpja,pi->xija', buf, cia) eri.feri['qmo/ab'][q0:q1] = np.asarray(buf, order='C') if spin is None or spin == 1: eri.feri.create_dataset('qmo/ba', (nmob-frozenb, nib, nja, naa), 'f8') eri.feri.create_dataset('qmo/bb', (nmob-frozenb, nib, njb, nab), 'f8') max_memory = agf2.max_memory - lib.current_memory()[0] blksize = int((max_memory/8e-6) / max(nmob**3+nmob*njb*nab, nmob*nmoa**2*nja*naa)) blksize = min(nmob, max(BLKMIN, blksize)) log.debug1('blksize (uagf2._make_qmo_eris_outcore) = %d', blksize) tril2sq = lib.square_mat_in_trilu_indices(nmob) q1 = 0 for p0, p1 in lib.prange(0, nmob, blksize): if not np.any(mask[1][p0:p1]): # block is fully frozen continue inds = np.arange(p0, p1)[mask[1][p0:p1]] q0, q1 = q1, q1 + len(inds) idx = list(np.concatenate(tril2sq[inds])) # ba buf = eri.eri_ba[idx] # (blk, nmob, npaira) buf = buf.reshape((q1-q0)*nmob, -1) # (blk*nmob, npaira) jasym_ba, nja_ba, cja_ba, sja_ba = ao2mo.incore._conc_mos(cja, caa) buf = ao2mo._ao2mo.nr_e2(buf, cja_ba, sja_ba, 's2kl', 's1') buf = buf.reshape(q1-q0, nmob, nja, naa) buf = lib.einsum('xpja,pi->xija', buf, cib) eri.feri['qmo/ba'][q0:q1] = np.asarray(buf, order='C') # bb buf = eri.eri_bb[idx] # (blk, nmob, npairb) buf = buf.reshape((q1-q0)*nmob, -1) # (blk*nmob, npairb) jasym_bb, nja_bb, cja_bb, sja_bb = ao2mo.incore._conc_mos(cjb, cab) buf = ao2mo._ao2mo.nr_e2(buf, cja_bb, sja_bb, 's2kl', 's1') buf = buf.reshape(q1-q0, nmob, njb, nab) buf = lib.einsum('xpja,pi->xija', buf, cib) eri.feri['qmo/bb'][q0:q1] = 
np.asarray(buf, order='C') if spin is None: qeri = ((eri.feri['qmo/aa'], eri.feri['qmo/ab']), (eri.feri['qmo/ba'], eri.feri['qmo/bb'])) elif spin == 0: qeri = (eri.feri['qmo/aa'], eri.feri['qmo/ab']) elif spin == 1: qeri = (eri.feri['qmo/bb'], eri.feri['qmo/ba']) log.timer('QMO integral transformation', *cput0) return qeri if __name__ == '__main__': from pyscf import gto, scf, mp mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', charge=-1, spin=1, verbose=3) uhf = scf.UHF(mol) uhf.conv_tol = 1e-11 uhf.run() uagf2 = UAGF2(uhf, frozen=0) uagf2.run() uagf2.ipagf2(nroots=5) uagf2.eaagf2(nroots=5) uagf2 = uagf2.density_fit() uagf2.run()
sunqm/pyscf
pyscf/agf2/uagf2.py
Python
apache-2.0
36,471
[ "PySCF" ]
8433b056b4c6d80f9abb62f38640bb3f3dff4210eeedf388e589a7aece8ecb99
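For orientation, a minimal end-to-end sketch mirroring the module's own `__main__` block above (the molecule, basis, and root counts are the example's own choices, not requirements):

from pyscf import gto, scf
from pyscf.agf2.uagf2 import UAGF2

# Open-shell reference: UHF on an anionic water-like system, as in __main__.
mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz',
            charge=-1, spin=1, verbose=0)
uhf = scf.UHF(mol)
uhf.conv_tol = 1e-11
uhf.run()

# Unrestricted AGF2 on top of the UHF reference.
uagf2 = UAGF2(uhf)
uagf2.run()

# With nroots=1, ipagf2/eaagf2 return a single (energy, vector) pair.
e_ip, v_ip = uagf2.ipagf2(nroots=1)
e_ea, v_ea = uagf2.eaagf2(nroots=1)

# Density-fitted variant via the density_fit() method defined above.
uagf2_df = uagf2.density_fit()
uagf2_df.run()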
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals # Version: 0.15+dev """ HEADS UP! ========= You are looking at a heavily modified version of the original [Versioneer](https://github.com/warner/python-versioneer), customized to fit the development of [OctoPrint](https://github.com/foosel/OctoPrint). This version adds * support for inclusion of the current branch in the available pieces to use for version generation * a lookup file to use, to modify version generation based on the currently checked out branch including support for virtual tags pointing to specific commit hashes * a new style pep440-dev: TAG.devDISTANCE[.dirty]+gHEX * a new style pep440-tag: TAG[.postDISTANCE.dev0+gHEX] Please note that this fork is only under maintenance as far as required by the OctoPrint project. If you are looking for an actively maintained version of Versioneer, please go to the original project. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). 
Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes). The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation First, decide on values for the following configuration variables: * `VCS`: the version control system you use. Currently accepts "git". * `style`: the style of version string to be produced. See "Styles" below for details. Defaults to "pep440", which looks like `TAG[+DISTANCE.gSHORTHASH[.dirty]]`. * `versionfile_source`: A project-relative pathname into which the generated version strings should be written. This is usually a `_version.py` next to your project's main `__init__.py` file, so it can be imported at runtime. If your project uses `src/myproject/__init__.py`, this should be `src/myproject/_version.py`. This file should be checked in to your VCS as usual: the copy created below by `setup.py setup_versioneer` will include code that parses expanded VCS keywords in generated tarballs. The 'build' and 'sdist' commands will replace it with a copy that has just the calculated version string. This must be set even if your project does not have any modules (and will therefore never import `_version.py`), since "setup.py sdist" -based trees still need somewhere to record the pre-calculated version strings. Anywhere in the source tree should do. If there is a `__init__.py` next to your `_version.py`, the `setup.py setup_versioneer` command (described below) will append some `__version__`-setting assignments, if they aren't already present. * `versionfile_build`: Like `versionfile_source`, but relative to the build directory instead of the source directory. These will differ when your setup.py uses 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`, then you will probably have `versionfile_build='myproject/_version.py'` and `versionfile_source='src/myproject/_version.py'`. If this is set to None, then `setup.py build` will not attempt to rewrite any `_version.py` in the built tree. If your project does not have any libraries (e.g. if it only builds a script), then you should use `versionfile_build = None` and override `distutils.command.build_scripts` to explicitly insert a copy of `versioneer.get_version()` into your generated script.
* `tag_prefix`: a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. If your tags look like 'myproject-1.2.0', then you should use tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this should be an empty string, using either `tag_prefix=` or `tag_prefix=''`. * `parentdir_prefix`: a optional string, frequently the same as tag_prefix, which appears at the start of all unpacked tarball filenames. If your tarball unpacks into 'myproject-1.2.0', this should be 'myproject-'. To disable this feature, just omit the field from your `setup.cfg`. This tool provides one script, named `versioneer`. That script has one mode, "install", which writes a copy of `versioneer.py` into the current directory and runs `versioneer.py setup` to finish the installation. To versioneer-enable your project: * 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and populating it with the configuration values you decided earlier (note that the option names are not case-sensitive): ```` [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- ```` * 2: Run `versioneer install`. This will do the following: * copy `versioneer.py` into the top of your source tree * create `_version.py` in the right place (`versionfile_source`) * modify your `__init__.py` (if one exists next to `_version.py`) to define `__version__` (by calling a function from `_version.py`) * modify your `MANIFEST.in` to include both `versioneer.py` and the generated `_version.py` in sdist tarballs `versioneer install` will complain about any problems it finds with your `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all the problems. * 3: add a `import versioneer` to your setup.py, and add the following arguments to the setup() call: version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), * 4: commit these changes to your VCS. To make sure you won't forget, `versioneer install` will mark everything it touched for addition using `git add`. Don't forget to add `setup.py` and `setup.cfg` too. ## Post-Installation Usage Once established, all uses of your tree from a VCS checkout should get the current version string. All generated tarballs should include an embedded version string (so users who unpack them will not need a VCS tool installed). If you distribute your project through PyPI, then the release process should boil down to two steps: * 1: git tag 1.0 * 2: python setup.py register sdist upload If you distribute it through github (i.e. users use github to generate tarballs with `git archive`), the process is: * 1: git tag 1.0 * 2: git push; git push --tags Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at least one tag in its history. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. 
* `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". If the underlying VCS supports it and that information is available, this will also be included: * `['branch']`: A string with the VCS branch name the version was built on. Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. Augmenting that with the `branch` information if it is available will give additional hints during bug reporting what kind of setup a user was running. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See details.md in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ### Upgrading to 0.15 Starting with this version, Versioneer is configured with a `[versioneer]` section in your `setup.cfg` file. Earlier versions required the `setup.py` to set attributes on the `versioneer` module immediately after import. The new version will refuse to run (raising an exception during import) until you have provided the necessary `setup.cfg` section. 
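As a pointer back to the "Version-String Flavors" section above, here is a minimal sketch of consuming the returned dictionary from within `setup.py` (the error-handling policy shown is just one option, following the suggestion above to raise when `error` is set):

    import versioneer

    info = versioneer.get_versions()
    print(info['version'])           # e.g. "0.11+2.g1076c97.dirty"
    print(info['full-revisionid'])   # full SHA1, or None
    print(info['dirty'])             # True, False, or None
    if info['error'] is not None:
        # refuse to e.g. build tarballs with an unknown version
        raise RuntimeError('could not compute version: %s' % info['error'])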
In addition, the Versioneer package provides an executable named `versioneer`, and the installation process is driven by running `versioneer install`. In 0.14 and earlier, the executable was named `versioneer-installer` and was run without an argument. ### Upgrading to 0.14 0.14 changes the format of the version string. 0.13 and earlier used hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a plus-separated "local version" section strings, with dot-separated components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old format, but should be ok with the new one. ### Upgrading from 0.11 to 0.12 Nothing special. ### Upgrading from 0.10 to 0.11 You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running `setup.py setup_versioneer`. This will enable the use of additional version-control systems (SVN, etc) in the future. ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ try: import configparser except ImportError: import ConfigParser as configparser import errno import io import json import os import re import subprocess import sys class VersioneerConfig: pass def get_root(): # we require that all commands are run from the project root, i.e. the # directory that contains setup.py, setup.cfg, and versioneer.py . root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. 
me = os.path.realpath(os.path.abspath(__file__)) if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") # TODO: find a py2 compatible solution for the configparser deprecation issues parser = configparser.SafeConfigParser() with io.open(setup_cfg, 'rt', encoding="utf-8") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") cfg.lookupfile = get(parser, "lookupfile") return cfg class NotThisMethod(Exception): pass # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) return None return stdout LONG_VERSION_PY['git'] = '''# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.15+dev (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import io import os import re import subprocess import sys import logging def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.lookupfile = "%(LOOKUP_FILE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) return None return stdout def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. """ dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%%s', but '%%s' doesn't start with " "prefix '%%s'" %% (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = io.open(versionfile_abs, "rt", encoding="utf-8") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set(r.strip() for r in refnames.strip("()").split(",")) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set(r[len(TAG):] for r in refs if r.startswith(TAG)) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set(r for r in refs if re.search(r'\d', r)) if verbose: print("discarding '%%s', no digits" %% ",".join(refs-tags)) branches = [r for r in refs if not r.startswith(TAG) and r != "HEAD" and not r.startswith("refs/")] if verbose: print("likely branches: %%s" %% ",".join(sorted(branches))) branch = None if branches: branch = branches[0] if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) result = {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None} if branch is not None: result["branch"] = branch return result # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %%s" %% root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # figure out our branch abbrev_ref_out = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) if abbrev_ref_out is not None and abbrev_ref_out != "HEAD": pieces["branch"] = abbrev_ref_out.strip() # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces @register_vcs_handler("git", "parse_lookup_file") def git_parse_lookup_file(path): """Parse a versioneer lookup file. This file allows definition of branch specific data like virtual tags or custom styles to use for version rendering. 
""" if not os.path.exists(path): return [] import re lookup = [] with io.open(path, "rt", encoding="utf-8") as f: for line in f: if '#' in line: line = line[:line.index("#")] line = line.strip() if not line: continue try: split_line = list(map(lambda x: x.strip(), line.split())) if not len(split_line): continue matcher = re.compile(split_line[0]) if len(split_line) == 1: entry = [matcher, None, None, None] elif len(split_line) == 2: render = split_line[1] entry = [matcher, render, None, None] elif len(split_line) == 3: tag, ref_commit = split_line[1:] entry = [matcher, None, tag, ref_commit] elif len(split_line) == 4: tag, ref_commit, render = split_line[1:] entry = [matcher, render, tag, ref_commit] else: continue lookup.append(entry) except Exception: logging.getLogger(__name__).exception("Versioneer problem") break return lookup @register_vcs_handler("git", "pieces_from_lookup") def git_pieces_from_lookup(lookup, root, verbose, run_command=run_command): """Extract version information based on provided lookup data.""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] stdout = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) if stdout is None: raise NotThisMethod("git rev-parse --abbrev-ref HEAD failed") current_branch = stdout.strip() if current_branch == "HEAD": raise NotThisMethod("not on a branch") for matcher, render, tag, ref_commit in lookup: if matcher.match(current_branch): if tag is None or ref_commit is None: raise NotThisMethod("tag or ref_commit is unset for " "this branch") stdout = run_command(GITS, ["rev-list", "%%s..HEAD" %% ref_commit, "--count"], cwd=root) if stdout is None: raise NotThisMethod("git rev-list %%s..HEAD " "--count failed" %% ref_commit) try: num_commits = int(stdout.strip()) except ValueError: raise NotThisMethod("git rev-list %%s..HEAD --count didn't " "return a valid number" %% ref_commit) stdout = run_command(GITS, ["rev-parse", "--short", "HEAD"], cwd=root) if stdout is None: raise NotThisMethod("git describe rev-parse " "--short HEAD failed") short_hash = stdout.strip() stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: raise NotThisMethod("git describe --tags --dirty " "--always failed") dirty = stdout.strip().endswith("-dirty") stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if stdout is None: raise NotThisMethod("git rev-parse HEAD failed") full = stdout.strip() return { "long": full, "short": short_hash, "dirty": dirty, "branch": current_branch, "closest-tag": tag, "distance": num_commits, "error": None, "render": render } raise NotThisMethod("no matching lookup definition found") def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_tag(pieces): """TAG[[.postDISTANCE].dev0+gHEX] -- Just the tag if not dirty, else more info Useful for projects that want commit based tracking on some branches but have the master branch only report tags, to allow for commits that do not modify actual code (e.g. to .github/* or docs). Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_dev(pieces): """TAG[.devDISTANCE]+gHEX[.dirty] . Exceptions: 1: no tags. 0.devDISTANCE+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".dev%%d" %% pieces["distance"] rendered += plus_or_dot(pieces) else: # exception #1 rendered = "0.dev%%d" %% pieces["distance"] rendered += "+" rendered += "g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags.
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if "render" in pieces and pieces["render"] is not None: style = pieces["render"] if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "pep440-dev": rendered = render_pep440_dev(pieces) elif style == "pep440-tag": rendered = render_pep440_tag(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) result = {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} if "branch" in pieces and pieces["branch"] is not None: result["branch"] = pieces["branch"] return result def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__.
for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree"} lookupfile = cfg.lookupfile if cfg.lookupfile is not None \ else ".versioneer-lookup" lookuppath = os.path.join(root, lookupfile) if os.path.exists(lookuppath): try: lookup_data = git_parse_lookup_file(lookuppath) pieces = git_pieces_from_lookup(lookup_data, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = io.open(versionfile_abs, 'rt', encoding="utf-8") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set(r.strip() for r in refnames.strip("()").split(",")) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set(r[len(TAG):] for r in refs if r.startswith(TAG)) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set(r for r in refs if re.search(r'\d', r)) if verbose: print("discarding '%s', no digits" % ",".join(refs-tags)) branches = [r for r in refs if not r.startswith(TAG) and r != "HEAD" and not r.startswith("refs/")] if verbose: print("likely branches: %s" % ",".join(sorted(branches))) branch = None if branches: branch = branches[0] if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) result = {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None} if branch is not None: result["branch"] = branch return result # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # figure out our branch abbrev_ref_out = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) if abbrev_ref_out is not None and abbrev_ref_out != "HEAD": pieces["branch"] = abbrev_ref_out.strip() # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces @register_vcs_handler("git", "parse_lookup_file") def git_parse_lookup_file(path): """Parse a versioneer lookup file. This file allows definition of branch specific data like virtual tags or custom styles to use for version rendering. 
""" if not os.path.exists(path): return [] import re lookup = [] with io.open(path, 'r') as f: for line in f: if '#' in line: line = line[:line.index("#")] line = line.strip() if not line: continue try: split_line = list(map(lambda x: x.strip(), line.split())) if not len(split_line): continue matcher = re.compile(split_line[0]) if len(split_line) == 1: entry = [matcher, None, None, None] elif len(split_line) == 2: render = split_line[1] entry = [matcher, render, None, None] elif len(split_line) == 3: tag, ref_commit = split_line[1:] entry = [matcher, None, tag, ref_commit] elif len(split_line) == 4: tag, ref_commit, render = split_line[1:] entry = [matcher, render, tag, ref_commit] else: continue lookup.append(entry) except Exception: break return lookup @register_vcs_handler("git", "pieces_from_lookup") def git_pieces_from_lookup(lookup, root, verbose, run_command=run_command): """Extract version information based on provided lookup data.""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] stdout = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) if stdout is None: raise NotThisMethod("git rev-parse --abbrev-ref HEAD failed") current_branch = stdout.strip() if current_branch == "HEAD": raise NotThisMethod("not on a branch") for matcher, render, tag, ref_commit in lookup: if matcher.match(current_branch): if tag is None or ref_commit is None: raise NotThisMethod("tag or ref_commit is unset for " "this branch") stdout = run_command(GITS, ["rev-list", "%s..HEAD" % ref_commit, "--count"], cwd=root) if stdout is None: raise NotThisMethod("git rev-list %s..HEAD " "--count failed" % ref_commit) try: num_commits = int(stdout.strip()) except ValueError: raise NotThisMethod("git rev-list %s..HEAD --count didn't " "return a valid number" % ref_commit) stdout = run_command(GITS, ["rev-parse", "--short", "HEAD"], cwd=root) if stdout is None: raise NotThisMethod("git describe rev-parse " "--short HEAD failed") short_hash = stdout.strip() stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: raise NotThisMethod("git describe --tags --dirty " "--always failed") dirty = stdout.strip().endswith("-dirty") stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if stdout is None: raise NotThisMethod("git rev-parse HEAD failed") full = stdout.strip() return { "long": full, "short": short_hash, "dirty": dirty, "branch": current_branch, "closest-tag": tag, "distance": num_commits, "error": None, "render": render } raise NotThisMethod("no matching lookup definition found") def do_vcs_install(manifest_in, versionfile_source, ipy): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = io.open('.gitattributes', 'rt', encoding="utf-8") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = io.open('.gitattributes', 'a+t', encoding="utf-8") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the 
version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. """ dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with " "prefix '%s'" % (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.15+dev) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json import sys version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): try: with io.open(filename, 'rt', encoding="utf-8") as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\s+(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with io.open(filename, 'wt', encoding="utf-8") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_tag(pieces): """TAG[[.postDISTANCE].dev0+gHEX] -- Just the tag if not dirty, else more info Useful for projects that want commit based tracking on some branches but have the master branch only report tags, to allow for commits that do not modify actual code (e.g. to .github/* or docs). Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["dirty"]: rendered += ".post%d" % pieces["distance"] rendered += ".dev0" rendered += "+g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_dev(pieces): """TAG[.devDISTANCE]+gHEX[.dirty] . Exceptions: 1: no tags. 0.devDISTANCE+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".dev%d" % pieces["distance"] rendered += plus_or_dot(pieces) else: # exception #1 rendered = "0.dev%d" % pieces["distance"] rendered += "+" rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags.
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if "render" in pieces and pieces["render"] is not None: style = pieces["render"] if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "pep440-dev": rendered = render_pep440_dev(pieces) elif style == "pep440-tag": rendered = render_pep440_tag(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) result = {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} if "branch" in pieces and pieces["branch"] is not None: result["branch"] = pieces["branch"] return result class VersioneerBadRootError(Exception): pass def get_versions(verbose=False): # returns dict with two keys: 'version' and 'full' if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass lookupfile = cfg.lookupfile if cfg.lookupfile is not None \ else ".versioneer-lookup" lookuppath = os.path.join(root, lookupfile) if os.path.exists(lookuppath): parse_lookup_file_f = handlers.get("parse_lookup_file") versions_from_lookup_f = handlers.get("pieces_from_lookup") if parse_lookup_file_f and versions_from_lookup_f: try: lookup_data = parse_lookup_file_f(lookuppath) pieces = versions_from_lookup_f(lookup_data, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from lookup file") return ver except NotThisMethod: pass elif verbose: print("lookup file %s doesn't exist" % lookuppath) from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} def get_version(): return get_versions()["version"] def get_cmdclass(): if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to its pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) if "branch" in vers: print(" branch: %s" % vers["branch"]) print(" dirty: %s" % vers.get("dirty")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with io.open(cfg.versionfile_source, 'wt', encoding="utf-8") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. 
[versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = #lookupfile = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with io.open(os.path.join(root, "setup.cfg"), 'at', encoding="utf-8") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with io.open(cfg.versionfile_source, 'wt', encoding="utf-8") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, "LOOKUP_FILE": cfg.lookupfile }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with io.open(ipy, 'rt', encoding="utf-8") as f: old = f.read() except EnvironmentError: old = "" if "from ._version import get_versions" not in old: print(" appending to %s" % ipy) with io.open(ipy, 'at', encoding="utf-8") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with io.open(manifest_in, 'r') as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with io.open(manifest_in, 'at', encoding="utf-8") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with io.open(manifest_in, 'at', encoding="utf-8") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-time keyword # substitution. do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): found = set() setters = False errors = 0 with io.open("setup.py", 'rt', encoding="utf-8") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). 
Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
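A minimal sketch (not part of versioneer.py) of how the render styles defined above turn one "pieces" dict into the different version-string formats; the tag, distance and hash values here are invented for illustration:

# Hypothetical pieces dict, as git_pieces_from_vcs would build it for a dirty
# checkout three commits past the tag v1.2.0 (with tag_prefix "v" stripped).
pieces = {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234",
          "long": "abc1234def5678abc1234def5678abc1234def56",
          "dirty": True, "error": None}
print(render_pep440(pieces))                    # 1.2.0+3.gabc1234.dirty
print(render_pep440_post(pieces))               # 1.2.0.post3.dev0+gabc1234
print(render_git_describe(pieces))              # 1.2.0-3-gabc1234-dirty
print(render(pieces, "pep440-dev")["version"])  # 1.2.0.dev3+gabc1234.dirty

The lookup-file mechanism is specific to this fork. Judging from git_parse_lookup_file, a .versioneer-lookup file consists of whitespace-separated lines of one of four shapes: a branch regex alone, regex plus render style, regex plus virtual tag and reference commit, or regex plus tag, reference commit and render style. A hypothetical example:

maintenance    1.3.0    a1b2c3d    pep440-dev
devel          pep440-dev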
foosel/OctoPrint
versioneer.py
Python
agpl-3.0
81,073
[ "Brian" ]
2fcae61ee2e82c2a84bd2e3ce6b1921cb5f0a4c1446bbd6ecd48f017e110f853
""" Test_RSS_Policy_AlwaysActivePolicy """ import unittest import DIRAC.ResourceStatusSystem.Policy.AlwaysActivePolicy as moduleTested ################################################################################ class AlwaysActivePolicy_TestCase(unittest.TestCase): def setUp(self): """Setup""" self.moduleTested = moduleTested self.testClass = self.moduleTested.AlwaysActivePolicy def tearDown(self): """TearDown""" del self.testClass del self.moduleTested ################################################################################ # Tests class AlwaysActivePolicy_Success(AlwaysActivePolicy_TestCase): def test_instantiate(self): """tests that we can instantiate one object of the tested class""" policy = self.testClass() self.assertEqual("AlwaysActivePolicy", policy.__class__.__name__) def test_evaluate(self): """tests the evaluate method""" policy = self.testClass() res = policy.evaluate() self.assertTrue(res["OK"]) self.assertEqual("Active", res["Value"]["Status"]) ################################################################################ ################################################################################ if __name__ == "__main__": suite = unittest.defaultTestLoader.loadTestsFromTestCase(AlwaysActivePolicy_TestCase) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(AlwaysActivePolicy_Success)) testResult = unittest.TextTestRunner(verbosity=2).run(suite) # EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
DIRACGrid/DIRAC
src/DIRAC/ResourceStatusSystem/Policy/test/Test_RSS_Policy_AlwaysActivePolicy.py
Python
gpl-3.0
1,655
[ "DIRAC" ]
7ccdd09e7983f21508841e9585ee51c548a6ea72d7098cd88976b17576c93e39
# Copyright (c) 2011, 2012 Free Software Foundation # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This project incorporates work covered by the following copyright and permission notice: # Copyright (c) 2009, Julien Fache # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of the author nor the names of other # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED # OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Storage models of gnowsys-studio, all types, relations """ import warnings from datetime import datetime from django.db import models from django.db.models import Q from django.utils.html import strip_tags from django.utils.html import linebreaks from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.db.models.signals import post_save from django.utils.importlib import import_module from django.contrib import comments from django.contrib.comments.models import CommentFlag from django.contrib.comments.moderation import moderator from django.utils.translation import ugettext_lazy as _ from django.contrib.markup.templatetags.markup import markdown from django.contrib.markup.templatetags.markup import textile from django.contrib.markup.templatetags.markup import restructuredtext import mptt from djangoratings.fields import RatingField from tagging.fields import TagField from gstudio.settings import UPLOAD_TO from gstudio.settings import MARKUP_LANGUAGE from gstudio.settings import NODETYPE_TEMPLATES from gstudio.settings import NODETYPE_BASE_MODEL from gstudio.settings import MARKDOWN_EXTENSIONS from gstudio.settings import AUTO_CLOSE_COMMENTS_AFTER from gstudio.settings import GSTUDIO_VERSIONING from gstudio.managers import nodetypes_published from gstudio.managers import NodetypePublishedManager from gstudio.managers import NodePublishedManager from gstudio.managers import AuthorPublishedManager from gstudio.managers import DRAFT, HIDDEN, PUBLISHED from gstudio.moderator import NodetypeCommentModerator from gstudio.url_shortener import get_url_shortener from gstudio.signals import ping_directories_handler from gstudio.signals import ping_external_urls_handler from unidecode import unidecode import json if GSTUDIO_VERSIONING: import reversion from reversion.models import Version from django.core import serializers from reversion.models import * from reversion.helpers import * import ast NODETYPE_CHOICES = ( ('ND', 'Nodes'), ('OB' ,'Objects'), ('ED', 'Edges'), ('NT', 'Node types'), ('OT', 'Object types'), ('RT', 'Relation types'), ('MT', 'Metatypes'), ('AT', 'Attribute types'), ('RN', 'Relations'), ('AS', 'Attributes'), ('ST', 'System type'), ('SY', 'System'), ('NS', 'Node specification'), ('AS', 'Attribute specification'), ('RS', 'Relation specification'), ('IN', 'Intersection'), ('CP', 'Complement'), ('UN', 'Union'), ) DEPTYPE_CHOICES = ( ('0', 'Concept-Concept'), ('1', 'Activity-Activity'), ('2', 'Question-Question'), ('3', 'Concept-Activity'), ('4', 'Activity-Concept'), ('5', 'Question-Concept'), ('6', 'Concept-Question'), ('7', 'Question-Activity'), ('8', 'Activity-Question'), ) FIELD_TYPE_CHOICES = ( ('1', 'CharField'), ('2', 'TextField'), ('3', 'IntegerField'), ('4', 'CommaSeparatedIntegerField'), ('5', 'BigIntegerField'), ('6', 'PositiveIntegerField'), ('7', 'DecimalField'), ('8', 'FloatField'), ('9', 'BooleanField'), ('10', 'NullBooleanField'), ('11', 'DateField'), ('12', 'DateTimeField'), ('13', 'TimeField'), ('14', 'EmailField'), ('15', 'FileField'), ('16', 'FilePathField'), ('17', 'ImageField'), ('18', 'URLField'), ('19', 'IPAddressField'), ) STATUS_CHOICES = ((DRAFT, _('draft')), (HIDDEN, _('hidden')), (PUBLISHED, _('published'))) counter = 1 attr_counter = -1 class Author(User): """Proxy Model around User""" objects = models.Manager() published = AuthorPublishedManager() def nodetypes_published(self): """Return only the nodetypes published""" return nodetypes_published(self.nodetypes) @property def title(self): return self.username 
@models.permalink def get_absolute_url(self): """Return author's URL""" #return "/authors/%s/" %(self.username) return ('gstudio_author_detail', (self.username,)) class Meta: """Author's Meta""" proxy = True class NID(models.Model): """the set of all nodes. provides node ID (NID) to all nodes in the network, including edges. Edges are also first class citizens in the gnowledge base. """ title = models.CharField(_('title'), help_text=_('give a name to the node'), max_length=255) last_update = models.DateTimeField(_('last update'), default=datetime.now) creation_date = models.DateTimeField(_('creation date'), default=datetime.now) slug = models.SlugField(help_text=_('used for publication'), unique_for_date='creation_date', max_length=255) nodemodel = models.CharField(_('nodemodel'),max_length=255) @property def get_revisioncount(self): """ Returns the number of versions. """ i=0 ver=Version.objects.get_for_object(self) for each in ver: i=i+1 return i @property def get_version_list(self): """ Returns the version list. """ ver=Version.objects.get_for_object(self) return ver @property def get_ssid(self): """ return snapshot ids (revision id). returns a list. """ slist=[] vlist=self.get_version_list for each in vlist: slist.append(each.id) return slist def version_info(self,ssid): version_object=Version.objects.get(id=ssid) return version_object.field_dict def get_version_nbh(self,ssid): """ Returns the version's nbh (neighbourhood). """ ver_dict=self.version_info(ssid) ver_nbh_list=[] ver_nbh_dict={} for item in self.get_nbh.keys(): if item in ver_dict.keys(): ver_nbh_list.append(item) for each in ver_nbh_list: ver_nbh_dict[each]=ver_dict[each] return ver_nbh_dict def get_serialized_dict(self): """ return the fields in a serialized form of the current object using the __dict__ function. """ return self.__dict__ @models.permalink def get_absolute_url(self): """Return nodetype's URL""" if self.ref.__class__.__name__=='Gbobject' or self.ref.__class__.__name__=='Process' or self.ref.__class__.__name__=='System': return('objectapp_gbobject_detail',(),{ 'year':self.creation_date.strftime('%Y'), 'month':self.creation_date.strftime('%m'), 'day':self.creation_date.strftime('%d'), 'slug':self.slug}) else: return ('gstudio_nodetype_detail', (), { 'year': self.creation_date.strftime('%Y'), 'month': self.creation_date.strftime('%m'), 'day': self.creation_date.strftime('%d'), 'slug': self.slug}) @property def ref(self): from objectapp.models import * return eval(self.nodemodel).objects.get(id=self.id) # """ # Returns the object reference the id belongs to. # """ # try: # """ # ALGO: get object id, go to version model, return for the given id. # """ # # Retrieving only the relevant tupleset for the versioned objects # # vrs = Version.objects.filter(type=0 , object_id=self.id) # # Returned value is a list, so splice it. # vrs = vrs[0] # except: # return None # return vrs.object @property def reftype(self): """ Returns the type the id belongs to.
""" try: """ ALGO: simple wrapper for the __class__.__name__ so that it can be used in templates """ # return self.__class__.__name__ obj = self.ref return obj.__class__.__name__ except: return None @property def getat(self): """This is will give the possible attributetypes """ try: pt = [] attributetype = [] returndict = {} pt.append(self.ref) obj = self.ref while obj.parent: pt.append((obj.parent).ref) obj=obj.parent for each in pt: attributetype.append(each.subjecttype_of.all()) attributetype = [num for elem in attributetype for num in elem] for i in attributetype: if str(i.applicable_nodetypes) == 'OT': returndict.update({i.title:i.id}) return returndict.keys() except: return None @property def getrt(self): """pt =[] contains parenttype reltype =[] contains relationtype titledict = {} contains relationtype's title inverselist = [] contains relationtype's inverse finaldict = {} contains either title of relationtype or inverse of relationtype listval=[] contains keys of titledict to check whether parenttype id is equals to listval's left or right subjecttypeid""" pt =[] reltype =[] titledict = {} inverselist = [] finaldict = {} listval=[] pt.append(self.ref) obj = self.ref while obj.parent: pt.append((obj.parent).ref) obj=obj.parent for i in range(len(pt)): if Relationtype.objects.filter(left_subjecttype = pt[i].id): reltype.append(Relationtype.objects.filter(left_subjecttype = pt[i].id)) if Relationtype.objects.filter(right_subjecttype = pt[i].id): reltype.append(Relationtype.objects.filter(right_subjecttype = pt[i].id)) reltype = [num for elem in reltype for num in elem] #this rqud for filtering for i in reltype: titledict.update({i.title:i.id}) for i in range(len(titledict)): listval.append(Relationtype.objects.get(title = titledict.keys()[i])) obj=Relationtype.objects.get(title=titledict.keys()[i]) inverselist.append(str(unidecode(obj.inverse))) for j in range(len(pt)): for i in range(len(listval)): if pt[j].id == listval[i].left_subjecttype_id and str(listval[i].left_applicable_nodetypes) == 'OT' : finaldict.update({titledict.keys()[i]:titledict.values()[i]}) if pt[j].id == listval[i].right_subjecttype_id and str(listval[i].right_applicable_nodetypes)=='OT': finaldict.update({inverselist[i]:titledict.values()[i]}) return finaldict.keys() @property def get_edit_url(self): return "/admin/" + self._meta.app_label + "/" + self._meta.module_name + "/" + str(self.id) def get_serialized_data(self): """ return the fields in a serialized form of the current object. 
get object id, go to version model, return serialized_data for the given id """ from reversion.models import Version version = Version.objects.get(id=self.id) return version.serialized_data def __unicode__(self): return self.title class Meta: """NID's Meta""" class Node(NID): """ Super class """ altnames = TagField(_('alternate names'), help_text=_('alternate names if any'), blank=True, null=True) plural = models.CharField(_('plural name'), help_text=_('plural form of the node name if any'), max_length=255, blank=True, null=True) rating = RatingField(range=5, can_change_vote = True, help_text=_('your rating'), blank=True, null=True) status = models.IntegerField(choices=STATUS_CHOICES, default=PUBLISHED) start_publication = models.DateTimeField(_('start publication'), help_text=_('date start publish'), default=datetime.now) end_publication = models.DateTimeField(_('end publication'), help_text=_('date end publish'), default=datetime(2042, 3, 15)) sites = models.ManyToManyField(Site, verbose_name=_('sites publication'), related_name='nodetypes') nbhood = models.TextField(help_text="The rendered neighbourhood of the model.") # nbh = models.TextField(help_text="The neighbourhood of the model.") published = NodePublishedManager() def __unicode__(self): title=self.title modelname=self.nodemodel displayname=modelname+": "+title return displayname class Meta: abstract=False class Metatype(Node): """ Metatype object for Nodetype """ description = models.TextField(_('description'), blank=True, null=True) parent = models.ForeignKey('self', null=True, blank=True, verbose_name=_('parent metatype'), related_name='children') def nodetypes_published(self): """ Return only the published nodetypes """ return nodetypes_published(self.member_types) @property def get_nbh(self): """ Returns the neighbourhood of the metatype """ nbh = {} nbh['title'] = self.title nbh['altnames'] = self.altnames nbh['plural'] = self.plural if self.parent: nbh['typeof'] = self.parent # generate ids and names of children/members nbh['contains_subtypes'] = self.children.get_query_set() nbh['contains_members'] = self.nodetypes_published() nbh['left_subjecttype_of'] = Relationtype.objects.filter(left_subjecttype=self.id) nbh['right_subjecttype_of'] = Relationtype.objects.filter(right_subjecttype=self.id) nbh['attributetypes'] = Attributetype.objects.filter(subjecttype=self.id) return nbh @property def get_possible_attributetypes(self): """ Gets the relations possible for this metatype 1. Recursively create a set of all the ancestors i.e. parent/subtypes of the MT. 2. Get all the AT's linked to each ancestor """ #Step 1. ancestor_list = [] this_parent = self.parent # recursive thru parent field and append while this_parent: ancestor_list.append(this_parent) this_parent = this_parent.parent #Step 2. attrtypes = [] for each in ancestor_list: # retrieve all the AT's from each ancestor attrtypes.extend(Attributetype.objects.filter(subjecttype=each.id)) return attrtypes @property def get_possible_rels(self): """ Gets the relations possible for this metatype 1. Recursively create a set of all the ancestors i.e. parent/subtypes of the MT. 2. Get all the R's linked to each ancestor """ #Step 1. ancestor_list = [] this_parent = self.parent # append while this_parent: ancestor_list.append(this_parent) this_parent = this_parent.parent #Step 2. 
rels = {} rt_set = Relation.objects.all() right_subset = [] left_subset = [] for each in ancestor_list: # retrieve all the RT's from each ancestor right_subset.extend(rt_set.filter(subject1=each.id)) left_subset.extend(rt_set.filter(subject2=each.id)) rels['possible_leftroles'] = left_subset rels['possible_rightroles'] = right_subset return rels @property def get_possible_attributes(self): """ Gets the relations possible for this metatype 1. Recursively create a set of all the ancestors i.e. parent/subtypes of the MT. 2. Get all the RT's linked to each ancestor """ #Step 1. ancestor_list = [] this_parent = self.parent # recursive thru parent field and append while this_parent: ancestor_list.append(this_parent) this_parent = this_parent.parent #Step 2. attrs = [] for each in ancestor_list: # retrieve all the AT's from each ancestor attrs.extend(Attribute.objects.filter(subject=each.id)) return attrs @property def get_rendered_nbh(self): """ Returns the neighbourhood of the metatype """ history=[] version_list=self.get_ssid if version_list: length=len(version_list) history_ssid=version_list[length-1] history_dict=self.version_info(history_ssid) history_nbh_dict=ast.literal_eval(history_dict['nbhood']) #ssid_current.append(history_ssid) history=history_nbh_dict['history'] history.append(history_ssid) else: history.append(0) nbh = {} history_list=self.get_ssid nbh['title'] = self.title nbh['altnames'] = self.altnames nbh['plural'] = self.plural if self.parent: obj=NID.objects.get(id=self.parent.id) nbh['typeof'] = obj.ref.get_absolute_url() # generate ids and names of children nbh['contains_subtypes'] = self.children.get_query_set() contains_members_list = [] for each in self.nodetypes_published(): contains_members_list.append('<a href="%s">%s</a>' % (each.get_absolute_url(), each.title)) nbh['contains_members'] = contains_members_list nbh['left_subjecttype_of'] = Relationtype.objects.filter(left_subjecttype=self.id) nbh['right_subjecttype_of'] = Relationtype.objects.filter(right_subjecttype=self.id) nbh['attributetypes'] = Attributetype.objects.filter(subjecttype=self.id) nbh['history']=history return nbh @property def tree_path(self): """Return metatype's tree path, by its ancestors""" if self.parent: return u'%s/%s' % (self.parent.tree_path, self.slug) return self.slug def __unicode__(self): displayname="MT: "+self.title return displayname @property def composed_sentence(self): "composes the relation as a sentence in triple format." if self.parent: return u'%s is a kind of %s' % (self.title, self.parent.tree_path) return u'%s is a root node' % (self.slug) @models.permalink def get_absolute_url(self): """Return metatype's URL""" return ('gstudio_metatype_detail', (self.tree_path,)) class Meta: """Metatype's Meta""" ordering = ['title'] verbose_name = _('metatype') verbose_name_plural = _('metatypes') # Save for metatype def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] super(Metatype, self).save(*args, **kwargs) # Call the "real" save() method. if GSTUDIO_VERSIONING: with reversion.create_revision(): super(Metatype, self).save(*args, **kwargs) # Call the "real" save() method.
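The recursion in tree_path and composed_sentence above is easiest to see on a small hierarchy. A sketch with invented titles and slugs, assuming a configured Django environment (unsaved instances suffice here, since both properties only walk the in-memory parent chain):

# Hypothetical three-level hierarchy, for illustration only.
animal = Metatype(title="Animal", slug="animal")                 # root node
mammal = Metatype(title="Mammal", slug="mammal", parent=animal)
dog = Metatype(title="Dog", slug="dog", parent=mammal)

print(dog.tree_path)             # animal/mammal/dog
print(dog.composed_sentence)     # Dog is a kind of animal/mammal
print(animal.composed_sentence)  # animal is a root node (uses the slug)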
class Edge(NID): metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'), related_name='member_edges', blank=True, null=True) def __unicode__(self): displayname="ED: " + self.title return displayname class Meta: """ Meta class for Edge """ def save(self, *args, **kwargs): if GSTUDIO_VERSIONING: with reversion.create_revision(): super(Edge, self).save(*args, **kwargs) # Call the "real" save() method. super(Edge, self).save(*args, **kwargs) # Call the "real" save() method. class Nodetype(Node): """ Model design for publishing nodetypes. Other nodetypes inherit this class. """ STATUS_CHOICES = ((DRAFT, _('draft')), (HIDDEN, _('hidden')), (PUBLISHED, _('published'))) content = models.TextField(_('content'), null=True, blank=True) content_org = models.TextField(_('content_org'), null=True, blank=True) parent = models.ForeignKey('self', null=True, blank=True, verbose_name=_('is a kind of'), related_name='children') prior_nodes = models.ManyToManyField('self', symmetrical=False,null=True, blank=True, verbose_name=_('its meaning depends on '), related_name='nodetype_prior_nodes') posterior_nodes = models.ManyToManyField('self', symmetrical=False,null=True, blank=True, verbose_name=_('required for the meaning of '), related_name='nodetype_posterior_nodes') image = models.ImageField(_('image'), upload_to=UPLOAD_TO, blank=True, help_text=_('used for illustration')) excerpt = models.TextField(_('excerpt'), blank=True, help_text=_('optional element')) tags = TagField(_('tags')) metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'), related_name='member_types', blank=True, null=True) authors = models.ManyToManyField(User, verbose_name=_('authors'), related_name='nodetypes', blank=True, null=False) featured = models.BooleanField(_('featured'), default=False) comment_enabled = models.BooleanField(_('comment enabled'), default=True) pingback_enabled = models.BooleanField(_('linkback enabled'), default=True) login_required = models.BooleanField( _('login required'), default=False, help_text=_('only authenticated users can view the nodetype')) password = models.CharField( _('password'), max_length=50, blank=True, help_text=_('protect the nodetype with a password')) template = models.CharField( _('template'), max_length=250, default='gstudio/nodetype_detail.html', choices=[('gstudio/nodetype_detail.html', _('Default template'))] + NODETYPE_TEMPLATES,help_text=_('template used to display the nodetype')) rurl=models.URLField(_('rurl'),verify_exists=True,null=True, blank=True) objects = models.Manager() published = NodetypePublishedManager() def get_possible_reltypes(self): """ Gets the relations possible for this metatype 1. Recursively create a set of all the ancestors i.e. parent/subtypes of the MT. 2. Get all the RT's linked to each ancestor """ #Step 1. ancestor_list = [] this_parent = self.parent # append while this_parent: ancestor_list.append(this_parent) this_parent = this_parent.parent #Step 2. 
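        # Sketch of the returned shape (an assumption for clarity, with
        # hypothetical relation types): Step 2 below buckets the inherited
        # relation types by the side on which an ancestor appears, e.g.
        #
        #     {'possible_leftroles':  [<Relationtype: RT: eats>, ...],
        #      'possible_rightroles': [<Relationtype: RT: is eaten by>, ...]}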
        reltypes = {}
        rt_set = Relationtype.objects.all()
        right_subset = []
        left_subset = []
        for each in ancestor_list:
            # retrieve all the RT's from each ancestor
            right_subset.extend(rt_set.filter(left_subjecttype=each.id))
            left_subset.extend(rt_set.filter(right_subjecttype=each.id))
        reltypes['possible_leftroles'] = left_subset
        reltypes['possible_rightroles'] = right_subset
        return reltypes

    @property
    def get_edit_url_for_ats(self):
        '''
        Get all the attributes from get_rendered_nbh and return their edit URLs
        '''
        retdict = {}
        for key, value in self.get_rendered_nbh.items():
            if key:
                if key == 'attributes':
                    for akey, avalue in value.items():
                        ats = Attributetype.objects.filter(title=akey)
                        if ats:
                            ats = Attributetype.objects.get(title=akey)
                            for atrbs in Attribute.objects.all():
                                if atrbs.attributetype_id == ats.id:
                                    gid = NID.objects.get(id=atrbs.id).ref.get_edit_url
                                    retdict[gid] = atrbs.svalue
        return retdict

    @property
    def get_at_url_add(self):
        """
        Gets all the ATs (excluding those for which Attributes are already
        added) with their URLs for adding attributes.
        Get all ATs of the NT. Get the attribute-model-name from its
        'dataType'. Check whether an entry exists in the Attribute table for
        this AT; else return it along with its admin-add-form URL.
        """
        retats = {}
        ats = self.subjecttype_of.all()
        if ats:
            for each in ats:
                if each.applicable_nodetypes == 'OT':
                    atdatatype = each.dataType
                    # map the dataType code to the matching Attribute model name
                    datatype_models = {
                        '1': 'CharField', '2': 'TextField', '3': 'IntegerField',
                        '4': 'CommaSeparatedIntegerField', '5': 'BigIntegerField',
                        '6': 'PositiveIntegerField', '7': 'DecimalField',
                        '8': 'FloatField', '9': 'BooleanField',
                        '10': 'NullBooleanField', '11': 'DateField',
                        '12': 'DateTimeField', '13': 'TimeField',
                        '14': 'EmailField', '15': 'FileField',
                        '16': 'FilePathField', '17': 'ImageField',
                        '18': 'URLField', '19': 'IPAddressField',
                    }
                    model = datatype_models[atdatatype]
                    aturl = "admin/gstudio/attribute" + model.lower() + "/add/?attributetype=" + str(each.id) + "&subject=" + str(self.id)
                    atsubject = self.subject_of.all()
                    # check whether an Attribute for the current AT is already added
                    fl = 0
                    for eachs in atsubject:
                        if eachs.attributetype_id == each.id and eachs.subject_id == each.subjecttype.id:
                            fl = 1
                    # fl == 0 means no Attribute for this AT yet, so offer it for adding
                    if fl == 0:
                        retats[each.title] = aturl
        return retats

    @property
    def get_possible_attributetypes(self):
        """
        Gets the attribute types possible for this nodetype
        1. Recursively create a set of all the ancestors i.e. parent/subtypes of the NT.
        2. Get all the AT's linked to each ancestor
        """
        # Step 1.
        ancestor_list = []
        this_parent = self.parent
        # recurse through the parent field and append
        while this_parent:
            ancestor_list.append(this_parent)
            this_parent = this_parent.parent
        # Step 2.
        attrtypes = []
        for each in ancestor_list:
            # retrieve all the AT's from each ancestor
            attrtypes.extend(Attributetype.objects.filter(subjecttype=each.id))
        return attrtypes

    @property
    def get_possible_rels(self):
        """
        Gets the relations possible for this nodetype
        1. Recursively create a set of all the ancestors i.e. parent/subtypes of the NT.
        2. Get all the R's linked to each ancestor
        """
        # Step 1.
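        # Aside on get_at_url_add above (a sketch with hypothetical ids, not
        # original code): for an AT 'color' (id 7, dataType '1', i.e.
        # CharField) declared on this nodetype (id 3) with no Attribute
        # recorded yet, the returned dict would contain roughly:
        #
        #     {'color': 'admin/gstudio/attributecharfield/add/'
        #               '?attributetype=7&subject=3'}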
        ancestor_list = []
        this_parent = self.parent
        # walk up the parent chain and append
        while this_parent:
            ancestor_list.append(this_parent)
            this_parent = this_parent.parent
        # Step 2.
        rels = {}
        rt_set = Relation.objects.all()
        right_subset = []
        left_subset = []
        for each in ancestor_list:
            # retrieve all the relations from each ancestor
            right_subset.extend(rt_set.filter(left_subject=each.id))
            left_subset.extend(rt_set.filter(right_subject=each.id))
        rels['possible_leftroles'] = left_subset
        rels['possible_rightroles'] = right_subset
        return rels

    def get_graph_json(self):
        g_json = {}
        g_json["node_metadata"] = []
        g_json["relations"] = []
        global counter
        global attr_counter
        nbh = self.get_nbh
        predicate_id = {}
        for key in nbh.keys():
            val = str(counter) + "b"
            predicate_id[key] = val
            counter = counter + 1
        #print predicate_id
        this_node = {"_id": str(self.id), "title": self.title, "screen_name": self.title,
                     "url": self.get_absolute_url(), "refType": self.reftype}
        g_json["node_metadata"].append(this_node)
        for key in predicate_id.keys():
            if nbh[key]:
                try:
                    g_json["node_metadata"].append({"_id": str(predicate_id[key]), "screen_name": key})
                    g_json["relations"].append({"from": self.id, "type": str(key), "value": 1, "to": predicate_id[key]})
                    if not isinstance(nbh[key], basestring) and len(nbh[key]) <= 10:
                        for item in nbh[key]:
                            if isinstance(item, unicode):
                                g_json["node_metadata"].append({"_id": (str(attr_counter) + "b"), "screen_name": str(item)})
                                g_json["relations"].append({"from": predicate_id[key], "type": str(key), "value": 1, "to": (str(attr_counter) + "b")})
                                attr_counter -= 1
                            elif item.reftype != "Relation":
                                # create nodes
                                g_json["node_metadata"].append({"_id": str(item.id), "screen_name": item.title, "title": self.title,
                                                                "url": item.get_absolute_url(), "refType": item.reftype})
                                g_json["relations"].append({"from": predicate_id[key], "type": str(key), "value": 1, "to": item.id})
                            else:
                                if item.left_subject.id == self.id:
                                    item1 = item.right_subject
                                    flag = 1
                                elif item.right_subject.id == self.id:
                                    item1 = item.left_subject
                                    flag = 0
                                g_json["node_metadata"].append({"_id": str(item1.id), "screen_name": item1.title, "title": self.title,
                                                                "url": item1.get_absolute_url(), "refType": item.reftype,
                                                                "inverse": item.relationtype.inverse, "flag": flag})
                                g_json["relations"].append({"from": predicate_id[key], "type": str(key), "value": 1, "to": item1.id})
                    else:
                        if not isinstance(nbh[key], basestring):
                            g_json["node_metadata"].append({"_id": (str(attr_counter)) + "a", "screen_name": str(len(nbh[key])) + " nodes...",
                                                            "title": str(key), "url": "/nodetypes/graphs/graph_label/" + str(self.id) + "/" + str(key)})
                            #g_json["relations"].append({"from": predicate_id[key], "type": str(key), "value": 1, "to": (str(attr_counter))})
                        else:
                            g_json["node_metadata"].append({"_id": (str(attr_counter) + "a"), "screen_name": nbh[key]})
                            g_json["relations"].append({"from": predicate_id[key], "type": str(key), "value": 1, "to": (str(attr_counter) + "a")})
                        attr_counter -= 1
                except:
                    # skip entries that fail to serialize
                    pass
        #print g_json
        return json.dumps(g_json)

    def get_label(self, key):
        nbh = self.get_nbh
        list_of_nodes = []
        for item in nbh[key]:
            node = NID.objects.get(id=item.id)
            node = node.ref
            list_of_nodes.append(node)
        return list_of_nodes

    @property
    def get_possible_attributes(self):
        """
        Gets the attributes possible for this nodetype
        1. Recursively create a set of all the ancestors i.e. parent/subtypes of the NT.
        2. Get all the Attributes linked to each ancestor
        """
        # Step 1.
        ancestor_list = []
        this_parent = self.parent
        # recurse through the parent field and append
        while this_parent:
            ancestor_list.append(this_parent)
            this_parent = this_parent.parent
        # Step 2.
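        # Aside on get_graph_json above (a sketch, not original code): the
        # serialized graph has two top-level keys, e.g.
        #
        #     {"node_metadata": [{"_id": "42", "title": ..., "screen_name": ...,
        #                         "url": ..., "refType": ...}, ...],
        #      "relations":     [{"from": 42, "type": "contains_members",
        #                         "value": 1, "to": "1b"}, ...]}
        #
        # where the synthetic "Nb"/"Na" ids stand for predicate/value nodes.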
        attrs = []
        for each in ancestor_list:
            # retrieve all the attributes from each ancestor
            attrs.extend(Attribute.objects.filter(subject=each.id))
        return attrs

    @property
    def tree_path(self):
        """Return nodetype's tree path, by its ancestors"""
        if self.parent:
            return u'%s/%s' % (self.parent.tree_path, self.slug)
        return self.slug

    @property
    def tree_path_sentence(self):
        """ Return the parent of the nodetype in a triple form """
        if self.parent:
            return u'%s is a kind of %s' % (self.title, self.parent.tree_path)
        return u'%s is a root node' % (self.title)

    @property
    def html_content(self):
        """Return the content correctly formatted"""
        if MARKUP_LANGUAGE == 'markdown':
            return markdown(self.content, MARKDOWN_EXTENSIONS)
        elif MARKUP_LANGUAGE == 'textile':
            return textile(self.content)
        elif MARKUP_LANGUAGE == 'restructuredtext':
            return restructuredtext(self.content)
        # elif not '</p>' in self.content:
        #     return linebreaks(self.content)
        return self.content

    @property
    def get_relations(self):
        relation_set = {}
        # ALGO to find the relations and their left-subjecttypes and right_subjecttypes
        # 1. Get the relations containing a reference to the object. Retrieve where it occurs (left or right)
        # 2. Find out which RT they come from.
        # 3. For each RT, create a dict key and a value as a dict. And add the relation as a new key-value pair (rid:subject).
        # 4. If self is in the right value, then add the inverse relation as RT and add the relation as a new key-value pair (rid:subject).
        left_relset = Relation.objects.filter(left_subject=self.id)
        right_relset = Relation.objects.filter(right_subject=self.id)
        #return left_relset + right_relset
        # RT dictionary to store a single relation
        rel_dict = {}
        rel_dict['left-subjecttypes'] = {}
        rel_dict['right_subjecttypes'] = {}
        for relation in left_relset:
            # check if the relation already exists
            if relation.relationtype.title not in rel_dict['left-subjecttypes'].keys():
                # create a new list field and add to it
                rel_dict['left-subjecttypes'][str(unidecode(relation.relationtype.title))] = []
            # add
            rel_dict['left-subjecttypes'][str(unidecode(relation.relationtype.title))].append(relation)
        for relation in right_relset:
            # check if the relation exists
            if relation.relationtype.inverse not in rel_dict['right_subjecttypes'].keys():
                # create a new list key field and add to it
                rel_dict['right_subjecttypes'][str(unidecode(relation.relationtype.inverse))] = []
            # add to the existing key
            rel_dict['right_subjecttypes'][str(unidecode(relation.relationtype.inverse))].append(relation)
        relation_set.update(rel_dict['left-subjecttypes'])
        relation_set.update(rel_dict['right_subjecttypes'])
        return relation_set

    @property
    def get_rendered_relations(self):
        """
        Returns all the relations of the nodetype
        """
        relations = {}
        reltype = {}
        left_relations = Relation.objects.filter(left_subject=self.id)
        if left_relations:
            for each in left_relations:
                relation = each.relationtype.title
                predicate = each.right_subject
                predicate_values = []
                if reltype:
                    fl = 0
                    for key, value in reltype.items():
                        if type(value) != list:
                            t = []
                            t.append(value)
                            predicate_values = t
                        else:
                            predicate_values = value
                        if each.relationtype.title == key:
                            fl = 1
                            predicate_values.append(predicate)
                            reltype[key] = predicate_values
                    if fl == 0:
                        predicate_values = predicate
                        reltype[relation] = predicate_values
                else:
                    predicate_values.append(predicate)
                    reltype[relation] = predicate_values
            relations['lrelations'] = reltype
        right_relations = Relation.objects.filter(right_subject=self.id)
        reltype = {}
        if right_relations:
            for each in right_relations:
                relation = each.relationtype.inverse
                predicate = each.left_subject
                predicate_values = []
                if reltype:
                    fl = 0
                    for key, value in reltype.items():
                        if type(value) != list:
                            t = []
                            t.append(value)
                            predicate_values = t
                        else:
                            predicate_values = value
                        if each.relationtype.inverse == key:
                            fl = 1
                            predicate_values.append(predicate)
                            reltype[key] = predicate_values
                    if fl == 0:
                        predicate_values = predicate
                        reltype[relation] = predicate_values
                else:
                    predicate_values.append(predicate)
                    reltype[relation] = predicate_values
            relations['rrelations'] = reltype
        return relations

    @property
    def get_attributes(self):
        attributes_dict = {}
        all_attributes = self.subject_of.all()
        for attributes in all_attributes:
            val = []
            atr_key = attributes.attributetype.title
            val.append(attributes.svalue)
            if attributes_dict:
                fl = 0
                itms = attributes_dict
                for key, value in itms.items():
                    if atr_key in key:
                        fl = 1
                        if type(value) != list:
                            t = []
                            t.append(value)
                            val.extend(t)
                        else:
                            val.extend(value)
            attributes_dict[atr_key] = val
        return attributes_dict

    @property
    def previous_nodetype(self):
        """Return the previous nodetype"""
        nodetypes = Nodetype.published.filter(
            creation_date__lt=self.creation_date)[:1]
        if nodetypes:
            return nodetypes[0]

    @property
    def next_nodetype(self):
        """Return the next nodetype"""
        nodetypes = Nodetype.published.filter(
            creation_date__gt=self.creation_date).order_by('creation_date')[:1]
        if nodetypes:
            return nodetypes[0]

    @property
    def word_count(self):
        """Count the words of a nodetype"""
        return len(strip_tags(self.html_content).split())

    @property
    def is_actual(self):
        """Check if a nodetype is within its publication period"""
        now = datetime.now()
        return self.start_publication <= now < self.end_publication

    @property
    def is_visible(self):
        """Check if a nodetype is visible on site"""
        return self.is_actual and self.status == PUBLISHED

    @property
    def related_published(self):
        """Return only related nodetypes published"""
        return nodetypes_published(self.related)

    @property
    def discussions(self):
        """Return published discussions"""
        return comments.get_model().objects.for_model(
            self).filter(is_public=True)

    @property
    def comments(self):
        """Return published comments"""
        return self.discussions.filter(Q(flags=None) | Q(
            flags__flag=CommentFlag.MODERATOR_APPROVAL))

    @property
    def pingbacks(self):
        """Return published pingbacks"""
        return self.discussions.filter(flags__flag='pingback')

    @property
    def trackbacks(self):
        """Return published trackbacks"""
        return self.discussions.filter(flags__flag='trackback')

    @property
    def comments_are_open(self):
        """Check if comments are open"""
        if AUTO_CLOSE_COMMENTS_AFTER and self.comment_enabled:
            return (datetime.now() - self.start_publication).days < \
                   AUTO_CLOSE_COMMENTS_AFTER
        return self.comment_enabled

    @property
    def short_url(self):
        """Return the nodetype's short url"""
        return get_url_shortener()(self)

    def __unicode__(self):
        objref = str(self.ref)
        reftitle = self.ref.title
        objref = objref.replace(reftitle, "")
        objtype = objref.strip()
        return objtype + " " + self.title

    @property
    def memberof_sentence(self):
        """Return the metatype of which the nodetype is a member"""
        if self.metatypes.count():
            for each in self.metatypes.all():
                return u'%s is a member of metatype %s' % (self.title, each)
        return u'%s is not a fully defined name, consider making it a member of a suitable metatype' % (self.title)

    @property
    def subtypeof_sentence(self):
        "composes the relation as a sentence in triple format."
        if self.parent:
            return u'%s is a subtype of %s' % (self.title, self.parent.tree_path)
        return u'%s is a root node' % (self.title)

    composed_sentence = property(subtypeof_sentence)

    def subtypeof(self):
        "returns the parent nodetype."
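        # Aside on get_rendered_relations / get_attributes above (a sketch
        # with hypothetical data, not original code):
        #
        #     nt.get_rendered_relations
        #     {'lrelations': {u'eats': [<NID: deer>, <NID: grass>]},
        #      'rrelations': {u'is eaten by': <NID: lion>}}
        #     nt.get_attributes
        #     {u'color': ['yellow'], u'weight': ['190', '200']}
        #
        # note that a predicate maps to a bare value until a second value
        # arrives, after which it maps to a list.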
        if self.parent:
            return u'%s' % (self.parent.tree_path)
        return None

    @models.permalink
    def get_absolute_url(self):
        """Return nodetype's URL"""
        return ('gstudio_nodetype_detail', (), {
            'year': self.creation_date.strftime('%Y'),
            'month': self.creation_date.strftime('%m'),
            'day': self.creation_date.strftime('%d'),
            'slug': self.slug})

    def get_version_url(self):
        """Return the nodetype's version-history URL"""
        return "/nodetypes/display/viewhistory/"

    def get_serialized_data(self):
        """
        return the fields in a serialized form of the current object.
        get the object id, go to the version model, return serialized_data for the given id
        """
        from reversion.models import Version
        version = Version.objects.get(id=self.node_ptr_id)
        return version.serialized_data

    class Meta:
        """Nodetype's Meta"""
        ordering = ['-creation_date']
        verbose_name = _('node type')
        verbose_name_plural = _('node types')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )


class Objecttype(Nodetype):
    '''
    Object class
    '''

    def __unicode__(self):
        displayname = "OT: " + self.title
        return displayname

    @property
    def get_attributetypes(self):
        return self.subjecttype_of.all()

    @property
    def get_relationtypes(self):
        left_relset = self.left_subjecttype_of.all()
        right_relset = self.right_subjecttype_of.all()
        reltypes = {}
        reltypes['left_subjecttype_of'] = left_relset
        reltypes['right_subjecttype_of'] = right_relset
        return reltypes

    @property
    def get_left_subjecttypes(self):
        """
        relation types for which this objecttype is the domain (left role)
        """
        reltypes = []
        left_relset = self.left_subjecttype_of.all()
        for relationtype in left_relset:
            reltypes.append(relationtype)
        return reltypes

    @property
    def get_rightroles(self):
        """
        relation types for which this objecttype is the range (right role)
        """
        reltypes = []
        right_relset = self.right_subjecttype_of.all()
        for relationtype in right_relset:
            reltypes.append(relationtype)
        return reltypes

    @property
    def get_subjecttypes(self):
        """
        attribute types for which this objecttype is the subject type
        """
        subjecttypes = []
        attrset = self.subjecttype_of.all()
        for subjecttype in attrset:
            subjecttypes.append(subjecttype)
        return subjecttypes

    @property
    def member_of_metatypes(self):
        """
        returns the titles of the metatypes this objecttype is a member of
        """
        types = []
        if self.metatypes.all():
            for metatype in self.metatypes.all():
                types.append(metatype.title)
        return types

    @property
    def get_members(self):
        """
        get members of the object type
        """
        members = []
        if self.member_objects.all():
            for gbobject in self.member_objects.all():
                members.append(gbobject)
        return members

    @property
    def get_nbh(self):
        """
        Returns the neighbourhood of the objecttype
        """
        nbh = {}
        nbh['title'] = self.title
        nbh['altnames'] = self.altnames
        nbh['plural'] = self.plural
        nbh['member_of_metatype'] = self.metatypes.all()
        # get all the ATs for the objecttype
        nbh['subjecttype_of'] = self.subjecttype_of.all()
        # get all the RTs for the objecttype
        nbh.update(self.get_relationtypes)
        # include the relations as well
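        # Aside (a sketch, not original code): after the updates above and
        # below, get_nbh for a hypothetical objecttype carries keys such as
        # 'title', 'altnames', 'plural', 'member_of_metatype',
        # 'subjecttype_of', 'left_subjecttype_of', 'right_subjecttype_of',
        # the relation predicates from get_relations, plus 'type_of',
        # 'contains_subtypes', 'contains_members', 'prior_nodes' and
        # 'posterior_nodes'.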
nbh.update(self.get_relations) if self.parent: nbh['type_of'] = [self.parent] nbh['contains_subtypes'] = Nodetype.objects.filter(parent=self.id) # get all the objects inheriting this OT nbh['contains_members'] = self.member_objects.all() nbh['prior_nodes'] = self.prior_nodes.all() nbh['posterior_nodes'] = self.posterior_nodes.all() #nbh['authors'] = self.authors.all() return nbh @property def get_rendered_nbh(self): """ Returns the neighbourhood of the nodetype """ history=[] version_list=self.get_ssid if version_list: length=len(version_list) history_ssid=version_list[length-1] history_dict=self.version_info(history_ssid) # history_nbh_dict=ast.literal_eval(history_dict['nbhood']) #ssid_current.append(history_ssid) # history=history_nbh_dict['history'] history.append(history_ssid) else: history.append(0) nbh = {} nbh['title'] = self.title nbh['count_title'] = len(nbh['title']) nbh['altnames'] = self.altnames nbh['count_altnames'] = len(nbh['altnames']) nbh['plural'] = self.plural #nbh['count_plural'] = len(nbh['plural']) #get all MTs member_of_dict = {} for each in self.metatypes.all(): member_of_dict[each.title]= each.get_absolute_url() nbh['member_of_metatypes']=member_of_dict nbh['count_member_of_metatypes'] = len(nbh['member_of_metatypes']) typeof={} parentid=self.parent_id if parentid: parent=Nodetype.objects.get(id=parentid) if parent: typeof[parent] = parent.get_absolute_url() nbh['type_of']=typeof nbh['count_type_of'] = len(nbh['type_of']) #get all subtypes subtypes={} for each in Nodetype.objects.filter(parent=self.id): subtypes[each.title] =each.get_absolute_url() nbh['contains_subtypes']=subtypes nbh['count_contains_subtypes'] = len(nbh['contains_subtypes']) # get all the objects inheriting this OT contains_members_dict = {} for each in self.member_objects.all(): contains_members_dict[each.title]= each.get_absolute_url() nbh['contains_members'] = contains_members_dict nbh['count_contains_members'] = len(nbh['contains_members']) #get prior nodes priornodes_dict = {} for each in self.prior_nodes.all(): priornodes_dict[each.title]= each.get_absolute_url() nbh['priornodes'] = priornodes_dict nbh['count_priornodes'] = len(nbh['priornodes']) #get posterior nodes posteriornodes_dict = {} for each in self.posterior_nodes.all(): posteriornodes_dict[each.title]= each.get_absolute_url() nbh['posteriornodes'] = posteriornodes_dict nbh['count_posteriornodes'] = len(nbh['posteriornodes']) #get authors author_dict = {} for each in self.authors.all(): author_dict['User'] = each.get_absolute_url() nbh['authors'] = author_dict #get siblings siblings={} for each in self.get_siblings(): siblings[each.title]=each.get_absolute_url() nbh['siblings']=siblings nbh['count_siblings'] = len(nbh['siblings']) #get Relations relns={} rellft={} relrgt={} if self.get_rendered_relations: NTrelns=self.get_rendered_relations for key,value in NTrelns.items(): if key=="rrelations": relrgt={} for rgtkey,rgtvalue in value.items(): relnvalue={} if isinstance(rgtvalue,list): for items in rgtvalue: relnvalue[items.title]=items.get_absolute_url() else: relnvalue[rgtvalue]=rgtvalue.get_absolute_url() relrgt[rgtkey]=relnvalue else: rellft={} relns['left']=rellft for lftkey,lftvalue in value.items(): relnvalue={} if isinstance(lftvalue,list): for items in lftvalue: relnvalue[items.title]=items.get_absolute_url() else: relnvalue[lftvalue]=lftvalue.get_absolute_url() rellft[lftkey]=relnvalue nbh['relations']=relrgt nbh['relations'].update(rellft) nbh['count_relations'] = len(nbh['relations']) #get Attributes attributes 
=self.get_attributes nbh['attributes']=attributes nbh['count_attributes'] = len(nbh['attributes']) #get ATs attributetypes={} for each in self.subjecttype_of.all(): attributetypes[each.title]=each.get_absolute_url() nbh['ats']=attributetypes #get RTs as leftroles and rightroles leftroles = {} for each in self.left_subjecttype_of.all(): leftroles[each.title]=each.get_absolute_url() nbh['leftroles']=leftroles nbh['count_leftroles'] = len(nbh['leftroles']) rightroles = {} for each in self.right_subjecttype_of.all(): rightroles[each.title]=each.get_absolute_url() nbh['rightroles']=rightroles nbh['count_rightroles'] = len(nbh['rightroles']) nbh['history']=history return nbh def get_Version_graph_json(self,ssid): # # predicate_id={"plural":"a1","altnames":"a2","contains_members":"a3","contains_subtypes":"a4","prior_nodes":"a5", "posterior_nodes":"a6"} # slist=self.get_ssid ver_dict=self.version_info(ssid) ver_dict1=self.version_info(ssid) #ver_dict=str(ver['nbhood']) ver_dict=ast.literal_eval(ver_dict['nbhood']) g_json = {} g_json["node_metadata"]= [] g_json["relations"]=[] predicate_id = {} counter=1 attr_counter=-1 for key in ver_dict.keys(): val = "a" + str(counter) predicate_id[key] = val counter = counter + 1 #print predicate_id this_node = {"_id":str(self.id),"title":self.title,"screen_name":self.title, "url":self.get_absolute_url(),"refType":self.reftype} g_json["node_metadata"].append(this_node) for key in predicate_id.keys(): if (ver_dict[key] and (ver_dict[key])!=0 and not(isinstance(ver_dict[key],int ) ) ) : try: g_json["node_metadata"].append({"_id":str(predicate_id[key]),"screen_name":key}) g_json["relations"].append({"from":self.id , "to":predicate_id[key],"value":1, "type":str(key) }) if not isinstance(ver_dict[key],basestring): for item in ver_dict[key]: # user g_json["node_metadata"].append({"_id":(str(attr_counter)+"aa"),"screen_name":item }) #create links g_json["relations"].append({"from":predicate_id[key] ,"type":str(key), "value":1,"to":(str(attr_counter)+"aa") }) attr_counter-=1 else: g_json["node_metadata"].append({"_id":(str(attr_counter)+"a"),"screen_name":ver_dict[key]}) g_json["relations"].append({"from":predicate_id[key] , "to":(str(attr_counter)+"a") ,"value":1, "type":str(key) }) attr_counter-=1 except: pass # print g_json return json.dumps(g_json) class Meta: """ object type's meta class """ verbose_name = _('object type') verbose_name_plural = _('object types') permissions = (('can_view_all', 'Can view all'), ('can_change_author', 'Can change author'), ) # Save for Objecttype # @reversion.create_revision() def save(self,*args, **kwargs): self.nodemodel = self.__class__.__name__ super(Objecttype, self).save(*args, **kwargs) # Call the "real" save() method. self.nbhood=self.get_rendered_nbh if GSTUDIO_VERSIONING: with reversion.create_revision(): self.nodemodel = self.__class__.__name__ if self.parent: ot=NID.objects.get(id=self.parent.id) ot.ref.save() super(Objecttype, self).save(*args, **kwargs) # Call the "real" save() method. def save_revert_or_merge(self,*args, **kwargs): if GSTUDIO_VERSIONING: with reversion.create_revision(): super(Objecttype, self).save(*args, **kwargs) # Call the "real" save() method. class Relationtype(Nodetype): ''' Properties with left and right subjects (Binary relations) are defined in this class. ''' inverse = models.CharField(_('inverse name'), help_text=_('when subjecttypes are interchanged, what should be the name of the relation type? This is mandatory field. 
If the relation is symmetric, same name will do.'), max_length=255,db_index=True ) left_subjecttype = models.ForeignKey(NID,related_name="left_subjecttype_of", verbose_name='left role') left_applicable_nodetypes = models.CharField(max_length=2,choices=NODETYPE_CHOICES,default='OT', verbose_name='Applicable node types for left role') left_cardinality = models.IntegerField(null=True, blank=True, verbose_name='cardinality for the left role') right_subjecttype = models.ForeignKey(NID,related_name="right_subjecttype_of", verbose_name='right role') right_applicable_nodetypes = models.CharField(max_length=2,choices=NODETYPE_CHOICES,default='OT', verbose_name='Applicable node types for right role') right_cardinality = models.IntegerField(null=True, blank=True, verbose_name='cardinality for the right role') is_symmetrical = models.NullBooleanField(verbose_name='Is symmetrical?') is_reflexive = models.NullBooleanField(verbose_name='Is reflexive?') is_transitive = models.NullBooleanField(verbose_name='Is transitive?') def get_serialized_data(self): """ return the fields in a serialized form of the current object. get object id, go to version model, return serialized_data for the given id """ from reversion.models import Version version = Version.objects.get(id=self.node_ptr_id) return version.serialized_data def __unicode__(self): displayname="RT: "+self.title return displayname @property def get_rendered_nbh(self): """ Returns the neighbourhood of the Relationtype """ history=[] version_list=self.get_ssid if version_list: length=len(version_list) history_ssid=version_list[length-1] history_dict=self.version_info(history_ssid) history_nbh_dict=ast.literal_eval(history_dict['nbhood']) #ssid_current.append(history_ssid) history=history_nbh_dict['history'] history.append(history_ssid) else: history.append(0) nbh = {} nbh['title'] = self.title nbh['count_title'] = len(nbh['title']) nbh['altnames'] = self.altnames nbh['count_altnames'] = len(nbh['altnames']) nbh['plural'] = self.plural # nbh['count_plural'] = len(nbh['plural']) #get all MTs member_of_dict = {} for each in self.metatypes.all(): member_of_dict[each.title]= each.get_absolute_url() nbh['member_of_metatypes']=member_of_dict nbh['count_member_of_metatypes'] = len(nbh['member_of_metatypes']) typeof={} parent=self.parent_id if parent: obj=NID.objects.get(id=parent) typeof[parent] = obj.ref.get_absolute_url() nbh['type_of']=typeof nbh['count_type_of'] = len(nbh['type_of']) #get all subtypes subtypes={} for each in Nodetype.objects.filter(parent=self.id): subtypes[each.title] =each.get_absolute_url() nbh['contains_subtypes']=subtypes nbh['count_contains_subtypes'] = len(nbh['contains_subtypes']) # get all the objects inheriting this OT contains_members_dict = {} for each in self.member_objects.all(): contains_members_dict[each.title]= each.get_absolute_url() nbh['contains_members'] = contains_members_dict nbh['count_contains_members'] = len(nbh['contains_members']) #get prior nodes priornodes_dict = {} for each in self.prior_nodes.all(): priornodes_dict[each.title]= each.get_absolute_url() nbh['priornodes'] = priornodes_dict nbh['count_priornodes'] = len(nbh['priornodes']) #get posterior nodes posteriornodes_dict = {} for each in self.posterior_nodes.all(): posteriornodes_dict[each.title]= each.get_absolute_url() nbh['posteriornodes'] = posteriornodes_dict nbh['count_posteriornodes'] = len(nbh['posteriornodes']) #get authors author_dict = {} for each in self.authors.all(): author_dict['User'] = each.get_absolute_url() nbh['authors'] = author_dict 
#get siblings siblings={} for each in self.get_siblings(): siblings[each.title]=each.get_absolute_url() nbh['siblings']=siblings nbh['count_siblings'] = len(nbh['siblings']) #get Relations relns={} rellft={} relrgt={} if self.get_rendered_relations: NTrelns=self.get_rendered_relations for key,value in NTrelns.items(): if key=="rrelations": relrgt={} for rgtkey,rgtvalue in value.items(): relnvalue={} if isinstance(rgtvalue,list): for items in rgtvalue: relnvalue[items]=items.get_absolute_url() else: relnvalue[rgtvalue]=rgtvalue.get_absolute_url() relrgt[rgtkey]=relnvalue else: rellft={} relns['left']=rellft for lftkey,lftvalue in value.items(): relnvalue={} if isinstance(lftvalue,list): for items in lftvalue: relnvalue[items]=items.get_absolute_url() else: relnvalue[lftvalue]=lftvalue.get_absolute_url() rellft[lftkey]=relnvalue nbh['relations']=relrgt nbh['relations'].update(rellft) nbh['count_relations'] = len(nbh['relations']) #get Attributes attributes =self.get_attributes nbh['attributes']=attributes nbh['count_attributes'] = len(nbh['attributes']) #get ATs attributetypes={} for each in self.subjecttype_of.all(): attributetypes[each.title]=each.get_absolute_url() nbh['ats']=attributetypes #get RTs as leftroles and rightroles leftroles = {} for each in self.left_subjecttype_of.all(): leftroles[each.title]=each.get_absolute_url() nbh['leftroles']=leftroles nbh['count_leftroles'] = len(nbh['leftroles']) rightroles = {} for each in self.right_subjecttype_of.all(): rightroles[each.title]=each.get_absolute_url() nbh['rightroles']=rightroles nbh['count_rightroles'] = len(nbh['rightroles']) nbh['history']=history return nbh def get_nbh(self): """ Returns the neighbourhood of the nodetype """ nbh = {} nbh['title'] = self.title nbh['altnames'] = self.altnames nbh['plural'] = self.plural nbh['contains_subtypes'] = Nodetype.objects.filter(parent=self.id) nbh['contains_members'] = self.member_objects.all() nbh['prior_nodes'] = self.prior_nodes.all() nbh['posterior_nodes'] = self.posterior_nodes.all() nbh['inverse']=self.inverse nbh['left_subjecttype']=self.left_subjecttype nbh['left_applicable_nodetypes']=self.left_applicable_nodetypes nbh['left_cardinality']=self.left_cardinality nbh['right_subjecttype']=self.right_subjecttype nbh['right_applicable_nodetypes']=self.right_applicable_nodetypes nbh['right_cardinality']=self.right_cardinality nbh['is_symmetrical']=self.is_symmetrical nbh['is_reflexive']=self.is_reflexive nbh['is_transitive']=self.is_transitive return nbh class Meta: """ relation type's meta class """ verbose_name = _('relation type') verbose_name_plural = _('relation types') permissions = (('can_view_all', 'Can view all'), ('can_change_author', 'Can change author'), ) # Save for Relationtype # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ super(Relationtype, self).save(*args, **kwargs) # Call the "real" save() method. self.nbhood=self.get_rendered_nbh if GSTUDIO_VERSIONING: with reversion.create_revision(): self.nodemodel = self.__class__.__name__ super(Relationtype, self).save(*args, **kwargs) # Call the "real" save() method. class Attributetype(Nodetype): ''' To define attributes of objects. First three fields are mandatory. The rest of the fields may be required depending on what type of field is selected for datatype. 
    '''
    subjecttype = models.ForeignKey(NID, related_name="subjecttype_of",
                                    verbose_name='subject type name')
    applicable_nodetypes = models.CharField(max_length=2,
                                            choices=NODETYPE_CHOICES,
                                            default='OT',
                                            verbose_name='applicable nodetypes')
    dataType = models.CharField(max_length=2, choices=FIELD_TYPE_CHOICES,
                                default='01', verbose_name='data type of value')
    verbose_name = models.CharField(max_length=500, null=True, blank=True,
                                    verbose_name='verbosename',
                                    help_text='verbose name')
    null = models.NullBooleanField(verbose_name='Null',
                                   help_text='can the value be null?')
    blank = models.NullBooleanField(verbose_name='Blank',
                                    help_text='can the form be left blank?')
    help_text = models.CharField(max_length=500, null=True, blank=True,
                                 verbose_name='Help text',
                                 help_text='help text for the field')
    max_digits = models.IntegerField(max_length=5, null=True, blank=True,
                                     verbose_name='Max digits',
                                     help_text='If you have selected Decimal Field for datatype, you have to specify the number of digits.')
    decimal_places = models.IntegerField(max_length=2, null=True, blank=True,
                                         verbose_name='Decimal places',
                                         help_text='If you have selected Decimal Field for datatype, you have to specify the decimal places.')
    auto_now = models.NullBooleanField(verbose_name='Auto now', null=True, blank=True,
                                       help_text='Use this if DateTime & Time Field was chosen above for datatype')
    auto_now_add = models.NullBooleanField(verbose_name='Auto now add', null=True, blank=True,
                                           help_text='Use this if DateTime & Time Field was chosen above for datatype')
    upload_to = models.CharField(max_length=500, verbose_name='Upload to', null=True, blank=True,
                                 help_text='Required for FileField and ImageField')
    path = models.CharField(max_length=500, verbose_name='Path', null=True, blank=True,
                            help_text='Required for FilePathField')
    verify_exists = models.NullBooleanField(verbose_name='Verify exists', null=True, blank=True,
                                            help_text='Required for AttributeURLField')
    min_length = models.IntegerField(max_length=10, null=True, blank=True,
                                     verbose_name='min length',
                                     help_text='minimum length')
    required = models.NullBooleanField(verbose_name='required', null=True, blank=True,
                                       help_text='Use this for setting mandatory and optional fields')
    label = models.CharField(max_length=500, null=True, blank=True, verbose_name='label',
                             help_text='specify the "human-friendly" label')
    unique = models.NullBooleanField(verbose_name='unique', null=True, blank=True,
                                     help_text='If True, this field must be unique throughout the table')
    validators = models.ManyToManyField('self', verbose_name='validators', blank=True, null=True,
                                        help_text='A list of validators to run for this field')
    default = models.CharField(max_length=500, null=True, blank=True, verbose_name='default',
                               help_text='The default value for the field')
    editable = models.NullBooleanField(verbose_name='editable', null=True, blank=True,
                                       help_text='If False, the field will not be editable')

    @property
    def get_rendered_nbh(self):
        """
        Returns the neighbourhood of the Attributetype
        """
        history = []
        version_list = self.get_ssid
        if version_list:
            length = len(version_list)
            history_ssid = version_list[length - 1]
            history_dict = self.version_info(history_ssid)
            history_nbh_dict = ast.literal_eval(history_dict['nbhood'])
            #ssid_current.append(history_ssid)
            history = history_nbh_dict['history']
            history.append(history_ssid)
        else:
            history.append(0)
        nbh = {}
        nbh['title'] = self.title
        nbh['count_title'] = len(nbh['title'])
        nbh['altnames'] = self.altnames
        nbh['count_altnames'] = len(nbh['altnames'])
        # nbh['plural'] = self.plural
        # nbh['count_plural'] = len(nbh['plural'])
        #get all
MTs member_of_dict = {} for each in self.metatypes.all(): member_of_dict[each.title]= each.get_absolute_url() nbh['member_of_metatypes']=member_of_dict nbh['count_member_of_metatypes'] = len(nbh['member_of_metatypes']) typeof={} parent=self.parent_id if parent: obj=NID.objects.get(id=parent) typeof[parent] = obj.ref.get_absolute_url() nbh['type_of']=typeof nbh['count_type_of'] = len(nbh['type_of']) #get all subtypes subtypes={} for each in Nodetype.objects.filter(parent=self.id): subtypes[each.title] =each.get_absolute_url() nbh['contains_subtypes']=subtypes nbh['count_contains_subtypes'] = len(nbh['contains_subtypes']) # get all the objects inheriting this OT contains_members_dict = {} for each in self.member_objects.all(): contains_members_dict[each.title]= each.get_absolute_url() nbh['contains_members'] = contains_members_dict nbh['count_contains_members'] = len(nbh['contains_members']) #get prior nodes priornodes_dict = {} for each in self.prior_nodes.all(): priornodes_dict[each.title]= each.get_absolute_url() nbh['priornodes'] = priornodes_dict nbh['count_priornodes'] = len(nbh['priornodes']) #get posterior nodes posteriornodes_dict = {} for each in self.posterior_nodes.all(): posteriornodes_dict[each.title]= each.get_absolute_url() nbh['posteriornodes'] = posteriornodes_dict nbh['count_posteriornodes'] = len(nbh['posteriornodes']) #get authors author_dict = {} for each in self.authors.all(): author_dict['User'] = each.get_absolute_url() nbh['authors'] = author_dict #get siblings siblings={} for each in self.get_siblings(): siblings[each.title]=each.get_absolute_url() nbh['siblings']=siblings nbh['count_siblings'] = len(nbh['siblings']) #get Relations relns={} rellft={} relrgt={} if self.get_rendered_relations: NTrelns=self.get_rendered_relations for key,value in NTrelns.items(): if key=="rrelations": relrgt={} for rgtkey,rgtvalue in value.items(): relnvalue={} if isinstance(rgtvalue,list): for items in rgtvalue: relnvalue[items]=items.get_absolute_url() else: relnvalue[rgtvalue]=rgtvalue.get_absolute_url() relrgt[rgtkey]=relnvalue else: rellft={} relns['left']=rellft for lftkey,lftvalue in value.items(): relnvalue={} if isinstance(lftvalue,list): for items in lftvalue: relnvalue[items]=items.get_absolute_url() else: relnvalue[lftvalue]=lftvalue.get_absolute_url() rellft[lftkey]=relnvalue nbh['relations']=relrgt nbh['relations'].update(rellft) nbh['count_relations'] = len(nbh['relations']) #get Attributes attributes =self.get_attributes nbh['attributes']=attributes nbh['count_attributes'] = len(nbh['attributes']) #get ATs attributetypes={} for each in self.subjecttype_of.all(): attributetypes[each.title]=each.get_absolute_url() nbh['ats']=attributetypes #get RTs as leftroles and rightroles leftroles = {} for each in self.left_subjecttype_of.all(): leftroles[each.title]=each.get_absolute_url() nbh['leftroles']=leftroles nbh['count_leftroles'] = len(nbh['leftroles']) rightroles = {} for each in self.right_subjecttype_of.all(): rightroles[each.title]=each.get_absolute_url() nbh['rightroles']=rightroles nbh['count_rightroles'] = len(nbh['rightroles']) nbh['history']=history return nbh def __unicode__(self): displayname="AT: "+self.title return displayname @property def getdataType(self): at = 'attribute'+str(self.get_dataType_display()) at = at.lower() return at class Meta: """ attribute type's meta class """ verbose_name = _('attribute type') verbose_name_plural = _('attribute types') permissions = (('can_view_all', 'Can view all'), ('can_change_author', 'Can change author'), ) # Save 
for Attributetype def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ super(Attributetype, self).save(*args, **kwargs) # Call the "real" save() method. self.nbhood=self.get_rendered_nbh if GSTUDIO_VERSIONING: with reversion.create_revision(): super(Attributetype, self).save(*args, **kwargs) # Call the "real" save() method. def save_revert_or_merge(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ if GSTUDIO_VERSIONING: with reversion.create_revision(): super(Attributetype, self).save(*args, **kwargs) # Call the "real" save() method. class Relation(Edge): ''' Relations, instances of relationtypes ''' left_subject_scope = models.CharField(max_length=50, verbose_name='subject scope or qualification', null=True, blank=True) left_subject = models.ForeignKey(NID, related_name="left_subject_of", verbose_name='subject name') relationtype_scope = models.CharField(max_length=50, verbose_name='relation scope or qualification', null=True, blank=True) relationtype = models.ForeignKey(Relationtype, verbose_name='relation name') right_subject_scope = models.CharField(max_length=50, verbose_name='object scope or qualification', null=True, blank=True) right_subject = models.ForeignKey(NID, related_name="right_subject_of", verbose_name='object name') def ApplicableNodeTypes_filter(self,choice): nodeslist = [] if choice == 'ED': nodeslist = Edge.objects.all() if choice == 'OB': nodeslist = Objects.objects.all() if choice == 'ND': nodeslist = Node.objects.all() if choice == 'NT': nodeslist = Nodetype.objects.all() if choice == 'OT': nodeslist = Objecttype.objects.all() if choice == 'RT': nodeslist = Relationtype.objects.all() if choice == 'MT': nodeslist = Metatype.objects.all() if choice == 'AT': nodeslist = Attributetype.objects.all() if choice == 'RN': nodeslist = Relation.objects.all() if choice == 'AS': nodeslist = Attribute.objects.all() if choice == 'ST': nodeslist = Systemtype.objects.all() if choice == 'SY': nodeslist = System.objects.all() return nodeslist class Meta: unique_together = (('left_subject_scope','left_subject','relationtype_scope', 'relationtype', 'right_subject_scope','right_subject'),) verbose_name = _('relation') verbose_name_plural = _('relations') permissions = (('can_view_all', 'Can view all'), ('can_change_author', 'Can change author'), ) def __unicode__(self): displayname="RN: "+self.composed_sentence return displayname @property def composed_sentence(self): "composes the relation as a sentence in a triple format." return u'%s %s %s %s %s %s' % (self.left_subject_scope, self.left_subject, self.relationtype_scope, self.relationtype, self.right_subject_scope, self.right_subject) @property def inversed_sentence(self): "composes the inverse relation as a sentence in a triple format." 
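        # Illustrative reading (hypothetical data, not original code): for a
        # Relation  'lion' --eats--> 'deer'  whose Relationtype has inverse
        # 'is eaten by', composed_sentence reads roughly
        #     u'... lion ... eats ... deer'   (scopes interleaved)
        # and inversed_sentence
        #     u'... deer is eaten by ... lion'.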
        return u'%s %s %s %s %s' % (self.right_subject_scope,
                                    self.right_subject,
                                    self.relationtype.inverse,
                                    self.left_subject_scope,
                                    self.left_subject)

    @property
    def key_value(self):
        return dict({str(self.relationtype): str(self.right_subject)})

    @property
    def inverse_key_value(self):
        return dict({str(self.relationtype.inverse): str(self.left_subject)})

    @property
    def relation_sentence(self):
        """Return the relation as a subject-predicate-object sentence"""
        if self.relationtype:
            # for relation in self.relationtype():
            return u'%s %s %s' % (self.left_subject, self.relationtype, self.right_subject)

    @property
    def partial_composition(self):
        '''
        function that composes the right_subject and relation name, as in
        "x as a friend", "y as a sibling"
        '''
        return u'%s as a %s' % (self.right_subject, self.relationtype)

    # Save for Relation
    def save(self, *args, **kwargs):
        """
        left_subject and right_subject should be saved after creating the relation
        """
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Relation, self).save(*args, **kwargs)  # Call the "real" save() method.
        left_subject = self.left_subject
        right_subject = self.right_subject
        left_subject.ref.save()
        right_subject.ref.save()
        super(Relation, self).save(*args, **kwargs)  # Call the "real" save() method.


class Attribute(Edge):
    '''
    Attribute value store for the default datatype varchar. Subject can be
    any of the nodetypes.
    '''
    subject_scope = models.CharField(max_length=50,
                                     verbose_name='subject scope or qualification',
                                     null=True, blank=True)
    subject = models.ForeignKey(NID, related_name="subject_of",
                                verbose_name='subject name')
    attributetype_scope = models.CharField(max_length=50,
                                           verbose_name='property scope or qualification',
                                           null=True, blank=True)
    attributetype = models.ForeignKey(Attributetype, verbose_name='property name')
    value_scope = models.CharField(max_length=50,
                                   verbose_name='value scope or qualification',
                                   null=True, blank=True)
    svalue = models.CharField(max_length=100, verbose_name='serialized value')

    class Meta:
        unique_together = (('subject_scope', 'subject', 'attributetype_scope',
                            'attributetype', 'value_scope', 'svalue'),)
        verbose_name = _('attribute')
        verbose_name_plural = _('attributes')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    def subject_filter(self, attr):
        """
        returns the applicable selection of nodes for selecting as subject
        """
        subjecttype = attr.subjecttype
        for each in Objecttype.objects.all():
            if attr.subjecttype.id == each.id:
                return each.get_members

    def __unicode__(self):
        displayname = "AS: " + self.composed_attribution
        return displayname

    @property
    def edge_node_dict(self):
        '''
        composes the attribution as a name:value pair sentence without the subject.
        '''
        return dict({str(self.attributetype_scope) + str(self.attributetype):
                     str(self.value_scope) + str(self.svalue)})

    @property
    def composed_sentence(self):
        '''
        composes the attribution as a sentence in a triple format.
''' return u'%s %s has %s %s %s %s' % (self.subject_scope, self.subject, self.attributetype_scope, self.attributetype, self.value_scope, self.svalue) @property def composed_attribution(self): ''' composes a name to the attribute ''' return u'the %s of %s is %s' % (self.attributetype, self.subject, self.svalue) @property def partial_composition(self): ''' function that composes the value and attribute name, as in "red as color", "4 as length" ''' return u'%s as %s' % (self.svalue, self.attributetype) def subject_filter(self,attr): """ returns applicable selection of nodes for selecting objects """ for each in Objecttype.objects.all(): if attr.subjecttype.id == each.id: return each.get_members # Save for Attribute def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(Attribute, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(Attribute, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeCharField(Attribute): value = models.CharField(max_length=100, verbose_name='string') def __unicode__(self): displayname="ACF: "+ self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeCharField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeCharField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeTextField(Attribute): value = models.TextField(verbose_name='text') def __unicode__(self): displayname="ATF: "+ self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeTextField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeTextField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeIntegerField(Attribute): value = models.IntegerField(max_length=100, verbose_name='Integer') def __unicode__(self): displayname="AIF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeCommaSeparatedIntegerField(Attribute): value = models.CommaSeparatedIntegerField(max_length=100, verbose_name='integers separated by comma') def __unicode__(self): displayname="ACSIF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeCommaSeparatedIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeCommaSeparatedIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. 
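# Aside on the recurring save() pattern in these Attribute* subclasses (a
# description, not original commentary): each subclass stores a typed
# `value` alongside the serialized `svalue`, tags `nodemodel` with the
# concrete class name and, when GSTUDIO_VERSIONING is enabled, saves once
# inside a reversion revision, re-saves subject.ref so the subject's cached
# neighbourhood is refreshed, then performs the ordinary save. A hypothetical
# usage:
#
#     >>> at = Attributetype.objects.get(title='population')   # hypothetical AT
#     >>> a = AttributeBigIntegerField(subject=india_nid, attributetype=at,
#     ...                              svalue='1210000000', value=1210000000)
#     >>> a.save()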
class AttributeBigIntegerField(Attribute): value = models.BigIntegerField(max_length=100, verbose_name='big integer') def __unicode__(self): displayname="ABIF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeBigIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeBigIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributePositiveIntegerField(Attribute): value = models.PositiveIntegerField(max_length=100, verbose_name='positive integer') def __unicode__(self): displayname="APIF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributePositiveIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributePositiveIntegerField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeDecimalField(Attribute): value = models.DecimalField(max_digits=3, decimal_places=2, verbose_name='decimal') def __unicode__(self): displayname="ADF: "+self.title return displayname def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeDecimalField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeDecimalField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeFloatField(Attribute): value = models.FloatField(max_length=100, verbose_name='number as float') def __unicode__(self): displayname="AFF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeFloatField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeFloatField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeBooleanField(Attribute): value = models.BooleanField(verbose_name='boolean') def __unicode__(self): displayname="ABF: "+self.title return displayname def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeBooleanField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeBooleanField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeNullBooleanField(Attribute): value = models.NullBooleanField(verbose_name='true false or unknown') def __unicode__(self): displayname="ANBF: "+self.title return displayname def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeNullBooleanField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeNullBooleanField, self).save(*args, **kwargs) # Call the "real" save() method. 
class AttributeDateField(Attribute): value = models.DateField(max_length=100, verbose_name='date') def __unicode__(self): displayname="ADF: "+self.title return displayname def save(self, *args, **kwargs): self.nodemodel=self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeDateField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeDateField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeDateTimeField(Attribute): value = models.DateTimeField(max_length=100, verbose_name='date time') def __unicode__(self): displayname="ADTF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeDateTimeField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeDateTimeField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeTimeField(Attribute): value = models.TimeField(max_length=100, verbose_name='time') def __unicode__(self): displayname="ATIF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeTimeField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeTimeField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeEmailField(Attribute): value = models.EmailField(max_length=100,verbose_name='value') def __unicode__(self): displayname="AEF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel=self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeEmailField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeEmailField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeFileField(Attribute): value = models.FileField(upload_to='media/'+UPLOAD_TO, verbose_name='file') def __unicode__(self): displayname="AFIF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeFileField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeFileField, self).save(*args, **kwargs) # Call the "real" save() method. class AttributeFilePathField(Attribute): value = models.FilePathField(verbose_name='path of file') def __unicode__(self): displayname="AFPF: "+self.title return displayname # @reversion.create_revision() def save(self, *args, **kwargs): self.nodemodel = self.__class__.__name__ self.nbhood=[] if GSTUDIO_VERSIONING: with reversion.create_revision(): super(AttributeFilePathField, self).save(*args, **kwargs) # Call the "real" save() method. subject=self.subject subject.ref.save() super(AttributeFilePathField, self).save(*args, **kwargs) # Call the "real" save() method. 
class AttributeImageField(Attribute):
    value = models.ImageField(upload_to=UPLOAD_TO, verbose_name='image')

    def __unicode__(self):
        displayname = "AIMF: " + self.title
        return displayname

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(AttributeImageField, self).save(*args, **kwargs)  # Call the "real" save() method.
        subject = self.subject
        subject.ref.save()
        super(AttributeImageField, self).save(*args, **kwargs)  # Call the "real" save() method.


class AttributeURLField(Attribute):
    value = models.URLField(max_length=100, verbose_name='url')

    def __unicode__(self):
        displayname = "AURLF: " + self.title
        return displayname

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(AttributeURLField, self).save(*args, **kwargs)  # Call the "real" save() method.
        subject = self.subject
        subject.ref.save()
        super(AttributeURLField, self).save(*args, **kwargs)  # Call the "real" save() method.


class AttributeIPAddressField(Attribute):
    value = models.IPAddressField(max_length=100, verbose_name='ip address')

    def __unicode__(self):
        displayname = "AIPF: " + self.title
        return displayname

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(AttributeIPAddressField, self).save(*args, **kwargs)  # Call the "real" save() method.
        subject = self.subject
        subject.ref.save()
        super(AttributeIPAddressField, self).save(*args, **kwargs)  # Call the "real" save() method.


class Processtype(Nodetype):
    """
    A kind of nodetype for defining processes or events or temporal
    objects involving change.
    """
    changing_attributetype_set = models.ManyToManyField(
        Attributetype, null=True, blank=True,
        verbose_name=_('attribute set involved in the process'),
        related_name='changing_attributetype_set_of')
    changing_relationtype_set = models.ManyToManyField(
        Relationtype, null=True, blank=True,
        verbose_name=_('relation set involved in the process'),
        related_name='changing_relationtype_set_of')

    def __unicode__(self):
        displayname = "PT: " + self.title
        return displayname

    class Meta:
        verbose_name = _('process type')
        verbose_name_plural = _('process types')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        super(Processtype, self).save(*args, **kwargs)  # Call the "real" save() method.
        self.nbhood = self.get_rendered_nbh
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Processtype, self).save(*args, **kwargs)  # Call the "real" save() method.
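A hedged illustration of wiring a Processtype to the types it changes; growth, height_type and part_of_type are hypothetical rows, and Nodetype's other required columns are assumed to be satisfied by create():

growth = Processtype.objects.create(title="growth")
growth.changing_attributetype_set.add(height_type)   # hypothetical Attributetype
growth.changing_relationtype_set.add(part_of_type)   # hypothetical Relationtype
print(unicode(growth))                               # -> u'PT: growth'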
class Systemtype(Nodetype):
    """
    class to organize Systems
    """
    nodetype_set = models.ManyToManyField(Nodetype, related_name="nodetype_set_of",
                                          verbose_name='Possible nodetypes in the system',
                                          blank=True, null=False)
    relationtype_set = models.ManyToManyField(Relationtype, related_name="relationtype_set_of",
                                              verbose_name='Possible relationtypes in the system',
                                              blank=True, null=False)
    attributetype_set = models.ManyToManyField(Attributetype, related_name="attributetype_set_of",
                                               verbose_name='Possible attributetypes in the system',
                                               blank=True, null=False)
    metatype_set = models.ManyToManyField(Metatype, related_name="metatype_set_of",
                                          verbose_name='Possible metatypes in the system',
                                          blank=True, null=False)
    processtype_set = models.ManyToManyField(Processtype, related_name="processtype_set_of",
                                             verbose_name='Possible processtypes in the system',
                                             blank=True, null=False)
    author_set = models.ManyToManyField(User, related_name="author_set_of",
                                        verbose_name='Possible authors in the system',
                                        blank=True, null=False)

    def __unicode__(self):
        displayname = "ST: " + self.title
        return displayname

    class Meta:
        verbose_name = _('system type')
        verbose_name_plural = _('system types')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        super(Systemtype, self).save(*args, **kwargs)  # Call the "real" save() method.
        # self.nbhood = self.get_rendered_nbh
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Systemtype, self).save(*args, **kwargs)  # Call the "real" save() method.


class AttributeSpecification(Node):
    """
    specifying an attribute by a subject to say for example: population
    of India, color of a flower etc.  These do not yield a proposition
    but a description, which can be used as a subject in another
    sentence.
    """
    attributetype = models.ForeignKey(Attributetype, verbose_name='property name')
    subjects = models.ManyToManyField(NID, related_name="subjects_attrspec_of",
                                      verbose_name='subjects')
    metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'),
                                       related_name='member_attspecns',
                                       blank=True, null=True)

    @property
    def composed_subject(self):
        '''
        composes a name to the attribute
        '''
        subjects = u''
        for each in self.subjects.all():
            subjects = subjects + each.title + ' '
        return u'the %s of %s' % (self.attributetype, subjects)

    def __unicode__(self):
        displayname = "ASN: " + self.composed_subject
        return displayname

    class Meta:
        verbose_name = _('attribute specification')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(AttributeSpecification, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(AttributeSpecification, self).save(*args, **kwargs)  # Call the "real" save() method.
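To make composed_subject concrete, a hedged sketch of its output; population_type and india_node are hypothetical rows. Note that the accumulation loop leaves a trailing space, which u' '.join(...) over the titles would avoid:

spec = AttributeSpecification.objects.create(attributetype=population_type)
spec.subjects.add(india_node)     # assume india_node.title == u'India'
print(spec.composed_subject)      # -> roughly u'the population of India '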
class RelationSpecification(Node):
    """
    specifying a relation with a subject
    """
    relationtype = models.ForeignKey(Relationtype, verbose_name='relation name')
    subjects = models.ManyToManyField(NID, related_name="subjects_in_relspec",
                                      verbose_name='subjects')
    metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'),
                                       related_name='member_relnspecns',
                                       blank=True, null=True)

    @property
    def composed_subject(self):
        '''
        composing an expression with relation name and subject
        '''
        subjects = u''
        for each in self.subjects.all():
            subjects = subjects + each.title + ' '
        return u'the %s of %s' % (self.relationtype, subjects)

    def __unicode__(self):
        displayname = "RSN: " + self.composed_subject
        return displayname

    class Meta:
        verbose_name = _('relation specification')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(RelationSpecification, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(RelationSpecification, self).save(*args, **kwargs)  # Call the "real" save() method.


class NodeSpecification(Node):
    """
    A node specified (described) by its relations or attributes or both.
    """
    subject = models.ForeignKey(Node, related_name="subject_nodespec", verbose_name='subject name')
    relations = models.ManyToManyField(Relation, related_name="relations_in_nodespec",
                                       verbose_name='relations used to specify the domain')
    attributes = models.ManyToManyField(Attribute, related_name="attributes_in_nodespec",
                                        verbose_name='attributes used to specify the domain')
    metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'),
                                       related_name='member_nodespecns',
                                       blank=True, null=True)

    @property
    def composed_subject(self):
        '''
        composing an expression from subject, relations and attributes
        '''
        relations = u''
        for each in self.relations.all():
            relations = relations + each.partial_composition + ', '
        attributes = u''
        for each in self.attributes.all():
            attributes = attributes + each.partial_composition + ', '
        return u'the %s with %s, %s' % (self.subject, relations, attributes)

    def __unicode__(self):
        displayname = "NSN: " + self.composed_subject
        return displayname

    class Meta:
        verbose_name = _('Node specification')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(NodeSpecification, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(NodeSpecification, self).save(*args, **kwargs)  # Call the "real" save() method.


class Expression(Node):
    """
    Expression constructor
    """
    left_term = models.ForeignKey(NID, related_name="left_term_of", verbose_name='left term name')
    relationtype = models.ForeignKey(Relationtype, verbose_name='relation name')
    right_term = models.ForeignKey(NID, related_name="right_term_of", verbose_name='right term name')
    metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'),
                                       related_name='member_exprn', blank=True, null=True)

    def __unicode__(self):
        displayname = "EXPN: " + self.composed_sentence
        return displayname

    @property
    def composed_sentence(self):
        "composes the relation as a sentence in a triple format."
        return u'%s %s %s' % (self.left_term, self.relationtype, self.right_term)

    class Meta:
        unique_together = (('left_term', 'relationtype', 'right_term'),)
        verbose_name = _('expression')
        verbose_name_plural = _('expressions')
        permissions = (('can_view_all', 'Can view all'),
                       ('can_change_author', 'Can change author'),
                       )

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Expression, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(Expression, self).save(*args, **kwargs)  # Call the "real" save() method.


class Union(Node):
    """
    union of two classes
    """
    nodetypes = models.ManyToManyField(Nodetype, related_name='union_of',
                                       verbose_name='node types for union')
    metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'),
                                       related_name='member_unions', blank=True, null=True)

    def __unicode__(self):
        displayname = "UN: " + self.title
        return displayname

    @property
    def composed_sentence(self):
        "composes the union of nodetypes and metatypes as a sentence."
        return u'%s %s' % (self.nodetypes, self.metatypes)

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Union, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(Union, self).save(*args, **kwargs)  # Call the "real" save() method.


class Complement(Node):
    """
    complement of a class
    """
    nodetypes = models.ManyToManyField(Nodetype, related_name='complement_of',
                                       verbose_name='complementary nodes')
    metatypes = models.ManyToManyField(Metatype, related_name='meta_complement',
                                       verbose_name=_('Metanodes'), blank=True, null=True)

    @property
    def composed_subject(self):
        return u'Not of %s' % (self.nodetypes)

    # @property
    # def composed_sentence(self):
    #     "composes the complement as a sentence. "
    #     return u'Not of %s %s' % (self.nodetypes, self.metatypes)

    def __unicode__(self):
        displayname = "CMP: " + self.title
        return displayname

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Complement, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(Complement, self).save(*args, **kwargs)  # Call the "real" save() method.


class Intersection(Node):
    """
    Intersection of classes
    """
    nodetypes = models.ManyToManyField(Nodetype, related_name='intersection_of',
                                       verbose_name='intersection of classes')
    metatypes = models.ManyToManyField(Metatype, verbose_name=_('member of metatypes'),
                                       related_name='member_intersectn', blank=True, null=True)

    def __unicode__(self):
        displayname = "INTSN: " + self.title
        return displayname

    @property
    def composed_subject(self):
        return u'And of %s' % (self.nodetypes)

    # @reversion.create_revision()
    def save(self, *args, **kwargs):
        self.nodemodel = self.__class__.__name__
        self.nbhood = []
        if GSTUDIO_VERSIONING:
            with reversion.create_revision():
                super(Intersection, self).save(*args, **kwargs)  # Call the "real" save() method.
        super(Intersection, self).save(*args, **kwargs)  # Call the "real" save() method.
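One readability caveat in the set constructors above: composed_subject and composed_sentence interpolate the related manager itself (self.nodetypes), so the rendered text is a manager repr rather than the member titles. A hedged variant, e.g. for Complement, that joins the titles instead (not in the source):

    @property
    def composed_subject(self):
        # Hypothetical alternative: list member titles, not the manager repr.
        return u'Not of %s' % u', '.join(nt.title for nt in self.nodetypes.all())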
if GSTUDIO_VERSIONING == True:
    reversion.register(NID)

    if not reversion.is_registered(Systemtype):
        reversion.register(Systemtype, follow=["nodetype_ptr"])

    if not reversion.is_registered(Objecttype):
        reversion.register(Objecttype, follow=["nodetype_ptr"])

    if not reversion.is_registered(Node):
        reversion.register(Node, follow=["nid_ptr"])

    if not reversion.is_registered(Edge):
        reversion.register(Edge, follow=["nid_ptr"])

    if not reversion.is_registered(Processtype):
        reversion.register(Processtype, follow=["nodetype_ptr", "changing_attributetype_set",
                                                "changing_relationtype_set"])

    if not reversion.is_registered(Nodetype):
        reversion.register(Nodetype, follow=["node_ptr", "parent", "metatypes",
                                             "prior_nodes", "posterior_nodes"])

    if not reversion.is_registered(Metatype):
        reversion.register(Metatype, follow=["node_ptr", "parent"])

    if not reversion.is_registered(Relationtype):
        reversion.register(Relationtype, follow=["nodetype_ptr", "left_subjecttype",
                                                 "right_subjecttype"])

    if not reversion.is_registered(Attributetype):
        reversion.register(Attributetype, follow=["nodetype_ptr", "subjecttype"])

    if not reversion.is_registered(Attribute):
        reversion.register(Attribute, follow=["subject", "attributetype"])

    if not reversion.is_registered(Relation):
        reversion.register(Relation, follow=["left_subject", "right_subject", "relationtype"])

moderator.register(Nodetype, NodetypeCommentModerator)

mptt.register(Metatype, order_insertion_by=['title'])
mptt.register(Nodetype, order_insertion_by=['title'])
mptt.register(Objecttype, order_insertion_by=['title'])
mptt.register(Relationtype, order_insertion_by=['title'])
mptt.register(Attributetype, order_insertion_by=['title'])
mptt.register(Systemtype, order_insertion_by=['title'])
mptt.register(Processtype, order_insertion_by=['title'])

post_save.connect(ping_directories_handler, sender=Nodetype,
                  dispatch_uid='gstudio.nodetype.post_save.ping_directories')
post_save.connect(ping_external_urls_handler, sender=Nodetype,
                  dispatch_uid='gstudio.nodetype.post_save.ping_external_urls')


class Peer(User):
    """Subclass for non-human users"""

    def __unicode__(self):
        return self.ip

    ip = models.IPAddressField("Peer's IP address")
    pkey = models.CharField(("Peer's public-key"), max_length=255)
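Given the follow= graph registered above, versions of a subclass row should chain through its parent-link pointers. A hedged check; nt is an assumed existing Nodetype, and Version.objects.get_for_object follows recent django-reversion (older releases expose reversion.get_for_object instead):

import reversion
from reversion.models import Version

# Saving inside a revision should version the Nodetype row and, via
# follow=["node_ptr"], its parent Node row as well.
with reversion.create_revision():
    nt.save()
print(Version.objects.get_for_object(nt).count())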
gnowledge/ISON
gstudio/models.py
Python
agpl-3.0
114,556
[ "ADF" ]
26ac632a3776598bae75a90c05a68586766e9b3a094828a74a46817dfc9fe38c
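The text_hash column in these rows is 64 hexadecimal characters, consistent with a SHA-256 digest of the text field. A sketch of recomputing such a hash; the dataset's exact text normalization is an assumption:

import hashlib

def text_hash(text):
    # Assumes plain UTF-8 encoding of the file text; any trailing-newline
    # or whitespace normalization done by the dataset pipeline is unknown.
    return hashlib.sha256(text.encode("utf-8")).hexdigest()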
"""Rewrite assertion AST to produce nice error messages""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import errno import imp import itertools import marshal import os import re import string import struct import sys import types import atomicwrites import py import six from _pytest.assertion import util from _pytest.compat import spec_from_file_location from _pytest.pathlib import fnmatch_ex from _pytest.pathlib import PurePath # pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: if hasattr(sys, "pypy_version_info"): impl = "pypy" elif sys.platform == "java": impl = "jython" else: impl = "cpython" ver = sys.version_info PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 if sys.version_info >= (3, 5): ast_Call = ast.Call else: def ast_Call(a, b, c): return ast.Call(a, b, c, None, None) class AssertionRewritingHook(object): """PEP302 Import hook which rewrites asserts.""" def __init__(self, config): self.config = config self.fnpats = config.getini("python_files") self.session = None self.modules = {} self._rewritten_names = set() self._register_with_pkg_resources() self._must_rewrite = set() # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, # which might result in infinite recursion (#3506) self._writing_pyc = False self._basenames_to_check_rewrite = {"conftest"} self._marked_for_rewrite_cache = {} self._session_paths_checked = False def set_session(self, session): self.session = session self._session_paths_checked = False def _imp_find_module(self, name, path=None): """Indirection so we can mock calls to find_module originated from the hook during testing""" return imp.find_module(name, path) def find_module(self, name, path=None): if self._writing_pyc: return None state = self.config._assertstate if self._early_rewrite_bailout(name, state): return None state.trace("find_module called for: %s" % name) names = name.rsplit(".", 1) lastname = names[-1] pth = None if path is not None: # Starting with Python 3.3, path is a _NamespacePath(), which # causes problems if not converted to list. path = list(path) if len(path) == 1: pth = path[0] if pth is None: try: fd, fn, desc = self._imp_find_module(lastname, path) except ImportError: return None if fd is not None: fd.close() tp = desc[2] if tp == imp.PY_COMPILED: if hasattr(imp, "source_from_cache"): try: fn = imp.source_from_cache(fn) except ValueError: # Python 3 doesn't like orphaned but still-importable # .pyc files. fn = fn[:-1] else: fn = fn[:-1] elif tp != imp.PY_SOURCE: # Don't know what this is. return None else: fn = os.path.join(pth, name.rpartition(".")[2] + ".py") fn_pypath = py.path.local(fn) if not self._should_rewrite(name, fn_pypath, state): return None self._rewritten_names.add(name) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. 
POSIX's atomic rename comes in handy. write = not sys.dont_write_bytecode cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") if write: try: os.mkdir(cache_dir) except OSError: e = sys.exc_info()[1].errno if e == errno.EEXIST: # Either the __pycache__ directory already exists (the # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. write = False elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) # Notice that even if we're in a read-only directory, I'm going # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc, state.trace) if co is None: state.trace("rewriting %r" % (fn,)) source_stat, co = _rewrite_test(self.config, fn_pypath) if co is None: # Probably a SyntaxError in the test. return None if write: self._writing_pyc = True try: _write_pyc(state, co, source_stat, pyc) finally: self._writing_pyc = False else: state.trace("found cached rewritten pyc for %r" % (fn,)) self.modules[name] = co, pyc return self def _early_rewrite_bailout(self, name, state): """ This is a fast way to get out of rewriting modules. Profiling has shown that the call to imp.find_module (inside of the find_module from this class) is a major slowdown, so, this method tries to filter what we're sure won't be rewritten before getting to it. """ if self.session is not None and not self._session_paths_checked: self._session_paths_checked = True for path in self.session._initialpaths: # Make something as c:/projects/my_project/path.py -> # ['c:', 'projects', 'my_project', 'path.py'] parts = str(path).split(os.path.sep) # add 'path' to basenames to be checked. self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) # Note: conftest already by default in _basenames_to_check_rewrite. parts = name.split(".") if parts[-1] in self._basenames_to_check_rewrite: return False # For matching the name it must be as if it was a filename. 
path = PurePath(os.path.sep.join(parts) + ".py") for pat in self.fnpats: # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based # on the name alone because we need to match against the full path if os.path.dirname(pat): return False if fnmatch_ex(pat, path): return False if self._is_marked_for_rewrite(name, state): return False state.trace("early skip of rewriting module: %s" % (name,)) return True def _should_rewrite(self, name, fn_pypath, state): # always rewrite conftest files fn = str(fn_pypath) if fn_pypath.basename == "conftest.py": state.trace("rewriting conftest file: %r" % (fn,)) return True if self.session is not None: if self.session.isinitpath(fn): state.trace("matched test file (was specified on cmdline): %r" % (fn,)) return True # modules not passed explicitly on the command line are only # rewritten if they match the naming convention for test files for pat in self.fnpats: if fn_pypath.fnmatch(pat): state.trace("matched test file %r" % (fn,)) return True return self._is_marked_for_rewrite(name, state) def _is_marked_for_rewrite(self, name, state): try: return self._marked_for_rewrite_cache[name] except KeyError: for marked in self._must_rewrite: if name == marked or name.startswith(marked + "."): state.trace("matched marked file %r (from %r)" % (name, marked)) self._marked_for_rewrite_cache[name] = True return True self._marked_for_rewrite_cache[name] = False return False def mark_rewrite(self, *names): """Mark import names as needing to be rewritten. The named module or package as well as any nested modules will be rewritten on import. """ already_imported = ( set(names).intersection(sys.modules).difference(self._rewritten_names) ) for name in already_imported: if not AssertionRewriter.is_rewrite_disabled( sys.modules[name].__doc__ or "" ): self._warn_already_imported(name) self._must_rewrite.update(names) self._marked_for_rewrite_cache.clear() def _warn_already_imported(self, name): from _pytest.warning_types import PytestWarning from _pytest.warnings import _issue_config_warning _issue_config_warning( PytestWarning("Module already imported so cannot be rewritten: %s" % name), self.config, ) def load_module(self, name): co, pyc = self.modules.pop(name) if name in sys.modules: # If there is an existing module object named 'fullname' in # sys.modules, the loader must use that existing module. (Otherwise, # the reload() builtin will not work correctly.) mod = sys.modules[name] else: # I wish I could just call imp.load_compiled here, but __file__ has to # be set properly. In Python 3.2+, this all would be handled correctly # by load_compiled. mod = sys.modules[name] = imp.new_module(name) try: mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc mod.__loader__ = self # Normally, this attribute is 3.4+ mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self) six.exec_(co, mod.__dict__) except: # noqa if name in sys.modules: del sys.modules[name] raise return sys.modules[name] def is_package(self, name): try: fd, fn, desc = self._imp_find_module(name) except ImportError: return False if fd is not None: fd.close() tp = desc[2] return tp == imp.PKG_DIRECTORY @classmethod def _register_with_pkg_resources(cls): """ Ensure package resources can be loaded from this loader. May be called multiple times, as the operation is idempotent. 
""" try: import pkg_resources # access an attribute in case a deferred importer is present pkg_resources.__name__ except ImportError: return # Since pytest tests are always located in the file system, the # DefaultProvider is appropriate. pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) def get_data(self, pathname): """Optional PEP302 get_data API. """ with open(pathname, "rb") as f: return f.read() def _write_pyc(state, co, source_stat, pyc): # Technically, we don't have to have the same pyc format as # (C)Python, since these "pycs" should never be seen by builtin # import. However, there's little reason deviate, and I hope # sometime to be able to use imp.load_compiled to load them. (See # the comment in load_module above.) try: with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: fp.write(imp.get_magic()) mtime = int(source_stat.mtime) size = source_stat.size & 0xFFFFFFFF fp.write(struct.pack("<ll", mtime, size)) fp.write(marshal.dumps(co)) except EnvironmentError as e: state.trace("error writing pyc file at %s: errno=%s" % (pyc, e.errno)) # we ignore any failure to write the cache file # there are many reasons, permission-denied, __pycache__ being a # file etc. return False return True RN = "\r\n".encode("utf-8") N = "\n".encode("utf-8") cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+") BOM_UTF8 = "\xef\xbb\xbf" def _rewrite_test(config, fn): """Try to read and rewrite *fn* and return the code object.""" state = config._assertstate try: stat = fn.stat() source = fn.read("rb") except EnvironmentError: return None, None if ASCII_IS_DEFAULT_ENCODING: # ASCII is the default encoding in Python 2. Without a coding # declaration, Python 2 will complain about any bytes in the file # outside the ASCII range. Sadly, this behavior does not extend to # compile() or ast.parse(), which prefer to interpret the bytes as # latin-1. (At least they properly handle explicit coding cookies.) To # preserve this error behavior, we could force ast.parse() to use ASCII # as the encoding by inserting a coding cookie. Unfortunately, that # messes up line numbers. Thus, we have to check ourselves if anything # is outside the ASCII range in the case no encoding is explicitly # declared. For more context, see issue #269. Yay for Python 3 which # gets this right. end1 = source.find("\n") end2 = source.find("\n", end1 + 1) if ( not source.startswith(BOM_UTF8) and cookie_re.match(source[0:end1]) is None and cookie_re.match(source[end1 + 1 : end2]) is None ): if hasattr(state, "_indecode"): # encodings imported us again, so don't rewrite. return None, None state._indecode = True try: try: source.decode("ascii") except UnicodeDecodeError: # Let it fail in real import. return None, None finally: del state._indecode try: tree = ast.parse(source, filename=fn.strpath) except SyntaxError: # Let this pop up again in the real import. state.trace("failed to parse: %r" % (fn,)) return None, None rewrite_asserts(tree, fn, config) try: co = compile(tree, fn.strpath, "exec", dont_inherit=True) except SyntaxError: # It's possible that this error is from some bug in the # assertion rewriting, but I don't know of a fast way to tell. state.trace("failed to compile: %r" % (fn,)) return None, None return stat, co def _read_pyc(source, pyc, trace=lambda x: None): """Possibly read a pytest pyc containing rewritten code. Return rewritten code if successful or None if not. 
""" try: fp = open(pyc, "rb") except IOError: return None with fp: try: mtime = int(source.mtime()) size = source.size() data = fp.read(12) except EnvironmentError as e: trace("_read_pyc(%s): EnvironmentError %s" % (source, e)) return None # Check for invalid or out of date pyc file. if ( len(data) != 12 or data[:4] != imp.get_magic() or struct.unpack("<ll", data[4:]) != (mtime, size) ): trace("_read_pyc(%s): invalid or out of date pyc" % source) return None try: co = marshal.load(fp) except Exception as e: trace("_read_pyc(%s): marshal.load error %s" % (source, e)) return None if not isinstance(co, types.CodeType): trace("_read_pyc(%s): not a code object" % source) return None return co def rewrite_asserts(mod, module_path=None, config=None): """Rewrite the assert statements in mod.""" AssertionRewriter(module_path, config).run(mod) def _saferepr(obj): """Get a safe repr of an object for assertion error messages. The assertion formatting (util.format_explanation()) requires newlines to be escaped since they are a special character for it. Normally assertion.util.format_explanation() does this but for a custom repr it is possible to contain one of the special escape sequences, especially '\n{' and '\n}' are likely to be present in JSON reprs. """ r = py.io.saferepr(obj) # only occurs in python2.x, repr must return text in python3+ if isinstance(r, bytes): # Represent unprintable bytes as `\x##` r = u"".join( u"\\x{:x}".format(ord(c)) if c not in string.printable else c.decode() for c in r ) return r.replace(u"\n", u"\\n") from _pytest.assertion.util import format_explanation as _format_explanation # noqa def _format_assertmsg(obj): """Format the custom assertion message given. For strings this simply replaces newlines with '\n~' so that util.format_explanation() will preserve them instead of escaping newlines. For other objects py.io.saferepr() is used first. """ # reprlib appears to have a bug which means that if a string # contains a newline it gets escaped, however if an object has a # .__repr__() which contains newlines it does not get escaped. # However in either case we want to preserve the newline. 
replaces = [(u"\n", u"\n~"), (u"%", u"%%")] if not isinstance(obj, six.string_types): obj = py.io.saferepr(obj) replaces.append((u"\\n", u"\n~")) if isinstance(obj, bytes): replaces = [(r1.encode(), r2.encode()) for r1, r2 in replaces] for r1, r2 in replaces: obj = obj.replace(r1, r2) return obj def _should_repr_global_name(obj): return not hasattr(obj, "__name__") and not callable(obj) def _format_boolop(explanations, is_or): explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" if isinstance(explanation, six.text_type): return explanation.replace(u"%", u"%%") else: return explanation.replace(b"%", b"%%") def _call_reprcompare(ops, results, expls, each_obj): for i, res, expl in zip(range(len(ops)), results, expls): try: done = not res except Exception: done = True if done: break if util._reprcompare is not None: custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) if custom is not None: return custom return expl unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} binop_map = { ast.BitOr: "|", ast.BitXor: "^", ast.BitAnd: "&", ast.LShift: "<<", ast.RShift: ">>", ast.Add: "+", ast.Sub: "-", ast.Mult: "*", ast.Div: "/", ast.FloorDiv: "//", ast.Mod: "%%", # escaped for string formatting ast.Eq: "==", ast.NotEq: "!=", ast.Lt: "<", ast.LtE: "<=", ast.Gt: ">", ast.GtE: ">=", ast.Pow: "**", ast.Is: "is", ast.IsNot: "is not", ast.In: "in", ast.NotIn: "not in", } # Python 3.5+ compatibility try: binop_map[ast.MatMult] = "@" except AttributeError: pass # Python 3.4+ compatibility if hasattr(ast, "NameConstant"): _NameConstant = ast.NameConstant else: def _NameConstant(c): return ast.Name(str(c), ast.Load()) def set_location(node, lineno, col_offset): """Set node location information recursively.""" def _fix(node, lineno, col_offset): if "lineno" in node._attributes: node.lineno = lineno if "col_offset" in node._attributes: node.col_offset = col_offset for child in ast.iter_child_nodes(node): _fix(child, lineno, col_offset) _fix(node, lineno, col_offset) return node class AssertionRewriter(ast.NodeVisitor): """Assertion rewriting implementation. The main entrypoint is to call .run() with an ast.Module instance, this will then find all the assert statements and rewrite them to provide intermediate values and a detailed assertion error. See http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html for an overview of how this works. The entry point here is .run() which will iterate over all the statements in an ast.Module and for each ast.Assert statement it finds call .visit() with it. Then .visit_Assert() takes over and is responsible for creating new ast statements to replace the original assert statement: it rewrites the test of an assertion to provide intermediate values and replace it with an if statement which raises an assertion error with a detailed explanation in case the expression is false. For this .visit_Assert() uses the visitor pattern to visit all the AST nodes of the ast.Assert.test field, each visit call returning an AST node and the corresponding explanation string. During this state is kept in several instance attributes: :statements: All the AST statements which will replace the assert statement. :variables: This is populated by .variable() with each variable used by the statements so that they can all be set to None at the end of the statements. :variable_counter: Counter to create new unique variables needed by statements. 
Variables are created using .variable() and have the form of "@py_assert0". :on_failure: The AST statements which will be executed if the assertion test fails. This is the code which will construct the failure message and raises the AssertionError. :explanation_specifiers: A dict filled by .explanation_param() with %-formatting placeholders and their corresponding expressions to use in the building of an assertion message. This is used by .pop_format_context() to build a message. :stack: A stack of the explanation_specifiers dicts maintained by .push_format_context() and .pop_format_context() which allows to build another %-formatted string while already building one. This state is reset on every new assert statement visited and used by the other visitors. """ def __init__(self, module_path, config): super(AssertionRewriter, self).__init__() self.module_path = module_path self.config = config def run(self, mod): """Find all assert statements in *mod* and rewrite them.""" if not mod.body: # Nothing to do. return # Insert some special imports at the top of the module but after any # docstrings and __future__ imports. aliases = [ ast.alias(py.builtin.builtins.__name__, "@py_builtins"), ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), ] doc = getattr(mod, "docstring", None) expect_docstring = doc is None if doc is not None and self.is_rewrite_disabled(doc): return pos = 0 lineno = 1 for item in mod.body: if ( expect_docstring and isinstance(item, ast.Expr) and isinstance(item.value, ast.Str) ): doc = item.value.s if self.is_rewrite_disabled(doc): return expect_docstring = False elif ( not isinstance(item, ast.ImportFrom) or item.level > 0 or item.module != "__future__" ): lineno = item.lineno break pos += 1 else: lineno = item.lineno imports = [ ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases ] mod.body[pos:pos] = imports # Collect asserts. nodes = [mod] while nodes: node = nodes.pop() for name, field in ast.iter_fields(node): if isinstance(field, list): new = [] for i, child in enumerate(field): if isinstance(child, ast.Assert): # Transform assert. new.extend(self.visit(child)) else: new.append(child) if isinstance(child, ast.AST): nodes.append(child) setattr(node, name, new) elif ( isinstance(field, ast.AST) # Don't recurse into expressions as they can't contain # asserts. and not isinstance(field, ast.expr) ): nodes.append(field) @staticmethod def is_rewrite_disabled(docstring): return "PYTEST_DONT_REWRITE" in docstring def variable(self): """Get a new variable.""" # Use a character invalid in python identifiers to avoid clashing. name = "@py_assert" + str(next(self.variable_counter)) self.variables.append(name) return name def assign(self, expr): """Give *expr* a name.""" name = self.variable() self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) return ast.Name(name, ast.Load()) def display(self, expr): """Call py.io.saferepr on the expression.""" return self.helper("saferepr", expr) def helper(self, name, *args): """Call a helper in this module.""" py_name = ast.Name("@pytest_ar", ast.Load()) attr = ast.Attribute(py_name, "_" + name, ast.Load()) return ast_Call(attr, list(args), []) def builtin(self, name): """Return the builtin called *name*.""" builtin_name = ast.Name("@py_builtins", ast.Load()) return ast.Attribute(builtin_name, name, ast.Load()) def explanation_param(self, expr): """Return a new named %-formatting placeholder for expr. This creates a %-formatting placeholder for expr in the current formatting context, e.g. ``%(py0)s``. 
The placeholder and expr are placed in the current format context so that it can be used on the next call to .pop_format_context(). """ specifier = "py" + str(next(self.variable_counter)) self.explanation_specifiers[specifier] = expr return "%(" + specifier + ")s" def push_format_context(self): """Create a new formatting context. The format context is used for when an explanation wants to have a variable value formatted in the assertion message. In this case the value required can be added using .explanation_param(). Finally .pop_format_context() is used to format a string of %-formatted values as added by .explanation_param(). """ self.explanation_specifiers = {} self.stack.append(self.explanation_specifiers) def pop_format_context(self, expl_expr): """Format the %-formatted string with current format context. The expl_expr should be an ast.Str instance constructed from the %-placeholders created by .explanation_param(). This will add the required code to format said string to .on_failure and return the ast.Name instance of the formatted string. """ current = self.stack.pop() if self.stack: self.explanation_specifiers = self.stack[-1] keys = [ast.Str(key) for key in current.keys()] format_dict = ast.Dict(keys, list(current.values())) form = ast.BinOp(expl_expr, ast.Mod(), format_dict) name = "@py_format" + str(next(self.variable_counter)) self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) return ast.Name(name, ast.Load()) def generic_visit(self, node): """Handle expressions we don't have custom code for.""" assert isinstance(node, ast.expr) res = self.assign(node) return res, self.explanation_param(self.display(res)) def visit_Assert(self, assert_): """Return the AST statements to replace the ast.Assert instance. This rewrites the test of an assertion to provide intermediate values and replace it with an if statement which raises an assertion error with a detailed explanation in case the expression is false. """ if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: from _pytest.warning_types import PytestWarning import warnings warnings.warn_explicit( PytestWarning("assertion is always true, perhaps remove parentheses?"), category=None, filename=str(self.module_path), lineno=assert_.lineno, ) self.statements = [] self.variables = [] self.variable_counter = itertools.count() self.stack = [] self.on_failure = [] self.push_format_context() # Rewrite assert into a bunch of statements. top_condition, explanation = self.visit(assert_.test) # Create failure message. body = self.on_failure negation = ast.UnaryOp(ast.Not(), top_condition) self.statements.append(ast.If(negation, body, [])) if assert_.msg: assertmsg = self.helper("format_assertmsg", assert_.msg) explanation = "\n>assert " + explanation else: assertmsg = ast.Str("") explanation = "assert " + explanation template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) msg = self.pop_format_context(template) fmt = self.helper("format_explanation", msg) err_name = ast.Name("AssertionError", ast.Load()) exc = ast_Call(err_name, [fmt], []) if sys.version_info[0] >= 3: raise_ = ast.Raise(exc, None) else: raise_ = ast.Raise(exc, None, None) body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: variables = [ast.Name(name, ast.Store()) for name in self.variables] clear = ast.Assign(variables, _NameConstant(None)) self.statements.append(clear) # Fix line numbers. 
for stmt in self.statements: set_location(stmt, assert_.lineno, assert_.col_offset) return self.statements def visit_Name(self, name): # Display the repr of the name if it's a local variable or # _should_repr_global_name() thinks it's acceptable. locs = ast_Call(self.builtin("locals"), [], []) inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) dorepr = self.helper("should_repr_global_name", name) test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) def visit_BoolOp(self, boolop): res_var = self.variable() expl_list = self.assign(ast.List([], ast.Load())) app = ast.Attribute(expl_list, "append", ast.Load()) is_or = int(isinstance(boolop.op, ast.Or)) body = save = self.statements fail_save = self.on_failure levels = len(boolop.values) - 1 self.push_format_context() # Process each operand, short-circuting if needed. for i, v in enumerate(boolop.values): if i: fail_inner = [] # cond is set in a prior loop iteration below self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) expl_format = self.pop_format_context(ast.Str(expl)) call = ast_Call(app, [expl_format], []) self.on_failure.append(ast.Expr(call)) if i < levels: cond = res if is_or: cond = ast.UnaryOp(ast.Not(), cond) inner = [] self.statements.append(ast.If(cond, inner, [])) self.statements = body = inner self.statements = save self.on_failure = fail_save expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) expl = self.pop_format_context(expl_template) return ast.Name(res_var, ast.Load()), self.explanation_param(expl) def visit_UnaryOp(self, unary): pattern = unary_map[unary.op.__class__] operand_res, operand_expl = self.visit(unary.operand) res = self.assign(ast.UnaryOp(unary.op, operand_res)) return res, pattern % (operand_expl,) def visit_BinOp(self, binop): symbol = binop_map[binop.op.__class__] left_expr, left_expl = self.visit(binop.left) right_expr, right_expl = self.visit(binop.right) explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) return res, explanation def visit_Call_35(self, call): """ visit `ast.Call` nodes on Python3.5 and after """ new_func, func_expl = self.visit(call.func) arg_expls = [] new_args = [] new_kwargs = [] for arg in call.args: res, expl = self.visit(arg) arg_expls.append(expl) new_args.append(res) for keyword in call.keywords: res, expl = self.visit(keyword.value) new_kwargs.append(ast.keyword(keyword.arg, res)) if keyword.arg: arg_expls.append(keyword.arg + "=" + expl) else: # **args have `arg` keywords with an .arg of None arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) new_call = ast.Call(new_func, new_args, new_kwargs) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) return res, outer_expl def visit_Starred(self, starred): # From Python 3.5, a Starred node can appear in a function call res, expl = self.visit(starred.value) return starred, "*" + expl def visit_Call_legacy(self, call): """ visit `ast.Call nodes on 3.4 and below` """ new_func, func_expl = self.visit(call.func) arg_expls = [] new_args = [] new_kwargs = [] new_star = new_kwarg = None for arg in call.args: res, expl = self.visit(arg) new_args.append(res) arg_expls.append(expl) 
for keyword in call.keywords: res, expl = self.visit(keyword.value) new_kwargs.append(ast.keyword(keyword.arg, res)) arg_expls.append(keyword.arg + "=" + expl) if call.starargs: new_star, expl = self.visit(call.starargs) arg_expls.append("*" + expl) if call.kwargs: new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) return res, outer_expl # ast.Call signature changed on 3.5, # conditionally change which methods is named # visit_Call depending on Python version if sys.version_info >= (3, 5): visit_Call = visit_Call_35 else: visit_Call = visit_Call_legacy def visit_Attribute(self, attr): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) value, value_expl = self.visit(attr.value) res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) res_expl = self.explanation_param(self.display(res)) pat = "%s\n{%s = %s.%s\n}" expl = pat % (res_expl, res_expl, value_expl, attr.attr) return res, expl def visit_Compare(self, comp): self.push_format_context() left_res, left_expl = self.visit(comp.left) if isinstance(comp.left, (ast.Compare, ast.BoolOp)): left_expl = "({})".format(left_expl) res_variables = [self.variable() for i in range(len(comp.ops))] load_names = [ast.Name(v, ast.Load()) for v in res_variables] store_names = [ast.Name(v, ast.Store()) for v in res_variables] it = zip(range(len(comp.ops)), comp.ops, comp.comparators) expls = [] syms = [] results = [left_res] for i, op, next_operand in it: next_res, next_expl = self.visit(next_operand) if isinstance(next_operand, (ast.Compare, ast.BoolOp)): next_expl = "({})".format(next_expl) results.append(next_res) sym = binop_map[op.__class__] syms.append(ast.Str(sym)) expl = "%s %s %s" % (left_expl, sym, next_expl) expls.append(ast.Str(expl)) res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper( "call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), ast.Tuple(expls, ast.Load()), ast.Tuple(results, ast.Load()), ) if len(comp.ops) > 1: res = ast.BoolOp(ast.And(), load_names) else: res = load_names[0] return res, self.explanation_param(self.pop_format_context(expl_call))
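To see the rewriter end to end, a hedged driver for the module's entry point rewrite_asserts(); it assumes pytest is importable so the '@pytest_ar' helper import that run() inserts can resolve:

import ast

src = "def check(x):\n    assert x + 1 == 3\n"
tree = ast.parse(src)
rewrite_asserts(tree)                       # entry point defined above
ns = {}
exec(compile(tree, "<demo>", "exec"), ns)   # executes the inserted imports
try:
    ns["check"](1)
except AssertionError as err:
    print(err)   # detailed explanation, e.g. "assert (1 + 1) == 3"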
txomon/pytest
src/_pytest/assertion/rewrite.py
Python
mit
39,457
[ "VisIt" ]
6c1238cc60eaf666d5784f5ec65d27ced2195a4525ab312eeba0c27c270c0512
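Tying off the caching half of the module above: _write_pyc stores a 12-byte header -- imp.get_magic() (4 bytes) plus little-endian int32 mtime and size -- followed by the marshalled code object. A minimal reader sketch mirroring _read_pyc, minus its staleness checks:

import imp
import marshal
import struct

def read_pytest_pyc(path):
    # Sketch only: returns the cached code object, or None on magic mismatch.
    with open(path, "rb") as fp:
        if fp.read(4) != imp.get_magic():
            return None
        mtime, size = struct.unpack("<ll", fp.read(8))
        return marshal.load(fp)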
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the OSL-3.0 license found in the # LICENSE file in the root directory of this source tree. import os import pytest from django.core.urlresolvers import reverse from shuup import configuration from shuup.apps.provides import override_provides from shuup.front.apps.registration.notify_events import ( RegistrationReceivedEmailScriptTemplate ) from shuup.front.notify_script_templates.generics import ( OrderConfirmationEmailScriptTemplate, PaymentCreatedEmailScriptTemplate, RefundCreatedEmailScriptTemplate, ShipmentCreatedEmailScriptTemplate, ShipmentDeletedEmailScriptTemplate ) from shuup.notify.models import Script from shuup.simple_supplier.notify_script_template import ( StockLimitEmailScriptTemplate ) from shuup.testing.browser_utils import wait_until_condition from shuup.testing.notify_script_templates import DummyScriptTemplate from shuup.testing.utils import initialize_admin_browser_test pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.") def initialize(browser, live_server, settings): initialize_admin_browser_test(browser, live_server, settings) configuration.set(None, "shuup_product_tour_complete", True) Script.objects.all().delete() @pytest.mark.browser @pytest.mark.djangodb @pytest.mark.django_db @pytest.mark.parametrize("script_template_cls", [ OrderConfirmationEmailScriptTemplate, PaymentCreatedEmailScriptTemplate, RefundCreatedEmailScriptTemplate, ShipmentCreatedEmailScriptTemplate, ShipmentDeletedEmailScriptTemplate, RegistrationReceivedEmailScriptTemplate ]) def test_generic_script_template(browser, admin_user, live_server, settings, script_template_cls): initialize(browser, live_server, settings) url = reverse("shuup_admin:notify.script.list") browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda x: x.is_element_present_by_css("div.btn-toolbar a.btn.btn-info")) # find the button to load from template browser.find_by_css("div.btn-toolbar a.btn.btn-info").first.click() identifier = script_template_cls.identifier form_id = "form-" + identifier button_id = "#{} button.btn.btn-success".format(form_id) wait_until_condition(browser, lambda x: x.is_element_present_by_css(button_id)) browser.find_by_css(button_id).first.click() config_url = reverse("shuup_admin:notify.script-template-config", kwargs={"id": identifier}) wait_until_condition(browser, lambda b: b.url.endswith(config_url), timeout=15) wait_until_condition(browser, lambda b: b.is_text_present("Configure the Script Template")) # click to create the script browser.execute_script(""" $(document).ready(function(){ $('#lang-en .summernote-editor').summernote('editor.insertText', 'NEW CONTENT'); }); """) browser.find_by_id("id_en-subject").fill("custom subject!") browser.find_by_css("form button.btn.btn-lg.btn-primary").first.click() wait_until_condition(browser, lambda b: b.url.endswith(reverse("shuup_admin:notify.script.list"))) script = Script.objects.first() serialized_steps = script.get_serialized_steps() assert len(serialized_steps) == 1 assert len(serialized_steps[0]["actions"]) == 1 assert len(serialized_steps[0]["conditions"]) == 0 assert serialized_steps[0]["actions"][0]["recipient"]["variable"] == "customer_email" assert serialized_steps[0]["actions"][0]["template_data"]["en"]["subject"] == "custom subject!" 
assert "NEW CONTENT" in serialized_steps[0]["actions"][0]["template_data"]["en"]["body"] @pytest.mark.browser @pytest.mark.djangodb @pytest.mark.parametrize("script_template_cls", [ OrderConfirmationEmailScriptTemplate, PaymentCreatedEmailScriptTemplate, RefundCreatedEmailScriptTemplate, ShipmentCreatedEmailScriptTemplate, ShipmentDeletedEmailScriptTemplate, RegistrationReceivedEmailScriptTemplate ]) def test_generic_custom_email_script_template(browser, admin_user, live_server, settings, script_template_cls): initialize(browser, live_server, settings) url = reverse("shuup_admin:notify.script.list") browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda x: x.is_element_present_by_css("div.btn-toolbar a.btn.btn-info")) # find the button to load from template browser.find_by_css("div.btn-toolbar a.btn.btn-info").first.click() identifier = script_template_cls.identifier form_id = "form-" + identifier button_id = "#{} button.btn.btn-success".format(form_id) wait_until_condition(browser, lambda x: x.is_element_present_by_css(button_id)) browser.find_by_css(button_id).first.click() config_url = reverse("shuup_admin:notify.script-template-config", kwargs={"id": identifier}) wait_until_condition(browser, lambda b: b.url.endswith(config_url), timeout=15) wait_until_condition(browser, lambda b: b.is_text_present("Configure the Script Template")) browser.execute_script(""" $(document).ready(function(){ // EN $("#id_en-subject").val("custom subject!"); $('#lang-en .summernote-editor').summernote('editor.insertText', 'Hi'); // FINNISH $('.nav.nav-tabs a[href="#lang-fi"]').tab('show'); $("#id_fi-subject").val("FINNISH subject!"); $('#lang-fi .summernote-editor').summernote('editor.insertText', 'Hi Finland!'); }); """) # fill form browser.select('base-send_to', 'other') browser.find_by_id("id_base-recipient").fill("other@shuup.com") browser.find_by_css("form button.btn.btn-lg.btn-primary").first.click() wait_until_condition(browser, lambda b: b.url.endswith(reverse("shuup_admin:notify.script.list"))) script = Script.objects.first() serialized_steps = script.get_serialized_steps() assert len(serialized_steps) == 1 assert len(serialized_steps[0]["actions"]) == 1 assert len(serialized_steps[0]["conditions"]) == 0 assert serialized_steps[0]["actions"][0]["recipient"]["constant"] == "other@shuup.com" assert serialized_steps[0]["actions"][0]["template_data"]["en"]["subject"] == "custom subject!" assert "Hi" in serialized_steps[0]["actions"][0]["template_data"]["en"]["body"] assert serialized_steps[0]["actions"][0]["template_data"]["fi"]["subject"] == "FINNISH subject!" assert "Hi Finland!" 
in serialized_steps[0]["actions"][0]["template_data"]["fi"]["body"] # edit the script url = reverse("shuup_admin:notify.script.edit", kwargs={"pk": script.pk}) browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda b: b.is_text_present("Edit Script Information")) # find the button to edit the script content through template editor browser.find_by_css("div.btn-toolbar a.btn.btn-info").last.click() edit_url = reverse("shuup_admin:notify.script-template-edit", kwargs={"pk": script.pk}) wait_until_condition(browser, lambda b: b.url.endswith(edit_url)) wait_until_condition(browser, lambda b: b.is_text_present("Configure the Script Template")) # fill form browser.execute_script(""" $(document).ready(function(){ $('#lang-en .summernote-editor').summernote('editor.insertText', 'Changed'); }); """) browser.find_by_id("id_en-subject").fill("changed subject!") browser.select('base-send_to', 'customer') browser.find_by_css("form button.btn.btn-lg.btn-primary").first.click() # hit save wait_until_condition(browser, lambda b: b.url.endswith(reverse("shuup_admin:notify.script.list"))) script = Script.objects.first() serialized_steps = script.get_serialized_steps() assert len(serialized_steps) == 1 assert len(serialized_steps[0]["actions"]) == 1 assert len(serialized_steps[0]["conditions"]) == 0 assert serialized_steps[0]["actions"][0]["recipient"]["variable"] == "customer_email" assert serialized_steps[0]["actions"][0]["template_data"]["en"]["subject"] == "changed subject!" assert "Changed" in serialized_steps[0]["actions"][0]["template_data"]["en"]["body"] @pytest.mark.browser @pytest.mark.djangodb def test_stock_alert_limit_script_template(browser, admin_user, live_server, settings): initialize(browser, live_server, settings) url = reverse("shuup_admin:notify.script.list") browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda x: x.is_element_present_by_css("div.btn-toolbar a.btn.btn-info")) # find the button to load from template browser.find_by_css("div.btn-toolbar a.btn.btn-info").first.click() identifier = StockLimitEmailScriptTemplate.identifier form_id = "form-" + identifier wait_until_condition(browser, lambda x: x.is_element_present_by_id(form_id)) browser.find_by_css("#{} button.btn.btn-success".format(form_id)).first.click() config_url = reverse("shuup_admin:notify.script-template-config", kwargs={"id": identifier}) wait_until_condition(browser, lambda b: b.url.endswith(config_url)) wait_until_condition(browser, lambda b: b.is_text_present("Configure the Script Template")) subject = "custom subject!" 
recipient = "email@shuup.com" browser.find_by_id("id_en-subject").fill(subject) browser.find_by_id("id_base-recipient").fill(recipient) browser.find_by_css("form button.btn.btn-lg.btn-primary").first.click() wait_until_condition(browser, lambda b: b.url.endswith(reverse("shuup_admin:notify.script.list"))) script = Script.objects.first() serialized_steps = script.get_serialized_steps() assert len(serialized_steps) == 1 assert len(serialized_steps[0]["actions"]) == 1 assert serialized_steps[0]["actions"][0]["recipient"]["constant"] == recipient assert len(serialized_steps[0]["conditions"]) == 1 assert serialized_steps[0]["conditions"][0]["v1"]["variable"] == "dispatched_last_24hs" assert not serialized_steps[0]["conditions"][0]["v2"]["constant"] assert serialized_steps[0]["actions"][0]["template_data"]["en"]["subject"] == subject # edit the script url = reverse("shuup_admin:notify.script.edit", kwargs={"pk": script.pk}) browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda b: b.is_text_present("Edit Script Information")) # find the button to edit the script content through template editor browser.find_by_css("div.btn-toolbar a.btn.btn-info").last.click() edit_url = reverse("shuup_admin:notify.script-template-edit", kwargs={"pk": script.pk}) wait_until_condition(browser, lambda b: b.url.endswith(edit_url)) wait_until_condition(browser, lambda b: b.is_text_present("Configure the Script Template")) # fill form subject = "changed sub" recipient = "changed.email@shuup.com" browser.find_by_id("id_en-subject").fill(subject) browser.find_by_id("id_base-recipient").fill(recipient) browser.uncheck("base-last24hrs") browser.find_by_css("form button.btn.btn-lg.btn-primary").first.click() # hit save wait_until_condition(browser, lambda b: b.url.endswith(reverse("shuup_admin:notify.script.list"))) script = Script.objects.first() serialized_steps = script.get_serialized_steps() assert len(serialized_steps) == 1 assert len(serialized_steps[0]["actions"]) == 1 assert serialized_steps[0]["actions"][0]["recipient"]["constant"] == recipient assert len(serialized_steps[0]["conditions"]) == 0 assert serialized_steps[0]["actions"][0]["template_data"]["en"]["subject"] == subject @pytest.mark.browser @pytest.mark.djangodb def test_dummy_script_template(browser, admin_user, live_server, settings): initialize(browser, live_server, settings) with override_provides("notify_script_template", ["shuup.testing.notify_script_templates:DummyScriptTemplate"]): url = reverse("shuup_admin:notify.script.list") browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda x: x.is_element_present_by_css("div.btn-toolbar a.btn.btn-info")) # find the button to load from template browser.find_by_css("div.btn-toolbar a.btn.btn-info").first.click() identifier = DummyScriptTemplate.identifier form_id = "form-" + identifier wait_until_condition(browser, lambda x: x.is_element_present_by_id(form_id)) btn_create_css = "#{} button.btn.btn-success".format(form_id) wait_until_condition(browser, lambda x: x.is_element_present_by_css(btn_create_css)) browser.find_by_css(btn_create_css).first.click() wait_until_condition(browser, lambda b: b.url.endswith(reverse("shuup_admin:notify.script.list"))) script = Script.objects.first() serialized_steps = script.get_serialized_steps() assert len(serialized_steps) == 1 assert len(serialized_steps[0]["actions"]) == 0 assert len(serialized_steps[0]["conditions"]) == 1 assert serialized_steps[0]["conditions"][0]["v1"]["constant"] assert not 
serialized_steps[0]["conditions"][0]["v2"]["constant"] # edit the script url = reverse("shuup_admin:notify.script.edit", kwargs={"pk": script.pk}) browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda b: b.is_text_present("Edit Script Information")) # should exist only a single button to edit the script content assert len(browser.find_by_css("div.btn-toolbar a.btn.btn-info")) == 1 assert "Edit Script Contents" in browser.find_by_css("div.btn-toolbar a.btn.btn-info").first.text
suutari-ai/shoop
shuup_tests/browser/notify/test_script_template.py
Python
agpl-3.0
13,824
[ "VisIt" ]
e689343b3b59bf2c53f0d73b5a1c5ee8773bc14b667595eb53334bfd5e232988
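The tests above poll the page with wait_until_condition(browser, predicate) before asserting. A minimal sketch of such a helper is below; the timeout and interval values are assumptions, since the real helper ships with the project's browser-test utilities rather than with this file:

import time

def wait_until_condition(browser, condition, timeout=10, interval=0.5):
    # Poll the predicate until it returns truthy or the timeout expires.
    end = time.time() + timeout
    while time.time() < end:
        if condition(browser):
            return
        time.sleep(interval)
    raise AssertionError("condition not met within %s seconds" % timeout)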
# Copyright (C) 2013 Chris N. Richardson
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added:  2013-04-26
# Last changed: 2013-04-26

from dolfin import *

# Create mesh
mesh = UnitSquareMesh(20, 20)

# Create MeshFunction to hold cell process rank
processes = CellFunction('size_t', mesh, MPI.rank(mesh.mpi_comm()))

# Output cell distribution to VTK file
file = File("processes.pvd")
file << processes

# Mark all cells on process 0 for refinement
marker = CellFunction('bool', mesh, (MPI.rank(mesh.mpi_comm()) == 0))

# Refine mesh, but keep all new cells on parent process
mesh0 = refine(mesh, marker, False)

# Create MeshFunction to hold cell process rank for refined mesh
processes1 = CellFunction('size_t', mesh0, MPI.rank(mesh.mpi_comm()))
file << processes1

# Refine mesh, but this time repartition the mesh after refinement
mesh1 = refine(mesh, marker, True)

# Create MeshFunction to hold cell process rank for refined mesh
processes2 = CellFunction('size_t', mesh1, MPI.rank(mesh.mpi_comm()))
file << processes2
MiroK/dolfin
demo/undocumented/parallel-refinement/python/demo_parallel-refinement.py
Python
gpl-3.0
1,661
[ "VTK" ]
09ae2eb17d2120fea47f9ee7872e3f98436add7274c98337e5577c9d26d260cb
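A quick way to see the effect of the redistribute flag in the demo above is to print per-rank cell counts after each refinement. This sketch assumes the same legacy DOLFIN API as the demo (plus Mesh.num_cells()); run it under MPI to compare the imbalanced counts with False against the rebalanced counts with True:

from dolfin import *

mesh = UnitSquareMesh(20, 20)
marker = CellFunction('bool', mesh, MPI.rank(mesh.mpi_comm()) == 0)

local = refine(mesh, marker, False)    # new cells stay on the parent process
balanced = refine(mesh, marker, True)  # mesh is repartitioned after refinement

rank = MPI.rank(mesh.mpi_comm())
print("rank %d: local=%d cells, balanced=%d cells"
      % (rank, local.num_cells(), balanced.num_cells()))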
# -*- coding: utf-8 -*- # @Author: twankim # @Date: 2017-02-24 17:46:51 # @Last Modified by: twankim # @Last Modified time: 2018-03-09 22:14:15 import numpy as np import time import sys import os import argparse import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from ssac import weakSSAC from gen_data import genData from utils import * weak = "local" delta = 0.99 base_dir= os.path.join('./results',weak) def main(args): plotted = False rep = args.rep k = args.k n = args.n m = args.m std = args.std # qs = [float(q) for q in args.qs.split(',')] etas = [float(eta) for eta in args.etas.split(',')] beta = args.beta i_plot = np.random.randint(0,rep) # Index of experiment to plot the figure verbose = args.verbose cs = [float(q) for q in args.cs.split(',')] res_acc = np.zeros((rep,len(cs),len(etas))) # Accuracy of clustering res_mean_acc = np.zeros((rep,len(cs),len(etas))) # Mean accuracy of clustering (per cluster) # res_err = np.zeros((rep,len(qs),len(etas))) # Number of misclustered points res_fail = np.zeros((rep,len(cs),len(etas))) # Number of Failure gammas = np.zeros(rep) nus = np.zeros((rep,len(cs))) rhos = np.zeros((rep,len(cs))) # Make directories to save results if not os.path.exists(base_dir): os.makedirs(base_dir) res_dir = base_dir + '/{}_{}'.format(args.min_gamma,args.max_gamma) if not os.path.exists(res_dir): os.makedirs(res_dir) for i_rep in xrange(rep): # Generate Synthetic data # m dimensional, n points, k cluster # min_gamma: minimum gamma margin if verbose: print "({}/{})... Generating data".format(i_rep+1,rep) dataset = genData(n,m,k,args.min_gamma,args.max_gamma,std) X,y_true,ris = dataset.gen() gamma = dataset.gamma gammas[i_rep] = gamma print "({}/{})... Synthetic data is generated: gamma={}, (n,m,k,std)=({},{},{},{})".format( i_rep+1,rep,gamma,n,m,k,std) algo = weakSSAC(X,y_true,k,wtype=weak,ris=ris) # Test SSAC algorithm for different c's and eta's (fix beta in this case) for i_c,c_dist in enumerate(cs): assert (c_dist>0.5) & (c_dist<=1.0), "c_dist must be in (0.5,1]" nus[i_rep,i_c] = float(gamma) + 1.5*(1-c_dist) rhos[i_rep,i_c] = c_dist # Calculate proper eta and beta based on parameters including delta if verbose: print " - Proper eta={}, beta={} (delta={})".format( dataset.calc_eta(delta,weak=weak,nu=nus[i_rep,i_c],rho=rhos[i_rep,i_c]), dataset.calc_beta(delta,weak=weak,nu=nus[i_rep,i_c],rho=rhos[i_rep,i_c]), delta) for i_eta,eta in enumerate(etas): if verbose: print " <Test: c_dist={}, eta={}, beta={}>".format(c_dist,eta,beta) algo.set_params(eta,beta,rho=rhos[i_rep,i_c],nu=nus[i_rep,i_c]) if not algo.fit(): # Algorithm has failed res_fail[i_rep,i_c,i_eta] = 1 if not plotted: i_plot = np.random.randint(i_rep+1,rep) # Index of experiment to plot the figure y_pred = algo.y mpps = algo.mpps # Estimated cluster centers # print " ... Clustering is done. 
Number of binary search steps = {}\n".format(algo.bs_num) # For evaluation & plotting, find best permutation of cluster assignment y_pred_perm = find_permutation(dataset,algo) # Calculate accuracy and mean accuracy res_acc[i_rep,i_c,i_eta] = accuracy(y_true,y_pred_perm) res_mean_acc[i_rep,i_c,i_eta] = mean_accuracy(y_true,y_pred_perm) # # Calculate number of errors # res_err[i_rep,i_c,i_eta] = error(y_true,y_pred_perm) if (i_rep == i_plot) and (m<=2) and (not plotted): if (i_eta==len(etas)-1) and (i_c==len(cs)-1): plotted = True title = r"SSAC with {} weak oracle ($\eta={}, \beta={}, \nu={:.2f}, \rho={:.2f}$)".format( weak,eta,beta,nus[i_rep,i_c],rhos[i_rep,i_c]) f_name = res_dir+'/fig_n{}_m{}_k{}_c{:03d}_e{:d}.png'.format(n,m,k,int(100*c_dist),int(eta)) plot_cluster(X,y_true,y_pred_perm,k,mpps,gamma, title,f_name,verbose) # Write result as table print_eval("Accuracy(%)",res_acc,etas, res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("acc",n,m,k),weak=weak,params=cs) print_eval("Mean Accuracy(%)",res_mean_acc,etas, res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("meanacc",n,m,k),weak=weak,params=cs) # print_eval("# Error(%)",res_err,qs,etas, # res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("err",n,m,k)) print_eval("# Failures",res_fail,etas, res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("fail",n,m,k), is_sum=True,weak=weak,params=cs) # if args.isplot: # Plot Accuracy vs. eta fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("acc",n,m,k) plot_eval("Accuracy(%)",res_acc,etas,fig_name,weak=weak,params=cs) # Plot Mean Accuracy vs. eta fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("meanacc",n,m,k) plot_eval("Mean Accuracy(%)",res_mean_acc,etas,fig_name,weak=weak,params=cs) # Plot Failure vs. eta fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("fail",n,m,k) plot_eval("# Failures",res_fail,etas,fig_name,is_sum=True,weak=weak,params=cs) # Plot histogram of gammas fig_name = res_dir+'/fig_gamma_hist.pdf' plot_hist(gammas,args.min_gamma,args.max_gamma,fig_name) if args.isplot: plt.show() def parse_args(): def str2bool(v): return v.lower() in ('true', '1') parser = argparse.ArgumentParser(description= 'Test Semi-Supervised Active Clustering with Weak Oracles: Random-weak model') parser.add_argument('-rep', dest='rep', help='Number of experiments to repeat', default = 10000, type = int) parser.add_argument('-k', dest='k', help='Number of clusters in synthetic data', default = 3, type = int) parser.add_argument('-n', dest='n', help='Number of data points in synthetic data', default = 600, type = int) parser.add_argument('-m', dest='m', help='Dimension of data points in synthetic data', default = 2, type = int) parser.add_argument('-std', dest='std', help='standard deviation of Gaussian distribution (default:1.5)', default = 2.0, type = float) parser.add_argument('-qs', dest='qs', help='Probabilities q (not-sure with 1-q) ex) 0.7,0.85,1', default = '0.7,0.85,1', type = str) parser.add_argument('-etas', dest='etas', help='etas: parameter for sampling (phase 1) ex) 10,50', default = '2,5,10,20,30', type = str) parser.add_argument('-beta', dest='beta', help='beta: parameter for sampling (phase 2)', default = 1, type = int) parser.add_argument('-g_min', dest='min_gamma', help='minimum gamma margin (default:1)', default = 1.0, type = float) parser.add_argument('-g_max', dest='max_gamma', help='minimum gamma margin (default:1)', default = 1.1, type = float) parser.add_argument('-cs', dest='cs', help='Fractions to set distance-weak parameters (0.5,1] ex) 0.7,0.85,1', default = '0.6,0.8,1', type = str) 
parser.add_argument('-isplot', dest='isplot', help='plot the result: True/False', default = False, type = str2bool) parser.add_argument('-verbose', dest='verbose', help='verbose: True/False', default = False, type = str2bool) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() print "Called with args:" print args sys.exit(main(args))
twankim/weaksemi
main_local.py
Python
mit
8,601
[ "Gaussian" ]
1cf766d826ec96caea7649b118c163c0c7f0e35c91efa33e962f091fa8cd840c
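The parse_args function above relies on a small str2bool converter so that passing the literal string "False" on the command line is not treated as truthy. The same pattern as a standalone Python 3 sketch:

import argparse

def str2bool(v):
    return v.lower() in ('true', '1')

parser = argparse.ArgumentParser()
parser.add_argument('-isplot', dest='isplot', default=False, type=str2bool)

print(parser.parse_args(['-isplot', 'False']).isplot)  # False
print(parser.parse_args(['-isplot', 'true']).isplot)   # True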
#!/usr/bin/python """ Several functions. See -h output. Assign the 1-student function and the submit-grades functions to a hotkey for easy access during class. Randomly choose a student from the class list, and pop-up their name using operating system Desktop notification. On the size-N algorithm: I want each group to be at least size N, and hopefully not much larger. So no orphan/small groups. Now implemented, along with N groups of roughly equal size. What formats of student names does it recognize? (This the format given by McGill's MyCourses2 system, when you download a CSV classlist) - columns named firstName and lastName - one column called "Student Name" with content formatted "last name, first name" To do: - set up keystroke to indicate someone is absent today? [DONE] - avoid repeating someone who was just called in the last few? [DONE] - set up a parameter for random student name so that the last N graded ones are excluded. N.B. I have hardcoded aspects of my teaching schedule and course numbers into chooseClassListFile. Edit this in the obvious way. """ import pandas as pd import os GRADES_FILE='/home/meuser/courses/activeLearningGrades.tsv' if not os.path.exists(GRADES_FILE): with open(GRADES_FILE,'at') as ff: ff.write('Date classfile studentName studentID grade grade2 grade3 grade4 grade5 grade6\n') ff.write('dummyDate dummyclassfile dummystudentName dummystudentID dummygrade dummygrade2 dummygrade3 dummygrade4 dummygrade5 dummygrade6\n') import time # Wow. "%c" format isn't even consistent between python and ipython on my own system!! DATETIMEFMT='%Y %b %d %a %j %H:%M:%S %Z' now = time.strftime(DATETIMEFMT) # time.strftime("%c") AVOID_PREVIOUS_N_STUDENTS=3 def chooseClassListFile(coursenumber=None): """ Some custom specification of the class list default, if not given on command line Or, suggest a file if """ if isinstance(coursenumber, basestring) and os.path.exists(coursenumber): classlistfile=coursenumber elif coursenumber is not None: classlistfile = '/home/meuser/courses/'+str(coursenumber)+'/classlist.csv' assert os.path.exists(classlistfile) elif "Tue" in now or "Thu" in now: classlistfile='/home/meuser/courses/201/classlist.csv' assert os.path.exists(classlistfile) else: classlistfile='/home/meuser/courses/swb/classlist.csv' assert os.path.exists(classlistfile) return(classlistfile) def recordGradeForLastStudent(thegrade): with open(GRADES_FILE,'at') as ff: ff.write('\t'+str(thegrade)) os.system(' play /usr/share/sounds/KDE-K3B-Finish-Success.ogg &') # Close the (zenity) window (make sure wmctrl is installed) showing the student name os.system("wmctrl -F -c 'ActiveLearning:1student'") def markLastStudentAbsent(): recordGradeForLastStudent('A') def loadGradeLogFromToday(): # Return a dataframe with students called today, in order. 
df=pd.read_table(GRADES_FILE, sep='\t')#, dialect=None, compression=None, doublequote=True, escapechar=None, quotechar='"', quoting=0, skipinitialspace=False, lineterminator=None, header='infer', index_col=None, names=None, prefix=None, skiprows=None, skipfooter=None, skip_footer=0, na_values=None, na_fvalues=None, true_values=None, false_values=None, delimiter=None, converters=None, dtype=None, usecols=None, engine='c', delim_whitespace=False, as_recarray=False, na_filter=True, compact_ints=False, use_unsigned=False, low_memory=True, buffer_lines=None, warn_bad_lines=True, error_bad_lines=True, keep_default_na=True, thousands=None, comment=None, decimal='.', parse_dates=False, keep_date_col=False, dayfirst=False, date_parser=None, memory_map=False, nrows=None, iterator=False, chunksize=None, verbose=False, encoding=None, squeeze=False, mangle_dupe_cols=True, tupleize_cols=False, infer_datetime_format=False) todays=df[df.Date.map(lambda ss: isinstance(ss,str) and ss[:11]==now[:11])] return(todays) def nChunks(l, n): # From SO, modified. FAILS!! e.g. l=22, n=6: bad allocation. """ Yield n successive chunks from l. Works for lists, pandas dataframes, etc list length N. into n groups. minimum size is floorsize=floor(N/n) average surplus needed on remaining groups: avgsurplus=(N-n*floorsize)/(n-n0) to assign to this group: ceil(avgsurplus) Use recursion!? Not sure how, with yield 2014 Sept: S.O. version nchunks or whatever is crap! I have written my own below. """ import math remaining=len(l) floorsize=int(remaining/n) # int=floor? for ig in xrange(0,n-1): surplus=int(math.ceil( (remaining-(n-ig)*floorsize )*1.0/(n-ig) )) #print(remaining,floorsize,surplus,ig) ifrom,ito=len(l)-remaining , len(l)-remaining +floorsize+surplus remaining=remaining-floorsize-surplus yield l[ifrom:ito] yield l[len(l)-remaining:] if 0: fooey for i in xrange(0, n-1): newn = int(1.0 * (len(l)-sofar) / n + 0.5) sofar+=newn yield l[i*newn:i*newn+newn] yield l[n*newn-newn:] sofar=0 for i in xrange(0, n-1): newn = int(1.0 * (len(l)-sofar) / n + 0.5) sofar+=newn yield l[i*newn:i*newn+newn] yield l[n*newn-newn:] def chunksOfSizeN(l,N): """ To instead yield nearly-equal sized chunks of size <=N, use nChunks( """ return(nChunks(l, N/l)) # floor(N/l) in python 3? 
def report_all_grades(classfile=None,maxOneZeroPerDay=True,allowOneDayAway=True): # Return a dataframe with all in-class students' records dfr=pd.read_table(GRADES_FILE, sep='\t')#, dialect=None, compression=None, doublequote=True, escapechar=None, quotechar='"', quoting=0, skipinitialspace=False, lineterminator=None, header='infer', index_col=None, names=None, prefix=None, skiprows=None, skipfooter=None, skip_footer=0, na_values=None, na_fvalues=None, true_values=None, false_values=None, delimiter=None, converters=None, dtype=None, usecols=None, engine='c', delim_whitespace=False, as_recarray=False, na_filter=True, compact_ints=False, use_unsigned=False, low_memory=True, buffer_lines=None, warn_bad_lines=True, error_bad_lines=True, keep_default_na=True, thousands=None, comment=None, decimal='.', parse_dates=False, keep_date_col=False, dayfirst=False, date_parser=None, memory_map=False, nrows=None, iterator=False, chunksize=None, verbose=False, encoding=None, squeeze=False, mangle_dupe_cols=True, tupleize_cols=False, infer_datetime_format=False) print len(dfr) df=dfr.dropna(subset=['grade']) print len(df) df=df[-(df.grade.isin(['dummygrade']))] print len(df) df=df[df.Date.str.endswith('T')] # This is a kludge because CPBL old code started with a different format print len(df) df.grade=df.grade.replace({'A':'0'}).map(int) df.datet=pd.to_datetime(df.Date,format=DATETIMEFMT) df['day']=df.datet.map(lambda x: x.strftime('%Y-%m-%d %b %d %a')) df.studentName= df.studentName.map(lambda ss:ss.strip()) print df dropstudents=['260416238','260553281','260350985'] df=df[-(df.studentID.isin(dropstudents))] if maxOneZeroPerDay: dfzeros=df.query('grade == 0').drop_duplicates(cols=['studentID','day','grade']) dfnonzeros=df.query('grade > 0') df=pd.concat([dfzeros,dfnonzeros]) byStudent=df.groupby(['studentName','studentID','grade'])['grade'].count().unstack('grade').fillna(0) byStudent['mean']= df.groupby(['studentName','studentID',])['grade'].mean() byStudent['N']= df.groupby(['studentName','studentID',])['grade'].count() afterAllowance=[] for astudent,adf in df.sort('Date').groupby(['studentName','studentID']): print adf[['studentName','grade','Date','studentID']] """ # You can only have one zero per day. for theday,oneday in adf.groupby('Date'): oneday.sort('grade',inplace=True) while len(oneday[oneday.grade==0])>1: oneday.drop[0] """ print byStudent print(' ') print byStudent.sort('mean') for astudent,adf in df.groupby(['studentName','studentID']): ff=adf.set_index(['studentName','studentID'])['grade'] print(ff.count()) for courseday, adf in df.groupby(['classfile','day']): acourse,aday=courseday if classfile is not None and classfile is not acourse: continue print(str(aday)+'\t%f\t%d'%(adf.grade.mean(),adf.grade.count())) return()#no_output_yet_except_printed) ########################################################################################### ### class cpblClassroomTools(): # # # # # # MAJOR CLASS # # # # # # ### ####################################################################################### """ """ # Caution: If you define variables here, before __init__, you can access them with cpblClassroomTools.avar , but those are common to the entire class, not to an instance. 
def __init__(self,classlistfile=None): if classlistfile is None or not os.path.exists(classlistfile): classlistfile=chooseClassListFile(classlistfile) self.classlistfile=classlistfile """ Shuffling the classlist initially to make remaining routines simpler """ df=self.loadClassList(self.classlistfile) # Process first, last names if "Student Name" in df: df['firstName']=df['Student Name'].map(lambda ss: ss.split(',')[1].strip()) df['lastName']=df['Student Name'].map(lambda ss: ss.split(',')[0].strip()) df['SNtex']=df.apply(lambda dd: dd.firstName+r' {\bf '+dd.lastName+ r'}',axis=1) df['SNhtml']=df.apply(lambda dd: dd.firstName+r' <b> '+dd.lastName+ r'</b>',axis=1) df['studentName']=df['Student Name'] df['ID']=df.ID.map(str) # Shuffle it. from random import shuffle ii=range(len(df)) shuffle(ii) self.classlist=df.reindex(ii) self.writeEmailList() def loadClassList(self,clfile): # Clean up file a little before parsing import codecs LL=[LL.strip('\n') for LL in codecs.open(clfile,'rt',encoding='iso-8859-1').readlines() if LL.strip('\n')] tmpfn='/home/meuser/tmp/tmpClasslistfile' startrow=[ii for ii in range(len(LL)) if 'Student Name' in LL[ii] or 'Email' in LL[ii]][0] with codecs.open(tmpfn,'wt',encoding='iso-8859-1') as ff: ff.write('\n'.join(LL[startrow:])+'\n') # Load classlist (modified a bit) #df=pd.read_csv(tmpfn,skiprows=8,encoding='iso-8859-1',index_col=False) df=pd.read_csv(tmpfn,encoding='iso-8859-1',index_col=False) return(df) def writeEmailList(self): if 'classlist.csv' in self.classlistfile: with open(self.classlistfile.replace('classlist.csv','classemails.txt'),'wt') as ff: ff.write(' , '.join(self.classlist['Email'].values)) def randomlyChooseOneStudent(self): """ Pick a student randomly, but! avoid those marked as absent today, and those called in the previous AVOID_PREVIOUS_N_STUDENTS=3 calls. """ if 0: df=self.classlist import numpy as np astudent= df.ix[np.random.choice(df.index, 1)]['studentName'].values[0] todays=loadGradeLogFromToday() toAvoid=pd.concat([ todays[todays.grade.isin(['A'])], todays.iloc[::-1][:AVOID_PREVIOUS_N_STUDENTS] ]) #print('Avoiding ',str(toAvoid.studentID.values)) eligible=self.classlist[-self.classlist.ID.isin(toAvoid.studentID)] dropped=self.classlist[self.classlist.ID.isin(toAvoid.studentID)] print('Avoided %d students as ineligible due to being absent or recently picked.'%(len(self.classlist)-len(eligible))) #astudent=self.classlist.iloc[0]['SNhtml'] astudent=eligible.iloc[0]['SNhtml'] print(astudent+' cannot be in ') print(toAvoid.studentName.values) with open(GRADES_FILE,'at') as ff: ff.write('\n'+'\t'.join([now,self.classlistfile,self.classlist.iloc[0]['studentName'],self.classlist.iloc[0]['ID']])) os.system("""zenity --title "ActiveLearning:1student" --info --text "<span foreground='blue' font='32'>%s</span>" & """%astudent) def randomlyAssignGroups(self,groupsize=None,numbergroups=None): """ Split the class up either into roughly size groupsize or roughly into groupnumber of groups. Class size N. Just count of n=groupsize individuals from the shuffled list. So we end up with leftover 1, 2, ...N-1. Distribute the extras to other groups? unless it is preferred to have small groups How (sorry.. this needs a real algorithm) increment group name if more than N remain, assign N+x to next group, where x is 0 or 1, depending on whether remaining number mod N ==0 For display, why not just create/show a PDF, rather than using a GUI text box tool? Seems easy enough, and it can be saved/ recalled. 
""" # A default behaviour: if groupsize is None and numbergroups is None: groupsize=4 assert groupsize is None or numbergroups is None df=self.classlist df['groupName']='' # Groups are named by letter groupnames='ABCDEFGHIJKLMNOPQRSTUVWXYZ' groupnames=list(groupnames) + [ii+jj for ii in groupnames for jj in groupnames] if numbergroups is None: groupsize,numbergroups = None,len(df)/groupsize # ii=0 # while ii<len(df): # x= (len(df)-ii) % groupsize # thisN=groupsize + 1*(x>0) # df['groupName'].iloc[ii:ii+thisN]=groupnames[0] # #df[ii:(ii+thisN)]['groupName']=groupnames[0] # #fooo # ii=ii+thisN # groupnames=groupnames[1:] assert numbergroups is not None # Number of groups (not size) is specified # Just grab the indices from the chunked dfs: for ii,adf in enumerate(nChunks(df,numbergroups)): df['groupName'].iloc[adf.index]=groupnames[ii] df=df.sort('groupName') html='' tex=r"""\documentclass{article}\begin{document} """ tex=r"""\documentclass{beamer}\usepackage[utf8]{inputenc}\begin{document}\begin{frame}[allowframebreaks] """ closing='\n'+r'\end{document}'+'\n' closing='\n'+r'\end{frame}\end{document}'+'\n' tex=r""" \documentclass{article} \usepackage[utf8]{inputenc} \usepackage{color} \usepackage{lscape} \usepackage[landscape,margin=0cm]{geometry} \begin{document} %\begin{landscape} \huge """ closing='\n'+ r""" \end{document}""" +'\n' ngroups=0 for gg, ss in df.groupby('groupName',sort=False)['SNtex']: ngroups+=1 html+=""" <table><tr><td>"""+gg+"""</td><td>"""+ss.values[0]+'</td></tr>'+ ''.join([ '<tr><td></td><td>'+nn+'</td></tr>' for nn in ss.values[1:]])+"""</table>""" tex+=r' \begin{tabular}{|rl|}\hline {\bf\color{blue} '+gg+':} & '+ss.values[0]+r' \\'+' \n'+ ''.join([ ' & '+nn+r' \\ ' for nn in ss.values[1:]])+ r' \hline \end{tabular}'+' \n' + (ngroups%3==0)*r'\\ ' print(html) tex+=closing import codecs DDR='/home/meuser/tmp/' with codecs.open(DDR+'tmpGroups.tex','wt',encoding='utf8') as ff: ff.write(tex) os.system('cd '+DDR +' && pdflatex tmpGroups.tex') os.system('cd '+DDR +' && pdflatex tmpGroups.tex') os.system('evince '+DDR+'tmpGroups.pdf &') # os.system("""zenity --title "" --text-info --html --text=" """+html+' " &') print("""zenity --title "" --text-info --html --text=" """+html+' " ') #"<span foreground='blue' font='32'>%s</span>" """%astudent) #os.system("""zenity --title "" --info --text " """+html+' " ') #<span foreground='blue' font='32'>%s</span>" """%astudent) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Various desktop pop-up tools for interactive classes.') parser.add_argument('-c','--classlist', #, metavar=None, type=str, nargs='1', action='store', help='A csv file containing data on the class list') parser.add_argument('--choose-student',#, type=int, nargs='+', action='store_true', help=' Display one student name, and record the name for subsequent grading') parser.add_argument('-s', '--record-score', #-choose-student',#, type=int, nargs='+', action='store', help=' Save a mark, associated with the recently displayed individual') parser.add_argument('-n', '--assign-groups-by-size', type=int, # nargs='+', action='store', help=' Assign students into groups of size n (or as close as possible)') parser.add_argument('-g', '--assign-into-groups', type=int, # nargs='+', action='store', help=' Assign students into G groups of roughly equal size') parser.add_argument('-a', '--mark-absent', action='store_true', help=' Mark the most recently displayed individual as absent today') parser.add_argument('--report-grades', 
action='store_true', help=' Produce reports for each course / each student on in-class assessment so far.') #parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the integers (default: find the max)') args = parser.parse_args() ct=cpblClassroomTools(classlistfile=args.classlist) if args.choose_student: ct.randomlyChooseOneStudent() elif args.mark_absent: markLastStudentAbsent() elif args.record_score is not None: recordGradeForLastStudent(args.record_score) elif args.assign_groups_by_size is not None: ct.randomlyAssignGroups(groupsize=args.assign_groups_by_size) elif args.assign_into_groups is not None: ct.randomlyAssignGroups(numbergroups=args.assign_into_groups) elif args.report_grades: report_all_grades() # Return a dataframe with all in-class students' records else: # Demo ct=cpblClassroomTools(classlistfile=classlistfile) # ct.randomlyAssignGroups(3) # ct.randomlyAssignGroups(4) ct.randomlyAssignGroups(10) #ct.randomlyChooseOneStudent()
cpbl/classroom_active_learning
classroomActiveLearning.py
Python
gpl-3.0
18,696
[ "ADF" ]
f0b7266beefbf904b31a596bb1540f21815ab687633f5bb8931a53b2a5bdfde3
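The nChunks helper above is flagged by its own comments as broken for some inputs (e.g. a list of 22 split into 6 groups). A simple alternative sketch that yields n contiguous groups whose sizes differ by at most one:

def n_chunks(lst, n):
    """Split lst into n contiguous groups whose sizes differ by at most 1."""
    base, extra = divmod(len(lst), n)
    start = 0
    for i in range(n):
        size = base + (1 if i < extra else 0)
        yield lst[start:start + size]
        start += size

# list(n_chunks(list(range(22)), 6)) -> group sizes [4, 4, 4, 4, 3, 3]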
""" FlexGet build and development utilities - unfortunately this file is somewhat messy """ from __future__ import print_function import os import subprocess import shutil import sys from paver.easy import * import paver.virtual import paver.setuputils from paver.shell import sh from paver.setuputils import setup, find_package_data, find_packages sphinxcontrib = False try: from sphinxcontrib import paverutils sphinxcontrib = True except ImportError: pass sys.path.insert(0, '') options = environment.options # There is a bug in sqlalchemy 0.9.0, see gh#127 # There is a bug in beautifulsoup 4.2.0 that breaks imdb parsing, see http://flexget.com/ticket/2091 # There is a bug in requests 2.4.0 where it leaks urllib3 exceptions install_requires = [ 'FeedParser>=5.2.1', 'SQLAlchemy >=0.7.5, !=0.9.0, <1.999', 'PyYAML', 'beautifulsoup4>=4.1, !=4.2.0, <4.4', 'html5lib>=0.11', 'PyRSS2Gen', 'pynzb', 'progressbar', 'rpyc', 'jinja2', 'requests>=1.0, !=2.4.0, <2.99', 'python-dateutil!=2.0, !=2.2', 'jsonschema>=2.0', 'tmdb3', 'path.py', 'guessit>=0.9.3, <0.10.4', 'apscheduler', 'flask>=0.7', 'flask-restful>=0.3.3', 'ordereddict>=1.1', 'flask-restplus==0.7.2', 'cherrypy>=3.7.0', 'flask-assets>=0.11', 'cssmin>=0.2.0', 'flask-compress>=1.2.1', 'flask-login>=0.3.2', 'pyparsing>=2.0.3', 'pyScss>=1.3.4', 'pytvmaze>=1.3.5' ] if sys.version_info < (2, 7): # argparse is part of the standard library in python 2.7+ install_requires.append('argparse') entry_points = {'console_scripts': ['flexget = flexget:main']} # Provide an alternate exe on windows which does not cause a pop-up when scheduled if sys.platform.startswith('win'): entry_points.setdefault('gui_scripts', []).append('flexget-headless = flexget:main') with open("README.rst") as readme: long_description = readme.read() # Populates __version__ without importing the package __version__ = None execfile('flexget/_version.py') if not __version__: print('Could not find __version__ from flexget/_version.py') sys.exit(1) setup( name='FlexGet', version=__version__, # release task may edit this description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) 
' 'from different sources like RSS-feeds, html-pages, various sites and more.', long_description=long_description, author='Marko Koivusalo', author_email='marko.koivusalo@gmail.com', license='MIT', url='http://flexget.com', download_url='http://download.flexget.com', install_requires=install_requires, packages=find_packages(exclude=['tests']), package_data=find_package_data('flexget', package='flexget', exclude=['FlexGet.egg-info', '*.pyc'], exclude_directories=['node_modules', 'bower_components'], only_in_packages=False), # NOTE: the exclude does not seem to work zip_safe=False, test_suite='nose.collector', extras_require={ 'memusage': ['guppy'], 'NZB': ['pynzb'], 'TaskTray': ['pywin32'], }, entry_points=entry_points, classifiers=[ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] ) options( minilib=Bunch( # 'version' is included as workaround to https://github.com/paver/paver/issues/112, TODO: remove extra_files=['virtual', 'svn', 'version'] ), virtualenv=Bunch( paver_command_line='develop' ), # sphinxcontrib.paverutils sphinx=Bunch( docroot='docs', builddir='build', builder='html', confdir='docs' ), ) def set_init_version(ver): """Replaces the version with ``ver`` in _version.py""" import fileinput for line in fileinput.FileInput('flexget/_version.py', inplace=1): if line.startswith('__version__ = '): line = "__version__ = '%s'\n" % ver print(line, end='') @task def version(): """Prints the version number of the source""" print(__version__) @task @cmdopts([('dev', None, 'Bumps to new development version instead of release version.')]) def increment_version(options): """Increments either release or dev version by 1""" print('current version: %s' % __version__) ver_split = __version__.split('.') dev = options.increment_version.get('dev') if 'dev' in ver_split[-1]: if dev: # If this is already a development version, increment the dev count by 1 ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1) else: # Just strip off dev tag for next release version ver_split = ver_split[:-1] else: # Increment the revision number by one if len(ver_split) == 2: # We don't have a revision number, assume 0 ver_split.append('1') else: ver_split[-1] = str(int(ver_split[-1]) + 1) if dev: ver_split.append('dev') new_version = '.'.join(ver_split) print('new version: %s' % new_version) set_init_version(new_version) @task @cmdopts([ ('online', None, 'Run online tests') ]) def test(options): """Run FlexGet unit tests""" options.setdefault('test', Bunch()) import nose from nose.plugins.manager import DefaultPluginManager cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2) args = [] # Adding the -v flag makes the tests fail in python 2.7 #args.append('-v') args.append('--processes=4') args.append('-x') if not options.test.get('online'): args.append('--attr=!online') args.append('--where=tests') # Store current path since --where changes it, restore when leaving cwd = os.getcwd() try: return nose.run(argv=args, config=cfg) finally: os.chdir(cwd) @task def clean(): """Cleans up the virtualenv""" import os import glob for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man', 'share', 'FlexGet.egg-info', 'paver-minilib.zip', 
'setup.py'): pth = path(p) if pth.isdir(): pth.rmtree() elif pth.isfile(): pth.remove() for pkg in set(options.setup.packages) | set(('tests',)): for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"): path(filename).remove() @task @cmdopts([ ('dist-dir=', 'd', 'directory to put final built distributions in'), ('revision=', 'r', 'minor revision number of this build') ]) def sdist(options): """Build tar.gz distribution package""" print('sdist version: %s' % __version__) # clean previous build print('Cleaning build...') for p in ['build']: pth = path(p) if pth.isdir(): pth.rmtree() elif pth.isfile(): pth.remove() else: print('Unable to remove %s' % pth) # remove pre-compiled pycs from tests, I don't know why paver even tries to include them ... # seems to happen only with sdist though for pyc in path('tests/').files('*.pyc'): pyc.remove() for t in ['minilib', 'generate_setup', 'setuptools.command.sdist']: call_task(t) @task def coverage(): """Make coverage.flexget.com""" # --with-coverage --cover-package=flexget --cover-html --cover-html-dir /var/www/flexget_coverage/ import nose from nose.plugins.manager import DefaultPluginManager cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2) argv = ['bin/paver'] argv.extend(['--attr=!online']) argv.append('--with-coverage') argv.append('--cover-html') argv.extend(['--cover-package', 'flexget']) argv.extend(['--cover-html-dir', '/var/www/flexget_coverage/']) nose.run(argv=argv, config=cfg) print('Coverage generated') @task @cmdopts([ ('docs-dir=', 'd', 'directory to put the documetation in') ]) def docs(): if not sphinxcontrib: print('ERROR: requires sphinxcontrib-paverutils') sys.exit(1) from paver import tasks if not os.path.exists('build'): os.mkdir('build') if not os.path.exists(os.path.join('build', 'sphinx')): os.mkdir(os.path.join('build', 'sphinx')) setup_section = tasks.environment.options.setdefault("sphinx", Bunch()) setup_section.update(outdir=options.docs.get('docs_dir', 'build/sphinx')) call_task('sphinxcontrib.paverutils.html') @task @might_call('test', 'sdist') @cmdopts([('no-tests', None, 'skips unit tests')]) def release(options): """Run tests then make an sdist if successful.""" if not options.release.get('no_tests'): if not test(): print('Unit tests did not pass') sys.exit(1) print('Making src release') sdist() @task def install_tools(): """Install development / jenkins tools and dependencies""" try: import pip except ImportError: print('FATAL: Unable to import pip, please install it and run this again!') sys.exit(1) try: import sphinxcontrib print('sphinxcontrib INSTALLED') except ImportError: pip.main(['install', 'sphinxcontrib-paverutils']) pip.main(['install', '-r', 'jenkins-requirements.txt']) @task def clean_compiled(): for root, dirs, files in os.walk('flexget'): for name in files: fqn = os.path.join(root, name) if fqn[-3:] == 'pyc' or fqn[-3:] == 'pyo' or fqn[-5:] == 'cover': print('Deleting %s' % fqn) os.remove(fqn) @task @consume_args def pep8(args): try: import pep8 except: print('Run bin/paver install_tools') sys.exit(1) # Ignoring certain errors ignore = [ 'E711', 'E712', # These are comparisons to singletons i.e. == False, and == None. We need these for sqlalchemy. 
'W291', 'W293', 'E261', 'E128' # E128 continuation line under-indented for visual indent ] styleguide = pep8.StyleGuide(show_source=True, ignore=ignore, repeat=1, max_line_length=120, parse_argv=args) styleguide.input_dir('flexget') @task @cmdopts([ ('file=', 'f', 'name of the requirements file to create') ]) def requirements(options): filename = options.requirements.get('file', 'requirements.txt') with open(filename, mode='w') as req_file: req_file.write('\n'.join(options.install_requires)) @task def build_webui(): cwd = os.path.join('flexget', 'ui') # Cleanup previous builds for folder in ['bower_components' 'node_modules']: folder = os.path.join(cwd, folder) if os.path.exists(folder): shutil.rmtree(folder) # Install npm packages sh(['npm', 'install'], cwd=cwd) # Build the ui sh(['bower', 'install'], cwd=cwd) # Build the ui sh('gulp', cwd=cwd)
offbyone/Flexget
pavement.py
Python
mit
11,354
[ "GULP" ]
248a539675a9088faca064cce1cf941b8b8a1d02940d7e6311e3d6b601c60e26
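For readers unfamiliar with Paver, a task with command-line options follows the same shape as the tasks above. A minimal, self-contained sketch (illustrative only, not part of FlexGet):

from paver.easy import task, cmdopts

@task
@cmdopts([('name=', 'n', 'name to greet')])
def hello(options):
    """Print a greeting; options are namespaced under the task name."""
    print('Hello, %s!' % options.hello.get('name', 'world'))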
""" Description here Author: Leonard Berrada Date: 6 Nov 2015 """ import numpy as np import pandas as pd import copy import matplotlib.pyplot as plt from regression import RegressionModel class KalmanFilter(RegressionModel): """ simple Linear Gaussian Kalman Filter""" def __init__(self, data, p): RegressionModel.__init__(self, data) self.p = p def fit(self): self._pred_df = pd.DataFrame() n_pred = self.n_training + self.n_testing - self.p self._pred_df['ypred'] = np.zeros(n_pred) self._pred_df['yerr'] = np.zeros(n_pred) self.embed_data() a_hat = np.array([1.] + [0.] * (self.p - 1)).reshape(self.p, 1) P_up = np.eye(self.p) Q = 0.1 R = 0.1 for i in range(self.n_training - self.p): # prediction step P_pred = P_up + Q # aux variables H = self.Y_training(start=i, stop=i + self.p).reshape(1, self.p) obs = float(self.Y_training([i + self.p])) nu = obs - float(H.dot(a_hat)) S = float(H.dot(P_pred).dot(H.T)) + R K = P_pred.dot(H.T) * 1. / S # predict data value self._pred_df['ypred'][i] = float(H.dot(a_hat)) # update step a_hat += K * nu P_up = (np.eye(self.p) - K.dot(H)).dot(P_pred) self._a_hat = a_hat.flatten() y = copy.copy(self.Y_training(start=-self.p)) for i in range(self.n_training - self.p, n_pred): pred = float(self._a_hat.dot(y)) y[:-1] = y[1:] y[-1] = pred self._pred_df['ypred'][i] = pred ground_truth = np.concatenate((self.Y_training(start=self.p), self.Y_testing())) self._pred_df["yerr"] = ground_truth - self.Y_pred()
leonardbj/AIMS
src/Regression/kalman.py
Python
mit
1,932
[ "Gaussian" ]
b214ade30ddbdc6a3970954e660189b679b02a52d2cdfc6652df25c3b41c3230
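The core of fit above is a scalar-observation Kalman update. Stripped of the RegressionModel plumbing (whose interface is not shown here), one step looks like this self-contained sketch with stand-in data:

import numpy as np

p = 3
a_hat = np.array([1.0] + [0.0] * (p - 1)).reshape(p, 1)  # AR(p) coefficients
P = np.eye(p)        # state covariance
Q, R = 0.1, 0.1      # process / observation noise

H = np.random.rand(1, p)       # the last p observations (stand-in data)
obs = float(np.random.rand())  # the next observation (stand-in data)

P_pred = P + Q                           # prediction step
nu = obs - float(H.dot(a_hat))           # innovation
S = float(H.dot(P_pred).dot(H.T)) + R    # innovation variance
K = P_pred.dot(H.T) / S                  # Kalman gain
a_hat = a_hat + K * nu                   # state update
P = (np.eye(p) - K.dot(H)).dot(P_pred)   # covariance update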
from ase.visualize import view
from ase.io.opls import OPLSStructure

s = OPLSStructure('172_mod.xyz')  # 172_mod.xyz is the file name for the structure above
view(s)  # view with real elements

elements = {'CT': 'Si', 'HC': 'H', 'H1': 'He'}
view(s.colored(elements))  # view with fake elements
grhawk/ASE
tools/doc/ase/io/view_172_mod.py
Python
gpl-2.0
296
[ "ASE" ]
3d226e29e9417aa2b98d2ec4e557769acb2671b372cdd3ddc0ebfd879d6fe44b
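Note on the elements mapping above: OPLS force-field atom types (CT, HC, H1) are not chemical elements, so the second view call maps each type to a distinct stand-in element purely for color coding in the viewer.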
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Bipartite maximum matching

jill-jenn vie and christoph durr - 2014-2018
"""

__all__ = ["max_bipartite_matching", "max_bipartite_matching2"]


# snip{
def augment(u, bigraph, visit, match):
    """augment"""
    for v in bigraph[u]:
        if not visit[v]:
            visit[v] = True
            if match[v] is None or augment(match[v], bigraph, visit, match):
                match[v] = u  # found an augmenting path
                return True
    return False


def max_bipartite_matching(bigraph):
    """Bipartite maximum matching

    :param bigraph: adjacency list, index = vertex in U,
                    value = neighbor list in V
    :assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
    :returns: matching list, match[v] == u iff (u, v) in matching
    :complexity: `O(|V|*|E|)`
    """
    n = len(bigraph)  # same domain for U and V
    match = [None] * n
    for u in range(n):
        augment(u, bigraph, [False] * n, match)
    return match
# snip}


def max_bipartite_matching2(bigraph):
    """Bipartite maximum matching

    :param bigraph: adjacency list, index = vertex in U,
                    value = neighbor list in V
    :comment: U and V can have different cardinalities
    :returns: matching list, match[v] == u iff (u, v) in matching
    :complexity: `O(|V|*|E|)`
    """
    nU = len(bigraph)
    # the following line works only in Python >= 3.4, where max() accepts
    # a `default` keyword argument:
    # nV = max(max(adjlist, default=-1) for adjlist in bigraph) + 1
    nV = 0
    for adjlist in bigraph:
        for v in adjlist:
            if v + 1 > nV:
                nV = v + 1
    match = [None] * nV
    for u in range(nU):
        augment(u, bigraph, [False] * nV, match)
    return match
# snip}
jilljenn/tryalgo
tryalgo/bipartite_matching.py
Python
mit
1,855
[ "VisIt" ]
5aa33a6b2c2e7a02aa5d9128174650ecc778b21351b4c8b3aefb4d2e9276d5ce
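A small usage example of max_bipartite_matching above (the graph is made up for illustration):

# U = {0, 1, 2}, V = {0, 1, 2}; bigraph[u] lists the neighbors of u in V.
bigraph = [[0, 1], [0], [1, 2]]
match = max_bipartite_matching(bigraph)
print(match)  # [1, 0, 2]: match[v] == u iff (u, v) is in the matching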
#!/usr/bin/env python """ .. py:currentmodule:: FileFormat.MicroscopeParameters .. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca> MCXRay microscope parameters input file. """ # Script information for the file. __author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)" __version__ = "" __date__ = "" __copyright__ = "Copyright (c) 2012 Hendrix Demers" __license__ = "" # Subversion informations for the file. __svnRevision__ = "$Revision$" __svnDate__ = "$Date$" __svnId__ = "$Id$" # Standard library modules. import copy # Third party modules. # Local modules. # Project modules import pymcxray.FileFormat.Version as Version from pymcxray.FileFormat.FileReaderWriterTools import reduceAfterDot # Globals and constants variables. KEY_BEAM_ENERGY_keV = "BeamEnergy" KEY_BEAM_CURRENT_A = "BeamCurrent" KEY_BEAM_TIME_s = "AcquisitionTime" KEY_BEAM_DIAMETER_A = "BeamDiameter" KEY_BEAM_POSITION_X_A = "BeamPosX" KEY_BEAM_POSITION_Y_A = "BeamPosY" KEY_BEAM_TILT_deg = "BeamTilt" KEY_BEAM_STANDARD_DEVIATION_A = "BeamStandardDeviation" KEY_DETECTOR_CRYSTAL_ATOM = "DetectorCrystalAtom" KEY_DETECTOR_CRYSTAL_THICKNESS_cm = "DetectorCrystalThickness" KEY_DETECTOR_CRYSTAL_RADIUS_cm = "DetectorCrystalRadius" KEY_DETECTOR_CRYSTAL_DISTANCE_cm = "DetectorCrystalDistance" KEY_DETECTOR_DEAD_LAYER_A = "DetectorDeadLayer" KEY_DETECTOR_DIFFUSION_LENGHT_A = "DetectorDiffusionLenght" KEY_DETECTOR_SURFACE_QUALITY = "DetectorSurfaceQuality" KEY_DETECTOR_NOISE_eV = "DetectorNoise" KEY_DETECTOR_TOA_deg = "DetectorTOA" KEY_DETECTOR_CHANNEL_WIDTH_eV = "DetectorChannelWidth" KEY_DETECTOR_PITCH_deg = "DetectorPitch" KEY_DETECTOR_BF_LOW_rad = "DetectorBFLow" KEY_DETECTOR_BF_HIGH_RAD = "DetectorBFHigh" KEY_DETECTOR_DF_LOW_rad = "DetectorDFLow" KEY_DETECTOR_DF_HIGH_rad = "DetectorDFHigh" KEY_DETECTOR_HAADF_LOW_rad = "DetectorHAADFLow" KEY_DETECTOR_HAADF_HIGH_rad = "DetectorHAADFHigh" class MicroscopeParameters(object): def __init__(self): self.version = copy.deepcopy(Version.CURRENT_VERSION) self._keys = self._createKeys() self._parameters = {} self.defaultValues() def _createKeys(self): keys = [] keys.append(KEY_BEAM_ENERGY_keV) keys.append(KEY_BEAM_CURRENT_A) keys.append(KEY_BEAM_TIME_s) keys.append(KEY_BEAM_DIAMETER_A) keys.append(KEY_BEAM_POSITION_X_A) keys.append(KEY_BEAM_POSITION_Y_A) keys.append(KEY_BEAM_TILT_deg) keys.append(KEY_BEAM_STANDARD_DEVIATION_A) keys.append(KEY_DETECTOR_CRYSTAL_ATOM) keys.append(KEY_DETECTOR_CRYSTAL_THICKNESS_cm) keys.append(KEY_DETECTOR_CRYSTAL_RADIUS_cm) keys.append(KEY_DETECTOR_CRYSTAL_DISTANCE_cm) keys.append(KEY_DETECTOR_DEAD_LAYER_A) keys.append(KEY_DETECTOR_DIFFUSION_LENGHT_A) keys.append(KEY_DETECTOR_SURFACE_QUALITY) keys.append(KEY_DETECTOR_NOISE_eV) keys.append(KEY_DETECTOR_TOA_deg) #keys.append(KEY_DETECTOR_CHANNEL_WIDTH_eV) keys.append(KEY_DETECTOR_PITCH_deg) keys.append(KEY_DETECTOR_BF_LOW_rad) keys.append(KEY_DETECTOR_BF_HIGH_RAD) keys.append(KEY_DETECTOR_DF_LOW_rad) keys.append(KEY_DETECTOR_DF_HIGH_rad) keys.append(KEY_DETECTOR_HAADF_LOW_rad) keys.append(KEY_DETECTOR_HAADF_HIGH_rad) return keys def defaultValues(self): self.beamEnergy_keV = 20.0 self.beamCurrent_A = 1e-10 self.time_s = 100.0 self.beamDiameter_A = 10.0 self.beamPositionX_A = 0.0 self.beamPositionY_A = 0.0 self.beamTilt_deg = 0.0 self.beamStandardDeviation_A = 3.03030303030303 self.detectorCrystalAtomSymbol = 'Si' self.detectorCrystalThickness_cm = 0.3 self.detectorCrystalRadius_cm = 0.3 self.detectorCrystalDistance_cm = 4.0 self.detectorDeadLayer_A = 200.0 self.detectorDiffusionLenght_A = 0.5 
self.detectorSurfaceQuality = 1.0 self.detectorNoise_eV = 50.0 self.detectorTOA_deg = 40.0 self.detectorPitch_deg = 90.0 self.detectorBFLow_rad = 0.0 self.detectorBFHigh_rad = 0.01 self.detectorDFLow_rad = 0.02 self.detectorDFHigh_rad = 0.1 self.detectorHAADFLow_rad = 0.15 self.detectorHAADFHigh_rad = 0.3 self.detectorChannelWidth_eV = 5.0 def _createExtractMethod(self): extractMethods = {} extractMethods[KEY_BEAM_ENERGY_keV] = float extractMethods[KEY_BEAM_CURRENT_A] = float extractMethods[KEY_BEAM_TIME_s] = float extractMethods[KEY_BEAM_DIAMETER_A] = float extractMethods[KEY_BEAM_POSITION_X_A] = float extractMethods[KEY_BEAM_POSITION_Y_A] = float extractMethods[KEY_BEAM_TILT_deg] = float extractMethods[KEY_BEAM_STANDARD_DEVIATION_A] = float extractMethods[KEY_DETECTOR_CRYSTAL_ATOM] = str extractMethods[KEY_DETECTOR_CRYSTAL_THICKNESS_cm] = float extractMethods[KEY_DETECTOR_CRYSTAL_RADIUS_cm] = float extractMethods[KEY_DETECTOR_CRYSTAL_DISTANCE_cm] = float extractMethods[KEY_DETECTOR_DEAD_LAYER_A] = float extractMethods[KEY_DETECTOR_DIFFUSION_LENGHT_A] = float extractMethods[KEY_DETECTOR_SURFACE_QUALITY] = float extractMethods[KEY_DETECTOR_NOISE_eV] = float extractMethods[KEY_DETECTOR_TOA_deg] = float extractMethods[KEY_DETECTOR_CHANNEL_WIDTH_eV] = float extractMethods[KEY_DETECTOR_PITCH_deg] = float extractMethods[KEY_DETECTOR_BF_LOW_rad] = float extractMethods[KEY_DETECTOR_BF_HIGH_RAD] = float extractMethods[KEY_DETECTOR_DF_LOW_rad] = float extractMethods[KEY_DETECTOR_DF_HIGH_rad] = float extractMethods[KEY_DETECTOR_HAADF_LOW_rad] = float extractMethods[KEY_DETECTOR_HAADF_HIGH_rad] = float return extractMethods def read(self, filepath): self.version.readFromFile(filepath) lines = open(filepath, 'r').readlines() extractMethods = self._createExtractMethod() for line in lines: line = line.strip() for key in self._keys: if line.startswith(key): items = line.split('=') self._parameters[key] = extractMethods[key](items[-1]) def write(self, filepath): outputFile = open(filepath, 'w') self._writeHeader(outputFile) self.version.writeLine(outputFile) formats = self._createFormats() for key in self._createKeys(): value = formats[key](self._parameters[key]) if "e-" in value: value = value.replace('e-', 'e-0') if "e+" in value: value = value.replace('e+', 'e+0') line = "%s=%s\n" % (key, value) outputFile.write(line) def _writeHeader(self, outputFile): headerLines = [ "********************************************************************************", "*** MICROSCOPE", "***", "*** BeamEnergy = Tension of the collimated electrons", "*** BeamCurrent = Current of the electron beam", "*** BeamDiameter = Diameter at 90% of the electron beam", "*** BeamPosX = Position in X of the electron beam", "*** BeamPosY = Position in Y of the electron beam", "*** BeamTilt = Theta angle of the electron beam (deg)", "*** BeamStandardDeviation = Standard deviation of the Gaussian distribution of the electrons in the beam", "*** DetectorCrystalAtom = Atomic symbol, name or number of the detector crystal", "*** DetectorCrystalThickness = Thickness of the detector crystal", "*** DetectorCrystalRadius = Radius of the detector crystal", "*** DetectorCrystalDistance = Distance of the detector crystal to the sample", "*** DetectorDeadLayer = Thickness of the detector dead layer", "*** DetectorDiffusionLenght = Diffusion lenght of the detector", "*** DetectorSurfaceQuality = Surface quality of the detector", "*** DetectorNoise = Noise at EDS", "*** DetectorTOA = Take off angle of the detector (deg)", "*** DetectorPitch = Phi 
angle of the detector (deg)", "*** DetectorBFLow = Bright Field low angle (rad)", "*** DetectorBFHigh = Bright Field high angle (rad)", "*** DetectorDFLow = Dark Field low angle (rad)", "*** DetectorDFHigh = Dark Field high angle (rad)", "*** DetectorHAADFLow = High Angle Annular Dark Field low angle (rad)", "*** DetectorHAADFHigh = High Angle Annular Dark Field high angle (rad)", "***", "********************************************************************************"] for line in headerLines: outputFile.write(line+'\n') def _createFormats(self): formats = {} formats[KEY_BEAM_ENERGY_keV] = lambda value: "%.6f" % (value) formats[KEY_BEAM_CURRENT_A] = lambda value: "%.16g" % (value) formats[KEY_BEAM_TIME_s] = lambda value: "%.16g" % (value) formats[KEY_BEAM_DIAMETER_A] = lambda value: "%.16g" % (value) formats[KEY_BEAM_POSITION_X_A] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_BEAM_POSITION_Y_A] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_BEAM_TILT_deg] = lambda value: "%.6f" % (value) formats[KEY_BEAM_STANDARD_DEVIATION_A] = lambda value: reduceAfterDot("%.6g" % (value)) formats[KEY_DETECTOR_CRYSTAL_ATOM] = lambda value: "%s" % (value) formats[KEY_DETECTOR_CRYSTAL_THICKNESS_cm] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_CRYSTAL_RADIUS_cm] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_DETECTOR_CRYSTAL_DISTANCE_cm] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_DETECTOR_DEAD_LAYER_A] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_DETECTOR_DIFFUSION_LENGHT_A] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_DETECTOR_SURFACE_QUALITY] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_NOISE_eV] = lambda value: reduceAfterDot("%.6f" % (value)) formats[KEY_DETECTOR_TOA_deg] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_CHANNEL_WIDTH_eV] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_PITCH_deg] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_BF_LOW_rad] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_BF_HIGH_RAD] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_DF_LOW_rad] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_DF_HIGH_rad] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_HAADF_LOW_rad] = lambda value: "%.6f" % (value) formats[KEY_DETECTOR_HAADF_HIGH_rad] = lambda value: "%.6f" % (value) return formats @property def version(self): return self._version @version.setter def version(self, version): self._version = version @property def beamEnergy_keV(self): return self._parameters[KEY_BEAM_ENERGY_keV] @beamEnergy_keV.setter def beamEnergy_keV(self, beamEnergy_keV): self._parameters[KEY_BEAM_ENERGY_keV] = beamEnergy_keV @property def beamCurrent_A(self): return self._parameters[KEY_BEAM_CURRENT_A] @beamCurrent_A.setter def beamCurrent_A(self, beamCurrent_A): self._parameters[KEY_BEAM_CURRENT_A] = beamCurrent_A @property def time_s(self): return self._parameters[KEY_BEAM_TIME_s] @time_s.setter def time_s(self, time_s): self._parameters[KEY_BEAM_TIME_s] = time_s @property def beamDiameter_A(self): return self._parameters[KEY_BEAM_DIAMETER_A] @beamDiameter_A.setter def beamDiameter_A(self, beamDiameter_A): self._parameters[KEY_BEAM_DIAMETER_A] = beamDiameter_A @property def beamPositionX_A(self): return self._parameters[KEY_BEAM_POSITION_X_A] @beamPositionX_A.setter def beamPositionX_A(self, beamPositionX_A): self._parameters[KEY_BEAM_POSITION_X_A] = beamPositionX_A @property def beamPositionY_A(self): return self._parameters[KEY_BEAM_POSITION_Y_A] 
@beamPositionY_A.setter def beamPositionY_A(self, beamPositionY_A): self._parameters[KEY_BEAM_POSITION_Y_A] = beamPositionY_A @property def beamTilt_deg(self): return self._parameters[KEY_BEAM_TILT_deg] @beamTilt_deg.setter def beamTilt_deg(self, beamTilt_deg): self._parameters[KEY_BEAM_TILT_deg] = beamTilt_deg @property def beamStandardDeviation_A(self): return self._parameters[KEY_BEAM_STANDARD_DEVIATION_A] @beamStandardDeviation_A.setter def beamStandardDeviation_A(self, beamStandardDeviation_A): self._parameters[KEY_BEAM_STANDARD_DEVIATION_A] = beamStandardDeviation_A @property def detectorCrystalAtomSymbol(self): return self._parameters[KEY_DETECTOR_CRYSTAL_ATOM] @detectorCrystalAtomSymbol.setter def detectorCrystalAtomSymbol(self, detectorCrystalAtomSymbol): self._parameters[KEY_DETECTOR_CRYSTAL_ATOM] = detectorCrystalAtomSymbol @property def detectorCrystalThickness_cm(self): return self._parameters[KEY_DETECTOR_CRYSTAL_THICKNESS_cm] @detectorCrystalThickness_cm.setter def detectorCrystalThickness_cm(self, detectorCrystalThickness_cm): self._parameters[KEY_DETECTOR_CRYSTAL_THICKNESS_cm] = detectorCrystalThickness_cm @property def detectorCrystalRadius_cm(self): return self._parameters[KEY_DETECTOR_CRYSTAL_RADIUS_cm] @detectorCrystalRadius_cm.setter def detectorCrystalRadius_cm(self, detectorCrystalRadius_cm): self._parameters[KEY_DETECTOR_CRYSTAL_RADIUS_cm] = detectorCrystalRadius_cm @property def detectorCrystalDistance_cm(self): return self._parameters[KEY_DETECTOR_CRYSTAL_DISTANCE_cm] @detectorCrystalDistance_cm.setter def detectorCrystalDistance_cm(self, detectorCrystalDistance_cm): self._parameters[KEY_DETECTOR_CRYSTAL_DISTANCE_cm] = detectorCrystalDistance_cm @property def detectorDeadLayer_A(self): return self._parameters[KEY_DETECTOR_DEAD_LAYER_A] @detectorDeadLayer_A.setter def detectorDeadLayer_A(self, detectorDeadLayer_A): self._parameters[KEY_DETECTOR_DEAD_LAYER_A] = detectorDeadLayer_A @property def detectorDiffusionLenght_A(self): return self._parameters[KEY_DETECTOR_DIFFUSION_LENGHT_A] @detectorDiffusionLenght_A.setter def detectorDiffusionLenght_A(self, detectorDiffusionLenght_A): self._parameters[KEY_DETECTOR_DIFFUSION_LENGHT_A] = detectorDiffusionLenght_A @property def detectorSurfaceQuality(self): return self._parameters[KEY_DETECTOR_SURFACE_QUALITY] @detectorSurfaceQuality.setter def detectorSurfaceQuality(self, detectorSurfaceQuality): self._parameters[KEY_DETECTOR_SURFACE_QUALITY] = detectorSurfaceQuality @property def detectorNoise_eV(self): return self._parameters[KEY_DETECTOR_NOISE_eV] @detectorNoise_eV.setter def detectorNoise_eV(self, detectorNoise_eV): self._parameters[KEY_DETECTOR_NOISE_eV] = detectorNoise_eV @property def detectorTOA_deg(self): return self._parameters[KEY_DETECTOR_TOA_deg] @detectorTOA_deg.setter def detectorTOA_deg(self, detectorTOA_deg): self._parameters[KEY_DETECTOR_TOA_deg] = detectorTOA_deg @property def detectorAzimuthalAngle_deg(self): return self._parameters[KEY_DETECTOR_PITCH_deg] @detectorAzimuthalAngle_deg.setter def detectorAzimuthalAngle_deg(self, detectorAzimuthalAngle_deg): self._parameters[KEY_DETECTOR_PITCH_deg] = detectorAzimuthalAngle_deg @property def detectorChannelWidth_eV(self): return self._parameters[KEY_DETECTOR_CHANNEL_WIDTH_eV] @detectorChannelWidth_eV.setter def detectorChannelWidth_eV(self, detectorChannelWidth_eV): self._parameters[KEY_DETECTOR_CHANNEL_WIDTH_eV] = detectorChannelWidth_eV @property def detectorPitch_deg(self): return self._parameters[KEY_DETECTOR_PITCH_deg] @detectorPitch_deg.setter def 
detectorPitch_deg(self, detectorPitch_deg): self._parameters[KEY_DETECTOR_PITCH_deg] = detectorPitch_deg @property def detectorBFLow_rad(self): return self._parameters[KEY_DETECTOR_BF_LOW_rad] @detectorBFLow_rad.setter def detectorBFLow_rad(self, detectorBFLow_rad): self._parameters[KEY_DETECTOR_BF_LOW_rad] = detectorBFLow_rad @property def detectorBFHigh_rad(self): return self._parameters[KEY_DETECTOR_BF_HIGH_RAD] @detectorBFHigh_rad.setter def detectorBFHigh_rad(self, detectorBFHigh_rad): self._parameters[KEY_DETECTOR_BF_HIGH_RAD] = detectorBFHigh_rad @property def detectorDFLow_rad(self): return self._parameters[KEY_DETECTOR_DF_LOW_rad] @detectorDFLow_rad.setter def detectorDFLow_rad(self, detectorDFLow_rad): self._parameters[KEY_DETECTOR_DF_LOW_rad] = detectorDFLow_rad @property def detectorDFHigh_rad(self): return self._parameters[KEY_DETECTOR_DF_HIGH_rad] @detectorDFHigh_rad.setter def detectorDFHigh_rad(self, detectorDFHigh_rad): self._parameters[KEY_DETECTOR_DF_HIGH_rad] = detectorDFHigh_rad @property def detectorHAADFLow_rad(self): return self._parameters[KEY_DETECTOR_HAADF_LOW_rad] @detectorHAADFLow_rad.setter def detectorHAADFLow_rad(self, detectorHAADFLow_rad): self._parameters[KEY_DETECTOR_HAADF_LOW_rad] = detectorHAADFLow_rad @property def detectorHAADFHigh_rad(self): return self._parameters[KEY_DETECTOR_HAADF_HIGH_rad] @detectorHAADFHigh_rad.setter def detectorHAADFHigh_rad(self, detectorHAADFHigh_rad): self._parameters[KEY_DETECTOR_HAADF_HIGH_rad] = detectorHAADFHigh_rad
drix00/pymcxray
pymcxray/FileFormat/MicroscopeParameters.py
Python
apache-2.0
19,275
[ "CRYSTAL", "Gaussian" ]
171474da661173b0dcb77e75343d9e6566e12c0cf391a6960069de683bdfade2
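A hedged round-trip sketch of the class above; the file name is a placeholder, and the Version read/write helpers are assumed to behave as the class expects:

mp = MicroscopeParameters()
mp.beamEnergy_keV = 30.0
mp.detectorTOA_deg = 35.0
mp.write('MicroscopeOptions.txt')  # placeholder path

mp2 = MicroscopeParameters()
mp2.read('MicroscopeOptions.txt')
print(mp2.beamEnergy_keV)  # 30.0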
# -*- coding: utf-8 -*- import datetime import json import mock import pytest import requests_mock from constance.test import override_config from django.conf import settings from django.contrib.sites.models import Site from django.core import mail from django.template.loader import render_to_string from django.utils.six.moves import html_parser from django.utils.six.moves.urllib.parse import parse_qs, urlencode, urlparse from pyquery import PyQuery as pq from waffle.testutils import override_flag, override_switch from kuma.core.templatetags.jinja_helpers import add_utm from kuma.core.tests import (assert_no_cache_header, assert_shared_cache_header, call_on_commit_immediately, get_user) from kuma.core.urlresolvers import reverse from kuma.core.utils import to_html from kuma.spam.constants import ( SPAM_CHECKS_FLAG, SPAM_SUBMISSIONS_FLAG, VERIFY_URL) from kuma.users.tests import UserTestCase from . import (create_document_tree, document, make_translation, new_document_data, normalize_html, revision, WikiTestCase) from ..content import get_seo_description from ..events import EditDocumentEvent, EditDocumentInTreeEvent from ..forms import MIDAIR_COLLISION from ..models import Document, RevisionIP from ..templatetags.jinja_helpers import get_compare_url from ..views.document import _get_seo_parent_title class ViewTests(UserTestCase, WikiTestCase): fixtures = UserTestCase.fixtures + ['wiki/documents.json'] def test_json_view(self): """bug 875349""" expected_tags = sorted(['foo', 'bar', 'baz']) expected_review_tags = sorted(['tech', 'editorial']) doc = Document.objects.get(pk=1) doc.tags.set(*expected_tags) doc.current_revision.review_tags.set(*expected_review_tags) url = reverse('wiki.json') resp = self.client.get(url, {'title': 'an article title'}) assert resp.status_code == 200 assert_shared_cache_header(resp) data = json.loads(resp.content) assert data['slug'] == 'article-title' result_tags = sorted([str(x) for x in data['tags']]) assert result_tags == expected_tags result_review_tags = sorted([str(x) for x in data['review_tags']]) assert result_review_tags == expected_review_tags url = reverse('wiki.json_slug', args=('article-title',)) with override_switch('application_ACAO', True): resp = self.client.get(url) assert resp.status_code == 200 assert_shared_cache_header(resp) assert resp['Access-Control-Allow-Origin'] == '*' data = json.loads(resp.content) assert data['title'] == 'an article title' assert 'translations' in data result_tags = sorted([str(x) for x in data['tags']]) assert result_tags == expected_tags result_review_tags = sorted([str(x) for x in data['review_tags']]) assert result_review_tags == expected_review_tags def test_toc_view(self): slug = 'toc_test_doc' html = '<h2>Head 2</h2><h3>Head 3</h3>' doc = document(title='blah', slug=slug, html=html, save=True, locale=settings.WIKI_DEFAULT_LANGUAGE) revision(document=doc, content=html, is_approved=True, save=True) url = reverse('wiki.toc', args=[slug]) with override_switch('application_ACAO', True): resp = self.client.get(url) assert resp.status_code == 200 assert_shared_cache_header(resp) assert resp['Access-Control-Allow-Origin'] == '*' assert normalize_html(resp.content) == normalize_html( '<ol><li><a href="#Head_2" rel="internal">Head 2</a></ol>' ) @override_switch('application_ACAO', True) def test_children_view(self): """bug 875349""" test_content = '<p>Test <a href="http://example.com">Summary</a></p>' def _make_doc(title, slug, parent=None, is_redir=False): doc = document(title=title, slug=slug, save=True, 
    @override_switch('application_ACAO', True)
    def test_children_view(self):
        """bug 875349"""
        test_content = '<p>Test <a href="http://example.com">Summary</a></p>'

        def _make_doc(title, slug, parent=None, is_redir=False):
            doc = document(title=title, slug=slug, save=True,
                           is_redirect=is_redir)
            if is_redir:
                content = 'REDIRECT <a class="redirect" href="/en-US/blah">Blah</a>'
            else:
                content = test_content
                revision(document=doc, content=test_content,
                         summary=get_seo_description(test_content,
                                                     strip_markup=False),
                         save=True)
            doc.html = content
            if parent:
                doc.parent_topic = parent
            doc.save()
            return doc

        root_doc = _make_doc('Root', 'Root')
        child_doc_1 = _make_doc('Child 1', 'Root/Child_1', root_doc)
        _make_doc('Grandchild 1', 'Root/Child_1/Grandchild_1', child_doc_1)
        grandchild_doc_2 = _make_doc('Grandchild 2',
                                     'Root/Child_1/Grandchild_2',
                                     child_doc_1)
        _make_doc('Great Grandchild 1',
                  'Root/Child_1/Grandchild_2/Great_Grand_Child_1',
                  grandchild_doc_2)
        _make_doc('Child 2', 'Root/Child_2', root_doc)
        _make_doc('Child 3', 'Root/Child_3', root_doc, True)

        for expand in (True, False):
            url = reverse('wiki.children', args=['Root'])
            if expand:
                url = '%s?expand' % url
            resp = self.client.get(url)
            assert resp.status_code == 200
            assert_shared_cache_header(resp)
            assert resp['Access-Control-Allow-Origin'] == '*'
            json_obj = json.loads(resp.content)

            # Basic structure creation testing
            assert json_obj['slug'] == 'Root'
            if not expand:
                assert 'summary' not in json_obj
            else:
                assert (json_obj['summary'] ==
                        'Test <a href="http://example.com">Summary</a>')
                assert 'tags' in json_obj
                assert 'review_tags' in json_obj
            assert len(json_obj['subpages']) == 2
            assert len(json_obj['subpages'][0]['subpages']) == 2
            assert (json_obj['subpages'][0]['subpages'][1]['title'] ==
                    'Grandchild 2')

        # Depth parameter testing
        def _depth_test(depth, aught):
            url = (reverse('wiki.children', args=['Root']) +
                   '?depth=' + str(depth))
            resp = self.client.get(url)
            assert resp.status_code == 200
            assert_shared_cache_header(resp)
            assert resp['Access-Control-Allow-Origin'] == '*'
            json_obj = json.loads(resp.content)
            assert (len(json_obj['subpages'][0]['subpages'][1]['subpages']) ==
                    aught)

        _depth_test(2, 0)
        _depth_test(3, 1)
        _depth_test(6, 1)

        # Sorting test
        sort_root_doc = _make_doc('Sort Root', 'Sort_Root')
        _make_doc('B Child', 'Sort_Root/B_Child', sort_root_doc)
        _make_doc('A Child', 'Sort_Root/A_Child', sort_root_doc)
        resp = self.client.get(reverse('wiki.children', args=['Sort_Root']))
        assert resp.status_code == 200
        assert_shared_cache_header(resp)
        assert resp['Access-Control-Allow-Origin'] == '*'
        json_obj = json.loads(resp.content)
        assert json_obj['subpages'][0]['title'] == 'A Child'

        # Test that an error JSON is served when the document does not exist
        no_doc_url = reverse('wiki.children', args=['nonexistentDocument'])
        resp = self.client.get(no_doc_url)
        assert resp.status_code == 200
        assert_shared_cache_header(resp)
        assert resp['Access-Control-Allow-Origin'] == '*'
        assert (json.loads(resp.content) ==
                {'error': 'Document does not exist.'})

        # Test the error JSON when the document is a redirect
        _make_doc('Old Name', 'Old Name', is_redir=True)
        redirect_doc_url = reverse('wiki.children', args=['Old Name'])
        resp = self.client.get(redirect_doc_url)
        assert resp.status_code == 200
        assert_shared_cache_header(resp)
        assert resp['Access-Control-Allow-Origin'] == '*'
        assert json.loads(resp.content) == {'error': 'Document has moved.'}

    def test_summary_view(self):
        """The ?summary option should restrict document view to summary"""
        rev = revision(is_approved=True, save=True, content="""
            <p>Foo bar <a href="http://example.com">baz</a></p>
            <p>Quux xyzzy</p>
        """)
        resp = self.client.get('%s?raw&summary' %
                               rev.document.get_absolute_url())
        assert resp.status_code == 200
        assert_shared_cache_header(resp)
        assert resp.content == b'Foo bar <a href="http://example.com">baz</a>'
href="http://example.com">baz</a>' @mock.patch('waffle.flag_is_active', return_value=True) @mock.patch('kuma.wiki.jobs.DocumentContributorsJob.get', return_value=[ {'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'}, {'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'}, ]) def test_footer_contributors(self, get_contributors, flag_is_active): get_contributors.return_value = [ {'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'}, {'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'}, ] flag_is_active.return_value = True rev = revision(is_approved=True, save=True, content='some content') resp = self.client.get(rev.document.get_absolute_url()) assert resp.status_code == 200 assert_shared_cache_header(resp) page = pq(resp.content) contributors = (page.find(":contains('Contributors to this page')") .parents('.contributors-sub')) # just checking if the contributor link is rendered assert len(contributors.find('a')) == 2 def test_revision_view_bleached_content(self): """Bug 821988: Revision content should be cleaned with bleach""" rev = revision(is_approved=True, save=True, content=""" <a href="#" onload=alert(3)>Hahaha</a> <svg><svg onload=alert(3);> """) resp = self.client.get(rev.get_absolute_url()) page = pq(resp.content) ct = to_html(page.find('#wikiArticle')) assert '<svg>' not in ct assert '<a href="#">Hahaha</a>' in ct def test_article_revision_content(self): doc = document(title='Testing Article', slug='Article', save=True) r = revision(save=True, document=doc, is_approved=True) resp = self.client.get(r.get_absolute_url()) page = pq(resp.content) assert b'Revision Source' in resp.content assert b'Revision Content' in resp.content assert 'open' == page.find('#wikiArticle').parent().attr('open') assert page.find('#doc-source').parent().attr('open') is None class ReadOnlyTests(UserTestCase, WikiTestCase): """Tests readonly scenarios""" fixtures = UserTestCase.fixtures + ['wiki/documents.json'] def setUp(self): super(ReadOnlyTests, self).setUp() rev = revision(is_approved=True, save=True) self.edit_url = reverse('wiki.edit', args=[rev.document.slug]) def test_everyone(self): """ kumaediting: everyone, kumabanned: none """ self.kumaediting_flag.everyone = True self.kumaediting_flag.save() self.client.login(username='testuser', password='testpass') resp = self.client.get(self.edit_url) assert resp.status_code == 200 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) def test_superusers_only(self): """ kumaediting: superusers, kumabanned: none """ self.kumaediting_flag.everyone = None self.kumaediting_flag.superusers = True self.kumaediting_flag.save() self.client.login(username='testuser', password='testpass') resp = self.client.get(self.edit_url) assert resp.status_code == 403 assert b'The wiki is in read-only mode.' in resp.content assert_no_cache_header(resp) self.client.logout() self.client.login(username='admin', password='testpass') resp = self.client.get(self.edit_url) assert resp.status_code == 200 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) class KumascriptIntegrationTests(UserTestCase, WikiTestCase): """ Tests for usage of the kumascript service. Note that these tests really just check whether or not the service was used, and are not integration tests meant to exercise the real service. 
""" def setUp(self): super(KumascriptIntegrationTests, self).setUp() self.rev = revision(is_approved=True, save=True, content="TEST CONTENT") self.doc = self.rev.document self.doc.tags.set('foo', 'bar', 'baz') self.url = self.doc.get_absolute_url() # TODO: upgrade mock to 0.8.0 so we can do this. # self.mock_kumascript_get = ( # mock.patch('kuma.wiki.kumascript.get')) # self.mock_kumascript_get.return_value = self.doc.html def tearDown(self): super(KumascriptIntegrationTests, self).tearDown() # TODO: upgrade mock to 0.8.0 so we can do this. # self.mock_kumascript_get.stop() @override_config(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_basic_view(self, mock_kumascript_get): """When kumascript timeout is non-zero, the service should be used""" mock_kumascript_get.return_value = (self.doc.html, None) self.client.get(self.url, follow=False) assert mock_kumascript_get.called, "kumascript should have been used" @override_config(KUMASCRIPT_TIMEOUT=0.0) @mock.patch('kuma.wiki.kumascript.get') def test_disabled(self, mock_kumascript_get): """When disabled, the kumascript service should not be used""" mock_kumascript_get.return_value = (self.doc.html, None) self.client.get(self.url, follow=False) assert not mock_kumascript_get.called, "kumascript should not have been used" @override_config(KUMASCRIPT_TIMEOUT=0.0) @mock.patch('kuma.wiki.kumascript.get') def test_disabled_rendering(self, mock_kumascript_get): """When disabled, the kumascript service should not be used in rendering""" mock_kumascript_get.return_value = (self.doc.html, None) self.doc.schedule_rendering('max-age=0') assert not mock_kumascript_get.called, "kumascript should not have been used" @override_config(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_nomacros(self, mock_kumascript_get): mock_kumascript_get.return_value = (self.doc.html, None) self.client.get('%s?nomacros' % self.url, follow=False) assert not mock_kumascript_get.called, "kumascript should not have been used" @override_config(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_raw(self, mock_kumascript_get): mock_kumascript_get.return_value = (self.doc.html, None) self.client.get('%s?raw' % self.url, follow=False) assert not mock_kumascript_get.called, "kumascript should not have been used" @override_config(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_raw_macros(self, mock_kumascript_get): mock_kumascript_get.return_value = (self.doc.html, None) self.client.get('%s?raw&macros' % self.url, follow=False) assert mock_kumascript_get.called, "kumascript should have been used" @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600) @requests_mock.mock() def test_preview_nonascii(self, mock_requests): """POSTing non-ascii to kumascript should encode to utf8""" content = u'Français' mock_requests.post(requests_mock.ANY, content=content.encode('utf8')) self.client.login(username='admin', password='testpass') resp = self.client.post(reverse('wiki.preview'), {'content': content}) assert_no_cache_header(resp) # No UnicodeDecodeError mock_requests.request_history[0].body.decode('utf8') @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600) @mock.patch('kuma.wiki.kumascript.post') def test_dont_render_previews_for_deferred_docs(self, mock_post): """ When a user previews a document with deferred rendering, we want to force the preview to skip the kumascript POST, so that big previews can't use up too many kumascript connections. 
    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_basic_view(self, mock_kumascript_get):
        """When kumascript timeout is non-zero, the service should be used"""
        mock_kumascript_get.return_value = (self.doc.html, None)
        self.client.get(self.url, follow=False)
        assert mock_kumascript_get.called, "kumascript should have been used"

    @override_config(KUMASCRIPT_TIMEOUT=0.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_disabled(self, mock_kumascript_get):
        """When disabled, the kumascript service should not be used"""
        mock_kumascript_get.return_value = (self.doc.html, None)
        self.client.get(self.url, follow=False)
        assert not mock_kumascript_get.called, \
            "kumascript should not have been used"

    @override_config(KUMASCRIPT_TIMEOUT=0.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_disabled_rendering(self, mock_kumascript_get):
        """When disabled, the kumascript service should not be used in
        rendering"""
        mock_kumascript_get.return_value = (self.doc.html, None)
        self.doc.schedule_rendering('max-age=0')
        assert not mock_kumascript_get.called, \
            "kumascript should not have been used"

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_nomacros(self, mock_kumascript_get):
        mock_kumascript_get.return_value = (self.doc.html, None)
        self.client.get('%s?nomacros' % self.url, follow=False)
        assert not mock_kumascript_get.called, \
            "kumascript should not have been used"

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw(self, mock_kumascript_get):
        mock_kumascript_get.return_value = (self.doc.html, None)
        self.client.get('%s?raw' % self.url, follow=False)
        assert not mock_kumascript_get.called, \
            "kumascript should not have been used"

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw_macros(self, mock_kumascript_get):
        mock_kumascript_get.return_value = (self.doc.html, None)
        self.client.get('%s?raw&macros' % self.url, follow=False)
        assert mock_kumascript_get.called, "kumascript should have been used"

    @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600)
    @requests_mock.mock()
    def test_preview_nonascii(self, mock_requests):
        """POSTing non-ascii to kumascript should encode to utf8"""
        content = u'Français'
        mock_requests.post(requests_mock.ANY, content=content.encode('utf8'))

        self.client.login(username='admin', password='testpass')
        resp = self.client.post(reverse('wiki.preview'), {'content': content})
        assert_no_cache_header(resp)
        # No UnicodeDecodeError
        mock_requests.request_history[0].body.decode('utf8')

    @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600)
    @mock.patch('kuma.wiki.kumascript.post')
    def test_dont_render_previews_for_deferred_docs(self, mock_post):
        """
        When a user previews a document with deferred rendering, we want to
        force the preview to skip the kumascript POST, so that big previews
        can't use up too many kumascript connections.

        bug 1197971
        """
        self.doc.defer_rendering = True
        self.doc.save()
        mock_post.side_effect = Exception("Should not be called")

        self.client.login(username='admin', password='testpass')
        resp = self.client.post(reverse('wiki.preview'),
                                {'doc_id': self.doc.id})
        assert_no_cache_header(resp)
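# A minimal sketch (inferred from the tests above, not taken from the view
# code) of the gate they exercise: kumascript is only consulted when the
# configured timeout is non-zero and macros are not suppressed, roughly
#
#   if config.KUMASCRIPT_TIMEOUT > 0 and not nomacros:
#       html, errors = kumascript.get(document, ...)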
' <a href="blah">A link</a> is also &lt;cool&gt;</p>', u'I am awesome. A link is also cool') @pytest.mark.parametrize('content,expected', [ ('<div onclick="alert(\'hacked!\')">click me</div>', '<div>click me</div>'), ('<svg><circle onload=confirm(3)>', '&lt;svg&gt;&lt;circle onload="confirm(3)"&gt;&lt;/circle&gt;&lt;/svg&gt;') ], ids=('strip', 'escape')) def test_editor_safety(root_doc, editor_client, content, expected): """ When editing or translating, the content should already have been bleached, so for example, any harmful on* attributes stripped or escaped (see bug 821986). """ rev = root_doc.current_revision rev.content = content rev.save() args = (root_doc.slug,) urls = ( reverse('wiki.edit', args=args), '%s?tolocale=%s' % (reverse('wiki.translate', args=args), 'fr') ) for url in urls: response = editor_client.get(url) assert response.status_code == 200 page = pq(response.content) editor_src = page.find('#id_content').text() assert editor_src == expected class DocumentEditingTests(UserTestCase, WikiTestCase): """Tests for the document-editing view""" def test_create_on_404(self): self.client.login(username='admin', password='testpass') # Create the parent page. rev = revision(is_approved=True, save=True) # Establish attribs of child page. local_slug = 'Some_New_Title' slug = '%s/%s' % (rev.document.slug, local_slug) url = reverse('wiki.document', args=[slug]) # Ensure redirect to create new page on attempt to visit non-existent # child page. resp = self.client.get(url) assert resp.status_code == 302 assert_no_cache_header(resp) assert 'public' not in resp['Cache-Control'] assert 's-maxage' not in resp['Cache-Control'] assert 'docs/new' in resp['Location'] assert ('slug=%s' % local_slug) in resp['Location'] # Ensure real 404 for visit to non-existent page with params common to # kumascript and raw content API. for p_name in ('raw', 'include', 'nocreate'): sub_url = '%s?%s=1' % (url, p_name) resp = self.client.get(sub_url) assert resp.status_code == 404 # Ensure root level documents work, not just children response = self.client.get(reverse('wiki.document', args=['noExist'])) assert response.status_code == 302 assert 'public' not in response['Cache-Control'] assert 'no-cache' in resp['Cache-Control'] assert 'docs/new' in response['Location'] response = self.client.get(reverse('wiki.document', args=['Template:NoExist'])) assert response.status_code == 302 assert 'public' not in response['Cache-Control'] assert 'no-cache' in resp['Cache-Control'] assert 'docs/new' in response['Location'] def test_creating_child_of_redirect(self): """ While try to create a child of a redirect, the parent of the child should be redirect's parent. 
""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True) doc = rev.document doc_first_slug = doc.slug # Move the document to new slug doc._move_tree(new_slug="moved_doc") # Try to create a child with the old slug child_full_slug = doc_first_slug + "/" + "children_document" url = reverse('wiki.document', args=[child_full_slug]) response = self.client.get(url) assert response.status_code == 302 assert 'public' not in response['Cache-Control'] assert 'no-cache' in response['Cache-Control'] assert 'docs/new' in response['Location'] # The parent id of the query should be same because while moving, # a new document is created with old slug and make redirect to the # old document parameters = parse_qs(urlparse(response['Location']).query) assert parameters['parent'][0] == str(doc.id) def test_child_of_redirect_to_non_document(self): """Return a 404 when accessing the child of a non-document redirect.""" self.client.login(username='admin', password='testpass') content = '<p>REDIRECT <a class="redirect" href="/">MDN</a></p>' rev = revision(content=content, is_approved=True, save=True) doc = rev.document assert doc.is_redirect assert doc.get_redirect_url() == '/' assert doc.get_redirect_document() is None doc_url = doc.get_absolute_url() response = self.client.get(doc_url) assert response.status_code == 301 assert response['Location'] == '/' subpage_url = doc_url + '/SubPage' response = self.client.get(subpage_url) assert response.status_code == 404 @pytest.mark.retitle def test_retitling_solo_doc(self): """ Editing just title of non-parent doc: * Changes title * Doesn't cause errors * Doesn't create redirect """ # Not testing slug changes separately; the model tests cover those plus # slug+title changes. If title changes work in the view, the rest # should also. self.client.login(username='admin', password='testpass') new_title = 'Some New Title' rev = revision(is_approved=True, save=True) doc = rev.document old_title = doc.title data = new_document_data() data.update({'title': new_title, 'form-type': 'rev'}) data['slug'] = '' url = reverse('wiki.edit', args=[doc.slug]) response = self.client.post(url, data) assert response.status_code == 302 assert response['X-Robots-Tag'] == 'noindex' assert_no_cache_header(response) assert (Document.objects.get(slug=doc.slug, locale=doc.locale).title == new_title) assert not Document.objects.filter(title=old_title).exists() @pytest.mark.retitle def test_retitling_parent_doc(self): """ Editing just title of parent doc: * Changes title * Doesn't cause errors * Doesn't create redirect """ # Not testing slug changes separately; the model tests cover those plus # slug+title changes. If title changes work in the view, the rest # should also. 
    @pytest.mark.retitle
    def test_retitling_parent_doc(self):
        """
        Editing just the title of a parent doc:
        * Changes the title
        * Doesn't cause errors
        * Doesn't create a redirect
        """
        # Not testing slug changes separately; the model tests cover those
        # plus slug+title changes. If title changes work in the view, the
        # rest should also.
        self.client.login(username='admin', password='testpass')

        # create parent doc & rev along with child doc & rev
        d = document(title='parent', save=True)
        revision(document=d, content='parent', save=True)
        d2 = document(title='child', parent_topic=d, save=True)
        revision(document=d2, content='child', save=True)

        old_title = d.title
        new_title = 'Some New Title'
        data = new_document_data()
        data.update({'title': new_title, 'form-type': 'rev'})
        data['slug'] = ''
        url = reverse('wiki.edit', args=[d.slug])
        response = self.client.post(url, data)
        assert response.status_code == 302
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)
        assert (Document.objects.get(slug=d.slug,
                                     locale=d.locale).title == new_title)
        assert not Document.objects.filter(title=old_title).exists()

    def test_slug_change_ignored_for_iframe(self):
        """When the slug of an article is edited in an iframe, the change is
        ignored."""
        self.client.login(username='admin', password='testpass')
        new_slug = 'some_new_slug'
        rev = revision(is_approved=True, save=True)
        old_slug = rev.document.slug
        data = new_document_data()
        data.update({'title': rev.document.title,
                     'slug': new_slug,
                     'form': 'rev'})
        response = self.client.post('%s?iframe=1' %
                                    reverse('wiki.edit',
                                            args=[rev.document.slug]),
                                    data)
        assert response.status_code == 200
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)
        assert (Document.objects.get(slug=rev.document.slug,
                                     locale=rev.document.locale).slug ==
                old_slug)
        assert "REDIRECT" not in Document.objects.get(slug=old_slug).html

    @pytest.mark.clobber
    def test_slug_collision_errors(self):
        """When an attempt is made to retitle an article and another with
        that title already exists, there should be form errors"""
        self.client.login(username='admin', password='testpass')

        exist_slug = "existing-doc"

        # Create a new doc.
        data = new_document_data()
        data.update({"slug": exist_slug})
        resp = self.client.post(reverse('wiki.create'), data)
        assert resp.status_code == 302

        # Create another new doc.
        data = new_document_data()
        data.update({"slug": 'some-new-title'})
        resp = self.client.post(reverse('wiki.create'), data)
        assert resp.status_code == 302

        # Now, post an update with a duplicate slug
        data.update({
            'form-type': 'rev',
            'slug': exist_slug
        })
        resp = self.client.post(reverse('wiki.edit',
                                        args=['some-new-title']), data)
        assert resp.status_code == 200
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)
        p = pq(resp.content)
        assert p.find('.errorlist').length > 0
        assert p.find('.errorlist a[href="#id_slug"]').length > 0

    @pytest.mark.clobber
    def test_redirect_can_be_clobbered(self):
        """When an attempt is made to retitle an article, and another article
        with that title exists but is a redirect, there should be no errors
        and the redirect should be replaced."""
        self.client.login(username='admin', password='testpass')

        exist_title = "Existing doc"
        exist_slug = "existing-doc"

        changed_title = 'Changed title'
        changed_slug = 'changed-title'

        # Create a new doc.
        data = new_document_data()
        data.update({"title": exist_title, "slug": exist_slug})
        resp = self.client.post(reverse('wiki.create'), data)
        assert resp.status_code == 302

        # Change the title and slug
        data.update({'form-type': 'rev',
                     'title': changed_title,
                     'slug': changed_slug})
        resp = self.client.post(reverse('wiki.edit', args=[exist_slug]),
                                data)
        assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)

        # Change the title and slug back to the originals, clobbering the
        # redirect
        data.update({'form-type': 'rev',
                     'title': exist_title,
                     'slug': exist_slug})
        resp = self.client.post(reverse('wiki.edit', args=[changed_slug]),
                                data)
        assert resp.status_code == 302
    def test_slug_revamp(self):
        self.client.login(username='admin', password='testpass')

        # Test that slugs with the same "specific" slug but at different
        # levels in the hierarchy are validated properly upon submission.

        # Create a base doc
        parent_doc = document(title='Length', slug='length',
                              is_localizable=True,
                              locale=settings.WIKI_DEFAULT_LANGUAGE)
        parent_doc.save()
        r = revision(document=parent_doc)
        r.save()

        # Create a child; trying to use the same slug should work
        child_data = new_document_data()
        child_data['title'] = 'Child Length'
        child_data['slug'] = 'length'
        child_data['content'] = 'This is the content'
        child_data['is_localizable'] = True
        child_url = (reverse('wiki.create') + '?parent=' +
                     str(parent_doc.id))
        response = self.client.post(child_url, child_data)
        assert response.status_code == 302
        # grab the new revision ID
        child = Document.objects.get(locale='en-US', slug='length/length')
        rev_id = child.current_revision.id
        self.assertRedirects(response,
                             reverse('wiki.document',
                                     args=['length/length']))

        # Editing the newly created child "length/length" doesn't cause
        # errors
        child_data['form-type'] = 'rev'
        child_data['slug'] = ''
        edit_url = reverse('wiki.edit', args=['length/length'])
        response = self.client.post(edit_url, child_data)
        assert response.status_code == 302
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)
        url = reverse('wiki.document', args=['length/length'])
        params = {'rev_saved': rev_id}
        url = '%s?%s' % (url, urlencode(params))
        self.assertRedirects(response, url)

        # Creating a new translation of the parent and child, named "length"
        # and "length/length" respectively, doesn't cause errors
        child_data['form-type'] = 'both'
        child_data['slug'] = 'length'
        translate_url = reverse('wiki.document', args=[child_data['slug']])
        response = self.client.post(translate_url + '$translate?tolocale=es',
                                    child_data)
        assert 302 == response.status_code
        url = reverse('wiki.document', args=[child_data['slug']],
                      locale='es')
        params = {'rev_saved': ''}
        url = '%s?%s' % (url, urlencode(params))
        self.assertRedirects(response, url)

        translate_url = reverse('wiki.document', args=['length/length'])
        response = self.client.post(translate_url + '$translate?tolocale=es',
                                    child_data)
        assert 302 == response.status_code
        slug = 'length/' + child_data['slug']
        url = reverse('wiki.document', args=[slug], locale='es')
        params = {'rev_saved': ''}
        url = '%s?%s' % (url, urlencode(params))
        self.assertRedirects(response, url)

    def test_translate_keeps_topical_parent(self):
        self.client.login(username='admin', password='testpass')
        en_doc, de_doc = make_translation()

        en_child_doc = document(parent_topic=en_doc, slug='en-child',
                                save=True)
        en_child_rev = revision(document=en_child_doc, save=True)
        de_child_doc = document(parent_topic=de_doc, locale='de',
                                slug='de-child', parent=en_child_doc,
                                save=True)
        revision(document=de_child_doc, save=True)

        post_data = {}
        post_data['slug'] = de_child_doc.slug
        post_data['title'] = 'New title'
        post_data['form'] = 'both'
        post_data['content'] = 'New translation'
        post_data['tolocale'] = 'de'
        post_data['toc_depth'] = 0
        post_data['based_on'] = en_child_rev.id
        post_data['parent_id'] = en_child_doc.id

        translate_url = reverse('wiki.edit', args=[de_child_doc.slug],
                                locale='de')
        response = self.client.post(translate_url, post_data)
        assert response.status_code == 302
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)

        de_child_doc = Document.objects.get(locale='de', slug='de-child')
        assert en_child_doc == de_child_doc.parent
        assert de_doc == de_child_doc.parent_topic
        assert 'New translation' == de_child_doc.current_revision.content
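    # Note on the posted 'form'/'form-type' values used throughout these
    # tests ('rev', 'both'): they select which sub-form the edit/translate
    # view processes; 'both' appears to submit document metadata and revision
    # content together (inferred from usage here, not from the view code).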
    def test_translate_keeps_toc_depth(self):
        self.client.login(username='admin', password='testpass')

        locale = settings.WIKI_DEFAULT_LANGUAGE
        original_slug = 'eng-doc'
        foreign_locale = 'es'
        foreign_slug = 'es-doc'

        en_doc = document(title='Eng Doc', slug=original_slug,
                          is_localizable=True, locale=locale)
        en_doc.save()
        r = revision(document=en_doc, toc_depth=1)
        r.save()

        post_data = new_document_data()
        post_data['title'] = 'ES Doc'
        post_data['slug'] = foreign_slug
        post_data['content'] = 'This is the content'
        post_data['is_localizable'] = True
        post_data['form'] = 'both'
        post_data['toc_depth'] = r.toc_depth
        translate_url = reverse('wiki.document', args=[original_slug])
        translate_url += '$translate?tolocale=' + foreign_locale
        response = self.client.post(translate_url, post_data)

        doc_url = reverse('wiki.document', args=[foreign_slug],
                          locale=foreign_locale)
        params = {'rev_saved': ''}
        doc_url = '%s?%s' % (doc_url, urlencode(params))
        self.assertRedirects(response, doc_url)

        es_d = Document.objects.get(locale=foreign_locale,
                                    slug=foreign_slug)
        assert r.toc_depth == es_d.current_revision.toc_depth

    def test_translate_rebuilds_source_json(self):
        self.client.login(username='admin', password='testpass')
        # Create an English original and a Spanish translation.
        en_slug = 'en-doc'
        es_locale = 'es'
        es_slug = 'es-doc'
        en_doc = document(title='EN Doc', slug=en_slug, is_localizable=True)
        en_doc.save()
        en_doc.render()

        en_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                      slug=en_slug)
        json.loads(en_doc.json)

        r = revision(document=en_doc)
        r.save()
        translation_data = new_document_data()
        translation_data['title'] = 'ES Doc'
        translation_data['slug'] = es_slug
        translation_data['content'] = 'This is the content'
        translation_data['is_localizable'] = False
        translation_data['form'] = 'both'
        translate_url = reverse('wiki.document', args=[en_slug])
        translate_url += '$translate?tolocale=' + es_locale
        response = self.client.post(translate_url, translation_data)

        # Sanity check to make sure the translate succeeded.
        doc_url = reverse('wiki.document', args=[es_slug], locale=es_locale)
        params = {'rev_saved': ''}
        doc_url = '%s?%s' % (doc_url, urlencode(params))
        self.assertRedirects(response, doc_url)

        es_doc = Document.objects.get(locale=es_locale, slug=es_slug)
        es_doc.render()
        new_en_json = json.loads(Document.objects.get(pk=en_doc.pk).json)

        assert 'translations' in new_en_json
        assert (translation_data['title'] in
                [t['title'] for t in new_en_json['translations']])
        es_translation_json = [t for t in new_en_json['translations'] if
                               t['title'] == translation_data['title']][0]
        assert (es_translation_json['last_edit'] ==
                es_doc.current_revision.created.isoformat())
    def test_slug_translate(self):
        """Editing a translated doc keeps the correct slug"""
        self.client.login(username='admin', password='testpass')

        # Settings
        original_slug = 'eng-doc'
        child_slug = 'child-eng-doc'
        foreign_locale = 'es'
        foreign_slug = 'es-doc'
        foreign_child_slug = 'child-es-doc'

        # Create the one-level English Doc
        en_doc = document(title='Eng Doc', slug=original_slug,
                          is_localizable=True)
        en_doc.save()
        r = revision(document=en_doc)
        r.save()

        # Translate to ES
        parent_data = new_document_data()
        parent_data['title'] = 'ES Doc'
        parent_data['slug'] = foreign_slug
        parent_data['content'] = 'This is the content'
        parent_data['is_localizable'] = True
        parent_data['form'] = 'both'
        translate_url = reverse('wiki.document', args=[original_slug])
        translate_url += '$translate?tolocale=' + foreign_locale
        response = self.client.post(translate_url, parent_data)

        doc_url = reverse('wiki.document', args=[foreign_slug],
                          locale=foreign_locale)
        params = {'rev_saved': ''}
        doc_url = '%s?%s' % (doc_url, urlencode(params))
        self.assertRedirects(response, doc_url)

        # Go to edit the translation, ensure the slug is correct
        response = self.client.get(reverse('wiki.edit',
                                           args=[foreign_slug],
                                           locale=foreign_locale))
        page = pq(response.content)
        assert page.find('input[name=slug]')[0].value == foreign_slug

        # Create an English child now
        en_doc = document(title='Child Eng Doc',
                          slug=original_slug + '/' + child_slug,
                          is_localizable=True,
                          locale=settings.WIKI_DEFAULT_LANGUAGE,
                          parent_topic=en_doc)
        en_doc.save()
        r = revision(document=en_doc)
        r.save()

        # Translate to ES
        child_data = new_document_data()
        child_data['title'] = 'ES Child Doc'
        child_data['slug'] = foreign_child_slug
        child_data['content'] = 'This is the content'
        child_data['is_localizable'] = True
        child_data['form'] = 'both'
        translate_url = reverse('wiki.document',
                                args=[original_slug + '/' + child_slug])
        translate_url += '$translate?tolocale=' + foreign_locale
        response = self.client.post(translate_url, child_data)

        slug = foreign_slug + '/' + child_data['slug']
        doc_url = reverse('wiki.document', args=[slug],
                          locale=foreign_locale)
        params = {'rev_saved': ''}
        doc_url = '%s?%s' % (doc_url, urlencode(params))
        self.assertRedirects(response, doc_url)

    def test_restore_translation_source(self):
        """Editing a localized article without an English parent allows the
        user to set the translation parent."""
        # Create an English doc
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        self.client.post(reverse('wiki.create'), data)
        en_d = Document.objects.get(locale=data['locale'],
                                    slug=data['slug'])

        # Create a French doc
        data.update({'locale': 'fr',
                     'title': 'A Tést Articlé',
                     'content': "C'ést bon."})
        self.client.post(reverse('wiki.create', locale='fr'), data)
        fr_d = Document.objects.get(locale=data['locale'],
                                    slug=data['slug'])

        # Check the edit doc page for the choose-parent box
        url = reverse('wiki.edit', args=[fr_d.slug], locale='fr')
        response = self.client.get(url)
        assert response.status_code == 200
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)
        assert pq(response.content)('li.metadata-choose-parent')

        # Set the parent
        data.update({'form-type': 'rev', 'parent_id': en_d.id})
        resp = self.client.post(url, data)
        assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)
        assert 'fr/docs/a-test-article' in resp['Location']

        # Check the languages drop-down
        resp = self.client.get(resp['Location'])
        translations = pq(resp.content)('ul#translations li')
        assert 'English (US)' in translations.text()
    def test_translation_source(self):
        """Allow users to change the "translation source" setting"""
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        self.client.post(reverse('wiki.create'), data)
        parent = Document.objects.get(locale=data['locale'],
                                      slug=data['slug'])

        data.update({'title': 'Another Test Article',
                     'content': "Yahoooo!",
                     'parent_id': parent.id})
        self.client.post(reverse('wiki.create'), data)
        child = Document.objects.get(locale=data['locale'],
                                     slug=data['slug'])

        url = reverse('wiki.edit', args=[child.slug])
        response = self.client.get(url)
        assert response.status_code == 200
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)
        content = pq(response.content)
        assert content('li.metadata-choose-parent')
        assert str(parent.id) in to_html(content)

    @pytest.mark.tags
    def test_tags_while_document_update(self):
        self.client.login(username='admin', password='testpass')

        ts1 = ('JavaScript', 'AJAX', 'DOM')
        ts2 = ('XML', 'JSON')

        # Create a revision with some tags
        rev = revision(save=True, tags=','.join(ts1))
        doc = rev.document

        # Update the document with some other tags
        data = new_document_data()
        data.update({'form-type': 'rev', 'tags': ', '.join(ts2)})
        response = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                    data)
        assert response.status_code == 302
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)

        # Check that only the tags from the last edit remain on the document
        doc_tags = doc.tags.all().values_list('name', flat=True)
        assert sorted(doc_tags) == sorted(ts2)

    @pytest.mark.tags
    def test_tags_showing_correctly_after_doc_update(self):
        """After any update to the document, the new tags should show
        correctly"""
        self.client.login(username='admin', password='testpass')

        ts1 = ('JavaScript', 'AJAX', 'DOM')
        ts2 = ('XML', 'JSON')

        rev = revision(save=True, tags=','.join(ts1))
        doc = rev.document

        # Update the document with some other tags
        data = new_document_data()
        del data['slug']
        data.update({'form-type': 'rev', 'tags': ', '.join(ts2)})
        response = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                    data)
        assert response.status_code == 302
        assert response['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(response)

        # Check that the document is showing the new tags
        response = self.client.get(doc.get_absolute_url(), follow=True)
        assert response.status_code == 200
        page = pq(response.content)
        response_tags = page.find('.tags li a').contents()
        assert response_tags == sorted(ts2)
    @pytest.mark.review_tags
    @mock.patch.object(Site.objects, 'get_current')
    def test_review_tags(self, get_current):
        """Review tags can be managed on document revisions"""
        get_current.return_value.domain = 'su.mo.com'
        self.client.login(username='admin', password='testpass')

        # Create a new doc with one review tag
        data = new_document_data()
        data.update({'review_tags': ['technical']})
        response = self.client.post(reverse('wiki.create'), data)
        assert response.status_code == 302

        # Ensure there's now a doc with that expected tag in its newest
        # revision
        doc = Document.objects.get(slug="a-test-article")
        rev = doc.revisions.order_by('-id').all()[0]
        review_tags = [x.name for x in rev.review_tags.all()]
        assert review_tags == ['technical']

        # Now, post an update with two tags
        data.update({
            'form-type': 'rev',
            'review_tags': ['editorial', 'technical'],
        })
        response = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                    data)
        assert response.status_code == 302
        assert_no_cache_header(response)

        # Ensure the doc's newest revision has both tags.
        doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug="a-test-article")
        rev = doc.revisions.order_by('-id').all()[0]
        review_tags = [x.name for x in rev.review_tags.all()]
        review_tags.sort()
        assert review_tags == ['editorial', 'technical']

        # Now, ensure that the review form appears for the review tags.
        response = self.client.get(reverse('wiki.document',
                                           args=[doc.slug]), data)
        assert response.status_code == 200
        # Since the client is logged-in, the response should not be cached.
        assert_no_cache_header(response)
        page = pq(response.content)
        assert page.find('.page-meta.reviews').length == 1
        assert page.find('#id_request_technical').length == 1
        assert page.find('#id_request_editorial').length == 1

        doc_entry = '<entry><title>{}</title>'.format(doc.title)
        doc_selector = "ul.document-list li a:contains('{}')".format(doc.title)

        # Ensure the page appears on the listing pages
        response = self.client.get(reverse('wiki.list_review'))
        assert response.status_code == 200
        assert_shared_cache_header(response)
        assert pq(response.content).find(doc_selector).length == 1
        response = self.client.get(reverse('wiki.list_review_tag',
                                           args=('technical',)))
        assert response.status_code == 200
        assert_shared_cache_header(response)
        assert pq(response.content).find(doc_selector).length == 1
        response = self.client.get(reverse('wiki.list_review_tag',
                                           args=('editorial',)))
        assert response.status_code == 200
        assert_shared_cache_header(response)
        assert pq(response.content).find(doc_selector).length == 1

        # Also, ensure that the page appears in the proper feeds
        # HACK: Too lazy to parse the XML. Lazy lazy.
        response = self.client.get(reverse('wiki.feeds.list_review',
                                           args=('atom',)))
        assert doc_entry.encode('utf-8') in response.content
        response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                           args=('atom', 'technical', )))
        assert doc_entry.encode('utf-8') in response.content
        response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                           args=('atom', 'editorial', )))
        assert doc_entry.encode('utf-8') in response.content

        # Post an edit that removes the technical review tag.
        data.update({
            'form-type': 'rev',
            'review_tags': ['editorial', ]
        })
        response = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                    data)

        # Ensure only one of the tags' warning boxes appears, now.
        response = self.client.get(reverse('wiki.document',
                                           args=[doc.slug]), data)
        page = pq(response.content)
        assert page.find('.page-meta.reviews').length == 1
        assert page.find('#id_request_technical').length == 0
        assert page.find('#id_request_editorial').length == 1

        # Ensure the page appears on the listing pages
        response = self.client.get(reverse('wiki.list_review'))
        assert response.status_code == 200
        assert_shared_cache_header(response)
        assert pq(response.content).find(doc_selector).length == 1
        response = self.client.get(reverse('wiki.list_review_tag',
                                           args=('technical',)))
        assert response.status_code == 200
        assert_shared_cache_header(response)
        assert pq(response.content).find(doc_selector).length == 0
        response = self.client.get(reverse('wiki.list_review_tag',
                                           args=('editorial',)))
        assert response.status_code == 200
        assert_shared_cache_header(response)
        assert pq(response.content).find(doc_selector).length == 1

        # Also, ensure that the page appears in the proper feeds
        # HACK: Too lazy to parse the XML. Lazy lazy.
        # (doc_entry is encoded for the bytes comparison, matching the feed
        # checks above.)
        response = self.client.get(reverse('wiki.feeds.list_review',
                                           args=('atom',)))
        assert doc_entry.encode('utf-8') in response.content
        response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                           args=('atom', 'technical', )))
        assert doc_entry.encode('utf-8') not in response.content
        response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                           args=('atom', 'editorial', )))
        assert doc_entry.encode('utf-8') in response.content
    @pytest.mark.review_tags
    def test_quick_review(self):
        """Test the quick-review button."""
        self.client.login(username='admin', password='testpass')

        test_data = [
            {
                'params': {'request_technical': 1},
                'expected_tags': ['technical'],
                'name': 'technical',
                'message_contains': [
                    'Editorial review completed.',
                ]
            },
            {
                'params': {'request_editorial': 1},
                'expected_tags': ['editorial'],
                'name': 'editorial',
                'message_contains': [
                    'Technical review completed.',
                ]
            },
            {
                'params': {},
                'expected_tags': [],
                'name': 'editorial-technical',
                'message_contains': [
                    'Technical review completed.',
                    'Editorial review completed.',
                ]
            }
        ]

        for data_dict in test_data:
            slug = 'test-quick-review-%s' % data_dict['name']
            data = new_document_data()
            data.update({'review_tags': ['editorial', 'technical'],
                         'slug': slug})
            resp = self.client.post(reverse('wiki.create'), data)

            doc = Document.objects.get(slug=slug)
            rev = doc.revisions.order_by('-id').all()[0]
            review_url = reverse('wiki.quick_review', args=[doc.slug])

            params = dict(data_dict['params'], revision_id=rev.id)
            resp = self.client.post(review_url, params)
            assert resp.status_code == 302
            assert_no_cache_header(resp)

            doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                       slug=slug)
            rev = doc.revisions.order_by('-id').all()[0]
            review_tags = [x.name for x in rev.review_tags.all()]
            review_tags.sort()
            for expected_str in data_dict['message_contains']:
                assert expected_str in rev.summary
                assert expected_str in rev.comment
            assert review_tags == data_dict['expected_tags']
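    # A minimal sketch (assumed form, not the actual view logic) of the
    # mid-air collision check the next tests exercise: the edit form carries
    # the revision id it was loaded against, and the save is rejected when
    # the document has moved on in the meantime, e.g.
    #
    #   def is_midair_collision(document, posted_current_rev):
    #       return str(document.current_revision.id) != str(posted_current_rev)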
    @pytest.mark.midair
    def test_edit_midair_collisions(self, is_ajax=False,
                                    translate_locale=None):
        """Tests midair collisions for non-ajax submissions."""
        self.client.login(username='admin', password='testpass')

        # Post a new document.
        data = new_document_data()
        resp = self.client.post(reverse('wiki.create'), data)
        doc = Document.objects.get(slug=data['slug'])
        # This is the url to post new revisions for the rest of this test
        posting_url = reverse('wiki.edit', args=[doc.slug])

        # Edit #1 starts...
        resp = self.client.get(reverse('wiki.edit', args=[doc.slug]))
        page = pq(resp.content)
        rev_id1 = page.find('input[name="current_rev"]').attr('value')

        # Edit #2 starts...
        resp = self.client.get(reverse('wiki.edit', args=[doc.slug]))
        page = pq(resp.content)
        rev_id2 = page.find('input[name="current_rev"]').attr('value')

        # Update data for the POST we are about to attempt
        data.update({
            'form-type': 'rev',
            'content': 'This edit got there first',
            'current_rev': rev_id2
        })

        # If this is a translation test, then create a translation and a
        # revision on it. Then update data.
        if translate_locale:
            translation = document(parent=doc, locale=translate_locale,
                                   save=True)
            translation_rev = revision(
                document=translation,
                based_on=translation.parent.current_or_latest_revision(),
                save=True
            )
            rev_id1 = rev_id2 = translation_rev.id
            posting_url = reverse(
                'wiki.edit',
                args=[translation_rev.document.slug],
                locale=translate_locale
            )
            data.update({
                'title': translation.title,
                'locale': translation.locale,
                'slug': translation.slug,
                'current_rev': rev_id2
            })

        # Edit #2 submits successfully
        if is_ajax:
            resp = self.client.post(
                posting_url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
            )
            assert resp.status_code == 200
            assert not json.loads(resp.content)['error']
        else:
            resp = self.client.post(posting_url, data)
            assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)

        # Edit #1 submits, but receives a mid-air collision notification
        data.update({
            'form-type': 'rev',
            'content': 'This edit gets mid-aired',
            'current_rev': rev_id1
        })
        if is_ajax:
            resp = self.client.post(
                posting_url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
            )
        else:
            resp = self.client.post(posting_url, data)

        # The url of the document's history
        locale = translate_locale if translate_locale else doc.locale
        doc_path = translation.slug if translate_locale else doc.slug
        history_url = reverse(
            'wiki.document_revisions',
            kwargs={'document_path': doc_path}, locale=locale
        )

        # The mid-air collision error, with the document's history url
        midair_collision_error = (unicode(MIDAIR_COLLISION) %
                                  {'url': history_url}).encode('utf-8')

        if is_ajax:
            location_of_error = json.loads(resp.content)['error_message']
        else:
            # If this is not an ajax post, then the error comes back in
            # escaped html. We unescape the resp.content, but not all of it,
            # since that causes ascii errors.
            start_of_error = resp.content.index(
                midair_collision_error[0:20])
            # Add some extra characters to the end, since the unescaped
            # length is a little less than the escaped length
            end_of_error = start_of_error + len(midair_collision_error) + 20
            location_of_error = html_parser.HTMLParser().unescape(
                resp.content[start_of_error: end_of_error]
            )
        assert midair_collision_error in location_of_error
    @pytest.mark.midair
    def test_edit_midair_collisions_ajax(self):
        """Tests midair collisions for ajax submissions."""
        self.test_edit_midair_collisions(is_ajax=True)

    @override_flag(SPAM_SUBMISSIONS_FLAG, active=True)
    @override_flag(SPAM_CHECKS_FLAG, active=True)
    @override_config(AKISMET_KEY='dashboard')
    @requests_mock.mock()
    @mock.patch('kuma.spam.akismet.Akismet.check_comment')
    def test_edit_spam_ajax(self, mock_requests, mock_akismet_method,
                            translate_locale=None):
        """Tests attempted spam edits that occur on Ajax POSTs."""
        # Note: Akismet is enabled by the Flag overrides
        mock_requests.post(VERIFY_URL, content='valid')
        # The return value of akismet.check_comment is set to True
        mock_akismet_method.return_value = True

        # self.client.login(username='admin', password='testpass')
        self.client.login(username='testuser', password='testpass')

        # Create a new document.
        doc = document(save=True)
        data = new_document_data()
        # Create a revision on the document
        revision(save=True, document=doc)
        # This is the url to post new revisions for the rest of this test
        posting_url = reverse('wiki.edit', args=[doc.slug])

        # If this is a translation test, then create a translation and a
        # revision on it
        if translate_locale:
            data['locale'] = translate_locale
            translation = document(
                parent=doc, locale=translate_locale, save=True
            )
            translation_rev = revision(
                document=translation,
                based_on=translation.parent.current_or_latest_revision(),
                save=True
            )
            # rev_id = translation_rev.id
            posting_url = reverse(
                'wiki.edit',
                args=[translation_rev.document.slug],
                locale=translate_locale
            )

        # Get the rev id
        resp = self.client.get(posting_url)
        page = pq(resp.content)
        rev_id = page.find('input[name="current_rev"]').attr('value')

        # Edit submits
        data.update({
            'form-type': 'rev',
            'content': 'Spam content',
            'current_rev': rev_id
        })
        resp = self.client.post(
            posting_url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        spam_message = render_to_string('wiki/includes/spam_error.html')
        assert spam_message in json.loads(resp.content)['error_message']
    def test_multiple_edits_ajax(self, translate_locale=None):
        """Tests multiple sequential valid edit attempts that occur as Ajax
        POSTs."""
        self.client.login(username='admin', password='testpass')

        # Post a new document.
        data = new_document_data()
        resp = self.client.post(reverse('wiki.create'), data)
        doc = Document.objects.get(slug=data['slug'])
        # This is the url to post new revisions for the rest of this test
        if translate_locale:
            posting_url = reverse('wiki.edit', args=[doc.slug],
                                  locale=translate_locale)
        else:
            posting_url = reverse('wiki.edit', args=[doc.slug])

        if translate_locale:
            # Post a new translation on doc
            translate_url = reverse(
                'wiki.translate', args=[data['slug']]
            ) + '?tolocale={}'.format(translate_locale)
            self.client.post(translate_url, data, follow=True)
            data.update({'locale': translate_locale})

        # Edit #1
        resp = self.client.get(posting_url)
        page = pq(resp.content)
        rev_id1 = page.find('input[name="current_rev"]').attr('value')

        # Edit #1 submits successfully
        data.update({
            'form-type': 'rev',
            'content': 'Edit #1',
            'current_rev': rev_id1
        })
        resp1 = self.client.post(
            posting_url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )

        # Edit #2
        resp = self.client.get(posting_url)
        page = pq(resp.content)
        rev_id2 = page.find('input[name="current_rev"]').attr('value')

        # Edit #2 submits successfully
        data.update({
            'form-type': 'rev',
            'content': 'Edit #2',
            'current_rev': rev_id2
        })
        resp2 = self.client.post(
            posting_url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )

        # For Ajax requests the response is a JsonResponse
        for resp in [resp1, resp2]:
            assert not json.loads(resp.content)['error']
            assert 'error_message' not in json.loads(resp.content).keys()

    def test_multiple_translation_edits_ajax(self):
        """Tests multiple sequential valid translation edits that occur as
        Ajax POSTs."""
        self.test_multiple_edits_ajax(translate_locale='es')

    # test that translation fails as well
    def test_translation_midair_collision(self):
        """Tests midair collisions for non-ajax translation revisions."""
        self.test_edit_midair_collisions(is_ajax=False,
                                         translate_locale='az')

    def test_translation_midair_collision_ajax(self):
        """Tests midair collisions for ajax translation revisions."""
        self.test_edit_midair_collisions(is_ajax=True,
                                         translate_locale='af')

    def test_translation_spam_ajax(self):
        """Tests attempted translation spam edits that occur on Ajax
        POSTs."""
        self.test_edit_spam_ajax(translate_locale='ru')

    @pytest.mark.toc
    def test_toc_toggle_off(self):
        """Toggling the table of contents off in revisions"""
        self.client.login(username='admin', password='testpass')
        rev = revision(is_approved=True, save=True)
        doc = rev.document
        data = new_document_data()
        assert Document.objects.get(slug=doc.slug,
                                    locale=doc.locale).show_toc
        data['form-type'] = 'rev'
        data['toc_depth'] = 0
        data['slug'] = doc.slug
        data['title'] = doc.title
        resp = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                data)
        assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)
        doc = Document.objects.get(slug=doc.slug, locale=doc.locale)
        assert doc.current_revision.toc_depth == 0

    @pytest.mark.toc
    def test_toc_toggle_on(self):
        """Toggling the table of contents back on in revisions"""
        self.client.login(username='admin', password='testpass')
        rev = revision(is_approved=True, save=True)
        new_r = revision(document=rev.document, content=rev.content,
                         toc_depth=0, is_approved=True)
        new_r.save()
        assert not Document.objects.get(
            slug=rev.document.slug, locale=rev.document.locale).show_toc
        data = new_document_data()
        data['form-type'] = 'rev'
        data['slug'] = rev.document.slug
        data['title'] = rev.document.title
        resp = self.client.post(reverse('wiki.edit',
                                        args=[rev.document.slug]),
                                data)
        assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)
        assert Document.objects.get(slug=rev.document.slug,
                                    locale=rev.document.locale).show_toc
    def test_parent_topic(self):
        """Selection of a parent topic when creating a document."""
        # TODO: Do we need this test? This seems broken in that the
        #       parent specified via the parent topic doesn't get its
        #       slug prepended to the new document's slug, as happens
        #       when specifying the parent via the URL.
        self.client.login(username='admin', password='testpass')
        doc = document(title='HTML8')
        doc.save()
        rev = revision(document=doc)
        rev.save()

        data = new_document_data()
        data['title'] = 'Replicated local storage'
        data['parent_topic'] = doc.id

        resp = self.client.post(reverse('wiki.create'), data)
        assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)
        assert doc.children.count() == 1
        assert doc.children.all()[0].title == 'Replicated local storage'

    def test_repair_breadcrumbs(self):
        english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
                               title='English top',
                               save=True)
        english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
                               title='English mid',
                               parent_topic=english_top,
                               save=True)
        english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                  title='English bottom',
                                  parent_topic=english_mid,
                                  save=True)

        french_top = document(locale='fr',
                              title='French top',
                              parent=english_top,
                              save=True)
        french_mid = document(locale='fr',
                              title='French mid',
                              parent=english_mid,
                              parent_topic=english_mid,
                              save=True)
        french_bottom = document(locale='fr',
                                 title='French bottom',
                                 parent=english_bottom,
                                 parent_topic=english_bottom,
                                 save=True)

        self.client.login(username='admin', password='testpass')

        resp = self.client.get(reverse('wiki.repair_breadcrumbs',
                                       args=[french_bottom.slug],
                                       locale='fr'))
        assert resp.status_code == 302
        assert_no_cache_header(resp)
        assert french_bottom.get_absolute_url() in resp['Location']
        french_bottom_fixed = Document.objects.get(
            locale='fr', title=french_bottom.title)
        assert french_mid.id == french_bottom_fixed.parent_topic.id
        assert (french_top.id ==
                french_bottom_fixed.parent_topic.parent_topic.id)

    def test_translate_on_edit(self):
        d1 = document(title="Doc1", locale=settings.WIKI_DEFAULT_LANGUAGE,
                      save=True)
        revision(document=d1, save=True)

        d2 = document(title="TransDoc1", locale='de', parent=d1, save=True)
        revision(document=d2, save=True)

        self.client.login(username='admin', password='testpass')
        url = reverse('wiki.edit', args=(d2.slug,), locale=d2.locale)

        resp = self.client.get(url)
        assert resp.status_code == 200
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)

    def test_discard_location(self):
        """Testing that the 'discard' HREF goes to the correct place when
        it's explicitly and implicitly set"""
        self.client.login(username='admin', password='testpass')

        def _create_doc(slug, locale):
            doc = document(slug=slug, is_localizable=True, locale=locale)
            doc.save()
            r = revision(document=doc)
            r.save()
            return doc

        # Test that the 'discard' button on an edit goes to the original page
        doc = _create_doc('testdiscarddoc', settings.WIKI_DEFAULT_LANGUAGE)
        response = self.client.get(reverse('wiki.edit', args=[doc.slug]))
        assert (pq(response.content).find('.btn-discard').attr('href') ==
                reverse('wiki.document', args=[doc.slug]))

        # Test that the 'discard' button on a new translation goes to the
        # en-US page
        response = self.client.get(reverse('wiki.translate',
                                           args=[doc.slug]),
                                   {'tolocale': 'es'})
        assert (pq(response.content).find('.btn-discard').attr('href') ==
                reverse('wiki.document', args=[doc.slug]))

        # Test that the 'discard' button on an existing translation goes to
        # the 'es' page
        foreign_doc = _create_doc('testdiscarddoc', 'es')
        response = self.client.get(reverse('wiki.edit',
                                           args=[foreign_doc.slug],
                                           locale=foreign_doc.locale))
        assert (pq(response.content).find('.btn-discard').attr('href') ==
                reverse('wiki.document', args=[foreign_doc.slug],
                        locale=foreign_doc.locale))
    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get',
                return_value=('lorem ipsum dolor sit amet', None))
    def test_revert(self, mock_kumascript_get):
        self.client.login(username='admin', password='testpass')

        data = new_document_data()
        data['title'] = 'A Test Article For Reverting'
        data['slug'] = 'test-article-for-reverting'
        response = self.client.post(reverse('wiki.create'), data)

        doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug='test-article-for-reverting')
        rev = doc.revisions.order_by('-id').all()[0]

        data['content'] = 'Not lorem ipsum anymore'
        data['comment'] = 'Nobody likes Latin anyway'

        response = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                    data)

        mock_kumascript_get.reset_mock()
        response = self.client.post(reverse('wiki.revert_document',
                                            args=[doc.slug, rev.id]),
                                    {'revert': True, 'comment': 'Blah blah'})
        assert response.status_code == 302
        assert_no_cache_header(response)
        assert mock_kumascript_get.called, "kumascript should have been used"

        rev = doc.revisions.order_by('-id').all()[0]
        assert rev.content == 'lorem ipsum dolor sit amet'
        assert 'Blah blah' in rev.comment

        mock_kumascript_get.reset_mock()
        rev = doc.revisions.order_by('-id').all()[1]
        response = self.client.post(reverse('wiki.revert_document',
                                            args=[doc.slug, rev.id]),
                                    {'revert': True})
        assert response.status_code == 302
        rev = doc.revisions.order_by('-id').all()[0]
        assert ': ' not in rev.comment
        assert mock_kumascript_get.called, "kumascript should have been used"

    def test_revert_moved(self):
        doc = document(slug='move-me', save=True)
        rev = revision(document=doc, save=True)
        prev_rev_id = rev.id
        doc._move_tree('moved-doc')
        self.client.login(username='admin', password='testpass')

        resp = self.client.post(reverse('wiki.revert_document',
                                        args=[doc.slug, prev_rev_id]))
        assert resp.status_code == 200
        assert_no_cache_header(resp)
        assert b'cannot revert a document that has been moved' in resp.content

    def test_store_revision_ip(self):
        self.client.login(username='testuser', password='testpass')
        data = new_document_data()
        slug = 'test-article-for-storing-revision-ip'
        data.update({'title': 'A Test Article For Storing Revision IP',
                     'slug': slug})
        self.client.post(reverse('wiki.create'), data)

        doc = Document.objects.get(locale='en-US', slug=slug)

        data.update({'form-type': 'rev',
                     'content': 'This revision should NOT record IP',
                     'comment': 'This revision should NOT record IP'})

        resp = self.client.post(reverse('wiki.edit', args=[doc.slug]),
                                data,
                                HTTP_USER_AGENT='Mozilla Firefox',
                                HTTP_REFERER='http://localhost/')
        assert resp.status_code == 302
        assert resp['X-Robots-Tag'] == 'noindex'
        assert_no_cache_header(resp)
        assert RevisionIP.objects.all().count() == 0

        data.update({'content': 'Store the IP address for the revision.',
                     'comment': 'Store the IP address for the revision.'})

        with override_switch('store_revision_ips', True):
            self.client.post(reverse('wiki.edit', args=[doc.slug]),
                             data,
                             HTTP_USER_AGENT='Mozilla Firefox',
                             HTTP_REFERER='http://localhost/')
        assert RevisionIP.objects.all().count() == 1
        rev = doc.revisions.order_by('-id').all()[0]
        rev_ip = RevisionIP.objects.get(revision=rev)
        assert rev_ip.ip == '127.0.0.1'
        assert rev_ip.user_agent == 'Mozilla Firefox'
        assert rev_ip.referrer == 'http://localhost/'
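    # The email tests below drive the notification events imported at the
    # top of the file (EditDocumentEvent, EditDocumentInTreeEvent):
    # .notify(user, doc) subscribes the user, and a later edit to the watched
    # document (or tree) fires the messages inspected via mail.outbox.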
rev_ip.user_agent == 'Mozilla Firefox' assert rev_ip.referrer == 'http://localhost/' @pytest.mark.edit_emails @call_on_commit_immediately def test_email_for_first_edits(self): self.client.login(username='testuser', password='testpass') data = new_document_data() slug = 'test-article-for-storing-revision-ip' data.update({'title': 'A Test Article For First Edit Emails', 'slug': slug}) self.client.post(reverse('wiki.create'), data) assert len(mail.outbox) == 1 doc = Document.objects.get( locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) data.update({'form-type': 'rev', 'content': 'This edit should not send an email', 'comment': 'This edit should not send an email'}) resp = self.client.post(reverse('wiki.edit', args=[doc.slug]), data) assert resp.status_code == 302 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) assert len(mail.outbox) == 1 self.client.login(username='admin', password='testpass') data.update({'content': 'Admin first edit should send an email', 'comment': 'Admin first edit should send an email'}) self.client.post(reverse('wiki.edit', args=[doc.slug]), data) assert len(mail.outbox) == 2 def _check_message_for_headers(message, username): assert "%s made their first edit" % username in message.subject assert message.extra_headers == { 'X-Kuma-Document-Url': doc.get_full_url(), 'X-Kuma-Editor-Username': username, 'X-Kuma-Document-Locale': doc.locale, 'X-Kuma-Document-Title': doc.title } testuser_message = mail.outbox[0] admin_message = mail.outbox[1] _check_message_for_headers(testuser_message, 'testuser') _check_message_for_headers(admin_message, 'admin') def test_email_for_watched_edits(self): """ When a user edits a watched document, we should send an email to users who are watching it. """ self.client.login(username='testuser', password='testpass') data = new_document_data() rev = revision(save=True) previous_rev = rev.previous testuser2 = get_user(username='testuser2') EditDocumentEvent.notify(testuser2, rev.document) data.update({'form-type': 'rev', 'slug': rev.document.slug, 'title': rev.document.title, 'content': 'This edit should send an email', 'comment': 'This edit should send an email'}) resp = self.client.post(reverse('wiki.edit', args=[rev.document.slug]), data) assert resp.status_code == 302 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) self.assertEquals(1, len(mail.outbox)) message = mail.outbox[0] assert testuser2.email in message.to assert str(rev.document.title) in message.body assert 'sub-articles' not in message.body # Test that the compare URL points to the right revisions rev = Document.objects.get(pk=rev.document_id).current_revision assert rev.id != previous_rev assert (add_utm(get_compare_url(rev.document, rev.previous.id, rev.id), 'Wiki Doc Edits') in message.body) # Subscribe another user and assert 2 emails sent this time mail.outbox = [] testuser01 = get_user(username='testuser01') EditDocumentEvent.notify(testuser01, rev.document) data.update({'form-type': 'rev', 'slug': rev.document.slug, 'content': 'This edit should send 2 emails', 'comment': 'This edit should send 2 emails'}) self.client.post(reverse('wiki.edit', args=[rev.document.slug]), data) self.assertEquals(2, len(mail.outbox)) message = mail.outbox[0] assert testuser2.email in message.to assert rev.document.title in message.body assert 'sub-articles' not in message.body message = mail.outbox[1] assert testuser01.email in message.to assert rev.document.title in message.body assert 'sub-articles' not in message.body @pytest.mark.edit_emails def 
test_email_for_child_edit_in_watched_tree(self): """ When a user edits a child document in a watched document tree, we should send an email to users who are watching the tree. """ root_doc, child_doc, grandchild_doc = create_document_tree() testuser2 = get_user(username='testuser2') EditDocumentInTreeEvent.notify(testuser2, root_doc) self.client.login(username='testuser', password='testpass') data = new_document_data() data.update({'form-type': 'rev', 'slug': child_doc.slug, 'content': 'This edit should send an email', 'comment': 'This edit should send an email'}) resp = self.client.post(reverse('wiki.edit', args=[child_doc.slug]), data) assert resp.status_code == 302 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) assert len(mail.outbox) == 1 message = mail.outbox[0] assert testuser2.email in message.to assert 'sub-articles' in message.body @pytest.mark.edit_emails def test_email_for_grandchild_edit_in_watched_tree(self): """ When a user edits a grandchild document in a watched document tree, we should send an email to users who are watching the tree. """ root_doc, child_doc, grandchild_doc = create_document_tree() testuser2 = get_user(username='testuser2') EditDocumentInTreeEvent.notify(testuser2, root_doc) self.client.login(username='testuser', password='testpass') data = new_document_data() data.update({'form-type': 'rev', 'slug': grandchild_doc.slug, 'content': 'This edit should send an email', 'comment': 'This edit should send an email'}) self.client.post(reverse('wiki.edit', args=[grandchild_doc.slug]), data) assert len(mail.outbox) == 1 message = mail.outbox[0] assert testuser2.email in message.to assert 'sub-articles' in message.body @pytest.mark.edit_emails def test_single_email_when_watching_doc_and_tree(self): """ When a user edits a watched document in a watched document tree, we should only send a single email to users who are watching both the document and the tree. """ root_doc, child_doc, grandchild_doc = create_document_tree() testuser2 = get_user(username='testuser2') EditDocumentInTreeEvent.notify(testuser2, root_doc) EditDocumentEvent.notify(testuser2, child_doc) self.client.login(username='testuser', password='testpass') data = new_document_data() data.update({'form-type': 'rev', 'slug': child_doc.slug, 'content': 'This edit should send an email', 'comment': 'This edit should send an email'}) self.client.post(reverse('wiki.edit', args=[child_doc.slug]), data) assert len(mail.outbox) == 1 message = mail.outbox[0] assert testuser2.email in message.to class SectionEditingResourceTests(UserTestCase, WikiTestCase): def test_raw_source(self): """The raw source for a document can be requested""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) expected = """ <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """ with override_switch('application_ACAO', True): response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 # Since the client is logged-in, the response should not be cached. 
assert_no_cache_header(response) assert response['Access-Control-Allow-Origin'] == '*' assert normalize_html(expected) == normalize_html(response.content) def test_raw_editor_safety_filter(self): """Safety filter should be applied before rendering editor bug 821986 """ self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <p onload=alert(3)>FOO</p> <svg><circle onload=confirm(3)>HI THERE</circle></svg> """) response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 # Since the client is logged-in, the response should not be cached. assert_no_cache_header(response) assert b'<p onload=' not in response.content assert b'<circle onload=' not in response.content def test_raw_with_editing_links_source(self): """The raw source for a document can be requested, with section editing links""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) expected = """ <h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/%(slug)s?raw=true&amp;section=s1" href="/en-US/docs/%(slug)s$edit?edit_links=true&amp;section=s1" title="Edit section">Edit</a>s1</h1> <p>test</p> <p>test</p> <h1 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/%(slug)s?raw=true&amp;section=s2" href="/en-US/docs/%(slug)s$edit?edit_links=true&amp;section=s2" title="Edit section">Edit</a>s2</h1> <p>test</p> <p>test</p> <h1 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/%(slug)s?raw=true&amp;section=s3" href="/en-US/docs/%(slug)s$edit?edit_links=true&amp;section=s3" title="Edit section">Edit</a>s3</h1> <p>test</p> <p>test</p> """ % {'slug': rev.document.slug} response = self.client.get('%s?raw=true&edit_links=true' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 # Since the client is logged-in, the response should not be cached. assert_no_cache_header(response) assert normalize_html(expected) == normalize_html(response.content) def test_raw_section_source(self): """The raw source for a document section can be requested""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) expected = """ <h1 id="s2">s2</h1> <p>test</p> <p>test</p> """ response = self.client.get('%s?section=s2&raw=true' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 # Since the client is logged-in, the response should not be cached. 
assert_no_cache_header(response) assert normalize_html(expected) == normalize_html(response.content) @pytest.mark.midair def test_raw_section_edit_ajax(self): self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) replace = """ <h1 id="s2">s2</h1> <p>replace</p> """ response = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), {"form-type": "rev", "slug": rev.document.slug, "content": replace}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 assert response['X-Robots-Tag'] == 'noindex' assert_no_cache_header(response) assert json.loads(response.content) == { 'error': False, 'new_revision_id': rev.id + 1 } expected = """ <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>replace</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """ response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 # Since the client is logged-in, the response should not be cached. assert_no_cache_header(response) assert normalize_html(expected) == normalize_html(response.content) @pytest.mark.midair def test_midair_section_merge_ajax(self): """If a page was changed while someone was editing, but the changes didn't affect the specific section being edited, then ignore the midair warning""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) replace_1 = """ <h1 id="replace1">replace1</h1> <p>replace</p> """ replace_2 = """ <h1 id="replace2">replace2</h1> <p>replace</p> """ expected = """ <h1 id="replace1">replace1</h1> <p>replace</p> <h1 id="replace2">replace2</h1> <p>replace</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """ data = { 'form-type': 'rev', 'content': rev.content, 'slug': '' } # Edit #1 starts... resp = self.client.get('%s?section=s1' % reverse('wiki.edit', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert resp.status_code == 200 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) page = pq(resp.content) rev_id1 = page.find('input[name="current_rev"]').attr('value') # Edit #2 starts... 
resp = self.client.get('%s?section=s2' % reverse('wiki.edit', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id2 = page.find('input[name="current_rev"]').attr('value') # Edit #2 submits successfully data.update({ 'form-type': 'rev', 'content': replace_2, 'current_rev': rev_id2, 'slug': rev.document.slug }) resp = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert resp.status_code == 200 assert resp['X-Robots-Tag'] == 'noindex' assert_no_cache_header(resp) assert not json.loads(resp.content)['error'] # Edit #1 submits, but since it's a different section, there's no # mid-air collision data.update({ 'form-type': 'rev', 'content': replace_1, 'current_rev': rev_id1 }) resp = self.client.post('%s?section=s1&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # No conflict, but we should get a 205 Reset as an indication that the # page needs a refresh. assert resp.status_code == 205 # Finally, make sure that all the edits landed response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert response.status_code == 200 # Since the client is logged-in, the response should not be cached. assert_no_cache_header(response) assert normalize_html(expected) == normalize_html(response.content) # Also, ensure that the revision is slipped into the headers assert (unicode(Document.objects.get(slug=rev.document.slug, locale=rev.document.locale) .current_revision.id) == unicode(response['x-kuma-revision'])) @pytest.mark.midair def test_midair_section_collision_ajax(self): """If both a revision and the edited section have changed, then a section edit is a collision.""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) replace_1 = """ <h1 id="s2">replace</h1> <p>replace</p> """ replace_2 = """ <h1 id="s2">first replace</h1> <p>first replace</p> """ data = { 'form-type': 'rev', 'content': rev.content } # Edit #1 starts... resp = self.client.get('%s?section=s2' % reverse('wiki.edit', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id1 = page.find('input[name="current_rev"]').attr('value') # Edit #2 starts... 
resp = self.client.get('%s?section=s2' % reverse('wiki.edit', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id2 = page.find('input[name="current_rev"]').attr('value') # Edit #2 submits successfully data.update({ 'form-type': 'rev', 'content': replace_2, 'slug': rev.document.slug, 'current_rev': rev_id2 }) resp = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert not json.loads(resp.content)['error'] # Edit #1 submits, but since it's the same section, there's a collision data.update({ 'form': 'rev', 'content': replace_1, 'current_rev': rev_id1 }) resp = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert 200 == resp.status_code # We receive the midair collision message history_url = reverse( 'wiki.document_revisions', kwargs={'document_path': rev.document.slug}) midair_collision_error = (unicode(MIDAIR_COLLISION) % {'url': history_url}).encode('utf-8') assert midair_collision_error in json.loads(resp.content)['error_message'] def test_raw_include_option(self): doc_src = u""" <div class="noinclude">{{ XULRefAttr() }}</div> <dl> <dt>{{ XULAttr(&quot;maxlength&quot;) }}</dt> <dd>Type: <em>integer</em></dd> <dd>Przykłady 例 예제 示例</dd> </dl> <p><iframe></iframe></p> <div class="noinclude"> <p>{{ languages( { &quot;ja&quot;: &quot;ja/XUL/Attribute/maxlength&quot; } ) }}</p> </div> """ rev = revision(is_approved=True, save=True, content=doc_src) expected = u""" <dl> <dt>{{ XULAttr(&quot;maxlength&quot;) }}</dt> <dd>Type: <em>integer</em></dd> <dd>Przykłady 例 예제 示例</dd> </dl> <p><iframe></iframe></p> """ resp = self.client.get('%s?raw&include' % reverse('wiki.document', args=[rev.document.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') assert resp.status_code == 200 assert_shared_cache_header(resp) assert (normalize_html(expected) == normalize_html(resp.content.decode('utf-8'))) def test_section_edit_toc(self): """show_toc is preserved in section editing.""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) rev.toc_depth = 1 rev.save() replace = """ <h1 id="s2">s2</h1> <p>replace</p> """ self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), {"form-type": "rev", "slug": rev.document.slug, "content": replace}, follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest') changed = Document.objects.get(pk=rev.document.id).current_revision assert rev.id != changed.id assert 1 == changed.toc_depth def test_section_edit_review_tags(self): """review tags are preserved in section editing.""" self.client.login(username='admin', password='testpass') rev = revision(is_approved=True, save=True, content=""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) tags_to_save = ['bar', 'foo'] rev.save() rev.review_tags.set(*tags_to_save) replace = """ <h1 id="s2">s2</h1> <p>replace</p> """ self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[rev.document.slug]), {"form-type": "rev", "slug": rev.document.slug, "content": replace}, follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest') changed = Document.objects.get(pk=rev.document.id).current_revision assert 
rev.id != changed.id assert set(tags_to_save) == set(t.name for t in changed.review_tags.all()) class MindTouchRedirectTests(UserTestCase, WikiTestCase): """ Test that we appropriately redirect old-style MindTouch URLs to new-style kuma URLs. """ # A note on these tests: we could try to use assertRedirects on # these, but for the most part we're just constructing a URL # similar enough to the wiki app's own built-in redirects that # it'll pick up the request and do what we want with it. But it # may end up issuing its own redirects, which are tricky to sort # out from the ones the legacy MindTouch handling will emit, so # instead we just test that A) we did issue a redirect and B) the # URL we constructed is enough for the document views to go on. server_prefix = '/%s/docs' % settings.WIKI_DEFAULT_LANGUAGE namespace_urls = ( # One for each namespace. {'mindtouch': '/Help:Foo', 'kuma': '%s/Help:Foo' % server_prefix}, {'mindtouch': '/Help_talk:Foo', 'kuma': '%s/Help_talk:Foo' % server_prefix}, {'mindtouch': '/Project:En/MDC_editor_guide', 'kuma': '%s/Project:MDC_editor_guide' % server_prefix}, {'mindtouch': '/Project_talk:En/MDC_style_guide', 'kuma': '%s/Project_talk:MDC_style_guide' % server_prefix}, {'mindtouch': '/Special:Foo', 'kuma': '%s/Special:Foo' % server_prefix}, {'mindtouch': '/Talk:en/Foo', 'kuma': '%s/Talk:Foo' % server_prefix}, {'mindtouch': '/Template:Foo', 'kuma': '%s/Template:Foo' % server_prefix}, {'mindtouch': '/User:Foo', 'kuma': '%s/User:Foo' % server_prefix}, ) def test_namespace_urls(self): new_doc = document() new_doc.title = 'User:Foo' new_doc.slug = 'User:Foo' new_doc.save() for namespace_test in self.namespace_urls: resp = self.client.get(namespace_test['mindtouch'], follow=False) assert 301 == resp.status_code assert resp['Location'] == namespace_test['kuma'] def test_document_urls(self): """Check the url redirect to proper document when the url like /<locale>/<document_slug>""" d = document(locale='zh-CN') d.save() mt_url = '/{locale}/{slug}'.format(locale=d.locale, slug=d.slug) resp = self.client.get(mt_url, follow=True) assert resp.status_code == 200 # Check the last redirect chain url is correct document url last_url = resp.redirect_chain[-1][0] assert last_url == d.get_absolute_url() def test_view_param(self): d = document() d.locale = settings.WIKI_DEFAULT_LANGUAGE d.slug = 'HTML/HTML5' d.title = 'HTML 5' d.save() mt_url = '/en-US/%s?view=edit' % (d.slug,) resp = self.client.get(mt_url) assert 301 == resp.status_code expected_url = d.get_absolute_url('wiki.edit') assert resp['Location'] == expected_url @override_config(KUMASCRIPT_TIMEOUT=5.0, KUMASCRIPT_MAX_AGE=600) class DeferredRenderingViewTests(UserTestCase, WikiTestCase): """Tests for the deferred rendering system and interaction with views""" def setUp(self): super(DeferredRenderingViewTests, self).setUp() self.rendered_content = 'HELLO RENDERED CONTENT' self.raw_content = 'THIS IS RAW CONTENT' self.rev = revision(is_approved=True, save=True, content=self.raw_content, # Disable TOC, makes content inspection easier. 
toc_depth=0) self.doc = self.rev.document self.doc.html = self.raw_content self.doc.rendered_html = self.rendered_content self.doc.save() self.url = self.doc.get_absolute_url() @mock.patch('kuma.wiki.kumascript.get') def test_rendered_content(self, mock_kumascript_get): """Document view should serve up rendered content when available""" mock_kumascript_get.return_value = (self.rendered_content, None) resp = self.client.get(self.url, follow=False) p = pq(resp.content) txt = p.find('#wikiArticle').text() assert self.rendered_content in txt assert self.raw_content not in txt assert 0 == p.find('#doc-rendering-in-progress').length assert 0 == p.find('#doc-render-raw-fallback').length def test_rendering_in_progress_warning(self): # Make the document look like there's a rendering in progress. self.doc.render_started_at = datetime.datetime.now() self.doc.save() resp = self.client.get(self.url, follow=False) p = pq(resp.content) txt = p.find('#wikiArticle').text() # Even though a rendering looks like it's in progress, ensure the # last-known render is displayed. assert self.rendered_content in txt assert self.raw_content not in txt assert 0 == p.find('#doc-rendering-in-progress').length # Only for logged-in users, ensure the render-in-progress warning is # displayed. self.client.login(username='testuser', password='testpass') resp = self.client.get(self.url, follow=False) p = pq(resp.content) assert 1 == p.find('#doc-rendering-in-progress').length @mock.patch('kuma.wiki.kumascript.get') def test_raw_content_during_initial_render(self, mock_kumascript_get): """Raw content should be displayed during a document's initial deferred rendering""" mock_kumascript_get.return_value = (self.rendered_content, None) # Make the document look like there's no rendered content, but that a # rendering is in progress. self.doc.html = self.raw_content self.doc.rendered_html = '' self.doc.render_started_at = datetime.datetime.now() self.doc.save() # Now, ensure that raw content is shown in the view. 
resp = self.client.get(self.url, follow=False) p = pq(resp.content) txt = p.find('#wikiArticle').text() assert self.rendered_content not in txt assert self.raw_content in txt assert 0 == p.find('#doc-render-raw-fallback').length # Only for logged-in users, ensure that a warning is displayed about # the fallback self.client.login(username='testuser', password='testpass') resp = self.client.get(self.url, follow=False) p = pq(resp.content) assert 1 == p.find('#doc-render-raw-fallback').length @mock.patch.object(Document, 'schedule_rendering') @mock.patch('kuma.wiki.kumascript.get') def test_schedule_rendering(self, mock_kumascript_get, mock_document_schedule_rendering): mock_kumascript_get.return_value = (self.rendered_content, None) self.client.login(username='testuser', password='testpass') data = new_document_data() data.update({ 'form-type': 'rev', 'content': 'This is an update', }) edit_url = reverse('wiki.edit', args=[self.doc.slug]) resp = self.client.post(edit_url, data) assert 302 == resp.status_code assert mock_document_schedule_rendering.called mock_document_schedule_rendering.reset_mock() data.update({ 'form-type': 'both', 'content': 'This is a translation', }) translate_url = (reverse('wiki.translate', args=[data['slug']]) + '?tolocale=fr') response = self.client.post(translate_url, data) assert response.status_code == 302 assert response['X-Robots-Tag'] == 'noindex' assert_no_cache_header(response) assert mock_document_schedule_rendering.called class PageMoveTests(UserTestCase, WikiTestCase): def test_move_conflict(self): parent = revision(title='Test page move views', slug='test-page-move-views', is_approved=True, save=True) parent_doc = parent.document child = revision(title='Child of page-move view test', slug='page-move/test-views', is_approved=True, save=True) child_doc = child.document child_doc.parent_topic = parent.document child_doc.save() revision(title='Conflict for page-move view', slug='moved/test-page-move-views/test-views', is_approved=True, save=True) data = {'slug': 'moved/test-page-move-views'} self.client.login(username='admin', password='testpass') with override_flag('page_move', True): resp = self.client.post(reverse('wiki.move', args=(parent_doc.slug,)), data=data) assert resp.status_code == 200 assert_no_cache_header(resp)
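The mid-air editing tests above encode a simple rule: a concurrent revision only blocks a section edit when it touched the section being edited; otherwise the edit merges and the client gets a 205 Reset. A minimal sketch of that decision logic (the helper name and parameters are hypothetical, not kuma's API):

def is_midair_collision(base_rev_id, current_rev_id, edited_section_changed):
    # The client submits the revision id it started editing from. If a newer
    # revision landed meanwhile, we only collide when that revision modified
    # the same section; a change elsewhere is merged instead.
    if base_rev_id == current_rev_id:
        return False  # nothing happened in between
    return edited_section_changed

# Mirrors the two tests: other section edited -> merge, same section -> collision.
assert is_midair_collision(10, 11, edited_section_changed=False) is False
assert is_midair_collision(10, 11, edited_section_changed=True) is True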
SphinxKnight/kuma
kuma/wiki/tests/test_views.py
Python
mpl-2.0
113,459
[ "VisIt" ]
7949d26fa8244a08d1ef4f13ca1913a88bbc79914bfd554dc75bcf018a4dbe7e
""" TornadoBaseClient contains all the low-levels functionalities and initilization methods It must be instantiated from :py:class:`~DIRAC.Core.Tornado.Client.TornadoClient` Requests library manage itself retry when connection failed, so the __nbOfRetry attribute is removed from DIRAC (For each URL requests manage retries himself, if it still fail, we try next url) KeepAlive lapse is also removed because managed by request, see https://requests.readthedocs.io/en/latest/user/advanced/#keep-alive If necessary this class can be modified to define number of retry in requests, documentation does not give lot of informations but you can see this simple solution from StackOverflow. After some tests request seems to retry 3 times by default. https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request .. warning:: If you use your own certificates, it's like in dips, please take a look at :ref:`using_own_CA` .. warning:: Lots of method are copy-paste from :py:class:`~DIRAC.Core.DISET.private.BaseClient`. And some methods are copy-paste AND modifications, for now it permit to fully separate DISET and HTTPS. """ # pylint: disable=broad-except from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = "$Id$" from io import open import errno import requests import six from six.moves import http_client import DIRAC from DIRAC import S_OK, S_ERROR, gLogger from DIRAC.ConfigurationSystem.Client.Config import gConfig from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import skipCACheck from DIRAC.ConfigurationSystem.Client.Helpers.Registry import findDefaultGroupForDN from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceURL, getServiceFailoverURL from DIRAC.Core.DISET.ThreadConfig import ThreadConfig from DIRAC.Core.Security import Locations from DIRAC.Core.Utilities import List, Network from DIRAC.Core.Utilities.JEncode import decode, encode # TODO CHRIS: refactor all the messy `discover` methods # I do not do it now because I want first to decide # whether we go with code copy of fatorization class TornadoBaseClient(object): """ This class contain initialization method and all utilities method used for RPC """ __threadConfig = ThreadConfig() VAL_EXTRA_CREDENTIALS_HOST = "hosts" KW_USE_CERTIFICATES = "useCertificates" KW_EXTRA_CREDENTIALS = "extraCredentials" KW_TIMEOUT = "timeout" KW_SETUP = "setup" KW_VO = "VO" KW_DELEGATED_DN = "delegatedDN" KW_DELEGATED_GROUP = "delegatedGroup" KW_IGNORE_GATEWAYS = "ignoreGateways" KW_PROXY_LOCATION = "proxyLocation" KW_PROXY_STRING = "proxyString" KW_PROXY_CHAIN = "proxyChain" KW_SKIP_CA_CHECK = "skipCACheck" KW_KEEP_ALIVE_LAPSE = "keepAliveLapse" def __init__(self, serviceName, **kwargs): """ :param serviceName: URL of the service (proper uri or just System/Component) :param useCertificates: If set to True, use the server certificate :param extraCredentials: :param timeout: Timeout of the call (default 600 s) :param setup: Specify the Setup :param VO: Specify the VO :param delegatedDN: Not clear what it can be used for. :param delegatedGroup: Not clear what it can be used for. 
:param ignoreGateways: Ignore the DIRAC Gateways settings :param proxyLocation: Specify the location of the proxy :param proxyString: Specify the proxy string :param proxyChain: Specify the proxy chain :param skipCACheck: Do not check the CA :param keepAliveLapse: Duration for keepAliveLapse (heartbeat like) (now managed by requests) """ if not isinstance(serviceName, six.string_types): raise TypeError("Service name expected to be a string. Received %s type %s" % (str(serviceName), type(serviceName))) self._destinationSrv = serviceName self._serviceName = serviceName self.__ca_location = False self.kwargs = kwargs self.__useCertificates = None # The CS useServerCertificate option can be overridden by explicit argument self.__forceUseCertificates = self.kwargs.get(self.KW_USE_CERTIFICATES) self.__initStatus = S_OK() self.__idDict = {} self.__extraCredentials = "" # by default we always have 1 url for example: # RPCClient('dips://volhcb38.cern.ch:9162/Framework/SystemAdministrator') self.__nbOfUrls = 1 self.__bannedUrls = [] # For pylint... self.setup = None self.vo = None self.serviceURL = None for initFunc in ( self.__discoverTimeout, self.__discoverSetup, self.__discoverVO, self.__discoverCredentialsToUse, self.__discoverExtraCredentials, self.__discoverURL): result = initFunc() if not result['OK'] and self.__initStatus['OK']: self.__initStatus = result def __discoverSetup(self): """ Discover which setup to use and stores it in self.setup The setup is looked for: * kwargs of the constructor (see KW_SETUP) * in the CS /DIRAC/Setup * default to 'Test' """ if self.KW_SETUP in self.kwargs and self.kwargs[self.KW_SETUP]: self.setup = str(self.kwargs[self.KW_SETUP]) else: self.setup = self.__threadConfig.getSetup() if not self.setup: self.setup = gConfig.getValue("/DIRAC/Setup", "Test") return S_OK() def __discoverURL(self): """ Calculate the final URL. It is called at initialization and in connect in case of issue It sets: * self.serviceURL: the url (dips) selected as target using __findServiceURL * self.__URLTuple: a split of serviceURL obtained by Network.splitURL * self._serviceName: the last part of URLTuple (typically System/Component) WARNING: COPY PASTE FROM BaseClient """ # Calculate final URL try: result = self.__findServiceURL() except Exception as e: return S_ERROR(repr(e)) if not result['OK']: return result self.serviceURL = result['Value'] retVal = Network.splitURL(self.serviceURL) if not retVal['OK']: return retVal self.__URLTuple = retVal['Value'] self._serviceName = self.__URLTuple[-1] res = gConfig.getOptionsDict("/DIRAC/ConnConf/%s:%s" % self.__URLTuple[1:3]) if res['OK']: opts = res['Value'] for k in opts: if k not in self.kwargs: self.kwargs[k] = opts[k] return S_OK() def __discoverVO(self): """ Discover which VO to use and stores it in self.vo The VO is looked for: * kwargs of the constructor (see KW_VO) * in the CS /DIRAC/VirtualOrganization * default to 'unknown' WARNING: COPY/PASTE FROM Core/Diset/private/BaseClient FOR NOW """ if self.KW_VO in self.kwargs and self.kwargs[self.KW_VO]: self.vo = str(self.kwargs[self.KW_VO]) else: self.vo = gConfig.getValue("/DIRAC/VirtualOrganization", "unknown") return S_OK() def __discoverCredentialsToUse(self): """ Discovers which credentials to use for connection. 
* Server certificate: -> If KW_USE_CERTIFICATES in kwargs, sets it in self.__useCertificates -> If not, check gConfig.useServerCertificate(), and sets it in self.__useCertificates and kwargs[KW_USE_CERTIFICATES] * Certification Authorities check: -> if KW_SKIP_CA_CHECK is not in kwargs and we are using the certificates, set KW_SKIP_CA_CHECK to false in kwargs -> if KW_SKIP_CA_CHECK is not in kwargs and we are not using the certificate, check the skipCACheck * Proxy Chain WARNING: MOSTLY COPY/PASTE FROM Core/Diset/private/BaseClient """ # Use certificates? if self.KW_USE_CERTIFICATES in self.kwargs: self.__useCertificates = self.kwargs[self.KW_USE_CERTIFICATES] else: self.__useCertificates = gConfig.useServerCertificate() self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates if self.KW_SKIP_CA_CHECK not in self.kwargs: if self.__useCertificates: self.kwargs[self.KW_SKIP_CA_CHECK] = False else: self.kwargs[self.KW_SKIP_CA_CHECK] = skipCACheck() # Rewritten a little bit from here: we don't need the proxy string, we use the file if self.KW_PROXY_CHAIN in self.kwargs: try: self.kwargs[self.KW_PROXY_STRING] = self.kwargs[self.KW_PROXY_CHAIN].dumpAllToString()['Value'] del self.kwargs[self.KW_PROXY_CHAIN] except Exception: return S_ERROR("Invalid proxy chain specified on instantiation") # ==== REWRITTEN FROM HERE ==== # For certs always check CA's. For clients skipServerIdentityCheck return S_OK() def __discoverExtraCredentials(self): """ Add extra credentials information. * self.__extraCredentials -> if KW_EXTRA_CREDENTIALS in kwargs, we set it -> Otherwise, if we use the server certificate, we set it to VAL_EXTRA_CREDENTIALS_HOST -> If we have a delegation (see below), we set it to (delegatedDN, delegatedGroup) -> otherwise it is an empty string * delegation: -> if KW_DELEGATED_DN in kwargs, or delegatedDN in threadConfig, put it in self.kwargs -> If we have a delegated DN but not group, we find the corresponding group in the CS WARNING: COPY/PASTE FROM Core/Diset/private/BaseClient """ # which extra credentials to use? if self.__useCertificates: self.__extraCredentials = self.VAL_EXTRA_CREDENTIALS_HOST else: self.__extraCredentials = "" if self.KW_EXTRA_CREDENTIALS in self.kwargs: self.__extraCredentials = self.kwargs[self.KW_EXTRA_CREDENTIALS] # Are we delegating something? delegatedDN, delegatedGroup = self.__threadConfig.getID() if self.KW_DELEGATED_DN in self.kwargs and self.kwargs[self.KW_DELEGATED_DN]: delegatedDN = self.kwargs[self.KW_DELEGATED_DN] elif delegatedDN: self.kwargs[self.KW_DELEGATED_DN] = delegatedDN if self.KW_DELEGATED_GROUP in self.kwargs and self.kwargs[self.KW_DELEGATED_GROUP]: delegatedGroup = self.kwargs[self.KW_DELEGATED_GROUP] elif delegatedGroup: self.kwargs[self.KW_DELEGATED_GROUP] = delegatedGroup if delegatedDN: if not delegatedGroup: result = findDefaultGroupForDN(self.kwargs[self.KW_DELEGATED_DN]) if not result['OK']: return result self.__extraCredentials = (delegatedDN, delegatedGroup) return S_OK() def __discoverTimeout(self): """ Discover which timeout to use and stores it in self.timeout The timeout can be specified in the kwargs of the constructor (see KW_TIMEOUT), with a minimum of 120 seconds. If unspecified, the timeout will be 600 seconds. 
The value is set in self.timeout, as well as in self.kwargs[KW_TIMEOUT] WARNING: COPY/PASTE FROM Core/Diset/private/BaseClient """ if self.KW_TIMEOUT in self.kwargs: self.timeout = self.kwargs[self.KW_TIMEOUT] else: self.timeout = False if self.timeout: self.timeout = max(120, self.timeout) else: self.timeout = 600 self.kwargs[self.KW_TIMEOUT] = self.timeout return S_OK() def __findServiceURL(self): """ Discovers the URL of a service, taking into account gateways, multiple URLs, banned URLs If the site on which we run is configured to use gateways (/DIRAC/Gateways/<siteName>), these URLs will be used. To ignore the gateway, it is possible to set KW_IGNORE_GATEWAYS to True in kwargs. If self._destinationSrv (given as constructor attribute) is a properly formed URL, we just return this one. If we have to use a gateway, we just replace the server name in the url. The list of URLs defined in the CS (<System>/URLs/<Component>) is randomized This method also sets some attributes: * self.__nbOfUrls = number of URLs * self.__nbOfRetry removed in HTTPS (Managed by requests) * self.__bannedUrls is reinitialized if all the URLs are banned :return: the selected URL WARNING (Mostly) COPY PASTE FROM BaseClient (protocols list is changed to https) """ if not self.__initStatus['OK']: return self.__initStatus # Load the Gateways URLs for the current site Name gatewayURL = False if not self.kwargs.get(self.KW_IGNORE_GATEWAYS): dRetVal = gConfig.getOption("/DIRAC/Gateways/%s" % DIRAC.siteName()) if dRetVal['OK']: rawGatewayURL = List.randomize(List.fromChar(dRetVal['Value'], ","))[0] gatewayURL = "/".join(rawGatewayURL.split("/")[:3]) # If what was given as constructor attribute is a properly formed URL, # we just return this one. # If we have to use a gateway, we just replace the server name in it if self._destinationSrv.startswith("https://"): gLogger.debug("Already given a valid url", self._destinationSrv) if not gatewayURL: return S_OK(self._destinationSrv) gLogger.debug("Reconstructing given URL to pass through gateway") path = "/".join(self._destinationSrv.split("/")[3:]) finalURL = "%s/%s" % (gatewayURL, path) gLogger.debug("Gateway URL conversion:\n %s -> %s" % (self._destinationSrv, finalURL)) return S_OK(finalURL) if gatewayURL: gLogger.debug("Using gateway", gatewayURL) return S_OK("%s/%s" % (gatewayURL, self._destinationSrv)) # If no url is given to the constructor, we extract the list of URLs from the CS (System/URLs/Component) try: urls = getServiceURL(self._destinationSrv, setup=self.setup) except Exception as e: return S_ERROR("Cannot get URL for %s in setup %s: %s" % (self._destinationSrv, self.setup, repr(e))) if not urls: return S_ERROR("URL for service %s not found" % self._destinationSrv) failoverUrls = [] # Check whether there are some failover URLs to use as a last resort try: failoverUrlsStr = getServiceFailoverURL(self._destinationSrv, setup=self.setup) if failoverUrlsStr: failoverUrls = failoverUrlsStr.split(',') except Exception: pass # We randomize the list, and add at the end the failover URLs (System/FailoverURLs/Component) urlsList = List.randomize(List.fromChar(urls, ",")) + failoverUrls self.__nbOfUrls = len(urlsList) # __nbOfRetry removed in HTTPS (managed by requests) if self.__nbOfUrls == len(self.__bannedUrls): self.__bannedUrls = [] # retry all urls gLogger.debug("Retrying again all URLs") if self.__bannedUrls and len(urlsList) > 1: # we have a host which is not accessible. We remove that host from the list. 
# We only remove if we have more than one instance for i in self.__bannedUrls: gLogger.debug("Removing banned URL", "%s" % i) urlsList.remove(i) # Take the first URL from the list # randUrls = List.randomize( urlsList ) + failoverUrls sURL = urlsList[0] # If we have banned URLs, and several URLs at our disposal, we make sure that the selected sURL # is not on a host which is banned. If it is, we take the next one in the list using __selectUrl if self.__bannedUrls and self.__nbOfUrls > 2: # when we have multiple services, we can # have a situation where two services are running on the same machine with different ports... retVal = Network.splitURL(sURL) nexturl = None if retVal['OK']: nexturl = retVal['Value'] found = False for i in self.__bannedUrls: retVal = Network.splitURL(i) if retVal['OK']: bannedurl = retVal['Value'] else: break # We found a banned URL on the same host as the one we are running on if nexturl[1] == bannedurl[1]: found = True break if found: nexturl = self.__selectUrl(nexturl, urlsList[1:]) if nexturl: # a url found which is on a different host sURL = nexturl gLogger.debug("Discovering URL for service", "%s -> %s" % (self._destinationSrv, sURL)) return S_OK(sURL) def __selectUrl(self, notselect, urls): """In case multiple services are running on the same host, a new url has to be on a different host Note: If we do not have a different host, we will use the selected url... :param notselect: URL that should NOT be selected :param urls: list of potential URLs :return: selected URL WARNING: COPY/PASTE FROM Core/Diset/private/BaseClient """ url = None for i in urls: retVal = Network.splitURL(i) if retVal['OK']: if retVal['Value'][1] != notselect[1]: # the hosts are different url = i break else: gLogger.error(retVal['Message']) return url def getServiceName(self): """ Returns the name of the service; if you gave a url at init, returns the URL. """ return self._serviceName def getDestinationService(self): """ Returns the url of the service. """ return getServiceURL(self._serviceName) def _getBaseStub(self): """ Returns a tuple with (self._destinationSrv, newKwargs) self._destinationSrv is what was given as first parameter of the init serviceName newKwargs is an updated copy of kwargs: * if set, we remove the useCertificates (KW_USE_CERTIFICATES) in newKwargs This method is just used to return information in case of error in the InnerRPCClient WARNING: COPY/PASTE FROM Core/Diset/private/BaseClient """ newKwargs = dict(self.kwargs) # Remove useCertificates as the forwarder of the call will have to # independently decide whether to use their cert or not anyway. if 'useCertificates' in newKwargs: del newKwargs['useCertificates'] return (self._destinationSrv, newKwargs) def _request(self, retry=0, outputFile=None, **kwargs): """ Sends the request to server :param retry: internal parameter for recursive calls. TODO: remove ? :param outputFile: (default None) path to a file where to store the received data. If set, the server response will be streamed for optimization purposes, and the response data will not go through the JDecode process :param **kwargs: Any argument there is used as a post parameter. They are detailed below. :param method: (mandatory) name of the distant method :param args: (mandatory) json serialized list of arguments for the procedure :returns: The received data. 
If outputFile is set, always return S_OK """ # Adding some information to send if self.__extraCredentials: kwargs[self.KW_EXTRA_CREDENTIALS] = encode(self.__extraCredentials) kwargs["clientVO"] = self.vo # Getting URL url = self.__findServiceURL() if not url['OK']: return url url = url['Value'] # Getting CA file (or skip verification) verify = (not self.kwargs.get(self.KW_SKIP_CA_CHECK)) if verify: cafile = Locations.getCAsLocation() if not cafile: gLogger.error("No CAs found!") return S_ERROR("No CAs found!") # Cache the CA location and use it for verification self.__ca_location = cafile verify = self.__ca_location # getting certificate # Do we use the server certificate ? if self.kwargs[self.KW_USE_CERTIFICATES]: cert = Locations.getHostCertificateAndKeyLocation() # CHRIS 04.02.21 # TODO: add proxyLocation check ? else: cert = Locations.getProxyLocation() if not cert: gLogger.error("No proxy found") return S_ERROR("No proxy found") # We have a try/except for all the exceptions # whose default behavior is to try again, # maybe to a different server try: # And we have a second block to handle specific exceptions # which make it not worth retrying try: rawText = None # Default case, just return the result if not outputFile: call = requests.post(url, data=kwargs, timeout=self.timeout, verify=verify, cert=cert) # raising the exception for status here # means essentially that we are losing the information of what is returned by the server # as error message, since it is not passed to the exception # However, we can store the text and return it raw as an error, # since there is no guarantee that it is any JEncoded text # Note that we would get an exception only if there is an exception on the server side which # is not handled. # Any standard S_ERROR will be transferred as an S_ERROR with a correct code. rawText = call.text call.raise_for_status() return decode(rawText)[0] else: # Instruct the server not to encode the response kwargs['rawContent'] = True rawText = None # Stream download # https://requests.readthedocs.io/en/latest/user/advanced/#body-content-workflow with requests.post(url, data=kwargs, timeout=self.timeout, verify=verify, cert=cert, stream=True) as r: rawText = r.text r.raise_for_status() with open(outputFile, 'wb') as f: for chunk in r.iter_content(4096): # if chunk: # filter out keep-alive new chunks f.write(chunk) return S_OK() # Some HTTPErrors are not worth retrying except requests.exceptions.HTTPError as e: status_code = e.response.status_code if status_code == http_client.NOT_IMPLEMENTED: return S_ERROR(errno.ENOSYS, "%s is not implemented" % kwargs.get('method')) elif status_code in (http_client.FORBIDDEN, http_client.UNAUTHORIZED): return S_ERROR(errno.EACCES, "No access to %s" % url) # if it is something else, retry raise # Whatever exception we have here, we deem worth retrying except Exception as e: # CHRIS TODO review this part: retry logic is fishy # self.__bannedUrls is emptied in findServiceURLs if url not in self.__bannedUrls: self.__bannedUrls += [url] if retry < self.__nbOfUrls - 1: return self._request(retry=retry + 1, outputFile=outputFile, **kwargs) errStr = "%s: %s" % (str(e), rawText) return S_ERROR(errStr) # --- TODO ---- # Rewrite this method if needed: # /Core/DISET/private/BaseClient.py # __delegateCredentials
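The module docstring notes that the number of retries could be configured in requests if needed. A minimal sketch of how that might look with a Session and urllib3's Retry; the retry counts and status codes below are illustrative assumptions, not DIRAC settings:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
# Retry failed connections up to 3 times with exponential backoff,
# also retrying on typical transient gateway errors.
retries = Retry(total=3, backoff_factor=0.5, status_forcelist=(502, 503, 504))
session.mount("https://", HTTPAdapter(max_retries=retries))
# Such a session could then stand in for the bare requests.post() calls
# in _request(), e.g.:
# session.post(url, data=kwargs, timeout=600, verify=cafile, cert=cert)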
yujikato/DIRAC
src/DIRAC/Core/Tornado/Client/private/TornadoBaseClient.py
Python
gpl-3.0
22,782
[ "DIRAC" ]
d33ee813234c908ad9b64cf708e64f4d448a72a761558a525ca900c5433b9316
import numpy as np import pyroms from pyroms_toolbox.BGrid_SODA import BGrid_SODA def get_nc_BGrid_SODA(grdfile, name='SODA_2.1.6_CORAL', area='regional', \ xrange=(185,340), yrange=(100, 210), ystart=245): """ grd = get_nc_BGrid_SODA(grdfile) Load Bgrid object for SODA 2.1.6 from netCDF file """ nc = pyroms.io.Dataset(grdfile) lon_t = nc.variables['LON'][:] lat_t = nc.variables['LAT'][:] # All the data have been interpolated at the t-point # lon_t = lon_uv, lat_t = lat_uv #lon_uv = 0.5 * (lon_t[1:] + lon_t[:-1]) #lat_uv = 0.5 * (lat_t[1:] + lat_t[:-1]) lon_uv = lon_t lat_uv = lat_t depth = nc.variables['DEPTH'][:] dep = nc.variables['DEPTH_bnds'][:] depth_bnds = np.zeros(depth.shape[0]+1) depth_bnds[:-1] = dep[:,0] depth_bnds[-1] = dep[-1,1] nc_mask_t = nc.variables['MASK_T'] mask_t = np.array(~nc_mask_t[:].mask, dtype='int') nc_mask_uv = nc.variables['MASK_UV'] mask_uv = np.array(~nc_mask_uv[:].mask, dtype='int') bottom = pyroms.utility.get_bottom(nc_mask_t[::-1,:,:], mask_t[0,:], spval=nc_mask_t.missing_value) nlev = mask_t.shape[0] bottom = (nlev-1) - bottom h = np.zeros(mask_t[0,:].shape) for i in range(mask_t[0,:].shape[1]): for j in range(mask_t[0,:].shape[0]): if mask_t[0,j,i] == 1: h[j,i] = depth_bnds[bottom[j,i]] if area == 'global': #add one row in the north and the south lon_t = lon_t[np.r_[0,:len(lon_t),-1]] lon_t[0] = lon_t[1] - (lon_t[2]-lon_t[1]) lon_t[-1] = lon_t[-2] + (lon_t[-2]-lon_t[-3]) lat_t = lat_t[np.r_[0,0,:len(lat_t),-1,-1]] lat_t[0] = -85 lat_t[1] = -80 lat_t[-2] = 90 lat_t[-1] = 91 lon_uv = lon_t lat_uv = lat_t mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:] mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]] mask_t[:,:,0] = mask_t[:,:,-2] mask_t[:,:,-1] = mask_t[:,:,1] mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:] mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]] mask_uv[:,:,0] = mask_uv[:,:,-2] mask_uv[:,:,-1] = mask_uv[:,:,1] h = h[np.r_[0,0,:np.size(h,0),-1,-1]] h = h[:,np.r_[0,:np.size(h,1),-1]] h[:,0] = h[:,-2] h[:,-1] = h[:,1] m,l = h.shape xrange=(1,l-2) yrange=(1,m-2) if area == 'npolar': #add one row in the north and the south lon_t = lon_t[np.r_[0,:len(lon_t),-1]] lon_t[0] = lon_t[1] - (lon_t[2]-lon_t[1]) lon_t[-1] = lon_t[-2] + (lon_t[-2]-lon_t[-3]) lat_t = lat_t[np.r_[0,0,:len(lat_t),-1,-1]] lat_t[0] = -85 lat_t[1] = -80 lat_t[-2] = 90 lat_t[-1] = 91 lon_uv = lon_t lat_uv = lat_t mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:] mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]] mask_t[:,:,0] = mask_t[:,:,-2] mask_t[:,:,-1] = mask_t[:,:,1] mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:] mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]] mask_uv[:,:,0] = mask_uv[:,:,-2] mask_uv[:,:,-1] = mask_uv[:,:,1] h = h[np.r_[0,0,:np.size(h,0),-1,-1]] h = h[:,np.r_[0,:np.size(h,1),-1]] h[:,0] = h[:,-2] h[:,-1] = h[:,1] m,l = h.shape xrange=(1,l-2) yrange=(ystart+2,m-2) return BGrid_SODA(lon_t, lat_t, lon_uv, lat_uv, mask_t, mask_uv, depth, depth_bnds, h, \ name, xrange, yrange)
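A small numeric illustration of the depth_bnds construction used above: the (N, 2) DEPTH_bnds array of per-cell top/bottom depths collapses into N+1 cell edges (the values here are made up):

import numpy as np

dep = np.array([[0.0, 10.0], [10.0, 25.0], [25.0, 50.0]])  # hypothetical DEPTH_bnds
depth_bnds = np.zeros(dep.shape[0] + 1)
depth_bnds[:-1] = dep[:, 0]  # top edge of each cell
depth_bnds[-1] = dep[-1, 1]  # bottom edge of the deepest cell
print(depth_bnds)            # [ 0. 10. 25. 50.]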
dcherian/pyroms
pyroms_toolbox/pyroms_toolbox/BGrid_SODA/get_nc_BGrid_SODA.py
Python
bsd-3-clause
3,646
[ "NetCDF" ]
b532e0100192b01a127df67dec28144448c931cdcb106c4f2cd926feed2c9aa7
import numpy as np from bayespy.utils import misc from bayespy.utils import linalg from .gaussian import GaussianMoments from .deterministic import Deterministic class ConcatGaussian(Deterministic): """Concatenate Gaussian vectors along the variable axis (not plate axis) NOTE: This concatenates on the variable axis! That is, the dimensionality of the resulting Gaussian vector is the sum of the dimensionalities of the input Gaussian vectors. TODO: Add support for Gaussian arrays and arbitrary concatenation axis. """ def __init__(self, *nodes, **kwargs): # Number of nodes to concatenate N = len(nodes) # This is stuff that will be useful when implementing arbitrary # concatenation. That is, first determine ndim. # # # Convert nodes to Gaussians (if they are not nodes, don't worry) # nodes_gaussian = [] # for node in nodes: # try: # node_gaussian = node._convert(GaussianMoments) # except AttributeError: # Moments.NoConverterError: # nodes_gaussian.append(node) # else: # nodes_gaussian.append(node_gaussian) # nodes = nodes_gaussian # # # Determine shape from the first Gaussian node # shape = None # for node in nodes: # try: # shape = node.dims[0] # except AttributeError: # pass # else: # break # if shape is None: # raise ValueError("Couldn't determine shape from the input nodes") # # ndim = len(shape) nodes = [self._ensure_moments(node, GaussianMoments, ndim=1) for node in nodes] D = sum(node.dims[0][0] for node in nodes) shape = (D,) self._moments = GaussianMoments(shape) self._parent_moments = [node._moments for node in nodes] # Make sure all parents are Gaussian vectors if any(len(node.dims[0]) != 1 for node in nodes): raise ValueError("Input nodes must be (Gaussian) vectors") self.slices = tuple(np.cumsum([0] + [node.dims[0][0] for node in nodes])) D = self.slices[-1] return super().__init__(*nodes, dims=((D,), (D, D)), **kwargs) def _compute_moments(self, *u_nodes): x = misc.concatenate(*[u[0] for u in u_nodes], axis=-1) xx = misc.block_diag(*[u[1] for u in u_nodes]) # Explicitly broadcast xx to plates of x x_plates = np.shape(x)[:-1] xx = np.ones(x_plates)[...,None,None] * xx # Compute the cross-covariance terms using the means of each variable # (because covariances are zero for factorized nodes in the VB # approximation) i_start = 0 for m in range(len(u_nodes)): i_end = i_start + np.shape(u_nodes[m][0])[-1] j_start = 0 for n in range(m): j_end = j_start + np.shape(u_nodes[n][0])[-1] xm_xn = linalg.outer(u_nodes[m][0], u_nodes[n][0], ndim=1) xx[...,i_start:i_end,j_start:j_end] = xm_xn xx[...,j_start:j_end,i_start:i_end] = misc.T(xm_xn) j_start = j_end i_start = i_end return [x, xx] def _compute_message_to_parent(self, i, m, *u_nodes): r = self.slices # Pick the proper parts from the message array m0 = m[0][...,r[i]:r[i+1]] m1 = m[1][...,r[i]:r[i+1],r[i]:r[i+1]] # Handle cross-covariance terms by using the mean of the covariate node for (j, u) in enumerate(u_nodes): if j != i: m0 = m0 + 2 * np.einsum( '...ij,...j->...i', m[1][...,r[i]:r[i+1],r[j]:r[j+1]], u[0] ) return [m0, m1]
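A pure-NumPy sketch of the moment assembly in _compute_moments for two factorized Gaussian inputs: first moments are concatenated, second moments go on the block diagonal, and the cross blocks use outer products of the means only (scipy.linalg.block_diag stands in here for bayespy's misc.block_diag):

import numpy as np
from scipy.linalg import block_diag

m1, m2 = np.array([1.0, 2.0]), np.array([3.0])
u1 = (m1, np.eye(2) + np.outer(m1, m1))          # (<x>, <xx^T>) of node 1
u2 = (m2, np.array([[2.0]]) + np.outer(m2, m2))  # (<x>, <xx^T>) of node 2

x = np.concatenate([u1[0], u2[0]])
xx = block_diag(u1[1], u2[1])
d = len(m1)
# Cross-covariances are zero between factorized nodes in the VB
# approximation, so the cross blocks reduce to outer products of means.
xx[:d, d:] = np.outer(u1[0], u2[0])
xx[d:, :d] = np.outer(u2[0], u1[0])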
jluttine/bayespy
bayespy/inference/vmp/nodes/concat_gaussian.py
Python
mit
3,889
[ "Gaussian" ]
72dd48b9c71ffbc7f7062123b5fbc65ee7272d180357aa6bd104f249d28cf4d4
# -*- coding: utf-8 -*- """ ORCA Open Remote Control Application Copyright (C) 2013-2020 Carsten Thielepape Please contact me by : http://www.orca-remote.org/ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import annotations from typing import Dict from typing import List from typing import Union import re import select import socket import struct import threading from time import sleep from kivy.clock import Clock from kivy.uix.button import Button from ORCA.scripts.BaseScriptSettings import cBaseScriptSettings from ORCA.scripttemplates.Template_Discover import cDiscoverScriptTemplate from ORCA.ui.ShowErrorPopUp import ShowMessagePopUp from ORCA.utils.TypeConvert import ToBool from ORCA.utils.TypeConvert import ToFloat from ORCA.utils.TypeConvert import ToUnicode from ORCA.vars.QueryDict import TypedQueryDict from ORCA.utils.FileName import cFileName import ORCA.Globals as Globals ''' <root> <repositorymanager> <entry> <name>iTach Discover</name> <description language='English'>Discover iTach devices</description> <description language='German'>Erkennt iTach Geräte über Beacon</description> <author>Carsten Thielepape</author> <version>5.0.4</version> <minorcaversion>5.0.4</minorcaversion> <sources> <source> <local>$var(APPLICATIONPATH)/scripts/discover/discover_itach</local> <sourcefile>$var(REPOSITORYWWWPATH)/scripts/discover_itach.zip</sourcefile> <targetpath>scripts/discover</targetpath> </source> </sources> <skipfiles> </skipfiles> </entry> </repositorymanager> </root> ''' class cScript(cDiscoverScriptTemplate): """ WikiDoc:Doc WikiDoc:Context:Scripts WikiDoc:Page:Scripts-Discover-iTach WikiDoc:TOCTitle:Discover Itach = Script Discover iTach = The iTach discover script discovers iTach infrared transmitter devices. Not reliable for now. <div style="overflow:auto; "> {| class="wikitable" ! align="left" | Attribute ! 
align="left" | Description |- |timeout |Specifies the timeout for discovery |}</div> WikiDoc:End """ class cScriptSettings(cBaseScriptSettings): def __init__(self,oScript:cScript): super().__init__(oScript) self.aIniSettings.fTimeOut = 30.0 def __init__(self): super().__init__() self.fTimeOut:float = 3 self.uSubType:str = u'iTach (Global Cache)' self.aResults:List[TypedQueryDict] = [] self.aThreads:List[cThread_Discover_iTach] = [] self.iDiscoverCount:int = 0 self.iMaxDiscoverCount:int = 3 self.uIPVersion = u'IPv4Only' self.dReq:TypedQueryDict = TypedQueryDict() self.uScriptTitle = u"Global Cache:iTach Discovery" def __del__(self): self.StopThread([]) def DeInit(self,**kwargs) -> None: super().DeInit(**kwargs) self.StopThread([]) def Init(self,uObjectName:str,oFnScript:Union[cFileName,None]=None) -> None: """ Init function for the script :param str uObjectName: The name of the script (to be passed to all scripts) :param cFileName oFnScript: The file of the script (to be passed to all scripts) """ # iTach just sends beacons to the network, so we start listening immediately super().Init(uObjectName=uObjectName, oFnObject=oFnScript) self.oObjectConfig.dDefaultSettings['TimeOut']['active'] = "enabled" self.StartThread() def StartThread(self) -> None: if self.iDiscoverCount < self.iMaxDiscoverCount: self.iDiscoverCount += 1 if self.uIPVersion == "IPv4Only" or self.uIPVersion == "All" or (self.uIPVersion == "Auto" and Globals.uIPAddressV6 == ""): self.ShowInfo(uMsg="Start Discover Thread V4") oThread = cThread_Discover_iTach(dReq=self.dReq,uIPVersion=self.uIPVersion, fTimeOut=self.fTimeOut, oCaller=self) self.aThreads.append(oThread) self.aThreads[-1].start() Clock.schedule_once(self.StopThread, int(self.fTimeOut)*4) # noinspection PyUnusedLocal def StopThread(self,*largs) -> None: for oThread in self.aThreads: oThread.Close() def GetHeaderLabels(self) -> List[str]: return ['$lvar(5029)','$lvar(5034)','$lvar(5031)','Revision'] def ListDiscover(self) -> None: self.SendStartNotification() Clock.schedule_once(self.ListDiscover_Step2, 0) return def ListDiscover_Step2(self, *largs): oSetting:cBaseScriptSettings = self.GetSettingObjectForConfigName(uConfigName=self.uConfigName) if len(self.aResults)>0: for dDevice in self.aResults: Globals.oNotifications.SendNotification(uNotification="DISCOVER_SCRIPTFOUND",**{"script":self,"scriptname":self.uObjectName,"line":[dDevice.uIP , dDevice.uUUID , dDevice.uModel ,dDevice.uRevision ],"device":dDevice}) self.SendEndNotification() return self.ClockCheck=Clock.schedule_interval(self.CheckFinished,0.1) def CreateDiscoverList_ShowDetails(self,oButton:Button) -> None: dDevice:TypedQueryDict = oButton.dDevice uText= u"$lvar(5029): %s \n"\ u"$lvar(5034): %s \n"\ u"$lvar(5031): %s \n"\ u"\n"\ u"Revision: %s" % (dDevice.uIP,dDevice.uUUID,dDevice.uModel,dDevice.uRevision) ShowMessagePopUp(uMessage=uText) def Discover(self,**kwargs) -> Dict[str,str]: uConfigName:str = kwargs.get('configname',self.uConfigName) oSetting:cBaseScriptSettings = self.GetSettingObjectForConfigName(uConfigName=uConfigName) self.fTimeOut:float = ToFloat(kwargs.get('timeout',oSetting.aIniSettings.fTimeOut)) self.dReq.uModels = kwargs.get('models',"") self.uIPVersion:str = kwargs.get('ipversion',"IPv4Only") self.bDoNotWait = ToBool(kwargs.get('donotwait',"0")) self.ShowDebug(uMsg=u'Try to discover iTach device') del self.aResults[:] try: oThread:cThread_Discover_iTach if len(self.aThreads) == 0: self.StartThread() if not self.aThreads[0].is_alive(): self.StartThread() for oT in self.aThreads: 
oT.join() self.SendEndNotification() if len(self.aResults)>0: return {"Host":self.aResults[0].uFoundIP} if len(self.aResults)>0: #for dDevice in self.aResults: # Globals.oNotifications.SendNotification(uNotification="DISCOVER_SCRIPTFOUND",**{"script":self,"scriptname":self.uObjectName,"line":[dDevice.uIP , dDevice.uUUID , dDevice.uModel ,dDevice.uRevision ],"device":dRet}) return {'Model': self.aResults[0].uModel, 'Host': self.aResults[0].uIP, 'Port': '4998', 'Exception': None} else: self.ShowWarning(uMsg='No iTach device found') except Exception as e: self.ShowError(uMsg=u'No iTach device found, possible timeout',oException=e) return {"Host":""} def OnPause(self,**kwargs) -> None: cDiscoverScriptTemplate.OnPause(self) self.StopThread() @classmethod def GetConfigJSONforParameters(cls,dDefaults:Dict) -> Dict[str,Dict]: return {"TimeOut":{"type": "numericfloat", "order":0, "title": "$lvar(6019)", "desc": "$lvar(6020)","key": "timeout", "default":"2.0"}} class cThread_Discover_iTach(threading.Thread): oWaitLock = threading.Lock() def __init__(self, dReq:TypedQueryDict,uIPVersion:str, fTimeOut:float,oCaller:cScript): threading.Thread.__init__(self) self.uIPVersion:str = uIPVersion self.oCaller:cScript = oCaller self.fTimeOut:float = fTimeOut self.dReq:TypedQueryDict = dReq self.oSocket:socket.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.bStopThreadEvent:bool = False def run(self) -> None: self.bStopThreadEvent = False self.oCaller.ShowDebug(uMsg=u'iTach Start Discover Thread') p = re.compile((r'AMXB<-UUID=GlobalCache_(?P<UUID>.{12}).+' r'Model=iTach(?P<Model>.+?)>.+' r'Revision=(?P<Revision>.+?)>.+' r'Config-URL=http://(?P<IP>.+?)>.+' r'PCB_PN=(?P<PN>.+?)>.+' r'Status=(?P<Status>.+?)>')) try: self.oSocket.settimeout(self.fTimeOut) self.oSocket.bind(('', 9131)) group = socket.inet_aton('239.255.250.250') mreq = struct.pack('4sL', group, socket.INADDR_ANY) self.oSocket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) except Exception as e: self.oCaller.ShowError(uMsg=" -3: Error occured",oException= e) try: while not self.bStopThreadEvent: if self.oSocket is not None: aReady:List = select.select([self.oSocket], [], [],int(self.fTimeOut)) # the first element of the returned list is a list of readable sockets if aReady[0]: byData:bytes = self.oSocket.recv(1024) match = p.match(ToUnicode(byData)) if match: diTachEntry = TypedQueryDict() diTachEntry.uIP = match.group('IP') diTachEntry.uUUID = match.group('UUID') diTachEntry.uModel = match.group('Model') diTachEntry.uRevision = match.group('Revision') diTachEntry.uPartNumber = match.group('PN') diTachEntry.uStatus = match.group('Status') cThread_Discover_iTach.oWaitLock.acquire() self.oCaller.aResults.append(diTachEntry) cThread_Discover_iTach.oWaitLock.release() self.oCaller.ShowInfo(uMsg="iTach-Discover: iTach found! IP: %s, UUID:%s, Model:%s, Revision:%s, Part number:%s, Status:%s" % (diTachEntry.uIP , diTachEntry.uUUID , diTachEntry.uModel , diTachEntry.uRevision , diTachEntry.uPartNumber , diTachEntry.uStatus )) # by now, we finished if we found one device break except Exception as e: self.oCaller.ShowError(uMsg="Error occured",oException=e) finally: if len(self.oCaller.aResults)==0: self.oCaller.ShowDebug (uMsg=u'Stop Discover Thread, nothing found') else: self.oCaller.ShowDebug (uMsg=u'Stop Discover Thread, device(s) found') self.oSocket.close() return def Close(self): self.bStopThreadEvent=True
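For reference, a minimal standalone sketch of the beacon-listening technique the script above relies on: join the AMX multicast group 239.255.250.250 on UDP port 9131 (both taken from the script) and wait for a single beacon datagram. The function name, timeout default, and print output are illustrative, not part of the ORCA API.

import select
import socket
import struct

def wait_for_amx_beacon(timeout=3.0):
    """Block for up to `timeout` seconds and return one raw beacon string, or ''."""
    oSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    oSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    oSock.bind(('', 9131))  # beacon port used by iTach devices
    mreq = struct.pack('4sL', socket.inet_aton('239.255.250.250'), socket.INADDR_ANY)
    oSock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    try:
        aReady = select.select([oSock], [], [], timeout)
        if aReady[0]:
            # Beacons look like 'AMXB<-UUID=GlobalCache_...>' and can be parsed
            # with the regular expression compiled in cThread_Discover_iTach.run().
            return oSock.recv(1024).decode('utf-8', errors='replace')
        return ''
    finally:
        oSock.close()

if __name__ == '__main__':
    print(wait_for_amx_beacon() or 'no beacon received')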
thica/ORCA-Remote
src/scripts/discover/discover_itach/script.py
Python
gpl-3.0
12,378
[ "ORCA" ]
7a623842a3379ab0e3c8631266193efb50b3b1d53034c7541051bcdc9106ddfe
"""A kernel manager for multiple kernels Authors: * Brian Granger """ #----------------------------------------------------------------------------- # Copyright (C) 2013 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import absolute_import import os import uuid import zmq from IPython.config.configurable import LoggingConfigurable from IPython.utils.importstring import import_item from IPython.utils.traitlets import ( Instance, Dict, Unicode, Any, DottedObjectName ) from IPython.utils.py3compat import unicode_type #----------------------------------------------------------------------------- # Classes #----------------------------------------------------------------------------- class DuplicateKernelError(Exception): pass def kernel_method(f): """decorator for proxying MKM.method(kernel_id) to individual KMs by ID""" def wrapped(self, kernel_id, *args, **kwargs): # get the kernel km = self.get_kernel(kernel_id) method = getattr(km, f.__name__) # call the kernel's method r = method(*args, **kwargs) # last thing, call anything defined in the actual class method # such as logging messages f(self, kernel_id, *args, **kwargs) # return the method result return r return wrapped class MultiKernelManager(LoggingConfigurable): """A class for managing multiple kernels.""" kernel_manager_class = DottedObjectName( "IPython.kernel.ioloop.IOLoopKernelManager", config=True, help="""The kernel manager class. This is configurable to allow subclassing of the KernelManager for customized behavior. """ ) def _kernel_manager_class_changed(self, name, old, new): self.kernel_manager_factory = import_item(new) kernel_manager_factory = Any(help="this is kernel_manager_class after import") def _kernel_manager_factory_default(self): return import_item(self.kernel_manager_class) context = Instance('zmq.Context') def _context_default(self): return zmq.Context.instance() connection_dir = Unicode('') _kernels = Dict() def list_kernel_ids(self): """Return a list of the kernel ids of the active kernels.""" # Create a copy so we can iterate over kernels in operations # that delete keys. return list(self._kernels.keys()) def __len__(self): """Return the number of running kernels.""" return len(self.list_kernel_ids()) def __contains__(self, kernel_id): return kernel_id in self._kernels def start_kernel(self, **kwargs): """Start a new kernel. The caller can pick a kernel_id by passing one in as a keyword arg, otherwise one will be picked using a uuid. To silence the kernel's stdout/stderr, call this using:: km.start_kernel(stdout=PIPE, stderr=PIPE) """ kernel_id = kwargs.pop('kernel_id', unicode_type(uuid.uuid4())) if kernel_id in self: raise DuplicateKernelError('Kernel already exists: %s' % kernel_id) # kernel_manager_factory is the constructor for the KernelManager # subclass we are using. It can be configured as any Configurable, # including things like its transport and ip. 
km = self.kernel_manager_factory(connection_file=os.path.join( self.connection_dir, "kernel-%s.json" % kernel_id), parent=self, autorestart=True, log=self.log ) km.start_kernel(**kwargs) self._kernels[kernel_id] = km return kernel_id @kernel_method def shutdown_kernel(self, kernel_id, now=False): """Shutdown a kernel by its kernel uuid. Parameters ========== kernel_id : uuid The id of the kernel to shutdown. now : bool Should the kernel be shutdown forcibly using a signal. """ self.log.info("Kernel shutdown: %s" % kernel_id) self.remove_kernel(kernel_id) def remove_kernel(self, kernel_id): """remove a kernel from our mapping. Mainly so that a kernel can be removed if it is already dead, without having to call shutdown_kernel. The kernel object is returned. """ return self._kernels.pop(kernel_id) def shutdown_all(self, now=False): """Shutdown all kernels.""" for kid in self.list_kernel_ids(): self.shutdown_kernel(kid, now=now) @kernel_method def interrupt_kernel(self, kernel_id): """Interrupt (SIGINT) the kernel by its uuid. Parameters ========== kernel_id : uuid The id of the kernel to interrupt. """ self.log.info("Kernel interrupted: %s" % kernel_id) @kernel_method def signal_kernel(self, kernel_id, signum): """Sends a signal to the kernel by its uuid. Note that since only SIGTERM is supported on Windows, this function is only useful on Unix systems. Parameters ========== kernel_id : uuid The id of the kernel to signal. """ self.log.info("Signaled Kernel %s with %s" % (kernel_id, signum)) @kernel_method def restart_kernel(self, kernel_id, now=False): """Restart a kernel by its uuid, keeping the same ports. Parameters ========== kernel_id : uuid The id of the kernel to restart. """ self.log.info("Kernel restarted: %s" % kernel_id) @kernel_method def is_alive(self, kernel_id): """Is the kernel alive. This calls KernelManager.is_alive() which calls Popen.poll on the actual kernel subprocess. Parameters ========== kernel_id : uuid The id of the kernel. """ def _check_kernel_id(self, kernel_id): """check that a kernel id is valid""" if kernel_id not in self: raise KeyError("Kernel with id not found: %s" % kernel_id) def get_kernel(self, kernel_id): """Get the single KernelManager object for a kernel by its uuid. Parameters ========== kernel_id : uuid The id of the kernel. """ self._check_kernel_id(kernel_id) return self._kernels[kernel_id] @kernel_method def add_restart_callback(self, kernel_id, callback, event='restart'): """add a callback for the KernelRestarter""" @kernel_method def remove_restart_callback(self, kernel_id, callback, event='restart'): """remove a callback for the KernelRestarter""" @kernel_method def get_connection_info(self, kernel_id): """Return a dictionary of connection data for a kernel. Parameters ========== kernel_id : uuid The id of the kernel. Returns ======= connection_dict : dict A dict of the information needed to connect to a kernel. This includes the ip address and the integer port numbers of the different channels (stdin_port, iopub_port, shell_port, hb_port). """ @kernel_method def connect_iopub(self, kernel_id, identity=None): """Return a zmq Socket connected to the iopub channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_shell(self, kernel_id, identity=None): """Return a zmq Socket connected to the shell channel.
Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_stdin(self, kernel_id, identity=None): """Return a zmq Socket connected to the stdin channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_hb(self, kernel_id, identity=None): """Return a zmq Socket connected to the hb channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """
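The `kernel_method` decorator above is a thin proxy: the decorated manager method keeps only its logging body, while the real work is forwarded by ID to the per-kernel `KernelManager` object. A tiny self-contained sketch of that pattern (class and method names here are illustrative, not IPython's):

def proxy(f):
    def wrapped(self, kernel_id, *args, **kwargs):
        # forward to the per-kernel object, then run the (logging) body
        result = getattr(self.get_kernel(kernel_id), f.__name__)(*args, **kwargs)
        f(self, kernel_id, *args, **kwargs)
        return result
    return wrapped

class Manager:
    def __init__(self, kernels):
        self._kernels = kernels
    def get_kernel(self, kernel_id):
        return self._kernels[kernel_id]
    @proxy
    def interrupt_kernel(self, kernel_id):
        print("interrupted", kernel_id)

class FakeKernel:
    def interrupt_kernel(self):
        return "SIGINT sent"

print(Manager({"k0": FakeKernel()}).interrupt_kernel("k0"))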
WillisXChen/django-oscar
oscar/lib/python2.7/site-packages/IPython/kernel/multikernelmanager.py
Python
bsd-3-clause
9,183
[ "Brian" ]
0e8db364ae5ab08ae4715fdb9c4941ae64e20013effc689f5c1b60a9c5c77995
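A short usage sketch for the manager above, assuming an environment where IPython 1.x/2.x is installed so that `IPython.kernel.multikernelmanager` is importable; the `PIPE` silencing follows the `start_kernel` docstring:

from subprocess import PIPE
from IPython.kernel.multikernelmanager import MultiKernelManager

mkm = MultiKernelManager()
kid = mkm.start_kernel(stdout=PIPE, stderr=PIPE)  # silence the kernel, per the docstring
print(kid in mkm, len(mkm))                       # membership test and running-kernel count
print(mkm.get_connection_info(kid))               # proxied to the single KernelManager
mkm.restart_kernel(kid)
mkm.shutdown_all()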
# coding=utf-8 # Copyright 2022 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Checkpointing utilities for the ViT experiments. Several functions in this file were ported from https://github.com/google-research/vision_transformer. """ import collections import dataclasses import io from typing import Any, Iterable, MutableMapping, Optional from absl import logging from clu import parameter_overview import flax import flax.jax_utils as flax_utils import jax import jax.numpy as jnp import ml_collections import numpy as np import scipy from tensorflow.io import gfile Params = MutableMapping[str, Any] @dataclasses.dataclass class CheckpointData: """Container class for data stored and loaded into checkpoints.""" train_loop_rngs: jnp.ndarray optimizer: flax.optim.Optimizer accumulated_train_time: float fixed_model_states: Optional[Params] = None def _convert_and_recover_bfloat16(x): """Converts to JAX arrays, while correctly loading any bfloat16 arrays.""" if hasattr(x, "dtype") and x.dtype.type is np.void: assert x.itemsize == 2, "Unknown dtype!" return jnp.array(x.view(jnp.bfloat16)) else: return jnp.array(x) def _recover_tree(keys, values): """Recovers a tree as a nested dict from flat names and values. This function is useful for analyzing checkpoints without needing access to the exact source code of the experiment. In particular, it can be used to extract and reuse various subtrees of the checkpoint, e.g. the subtree of parameters. Args: keys: A list of keys, where "/" is used as separator between nodes. values: A list of leaf values. Returns: A JAX pytree whose structure was recovered from the naming of the keys. """ tree = {} sub_trees = collections.defaultdict(list) for k, v in zip(keys, values): if "/" not in k: tree[k] = v else: k_left, k_right = k.split("/", 1) sub_trees[k_left].append((k_right, v)) for k, kv_pairs in sub_trees.items(): k_subtree, v_subtree = zip(*kv_pairs) tree[k] = _recover_tree(k_subtree, v_subtree) return tree def load_checkpoint(tree, path): """Loads JAX pytrees that were stored on disk in a NumPy `.npz` file. Args: tree: Optional JAX pytree to be restored. If None, then the tree will be recovered from the naming scheme used within the checkpoint. path: A path to the checkpoint. Returns: A JAX pytree with the same structure as `tree`, but with the leaf values restored from the saved checkpoint. """ with gfile.GFile(path, "rb") as f: data = f.read() keys, values = zip( *list(np.load(io.BytesIO(data), allow_pickle=False).items())) # NOTE: NumPy loses any bfloat16 dtypes when saving, so we recover them here.
values = jax.tree_util.tree_map(_convert_and_recover_bfloat16, values) if tree: treedef = jax.tree_util.tree_structure(tree) tree = jax.tree_util.tree_unflatten(treedef, values) else: tree = _recover_tree(keys, values) return tree def _traverse_with_names(tree): """Traverses nested dicts/dataclasses and emits (leaf_name, leaf_val).""" if dataclasses.is_dataclass(tree): tree = flax.serialization.to_state_dict(tree) if isinstance(tree, dict) or isinstance(tree, flax.core.FrozenDict): keys = sorted(tree.keys()) for key in keys: for path, v in _traverse_with_names(tree[key]): yield (key + "/" + path).rstrip("/"), v else: yield "", tree def _tree_flatten_with_names(tree): """Populates tree_flatten with leaf names. This function populates the output of jax.tree_util.tree_flatten with leaf names, using a custom traversal that produces names. Args: tree: A JAX PyTree. Returns: A list of values with names: [(name, value), ...]. """ vals, tree_def = jax.tree_flatten(tree) # "Fake" token tree that is use to track jax internal tree traversal and # adjust our custom tree traversal to be compatible with it. tokens = range(len(vals)) token_tree = tree_def.unflatten(tokens) val_names, perm = zip(*_traverse_with_names(token_tree)) inv_perm = np.argsort(perm) # Custom traversal should visit the same number of leaves. assert len(val_names) == len(vals) return [(val_names[i], v) for i, v in zip(inv_perm, vals)], tree_def def save_checkpoint(tree: Params, path: str, step_for_copy: Optional[int] = None) -> None: """Saves the values of JAX pytrees to disk in a NumPy `.npz` file. Args: tree: A JAX pytree to be saved. path: A path to save the checkpoint. step_for_copy: Optional integer that, when not None, will be used to save a copy of the checkpoint with the name `path-{step_for_copy}`. """ # NOTE: In general, this could be greatly simplified as follows. However, we # currently need to store the leaf names as well in order to be able to load # and reconstruct the tree directly from the checkpoint when initialized a # subset of a model from a pretrained model for fine tuning. # ``` # values, _ = jax.tree_util.tree_flatten(tree) # io_buffer = io.BytesIO() # np.savez(io_buffer, *values) # ``` names_and_vals, _ = _tree_flatten_with_names(tree) io_buffer = io.BytesIO() np.savez(io_buffer, **{k: v for k, v in names_and_vals}) # In order to be robust to interruptions during saving, we first save the # checkpoint to a temporary file, and then rename it to the actual path name. path_tmp = path + "-TEMPORARY" with gfile.GFile(path_tmp, "wb") as f: f.write(io_buffer.getvalue()) gfile.rename(path_tmp, path, overwrite=True) if step_for_copy is not None: gfile.copy(path, f"{path}-{step_for_copy:09d}", overwrite=True) def checkpoint_trained_model( checkpoint_data: CheckpointData, path: str, step_for_copy: Optional[int] = None) -> None: """Saves all information pertaining to a trained model in .npz format. Args: checkpoint_data: CheckpointData instance. path: A path to save the checkpoint. step_for_copy: Optional integer that, when not None, will be used to save a copy of the checkpoint with the name `path-{step_for_copy}`. """ # TODO(zmariet, dusenberrymw): Remove intermediate `checkpoint_extra` dict. 
tree = dict( opt=checkpoint_data.optimizer, extra=dict( rngs_loop=checkpoint_data.train_loop_rngs, accum_train_time=checkpoint_data.accumulated_train_time), ) if checkpoint_data.fixed_model_states is not None: tree["states"] = checkpoint_data.fixed_model_states save_checkpoint(tree, path, step_for_copy) def _flatten_jax_params_dict(d: Params, parent_key: str = "", sep: str = "/") -> Params: """Flattens a dictionary, keeping empty leaves.""" items = [] for k, v in d.items(): path = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.Mapping): items.extend(_flatten_jax_params_dict(v, path, sep=sep).items()) else: items.append((path, v)) # Keeps the empty dict if it was set explicitly. if parent_key and not d: items.append((parent_key, {})) return dict(items) def _unflatten_jax_params_dict(flat_params: Params, sep: str = "/") -> Params: """Unflattens a dictionary that maps strings to non-dictionaries. Args: flat_params: A dictionary mapping strings to non-dictionary values. sep: Separator indicating key hierarchy in `flat_params`. For example, unflattening {"a/b": 1} with separator "/" will yield {"a": {"b": 1}}. Returns: A dictionary mapping strings to arbitrary values (including dictionaries). """ tuple_to_value = {tuple(k.split(sep)): v for k, v in flat_params.items()} return flax.traverse_util.unflatten_dict(tuple_to_value) def _tree_map_with_names(f, tree, *rest): """Performs a tree map with a filter on the leaf path name. Args: f: A function accepting a name (path-like "a/b/c"), a tree, and an optional additional list of trees. tree: The tree of parameters for which `f` should be applied. *rest: More trees of the exact same structure. Returns: A tree identical in structure to `tree` and `*rest` but with the leaves the result of calling `f` on corresponding name/leaves in `tree` and `*rest`. """ names_and_vals, tree_def = _tree_flatten_with_names(tree) names, vals = zip(*names_and_vals) rest_vals = [list(zip(*_tree_flatten_with_names(t)[0]))[1] for t in rest] vals = [f(*name_and_vals) for name_and_vals in zip(names, vals, *rest_vals)] return tree_def.unflatten(vals) def _reinit(restored_params, init_params, to_reinit): """Reinitializes a subset of the parameters in the restored parameter tree.""" f = lambda name, restored, init: init if name in to_reinit else restored return _tree_map_with_names(f, restored_params, init_params) def restore_from_pretrained_params(init_params, loaded_params, model_representation_size, model_classifier, reinit_params): """Initializes (some) model parameters based on pretrained parameters. Args: init_params: Tree of (possibly randomly) initialized parameters for the model. The structure will be kept, and a subset of the values will be replaced with values loaded from the pretrained checkpoint. loaded_params: Tree with pretrained weights. model_representation_size: Optional integer representation size hyperparameter for the model. If None, then the representation layer in the checkpoint will be removed (if present). model_classifier: String containing the classifier hyperparameter used for the model. reinit_params: List of parameter names to reinitialize. Returns: A tree of parameters with the same structure as `init_params`, but loaded with pretrained weights in `loaded_params` and adapted accordingly. 
""" if "opt" in loaded_params: loaded_params = loaded_params["opt"]["target"] restored_params = adapt_upstream_architecture(init_params, loaded_params) # The following allows implementing fine-tuning head variants depending on the # value of `representation_size` in the fine-tuning job: # - `None`: drop the whole head and attach a nn.Linear. # - Same number as in pre-training: keep the head but reset the last # layer (logits) for the new task. if model_representation_size is None: if "pre_logits" in restored_params: logging.info("load_pretrained: drop-head variant") del restored_params["pre_logits"] if reinit_params: restored_params = _reinit(restored_params, init_params, reinit_params) if "posembed_input" in restored_params.get("Transformer", {}): # Rescale the grid of position embeddings. Param shape is (1,N,1024). posemb = restored_params["Transformer"]["posembed_input"]["pos_embedding"] posemb_new = init_params["Transformer"]["posembed_input"]["pos_embedding"] if posemb.shape != posemb_new.shape: logging.info("load_pretrained: resized variant: %s to %s", posemb.shape, posemb_new.shape) ntok_new = posemb_new.shape[1] if model_classifier == "token": posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:] ntok_new -= 1 else: posemb_tok, posemb_grid = posemb[:, :0], posemb[0] gs_old = int(np.sqrt(len(posemb_grid))) gs_new = int(np.sqrt(ntok_new)) logging.info("load_pretrained: grid-size from %s to %s", gs_old, gs_new) posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1) zoom = (gs_new / gs_old, gs_new / gs_old, 1) posemb_grid = scipy.ndimage.zoom(posemb_grid, zoom, order=1) posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1) posemb = jnp.array(np.concatenate([posemb_tok, posemb_grid], axis=1)) restored_params["Transformer"]["posembed_input"]["pos_embedding"] = posemb return restored_params def maybe_load_checkpoint(train_loop_rngs: jnp.ndarray, save_checkpoint_path: str, init_optimizer: flax.optim.Optimizer, init_params: Params, init_fixed_model_states: Optional[Params], default_reinit_params: Iterable[str], config: ml_collections.ConfigDict) -> CheckpointData: """Loads a model from an existing checkpoint if so indicated by the config. Whether to resume training, initialize from a previous checkpoint, or do nothing is set by the `config` ConfigDict, based on the existence of fields `resume` (resume training) or `model_init` (initialize from pretrained checkpoint). When resuming training, both the model weights and optimizer state (including the training step) are restored. When initializing, only the model parameters are updated. The way in which initializing is prioritized in the following way: 1. Always resume from an existing checkpoint, e.g. resume a finetune job. 2. Resume from a previous checkpoint, e.g. start a cooldown training job. 3. Initialize model from something, e,g, start a fine-tuning job. 4. Do nothing (training from scratch). Args: train_loop_rngs: unreplicated jax.PRNGKey. save_checkpoint_path: File pointing to pretrained checkpoint stored in NumPy `.npz` file. init_optimizer: flax.Optimizer to be updated. init_params: Tree of (possibly randomly) initialized parameters for the model. init_fixed_model_states: Optional pytree of non-trainable parameters. Currently only passed when using SNGP models. default_reinit_params: List of parameter names to reinitialize if not provided by the config file. config: ConfigDict which contains fields indicating if, and how, to load an available checkpoint into the optimizer. 
If resuming from a previous checkpoint *to start a cooldown job*, the flag `resume` must be set. If initializing a (subset of) model parameters to start a file tuning job, fields `model_init`, `representation_size` and `classifier` must be set. Returns: A CheckpointData instance containing a new rng key, the new optimizer state, the new untrainable parameters (if resuming from a checkpoint), and a dictionary of information about the reloaded state. """ optimizer = init_optimizer fixed_model_states = init_fixed_model_states accum_train_time = 0.0 # TODO(dusenberrymw, zmariet): Directly return an unreplicated rng and the # cumulative training time instead of storing them in `checkpoint_extra`. checkpoint_extra = dict( accum_train_time=accum_train_time, rngs_loop=flax_utils.replicate(train_loop_rngs)) # Parse config file to figure out which setting we are in. resume_from_checkpoint = ( (save_checkpoint_path is not None and gfile.exists(save_checkpoint_path)) or config.get("resume") is not None) reinitialize_model = config.get( "model_init") is not None and not resume_from_checkpoint if resume_from_checkpoint: logging.info("Resume training from checkpoint...") # Always prioritize loading from a checkpoint from the current training job. if save_checkpoint_path and gfile.exists(save_checkpoint_path): resume_checkpoint_path = save_checkpoint_path # Otherwise, we reload from a previous checkpoint provided by the config. else: resume_checkpoint_path = config.resume checkpoint_tree = {"opt": init_optimizer, "extra": checkpoint_extra} if init_fixed_model_states is not None: checkpoint_tree["states"] = init_fixed_model_states checkpoint = load_checkpoint(checkpoint_tree, resume_checkpoint_path) optimizer, checkpoint_extra = checkpoint["opt"], checkpoint["extra"] fixed_model_states = checkpoint.get("states", None) elif reinitialize_model: logging.info("Initialize model...") reinit_params = config.get("model_reinit_params", default_reinit_params) logging.info("Reinitializing these parameters: %s", reinit_params) loader = lambda path: load_checkpoint(tree=None, path=path) loaded_params = loader(config.model_init) loaded_params = restore_from_pretrained_params( init_params=init_params, loaded_params=loaded_params, model_representation_size=config.model.representation_size, model_classifier=config.model.classifier, reinit_params=reinit_params) optimizer = init_optimizer.replace(target=loaded_params) if jax.process_index() == 0: logging.info("Restored parameter overview:") parameter_overview.log_parameter_overview(loaded_params) else: logging.info("No checkpoint to recover from; using default initialization.") return CheckpointData( optimizer=optimizer, fixed_model_states=fixed_model_states, train_loop_rngs=checkpoint_extra["rngs_loop"], accumulated_train_time=checkpoint_extra["accum_train_time"]) def adapt_upstream_architecture( init_params: Params, loaded_params: Params) -> Params: """Align upstream parameters with those expected by the current architecture. This function converts the loaded architecture into the architecture expected by `init_params` when using a pretrained model of a different architecture (e.g., finetuning an SGNP model based on an upstream deterministic model). This function relies upon the fact that the parameters in `loaded_params` that should be kept will have the same name in `init_params`. If that is not the case, loaded parameter values will be lost. Args: init_params: Tree of (possibly randomly) initialized parameters for the model. 
loaded_params: Tree of parameters loaded from a checkpoint (in practice, the upstream model). Returns: A tree with similar structure to that of `init_params`, where values match those of `loaded_params` when possible. """ loaded_flat = _flatten_jax_params_dict(loaded_params) init_flat = _flatten_jax_params_dict(init_params) missing_keys = set(init_flat.keys()) - set(loaded_flat.keys()) extra_keys = set(loaded_flat.keys()) - set(init_flat.keys()) logging.info("Deleting %s from checkpoint architecture.", extra_keys) logging.info("Adding %s from checkpoint architecture.", missing_keys) # Remove extra parameters. for extra_key in extra_keys: del loaded_flat[extra_key] # Add missing parameters using initialized values. for missing_key in missing_keys: loaded_flat[missing_key] = init_flat[missing_key] return _unflatten_jax_params_dict(loaded_flat)
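As a side note, the position-embedding resize inside restore_from_pretrained_params boils down to a bilinear zoom of the 2-D embedding grid. A small sketch of just that step, with illustrative sizes:

import numpy as np
import scipy.ndimage

gs_old, gs_new, d = 4, 8, 16
posemb_grid = np.random.randn(gs_old * gs_old, d)   # flattened (N, d) grid
grid = posemb_grid.reshape(gs_old, gs_old, -1)      # back to 2-D layout
zoom = (gs_new / gs_old, gs_new / gs_old, 1)        # scale rows/cols, keep d
resized = scipy.ndimage.zoom(grid, zoom, order=1)   # order=1: bilinear, as above
posemb_new = resized.reshape(1, gs_new * gs_new, -1)
print(posemb_new.shape)                             # (1, 64, 16)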
google/uncertainty-baselines
baselines/jft/checkpoint_utils.py
Python
apache-2.0
19,080
[ "VisIt" ]
5002fc5084faac011fd31d1606b1f9566f819b35e7254edcd2d863b96b782ec3
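The save/load pair above stores each leaf of the pytree under a "/"-separated name in a single `.npz` archive and later rebuilds the nested dict from those names (`_recover_tree`). A self-contained sketch of that round trip using plain dicts and NumPy, with illustrative parameter names:

import io
import numpy as np

def flatten(tree, prefix=''):
    # Yields ('a/b/c', leaf) pairs, mirroring the "/" naming used above.
    for key, value in tree.items():
        name = prefix + '/' + key if prefix else key
        if isinstance(value, dict):
            yield from flatten(value, name)
        else:
            yield name, value

def recover(pairs):
    # Rebuilds the nested dict from flat names (same idea as _recover_tree).
    tree = {}
    for name, value in pairs:
        parts = name.split('/')
        node = tree
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return tree

params = {'Dense_0': {'kernel': np.ones((2, 2)), 'bias': np.zeros(2)}}
buf = io.BytesIO()
np.savez(buf, **dict(flatten(params)))
buf.seek(0)
loaded = np.load(buf, allow_pickle=False)
restored = recover((k, loaded[k]) for k in loaded.files)
assert np.allclose(restored['Dense_0']['kernel'], params['Dense_0']['kernel'])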
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html ''' import time import numpy as np import tensorflow as tf from tensorflow.python.ops import rnn import random import matplotlib.pyplot as plt import re, string from sklearn.feature_extraction.text import CountVectorizer from collections import defaultdict import pickle as pkl import itertools import ctc_loss import os n=2**19-3 def map_lambda(): return n+1 def rev_map_lambda(): return "<UNK>" def load_text(n,num_samples=None): # fname = 'Oxford_English_Dictionary.txt' # txt = [] # with open(fname,'rb') as f: # txt = f.readlines() # txt = [x.decode('utf-8').strip() for x in txt] # txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1] # List of words # word_list = [x.split(' ', 1)[0].strip() for x in txt] # # List of definitions # def_list = [x.split(' ', 1)[1].strip()for x in txt] with open('./training_data/training_data.pkl','rb') as raw: word_list,dl=pkl.load(raw) def_list=[] # def_list=[' '.join(defi) for defi in def_list] i=0 # words={} while i<len( dl): defi=dl[i] if len(defi)>0: def_list+=[' '.join(defi)] i+=1 else: dl.pop(i) word_list.pop(i) # for w,d in zip(word_list,def_list): # if w not in words: # words[w]=[] # words[w].append(d) # word_list=[] # def_list=[] # for word in words: # word_list.append(word) # # def_list.append(random.choice(words[word])) # def_list.append(words[word][0]) maxlen=0 minlen=100 for defi in def_list: minlen=min(minlen,len(defi.split())) maxlen=max(maxlen,len(defi.split())) print(minlen) print(maxlen) maxlen=30 # # Initialize the "CountVectorizer" object, which is scikit-learn's # # bag of words tool. # vectorizer = CountVectorizer(analyzer = "word", \ # tokenizer = None, \ # preprocessor = None, \ # stop_words = None, \ # max_features = None, \ # token_pattern='\\b\\w+\\b') # Keep single character words _map,rev_map=get_one_hot_map(word_list,def_list,n) pkl.dump(_map,open('mapa.pkl','wb')) pkl.dump(rev_map,open('rev_mapa.pkl','wb')) # exit() if num_samples is not None: num_samples=len(word_list) # X = (36665, 56210) # X = map_one_hot(word_list[:num_samples],_map,1,n) # # y = (36665, 56210) # # print _map # y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n) # np.save('Xa',X) # np.save('ya',y) # np.save('maska',mask) X=np.load('Xa.npy','r') y=np.load('ya.npy','r') mask=np.load('maska.npy','r') print (np.max(y)) return X, y, mask,rev_map def get_one_hot_map(to_def,corpus,n): # words={} # for line in to_def: # if line: # words[line.split()[0]]=1 # counts=defaultdict(int) # uniq=defaultdict(int) # for line in corpus: # for word in line.split(): # if word not in words: # counts[word]+=1 # words=list(words.keys()) words=[] counts=defaultdict(int) uniq=defaultdict(int) for line in to_def+corpus: for word in line.split(): if word not in words: counts[word]+=1 _map=defaultdict(map_lambda) rev_map=defaultdict(rev_map_lambda) # words=words[:25000] for i in counts.values(): uniq[i]+=1 print (len(words)) # random.shuffle(words) words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)] print (len(words)) i=0 # random.shuffle(words) # for num_bits in range(binary_dim): # for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1): # bitmap=np.zeros(binary_dim) # bitmap[np.array(bit_config)]=1 # num=bitmap*(2** np.arange(binary_dim )) # num=np.sum(num) # num=int(num) # word=words[i] # _map[word]=num # rev_map[num]=word # i+=1 # if i>=len(words): # break # if i>=len(words): # break i+=1 for word in words: i+=1 
_map[word]=i rev_map[i]=word rev_map[n+2]='<UNK>' if zero_end_tok: rev_map[1]='.' else: rev_map[1]='Start' rev_map[n+3]='End' print (list(reversed(sorted(uniq.items())))) print (len(list(uniq.items()))) print (len(rev_map.keys())) print(len(_map.keys())) print ('heyo') # print rev_map return _map,rev_map def map_word_emb(corpus,_map): ### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY) rtn=[] rtn2=[] num_failed=0 num_counted=0 for word in corpus: w=word.lower() num_counted+=1 if w not in _map: num_failed+=1 mapped=_map[w] rtn.append(mapped) if get_rand_vec: mapped_rand=random.choice(list(_map.keys())) while mapped_rand==word: mapped_rand=random.choice(list(_map.keys())) mapped_rand=_map[mapped_rand] rtn2.append(mapped_rand) print 'fuck',num_failed/float(num_counted) if get_rand_vec: return np.array(rtn),np.array(rtn2) return np.array(rtn) def map_one_hot(corpus,_map,maxlen,n): if maxlen==1: if not form2: total_not=0 rtn=np.zeros([len(corpus),n+3],dtype=np.float32) for l,line in enumerate(corpus): if len(line)==0: rtn[l,-1]=1 else: mapped=_map[line] if mapped==75001: total_not+=1 rtn[l,mapped]=1 print (total_not,len(corpus)) return rtn else: total_not=0 if not onehot: rtn=np.zeros([len(corpus),binary_dim],dtype=np.float32) else: rtn=np.zeros([len(corpus),2**binary_dim],dtype=np.float32) for l,line in enumerate(corpus): # if len(line)==0: # rtn[l]=n+2 # else: # if line not in _map: # total_not+=1 mapped=_map[line] if mapped==75001: total_not+=1 if onehot: binrep=np.zeros(2**binary_dim) print line binrep[mapped]=1 else: binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32) rtn[l]=binrep print (total_not,len(corpus)) return rtn else: if form2: rtn=np.zeros([len(corpus),maxlen+2,binary_dim],dtype=np.float32) else: rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32) print (rtn.shape) mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32) print (mask.shape) mask[:,1]=1.0 totes=0 nopes=0 wtf=0 for l,_line in enumerate(corpus): x=0 line=_line.split() for i in range(min(len(line),maxlen)): # if line[i] not in _map: # nopes+=1 mapped=_map[line[i]] if form2: binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32) rtn[l,i+1,:]=binrep else: rtn[l,i+1]=mapped if mapped==75001: wtf+=1 mask[l,i+1]=1.0 totes+=1 x=i+1 to_app=n+2 if zero_end_tok: to_app=0 if form2: rtn[l,x+1,:]=(1&(to_app/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32) else: rtn[l,x+1]=to_app mask[l,x+1]=1.0 print (nopes,totes,wtf) return rtn,mask def xavier_init(fan_in, fan_out, constant=1e-4): """ Xavier initialization of network weights""" # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow low = -constant*np.sqrt(6.0/(fan_in + fan_out)) high = constant*np.sqrt(6.0/(fan_in + fan_out)) return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32) class VariationalAutoencoder(object): """ Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow. This implementation uses probabilistic encoders and decoders using Gaussian distributions and realized by multi-layer perceptrons. The VAE can be learned end-to-end. See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details. 
""" def __init__(self, network_architecture, transfer_fct=tf.nn.softplus, learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None): self.network_architecture = network_architecture self.transfer_fct = transfer_fct self.learning_rate = learning_rate print self.learning_rate self.batch_size = batch_size if global_step is None: global_step=tf.Variable(0,trainiable=False) self.global_step=global_step # tf Graph input self.n_words=network_architecture['n_input'] if not form2: self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in') else: self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in') self.intype=type(self.x) if not form2: self.caption_placeholder = tf.placeholder(tf.int32, [None,network_architecture["maxlen"]],name='caption_placeholder') else: self.caption_placeholder = tf.placeholder(tf.float32, [None, network_architecture["maxlen"],self.n_words],name='caption_placeholder') print self.caption_placeholder.shape self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask') self.timestep=tf.placeholder(tf.float32,[],name='timestep') # Create autoencoder network to_restore=None self.embw=tf.Variable(xavier_init(network_architecture['n_input'],network_architecture['n_z']),name='embw') self.embb=tf.Variable(tf.zeros([network_architecture['n_z']]),name='embb') if not generative: self._create_network() # Define loss function based variational upper-bound and # corresponding optimizer to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) self._create_loss_optimizer() self.test=test else: self._build_gen() # Initializing the tensor flow variables init = tf.global_variables_initializer() # Launch the session self.sess = tf.InteractiveSession() if embeddings_trainable: self.saver = tf.train.Saver(var_list=to_restore,max_to_keep=100) saved_path=tf.train.latest_checkpoint(model_path) else: self.saver= tf.train.Saver(var_list=self.untrainable_variables,max_to_keep=100) mod_path=model_path if use_ctc: mod_path=mod_path[:-3] saved_path=tf.train.latest_checkpoint(mod_path.replace('defdef','embtransfer')) self.sess.run(init) if ctrain: self.saver.restore(self.sess, saved_path) self.saver=tf.train.Saver(max_to_keep=100) def _create_network(self): # Initialize autoencode network weights and biases network_weights = self._initialize_weights(**self.network_architecture) start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32) self.network_weights=network_weights seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32) self.embedded_input_KLD_loss=tf.constant(0.0) self.input_embedding_KLD_loss=tf.constant(0.0) def train_encoder(): embedded_input,self.embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[-1,self.network_architecture['n_input']]),logit=True) embedded_input=tf.reshape(embedded_input,[-1,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']]) if not vanilla: self.embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:] encoder_input=embedded_input[:,1:,:] cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input']) if lstm_stack>1: cell=tf.contrib.rnn.MultiRNNCell([cell]*lstm_stack) if not use_bdlstm: 
encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False) else: backward_cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input']) if lstm_stack>1: backward_cell=tf.contrib.rnn.MultiRNNCell([backward_cell]*lstm_stack) encoder_outs,encoder_states=rnn.bidirectional_dynamic_rnn(cell,backward_cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False) ix_range=tf.range(0,self.batch_size,1) ixs=tf.expand_dims(ix_range,-1) to_cat=tf.expand_dims(seqlen-2,-1) gather_inds=tf.concat([ixs,to_cat],axis=-1) print encoder_outs outs=tf.gather_nd(encoder_outs,gather_inds) outs=tf.nn.dropout(outs,.75) self.deb=tf.gather_nd(self.caption_placeholder[:,1:,:],gather_inds) print outs.shape input_embedding,self.input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True) return input_embedding # input_embedding=tf.nn.l2_normalize(input_embedding,dim=-1) self.other_loss=tf.constant(0,dtype=tf.float32) KLD_penalty=tf.tanh(tf.cast(self.timestep,tf.float32)/1.0) cos_penalty=tf.maximum(-0.1,tf.tanh(tf.cast(self.timestep,tf.float32)/(5.0))) self.input_KLD_loss=tf.constant(0.0) def train_decoder(): if form3: _x,self.input_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['variational_encoding']) self.input_KLD_loss=tf.reduce_mean(self.input_KLD_loss)*KLD_penalty#*tf.constant(0.0,dtype=tf.float32) normed_embedding= tf.nn.l2_normalize(self.mid_var, dim=-1) normed_target=tf.nn.l2_normalize(self.word_var,dim=-1) cos_sim=(tf.reduce_sum(tf.multiply(normed_embedding,normed_target),axis=-1)) # # self.exp_loss=tf.reduce_mean((-cos_sim)) # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size) self.other_loss += tf.reduce_mean(-(cos_sim))*cos_penalty # other_loss+=tf.reduce_mean(tf.reduce_sum(tf.square(_x-input_embedding),axis=-1))*cos_penalty return _x input_embedding=tf.cond(tf.equal(self.timestep%5,0),train_decoder,train_encoder) # Use recognition network to determine mean and # (log) variance of Gaussian distribution in latent # space # if not same_embedding: # input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning']) # else: # input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM']) if not embeddings_trainable: input_embedding=tf.stop_gradient(input_embedding) # embed2decoder=tf.Variable(xavier_init(self.network_architecture['n_z_m_2'],self.network_architecture['n_lstm_input']),name='decoder_embedding_weight') # embed2decoder_bias=tf.Variable(tf.zeros(self.network_architecture['n_lstm_input']),name='decoder_embedding_bias') state = self.lstm.zero_state(self.batch_size, dtype=tf.float32) # input_embedding=tf.matmul(input_embedding,embed2decoder)+embed2decoder_bias loss = 0 self.debug=0 probs=[] with tf.variable_scope("RNN"): for i in range(self.network_architecture['maxlen']): if i > 0: # current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias if form4: current_embedding,KLD_loss=input_embedding,0 elif form2: current_embedding,KLD_loss = 
self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1,:],logit=True) else: current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1]) loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty else: current_embedding = input_embedding if i > 0: tf.get_variable_scope().reuse_variables() out, state = self.lstm(current_embedding, state) if i > 0: if not form2: labels = tf.expand_dims(self.caption_placeholder[:, i], 1) ix_range=tf.range(0, self.batch_size, 1) ixs = tf.expand_dims(ix_range, 1) concat = tf.concat([ixs, labels],1) onehot = tf.sparse_to_dense( concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0) else: onehot=self.caption_placeholder[:,i,:] logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias'] if not use_ctc: if form2: # best_word=tf.nn.softmax(logit) # best_word=tf.round(best_word) # all_the_f_one_h.append(best_word) xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=onehot) xentropy=tf.reduce_sum(xentropy,reduction_indices=-1) else: xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=onehot) xentropy = xentropy * self.mask[:,i] xentropy=tf.reduce_sum(xentropy) self.debug+=xentropy loss += xentropy else: probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1)) self.debug=[input_KLD_loss,tf.reduce_mean(input_embedding_KLD_loss)/self.batch_size*KLD_penalty,other_loss,KLD_penalty] if not use_ctc: loss_ctc=0 # self.debug=other_loss # self.debug=[input_KLD_loss,embedded_input_KLD_loss,input_embedding_KLD_loss] else: probs=tf.concat(probs,axis=1) probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:]) loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1) self.debug=loss_ctc # loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty+tf.reduce_sum(self.embedded_input_KLD_loss*self.mask[:,1:])/tf.reduce_sum(self.mask[:,1:])*KLD_penalty+loss_ctc+self.input_KLD_loss+self.other_loss self.loss=loss def _initialize_weights(self, n_lstm_input, maxlen, n_input, n_z, n_z_m,n_z_m_2): all_weights = dict() if form3: n_in=n_z else: n_in=n_input if not same_embedding: all_weights['input_meaning'] = { 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight',trainable=embeddings_trainable), 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias',trainable=embeddings_trainable)} if not vanilla: all_weights['biases_variational_encoding'] = { 'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable), 'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab',trainable=embeddings_trainable)} all_weights['variational_encoding'] = { 'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable), 'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='out_log_sigma',trainable=embeddings_trainable), 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'), 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias') } else: all_weights['biases_variational_encoding'] = { 'out_mean': tf.Variable(tf.zeros([n_z], 
dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable)} all_weights['variational_encoding'] = { 'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable), 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'), 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')} self.untrainable_variables=all_weights['input_meaning'].values()+all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values() if mid_vae: all_weights['biases_middle_encoding'] = { 'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'), 'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')} all_weights['middle_encoding'] = { 'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'), 'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'), 'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'), 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')} all_weights['embmap']={ 'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'), 'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_log_sigma') } all_weights['embmap_biases']={ 'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable), 'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_log_sigmab',trainable=embeddings_trainable) } else: all_weights['biases_middle_encoding'] = { 'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')} all_weights['middle_encoding'] = { 'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'), 'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'), 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')} all_weights['embmap']={ 'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean') } all_weights['embmap_biases']={ 'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable) } self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input) if lstm_stack>1: self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack) all_weights['LSTM'] = { 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'), 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'), 'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'), 'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'), 'lstm': self.lstm} return all_weights def _get_input_embedding(self, ve_weights, aff_weights): if not form3: z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x) else: x=tf.matmul(self.x,self.embw)+self.embb z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x) self.word_var=z embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias'] return embedding,vae_loss def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False): if logit: z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x) else: if not form2: z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True) else: z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input'])) all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input'])) print z.shape 
self.mid_var=z embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias'] return embedding,vae_loss def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False): if form3: x=tf.matmul(x,self.embw)+self.embb if logit: z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x) else: if not form2: z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True) else: z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input'])) all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input'])) embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias'] return embedding,vae_loss def _vae_sample(self, weights, biases, x, lookup=False): #TODO: consider adding a linear transform layer+relu or softplus here first if not lookup: mu=tf.matmul(x,weights['out_mean'])+biases['out_mean'] if not vanilla: logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma'] else: mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean'] if not vanilla: logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma'] if not vanilla: epsilon=tf.random_normal(tf.shape(logvar),name='epsilon') std=tf.exp(.5*logvar) z=mu+tf.multiply(std,epsilon) else: z=mu KLD=0.0 if not vanilla: KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1) print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape return z,KLD def _vae_sample_mid(self, weights, biases, x, lookup=False): #TODO: consider adding a linear transform layer+relu or softplus here first if not lookup: mu=tf.matmul(x,weights['out_mean'])+biases['out_mean'] if mid_vae: logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma'] else: mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean'] if mid_vae: logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma'] if mid_vae: epsilon=tf.random_normal(tf.shape(logvar),name='epsilon') std=tf.exp(.5*logvar) z=mu+tf.multiply(std,epsilon) else: z=mu KLD=0.0 if mid_vae: print 'stop fucking sampling',mid_vae KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1) print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape return z,KLD def _create_loss_optimizer(self): if clip_grad: opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1) self.optimizer = opt_func.apply_gradients(zip(grads, tvars)) else: self.optimizer = \ tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss) def _create_loss_test(self): self.test_op = \ tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={}) def partial_fit(self, X,y,mask,testify=False,timestep=0): """Train model based on mini-batch of input data. Return cost of mini-batch. 
""" if self.test and testify: print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask}) exit() else: opt, cost,shit = self.sess.run((self.optimizer, self.loss,self.debug), feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask,self.timestep:timestep}) # print shit # print deb # exit() return cost,shit def _build_gen(self): #same setup as `_create_network` function network_weights = self._initialize_weights(**self.network_architecture) if form2: start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32) else: start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32) self.network_weights=network_weights if not same_embedding: input_embedding,_=self._get_input_embedding([network_weights['embmap'],network_weights['embmap_biases']],network_weights['embmap']) else: input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM']) print input_embedding.shape # image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias state = self.lstm.zero_state(self.batch_size,dtype=tf.float32) #declare list to hold the words of our generated captions all_words = [] with tf.variable_scope("RNN"): # in the first iteration we have no previous word, so we directly pass in the image embedding # and set the `previous_word` to the embedding of the start token ([0]) for the future iterations output, state = self.lstm(input_embedding, state) print state,output.shape if form4: previous_word,_=input_embedding,None elif form2: previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True) else: previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor) print previous_word.shape # previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias for i in range(self.network_architecture['maxlen']): tf.get_variable_scope().reuse_variables() print i out, state = self.lstm(previous_word, state) # get a one-hot word encoding from the output of the LSTM logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias'] if not form2: best_word = tf.argmax(logit, 1) else: best_word=tf.nn.sigmoid(logit) best_word=tf.round(best_word) # with tf.device("/cpu:0"): # # get the embedding of the best_word to use as input to the next iteration of our LSTM # previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word) # previous_word += self.embedding_bias print logit.shape if form4: previous_word,_=input_embedding,None elif form2: previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True) else: previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word) print previous_word.shape all_words.append(best_word) self.generated_words=all_words def generate(self, _map, x): """ Generate data by sampling from latent 
space. If z_mu is not None, data for this point in latent space is generated. Otherwise, z_mu is drawn from prior in latent space. # """ # if z_mu is None: # z_mu = np.random.normal(size=self.network_architecture["n_z"]) # # Note: This maps to mean of distribution, we could alternatively # # sample from Gaussian distribution # return self.sess.run(self.x_reconstr_mean, # feed_dict={self.z: z_mu}) # saver = tf.train.Saver() # saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x}) print f_it print generated_word_index if form2: generated_word_index=np.array(bin_to_int(generated_word_index)) generated_word_index=np.rollaxis(generated_word_index,1) else: generated_word_index=np.array(generated_word_index) return generated_word_index # generated_sentence = ixtoword(_map,generated_word_index) # return generated_sentence def ixtoword(_map,ixs): return [[_map[x] for x in y] for y in ixs] def bin_to_int(a): return [(x*(2** np.arange(x.shape[-1] ))).sum(axis=-1).astype(np.uint32) for x in a] def train(network_architecture, learning_rate=0.001, batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False): global_step=tf.Variable(0,trainable=False) total_batch = int(n_samples / batch_size) if should_decay and not gen: learning_rate = tf.train.exponential_decay(learning_rate, global_step, total_batch, 0.95, staircase=True) vae = VariationalAutoencoder(network_architecture, learning_rate=learning_rate, batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step) # Training cycle # if test: # maxlen=network_architecture['maxlen'] # return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[]) if gen: return vae costs=[] indlist=np.arange(all_samps).astype(int) # indlist=np.arange(10*batch_size).astype(int) for epoch in range(training_epochs): avg_cost = 0. 
        # Loop over all batches
        np.random.shuffle(indlist)
        testify = False
        avg_loss = 0
        # for i in range(1):
        for i in range(total_batch):
            # break
            ts = i
            # i = 0
            inds = np.random.choice(indlist, batch_size)
            # print indlist[i*batch_size:(i+1)*batch_size]
            # batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
            batch_xs = X[inds]
            # Fit training using batch data
            # if epoch == 2 and i == 0:
            #     testify = True
            # cost, loss = vae.partial_fit(batch_xs, y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32), mask[indlist[i*batch_size:(i+1)*batch_size]], timestep=epoch*total_batch+ts, testify=testify)
            cost, loss = vae.partial_fit(batch_xs, y[inds].astype(np.uint32), mask[inds], timestep=(epoch) + 1e-3, testify=testify)
            # Compute average loss
            avg_cost = avg_cost * i / (i + 1) + cost / (i + 1)
            # avg_loss = avg_loss*i/(i+1) + loss/(i+1)
            if i % display_step == 0:
                print avg_cost, loss, cost
            if epoch == 0 and ts == 0:
                costs.append(avg_cost)
        costs.append(avg_cost)
        # Display logs per epoch step
        if epoch % (display_step * 10) == 0 or epoch == 1:
            if should_save:
                print 'saving'
                vae.saver.save(vae.sess, os.path.join(model_path, 'model'))
            pkl.dump(costs, open(loss_output_path, 'wb'))
            print("Epoch:", '%04d' % (epoch + 1), "cost=", avg_cost)
    return vae


if __name__ == "__main__":
    import sys
    form2 = True
    vanilla = True
    if sys.argv[1] != 'vanilla':
        vanilla = False
    mid_vae = False
    form3 = True
    form4 = False
    vanilla = True  # NOTE: unconditionally overrides the argv-based setting above
    if sys.argv[2] == 'mid_vae':
        mid_vae = True
        print 'mid_vae'
    same_embedding = False
    clip_grad = True
    if sys.argv[3] != 'clip':
        clip_grad = False
    should_save = True
    should_train = True
    # should_train = not should_train
    should_continue = False
    # should_continue = True
    should_decay = True
    zero_end_tok = True
    training_epochs = int(sys.argv[13])
    batch_size = int(sys.argv[4])
    onehot = False
    embeddings_trainable = False
    if sys.argv[5] != 'transfer':
        print 'true embs'
        embeddings_trainable = True
    transfertype2 = True
    binary_dim = int(sys.argv[6])
    all_the_f_one_h = []
    if not zero_end_tok:
        X, y, mask, _map = load_text(2**binary_dim - 4)
    else:
        X, y, mask, _map = load_text(2**binary_dim - 3)
    n_input = binary_dim
    n_samples = 30000
    lstm_dim = int(sys.argv[7])
    model_path = sys.argv[8]
    vartype = ''
    transfertype = ''
    maxlen = int(sys.argv[9]) + 2
    n_z = int(sys.argv[10])
    n_z_m = int(sys.argv[11])
    n_z_m_2 = int(sys.argv[12])
    if not vanilla:
        vartype = 'var'
    if not embeddings_trainable:
        transfertype = 'transfer'
    cliptype = ''
    if clip_grad:
        cliptype = 'clip'
    use_ctc = False
    losstype = ''
    if sys.argv[14] == 'ctc_loss':
        use_ctc = True
        losstype = 'ctc'
    lstm_stack = int(sys.argv[15])
    use_bdlstm = False
    bdlstmtype = ''
    if sys.argv[16] != 'forward':
        use_bdlstm = True
        bdlstmtype = 'bdlstm'
    loss_output_path = 'losses/%s%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%sdefdef%s4.pkl' % (bdlstmtype, str(lstm_stack), str(batch_size), str(maxlen - 2), str(lstm_dim), str(n_input), str(n_z), str(n_z_m), str(losstype), str(cliptype), str(vartype), str(transfertype))
    all_samps = len(X)
    n_samples = all_samps
    # X, y = X[:n_samples, :], y[:n_samples, :]
    network_architecture = \
        dict(maxlen=maxlen,          # maximum caption length (argv[9] + start/end tokens)
             n_input=n_input,        # width of the binary word encoding
             n_lstm_input=lstm_dim,  # LSTM cell size
             n_z=n_z,                # dimensionality of latent space
             n_z_m=n_z_m,
             n_z_m_2=n_z_m_2)
    # batch_size = 1
    if should_train:
        # vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size, gen=False, ctrain=should_continue)
        # print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size, gen=False, ctrain=should_continue, test=True)
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size, gen=False, ctrain=should_continue, learning_rate=.005)
    else:
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size, gen=True, ctrain=True)

    # vae_2d._build_gen()
    ind_list = np.arange(len(X)).astype(int)
    # np.random.shuffle(ind_list)
    x_sample = X[ind_list[:batch_size]]
    print x_sample
    y_sample = y[ind_list[:batch_size]]
    print y_sample

    y_hat = vae_2d.generate(_map, x_sample)
    y_hat = y_hat[:10]
    # print y_hat
    y_hat_words = ixtoword(_map, y_hat)
    print y_hat_words
    if form2:
        y_words = ixtoword(_map, np.array(bin_to_int(y_sample[:10])))
    else:
        y_words = ixtoword(_map, y_sample)
    print(y_hat)
    print(y_hat_words)
    print(y_words)
    print(ixtoword(_map, bin_to_int(np.expand_dims(x_sample[:10], axis=0))))

    # plt.figure(figsize=(8, 6))
    # plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
    # plt.colorbar()
    # plt.grid()
    # plt.show()
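
# --- Editor's sketch (not part of the original script): a minimal,
# self-contained round trip through the binary word encoding that
# bin_to_int() above decodes. int_to_bin is a hypothetical helper that
# assumes the same little-endian bit convention as
# (x * 2**arange(dim)).sum(axis=-1); adjust if the real encoder in
# load_text() differs.
def _demo_binary_roundtrip(dim=8):
    import numpy as np
    def int_to_bin(ix, dim):
        # bit k of ix, least-significant bit first, as a float vector
        return ((ix >> np.arange(dim)) & 1).astype(np.float32)
    ix = 37
    bits = int_to_bin(ix, dim)  # [1,0,1,0,0,1,0,0] for 37
    # decode exactly the way bin_to_int does: sum_k bits[k] * 2**k
    decoded = int((bits * (2 ** np.arange(dim))).sum())
    assert decoded == ix
    return bits, decoded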
dricciardelli/vae2vec
def_def_alt.py
Python
mit
36437
[ "Gaussian" ]
85e4c8f13bf1f33ec5ae4d162b8f75c8db4165d742e544d830b62295effa9be0
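A note on the record that follows: it is a pylid language profile for Slovak ('sk'), storing counts of character trigrams in which '#' marks word boundaries (keys such as u'#pr', u'ch#', u'#a#'). A minimal sketch of how such a profile can be built, assuming '#'-padded trigrams; make_profile is an illustrative helper, not pylid's actual API:

from collections import Counter

def make_profile(text, n=3):
    # join/pad words with '#' so boundary n-grams are counted too
    padded = u'#' + u'#'.join(text.lower().split()) + u'#'
    return Counter(padded[i:i+n] for i in range(len(padded) - n + 1))

# e.g. make_profile(u'pre to') == Counter({u'#pr': 1, u'pre': 1, u're#': 1,
#                                          u'e#t': 1, u'#to': 1, u'to#': 1})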
#!/usr/bin/env python # -*- coding: utf-8 -*- import pylid from collections import Counter sk = pylid.PyLID(3) sk.total_ngrams = 85522929 sk.lang = 'sk' sk.ngrams = Counter({ u'#pr': 817084, u'#po': 561483, u'ch#': 483526, u'#a#': 475597, u'#na': 412411, u'ie#': 388611, u'pre': 363041, u'to#': 349111, u'om#': 346549, u'#v#': 303741, u'na#': 294958, u'ova': 292523, u'ej#': 284482, u'je#': 279085, u'\xfdch': 277955, u'ia#': 273122, u'#sa': 266719, u'nie': 262384, u'tor': 250719, u'sa#': 247670, u'kto': 243795, u'ost': 237269, u'\u017ee#': 236594, u'#je': 235314, u'ne#': 234626, u'a#p': 228638, u'me#': 226750, u'e#p': 224025, u'ov#': 221635, u'ho#': 219689, u'#za': 218681, u'#do': 214544, u'ani': 211165, u'by#': 210461, u'#kt': 208668, u'#ne': 202434, u'n\xe9#': 201334, u'e#s': 199809, u'a\u0165#': 198031, u'#sp': 197013, u'#\u017ee': 193624, u'#ro': 190504, u'sti': 188236, u'mi#': 187984, u'#to': 186942, u'eni': 179771, u'li#': 178019, u'e#v': 176709, u'ti#': 174031, u'pod': 169253, u'nos': 169216, u'red': 166062, u'van': 165581, u'a#v': 163971, u'eur': 162324, u'#ko': 161986, u'spo': 161868, u'pri': 160231, u'o#p': 159173, u'a#s': 158769, u'#vy': 158587, u'pr\xe1': 153408, u'#eu': 151341, u'nia': 148704, u'ur\xf3': 147088, u'r\xf3p': 147084, u'y#s': 144217, u'o#s': 143716, u'#ob': 143278, u'i\u0165#': 142731, u'e#n': 140621, u'pro': 137736, u'a#n': 136061, u'#by': 133870, u'#o#': 130647, u'#st': 130579, u'ou#': 130222, u'men': 128629, u'ko#': 128000, u'r\xe1v': 127719, u'#n\xe1': 127328, u'sta': 127088, u'tre': 127001, u'pol': 125943, u'nej': 125040, u'nov': 124531, u'e#a': 123513, u'a#z': 122787, u'#s\xfa': 121775, u'ky#': 121086, u'str': 120929, u'ali': 119521, u'kom': 119284, u'de#': 118814, u'#ak': 118794, u'a#t': 118653, u'ent': 118520, u'uje': 117341, u'roz': 117258, u'la#': 113894, u'psk': 113246, u'\xf3ps': 113172, u'pra': 112742, u'kon': 112591, u'#so': 112568, u'n\xfdc': 110161, u'\xe9ho': 109133, u'rov': 108859, u'hod': 108623, u'#ve': 108200, u'#z\xe1': 107790, u'ako': 107614, u's\u0165#': 106428, u'e#t': 105148, u'i#p': 104495, u'\xfdm#': 103991, u're#': 101861, u'len': 101730, u'm#p': 101494, u'ame': 101348, u'ske': 101179, u'#te': 100982, u'o#v': 100864, u'n\xfd#': 99639, u'cie': 99443, u'e#z': 99376, u'ku#': 98828, u'#s#': 98700, u'va\u0165': 98675, u'pr\xed': 98413, u'aj\xfa': 98174, u'#pa': 98002, u'e#o': 97614, u'odn': 97345, u'ove': 96987, u'\xe1ci': 96753, u'#v\xfd': 96671, u'edn': 96063, u'sku': 95935, u'j\xfac': 95218, u'a#a': 95069, u'#ma': 93748, u'rav': 93409, u'os\u0165': 92535, u'a#k': 92420, u'i#s': 91249, u'u#p': 91086, u'ci#': 90971, u'ka#': 90773, u'ran': 90140, u'kov': 89939, u'va#': 89754, u'a#m': 89753, u'som': 89612, u'n\xed#': 89509, u'te#': 88805, u'las': 88768, u'ast': 88599, u'ny#': 88141, u'or\xe9': 87867, u'ick': 87810, u'bud': 87672, u'kej': 87344, u'#kr': 87178, u'm#s': 86663, u'sme': 86623, u'r\xe9#': 86399, u'#od': 85332, u'tov': 85325, u'#sm': 84884, u'u#a': 84855, u'#in': 84804, u'o\u010dn': 84596, u'a#o': 84182, u'eto': 83959, u'ved': 82912, u'h#p': 82898, u'#ab': 82447, u'oli': 82379, u'nsk': 81985, u'\xe1va': 81855, u'j\xfa#': 81734, u'rok': 81179, u'a#d': 80967, u'ale': 80872, u'est': 80571, u'e#m': 80022, u'pos': 79665, u'aby': 78986, u'#v\u0161': 78766, u'o#n': 78605, u'olo': 78480, u'#ni': 78186, u't\xe1t': 77904, u'och': 77876, u'#si': 77226, u'spr': 77057, u'#me': 76760, u'ak#': 76728, u'omi': 76561, u'\xedm#': 76516, u'#bo': 76458, u'uj\xfa': 76416, u'i#v': 76096, u'tu#': 76012, u'#ho': 75845, u'iu#': 75074, u'e#k': 75055, 
u'#\u0161t': 74982, u'#mi': 74903, u'val': 74747, u'tra': 74403, u'#bu': 74388, u'mis': 74280, u'pot': 74251, u'odp': 73970, u'lo#': 73898, u'ili': 73897, u'lad': 73864, u'#tr': 73178, u'ich': 72875, u'#mo': 72502, u'ist': 72453, u'tie': 72308, u'sia': 71679, u'eme': 71665, u'por': 71574, u'y#a': 71466, u'kra': 71017, u'mu#': 70983, u'eho': 70840, u'n\xe1#': 70741, u'aj#': 70561, u'pov': 70344, u'voj': 70151, u'lit': 70119, u'#re': 70069, u'dpo': 69938, u'e#d': 69648, u'k\xfdc': 69050, u'ret': 68772, u'za#': 68287, u'mus': 68139, u'i#a': 68027, u'eda': 68021, u'tom': 67856, u'#d\xf4': 67788, u'rie': 67775, u'ren': 67219, u'oho': 67113, u'eds': 66600, u'p\xe1n': 66493, u'\xe9#p': 66480, u'\u010das': 66154, u'iad': 65806, u'#al': 65707, u'#p\xe1': 65441, u'vo#': 65414, u'a#r': 65242, u'\xe1ln': 65223, u'kla': 65209, u'y#p': 65191, u'bol': 65013, u'dy#': 64974, u'obl': 64931, u'rob': 64905, u'mie': 64735, u'dno': 64523, u'u#s': 64304, u'par': 64197, u'cov': 63904, u'\u017eit': 63904, u'#t\xfd': 63812, u'\xfaci': 63776, u'o#z': 63153, u'stu': 62923, u'ami': 62777, u'#aj': 62553, u'\u0161t\xe1': 62517, u'#z#': 62334, u'\u017een': 62305, u'ovn': 62279, u'le#': 61917, u'#sk': 61514, u'cho': 61398, u'ni#': 61369, u'n\xe9h': 61112, u'ens': 60984, u'#vo': 60776, u'dne': 60681, u'v#p': 60201, u'dov': 60160, u'#mu': 60099, u'v#s': 59857, u'lne': 59841, u'tvo': 59734, u'reb': 59620, u'mal': 59557, u'a#b': 59456, u'otr': 59446, u'hla': 59135, u'v\u0161e': 59057, u'ade': 59020, u'e#j': 58973, u'isi': 58949, u've\u013e': 58812, u'sky': 58511, u'tic': 57858, u'oko': 57714, u'rad': 57621, u'\u0165#p': 57564, u'#ch': 57480, u'or\xfd': 57254, u'k\xe9#': 57087, u'i#n': 57017, u'#sv': 56895, u'u#v': 56823, u'#ra': 56473, u'en\xfd': 56259, u'osp': 56048, u'o#a': 55800, u'\u0161et': 55648, u'o#o': 55519, u'ori': 55432, u'den': 55402, u'em#': 55353, u'bez': 55269, u'e\u017ei': 55179, u'jed': 55052, u'o#t': 54982, u'm#v': 54980, u'iti': 54926, u'n\xfdm': 54699, u'nan': 54335, u'#k#': 53491, u'ene': 53484, u'en\xed': 53447, u'od\xe1': 53395, u'mer': 53261, u'ria': 53228, u'sed': 53098, u'dos': 53085, u'lam': 52962, u'o#d': 52720, u'ovi': 52165, u'ina': 52074, u'te\u013e': 51953, u'ty#': 51911, u'raj': 51686, u'#ta': 51646, u'ych': 51444, u'ten': 51354, u'le\u017e': 51278, u'e#r': 51213, u'\xfd#p': 51165, u'j#p': 51096, u'ude': 50982, u'nut': 50926, u'u#k': 50894, u'naj': 50752, u'\u010dno': 50660, u'uto': 50597, u'era': 50528, u'j#s': 50432, u'\u010dle': 50414, u'iac': 50340, u'med': 50112, u'dse': 50086, u'oje': 50060, u'daj': 50016, u'hos': 49989, u'n\xe1v': 49742, u'adn': 49634, u'#\u010dl': 49355, u'ekt': 49308, u'sk\xfd': 49286, u'ce#': 49210, u'edz': 49198, u'nes': 49148, u'\u0165#s': 48981, u'hov': 48864, u'ovo': 48834, u's\xfa#': 48810, u'tak': 48698, u'vor': 48684, u'stv': 48657, u'sto': 48586, u'rsk': 48582, u'us\xed': 48550, u'y#v': 48459, u'do#': 48458, u'd\xe1r': 48440, u'ano': 48389, u'am#': 48369, u'o#r': 48230, u'\u0165#a': 48205, u'sko': 48155, u'\xe1n#': 47845, u'#le': 47688, u'e#b': 47673, u'arl': 47638, u'dob': 47629, u'jem': 47540, u'rla': 47428, u'pok': 47288, u'i#k': 47265, u'#de': 47251, u'da#': 46963, u'etk': 46872, u'vod': 46861, u'\xe1rs': 46857, u'\u0165#v': 46736, u'#ce': 46713, u'tne': 46587, u'stn': 46509, u'm#z': 46347, u'du#': 46228, u'v#t': 46060, u'm#a': 46056, u'\xe9#s': 46049, u'ver': 46035, u'sle': 45789, u'si#': 45711, u'cel': 45567, u'lo\u010d': 45565, u'to\u010d': 45508, u'ok#': 45477, u'poz': 45458, u'ate': 45425, u'\u017eia': 45321, u'h#s': 45318, u'i#z': 45317, u'nom': 
45301, u'vne': 45235, u'an\xed': 45211, u'\u010do#': 45155, u'al#': 45129, u'#\u017ei': 44982, u'u#n': 44973, u'en#': 44951, u'o#k': 44919, u'tej': 44874, u'm#n': 44716, u'zna': 44691, u'nu#': 44672, u'v#a': 44572, u'\xfani': 44564, u'\xfdmi': 44517, u'\u0165#n': 44488, u'rac': 44412, u'\xe1to': 44330, u'v\xe1\u017e': 44230, u'n\xfa#': 44124, u'oru': 43971, u'#zo': 43893, u'#\xfan': 43855, u'k\xfdm': 43817, u'slo': 43776, u'hu#': 43642, u'ska': 43639, u'raz': 43533, u'ros': 43510, u'vrh': 43297, u'rej': 43256, u'ala': 43212, u'tro': 43028, u'ter': 42980, u'avi': 42917, u'edo': 42903, u'a#j': 42803, u'y#n': 42776, u't\xedv': 42632, u'rod': 42540, u'#ti': 42502, u'#v\xe1': 42489, u'\xe1m#': 42456, u'e\xfa#': 42347, u'my#': 42144, u'ov\xfd': 41866, u'ite': 41863, u'dzi': 41674, u'ada': 41652, u'pom': 41618, u'dom': 41601, u'bla': 41528, u'tup': 41522, u'\xedme': 41499, u'#zm': 41484, u'rat': 41482, u'oro': 41297, u'nic': 41081, u'#vi': 41079, u'okr': 41049, u'ezp': 40955, u'#zn': 40928, u'\xe1ro': 40928, u'sov': 40915, u'chc': 40883, u'nen': 40687, u'e#e': 40637, u'a#e': 40490, u'#e\xfa': 40394, u'\u010den': 40386, u'iek': 40373, u'led': 40363, u'r\xe1c': 40296, u'res': 40257, u'#op': 40205, u'#pl': 40185, u'#zd': 40153, u'nem': 39987, u'y#k': 39925, u'\u0161ie': 39810, u'\xfa#p': 39761, u'odo': 39761, u'nam': 39672, u'kut': 39510, u'#\u010do': 39493, u'hra': 39473, u'o#m': 39462, u'pe\u010d': 39430, u'po\u010d': 39419, u'zpe': 39223, u'svo': 39198, u'm\xf4\u017e': 39184, u'tan': 39092, u'tik': 39089, u'vy#': 39017, u'ies': 38955, u'sob': 38867, u'#\u013eu': 38850, u'o\u017ee': 38726, u'odu': 38597, u'o#b': 38570, u'ii#': 38421, u'ati': 38293, u'ebo': 38259, u'#be': 38182, u'cia': 38106, u'mo\u017e': 38045, u'hce': 38036, u'en\xe9': 37873, u'ner': 37872, u'ele': 37838, u'#ci': 37823, u'ri#': 37800, u'v#k': 37710, u'\u010dne': 37678, u'v#r': 37628, u'via': 37611, u'e\u013eo': 37605, u'i#d': 37540, u'na\u0161': 37468, u'hto': 37436, u'#no': 37425, u'#ot': 37389, u'toh': 37335, u've#': 37089, u'asn': 36882, u'\xe1\u017ee': 36816, u'#sl': 36778, u'aji': 36748, u'y#m': 36742, u'in\xe1': 36661, u'vie': 36627, u'ach': 36566, u'alo': 36541, u'\xe1vn': 36420, u'jin': 36309, u's\xedm': 36219, u'ado': 36214, u'a#u': 36188, u'jto': 36156, u'ejt': 36107, u'nto': 35790, u'vu#': 35783, u'ru#': 35767, u'e\u013em': 35748, u'oci': 35696, u'\u013emi': 35541, u'dn\xfd': 35515, u'\u013eud': 35437, u'm#k': 35403, u'o\u017en': 35366, u'ca#': 35340, u'a#\u017e': 35287, u'ito': 35237, u'obi': 35209, u'sk\xe9': 35125, u'isk': 34995, u'en\xe1': 34835, u'o#j': 34775, u'sil': 34712, u'ern': 34667, u'vis': 34603, u'\u010dn\xfd': 34587, u'lov': 34534, u'\xe1vr': 34491, u'j#\xfa': 34435, u'u#z': 34429, u'v#n': 34362, u'lat': 34359, u'tav': 34297, u'h#k': 34171, u'r\xfd#': 34098, u'\xf4le': 34008, u'h#a': 33935, u'i#o': 33906, u'od#': 33903, u'fin': 33879, u'\xedvn': 33876, u'#hl': 33865, u'pan': 33613, u'#os': 33611, u'aco': 33590, u'\u0161en': 33575, u'd\xf4l': 33564, u'iet': 33559, u'v#o': 33492, u't\xed#': 33473, u'sve': 33437, u'ore': 33413, u'h#v': 33399, u'i#m': 33303, u'z\xe1k': 33233, u'sie': 33219, u'ej\u0161': 33190, u'\xe9#v': 33142, u'\xfa\u010da': 33090, u'n#p': 33057, u'#ka': 33057, u'#vz': 33050, u'\u010dn\xe9': 33049, u'edk': 33030, u'mys': 33023, u'iel': 32939, u'v\xfdc': 32889, u'ere': 32803, u'#fi': 32592, u'tn\xe9': 32589, u'v\u0161a': 32578, u'el#': 32575, u'avo': 32528, u'm#\u017e': 32463, u'ane': 32413, u'osi': 32405, u'i#t': 32388, u'#di': 32294, u'ot\xe1': 32280, u'\u010din': 32181, u'#ri': 32084, 
u'\u0161ak': 32053, u'orm': 32025, u'h#o': 31986, u'dem': 31977, u'v#e': 31892, u'ide': 31875, u'n\xe1r': 31870, u'm#t': 31866, u'ov\xe9': 31844, u'#m\xf4': 31783, u'ck\xfd': 31675, u'anc': 31648, u'u#o': 31602, u'\xe9#a': 31587, u'rom': 31351, u'ven': 31274, u'\u0148uj': 31029, u'e\u010dn': 30925, u'\xf4\u017ee': 30902, u'\xed#p': 30899, u'ke#': 30885, u'\u013ead': 30884, u'\xface': 30877, u'inn': 30819, u'h\u013ea': 30783, u'itu': 30726, u'zne': 30706, u'nyc': 30641, u'ien': 30538, u'n\xe1s': 30526, u'#r\xe1': 30519, u'#zv': 30518, u'or\xed': 30473, u'\u010fal': 30409, u'leb': 30392, u'nik': 30353, u'opa': 30334, u't\xe1z': 30322, u'v\xe9#': 30311, u'ud\xfa': 30230, u'ni\u0165': 30208, u'vet': 30138, u'\xe1ch': 30096, u'e#u': 30051, u'\xe1#p': 29918, u'e#\u017e': 29902, u'r\xe1#': 29898, u'los': 29865, u'iny': 29844, u'hnu': 29830, u'aso': 29805, u'roj': 29738, u'a#\xfa': 29685, u'pla': 29615, u'\xe1kl': 29574, u'vom': 29573, u'ra#': 29500, u't\xe9#': 29410, u'olu': 29335, u'#ja': 29333, u'#m\xe1': 29328, u'#ke': 29265, u'tia': 29259, u'ech': 29240, u'i#b': 29223, u'ero': 29207, u'dis': 29189, u'ila': 29180, u'ozp': 29048, u'noh': 29038, u'ste': 29026, u't\xfdm': 28992, u'bo#': 28988, u'tva': 28985, u'#\u010di': 28971, u'ivo': 28953, u'for': 28885, u'ysl': 28867, u'doh': 28810, u'vej': 28784, u'r\xed#': 28771, u'e\u017e#': 28763, u'ier': 28760, u'zho': 28731, u'die': 28723, u'oku': 28723, u'isl': 28524, u'dn\xe9': 28509, u'oda': 28406, u'nep': 28369, u'rij': 28301, u'n\xedc': 28294, u'zme': 28290, u'r\xeds': 28278, u'oti': 28160, u'\xfa#v': 28139, u'n\xe1m': 28097, u'i\xed#': 28066, u'oto': 28064, u'not': 28008, u'ad#': 27949, u'iah': 27925, u'tat': 27887, u'a#i': 27837, u'omo': 27748, u'\xe9#z': 27740, u'\xe1zk': 27729, u'ke\u010f': 27727, u'or\xe1': 27727, u'pat': 27683, u'obe': 27674, u'ie\u013e': 27601, u'\u013ea#': 27586, u'lo\u017e': 27535, u'pad': 27507, u'akt': 27501, u'nci': 27486, u'ci\xe1': 27420, u'nte': 27400, u'\xfa#s': 27349, u'otn': 27342, u'pln': 27319, u'#ic': 27285, u'\xe1dz': 27267, u'y#z': 27223, u'zab': 27126, u'\u0165#\u017e': 27025, u'n\xedm': 27003, u't\xfdc': 26992, u'tk\xfd': 26965, u'rot': 26957, u'dst': 26921, u'\xed#s': 26912, u'an\xe9': 26882, u'e##': 26654, u'ol#': 26644, u'ck\xe9': 26591, u'm#o': 26556, u'atr': 26499, u'j#k': 26488, u'at\xed': 26472, u'az#': 26419, u'h#n': 26408, u'sla': 26284, u'nap': 26257, u'obr': 26216, u'eno': 26185, u'\xfato': 26177, u'\xe9mu': 26125, u'obn': 26070, u'omn': 26040, u'r\xfdc': 25979, u'o\u017ei': 25920, u'iem': 25856, u'lie': 25826, u'#ur': 25826, u'zam': 25755, u'osl': 25731, u'\u0165#z': 25729, u'ciu': 25642, u'lan': 25587, u'#\u010da': 25513, u'e#i': 25457, u'nou': 25415, u'm#r': 25382, u'\xed#a': 25355, u'zdr': 25339, u'egi': 25317, u'i#r': 25256, u'ede': 25235, u'z\xe1v': 25168, u'orn': 25139, u'\u0148ov': 25136, u'\xe9#n': 25102, u'dza': 25017, u'y\u0165#': 25008, u'\xe1ve': 24957, u'dop': 24946, u'#vl': 24853, u'il#': 24788, u'#u\u017e': 24719, u'by\u0165': 24709, u'v#m': 24642, u'l\xe9m': 24586, u'st\xe1': 24557, u'spe': 24528, u'edi': 24515, u'j#v': 24510, u'r\xedp': 24495, u'tri': 24429, u'rit': 24268, u'yst': 24245, u'zov': 24211, u'h#z': 24125, u'z\xe1c': 24115, u'ozv': 24098, u'bl\xe9': 24083, u'chr': 24049, u'i\xe1l': 24037, u'bil': 24037, u'\u017eno': 24001, u'ete': 23983, u'liv': 23973, u'moc': 23955, u'ozh': 23919, u'it\xe9': 23861, u'st\xed': 23751, u'obo': 23749, u'i#e': 23735, u'ija': 23703, u'\u0165#o': 23684, u'e\u010f#': 23653, u'usi': 23646, u'del': 23642, u'y#b': 23617, u'kro': 23604, 
u'tn\xfd': 23596, u'\u010dan': 23583, u't\xe9m': 23555, u'dok': 23555, u'\xe1le': 23553, u'u##': 23550, u'\xe1me': 23539, u'tuj': 23536, u'a#c': 23518, u'sad': 23501, u'u\u017e#': 23493, u'ave': 23461, u'p\xf4s': 23454, u'\xf4so': 23450, u'n\u010dn': 23433, u'ari': 23422, u'dr\u017e': 23413, u'po#': 23410, u'mov': 23353, u'mno': 23346, u'ces': 23339, u'dia': 23339, u'eko': 23306, u'sen': 23249, u'min': 23223, u'lep': 23172, u'r\xe1m': 23142, u'ov\xe1': 23135, u'#\u010fa': 23080, u'ctv': 23057, u'zi#': 23036, u'iky': 23005, u'o#\u017e': 22978, u'ojo': 22965, u'amo': 22957, u'e\u013en': 22925, u'k\xfd#': 22924, u'ie\u017e': 22898, u'vid': 22885, u'\xedva': 22881, u'\xed#v': 22800, u'e#h': 22797, u'zor': 22768, u'ob\u010d': 22759, u'sch': 22728, u'iz\xe1': 22719, u'#oc': 22662, u'u#m': 22650, u'h#\u0161': 22645, u'to\u017e': 22637, u'\u013eov': 22637, u's\xfa\u010d': 22629, u'v#v': 22590, u'ep\u0161': 22587, u'vot': 22548, u'reg': 22482, u'trh': 22395, u'#\xfar': 22325, u'ebu': 22319, u'#or': 22294, u'cu#': 22290, u'uti': 22253, u'l\xedm': 22240, u'eli': 22238, u'a#h': 22226, u'#en': 22196, u'ry#': 22195, u'\xfdka': 22146, u's#p': 22135, u'dn\xed': 22081, u'j\u0161i': 22063, u'vam': 22016, u'ini': 21998, u't\xfdk': 21989, u'niu': 21962, u'an\xfd': 21929, u'zvo': 21920, u'es#': 21910, u'keh': 21885, u'ba#': 21838, u'kou': 21838, u'org': 21829, u'ady': 21812, u'mto': 21780, u'#us': 21716, u'sam': 21703, u'adu': 21670, u'dal': 21617, u'vin': 21557, u'\xe1#s': 21552, u'apr': 21542, u's\xed#': 21539, u'emo': 21521, u'###': 21513, u'o#u': 21484, u'ie\u0161': 21469, u'vys': 21454, u'#zl': 21429, u'vyh': 21418, u'ris': 21406, u'neh': 21375, u'ta#': 21371, u'bor': 21342, u'an\u010d': 21334, u'ebn': 21324, u'oj#': 21323, u'rne': 21296, u'y#t': 21268, u'mes': 21258, u'st\xe9': 21240, u'rst': 21216, u'kup': 21204, u'kol': 21196, u'y#o': 21167, u'\u0165#d': 21127, u'k\xe9h': 21120, u'ch\xe1': 21069, u'vny': 21023, u'eti': 20991, u'\xe9#o': 20970, u't\xe1l': 20950, u'ans': 20942, u'ino': 20925, u'gra': 20925, u'\xfa#n': 20835, u'jov': 20812, u'dnu': 20807, u'nez': 20805, u'obc': 20804, u'da\u0165': 20802, u'r\xe1n': 20773, u'nt#': 20769, u'\u0165#t': 20719, u'sp\xf4': 20663, u'\u0165#k': 20628, u'ia\u013e': 20600, u'#\xfa\u010d': 20597, u'soc': 20574, u'k\xe1#': 20548, u'ja#': 20533, u's\xfav': 20502, u'i#\u017e': 20490, u'a\u010dn': 20473, u'tot': 20465, u'\xedn#': 20463, u'ied': 20424, u'erg': 20398, u'#sy': 20393, u'osk': 20373, u'rhu': 20358, u'bn\xe9': 20317, u'ole': 20255, u'uro': 20254, u'nal': 20247, u'a##': 20216, u'ned': 20215, u'a#\u010d': 20208, u'v\xfdr': 20178, u'\xe1ni': 20175, u'a\u013e#': 20149, u'ntu': 20147, u'nil': 20145, u'vyt': 20128, u'u#t': 20107, u'hli': 20070, u'prv': 20060, u'hy#': 20051, u'aci': 20032, u'zni': 20011, u'\xe1no': 20003, u'oht': 19983, u'#\xfas': 19980, u'#id': 19975, u'al\u0161': 19968, u'kaj': 19961, u'lia': 19928, u'ilo': 19920, u'nno': 19910, u'cii': 19904, u'#ba': 19900, u'nev': 19899, u'l#p': 19891, u'enc': 19889, u'#li': 19887, u'#dr': 19879, u'#tu': 19879, u'k#p': 19876, u'#se': 19845, u'upr': 19844, u'\xfavi': 19836, u'u#d': 19830, u'omu': 19821, u'iam': 19806, u'tos': 19757, u'tal': 19746, u'adi': 19735, u'niz': 19718, u'd\xf4v': 19682, u'u\u017ei': 19642, u'\u013eom': 19640, u'\xedpa': 19627, u'epo': 19593, u'nco': 19586, u'dro': 19584, u'dsk': 19580, u'lob': 19579, u'cic': 19505, u'o\u013en': 19485, u'nak': 19477, u'ytv': 19403, u'\u017eiv': 19402, u'dru': 19397, u'kyt': 19387, u'\xedct': 19360, u'vl\xe1': 19333, u'o#\u010d': 19332, u'moh': 19278, 
u'tu\xe1': 19274, u'ek#': 19265, u'oji': 19256, u'#uv': 19256, u'#uz': 19209, u'jen': 19193, u'\xedci': 19118, u'rh#': 19112, u'sys': 19099, u'v\xe4\u010d': 19082, u'\xe4\u010d\u0161': 19082, u'\xfa#a': 19056, u'dni': 19035, u'e#c': 19019, u'ola': 19007, u'liz': 19006, u'ozn': 18999, u'\xe1mc': 18954, u'no#': 18920, u'ram': 18918, u'#mn': 18903, u'v#z': 18890, u'ajm': 18834, u'#up': 18823, u'bod': 18804, u'lny': 18799, u'slu': 18756, u'ahn': 18751, u'b\u010da': 18738, u'\xf4vo': 18708, u'so#': 18699, u'\u017e\xedv': 18679, u'dmi': 18679, u'avu': 18676, u'ac#': 18625, u'm#d': 18620, u'vna': 18590, u'v\xe1#': 18568, u'jej': 18519, u'ava': 18499, u'ci\xed': 18498, u'aho': 18446, u'\u010di#': 18441, u'ogr': 18426, u'onk': 18423, u'poj': 18403, u'j#z': 18340, u'u#r': 18302, u'h#r': 18297, u'neb': 18281, u'iat': 18276, u'iko': 18274, u'v\xfds': 18264, u'leg': 18232, u'ici': 18216, u'j#n': 18189, u'm#m': 18179, u'k#n': 18158, u'po\u013e': 18155, u'iln': 18143, u'o#h': 18137, u'ela': 18128, u'm\xe1#': 18114, u'lej': 18100, u'ona': 18082, u'bch': 18081, u'er#': 18043, u'#my': 18043, u'azn': 18019, u'ato': 18018, u'ohl': 18005, u'ri\u0165': 17947, u'abe': 17939, u'oso': 17928, u'h\xe1d': 17916, u'jme': 17909, u'as#': 17863, u'k#s': 17804, u'd\xfa#': 17787, u'\u013eno': 17787, u'sit': 17769, u'ont': 17762, u'inf': 17756, u'vi#': 17751, u'\xfaro': 17744, u'y#e': 17702, u'u#e': 17698, u'atn': 17681, u'za\u010d': 17646, u'edl': 17632, u'roc': 17617, u'zas': 17615, u'on\xe1': 17597, u'h#d': 17591, u'u\u017e\xed': 17544, u'e\u013ek': 17537, u'ril': 17529, u'ody': 17487, u'u\xe1c': 17479, u'r\xe1t': 17457, u'm#b': 17430, u'o\u010de': 17408, u'l\xe1d': 17392, u'pin': 17377, u't\xfar': 17360, u'h#m': 17354, u'd\xfac': 17334, u'e#\u010d': 17299, u'ika': 17289, u'cht': 17245, u'k#v': 17222, u'rog': 17217, u'ruh': 17201, u'\xed#n': 17195, u'u#b': 17188, u'ote': 17175, u'\xfd#s': 17170, u'z\xe1s': 17142, u'vat': 17138, u'\u010dov': 17116, u'zpo': 17089, u'v#d': 17043, u'or\xfa': 17004, u'kyc': 17000, u'j#a': 16995, u'#sc': 16995, u'adr': 16992, u'i\xf3n': 16989, u'#v\xe4': 16974, u'upi': 16966, u'zav': 16965, u'exi': 16915, u'd\xed#': 16893, u'oby': 16847, u'cke': 16840, u'\xe9#m': 16827, u'lup': 16799, u't\xfat': 16788, u'\xfa#z': 16762, u'vol': 16755, u'k#t': 16734, u'dan': 16723, u'avy': 16716, u'sne': 16695, u'duj': 16660, u'#dv': 16642, u'lu#': 16633, u'odl': 16607, u'mor': 16604, u'j#d': 16570, u'o#e': 16570, u'pis': 16561, u'#da': 16560, u'rni': 16531, u'uzn': 16480, u'l\xe1n': 16453, u's#v': 16429, u'\xedst': 16419, u'v#b': 16408, u'ods': 16389, u'l#v': 16388, u'opr': 16372, u'cem': 16366, u'\xedch': 16356, u'kr\xed': 16349, u'pou': 16330, u'ity': 16313, u'\xe9#\u0161': 16303, u'l#s': 16303, u'uds': 16302, u'ras': 16302, u'tky': 16276, u'e\u0148#': 16217, u'\u010fak': 16214, u'o\u010da': 16200, u'j#o': 16150, u'int': 16146, u'ber': 16142, u'ode': 16102, u'n\xe1c': 16093, u'm\xe1m': 16092, u'i#j': 16083, u'u#j': 16075, u'k\xfa#': 16069, u'r\xedz': 16063, u'usk': 16037, u'eck': 16029, u'\u010dia': 16021, u'r\xedk': 16002, u'\u017eem': 16002, u'a\u017ed': 15996, u'ko\u013e': 15963, u'\xed#k': 15921, u's\u0165o': 15912, u'vaj': 15881, u'e\u0161t': 15877, u'n\xfat': 15869, u'jas': 15851, u'\xe1s#': 15850, u'kam': 15837, u'ply': 15832, u'n\xedk': 15819, u'#t\xfa': 15793, u'im#': 15745, u'#an': 15741, u'\xfdro': 15725, u'sl\xed': 15676, u'\xe1ty': 15669, u'zaj': 15640, u'tli': 15616, u'##a': 15593, u'din': 15588, u'n\xfa\u0165': 15569, u'vno': 15565, u'\xfa\u0165#': 15563, u'\xedkl': 15542, u'avn': 15540, 
u'vyu': 15539, u'ni\u010d': 15529, u'\u0161te': 15521, u'ed#': 15512, u'#fo': 15509, u'nym': 15507, u'v\xfdz': 15480, u'or#': 15468, u'hop': 15468, u'dli': 15457, u'ak\xfd': 15424, u'a\u0161e': 15416, u'ine': 15401, u'\u0165#r': 15390, u'enk': 15388, u'vy\u0161': 15366, u'nav': 15357, u'yu\u017e': 15336, u'epr': 15334, u'zal': 15316, u'inu': 15305, u'ozr': 15298, u'eri': 15286, u'e\u013ea': 15273, u'm#j': 15258, u'ym#': 15237, u'dra': 15235, u'z#t': 15222, u'elo': 15219, u'n\u0161t': 15216, u'po\u017e': 15163, u'odm': 15136, u'\u0165ah': 15127, u'man': 15115, u'zml': 15105, u'o#c': 15104, u'lis': 15100, u'mok': 15100, u'#hr': 15097, u'dlo': 15085, u'\u0165ou': 15031, u'\xe1#v': 15013, u'\u010di\u0165': 15004, u'omt': 14997, u'v\xfdb': 14996, u's#n': 14958, u'#dn': 14947, u'mci': 14930, u'ust': 14917, u'jm\xe4': 14916, u'cha': 14916, u'm\xe4#': 14916, u'\xfa\u010di': 14909, u'\u0165a\u017e': 14905, u'sah': 14900, u'mne': 14852, u'bit': 14851, u'\u0161\xed#': 14835, u'v\xfd#': 14789, u'ur\u010d': 14775, u'j#r': 14764, u'ieh': 14730, u'jad': 14722, u'yko': 14702, u'zre': 14679, u'gen': 14678, u'vn\xfd': 14658, u'rea': 14631, u'zin': 14630, u'loh': 14603, u'e#\xfa': 14593, u'kt\xed': 14542, u'o\u010di': 14534, u'ejn': 14511, u'ak\xe9': 14507, u'odv': 14494, u'odi': 14451, u'\xe1v#': 14451, u'j#m': 14439, u'opl': 14434, u'buj': 14413, u'xis': 14410, u'dko': 14377, u'nfo': 14358, u'r\xfa#': 14347, u'\xed#z': 14343, u'at\xe9': 14292, u'#kl': 14286, u'vo\u013e': 14269, u'gov': 14268, u'\u017ei\u0165': 14267, u'luv': 14260, u'kaz': 14259, u'v\xfdm': 14241, u'vec': 14234, u'ruj': 14230, u'iou': 14228, u'imi': 14225, u'vyk': 14207, u'mlu': 14205, u'zod': 14203, u'ma\u0165': 14201, u'r\xedl': 14192, u'd\u013ea': 14180, u'ec#': 14153, u'\xfalo': 14152, u'#pe': 14148, u'o#i': 14139, u'edp': 14111, u'cen': 14102, u'o\u010dt': 14068, u'any': 14068, u's\xe1r': 14057, u's\xfad': 14041, u'y#d': 14037, u'd\xe1v': 14030, u'h#t': 14025, u'bra': 13994, u'ruk': 13991, u'tno': 13943, u'is\xe1': 13909, u'poc': 13901, u'#ud': 13898, u'a\u0161i': 13890, u'vni': 13877, u'maj': 13865, u'#vn': 13856, u'nad': 13856, u'tr\xe1': 13854, u'ome': 13847, u'ud\xed': 13829, u'ntr': 13815, u'i\u010dn': 13801, u'ka\u017e': 13792, u'rip': 13774, u'\u0161\xedm': 13762, u'#ex': 13761, u'\xe1vo': 13757, u'od\u013e': 13753, u'ese': 13747, u'vn\xfa': 13735, u'#zr': 13726, u's#t': 13722, u'e#l': 13720, u'zn\xe1': 13714, u'p\xeds': 13701, u'i#u': 13697, u'oni': 13685, u'pe#': 13685, u'oce': 13675, u'rev': 13646, u'\xfdbo': 13631, u'vuj': 13615, u'dod': 13610, u'\u0161ej': 13600, u'\xe1st': 13588, u'py#': 13580, u'gan': 13574, u'e\u0161e': 13572, u'mil': 13541, u'#\xfal': 13520, u'\xe9#k': 13442, u'nst': 13435, u'mat': 13424, u'lav': 13424, u'v\xfd\u0161': 13410, u'tit': 13398, u'zuj': 13394, u'ie\u0165': 13380, u'\xf4ra': 13374, u'd\xf4r': 13374, u'boj': 13311, u'rho': 13297, u'\xfa#d': 13296, u'oja': 13265, u'n\xe1l': 13216, u'\xe9m#': 13211, u'dvo': 13194, u'dve': 13173, u'evy': 13153, u'as\u0165': 13148, u'ozo': 13146, u'ema': 13137, u'jat': 13134, u'oh\u013e': 13110, u'dar': 13095, u'\xe1#a': 13073, u'kia': 13066, u'kci': 13045, u'n#k': 13010, u'j\u0161\xed': 12991, u'udo': 12985, u'ora': 12974, u'\xe1ra': 12960, u'nul': 12950, u'ahu': 12930, u'rm\xe1': 12909, u'##m': 12901, u'\xe9#b': 12892, u'ma#': 12876, u'a#l': 12869, u'ez#': 12858, u'es\u0165': 12854, u'ukt': 12835, u'o#\xfa': 12835, u'art': 12832, u'oki': 12826, u'\xe9#r': 12818, u'\u0165#m': 12803, u'\u0161i\u0165': 12796, u'zpr': 12783, u'gie': 12758, u'm#c': 12754, 
u'#e\u0161': 12752, u'\xedko': 12747, u'#ru': 12746, u'o#f': 12721, u'tam': 12712, u'zo#': 12699, u'it\xfa': 12682, u'y#r': 12665, u'esk': 12649, u'z#n': 12639, u'#uk': 12630, u'\xe1sa': 12614, u'\xfa#m': 12612, u'ju#': 12599, u'\xed#o': 12591, u'\u0161e#': 12590, u'k#a': 12586, u'l#b': 12584, u'na\u010d': 12570, u'rem': 12540, u'pek': 12535, u'jeh': 12533, u'up#': 12508, u'imo': 12481, u'#su': 12458, u'\xedta': 12457, u'\u010d\u0161i': 12453, u'e\u010di': 12447, u'\xfaca': 12432, u'\xe1zo': 12428, u'\xe9#d': 12420, u'\u0161tr': 12418, u'sol': 12393, u'v\xe9h': 12389, u'odr': 12358, u'upl': 12351, u'kum': 12321, u'#um': 12317, u'tru': 12309, u'\u0161\xedc': 12301, u'ntn': 12265, u'ly#': 12259, u'\xfd#v': 12235, u'iev': 12232, u'kre': 12227, u'm#\u010d': 12221, u'rim': 12212, u'm\xe1c': 12207, u'tec': 12192, u'in\u0161': 12188, u'uve': 12176, u'nar': 12172, u'\u017eil': 12154, u'avr': 12152, u'u#\xfa': 12149, u's#c': 12145, u'opn': 12131, u'\xfdva': 12119, u'sk\xfa': 12119, u'e\u0165#': 12098, u'\xfa#o': 12041, u'\u0161ti': 12037, u'z#p': 12005, u'dky': 11966, u'iku': 11964, u'ozm': 11925, u'ons': 11911, u'vla': 11909, u'tok': 11898, u'tna': 11890, u'nit': 11887, u'eci': 11878, u'o##': 11868, u'chu': 11862, u'ro\u010d': 11860, u'#d\xe1': 11852, u'tko': 11848, u'cno': 11844, u'rep': 11837, u'upo': 11825, u'\xe9gi': 11791, u'##p': 11784, u'v\xe4z': 11767, u'isp': 11764, u'p\u0161i': 11746, u'bu#': 11730, u'\xfdzn': 11723, u'vn\xe9': 11671, u'zen': 11670, u'\xe1ce': 11667, u'#ok': 11660, u'a#f': 11648, u'emi': 11645, u'#ku': 11642, u'o#\u0161': 11637, u'zah': 11635, u'\u010dit': 11630, u'ejm': 11625, u'j#e': 11622, u'izo': 11579, u'izm': 11560, u'kri': 11552, u'eva': 11539, u'k#z': 11531, u'm#u': 11527, u'#\xfap': 11523, u's\xfah': 11521, u'pl\xe1': 11520, u'u\u0165#': 11509, u'rga': 11500, u'zv\xfd': 11492, u'nky': 11488, u'ou\u017e': 11485, u'vyj': 11480, u'ena': 11455, u'd\xf4s': 11439, u'cio': 11433, u't\xe9g': 11419, u'eny': 11416, u'tar': 11414, u'umo': 11395, u'va\u017e': 11376, u'vyp': 11372, u'ru\u0161': 11363, u'ed\u010d': 11360, u'\xe9mo': 11358, u'vzd': 11343, u'd#p': 11318, u'dpi': 11289, u'\xfahl': 11289, u'emu': 11264, u'r\u010di': 11250, u'bno': 11217, u'\xe1#n': 11166, u'bal': 11129, u'h#\u010d': 11128, u'pu#': 11127, u've\u0148': 11105, u'ebi': 11105, u'n\xe1z': 11099, u'et#': 11092, u'hor': 11052, u'm#h': 11032, u'sel': 11022, u'i#\u010d': 11011, u'otv': 11010, u'\u013e#i': 10987, u'z\xe1u': 10986, u'\xe1uj': 10985, u'\xe1d#': 10973, u'oju': 10968, u'it\xe1': 10929, u'zd\xf4': 10926, u'\xfd#n': 10917, u'ak\xe1': 10906, u'yja': 10901, u'#t\xe1': 10897, u'zle': 10870, u'nku': 10860, u'ond': 10856, u'rus': 10828, u'ion': 10816, u'it\xfd': 10786, u'riz': 10777, u'z\xe1r': 10776, u'nu\u0165': 10761, u'r\u017ea': 10758, u'\xf3pe': 10751, u'#vr': 10743, u'ana': 10738, u'jom': 10712, u'eba': 10710, u'\xfa#k': 10706, u'vac': 10705, u'er\xe1': 10696, u'anu': 10678, u'rge': 10676, u'i#c': 10661, u'r\xe1d': 10659, u'\xfa#t': 10635, u'dlh': 10632, u'com': 10600, u'st\xfa': 10594, u'in\xfd': 10580, u'#br': 10558, u'\u0148a#': 10539, u'\xe1za': 10523, u'dav': 10521, u'idi': 10506, u'nec': 10506, u'n\xe1\u0161': 10499, u'r\xfdm': 10496, u'\xe1na': 10485, u's\u0165a': 10484, u'\xfd#r': 10480, u'jek': 10476, u'mec': 10465, u'##v': 10463, u'#\u010d\xed': 10461, u'o\u013ek': 10452, u'i#i': 10444, u'gic': 10439, u'nod': 10433, u'esi': 10427, u'lu\u017e': 10413, u'\xe1vu': 10392, u'huj': 10376, u't\xfac': 10353, u'uch': 10338, u'hro': 10338, u'mim': 10332, u'rgi': 10328, u'ure': 10327, u'onc': 
10324, u'bne': 10322, u'and': 10316, u'z\u0165a': 10305, u'vz\u0165': 10302, u'#kv': 10299, u'tel': 10297, u'#dl': 10288, u'g\xe1n': 10280, u'zde': 10241, u'st#': 10234, u'd\xe1m': 10227, u'utn': 10224, u'zar': 10221, u'ice': 10217, u'omp': 10213, u'rg\xe1': 10212, u'itn': 10210, u'zky': 10203, u'kva': 10193, u'#p\xf4': 10186, u'ti\u0165': 10177, u'ktu': 10157, u'e#f': 10154, u'k\xe1z': 10144, u'\u017eov': 10136, u'\xe1#z': 10129, u'ujm': 10100, u'vil': 10080, u'\xf3pa': 10077, u'#is': 10066, u'hyb': 10061, u'rek': 10052, u'v#j': 10015, u'u#\u017e': 9976, u'vpl': 9971, u'v\xfa#': 9957, u't\xe1#': 9956, u'har': 9937, u'ra\u010d': 9937, u'\xf4sl': 9936, u'get': 9924, u'eru': 9917, u'rin': 9911, u'ted': 9905, u'pen': 9893, u'\xedso': 9890, u'\xfdsl': 9878, u'nka': 9869, u'tou': 9868, u'gi\xf3': 9863, u'oba': 9847, u'ian': 9847, u'nis': 9844, u'\u017en\xe9': 9818, u'rol': 9813, u'et\xed': 9809, u'lyv': 9805, u'ahl': 9801, u'n#a': 9799, u'ial': 9790, u'\xed#b': 9754, u'sno': 9741, u'be#': 9737, u'\xe1vi': 9736, u'd\xe1#': 9731, u'em\xf4': 9726, u'v#\u010d': 9721, u'l\u0161\xed': 9717, u'ves': 9716, u'\u010dil': 9690, u'pec': 9675, u'enu': 9670, u'moz': 9669, u'n#v': 9664, u'ahr': 9660, u'\u013eko': 9648, u'zka': 9642, u'\u017ene': 9627, u'tv\xe1': 9623, u'zn\xed': 9623, u'aro': 9620, u'\xfara': 9604, u'i#h': 9599, u'ono': 9584, u'it\xed': 9584, u's#k': 9570, u'ank': 9562, u'j\xedn': 9560, u'hr\xe1': 9548, u'atk': 9547, u'jav': 9540, u'ajv': 9538, u'na\u0165': 9534, u'abo': 9534, u'aj\xed': 9530, u'rno': 9529, u'sn\xe9': 9523, u'\u0165#e': 9521, u'zy#': 9502, u'ifi': 9492, u'r\xe1l': 9484, u'\xe1cu': 9466, u'\u0161iu': 9464, u'vos': 9456, u'vyz': 9451, u'k#d': 9436, u'##s': 9433, u'ieb': 9432, u'vek': 9427, u'nve': 9426, u'#v\xed': 9424, u'han': 9423, u'\xe9my': 9421, u'fon': 9420, u'm#e': 9419, u'vn\xed': 9400, u'neu': 9399, u'#au': 9397, u'#ju': 9395, u'rec': 9390, u'esv': 9379, u'\u017e#p': 9367, u'r\xedm': 9361, u'ref': 9352, u'\u017eu#': 9336, u'el\xe1': 9335, u're\u010d': 9335, u'teg': 9323, u'ymi': 9321, u'\xfd#a': 9306, u'me\u0148': 9303, u'#va': 9302, u'lom': 9294, u'l\xf3g': 9292, u'chl': 9288, u'ik\xe1': 9272, u'\xe1tn': 9269, u'lek': 9268, u'bi\u0165': 9268, u'\u0165#j': 9245, u'kr\xe1': 9232, u't#v': 9224, u'nas': 9223, u'rtn': 9212, u'ra\u0165': 9212, u'#h\u013e': 9211, u'emy': 9208, u'mom': 9208, u'yhn': 9207, u'ime': 9206, u'bec': 9203, u'\xfada': 9200, u'e\u010de': 9198, u'ebe': 9187, u'l\xe1c': 9185, u'\u017eie': 9184, u't#a': 9182, u'vit': 9182, u'rak': 9177, u'nkr': 9173, u'tn\xfa': 9173, u'lik': 9168, u'\u013en\xe9': 9158, u'an\xe1': 9151, u'\xfasp': 9143, u'les': 9133, u'vr\xe1': 9130, u'v\xe1d': 9127, u'\xfacn': 9119, u'kus': 9115, u'ta\u0165': 9115, u'aku': 9114, u'd#n': 9109, u'e\u0161i': 9108, u'r\xe9t': 9101, u'e\u0148u': 9085, u'ovu': 9084, u'ric': 9082, u'\xf3py': 9081, u'ral': 9081, u'\xed#t': 9063, u'a\u010fa': 9055, u's#o': 9053, u'lni': 9052, u'l#n': 9051, u'u\u0161e': 9039, u'isa': 9024, u'ers': 9023, u'cko': 9019, u't\xfd#': 9010, u'tsk': 8988, u'nim': 8980, u'chy': 8965, u'e\u010do': 8961, u'hol': 8948, u'uni': 8941, u'#vp': 8941, u'l\u0148u': 8940, u'kr\xe9': 8937, u'v\xedt': 8929, u'\xfapl': 8914, u'ext': 8911, u'ohu': 8908, u'v\xe1r': 8899, u'\xe1r#': 8879, u'toc': 8863, u'su#': 8862, u'\xedle': 8861, u'\u017eel': 8861, u'inv': 8847, u'kti': 8837, u'\xe9tn': 8828, u'j#t': 8824, u'iav': 8821, u'ajc': 8817, u'ume': 8807, u'se#': 8804, u'til': 8800, u'bon': 8789, u'ovs': 8781, u'tab': 8777, u'\xe9#t': 8775, u'\u010dn\xfa': 8771, u'kuj': 8770, u'\u0161ic': 
8769, u'ov\u0161': 8743, u'\u0165#i': 8741, u'ult': 8733, u'upu': 8727, u'chn': 8726, u'\xe1vy': 8721, u'd\u010de': 8712, u'otl': 8711, u'mun': 8678, u'#\u010de': 8678, u't\xe1v': 8667, u'apo': 8662, u'n\xed\u017e': 8656, u'l\xe1s': 8650, u'n\xe9m': 8646, u'etv': 8643, u'per': 8640, u'j#b': 8631, u'\xe9#\u017e': 8616, u'tur': 8608, u'obm': 8603, u'iro': 8585, u'#\u0161p': 8584, u'\u0161pe': 8576, u'\xfafa': 8575, u'pa#': 8575, u'\xed#m': 8563, u'ara': 8560, u'esa': 8556, u'chv': 8549, u'udr': 8537, u'h#e': 8531, u't#p': 8530, u'abi': 8529, u'dze': 8526, u'ozd': 8526, u'tvr': 8519, u'mon': 8519, u'i#\u0161': 8507, u'zmu': 8505, u'bro': 8501, u'ohr': 8500, u'on\u010d': 8495, u'\xe1ko': 8495, u'fun': 8494, u'\xed#d': 8486, u'\u013ek\xfd': 8484, u'yhl': 8477, u'one': 8456, u'dku': 8444, u'\xedma': 8441, u'eha': 8441, u's#r': 8438, u'bie': 8438, u'oc#': 8431, u'\xf3no': 8420, u'u#\u010d': 8420, u'ozi': 8407, u't\xfap': 8405, u'\xfasi': 8403, u'k#b': 8400, u'sab': 8393, u'k#m': 8375, u'rdi': 8375, u'\xfa#r': 8364, u'pon': 8350, u'd#v': 8349, u'obs': 8347, u'at\u0148': 8343, u'#sn': 8341, u'a#\u0161': 8335, u'#as': 8324, u'\u017ead': 8316, u'l\xe1v': 8302, u'bme': 8298, u'#oh': 8289, u'o#l': 8289, u'vyv': 8277, u'edu': 8276, u'z\xe1l': 8271, u'\xfast': 8270, u'v\xe1m': 8267, u'tny': 8264, u'pak': 8264, u'#fu': 8260, u'\u0161in': 8236, u'l#a': 8233, u'eja': 8230, u'kor': 8221, u'lno': 8221, u'\xe1my': 8214, u'zem': 8210, u'are': 8207, u'z#v': 8203, u'poh': 8202, u'rmo': 8198, u'tla': 8196, u'moj': 8185, u'u#u': 8182, u'd\xfaf': 8168, u'\xf4\u017eu': 8167, u'a\u010do': 8159, u'fik': 8147, u'det': 8140, u'ov\xfa': 8135, u'mit': 8126, u'y#j': 8121, u'rhy': 8120, u'trv': 8116, u'yto': 8106, u'sna': 8103, u'hl\xe1': 8092, u'\u0161ia': 8088, u'#bi': 8081, u'zlo': 8070, u'pl\u0148': 8064, u'ut\xed': 8054, u'vi\u0165': 8050, u'nca': 8049, u'\u0161ov': 8038, u'eob': 8030, u't\u0148o': 8030, u'avk': 8024, u'#fa': 8022, u'oz\xed': 8018, u'#fr': 8015, u'r\u017ei': 8013, u'bli': 8013, u'cky': 7991, u'r#p': 7964, u'\xe1#k': 7962, u'nor': 7958, u'\u010dom': 7949, u'tex': 7949, u'kym': 7944, u'e\u0161n': 7941, u'ne\u010d': 7935, u'\xe9#u': 7933, u'\u010del': 7923, u'bom': 7915, u'vou': 7912, u'ory': 7903, u'h#b': 7901, u'd#s': 7891, u'udi': 7875, u'nt\xe1': 7853, u'u#h': 7842, u'\u0161ko': 7833, u'kod': 7833, u'\xfd#z': 7832, u'ace': 7826, u'n#s': 7826, u'edy': 7824, u'uk\xe1': 7823, u'k##': 7817, u'j\xedm': 7807, u'ru\u010d': 7804, u'\u010f\u017ee': 7791, u'e\u010f\u017e': 7791, u'\xfala': 7789, u'k\xe1c': 7770, u'sek': 7759, u'sk\xe1': 7756, u'kul': 7755, u'hoc': 7750, u'#im': 7740, u'\xfd#t': 7735, u'm#i': 7730, u'vyr': 7719, u'koj': 7691, u'cit': 7682, u'm\xe1l': 7676, u's#a': 7667, u'y#u': 7658, u'\u017euj': 7642, u'duc': 7634, u'#ov': 7629, u'nn\xe9': 7620, u'etn': 7616, u'\xe1#b': 7613, u'#tv': 7611, u'zao': 7610, u'fra': 7608, u'mn\xe9': 7608, u'iv\xfd': 7604, u'zit': 7602, u'ban': 7601, u'h\xe9#': 7596, u'lsk': 7594, u'asl': 7587, u'efe': 7584, u'ery': 7578, u'rva': 7577, u'ivi': 7570, u'nii': 7566, u'oma': 7564, u'vov': 7550, u'ant': 7545, u'pop': 7544, u'\xfd#b': 7539, u'br\xe1': 7533, u'u#i': 7527, u'a\u010da': 7527, u'kur': 7518, u'zri': 7514, u'a\u017e#': 7509, u'po\u010f': 7506, u'\xedsk': 7496, u'eta': 7495, u'kan': 7478, u'o\u010fa': 7474, u'bn\xfd': 7469, u'ytn': 7462, u'yse': 7459, u'e#\u0161': 7458, u'as\xed': 7451, u'oka': 7449, u'obu': 7448, u'idl': 7446, u'\u010dak': 7442, u'\xe1rn': 7439, u'aru': 7439, u'l\u0161i': 7438, u't\xedm': 7433, u'etr': 7423, u'use': 7417, u'\xedna': 7400, 
u'r\xf4z': 7398, u'sok': 7384, u'i\u010do': 7383, u'#oz': 7382, u'zat': 7382, u'yso': 7377, u'##n': 7374, u'ja\u0165': 7372, u'ulo': 7361, u'u#c': 7359, u'l\xe1#': 7358, u'oky': 7340, u'glo': 7339, u'j#i': 7330, u'amu': 7327, u'nne': 7320, u'bre': 7318, u'l#z': 7315, u'let': 7310, u't\xedc': 7307, u'ob\xe1': 7307, u'eby': 7307, u'nti': 7305, u'jal': 7304, u'ok\xe1': 7304, u't\xe1c': 7302, u'fam': 7300, u'n\xe1k': 7299, u'tnu': 7298, u'n#n': 7289, u'#\xfad': 7280, u'di\u0165': 7279, u'z\xedc': 7278, u'#\u0161k': 7265, u'\xf4zn': 7264, u'efo': 7258, u'opo': 7251, u'\xedk#': 7249, u'zoh': 7244, u's\xfal': 7244, u'\u010dn\xe1': 7243, u'\xe1#\u017e': 7241, u'\u010dto': 7235, u'zko': 7230, u'zos': 7228, u'ida': 7228, u'eve': 7211, u'sn\xfd': 7200, u'vst': 7177, u'net': 7176, u'nsp': 7167, u'#zh': 7161, u'#ha': 7151, u'y#\u010d': 7149, u'\u0165#\u010d': 7149, u'rio': 7143, u'dri': 7126, u'\xe1\u017en': 7122, u'#ml': 7117, u'#ry': 7101, u'h#h': 7100, u'nn\xfd': 7096, u'ita': 7093, u'aob': 7089, u'ryb': 7086, u'ob\xed': 7085, u'\u010det': 7084, u'#r\xf4': 7084, u'\xe1t#': 7084, u'lko': 7080, u'zan': 7077, u'rez': 7075, u'tmi': 7075, u'aut': 7073, u'akc': 7073, u'\u013en\xfd': 7071, u'vsk': 7069, u'asa': 7064, u'zku': 7063, u'#gl': 7063, u'smi': 7043, u'zmi': 7041, u'\u017e#v': 7040, u'z\xe1p': 7035, u'rn\xe9': 7031, u'zdi': 7029, u'vky': 7023, u'asi': 7023, u'iba': 7022, u'hlo': 7016, u'lho': 7010, u'obd': 7007, u'\xfd#d': 6998, u'iso': 6993, u'dn\xfa': 6992, u'elk': 6990, u'tn\xed': 6986, u'sy#': 6974, u'li\xf3': 6973, u'i##': 6962, u'k#o': 6961, u'ozu': 6956, u'#ag': 6925, u'#gr': 6922, u'zap': 6918, u'li\u0165': 6911, u'eb#': 6908, u'\xfdsk': 6907, u'pno': 6902, u'koc': 6888, u'zvy': 6880, u'h#i': 6878, u'\xe1#d': 6866, u'bdo': 6861, u'nok': 6860, u'\u0165#b': 6850, u'\u010f#s': 6849, u't#s': 6845, u'ed\xe1': 6828, u'cim': 6827, u'k\xe1\u017e': 6812, u'tin': 6812, u'\xe1v\xe4': 6803, u'ahy': 6799, u'jho': 6794, u's#m': 6793, u'eza': 6780, u'r#v': 6774, u'onf': 6767, u'zu#': 6767, u'kat': 6762, u'esm': 6759, u'mod': 6758, u'ajo': 6750, u'\xfdmt': 6748, u'\xf4r#': 6744, u'ars': 6741, u'kde': 6737, u'za\u0165': 6731, u'\xfa#\xfa': 6725, u'aka': 6724, u'obk': 6722, u'ln\xe9': 6722, u'k\xf4r': 6716, u'ez\xe1': 6708, u'sk\xf4': 6701, u'#lo': 6694, u'spa': 6689, u'#r\xfd': 6683, u'dzo': 6674, u're\u0161': 6673, u'gis': 6664, u'pie': 6664, u'v\xedz': 6663, u'ojn': 6660, u'#am': 6651, u'ze#': 6618, u'ecn': 6612, u'jic': 6611, u'i#l': 6608, u'zh\u013e': 6607, u'vzh': 6605, u'c#a': 6604, u'ikt': 6604, u'dla': 6598, u'mn\xfd': 6595, u'z\xeds': 6593, u'v\xfdh': 6582, u'#\u0165a': 6576, u'ur#': 6573, u'fer': 6573, u'h#\xfa': 6571, u'um#': 6570, u'ive': 6563, u'avd': 6562, u'ron': 6559, u'age': 6539, u'siu': 6538, u's#d': 6534, u'met': 6525, u'mni': 6523, u'dou': 6510, u'al\xfd': 6510, u'\xe1#j': 6493, u'vrd': 6482, u'us#': 6482, u'ha#': 6475, u'\u010duj': 6475, u'\u010dtu': 6472, u'\xedsl': 6453, u'ipo': 6450, u'azy': 6448, u'\xed#r': 6432, u'a\u010di': 6430, u'\u0161eo': 6427, u'eal': 6427, u'#o\u010d': 6422, u'is\u0165': 6421, u'ior': 6418, u'log': 6416, u'ck\xfa': 6413, u'kt#': 6407, u'##r': 6405, u'ipr': 6404, u'd#b': 6403, u'egu': 6401, u'oz\u0161': 6396, u'azu': 6383, u'iaz': 6383, u'\xe9#h': 6383, u'etc': 6381, u'tci': 6380, u'u\u017eb': 6380, u'toj': 6378, u'a\u017eu': 6375, u'ike': 6373, u'\u013eve': 6365, u'o\u013ev': 6365, u'\xe1#o': 6351, u'amn': 6348, u'gio': 6339, u'hud': 6336, u'hom': 6329, u'kos': 6322, u'm#\u0161': 6316, u'#z\xed': 6316, u'odk': 6310, u'yva': 6282, u'a#\u013e': 6273, u'bsa': 
6268, u'\u0161ta': 6266, u'umu': 6265, u'#\xfav': 6241, u'zid': 6239, u'\u0161\xedr': 6236, u'\xedli': 6228, u'\xfary': 6224, u'#em': 6224, u'e\u013es': 6221, u'zac': 6219, u'\xfd#k': 6216, u'\xe1ta': 6214, u'#kd': 6213, u'voz': 6208, u'uvy': 6201, u'\xfa#b': 6199, u'ln\xfd': 6198, u'\xe1se': 6195, u'ord': 6190, u'ego': 6181, u'\u0161il': 6180, u'k#k': 6174, u'ben': 6173, u'ne\u017e': 6172, u'\u010d\xedn': 6170, u'l\xe9#': 6164, u'en\xfa': 6164, u'neo': 6162, u'l#\u017e': 6157, u'z#d': 6156, u'rn\xfd': 6154, u'oh\xfd': 6153, u'e#\u013e': 6152, u'v#i': 6152, u'byv': 6151, u'\xfatr': 6146, u'on#': 6143, u'jno': 6143, u'##k': 6143, u'#ib': 6142, u'dch': 6134, u'v#c': 6132, u'uka': 6117, u'v\xfdv': 6112, u'\xedzy': 6102, u'rvo': 6100, u'oza': 6099, u'doc': 6099, u'ajn': 6099, u'ogi': 6097, u'nao': 6096, u'\xe1si': 6093, u'r#a': 6081, u'ul\xe1': 6079, u'\xfdvo': 6071, u'tve': 6064, u'zke': 6052, u'mot': 6050, u'tn\xe1': 6036, u'j#h': 6034, u'u\u0161n': 6034, u'\u0161a#': 6033, u'h#c': 6012, u'lid': 6011, u'ann': 6003, u'it#': 5998, u'ata': 5996, u'kl\xed': 5995, u'\xed#e': 5993, u'ck\xe1': 5993, u'y#i': 5989, u'ri\u010d': 5985, u'zai': 5980, u'\u017eat': 5980, u'\xed#\u017e': 5969, u'c#p': 5948, u'esn': 5947, u'p\u0161e': 5944, u'eln': 5938, u'cej': 5937, u'nko': 5935, u'v\xe1s': 5934, u'ro#': 5931, u'\u010d\xed#': 5928, u'al\xed': 5917, u'ung': 5913, u'ase': 5910, u'zva': 5908, u'\xfava': 5905, u'l\xfd#': 5904, u'ica': 5899, u'z#k': 5890, u'c#n': 5888, u'nat': 5887, u'adz': 5871, u'z\xfdv': 5863, u'lyn': 5857, u'#bl': 5854, u'ybn': 5854, u'h#f': 5849, u'\u0161li': 5846, u'\xfd\u0161e': 5845, u't#n': 5844, u'n#z': 5839, u'rid': 5839, u'asp': 5832, u'o\u0161t': 5820, u'dam': 5818, u'cou': 5816, u'esp': 5816, u'\xe9#i': 5813, u'dny': 5811, u'na\u010f': 5799, u'i#\xfa': 5790, u'ozs': 5785, u'bar': 5785, u'mob': 5776, u'zny': 5774, u'#zi': 5770, u'ngo': 5767, u'gul': 5763, u'duk': 5759, u'upn': 5753, u'bav': 5753, u'ie\u010d': 5751, u'lt\xfa': 5751, u'#ge': 5750, u'dl\xe1': 5749, u'\xfd#\u017e': 5748, u'#zj': 5744, u'rop': 5740, u'\xfd#m': 5735, u'en\u0161': 5732, u'\xe9#j': 5726, u'va\u0161': 5724, u'l#o': 5724, u'arm': 5722, u'li\u0161': 5721, u'skr': 5713, u'\u013eah': 5710, u'#el': 5702, u'ji#': 5692, u's#s': 5677, u'vyn': 5675, u'z#h': 5674, u'ad\xe1': 5673, u'\xe1#m': 5668, u'nol': 5658, u'v\xe1c': 5653, u'fic': 5649, u'hle': 5639, u'yri': 5639, u'pev': 5632, u'av\xed': 5631, u'nt\xfa': 5629, u'tis': 5629, u'in\xe9': 5627, u'rik': 5620, u'rof': 5616, u'h#j': 5612, u'\u013esk': 5602, u'aj\u0161': 5599, u'\u010dl\xe1': 5597, u'iv\xe9': 5596, u'k\xe1v': 5596, u'h#u': 5594, u'kis': 5593, u'#zb': 5589, u'ypr': 5583, u'\u017e#n': 5583, u'dn\xe1': 5582, u'egr': 5582, u'dot': 5580, u'c\u0165#': 5580, u'\xe1nk': 5575, u'ota': 5565, u'mar': 5557, u'iec': 5544, u'zno': 5544, u'a\u017eo': 5544, u'a#g': 5537, u'lil': 5536, u'\xe1#t': 5535, u'odb': 5532, u'm\xf4c': 5527, u'p\xe4\u0165': 5521, u'd#z': 5521, u'hno': 5510, u'#ek': 5510, u're\u017e': 5509, u'vy\u017e': 5508, u'\xfd#o': 5500, u'om\xf4': 5496, u'\u0165#\xfa': 5485, u'enz': 5485, u'\u0165#u': 5483, u'\xedmy': 5482, u'\xf4c\u0165': 5477, u'd#k': 5470, u'\xe1tu': 5468, u'lu\u0161': 5468, u'aki': 5454, u'm\xedn': 5451, u'na\u017e': 5448, u'ort': 5447, u'#\xfaz': 5446, u'esu': 5446, u'\u013e\xfa\u010d': 5436, u'k\u013e\xfa': 5436, u'#a\u017e': 5436, u'ceh': 5431, u'\xe9#\u010d': 5430, u'abs': 5426, u'tvi': 5424, u'e\u013e#': 5424, u'v\xed#': 5423, u'reh': 5419, u'du\u0161': 5402, u'\xe4\u0165#': 5400, u'\u0165#l': 5389, u'\xe9#\xfa': 5385, u'##d': 
5385, u'ro\u0161': 5382, u'\xe1da': 5381, u'emb': 5379, u'oty': 5364, u'\u010dn\xed': 5359, u'obj': 5350, u'dol': 5335, u'zum': 5332, u'\xed\u017ee': 5325, u'\xfa\u010do': 5320, u'ne\u0161': 5300, u'dbo': 5297, u'z#o': 5296, u'kty': 5294, u'rmy': 5293, u'jn\xfd': 5289, u'pus': 5280, u'jne': 5278, u'\xe1tm': 5275, u'#af': 5273, u'dil': 5272, u'mo#': 5264, u'hv\xe1': 5259, u'l\xedk': 5246, u'nah': 5241, u'v\xe1l': 5238, u'om\xe1': 5234, u'ktr': 5226, u'##o': 5214, u'a\u0148o': 5206, u'm\xf4j': 5185, u'h\xfdc': 5185, u'om\xed': 5183, u'icu': 5181, u'\xe1nu': 5178, u'esc': 5172, u'\u0165#h': 5157, u'\u017edy': 5156, u'\u017eni': 5153, u'tni': 5147, u'cuj': 5146, u'v\xedn': 5144, u'y##': 5140, u's#e': 5140, u'koo': 5139, u'\xfd#j': 5124, u'kt\xfa': 5123, u'sud': 5121, u'dka': 5115, u'#k\u013e': 5113, u'ohy': 5106, u'i#f': 5105, u'y\u0161e': 5099, u'\xe1ne': 5099, u'is#': 5097, u'a\u017ek': 5095, u'epu': 5093, u'vo\u010d': 5090, u'zra': 5088, u'dva': 5086, u'ndu': 5084, u'd#a': 5084, u'\u017e#s': 5081, u'da\u0148': 5059, u'ut\xe9': 5058, u'k\xfas': 5057, u'\u0161n\xe9': 5057, u'\xfd\u0161i': 5055, u'y\u0161\u0161': 5047, u'u\u010di': 5046, u'yz\xfd': 5044, u'oor': 5029, u'jn\xe9': 5020, u'v#u': 5020, u'tiv': 5020, u'tol': 5018, u'k#r': 5013, u'okl': 5013, u'r#n': 5012, u'ytu': 5011, u'k#j': 5004, u'j#c': 4997, u'dzu': 4996, u'zik': 4978, u'eso': 4976, u'v\u010fa': 4971, u'p#k': 4971, u'no\u017e': 4967, u'y\u017ea': 4967, u'\xe1ca': 4967, u'\u0161ou': 4966, u'seb': 4965, u'v\u017ed': 4957, u'\u0161ir': 4953, u'fek': 4948, u't\xfa#': 4946, u'\u017est': 4943, u'\xf4ve': 4939, u'\u017ed\xfd': 4938, u'ni\u017e': 4938, u'oze': 4935, u'rou': 4928, u'z#r': 4922, u'ard': 4911, u'jsk': 4909, u'ol\xf3': 4909, u'rma': 4904, u'adm': 4901, u'an\xfa': 4898, u'\xe1sl': 4894, u'\u013ene': 4886, u'd\xfd#': 4875, u'em\xe1': 4875, u'isn': 4873, u'etl': 4871, u'a\u017ei': 4868, u'\xe1#r': 4864, u'ais': 4858, u'zis': 4857, u'##z': 4855, u'stl': 4852, u'op\xe4': 4851, u'sni': 4849, u'zje': 4845, u'o\u017es': 4844, u'd#t': 4836, u'iar': 4828, u'\xfa#e': 4827, u'ezi': 4821, u'spl': 4813, u'lnu': 4810, u'h##': 4809, u'm#\xfa': 4804, u'uvi': 4800, u'onn': 4790, u'al\xf3': 4789, u'kap': 4788, u'b\xed#': 4781, u'nmi': 4778, u'tim': 4778, u'\xed\u017ei': 4778, u'nta': 4774, u'ut\xfd': 4771, u'\xe1lo': 4768, u'spi': 4761, u'dec': 4760, u'\xe1ny': 4760, u'l#t': 4759, u'#t\xe9': 4759, u'lem': 4751, u'opi': 4751, u'izi': 4747, u'z\u0161\xed': 4741, u'#ar': 4738, u'bou': 4737, u'dej': 4735, u'uho': 4732, u'der': 4728, u'#co': 4727, u'#d\xfa': 4727, u'mic': 4725, u'a#\u010f': 4723, u'mos': 4723, u'\xfama': 4722, u'#ze': 4720, u'\xf3gi': 4716, u'k\xfam': 4710, u'zvi': 4710, u'ple': 4709, u'\xfa\u010de': 4706, u'gu#': 4703, u'mbr': 4695, u'l#k': 4694, u'\u017ede': 4687, u'\u0161om': 4686, u'osn': 4684, u'\xe9ri': 4683, u'\xfadr': 4682, u'\xfdho': 4681, u'pit': 4670, u'b\xe1l': 4667, u'tku': 4661, u'end': 4660, u'vah': 4650, u'#pi': 4645, u'u\xe1l': 4644, u'che': 4639, u'zsk': 4636, u'n\u010di': 4627, u'\xfape': 4622, u'lex': 4606, u'uko': 4603, u'o\u010d\xed': 4594, u'zhr': 4583, u'i\xe1c': 4580, u'\xedto': 4575, u'im\xe1': 4570, u't\xe9r': 4568, u'ama': 4568, u'zmy': 4568, u'hav': 4559, u'ror': 4558, u'rv\xe1': 4556, u'z#m': 4546, u'#\u0161i': 4545, u't#k': 4543, u'mpl': 4542, u'\xedns': 4538, u'ing': 4531, u'\u0161im': 4527, u'aoz': 4521, u'uva': 4514, u'zok': 4512, u'\xedrs': 4510, u'hot': 4510, u'ajk': 4507, u'\u0165#c': 4502, u'yni': 4501, u'afr': 4499, u'l\xfdc': 4499, u'#ef': 4496, u'sk#': 4495, u'rka': 4494, u'acu': 4487, 
u'ak\xfa': 4472, u'sli': 4469, u'\xfdra': 4469, u'\xe1dy': 4466, u'k#e': 4462, u'bri': 4460, u'sii': 4458, u'#\xedr': 4441, u'fri': 4434, u'pal': 4434, u'pe\u0161': 4434, u'av\xe1': 4433, u'yro': 4430, u'rv\xe9': 4430, u'zer': 4424, u'c\xed#': 4420, u'yho': 4415, u'odh': 4408, u'\u010der': 4405, u'r\xe9h': 4405, u'd#r': 4404, u'\u0161u#': 4398, u'des': 4390, u'evn': 4387, u'gr\xe1': 4387, u'san': 4381, u'\xfacu': 4380, u'a\u0161u': 4380, u'dre': 4377, u'api': 4377, u'isy': 4375, u'ked': 4370, u'\xe9#c': 4367, u'\u0161uj': 4365, u'var': 4347, u'edc': 4337, u'evi': 4330, u'bko': 4329, u'\xedja': 4328, u'lim': 4326, u'j#\u010d': 4320, u'ula': 4315, u'aze': 4306, u't\xfd\u017e': 4305, u'\xfd\u017ed': 4305, u'haj': 4303, u'nc\xfa': 4300, u'te\u0161': 4295, u'la\u010d': 4286, u'oln': 4284, u'h#\u013e': 4284, u'ajs': 4283, u'#v\u017e': 4282, u'#ad': 4278, u'uh\xe9': 4278, u'\xedzk': 4273, u'erz': 4266, u'ibe': 4266, u'pil': 4261, u'#v\xf4': 4261, u'\xf3ri': 4258, u'\u013eu\u010f': 4252, u'ndi': 4244, u'arc': 4242, u'mla': 4241, u'##j': 4239, u'm##': 4238, u'imu': 4237, u'byt': 4230, u'\xfa#h': 4229, u'\u010d\xedt': 4229, u'\xf3ny': 4228, u'rib': 4223, u'#v\u010f': 4222, u'\xe1tk': 4221, u'kal': 4216, u'u\u0161i': 4215, u'\u017e#d': 4214, u'jco': 4213, u'eku': 4210, u'#tl': 4207, u'noc': 4201, u'eg\xe1': 4199, u'\xe1pa': 4197, u'n#o': 4197, u'\u010d\xedm': 4193, u'n#m': 4193, u'oly': 4189, u'els': 4187, u'jmo': 4182, u'tih': 4182, u'm#f': 4181, u'dna': 4180, u'\xedpr': 4180, u'r\u017en': 4175, u'\xe1rk': 4175, u'azo': 4168, u'nac': 4159, u'tas': 4157, u'vho': 4148, u'hne': 4146, u'\xedza': 4145, u'abr': 4140, u'zru': 4139, u'\xe1te': 4137, u'pel': 4135, u'amy': 4132, u'n#b': 4131, u'\xe1li': 4128, u'\xedre': 4126, u'b#a': 4125, u'an#': 4123, u'yv#': 4119, u'lah': 4116, u'j#j': 4115, u'\xedvy': 4114, u'ok\xfa': 4109, u'e\u0161p': 4105, u'\xfapi': 4102, u'\u017eij': 4100, u'bia': 4099, u'bso': 4092, u'ubl': 4090, u'ope': 4090, u'voc': 4089, u'oha': 4083, u'kli': 4078, u'riv': 4078, u'#la': 4066, u'dme': 4065, u'vko': 4064, u'vzn': 4063, u'is\xed': 4063, u'ah#': 4062, u'mam': 4061, u'\u0161n\xfd': 4060, u'##h': 4053, u'\u013ek\xe9': 4053, u'\xe1\u0161#': 4049, u'zak': 4046, u'rv\xfd': 4043, u'c\xfaz': 4042, u'r\xfa\u010d': 4041, u'u#f': 4039, u'y#h': 4035, u'\xfa#j': 4035, u'zau': 4033, u'uhe': 4033, u'l#d': 4026, u'\xe1l#': 4024, u'v#l': 4016, u'onz': 4012, u'er\xed': 4008, u'b\u010di': 4005, u'\xfa\u017ei': 3996, u'amb': 3995, u'p\xf4v': 3991, u'\xfa#i': 3989, u'bov': 3986, u'osa': 3982, u'\u017e#z': 3982, u'asu': 3980, u'mpr': 3973, u'auj': 3972, u'\xfa#\u017e': 3970, u'ybo': 3968, u'y\u0161l': 3966, u'von': 3965, u'hej': 3961, u'n\xe1j': 3957, u'a\u0161o': 3956, u'zvl': 3953, u'a\u0161a': 3951, u'bje': 3947, u'osv': 3943, u'kmi': 3943, u'\u010dal': 3943, u'js\u0165': 3942, u'de\u0148': 3941, u'\xedsa': 3938, u'bet': 3936, u'u#l': 3929, u'al\xe9': 3926, u'lib': 3926, u'vz\xe1': 3921, u'h#l': 3921, u'z\xe1j': 3919, u'#vs': 3918, u'm#l': 3916, u'ece': 3913, u'ajl': 3910, u'vra': 3906, u'\xe1\u0161h': 3898, u'\u0161ho': 3898, u'r\xedj': 3897, u'i#\u013e': 3885, u'uv\xe1': 3884, u'#es': 3884, u'jde': 3882, u'keb': 3878, u'\xfa#\u010d': 3873, u'rum': 3863, u'r#s': 3863, u'doz': 3861, u'kac': 3854, u'v#h': 3850, u'boh': 3840, u'ob#': 3839, u'\xed#j': 3839, u'\xf4d#': 3838, u'\xedka': 3838, u'v##': 3837, u'\xfazs': 3836, u'k#c': 3829, u'div': 3828, u'#ut': 3827, u'\u017eim': 3826, u's#\u010d': 3821, u'def': 3821, u'nzu': 3817, u'eak': 3817, u'dat': 3817, u'\xfaze': 3815, u'osu': 3807, u'v\xedj': 3802, 
u'\xfa#u': 3801, u'uza': 3795, u'#du': 3794, u'k#\u017e': 3790, u'ake': 3790, u'#hi': 3789, u'\u013ete': 3787, u'\xe1jo': 3785, u'rmi': 3785, u'taj': 3784, u'nfl': 3782, u'l#m': 3781, u'giu': 3778, u'zd\xe1': 3774, u'jak': 3769, u'gia': 3769, u'd#o': 3769, u'o\u013et': 3768, u'uli': 3768, u'\xfadi': 3768, u'eka': 3765, u'\xfaco': 3760, u'ign': 3758, u'#z\xfa': 3744, u'l\xed#': 3744, u'\xe9#e': 3743, u'a\u017en': 3739, u'e#\u010f': 3737, u'dit': 3734, u'nuj': 3734, u'tif': 3734, u'\xe4#v': 3727, u'o\u010du': 3727, u'r\xedt': 3723, u'ely': 3719, u'##e': 3719, u'\u017en\xfd': 3713, u'\xe1ti': 3712, u'\xe1ha': 3707, u'l\xe9h': 3707, u'jv\xe4': 3704, u'ndo': 3703, u'\u010d\u0161\xed': 3700, u'\xe9#f': 3694, u'jan': 3693, u'ka\u0165': 3690, u't\xe1r': 3689, u'tle': 3689, u'o#\u013e': 3685, u'\u017edo': 3684, u'vyb': 3683, u'fak': 3681, u'\xe1dn': 3678, u'cam': 3675, u'on\u0161': 3674, u'ega': 3672, u'k#i': 3665, u'\xedzo': 3665, u'i\u0161#': 3663, u'li\u017e': 3663, u'ek\xe1': 3662, u'nkc': 3660, u'e#g': 3657, u'\u017eby': 3651, u'ed\xfa': 3646, u'\u010die': 3642, u'\xf3pu': 3640, u'si\xed': 3639, u'o\u017e\u0148': 3637, u'v#\xfa': 3634, u'\xed#u': 3632, u'odz': 3629, u'ima': 3628, u'a\u017ee': 3627, u'\u0161lo': 3622, u'lna': 3621, u'pac': 3620, u'\xf4jh': 3619, u'jky': 3615, u'br\xe9': 3613, u'd\xe1t': 3613, u'mul': 3612, u'ch\xfd': 3611, u'ur\xe1': 3608, u'ind': 3607, u'ij\xed': 3606, u'#hu': 3606, u'\xe1ri': 3606, u'vym': 3604, u'ezn': 3599, u'oh\xe9': 3598, u'jle': 3594, u'\u010dni': 3592, u'##b': 3592, u'z\xfa\u010d': 3591, u'ors': 3590, u'pub': 3590, u'##t': 3589, u'vnu': 3587, u'efi': 3587, u'at#': 3583, u'di\u010d': 3576, u'\u0161i#': 3572, u'sus': 3572, u'm\u0148a': 3569, u'jaz': 3569, u'ri\xe1': 3567, u'k\xe9m': 3564, u's#u': 3564, u'rnu': 3561, u'y#c': 3561, u'enn': 3561, u'\xfadn': 3557, u'\xfa\u0165a': 3555, u's\xfa\u0165': 3555, u'beh': 3553, u'rtu': 3550, u'#m\u0148': 3550, u'lak': 3549, u'gr\xe9': 3549, u'#ir': 3547, u'my\u0161': 3544, u'h\xf4d': 3543, u'y#\u017e': 3542, u'rer': 3532, u'alt': 3525, u'ra\u0161': 3524, u'mut': 3519, u'ajt': 3517, u'aje': 3515, u'\u017e\xed#': 3509, u'adk': 3506, u'ton': 3504, u'ukr': 3495, u'tvu': 3494, u'd\xf4k': 3493, u'p\xe4t': 3487, u'emn': 3482, u'uma': 3476, u'cez': 3473, u'\xe4zk': 3472, u'sio': 3472, u'sl\xfa': 3465, u'#i#': 3464, u'\xedmi': 3461, u'\u0165am': 3459, u'ihn': 3459, u'bl\xed': 3456, u'k\u010dn': 3455, u'v\xfdn': 3455, u'u\u010dn': 3453, u'd#d': 3453, u'upc': 3451, u's#j': 3449, u'nel': 3449, u'nfe': 3448, u'r\xe9c': 3447, u'\u010dis': 3446, u'\xedno': 3444, u'\xe1pi': 3442, u'ago': 3440, u'z#s': 3439, u'\xfdzv': 3438, u'i\u010d#': 3435, u'rna': 3430, u'v\xe1h': 3429, u'ac\xed': 3429, u'z\xf3n': 3422, u'k#u': 3421, u'n#d': 3419, u'nde': 3413, u'otu': 3413, u'lic': 3407, u'n#t': 3405, u'ypl': 3402, u'ysv': 3402, u'e\u0148o': 3400, u'o\u017ea': 3398, u'ybu': 3398, u'\xfapr': 3394, u'd#e': 3393, u'\xe9ck': 3387, u'ajd': 3386, u'li\u010d': 3384, u'aza': 3382, u's#b': 3382, u'ho\u017e': 3379, u'\u017e\u0148u': 3376, u'nd#': 3374, u'hrn': 3373, u'\xfakr': 3370, u'udz': 3370, u'zie': 3368, u'ta\u010d': 3367, u'#\xfat': 3364, u'o\u013eb': 3364, u'ble': 3363, u's\xfak': 3359, u'\xedc#': 3354, u'msk': 3349, u'umn': 3348, u'ol\xfa': 3340, u'or\u0161': 3332, u'#vh': 3332, u'\u013est': 3330, u'vyd': 3330, u'lab': 3323, u'rel': 3323, u'eag': 3322, u't\xf3r': 3321, u'bio': 3321, u's#i': 3318, u'ins': 3315, u'\xedsn': 3309, u't\xedk': 3307, u'\u0155\u0148a': 3304, u'h\u0155\u0148': 3303, u'vok': 3299, u'\xfa#c': 3296, u'fli': 3296, u'\xedzi': 
3295, u'dkl': 3295, u'gal': 3293, u'ah\u0155': 3288, u'ev\xe1': 3285, u'tes': 3281, u'e\u0165a': 3278, u'mbi': 3277, u'd\u0148o': 3277, u'l\xfa\u017e': 3277, u'kar': 3276, u'upe': 3276, u'on\xfa': 3275, u'\xe1#e': 3275, u'el\xe9': 3257, u'ma\u010f': 3256, u'\u010far': 3256, u'ol\xe1': 3254, u'os\xfa': 3251, u'\u017ei#': 3251, u'v#\u0161': 3248, u'od\xed': 3247, u't#z': 3245, u'z\xed#': 3237, u'\xe1\u0161a': 3235, u'hal': 3231, u'erm': 3229, u'zve': 3227, u'fr#': 3223, u'igr': 3218, u'rn\xed': 3214, u'kle': 3212, u't#m': 3211, u'ofi': 3210, u'\xe1de': 3205, u'eje': 3203, u'hni': 3198, u'in\xfa': 3197, u'\u017eko': 3197, u'd\xe1c': 3196, u'j#f': 3193, u'la\u0165': 3193, u'h\xfdb': 3190, u'rti': 3186, u'av\u0161': 3185, u'pn\xe9': 3181, u'zda': 3179, u'i\u010di': 3179, u'mig': 3173, u'erv': 3172, u'\xe4#p': 3166, u'es\xfa': 3164, u'kme': 3163, u'sub': 3162, u'zia': 3160, u'eus': 3151, u've\u010f': 3151, u'\xfarn': 3148, u'\xed#c': 3144, u'\xedte': 3144, u'l\xe1\u0161': 3142, u'i\xe1m': 3138, u'v\xfdk': 3138, u'nda': 3137, u'akm': 3132, u'he#': 3131, u'\xe1bo': 3130, u'kte': 3123, u'\xf3ne': 3119, u'\u0161\u0165a': 3116, u'inc': 3114, u'ul#': 3111, u'bja': 3109, u'r#b': 3107, u'ezo': 3105, u'ynu': 3101, u'vir': 3099, u'skl': 3099, u'\u017en\xed': 3099, u'l#r': 3097, u'ute': 3097, u'#ga': 3095, u'\u010des': 3093, u'lod': 3092, u'vic': 3089, u'#sr': 3086, u'ect': 3085, u'sn#': 3084, u'vy\u010d': 3079, u'juj': 3078, u'\xed#\u010d': 3077, u'e\u017en': 3076, u'u\u017en': 3075, u'rmu': 3074, u'vte': 3072, u'\xf4b#': 3067, u'zbr': 3067, u'd\xedm': 3058, u'r\xe9m': 3057, u'yvo': 3057, u'hou': 3053, u'ir\xe1': 3052, u'bru': 3052, u'y#f': 3049, u'out': 3043, u'pl#': 3037, u'#\u013ea': 3030, u'opu': 3030, u'#n\xed': 3028, u'z\xe1m': 3028, u'onm': 3027, u'rzi': 3026, u'oj\xed': 3025, u'\xfdni': 3023, u'ept': 3022, u'ouk': 3019, u'hn\xfa': 3018, u'\xfd#\u010d': 3013, u'udn': 3012, u'puj': 3007, u'n#j': 3004, u'y#\xfa': 3000, u'p\u0161\xed': 2997, u'n\xfak': 2996, u'lec': 2995, u's\xedc': 2994, u'u#\u0161': 2992, u'ru\u017e': 2985, u'az\u0148': 2982, u'ang': 2978, u'\xe1js': 2977, u'tv\xed': 2974, u'evo': 2971, u'#az': 2969, u'sa\u0165': 2969, u'pob': 2961, u'vyl': 2961, u'\u0161ne': 2959, u'tem': 2956, u'niv': 2954, u'st\xfd': 2951, u'a\u010du': 2949, u'nvi': 2948, u'noz': 2947, u'lok': 2947, u'nme': 2943, u'env': 2942, u'e\u0161\xed': 2934, u'a\u010dl': 2932, u'apl': 2930, u'ev\xed': 2926, u'br\xfd': 2920, u'z#a': 2915, u't#o': 2914, u'\xf3zn': 2908, u'i\u017e\u0161': 2908, u'ngu': 2904, u'\xe4zn': 2903, u'\xe1mo': 2900, u'r\u010de': 2900, u'\xe1ka': 2900, u'plo': 2896, u'ura': 2893, u'\u0165an': 2885, u'cer': 2883, u'aud': 2883, u'ad\xfd': 2882, u'koh': 2882, u'hum': 2880, u'jmy': 2879, u'yna': 2877, u'rto': 2873, u'uda': 2873, u'ypo': 2866, u'usp': 2858, u'i#g': 2855, u'\xe1ja': 2853, u'nee': 2852, u'nio': 2850, u'\u017e#b': 2847, u'el\xfd': 2843, u'usa': 2843, u'#iz': 2839, u'ul\xfd': 2836, u'zsa': 2831, u'\u013euj': 2829, u'his': 2826, u'\xe1ns': 2824, u'pia': 2822, u'lin': 2820, u'ak\u017e': 2816, u'az\xed': 2814, u'id\xed': 2813, u'atv': 2805, u'a\u0161t': 2805, u'nce': 2804, u'ha\u0165': 2803, u'muj': 2802, u'r#d': 2800, u'k\u017ee': 2798, u'c#s': 2797, u'a\u017e\xed': 2795, u'trp': 2793, u'dep': 2789, u'pi\u0165': 2788, u'#fl': 2786, u'tiz': 2785, u'naz': 2778, u'jd\xf4': 2770, u'ibl': 2769, u'av#': 2769, u'nfr': 2765, u'zyk': 2764, u'#fe': 2760, u'\u010da\u0165': 2754, u'u\u010de': 2739, u'\u0165##': 2737, u'jim': 2733, u'ozb': 2733, u'sun': 2733, u'b\xfdv': 2731, u'zv\xed': 2731, u'd#m': 2730, 
u'm\xe1h': 2728, u'axi': 2728, u'jca': 2728, u'gi\xed': 2726, u'oco': 2726, u't#b': 2723, u'uga': 2723, u'#j\xfa': 2717, u'z#e': 2714, u'ovz': 2713, u'lal': 2713, u'mco': 2712, u'z\xedv': 2710, u'ark': 2710, u'\xe1\u017ea': 2708, u'odc': 2702, u'knu': 2694, u'\xe9to': 2692, u'\xedck': 2689, u'cku': 2688, u'o\u0161l': 2682, u'n\u010de': 2680, u'c#v': 2678, u'edm': 2677, u'#et': 2677, u'#t\xed': 2677, u'r\xf3m': 2676, u'zel': 2676, u'i\xf3z': 2675, u'unk': 2674, u'p\u013a\u0148': 2673, u'\u013a\u0148a': 2672, u't#j': 2670, u'\xfase': 2666, u'ev#': 2663, u'lor': 2661, u'n\u0161i': 2661, u'rd#': 2660, u'\xe9di': 2659, u'o\u013es': 2653, u'arr': 2650, u'r#k': 2649, u's\xfas': 2649, u'nge': 2645, u'mik': 2645, u'\u017e#m': 2645, u'uns': 2644, u'#v\u010d': 2643, u'i\u017eo': 2642, u'rny': 2640, u'l\xe1m': 2639, u'\xedt#': 2638, u'ad\xed': 2636, u'de\u013e': 2636, u's#z': 2633, u'jtr': 2632, u'\u017e##': 2632, u'sev': 2631, u'kem': 2630, u'uj\xed': 2625, u'\xe1so': 2625, u'd\u0148a': 2623, u'i\u0161l': 2617, u'\u0165#f': 2615, u'cep': 2614, u'at\xfd': 2613, u'atu': 2611, u'dzk': 2608, u'lon': 2608, u'in#': 2604, u'id\xe1': 2603, u'abl': 2602, u'a\u010d\xed': 2600, u'iv\xe1': 2599, u'\xfdta': 2599, u'do\u0161': 2596, u'v\xfdd': 2595, u'mu\u017e': 2594, u'#un': 2591, u'kyn': 2589, u'\u0161\u0161i': 2588, u'\xe9#l': 2584, u'\u017ed\xe9': 2578, u'v#g': 2575, u'l\xe1t': 2574, u'\u017e#t': 2573, u'esl': 2571, u'c#z': 2570, u'\xedv#': 2568, u'mia': 2567, u'#om': 2565, u'r#z': 2561, u'\u0161pa': 2560, u'p\xfdt': 2560, u'\xed#i': 2556, u'os\xf4': 2554, u's\xf4b': 2554, u'c\xfa#': 2549, u'mli': 2549, u'be\u017e': 2549, u'hc\xfa': 2547, u'mpe': 2546, u'pci': 2545, u'j#u': 2544, u'iov': 2544, u'apa': 2542, u'co#': 2541, u'guj': 2540, u'ojh': 2536, u'ski': 2536, u'\u0148#p': 2535, u'rhn': 2535, u'umi': 2531, u'\xe1ts': 2530, u'#at': 2530, u'cka': 2530, u'ar\xe1': 2530, u'\xfdda': 2529, u'\xedsp': 2526, u'ezh': 2524, u'eex': 2523, u'dik': 2521, u'utu': 2519, u'nz\xed': 2516, u'xte': 2516, u'ikd': 2516, u'v\xf4b': 2515, u'\xf4be': 2515, u'##\u010d': 2514, u'n\xe1b': 2513, u'y\u0148a': 2512, u'd\xfdc': 2508, u'jmu': 2505, u'r\xe1\u017e': 2504, u'r#m': 2503, u'yv\xe1': 2498, u'\xfdme': 2497, u'\xe1\u017ei': 2494, u'hat': 2493, u'zom': 2493, u'ovp': 2493, u'ki#': 2492, u'z#j': 2492, u'ky\u0148': 2490, u'#l\xe1': 2489, u'amp': 2485, u'zbu': 2481, u'ds\xfa': 2475, u'\xe1\u017ek': 2474, u'\xfa#l': 2473, u'yne': 2473, u'omb': 2472, u'bic': 2472, u'y\u0161o': 2468, u'#l\xed': 2466, u'z\u0148u': 2466, u'a\u010de': 2465, u'ln\xfa': 2465, u'k\xe9t': 2462, u'n#r': 2460, u'opy': 2460, u'\u017e#o': 2459, u'g\xe1l': 2458, u'zad': 2457, u'sal': 2454, u'pet': 2454, u'\xedro': 2451, u'ot#': 2449, u'ert': 2449, u'\xfade': 2448, u'c#k': 2446, u'h#\u017e': 2445, u'y#\u0161': 2443, u'v#\u017e': 2440, u'mrt': 2439, u's#h': 2438, u'bo\u017e': 2437, u'eng': 2434, u'#r\xf3': 2433, u'sty': 2432, u'dha': 2430, u'y#l': 2430, u'dpa': 2428, u'\xedjm': 2427, u'yme': 2425, u'p\xe1c': 2425, u'oz\xf3': 2424, u'#ca': 2423, u'#b\xfd': 2422, u'alk': 2421, u'\u017eal': 2419, u'rn\xfa': 2416, u'\u017ean': 2413, u'sn\xe1': 2408, u'rae': 2407, u'imn': 2406, u'r\xed\u010d': 2403, u'\xed#h': 2402, u'\xe1ru': 2400, u'erc': 2399, u'\xfatn': 2398, u'ael': 2396, u'pul': 2396, u'u\u010fo': 2396, u'p\xedn': 2395, u'vde': 2391, u'cne': 2390, u'#on': 2386, u'#uh': 2386, u'exn': 2383, u'rro': 2381, u'edt': 2377, u'kdy': 2377, u'ovy': 2370, u'nd\xe1': 2369, u'\xe4to': 2365, u'u\xe1r': 2362, u'\xedzu': 2360, u'r\xedr': 2360, u'#n\xfa': 2359, u'ji\u0165': 2356, 
u'ink': 2356, u'z#b': 2356, u'pl\xfd': 2349, u'\xed\u010di': 2348, u'ekl': 2345, u'typ': 2343, u'l\xfdv': 2343, u'bco': 2341, u'civ': 2340, u'k#\u010d': 2340, u'pn\xfd': 2340, u'\xf4ch': 2339, u'##f': 2337, u'dt\xfd': 2336, u'\u017ea\u0165': 2332, u'\xfaru': 2332, u'#hn': 2331, u'abu': 2331, u'\xe1mi': 2326, u'ob\xe9': 2325, u'ehu': 2325, u'\xfd\u0161k': 2323, u'\xe4zo': 2320, u'\xedvu': 2319, u'r\xedn': 2319, u'm\u017ei': 2319, u'am\u017e': 2317, u'u\u0161o': 2317, u'raf': 2317, u'k\xed#': 2316, u'ci\xf3': 2311, u'aja': 2311, u'\xe1lu': 2307, u'k#h': 2305, u'rag': 2305, u'ezm': 2304, u'odd': 2304, u'smr': 2301, u'd\xf4c': 2301, u'\xe9me': 2300, u'i\u017en': 2300, u'#nu': 2299, u'yno': 2299, u'izr': 2296, u'v\xe1v': 2295, u'als': 2294, u'j#l': 2293, u'bsk': 2293, u'l\xfa#': 2292, u'#k\xfd': 2292, u'\xe1#u': 2291, u'upy': 2291, u'#d\u0148': 2291, u'l#j': 2285, u'tr\xed': 2283, u'l\xfat': 2283, u'tyr': 2283, u'zul': 2279, u'r\xe1\u013e': 2278, u'mas': 2277, u'up\xed': 2268, u'ec\xed': 2267, u'k\xe1l': 2265, u'ij\xfa': 2265, u'el\xed': 2265, u'bam': 2265, u'rvk': 2265, u'ut\xe1': 2263, u'ol\xed': 2261, u'\xfd#\u0161': 2258, u't\xe9h': 2253, u'\u0161tv': 2253, u'zoz': 2252, u'\u0165#\u0161': 2251, u'#ty': 2249, u'#\xfam': 2248, u'h\xfdm': 2247, u'\u010dor': 2246, u'g\xe1c': 2244, u'#vt': 2243, u'tug': 2242, u'\xe1#\xfa': 2241, u'\xfd#c': 2240, u'rgu': 2238, u'dvi': 2237, u'afi': 2234, u'\u010fom': 2234, u'pt#': 2233, u'ily': 2233, u'dp\xed': 2232, u'sig': 2231, u'm\xe4t': 2231, u'si\u0165': 2228, u'\xe1\u013eo': 2228, u'itl': 2225, u'\u017e\u0161i': 2224, u'gro': 2219, u'\u0161\u0165#': 2217, u'z#z': 2217, u'uci': 2216, u'izu': 2216, u'e\u0148a': 2216, u'\u0161ty': 2214, u'ekv': 2214, u'bul': 2209, u'\xeds\u0165': 2203, u'di#': 2202, u'\xe1vk': 2202, u'd##': 2201, u'fal': 2201, u'ar\xed': 2201, u'iv\xed': 2197, u'\xe1#h': 2196, u'\xf3g#': 2195, u't\u0148u': 2195, u'ido': 2195, u'ibi': 2189, u'\u010f#n': 2189, u'b\xe1c': 2189, u'l\xedz': 2187, u'e\xe1l': 2183, u'\xfdba': 2183, u'\u0148#s': 2183, u'en\u010d': 2183, u'ikl': 2179, u'b\xfad': 2175, u'\xe1\u0161\u0165': 2175, u'p\xf4d': 2174, u'\u013e#n': 2171, u'\xfave': 2171, u'nty': 2171, u'\xedri': 2170, u'n\xe1h': 2166, u'e\u010d\xed': 2158, u'zby': 2157, u'\xe9ma': 2155, u'lez': 2155, u'r\u0161i': 2153, u'zmo': 2151, u'rme': 2149, u'\xe1mk': 2148, u'akl': 2148, u'reu': 2148, u'gru': 2148, u'zv\xe1': 2148, u'n\xedz': 2147, u'v\xe1t': 2144, u'anm': 2144, u'l\xfa\u010d': 2143, u'uz\xed': 2142, u'exu': 2142, u'ose': 2139, u'ytl': 2135, u'#he': 2133, u'ilu': 2132, u'zyl': 2131, u'mou': 2130, u'jvi': 2128, u'vn\xe1': 2125, u'euk': 2123, u'hen': 2122, u'\xfa##': 2117, u'pli': 2117, u'\u013eke': 2116, u'ias': 2115, u'd\xedt': 2115, u'uhl': 2113, u'zli': 2113, u'lot': 2110, u'akr': 2110, u'ajp': 2109, u'suj': 2106, u'ets': 2104, u'pas': 2103, u'l\u0161o': 2102, u'tuc': 2099, u'jvy': 2099, u'b\xedm': 2098, u'adl': 2095, u'#\u0161v': 2092, u'i\xe1#': 2090, u'bur': 2084, u'u\u010fm': 2084, u'nsa': 2082, u'ruz': 2080, u'l\xfdm': 2079, u'\xf3gu': 2078, u'\u017eky': 2078, u'usm': 2078, u'#s\xed': 2075, u'\u010f#p': 2075, u'ije': 2073, u'\xfdko': 2071, u'u#\u013e': 2069, u'ju\u017e': 2062, u'#gu': 2059, u'dor': 2055, u'pne': 2053, u'ov\xed': 2053, u'egy': 2053, u't#d': 2052, u'vda': 2050, u'zsi': 2049, u'lte': 2048, u'jet': 2045, u'xt#': 2044, u'pe\u0148': 2043, u'neg': 2042, u'\u0165as': 2041, u'bn\xe1': 2038, u'i\xe9r': 2037, u'#\u010d#': 2034, u'rce': 2033, u'ube': 2032, u'av\xfd': 2032, u'\xfd#\xfa': 2031, u'yzv': 2030, u'pam': 2030, u'gat': 2029, u'ikn': 
2029, u'm\xed#': 2027, u'git': 2023, u'vrt': 2020, u'riu': 2020, u'#\u0161e': 2019, u'uja': 2018, u'\xed#\u013e': 2016, u'et\xf3': 2015, u'rde': 2014, u'p\xe1j': 2014, u'mn\xfa': 2012, u'\xe1#i': 2008, u'okm': 2007, u'o\u0148#': 2006, u'gn\xe1': 2005, u'rco': 2005, u'#eg': 2005, u'\u010dul': 2002, u'gii': 2001, u'av\xe9': 2001, u'ak\u010d': 2000, u'l#u': 1997, u'r##': 1996, u'zn\xe9': 1993, u'lif': 1993, u'lu\u010d': 1989, u'z\xedn': 1987, u'fle': 1984, u't#t': 1983, u'eu\u017e': 1980, u'pru': 1978, u'arg': 1978, u'obv': 1977, u'mpa': 1977, u'jst': 1976, u'sum': 1974, u'lha': 1973, u'ab\xfa': 1973, u'nek': 1972, u's##': 1964, u'h\xed#': 1963, u't\xf3d': 1963, u'li\xe1': 1963, u'ti\u010d': 1960, u'luj': 1957, u'ie\u0148': 1956, u'eze': 1955, u'o\u013ea': 1954, u'evz': 1953, u'zn\xfd': 1953, u't\xfad': 1952, u'hv\xed': 1950, u'\u0148#v': 1949, u'ehl': 1949, u'j\u0161e': 1948, u'at\xe1': 1948, u'onu': 1946, u'##c': 1946, u'ols': 1946, u'rch': 1944, u'ozl': 1943, u'i\xe1r': 1942, u'o#\u010f': 1940, u'at\xfa': 1939, u'dho': 1937, u'o#g': 1934, u'm\xe9d': 1934, u'\u017ed\u0148': 1932, u'uhu': 1932, u'dpr': 1931, u'pn\xed': 1930, u'lou': 1926, u'mac': 1926, u'rir': 1925, u'l#e': 1922, u'c#o': 1920, u'\u0161t\xfa': 1920, u'd#j': 1919, u'\u010dka': 1917, u'\u0165ov': 1915, u't#e': 1913, u'\xed\u010dk': 1912, u'r#o': 1911, u'bok': 1908, u'ukc': 1906, u'\xe1rd': 1906, u'sex': 1905, u'b\xe1v': 1905, u'tiu': 1905, u'v\xe9d': 1903, u'\u0161v\xe9': 1903, u'#m\xe9': 1902, u'sur': 1897, u'ekr': 1895, u'bel': 1895, u'oh\xed': 1893, u'z##': 1893, u'h\xe1p': 1891, u'j\u0161o': 1891, u'lut': 1890, u'mca': 1890, u'otk': 1889, u'cn\xe9': 1889, u'tka': 1887, u'ipl': 1887, u'v\u0161t': 1885, u'ojm': 1884, u'b\xe1m': 1884, u'ler': 1883, u'\u0165#\u010f': 1882, u'po\u0148': 1881, u'rax': 1880, u'u#g': 1878, u'byr': 1878, u'j#\u013e': 1877, u'l#\u010d': 1877, u'sma': 1875, u'vyc': 1874, u'ne\xfa': 1873, u'do\u010d': 1870, u'ed\xed': 1869, u'ri\u0161': 1867, u'u\u017eo': 1863, u'elu': 1861, u'\xe9ds': 1861, u'\u010fmi': 1859, u'kv\xf3': 1858, u'v\xf3t': 1858, u'a\u0148#': 1857, u'l\xe1r': 1856, u'\xf3mo': 1855, u't\xedn': 1854, u'\xfdb#': 1854, u'\u010div': 1852, u'n\xed\u010d': 1850, u'z#u': 1850, u'\xf4ka': 1846, u'h\xe1#': 1842, u'z#c': 1841, u'ln\xed': 1841, u'kry': 1840, u'os#': 1839, u'jpr': 1839, u'juh': 1836, u'vzb': 1835, u'ma\u010d': 1833, u'\xed#l': 1832, u'imp': 1828, u'yda': 1828, u'#ap': 1826, u'\u0148ou': 1823, u'etu': 1822, u'eke': 1821, u'sp\xe1': 1820, u'cif': 1818, u'm\xe9#': 1818, u'st\u0148': 1818, u'\u0148#z': 1817, u'ulh': 1816, u'yvu': 1815, u'm#\u013e': 1813, u'rve': 1811, u'\xed##': 1810, u'ah\u010d': 1810, u'v\u010de': 1809, u'isu': 1809, u'ok\xe9': 1805, u'ok\xfd': 1804, u'had': 1802, u'am\xe4': 1802, u'k#\xfa': 1801, u'ced': 1801, u'\xfaka': 1799, u'arn': 1799, u'oh\xf4': 1797, u'her': 1794, u'\xf3nu': 1794, u'pes': 1794, u't#\u017e': 1793, u'bky': 1790, u'epa': 1790, u'#\u0148o': 1789, u'xim': 1788, u'nse': 1786, u'ira': 1785, u'mi\u0165': 1784, u'\xf3n#': 1783, u'po\u0161': 1783, u'zi\u0165': 1781, u'gre': 1780, u'imk': 1779, u'leh': 1777, u'oc\xed': 1774, u'yl\xfa': 1771, u'hlb': 1770, u'\u0161an': 1768, u'r\u0161\xed': 1768, u'ra\u0148': 1766, u're\xe1': 1765, u'sn\xfa': 1764, u'alc': 1764, u'aty': 1762, u'kop': 1761, u'u\u013ea': 1760, u'uis': 1758, u'zii': 1757, u'baj': 1757, u'l\xfdz': 1756, u'j\xfan': 1754, u'k\xe1m': 1749, u'ndy': 1748, u'p#a': 1748, u'vre': 1746, u'erp': 1745, u'\u010d\u0161e': 1743, u'nei': 1743, u'r#t': 1743, u'd\u010di': 1742, u'yb\xe1': 1741, u'bsi': 1739, 
u'oke': 1739, u'y#\u013e': 1738, u'c\xedt': 1737, u'k\xfa\u0161': 1736, u'dde': 1734, u'rdo': 1734, u'\xfazk': 1731, u'iho': 1731, u'xu\xe1': 1731, u'sp#': 1729, u'\xfd#e': 1728, u'\xfar#': 1727, u'max': 1726, u'gom': 1725, u'n#e': 1724, u'uh\xfd': 1723, u'mbu': 1722, u'n\xe1d': 1722, u'aha': 1721, u'klo': 1718, u'b#v': 1718, u'\u0148om': 1717, u'ruc': 1717, u'\xe1#\u010d': 1715, u'\xedze': 1712, u'\xfa#f': 1710, u'ubj': 1710, u'z#i': 1710, u'te\u010d': 1709, u'\u0161ke': 1708, u'ezv': 1706, u'zdv': 1705, u'j#\u0161': 1704, u'\u013eby': 1703, u'\xfa\u010dt': 1702, u'st\xf3': 1700, u'il\u0148': 1699, u'rte': 1698, u'ykl': 1698, u'tun': 1697, u'ma\u017e': 1694, u'vka': 1689, u'n\u0161\xed': 1686, u'es\xed': 1686, u'\u0148aj': 1683, u'\xfate': 1682, u'os\xed': 1681, u'orb': 1680, u'a\u0161\xed': 1680, u'\u0161ka': 1677, u'ik#': 1677, u'ka\u010d': 1671, u'ile': 1671, u'zaz': 1670, u'ia\u0165': 1669, u'srb': 1669, u'evk': 1666, u'gno': 1666, u'#u\u013e': 1665, u'\u0148a\u0165': 1664, u'd\xe9h': 1664, u'#p\xe4': 1664, u'o\u017ek': 1663, u'\u010f#v': 1663, u'diu': 1663, u'mad': 1660, u'ad\u0148': 1659, u'orv': 1658, u'#kn': 1657, u'uku': 1655, u'\u010d##': 1655, u'ejd': 1655, u'ryh': 1653, u'kne': 1652, u'b\xe1r': 1652, u'b\xe1n': 1651, u'\xf4kl': 1647, u'z\u0161i': 1640, u'uri': 1639, u'vco': 1639, u'dco': 1638, u'sem': 1637, u'pte': 1632, u'\u017e#u': 1630, u'ga#': 1630, u'obh': 1628, u'o\u017e\xed': 1627, u't\xe1n': 1626, u'aop': 1626, u'd\xe1n': 1626, u'#k\xf3': 1625, u'rv#': 1623, u'zma': 1621, u'nih': 1619, u'rhl': 1619, u'ojs': 1614, u'#s\u0165': 1612, u'ur\xfd': 1612, u'n\xe1p': 1612, u'ham': 1608, u'lco': 1608, u'ico': 1608, u'#s\xe1': 1604, u'd#\u017e': 1603, u'jm\xe9': 1602, u'\xfa\u0161a': 1601, u'klu': 1601, u'a\u0165a': 1601, u'\xe1nm': 1601, u'blo': 1600, u'di\xe1': 1595, u'rdy': 1594, u'nc\xed': 1591, u'nga': 1591, u'\u010daj': 1590, u'\u0161po': 1589, u'bha': 1589, u'l\xedn': 1589, u'r\xe1r': 1587, u'uvo': 1586, u'eod': 1585, u'ash': 1584, u'yv\xed': 1583, u'\xfa#\u0161': 1583, u'p\u0161o': 1583, u'mn\xed': 1582, u'n#c': 1582, u'\xe1ly': 1581, u'sep': 1579, u'\u013eav': 1577, u'd#h': 1577, u'okt': 1576, u'\u010d\xeds': 1575, u'h\xfd#': 1575, u'#cy': 1574, u'\u013eut': 1567, u'ocn': 1566, u'epl': 1564, u'\u017e\xedm': 1564, u'd#u': 1561, u'z#\u010d': 1558, u'iva': 1557, u'sak': 1557, u'nl#': 1556, u'p#p': 1556, u'\u010d\xedv': 1555, u'\xe1zn': 1554, u'zba': 1554, u'\u013ek\xfa': 1552, u'pej': 1551, u'lyh': 1551, u'd\xedn': 1551, u'd\u013a\u017e': 1551, u'\u010d\xfav': 1550, u'kn\xfa': 1549, u'\u0165az': 1547, u'hu\u017e': 1547, u'o\u0161k': 1545, u'ofe': 1544, u'all': 1544, u'son': 1543, u'lk\xe1': 1543, u'r#r': 1543, u'k\xe1n': 1542, u'\xfd#u': 1541, u'sp\xe4': 1541, u'ebr': 1541, u'\u010dky': 1540, u'#ol': 1538, u'ra\u017e': 1538, u'\xfamy': 1537, u'ysi': 1537, u'c\xedp': 1536, u'rbs': 1535, u'\u017e#k': 1535, u'y#g': 1535, u'eis': 1534, u'xib': 1532, u'#f\xe1': 1531, u'ut\xf3': 1531, u'kav': 1530, u'ex#': 1530, u'\xfati': 1529, u's#\u013e': 1529, u'dio': 1526, u'vku': 1524, u'j\xed#': 1524, u'pco': 1522, u's\xedd': 1521, u'col': 1520, u'tek': 1520, u'j#\u017e': 1519, u'n##': 1519, u'em\xed': 1516, u'\xe1\u0165a': 1516, u'z\xe1\u0165': 1516, u'lt\xe1': 1513, u'did': 1513, u'f\xe1z': 1513, u'\u013e#\u010d': 1512, u'\xed#\xfa': 1512, u'b#p': 1512, u'\u0161\u0161\xed': 1511, u'vzi': 1511, u'\xf4st': 1509, u'\u010f#b': 1507, u'vpr': 1506, u'\xedny': 1502, u'y\u010dl': 1502, u'rup': 1501, u'rn\xe1': 1498, u'y\u0161u': 1498, u'p#n': 1498, u'cn\xfd': 1498, u'ijm': 1497, u'mky': 1494, 
u'#of': 1493, u'b\xe9#': 1493, u'\xe1#l': 1492, u'\xe1hu': 1492, u'l#h': 1492, u'\u0148az': 1491, u'erk': 1491, u'v\xe1\u0161': 1488, u'z#f': 1487, u'its': 1485, u'jn\xe1': 1483, u'nds': 1483, u'old': 1483, u'vem': 1483, u'\u0165#\u013e': 1483, u'uvu': 1481, u'mo\u010d': 1481, u'adc': 1480, u'i#\u010f': 1480, u'r\xe1b': 1480, u'\u010dte': 1480, u'ain': 1479, u'jil': 1478, u'zop': 1478, u'ob\xfd': 1478, u'geo': 1476, u'va\u010d': 1476, u'atl': 1476, u'ekc': 1476, u'be\u0165': 1476, u'\u013eub': 1472, u'kni': 1472, u'ium': 1471, u'h\u0161i': 1471, u'in\xed': 1470, u'o\u013eu': 1470, u'v\xe9m': 1470, u'\xedje': 1468, u'nu\xe1': 1468, u'ald': 1467, u'yvi': 1466, u'a\u010dk': 1466, u'\u017e#a': 1465, u'\u017eny': 1465, u'eco': 1463, u's\xedv': 1462, u'mav': 1461, u'e\u017e\xed': 1460, u'hmi': 1460, u'\xe1#c': 1459, u'\u013e#s': 1458, u'\u0148am': 1456, u'edv': 1452, u'kl\xe1': 1452, u'i\u0161n': 1450, u'utr': 1450, u'\u017ek\xe9': 1448, u'ehn': 1447, u'##u': 1447, u'jko': 1446, u'b\xe9h': 1446, u'caj': 1444, u'udk': 1443, u'\xfd#f': 1441, u'u\u017ee': 1441, u'ln\xe1': 1441, u'\u0161\u013ea': 1438, u'zil': 1438, u'o\u010d\xfa': 1437, u'\xedne': 1436, u'edb': 1435, u'y\u010da': 1432, u'\xe1ba': 1431, u'et\xe1': 1428, u'n\u017ee': 1425, u'vel': 1423, u'idu': 1423, u'ad\u0161': 1418, u'rty': 1418, u'v\xfdl': 1418, u'l##': 1418, u'#ub': 1416, u'trn': 1414, u'dr\xe1': 1414, u'rdn': 1413, u'\u0161no': 1413, u'm\xfa#': 1412, u'akz': 1412, u'\u013eu#': 1411, u'kzv': 1411, u'il\xed': 1411, u'ti\u017e': 1411, u'uky': 1409, u'ige': 1409, u'\u017eb\xe1': 1408, u'k#l': 1408, u'tr\xe9': 1405, u'#lu': 1405, u'adv': 1404, u'p\xe1r': 1402, u'nso': 1401, u'\xf4jd': 1401, u'\xf4j#': 1400, u'fro': 1399, u'yt#': 1399, u'\u0165ro': 1398, u'e\u0165o': 1397, u'ziu': 1396, u'mbo': 1394, u'r\xe1z': 1394, u'ds#': 1392, u'fga': 1389, u'sp\u013a': 1389, u'o\u010d\u0148': 1389, u'far': 1389, u'rca': 1388, u'afg': 1388, u'#ep': 1387, u'tud': 1387, u'ikr': 1387, u'zvr': 1387, u'lev': 1385, u'ppe': 1385, u'ri\xed': 1384, u'xtr': 1381, u'lig': 1379, u'rah': 1379, u'#p\xed': 1379, u'nop': 1377, u'kru': 1376, u's\u013eu': 1375, u'sym': 1374, u'mel': 1370, u'v\xf4l': 1370, u'rke': 1370, u'\u010dlo': 1368, u'\xfdhr': 1367, u't#u': 1365, u'urg': 1365, u'rpe': 1365, u'\xf4t#': 1364, u'\xe4ti': 1363, u'\u017eet': 1363, u'\u0148#n': 1361, u'msp': 1359, u'\xf3ns': 1358, u'dev': 1358, u'ir\u0161': 1356, u'lst': 1355, u'zol': 1353, u's#l': 1350, u'kve': 1349, u'zdu': 1349, u'rko': 1347, u'\xe9mi': 1345, u'm\xe1t': 1343, u'#ec': 1343, u'pti': 1343, u'i\u010dk': 1343, u'\u013e#p': 1338, u'nzi': 1335, u'v\u010da': 1335, u'e\u013eu': 1335, u'jaj': 1334, u'bvi': 1333, u'c\xedc': 1333, u'zbe': 1331, u'yvn': 1330, u'gum': 1330, u'##i': 1329, u'\xedce': 1328, u'r#j': 1328, u'#ms': 1327, u'kt\xf3': 1327, u'\u017eba': 1326, u'\xedti': 1322, u'#s\u013e': 1321, u'\u0161es': 1318, u'\u0161#p': 1317, u'\xe4#n': 1317, u'cro': 1316, u'#\u0161a': 1316, u'#pp': 1316, u'\xe1du': 1314, u'laj': 1314, u'\u0165a#': 1312, u'n#h': 1311, u'k\xe1r': 1309, u'ngl': 1308, u'tkn': 1307, u'mn\xe1': 1306, u'omy': 1306, u'tst': 1306, u'\xfd#h': 1305, u'zly': 1304, u'ga\u017e': 1303, u't\xf3b': 1303, u'za\u017e': 1303, u're\u0165': 1303, u'bn\xfa': 1302, u'dsa': 1302, u'sl\xe1': 1301, u'jmi': 1299, u'ahk': 1298, u's#\xfa': 1298, u'mol': 1298, u'\u013en\xe1': 1297, u'skv': 1296, u'zou': 1295, u'\xfare': 1294, u'udu': 1294, u'n\xe1t': 1293, u'\u017e#r': 1293, u'azi': 1292, u'l\xed\u017e': 1292, u'ped': 1291, u'c#b': 1290, u'm\xe1j': 1290, u'ryt': 1290, u'cip': 1288, u'dip': 
1287, u't#r': 1285, u'\xfdto': 1285, u'ony': 1284, u'c#m': 1282, u'ev\xfd': 1281, u'hva': 1279, u'ch\xf4': 1277, u'\u010dat': 1277, u'\xe1pe': 1277, u'zro': 1276, u'd\xf4j': 1276, u'p#v': 1276, u'rm\xed': 1275, u'\xf4dz': 1274, u'mi\xe9': 1274, u'vza': 1274, u'\xe1zi': 1273, u'\u0161n\xe1': 1273, u'#cu': 1271, u'pn\xe1': 1271, u'di\xed': 1270, u'vrc': 1270, u'lbo': 1269, u'kad': 1268, u'#hy': 1268, u'yv\u0148': 1267, u'\u010dos': 1267, u'\xf3br': 1265, u'vap': 1265, u'\xe1di': 1264, u'ahe': 1264, u'ar\u0161': 1262, u'opt': 1262, u'ul\xe9': 1262, u'ar#': 1260, u'\u013e#v': 1259, u'\u0161tu': 1259, u'kok': 1258, u'\xfd\u0161\u013e': 1256, u'm\xfd\u0161': 1256, u'ovl': 1255, u'lar': 1254, u'zja': 1254, u'pto': 1253, u'#ac': 1250, u'#g#': 1249, u'\u0161k\xe1': 1248, u'uh#': 1247, u'dle': 1243, u'\u013ek\xe1': 1243, u'c#d': 1242, u'sar': 1242, u't\xe1m': 1241, u'\u0161n\xed': 1241, u'#av': 1240, u'k\xfdt': 1239, u'p\xfa\u0161': 1238, u'jte': 1238, u'tib': 1238, u's\xfab': 1237, u'ypu': 1236, u'mko': 1235, u'\xfd#i': 1234, u'aly': 1234, u'sat': 1234, u'z\u0148o': 1233, u'i\u017e#': 1233, u'fes': 1232, u'srd': 1231, u'#ig': 1229, u'ekn': 1229, u'izn': 1228, u'el\xfa': 1226, u'dsm': 1226, u'nke': 1225, u'e#\u0165': 1224, u'rab': 1224, u'zvu': 1224, u'ihl': 1222, u'#ps': 1222, u'una': 1220, u'lde': 1220, u'\xe1do': 1220, u'nn\xfa': 1220, u'rci': 1218, u'ivc': 1217, u'yba': 1217, u'b#k': 1216, u'v\u0161i': 1214, u'd\xe9m': 1214, u'ng#': 1213, u'vez': 1212, u'v\u0148u': 1211, u'\xfabo': 1209, u'\u0161#v': 1208, u'rd\xed': 1206, u'er\xfd': 1205, u'm#g': 1205, u'sid': 1205, u'nn\xe1': 1205, u'cyk': 1204, u'ape': 1204, u'\u017e#j': 1204, u'\xfat#': 1200, u'\u0148u#': 1196, u'\xfavo': 1196, u'\xfako': 1195, u'od\xf4': 1195, u'ihu': 1194, u'iza': 1194, u'yby': 1194, u'rku': 1193, u'yha': 1192, u'l\u0148o': 1191, u'ois': 1190, u'xtu': 1189, u'ajz': 1189, u'\xfdlu': 1187, u'lur': 1186, u've\u010d': 1184, u'lub': 1184, u'va\u013e': 1183, u'koz': 1183, u'##\u017e': 1181, u'l\xedc': 1181, u'ubs': 1179, u'bid': 1179, u'tad': 1178, u'\u0148#b': 1177, u'vih': 1177, u'd\u0161e': 1175, u'a\u013eo': 1175, u'n\xf4t': 1175, u'p#s': 1175, u'ylo': 1174, u'dn\xf4': 1172, u'm#\u010f': 1172, u'l#i': 1172, u'dsu': 1170, u'c#e': 1169, u'r\xfdv': 1167, u'n\u010d\xed': 1167, u'\u010f#j': 1166, u'jou': 1164, u'#ul': 1164, u'tev': 1163, u'lac': 1162, u'nef': 1162, u'd#c': 1161, u'lgi': 1159, u'r#\u017e': 1158, u'ncu': 1157, u'to\u013e': 1157, u'zv\xe4': 1157, u'pyt': 1156, u'dek': 1156, u'mb\xed': 1155, u'\xfdbe': 1155, u'coc': 1155, u'dif': 1155, u'aj\u010d': 1155, u'#ki': 1155, u'mem': 1154, u'n#\u010d': 1154, u'c\xedd': 1150, u'liu': 1146, u'hly': 1145, u'ne\u010f': 1144, u'd\u0148u': 1143, u'\u017e\u0161\xed': 1142, u'elg': 1141, u's#f': 1139, u'#\u0161\xed': 1139, u'\xf3na': 1137, u'\u010dko': 1137, u'kr\xfd': 1135, u'c#t': 1134, u'tep': 1134, u'd#\u010d': 1132, u'ohe': 1131, u'bin': 1131, u'#b\xe1': 1127, u'sv#': 1126, u'faj': 1126, u'f\xe9r': 1125, u'uso': 1124, u'kvi': 1123, u'ch\xe9': 1123, u'hko': 1123, u'idv': 1119, u'kce': 1119, u'\xedby': 1118, u'v#\u013e': 1117, u'\u0161pi': 1116, u'\xe1s\u0165': 1116, u'v#\xed': 1115, u'gar': 1114, u'aba': 1114, u'miz': 1113, u'b\xedc': 1113, u'vdu': 1112, u'b#s': 1112, u't\xedt': 1111, u'#ii': 1111, u'plu': 1111, u'dp#': 1110, u'yl#': 1110, u'\u0161\xedn': 1108, u'l\xfac': 1108, u'\xe9ro': 1107, u's\xedl': 1107, u'sht': 1107, u'k#\u010f': 1106, u'#jo': 1106, u'poi': 1106, u'\xf4dy': 1105, u'gyp': 1105, u'#go': 1105, u'#u#': 1105, u'\u010fa\u010d': 1104, u'#uc': 1103, u'br\xfa': 
1099, u'\xedda': 1099, u'\u017e#\u017e': 1099, u'\xe1jd': 1099, u'r\xe1s': 1098, u'v\xf4\u013e': 1097, u'l\xedb': 1095, u'u\u0161u': 1094, u'ysk': 1091, u'fil': 1090, u'bus': 1090, u'r#c': 1090, u'yd\xe1': 1089, u'jap': 1088, u'u\u010du': 1088, u'tai': 1088, u'ajb': 1087, u'k#f': 1087, u'\u010fov': 1085, u'#m\xe4': 1084, u'\xfdkr': 1084, u'ell': 1082, u'\u0165uj': 1082, u'dzb': 1081, u'nau': 1081, u'pag': 1080, u'il\xe1': 1079, u'sl\xfd': 1077, u'fed': 1077, u'i\u010de': 1077, u'yr\xe1': 1076, u'pp#': 1075, u'asy': 1075, u'\xe1\u0161t': 1074, u'obz': 1073, u'\xfa\u0161\u0165': 1071, u'rii': 1068, u'\u0161ev': 1067, u'a\u0165r': 1067, u'\xfdst': 1066, u'\xe1re': 1065, u'ce\u0148': 1064, u'#dy': 1064, u'bst': 1062, u'nab': 1062, u'#\xeds': 1061, u'kot': 1060, u'n#l': 1059, u'r\xedd': 1059, u'\u017ea#': 1059, u'j#g': 1057, u'#wa': 1057, u'eol': 1057, u'k\xfat': 1054, u'\u010f#t': 1054, u'\xf3re': 1053, u'\u0161t\xed': 1053, u'ats': 1053, u'e\u0161\u0165': 1053, u'dv\xe1': 1052, u'ejs': 1051, u'al\xe1': 1050, u'k\xe1t': 1049, u'\u013en\xfa': 1049, u'z\xedl': 1049, u'ymb': 1047, u'doj': 1046, u'ypt': 1046, u'v\xedl': 1045, u'\xeddl': 1045, u'epc': 1044, u'lti': 1043, u't#c': 1042, u'\xfana': 1042, u's\xe1m': 1042, u'jiv': 1040, u'nze': 1038, u'\xed\u0165a': 1038, u'\xed#f': 1036, u'chi': 1036, u'sis': 1033, u'pir': 1033, u'\xedkm': 1032, u'ork': 1031, u'gaz': 1031, u'spp': 1031, u'\xedde': 1030, u'sl\xe9': 1030, u'atb': 1026, u'mpi': 1026, u'jni': 1025, u'et\xfd': 1024, u'\xe1zv': 1023, u'mka': 1022, u'#\xe1n': 1021, u'\xe9##': 1020, u'jcu': 1020, u'#it': 1019, u'oxi': 1019, u'eov': 1017, u'd\xfdm': 1015, u'our': 1015, u'e\u0161k': 1015, u'yti': 1015, u'#pt': 1015, u'ktn': 1014, u'\u0161aj': 1012, u'orc': 1011, u'tob': 1011, u'lda': 1009, u'\xe1cn': 1009, u'vak': 1009, u'eut': 1008, u'mba': 1007, u'h\xe9z': 1007, u'ovk': 1007, u'nin': 1007, u'op\xfd': 1007, u'hdp': 1007, u'\u013e\xfat': 1006, u'\u0148#o': 1005, u'\xe1\u017eu': 1004, u'd#i': 1004, u'#u\u010d': 1003, u'boc': 1002, u'hna': 997, u'n#\xfa': 997, u'xi#': 996, u'l#c': 996, u'v\u0148o': 995, u'i\u017eu': 994, u'r#e': 994, u'bzv': 992, u'iak': 992, u'bak': 991, u'wto': 989, u'#c\xed': 987, u'hrd': 987, u'sbu': 986, u'\u010da#': 986, u'#pu': 986, u'mzd': 985, u'\xe1hr': 985, u'hl\xed': 984, u'iop': 984, u'eky': 982, u'\xfac#': 981, u'j\xfal': 981, u'ajr': 980, u'#\xe1z': 980, u'uh\xe1': 979, u'v\xedm': 978, u'\xf3ms': 977, u'le\u0148': 977, u'mak': 976, u'#hd': 976, u'i\u010da': 975, u'oh#': 974, u'ob\xf4': 973, u'ed\u013a': 972, u'avs': 971, u'\xe9r#': 969, u'b\xf4d': 969, u'm\xe9n': 969, u'zob': 968, u'#mz': 968, u'\u0161ok': 967, u'zdo': 964, u'top': 964, u'mog': 962, u'exp': 961, u'\u013eaj': 960, u'#wt': 960, u'rsp': 960, u'#\u013e\xfa': 959, u'\xedz#': 959, u'sts': 959, u'\xe9zn': 959, u'jit': 958, u'rba': 958, u'\u0161#n': 957, u'lts': 957, u'ck\xed': 957, u'i\u010d\xed': 957, u'op\xe1': 956, u'unu': 954, u'rdz': 954, u'idn': 954, u'\xfad#': 954, u't#h': 953, u'bu\u010f': 953, u'\xe1v\u0161': 952, u'ein': 950, u'err': 950, u'\u017ei\u010d': 949, u'kt\xe9': 949, u'n#u': 948, u'\u010d#n': 947, u'rub': 947, u'l\xedv': 947, u'iod': 947, u'a\u013eu': 946, u'#er': 945, u'op\u013a': 945, u'ezd': 944, u'm\xe1r': 944, u'oz#': 942, u'cs#': 942, u'sf\xe9': 941, u'ud\xe1': 940, u'oeu': 939, u'aca': 939, u'k\xf3r': 939, u'lym': 939, u'ire': 938, u'pem': 937, u'#\xfa\u017e': 935, u'de\u0165': 935, u'd\xe9#': 935, u'e\u010du': 934, u'\u0148oc': 934, u'b#n': 934, u'fy#': 933, u'tul': 932, u'ill': 931, u'igi': 931, u'ybr': 931, u'\xf4li': 929, 
u'ded': 929, u'alb': 928, u'ru\xe1': 928, u'rpa': 928, u'rpi': 926, u'pap': 926, u'\xe1\u010da': 925, u'leo': 925, u'bos': 924, u'feb': 922, u'\xe4#s': 920, u'\u0148#a': 919, u'ual': 917, u'dim': 917, u'ybe': 917, u'\xe1#f': 916, u'roe': 916, u'ou\u010d': 915, u'\xedl#': 915, u'a\u0161l': 915, u'ic\xed': 915, u'odt': 914, u'#k\xfa': 914, u'asc': 912, u'hel': 911, u'\xe9ne': 911, u'\u010d\u0148o': 910, u'v\xed\u013e': 909, u't##': 908, u'an\u017e': 908, u'ary': 908, u'hai': 907, u'e#\xed': 907, u'\u013ea\u0165': 906, u'asb': 906, u'sm\xfa': 905, u'agu': 905, u'\u017eka': 904, u'dyn': 903, u'\xfakn': 903, u'\u010doh': 903, u'teh': 903, u'hem': 902, u'nk\u010d': 902, u'\u013eb\xe1': 902, u'sk\xed': 902, u'v#f': 901, u'n\u0161o': 900, u'i\u0161k': 899, u'dig': 898, u'\xe9ko': 898, u'dbe': 897, u'c##': 897, u'i#\u0165': 897, u'l\xfav': 896, u'\xfav#': 896, u'ml\xfa': 896, u'\xe1ze': 893, u'#ui': 892, u'lli': 892, u'\xednu': 891, u'uvn': 891, u'c#j': 890, u'zsu': 890, u'lio': 890, u'f\xf3r': 885, u'nn\xed': 885, u'ads': 884, u'm\xe4s': 884, u'\xfadz': 884, u'\xfa#\u013e': 883, u'hlu': 883, u'\xedku': 883, u'rut': 882, u'uhy': 882, u'umy': 879, u'h\xfa#': 879, u'ipu': 879, u'\xe1##': 878, u'ofy': 877, u'hru': 877, u'\u0148#m': 876, u'zon': 875, u'yzn': 875, u'lud': 875, u'\xfask': 875, u'\u010f#m': 874, u'ab\xe1': 874, u'rky': 872, u'iv\xfa': 871, u'a#\u0148': 869, u'\u017ed\xe1': 869, u'r\xfdb': 869, u'op\xed': 869, u'ulz': 869, u'n\xfad': 869, u'acn': 866, u'uce': 866, u'tke': 865, u'\u0161to': 865, u'##\u013e': 865, u'agr': 865, u'zim': 865, u'ams': 864, u'fi\u0161': 863, u'euv': 862, u'##l': 862, u'isc': 862, u'#f\xf3': 861, u'd\xedc': 860, u'\xe1co': 860, u'n#i': 860, u'yku': 859, u'jny': 858, u'r\xf3n': 857, u'can': 857, u'hok': 857, u'und': 855, u'\u0161tn': 855, u'u#\u010f': 855, u'asm': 855, u'azd': 854, u'k\xfap': 853, u'\xed#\u0161': 853, u'cyp': 853, u'p#d': 852, u'\u013e\xfab': 851, u'ijs': 848, u'\xedve': 848, u'nli': 848, u'epi': 848, u'l#\xfa': 847, u'#ei': 847, u'ait': 845, u'a#\u0165': 844, u'ph#': 844, u'nk#': 843, u'\xedmn': 843, u'\u0161la': 843, u'ism': 843, u'nuk': 842, u'\xf3t#': 841, u'#\xfak': 840, u'h\xe9h': 839, u'au\u010d': 839, u'ejk': 839, u'\u0161a\u0165': 837, u'er\u010d': 837, u'osy': 836, u'b#z': 836, u'ess': 836, u'\u017ebu': 835, u'avb': 835, u'#uj': 834, u'vad': 834, u'n\u0161p': 833, u'lel': 833, u'jn\xed': 832, u'ep#': 832, u'jes': 831, u'k\xe9k': 830, u'\xe4#t': 830, u'avz': 830, u'dum': 829, u'rt#': 828, u'ap\xe1': 828, u'ack': 827, u'smu': 826, u'\xfatu': 825, u'de\u017e': 824, u'd\xf3n': 823, u'b#o': 823, u'rhm': 823, u'ean': 822, u'qui': 822, u'z#\xfa': 822, u'\u010d\u0161o': 821, u'ass': 821, u'n#f': 821, u'\u0148al': 820, u'#dp': 820, u'ed\xf3': 819, u'uen': 819, u'\xe4ze': 819, u'l\xed\u0161': 819, u'lh\u0161': 818, u'z#l': 817, u'don': 815, u'ajh': 815, u'g#a': 813, u'syn': 812, u'eh\u013e': 812, u'pa\u010d': 812, u'\xe1hy': 811, u'h\u013ab': 809, u'\xfd#l': 809, u'\u017en\xe1': 809, u'act': 808, u'dph': 808, u'ot\xed': 808, u'ezl': 807, u'yle': 807, u'o\u010dk': 807, u'urd': 806, u'#p\xfd': 804, u'ubu': 804, u'ba\u0165': 804, u'\u010f#z': 804, u'uv\xed': 803, u'\u010f#h': 803, u'bat': 803, u'\xe1\u017eo': 802, u'\xedvi': 801, u'c#r': 801, u's\u0165u': 800, u'ser': 800, u'se\u013e': 800, u'rkt': 799, u'agi': 799, u'eop': 799, u'ohn': 798, u'\xf3dy': 797, u'reo': 797, u'tac': 797, u'\xe1#\u0161': 795, u'd#f': 795, u'lku': 794, u'\xe9ra': 793, u'cas': 793, u'\u013en\xed': 793, u'#z\xf3': 793, u'v\xed\u0165': 793, u'idr': 792, u'nog': 791, u'#m\xfa': 
791, u'\u0165#g': 790, u'p\xe1s': 790, u'onv': 789, u'emc': 789, u'\u017eok': 789, u'gam': 789, u'xti': 789, u'eg\xf3': 789, u'e\xfas': 789, u'\u013e#z': 788, u'or\u0148': 786, u'aur': 785, u'e\u010db': 785, u'h\u010di': 785, u'rta': 785, u'g\xf3r': 785, u'gf#': 784, u's#\u017e': 784, u'mst': 784, u'acr': 783, u'dic': 783, u'rmn': 779, u'ap\xed': 778, u'dv\xed': 777, u'j#\u010f': 776, u'ket': 776, u'l#l': 776, u'spu': 776, u'i\u0161i': 774, u'#o\u017e': 772, u'arb': 771, u'\xe4#z': 770, u'\u010de#': 770, u'lla': 770, u'v\xedd': 770, u'egf': 770, u'mku': 770, u'p#z': 770, u'ag\xe9': 769, u'\xfadu': 769, u'yb#': 768, u'fa#': 768, u'kv\xf4': 767, u'rbe': 766, u'jbl': 766, u'hon': 765, u'dus': 765, u'oek': 763, u'yhr': 763, u'pse': 762, u'wal': 762, u'ask': 762, u'od\u0148': 762, u'nt\xed': 762, u'mpo': 762, u'tte': 761, u'esy': 761, u'si\xe1': 760, u'\xe4z\u0148': 759, u'sny': 759, u'boz': 756, u'hie': 756, u'ezr': 754, u'#we': 753, u'cqu': 753, u'fir': 752, u'acq': 752, u'l\xe9n': 752, u'rei': 752, u'ivu': 751, u'ml\xe1': 751, u'sin': 750, u'\xedla': 749, u'ety': 748, u'ule': 748, u'#th': 748, u'aos': 747, u'r\u0148u': 747, u'pic': 747, u'ziv': 746, u'gue': 745, u'luk': 745, u'ov\u0148': 744, u'itv': 744, u'ikv': 744, u'tk\xe9': 743, u'dyk': 743, u't#\u010d': 741, u'r\u0161o': 741, u'i#w': 741, u'#sf': 740, u'r\xe1\u010d': 740, u'agn': 739, u'dex': 739, u'ail': 738, u'h\xe1v': 738, u'yra': 738, u'cet': 737, u'me\u0161': 737, u'lms': 737, u'\xed\u0161i': 736, u'n\xf3m': 736, u'ken': 736, u'un#': 736, u'on\xf3': 736, u'g#s': 736, u'#m\xed': 736, u'xne': 736, u'dun': 736, u'uta': 735, u'bab': 734, u'jch': 733, u'd\xeds': 733, u'lus': 732, u'ktm': 731, u'al\xfa': 729, u'i\u010ds': 729, u'kub': 728, u'yka': 728, u'l\xe1\u010d': 728, u'\u010dku': 727, u'\u017eku': 726, u'sfo': 726, u'kau': 725, u's\xedt': 725, u'vi\u010d': 725, u'a#\xed': 725, u'ri\xe9': 725, u'\u017e#i': 723, u'g\xe9d': 722, u'rih': 722, u'alm': 721, u'#ev': 721, u'eib': 720, u'the': 720, u'un\xfa': 719, u'z\xe1z': 719, u'a\u017eb': 719, u'ymp': 718, u'ubo': 718, u'yzd': 717, u'gui': 717, u'deo': 717, u'lyz': 717, u'h#g': 716, u'of#': 715, u'ozy': 714, u'di\u0161': 714, u'ukl': 713, u'kt\xe1': 713, u'enm': 713, u'du\xe1': 713, u'sci': 711, u'apu': 711, u'tl\xe1': 711, u'ar\xf3': 711, u'er\xe9': 710, u'\u0161k\xf4': 710, u'up\u0148': 710, u'\xedvo': 708, u'\u0148#j': 708, u't\xf3n': 708, u'\xf3ts': 708, u'loc': 707, u'gli': 707, u'ise': 706, u'\xe1pr': 705, u'tby': 704, u'eor': 704, u'\xedpu': 704, u'r#h': 704, u'car': 704, u'n#g': 703, u'j##': 702, u'\xf3de': 702, u'ar\xfd': 702, u'j\u010di': 702, u'fi#': 701, u'tti': 701, u'\u0161n\xfa': 701, u'l\u0161e': 701, u'rre': 701, u'az\xfd': 700, u'sim': 700, u'\xfact': 700, u'heu': 699, u'zus': 699, u't\xfa\u017e': 698, u'\u0165a\u0165': 698, u'udl': 698, u'b\xfd#': 698, u'ap\xe4': 697, u'at\xf3': 697, u'iom': 697, u'ch\u0161': 696, u'l\xedt': 696, u'ob\xfa': 695, u'ib#': 695, u'c\xedm': 695, u'tua': 695, u'mej': 694, u'kin': 694, u'a\u017e\u010f': 694, u'yb\u0148': 694, u'#c#': 692, u'att': 692, u'\xfamr': 691, u'dz\xed': 690, u's\u013e\xfa': 689, u'#\u0148u': 689, u'\u0161sk': 689, u'ub#': 688, u'\u010dty': 688, u'#tz': 688, u'lt#': 687, u'ong': 686, u'mpu': 686, u'sd#': 686, u'ud#': 686, u'\u0148#k': 685, u'\u0165om': 685, u'r#u': 684, u'\u017en\xfa': 684, u'\xe9ry': 683, u'imb': 683, u'krm': 682, u'k\xf3d': 682, u'\xfano': 681, u'bse': 681, u'flo': 681, u'\xe4#o': 679, u'eir': 678, u'lo\u0161': 678, u'\u010dst': 676, u'stk': 676, u'\xe4ta': 676, u'olv': 676, u'\xe4zu': 676, u'sso': 
676, u'lhu': 674, u'ndr': 674, u'ids': 672, u'\xe1ho': 670, u'by\u010d': 670, u'\u0161#s': 669, u'\xf4\u017ei': 669, u'p\xf4\u017e': 669, u'nma': 669, u'll#': 668, u'v#\u010f': 668, u'iga': 668, u'r\u0161e': 667, u'dzn': 667, u'\u0161ku': 667, u'\u017e#l': 667, u'dur': 667, u'dln': 666, u'yte': 666, u'xe#': 664, u'dau': 663, u'vzo': 663, u'#fy': 662, u'm\xe1d': 662, u'd#\u0161': 662, u'\xf3nm': 661, u'\xe1sm': 661, u'zl\xe9': 659, u'olm': 659, u'#hm': 659, u'tzv': 658, u'\u0161mu': 658, u'\xe1\u0161m': 658, u'ulu': 658, u'aga': 657, u'eog': 657, u'\u017e#e': 657, u'zv#': 657, u'\u0161\u0161e': 656, u'iol': 656, u'aju': 656, u'cti': 656, u'ivl': 655, u'b\xfdc': 655, u'\xe1\u0165#': 654, u'op#': 654, u'ss#': 654, u'bis': 653, u'oga': 653, u'jr\xfd': 652, u'b\xfar': 651, u't\xe1b': 651, u'zdn': 651, u'\u0148#t': 651, u'veb': 651, u'vri': 651, u'auk': 650, u'u\u017e\u0161': 649, u'\u013a\u017ee': 648, u'gma': 648, u'lau': 648, u'un\xe1': 647, u'\u013eni': 647, u'p\xe1d': 647, u'\u013eal': 646, u'uin': 646, u'\xfdli': 646, u'wan': 645, u'ad\xe9': 645, u'\xe1ji': 645, u'ett': 644, u'\u010f#a': 644, u'ej\xfa': 643, u'ger': 642, u'xto': 642, u'jn\xfa': 642, u'erl': 641, u'#mr': 641, u'aus': 640, u'\u010du\u0165': 640, u'\u017ed\xfa': 640, u'mja': 639, u'tlo': 638, u't#i': 637, u'k#\u0161': 637, u'ibu': 637, u'ntm': 637, u'sse': 637, u'joc': 636, u'd#\xfa': 636, u'\xeddo': 635, u'viz': 635, u'v\xfdp': 635, u'#\xfac': 634, u'ab\xed': 634, u'\xe9mn': 633, u'uby': 633, u'lmo': 633, u'urz': 632, u'r\xed\u017e': 632, u'pca': 630, u'xn\xe9': 630, u'anb': 630, u'\xf4\u013eu': 628, u'av\xfa': 628, u'ict': 627, u'u\u010d\xed': 627, u'e\u0161l': 625, u'nuc': 625, u'r\u0148o': 624, u'jus': 624, u'ems': 622, u'#il': 622, u'dl\u017e': 622, u'yze': 621, u'gua': 620, u'jv\xfd': 620, u'cuk': 620, u'suv': 620, u'iez': 620, u'myc': 620, u'zif': 620, u'nts': 620, u'of\xe1': 619, u'd\u010da': 619, u'anz': 619, u'tlm': 619, u'#i\u0161': 618, u'r\u010dn': 618, u'iin': 617, u'jma': 616, u'\u013e#m': 616, u'\u013e#t': 616, u'\u0161al': 615, u'\u0148#d': 615, u'xn\xfd': 615, u'hma': 615, u'\u017e\u010fo': 614, u'c#\u010d': 613, u'zio': 613, u'a#w': 612, u'\u017esk': 612, u'c#i': 612, u'hys': 612, u'o#\u0165': 611, u'nea': 611, u'\xe1ku': 610, u'ty\u0161': 610, u'ikm': 610, u'u\u010f#': 609, u'sp\xfd': 608, u'pi#': 608, u'lz#': 607, u't\xe1d': 607, u'cud': 607, u'lle': 607, u'atm': 607, u'rdc': 607, u'yzi': 606, u'rso': 606, u'\u010dut': 606, u'\u013e#a': 605, u'zbo': 605, u's#g': 604, u'jar': 603, u'tsa': 602, u'\xe1pl': 602, u'zdy': 601, u'lh\xe9': 601, u'bej': 601, u'fyz': 601, u'\xe1zd': 600, u'e\u017ee': 600, u's\xe1l': 600, u'lb\xe1': 599, u'zbi': 599, u'con': 599, u'#b#': 598, u'ozc': 597, u'g#v': 596, u'ot\xfd': 596, u'cor': 595, u'#ox': 593, u'iru': 593, u'r\u0161u': 593, u'\u010f#k': 593, u'znu': 593, u'pa\u0148': 593, u'rbu': 592, u'nsf': 590, u'v\xfd\u017e': 589, u'\xfd\u017ei': 589, u'\xedzn': 588, u'vig': 588, u'hyt': 588, u'am\xfd': 587, u'at\u010f': 587, u'abw': 587, u't\u010f#': 587, u'\xfdzu': 586, u'dv\xe4': 586, u'c#u': 586, u'\xedha': 586, u'\u010f#d': 586, u'en\u017e': 586, u'\xf3nk': 585, u'ed\xf4': 585, u'rud': 585, u'\xed\u0161e': 584, u'rs#': 583, u'#\u0161\u0165': 583, u'azc': 581, u's#\u010f': 581, u'y\u0161s': 581, u'yk#': 581, u'#mj': 580, u'xid': 580, u'r#\u010d': 580, u'aug': 579, u'aky': 579, u'axe': 578, u'#\u0161o': 578, u'mrz': 576, u'u\u0161\xed': 576, u'#u\u0161': 576, u'dba': 575, u'\xedpo': 575, u'fl\xe1': 575, u'vk\xe1': 574, u'gyn': 574, u'lh\xfd': 574, u'mag': 573, u'emp': 571, 
u'tyk': 571, u'azs': 569, u'vzr': 569, u'ytk': 568, u'z#g': 568, u'\xfapa': 567, u'air': 567, u'u\u010do': 567, u'buc': 567, u'hul': 567, u'fre': 566, u'ib\xfa': 566, u'uov': 565, u'b\xe1z': 565, u'd#l': 565, u'yje': 565, u'\u0148an': 564, u'p#m': 564, u'erb': 563, u'efu': 563, u't\xe1\u0165': 562, u'\xe1la': 562, u'tmo': 561, u'\u013eba': 561, u'zic': 561, u'de\u010d': 561, u'p#j': 561, u'#cl': 560, u'ep\u013e': 560, u'iz\xed': 560, u'p\xed#': 559, u'nij': 559, u'mu\u010d': 558, u'uel': 558, u'\xfa\u017eb': 558, u'pa\u0165': 558, u'\u017ek\xfd': 557, u'\xeddu': 557, u'cat': 556, u'cos': 556, u'\u010f#o': 556, u'kta': 555, u'ofa': 554, u'ovr': 554, u'y\u017ei': 554, u'z\xedk': 554, u'v#\u0148': 554, u'p\u013eo': 552, u'n\xe1n': 552, u'\u017eaj': 552, u'\xfado': 552, u'zr\xe1': 551, u'osf': 550, u'evl': 550, u'uru': 550, u'rts': 550, u'yn#': 550, u'zco': 549, u'kho': 549, u'luh': 549, u'y\u0161k': 548, u'\xe9ns': 547, u'ylu': 547, u'#ky': 547, u'anl': 546, u'rap': 546, u'mex': 545, u'eum': 545, u'\u010f#i': 545, u'dca': 544, u'upk': 544, u'\u017e#c': 543, u'p\xe1\u010d': 543, u'viu': 542, u'od\u0161': 542, u'r\xedb': 542, u'\u0161#d': 541, u'bn\xed': 540, u'non': 540, u'\u010drt': 540, u'\xfabi': 540, u'at\u0161': 540, u'\xed\u0148a': 539, u'gy#': 539, u'\xf3ty': 539, u'\u010d\u0148u': 538, u'llo': 538, u'tl\xed': 538, u'eo#': 538, u'\xe1zy': 537, u'c\xe9n': 537, u'uk#': 536, u'uly': 536, u'sc\xe9': 535, u'\u010fuj': 535, u'vop': 535, u'cb#': 535, u'cin': 534, u'e\u010fo': 534, u'ceg': 533, u'xic': 533, u'vca': 533, u'azv': 532, u'\xe1\u010di': 532, u'#nl': 532, u'ka\u0161': 532, u'we#': 531, u'agm': 531, u'lef': 531, u'\xfdh\u013e': 531, u'\u013e#b': 530, u'usu': 530, u'zlu': 529, u'l\xf3z': 528, u'\u013eka': 528, u'\xf3po': 528, u'n#\u017e': 528, u't\xedd': 527, u'\u010d#s': 527, u'alu': 527, u'\u0161ad': 526, u'\xe4tn': 526, u'une': 525, u'vln': 525, u'pky': 524, u'lmi': 523, u'\u017e#h': 523, u'\xe1ls': 523, u'iep': 523, u'jzr': 522, u'mra': 522, u'p\xe1v': 522, u'#sw': 521, u'zla': 521, u'irm': 520, u'loe': 519, u'ugu': 517, u'r\u010do': 517, u'eki': 517, u'lyt': 517, u'd\u0161k': 516, u'ytr': 516, u'rvi': 516, u'#t#': 516, u'ehr': 515, u'\u0161kr': 515, u'cco': 514, u'\u013e#k': 513, u's\u0148o': 513, u'\xe4#m': 512, u'epe': 512, u'tfo': 511, u'xpe': 511, u'de\xe1': 510, u'd\u013eo': 510, u'\xf4l#': 509, u'rda': 509, u'pid': 509, u't\xedh': 508, u'lta': 508, u'p\xf4j': 508, u'#\u017ea': 508, u'jvz': 507, u'eh\xf4': 507, u'atf': 507, u'b\xedj': 507, u'l#f': 507, u'c#\u017e': 506, u'bot': 506, u'sac': 506, u'et\u013e': 506, u'es\u0148': 506, u'\xedlo': 505, u'eb\xe1': 505, u'acm': 503, u'af#': 503, u'iot': 503, u'meh': 502, u'zi\u010d': 502, u'f\xedn': 502, u'eji': 502, u'b\xfa#': 501, u'mr\u0165': 501, u'rt\xe9': 501, u'lap': 501, u'h#\u010f': 501, u'v\u0161\xed': 500, u'\xe4zb': 500, u'hry': 499, u'zl\xfd': 499, u'bes': 499, u'#gi': 498, u'krv': 498, u'uan': 498, u'pl\xe9': 498, u'oct': 498, u'prs': 498, u'hei': 497, u'k#\u013e': 497, u'ipa': 497, u'tvy': 496, u'chk': 496, u'ker': 495, u'tch': 495, u'tr\xf6': 495, u'r\xf6m': 495, u'erh': 494, u'omr': 494, u'\u013eoc': 494, u'\xedni': 493, u'rz\xed': 492, u'y\u010de': 492, u'kob': 492, u'hin': 492, u'y\u0148o': 492, u'nz\xe1': 491, u'nzo': 490, u'eat': 490, u'sn\xed': 490, u'#r\xfa': 490, u't\xfdl': 490, u'bwe': 489, u'#pn': 488, u'laf': 488, u'zn\xfa': 487, u'kep': 486, u'\xfa#\u010f': 486, u'okh': 486, u'avc': 486, u'l\u010da': 485, u'jna': 485, u'tio': 485, u'd\xfdn': 484, u'luo': 484, u'nd\xfd': 484, u'\u013ean': 483, u'ea#': 483, 
u'd\u017ea': 483, u'##\u0161': 483, u'lux': 483, u'bun': 483, u'ar\xe9': 483, u'o#\xed': 482, u'\xe9#\u010f': 482, u'ecy': 482, u'\u013a\u017ek': 481, u'h\xe1m': 481, u'vdo': 480, u'ns#': 479, u'ecb': 479, u'pez': 479, u'\u0161#z': 478, u'er\u0148': 478, u'bka': 478, u'eup': 478, u'zm\xfd': 478, u'sce': 477, u'z\u010fa': 477, u'un\xed': 477, u'p#b': 477, u'#vk': 476, u'evr': 475, u'\u017eke': 475, u'\xe1hl': 475, u'y\u010dn': 473, u'vci': 473, u'dvt': 472, u'#r\xed': 472, u'zmr': 472, u'#d\u013a': 472, u'\u0148at': 471, u'##g': 471, u'\u017emi': 471, u'ar\xfa': 471, u'rz\xe1': 470, u'ts#': 470, u'ed\u013e': 470, u'\xedvc': 470, u'e\xfa\u010d': 470, u'rt\xe1': 469, u'\u013eou': 469, u'u\u017em': 469, u'pij': 469, u'of\xf3': 468, u'ep\xe1': 468, u'uob': 467, u'a\u0148u': 467, u'\xfabe': 466, u'#mc': 466, u'ztr': 466, u'set': 466, u'\u013e#d': 466, u'auh': 465, u'ah\xe1': 465, u'dda': 465, u'#h\xe1': 465, u'\xe4sa': 464, u'\xf6mo': 464, u'\xe1po': 464, u'il\xe9': 464, u'\xe9#\u013e': 463, u'eh\u013a': 462, u'jam': 461, u'#f\xed': 461, u'roh': 461, u'\xedd\u013e': 461, u'ipi': 461, u'rpr': 460, u'f#a': 460, u'\xfdzy': 459, u'\xedru': 459, u'\xfdno': 459, u'o\u0161n': 459, u'\u010dok': 459, u'bsu': 458, u'p\xf4r': 458, u'ej\u0148': 458, u'\u010f#e': 458, u'ut#': 458, u'gus': 457, u'iv#': 456, u'a\u0161n': 456, u'lyc': 456, u'sou': 455, u'ah\u0161': 455, u'm\xe1v': 455, u'\xe1tl': 455, u'tum': 455, u'\xe1\u0148a': 455, u'\xe9re': 454, u'vkl': 454, u'\xfa\u0161k': 454, u'\xf4ro': 454, u'tir': 454, u'yzo': 453, u'f\xf3n': 453, u'um\xe1': 453, u'\xedcr': 453, u'\xf3da': 453, u'\u017ebe': 452, u'p\xed\u0161': 452, u'\xe9#g': 452, u'b#m': 452, u'smo': 451, u'kie': 450, u'a\u0161\u0165': 450, u'ojv': 449, u'r\xfa\u0161': 449, u'#oe': 447, u'irk': 447, u'kil': 447, u'g#m': 447, u'haz': 446, u'mf#': 446, u'krt': 445, u'l\xe1k': 445, u'ev\xe4': 444, u'r\u017e\xed': 444, u'r\u0165#': 443, u'iag': 443, u'\xedho': 443, u'\xe1ma': 442, u'diz': 442, u'nny': 442, u'\xfatv': 441, u'ezu': 441, u'nob': 441, u'#s\xe9': 441, u'oh\xe1': 441, u'otc': 441, u'rbo': 440, u'i\xe1t': 440, u'##\xfa': 440, u't\xe1\u017e': 439, u'gl#': 439, u'\xedmu': 438, u'\xe4#a': 438, u'n\xe9t': 438, u'ock': 438, u'sco': 437, u'jvo': 437, u'\xe4t\xe1': 437, u'o#w': 437, u'roa': 437, u'b\xfdm': 437, u'gol': 436, u'\u010f#u': 436, u'k\xf3t': 436, u'ufi': 435, u'eos': 435, u'ca\u0165': 435, u'\u010fou': 435, u'\xe9nn': 434, u'sl#': 434, u'pmi': 434, u'\u013eam': 433, u'ozk': 433, u'rp\xed': 433, u'luf': 432, u'n\xfac': 432, u'vke': 431, u'cr#': 431, u'\xfd##': 431, u'm\xedm': 431, u'\u0155tv': 431, u'm\u0155t': 431, u'gna': 431, u's\xe9r': 430, u'\u0161#m': 430, u'web': 430, u'\xe4#k': 430, u'cla': 430, u'hk\xe9': 430, u'ddi': 430, u'tip': 430, u'rzn': 429, u'ivn': 429, u'bg#': 429, u'#j#': 429, u'om\xe9': 429, u'sif': 429, u'ac\xfa': 429, u'h\u0161\xed': 429, u'zbl': 429, u'zrk': 428, u'dcu': 428, u'\u0161ky': 428, u'eoc': 428, u'l\xedd': 428, u'bni': 427, u'\u0148#r': 427, u'gi\xe1': 427, u'\u010di\u017e': 427, u'#m\u0155': 427, u'zm\xe4': 427, u'kel': 426, u'l#\u0161': 426, u'\xeddy': 426, u'i\u017ee': 426, u'cn\xe1': 426, u'or\xf4': 425, u'r\xf4b': 425, u'\xfdpo': 425, u'dte': 425, u'uv\xe4': 424, u'mbe': 423, u'tr\u017e': 423, u'oul': 423, u'itr': 423, u'ssa': 423, u'puk': 422, u'eud': 422, u'oob': 422, u'azm': 421, u'ceu': 421, u'dce': 421, u'\u017e#\u010d': 421, u'o#\u0148': 421, u'bvo': 420, u'svi': 420, u'#eh': 420, u'mmf': 420, u'fe#': 420, u'e\xe1n': 419, u'r\u010du': 419, u'pl\xed': 419, u'wsk': 419, u'v\xe4\u0165': 418, 
u'\u010dam': 418, u'#b\xfa': 418, u'miv': 417, u'k\xf4l': 417, u'tap': 417, u'#ai': 416, u'tco': 416, u'o\u0161e': 416, u'luz': 416, u'yi#': 415, u'\xed#g': 415, u'es\xe1': 415, u'ce\xe1': 414, u'b#d': 414, u'b#t': 414, u'dei': 414, u'tid': 414, u'b\u0148u': 413, u'k\xfdk': 413, u'eh#': 413, u'\xfazi': 413, u'#mm': 413, u'yh\xfd': 412, u'fen': 412, u'\u010dk\xe1': 412, u'mny': 411, u'jac': 411, u'yn\xfa': 411, u'iri': 410, u'b#j': 410, u'ott': 410, u'usy': 409, u'd\xe1l': 409, u'er\xfa': 408, u'dci': 408, u'\xf3mi': 408, u'ml\u010d': 408, u'hyn': 408, u'b#e': 408, u'\xf3ta': 408, u'p\u0148o': 407, u'ne\u013e': 407, u'r\xe9n': 407, u'uh\xfa': 407, u'\u017e\u0161e': 406, u'\xfa#g': 406, u'\u013eky': 406, u'tha': 406, u'n\u010da': 406, u'h\u010de': 405, u'\xfahr': 405, u'ci\u0165': 404, u'nk\xe1': 404, u'm\xfdl': 404, u'\u0161#k': 403, u'f\xf3b': 403, u'ovc': 403, u'tox': 403, u'shi': 403, u'pa\u013e': 403, u'ysp': 403, u'c#\u013e': 402, u'\u010dad': 402, u'\u0165de': 402, u'\u0161k\xf3': 402, u'ype': 402, u'yp#': 402, u'oel': 401, u'b#b': 401, u'a\u017e\u0161': 401, u'\xe9ru': 400, u'hed': 400, u'ei#': 400, u'rd\xe1': 400, u'\xedco': 400, u'ydi': 399, u'zl\xfa': 399, u'vdi': 398, u'xem': 398, u'\xfali': 397, u'e\u0161a': 397, u'rt\xed': 397, u'\u017eme': 397, u'r#i': 397, u'usd': 397, u'itt': 396, u'okv': 396, u'cni': 396, u'j\xfa\u010d': 396, u'evh': 395, u'c#c': 394, u'alz': 394, u'sa\u010d': 394, u'j\u010de': 394, u'sot': 393, u'udc': 393, u's\xfdr': 393, u'xik': 393, u'p\u0161u': 393, u'itk': 393, u'b\xfac': 392, u'dzr': 392, u'lop': 392, u'ag\xe1': 392, u'pni': 391, u'oak': 391, u'dsi': 391, u'fos': 391, u'ypi': 391, u'bau': 391, u'l\xeds': 391, u'\u0148#u': 390, u'\xf4de': 390, u'#z\u010f': 390, u'ahm': 390, u'#m\xfd': 390, u'has': 389, u't#l': 389, u'j\u0148o': 389, u'm\xfar': 389, u'cn\xfa': 389, u'\u013e#j': 388, u'ihy': 388, u'bmi': 388, u'rby': 387, u'l\xe9g': 387, u'hn#': 386, u'dii': 386, u'nna': 386, u'dl\u0148': 385, u'hob': 385, u'a\u010d#': 385, u're\u010f': 385, u'uk\u010d': 384, u'em\xfd': 384, u'tto': 384, u'seu': 384, u'\xf4js': 384, u'\xf4\u013ea': 384, u'ucc': 383, u'zca': 383, u'ym\xe1': 383, u'\u017edn': 383, u'#ue': 383, u'f\xe1l': 383, u'\xf4du': 382, u'm\xedr': 382, u'\u010fme': 382, u'ct#': 382, u'nip': 382, u'ecr': 382, u'e\u010d#': 381, u'hut': 381, u'\u0148##': 380, u'uno': 380, u'utv': 380, u'buz': 380, u'loo': 379, u'vi\u0148': 379, u'b\xe1#': 379, u's#\u0148': 379, u'p#o': 379, u'uke': 378, u'ye#': 378, u'yky': 378, u'b#r': 378, u'n#\u0161': 378, u'lnk': 378, u'fan': 378, u'\u017enu': 378, u'tv\xe9': 377, u'm#\xed': 377, u'd\xedk': 377, u'r#f': 377, u'cmi': 376, u'j#\xe1': 376, u'zst': 376, u'\xed\u013eu': 376, u'edr': 376, u'oll': 376, u'\u017eas': 376, u're\u0148': 376, u'p#t': 376, u'e#w': 375, u'on\u017e': 374, u'eim': 374, u'psy': 374, u'id#': 374, u'\xfdkl': 374, u'm\xfad': 373, u'\u0161le': 373, u'ab\xe9': 373, u'\xf3d#': 373, u'h\xfdl': 373, u'jer': 372, u'i\u0161o': 372, u'n\u017es': 372, u'teo': 372, u'ri\xf3': 372, u'kj\xf3': 371, u'c\xfav': 371, u'j\xf3t': 371, u'io#': 371, u'\xfak#': 370, u'uxe': 370, u'\xedn\u010d': 369, u'\xed\u013eo': 369, u'ku\u0161': 369, u'g\xe9n': 369, u'koa': 369, u'dub': 369, u'evu': 368, u'\u0161va': 368, u'\u013e#o': 367, u'oms': 367, u'tub': 367, u'jun': 367, u'brz': 366, u'y#\u010f': 366, u'#o\u0161': 365, u'cir': 365, u'eoh': 365, u'\u017ena': 365, u'aiw': 364, u'puy': 364, u'\u010dny': 364, u'syc': 364, u'cta': 364, u'oca': 364, u'#wi': 363, u'urk': 363, u'rd\xfd': 363, u'iwa': 363, u'r\xfac': 363, u'ef\xf3': 
363, u'ojk': 362, u'\u0165mi': 362, u'b\u0161i': 362, u'dr\xed': 362, u'oaf': 362, u'm\xed\u0148': 361, u'yt\xfd': 361, u'ioa': 361, u'v\xe4t': 360, u'gab': 360, u'#kj': 359, u'rzd': 359, u'mau': 359, u'tta': 359, u'ptu': 359, u'byi': 359, u'jm\xfa': 358, u'#s\xfd': 358, u'ogo': 358, u'\xe4so': 357, u'gel': 357, u'\xfdza': 356, u'jro': 356, u'ya#': 356, u'aul': 356, u'eld': 356, u'urs': 356, u'\xfa\u017ea': 356, u'nkl': 355, u'#sz': 355, u'\u017eob': 355, u'mih': 355, u'sop': 354, u'sr\xed': 354, u'om\xfd': 354, u'viv': 354, u'zpl': 354, u'n#w': 354, u'cvi': 353, u'a\u010fo': 353, u'ub\xe9': 353, u'dap': 353, u'ez\xfa': 352, u'gor': 352, u'bp#': 352, u'jz\xe1': 351, u'gne': 351, u'gur': 351, u'\xf3ra': 351, u'ld#': 351, u'd\u017ei': 351, u'lh\xfa': 351, u'\u0161t\xfd': 351, u'a\u010dr': 351, u'ab\u0161': 351, u'rv\xfa': 351, u'\xed#\u010f': 350, u'gy\u0148': 350, u'ath': 350, u'\u017eli': 350, u'tig': 350, u'bku': 349, u'\xfdlo': 349, u'ege': 349, u'dup': 349, u'sor': 348, u'\xe1dl': 348, u'ge#': 348, u'\xedln': 348, u'\u010d#a': 348, u'gri': 348, u'sv\xe4': 347, u'uba': 347, u'e\u0165m': 347, u'lto': 347, u'b\u0148o': 347, u'xu#': 347, u'zu\xe1': 347, u'z\xfaf': 347, u'ead': 346, u'ey#': 346, u'\u013e#e': 345, u'aid': 345, u'lky': 345, u'd\u010d\xed': 345, u'av\u017e': 345, u'tba': 344, u'\xe1\u010do': 344, u'\xe4#d': 344, u'p\xe1l': 344, u't#\xfa': 343, u'#h\u013a': 343, u'deb': 343, u'rm\xe9': 343, u'\xe1rm': 343, u'hri': 342, u'eou': 342, u'#km': 342, u'r#\xfa': 342, u'ev\u010d': 341, u'\u013abk': 341, u'utk': 341, u'oam': 340, u'rp\xe1': 340, u'zef': 340, u'vy\xfa': 340, u'upa': 340, u'a\u0148a': 339, u'\u017edi': 339, u'uge': 338, u'\xf3ru': 338, u'c\xedn': 338, u'\xe4#\u010d': 338, u'ga\u010d': 338, u'hs#': 338, u'd#g': 338, u'p#e': 338, u'b#\u017e': 337, u'put': 337, u'zue': 337, u'u#\u0165': 336, u'eot': 336, u'rig': 336, u'\xfame': 335, u'zd#': 335, u'tod': 335, u'g#p': 335, u'\xf4sm': 335, u'yt\xe1': 334, u'ssi': 334, u'ha\u013e': 333, u'mre': 333, u'vy\u0148': 333, u'xn\xfa': 333, u'\u013a\u017ei': 332, u'm\xfd#': 332, u'cto': 332, u'naf': 332, u'ab\xfd': 332, u'iki': 332, u'yve': 331, u'da\u017e': 331, u'jnu': 331, u'euz': 331, u'jd\u017e': 331, u'deg': 331, u'yma': 330, u'm\xe1n': 330, u'ed\u017e': 330, u'rze': 329, u'\xe1#g': 329, u'eam': 329, u'bow': 329, u'gin': 329, u'lmu': 329, u'ows': 329, u'lvi': 329, u'h\xe1j': 328, u'mym': 328, u'olk': 328, u'ujt': 328, u'#\xf4s': 328, u'y\xfas': 327, u'i\u0148o': 327, u'\u010f#r': 327, u'nai': 327, u'f#v': 327, u'vav': 327, u'suc': 326, u'myl': 326, u'h\xe9m': 325, u'#ih': 325, u'ihe': 325, u'chm': 324, u'rli': 324, u'rif': 324, u'rri': 324, u'vlo': 323, u'mid': 323, u'mum': 323, u'\xfdsa': 323, u'\u010d#p': 323, u'pr\xfa': 322, u'\xfdri': 322, u'dke': 321, u'n\xedt': 321, u'lb\u0161': 321, u'f\xfar': 320, u'vro': 320, u'uss': 320, u'zot': 319, u'\xe1dk': 319, u'ijn': 318, u'hyp': 318, u'dvr': 317, u'mep': 317, u'ia\u010f': 317, u'k\xf4d': 317, u'dt#': 317, u'r#l': 317, u'oss': 316, u'ch\xfa': 316, u'pko': 316, u'g#n': 316, u'rf\xfa': 316, u'ep\xfa': 316, u'ilm': 316, u'onl': 315, u'\u0161#\u010d': 315, u'zhe': 315, u'g#o': 315, u'sup': 315, u'sau': 315, u'lzh': 314, u'elc': 314, u'#dh': 314, u'\xeds\u013e': 314, u'cal': 313, u'dox': 313, u'\xf3bi': 313, u'siz': 313, u'\xedp#': 313, u'go#': 313, u'ad\u013e': 312, u'pea': 312, u'ims': 311, u'utl': 311, u'c#h': 310, u'i#\xed': 310, u'erd': 310, u'\xf3ni': 309, u'akv': 309, u'\xfatl': 308, u'yan': 308, u'rgo': 308, u'b\xe1d': 308, u'run': 308, u'p#u': 308, u't#\u013e': 307, u'\xf3go': 
307, u'\u010d#v': 307, u'pa\u0161': 307, u'rvy': 307, u'tag': 307, u'nr#': 306, u'z#\u017e': 306, u'eug': 306, u'b\u0161\xed': 305, u'\u013evy': 305, u'new': 305, u'e\u013ev': 305, u'\xe1rf': 305, u'fit': 304, u'pnr': 304, u'kas': 304, u'ges': 304, u'iha': 304, u'og#': 304, u'sca': 303, u'vob': 303, u'dto': 303, u'\u017eou': 303, u'zd\u013a': 303, u'ndd': 303, u'yn\xe1': 303, u'k\xe1k': 302, u'ngr': 302, u'k\xfak': 302, u'z\xe1b': 302, u'up\xe1': 302, u'j\u010da': 302, u'g#k': 301, u'hci': 301, u'hp#': 301, u'mug': 300, u'\xeddr': 300, u'aln': 300, u'nbe': 300, u'rra': 300, u'zof': 299, u'\xfdzi': 299, u'\xe4\u0165d': 299, u'jev': 299, u'jea': 299, u'hre': 299, u'ap\u013a': 299, u'cot': 299, u'x#a': 299, u'uts': 299, u'r\xfad': 299, u'ovt': 298, u'jos': 298, u'ump': 298, u'rhe': 298, u'f#p': 298, u'\u010dbu': 298, u'av\xe4': 298, u'ps#': 297, u'\xe1h#': 297, u'elm': 297, u'\xe9#\u0165': 297, u'r\xfak': 297, u't\xeds': 296, u'tso': 296, u'atc': 296, u'ses': 296, u'hiv': 296, u'h\xe1c': 295, u'nof': 295, u'e#\xe1': 295, u'ukn': 294, u'#ct': 294, u'\xe9ni': 294, u'i\u0165u': 294, u'kab': 294, u'upm': 294, u'kv\xe1': 293, u'i\u0161t': 293, u'omk': 293, u'dac': 293, u'p#r': 293, u'\xfata': 292, u'h\xe1\u0148': 292, u'\u0161\u0161o': 292, u'\xfane': 292, u'gik': 292, u'ol\xe9': 292, u'is\u013e': 292, u'lci': 290, u'dk\xe1': 290, u'\xe1sk': 290, u'#cr': 289, u'\u010d\xed\u0161': 289, u'miu': 289, u'add': 289, u'evs': 289, u'r\xedh': 289, u'tus': 289, u'ry\u0165': 289, u'fu#': 289, u'#cs': 288, u'ife': 288, u'bca': 288, u'ip\xe1': 287, u'yh\u013e': 287, u'se\u0148': 287, u'ko\u0161': 287, u'\u010dke': 287, u'\xed\u0161n': 286, u'n\xf3z': 286, u'ozt': 286, u'rcu': 286, u'imy': 286, u'siv': 286, u'sm\xed': 286, u'vmi': 286, u'#k\xe1': 286, u'eac': 285, u'iml': 285, u'\xfdmk': 285, u'ak\xed': 285, u'\xedpy': 285, u'opc': 285, u't\u013eu': 285, u'ezt': 284, u'no\u010d': 284, u'ehs': 284, u'ft#': 284, u'\u017ein': 284, u'r\xe1p': 284, u'dki': 284, u'lve': 284, u'hes': 283, u'was': 283, u'\xf3nn': 283, u't\xf3m': 283, u'rys': 283, u'nid': 283, u'igu': 283, u'loz': 282, u'#s\u010d': 282, u'#wo': 282, u'\xe1\u017ed': 282, u'job': 282, u'nig': 282, u'ew#': 282, u'\u0161#r': 281, u'h\xe1z': 281, u'alg': 281, u'v\xedr': 281, u'v#\u0165': 281, u'ygi': 281, u'hyg': 281, u'\u0161am': 280, u'dby': 280, u'hab': 280, u'lh#': 280, u'tyc': 280, u'gn\xf3': 279, u'kun': 279, u'\u013eku': 279, u'bi\u010d': 279, u'ree': 279, u'#lt': 278, u'cid': 277, u'dys': 277, u'kir': 276, u'd\u017eo': 276, u'\u010d#i': 276, u'ypy': 276, u'\xf3pi': 276, u'ul\xf3': 276, u'gle': 276, u'wat': 275, u'pn\xfa': 275, u'uer': 275, u'\xedbe': 274, u'rha': 274, u'swi': 274, u'hao': 273, u'omm': 273, u't#g': 272, u'ad\xfa': 272, u'\xfa#\u0165': 271, u'c#\xfa': 271, u'\xfark': 271, u'iis': 271, u'row': 271, u'xen': 271, u'oom': 271, u'#\xe9r': 270, u'adb': 270, u'\u010dmi': 270, u'r\xfat': 270, u'\u010dby': 270, u'zr\xfd': 269, u'hmo': 269, u'ehk': 269, u's#\u0161': 269, u'z\xe1n': 269, u'cte': 269, u'zy\u010d': 269, u'\u0148me': 268, u'z\xe1t': 268, u'usc': 268, u'#xe': 268, u'tho': 268, u'i\u0161u': 267, u'dd\xe1': 267, u'#yo': 267, u'\u013abi': 267, u'ldn': 266, u'iii': 266, u'ip#': 266, u'cof': 266, u'rui': 266, u'ull': 266, u'mcc': 266, u'rza': 265, u'ez\xed': 265, u'lo\u010f': 265, u'aps': 265, u'saj': 265, u'na\u0148': 265, u'ut\xfa': 265, u'd\u013eu': 265, u'#p\xfa': 265, u'db\xe1': 264, u's\xe1d': 264, u'th#': 264, u'x#s': 264, u'vby': 264, u'iss': 264, u'p##': 264, u'\u017e\u0148o': 263, u'n\xf3r': 263, u'maa': 263, u'uit': 263, 
u'fa\u0165': 263, u'r#\u0161': 263, u'cre': 262, u'mai': 262, u'ctu': 262, u'zij': 262, u'dtl': 262, u'lip': 262, u'dez': 262, u'bye': 262, u'ngt': 261, u'auz': 261, u'xii': 261, u'ua#': 261, u'map': 260, u'rg#': 260, u'sea': 260, u'\xfdle': 260, u'\xe1#\u013e': 259, u'l\xfaz': 259, u'c#f': 259, u'orl': 259, u'gto': 259, u'h#\u0165': 259, u'zi\u0161': 259, u'ecu': 259, u'paj': 259, u'c\xfan': 258, u'umb': 258, u'\xf3rs': 258, u'ymu': 258, u'wif': 258, u'ov\xe4': 258, u'rdu': 258, u'op\xfa': 258, u'r\xedc': 257, u'\u013e##': 256, u'gau': 256, u'jor': 256, u'\xf3du': 256, u'fia': 255, u'urb': 255, u'#n\xf3': 255, u'yli': 255, u'n\xedn': 255, u'eca': 255, u'bad': 255, u'km#': 255, u'yjs': 255, u't#f': 254, u'a#\xe1': 254, u'ift': 254, u'unt': 254, u'bdi': 254, u'abc': 254, u'og\xe9': 254, u'zb\xe1': 254, u'lso': 253, u'z#\u010f': 253, u'fuk': 253, u'am\xe9': 252, u'zpa': 252, u'ekd': 252, u'yov': 252, u'mop': 252, u'#d\u017e': 252, u'jvh': 252, u'aas': 251, u'ub\xe1': 251, u'\xfady': 251, u'avl': 251, u'cap': 250, u'lei': 250, u'lai': 250, u'jdu': 250, u'tma': 250, u's\u0148u': 249, u'e\u010fu': 249, u'fou': 249, u'cty': 249, u'et\xe9': 249, u'hyd': 249, u'fac': 249, u'hir': 249, u'\xfaty': 248, u'\xe9nu': 248, u'nch': 248, u'nss': 248, u'rl\xed': 248, u'vrs': 248, u'\xe1zs': 247, u'\u0165#\u0165': 247, u'eu#': 247, u'ck#': 247, u'yor': 247, u'id\xfa': 247, u'vbu': 247, u'uco': 246, u'\xfdne': 246, u'u\u013ek': 246, u'f#s': 246, u'uy#': 246, u't\xfd\u010d': 246, u'eye': 245, u'nba': 245, u'hte': 245, u'uty': 245, u'ty\u010d': 245, u'r\xed\u0165': 245, u'het': 244, u'dzm': 244, u'\u010dek': 244, u'h\u010d\xed': 244, u'dhp': 244, u'ip\xed': 244, u'ys\xed': 244, u'\xe1zu': 243, u'ofo': 243, u'\u0148#i': 243, u'ykr': 243, u'fut': 243, u'\xe9no': 242, u'ip\xfa': 242, u'eas': 242, u'oal': 242, u'hk\xfd': 242, u'eo\u010d': 242, u'bed': 242, u'sha': 242, u'\u0161ik': 242, u'jur': 242, u'efr': 242, u'giz': 241, u'l\xf3n': 241, u'fie': 240, u'nza': 240, u'\xe4dz': 240, u'v\xe4d': 240, u'ia\u017e': 240, u'za\u0161': 240, u'li\xed': 240, u'lum': 240, u'lun': 240, u'pi\u010d': 240, u'nbu': 240, u'v\u010d#': 239, u'v\xf4d': 239, u'\xedmo': 239, u'rkm': 239, u'\xe9ty': 239, u'biv': 239, u'\xe1nc': 239, u'hus': 239, u'ize': 239, u'noe': 238, u'pta': 238, u'ndm': 238, u'coe': 238, u'\xe1nn': 238, u'bvy': 237, u'elh': 237, u'ro\xfa': 237, u'l#\u010f': 237, u'ecd': 237, u'rey': 237, u'bu\u013e': 237, u'\u0148#c': 236, u'v\xe1k': 236, u'yh\xe1': 236, u'pim': 236, u'nn#': 236, u'ir#': 235, u'\u0148a\u017e': 235, u'\u0148#h': 235, u'sv\u010d': 235, u'\xe4#r': 235, u'gas': 235, u'hoa': 235, u'acc': 235, u'e\u017ea': 234, u'yln': 234, u'\xf3mn': 234, u'ikc': 234, u'nu\u017e': 234, u'os\xe1': 233, u'ezc': 233, u'#eb': 233, u'vik': 233, u'dhl': 233, u'g\xe9r': 233, u'xty': 232, u'v\u010di': 232, u's\xe1#': 232, u'\u010f#\u010d': 232, u'ao#': 232, u'\u0161it': 232, u'dmy': 232, u'#ly': 232, u'fig': 231, u'oj\u0161': 231, u'\u0165me': 231, u'tv\xfd': 231, u'sf#': 231, u'utb': 231, u'ax#': 231, u'l#g': 231, u'\xedky': 231, u'\xfdbu': 230, u'dv\xfd': 230, u'urt': 230, u't\u0161\xed': 230, u'km\xe9': 230, u'y\u0161n': 230, u'sez': 230, u'baz': 230, u'r\xedv': 230, u'pr#': 230, u'yvy': 229, u'h\u010du': 229, u'm\xedv': 228, u'igo': 228, u'e\xfan': 228, u'tet': 228, u'\xe1vc': 228, u'ofu': 227, u'xpo': 227, u'maz': 227, u'jkr': 227, u'bah': 227, u'#sd': 227, u'\xf3zy': 226, u'\u0148#e': 226, u'\u017ek\xe1': 226, u'ozg': 226, u'rar': 226, u'alv': 226, u'rtm': 226, u'\u010f#c': 226, u'lme': 226, u'\u0148#\u010d': 225, u'adt': 
225, u'l\u017ee': 225, u'#r\xe9': 225, u'n\xe9r': 225, u'id\xe9': 225, u'jl\xe1': 225, u'a\u017el': 225, u'z\xfar': 225, u'hit': 225, u'fst': 224, u'ejo': 224, u'ojr': 223, u'hr\xed': 223, u'\xedms': 223, u'#ik': 223, u'v\xeds': 223, u'\xfakl': 223, u'swo': 223, u'byj': 223, u'muk': 222, u'pts': 222, u'cd#': 222, u'ouh': 222, u'wob': 222, u'dul': 222, u'\u017ebo': 221, u'yvr': 221, u'a\u010fu': 221, u'dog': 221, u'kip': 221, u'\u010d#z': 221, u'luc': 221, u't\u013eo': 221, u'\u017e#\u0161': 221, u'rms': 221, u'\xedbu': 220, u'ydl': 220, u'ig\xe9': 220, u'ni\xe1': 220, u'\u017eid': 220, u'a\u017em': 220, u'hil': 220, u'fis': 219, u'mnu': 219, u'hrb': 219, u'boe': 219, u'vas': 219, u'hts': 219, u't#\u0161': 218, u'cis': 218, u'\xe1#\u010f': 218, u'ogn': 218, u'zl\xe1': 218, u'que': 218, u'apt': 218, u'o\u0161i': 218, u'oxn': 218, u'\u010dti': 218, u'e\u0148m': 218, u'o\xfav': 217, u'p\xfat': 217, u'gme': 217, u'esf': 217, u'dag': 217, u'rau': 217, u'nfi': 217, u'hac': 216, u'\u0161#o': 216, u'mrh': 216, u'\xe1ut': 216, u'#zu': 216, u'#\xe1u': 216, u'#\u010dr': 216, u'##\u010f': 216, u'ez\xf3': 215, u'e\u010da': 215, u'dk\xfd': 215, u'tkl': 215, u'\xedse': 215, u'd\xe1f': 215, u'lka': 214, u'r\u017eb': 214, u'rlo': 214, u'jpo': 214, u'enb': 214, u'drv': 213, u'exa': 213, u'ley': 213, u'rds': 213, u'od\u010d': 213, u'\u017e#\xfa': 213, u'tei': 213, u'\u010fte': 213, u'#gm': 212, u'\u013e#r': 212, u'oec': 212, u'\u013aha': 212, u'f#k': 212, u'd\u013ah': 212, u'\xfapn': 211, u'\xedrn': 211, u'uze': 211, u'tj#': 211, u'\u013abe': 211, u'tog': 211, u'app': 211, u'il\xfa': 211, u'utt': 211, u'bac': 211, u'#\u0165i': 211, u'ocu': 211, u'haa': 210, u'rzo': 210, u'lol': 210, u'ymo': 210, u'ra\u010f': 210, u'n\u0161e': 210, u'ouo': 210, u'thi': 210, u'ek\xfa': 210, u'\xe1sn': 210, u'f\xe1m': 210, u'osm': 209, u'orf': 209, u'a\u013ek': 209, u'#qu': 209, u'#gb': 208, u'pso': 208, u'do\u017e': 208, u'\xfa\u013ea': 208, u'orr': 208, u'ubn': 208, u'lh\xe1': 208, u'saa': 208, u'fla': 208, u'fot': 207, u'\xfd#\u013e': 207, u'seg': 207, u'coh': 207, u'lii': 207, u'isd': 207, u'rkv': 206, u'zet': 206, u'l\xed\u010d': 206, u't\xedl': 205, u'snu': 205, u'oet': 205, u'chs': 205, u'nus': 205, u'rai': 205, u'm#\u0165': 204, u'pcu': 204, u'gla': 204, u'lag': 204, u'\xedpk': 204, u'b\xedt': 204, u'\xe1f\xed': 204, u'\xe1r\u0148': 204, u'avm': 204, u'\xfach': 203, u'\xe1mu': 203, u'urc': 203, u'#\xfa\u013e': 203, u'maf': 203, u'\u013e#u': 203, u'm\xe4k': 203, u'ot\xe9': 203, u'sdi': 203, u'arz': 203, u'aml': 202, u'cig': 202, u'eev': 202, u'aue': 202, u'yu\u010d': 202, u'gr\xf3': 202, u'ivy': 201, u'kim': 201, u'joh': 201, u'\u017e\xe9r': 201, u'zek': 201, u'v\u0155\u0161': 201, u'as\u0148': 201, u'ciz': 200, u'\u013eak': 200, u'bas': 200, u'\u013emo': 200, u'i\u010dt': 200, u'ibr': 200, u'hau': 199, u'zch': 199, u'd#\u013e': 199, u'h\xfal': 199, u'k\xfan': 199, u'mde': 199, u'xan': 199, u'f#n': 199, u'efa': 199, u'\u0161em': 198, u'uc#': 198, u'aiv': 198, u'a\u017e\xe9': 198, u'hia': 198, u'k\xe1d': 197, u'rbt': 197, u'emd': 197, u'y\u010d\xed': 197, u'#ip': 197, u'\xeddn': 197, u'ho\u013e': 197, u'\xe9zi': 197, u'hic': 197, u'c#l': 196, u'elt': 196, u'el\u010d': 196, u'tut': 196, u'nje': 196, u'\xfd\u0161#': 195, u'kya': 195, u'ehe': 195, u'z\xe1\u0161': 195, u'lma': 195, u'uth': 195, u'rle': 195, u'\xe1\u017e#': 194, u'v\u0148a': 194, u'jbe': 194, u'gil': 194, u'\xe8so': 194, u'\xed\u017eo': 194, u'j#\u0165': 193, u'\xe9n#': 193, u'rbi': 193, u'vdy': 193, u'n\xe9v': 193, u'eon': 193, u'k\u0155m': 193, u'bik': 193, 
u'ej\xed': 193, u'w#y': 192, u'a#x': 192, u'dk\xe9': 192, u'sic': 192, u'rp#': 192, u'\xfav\xe4': 192, u'r\u0148a': 192, u'fd#': 192, u'ezb': 191, u'd\u0161\xed': 191, u'\xedr#': 191, u'\xfa\u010dk': 191, u'#\xfah': 191, u'l\u017en': 191, u'lul': 191, u'oj\u010d': 190, u'#gs': 190, u'erf': 190, u'cyn': 190, u'orp': 190, u'tk\xe1': 190, u'poo': 190, u'u#\xed': 190, u'ccr': 190, u'efd': 190, u'pau': 190, u'xn\xe1': 190, u'lvo': 190, u'eul': 189, u'ldo': 189, u'\xe1he': 189, u'\u013ebu': 189, u'l\xf3r': 189, u'aag': 189, u'mp\xe1': 189, u'ynt': 189, u'y\u0148u': 189, u'oz\xe1': 188, u'o\u013eo': 188, u'lbu': 188, u'ltr': 188, u'bii': 188, u'lbe': 188, u'#cc': 187, u'\xe4te': 187, u'urm': 187, u'z\xedt': 187, u'v\xedk': 187, u'hti': 187, u'zyh': 187, u'p\u010dn': 187, u'jud': 187, u'ar\u010d': 187, u'\xfdze': 186, u'\u0165aj': 186, u'rk#': 186, u'ob\u0165': 186, u'who': 186, u'fov': 186, u'z\xe1h': 186, u'jci': 186, u'rth': 186, u'\u017eam': 186, u'nni': 186, u'b\u0165a': 186, u'jv\xe1': 185, u'lls': 185, u'r\u010da': 185, u'how': 185, u'm\xfdt': 185, u'b##': 185, u'igm': 185, u'hek': 184, u'\u017e\u0161o': 184, u'roo': 184, u'n#\u010f': 184, u'x#p': 184, u'cca': 184, u'aft': 184, u'ad\u017e': 183, u'#ah': 183, u'n\xe1\u010f': 183, u'rd\xe9': 183, u'i\u0148u': 183, u'you': 183, u'kc\xed': 183, u'\xe1\u010f#': 183, u'afo': 183, u'fa\u0161': 183, u'zgo': 182, u'aun': 182, u's\u010da': 182, u'aar': 182, u'bi#': 182, u'\u010foc': 182, u'sof': 181, u'zr\xfa': 181, u'\xe9vr': 181, u'#b\xf6': 181, u'rki': 181, u'le\u010d': 181, u'rn#': 180, u'au#': 180, u'jsi': 180, u'\u010d#o': 180, u'ipc': 180, u'b\xedn': 180, u'\xedsm': 180, u'e\xfam': 180, u'emr': 179, u'uz\xe1': 179, u'#ed': 179, u'#r\xfc': 179, u'ieu': 179, u'ckr': 179, u'rr#': 179, u'ybl': 179, u'#cv': 178, u'noj': 178, u'eef': 178, u'bop': 178, u'aya': 178, u'uj#': 178, u'mme': 178, u'\u010d\xed\u0148': 178, u'u\u0161k': 178, u'\u017ete': 178, u'hof': 178, u'lea': 178, u'\xedke': 178, u'#dc': 178, u'u\u017es': 178, u'ykn': 178, u'fid': 177, u'azl': 177, u'\u010dar': 177, u'ev\u0148': 177, u'vy\u0165': 177, u'ish': 177, u'\u0148ac': 176, u'zci': 176, u'mri': 176, u'ifo': 176, u'jod': 176, u'nd\xe9': 176, u'y\u0165a': 176, u'b#u': 176, u'#db': 176, u'ush': 176, u'os\xe9': 175, u'oj\xe1': 175, u'lds': 175, u'szb': 175, u'zbp': 175, u'ic#': 175, u'zoc': 174, u'aak': 174, u'\xe9na': 174, u'apy': 174, u'g#b': 174, u'zpt': 174, u'\u013ek\xed': 174, u'st\xf4': 174, u'bij': 174, u'ajj': 174, u'i\u010dm': 174, u'ofs': 173, u'\xed#\xed': 173, u'#\xfab': 173, u'ldt': 173, u'pka': 173, u'z\xedm': 173, u'#ia': 173, u'kr\xfa': 173, u'sav': 173, u'gda': 173, u'akp': 173, u'sln': 173, u'nui': 173, u'vce': 173, u'jns': 172, u'\xfa\u0161t': 172, u'sey': 172, u'ogy': 172, u'av\u0155': 172, u'c#\u0161': 171, u'dr#': 171, u'z#\u013e': 171, u'dym': 171, u'#t\xf3': 171, u'\xe9rs': 170, u'\xe9rm': 170, u'lgs': 170, u'\xf4da': 170, u'lln': 170, u'lhy': 170, u'\u010dud': 170, u'itc': 170, u'f#b': 170, u's\xfac': 170, u's\xfap': 170, u'\u0161#c': 169, u'kak': 169, u'urn': 169, u'\xe4#e': 169, u'exe': 169, u'exo': 169, u'za\u0148': 169, u'ko\u017e': 169, u'\xfd\u010di': 169, u'r#\u010f': 169, u'p\u0148a': 168, u'lca': 168, u'ob\u017e': 168, u'y#\xed': 168, u'ibs': 168, u'l\xfap': 168, u'ia\u010d': 168, u'tt#': 168, u'#ug': 168, u'uhm': 168, u'#d\xe9': 168, u'aan': 167, u'rnn': 167, u'#j\xe1': 167, u'vba': 167, u'ogl': 167, u'cja': 167, u'fru': 166, u'\u0161ar': 166, u'hr\xf4': 166, u'doy': 166, u'v\xe1z': 166, u'\u010deb': 166, u'oyl': 166, u'gsp': 166, u'\xfd\u0148#': 
166, u'\xf3li': 166, u'\xe1bi': 166, u'\u0161mi': 165, u'y#\u0165': 165, u'ymn': 165, u'zhl': 165, u'agh': 165, u'dyc': 165, u'e\xfap': 165, u'uha': 165, u'lv#': 165, u'jef': 164, u'rf#': 164, u'ev\u0161': 164, u'lzo': 164, u'k\xfav': 164, u'\u0161l\xfd': 164, u'lhe': 164, u'acj': 164, u'ghe': 164, u'\xf4jm': 164, u'a\u0161k': 164, u'uhr': 164, u'yzb': 163, u'inm': 163, u'loj': 163, u'doa': 163, u'a\u013en': 163, u'cl\xe1': 163, u'\u010d#m': 163, u'vr\u0165': 163, u'azb': 162, u'lro': 162, u'cme': 162, u'ogm': 162, u'rwa': 162, u'bto': 162, u'yt\xe9': 162, u'n#\u0165': 162, u'\xe1be': 162, u'f#j': 162, u'rb\xe1': 161, u'em\xf3': 161, u'eil': 161, u'ayo': 161, u'yzu': 161, u'hst': 161, u'rt\xfd': 161, u'ek\u010d': 161, u'bek': 161, u'ney': 161, u'on\xed': 160, u'oui': 160, u'\xf6ge': 160, u'\u010d\u0161m': 160, u'dad': 160, u'pio': 160, u'cny': 160, u'#tj': 160, u'ymy': 159, u'ah\xe9': 159, u'g#j': 159, u'uye': 159, u'\xe1pu': 159, u'\u010d#k': 159, u'ipe': 159, u'#a\u0161': 159, u'rlu': 159, u'fel': 159, u'usz': 159, u'avt': 159, u'fr\xe1': 158, u'k#\xe1': 158, u'amm': 158, u'yet': 158, u'\xfand': 158, u'rtf': 158, u'r\xfah': 158, u'v#\xe1': 158, u'r\xe8s': 158, u'cki': 158, u'oth': 158, u'y\u013eu': 158, u'nue': 158, u'#k\u0155': 158, u'irg': 157, u'osc': 157, u'aer': 157, u't\u0161i': 157, u'eys': 157, u'mir': 157, u'sfe': 157, u'wle': 157, u'\xe1c#': 157, u'wol': 157, u'\u0148te': 157, u'rzu': 156, u'f\xf3l': 156, u'may': 156, u'kma': 156, u'#ea': 156, u'ouy': 156, u'vai': 156, u'u#w': 156, u'd#\u010f': 156, u'pi\xe1': 156, u'kno': 155, u'tf\xf3': 155, u'l\u010di': 155, u'vii': 155, u'sir': 155, u'olt': 155, u'acs': 155, u'zir': 155, u'thy': 155, u'x#v': 155, u'zaa': 155, u'\xfa\u017ee': 155, u'xno': 155, u'owi': 155, u'#gd': 154, u'lza': 154, u'uca': 154, u'inz': 154, u'am\xe1': 154, u'ovh': 154, u'ppa': 154, u'ugo': 154, u'sij': 154, u'rpo': 154, u'\xfd#g': 154, u'cn\xed': 154, u'd\xe9\u0161': 154, u'lva': 154, u'i\u0165a': 153, u'\xed\u010do': 153, u'tb\xe1': 153, u'\xedls': 153, u'id\u017e': 153, u'\xf6tt': 153, u'tau': 153, u'byl': 153, u'#oo': 152, u'rzs': 152, u'gmo': 152, u'uij': 152, u'\u010fa\u013e': 152, u'bh\xe1': 152, u'er\xe8': 152, u'oat': 152, u'owl': 152, u'nz\xfa': 151, u'p\xf6t': 151, u'\u017e#f': 151, u'soh': 151, u'amt': 151, u'#s\xf3': 151, u'doe': 151, u'\xe1h\u013e': 151, u'jsl': 151, u'oh\u0155': 151, u'exc': 151, u'zig': 151, u'ms#': 151, u'#p\xf6': 151, u'#ou': 150, u'ngy': 150, u'g#z': 150, u'#\u0161l': 150, u'\xe4kk': 150, u'\u0155da': 150, u'dl\xed': 150, u'ko\u010d': 150, u'h\u0155d': 150, u'guv': 150, u'gbp': 149, u'\u017eom': 149, u'ils': 149, u'cok': 149, u'a\u0165t': 149, u'rgh': 148, u'mio': 148, u'ous': 148, u'jka': 148, u'izs': 148, u'te\xf3': 148, u'jm\xfd': 147, u'\xe1zr': 147, u'#f\xfc': 147, u'ubi': 147, u'n\u0161u': 147, u'm\xed\u013e': 147, u'ouv': 147, u'xce': 147, u'ht#': 147, u'h\u010do': 147, u'yrm': 147, u'e\xf3r': 147, u'koe': 147, u'ig\xe1': 147, u'yny': 147, u'bu\u010d': 146, u'dvs': 146, u'k\xfd\u0148': 146, u'z#\u0161': 146, u'wor': 146, u'sh#': 146, u'l\xe1\u017e': 146, u'dea': 146, u'p#c': 146, u'ijt': 145, u'\u010dba': 145, u'dc#': 145, u'kys': 145, u'sut': 145, u'ltu': 145, u'#aa': 145, u'goe': 145, u'\xe1z#': 144, u'\xf3za': 144, u'd\u0161i': 144, u'br\xed': 144, u'uz\xe8': 144, u'z\xe8s': 144, u'\u017ek\xfa': 144, u'g#d': 144, u'\xfari': 144, u'c\u0165o': 144, u'\u010f#l': 144, u'cc#': 144, u'n\u017ei': 144, u'#l\xf3': 144, u'rdt': 144, u'hez': 143, u'zl\xed': 143, u'll\xe1': 143, u'\xfa\u0161o': 143, u'si\u010d': 143, u'ue#': 
143, u'nif': 143, u'lm#': 143, u'\u017eik': 143, u'shm': 143, u'p#\u010d': 143, u'b\xf6g': 143, u'frr': 142, u't\xe9z': 142, u'xov': 142, u'tr\xfa': 142, u'aes': 142, u'in\u017e': 142, u'o\u010f#': 142, u'hnb': 142, u'l\xf3p': 142, u'pog': 142, u'oua': 142, u'b#\u010d': 142, u'r\xe1h': 142, u'bag': 142, u'sly': 142, u'esb': 142, u'\xfdcv': 142, u'ryl': 142, u'ow#': 142, u'#c\u0165': 141, u'byo': 141, u'\u0161#t': 141, u'\u0165#\xed': 141, u'ius': 141, u'f\xfcl': 141, u'z\u013ea': 141, u'de\u0161': 141, u'cst': 141, u'fiu': 140, u'wes': 140, u'uzb': 140, u'eit': 140, u'#sh': 140, u'\xe4tk': 140, u'\xfcle': 140, u'as\xe1': 140, u'dij': 140, u'ool': 140, u'd\u0161t': 139, u'ob\u013e': 139, u'or\u010d': 139, u'l#\u013e': 139, u'pt\xfd': 139, u'atz': 139, u'tp#': 139, u'od\xe9': 139, u'kpo': 139, u'\xf3do': 139, u'ys\u0165': 139, u'own': 139, u'rzy': 138, u'jel': 138, u'uct': 138, u'bo\u010d': 138, u'sho': 138, u'u\u010dk': 138, u'apk': 138, u's\xe1n': 138, u'm\xfdc': 138, u'goo': 138, u'udy': 138, u'r#g': 138, u'i\u010du': 138, u'\xe4\u0165r': 137, u'mma': 137, u'eof': 137, u't\xf4l': 137, u'tym': 137, u'mce': 137, u'enr': 137, u'\u0161ny': 137, u'byd': 137, u'p\u0148u': 136, u'r\u0161a': 136, u'ub\xfd': 136, u'\u013ebo': 136, u'\xe9te': 136, u'od\u017e': 136, u'\u010d\u0161u': 136, u'ojt': 136, u'fas': 136, u'\u0117ov': 135, u'eau': 135, u'zha': 135, u'chw': 135, u'#\u010du': 135, u'mpt': 135, u'u\u017eu': 135, u'anj': 135, u'\u010dsk': 134, u'\u010dbe': 134, u's\u0165m': 134, u'cp#': 134, u'o\u0161\xed': 134, u'l\xe1\u0148': 134, u'rz#': 133, u'\xf3ci': 133, u'#ay': 133, u'jbo': 133, u'yus': 133, u'r\xed\u0161': 133, u'jdi': 133, u'm\xf3c': 132, u'mna': 132, u'\u0161el': 132, u'psu': 132, u'kev': 132, u'do\u0148': 132, u'mah': 132, u'oes': 132, u'ch\u013e': 132, u'qua': 132, u'\xed\u013ea': 132, u'ubd': 132, u'\u0165ti': 132, u'sei': 132, u'x#n': 132, u'b\xeds': 132, u'lih': 132, u'e\u010fa': 132, u'#d\xed': 132, u'\xed\u017e#': 132, u'#py': 132, u'usn': 132, u'j\u0165a': 131, u'gsk': 131, u'j\xe1n': 131, u'nv\xe1': 131, u'emk': 131, u'unc': 131, u'e#\u0148': 131, u'd\u010du': 131, u'e\u0161u': 131, u'\u017ed#': 131, u'b\xe1s': 131, u'aj\u0165': 131, u'icc': 131, u'hub': 131, u'fio': 130, u'kn\xe9': 130, u'now': 130, u'nck': 130, u'#wh': 130, u'r\u010d\xed': 130, u's#\u0165': 130, u'xua': 130, u'\u0165oc': 130, u'pt\xe1': 130, u'oog': 130, u'tuk': 130, u'\u0161ni': 130, u'm\xf3n': 129, u'\u0148#\u017e': 129, u'h\u013eo': 129, u'\xe1lm': 129, u'ydr': 129, u'b#c': 129, u'l\xe9r': 129, u'ud\u0148': 129, u'b\u013e\xfa': 129, u'hae': 128, u'guc': 128, u'\xe1\u010dt': 128, u'hwa': 128, u'wit': 128, u'jku': 128, u'n\xedh': 128, u'ewa': 128, u'\u017e#\u010f': 128, u'mmu': 128, u'h#\xe1': 128, u'irb': 127, u'hee': 127, u'bcu': 127, u'eyc': 127, u'm\xfat': 127, u'mps': 127, u'su\u017e': 127, u'\xf3ro': 126, u'm#w': 126, u's\xe9#': 126, u'hl\xfa': 126, u'h\xe1r': 125, u'ls#': 125, u'f#z': 125, u'rc#': 125, u'\u010de\u010d': 125, u'\xe1lk': 125, u'xpl': 125, u'mcg': 125, u'ryu': 125, u'ys#': 125, u'mso': 125, u'ujg': 124, u'z\xedd': 124, u'jgu': 124, u'aou': 124, u'\xfdl#': 124, u'hy\u013e': 124, u'hm\xfa': 124, u'sos': 123, u'pnu': 123, u'\u010dna': 123, u'\xe4#j': 123, u'pch': 123, u'm\xe9h': 123, u'l\xe1h': 123, u'ood': 123, u'biz': 123, u'\u017eed': 123, u'p#h': 123, u'efl': 123, u'ri\u017e': 123, u'ta\u0148': 123, u'eb\xfd': 123, u'azk': 122, u't\xe9k': 122, u'\xf3ze': 122, u'aio': 122, u'eid': 122, u's\u0165d': 122, u'ehy': 122, u'euf': 122, u'\u0161#h': 122, u'ues': 122, u'ag\xf3': 122, 
u'nt\xe9': 122, u'asv': 122, u'cgu': 122, u'n#\u013e': 122, u'mke': 122, u'p\xe1k': 122, u'\u0161#a': 121, u'r\u0165r': 121, u'#sb': 121, u't\u0161o': 121, u'aum': 121, u'r\u017eo': 121, u'yp\xfa': 121, u'vuz': 121, u'b#i': 121, u'da\u010d': 121, u'geb': 120, u't\xe9n': 120, u't\xfam': 120, u'\xe4t\xe9': 120, u'zm\xfa': 120, u'ucu': 120, u'dti': 120, u'up\u010d': 120, u'izl': 120, u'k#g': 119, u'\xedra': 119, u'a\u013eb': 119, u'vti': 119, u'b\u017ea': 119, u'lty': 119, u'\xedh#': 119, u'j\xe1r': 119, u'an\xe7': 119, u'czo': 118, u'war': 118, u'\u013eac': 118, u'v\xe1w': 118, u'r\u017eu': 118, u'r\u017ee': 118, u'z#\u0165': 118, u'\xe4#b': 118, u'\xe4#u': 118, u'#z\u013e': 118, u'#iv': 118, u'hk\xe1': 118, u'e\u0161o': 118, u'itm': 118, u'\u0165kr': 118, u'cop': 118, u'x\xedn': 118, u'ox\xed': 118, u'nag': 118, u'pav': 118, u'ews': 118, u'ejw': 118, u'#cd': 118, u'fao': 118, u'lv\xe1': 118, u'uki': 117, u'yz\xe1': 117, u'sm#': 117, u'dow': 117, u'\xe1wa': 117, u'bkl': 117, u'uil': 117, u'\xe1\u017el': 117, u'#pk': 117, u'eok': 117, u'dpu': 117, u'dmo': 117, u'piv': 117, u'by\u013e': 117, u'fii': 116, u'inj': 116, u'cm#': 116, u'wie': 116, u'g#c': 116, u'iap': 116, u'\u010d#d': 116, u'mt#': 116, u'egn': 116, u'a\u010d\xfa': 116, u'ul\xfa': 116, u'gou': 116, u'scu': 115, u'cra': 115, u'bry': 115, u'k#\u0165': 115, u'\u013eap': 115, u'lke': 115, u'\xe4#\u017e': 115, u'uya': 115, u'oun': 115, u'ouz': 115, u'hoj': 115, u'\xe1v\u0148': 115, u'k\xe1i': 114, u'b\xfat': 114, u'kn\xed': 114, u'ijo': 114, u'\xf3zi': 114, u'\xe1id': 114, u'sre': 114, u'd\u017ee': 114, u'\xe9#\xed': 114, u'axo': 114, u'\u0144sk': 114, u'zi\xed': 114, u'\u017e\xedt': 114, u'dta': 114, u'zym': 114, u'lyo': 114, u'f#m': 114, u'\xfaha': 114, u'a\u0161r': 114, u'#k\xf4': 114, u'\u013e#c': 113, u'e\xe1r': 113, u'y#w': 113, u'i\u0165o': 113, u'yer': 113, u'ah\xfd': 113, u'roi': 113, u'oup': 113, u'jon': 113, u'ndt': 113, u'gs#': 113, u'd\xedd': 113, u'u\u010ft': 113, u'av\u010d': 113, u'j#\xe9': 112, u'\u0161is': 112, u'hr#': 112, u'\xb0c#': 112, u'\xfdu\u010d': 112, u'unh': 112, u'i\u013eu': 112, u's\u010d\xed': 112, u'iao': 112, u'\u010d#\u010d': 112, u'n\xe9z': 112, u'v\xfdu': 112, u'egg': 112, u'bya': 112, u'\u0161er': 112, u'ul\xed': 112, u'u\u017ea': 112, u'hyl': 112, u'j\xe1l': 112, u't\xe1k': 111, u'\xe1\u0161n': 111, u'g#t': 111, u'u\u010db': 111, u'\xedpe': 111, u'\xfdlk': 111, u'mof': 111, u'ild': 111, u'gon': 111, u'dpl': 111, u'ebh': 111, u'uzo': 110, u'#\xb0c': 110, u'rc\xed': 110, u'icy': 110, u'n\xedv': 110, u'lew': 110, u'ih#': 110, u'pe\u017e': 110, u'\xfd\u010de': 110, u's\xfam': 110, u'ebl': 110, u'\xfdns': 109, u'tts': 109, u'oei': 109, u'#rw': 109, u'ilv': 109, u'opk': 109, u'rvu': 109, u't\xed\u017e': 108, u'\xf3rn': 108, u'dvl': 108, u'wis': 108, u'#\xf3p': 108, u'kha': 108, u'\u010fan': 108, u'zag': 108, u'ccs': 108, u'b#f': 108, u'n\u010du': 108, u'oop': 108, u'rvn': 108, u'oft': 107, u'c\xe9r': 107, u'aup': 107, u'#bh': 107, u'oen': 107, u'#f\xfa': 107, u'dc\xe9': 107, u'vt\xe1': 107, u'oub': 107, u'\xfarl': 107, u'rd\u0161': 107, u'zec': 107, u'ekm': 107, u'hl\xf3': 107, u'owa': 107, u'cza': 106, u'\u0163ic': 106, u'azp': 106, u'oz\xfd': 106, u'wil': 106, u'key': 106, u'#\u0163i': 106, u'#+#': 106, u'\u010d#t': 106, u'sp\xfa': 106, u'\xfasm': 106, u'hym': 106, u'e\u013eb': 106, u're\u013e': 106, u'ioe': 106, u'vv#': 106, u'yjd': 106, u'irt': 105, u'sva': 105, u'eap': 105, u'geh': 105, u'zd\xed': 105, u'to\u0161': 105, u'd\u017e\xe1': 105, u'v\xe9r': 105, u'apc': 105, u'uml': 105, u'\u017eda': 105, 
u'zip': 105, u'\xe1sy': 105, u'\xe4zi': 105, u'tu\u0161': 105, u'hur': 105, u'#kh': 105, u'\xe4t\xed': 104, u'mii': 104, u'kuk': 104, u'mu\u0161': 104, u't\xfan': 104, u'ic\u0103': 104, u'i\xe8r': 104, u'uid': 104, u'ui#': 104, u'epp': 104, u'eth': 104, u'rh\xe1': 104, u'vbe': 104, u'ba\u0161': 104, u'deu': 104, u'o\u013e\u0148': 104, u'igy': 104, u'ynn': 104, u'\xfapo': 103, u'#gy': 103, u'ai#': 103, u'uzu': 103, u'\xed\u010de': 103, u'\u010d\xedc': 103, u'l\u017e\xed': 103, u'\xed\u013en': 103, u'\u0155\u0161i': 103, u'ihr': 103, u'etm': 103, u'hth': 103, u'ghu': 103, u'#\u017eo': 103, u'\u017ear': 103, u'\xe9rn': 102, u'ir\xf3': 102, u'#gn': 102, u'gns': 102, u'eey': 102, u'mei': 102, u'ay#': 102, u'oin': 102, u'xil': 102, u'hlt': 102, u'bu\u0161': 102, u'pr\u0161': 102, u'#oj': 101, u't#\u010f': 101, u'wab': 101, u'uzd': 101, u'\xeda#': 101, u'yhy': 101, u'ehm': 101, u'#vf': 101, u'sa\u017e': 101, u'efs': 101, u'#tb': 101, u'fem': 101, u'eb\xfa': 101, u'\xe9ny': 100, u'oz\u010d': 100, u'ov\u010d': 100, u'uny': 100, u'goc': 100, u'jzn': 100, u'dlu': 100, u'ags': 100, u'\xe4zy': 100, u'idy': 100, u'iox': 100, u'aal': 99, u'kn\xfd': 99, u'#gh': 99, u'ob\u0161': 99, u'v\u010do': 99, u'j\u0161k': 99, u'ovm': 99, u'jje': 99, u'pcc': 99, u'olr': 99, u'\u0161ob': 99, u'x#k': 99, u'#h#': 99, u'big': 99, u'ilt': 99, u'es\u013e': 99, u'f\xfaz': 99, u'\u0161av': 98, u'\u0161#b': 98, u'tsb': 98, u'k#\xed': 98, u'dof': 98, u'mix': 98, u'adp': 98, u'nha': 98, u'\u013ebe': 98, u'i#\xe1': 98, u'\u013e#h': 98, u'nth': 98, u'r\xfdk': 98, u'bea': 98, u'\u017eis': 98, u'jhl': 98, u'udb': 98, u'\xe1\u013ea': 98, u'nbo': 98, u'guy': 98, u'p\xedm': 97, u'on\xe9': 97, u'dfo': 97, u'amk': 97, u'o#\xe1': 97, u'sp+': 97, u'ec\u0165': 97, u'm\xedk': 97, u'zte': 97, u'#e#': 97, u'gha': 97, u'\u0161vi': 97, u'og\xe1': 97, u'ioc': 97, u'\xe1je': 97, u'p+#': 97, u's\xfar': 97, u'xou': 96, u'\u0161#j': 96, u'n\xe7o': 96, u'er\xf3': 96, u'sr#': 96, u'ifr': 96, u'rse': 96, u'c#g': 96, u'een': 96, u'vle': 96, u'\u0155\u0161e': 96, u'agl': 96, u'lay': 96, u'idt': 96, u'z#\xed': 96, u'ook': 96, u'oge': 96, u'owh': 96, u'\u0161\xedp': 95, u'tzi': 95, u'zo\u010d': 95, u'yal': 95, u'o\xe1z': 95, u'oep': 95, u's\xedr': 95, u'lly': 95, u'yu#': 95, u'alp': 95, u'ith': 95, u'a\u0161v': 95, u'stm': 95, u'vfr': 95, u'ryj': 95, u'bi\xe1': 95, u'rao': 95, u'\xed\u017ea': 95, u'rv\xed': 95, u'\xed\u0161u': 94, u'zo\u0161': 94, u'ivk': 94, u'\xedv\u0148': 94, u'\xed#\u0165': 94, u'sba': 94, u'l\u017eo': 94, u'tsc': 94, u'quo': 94, u'kyv': 94, u'g\xe1t': 94, u'\u0161ra': 94, u'u#\xe1': 94, u'eps': 94, u'\u010f##': 94, u'hk\xfa': 94, u'rym': 94, u'pe\u013e': 94, u'z\u0161t': 94, u'fav': 94, u'nz#': 93, u'cil': 93, u'\xe1#\u0165': 93, u'\xe1ms': 93, u'eia': 93, u'#ji': 93, u'dry': 93, u'\xfan#': 93, u'i#\u0163': 93, u'\xf3m#': 93, u'\xfdpl': 93, u'\u010f#\u017e': 93, u'kof': 93, u'#d#': 93, u'ed\u0161': 93, u'biu': 93, u'zb\xfa': 93, u'okn': 93, u'fat': 93, u'hiz': 93, u'gr#': 93, u'\u0161ef': 92, u'dzt': 92, u'eez': 92, u'u\u0161t': 92, u'dr\xf4': 92, u'ieg': 92, u'#if': 92, u'\xe1cs': 92, u'kl\xe9': 92, u'\u010fam': 92, u'\xe9la': 92, u'zej': 92, u'b\xe1\u0165': 92, u'vuk': 92, u'b#h': 92, u'buk': 92, u'pr\xe9': 92, u'c\xfar': 91, u'zce': 91, u'am\xed': 91, u'win': 91, u'rss': 91, u'ays': 91, u'ldi': 91, u'kuu': 91, u'clo': 91, u'\xfam#': 91, u'yla': 91, u'#vc': 91, u'\u013e#l': 91, u'bte': 91, u'ykm': 91, u'aov': 91, u'uxu': 91, u'na\u013e': 91, u'utm': 91, u'ggi': 91, u'##\xb0': 91, u'arv': 91, u'jeg': 90, u't\xe1p': 90, u'\u0165ij': 
90, u'uzi': 90, u'n\u010fa': 90, u'ear': 90, u'meg': 90, u'i\xe1n': 90, u't\xeat': 90, u'gem': 90, u'vim': 90, u'z\xe9l': 90, u's#\xed': 90, u'et\xea': 90, u'il\xfd': 90, u'za\xfa': 90, u'pur': 90, u'r\xe1k': 90, u'oo#': 90, u'ggl': 90, u'an\u010f': 90, u'sb#': 90, u'gr\xe4': 90, u'#mv': 90, u's#w': 90, u'\u013a\u017e#': 89, u'jm\xe1': 89, u'\xfdna': 89, u'\xe4#i': 89, u'sbi': 89, u'v\xe1n': 89, u'rck': 89, u'ppi': 89, u'd\u017eu': 89, u'vip': 89, u'ih\xe1': 89, u'egm': 89, u'yce': 89, u'mcu': 89, u'dih': 89, u'bui': 89, u'p#l': 89, u'f\xedh': 89, u'hun': 89, u'#pm': 89, u'zzo': 89, u'b\xfav': 88, u'iqu': 88, u'\xe9tu': 88, u'zp\xe4': 88, u'ntk': 88, u'o\u010do': 88, u'yon': 88, u'yot': 88, u'hl\xfd': 88, u'f\xe1c': 88, u'#bg': 88, u'rm\xf3': 88, u'\u0165ia': 88, u'j\xfas': 88, u'uo#': 87, u'\xfdzo': 87, u'r\xf3z': 87, u'er\u0161': 87, u'am\xfa': 87, u'jja': 87, u'fsa': 87, u'\xe4#c': 87, u'nsy': 87, u'rox': 87, u'xpa': 87, u'\xfaza': 87, u'hug': 87, u'\u017edu': 87, u'lao': 87, u'nbi': 87, u'#g\xe1': 87, u'jd\xfa': 87, u'mvo': 87, u'udf': 87, u'\xe7oi': 87, u'\u0161\xe1l': 87, u'#pv': 87, u'#p\u0161': 87, u'rru': 87, u'm\xf3d': 86, u'wej': 86, u'wei': 86, u'bce': 86, u'dr\xfd': 86, u'ma\u013e': 86, u'ubr': 86, u'#z\xe9': 86, u'mub': 86, u'su\u0161': 86, u'od\xfd': 86, u'uad': 86, u'\xe1ki': 86, u'rtl': 86, u'\xf4\u013eo': 86, u'jdl': 86, u'tu\u010d': 86, u'mse': 86, u'rug': 85, u'lsa': 85, u'gaa': 85, u'mur': 85, u'#rp': 85, u'#rc': 85, u'\u010d#b': 85, u'#mt': 85, u'sas': 85, u'vme': 85, u'phi': 85, u'yk\xe1': 85, u'\xe4z#': 85, u'is\xfa': 85, u'\xe1bu': 85, u'dak': 85, u'p#i': 85, u'irr': 84, u'\xfdz#': 84, u'xom': 84, u'nk\xfd': 84, u'\u0148#f': 84, u'ufo': 84, u'nsu': 84, u'pki': 84, u'uu#': 84, u'apn': 84, u'olf': 84, u'jce': 84, u'\u0161kv': 84, u'uei': 84, u'ep\xfd': 84, u'ep\xf4': 84, u'dtr': 84, u'#h\xfc': 84, u'ef\u010d': 84, u'f\u010do': 84, u'\u0161#e': 83, u'bve': 83, u'jah': 83, u'r\u0161t': 83, u'obt': 83, u'ff#': 83, u'\xe4t\xfd': 83, u'kio': 83, u'ubv': 83, u'cli': 83, u'\xe1l\u010d': 83, u'aa#': 83, u'\u010dme': 83, u'kyj': 83, u'jok': 83, u'eom': 83, u'epk': 83, u'zur': 83, u'tuz': 83, u'#ph': 83, u'hea': 82, u'aia': 82, u'\xfa\u010d#': 82, u'\xfcbn': 82, u'pcp': 82, u'\xfa#\xe1': 82, u'mev': 82, u'jk#': 82, u'aks': 82, u'h\xfcb': 82, u'l\xe1z': 82, u's\xf3j': 82, u'tuh': 82, u'd#\xed': 82, u'mk\xe1': 82, u'\xed\u017es': 82, u'p\xe1t': 82, u'ig#': 82, u'x#z': 81, u'cun': 81, u'zog': 81, u'\u0161#\u010f': 81, u'try': 81, u'eio': 81, u'ea\u010d': 81, u'au\u0161': 81, u'u\xfa\u010d': 81, u'oiz': 81, u'\xe1\u0161i': 81, u'sbo': 81, u'#f\xe9': 81, u'jwa': 81, u'xt\xed': 81, u'seo': 81, u'i\xf3p': 81, u'ntv': 81, u'\xedpm': 81, u'lu\xfa': 81, u'lui': 81, u'xus': 81, u'r\xe1\u0165': 81, u'zub': 81, u'g\xf3g': 81, u'bub': 81, u'ti\xf3': 81, u'vz\u010f': 81, u'tca': 81, u'#kg': 81, u'aic': 80, u'j#w': 80, u'eiz': 80, u'\xfcbi': 80, u'ifa': 80, u'gmy': 80, u'\u0165ac': 80, u'm\u010di': 80, u'\u0165#\xe1': 80, u'gea': 80, u'\xe1hi': 80, u'r\xfcb': 80, u'\xe4\xdfl': 80, u'r\xe4\xdf': 80, u'\u010dim': 80, u'bl\xe1': 80, u'ckx': 80, u'eoz': 80, u'ox#': 80, u'h\xf4t': 80, u'gba': 80, u'ios': 80, u'a\xfat': 80, u'tae': 80, u'\u0161\xe9f': 80, u'o\xfa\u010d': 79, u'sza': 79, u'r\u0165o': 79, u'\xeato': 79, u'ce\u013e': 79, u'rsn': 79, u'kic': 79, u'\xe1\u010d#': 79, u'h\u013eu': 79, u'oit': 79, u'\xe4#f': 79, u'ur\xed': 79, u'rtk': 79, u'\xfdpa': 79, u'dhe': 79, u'ips': 79, u'kl\xfd': 79, u'uat': 79, u'b\xe1\u0161': 79, u'ulk': 79, u'fur': 79, u'sdo': 79, u'lba': 79, u'\xfadm': 79, 
u'hu\u0165': 79, u'p\xe1p': 79, u'ef#': 79, u'iry': 78, u'\u0161#\xfa': 78, u'eic': 78, u'y\u010di': 78, u'rsa': 78, u'm\xfaz': 78, u'\xf4\u017em': 78, u'rue': 78, u'nsi': 78, u'u\u0161\xe1': 78, u'oa#': 78, u'pyr': 78, u'\u0161l\xe9': 78, u'#ee': 78, u'kl\xf3': 78, u'ftv': 78, u'uar': 78, u'ek\xe9': 78, u'\xdfle': 78, u'utc': 78, u'd\u010do': 78, u'\xe9si': 78, u'upp': 78, u'z\u0148a': 78, u'r#\u013e': 78, u'hme': 78, u'frs': 77, u'osb': 77, u'\xfatk': 77, u'p\xedr': 77, u'e\xe1t': 77, u'inh': 77, u'\u013eme': 77, u'gni': 77, u'wia': 77, u'bci': 77, u'e\u010dl': 77, u'\xfd#\u010f': 77, u'rcz': 77, u'toi': 77, u'rkn': 77, u'sip': 77, u'uiz': 77, u'eh\xe1': 77, u'\u010d\u0161a': 77, u'\xf3to': 77, u'k\xf3p': 77, u'\xfdsi': 77, u'l\xedh': 77, u'\u017eeh': 77, u'f#d': 77, u'thr': 77, u'izb': 77, u'tr\xfd': 76, u'jzl': 76, u'nki': 76, u'\xe1tr': 76, u'jr\xf4': 76, u'k\xfds': 76, u'o\u017eu': 76, u'rpk': 76, u'\xe1cl': 76, u'sag': 76, u'uez': 76, u'le\xe1': 76, u'#ya': 76, u'ckm': 76, u'ypa': 76, u'b\xe9m': 76, u'ejc': 76, u'iz\xf3': 76, u'wn#': 76, u'osr': 75, u't\xe1\u013e': 75, u'em\u010d': 75, u'kvr': 75, u'\xf4do': 75, u'o\u010fm': 75, u'e\u010dk': 75, u'lsi': 75, u'#fj': 75, u'bwi': 75, u'u\u0161a': 75, u'fje': 75, u'iab': 75, u'al\u017e': 75, u'\xe1pc': 75, u'zi\xe1': 75, u'#p\u013e': 75, u'\u017e\xedr': 75, u'\u010f#\u013e': 75, u'b\xedd': 75, u'arp': 75, u'ab#': 75, u'\xf3bn': 75, u'bib': 75, u'fei': 75, u'afe': 75, u'ks#': 75, u'ynd': 75, u'zkl': 74, u'nva': 74, u'hvi': 74, u'\u0148#\xfa': 74, u'kes': 74, u'boo': 74, u'oi#': 74, u'p\u013e\xfa': 74, u'l\u017eu': 74, u'syr': 74, u'\xe1dr': 74, u'\xe1dh': 74, u'\u013e\u0148o': 74, u'jsp': 74, u'\xf3ma': 74, u'eph': 74, u'#ez': 74, u'fty': 74, u'm\xfdm': 74, u'ntl': 74, u'mti': 74, u'hta': 74, u'yty': 74, u'as\xfd': 74, u'\u010dln': 74, u'nay': 74, u'#l\xfa': 74, u'uh\u013e': 74, u'\xfaho': 74, u'num': 74, u'ti\u013e': 74, u'tza': 73, u'ojc': 73, u'\u013e#\xfa': 73, u'\u0161i\u0148': 73, u'k\xe9s': 73, u'kiv': 73, u'\xe1\u0161\u013e': 73, u's\u0165r': 73, u'\u013e\xfac': 73, u'r\xf4t': 73, u'oee': 73, u'hec': 73, u'sul': 73, u'\u010dik': 73, u'ki\u0161': 73, u'#xy': 73, u'xyn': 73, u'dma': 73, u'\u010dta': 73, u'#\u0161\xe9': 73, u'f#e': 73, u'vrn': 73, u'hr\u0148': 73, u'n\xfav': 73, u'goj': 73, u'f\xed#': 73, u'an\u0161': 73, u'hah': 72, u'uff': 72, u'k\xe1b': 72, u'#gp': 72, u'ij#': 72, u'mro': 72, u'kai': 72, u'wik': 72, u'\xe1\u017ec': 72, u'\xe1\u0161k': 72, u'ch\xed': 72, u'alf': 72, u'\u0161iz': 72, u'f#t': 72, u'him': 72, u'#ts': 72, u'h\xfdk': 72, u'x#m': 71, u'heg': 71, u'ogu': 71, u'cao': 71, u'mby': 71, u'ffe': 71, u'lo\u015f': 71, u'rct': 71, u'ngh': 71, u'bc#': 71, u'i\xe1d': 71, u'\xe4#h': 71, u'o#\xf4': 71, u'#io': 71, u'edh': 71, u'ouc': 71, u'#p\xf3': 71, u'\xf4z#': 71, u'zp\xfa': 71, u'\xe1gn': 71, u'\xe1k#': 71, u'aot': 71, u'vuh': 71, u'jla': 71, u'r\xe1f': 71, u'r\xeda': 71, u'paa': 71, u'pao': 71, u'msi': 71, u'p\xe9r': 70, u'k\xe1h': 70, u'\xfchl': 70, u'r\xf3f': 70, u'jru': 70, u'eet': 70, u'urj': 70, u'j\u0148u': 70, u'ubb': 70, u'pku': 70, u'd\u017e#': 70, u'\xfaby': 70, u'pty': 70, u'\xfaz\u0161': 70, u'blu': 70, u'#ey': 70, u'ek\xfd': 70, u'n\u010do': 70, u'abd': 70, u'r\xe9k': 70, u'jzi': 70, u'\u017e#\u013e': 70, u'#tk': 70, u'tze': 69, u'tz#': 69, u'\u0148mi': 69, u'\xe9nd': 69, u'jab': 69, u'lce': 69, u'ob\u0148': 69, u'z\xf3r': 69, u'l\u010do': 69, u'ngi': 69, u'd\u010fa': 69, u'dk\xfa': 69, u'oan': 69, u'kdr': 69, u'#ae': 69, u'od\u010f': 69, u'yru': 69, u'\u013ep#': 69, u'\u0165n\xe1': 69, u'a\u0165p': 
69, u'en\u0117': 69, u'ejr': 69, u'\xe9zy': 69, u'#tf': 69, u'taz': 69, u'kfu': 68, u'sz\xe1': 68, u'wel': 68, u'nkf': 68, u'wa#': 68, u'kvy': 68, u'cef': 68, u'eer': 68, u'rsc': 68, u'amf': 68, u'dr\xe9': 68, u'\xe1\u010de': 68, u'd\xedv': 68, u'sbe': 68, u'ohi': 68, u'm\xfan': 68, u'g##': 68, u'i\u010fa': 68, u'\u017e\xeds': 68, u'\u010f#\xfa': 68, u'\u017e\xe1d': 68, u'up\xfa': 68, u'bai': 68, u'z\xfa\u017e': 68, u'g\xf3n': 68, u't\u0117o': 68, u'ri\u010f': 68, u'mbs': 67, u'gfc': 67, u'rff': 67, u'zki': 67, u'kna': 67, u'lfa': 67, u'\xfdn#': 67, u'tft': 67, u'enl': 67, u'drz': 67, u'ge\u013e': 67, u'\xfank': 67, u'l\u017ei': 67, u'#hv': 67, u'eun': 67, u'n\u0117o': 67, u'llm': 67, u'fcm': 67, u'iew': 67, u'si\xe8': 67, u'r\xfaf': 67, u'#gf': 67, u'u#x': 67, u'ftp': 67, u'la\u010f': 67, u'vy\u013e': 67, u'gps': 67, u'\xfasu': 67, u'vr\u017e': 67, u'p#\u017e': 67, u'doo': 67, u'cri': 66, u'\u013e#\u017e': 66, u'h\xe1t': 66, u't\xfdr': 66, u'rj\xe1': 66, u'eeu': 66, u'\xe1\u017et': 66, u'syh': 66, u'bsd': 66, u'k\u013eu': 66, u'a\u0148m': 66, u'rtz': 66, u'##\xe1': 66, u'hnd': 66, u'dhi': 66, u'\u0155me': 66, u'z\xfdm': 66, u'p\u0165a': 66, u'yp\u0165': 66, u'l#\xed': 66, u'uld': 66, u'hig': 66, u'mo\u0161': 66, u'buo': 66, u'fai': 66, u'h#\xed': 66, u'usv': 66, u'nz\xfd': 65, u'\u0161ek': 65, u'\xedvr': 65, u'amr': 65, u'v\xe1b': 65, u'\xedaz': 65, u'vd#': 65, u'#h\xf6': 65, u'gap': 65, u'mi\xed': 65, u'\u017e\u010fu': 65, u'\u0155mn': 65, u'gdo': 65, u'ddy': 65, u'itz': 65, u'sec': 65, u'kp#': 65, u'\xe1kn': 65, u'cch': 65, u'b#\u013e': 65, u'ruo': 65, u'h\u0148a': 65, u'mcd': 65, u'flu': 65, u'fet': 65, u'agb': 65, u'd\xeda': 65, u'\xe1nd': 65, u'rm#': 65, u'\u0161at': 64, u'\u0103ov': 64, u'off': 64, u'h\xedv': 64, u'cav': 64, u'ncl': 64, u'm#\xe1': 64, u'\xe4t\xfa': 64, u'wic': 64, u'ngd': 64, u'ngs': 64, u'kih': 64, u'ma\u0161': 64, u'#b\xf4': 64, u'\xe4#\xfa': 64, u'tse': 64, u'sfa': 64, u's\xedk': 64, u'+##': 64, u'uyo': 64, u'ix#': 64, u'sib': 64, u'smy': 64, u'bhu': 64, u'\xe1ky': 64, u'ni\xed': 64, u'dir': 64, u'\u0148ho': 64, u'\xedov': 64, u'tuo': 64, u'esd': 64, u'#pc': 64, u'duf': 64, u'fma': 63, u'\xf3gy': 63, u'obb': 63, u'gut': 63, u'rsh': 63, u'\xfa#\xed': 63, u'bc\xed': 63, u'\xe1dc': 63, u's\xedn': 63, u'ppo': 63, u'ltt': 63, u'#vd': 63, u's#q': 63, u'acz': 63, u'kl\u010d': 63, u'see': 63, u'rdm': 63, u'zib': 63, u'\xfddo': 63, u'woo': 63, u'wog': 63, u'awi': 63, u'ar\u017e': 63, u'm\xf4t': 63, u'\u010d#e': 63, u'd#w': 63, u'ok\xf3': 63, u'kog': 63, u'afa': 63, u'igg': 63, u'j\xfap': 63, u'hm\xf4': 63, u'rfo': 62, u'b#l': 62, u'udh': 62, u's\xfdt': 62, u'auv': 62, u'oer': 62, u'#zf': 62, u'r\xfch': 62, u'elr': 62, u'g#r': 62, u'\u010di\u0148': 62, u'#m\xf3': 62, u'\u0155mi': 62, u'zpu': 62, u'teb': 62, u'r\xfdn': 62, u'\xe8ra': 62, u'#\u017er': 62, u'uls': 62, u'kke': 62, u'\xfaso': 62, u'#b\xe9': 62, u'nnu': 62, u'ssy': 62, u'swa': 62, u'sm\xe9': 62, u'mfa': 62, u'\xfdrs': 62, u'irs': 61, u'cik': 61, u'\u013an#': 61, u'zfe': 61, u'veh': 61, u'v\u013an': 61, u'c\u0103o': 61, u'omc': 61, u'unn': 61, u'\xe9ci': 61, u'vli': 61, u'bsl': 61, u'#v\u013a': 61, u'e\u0161#': 61, u'ipp': 61, u'oh\u0148': 61, u'yp\xe1': 61, u'xy#': 61, u'y\u013eo': 61, u'ooi': 61, u'oot': 61, u'ry\u017e': 61, u'#d\xfd': 61, u'but': 61, u'piu': 61, u'igh': 61, u'yn\xed': 61, u'\xfdky': 61, u'rry': 61, u'fe\xfa': 61, u'us\xfa': 61, u'#cb': 60, u'tv#': 60, u'r\xf3k': 60, u'ae#': 60, u'jn#': 60, u'hn\xed': 60, u'ma\u0148': 60, u'oij': 60, u'h\xf6k': 60, u'v\xe1g': 60, u'a\u013ea': 60, u'gib': 60, 
u'\xf6km': 60, u'ep\xe9': 60, u'\xe1r\xf3': 60, u'rya': 60, u'd\u013ep': 60, u'f##': 60, u'goi': 60, u'gog': 60, u'pm#': 60, u'czy': 59, u'\xfapy': 59, u'nru': 59, u'#gk': 59, u'dbi': 59, u'yvs': 59, u'srp': 59, u'\u0148#\u0161': 59, u'aub': 59, u'dr\xfa': 59, u'lbr': 59, u'\xe1\u0165m': 59, u'\u013e\u0148u': 59, u'kyb': 59, u'tbi': 59, u'\u010d#l': 59, u'\u010d#u': 59, u'r\xfan': 59, u'xa#': 59, u'k\xf4z': 59, u'fol': 59, u'n\xe1\u010d': 59, u'm\xe4\u0165': 59, u'rl#': 59, u'x#b': 59, u'yt\xed': 59, u'cct': 59, u'xek': 59, u'#h\xfa': 59, u'rr\xe1': 59, u'lb\xed': 59, u'ocy': 59, u'\xedsu': 59, u'jis': 58, u'hrc': 58, u'hve': 58, u'eea': 58, u'tb#': 58, u'auf': 58, u'euc': 58, u'mi\xf3': 58, u'ub\u013e': 58, u'ubm': 58, u'muc': 58, u'gig': 58, u'llu': 58, u'tbu': 58, u'u\u010d\u0148': 58, u'a\u0148t': 58, u'sue': 58, u'al\u010d': 58, u'ap#': 58, u'ap\xf4': 58, u'joe': 58, u'rd\xfa': 58, u'ep\xed': 58, u'ght': 58, u'\u017eih': 58, u'#l\u017e': 58, u'r\xe1\u0148': 58, u'la\u017e': 58, u'ulm': 58, u'abn': 58, u'deh': 58, u'pex': 58, u'afu': 58, u'kso': 58, u'thu': 58, u'hi#': 58, u'\xf3ko': 58, u'byu': 58, u'\xe9ve': 57, u'tms': 57, u'brn': 57, u'em\u013e': 57, u'kvo': 57, u'\xf3rm': 57, u'ify': 57, u'yei': 57, u'dkr': 57, u'gah': 57, u'\u010dec': 57, u'nhr': 57, u'mud': 57, u'jsm': 57, u'jsr': 57, u'rt\xfa': 57, u'hoz': 57, u'o\u0148a': 57, u'uim': 57, u'fta': 57, u'ctb': 57, u'\xedtk': 57, u'aol': 57, u'dtp': 57, u'woj': 57, u'n\u017eo': 57, u'\xf4to': 57, u'y\u013es': 57, u'moo': 57, u'#dt': 57, u'z\xfah': 57, u'pei': 57, u'bi\u0161': 57, u'f#o': 57, u'b\u013ea': 57, u'fyr': 57, u'hua': 57, u'\xf3dn': 57, u'p\xedt': 56, u'#cn': 56, u'aef': 56, u'ofr': 56, u'gnu': 56, u'aik': 56, u'uzm': 56, u'cek': 56, u'hke': 56, u'#tm': 56, u'npi': 56, u'\xe1kr': 56, u'lax': 56, u'\xe9\u0161i': 56, u'ghr': 56, u'ux#': 56, u'pvv': 56, u'a\u010dm': 56, u'ryc': 56, u'epy': 56, u'icz': 56, u'\xe8s#': 56, u'anf': 56, u'\xed\u017en': 56, u'p\xe1#': 56, u'yvm': 56, u'\xe1vs': 56, u'x#o': 55, u'rfe': 55, u'\xf3zu': 55, u'tr\u010d': 55, u'lzi': 55, u't\xe1\u010d': 55, u'cad': 55, u'aqu': 55, u'enp': 55, u'gej': 55, u'syl': 55, u's\xe9h': 55, u'k\u013az': 55, u'r\xfcm': 55, u'ehp': 55, u'aph': 55, u'd\u0155\u017e': 55, u'laz': 55, u'ze\u0148': 55, u'\u010f#f': 55, u'lm\xe1': 55, u'ccc': 55, u'\xf3tu': 55, u'mp#': 55, u'lij': 55, u'cdd': 55, u'jza': 55, u'tyh': 55, u'a\u010ds': 55, u'zu\u017e': 55, u'abb': 55, u'buv': 55, u'\u0155\u017ea': 55, u'\xeds\u0148': 55, u'e\u0148t': 55, u'sk\u013a': 55, u'jml': 54, u'fix': 54, u't#w': 54, u'dbl': 54, u'mr#': 54, u'\u0148#l': 54, u'rsi': 54, u'dr\xf3': 54, u'id\u0155': 54, u's\u013eo': 54, u'#h\xfd': 54, u'syt': 54, u'ub\xfa': 54, u'ad\u010d': 54, u'kuc': 54, u'nhc': 54, u'bs#': 54, u'fc#': 54, u'uum': 54, u'v\xedh': 54, u'v\xedc': 54, u'\xf3my': 54, u'y\u0161i': 54, u'olc': 54, u'hcr': 54, u'\xe9ta': 54, u'tr\xf3': 54, u'x#j': 54, u'yta': 54, u'\u010dl#': 54, u'e\u010fm': 54, u'she': 54, u'\u010d#j': 54, u'r\xedf': 54, u'\xe9k#': 54, u'\u0165st': 54, u'\xfahy': 54, u'j\xedv': 54, u'o\u013em': 54, u'\u017era': 54, u'pr\xfc': 54, u'\u010doc': 54, u'fau': 54, u'arh': 54, u'#cz': 53, u'umr': 53, u'noo': 53, u'cut': 53, u'a\u013et': 53, u'cec': 53, u'goz': 53, u'd\xfar': 53, u'eyh': 53, u'\xe4#l': 53, u'adh': 53, u'edd': 53, u'x##': 53, u'#\u017eu': 53, u'#lh': 53, u'a\u0161m': 53, u'nnn': 53, u'ejb': 53, u'taf': 53, u'w#a': 52, u'ja\u010d': 52, u'\u0161m\xed': 52, u'ib\xed': 52, u'\xf3r#': 52, u'\xed#w': 52, u'uvr': 52, u'ffa': 52, u'if#': 52, u'l\u010d\xed': 52, u'b\u0161e': 
52, u'\xe1\u0161e': 52, u'\xe4#\u0161': 52, u'gae': 52, u'\xfdms': 52, u'eub': 52, u'ylv': 52, u'jot': 52, u'fto': 52, u'n\xe1\u017e': 52, u'#nn': 52, u'mp\xe9': 52, u'\u017eu\u0161': 52, u'\xfdsm': 52, u'tu\u0165': 52, u'mkn': 52, u'a\u0165o': 52, u'pmo': 52, u'\xe8sa': 52, u'uzz': 52, u'izv': 52, u'saf': 52, u'h#w': 52, u'ceb': 51, u'kay': 51, u'ymk': 51, u'l\u010de': 51, u'svv': 51, u'au\u017e': 51, u'me\u013e': 51, u'c\xedz': 51, u'gy\u0151': 51, u'gop': 51, u'oed': 51, u'ah\xed': 51, u'a\u0148k': 51, u'\xfa\u0161i': 51, u'\u0151ri': 51, u'#m#': 51, u'tth': 51, u'klz': 51, u'exy': 51, u'ag#': 51, u'b\xedr': 51, u'pup': 51, u'bem': 51, u'lua': 51, u'dp\xfa': 51, u'\xe9s#': 51, u'bao': 51, u'#dd': 51, u'e\u013e\xfa': 51, u'nja': 51, u'y\u0151r': 51, u'dua': 51, u'zvn': 51, u'tef': 50, u'#g\xf6': 50, u'nri': 50, u'kn\xe1': 50, u'tr\xfc': 50, u'sge': 50, u'\xfdfu': 50, u'lzy': 50, u'\u017e#g': 50, u'\xf3fs': 50, u'eu\u0161': 50, u'r\xfcp': 50, u'elf': 50, u'ro\u017e': 50, u'ho\u010f': 50, u'ttj': 50, u'sai': 50, u'nby': 50, u'pl\xf4': 50, u'mtt': 50, u'u\xed#': 50, u'#\xe0#': 50, u'ghi': 50, u'lga': 50, u'awo': 50, u'#\u0161n': 50, u'\u017eev': 50, u'p#\xfa': 50, u'\xedf#': 50, u'\xe9\u0161#': 50, u'owe': 50, u't\xed\u0161': 49, u'\u013eu\u010d': 49, u'zk\xe1': 49, u'\xf3zo': 49, u'of\xed': 49, u'in\u010d': 49, u'fom': 49, u'\u010dbo': 49, u'uzj': 49, u'iff': 49, u'\xf3je': 49, u'do\u013e': 49, u'\xe1\u010dm': 49, u'enw': 49, u'zhu': 49, u'\u0161ga': 49, u'\u0161l\xfa': 49, u'ngk': 49, u'b\u017ei': 49, u'\xfdmy': 49, u'pt\xf3': 49, u'\u010did': 49, u'\xeddi': 49, u'\xf8rg': 49, u'j\xf8r': 49, u'dyh': 49, u'it\xf3': 49, u'ued': 49, u'cks': 49, u'la\u0161': 49, u'akk': 49, u'\u010f#\u0161': 49, u'\xedpl': 49, u'asr': 49, u'#\u0161\u013e': 49, u'a\u017es': 49, u'moi': 49, u'peu': 49, u'\xe1\u017em': 49, u'dai': 49, u'lfo': 49, u'esz': 49, u'\xfcpe': 49, u'a\u0161g': 49, u'got': 49, u'taw': 49, u'k\u0148a': 49, u'hml': 49, u'j#\xed': 48, u'\xednm': 48, u'\xednc': 48, u'\xf3cr': 48, u'ugh': 48, u'lp#': 48, u'\u013eat': 48, u'h\xfa\u017e': 48, u'o\u010ft': 48, u'o\u010fs': 48, u'ur\u017e': 48, u'\u010fst': 48, u'ke\u0148': 48, u'\u017eo\xe1': 48, u'\u017eco': 48, u'ujs': 48, u'\xfanu': 48, u'eyr': 48, u'nwe': 48, u'kyp': 48, u'g#e': 48, u'z\xedr': 48, u'rkh': 48, u'o\u017eo': 48, u'ixu': 48, u'jo#': 48, u'ftu': 48, u'it\u0148': 48, u'\u0165te': 48, u'agd': 48, u'b\xe1t': 48, u'ypn': 48, u'x#d': 48, u'pud': 48, u'pyk': 48, u'otm': 48, u'r\xe1\u0161': 48, u's\xf3c': 48, u'\xe1br': 48, u'\u013eto': 48, u'ws#': 48, u't\xe4l': 48, u'ko\u0148': 48, u'piz': 48, u'cna': 48, u'r\xfab': 48, u'tee': 48, u'#k\u0148': 48, u'mb#': 47, u'rfi': 47, u'vcu': 47, u'szw': 47, u'sz#': 47, u'rb\xed': 47, u'nk\xfa': 47, u'ai\u0165': 47, u'\xedry': 47, u'tfr': 47, u'ym\xfd': 47, u'tme': 47, u'zwo': 47, u'aye': 47, u'eyn': 47, u'cur': 47, u'ufm': 47, u'nhe': 47, u'\xfdtu': 47, u'dl\xfa': 47, u'myk': 47, u'zp#': 47, u'it\xe4': 47, u'v\xfdf': 47, u'htf': 47, u'\u010d#h': 47, u'tu\u013e': 47, u'mo\xfa': 47, u'ewi': 47, u'gko': 47, u'tea': 47, u'du\xed': 47, u'\xe9rk': 46, u'ezs': 46, u'gsa': 46, u'cac': 46, u'\u013ea\u010d': 46, u'y\u010dk': 46, u'rwe': 46, u'if\xe9': 46, u'eeb': 46, u'#wu': 46, u'r\xe3o': 46, u'cul': 46, u'#fd': 46, u'euo': 46, u'mi\u010d': 46, u'\u010dev': 46, u'aht': 46, u'gir': 46, u'm\xe1\u0161': 46, u'rk\xe1': 46, u'\u0161ed': 46, u'\u0161ro': 46, u'sej': 46, u'zaw': 46, u'\u0165pe': 46, u'aws': 46, u'kg#': 46, u'\xedkr': 46, u'jdy': 46, u'msa': 46, u'\u0161\u0148o': 46, u'f\xedm': 46, u'cbr': 46, 
u'hih': 46, u'bbe': 46, u'hz#': 45, u'dzv': 45, u'ee#': 45, u'oxo': 45, u'c\xeda': 45, u'do\u011f': 45, u'eed': 45, u'\xe1\u017e\xed': 45, u'mmi': 45, u'oex': 45, u'o\u011fa': 45, u'm\u013ea': 45, u'\xf4mb': 45, u'gt#': 45, u'g#u': 45, u'oya': 45, u'\xe9#\xe1': 45, u'\u010d#r': 45, u'b\xf4m': 45, u'uck': 45, u'anh': 45, u'fo#': 45, u'\xf4zy': 45, u'it\u0117': 45, u'exk': 45, u'et\xfa': 45, u'l\xf4b': 45, u'v\xfdt': 45, u'ypm': 45, u'hlc': 45, u'\u011fan': 45, u'slz': 45, u'kk#': 45, u'\xe1bl': 45, u'd#\u0165': 45, u'a\u0165k': 45, u'z\u0165#': 45, u'e\xfac': 45, u'igl': 45, u'\xfdvi': 45, u'tah': 45, u'lmy': 44, u't\xe9v': 44, u'k\xfar': 44, u'\xfapu': 44, u'ojz': 44, u'\u0161#\u0161': 44, u'cij': 44, u'afc': 44, u'nb\xed': 44, u'guw': 44, u'jra': 44, u'\u0165al': 44, u'cy#': 44, u'yar': 44, u'eaf': 44, u'sap': 44, u'ayl': 44, u'\xf3be': 44, u'#bb': 44, u'twi': 44, u'\xe1\u0161o': 44, u'pcb': 44, u'tsi': 44, u'kuo': 44, u'pt\xe9': 44, u'al\xf4': 44, u'cdc': 44, u'sik': 44, u'bt#': 44, u'gso': 44, u'uwu': 44, u'yre': 44, u'awa': 44, u'\u017eic': 44, u'd\xf4b': 44, u'dja': 44, u'ksh': 44, u'vr\xfa': 44, u'enh': 44, u'hio': 44, u'xkl': 43, u'jtv': 43, u'ez\u0165': 43, u'\xe4\u0165k': 43, u'#g\xe9': 43, u'\xe1#\xe1': 43, u'weg': 43, u'wen': 43, u'vsp': 43, u'a#y': 43, u'zcu': 43, u'way': 43, u'jre': 43, u'l\xfan': 43, u'#j\xf8': 43, u'wur': 43, u'c\u0103u': 43, u'\xfans': 43, u'vbo': 43, u'syp': 43, u'gm#': 43, u'v\u0161o': 43, u'g#l': 43, u'g#w': 43, u'eht': 43, u'g\xe1m': 43, u'yll': 43, u'dwa': 43, u'oh\xfa': 43, u'mly': 43, u'epn': 43, u'pl\xf3': 43, u'ghs': 43, u'\xe1su': 43, u'egt': 43, u'z#w': 43, u'#h\xe4': 43, u'hl\xe9': 43, u'tyl': 43, u'mcz': 43, u'hyo': 43, u'e\u013er': 43, u'\xe1j#': 43, u'ud\u017e': 43, u'cse': 43, u'\xe4l\xe4': 42, u'hew': 42, u'#o\u0148': 42, u'ijk': 42, u'daw': 42, u'wer': 42, u'inb': 42, u'eij': 42, u'ffo': 42, u'ifu': 42, u'ury': 42, u'u\u0161\u013e': 42, u'orz': 42, u'iun': 42, u'ro\u013e': 42, u'jwe': 42, u'rks': 42, u'bde': 42, u'jol': 42, u'blk': 42, u'bho': 42, u'pha': 42, u'\u0161of': 42, u'ccp': 42, u'of\xe9': 42, u'aw#': 42, u'\u0103uo': 42, u'#tw': 42, u'\u017eah': 42, u'jdr': 42, u'\u0165se': 42, u'a\u0165s': 42, u'a\u0165u': 42, u'day': 42, u'szo': 42, u'p#f': 42, u'ssu': 42, u'yvl': 42, u'bbi': 42, u'p\xe9#': 41, u'frv': 41, u'trk': 41, u'szy': 41, u'db\xfa': 41, u'mrc': 41, u'a\u0161p': 41, u'lwa': 41, u'c\xedo': 41, u'c\xedl': 41, u'\xfcms': 41, u'p\xf3l': 41, u'mif': 41, u'bee': 41, u'roy': 41, u'ro\xed': 41, u'ahi': 41, u'ojp': 41, u'\xfazo': 41, u'v\u0103l': 41, u'zi\u013e': 41, u'mte': 41, u'\xe1ke': 41, u'lge': 41, u'eg#': 41, u'l#w': 41, u'i\u017ei': 41, u'h\u0161o': 41, u'op\u010d': 41, u'ik\xe9': 41, u'ly\u017e': 41, u'\u017eer': 41, u'\u0103le': 41, u'#x#': 41, u'yss': 41, u'\u010dch': 41, u'yj\u010d': 41, u'\u010di\u010d': 41, u'ssc': 41, u'#v\u0103': 41, u'arf': 41, u'wna': 41, u'gry': 41, u'\xf4ck': 40, u'cib': 40, u'szp': 40, u'rns': 40, u'y#x': 40, u'pna': 40, u'cea': 40, u'dts': 40, u'b\u0161t': 40, u'eel': 40, u'e#q': 40, u'ajf': 40, u'\u0165u#': 40, u'aen': 40, u'imc': 40, u'o\u017et': 40, u'oud': 40, u'xie': 40, u'zt\xe1': 40, u'lae': 40, u'cow': 40, u'nib': 40, u'ecs': 40, u'i\u0144s': 40, u'\xfdso': 40, u'\xe1p#': 40, u'opp': 40, u'kk\xe9': 40, u'd#\u0148': 40, u'\xe1f#': 40, u'af\xe9': 40, u'god': 40, u'd\xe9s': 40, u'ssn': 40, u'uhn': 40, u'\u010dou': 40, u'f\xfak': 40, u'b\xe1b': 40, u'bbc': 40, u'hak': 39, u'onb': 39, u'ojj': 39, u'y\u017ee': 39, u'nkt': 39, u'\xedrk': 39, u'y#\xe1': 39, u'eik': 39, u'o\u010fo': 39, 
u'oia': 39, u'\xfann': 39, u'clu': 39, u'im\xfa': 39, u'pkk': 39, u'muh': 39, u'g#h': 39, u'\xfdte': 39, u'whi': 39, u'n\u0165i': 39, u'hoo': 39, u'nle': 39, u'yde': 39, u'np#': 39, u'x#u': 39, u'uau': 39, u'yp\u013a': 39, u'niq': 39, u'plp': 39, u'jta': 39, u'#h\u0155': 39, u'#lv': 39, u'oph': 39, u'\xf4lk': 39, u'oos': 39, u'#bs': 39, u'enf': 39, u'nud': 39, u'o\u015f#': 39, u'h\u0155s': 39, u'cf#': 39, u'ybi': 39, u'\u0155st': 39, u'skn': 39, u'sk\u013e': 39, u'fi\xe1': 38, u'so\u010d': 38, u'heo': 38, u'\xf3z#': 38, u'dbu': 38, u'\u0161iv': 38, u'aim': 38, u'umm': 38, u'hn\xe9': 38, u'kij': 38, u'e\u017eo': 38, u'bob': 38, u's\u0165k': 38, u'a\u0144s': 38, u'rg\xe9': 38, u'zh\xfd': 38, u's\xe9m': 38, u'mi\xe1': 38, u'eu\u013e': 38, u'\xed\u013ek': 38, u'\xed\u013e#': 38, u'im\xed': 38, u'tof': 38, u'pke': 38, u'sui': 38, u'jsc': 38, u'nd\u017e': 38, u'atp': 38, u'zta': 38, u'tkv': 38, u'ipn': 38, u've\u017e': 38, u'kl\xfa': 38, u'ct\xed': 38, u'uef': 38, u'leu': 38, u'law': 38, u'sp\xed': 38, u'#qi': 38, u'mpc': 38, u'dm#': 38, u'lqu': 38, u'opj': 38, u'nys': 38, u'\xfal#': 38, u'\xf3di': 38, u'ixn': 38, u'r\xfcf': 38, u'p\u010di': 38, u'jul': 38, u'\xfcff': 38, u'fif': 37, u'\u0151k\xe9': 37, u'gbo': 37, u'in\u0165': 37, u'pje': 37, u'uzk': 37, u'e\u010dm': 37, u'\u013er\xfd': 37, u'eux': 37, u'ku\xe1': 37, u'elj': 37, u'g#g': 37, u'\xe1pn': 37, u'df#': 37, u'##\xed': 37, u'sms': 37, u'qim': 37, u'm\xfdv': 37, u'ueg': 37, u'phe': 37, u'uaj': 37, u'ilk': 37, u'\u017eun': 37, u'\u017eur': 37, u'vyo': 37, u'z#\xe1': 37, u'ne\xe1': 37, u'nyi': 37, u'\xe1by': 37, u'moe': 37, u't\u0151k': 37, u'cso': 37, u'n\xfar': 37, u'gkm': 37, u'\xe9za': 37, u'\xe9zu': 37, u'faz': 37, u'\u0165bo': 37, u'dut': 37, u'#t\u0151': 37, u'rlg': 36, u'fi\xed': 36, u'hag': 36, u'p\xedd': 36, u'\u0161#l': 36, u'\u0161#\u013e': 36, u'\xe1#\xed': 36, u'lgr': 36, u'f#r': 36, u'v\u010dl': 36, u'bzo': 36, u'aux': 36, u'u\u013eu': 36, u'eyo': 36, u'pc#': 36, u'equ': 36, u'oe#': 36, u'zh\xf4': 36, u'u\u0161l': 36, u'c\u0148u': 36, u'ah\u0148': 36, u'llh': 36, u'ics': 36, u'v#\xe9': 36, u'odf': 36, u'vzl': 36, u'nyh': 36, u'koi': 36, u'bir': 36, u'e\u013e\u0148': 36, u'pii': 36, u'nur': 36, u'\xedsi': 36, u'ti\xe9': 36, u'csa': 36, u'\u0165p\xe4': 36, u'jut': 36, u'\xe9rr': 35, u'onr': 35, u'jec': 35, u'b#\xfa': 35, u'\xfd\u0165a': 35, u'\u013e#f': 35, u'szk': 35, u'sze': 35, u'gb#': 35, u'rew': 35, u'\u017e\xfar': 35, u'eie': 35, u'y\u010du': 35, u'\xf4dn': 35, u'\u0103se': 35, u'#bd': 35, u'gad': 35, u'\u0161\u0165o': 35, u'nsc': 35, u'#fs': 35, u'dcm': 35, u'\xfdmo': 35, u'o\u017e\xfa': 35, u'l\xf4c': 35, u'ptn': 35, u'alq': 35, u'o\u0161a': 35, u'ep\u010d': 35, u'\xe8re': 35, u'v\xfd\u0165': 35, u'pu\u010d': 35, u'b\xe9z': 35, u'dpy': 35, u'kgb': 35, u'#l\xe9': 35, u'd#\xf4': 35, u'a\u0161s': 35, u'z\xfas': 35, u'das': 35, u'lfi': 35, u'ogh': 35, u'ysm': 35, u'gny': 35, u'n\xfa\u010d': 35, u'ra\xe7': 35, u'ocl': 35, u'\xe1ry': 35, u'fae': 35, u'feu': 35, u'ebb': 35, u'bbo': 35, u'kx#': 34, u'ezk': 34, u'scf': 34, u'ad\xf4': 34, u'\xe1sz': 34, u'nm\xe4': 34, u'szl': 34, u'kr\xf4': 34, u'a#\xf4': 34, u'o\xedn': 34, u'azz': 34, u'\u013ea\u0161': 34, u'psi': 34, u'esr': 34, u'f\u013ea': 34, u'u\u013en': 34, u'gai': 34, u'oeb': 34, u'#f\u013e': 34, u'oar': 34, u'\xf4\u010di': 34, u's\xe1c': 34, u'g#\u010d': 34, u'alh': 34, u'ief': 34, u'tga': 34, u'i#\u0148': 34, u'bdu': 34, u'y\u0144s': 34, u't\xf3p': 34, u'olz': 34, u'ddp': 34, u'zmn': 34, u'vio': 34, u'#eq': 34, u'se\xfa': 34, u'#p\xe9': 34, u'#y#': 34, u'\xe4ki': 34, 
u'zeu': 34, u'nt\xf3': 34, u'\xf6sc': 34, u'jti': 34, u'pue': 34, u'zy\u0144': 34, u'\xe9sa': 34, u'\xe3ov': 34, u'kko': 34, u'#\u0161u': 34, u'\u0142aw': 34, u'f#u': 34, u'ysy': 34, u'ys\xe9': 34, u'n\xfa\u0161': 34, u'p\xfdc': 34, u'h\xe1\u010d': 34, u'f\xedk': 34, u'rm\xfa': 34, u'vz\xed': 34, u'fab': 34, u'\xfdku': 34, u'\xeddm': 34, u'hiu': 34, u'b\xf6s': 34, u'h\xfdr': 34, u'#mb': 34, u'd\xe1k': 34, u'bb\xe1': 34, u'\xfdre': 34, u'\u015foi': 33, u'e\xed#': 33, u'mr\xfa': 33, u'zo\u017e': 33, u'aae': 33, u'\xfchr': 33, u'bu\u015f': 33, u'pfr': 33, u'\xe1#w': 33, u'uzr': 33, u'#s\xf8': 33, u'\u0148#\u013e': 33, u'ncz': 33, u'\u0165ar': 33, u'dvy': 33, u'b\u0103s': 33, u'urv': 33, u'\xf4zo': 33, u'#j\xe4': 33, u'hn\xfd': 33, u'hnp': 33, u'omf': 33, u'\xf3nc': 33, u'v#w': 33, u'ayu': 33, u'oir': 33, u'#b\u0103': 33, u'\xe4tt': 33, u'u\u015fo': 33, u'\xe1d\u017e': 33, u'k\u013ab': 33, u'kuh': 33, u's\xe1k': 33, u'l\u0148a': 33, u'yad': 33, u'ylw': 33, u'#vu': 33, u'joa': 33, u'\xfdpr': 33, u've\u0161': 33, u'ttu': 33, u'u#q': 33, u'zm\xe1': 33, u'phn': 33, u'glu': 33, u'la\xdf': 33, u'\xedtu': 33, u'epf': 33, u'vd\xfd': 33, u'z\u010da': 33, u'#\u017el': 33, u'j\xe4\xe4': 33, u'ma\u0144': 33, u'ecv': 33, u'jz#': 33, u'dp\xe1': 33, u'rp\u010d': 33, u'ak\u0155': 33, u'#\u0161r': 33, u'pay': 33, u'\xe9ka': 33, u'usl': 33, u'a\u0165n': 33, u'\u0165iu': 33, u'j\xedt': 33, u'p#\u0161': 33, u'p#g': 33, u'nup': 33, u'f\xedl': 33, u'k#w': 33, u'vzt': 33, u'h\xe4u': 33, u'\u010d\xedr': 33, u'\xe4\xe4t': 33, u'hid': 33, u'dui': 33, u'ljo': 33, u'#mh': 33, u's\xfa\u0161': 33, u'cz#': 32, u'\xe4se': 32, u'zrn': 32, u'\xedn\u0161': 32, u'g\xf6n': 32, u'\xf6nc': 32, u'jew': 32, u'zkv': 32, u'\u0161#u': 32, u'lzu': 32, u'a#\xf3': 32, u'hrk': 32, u'wag': 32, u'kv\xed': 32, u'gu\xe9': 32, u'gu\u013e': 32, u'yih': 32, u'mr\xe1': 32, u'ffi': 32, u'urr': 32, u'\xf4\u017et': 32, u'ov\u0155': 32, u'o\u013e\u0161': 32, u'kig': 32, u'eep': 32, u'vvd': 32, u'd\xfav': 32, u'eyb': 32, u'r\xf4m': 32, u'r\xf4\u010d': 32, u'nsh': 32, u'nsm': 32, u'iit': 32, u'tsv': 32, u'tsu': 32, u'mkl': 32, u'adw': 32, u'\xf4m#': 32, u'\xfa#\xe9': 32, u'suu': 32, u'cdm': 32, u'i#y': 32, u'#vv': 32, u'y\u0161t': 32, u'jkv': 32, u'fog': 32, u'\xe1ct': 32, u'mhz': 32, u'\xe9le': 32, u'x#t': 32, u'yt\xfa': 32, u'b\xe9r': 32, u'jpl': 32, u'jpe': 32, u'h\u0148o': 32, u'krk': 32, u'kr\u010d': 32, u'isg': 32, u'kk\xfd': 32, u'r\xedg': 32, u'iwo': 32, u'#d\xfc': 32, u'ysz': 32, u'\xe9by': 32, u'tii': 32, u'd\xe9b': 32, u'e\xfar': 32, u'vz#': 32, u'arx': 32, u'spz': 32, u'bb#': 32, u'pz#': 32, u'ird': 31, u'so\u017e': 31, u'uom': 31, u'ezz': 31, u'nra': 31, u'w#f': 31, u'yr#': 31, u'\u0161me': 31, u'umf': 31, u'ncy': 31, u'm\u010de': 31, u'wir': 31, u'ngv': 31, u't\u0161e': 31, u'gyi': 31, u'fne': 31, u'#ng': 31, u'e\u017e\xfa': 31, u'orw': 31, u'gee': 31, u's\u0165b': 31, u'\xf8nd': 31, u'dgr': 31, u'm\xfa\u010d': 31, u'nho': 31, u'viw': 31, u'kyi': 31, u'el\u0148': 31, u'm\xedl': 31, u'm\xedd': 31, u's\xf8n': 31, u'ed\xe9': 31, u'\u010dip': 31, u'ndg': 31, u'hoh': 31, u'ssr': 31, u'pd#': 31, u'laa': 31, u'ao\u010d': 31, u'x#e': 31, u'\u017e\xe1b': 31, u'\xf3te': 31, u'n\xfdk': 31, u'jp\xe1': 31, u'opm': 31, u'oon': 31, u'znj': 31, u'\xf6de': 31, u'ysc': 31, u'ys\xe1': 31, u'\xe1jm': 31, u'fyt': 31, u'njz': 31, u'\xe1\u013e#': 31, u'oc\u0148': 31, u'ti\xfa': 31, u'fah': 31, u'ynm': 31, u'eft': 31, u'nfa': 31, u'#kk': 31, u'lvu': 31, u'xta': 30, u'y\xed#': 30, u'zoo': 30, u'uon': 30, u'ivd': 30, u'fvo': 30, u'\xe4\u0165s': 30, u'ojd': 30, 
u'b#\u0161': 30, u'p\xfa#': 30, u'\xfd\u0161o': 30, u'aig': 30, u'uzv': 30, u'ffl': 30, u'psa': 30, u'srr': 30, u'uv#': 30, u'#j\u0119': 30, u's\xfd#': 30, u'\u017eca': 30, u'\xedmk': 30, u'zl\xf3': 30, u'\xedgu': 30, u'two': 30, u'uih': 30, u'#zs': 30, u'cle': 30, u'm\xe1s': 30, u'o\u017ec': 30, u'uuj': 30, u'#md': 30, u'sa\xef': 30, u'sef': 30, u'seh': 30, u'ev\xf4': 30, u'v\xfd\u010d': 30, u'yom': 30, u'iz#': 30, u'rur': 30, u'py\u0161': 30, u'bey': 30, u'd\xfch': 30, u'ops': 30, u'www': 30, u'zuz': 30, u'nya': 30, u'ry\xed': 30, u'dah': 30, u'j\u0119d': 30, u'aj\xe1': 30, u'dee': 30, u'anp': 30, u'\u0119dr': 30, u'yns': 30, u'us\u0165': 30, u'onw': 29, u'#cm': 29, u'uou': 29, u'\u0161#\u017e': 29, u'tr\xe3': 29, u'\u013e#\u0161': 29, u'w#s': 29, u'rbr': 29, u'h\xe1l': 29, u'ib\xe9': 29, u'low': 29, u'#ss': 29, u'ifn': 29, u'bzr': 29, u'gmu': 29, u'oz\u0148': 29, u'urp': 29, u'eaz': 29, u'auc': 29, u'ov\u010f': 29, u'meo': 29, u'\u0165#w': 29, u'#n\xf4': 29, u'i\u013ea': 29, u'e#\xf4': 29, u'ujn': 29, u'\xfd#\u0165': 29, u'uft': 29, u'vp#': 29, u'elb': 29, u'gi#': 29, u'\u010dav': 29, u'#ie': 29, u'si\xf3': 29, u'\xfdti': 29, u'nd\xed': 29, u'kda': 29, u'ndh': 29, u'hoe': 29, u'sm\xe1': 29, u'lps': 29, u'zt\xfd': 29, u'ttp': 29, u'dft': 29, u'#eo': 29, u'\u010fa#': 29, u'uev': 29, u'kxa': 29, u'yos': 29, u'\u017e\xfav': 29, u'\xe4zm': 29, u'l#\u0165': 29, u'di\xf3': 29, u'\xe1b#': 29, u'\xebov': 29, u'\xe9ky': 29, u'd\xf4m': 29, u'ka\xed': 29, u'd\xedl': 29, u'\u0161nu': 29, u'\xf4my': 29, u'd\xe9n': 29, u'd\xe9r': 29, u'sru': 29, u'eju': 29, u'\xe9gu': 29, u'ju\u0161': 29, u'\xdfov': 29, u'hii': 29, u'u\xe9l': 28, u'tzo': 28, u'fr\xe9': 28, u'\xfdzb': 28, u'#g\xfc': 28, u'\u0161#i': 28, u'zgu': 28, u'emm': 28, u'bny': 28, u'\xf3rd': 28, u'veg': 28, u's\xe2r': 28, u'#s\xe2': 28, u'tf#': 28, u'h\xfat': 28, u'tbc': 28, u'keo': 28, u'e\u010dt': 28, u'om\u010d': 28, u'#ny': 28, u'iuj': 28, u'r\xf4d': 28, u'\xe4#\u013e': 28, u'\xe9in': 28, u'gay': 28, u'u\u0165a': 28, u'ufe': 28, u'#f#': 28, u'#fv': 28, u'k\xfac': 28, u'v\xe1\u0148': 28, u'#zw': 28, u'muf': 28, u'elz': 28, u'lhm': 28, u'rtt': 28, u'n\u0161a': 28, u'#iu': 28, u'\u010d#c': 28, u'ouf': 28, u'lpo': 28, u'prc': 28, u'wua': 28, u'hwo': 28, u'laq': 28, u'ilb': 28, u'\xe2rb': 28, u'z\u010dl': 28, u'jt\xfd': 28, u'vuo': 28, u'ync': 28, u'b\xe9n': 28, u'shw': 28, u'\u017ei\u0161': 28, u'\u0161ij': 28, u'isr': 28, u'h\u0161e': 28, u'tyn': 28, u'b\u010de': 28, u'y\u013ea': 28, u'y\u013e#': 28, u'kkr': 28, u'dge': 28, u'f\xe9i': 28, u'a\u0161#': 28, u'icl': 28, u'pme': 28, u'r\xfas': 28, u'ej\u010d': 28, u'r#w': 28, u'efk': 28, u'eff': 28, u'ri\u013e': 28, u'a\xdfo': 28, u'#df': 28, u'ta\u0161': 28, u'j\xe1k': 28, u'hmy': 28, u'\u0161\xedt': 27, u'irw': 27, u'a\u0142o': 27, u'ezj': 27, u'rfa': 27, u'\u0161#\u0165': 27, u'\xe7u#': 27, u'lcu': 27, u'gun': 27, u'\xf3ry': 27, u'b\u0161o': 27, u'vei': 27, u'\xedzv': 27, u'ur\xe3': 27, u'\xe1ur': 27, u'kib': 27, u'#nk': 27, u'fse': 27, u'zss': 27, u'e#\xf3': 27, u'gei': 27, u'\u013azl': 27, u'fdp': 27, u'eym': 27, u'rg\xf3': 27, u'im\xe9': 27, u'a\u0148h': 27, u'\u0165d\u0148': 27, u'o\u017em': 27, u'bdr': 27, u'\xfdtv': 27, u'm\xe9s': 27, u'f#\u010d': 27, u'vaz': 27, u'mez': 27, u'jke': 27, u'uyn': 27, u'o\u0148o': 27, u'n\xeds': 27, u'fte': 27, u'x#r': 27, u'td#': 27, u'b\xe9g': 27, u'\xfa\u017eo': 27, u'z#\xf4': 27, u'#ht': 27, u'fl\xf3': 27, u'ec\u0148': 27, u's\u0142a': 27, u'oov': 27, u'uln': 27, u'mo\u013e': 27, u'r\xe9u': 27, u'\u017eeb': 27, u'a\u0165d': 27, u'j\xe1u': 27, 
u'o\u013e\xfa': 27, u'raw': 27, u'lbi': 27, u'ti\u0161': 27, u'yj\xfa': 27, u'#pf': 27, u'juk': 27, u'tlu': 27, u'tax': 27, u'vc#': 27, u'mfo': 27, u'\xfdru': 27, u'nzn': 26, u'hap': 26, u'#c\xf3': 26, u'a\xedn': 26, u'u\u0161\u0148': 26, u'\xe4\u0165m': 26, u'zto': 26, u'aek': 26, u'cve': 26, u'\xf6m#': 26, u'rex': 26, u'\xe9fo': 26, u'psc': 26, u'psr': 26, u'if\xed': 26, u'\xfdja': 26, u'\xf4\u017e#': 26, u'eab': 26, u'c#\u010f': 26, u'drs': 26, u'c\xedr': 26, u'#n#': 26, u'#nd': 26, u'e#x': 26, u'\xe9un': 26, u'mm#': 26, u'\xe4si': 26, u'gmi': 26, u'ub\xed': 26, u'gi\u010d': 26, u'hsv': 26, u'g#\u010f': 26, u'\u0161l\xe1': 26, u'po\xfa': 26, u'#rh': 26, u'rtr': 26, u'm\xedt': 26, u'ltn': 26, u's#\xe1': 26, u'myz': 26, u'\u017elt': 26, u'olb': 26, u'gdy': 26, u'\u0148ku': 26, u'vi\xe1': 26, u'n#\xed': 26, u'vmu': 26, u'\xe3o#': 26, u'\u0161pl': 26, u'm\xe4d': 26, u'coo': 26, u'plh': 26, u'cci': 26, u'pua': 26, u'k\xf3s': 26, u'\u0155t#': 26, u'\xfas#': 26, u'kow': 26, u'ys\xfd': 26, u'lch': 26, u'ryo': 26, u'c\xf3r': 26, u'htt': 26, u'vch': 26, u'yb\xed': 26, u'j\xfar': 26, u'mb\xf3': 25, u'zr#': 25, u'\u0161ac': 25, u'onh': 25, u'hep': 25, u'\u010d\u0155t': 25, u'pfl': 25, u'lze': 25, u'aeu': 25, u'a#q': 25, u'erw': 25, u'hr\xf6': 25, u'wa\u0142': 25, u'pny': 25, u'#xv': 25, u'dv\u010d': 25, u'rsm': 25, u'#j\xf3': 25, u'mek': 25, u'ln#': 25, u'e#\xe0': 25, u'\xe7a#': 25, u'toe': 25, u'oef': 25, u'sfu': 25, u'\u013ena': 25, u'+#n': 25, u'ppc': 25, u'\xfaba': 25, u's\xe1\u017e': 25, u'pt\xed': 25, u'lhb': 25, u'\u0142\u0119s': 25, u'l\xe4#': 25, u'bda': 25, u'#\u010d\u0155': 25, u'zk\xfd': 25, u'\xfdtn': 25, u'oj\xfa': 25, u's\xe1t': 25, u'\u010diu': 25, u'ndp': 25, u'e\u0161\u013e': 25, u'\u010du\u013e': 25, u'z\xe1\u017e': 25, u'hbt': 25, u't\xebo': 25, u'\xfcnt': 25, u'pho': 25, u'v\xe1p': 25, u'yk\xfa': 25, u'u\xedn': 25, u'spy': 25, u'vum': 25, u'oxa': 25, u'#u\u0165': 25, u'hba': 25, u'beb': 25, u'rqu': 25, u'isv': 25, u'mc#': 25, u'r\xf6d': 25, u'jdo': 25, u'yp\xed': 25, u'a\u0142\u0119': 25, u'lyg': 25, u'a\u0165\u0161': 25, u'af\xfa': 25, u'\xe1n\u010d': 25, u'csi': 25, u'gvi': 25, u'rv\xf3': 25, u'\xfaos': 25, u'pve': 25, u'us\xe1': 25, u's\xfao': 25, u'j\xe1d': 25, u'eb\xed': 25, u'v\xf3z': 25, u'bba': 25, u'frh': 24, u'\u0161ah': 24, u'#c\xfa': 24, u'bys': 24, u'zk\xe9': 24, u'trb': 24, u'\xeffi': 24, u'dbr': 24, u'xvi': 24, u'\u0161ii': 24, u'i\xf1a': 24, u'uzl': 24, u'z\xf3d': 24, u'g\xfan': 24, u'loi': 24, u'\u0165at': 24, u'c\xedu': 24, u'zwi': 24, u'gyu': 24, u'unm': 24, u'pvc': 24, u'\u013azn': 24, u'oib': 24, u'uji': 24, u'\u0165u\u010d': 24, u'bke': 24, u'iim': 24, u'u\u0161\u010d': 24, u'imm': 24, u's\xe1g': 24, u'poa': 24, u'yhu': 24, u'#rf': 24, u'apv': 24, u'\xe4us': 24, u'cdu': 24, u'\u010du#': 24, u'a\xeff': 24, u'tty': 24, u'axn': 24, u'itb': 24, u'vzp': 24, u'g\xfcn': 24, u'ntg': 24, u'\xf1as': 24, u'mts': 24, u'gh\xe1': 24, u'\xe9do': 24, u'utz': 24, u'ulv': 24, u'\u0161v\xe1': 24, u'\u017eib': 24, u'\xf3lo': 24, u'ty\u013e': 24, u'ba\u017e': 24, u'zun': 24, u'kkp': 24, u'iwi': 24, u'pai': 24, u'i\xfat': 24, u'de\xed': 24, u'\xe1fa': 24, u'\u017eac': 24, u'f#l': 24, u'#bp': 24, u'tmu': 24, u'tl\xe9': 24, u'aj\u013e': 24, u'p\xfd\u0161': 24, u'wap': 24, u'prh': 24, u'\xe1\u0148m': 24, u'te\u017e': 24, u'hik': 24, u'\u0161\xe9#': 24, u'pzi': 24, u'#\xf3#': 23, u'sc#': 23, u'rfm': 23, u'\u0161#f': 23, u'\xe1#\xe9': 23, u'\xe0#l': 23, u'so\u013e': 23, u'mrl': 23, u'eig': 23, u'uvl': 23, u'iya': 23, u'pwa': 23, u'c\u0148o': 23, u'ng\xfa': 23, u'aua': 23, u'mey': 
23, u'e\u010d\xfa': 23, u'ovg': 23, u'om\xf3': 23, u'yeu': 23, u'bo\u013e': 23, u'jf\xfa': 23, u'\u010dre': 23, u'eyt': 23, u'\u010d\xedk': 23, u'he\u0148': 23, u'u\u0165o': 23, u'ldm': 23, u'nsr': 23, u'#\xf6z': 23, u'ahb': 23, u'\xe9e#': 23, u'dfi': 23, u'#ix': 23, u'i#\xf4': 23, u'i#x': 23, u'\u010di\u016b': 23, u'\u010di\u0161': 23, u'rpu': 23, u'ol\u010d': 23, u'ipk': 23, u'#\xe8#': 23, u'zpi': 23, u'i\u016bt': 23, u'\u017ed\xed': 23, u'\u015fa#': 23, u'\xe1km': 23, u'etf': 23, u'epw': 23, u'yts': 23, u'\u016bt\u0117': 23, u'etz': 23, u'#r#': 23, u'p\xe4s': 23, u'cds': 23, u'mab': 23, u'\u017eiu': 23, u'baa': 23, u'a\u017et': 23, u'ikk': 23, u'bih': 23, u'bi\u0144': 23, u'gcc': 23, u'f#c': 23, u'cod': 23, u'ys\xfa': 23, u'pi\xf3': 23, u'p\xfd#': 23, u'\xe1n\xed': 23, u'ejp': 23, u'\xe1\u0148t': 23, u'#pz': 23, u'riq': 23, u'f\xfan': 23, u'i\u010dl': 23, u'rrs': 23, u'ebs': 23, u'\u015fov': 22, u'\xedba': 22, u'tzu': 22, u'\xe9va': 22, u'\xe1zc': 22, u'b\xfak': 22, u'\xe4\u0165n': 22, u'#gc': 22, u'vgo': 22, u'w#k': 22, u'lgo': 22, u'eml': 22, u'h\xe1\u0161': 22, u'kvu': 22, u'i\u0161e': 22, u'w\u010da': 22, u'\xf6ld': 22, u'dz#': 22, u'puo': 22, u'asz': 22, u'nc#': 22, u'\xedzr': 22, u'yas': 22, u'\u0161\u010de': 22, u'om\u0161': 22, u'u\u013eo': 22, u'iur': 22, u's\u0165t': 22, u'cua': 22, u'\xe1\u0165t': 22, u'v\xe1\u010d': 22, u'zd\xfa': 22, u'+#a': 22, u'd\u017es': 22, u'kyo': 22, u's\xe1v': 22, u'vtr': 22, u'pk\xe1': 22, u'\xf6zd': 22, u'g\xe1r': 22, u'i#q': 22, u'\xf3#n': 22, u'y\u0161p': 22, u'e\u0161\u0148': 22, u'\xfavn': 22, u'\xf4za': 22, u'v#\xf4': 22, u'\xe9t#': 22, u'tpe': 22, u'jj\xe1': 22, u'agg': 22, u'le\u0161': 22, u'\xedhl': 22, u'la\u0148': 22, u'ntd': 22, u'sp\xf3': 22, u'aok': 22, u'z\u010du': 22, u'dtv': 22, u'oxf': 22, u'cdo': 22, u'ecl': 22, u'ots': 22, u'upt': 22, u'#ln': 22, u'jhr': 22, u'uor': 22, u'l\xe9v': 22, u'f#i': 22, u'ys\u013e': 22, u'vr\xed': 22, u'\xe1\u013eu': 22, u'r\xfa\u017e': 22, u'anv': 22, u'gkl': 22, u'lng': 22, u'ri\xf1': 22, u'tao': 22, u'd\xe1\u017e': 22, u't\xedz': 21, u'\xe1z\xed': 21, u't\xf4t': 21, u'b\xedv': 21, u'ha\u0161': 21, u'\xednn': 21, u'jt#': 21, u'xot': 21, u'xod': 21, u'je\u017e': 21, u'aaa': 21, u'zkr': 21, u'w#v': 21, u'krz': 21, u'\u0165in': 21, u'\xedr\u010d': 21, u'kv#': 21, u'i\u0161a': 21, u'stf': 21, u'#\u0151r': 21, u'#w#': 21, u'l\xfad': 21, u'\u017eoc': 21, u'i\xedt': 21, u'#nt': 21, u'bog': 21, u'aym': 21, u'ayr': 21, u'lfe': 21, u'oid': 21, u'#bm': 21, u'p\xf3r': 21, u'#b\xfc': 21, u'bk\u013e': 21, u'rgy': 21, u'zhn': 21, u'\xf6rg': 21, u'vp\xe1': 21, u'\xe1l\xed': 21, u'pk\xe9': 21, u'n\u0161s': 21, u'z\xe9r': 21, u'r\u010dk': 21, u'rps': 21, u'dsb': 21, u'olh': 21, u'ttr': 21, u'b\xfct': 21, u'acv': 21, u'tp\xe4': 21, u'agy': 21, u'lee': 21, u'v\xfdj': 21, u'\u010f#\xed': 21, u'\xe9\u0161a': 21, u'\u0161i\xed': 21, u'l#\xe1': 21, u'idg': 21, u'duh': 21, u'ba\u010d': 21, u'ba\u0148': 21, u'zui': 21, u'\xe1bn': 21, u'a\u017ea': 21, u'hdi': 21, u'ok\xed': 21, u'iks': 21, u'kmu': 21, u'\u017e#\u0165': 21, u'#xi': 21, u'p\xf3z': 21, u'aiz': 21, u'mw#': 21, u'o\u015fa': 21, u'efp': 21, u'#oi': 20, u'irl': 20, u'#c\xe9': 20, u'#cf': 20, u'ezy': 20, u'\u0151ry': 20, u'h\xe9l': 20, u'cih': 20, u'r\xf3b': 20, u'wad': 20, u'i\u0161\u0165': 20, u'noi': 20, u'azh': 20, u'eif': 20, u'\xe9fa': 20, u'sra': 20, u'\u010dnu': 20, u'\u0148#\u010f': 20, u'kao': 20, u'oz\xe9': 20, u'lk\xfd': 20, u'xho': 20, u'unr': 20, u'mao': 20, u'lse': 20, u'zse': 20, u'vkr': 20, u'kmh': 20, u'p\xf3d': 20, u'k\xfd\u0161': 20, u'\xe4#g': 20, 
u'jbr': 20, u'j\u017ei': 20, u'\xedu#': 20, u'ku\u010d': 20, u'tf\xe1': 20, u'elv': 20, u't\u010di': 20, u'z\xedz': 20, u'ehi': 20, u'\u013e\u0161t': 20, u'o\u017e#': 20, u'o\xe1r': 20, u'r\u010ds': 20, u'oum': 20, u'lth': 20, u'tc#': 20, u'nd\xf3': 20, u'sm\xe4': 20, u'e\u0161v': 20, u'dso': 20, u'ui\xe8': 20, u'kly': 20, u'trs': 20, u'akn': 20, u'd\xf3m': 20, u'\u017e\xedc': 20, u'\u010f#g': 20, u'\xe9\u0161s': 20, u'l\xfcg': 20, u'o\u010d#': 20, u'pum': 20, u'pun': 20, u'dtu': 20, u'dt\xf3': 20, u'x\xe1c': 20, u'rul': 20, u'ycv': 20, u'\xedca': 20, u'\xfcge': 20, u'mcm': 20, u'op\xf4': 20, u'fl\xfc': 20, u'uh\xed': 20, u'#dm': 20, u'r\xe9d': 20, u'l\xe9t': 20, u'fuj': 20, u'kst': 20, u'd\xed\u010d': 20, u'##+': 20, u'csu': 20, u'ejq': 20, u'juv': 20, u'fku': 20, u'hip': 20, u'jqu': 20, u'dps': 20, u'j\xfa\u017e': 20, u'rlh': 19, u'\u0161\xedk': 19, u'lr#': 19, u'djo': 19, u'hev': 19, u'vlh': 19, u'ci\u010d': 19, u'o\xfar': 19, u'r\xf3j': 19, u'h#\xf4': 19, u'euj': 19, u'yr\xfa': 19, u'cab': 19, u'psd': 19, u'kae': 19, u'pww': 19, u'oz\u013e': 19, u'#ww': 19, u'l\xfak': 19, u'\xf3js': 19, u'\xf3jo': 19, u'#\xb0#': 19, u'a\u010fm': 19, u'me\u010d': 19, u'wu#': 19, u'fsf': 19, u'i\xf6l': 19, u'oim': 19, u'oic': 19, u'#\xe9t': 19, u'rgr': 19, u'iia': 19, u'g\xfd\u0148': 19, u'\u0103ne': 19, u'\u017e\u010fa': 19, u'oaq': 19, u'nwo': 19, u'kuv': 19, u'kua': 19, u'\u010dej': 19, u'\xfcrt': 19, u'+#k': 19, u'kye': 19, u'\xe1lp': 19, u'poe': 19, u'\u0165dv': 19, u'rkl': 19, u'lhk': 19, u'ixo': 19, u'ixe': 19, u'\xfdty': 19, u'lt\xed': 19, u'oug': 19, u'k\xfaz': 19, u'b\xf4b': 19, u'\xf3mu': 19, u'voi': 19, u'e\u0161m': 19, u'mdc': 19, u'lpe': 19, u'um\xfd': 19, u'ydo': 19, u'bl\xfa': 19, u'rxi': 19, u'tpw': 19, u'odj': 19, u'm\xfd\u013e': 19, u'le\u0165': 19, u'bti': 19, u'phy': 19, u'mh#': 19, u'mho': 19, u'\u0161o#': 19, u'ak\u0161': 19, u'\xedty': 19, u'b\xe1k': 19, u'gh#': 19, u'k\u017ei': 19, u'ytt': 19, u'eg\xfd': 19, u'asf': 19, u'jpa': 19, u'nex': 19, u'ba\xfa': 19, u'\xe1bk': 19, u'paz': 19, u'o\u0161p': 19, u't\u013ea': 19, u'ikh': 19, u'bim': 19, u'peh': 19, u'\xfcti': 19, u'sde': 19, u'lf#': 19, u'ai\u0161': 19, u'byh': 19, u'ysa': 19, u'\u0161na': 19, u'a\xfac': 19, u'ss\xe1': 19, u'e\xfat': 19, u'unf': 19, u'izy': 19, u'hi\xf6': 19, u'zza': 19, u'ga\u0161': 19, u'#o\u013e': 18, u'az\xe1': 18, u't#\u0165': 18, u't#\xe1': 18, u'\xe4\u0165o': 18, u'\xed\u010ft': 18, u'nry': 18, u'nrw': 18, u'evg': 18, u'pf#': 18, u'szt': 18, u'rb#': 18, u'emh': 18, u'bnu': 18, u'gn#': 18, u'bna': 18, u'\xedr\xed': 18, u'y#\xf3': 18, u'cai': 18, u'cag': 18, u'uz\xf3': 18, u'\xe1qu': 18, u'\xed#\xe1': 18, u'loa': 18, u'v\xe1q': 18, u'kah': 18, u'lki': 18, u'rs\xfd': 18, u'\xfa#w': 18, u'hns': 18, u'auo': 18, u'mea': 18, u'o\u0148h': 18, u'm\xf6l': 18, u'r\u017em': 18, u'na\u0144': 18, u'axh': 18, u'fsk': 18, u'\u0142o#': 18, u'svz': 18, u'\xe4##': 18, u'j\xf3z': 18, u'o#\xe9': 18, u'o#y': 18, u'gac': 18, u'uf#': 18, u'chb': 18, u'qu\xed': 18, u'bw\u010d': 18, u'y\xe1#': 18, u'a\u0144#': 18, u'+#s': 18, u'k\u013ea': 18, u'\xe9an': 18, u'g#i': 18, u'ehd': 18, u'#rt': 18, u'ieo': 18, u'w\xfcr': 18, u'yly': 18, u'ugg': 18, u'siw': 18, u'dlr': 18, u'#m\u0103': 18, u'nd\u013e': 18, u'#m\xf6': 18, u'n\xf4\u017e': 18, u'y\u0161#': 18, u'hog': 18, u'atd': 18, u'xia': 18, u'jk\xfd': 18, u'uyt': 18, u'#tc': 18, u'z\xe1\u013e': 18, u'ipy': 18, u'\xf4zu': 18, u'ttw': 18, u'ssb': 18, u'\u0165t\xfd': 18, u'le\u010f': 18, u'm\u0103n': 18, u'zaf': 18, u'vu\u010d': 18, u'zgy': 18, u'#h\xe9': 18, u'\xe1p\xe4': 18, 
u'ot\xf3': 18, u'isf': 18, u'zuh': 18, u'jhu': 18, u'ulg': 18, u'r\xed\u010f': 18, u's\xf3l': 18, u'ryv': 18, u'z\xfa#': 18, u'fum': 18, u'aff': 18, u'ud\u0161': 18, u'\xf6lz': 18, u'ilc': 18, u'ysu': 18, u'pik': 18, u'huo': 18, u'\xe9zs': 18, u'\xfdvk': 18, u'ta\u0142': 18, u'tay': 18, u'j\u010d\xed': 18, u'vau': 18, u'\xed\u0161#': 17, u'ir\xfd': 17, u'jia': 17, u'hex': 17, u'heb': 17, u't#\xed': 17, u'#\xf3d': 17, u'k\xe1s': 17, u'zk\xfa': 17, u'vge': 17, u'\xf4p#': 17, u'evp': 17, u'w#b': 17, u'w#d': 17, u'w#n': 17, u'aev': 17, u'nvo': 17, u'krs': 17, u'a#\xe8': 17, u'cmr': 17, u'noa': 17, u'd\xeat': 17, u'mrv': 17, u'ffr': 17, u'\xeatr': 17, u'jr\xe1': 17, u'ffs': 17, u'i\xe9t': 17, u'lkl': 17, u'c\xe9z': 17, u'wij': 17, u'wi#': 17, u'rsu': 17, u'urf': 17, u'm\xfam': 17, u'\u010d\xfal': 17, u'do\xe1': 17, u'r\u017ek': 17, u'\u017ecu': 17, u'e#y': 17, u'\xfd#\xed': 17, u'cus': 17, u'zhm': 17, u'#\xedl': 17, u'adj': 17, u'im\xe4': 17, u'too': 17, u'bsb': 17, u'\xfcrk': 17, u'el\u0161': 17, u'kyr': 17, u'm\xe1p': 17, u'rk\xfa': 17, u'js#': 17, u'tge': 17, u'edf': 17, u'#\u010dc': 17, u'\xe9\u010dt': 17, u'#v\xe9': 17, u'jop': 17, u'vag': 17, u'dhr': 17, u'olg': 17, u'\xe1ck': 17, u'n\xed\u0161': 17, u'b\xf3#': 17, u'dd#': 17, u'it\xeb': 17, u'\xf4va': 17, u'toa': 17, u'\xf6st': 17, u'kxo': 17, u'z\u010d\xfa': 17, u'cvr': 17, u'box': 17, u'fdl': 17, u'k\xf3z': 17, u'hl#': 17, u'e\u010ft': 17, u'awe': 17, u'\xf3l#': 17, u'n#\xe1': 17, u'bay': 17, u'jha': 17, u'nyr': 17, u'nyl': 17, u'kku': 17, u'bmw': 17, u'#d\xea': 17, u'ik\xfd': 17, u'r\xe9\u010d': 17, u'pee': 17, u'lyp': 17, u'\xe7ov': 17, u'pi\u0161': 17, u'bum': 17, u'ocr': 17, u'l\u0161u': 17, u'\xe9f#': 17, u'rnd': 17, u'p\xfdm': 17, u'cnd': 17, u'twa': 17, u'wno': 17, u'i\u010dc': 17, u'ot\u013e': 17, u'ndz': 17, u'e\u0148h': 17, u'#k\u013a': 17, u'x#i': 16, u'p\xe9h': 16, u'czm': 16, u'fiz': 16, u'\xedbi': 16, u'rgn': 16, u'jiz': 16, u'#cp': 16, u'aam': 16, u'#g\xf3': 16, u'\u013e#g': 16, u'r\u0165s': 16, u'lg\xe1': 16, u'\xedvl': 16, u'em\xfa': 16, u'\xe4s\u0165': 16, u'rnk': 16, u'bze': 16, u'stg': 16, u'sth': 16, u'l\u010du': 16, u'\u0161un': 16, u'\xe4ty': 16, u'e+#': 16, u'tbe': 16, u'#j\xf6': 16, u'\u0161ab': 16, u'\xe1\u010dk': 16, u'eek': 16, u'i\u017em': 16, u'uf\xf3': 16, u'nsg': 16, u's\xe9a': 16, u'#ft': 16, u'euh': 16, u'#ej': 16, u'ah\xfa': 16, u'tbv': 16, u'ppl': 16, u'g#f': 16, u'l\xf3#': 16, u'g#\u017e': 16, u'eh\xe9': 16, u'#jp': 16, u'cda': 16, u'ugr': 16, u'##x': 16, u'##\u0165': 16, u'fka': 16, u'\xe1t\xed': 16, u'vax': 16, u's#+': 16, u'fox': 16, u'v\xed\u010d': 16, u'u#\xf4': 16, u'ddn': 16, u'acl': 16, u'\u0148ko': 16, u'ex\xe1': 16, u'xae': 16, u'mle': 16, u'm\xfdk': 16, u'rdj': 16, u'npe': 16, u'o\u0161s': 16, u'\xedts': 16, u'teu': 16, u'nir': 16, u'aoi': 16, u'aor': 16, u'za\u013e': 16, u'rls': 16, u't\xf4p': 16, u'xiu': 16, u'#dz': 16, u'gsm': 16, u'idh': 16, u'vb\xe1': 16, u'ecc': 16, u'ndb': 16, u'upf': 16, u'bap': 16, u'ua\u010d': 16, u'di\xe9': 16, u'b\xfdk': 16, u'peg': 16, u'lya': 16, u'\u0165io': 16, u'buu': 16, u'p#\u013e': 16, u'gge': 16, u'b\u013ak': 16, u'jyl': 16, u'r\u0161k': 16, u'\u0161r\xe1': 16, u'ssv': 16, u'i\u0161\xe9': 16, u'pv#': 16, u'zzu': 16, u'cfc': 16, u'ta\u015f': 16, u'x#f': 15, u'#oa': 15, u'aat': 15, u'lri': 15, u'afk': 15, u'\xfatm': 15, u't\xe9s': 15, u'zr\u0148': 15, u'b\xe4t': 15, u'#cg': 15, u'\xf8me': 15, u'cry': 15, u'\xf3zs': 15, u'ij\u010d': 15, u'gby': 15, u'#\u013ek': 15, u'kr\u0161': 15, u'brl': 15, u'emt': 15, u'\xe0#v': 15, u'r\u0161#': 15, u'h\xe1k': 15, 
u'k\xedt': 15, u'eiu': 15, u'uvk': 15, u'\u0148#g': 15, u'ozz': 15, u'ub\u0161': 15, u'ngp': 15, u'ng\xe1': 15, u'djy': 15, u'c#\xed': 15, u'c#\u0165': 15, u'gy\xf6': 15, u'\xedam': 15, u'yed': 15, u'axa': 15, u'e\u017eb': 15, u'hbi': 15, u'ayt': 15, u'e#\xe9': 15, u'gec': 15, u'oig': 15, u's\u0165n': 15, u'sby': 15, u'\xe4ne': 15, u'zh\xe1': 15, u'#f\xe4': 15, u'y\u017eu': 15, u'pg#': 15, u'+#p': 15, u'pp\xe4': 15, u'icr': 15, u'ia\u0161': 15, u'iaa': 15, u'l\u0148t': 15, u'\u0161l\xed': 15, u'suk': 15, u'#r\xf8': 15, u'm\xedb': 15, u'tgr': 15, u'j\u010fa': 15, u'ltz': 15, u'jog': 15, u'#m\xfc': 15, u'lpy': 15, u'n\xe9s': 15, u'ol\xfd': 15, u'ttg': 15, u'sae': 15, u'say': 15, u'uig': 15, u'fti': 15, u'u\xe1#': 15, u'mld': 15, u'f\xe4r': 15, u'r\xf8m': 15, u'\u013ekv': 15, u'\xe4fn': 15, u'\xe8ne': 15, u'fn\xfd': 15, u'ihi': 15, u'l\xf4t': 15, u'u\xf1i': 15, u'zea': 15, u'zeg': 15, u'zeo': 15, u'o\u0161o': 15, u'\xe1r\u010d': 15, u'jts': 15, u'vun': 15, u'egl': 15, u'p\xe4n': 15, u'#h\xf4': 15, u'd\u010dl': 15, u'snc': 15, u'jzd': 15, u'dp\xf4': 15, u'ups': 15, u'\xf4bo': 15, u'\xf3ly': 15, u'yga': 15, u'n#+': 15, u'\xe4rm': 15, u'h\xf4r': 15, u'kk\xe1': 15, u'vja': 15, u'\xe1b\xe4': 15, u'r\xf6n': 15, u'\xfas\u0165': 15, u'#dj': 15, u'#\xf4k': 15, u'peo': 15, u'mkr': 15, u'g\xf3l': 15, u'a\u0165m': 15, u'\u0161t#': 15, u'iof': 15, u'iob': 15, u'xfa': 15, u'gos': 15, u'thm': 15, u'aj\u010f': 15, u'nuo': 15, u'ti\xed': 15, u'\xf3s\xe1': 15, u'\xe1n\xe9': 15, u'r\xfar': 15, u'ryz': 15, u'ssk': 15, u'h\xe4f': 15, u'm\xe9r': 15, u'\xf4k#': 15, u'izz': 15, u'\u0165\u0161t': 15, u'\xfdry': 15, u'fih': 14, u'\xed\u0161o': 14, u'e\xedn': 14, u'\xf1iz': 14, u'aai': 14, u'aap': 14, u'iju': 14, u'tr#': 14, u'tdt': 14, u'r\xf3#': 14, u'lzn': 14, u'jaf': 14, u'nks': 14, u'hrl': 14, u't\xfal': 14, u'wai': 14, u'fob': 14, u'nox': 14, u'no\u0161': 14, u'cei': 14, u'\xf3vi': 14, u'kaf': 14, u'ymf': 14, u'l\u010dk': 14, u'\u0161um': 14, u'lks': 14, u'ngw': 14, u'\u010d\xfa#': 14, u'd\u0161o': 14, u'drk': 14, u'sja': 14, u'xha': 14, u'fn\xe9': 14, u'oml': 14, u'un\u010d': 14, u'\xe1\u010ds': 14, u'\xe1\u010dr': 14, u'\xfa\u013e#': 14, u'#nx': 14, u'fsh': 14, u'e\u017eu': 14, u'\u010d\xe1k': 14, u'\u010dr#': 14, u'\xf3bo': 14, u'o#q': 14, u'p\u013eu': 14, u'dg#': 14, u'vlk': 14, u'y\u017eo': 14, u'eu\u010d': 14, u'mu\xf1': 14, u'\u0165je': 14, u'vib': 14, u'ah\u013e': 14, u'squ': 14, u'g#+': 14, u'iae': 14, u'g#\u0165': 14, u'su\u0148': 14, u'jsa': 14, u'i#\xf6': 14, u'edj': 14, u'r\u010d#': 14, u'\xfdtr': 14, u'lt\xe9': 14, u'crd': 14, u'\xfamo': 14, u'#vj': 14, u'joo': 14, u'tce': 14, u'#mp': 14, u'\xe1t\u0148': 14, u'nd\u010d': 14, u'ho\xe1': 14, u'yur': 14, u'jkm': 14, u'yd\xfd': 14, u'fp#': 14, u'u#\xf3': 14, u'u#y': 14, u'l\xe8n': 14, u'eoi': 14, u'n\xe7u': 14, u'yke': 14, u'r\xfdz': 14, u'ytm': 14, u'ytc': 14, u'uxh': 14, u'js\u013e': 14, u'n+#': 14, u'st\u013a': 14, u'oxy': 14, u'oxu': 14, u'hlm': 14, u'bei': 14, u'j\u010d\xe1': 14, u'l\xe1b': 14, u'\u017em\xfd': 14, u'akh': 14, u'\xfawa': 14, u'jhm': 14, u'ulr': 14, u'd#\xe1': 14, u'v\xfac': 14, u'v\xfaw': 14, u'pae': 14, u'\u017eab': 14, u'\xfasl': 14, u'ok\u013e': 14, u'r\xe9v': 14, u'ewt': 14, u't\u013ap': 14, u'zfa': 14, u'bym': 14, u'iog': 14, u'\xe4k\u010d': 14, u'wso': 14, u'ray': 14, u'\xeds#': 14, u'mss': 14, u'dl\xfd': 14, u'd\xe9j': 14, u'd\xe9z': 14, u'prz': 14, u'#pd': 14, u'jua': 14, u'jug': 14, u'igf': 14, u'k\u0161a': 14, u'fko': 14, u'fe+': 14, u'vaa': 14, u's#\xf4': 14, u'sk\xf3': 14, u'ewy': 13, u'\xf3ov': 13, u'jo\u010d': 13, 
u'dje': 13, u'\xbac#': 13, u'onj': 13, u'fib': 13, u'ezf': 13, u'\u013euk': 13, u'jep': 13, u'r\xf3d': 13, u'ch\xf3': 13, u'\xe9nm': 13, u'rbm': 13, u'e\xe1#': 13, u'nv#': 13, u'a#\xe9': 13, u'\u0161\u013eo': 13, u'nkn': 13, u'br\xf3': 13, u'ai\xfa': 13, u'\xeate': 13, u'\u010d\u013er': 13, u'g\xfar': 13, u'k\xedn': 13, u'y\xfa\u010d': 13, u'ncb': 13, u'\xfd\u013em': 13, u'ymr': 13, u'v\u0155t': 13, u'l\u010dn': 13, u'dvd': 13, u'\u013abo': 13, u'm#\xf4': 13, u'ngm': 13, u'\u013er#': 13, u'\xe4mu': 13, u'r\u017e#': 13, u'\xe1\u010dn': 13, u'#np': 13, u'u\u0148m': 13, u'orh': 13, u'#b\u0159': 13, u'gob': 13, u'eya': 13, u'u\u015fe': 13, u'\u010d\xedh': 13, u'o#x': 13, u'ufl': 13, u'rfu': 13, u't\xe2n': 13, u'rcs': 13, u'oad': 13, u'm\u013eo': 13, u'\xedl\xed': 13, u'i+#': 13, u'#\xe1l': 13, u'#\xe1g': 13, u's\xe1b': 13, u'nla': 13, u'o\u015fo': 13, u'\xe9#w': 13, u'#r\xf6': 13, u'm\xfas': 13, u'a\xe7a': 13, u'a\xe7u': 13, u'l\xe4m': 13, u'edg': 13, u'edw': 13, u'##\xba': 13, u'joz': 13, u'cr\xfd': 13, u'mya': 13, u'atg': 13, u'xiz': 13, u'lph': 13, u'i\u0107#': 13, u'b\u0159e': 13, u'au\u015f': 13, u'\xf3a#': 13, u'#l#': 13, u'bl\xf3': 13, u'p\u0161t': 13, u'o\u0148u': 13, u'ax\xed': 13, u'zp\xed': 13, u'\u015fes': 13, u'\u0165tr': 13, u'yai': 13, u'od\xfa': 13, u'ckh': 13, u'etw': 13, u'n\xe7a': 13, u'gly': 13, u'yp\xe4': 13, u'ni\u0161': 13, u'rh\xf4': 13, u'spd': 13, u'x#\u010d': 13, u'x#\u017e': 13, u'vul': 13, u'gwi': 13, u'#\xbac': 13, u'\xe9de': 13, u'v\u017ei': 13, u'jpi': 13, u'idm': 13, u'ulf': 13, u'\u016bna': 13, u'y\xf6r': 13, u'jzm': 13, u'pys': 13, u'\u0155ta': 13, u'lug': 13, u'#ll': 13, u'isz': 13, u'\u017em\xfa': 13, u'zu\u013e': 13, u'm\xf4r': 13, u'h\xf4n': 13, u'b\xfd\u010d': 13, u'abm': 13, u'i\xfan': 13, u'z\xfat': 13, u'a\u0165j': 13, u'\u017e#\xed': 13, u'f#f': 13, u'f#h': 13, u'udm': 13, u'vru': 13, u'cob': 13, u'nji': 13, u'\xf4ne': 13, u'\xf3#s': 13, u'nua': 13, u'ti\xe1': 13, u'dey': 13, u'msy': 13, u'sst': 13, u'ig\xfa': 13, u'p\xe1m': 13, u'fay': 13, u'pvz': 13, u'#t\u0155': 13, u'#k\xf6': 13, u'j\xe1z': 13, u'rld': 12, u'\xed\u0161k': 12, u'sow': 12, u'osz': 12, u'osh': 12, u'ufa': 12, u'p\xedl': 12, u'rz\xe9': 12, u'l\u012b#': 12, u'hef': 12, u'g\xf6t': 12, u'dsh': 12, u'x\xedk': 12, u'je\u0161': 12, u'\xe9j\xe0': 12, u'jeu': 12, u'\xf3gm': 12, u't\xe1s': 12, u'w#m': 12, u'jai': 12, u'\u0161if': 12, u'e\xe1c': 12, u'er\xf6': 12, u'o\xedr': 12, u'\xe0#h': 12, u'jvp': 12, u'jv\xf4': 12, u'uz#': 12, u'gu\u0161': 12, u'cau': 12, u'yim': 12, u'dzl': 12, u'loy': 12, u'if\xe1': 12, u'azr': 12, u'ym\xf4': 12, u'\u0165ad': 12, u'ees': 12, u'u\u0161s': 12, u'url': 12, u'urq': 12, u'\xf3ja': 12, u'\xfa#\u0148': 12, u'\u010d\xfac': 12, u'a\u010ft': 12, u'\u017eol': 12, u'gya': 12, u'ki\u010d': 12, u'on\xfd': 12, u'uk\xe9': 12, u'\u0159ez': 12, u'ged': 12, u'jfl': 12, u'\xe1\u017e\u010f': 12, u'p\xf3n': 12, u'\xe1\u0161l': 12, u'ra\xfa': 12, u'#hf': 12, u'zh\u0155': 12, u'nsd': 12, u'\xf6nt': 12, u'#q#': 12, u'iir': 12, u'\xe1ds': 12, u'pge': 12, u'mip': 12, u'imr': 12, u'toz': 12, u'tow': 12, u'jzo': 12, u'\u010de\u0161': 12, u'muz': 12, u'#ew': 12, u'll\xed': 12, u'll\u012b': 12, u'tbo': 12, u'u\u010d#': 12, u'fco': 12, u'l\u0148#': 12, u'nlo': 12, u'hwe': 12, u'vyf': 12, u'#rs': 12, u'a\xfcl': 12, u'i\u010f#': 12, u'iea': 12, u'\xe1pt': 12, u'yl\xe9': 12, u'#\u010d\u0161': 12, u'ed\xfd': 12, u'#\u010d\u013e': 12, u'rpy': 12, u'ndj': 12, u'\xe8#t': 12, u'\xf6te': 12, u'fpg': 12, u'vey': 12, u'ssl': 12, u'acd': 12, u'vi\u0107': 12, u'vi\u010f': 12, u'zm\xed': 12, 
u'n#\xf3': 12, u'trm': 12, u'\u010fat': 12, u'uet': 12, u'rdl': 12, u't\xfcr': 12, u'uam': 12, u'zei': 12, u'zew': 12, u'ze\xe1': 12, u'yen': 12, u'#nh': 12, u'gho': 12, u'aey': 12, u'aea': 12, u'x#c': 12, u'yt\u013a': 12, u'b#g': 12, u'\xe1s\xed': 12, u'pu\u0161': 12, u'j\xe0#': 12, u'fg#': 12, u'lue': 12, u'\xe9so': 12, u'dmu': 12, u'l\xe1j': 12, u'\xe9l\xe8': 12, u'diy': 12, u'j\u013ea': 12, u'j\u013eu': 12, u'r\xedu': 12, u'#\u0161m': 12, u'\u0107a#': 12, u'l\xedl': 12, u'\xfasa': 12, u'o\u0161r': 12, u'jd\u0148': 12, u'cs\xe1': 12, u'ry\xe1': 12, u'oks': 12, u'tu\u017e': 12, u'\u0142os': 12, u'#dk': 12, u'ewo': 12, u'ly\u0148': 12, u'\xe1v\xed': 12, u'dae': 12, u'dwi': 12, u'ysf': 12, u'coa': 12, u'tmy': 12, u'esj': 12, u'nbr': 12, u'ra\xfc': 12, u'ry\u0161': 12, u'l\u0161a': 12, u'a\xfa#': 12, u'a\xfal': 12, u'cnu': 12, u'\xed\u017eu': 12, u'p\u010de': 12, u'rpc': 12, u'#p#': 12, u'\xe1r\xed': 12, u'hib': 12, u'lja': 12, u'#mg': 12, u'\xfdr\u010d': 12, u'p\xe9z': 11, u'cze': 11, u'dgo': 11, u'uo\u017e': 11, u'\u0161\xedv': 11, u'cuo': 11, u'soi': 11, u'ha\u010d': 11, u'p\xedj': 11, u'a\xeds': 11, u'#\xf3s': 11, u'\u0119te': 11, u'\xfapm': 11, u'\u0161e\u013e': 11, u'aac': 11, u'ojf': 11, u'\xf3ga': 11, u'nro': 11, u'w#p': 11, u'szc': 11, u'jag': 11, u'tsw': 11, u'a#\xf6': 11, u'brv': 11, u'k\xe9r': 11, u'hr\xf3': 11, u'\xe3a#': 11, u'aib': 11, u'zcz': 11, u'\xedrm': 11, u'y#\u0148': 11, u'mbl': 11, u'mr\u0161': 11, u'eix': 11, u'\xed#\u0148': 11, u'\xfa\u010d\xed': 11, u'v\u010d\xed': 11, u'ffn': 11, u'uvm': 11, u'h\xfac': 11, u'\xe2nc': 11, u'eem': 11, u'sv\xe1': 11, u'lk#': 11, u'wid': 11, u'wim': 11, u'ur\u0103': 11, u'#w\xf6': 11, u'#j\xfc': 11, u'eah': 11, u'hn\xe1': 11, u'sji': 11, u'kiu': 11, u'kif': 11, u'#n+': 11, u'#n\xe9': 11, u'i\xe1\u0161': 11, u'y\xf6s': 11, u'or\u013e': 11, u'jf\xe1': 11, u'oio': 11, u'\xfany': 11, u'\xfcm#': 11, u'iul': 11, u'iud': 11, u'mm\xfa': 11, u'jba': 11, u'ceo': 11, u'ldu': 11, u'\u0161\u0165u': 11, u'oem': 11, u'nsj': 11, u'nsb': 11, u'\u03b5\u03b4\u03c1': 11, u'y\u017e\xed': 11, u'mi\u0144': 11, u'miq': 11, u'ubk': 11, u'nwa': 11, u'\xedum': 11, u'kuw': 11, u'to\xe1': 11, u'+#j': 11, u'gif': 11, u'ahh': 11, u'\u010da\u0161': 11, u'hzo': 11, u'\u0161e\u0148': 11, u'#\xe1\u0161': 11, u'suf': 11, u'a\u015fo': 11, u'gpo': 11, u'iei': 11, u'g\xe1#': 11, u'xp#': 11, u'bd#': 11, u'apm': 11, u'ed\u0148': 11, u'sih': 11, u'ouw': 11, u'r\xfav': 11, u'#v\u013e': 11, u'\xedd#': 11, u'ndl': 11, u'vab': 11, u's#x': 11, u'k\xf4\u0148': 11, u'yul': 11, u'i\u0107a': 11, u'umj': 11, u'nxp': 11, u'fpa': 11, u'dds': 11, u'vi\u0161': 11, u'\xedl\u010d': 11, u'xak': 11, u'ctr': 11, u'vzk': 11, u'odg': 11, u'rdf': 11, u'zih': 11, u'npo': 11, u'cbd': 11, u'\xf4\u0148#': 11, u'o\u0161\xfa': 11, u'\u013e\u0161e': 11, u'epg': 11, u'g\u0142a': 11, u'g\u0142o': 11, u'\xe8ro': 11, u'sps': 11, u'wca': 11, u'jtm': 11, u'yt\u010d': 11, u'tdo': 11, u'l\xf4\u017e': 11, u'\u017eub': 11, u'li\xe8': 11, u'st\xe2': 11, u'ut\u0155': 11, u'ruu': 11, u'idz': 11, u'naw': 11, u'#hc': 11, u'be\u0148': 11, u'jli': 11, u'dm\xed': 11, u'\xedcm': 11, u'#l\xf4': 11, u'ygr': 11, u'n#q': 11, u'wwe': 11, u'\u0142as': 11, u'\u013ak#': 11, u'xpr': 11, u'tm\xe1': 11, u'bm#': 11, u'i\xfas': 11, u'o\u0161#': 11, u'de\xdf': 11, u'ab\u013a': 11, u'\xfale': 11, u'#dg': 11, u'r\xe9s': 11, u'ik\xf3': 11, u'\xe1fi': 11, u'r\u013ea': 11, u'\xf3ba': 11, u'ly\u0161': 11, u'sdp': 11, u'e\u013et': 11, u'og\u0142': 11, u'ksw': 11, u'#xx': 11, u'j\xed\u010d': 11, u'nfp': 11, u'pi\xed': 11, u'p#\u010f': 11, 
u'a\xe7o': 11, u'aj\u017e': 11, u'zb\xed': 11, u'nun': 11, u'tij': 11, u'w\xf6g': 11, u'\xf3sc': 11, u'd\xe9e': 11, u'\xe1\u0148#': 11, u'vzc': 11, u'tey': 11, u'uw\xfc': 11, u'k\u0161i': 11, u'r#\xe1': 11, u'\u0148sk': 11, u'arq': 11, u'arw': 11, u'us\u0142': 11, u'\xe1vd': 11, u's\xfa\u017e': 11, u'bby': 11, u'mf\xe1': 11, u'mf\xf3': 11, u'\xed\u0161t': 10, u'\xf4co': 10, u'yfo': 10, u'i\u0171t': 10, u'irp': 10, u'm\u0161e': 10, u'mb\xe1': 10, u'osw': 10, u'\xednk': 10, u'heh': 10, u'scv': 10, u'xon': 10, u'lo\u0148': 10, u'\xfamm': 10, u'psb': 10, u'\u0161#g': 10, u'\xe8ge': 10, u'\u013e#\xed': 10, u'zti': 10, u'\u013e#\u013e': 10, u'ch\xe4': 10, u'sz\xe9': 10, u'b#\u010f': 10, u'lfr': 10, u'mi\u0161': 10, u'nkv': 10, u'emj': 10, u'hr\xfa': 10, u'\u0161ma': 10, u'\xe0#p': 10, u'\xe1m\xed': 10, u'no\u0165': 10, u'\xe9fk': 10, u'am\xf3': 10, u'lof': 10, u'jri': 10, u'ce\u0161': 10, u'j\u0161\u0165': 10, u'kaa': 10, u'ozj': 10, u'wig': 10, u'#wr': 10, u'rs\xed': 10, u'ng\xf3': 10, u'kea': 10, u'ng\u013e': 10, u'eai': 10, u'\u0119sa': 10, u'ov\xf4': 10, u'dr\u010d': 10, u'\xf3n\u010d': 10, u'r\u017et': 10, u'fs#': 10, u'ayy': 10, u'\u017ece': 10, u'zsv': 10, u'vhi': 10, u'gaf': 10, u'sye': 10, u'syd': 10, u'chz': 10, u'j\u0161p': 10, u'oac': 10, u'zdc': 10, u'vpu': 10, u'nhs': 10, u'toy': 10, u'pk\xfa': 10, u'+#v': 10, u'#\xe1d': 10, u'hsk': 10, u'ky\xf6': 10, u'ro\u015f': 10, u'\xfa\u0165m': 10, u'pps': 10, u'u\u010dt': 10, u'u\u010dm': 10, u'iau': 10, u'uyh': 10, u'tki': 10, u'pow': 10, u'rk\u016b': 10, u'oy#': 10, u'r\xe3a': 10, u'\xfa\u0161e': 10, u'jse': 10, u'jso': 10, u'jss': 10, u'm\xedo': 10, u'g\xe1d': 10, u'cts': 10, u'hku': 10, u'ltd': 10, u'#g\u0142': 10, u'#vm': 10, u'jo\xe3': 10, u'\u010di\u0171': 10, u'##w': 10, u'dl#': 10, u'my\u0165': 10, u'\u015fia': 10, u'dh\xed': 10, u'jk\xe1': 10, u'fop': 10, u'k\u016bn': 10, u'#a+': 10, u'#a\xfa': 10, u'iph': 10, u'\xfarm': 10, u'#pb': 10, u'\u0171t\xeb': 10, u'\xe1ga': 10, u'\xe1gu': 10, u'\xe9ze': 10, u'se\u010d': 10, u'ziz': 10, u'o\u0165a': 10, u'bta': 10, u'ze\xed': 10, u'yps': 10, u'ilf': 10, u'ilh': 10, u'u\xeds': 10, u'\xe9\u0161u': 10, u'x#l': 10, u'jty': 10, u'#\u017ed': 10, u'#\u017en': 10, u'uxa': 10, u'uxo': 10, u'cce': 10, u'mpy': 10, u'mp\xf3': 10, u'wov': 10, u'wea': 10, u'gst': 10, u'pyc': 10, u'i\xe8g': 10, u'lu\xed': 10, u'zys': 10, u'\xe1\u010fa': 10, u'#l\xf6': 10, u'u\xe1n': 10, u'n#y': 10, u'\u010d#\u017e': 10, u'ba\u013e': 10, u'zn#': 10, u's\xf3g': 10, u'r\xe4s': 10, u'd#y': 10, u'pab': 10, u'xbr': 10, u'\xe9ov': 10, u'\u017e#\xe1': 10, u'lfg': 10, u'ogg': 10, u'f#\u017e': 10, u'ksa': 10, u'b\u013eu': 10, u'\u0161\xfac': 10, u'io\xe1': 10, u'cox': 10, u'\xe9ch': 10, u'wsa': 10, u'a\xfa\u010d': 10, u'o\u015fi': 10, u'dl\xe9': 10, u'an\u0103': 10, u'e\xfau': 10, u'\xe9z#': 10, u'ju\xe1': 10, u'yn\u010d': 10, u'oj\u017e': 10, u'\u0165\u0161e': 10, u'efg': 10, u'due': 10, u'ar\u0148': 10, u'\u010dk\xfd': 10, u'mfy': 10, u'zv\xfa': 10, u'\xed\u0161a': 9, u'ir\xf4': 9, u'yfa': 9, u'irc': 9, u'j\xf6r': 9, u'az\xfa': 9, u'ji\u010d': 9, u'rzl': 9, u'vk\xfd': 9, u'uot': 9, u'yz#': 9, u'yzl': 9, u'ivr': 9, u'ez\xfd': 9, u'hey': 9, u'pbt': 9, u'xoc': 9, u'#fb': 9, u'wro': 9, u'jex': 9, u'\xe4d#': 9, u'rfs': 9, u'vo\u0161': 9, u'vo\xfa': 9, u'h\xe9r': 9, u'o\xfa\u017e': 9, u'\u0161##': 9, u'\u03af\u03b1\u03c3': 9, u't\xe1h': 9, u'w#c': 9, u'jaa': 9, u'\u0161id': 9, u'vsu': 9, u'h\xedm': 9, u'a#+': 9, u'inl': 9, u'br\xfc': 9, u'hrs': 9, u'wzn': 9, u'em\u0148': 9, u'\u0161my': 9, u'jvr': 9, u'ibo': 9, u'ibb': 9, u'z\xf3#': 
9, u'il\xf3': 9, u'\xf4h#': 9, u'\xe4ss': 9, u'\xfdn\xed': 9, u'#xb': 9, u'\xe9fr': 9, u'wy#': 9, u'h\xfa\u0148': 9, u'vr\u0161': 9, u'l\u010dy': 9, u'm#x': 9, u'\u03c3\u03c4\u03b9': 9, u'n\u0103#': 9, u'#wz': 9, u'rs\xe9': 9, u'yam': 9, u'yag': 9, u'ke\u0161': 9, u'\xf4\u017ek': 9, u'v\u0165a': 9, u'kit': 9, u'\xf6ff': 9, u'\u03c2##': 9, u'e#+': 9, u'\u0142ow': 9, u'yya': 9, u'#bj': 9, u'\xe4tu': 9, u'o#\xb0': 9, u'jby': 9, u'ldr': 9, u'oea': 9, u'sff': 9, u'r\xfcc': 9, u'k\u013ed': 9, u'elp': 9, u'\xf3f#': 9, u'\u03b4\u03c1\u03af': 9, u'\u010dac': 9, u'tk#': 9, u'alw': 9, u'suo': 9, u'\u03b1\u03c3\u03b7': 9, u'fge': 9, u'o\xe1n': 9, u'ylm': 9, u'drc': 9, u'r\u010dm': 9, u'\xe4ub': 9, u'#mk': 9, u'sod': 9, u'\xfaz#': 9, u'm\xe9m': 9, u'\xe1th': 9, u'dsp': 9, u'dsl': 9, u'dh\u013e': 9, u'h\xf3z': 9, u'ipt': 9, u'sa\u010f': 9, u'v#y': 9, u'n\xedl': 9, u'rx#': 9, u'\u0148ka': 9, u'zm\xf4': 9, u'ty\u0148': 9, u'evm': 9, u'\xf4vt': 9, u'\u0117ni': 9, u'tpa': 9, u'xas': 9, u'\xfaky': 9, u'et\u010d': 9, u'xej': 9, u'\xfddz': 9, u't\u0148a': 9, u'\xe9li': 9, u'\u010dyt': 9, u'r\xfdd': 9, u'ek\xf3': 9, u'htu': 9, u'lm\xed': 9, u'rl\xfa': 9, u'tl\xfd': 9, u'aee': 9, u'x#h': 9, u'\u015f#d': 9, u'ucs': 9, u'yo#': 9, u'asd': 9, u'p\xfa\u0165': 9, u'idj': 9, u'yc\xfa': 9, u'k\xf3n': 9, u'otw': 9, u'zyn': 9, u'foo': 9, u'up\xf4': 9, u'cmo': 9, u'o\xe3o': 9, u'i\u0119t': 9, u'\xfaus': 9, u'wwf': 9, u'ul\u0117': 9, u'kk\xfa': 9, u'\xfcck': 9, u'#\u0161\xe1': 9, u'l\xedr': 9, u'l\u0117n': 9, u'pa\xed': 9, u'r\xf6f': 9, u'#tg': 9, u'f\xe1r': 9, u'\u013etr': 9, u'\u03c5\u03bd\u03b5': 9, u'\xe9ku': 9, u'\xe9ke': 9, u'\u03c1\u03af\u03b1': 9, u'#d\u0103': 9, u'\xe1fo': 9, u'ewc': 9, u'bip': 9, u'feh': 9, u'lys': 9, u't\xe0#': 9, u'afy': 9, u't\u013ak': 9, u'f#g': 9, u'vry': 9, u'vrk': 9, u'iok': 9, u'fyl': 9, u'\xfad\u017e': 9, u'icm': 9, u'\u03c4\u03b9\u03c2': 9, u'\u03bd\u03b5\u03b4': 9, u'sro': 9, u'e\xfaa': 9, u'\xed\u017em': 9, u'gv#': 9, u'igs': 9, u'k\u0161e': 9, u'r#\xed': 9, u'ef\xed': 9, u'\u013edr': 9, u'#td': 9, u'#kf': 9, u'\u03c3\u03c5\u03bd': 9, u'av\xf3': 9, u'aad': 9, u'#\u03c3\u03c5': 9, u'#\u03c3\u03c4': 9, u'\u03b9\u03c2#': 9, u't\xed\xe1': 8, u'rly': 8, u'dnv': 8, u'zae': 8, u'#o\xe1': 8, u'frd': 8, u'tzm': 8, u'tzn': 8, u'j\xf6n': 8, u'soa': 8, u'dji': 8, u'\u010ds#': 8, u'#c\xe1': 8, u'\xfcl#': 8, u'yzr': 8, u'#qa': 8, u'scr': 8, u'sc\xed': 8, u'\xfapt': 8, u'm\u0148u': 8, u'\xe4\u0165p': 8, u'\xe4du': 8, u'ug#': 8, u'ef\xe9': 8, u'\u013e#\u010f': 8, u'lzb': 8, u'\u0103o#': 8, u'lz\xed': 8, u'szu': 8, u'jae': 8, u'weu': 8, u'\xedvd': 8, u'\xf6ma': 8, u'vsa': 8, u'vs#': 8, u'h\xedh': 8, u'\xe1il': 8, u'brd': 8, u'\xed\xe1n': 8, u'emf': 8, u'v\xe4c': 8, u'aie': 8, u'rnc': 8, u'kv\u0155': 8, u'wah': 8, u'waz': 8, u'y#\xf4': 8, u'mrk': 8, u'\u013ea\u010f': 8, u'as\xfa': 8, u'\u0159ic': 8, u'#s\u0155': 8, u'\xfa\u010dm': 8, u'cew': 8, u'nct': 8, u'\u0165av': 8, u'\u0165a\u010d': 8, u'dv\xe9': 8, u'lkr': 8, u'oxe': 8, u'vpa': 8, u'kek': 8, u'keg': 8, u'yah': 8, u's\xfdm': 8, u's\xfdc': 8, u'rz\u0161': 8, u'c#\xe1': 8, u'\u017eon': 8, u'mee': 8, u'\u0119su': 8, u'nfs': 8, u'\u0165#q': 8, u'zwa': 8, u'\u0165#\xf4': 8, u'fmo': 8, u'gym': 8, u'v\u013e\xfa': 8, u'\xfa\u013eo': 8, u'zd\u0148': 8, u'hbo': 8, u'\u0155n#': 8, u'ay\xe1': 8, u'\u010dri': 8, u'dkm': 8, u'uju': 8, u'nae': 8, u'#bt': 8, u'iuo': 8, u's\u0165s': 8, u's\u0165p': 8, u'\u013e\xfad': 8, u'sbs': 8, u'o#\xf3': 8, u'p\u013ea': 8, u'gaw': 8, u'gag': 8, u'ldc': 8, u'cum': 8, u'd\u0103i': 8, u'y\u017em': 8, u'eui': 8, u'sf\xe1': 8, 
u'\xdfun': 8, u'\u017e\u010f#': 8, u's\u0155d': 8, u'j\u0161t': 8, u'oap': 8, u'ubt': 8, u'nwi': 8, u'nhu': 8, u'vij': 8, u'\xfa\u0148o': 8, u'gid': 8, u'gih': 8, u'lu\xf3': 8, u'\xfabk': 8, u'\xfab\xed': 8, u'vtl': 8, u'gtp': 8, u'g#\xfa': 8, u'\xe1l\u0148': 8, u'\u013aka': 8, u'g#\u0161': 8, u'\xfdmu': 8, u'alj': 8, u'\xf1a#': 8, u'oyo': 8, u'oyd': 8, u'fg\xe1': 8, u'ie\xdf': 8, u'#i+': 8, u'yl\xe1': 8, u'yl\xed': 8, u'g\xe1z': 8, u'++#': 8, u'i#\xf3': 8, u'z\xe9n': 8, u'\xf1ez': 8, u'ouj': 8, u'\xf3#k': 8, u'joi': 8, u'fky': 8, u'l\xe0#': 8, u'n\xf4h': 8, u'at\xfc': 8, u'mdl': 8, u'dsv': 8, u'lp\xe1': 8, u'\xfdpi': 8, u'ds\u0165': 8, u'dh\xe9': 8, u'i\u0107o': 8, u'\u0161ch': 8, u'up\xf3': 8, u'foc': 8, u'#aq': 8, u'ydd': 8, u'dyr': 8, u'sao': 8, u'\u0103ia': 8, u'\xfars': 8, u'n\xedo': 8, u'gdf': 8, u'dd\xfd': 8, u'ddo': 8, u'acy': 8, u'uio': 8, u'fth': 8, u'\xe1gy': 8, u'pdl': 8, u'\xe9zo': 8, u'rdv': 8, u'\xfaku': 8, u'#ye': 8, u'ihm': 8, u'jsv': 8, u'ckl': 8, u'xei': 8, u'ptm': 8, u'te\xed': 8, u'u\xf3r': 8, u'uay': 8, u'uaz': 8, u'zes': 8, u'ntc': 8, u'ni\u0119': 8, u'em\u017e': 8, u'\xb0#\xb0': 8, u'hty': 8, u'\u0103es': 8, u'ek\u013a': 8, u'x#\u013e': 8, u'vuv': 8, u'tde': 8, u'v\u0155n': 8, u'\u017eup': 8, u'wee': 8, u'\xfa\u017ek': 8, u'ma\u0142': 8, u'boa': 8, u'rua': 8, u'cgt': 8, u'naa': 8, u'ec\xfa': 8, u'hld': 8, u'rt\xf3': 8, u'mg#': 8, u'ot\u0148': 8, u'xle': 8, u'jlo': 8, u'\u0161ib': 8, u'\xf3le': 8, u'#ld': 8, u'l\xe1\u010f': 8, u'is\xe9': 8, u'z\xe9#': 8, u'isb': 8, u't\u0155\u0148': 8, u'opd': 8, u'ixa': 8, u'hd#': 8, u'pah': 8, u'\u010d#\xfa': 8, u'hya': 8, u'rr\xfa': 8, u'abh': 8, u'gga': 8, u'a\u0161\xe9': 8, u'jda': 8, u'ab\u010d': 8, u'jd\xe1': 8, u'ik\u010d': 8, u'g\xf3o': 8, u'lyr': 8, u'm\xfch': 8, u'ksu': 8, u'#k\xed': 8, u'ioz': 8, u'nju': 8, u'buf': 8, u'bue': 8, u'pig': 8, u'\u0155dc': 8, u'aih': 8, u'xfo': 8, u'enj': 8, u'o\u013e#': 8, u'a\u0161c': 8, u'\xf3#a': 8, u'ocz': 8, u'wf#': 8, u'ywa': 8, u'\xf3sa': 8, u'hcf': 8, u'msc': 8, u'\u0161\u0148u': 8, u'e\xdf#': 8, u'r\u0103o': 8, u'e\xdfu': 8, u'\xe9gl': 8, u'huc': 8, u'bo\u0161': 8, u'rm\u0155': 8, u'juz': 8, u'uwa': 8, u'dud': 8, u'#t\xfc': 8, u'cfp': 8, u'vc\xed': 8, u'h#x': 8, u'#mf': 8, u'#mw': 8, u'#k\u0161': 8, u'v\xf3#': 8, u'lvs': 8, u'\xf8re': 7, u'p\xe9d': 7, u'czw': 7, u'fim': 7, u'ir\xed': 7, u'rgl': 7, u'hay': 7, u'haw': 7, u'\u015fti': 7, u'azg': 7, u'os\xf3': 7, u'ufg': 7, u'\xe9vy': 7, u'\u017ee\u0161': 7, u'\xe4ho': 7, u'iv\u0161': 7, u'\xfcla': 7, u'kja': 7, u'ivm': 7, u'tv\xfa': 7, u'k\xfal': 7, u'bj\xed': 7, u'ez\u013e': 7, u'pbe': 7, u'b\xfan': 7, u'gfl': 7, u'aab': 7, u'\u0161eh': 7, u'ugb': 7, u'rfl': 7, u'amd': 7, u'a\xf1a': 7, u'w#t': 7, u'w#w': 7, u'w#z': 7, u'szi': 7, u'jau': 7, u'ucp': 7, u'ja\u0161': 7, u'cvm': 7, u'wef': 7, u'a#\xe0': 7, u'br#': 7, u'aiu': 7, u'i\xf1e': 7, u'lck': 7, u'\xe0##': 7, u't\xfav': 7, u'jve': 7, u'uzy': 7, u'l\xf2#': 7, u'\xe1mt': 7, u'sox': 7, u'pn#': 7, u'mrd': 7, u'lfs': 7, u'hvo': 7, u'\u013eas': 7, u'y\u010do': 7, u'\xed#\xf4': 7, u'#s\u013a': 7, u'=sk': 7, u'k\u010do': 7, u'vea': 7, u'\u0148#\xed': 7, u'uv\xfd': 7, u'ioi': 7, u'\xfd\u013et': 7, u'kag': 7, u'bzp': 7, u'lk\xf3': 7, u'pi\xf1': 7, u'\xf4\u0161t': 7, u'ur\xe9': 7, u'urh': 7, u'b\xe9c': 7, u'c#w': 7, u'#z\u012b': 7, u'meu': 7, u'#\u0155#': 7, u'\u0119so': 7, u'n\u017ea': 7, u'om\xfa': 7, u'kiz': 7, u'boi': 7, u'ayn': 7, u'bo\u015f': 7, u'lsr': 7, u'\u013az#': 7, u'vhz': 7, u'e\u015fb': 7, u'\xe1\u0161\u0148': 7, u'eyk': 7, u'mmo': 7, u'\xe4#\u0165': 7, u'#hb': 7, u'aeg': 7, 
u'aei': 7, u'\xe4st': 7, u'\u03b5\u03b9#': 7, u'jgl': 7, u'#fm': 7, u'vly': 7, u'\xf6ns': 7, u'\u03c3\u03b7#': 7, u'iiz': 7, u'#\xf6v': 7, u'v\xe1\u013e': 7, u'\u015fbo': 7, u'u\u0161m': 7, u'gm\xe1': 7, u'ad\u013a': 7, u'kuf': 7, u'\u010de\u013e': 7, u'\u013e\u0148a': 7, u'\u0165ho': 7, u'+#z': 7, u'hsi': 7, u'ppp': 7, u'llg': 7, u'ro\xe1': 7, u'\xe1ng': 7, u'shr': 7, u'l\xf3a': 7, u'\xe1lv': 7, u'a\u0148s': 7, u'o\u015ft': 7, u'nns': 7, u'\u012b#d': 7, u'v\xe9e': 7, u'eh\xfd': 7, u'\xe9en': 7, u'lh\u0103': 7, u'al\xe0': 7, u'sug': 7, u'ap\xfa': 7, u'cdi': 7, u'uun': 7, u'n\u0161#': 7, u'\xf2#r': 7, u'tgv': 7, u'#iw': 7, u'yl\xfd': 7, u'ylb': 7, u'l\xe4h': 7, u'hk#': 7, u'\u03b9#\u03c3': 7, u'\xf1er': 7, u'#v\u0155': 7, u'#v\u0165': 7, u'\xf6vp': 7, u'\xf3me': 7, u'kdo': 7, u'myh': 7, u'myd': 7, u'ho\u0148': 7, u'voo': 7, u'atw': 7, u'atx': 7, u'\xfavl': 7, u'ztv': 7, u'\xe1ll': 7, u'ol\xf2': 7, u'khu': 7, u'nlu': 7, u'#ax': 7, u'fpv': 7, u'ttf': 7, u'\xf8kk': 7, u'u#\xe9': 7, u'acp': 7, u'agf': 7, u'ohd': 7, u'p\xf4\u0161': 7, u'itg': 7, u'ex\xed': 7, u'bh#': 7, u'ev\xe9': 7, u'\u0165os': 7, u'o\u0165m': 7, u'npr': 7, u'\xedhk': 7, u'#yv': 7, u'ckf': 7, u'ckb': 7, u'xer': 7, u'\u0161pr': 7, u'\xfcan': 7, u'ak\u013e': 7, u'kik': 7, u'b\xe1j': 7, u'\u017e\xedn': 7, u'bps': 7, u'ykj': 7, u'mta': 7, u'yk\u013e': 7, u'sp\u013e': 7, u'eks': 7, u'aog': 7, u'#\u017em': 7, u'x\xedv': 7, u'etd': 7, u'tdi': 7, u'nmo': 7, u'yol': 7, u'yob': 7, u'\u017e\xfa#': 7, u'\xfdly': 7, u'mpb': 7, u'\xe9d#': 7, u'puv': 7, u'\xe9d\xe9': 7, u'\u017euv': 7, u'b\xe9l': 7, u's\u013az': 7, u'\u012ble': 7, u'ru\xed': 7, u'l#x': 7, u'l\xf8k': 7, u'z#y': 7, u'ycy': 7, u'yci': 7, u'yco': 7, u'd\u010dt': 7, u'be\u015f': 7, u'be\u0161': 7, u'yry': 7, u'e\xf3n': 7, u'tgi': 7, u'swe': 7, u'#l\xf8': 7, u'yge': 7, u'ne\u0165': 7, u'er\u017e': 7, u'n#\u0151': 7, u'isj': 7, u'n#\xf6': 7, u'#\u03b1#': 7, u'el\xf3': 7, u'\u010d#\u0161': 7, u'mcl': 7, u'\u017emu': 7, u't\u0155\u017e': 7, u'op\xe9': 7, u'zut': 7, u't\u0155h': 7, u'hde': 7, u'pax': 7, u'hda': 7, u'ggc': 7, u'okk': 7, u'#ds': 7, u'\u0161pv': 7, u'\u0155ha': 7, u'lyi': 7, u'\u017eej': 7, u'afj': 7, u'gc#': 7, u'lfu': 7, u'm\xfcn': 7, u'udw': 7, u'ogd': 7, u'ksi': 7, u'\u0155\u0148o': 7, u'#xa': 7, u'ysn': 7, u'pip': 7, u'#kw': 7, u'ajg': 7, u'\u017er\xfa': 7, u'raa': 7, u'\xfazy': 7, u'lb#': 7, u'occ': 7, u'iu\u010d': 7, u'eyj': 7, u'eyl': 7, u'ti\xe8': 7, u'z\u012bl': 7, u'd\xe9p': 7, u'\xe9go': 7, u'juy': 7, u'\u010don': 7, u'i\u0161m': 7, u'ynk': 7, u'hij': 7, u'duo': 7, u'\xf3mk': 7, u'\u010f#\u0165': 7, u'zz\xe1': 7, u'fbi': 7, u'zze': 7, u'#tt': 7, u'#tn': 7, u'te\xfa': 7, u'h\xfdn': 7, u's\xfa\u010f': 7, u'bbl': 7, u'hm\xe1': 7, u'mfi': 7, u'h\u0103e': 7, u'p\xe9e': 6, u'kfw': 6, u'\xed\u0161\u0165': 6, u'ir\xe9': 6, u'irh': 6, u'\u015f#p': 6, u'fr\xed': 6, u'haf': 6, u'k\xf6s': 6, u'j\xf6k': 6, u'b\xedh': 6, u'm\u0161u': 6, u'\u0144#a': 6, u'mbp': 6, u'uks': 6, u'\u0161a\u0161': 6, u'fm#': 6, u'a\xedd': 6, u'yza': 6, u'jez': 6, u'b\xfa\u013e': 6, u'\xe4dl': 6, u'\u0161ep': 6, u'ugm': 6, u'ojb': 6, u'\xfamu': 6, u'nre': 6, u'voh': 6, u'ij\xe1': 6, u's\xf6d': 6, u'ev\u010f': 6, u'sgo': 6, u'lzk': 6, u't\u013ac': 6, u'w#e': 6, u'w#j': 6, u'w#l': 6, u'jaw': 6, u'db\xed': 6, u'\u0161i\u010d': 6, u'a#\u0151': 6, u'p\xfa\u013e': 6, u'\xedvt': 6, u'\u0163i#': 6, u'\u017e#w': 6, u'l\xf6v': 6, u'lcz': 6, u'rnw': 6, u'ums': 6, u'gud': 6, u'uz\xfa': 6, u'z\xf3v': 6, u'no\xfa': 6, u'\xe4su': 6, u'yio': 6, u'\xe8te': 6, u'\u013eaz': 6, u'psh': 6, u'gja': 6, u'srs': 6, 
u'#aw': 6, u'#s\xf6': 6, u'tfe': 6, u'h\xfaf': 6, u'ymj': 6, u'os\u0142': 6, u'l\xfa\u0161': 6, u'\xe1uy': 6, u'amj': 6, u'\xe8le': 6, u'#\xfa#': 6, u'ovb': 6, u'\u0165#y': 6, u'zwe': 6, u'\u013ery': 6, u'gss': 6, u'gy\xfc': 6, u'fnl': 6, u'kid': 6, u'#nr': 6, u'vd\xe1': 6, u'e\u017es': 6, u'cge': 6, u'bo\u010f': 6, u'mae': 6, u'a\u0163i': 6, u'ge=': 6, u'\u015bni': 6, u'oil': 6, u'\xfarz': 6, u'vhp': 6, u'\xfd#w': 6, u'iuu': 6, u'v\xf4\u0148': 6, u'fw#': 6, u'k\xfdv': 6, u'hf#': 6, u'\xe4#\u010f': 6, u'gao': 6, u'gak': 6, u'syo': 6, u'ld\u0159': 6, u's\xe9g': 6, u'd\u0103#': 6, u'ch\xe8': 6, u'eue': 6, u'bwa': 6, u'ai\xe9': 6, u'mua': 6, u'v\xe1u': 6, u'gm\xe9': 6, u'dct': 6, u'm\xfac': 6, u'oau': 6, u'oas': 6, u'xxx': 6, u'kuy': 6, u'nh\xe1': 6, u'xx#': 6, u'+#t': 6, u'xxo': 6, u'hsa': 6, u'ppm': 6, u'fja': 6, u'ah\xf4': 6, u'd\u017e\xfa': 6, u'd\u017e\xed': 6, u'y\xfcr': 6, u'\xf6vi': 6, u'iaf': 6, u'l\xf3v': 6, u'rwi': 6, u'#\xe5s': 6, u'vyg': 6, u'#rd': 6, u'js\xfa': 6, u'jsu': 6, u'ylk': 6, u'\u010d#f': 6, u'z\xe9k': 6, u'd\u0159i': 6, u'hka': 6, u'j\xfca': 6, u'#\u010dm': 6, u'\xf3#o': 6, u'\xfazn': 6, u'\xfazm': 6, u'\xf3#p': 6, u'\xfami': 6, u'\u013ac\u0165': 6, u'joy': 6, u'##\xe8': 6, u'\xe8#r': 6, u's#\xe0': 6, u'myt': 6, u'xt\xe1': 6, u'txe': 6, u'#gg': 6, u'atj': 6, u'xin': 6, u'xio': 6, u'oqu': 6, u'lpa': 6, u'nr\xe1': 6, u'dh\xe1': 6, u'khs': 6, u'um\u010d': 6, u'a\xefd': 6, u'h\xf3r': 6, u'ssm': 6, u'ttl': 6, u'bl\u010d': 6, u'vi\u017e': 6, u'\xf6ku': 6, u'exx': 6, u'pdc': 6, u'n\xe1u': 6, u'ueo': 6, u'ueb': 6, u'ctp': 6, u'rd\u010d': 6, u'zi\xfa': 6, u'zi\xe8': 6, u'npa': 6, u'od\u0103': 6, u'tl\u010d': 6, u'phd': 6, u'la\u0163': 6, u'i\xf3m': 6, u'uag': 6, u'ze\xfa': 6, u'sua': 6, u'm\xe4l': 6, u'coz': 6, u'niy': 6, u'epd': 6, u'\u010f#\u010f': 6, u'aoc': 6, u'aom': 6, u'ghz': 6, u'\xfac\xed': 6, u'm\xe0#': 6, u'\u017e\u013ea': 6, u'\xf3ti': 6, u'lgy': 6, u'o\xebl': 6, u'yn\xe9': 6, u'hho': 6, u'dib': 6, u'asg': 6, u'rh\xf3': 6, u'lir': 6, u'rhi': 6, u'woe': 6, u'ruf': 6, u'na\u012b': 6, u'utg': 6, u'yc#': 6, u'yrr': 6, u'beg': 6, u'awr': 6, u'pym': 6, u'zyf': 6, u'\xe9se': 6, u'jlu': 6, u'tya': 6, u'is\u0142': 6, u'a\u010dt': 6, u'\xf4lo': 6, u'di\u0107': 6, u'ooc': 6, u'kka': 6, u'tui': 6, u'f\xe1t': 6, u'f\xe1n': 6, u'gg#': 6, u'ggs': 6, u'\u0142op': 6, u'h\xe8l': 6, u'g\u013ea': 6, u'z\xfal': 6, u'fus': 6, u'bif': 6, u'bi\xf3': 6, u'jzj': 6, u'gow': 6, u'af\xed': 6, u'\xe1v\xe1': 6, u'lf\xed': 6, u'm\xfcl': 6, u'udt': 6, u'udd': 6, u're\u015f': 6, u'zf#': 6, u'\xf6ll': 6, u'es\xfd': 6, u'\xe1jk': 6, u'njo': 6, u'esg': 6, u'esh': 6, u'aif': 6, u'nf\xe1': 6, u'aj\xf6': 6, u'\xfd\u010d\xed': 6, u'thn': 6, u'\xf3#t': 6, u'ocd': 6, u'\xfadk': 6, u'kwa': 6, u'lcd': 6, u'cn#': 6, u'\u0161\xe1r': 6, u'd\xe9t': 6, u'hue': 6, u'hui': 6, u'rm\xe0': 6, u'\xed\u017et': 6, u'ln\u0161': 6, u'a\u012b#': 6, u'rvh': 6, u'\xe1rg': 6, u'fax': 6, u'jva': 6, u'r#\u0165': 6, u'iz\xe9': 6, u'guo': 6, u'izj': 6, u'rix': 6, u'\u010dk\xfa': 6, u'rrh': 6, u'taa': 6, u'byb': 6, u'j\xe1c': 6, u'd\xe1\u0161': 6, u'ybm': 6, u'avf': 6, u'j\xfat': 6, u'mfl': 6, u'skp': 6, u'\xe9rv': 5, u'p\xe9m': 5, u't\xed\u010d': 5, u'jm\xed': 5, u'uoc': 5, u'\xedbr': 5, u'irn': 5, u'rgw': 5, u'u#\xe0': 5, u'\u0144#v': 5, u'\xe4li': 5, u'mbh': 5, u'f\xe2n': 5, u'os\xfd': 5, u't\u0103l': 5, u'jid': 5, u'jiu': 5, u'ji\u0159': 5, u'fmi': 5, u'zoe': 5, u'jtu': 5, u'sda': 5, u'#c\u0103': 5, u'bjr': 5, u'\u0103t\u0103': 5, u'byz': 5, u'pb#': 5, u'\xfap#': 5, u'\xe4\u0165t': 5, u'ugl': 5, u'ojl': 5, u'cru': 5, 
u'vo\u0148': 5, u'vo\xe1': 5, u'vog': 5, u'amz': 5, u'o\xfas': 5, u'\u0107om': 5, u'\u0155zd': 5, u'w#u': 5, u'#\u013ev': 5, u'ucn': 5, u'\u0161i\u0107': 5, u'cva': 5, u'zga': 5, u'a#\u0163': 5, u'wed': 5, u'inx': 5, u'inr': 5, u'a#\xf8': 5, u'a#\xb0': 5, u'req': 5, u'udg': 5, u'k\xe9\u0161': 5, u'aii': 5, u'obp': 5, u'waf': 5, u'yvk': 5, u'a+#': 5, u'y#\xe9': 5, u'\xfdn\u010d': 5, u'\xfdnu': 5, u'hvd': 5, u'k\xedl': 5, u'eih': 5, u'k\xe4i': 5, u'amc': 5, u'psp': 5, u'ps\xed': 5, u'am\xfc': 5, u'sri': 5, u'rja': 5, u'\xed#q': 5, u'jrm': 5, u'puu': 5, u'#sj': 5, u'a++': 5, u'#s\xe8': 5, u'm#y': 5, u'm#\xf3': 5, u'm#\u0148': 5, u'n\xf3t': 5, u'hjo': 5, u'eec': 5, u'\xe4tc': 5, u'c\xe9u': 5, u'rs\u0165': 5, u'#wy': 5, u'\xe1uk': 5, u'\xf3ju': 5, u's\xfdp': 5, u'uk\u0161': 5, u'mza': 5, u'eav': 5, u'a\u010f#': 5, u'\xe2nt': 5, u'b\xf3o': 5, u'c\u0103t': 5, u'om\xe4': 5, u'\xf3nt': 5, u'kii': 5, u'v\u013ea': 5, u'yea': 5, u'#nv': 5, u'sn\u0161': 5, u'boy': 5, u'fee': 5, u'i\u013ep': 5, u'c\xe1r': 5, u'\u013evo': 5, u'\xfcls': 5, u'\xe4in': 5, u'oie': 5, u'ujo': 5, u'ujd': 5, u'iuk': 5, u'iua': 5, u'twe': 5, u'\xe1\u0161\xfa': 5, u'hfc': 5, u'pce': 5, u'\u015fen': 5, u'sbz': 5, u'mmt': 5, u'i\u017ea': 5, u'gav': 5, u'jbu': 5, u'jbi': 5, u'cup': 5, u'ld\xed': 5, u'oeo': 5, u'nsn': 5, u'\xf4tu': 5, u'\xf3fa': 5, u'ns\xe1': 5, u'dd+': 5, u'ch\xf6': 5, u'sfr': 5, u'g\xfdn': 5, u'#\u015fe': 5, u'wyo': 5, u'wyn': 5, u'oa\u0161': 5, u'm\xfak': 5, u'ubp': 5, u'\xfafo': 5, u'kud': 5, u'a\xdf#': 5, u'ecz': 5, u'#zk': 5, u'\u010dez': 5, u'bsc': 5, u'\xfcrg': 5, u'pk\xfd': 5, u'+#d': 5, u'+#m': 5, u'ppr': 5, u'gij': 5, u'j\xfcr': 5, u'\xfa\u0165t': 5, u'shu': 5, u'ahs': 5, u'iai': 5, u'\u010dau': 5, u'l\xf3s': 5, u'fcc': 5, u'tkr': 5, u'nld': 5, u'lcs': 5, u'oys': 5, u'#ao': 5, u'#rv': 5, u'gpa': 5, u'cde': 5, u'iex': 5, u'tgo': 5, u'qat': 5, u'i\u0159\xed': 5, u'g\xe1b': 5, u'exh': 5, u'i#+': 5, u'hky': 5, u'hki': 5, u'#\u010ds': 5, u'\u0161ts': 5, u'b\xf4\u017e': 5, u'ltl': 5, u'wh#': 5, u't\xf3l': 5, u't\xf3z': 5, u'rph': 5, u'rp\xe9': 5, u'\xe8#a': 5, u'ndv': 5, u's#\xf3': 5, u'myj': 5, u's#y': 5, u'b\u0155z': 5, u'hoi': 5, u'\u0161re': 5, u'dhu': 5, u'\u010dua': 5, u'ol\u017e': 5, u'ip\xf3': 5, u'sa\xed': 5, u'vfo': 5, u'v#\xf3': 5, u'v#q': 5, u'v#x': 5, u'\xe9ti': 5, u'\xfar\xe1': 5, u'\xfar\xed': 5, u't\xeb#': 5, u'cp\xe4': 5, u'uie': 5, u'cpn': 5, u'\xedlu': 5, u'klb': 5, u'klm': 5, u'fts': 5, u'\xe1g#': 5, u'\xe1go': 5, u'se\xe1': 5, u'ml#': 5, u'p\u010da': 5, u'mlo': 5, u'\xfdma': 5, u'le\xf3': 5, u'rd\u017e': 5, u's\xe8t': 5, u'bty': 5, u'tl#': 5, u'unb': 5, u'\u0103a#': 5, u'hpe': 5, u'\u0161oc': 5, u'gls': 5, u'o\u0161m': 5, u'ntz': 5, u'ntt': 5, u'\xedtn': 5, u'cog': 5, u'\xb0#c': 5, u'il\u010d': 5, u'\xfcss': 5, u'bpt': 5, u'vdc': 5, u'xys': 5, u'ht\xf3': 5, u'ek\xed': 5, u'\u010f#\xe1': 5, u'#\xe0s': 5, u'\u0161st': 5, u'x#\xfa': 5, u'wc#': 5, u'\xfa\u010fm': 5, u'#\u017e\u013e': 5, u'gwe': 5, u'vut': 5, u'\u017e\xe1v': 5, u'uxy': 5, u'\xe1sc': 5, u'tdv': 5, u'\u017e\xfas': 5, u'\xe9dk': 5, u'\xe9da': 5, u'std': 5, u'stt': 5, u'as\u013e': 5, u'liq': 5, u'lix': 5, u'oxl': 5, u'wom': 5, u'won': 5, u'jp\xf4': 5, u'wep': 5, u'z#q': 5, u'yc\xed': 5, u'n\u017eu': 5, u'i\u017ek': 5, u'#hp': 5, u'\u0155ni': 5, u'#\xf8r': 5, u'jzv': 5, u'pyw': 5, u'd+#': 5, u'ot\xfa': 5, u'zyd': 5, u'\xe9sz': 5, u'c\u0142a': 5, u'\xfd\u0148a': 5, u'\xedcp': 5, u'up\xe9': 5, u'vf#': 5, u'#lc': 5, u'r\xe1j': 5, u'l\xe1l': 5, u'ba\xf1': 5, u'\xe9u#': 5, u'sj\xf8': 5, u'wws': 5, u'b\u010d\xed': 5, u'\u013epo': 5, 
u't\u0155n': 5, u'tnt': 5, u'di\u013e': 5, u'ooa': 5, u'ooo': 5, u'di\u017e': 5, u'nyo': 5, u'on\xe7': 5, u'd#q': 5, u'paw': 5, u'bm\xfd': 5, u'a\u017e\xfa': 5, u'xn\xed': 5, u'f\xe1g': 5, u'\u017eak': 5, u'b\xfdj': 5, u'a\u0161\xe1': 5, u'hy\u0165': 5, u'ryk': 5, u'zj\xed': 5, u'b\xe6k': 5, u'a\u015fc': 5, u'\u015bwi': 5, u'ko\xfa': 5, u'r\xe9b': 5, u'ik\xe4': 5, u'ewe': 5, u'pep': 5, u'#b\u0155': 5, u'pe=': 5, u'fej': 5, u'lyu': 5, u'lyw': 5, u'ufr': 5, u'e=s': 5, u'\u0103li': 5, u'e\u013ep': 5, u'f#\xfa': 5, u'\xe0s#': 5, u'og\xfd': 5, u'\u0161\xfar': 5, u'vr\u010f': 5, u'\xe9bu': 5, u'nfc': 5, u'tm#': 5, u'\xe1\u013et': 5, u'buy': 5, u'\u0144ig': 5, u'bug': 5, u'en\xf3': 5, u'tl\xfa': 5, u'kef': 5, u'd\xed\u010f': 5, u'hyh': 5, u'ajw': 5, u'\xfd\u010dk': 5, u'\u0155\u017ei': 5, u'\xe4co': 5, u'oc\u0142': 5, u'zb\u013a': 5, u'nu\u0161': 5, u'nu\u010d': 5, u'ywo': 5, u'd\xe9c': 5, u'an\u0163': 5, u'huk': 5, u'ylc': 5, u'e\xfav': 5, u'e\xfal': 5, u'e\xfad': 5, u'z\u0161k': 5, u's\xe3o': 5, u'\xe1rt': 5, u'uwe': 5, u'\xf3we': 5, u'\xf3w#': 5, u'y#y': 5, u'duu': 5, u'riy': 5, u'lje': 5, u'cae': 5, u'#ks': 5, u'\xf3p\u010d': 5, u'nf\xfa': 5, u'eb\xe6': 5, u'hmu': 5, u'tje': 5, u'az\xf3': 5, u'p\xe9t': 4, u'czn': 4, u'\xed\u0161s': 4, u'\xe6k#': 4, u'kfi': 4, u'yfu': 4, u'cub': 4, u'tzs': 4, u'\u015f#n': 4, u'\xfdnm': 4, u'j\xf6l': 4, u'\u0144#k': 4, u'wsp': 4, u'oeg': 4, u't\xe9p': 4, u't\xe9l': 4, u'rz\u0105': 4, u'jik': 4, u'\u0161af': 4, u'ld\u017e': 4, u'\u010dse': 4, u'\u03c1\u03c7\u03af': 4, u'ukm': 4, u'rz\xfd': 4, u'\u017el\u010d': 4, u't#x': 4, u't#q': 4, u'ez\u0161': 4, u'#fn': 4, u'je\u013e': 4, u'\u013eul': 4, u'\xe4\u0165h': 4, u'dfr': 4, u'aaf': 4, u'ugs': 4, u'crc': 4, u'd\u0165a': 4, u'voy': 4, u'a\xf1e': 4, u'\xf3z\u0148': 4, u'chj': 4, u'ci+': 4, u'fza': 4, u'ci\xf2': 4, u'\u013e#\xe1': 4, u'tr\xf4': 4, u'\u013e#w': 4, u'pfe': 4, u'srh': 4, u'xch': 4, u'sg\xe9': 4, u'r\xf3v': 4, u'gch': 4, u'lz\xe1': 4, u'w##': 4, u'\u015fcu': 4, u'w#i': 4, u'\xe9n\xe9': 4, u'uc\xed': 4, u'r\u0165a': 4, u'cv\xe1': 4, u'wet': 4, u'p\xfan': 4, u'l\xf6s': 4, u'c\xfa\u0161': 4, u'c\xfa\u010d': 4, u'\xfdbi': 4, u'mv\u010d': 4, u'gnp': 4, u'aip': 4, u'gn\xe9': 4, u'\xe0#d': 4, u't\xfa\u013e': 4, u'h\xe1\u013e': 4, u'wam': 4, u'wak': 4, u'wax': 4, u'r\u0161\u0165': 4, u'k#\xe9': 4, u'k#\xf4': 4, u'ca\u010d': 4, u'guu': 4, u'ibn': 4, u'\u03b6\u03b5\u03b9': 4, u'\xe1mp': 4, u'soe': 4, u'no\xeb': 4, u's\u012bt': 4, u'yia': 4, u'pns': 4, u'no\u015b': 4, u'j#x': 4, u'azt': 4, u'dz\xe1': 4, u'ps\u012b': 4, u'\xe1d\xe1': 4, u'lo\xe1': 4, u's\u0148a': 4, u'v\u010dk': 4, u'#sg': 4, u'#sq': 4, u'ce=': 4, u'h\xfa\u0165': 4, u'k\u010di': 4, u'if\xfa': 4, u'h\xfab': 4, u'h\xfar': 4, u'kaw': 4, u'm#q': 4, u'yms': 4, u'y\xe1\xf1': 4, u'e#\u0163': 4, u'dv\u0161': 4, u'\u0161up': 4, u'i\xe9f': 4, u'wiz': 4, u'\xe9\u017ei': 4, u'\xf4\u0161#': 4, u'l\xfas': 4, u'\xede#': 4, u'ngc': 4, u'ngj': 4, u'\xf4ze': 4, u't\u0161t': 4, u'qf#': 4, u'kee': 4, u't\u0161#': 4, u'ng\u0165': 4, u'jir': 4, u'ibp': 4, u'ya\xf1': 4, u'sj#': 4, u'o\u0148m': 4, u'me\u010f': 4, u'ovd': 4, u'\u0161a\u010d': 4, u'b\xf3j': 4, u'b\xf3a': 4, u'\u0165#\xf3': 4, u'ukh': 4, u'gyt': 4, u'#\u0148a': 4, u'#uf': 4, u'doi': 4, u'\u03c2#\u03c3': 4, u'm\xf6#': 4, u'rpl': 4, u'omj': 4, u'omq': 4, u'omw': 4, u'unl': 4, u'\xe1\u010d\xed': 4, u'\xe1\u010du': 4, u'yek': 4, u'\u03af\u03b6\u03b5': 4, u'#ns': 4, u'#nc': 4, u'axu': 4, u'\xed\xf1i': 4, u'fsm': 4, u'\xedu\u010d': 4, u'v\xf4k': 4, u'\xf3\u0142y': 4, u'v\xf4n': 4, u'\u03b7#\u03b1': 4, u'ayd': 4, 
u'i\u013en': 4, u'lsm': 4, u'lsh': 4, u'xl#': 4, u'ls\xed': 4, u'd\xed\u0161': 4, u'zsc': 4, u'zso': 4, u'\u013aza': 4, u'uj\u015b': 4, u'dkk': 4, u'b\xedk': 4, u'#bc': 4, u'#bz': 4, u'\xfd#\xe1': 4, u'vh\xe1': 4, u'iut': 4, u'h#\xe9': 4, u'\xe7ai': 4, u's\u0165h': 4, u's\u013ep': 4, u'\xe1\u0161s': 4, u'sbr': 4, u'mmy': 4, u'\u03c4\u03bf#': 4, u'l\u017eb': 4, u'\xe1\xf1e': 4, u'\xe9ir': 4, u'i\xf2#': 4, u'dga': 4, u'rgs': 4, u'cuz': 4, u'nsv': 4, u'rg\u013e': 4, u'zhi': 4, u'#fc': 4, u'#fp': 4, u'iik': 4, u'ts\xe1': 4, u'sf\xe2': 4, u'#\xed\u0144': 4, u'#\xed\xf1': 4, u'gmb': 4, u'rcl': 4, u'ub\xf3': 4, u'\u013abu': 4, u'\xfafn': 4, u's\xedh': 4, u'#zt': 4, u'eqf': 4, u'\u010de\u0148': 4, u'pkn': 4, u'\xfcr#': 4, u'cyl': 4, u'mue': 4, u'+#b': 4, u'xxi': 4, u'viq': 4, u'awl': 4, u'ahv': 4, u'ppy': 4, u'roq': 4, u'\xf4\u010dk': 4, u'\xfab#': 4, u'\xfabp': 4, u'tbt': 4, u'\xe1nb': 4, u'u\u010da': 4, u'b\u0155d': 4, u'm\xe1k': 4, u'l\xf3d': 4, u'uyu': 4, u'uym': 4, u'tkm': 4, u'tkj': 4, u'l\u0148m': 4, u'mqu': 4, u'\xf6ne': 4, u'\xf3fk': 4, u'k\u0142o': 4, u'z\xed\u010d': 4, u'suh': 4, u'suy': 4, u'al\u0165': 4, u'al\u0148': 4, u'rk\xfd': 4, u'k\xe1\u010d': 4, u'gpb': 4, u'jsy': 4, u'\xe9#\xf3': 4, u'iey': 4, u'iee': 4, u'\xf2#c': 4, u'tgy': 4, u'\u0144cz': 4, u'\u010d#\u013e': 4, u'i#\u0151': 4, u'ix\xe1': 4, u'r\u010dl': 4, u'ap\u013e': 4, u'ap\u010d': 4, u'v\xed\u0161': 4, u'\xfazl': 4, u'\xf3#v': 4, u't\xf3w': 4, u'whe': 4, u'##y': 4, u'##q': 4, u'rp\xe4': 4, u'tcs': 4, u'tcl': 4, u'#m\u01ce': 4, u'=re': 4, u'myr': 4, u'y\u0161a': 4, u'ho\u0142': 4, u'k\xf4\u0161': 4, u'e\u0161s': 4, u'mdo': 4, u'e\u0161\xe1': 4, u'\xf1ig': 4, u'xit': 4, u'\xfdpk': 4, u'sm\u0103': 4, u'\u013akn': 4, u'n\xe9e': 4, u'zt#': 4, u'\xe9pa': 4, u'\xe9ph': 4, u'\u015fi#': 4, u'zt\u0161': 4, u'\xfav\u010d': 4, u'\xe4ns': 4, u'umd': 4, u'kh\xe1': 4, u'ol\u0161': 4, u'khl': 4, u'khm': 4, u'khe': 4, u'kh#': 4, u'nxl': 4, u'yds': 4, u'y\u0165t': 4, u'fpr': 4, u'tt\xe0': 4, u'vfl': 4, u'axm': 4, u'xma': 4, u'u#+': 4, u'\xebll': 4, u'ddt': 4, u'ddh': 4, u'acf': 4, u'uia': 4, u'ohm': 4, u'ohv': 4, u'cpo': 4, u'cpa': 4, u'cpd': 4, u'cpr': 4, u'p\xf4t': 4, u'\u0155#l': 4, u'itf': 4, u'ftw': 4, u'\u010fau': 4, u'bhl': 4, u'\xefd#': 4, u'vz\xfd': 4, u'\u0161kl': 4, u'zi\u0144': 4, u'uem': 4, u'rdp': 4, u'\xfake': 4, u'od\u0165': 4, u'o\u0165t': 4, u'#yu': 4, u'vmp': 4, u'vm#': 4, u'#y\xe1': 4, u'v\xfd\u0148': 4, u'etb': 4, u'\u03b7\u03c2#': 4, u'g\xfcl': 4, u'phu': 4, u'\xdf#v': 4, u'\xf4\u0148u': 4, u'\xe3oa': 4, u'akb': 4, u'\xe9l\u010d': 4, u'i\xf3d': 4, u'uak': 4, u'zee': 4, u'zev': 4, u'zez': 4, u'k\u0161#': 4, u'\xf4ko': 4, u'o\u0161\u013e': 4, u'coi': 4, u'ni\xf3': 4, u'th\xe9': 4, u'nix': 4, u'thw': 4, u'thk': 4, u'pls': 4, u'm\u01cen': 4, u'xye': 4, u'yk\u013a': 4, u'\u010f#w': 4, u'\u03c7\u03af\u03b6': 4, u'\u013eci': 4, u'aoa': 4, u'aon': 4, u'lm\xf6': 4, u'gbe': 4, u'\u01cene': 4, u'\u015f#s': 4, u'wcz': 4, u'x#\u010f': 4, u'll\xe9': 4, u'\u017e\xe1n': 4, u'ccu': 4, u'ccl': 4, u'\xed\u010fm': 4, u'\u0148ak': 4, u'\u010d\u0161#': 4, u'\u010fu#': 4, u'b#\u0165': 4, u'\u0165\u010dl': 4, u'hha': 4, u'puz': 4, u'\xe9d\u010d': 4, u'li\xe9': 4, u'a#\u015f': 4, u'\u0103#s': 4, u'\xf3\u0142#': 4, u'gse': 4, u'l#q': 4, u'l#y': 4, u'z#\u0148': 4, u'\xedvm': 4, u'z#\xe9': 4, u'\xedg#': 4, u'h\xfcs': 4, u'fly': 4, u'asq': 4, u'#h\xed': 4, u'yck': 4, u'ec\xe9': 4, u'aw\u010d': 4, u'snp': 4, u'beo': 4, u'k\xf3\u0142': 4, u'\u017eii': 4, u'shl': 4, u'\u03b1\u03b9#': 4, u'\u017ei\u0107': 4, u'zyc': 4, u'z\u0105d': 4, u'\u010dho': 
4, u'dmr': 4, u'\u0165ky': 4, u'ne\xed': 4, u'#\u03b1\u03c1': 4, u'\xedcn': 4, u'\xf3lu': 4, u'\xf3lm': 4, u'z\u013eu': 4, u'#l\xfd': 4, u'p\u0119k': 4, u'ygd': 4, u'#lg': 4, u'gl\xe9': 4, u'u\xe1m': 4, u'baf': 4, u'bae': 4, u'ifl': 4, u'mck': 4, u'\xe9l\xe9': 4, u'zuv': 4, u'o\u0142o': 4, u'\u012bti': 4, u'\u0161o\u0161': 4, u'uj\u010d': 4, u'#\u0161\xfa': 4, u'iwc': 4, u'pa\xf1': 4, u'v\xfab': 4, u'v\xfar': 4, u'r\xf6s': 4, u'f\xe1i': 4, u'f\xe1d': 4, u'\u017eaz': 4, u'hyu': 4, u'ab\u0155': 4, u'ryd': 4, u'ko\u0144': 4, u'ko\u0165': 4, u'a\u015f#': 4, u'\u0142od': 4, u'a\u015fa': 4, u'cfr': 4, u'fu\u0161': 4, u'g\u013eo': 4, u'r\xe9\u017e': 4, u'fuc': 4, u'ik\u013e': 4, u'g\xf3\u0142': 4, u'mki': 4, u'sdr': 4, u'lyl': 4, u'\u017eec': 4, u'm\u0155o': 4, u'e\u013ec': 4, u'daf': 4, u'dab': 4, u'dao': 4, u'nqu': 4, u're\u0163': 4, u'l\u0165a': 4, u'ksp': 4, u't\u0117#': 4, u'vr#': 4, u'\u0105dk': 4, u'ks\xe1': 4, u'ilg': 4, u'\u0161\xfa\u013e': 4, u'ioo': 4, u'ioj': 4, u'm\u011b\u0159': 4, u'esq': 4, u'aij': 4, u'en\xeb': 4, u'n\xfab': 4, u'\xe9cl': 4, u'\u013ap\u010d': 4, u'\xfazv': 4, u'\u013apo': 4, u'p#\xe1': 4, u'ocv': 4, u'l\u0161t': 4, u'kwh': 4, u'\xf6pf': 4, u'ic\xe3': 4, u'cnt': 4, u'cnn': 4, u'icp': 4, u'zcv': 4, u'a\u0161\u0161': 4, u'yj#': 4, u'ssp': 4, u'dyl': 4, u'dyg': 4, u'rmt': 4, u'lns': 4, u'j\u015bc': 4, u'lnn': 4, u'rml': 4, u'e\xfab': 4, u'gv\xe1': 4, u'p\xe1\u013e': 4, u'h\xe4n': 4, u'jub': 4, u'jum': 4, u'jup': 4, u'\u015bci': 4, u'igt': 4, u'#p\u0119': 4, u'l\xfdb': 4, u'r#\xf4': 4, u'm\u017e\xed': 4, u'y#q': 4, u'\xed\u0144i': 4, u'r\u010ft': 4, u'hiw': 4, u'\u0161ve': 4, u'hi\xe1': 4, u'hi\xed': 4, u'dug': 4, u'\xfdv#': 4, u'lj#': 4, u'\u03b1\u03c1\u03c7': 4, u'zz#': 4, u'grz': 4, u'i\u010dh': 4, u'\u010dk\xe9': 4, u'\u010dk#': 4, u'y\u0148s': 4, u'caf': 4, u'fec': 4, u'sph': 4, u'\xedfi': 4, u'\xf3ku': 4, u'kba': 4, u'byp': 4, u'sk\u0142': 4, u's\xfaz': 4, u'd\xe1i': 4, u'ebk': 4, u'g\u013eu': 4, u'bbs': 4, u'hms': 4, u'sks': 4, u'skj': 4, u'owo': 4, u'owc': 4, u'\u0142y#': 4, u'ow\xfc': 4, u'n\xebo': 4, u'\u015fom': 3, u'b\xed\u0161': 3, u'p\xe9c': 3, u'dn#': 3, u'czk': 3, u'\u03bf\u03c2#': 3, u'uop': 3, u'\u0148uk': 3, u'kfa': 3, u'kfo': 3, u'nzj': 3, u'#og': 3, u'k\xf6p': 3, u'x#\u0161': 3, u'm\u0161i': 3, u'\xedpi': 3, u'lra': 3, u'oev': 3, u'\xe9vi': 3, u't\u0103n': 3, u'p\xedc': 3, u'p\xedk': 3, u'\u03c0\u03bf\u03c7': 3, u'uk\xf3': 3, u'zow': 3, u'm\u013ak': 3, u'kj\xf6': 3, u'a\xedm': 3, u'uod': 3, u'ivs': 3, u'vk\u013a': 3, u'l\xee#': 3, u't#y': 3, u't#\xf3': 3, u'scn': 3, u'g\xf6r': 3, u'dsc': 3, u'\u0119tr': 3, u'vlc': 3, u'wre': 3, u'wra': 3, u'k\xe1u': 3, u'\xe4\u0165b': 3, u'jeb': 3, u'\xe4\u0165\u010d': 3, u'crv': 3, u'zk#': 3, u'y\xf1s': 3, u'h\xe9s': 3, u'h\xe9n': 3, u'ijd': 3, u'\u0159\xedm': 3, u'\u0159\xed#': 3, u'\u0107ov': 3, u'pfa': 3, u'srn': 3, u'r\xf3w': 3, u'\u0103oa': 3, u'\xe1#x': 3, u'w#r': 3, u'w#o': 3, u'rb\u010d': 3, u'db#': 3, u'aet': 3, u'gbt': 3, u'sz\xf6': 3, u'#\u013es': 3, u'ucf': 3, u'ucl': 3, u'lgb': 3, u'e\xe1m': 3, u'ofk': 3, u'vs\xfa': 3, u'kr\xf3': 3, u'h\xedr': 3, u'b\u0148a': 3, u'br\u010d': 3, u'nkh': 3, u'\xfd\u0161m': 3, u'\xfd\u0161t': 3, u'\xeb#b': 3, u'mv#': 3, u'udv': 3, u'\u0159rg': 3, u'\u0165ie': 3, u'hr\xfd': 3, u'em\xe9': 3, u'lc#': 3, u'\xe0#j': 3, u'\xe0#m': 3, u'\xe0#a': 3, u'k#q': 3, u'h\xe1n': 3, u'h\xe1b': 3, u'jvl': 3, u'i\u0161v': 3, u'\xe1m\u010d': 3, u'y#\xf6': 3, u'cak': 3, u'caa': 3, u'ae\xfa': 3, u'\xf3rf': 3, u'no\xee': 3, u'yin': 3, u'azj': 3, u'o\xe9z': 3, u'eii': 3, u'\u013eab': 3, 
u'dt\u0155': 3, u'rje': 3, u'lo\xfa': 3, u'\xed#+': 3, u'sr\u0161': 3, u'wmo': 3, u'p\u0159e': 3, u'z\u017ei': 3, u'jr\xfa': 3, u'cee': 3, u'cey': 3, u'ka\u013e': 3, u'ka\u0144': 3, u'\xf3ve': 3, u'o\u010fk': 3, u'ncf': 3, u'\u03b1\u03c0\u03bf': 3, u'stc': 3, u'm#\xe9': 3, u'm#\u015f': 3, u'hju': 3, u'#\u03bb\u03ae': 3, u'eeh': 3, u'm\u010da': 3, u'\u0148#\u0165': 3, u'\u0161uc': 3, u'i\xe9#': 3, u'dv#': 3, u'i\xe9h': 3, u'\xedz\u0161': 3, u'oz\u0165': 3, u'#wm': 3, u'#wf': 3, u'rcm': 3, u'rsd': 3, u'rsf': 3, u'rsb': 3, u'vpm': 3, u'\xe1um': 3, u'\xe1uz': 3, u'ngf': 3, u'\xfa#y': 3, u'yaf': 3, u'#jr': 3, u'#js': 3, u'ke\u010d': 3, u'\xf4\u017eo': 3, u'bcg': 3, u'iye': 3, u'a\xf1i': 3, u'\u017eoj': 3, u'au\xed': 3, u'\u0161\u010do': 3, u'mwo': 3, u'xhu': 3, u'\u03c0\u03bc#': 3, u'\u0165#x': 3, u'\u03b1\u03c4\u03ac': 3, u'do\u0165': 3, u'gyh': 3, u'\u0107#s': 3, u'gsb': 3, u'omd': 3, u'omh': 3, u'omv': 3, u'om\u011b': 3, u'yeh': 3, u'l\u010dm': 3, u'\u013e#\xf3': 3, u'cg#': 3, u'fsb': 3, u'e\u017e\xe9': 3, u'\xee#d': 3, u'e\u017ec': 3, u'e#\u03b1': 3, u'ayf': 3, u'ayp': 3, u'lsb': 3, u'or\xf3': 3, u'c\xe1c': 3, u'zs#': 3, u'zsy': 3, u'or\u0119': 3, u'\u0159er': 3, u'u\u013e\xf4': 3, u'jfa': 3, u'jfr': 3, u'km\xfd': 3, u'o\u0103o': 3, u'\xf3bm': 3, u'h\xf6g': 3, u'h\xf6p': 3, u'e\u015f#': 3, u't\u0119t': 3, u'goy': 3, u'ra\u015f': 3, u'd\xfa\u0161': 3, u'j\xf3d': 3, u'\u012b\u0137e': 3, u'eqa': 3, u'o#\xe0': 3, u'\xedng': 3, u'lby': 3, u'gaj': 3, u'sya': 3, u'dgh': 3, u'ldw': 3, u'ldy': 3, u'cui': 3, u'\u0161\u0165s': 3, u'\u03c5\u03c0\u03ad': 3, u'\u0142ka': 3, u'oeh': 3, u'nsl': 3, u'\u013apc': 3, u'ii\u010d': 3, u'chf': 3, u'd\xf6r': 3, u'ch\xfc': 3, u'eua': 3, u'mi\u015f': 3, u'#\xf6r': 3, u'#\xf6g': 3, u'\xfcv#': 3, u'sfm': 3, u'a\u013e\xfa': 3, u'r\u0119b': 3, u'wyb': 3, u'gml': 3, u'ad\xf3': 3, u'ub\u010d': 3, u'ubc': 3, u's\xed\u010d': 3, u'q#s': 3, u'q#a': 3, u's\xeds': 3, u'ku\u017a': 3, u'ku\u0165': 3, u'vp\xed': 3, u'imf': 3, u'vpe': 3, u'nhy': 3, u'nh#': 3, u'\u03c0\u03ad\u03c1': 3, u'\u043d\u0430#': 3, u'\u0165hv': 3, u'hsh': 3, u'l\xfah': 3, u'#\xe1#': 3, u'ahd': 3, u'ahw': 3, u'gim': 3, u'd\u0119t': 3, u'll\xee': 3, u's\u010di': 3, u'kyh': 3, u'ia\u015f': 3, u'\xfa#x': 3, u'vto': 3, u'gtu': 3, u'vtz': 3, u'm\xe1\u010d': 3, u'p\u0155s': 3, u'm\xe1g': 3, u'\u013akl': 3, u'\u010dap': 3, u'l\xf3m': 3, u'\xe1lc': 3, u'po\xe9': 3, u's+#': 3, u'k#x': 3, u'\u012b#a': 3, u'd\u017el': 3, u'j\u0159r': 3, u'rkr': 3, u'\u017eto': 3, u'oye': 3, u'oyc': 3, u'\xe9#\u0148': 3, u'\xf6ra': 3, u'\xfa\u0161#': 3, u'\xf6rf': 3, u'#rm': 3, u'\xe9#x': 3, u'\xe9#q': 3, u'rtv': 3, u'm\xeds': 3, u'\xe1gh': 3, u'\xe1pm': 3, u'uus': 3, u'\u010dm\xe1': 3, u'tg#': 3, u'\u03c4\u03ac#': 3, u'n\u0161\u0165': 3, u'ilq': 3, u'\u010d#\u010f': 3, u'\xfbte': 3, u'#i\xf3': 3, u'ixy': 3, u'v\xedo': 3, u'\xfa\u0165k': 3, u'tcu': 3, u'\xfdt#': 3, u'\xfdtl': 3, u'lt\xfd': 3, u'oux': 3, u'ncr': 3, u'#\xe1b': 3, u'##\xf3': 3, u'\u010dic': 3, u'#v\u012b': 3, u'jo\u0103': 3, u'uqu': 3, u'\xe1tv': 3, u'ndf': 3, u'\xedd\u017e': 3, u'vay': 3, u's#\xe9': 3, u'myo': 3, u'##\u03bc': 3, u'#m\xe3': 3, u'y\u0161\u013e': 3, u'\xe8me': 3, u'xi\u010d': 3, u'pr\xe8': 3, u'#\u03c5\u03c0': 3, u'dsz': 3, u'f\xfch': 3, u'\xf1in': 3, u'f\xfcr': 3, u'\u015b\u0107#': 3, u'xi\xf3': 3, u'lpi': 3, u'lpg': 3, u'zty': 3, u'n\xe9n': 3, u'n\xe9o': 3, u'jk\u013e': 3, u'dhs': 3, u'jkl': 3, u'\xeame': 3, u'olj': 3, u'olp': 3, u'foi': 3, u'um=': 3, u'\u017ami': 3, u'lwe': 3, u'khi': 3, u'nxo': 3, u'h\xf3l': 3, u'ydn': 3, u'ydy': 3, u'sa\u0161': 3, 
u'ovv': 3, u'xmi': 3, u'dly': 3, u'n\xede': 3, u'u#\u0151': 3, u'gde': 3, u'gdi': 3, u'dd\xf4': 3, u'e\xfao': 3, u'ddl': 3, u'acg': 3, u'dfu': 3, u'g\u0165i': 3, u'cpc': 3, u'\xfaop': 3, u'\xedlb': 3, u'\u0117#k': 3, u'\xfcnc': 3, u'\xfcn#': 3, u'dfa': 3, u'xuj': 3, u'vao': 3, u'uee': 3, u'exm': 3, u'tpi': 3, u'sew': 3, u'xal': 3, u'\u0165or': 3, u'\u0165ot': 3, u'jcy': 3, u'\u0161ki': 3, u'ueu': 3, u'\xedh\u013e': 3, u'\u03bc\u03bc#': 3, u'\xfakm': 3, u't\xfcv': 3, u'\u0148o\u0161': 3, u'npd': 3, u'npt': 3, u'\xb1##': 3, u'#yp': 3, u'kpi': 3, u'v\xe1j': 3, u'ckt': 3, u'bt\xed': 3, u'hfr': 3, u'vma': 3, u'qtr': 3, u'btr': 3, u'btk': 3, u'etp': 3, u'tly': 3, u'et\u0119': 3, u'#p\u0155': 3, u'xel': 3, u'\xfddr': 3, u'hwi': 3, u'eo\u017e': 3, u'mch': 3, u'ak\xf3': 3, u'\xe1rb': 3, u'ptv': 3, u'alr': 3, u'i\u0148t': 3, u'i\u0148m': 3, u'uai': 3, u'zeh': 3, u'fad': 3, u'nt\xfd': 3, u't\xe1g': 3, u'\xfag#': 3, u'\u03b1#p': 3, u'\xf6sz': 3, u'\xf6si': 3, u'epm': 3, u'bpe': 3, u'bpr': 3, u'\u03b5\u03c4\u03b1': 3, u'plv': 3, u'plc': 3, u'xym': 3, u'r\xfdp': 3, u'mt\xe9': 3, u'uc\u0165': 3, u'htm': 3, u'ekk': 3, u'\u0161s#': 3, u'\xe9\u0161o': 3, u'aod': 3, u'ghl': 3, u'#\u015bw': 3, u'rl\xe9': 3, u'rlb': 3, u'\u015f#v': 3, u'yth': 3, u'\u03ba\u03b1\u03c4': 3, u'gwa': 3, u'vur': 3, u'uxi': 3, u'\xed\u010f#': 3, u'yt\u0161': 3, u'yt\u0117': 3, u'\u0163ov': 3, u'ccw': 3, u'b#\xed': 3, u'r\u0148m': 3, u'\xf3tn': 3, u'o\u010dm': 3, u'il\xe4': 3, u'hh\xe1': 3, u'egh': 3, u'egb': 3, u'pu\xe1': 3, u'#\u015f\u0131': 3, u'\xe9du': 3, u'asj': 3, u'\xe4ov': 3, u'\u017euc': 3, u'stb': 3, u'r\u016bn': 3, u'woh': 3, u'wo#': 3, u'\u03bf\u03c7\u03ae': 3, u'\xfa\u017e#': 3, u'\u03b7#\u03bb': 3, u'jp#': 3, u'nvy': 3, u'gsd': 3, u'l#\xe9': 3, u'c\u010dl': 3, u'ut\u0148': 3, u'ru\xf3': 3, u'id\xf3': 3, u'ruy': 3, u'idf': 3, u'idc': 3, u'nln': 3, u'na\xee': 3, u'na\xed': 3, u'z#\xf3': 3, u'vsi': 3, u'ut\xeb': 3, u'kch': 3, u'ycu': 3, u'i\u017et': 3, u'v\u012b\u0137': 3, u'du\u010d': 3, u'#\u03ba\u03b1': 3, u'#\u03c0\u03bc': 3, u'k\xf3m': 3, u'ecf': 3, u'ecm': 3, u'aw\xfc': 3, u'd+c': 3, u'awy': 3, u'awu': 3, u'pyl': 3, u'\u010d\xe1n': 3, u'\u017eir': 3, u'mgm': 3, u'mgl': 3, u'shd': 3, u'luu': 3, u'zy\xf1': 3, u'zyb': 3, u'\u03ad\u03c1#': 3, u'\u015f\u0131k': 3, u'#\u03c4\u03b7': 3, u'#\u03c4\u03bf': 3, u'#\u03b1\u03c0': 3, u'\xedc\u010d': 3, u'#lf': 3, u'#lp': 3, u'xup': 3, u'n#\xe9': 3, u'ofp': 3, u'tys': 3, u'sl\u0148': 3, u'baw': 3, u'f\u0148u': 3, u'mcj': 3, u'mcs': 3, u'zuk': 3, u'zu\u0161': 3, u'\xf1sk': 3, u'di\xf1': 3, u'fl#': 3, u'jh\u013e': 3, u'ooh': 3, u'ulp': 3, u'nyj': 3, u'nye': 3, u'a\xee#': 3, u'pa\u015f': 3, u'i#\xb0': 3, u'\xe1bs': 3, u'k\u0151#': 3, u'l\xedg': 3, u'v\xfak': 3, u'd#\xb0': 3, u'paf': 3, u'd#+': 3, u'moa': 3, u'\u017eav': 3, u'\u017eau': 3, u'c\xe3o': 3, u'#\u03b5\u03c5': 3, u'\xfasv': 3, u'i\xfa\u010d': 3, u'gg\xe1': 3, u'\xe4k\u0161': 3, u'ho\xfa': 3, u'hy\u0148': 3, u'csy': 3, u'css': 3, u'jd\xed': 3, u'uhj': 3, u'okc': 3, u'#d\u0119': 3, u'\xfalt': 3, u'#d+': 3, u'r\xe9o': 3, u'r\xe9e': 3, u'+c#': 3, u'k\u0155\u010d': 3, u'o\xfbt': 3, u'ikw': 3, u'\u0142#k': 3, u'z\xfac': 3, u'fug': 3, u'ewl': 3, u'\u0165sk': 3, u'ik\u0161': 3, u'ik\u0151': 3, u'a\u0142#': 3, u'#\u010d\xe1': 3, u'peb': 3, u'mkt': 3, u'sdm': 3, u'lyb': 3, u'jzb': 3, u'ufu': 3, u'm\u0155#': 3, u'a\u0165b': 3, u'afm': 3, u'afs': 3, u'#uu': 3, u'\u017e#\xf4': 3, u'daa': 3, u'co\xfb': 3, u'f#w': 3, u'f#y': 3, u'\u0161\u013eu': 3, u'cw#': 3, u'emz': 3, u'\xe4lo': 3, u'f#\u010f': 3, u'kss': 3, u'kse': 3, u'ksk': 3, 
u't\xfdf': 3, u't\xfd\u013e': 3, u'cjo': 3, u'#\xb1#': 3, u'pi\xe8': 3, u'es\u0142': 3, u'ta\u017e': 3, u'j\xe1s': 3, u'mwa': 3, u'\u0155dn': 3, u'#kp': 3, u'\u0107#a': 3, u'f\xe9n': 3, u'd\xedj': 3, u'\xe4cu': 3, u'p#w': 3, u'e\u0163o': 3, u'thd': 3, u'p#\xed': 3, u'\u017ere': 3, u'wfo': 3, u'#g+': 3, u'wfp': 3, u'rnh': 3, u'vve': 3, u'u\u017am': 3, u'icb': 3, u'ths': 3, u'eyf': 3, u'nnt': 3, u'\xf3s#': 3, u'tiy': 3, u'tiq': 3, u'\xf3su': 3, u'##\u03c0': 3, u'pmf': 3, u'\u0161\xe1v': 3, u'msz': 3, u'msu': 3, u'msh': 3, u'o\xeet': 3, u'f\xedg': 3, u'csh': 3, u'r\u0103a': 3, u'huy': 3, u'\xe9g#': 3, u'hft': 3, u'anq': 3, u'bl\xfd': 3, u'ss\xe3': 3, u'\u03c7\u03ae#': 3, u'uht': 3, u'vzm': 3, u'juc': 3, u'rvl': 3, u'o\u015b\u0107': 3, u'l\xfds': 3, u'tez': 3, u'tew': 3, u'\u013e\xf4\u010d': 3, u'i\u0161p': 3, u'te\u0148': 3, u'm\u017ee': 3, u'\xfdk#': 3, u'ef\xfa': 3, u'efy': 3, u'efn': 3, u'a\u015fi': 3, u'hiq': 3, u'du\u013e': 3, u'\u03c4\u03b1\u03b9': 3, u'arj': 3, u'ri\u0148': 3, u'qar': 3, u'ar\u016b': 3, u'f\xfag': 3, u'lju': 3, u'b\xf6l': 3, u'zzy': 3, u'#t\u0103': 3, u'gr\xf6': 3, u'#t\xe2': 3, u'wne': 3, u'rr\xe9': 3, u'ibm': 3, u'y#\u0163': 3, u'\xe1vl': 3, u'usq': 3, u'fe\xed': 3, u'kbe': 3, u'kby': 3, u'h#q': 3, u'nfu': 3, u'bly': 3, u'j\u010dl': 3, u'ebt': 3, u'mfu': 3, u'\u0155s#': 3, u'\xfdrn': 3, u'#\u03bc\u03bc': 3, u't\xedr': 2, u'\xe9rg': 2, u'\u015foa': 2, u'\u0155\u010do': 2, u'cz\u0105': 2, u'jm#': 2, u'\xe4la': 2, u'\xe4lj': 2, u'=co': 2, u'czi': 2, u'#\u043d\u0430': 2, u'\xe1zq': 2, u'nzy': 2, u'nzk': 2, u'#ow': 2, u'\u0161\xedd': 2, u'e\u0144#': 2, u'nz\xe9': 2, u'ldh': 2, u'yfi': 2, u'yf\xe9': 2, u'xv#': 2, u'tz\xe1': 2, u'\u03ba\u03cc\u03c0': 2, u'rgm': 2, u'ir\u017e': 2, u'a\u0142g': 2, u'\u03bd\u03b9#': 2, u'k\xf6v': 2, u'k\xf6y': 2, u'ha\xe7': 2, u'm\u0161a': 2, u'\u0144#p': 2, u'sog': 2, u'ar\u0165': 2, u'xku': 2, u'osd': 2, u'osg': 2, u't\xe9o': 2, u'\xe9v#': 2, u't\u0103o': 2, u'rz\u010f': 2, u'jig': 2, u'jib': 2, u'\xedn\u015b': 2, u'\u010dso': 2, u'ji\u0107': 2, u'\u0119ko': 2, u'rzb': 2, u'#c\xf4': 2, u'\u0430\u044f#': 2, u'\u03ac#z': 2, u'a\xeda': 2, u'\u03ba\u03bf\u03b9': 2, u'i#\u03c5': 2, u'uoe': 2, u'ivf': 2, u'#c\u0153': 2, u'ivz': 2, u'yz\xfa': 2, u'yz\xe9': 2, u't#\u015f': 2, u'p#\xe0': 2, u't#\u0148': 2, u'\xf4ty': 2, u'w\xed#': 2, u'bjp': 2, u'\xe8ce': 2, u'he\xe1': 2, u'w\xed\u0161': 2, u'g\xf6d': 2, u'g\xf6s': 2, u'#\u017e#': 2, u'pbr': 2, u'\xf6#n': 2, u'\xf6#z': 2, u'sc\xe1': 2, u'\u0430\u0441\u043d': 2, u'\xfdzl': 2, u'vlj': 2, u'\xfdz\u0165': 2, u'\u017e\u0161a': 2, u'\xe9ja': 2, u'wrz': 2, u'x\xedr': 2, u'#=#': 2, u'm\u0148o': 2, u'#=r': 2, u'\u013eus': 2, u'jeo': 2, u'ccy': 2, u'\u0161ec': 2, u'aav': 2, u'ugi': 2, u'ugp': 2, u'crt': 2, u'ug\xe1': 2, u'\xf3ge': 2, u'rf\xf3': 2, u'vo\u010f': 2, u'cr\xe8': 2, u'rf\xed': 2, u'nrg': 2, u'nrr': 2, u'\xedju': 2, u'h\xe9i': 2, u'h\xe9o': 2, u'vgr': 2, u'\u0131k#': 2, u'ijc': 2, u'iji': 2, u'o\xfad': 2, u'o\xfam': 2, u'o\xfaz': 2, u'iid': 2, u'bvz': 2, u'sgp': 2, u'sgr': 2, u'sga': 2, u'ev\u013e': 2, u'sg#': 2, u'o\u010dl': 2, u'mjo': 2, u'\u013e#\u0165': 2, u'w#\u010d': 2, u'\u0103om': 2, u'\xe1#y': 2, u'u\xf3z': 2, u'fda': 2, u'w#h': 2, u'\u013eia': 2, u'\u0165m\xed': 2, u'\u0161ih': 2, u'tsm': 2, u'tsf': 2, u'a#\u03b1': 2, u're\xf3': 2, u'\xe5se': 2, u'\xfaaf': 2, u'cv#': 2, u'lgu': 2, u'r\u0165h': 2, u'r\u0165f': 2, u'ofg': 2, u'rb\xf3': 2, u'ofl': 2, u'a#\u0142': 2, u'a#\u015b': 2, u'weo': 2, u'wez': 2, u'nvu': 2, u'vsl': 2, u'h\xedl': 2, u'h\xedd': 2, u'nv\xed': 2, u'in\xf3': 2, u'lft': 2, 
u'\u0105ck': 2, u'\u0105cy': 2, u'yr\xed': 2, u'er\u010f': 2, u'\u017e#x': 2, u'cma': 2, u'\xe1in': 2, u'l\xf6n': 2, u'brt': 2, u'nkm': 2, u'nkw': 2, u'erj': 2, u'pjo': 2, u'pju': 2, u'\xeb#a': 2, u'\xfdbt': 2, u'\xed\xe1\u0161': 2, u'mvs': 2, u'\u0155ch': 2, u'da\u0161': 2, u'ogj': 2, u'\u0165il': 2, u'bnp': 2, u'#\u0142\u0105': 2, u'\u0161m\xfd': 2, u'\xe0#k': 2, u'\xe0#s': 2, u'rnm': 2, u'rnl': 2, u'f\xf3m': 2, u'h\xe1\u010f': 2, u'il\xf6': 2, u'waj': 2, u'wac': 2, u'p\u0155c': 2, u'\xedrt': 2, u'z\u010fu': 2, u'h\xe1g': 2, u'kvd': 2, u'gub': 2, u'gug': 2, u'guz': 2, u'fok': 2, u'gu\xea': 2, u'y#\xe0': 2, u'uzf': 2, u'ksd': 2, u'jv\u0161': 2, u'ibc': 2, u'\xe1mm': 2, u'z\xf3l': 2, u'\u0155ov': 2, u'ha\u0142': 2, u'\u0155om': 2, u'yip': 2, u'pnl': 2, u'mrs': 2, u'mrm': 2, u'mrf': 2, u'\u011b\u0159i': 2, u'c+x': 2, u'j#y': 2, u'\xf1ov': 2, u'k\xedk': 2, u'k\xedj': 2, u'eiv': 2, u'\u0153ur': 2, u'rwc': 2, u'psz': 2, u'pst': 2, u'\xed#\xe9': 2, u'am\u0163': 2, u'rjo': 2, u'\xed#x': 2, u'\xfaas': 2, u'lo\xed': 2, u'#s\u0148': 2, u'#s\u0142': 2, u'\xfa\u010dn': 2, u'k\u010de': 2, u'\xfa\u010ds': 2, u'pui': 2, u'\xfcbe': 2, u'jrc': 2, u'\u03c0\u03c4\u03b5': 2, u'#s\xe3': 2, u'jd#': 2, u'\u03bf\u03c5#': 2, u'\u0163a#': 2, u'cev': 2, u'ifs': 2, u'ifz': 2, u'ip\u0117': 2, u'rw\xed': 2, u'ce\u010d': 2, u'rw\u0119': 2, u'ncs': 2, u'o\u010fu': 2, u'tfi': 2, u'bzu': 2, u'\xf3v\xe1': 2, u'qba': 2, u'm#+': 2, u'g\xe9l': 2, u'bl#': 2, u'stz': 2, u'tm\xfd': 2, u'iyv': 2, u'iys': 2, u'n\xf3n': 2, u'hja': 2, u'i\u0146\u0161': 2, u'\xfd\u013eo': 2, u'hj\xf3': 2, u'\u013eev': 2, u'pu\u017c': 2, u'm\u010dl': 2, u'aqs': 2, u'svs': 2, u'lkk': 2, u'lkh': 2, u'lkp': 2, u'\u0146\u0161#': 2, u'lk\xfa': 2, u'n\u0103s': 2, u'wih': 2, u'wix': 2, u'\xedzs': 2, u'oz\u017e': 2, u'\xf6yk': 2, u'\u013aba': 2, u'\u013abm': 2, u'n\u010dc': 2, u'rsl': 2, u'cyc': 2, u'rsz': 2, u'm\xeam': 2, u'l\xfar': 2, u'\u0142go': 2, u'rs\xfa': 2, u'ngn': 2, u'\xedef': 2, u'kei': 2, u'keu': 2, u'p\u0117d': 2, u'yao': 2, u'yac': 2, u'xst': 2, u'c#\xe9': 2, u'#\u03bf#': 2, u'h\u013at': 2, u'eao': 2, u'hnt': 2, u'auu': 2, u'\xe2ne': 2, u'\u017eor': 2, u'sjo': 2, u'drn': 2, u'\u0161\u010da': 2, u'\xfaf#': 2, u'e\xf1o': 2, u'e\xf1a': 2, u'c\xede': 2, u'wuj': 2, u'wuo': 2, u'\xedn\xed': 2, u'\u0165#\xe9': 2, u'\u03b1\u03c4\u03bf': 2, u'yh#': 2, u'jju': 2, u'fms': 2, u'gye': 2, u'gyo': 2, u'+vs': 2, u'c\u0103#': 2, u'\u0137ef': 2, u'r\u017e\xe1': 2, u'fn\xfa': 2, u'fn\xe1': 2, u'r\u017el': 2, u'omz': 2, u'\xe1y#': 2, u'fna': 2, u'\xedas': 2, u'saw': 2, u'kiw': 2, u'yes': 2, u'#nj': 2, u'u\u0148h': 2, u'm+d': 2, u'\xedu\u0161': 2, u'iq#': 2, u'd\xe8c': 2, u'en\xf6': 2, u'\xe7e#': 2, u'o\u0148\u017e': 2, u'fsw': 2, u'rnt': 2, u'\xf3\u0142k': 2, u'e#\u03b5': 2, u'ayi': 2, u'ayg': 2, u'ayv': 2, u'ma\xf1': 2, u'maq': 2, u'lss': 2, u'xlo': 2, u'\u013ev\xe1': 2, u'e#\xb0': 2, u'\xfcll': 2, u'aqb': 2, u'gev': 2, u'y\xe9s': 2, u'u\u013ev': 2, u'\xe9co': 2, u'\xe4is': 2, u'\xe9cr': 2, u'\u0161\u0161a': 2, u'oik': 2, u'oif': 2, u'\u0161\u0161\xe1': 2, u'\xf3b#': 2, u'\xfant': 2, u'h\xf6n': 2, u'cl#': 2, u'\xedge': 2, u'kmr': 2, u'\xedgn': 2, u'\xedm\xed': 2, u'yyi': 2, u'vh#': 2, u'#bk': 2, u'#bn': 2, u'#bq': 2, u'vha': 2, u'\xfcmu': 2, u'e\u015fl': 2, u'#b\xe2': 2, u'p\xf3k': 2, u'iuc': 2, u'z\xfcr': 2, u'\xfa\xe1d': 2, u'eyg': 2, u'wi\u015b': 2, u'g+#': 2, u'kcj': 2, u'hfi': 2, u'\xe4#w': 2, u'j\xf3g': 2, u'sb\xfc': 2, u'zp\xe1': 2, u'j\xf3v': 2, u'sb\xe1': 2, u'r\xf4f': 2, u'r\xf4l': 2, u'\xe4#\xe1': 2, u'\u010d\xedl': 2, u'\u03c1#p': 2, u'o#+': 2, 
u'ga\u013e': 2, u'l\u017es': 2, u'\xfaqu': 2, u'#\xe9c': 2, u'#\xe9d': 2, u'#\xe9i': 2, u'#\xe9#': 2, u'aj\u0148': 2, u'jb\xfa': 2, u'syj': 2, u'syz': 2, u'f=e': 2, u'ldz': 2, u'cuc': 2, u'ld\xe1': 2, u'ufn': 2, u'\xf3fo': 2, u'o\u011fl': 2, u'\xf4tk': 2, u'\u017adz': 2, u'zhd': 2, u'aeo': 2, u'\xf6n#': 2, u'vl#': 2, u'w\u0119#': 2, u'ch\u0165': 2, u's\xe9t': 2, u's\xe9c': 2, u'm\u0103a': 2, u'e\u015bn': 2, u'\xf6n\xf6': 2, u'vl\xfa': 2, u'iiu': 2, u'iiy': 2, u'iie': 2, u'iib': 2, u'iil': 2, u'd\xf6l': 2, u'#f\u0148': 2, u'\xe1dv': 2, u'tsh': 2, u'bw#': 2, u'\u03c3\u03b7\u03c2': 2, u'\xe8ch': 2, u'mij': 2, u'sfi': 2, u'\u0103n\u0103': 2, u'a\u013e\u0161': 2, u'k\xe4o': 2, u'ohj': 2, u'a\u013ee': 2, u'\xe9ms': 2, u'wye': 2, u'wyh': 2, u'ad\xf6': 2, u'y\xe1c': 2, u'\u0119ba': 2, u'vv\xfd': 2, u'\u0165\xfcn': 2, u'oae': 2, u'oaj': 2, u'oai': 2, u'##\u03b1': 2, u'nwr': 2, u'\xfafy': 2, u'q#k': 2, u'\u03b5\u03c5\u03c1': 2, u'vpo': 2, u'u\u017ca': 2, u'+it': 2, u'ku\u017e': 2, u'#z\xfc': 2, u'vp\xfa': 2, u'yqu': 2, u'to\u015f': 2, u'to\u011f': 2, u'nht': 2, u'\xf3\u017e\xed': 2, u'toq': 2, u'im\u013e': 2, u'cys': 2, u'bsp': 2, u'bss': 2, u'a\u0144c': 2, u'+#\u010d': 2, u'\u0155\u0161t': 2, u'r\xfcn': 2, u'beu': 2, u'apf': 2, u'+#c': 2, u'+#o': 2, u'+#h': 2, u'+#x': 2, u'k\u013eo': 2, u'n\u0117#': 2, u'\xfa\u0148m': 2, u'#\xe1r': 2, u'\xe9at': 2, u'zm\u0155': 2, u'hsc': 2, u'hse': 2, u'awf': 2, u'giv': 2, u'jzh': 2, u'\u03c0\u03c1\u03cc': 2, u'd\u017eb': 2, u'llc': 2, u'i\xfad': 2, u'\xe3se': 2, u'ro\u0148': 2, u'hsy': 2, u'll\u0151': 2, u'kyl': 2, u'\xfa\u0165o': 2, u'\u03b9#\u03c4': 2, u'#j\u0159': 2, u'ic\u01ce': 2, u'gtc': 2, u'vtu': 2, u'm\xe1\u017e': 2, u'l\xf3\u017e': 2, u'#+\xb0': 2, u'\xf6v\xe9': 2, u'ng=': 2, u't\u010d\xed': 2, u'yub': 2, u'yue': 2, u'\u010dag': 2, u'uyq': 2, u'po\u0165': 2, u'tr\xe8': 2, u'yh\u0155': 2, u'lcy': 2, u'yat': 2, u'\xf3sk': 2, u'u\xeas': 2, u'\xe9es': 2, u'ptc': 2, u'pt\xfa': 2, u'\u013e\u0161o': 2, u'\xf1al': 2, u'\xf1an': 2, u'\u03c4\u03b7\u03c2': 2, u'rk\xe9': 2, u'\u015flu': 2, u'o\u017ev': 2, u'\u0119k#': 2, u'gpe': 2, u'\xf6ri': 2, u'rtp': 2, u'iej': 2, u'#\u03ba\u03bf': 2, u'rt\xe4': 2, u'uur': 2, u'ty\u0144': 2, u'aaz': 2, u'ie\u0142': 2, u'ie\u010f': 2, u'qas': 2, u'qad': 2, u'qa#': 2, u's\xf8r': 2, u'a\xe7i': 2, u'g\xe1j': 2, u'ylp': 2, u'i\u015fo': 2, u'\u010d#g': 2, u'++i': 2, u'i#\xe9': 2, u'o\u0144s': 2, u'l\xe4r': 2, u'#\u015fa': 2, u'ix\xed': 2, u'z\xe9c': 2, u'z\xe9m': 2, u'z\xfd\u010d': 2, u'o\u0144c': 2, u'si\u013e': 2, u'apb': 2, u'z\u0105c': 2, u'i\xe8c': 2, u'b\xf4\u013e': 2, u'\xfdtk': 2, u'oue': 2, u'ouq': 2, u'ltm': 2, u'\xfazu': 2, u'\xf3#c': 2, u'\xf3#b': 2, u'\xf3#r': 2, u'\xf3gn': 2, u'lt\u010d': 2, u'wha': 2, u'r\xfaz': 2, u'inw': 2, u'jo\xeb': 2, u'#v\xf6': 2, u'jow': 2, u'joj': 2, u'#++': 2, u'm\xe9l': 2, u'm\xe9t': 2, u'fki': 2, u'y\u0144#': 2, u'\xedds': 2, u'\u03ae\u03be\u03b7': 2, u'#m\u011b': 2, u'ndn': 2, u'\xe1t\u010d': 2, u'\u0123ir': 2, u'\u043e\u0439#': 2, u'myi': 2, u'#m\xea': 2, u'##\u03b2': 2, u'xt+': 2, u'y\u0161\xed': 2, u'voe': 2, u'yun': 2, u'm\u0163#': 2, u'\u0165po': 2, u'mdg': 2, u'mdr': 2, u'mds': 2, u'mdu': 2, u'dsy': 2, u'md#': 2, u'xij': 2, u'xix': 2, u'au\xdf': 2, u'at\u013a': 2, u'\xe9p\xf4': 2, u'uy\xe8': 2, u'\xfavz': 2, u'\xfavs': 2, u'\xf4by': 2, u'\xe9pe': 2, u'\xe9pu': 2, u'\u013ese': 2, u'cdt': 2, u'=al': 2, u'up\xfd': 2, u'jkt': 2, u'um\xed': 2, u'\u011b\u0161\xed': 2, u'\u010duc': 2, u'\u010duk': 2, u'umg': 2, u'umc': 2, u'epj': 2, u'vev': 2, u'h\xf3k': 2, u'e\u0142#': 2, u'\xdfa#': 2, 
u'z\xe1d': 2, u'\u017ev\xe1': 2, u'fpl': 2, u'fpi': 2, u'ttn': 2, u'#\u03b2#': 2, u'\xe1cv': 2, u'hc#': 2, u'\u0151fi': 2, u'saz': 2, u'sax': 2, u'bl\u0161': 2, u'axl': 2, u'd\xfcs': 2, u'u#\xb1': 2, u'\u0119#w': 2, u'\u03cc\u03b5\u03b4': 2, u'xml': 2, u'u#\xe5': 2, u'\xfard': 2, u'n\xedr': 2, u'n\xedp': 2, u'\xe9tm': 2, u'\xe9th': 2, u'\xe9tr': 2, u'u#\u015b': 2, u'gdp': 2, u'ddr': 2, u'acb': 2, u'y\xe8r': 2, u'uib': 2, u'cpi': 2, u'rxa': 2, u'#e\u013e': 2, u'\xedlm': 2, u'\xfcne': 2, u'\xfcns': 2, u'\u0155za': 2, u'#e+': 2, u'xud': 2, u'\u0155#t': 2, u'\u0155#a': 2, u'n\u0163o': 2, u'xul': 2, u'=##': 2, u'kl\xf4': 2, u'n\u0163a': 2, u'ft\xe1': 2, u'r\xe8m': 2, u'it\xe0': 2, u'ftg': 2, u'z\xfd#': 2, u'\u010fas': 2, u'\xe1gi': 2, u'\xe1ge': 2, u'it\u0103': 2, u'bhi': 2, u'\u0171te': 2, u'exj': 2, u'exs': 2, u'xa\u010d': 2, u'pde': 2, u'pdb': 2, u'xap': 2, u'ml+': 2, u'xav': 2, u'xam': 2, u'xac': 2, u'rdw': 2, u'\u0161k\u013e': 2, u'\u0161k\u0155': 2, u'\u015fan': 2, u'\u015fah': 2, u'\u0165ok': 2, u'ag\xed': 2, u'ag\xfa': 2, u'gvo': 2, u'agw': 2, u'rd\xe8': 2, u'le=': 2, u'rdr': 2, u'rdh': 2, u't\xfcm': 2, u'zi\xf3': 2, u'\xf3n\u0161': 2, u'npc': 2, u'rd\u0148': 2, u'vms': 2, u'\u012bks': 2, u'kpa': 2, u'a\u017ad': 2, u'ihh': 2, u'+do': 2, u'ckn': 2, u'l\xf4n': 2, u'btt': 2, u'btv': 2, u'cbi': 2, u'cbo': 2, u'v++': 2, u'et\u0151': 2, u'phr': 2, u'\xdf#k': 2, u'\xdf#m': 2, u'xeq': 2, u'\xdf#a': 2, u'mhl': 2, u'mha': 2, u'eo\u013e': 2, u'u\xf1a': 2, u'\u0165ku': 2, u'\u013eos': 2, u'\xe3os': 2, u'\xe9lo': 2, u'\xe9lu': 2, u'\u0161od': 2, u'\u0161oa': 2, u'la\u015f': 2, u'zeb': 2, u'zed': 2, u'zep': 2, u'zey': 2, u'ntp': 2, u'ntj': 2, u'\u03bf\u03b9\u03bd': 2, u'ntb': 2, u'ze\u015b': 2, u'nbw': 2, u'ktk': 2, u'ypp': 2, u'=pr': 2, u'\u03bf##': 2, u'\u017e\xedd': 2, u'\u017e\xedl': 2, u'd\xf3#': 2, u'ilj': 2, u'\xf4n\xed': 2, u'ep\xe7': 2, u'th\xe1': 2, u'\xf4n#': 2, u'co\u0171': 2, u'epz': 2, u'il\u0161': 2, u'\u0435#k': 2, u'yk\xf3': 2, u'\u03b9\u03bd\u03b9': 2, u'\u03b5ur': 2, u'ykk': 2, u'plz': 2, u'mtf': 2, u'mt\xfd': 2, u'ht\xe1': 2, u'u\xedc': 2, u'szz': 2, u'htr': 2, u'\u03be\u03b7#': 2, u'gh\xfa': 2, u'\xe1nf': 2, u'spk': 2, u'spc': 2, u'spf': 2, u'ek\u0155': 2, u'ghb': 2, u'x#g': 2, u'lmc': 2, u't\xf4\u010d': 2, u'zay': 2, u'\u015f#t': 2, u'\u015f#k': 2, u'\u015f#j': 2, u'\u015f#a': 2, u'\u015f#b': 2, u'b\xedb': 2, u'\xfa\u010ft': 2, u'bsy': 2, u'vus': 2, u'vup': 2, u'#qt': 2, u'\u03b9\u03b1\u03ba': 2, u'\u017e\xe1m': 2, u'uxl': 2, u'#\u017e\xe1': 2, u'#\u017e\xed': 2, u'#\u017e\xfa': 2, u'yt\u013e': 2, u'\u0163oc': 2, u'r\u0148t': 2, u'l\xfcb': 2, u'b#w': 2, u'\xf3tm': 2, u'dwe': 2, u'yok': 2, u'\xdflo': 2, u'\u017e\xfap': 2, u'\u03b3\u03b5\u03b9': 2, u'n\xfdn': 2, u'u\xe9n': 2, u'u\xe9#': 2, u'hh#': 2, u'cyj': 2, u'p\u012bk': 2, u'as\xe9': 2, u'#\u03b9\u03bd': 2, u'li\u0107': 2, u'#rz': 2, u'\u03b7#\u03c4': 2, u'ofn': 2, u'rhv': 2, u'st\xe4': 2, u'pu\u0142': 2, u'n\u010d#': 2, u'\xdf#p': 2, u'oxt': 2, u'a\xfcs': 2, u'gp#': 2, u'wou': 2, u'wod': 2, u'b\xe9k': 2, u'b\xe9a': 2, u'#u\u0148': 2, u'jp\xe4': 2, u'jp\xf3': 2, u'hbe': 2, u'gsv': 2, u'l#\xf4': 2, u'+pv': 2, u'f\u0165#': 2, u'p\xe4i': 2, u'cgr': 2, u'idd': 2, u'u\xdf#': 2, u'\xf4f#': 2, u'z#x': 2, u'v\u01cel': 2, u'kc#': 2, u'\u0161v\xed': 2, u'#h\xf3': 2, u'ycl': 2, u'l+v': 2, u'ulc': 2, u'#hj': 2, u'\xfdhi': 2, u'i#\u03b1': 2, u'n\u015bw': 2, u'#\u03bd\u03b1': 2, u'#\u03c0\u03c1': 2, u'we\u0142': 2, u'p\xfal': 2, u'k\xf3w': 2, u'jze': 2, u'bk#': 2, u'pya': 2, u'pyg': 2, u'\u017eio': 2, u'lu\xf1': 2, u'mga': 2, u'shk': 2, 
u'e#\u0123': 2, u'ot\xf4': 2, u'zyo': 2, u'zyp': 2, u'=do': 2, u'ie\u0144': 2, u'i\u015f#': 2, u'\xe9s\xfa': 2, u'f\xfa\xe1': 2, u'#\u0123i': 2, u'\u0165ke': 2, u'dm\xfd': 2, u'l\u0151#': 2, u'\xf3la': 2, u'#l\xe4': 2, u'#l\xfc': 2, u'ygh': 2, u'c\xe1n': 2, u'#lj': 2, u'#lb': 2, u'#lr': 2, u'r\xe1u': 2, u'r\xe1y': 2, u'u\xe1d': 2, u'l\xe1g': 2, u'tye': 2, u'tyt': 2, u'baq': 2, u'\u0161oj': 2, u'\u010d#\u0165': 2, u'mcy': 2, u'\u017em\xe1': 2, u'slv': 2, u'\u0142\u0105c': 2, u'xjs': 2, u'o\u0171t': 2, u'wwd': 2, u'wwb': 2, u'wwk': 2, u's\u0142u': 2, u'zu\u010d': 2, u'+xm': 2, u'oo\u010d': 2, u'jhv': 2, u'\xe4dn': 2, u'tnk': 2, u'\u0142a#': 2, u'nyp': 2, u'nyv': 2, u'nyk': 2, u'#\u043e\u0442': 2, u'\xedom': 2, u'\xedoc': 2, u'\u017cak': 2, u'a\u015bn': 2, u'#\u0161\u010d': 2, u'\xf6d\xf6': 2, u'g=p': 2, u'kk\xe4': 2, u'r\xede': 2, u'r\xedo': 2, u'\u03ae\u03b3\u03b5': 2, u'iwe': 2, u'#\u03b5s': 2, u'tu\xed': 2, u'pa\u0142': 2, u'pa\u017a': 2, u'pa\u017e': 2, u'tuu': 2, u'tuf': 2, u'd#\xe9': 2, u'v\xfaq': 2, u'v\xfap': 2, u'qsa': 2, u'bmu': 2, u'a\u017er': 2, u'd#x': 2, u'bm\xe4': 2, u'\u03cc\u03c0\u03c4': 2, u'\u017ea\xed': 2, u'mo\xe1': 2, u'zqu': 2, u'\xfasc': 2, u't\u0151f': 2, u'rr\xed': 2, u'\xe9km': 2, u't\xe4t': 2, u'hyc': 2, u'hyz': 2, u'ggy': 2, u'cs\xe9': 2, u'deq': 2, u'ryf': 2, u'rye': 2, u'de\xf1': 2, u'uhf': 2, u'okw': 2, u'zj#': 2, u'\xf3dm': 2, u'\xfalu': 2, u't\u011b\u0161': 2, u'cfa': 2, u'ko\xed': 2, u'\xf6hl': 2, u'u\u0142a': 2, u'ikb': 2, u'\u03bb\u03ae\u03b3': 2, u'l\xe9e': 2, u'l\xe9o': 2, u'l\xe9k': 2, u'l\xe9s': 2, u'fud': 2, u'fub': 2, u'fuo': 2, u'ful': 2, u'fu\xe1': 2, u'pe\xfa': 2, u'\u0117da': 2, u'pey': 2, u'pew': 2, u'bi\xed': 2, u'bi\xe8': 2, u'\u017ee\u0148': 2, u'usb': 2, u'\xfcte': 2, u'\xfctt': 2, u'sdu': 2, u'c\u0153u': 2, u'lyd': 2, u'lye': 2, u'lyf': 2, u'lyk': 2, u'sd\xe1': 2, u'\u017eez': 2, u'e=r': 2, u'e=e': 2, u'e=a': 2, u'e=o': 2, u'\u0165na': 2, u'm\u0155z': 2, u'em+': 2, u'af\xe1': 2, u'af\xf3': 2, u'p\xf3\u0142': 2, u'afz': 2, u'afp': 2, u'e\u013ed': 2, u're\xfa': 2, u'\xe5sy': 2, u'\xe5sa': 2, u'\u03b1\u03ba\u03cc': 2, u'\u017e#\xb0': 2, u'\u0161\u013ei': 2, u'\u013ata': 2, u'#\u0142u': 2, u'ud\xfd': 2, u'ksy': 2, u'ksm': 2, u'b\u013eo': 2, u'vrl': 2, u'\xfcmt': 2, u'vr\u0148': 2, u'z\xf6n': 2, u'\xe7o#': 2, u'pi\u0148': 2, u'pi\u0144': 2, u'r\xeb#': 2, u'\xe1\u013em': 2, u'#\u0159\xed': 2, u'es\u010d': 2, u'd\xed\xe1': 2, u'\u0107#k': 2, u'd\xedo': 2, u'#gw': 2, u'\u03c1\u03cc\u03b5': 2, u'\xe9ca': 2, u'aj\xf3': 2, u'\xe9cs': 2, u'p#\u0165': 2, u'gok': 2, u'ra\u013e': 2, u'\u0155\u017ee': 2, u'=ep': 2, u'ra\xed': 2, u'lb\xe9': 2, u'\xe4ck': 2, u'+\xb0c': 2, u'thb': 2, u'abk': 2, u'\u0142ub': 2, u'ocp': 2, u'oc+': 2, u'nuu': 2, u'oc\xe9': 2, u'kwe': 2, u'kw#': 2, u'wfd': 2, u'\xf6pe': 2, u'bd\u017e': 2, u'u\u010fa': 2, u'cns': 2, u'm\xe3o': 2, u'\xe1nr': 2, u'r\u0161m': 2, u'a\u0161\u013e': 2, u'nnh': 2, u'\xf3so': 2, u'ti+': 2, u'bq#': 2, u'pma': 2, u'msb': 2, u'\u0161\u0148a': 2, u'd\xe9l': 2, u'hu\u010d': 2, u'\u0165\u0165#': 2, u't\xfaz': 2, u't\xfab': 2, u'k#y': 2, u'ejf': 2, u'\u0165fi': 2, u'ejz': 2, u'znz': 2, u'e\xdfa': 2, u'an\xf3': 2, u'huf': 2, u'hup': 2, u'hux': 2, u'prm': 2, u'\u0161ri': 2, u'gki': 2, u'an+': 2, u'dyz': 2, u'ssz': 2, u'sss': 2, u'dya': 2, u'e\xfai': 2, u'=en': 2, u'\u0103#p': 2, u'\u03c4\u03b5\u03c4': 2, u'\xe4#\xed': 2, u'\u0148\u017ee': 2, u'r\xf4c': 2, u'cbe': 2, u'\u03b4\u03b9\u03b1': 2, u'igv': 2, u'igd': 2, u'#p\u012b': 2, u'\u010dot': 2, u'\u010doj': 2, u'faa': 2, u'fag': 2, u'r#x': 2, u'r#y': 2, u'nbs': 
2, u'nbl': 2, u'r#\xf3': 2, u'r#\xe9': 2, u'rmd': 2, u'r#\u015f': 2, u'#v\u01ce': 2, u'rmh': 2, u'iz\xfd': 2, u'yng': 2, u'guk': 2, u'izc': 2, u'ef\u0165': 2, u'wri': 2, u'hm\xe9': 2, u'i\u013ej': 2, u'iz\u0148': 2, u'r\u010fa': 2, u'ef=': 2, u'hif': 2, u'pvd': 2, u'ri\u0146': 2, u'pv+': 2, u'\u013edi': 2, u'ri\xfa': 2, u'duz': 2, u'ac\u0103': 2, u'\xfdvn': 2, u'\u03b5s#': 2, u'zz\xfa': 2, u'\u01cele': 2, u'nd\xfa': 2, u'#m\u013a': 2, u'#t\u011b': 2, u'zzi': 2, u'wnb': 2, u'wni': 2, u'\u03bd\u03b1\u03c4': 2, u'#tp': 2, u'\xeas#': 2, u'i\u010dr': 2, u'\xfdr#': 2, u'cay': 2, u'umt': 2, u'rrl': 2, u'cfu': 2, u'ta\u0144': 2, u'ta\u013e': 2, u'vox': 2, u'\xe1vm': 2, u'few': 2, u'fey': 2, u'\xf3ka': 2, u'tpo': 2, u'kb#': 2, u'h#y': 2, u'#kc': 2, u'ybd': 2, u'd\xe1d': 2, u'h\xfd\u0148': 2, u'yb\xf4': 2, u'eb\xe9': 2, u'wst': 2, u'av\u0148': 2, u'lvl': 2, u'n\xf6m': 2, u't+#': 2, u'bbb': 2, u'\u03bb\u03ae\u03be': 2, u't+p': 2, u'avv': 2, u'pzf': 2, u'j\xfab': 2, u'tjo': 2, u'eb\u010d': 2, u'mfe': 2, u'mff': 2, u'mfr': 2, u'\u0142#z': 2, u'l\xe9a': 2, u'lvy': 2, u'cfi': 2, })
dmort27/pylid
pylid/langs/sk.py
Python
mit
317,125
[ "ASE", "BWA", "EPW", "Elk", "MOE" ]
7f144f22d9f5695c2e23be6ef7d1cba4e4ee30ad7c2c77d7b4cc533514d12179
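The row ending above is the tail of pylid's Slovak model (pylid/langs/sk.py): a Python dict mapping character trigrams to raw counts, with '#' apparently standing in for word boundaries. As a hedged illustration only — pylid's actual scoring code is not part of this dump, and the names SK_TRIGRAMS, trigrams() and score() plus the add-alpha smoothing below are assumptions — counts shaped like these can drive a simple log-likelihood language scorer:

# -*- coding: utf-8 -*-
# Minimal sketch, NOT pylid's real API: score a string against a
# trigram-count table shaped like the Slovak data above.
import math

SK_TRIGRAMS = {u'ova': 3, u'kat': 3, u'#pr': 2}  # tiny stand-in for the full table

def trigrams(text):
    # Pad with '#' so word boundaries line up with the '#'-style keys above.
    padded = u'#' + text.replace(u' ', u'#') + u'#'
    return [padded[i:i + 3] for i in range(len(padded) - 2)]

def score(text, counts=SK_TRIGRAMS, alpha=0.5):
    # Add-alpha smoothed log-likelihood; a higher score means closer to the model.
    total = sum(counts.values()) + alpha * (len(counts) + 1)
    return sum(math.log((counts.get(t, 0) + alpha) / total) for t in trigrams(text))

A full identifier would compute such a score against one table per language and pick the best-scoring language; whether pylid does exactly this is not established by the data shown here.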
# -*- coding: utf-8 -*- """ Regression tests for the Test Client, especially the customized assertions. """ from __future__ import unicode_literals import os from django.core.urlresolvers import reverse from django.template import (TemplateSyntaxError, Context, Template, loader) import django.template.context from django.test import Client, TestCase from django.test.client import encode_file, RequestFactory from django.test.utils import ContextList, override_settings, str_prefix from django.template.response import SimpleTemplateResponse from django.utils._os import upath from django.utils.translation import ugettext_lazy from django.http import HttpResponse from .views import CustomTestException @override_settings( TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),) ) class AssertContainsTests(TestCase): def test_contains(self): "Responses can be inspected for content, including counting repeated substrings" response = self.client.get('/test_client_regress/no_template_view/') self.assertNotContains(response, 'never') self.assertContains(response, 'never', 0) self.assertContains(response, 'once') self.assertContains(response, 'once', 1) self.assertContains(response, 'twice') self.assertContains(response, 'twice', 2) try: self.assertContains(response, 'text', status_code=999) except AssertionError as e: self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'text', status_code=999) except AssertionError as e: self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'once') except AssertionError as e: self.assertIn("Response should not contain 'once'", str(e)) try: self.assertNotContains(response, 'once', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response should not contain 'once'", str(e)) try: self.assertContains(response, 'never', 1) except AssertionError as e: self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e)) try: self.assertContains(response, 'never', 1, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e)) try: self.assertContains(response, 'once', 0) except AssertionError as e: self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e)) try: self.assertContains(response, 'once', 0, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e)) try: self.assertContains(response, 'once', 2) except AssertionError as e: self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e)) try: self.assertContains(response, 'once', 2, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e)) try: self.assertContains(response, 'twice', 1) except AssertionError as e: self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e)) try: self.assertContains(response, 'twice', 1, msg_prefix='abc') except AssertionError as e: 
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e)) try: self.assertContains(response, 'thrice') except AssertionError as e: self.assertIn("Couldn't find 'thrice' in response", str(e)) try: self.assertContains(response, 'thrice', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't find 'thrice' in response", str(e)) try: self.assertContains(response, 'thrice', 3) except AssertionError as e: self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e)) try: self.assertContains(response, 'thrice', 3, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e)) def test_unicode_contains(self): "Unicode characters can be found in template context" #Regression test for #10183 r = self.client.get('/test_client_regress/check_unicode/') self.assertContains(r, 'さかき') self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8')) def test_unicode_not_contains(self): "Unicode characters can be searched for, and not found in template context" #Regression test for #10183 r = self.client.get('/test_client_regress/check_unicode/') self.assertNotContains(r, 'はたけ') self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8')) def test_binary_contains(self): r = self.client.get('/test_client_regress/check_binary/') self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e') with self.assertRaises(AssertionError): self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2) def test_binary_not_contains(self): r = self.client.get('/test_client_regress/check_binary/') self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e') with self.assertRaises(AssertionError): self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e') def test_nontext_contains(self): r = self.client.get('/test_client_regress/no_template_view/') self.assertContains(r, ugettext_lazy('once')) def test_nontext_not_contains(self): r = self.client.get('/test_client_regress/no_template_view/') self.assertNotContains(r, ugettext_lazy('never')) def test_assert_contains_renders_template_response(self): """ Test that we can pass in an unrendered SimpleTemplateReponse without throwing an error. Refs #15826. """ response = SimpleTemplateResponse(Template('Hello'), status=200) self.assertContains(response, 'Hello') def test_assert_contains_using_non_template_response(self): """ Test that auto-rendering does not affect responses that aren't instances (or subclasses) of SimpleTemplateResponse. Refs #15826. """ response = HttpResponse('Hello') self.assertContains(response, 'Hello') def test_assert_not_contains_renders_template_response(self): """ Test that we can pass in an unrendered SimpleTemplateReponse without throwing an error. Refs #15826. """ response = SimpleTemplateResponse(Template('Hello'), status=200) self.assertNotContains(response, 'Bye') def test_assert_not_contains_using_non_template_response(self): """ Test that auto-rendering does not affect responses that aren't instances (or subclasses) of SimpleTemplateResponse. Refs #15826. 
""" response = HttpResponse('Hello') self.assertNotContains(response, 'Bye') @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class AssertTemplateUsedTests(TestCase): fixtures = ['testdata.json'] def test_no_context(self): "Template usage assertions work then templates aren't in use" response = self.client.get('/test_client_regress/no_template_view/') # Check that the no template case doesn't mess with the template assertions self.assertTemplateNotUsed(response, 'GET Template') try: self.assertTemplateUsed(response, 'GET Template') except AssertionError as e: self.assertIn("No templates used to render the response", str(e)) try: self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: No templates used to render the response", str(e)) def test_single_context(self): "Template assertions work when there is a single context" response = self.client.get('/test_client/post_view/', {}) try: self.assertTemplateNotUsed(response, 'Empty GET Template') except AssertionError as e: self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateUsed(response, 'Empty POST Template') except AssertionError as e: self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e)) try: self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e)) def test_multiple_context(self): "Template assertions work when there are multiple contexts" post_data = { 'text': 'Hello World', 'email': 'foo@example.com', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view_with_template/', post_data) self.assertContains(response, 'POST data OK') try: self.assertTemplateNotUsed(response, "form_view.html") except AssertionError as e: self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateNotUsed(response, 'base.html') except AssertionError as e: self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateUsed(response, "Valid POST Template") except AssertionError as e: self.assertIn("Template 'Valid POST Template' was not a template used to render the response. 
Actual template(s) used: form_view.html, base.html", str(e)) class AssertRedirectsTests(TestCase): def test_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/test_client/permanent_redirect_view/') try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) def test_lost_query(self): "An assertion is raised if the redirect location doesn't preserve GET parameters" response = self.client.get('/test_client/redirect_view/', {'var': 'value'}) try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError as e: self.assertIn("Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e)) try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e)) def test_incorrect_target(self): "An assertion is raised if the response redirects to another target" response = self.client.get('/test_client/permanent_redirect_view/') try: # Should redirect to get_view self.assertRedirects(response, '/test_client/some_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) def test_target_page(self): "An assertion is raised if the response redirect target cannot be retrieved as expected" response = self.client.get('/test_client/double_redirect_view/') try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/') except AssertionError as e: self.assertIn("Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e)) try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e)) def test_redirect_chain(self): "You can follow a redirect chain consisting of a single redirect" response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', status_code=301, target_status_code=200) self.assertEqual(len(response.redirect_chain), 1) self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301)) def test_multiple_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/test_client_regress/redirects/', {}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', status_code=301, target_status_code=200) self.assertEqual(len(response.redirect_chain), 3) self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
self.assertEqual(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301)) self.assertEqual(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301)) def test_redirect_chain_to_non_existent(self): "You can follow a chain to a non-existent view" response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True) self.assertRedirects(response, '/test_client_regress/non_existent_view/', status_code=301, target_status_code=404) def test_redirect_chain_to_self(self): "Redirections to self are caught and escaped" response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True) # The chain of redirects stops once the cycle is detected. self.assertRedirects(response, '/test_client_regress/redirect_to_self/', status_code=301, target_status_code=301) self.assertEqual(len(response.redirect_chain), 2) def test_circular_redirect(self): "Circular redirect chains are caught and escaped" response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True) # The chain of redirects will get back to the starting point, but stop there. self.assertRedirects(response, '/test_client_regress/circular_redirect_2/', status_code=301, target_status_code=301) self.assertEqual(len(response.redirect_chain), 4) def test_redirect_chain_post(self): "A redirect chain will be followed from an initial POST request" response = self.client.post('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_head(self): "A redirect chain will be followed from an initial HEAD request" response = self.client.head('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_options(self): "A redirect chain will be followed from an initial OPTIONS request" response = self.client.options('/test_client_regress/redirects/', follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_put(self): "A redirect chain will be followed from an initial PUT request" response = self.client.put('/test_client_regress/redirects/', follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_delete(self): "A redirect chain will be followed from an initial DELETE request" response = self.client.delete('/test_client_regress/redirects/', follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_to_different_host(self): "The test client will preserve scheme, host and port changes" response = self.client.get('/test_client_regress/redirect_other_host/', follow=True) self.assertRedirects(response, 'https://otherserver:8443/test_client_regress/no_template_view/', status_code=301, target_status_code=200) # We can't use is_secure() or get_host() # because response.request is a dictionary, not an HttpRequest self.assertEqual(response.request.get('wsgi.url_scheme'), 'https') self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
self.assertEqual(response.request.get('SERVER_PORT'), '8443') def test_redirect_chain_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page returns 200 and does not redirect at all response = self.client.get('/test_client/get_view/', follow=True) try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) def test_redirect_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page returns 200 and does not redirect at all response = self.client.get('/test_client/get_view/') try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) class AssertFormErrorTests(TestCase): def test_unknown_form(self): "An assertion is raised if the form name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.') except AssertionError as e: self.assertIn("The form 'wrong_form' was not used to render the response", str(e)) try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e)) def test_unknown_field(self): "An assertion is raised if the field name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'some_field', 'Some error.') except AssertionError as e: self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e)) try: self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e)) def test_noerror_field(self): "An assertion is raised if the field doesn't have any errors" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'value', 'Some error.') except AssertionError as e: self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e)) try: self.assertFormError(response,
'form', 'value', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e)) def test_unknown_error(self): "An assertion is raised if the field doesn't contain the provided error" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'email', 'Some error.') except AssertionError as e: self.assertIn(str_prefix("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e)) try: self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn(str_prefix("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e)) def test_unknown_nonfield_error(self): """ Checks that an assertion is raised if the form's non-field errors don't contain the provided error. """ post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', None, 'Some error.') except AssertionError as e: self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e)) try: self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.'
(actual errors: )", str(e)) class AssertFormsetErrorTests(TestCase): msg_prefixes = [("", {}), ("abc: ", {"msg_prefix": "abc"})] def setUp(self): """Makes response object for testing field and non-field errors""" # For testing field and non-field errors self.response_form_errors = self.getResponse({ 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-text': 'Raise non-field error', 'form-0-email': 'not an email address', 'form-0-value': 37, 'form-0-single': 'b', 'form-0-multi': ('b','c','e'), 'form-1-text': 'Hello World', 'form-1-email': 'email@domain.com', 'form-1-value': 37, 'form-1-single': 'b', 'form-1-multi': ('b','c','e'), }) # For testing non-form errors self.response_nonform_errors = self.getResponse({ 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-text': 'Hello World', 'form-0-email': 'email@domain.com', 'form-0-value': 37, 'form-0-single': 'b', 'form-0-multi': ('b','c','e'), 'form-1-text': 'Hello World', 'form-1-email': 'email@domain.com', 'form-1-value': 37, 'form-1-single': 'b', 'form-1-multi': ('b','c','e'), }) def getResponse(self, post_data): response = self.client.post('/test_client/formset_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") return response def test_unknown_formset(self): "An assertion is raised if the formset name is unknown" for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'wrong_formset', 0, 'Some_field', 'Some error.', **kwargs) self.assertIn(prefix + "The formset 'wrong_formset' was not " "used to render the response", str(cm.exception)) def test_unknown_field(self): "An assertion is raised if the field name is unknown" for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'Some_field', 'Some error.', **kwargs) self.assertIn(prefix + "The formset 'my_formset', " "form 0 in context 0 " "does not contain the field 'Some_field'", str(cm.exception)) def test_no_error_field(self): "An assertion is raised if the field doesn't have any errors" for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'my_formset', 1, 'value', 'Some error.', **kwargs) self.assertIn(prefix + "The field 'value' " "on formset 'my_formset', form 1 " "in context 0 contains no errors", str(cm.exception)) def test_unknown_error(self): "An assertion is raised if the field doesn't contain the specified error" for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', 'Some error.', **kwargs) self.assertIn(str_prefix(prefix + "The field 'email' " "on formset 'my_formset', form 0 in context 0 does not " "contain the error 'Some error.' (actual errors: " "[%(_)s'Enter a valid email address.'])"), str(cm.exception)) def test_field_error(self): "No assertion is raised if the field contains the provided error" for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', ['Enter a valid email address.'], **kwargs) def test_no_nonfield_error(self): "An assertion is raised if the formsets non-field errors doesn't contain any errors." 
for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'my_formset', 1, None, 'Some error.', **kwargs) self.assertIn(prefix + "The formset 'my_formset', form 1 in " "context 0 does not contain any " "non-field errors.", str(cm.exception)) def test_unknown_nonfield_error(self): "An assertion is raised if the formset's non-field errors don't contain the provided error." for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Some error.', **kwargs) self.assertIn(str_prefix(prefix + "The formset 'my_formset', form 0 in context 0 does not " "contain the non-field error 'Some error.' (actual errors: " "[%(_)s'Non-field error.'])"), str(cm.exception)) def test_nonfield_error(self): "No assertion is raised if the formset's non-field errors contain the provided error." for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Non-field error.', **kwargs) def test_no_nonform_error(self): "An assertion is raised if the formset's non-form errors don't contain any errors." for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_form_errors, 'my_formset', None, None, 'Some error.', **kwargs) self.assertIn(prefix + "The formset 'my_formset' in context 0 " "does not contain any non-form errors.", str(cm.exception)) def test_unknown_nonform_error(self): "An assertion is raised if the formset's non-form errors don't contain the provided error." for prefix, kwargs in self.msg_prefixes: with self.assertRaises(AssertionError) as cm: self.assertFormsetError(self.response_nonform_errors, 'my_formset', None, None, 'Some error.', **kwargs) self.assertIn(str_prefix(prefix + "The formset 'my_formset' in context 0 does not contain the " "non-form error 'Some error.' (actual errors: [%(_)s'Forms " "in a set must have distinct email addresses.'])"), str(cm.exception)) def test_nonform_error(self): "No assertion is raised if the formset's non-form errors contain the provided error." for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_nonform_errors, 'my_formset', None, None, 'Forms in a set must have distinct email ' 'addresses.', **kwargs) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class LoginTests(TestCase): fixtures = ['testdata'] def test_login_different_client(self): "Check that using a different test client doesn't violate authentication" # Create a second client, and log in. c = Client() login = c.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Get a redirection page with the second client. response = c.get("/test_client_regress/login_protected_redirect_view/") # At this point, the self.client isn't logged in. # Check that assertRedirects uses the original client, not the # default client.
self.assertRedirects(response, "http://testserver/test_client_regress/get_view/") @override_settings( PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',), SESSION_ENGINE='test_client_regress.session' ) class SessionEngineTests(TestCase): fixtures = ['testdata'] def test_login(self): "A session engine that modifies the session key can be used to log in" login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Try to access a login protected page. response = self.client.get("/test_client/login_protected_view/") self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') class URLEscapingTests(TestCase): def test_simple_argument_get(self): "Get a view that has a simple string argument" response = self.client.get(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Howdy, Slartibartfast') def test_argument_with_space_get(self): "Get a view that has a string argument that requires escaping" response = self.client.get(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Hi, Arthur') def test_simple_argument_post(self): "Post for a view that has a simple string argument" response = self.client.post(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Howdy, Slartibartfast') def test_argument_with_space_post(self): "Post for a view that has a string argument that requires escaping" response = self.client.post(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Hi, Arthur') @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class ExceptionTests(TestCase): fixtures = ['testdata.json'] def test_exception_cleared(self): "#5836 - A stale user exception isn't re-raised by the test client." login = self.client.login(username='testclient',password='password') self.assertTrue(login, 'Could not log in') try: response = self.client.get("/test_client_regress/staff_only/") self.fail("General users should not be able to visit this page") except CustomTestException: pass # At this point, an exception has been raised, and should be cleared. # This next operation should be successful; if it isn't we have a problem. login = self.client.login(username='staff', password='password') self.assertTrue(login, 'Could not log in') try: self.client.get("/test_client_regress/staff_only/") except CustomTestException: self.fail("Staff should be able to visit this page") class TemplateExceptionTests(TestCase): def setUp(self): # Reset the loaders so they don't try to render cached templates. if loader.template_source_loaders is not None: for template_loader in loader.template_source_loaders: if hasattr(template_loader, 'reset'): template_loader.reset() @override_settings( TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'bad_templates'),) ) def test_bad_404_template(self): "Errors found when rendering 404 error templates are re-raised" try: response = self.client.get("/no_such_view/") self.fail("Should get error about syntax error in template") except TemplateSyntaxError: pass # We need two different tests to check URLconf substitution - one to check # it was changed, and another one (without self.urls) to check it was reverted on # teardown. 
This pair of tests relies upon the alphabetical ordering of test execution. class UrlconfSubstitutionTests(TestCase): urls = 'test_client_regress.urls' def test_urlconf_was_changed(self): "TestCase can enforce a custom URLconf on a per-test basis" url = reverse('arg_view', args=['somename']) self.assertEqual(url, '/arg_view/somename/') # This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the # name is to ensure alphabetical ordering. class zzUrlconfSubstitutionTests(TestCase): def test_urlconf_was_reverted(self): "URLconf is reverted to original value after modification in a TestCase" url = reverse('arg_view', args=['somename']) self.assertEqual(url, '/test_client_regress/arg_view/somename/') @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class ContextTests(TestCase): fixtures = ['testdata'] def test_single_context(self): "Context variables can be retrieved from a single context" response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'}) self.assertEqual(response.context.__class__, Context) self.assertTrue('get-foo' in response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') self.assertEqual(response.context['data'], 'sausage') try: response.context['does-not-exist'] self.fail('Should not be able to retrieve non-existent key') except KeyError as e: self.assertEqual(e.args[0], 'does-not-exist') def test_inherited_context(self): "Context variables can be retrieved from a list of contexts" response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'}) self.assertEqual(response.context.__class__, ContextList) self.assertEqual(len(response.context), 2) self.assertTrue('get-foo' in response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') self.assertEqual(response.context['data'], 'bacon') try: response.context['does-not-exist'] self.fail('Should not be able to retrieve non-existent key') except KeyError as e: self.assertEqual(e.args[0], 'does-not-exist') def test_contextlist_keys(self): c1 = Context() c1.update({'hello': 'world', 'goodbye': 'john'}) c1.update({'hello': 'dolly', 'dolly': 'parton'}) c2 = Context() c2.update({'goodbye': 'world', 'python': 'rocks'}) c2.update({'goodbye': 'dolly'}) l = ContextList([c1, c2]) # None, True and False are builtins of BaseContext, and present # in every Context without needing to be added. self.assertEqual(set(['None', 'True', 'False', 'hello', 'goodbye', 'python', 'dolly']), l.keys()) def test_15368(self): # Need to insert a context processor that assumes certain things about # the request instance. This triggers a bug caused by some ways of # copying RequestContext. try: django.template.context._standard_context_processors = (lambda request: {'path': request.special_path},) response = self.client.get("/test_client_regress/request_context_view/") self.assertContains(response, 'Path: /test_client_regress/request_context_view/') finally: django.template.context._standard_context_processors = None @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class SessionTests(TestCase): fixtures = ['testdata.json'] def test_session(self): "The session isn't lost if a user logs in" # The session doesn't exist to start. 
response = self.client.get('/test_client_regress/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'NO') # This request sets a session variable. response = self.client.get('/test_client_regress/set_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'set_session') # Check that the session has been modified response = self.client.get('/test_client_regress/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'YES') # Log in login = self.client.login(username='testclient',password='password') self.assertTrue(login, 'Could not log in') # Session should still contain the modified value response = self.client.get('/test_client_regress/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'YES') def test_logout(self): """Logout should work whether the user is logged in or not (#9978).""" self.client.logout() login = self.client.login(username='testclient',password='password') self.assertTrue(login, 'Could not log in') self.client.logout() self.client.logout() class RequestMethodTests(TestCase): def test_get(self): "Request a view via request method GET" response = self.client.get('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: GET') def test_post(self): "Request a view via request method POST" response = self.client.post('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: POST') def test_head(self): "Request a view via request method HEAD" response = self.client.head('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) # A HEAD request doesn't return any content. 
self.assertNotEqual(response.content, b'request method: HEAD') self.assertEqual(response.content, b'') def test_options(self): "Request a view via request method OPTIONS" response = self.client.options('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: OPTIONS') def test_put(self): "Request a view via request method PUT" response = self.client.put('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PUT') def test_delete(self): "Request a view via request method DELETE" response = self.client.delete('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: DELETE') def test_patch(self): "Request a view via request method PATCH" response = self.client.patch('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PATCH') class RequestMethodStringDataTests(TestCase): def test_post(self): "Request a view with string data via request method POST" # Regression test for #11371 data = '{"test": "json"}' response = self.client.post('/test_client_regress/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: POST') def test_put(self): "Request a view with string data via request method PUT" # Regression test for #11371 data = '{"test": "json"}' response = self.client.put('/test_client_regress/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PUT') def test_patch(self): "Request a view with string data via request method PATCH" # Regression test for #17797 data = '{"test": "json"}' response = self.client.patch('/test_client_regress/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PATCH') class QueryStringTests(TestCase): def test_get_like_requests(self): # See: https://code.djangoproject.com/ticket/10571. 
for method_name in ('get', 'head'): # A GET-like request can pass a query string as data method = getattr(self.client, method_name) response = method("/test_client_regress/request_data/", data={'foo':'whiz'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') # A GET-like request can pass a query string as part of the URL response = method("/test_client_regress/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') # Data provided in the URL to a GET-like request is overridden by actual form data response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'}) self.assertEqual(response.context['get-foo'], 'bang') self.assertEqual(response.context['request-foo'], 'bang') response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'}) self.assertEqual(response.context['get-foo'], None) self.assertEqual(response.context['get-bar'], 'bang') self.assertEqual(response.context['request-foo'], None) self.assertEqual(response.context['request-bar'], 'bang') def test_post_like_requests(self): # Data passed to a POST-like request is sent as form data in the body, not as a query string response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'}) self.assertEqual(response.context['get-foo'], None) self.assertEqual(response.context['post-foo'], 'whiz') # A POST-like request can pass a query string as part of the URL response = self.client.post("/test_client_regress/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], None) self.assertEqual(response.context['request-foo'], 'whiz') # POST data provided in the URL augments actual form data response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], 'bang') self.assertEqual(response.context['request-foo'], 'bang') response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['get-bar'], None) self.assertEqual(response.context['post-foo'], None) self.assertEqual(response.context['post-bar'], 'bang') self.assertEqual(response.context['request-foo'], 'whiz') self.assertEqual(response.context['request-bar'], 'bang') class UnicodePayloadTests(TestCase): def test_simple_unicode_payload(self): "A simple ASCII-only unicode JSON document can be POSTed" # Regression test for #10571 json = '{"english": "mountain pass"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json") self.assertEqual(response.content, json.encode()) def test_unicode_payload_utf8(self): "Non-ASCII unicode data encoded as UTF-8 can be POSTed" # Regression test for #10571 json = '{"dog": "собака"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json; charset=utf-8") self.assertEqual(response.content, json.encode('utf-8')) def test_unicode_payload_utf16(self): "Non-ASCII unicode data encoded as UTF-16 can be POSTed" # Regression test for #10571 json = '{"dog": "собака"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json; charset=utf-16") self.assertEqual(response.content, json.encode('utf-16')) def
test_unicode_payload_non_utf(self): "A non-ASCII unicode data as a non-UTF based encoding can be POSTed" #Regression test for #10571 json = '{"dog": "собака"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json; charset=koi8-r") self.assertEqual(response.content, json.encode('koi8-r')) class DummyFile(object): def __init__(self, filename): self.name = filename def read(self): return b'TEST_FILE_CONTENT' class UploadedFileEncodingTest(TestCase): def test_file_encoding(self): encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin')) self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0]) self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1]) self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1]) def test_guesses_content_type_on_file_encoding(self): self.assertEqual(b'Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2]) self.assertEqual(b'Content-Type: text/plain', encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2]) self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], ( b'Content-Type: application/x-compress', b'Content-Type: application/x-zip', b'Content-Type: application/x-zip-compressed', b'Content-Type: application/zip',)) self.assertEqual(b'Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2]) class RequestHeadersTest(TestCase): def test_client_headers(self): "A test client can receive custom headers" response = self.client.get("/test_client_regress/check_headers/", HTTP_X_ARG_CHECK='Testing 123') self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123") self.assertEqual(response.status_code, 200) def test_client_headers_redirect(self): "Test client headers are preserved through redirects" response = self.client.get("/test_client_regress/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123') self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123") self.assertRedirects(response, '/test_client_regress/check_headers/', status_code=301, target_status_code=200) class ReadLimitedStreamTest(TestCase): """ Tests that ensure that HttpRequest.body, HttpRequest.read() and HttpRequest.read(BUFFER) have proper LimitedStream behavior. 
Refs #14753, #15785 """ def test_body_from_empty_request(self): """HttpRequest.body on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/test_client_regress/body/").content, b'') def test_read_from_empty_request(self): """HttpRequest.read() on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/test_client_regress/read_all/").content, b'') def test_read_numbytes_from_empty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/test_client_regress/read_buffer/").content, b'') def test_read_from_nonempty_request(self): """HttpRequest.read() on a test client PUT request with some payload should return that payload.""" payload = b'foobar' self.assertEqual(self.client.put("/test_client_regress/read_all/", data=payload, content_type='text/plain').content, payload) def test_read_numbytes_from_nonempty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client PUT request with some payload should return that payload.""" payload = b'foobar' self.assertEqual(self.client.put("/test_client_regress/read_buffer/", data=payload, content_type='text/plain').content, payload) class RequestFactoryStateTest(TestCase): """Regression tests for #15929.""" # These tests are checking that certain middleware don't change certain # global state. Alternatively, from the point of view of a test, they are # ensuring test isolation behavior. So, unusually, it doesn't make sense to # run the tests individually, and if any are failing it is confusing to run # them with any other set of tests. def common_test_that_should_always_pass(self): request = RequestFactory().get('/') request.session = {} self.assertFalse(hasattr(request, 'user')) def test_request(self): self.common_test_that_should_always_pass() def test_request_after_client(self): # apart from the next line the three tests are identical self.client.get('/') self.common_test_that_should_always_pass() def test_request_after_client_2(self): # This test is executed after the previous one self.common_test_that_should_always_pass() class RequestFactoryEnvironmentTests(TestCase): """ Regression tests for #8551 and #17067: ensure that environment variables are set correctly in RequestFactory. """ def test_should_set_correct_env_variables(self): request = RequestFactory().get('/path/') self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1') self.assertEqual(request.META.get('SERVER_NAME'), 'testserver') self.assertEqual(request.META.get('SERVER_PORT'), '80') self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1') self.assertEqual(request.META.get('SCRIPT_NAME') + request.META.get('PATH_INFO'), '/path/')
mdj2/django
tests/test_client_regress/tests.py
Python
bsd-3-clause
59,629
[ "VisIt" ]
394f1ae58db4f5b812d8b8a78ec557c4802fe9376e8920752b55a65e0cdff4ce
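The record above exercises Django's test client and RequestFactory against a request_methods view. As a minimal sketch of the same pattern outside a full project, assuming only that Django is installed (the echo_method view and the inline settings are illustrative, not part of the test suite):

import django
from django.conf import settings

if not settings.configured:
    # Bare-bones settings so RequestFactory works outside a project.
    settings.configure(DEBUG=True, ALLOWED_HOSTS=["testserver"])
    django.setup()

from django.http import HttpResponse
from django.test import RequestFactory

def echo_method(request):
    # Hypothetical view mirroring the request_methods view the tests hit.
    return HttpResponse("request method: %s" % request.method)

factory = RequestFactory()
for verb in ("get", "post", "put", "patch", "delete", "head", "options"):
    response = echo_method(getattr(factory, verb)("/request_methods/"))
    print(verb, response.content)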
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)


def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition."""
    results = ([], [])

    for i in iterator:
        results[int(predicate(i))].append(i)

    # Returns trueList, falseList
    return results[1], results[0]


class secretmanagerCallTransformer(cst.CSTTransformer):
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'access_secret_version': ('name', ),
        'add_secret_version': ('parent', 'payload', ),
        'create_secret': ('parent', 'secret_id', 'secret', ),
        'delete_secret': ('name', ),
        'destroy_secret_version': ('name', ),
        'disable_secret_version': ('name', ),
        'enable_secret_version': ('name', ),
        'get_iam_policy': ('resource', 'options', ),
        'get_secret': ('name', ),
        'get_secret_version': ('name', ),
        'list_secrets': ('parent', 'page_size', 'page_token', ),
        'list_secret_versions': ('parent', 'page_size', 'page_token', ),
        'set_iam_policy': ('resource', 'policy', ),
        'test_iam_permissions': ('resource', 'permissions', ),
        'update_secret': ('secret', 'update_mask', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        kwargs, ctrl_kwargs = partition(
            lambda a: not a.keyword.value in self.CTRL_PARAMS,
            kwargs
        )

        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )


def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=secretmanagerCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )

    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the secretmanager client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
      parameters in client method calls to keyword based parameters.
      Cases where it WILL FAIL include
      A) * or ** expansion in a method call.
      B) Calls via function or method alias (includes free function calls)
      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

      These all constitute false negatives. The tool will also detect false
      positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)

    fix_files(input_dir, output_dir)
googleapis/python-secret-manager
scripts/fixup_secretmanager_v1beta1_keywords.py
Python
apache-2.0
6,654
[ "VisIt" ]
6867f1dfa7c5caa8a97bb169a0fa4ab6c0e04cb1564187c9f5d181332feb8b6b
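The fixup script above is, at heart, a libcst CSTTransformer that re-packs positional call arguments into a single request keyword. A stripped-down sketch of that rewrite, assuming libcst is installed (WrapFirstArg and the sample call are illustrative; the real tool additionally routes the retry/timeout/metadata control parameters):

import libcst as cst

class WrapFirstArg(cst.CSTTransformer):
    """Toy version of the positional-to-keyword rewrite: turn f(x, ...)
    into f(request=x, ...)."""

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        if not updated.args or updated.args[0].keyword is not None:
            return updated
        # Re-pass the first positional argument as a keyword argument.
        first = updated.args[0].with_changes(keyword=cst.Name("request"))
        return updated.with_changes(args=[first, *updated.args[1:]])

tree = cst.parse_module("client.get_secret('projects/p/secrets/s')\n")
# Prints the call rewritten with a request= keyword.
print(tree.visit(WrapFirstArg()).code)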
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from hypothesis import given, assume, strategies, Settings from libcloudcore.exceptions import ParameterError from libcloudcore.importer import Importer from libcloudcore import models # FIXME: \r is encoded as \n # FIXME: \x0b is not a well formed token according to expat # xml.parsers.expat.ExpatError: not well-formed (invalid token) # FIXME ditto for \x0c PRINTABLE = ( '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL' 'MNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n' ) def find_services(): root = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "data") ) for path, dirs, files in os.walk(root): if 'service.json' in files: yield os.path.relpath(path, root) def find_operations(): session = Importer(__name__) for service in find_services(): if service == "aws/dynamodb": continue driver = session.get_driver(service) for operation in driver.model.get_operations(): yield service, operation.name, driver, operation class StrategyBuilder(models.Visitor): def __init__(self): self.active = set() def visit(self, shape): assert shape.type is not None visit_fn_name = "visit_{}".format(shape.type) try: visit_fn = getattr(self, visit_fn_name) except AttributeError: raise NotImplementedError(visit_fn_name) return visit_fn(shape) def visit_string(self, shape): return strategies.text( alphabet=PRINTABLE, min_size=shape.min, max_size=shape.max or 12, ) def visit_integer(self, shape): return strategies.integers( min_value=shape.min, max_value=shape.max, ) visit_long = visit_integer def visit_float(self, shape): return strategies.floats( min_value=shape.min or -10000, max_value=shape.max or 10000, ) visit_double = visit_float def visit_boolean(self, shape): return strategies.booleans() def visit_timestamp(self, shape): from hypothesis.extra.datetime import datetimes return datetimes( min_year=1900, max_year=2100, ) def visit_blob(self, shape): # FIXME: strategies.binary # xmltodict can't roundtrip b'\x00' return strategies.text( alphabet=PRINTABLE, min_size=shape.min, max_size=shape.max or 50, ) def visit_list(self, shape): return strategies.lists( self.visit(shape.of), max_size=1, ) def visit_map(self, shape): if shape.name in self.active: return strategies.fixed_dictionaries({}) self.active.add(shape.name) try: return strategies.dictionaries( keys=self.visit(shape.key_shape), values=self.visit(shape.value_shape), ) finally: self.active.remove(shape.name) def visit_structure(self, shape): if shape.name in self.active: return strategies.fixed_dictionaries({}) self.active.add(shape.name) try: structure = {} for member in shape.iter_members(): structure[member.name] = self.visit(member.shape) return strategies.fixed_dictionaries(structure) finally: self.active.remove(shape.name) def roundtrip(driver, 
operation, shape): strategy = StrategyBuilder().visit(shape) settings = Settings( min_satisfying_examples=1, max_examples=3, ) @given(strategy, settings=settings) def inner(data): try: driver.validate(shape, data) except ParameterError: assume(False) serialized = driver.serialize(operation, shape, data) assert isinstance(serialized, str) deserialized = driver.deserialize(operation, shape, serialized) assert data == deserialized return inner() @pytest.mark.parametrize('d,o,driver,operation', find_operations()) def test_data(d, o, driver, operation): if operation.input_shape: assert len(operation.input_shape.name) > 0 roundtrip( driver(), operation, operation.input_shape ) if operation.output_shape: assert len(operation.output_shape.name) > 0 roundtrip( driver(), operation, operation.output_shape )
Jc2k/libcloudcore
libcloudcore/tests/test_data.py
Python
apache-2.0
5,383
[ "VisIt" ]
1adc0b0db1b489c73e07c5baf53c3d75acfa83c1cfebbbb884bcfd12eb52fb41
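The property being tested above is a serialize/deserialize roundtrip driven by strategies derived from each service shape. A minimal sketch of the same roundtrip property against the current hypothesis API (the file itself uses the long-retired Settings class and given(..., settings=...) form); json stands in for the driver's serializer:

import json
from hypothesis import given, settings, strategies as st

# Stand-in for the shape-derived strategy: string keys, mixed scalar values.
payload = st.dictionaries(
    keys=st.text(min_size=1, max_size=8),
    values=st.integers() | st.booleans() | st.text(max_size=12),
)

@settings(max_examples=25)
@given(payload)
def test_roundtrip(data):
    # Whatever survives serialization must deserialize to equal data.
    assert json.loads(json.dumps(data)) == data

test_roundtrip()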
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for templates module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import absltest as test import gast from pyctr.core import anno from pyctr.core import parsing from pyctr.sct import transformer class TransformerTest(test.TestCase): def _simple_source_info(self): return transformer.EntityInfo( source_code=None, source_file=None, namespace=None, arg_values=None, arg_types=None, owner_type=None) def test_entity_scope_tracking(self): class TestTransformer(transformer.Base): # The choice of note to assign to is arbitrary. Using Assign because it's # easy to find in the tree. def visit_Assign(self, node): anno.setanno(node, 'enclosing_entities', self.enclosing_entities) return self.generic_visit(node) # This will show up in the lambda function. def visit_BinOp(self, node): anno.setanno(node, 'enclosing_entities', self.enclosing_entities) return self.generic_visit(node) tr = TestTransformer(self._simple_source_info()) def test_function(): a = 0 class TestClass(object): def test_method(self): b = 0 def inner_function(x): c = 0 d = lambda y: (x + y) return c, d return b, inner_function return a, TestClass node, _ = parsing.parse_entity(test_function) node = tr.visit(node) test_function_node = node.body[0] test_class = test_function_node.body[1] test_method = test_class.body[0] inner_function = test_method.body[1] lambda_node = inner_function.body[1].value a = test_function_node.body[0] b = test_method.body[0] c = inner_function.body[0] lambda_expr = lambda_node.body self.assertEqual((test_function_node,), anno.getanno( a, 'enclosing_entities')) self.assertEqual((test_function_node, test_class, test_method), anno.getanno(b, 'enclosing_entities')) self.assertEqual( (test_function_node, test_class, test_method, inner_function), anno.getanno(c, 'enclosing_entities')) self.assertEqual((test_function_node, test_class, test_method, inner_function, lambda_node), anno.getanno(lambda_expr, 'enclosing_entities')) def assertSameAnno(self, first, second, key): self.assertIs(anno.getanno(first, key), anno.getanno(second, key)) def assertDifferentAnno(self, first, second, key): self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key)) def test_state_tracking(self): class LoopState(object): pass class CondState(object): pass class TestTransformer(transformer.Base): def visit(self, node): anno.setanno(node, 'loop_state', self.state[LoopState].value) anno.setanno(node, 'cond_state', self.state[CondState].value) return super(TestTransformer, self).visit(node) def visit_While(self, node): self.state[LoopState].enter() node = self.generic_visit(node) self.state[LoopState].exit() return node def visit_If(self, node): self.state[CondState].enter() node = self.generic_visit(node) self.state[CondState].exit() return node tr = TestTransformer(self._simple_source_info()) def 
test_function(a): a = 1 while a: _ = 'a' if a > 2: _ = 'b' while True: raise '1' if a > 3: _ = 'c' while True: raise '1' node, _ = parsing.parse_entity(test_function) node = tr.visit(node) fn_body = node.body[0].body outer_while_body = fn_body[1].body self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state') self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state') first_if_body = outer_while_body[1].body self.assertDifferentAnno(outer_while_body[0], first_if_body[0], 'cond_state') self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state') first_inner_while_body = first_if_body[1].body self.assertSameAnno(first_if_body[0], first_inner_while_body[0], 'cond_state') self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0], 'loop_state') second_if_body = outer_while_body[2].body self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state') self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state') second_inner_while_body = second_if_body[1].body self.assertDifferentAnno(first_inner_while_body[0], second_inner_while_body[0], 'cond_state') self.assertDifferentAnno(first_inner_while_body[0], second_inner_while_body[0], 'loop_state') def test_visit_block_postprocessing(self): class TestTransformer(transformer.Base): def _process_body_item(self, node): if isinstance(node, gast.Assign) and (node.value.id == 'y'): if_node = gast.If(gast.Name('x', gast.Load(), None), [node], []) return if_node, if_node.body return node, None def visit_FunctionDef(self, node): node.body = self.visit_block( node.body, after_visit=self._process_body_item) return node def test_function(x, y): z = x z = y return z tr = TestTransformer(self._simple_source_info()) node, _ = parsing.parse_entity(test_function) node = tr.visit(node) node = node.body[0] self.assertLen(node.body, 2) self.assertIsInstance(node.body[0], gast.Assign) self.assertIsInstance(node.body[1], gast.If) self.assertIsInstance(node.body[1].body[0], gast.Assign) self.assertIsInstance(node.body[1].body[1], gast.Return) def test_robust_error_on_list_visit(self): class BrokenTransformer(transformer.Base): def visit_If(self, node): # This is broken because visit expects a single node, not a list, and # the body of an if is a list. # Importantly, the default error handling in visit also expects a single # node. Therefore, mistakes like this need to trigger a type error # before the visit called here installs its error handler. # That type error can then be caught by the enclosing call to visit, # and correctly blame the If node. self.visit(node.body) return node def test_function(x): if x > 0: return x tr = BrokenTransformer(self._simple_source_info()) node, _ = parsing.parse_entity(test_function) with self.assertRaises(ValueError) as cm: node = tr.visit(node) obtained_message = str(cm.exception) expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"' self.assertRegexpMatches(obtained_message, expected_message) def test_robust_error_on_ast_corruption(self): # A child class should not be able to be so broken that it causes the error # handling in `transformer.Base` to raise an exception. Why not? Because # then the original error location is dropped, and an error handler higher # up in the call stack gives misleading information. # Here we test that the error handling in `visit` completes, and blames the # correct original exception, even if the AST gets corrupted. 
class NotANode(object): pass class BrokenTransformer(transformer.Base): def visit_If(self, node): node.body = NotANode() raise ValueError('I blew up') def test_function(x): if x > 0: return x tr = BrokenTransformer(self._simple_source_info()) node, _ = parsing.parse_entity(test_function) with self.assertRaises(ValueError) as cm: node = tr.visit(node) obtained_message = str(cm.exception) # The message should reference the exception actually raised, not anything # from the exception handler. expected_substring = 'I blew up' self.assertIn(expected_substring, obtained_message, obtained_message) if __name__ == '__main__': test.main()
google/pyctr
sct/transformer_test.py
Python
apache-2.0
8,923
[ "VisIt" ]
56860090c97de31b2aa3758d76f1caca7501b87f4795aecaf35a5115231d0a76
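pyctr's transformer.Base tracks the chain of enclosing entities while walking the tree. A minimal sketch of that bookkeeping using only the standard-library ast module, so it runs without pyctr (EnclosingTracker and the enclosing_entities attribute are illustrative stand-ins for anno.setanno):

import ast

class EnclosingTracker(ast.NodeTransformer):
    """Record, on every Assign node, the names of the functions that
    enclose it -- a toy version of the enclosing_entities annotation."""

    def __init__(self):
        self.stack = []

    def visit_FunctionDef(self, node):
        self.stack.append(node.name)
        self.generic_visit(node)   # visit children with this scope active
        self.stack.pop()
        return node

    def visit_Assign(self, node):
        node.enclosing_entities = tuple(self.stack)
        return node

tree = ast.parse("def f():\n    a = 0\n    def g():\n        b = 1\n")
EnclosingTracker().visit(tree)
for node in ast.walk(tree):
    if isinstance(node, ast.Assign):
        print(node.enclosing_entities)   # ('f',) then ('f', 'g')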
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk

class vtkHierarchicalDataLevelFilter(SimpleVTKClassModuleBase):
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkHierarchicalDataLevelFilter(), 'Processing.',
            ('vtkMultiGroupDataSet',), ('vtkMultiGroupDataSet',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
chrisidefix/devide
modules/vtk_basic/vtkHierarchicalDataLevelFilter.py
Python
bsd-3-clause
533
[ "VTK" ]
298ef6894895e0048af20cb18b43cc437ecac31c78d2434a6961d064d6ea3e38
# -*- coding: utf-8 -*- #This is generated code - do not edit encoding = 'utf-8' dict = { ' of ': ' de ', '&About...': '&Sobre...', '&Close Document': 'Fe&char o Documento', '&Comment Region': '\xc3\x81rea de &Coment\xc3\xa1rios', '&Delete Window': '%Apagar Janela', '&Describe Action': '&Descrever Ac\xc3\xa7\xc3\xa3o', '&Describe Key': '&Descrever Chave', '&Execute Action': '&Executar Ac\xc3\xa7\xc3\xa3o', '&Execute Macro': '&Executar macro', '&Folding': '&Dobrar', '&Help': '&Ajuda', '&Line Numbers': '&N\xc3\xbameros de linha', '&Line Wrapping': '&Quebra de Linha', '&New Window': '&Nova Janela', '&Open Hex Editor...': '&Abrir Editor de Hexadecimais...', '&Open Image Viewer...': '&Abrir Visualizador de Imagens...', '&Open Sample Graphviz dot file': 'Abrir ficheiro exemplo dot do Graphviz', '&Open Sample Python': 'Abrir ficheiro de exemplo Python', '&Preferences...': '&Prefer\xc3\xaancias...', '&Revert': '&Reverter', '&Save...': '&Gravar...', '&Show Key Bindings': 'Exibir liga\xc3\xa7\xc3\xb5es de teclas', '&Show Toolbars': 'Exibir Barra de Ferramentas', '&Tabify': '&Tabular', '&Uncomment Region': 'Descomentar regi\xc3\xa3o', '&Untabify': 'Anular tabular', '&Word Count': '&Contar Palavras', '&Wrap Words': 'Moldar palavras', '. Do you wish to continue?': 'Deseja continuar?', 'Abort': 'Abortar', 'About this program': 'Sobre esta aplica\xc3\xa7\xc3\xa3o', 'Actions': 'Ac\xc3\xa7\xc3\xb5es', 'Add ChangeLog Entry': 'Adicionar entrada no ficheiro ChangeLog', 'Add Keybinding For Last Macro': 'Adicionar liga\xc3\xa7\xc3\xa3o a uma tecla para a \xc3\xbaltima macro', 'Add To File Cabinet': 'Adicionar a ficheiro .cab', 'Add a Multi-Key Binding': 'Adicionar liga\xc3\xa7\xc3\xa3o a multiplas teclas', 'Add a Single Key Binding': 'Adicionar liga\xc3\xa7\xc3\xa3o a uma \xc3\xbanica tecla', 'Add a Three Key Binding': 'Adicionar liga\xc3\xa7\xc3\xa3o a tr\xc3\xaas teclas', 'Add a Two Key Binding': 'Adicionar liga\xc3\xa7\xc3\xa3o a duas teclas', 'Add new ChangeLog entry to the top of the ChangeLog': 'Adicionar nova entrada no topo do ficheiro ChangeLog', 'Add to repository': 'Adicionar ao reposit\xc3\xb3ri', 'All Macros': 'Todas as macros', 'All Minor Modes': 'Todos os modos menores', 'All Sidebars': 'Todas as barras laterais', 'All Sidebars and Minor Modes': 'Todas as barras laterais e modos menores', 'An error occurred when attempting to remove ': 'Ocorreu um erro na tentativa de remo\xc3\xa7\xc3\xa3o de ', 'Angle of Rotation:': '\xc3\x82ngulo de Rota\xc3\xa7\xc3\xa3o:', 'Apply Settings': 'Aplicar defini\xc3\xa7\xc3\xb5es', 'As Defaults for %s Mode': 'Como nos valores por omiss\xc3\xa3o nos modos %s', 'As Defaults for %s Mode in Project': 'Como no valores por omiss\xc3\xa3o para os %s no Projecto', 'As Defaults for All Modes': 'Como nos valores por omiss\xc3\xa3o para todos os modos', 'As Defaults for All Modes in Project': 'Como nos valores por omiss\xc3\xa3o para todos os modos no projecto', 'As Kate Variables at Top of File': 'Como as vari\xc3\xa1veis Kate no topo do ficheiro', 'Attributes': 'Atributos', 'Average Focal Planes': 'Planos focais m\xc3\xa9dios', 'Background': 'Fundo', 'Backslashify': 'Aplicar \\', 'Band Subtraction Filter': 'Filtro de subtrac\xc3\xa7\xc3\xa3o de banda', 'Base new theme on existing one': 'Basear o novo tema num existente', 'Bookmarks': 'Favoritos', 'Bring All to Front': 'Trazer Todos Para a Frente', 'Cancel': 'Cancelar', 'Cancel Minibuffer': 'Cancelar minibuffer', 'Cancelled multi-key keystroke': 'Cancelada pressionar v\xc3\xa1rias teclas', 'Capitalize': 
'Capitaliza\xc3\xa7\xc3\xa3o', 'Case': 'Capitaliza\xc3\xa7\xc3\xa3o', 'Case Sensitive Search': 'Pesquisa sens\xc3\xadvel \xc3\xa0 capitaliza\xc3\xa7\xc3\xa3o', 'Change to a document by typing part of its name': 'Alternar para um documento digitando parte do nome', 'Changed color scheme to %s': 'Mudar esquema de cor para %s', 'Clear Flags': 'Eliminar flags', 'Clear Playlist': 'Limpar a Lista de Reprodu\xc3\xa7\xc3\xa3o', 'Clipping Filter': 'Filtro de corte', 'Close Tab': 'Fechar Separador', 'Close the current tab': 'Fechar o separador actual', 'Color': 'Cor', 'Color Map': 'Mapa de Cores', 'Commit Dialog': 'Di\xc3\xa1logo de Execu\xc3\xa7\xc3\xa3o', 'Commit changes': 'Executar altera\xc3\xa7\xc3\xb5es', 'Compare to previous version': 'Comparar com vers\xc3\xa3o anterior', 'Complete word': 'Completar palavra', 'Contrast': 'Contraste', 'Contributions by:': 'Contributos por:', 'Copy': 'Copiar', 'Cut': 'Cortar', 'Debug': 'Depurar', 'Debug Keypress': 'Depurar tecla premida', 'Decrease Size': 'Diminuir tamanho', 'Decrease Volume': 'Diminuir Volume', 'Decrease the volume': 'Diminuir o Volume', 'Delete Bookmark': 'Eliminar favorito', 'Delete Macro': 'Apagar macro', 'Delete current window': 'Apagar janela actual', 'Describe key:': 'Descri\xc3\xa7\xc3\xa3o detecla:', 'Documents': 'Documentos', 'Downcase': 'Min\xc3\xbasculas', 'E&xit': '&Sair', 'EOL Characters': 'Caracteres EOL', 'Edit': 'Editar', 'Edit Macro': 'Editar macro', 'Enter a hex color value': 'introduza a cor em hexa', 'Enter command to execute on all files': 'Introduza comando a executar em todos os ficheiros', 'Enter your commit message:': 'Introduza a sua mensagem para execu\xc3\xa7\xc3\xa3o:', 'Error Traceback:': 'Erro de Traceback:', 'Error occurred when copying/moving files': 'Ocorreu um erro quando copiava ou movia ficheiros', 'Error occurred when removing files': 'Ocorreu um erro ao apagar ficheiros', 'Error/Crash Reporter': 'Reporte de erros ou crashes', 'Error: Something unexpected happened. You can attempt to continue,\nabort the program, or send an error report.': 'Erro, algo inesperado aconteceu. 
Pode tentar continuar, \nabortar ou enviar um reporte de erro,', 'Execute Macro By Keystroke': 'Executar macro por premirtecla', 'Execute command...': 'Executar comando...', 'Exit the application': 'Sair da aplica\xc3\xa7\xc3\xa3o', 'Export': 'Exportar', 'Fast test of the progress bar': 'Teste r\xc3\xa1pido da barra de progresso', 'File': 'Ficheiro', 'File Cabinet': 'Ficheiro cab', 'File/Export': 'Exportar ficheiro', 'Fill Paragraph': 'preencher par\xc3\xa1grafo', 'Filter': 'Filtro', 'Find Previous...': 'Localizar anterior...', 'Find Regex...': 'Localizar Regex...', 'Find Wildcard...': 'Encontrar coringa...', 'Find...': 'Localizar...', 'Finished %s on': 'Localizar %s em', 'Floating Point': 'V\xc3\xadrgula Flutuante', 'Focal Plane View': 'Vista de Plano focal', 'Folder': 'Pasta', 'Font': 'Tipo de letra', 'Font Settings': 'Configura\xc3\xa7\xc3\xa3o dos Tipos de Letra', 'Font Size': 'Tamanho da Letra', 'Foreground': 'Primeiro plano', 'Frames': 'Molduras', 'Games': 'Jogos', 'Gaussian': 'Gaussiano', 'General': 'Geral', 'Global Project Settings': 'Defini\xc3\xa7\xc3\xb5es globais do projecto', 'Go to next %s in the cube': 'Ir para %s no cubo', 'Go to previous %s in the cube': 'Ir para os anteriores %s no cub', 'Goto %s': 'Ir para %s', 'Goto Band': 'Ir para banda', 'Goto Line...': 'Ir para a linha...', 'Goto Offset...': 'ir para offset...', 'Goto a specified %s in the cube': 'Ir para uma espec\xc3\xadfica %s no cubo', 'Guess Indent Size': 'Adivinhar tamanho da indenta\xc3\xa7\xc3\xa3o', 'Help on Minibuffer': 'Ajuda no minibuffer', 'Hide': 'Ocultar', 'Highlight Caret Line': 'Destacar Caret Line', 'Image View': 'Vista de Imagem', 'Incomplete regex': 'regex incompleto', 'Increase Size': 'aumentar tamanho', 'Increase Volume': 'Aumentar o Volume', 'Increase the volume': 'Aumentar o volume', 'Indent Character': 'Indentar letra', 'Indentation Guides': 'Guias de indenta\xc3\xa7\xc3\xa3o', 'Indentation Size': 'Tamanho daindenta\xc3\xa7\xc3\xa3o', 'Input:': 'Entrada de dados:', 'Insert Raw Char': 'Inserir caractere em bruto', 'Insert Unicode': 'Inserir Unicode', 'Insert repr': 'Inserir repr', 'Integer': 'N\xc3\xbamero inteiro', 'Join Lines': 'Juntar as Linhas', 'Just One Line': 'Apenas uma linha', 'Just One Space': 'Apenas um espa\xc3\xa7o', 'Key Bindings...': 'Liga\xc3\xa7\xc3\xb5es a teclas', 'Line Endings': 'Fins de linhas', 'List All Documents': 'Listar todos os documentos', 'Local settings (each view can have different values for these settings)': 'Defini\xc3\xa7\xc3\xb5es locais cada vista poder\xc3\xa1 ter valores diferentes para estas defini\xc3\xa7\xc3\xb5es)', 'Locate Config Dir': 'Localizar direct\xc3\xb3rio de configura\xc3\xa7\xc3\xa3o', 'Lookup Tag': 'Tag de busca', 'Lookup Tag:': 'Tag de busca:', 'MPD Server...': 'Servidor MPD...', 'Macros Compatible with %s': 'Macros compat\xc3\xadveis com %s', 'Major Mode': 'Modo Maior', 'Make Patch': 'Criar caminho', 'Mark for Deletion': 'Marcar para Remo\xc3\xa7\xc3\xa3o', 'Mark for Deletion and Move Backwards': 'Marcar para Remo\xc3\xa7\xc3\xa3o e andar para tr\xc3\xa1s', 'Mark for Display': 'Marcar para visualizar', 'Mark for Display and Move Backwards': 'Marcar para visualizar e desfazer', 'Mark for Save': 'Marcar para gravar', 'Mark for Save and Move Backwards': 'Marcar para gravar e desfazer', 'Median Filter': 'Filtro Mediano', 'Mercurial': 'Mercural', 'Minimize': 'Minimizar', 'Minor Modes': 'Modos menores', 'Misc': 'Variadas', 'Modes': 'Modos', 'Move Tab to New Window': 'Mover tabula\xc3\xa7\xc3\xa3o para nova janela', 'Move to %(trash)s': 'Mover 
para %(trash)s', 'Move to Next Item': 'Maver para o pr\xc3\xb3ximo item', 'Move to Previous Item': 'Mover para item anterior', 'Multi Minibuffer Test': 'Teste ao multi minibuffer', 'Mute': 'Silenciar', 'New': 'Novo', 'New Key Binding': 'Nova associa\xc3\xa7\xc3\xa3o deteclas', 'New Tab': 'Novo Separador', 'New...': 'Novo...', 'Next %s': 'Seguinte %s', 'Next Band': 'Proxima banda', 'Next Bookmark': 'Favorito Seguinte', 'Next Diff': 'Proximo Diff', 'Next Song': 'Pr\xc3\xb3xima M\xc3\xbasica', 'No suggestions': 'Sem sugest\xc3\xb5es', 'Open': 'Abrir', 'Open File Using Minibuffer...': 'Abrir ficheiro utilizando o Minibuffer...', 'Open File in New Window...': 'Abrir ficheiro numa nova janela...', 'Open File...': 'Abrir Ficheiro...', 'Open Line': 'Abrir Linha', 'Open Recent': 'Abrir Recente', 'Open URL Using Minibuffer...': 'Abrir URL utilizando o Minibuffer...', 'Open a Hex Editor': 'Abrir um editor de hexa', 'Open a file': 'Abrir um ficheiro', 'Open a new tab': 'Abrir uma nova p\xc3\xa1gina', 'Open a new window': 'Abrir uma nova janela', 'Open a sample Graphviz file': 'Abrir um ficheiro Graphviz de exemplo', 'Open a sample Python file': 'Abrir um ficheiro Python de exemplo', 'Open an Image Viewer': 'Abrir um visualizador de imagem', 'Open enclosing folder': 'Abrir subpasta', 'Paste': 'Colar', 'Paste at Column': 'Colar na coluna', 'Perform Marked Actions': 'Executar ac\xc3\xa7\xc3\xb5es seleccionadas', 'Play Last Macro': 'Executar a \xc3\x9altima Macro', 'Plugins': 'Extens\xc3\xb5es', 'Preferences, settings, and configurations...': 'Prefer\xc3\xaancias, defini\xc3\xa7\xc3\xb5es e configura\xc3\xa7\xc3\xb5es...', 'Prev %s': 'Anterior %s', 'Prev Band': 'Banda Anterior', 'Prev Bookmark': 'Favorito anterior', 'Prev Diff': 'Diferen\xc3\xa7a anterior', 'Preview': 'Pr\xc3\xa9-visualizar', 'Project': 'Projecto', 'Project From Existing Code...': 'Projecto apartir de c\xc3\xb3digo existente', 'Project Homepage': 'P\xc3\xa1gina do Projecto', 'Project Settings...': 'Defini\xc3\xa7\xc3\xb5es do Projecto...', 'Project...': 'Projecto...', 'Properties': 'Propriedades', 'Quoting:': 'Citando:', 'Raise Unhandled Exception': 'Levantar Excep\xc3\xa7\xc3\xa3o n\xc3\xa3o prevista', 'Range': '\xc3\x81rea', 'Rebuild Tag File': 'Recontruir ficgeiro Tag', 'Recent Macros': 'Macros Recentes', 'Recent Projects': 'Projectos Recentes', 'Recenter Screen': 'Centrar Ecr\xc3\xa3', 'Record Format...': 'Gravar formato...', 'Redo': 'Repetir', 'Reduce Dimensions by Integer': 'Reduzir Dimens\xc3\xb5es por N\xc3\xbamero Inteiro', 'Refresh': 'Actualizar', 'Refresh While Dragging Slider': 'Actualizar enquanto arrasta Slider', 'Refresh status': 'Actualizar estado', 'Reindent': 'Re-indentar', 'Remove Backslashes': 'Remover \\', 'Remove Project from List': 'Remover projecto da lista', 'Remove Trailing Whitespace': 'Remover espa\xc3\xa7o branco de rasto', 'Remove from repository': 'Remover do reposit\xc3\xb3rio', 'Rename Macro': 'Renomear macro', 'Rename Project': 'Renomear projecto', 'Reorder': 'Reordenar', 'Reorder File Cabinet': 'Reordenar ficheiro cab', 'Replace Buffer': 'Substituir buffer', 'Replace Regex...': 'Substituir Regex...', 'Replace Wildcard...': 'Substituir coringa...', 'Replace with': 'Substituir por', 'Replace with Multi-Key Binding': 'Substituir por liga\xc3\xa7\xc3\xa3o a m\xc3\xbaltiplas teclas', 'Replace with Single Key Binding': 'Substituir por liga\xc3\xa7\xc3\xa3o a uma tecla', 'Replace with Three Key Binding': 'Substituir por liga\xc3\xa7\xc3\xa3o a tr\xc3\xaas teclas', 'Replace with Two Key Binding': 'Substituir por 
liga\xc3\xa7\xc3\xa3o a duas teclas', 'Replace...': 'Substituir...', 'Replaced %d occurrence': 'Substituidas %d ocorr\xc3\xaancia', 'Replaced %d occurrences': 'Substituidas %d ocorr\xc3\xaancias', 'Report a bug': 'Reportar um erro (bug)', 'Returned to the starting point': 'Retornou ao ponto inicial.', 'Reverse Lines': 'Inverter linhas', 'Revert With Encoding': 'Reverter com codifica\xc3\xa7\xc3\xa3o', 'Revert to repository version': 'Reverter \xc3\xa1 vers\xc3\xa3o do reposit\xc3\xb3rio', 'Run': 'Executar', 'Run Filter': 'Executar filtro', 'Run with Args': 'Executar com argumentos', 'Run...': 'Executar...', 'Running Jobs': 'Executar trabalhos', 'Same Major Mode': 'Mesmo Modo Maior', 'Samples': 'Exemplos', 'Save &As...': 'Gr&avar Como...', 'Save Styles': 'Gravar estilos', 'Save as Global %s Template': 'Gravar como %d modelo global', 'Save as Project %s Template': 'Gravar como %s modelo de projecto', 'Save or Delete Marked Buffers': 'Apagar ou gravar buffers seleccionados', 'Save to URL Using Minibuffer...': 'Gravar para URL utilizando Minibuffer', 'Scale Dimensions by Integer': 'Modificar dimens\xc3\xb5es por N\xc3\xbamero Inteiro', 'Seek %s': 'Procurar %s', 'Seek Band': 'Procurar banda', 'Select All': 'Seleccionar tudo', 'Select Next Tab': 'Seleccionar pr\xc3\xb3ximo tabulador', 'Select Previous Tab': 'Seleccionar tabulador anterior', 'Select Rect': 'Seleccionar rectangulo', 'Select all text': 'Seleccionar todo o texto', 'Selected files are from multiple repositories': 'Os ficheiros seleccionados pertencem a v\xc3\xa1rios reposit\xc3\xb3rios', 'Send Error Report': 'Enviar relat\xc3\xb3rio de erros', 'Set Bookmark': 'Adicionar Marcador', 'Set Font Zoom...': 'Seleccionar zoom de tipo de letra', 'Set the preview file type': 'Seleccionar tipo de ficheiro anterior', 'Show Buffer': 'Ver Buffer', 'Show Hex Digits': 'Ver digitos hexa', 'Show Indentation': 'Ver indenta\xc3\xa7\xc3\xa3o', 'Show Line Style': 'Ver estilo de linha', 'Show Long Lines': 'Ver linhas compridas', 'Show Pixel Values': 'Ver valores do pixel', 'Show Record Numbers': 'Ver n\xc3\xbameros gravados', 'Show Whitespace': 'Ver espa\xc3\xa7os', 'Show revision history': 'Ver hist\xc3\xb3rico de revis\xc3\xb5es', 'Show the styling information of the current line': 'Ver informa\xc3\xa7\xc3\xa3o de estilo para a linha actual', 'Show uncollectable objects': 'Ver objectos n\xc3\xa3o colecion\xc3\xa1veis', 'Shuffle Lines': 'Misturar linhas', 'Sidebars': 'Barras Laterais', 'Size': 'Tamanho', 'Slow test of the progress bar': 'Teste lento da barra de progresso', 'Some styles have been changed would you like to save before exiting?': 'Alguns estilos foram modificados, quer gravar antes de sair?', 'Sort Lines': 'Ordenar linhas', 'Sort Lines by Field': 'Ordenar linhas por campo', 'Sort Order': 'Ordem de Ordena\xc3\xa7\xc3\xa3o', 'Spaces Per Tab': 'Espa\xc3\xa7os por tabulador', 'Spatial Subset': 'Subconjunto espacial', 'Spelling...': 'Soletrar...', 'Start Recording': 'Iniciar a Grava\xc3\xa7\xc3\xa3o', 'Start a blank new style': 'Novo estilo vazio', 'Started %s on': 'Come\xc3\xa7ado em %s', 'Stop': 'Parar', 'Stop Recording': 'Parar a Grava\xc3\xa7\xc3\xa3o', 'Style Editor': 'Editor de Estilo', 'Style Tags': 'Tags de estilo', 'Style Theme': 'Estilo de Tema', 'Subversion': 'Sub vers\xc3\xa3o', 'Swap Endian': 'Trocar endian', 'Swap case': 'Alterar capitaliza\xc3\xa7\xc3\xa3o', 'Switch to Buffer': 'Alternar para buffer', 'Syntax Files': 'Ficheiros de sintaxe', 'Templates': 'Modelos', 'Test HSI Spatial Subset': 'Testar subconjunto HSI Spatial', 
'Tests': 'Testes', 'Text': 'Texto', 'Text Styles...': 'Estilos de texto...', 'Text file': 'Ficheiro de texto', 'The following command will be executed on all selected\nfiles and files contained in selected directories.': 'O seguinte comando ser\xc3\xa1 executado em\ntodos os ficheiros e pastas seleccionados', 'The system returned the following message when attempting to move/copy %(filename)s: %(errmsg)s. Do you wish to continue?': 'O sistema devolveu a seguinte mensagem: %(errmsg)s quando tentava copiar/mover %(filename)s. Deseja continuar?', 'This Project': 'Este projecto', 'Toggle Bookmark': 'Activar/Desactivar Marcador', 'Tools': 'Ferramentas', 'Transform': 'Transformar', 'Transpose Characters': 'Transpor os Caracteres', 'Transpose Line Down': 'Transpor linha abaixo', 'Undo': 'Desfazer', 'Untitled Folder': 'Pasta sem nome', 'Upcase': 'Maiusculas', 'Update': 'Actualizar', 'View': 'Ver', 'View %s': 'Ver %s', 'View Band': 'Ver banda', 'View Direction': 'Ver direc\xc3\xa7\xc3\xa3o', 'View/Apply Settings': 'Ver/aplicar defini\xc3\xa7\xc3\xb5es', 'Whitespace': 'Espa\xc3\xa7o em Branco', 'Whole Word Search': 'Pesquisa de palavra completa', 'Widget Inspector...': 'Inspector de Widget', 'Window': 'Janela', 'X Axis Label': 'R\xc3\xb3tulo do eixo X', 'Zoom In': 'Aumentar Zoom', 'Zoom Out': 'Diminuir Zoom', 'Zoom in (magnify) image': 'Aumentar imagem', 'Zoom out (demagnify) image': 'Diminuir imagem', 'as ENVI': 'como ENVI', 'as ENVI (big endian)': 'como ENVI (grande endian)', 'as ENVI (little endian)': 'como ENVI (pequeno endian)', 'as Image': 'como imagem', 'bold': 'negrito', 'complete word at the current cursor position': 'Palavra completa na posi\xc3\xa7\xc3\xa3o actual do cursor', 'cwd = ': 'cwd= ', 'exit code = %s': 'fim de c\xc3\xb3digo = %s', 'italic': 'it\xc3\xa1lico', 'other': 'outro', 'underline': 'sublinhado', 'unknown': 'desconhecido', 'untitled': 'sem nome', }
robmcmullen/peppy
peppy/i18n/pt.py
Python
gpl-2.0
18,181
[ "Gaussian" ]
160896cd001e81f9a1657f6ea09d567e6c1a51a878f1b1a547934c9c375b2c3d
"""
.. module:: samples_generator

samples_generator
*****************

:Description: samples_generator

:Authors: bejar

:Version:

:Created on: 21/01/2015 9:02

"""

__author__ = 'bejar'

import numpy as np
import numbers
from sklearn.utils import check_random_state, check_array


def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    7/10/2015: a fixed and more flexible version of the scikit-learn function.

    Parameters
    ----------
    n_samples : int, or sequence of integers, optional (default=100)
        The total number of points equally divided among clusters,
        or a sequence with the number of examples of each cluster.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters (a list of floats, one per
        center, now also works).

    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    X = []
    y = []

    n_centers = centers.shape[0]
    if not isinstance(n_samples, list):
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers
        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1
    else:
        if len(n_samples) != n_centers:
            raise NameError('List of number of examples per center does not '
                            'match number of centers')
        n_samples_per_center = n_samples
        n_samples = sum(n_samples)

    if not isinstance(cluster_std, list):
        std_list = [cluster_std] * centers.shape[0]
    else:
        if len(cluster_std) != n_centers:
            raise NameError('List of standard deviations per center does not '
                            'match number of centers')
        std_list = cluster_std

    for i, (n, st) in enumerate(zip(n_samples_per_center, std_list)):
        X.append(centers[i] + generator.normal(scale=st,
                                               size=(n, n_features)))
        y += [i] * n

    X = np.concatenate(X)
    y = np.array(y)

    if shuffle:
        indices = np.arange(n_samples)
        generator.shuffle(indices)
        X = X[indices]
        y = y[indices]

    return X, y


# def cluster_generator(n_clusters=3, sepval=0.5, numNonNoisy=5, numNoisy=0, numOutlier=0,
#                       clustszind=2, clustSizeEq=100, rangeN=[100, 150], rotateind=True):
#     """
#     Generates clusters using the R package ClusterGeneration
#     See the documentation of that package for the meaning of the parameters
#
#     You must have an R installation with the clusterGeneration package
#
#     :param n_clusters:
#     :param sepval:
#     :return:
#     """
#     clusterG = importr('clusterGeneration')
#
#     params = {'numClust': n_clusters,
#               'sepVal': sepval,
#               'numNonNoisy': numNonNoisy,
#               'numNoisy': numNoisy,
#               'numOutlier': numOutlier,
#               'numReplicate': 1,
#               'clustszind': clustszind,
#               'clustSizeEq': clustSizeEq,
#               'rangeN': rangeN,
#               'rotateind': rotateind,
#               'outputDatFlag': False,
#               'outputLogFlag': False,
#               'outputEmpirical': False,
#               'outputInfo': False
#               }
#
#     x = clusterG.genRandomClust(**params)
#
#     nm = np.array(x[2][0].colnames)
#     nm = np.concatenate((nm, ['class']))
#     m = np.matrix(x[2][0])
#     v = np.array(x[3][0])
#     v.resize((len(x[3][0])))
#     # m = np.concatenate((m, v), axis=1)
#     return m, v
bejar/kemlglearn
kemlglearn/datasets/samples_generator.py
Python
mit
5,132
[ "Gaussian" ]
15689f9135acec8608dd236aa7d06d3af7ea40418f9ce9b7d85fa0edd52ee38c
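A short usage sketch for the extended make_blobs above, assuming the kemlglearn package is importable as laid out in its repo path; per-cluster sample counts and per-cluster standard deviations are exactly the two extensions over the stock scikit-learn generator:

import numpy as np
from kemlglearn.datasets.samples_generator import make_blobs

X, y = make_blobs(
    n_samples=[50, 100, 25],                          # one count per center
    centers=np.array([[0.0, 0.0], [5.0, 5.0], [0.0, 5.0]]),
    cluster_std=[0.5, 1.0, 2.0],                      # one spread per center
    random_state=42,
)
print(X.shape)         # (175, 2)
print(np.bincount(y))  # [ 50 100  25]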
#!/opt/datafax/PHRI/python27 # # Copyright 2017, Population Health Research Institute # Copyright 2017, Martin Renters # # This file is part of the DataFax Toolkit. # # The DataFax Toolkit is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # The DataFax Toolkit is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with The DataFax Toolkit. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from __future__ import print_function import os import getopt import codecs import datafax import sys import xlsxwriter import datetime import StringIO import smtplib,ssl from email.mime.multipart import MIMEMultipart from email.mime.base import MIMEBase from email.mime.text import MIMEText from email.utils import formatdate from email import encoders ##################################################################### # Decode text as Unicode, and if that doesn't work, try Latin-1 ##################################################################### def to_unicode(s): try: u = s.decode('utf-8') except UnicodeDecodeError: u = s.decode('latin-1') return u ##################################################################### # Load Priorities ##################################################################### def load_priorities(studydir, name): priorities={} try: with open(os.path.join(studydir, 'lib', os.path.basename(name)), 'rU') as f: contents = f.read().decode('utf-8') for line in contents.split('\n'): rec = line.split('|') if len(rec) < 3: continue if rec[0]=='Plate' and rec[1]=='Field' and rec[2]=='Priority': continue try: plate = int(rec[0]) field = int(rec[1]) priority = int(rec[2]) if priority < 1: priority = 1 if priority > 5: priority = 5 priorities[(plate,field)] = priority except ValueError: print('Misformed priority record: ', line) pass except IOError: print('Unable to open/read priorities file "{0}"'.format(name)) sys.exit(2) pass return priorities def extractDate(user_ts): if user_ts: try: fields = user_ts.split(' ') if len(fields) == 3: [year, month, day] = fields[1].split('/') if int(year) > 90: year = int(year) + 1900 else: year = int(year) + 2000 [hh, mm, ss] = fields[2].split(':') return (fields[0], datetime.datetime(year, int(month), int(day), int(hh), int(mm), int(ss))) except: pass return (None, None) ##################################################################### # Convert QC data on stdin to Excel format ##################################################################### def QC2Excel(config): priorities = {} studydir = config.get('studydir') if studydir == None: return 0 centers_filter = config.get('centers') plates_filter = config.get('plates') visits_filter = config.get('visits') outstanding_only = config.get('outstanding') simplify = config.get('simplify') external = config.get('external') percent = config.get('percent') sitemode = config.get('sitemode') timestamps = config.get('timestamps') xlsx = config.get('xlsx') if xlsx is None: xlsx = 'qc.xlsx' priority_file = config.get('priority-file') if priority_file: priorities = 
load_priorities(studydir, priority_file)
    color_by_priority = config.get('color_by_priority')

    if simplify:
        status_labels = [ 'Pending', 'Outstanding', 'Resolved']
        problem_labels = ['Missing Value', 'Illegal Value', 'Inconsistent Value',
                          'Illegible Value', 'Fax Noise', 'Other', 'Missing Page',
                          'Overdue Assessment']
    else:
        status_labels = [ 'Pending', 'Outstanding(New)',
                          'Outstanding(New, in report not sent)', 'Resolved N/A',
                          'Resolved Irrelevant', 'Resolved Corrected',
                          'Outstanding(New, in report sent)']
        problem_labels = ['Missing Value', 'Illegal Value', 'Inconsistent Value',
                          'Illegible Value', 'Fax Noise', 'Other', 'Missing Page',
                          'Overdue Assessment', 'EC Missing Page']

    agebin_labels = ['0-30 days', '31-60 days', '61-90 days', '91-120 days',
                     '121-150 days', '151-180 days', '>180 days' ]
    priority_labels = [ 1, 2, 3, 4, 5 ]

    status_count = [ 0, 0, 0, 0, 0, 0, 0 ]
    problem_count = [ 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
    agebin_count = [ 0, 0, 0, 0, 0, 0, 0 ]
    priority_count = [ 0, 0, 0, 0, 0 ]

    #####################################################################
    # Load Study information
    #####################################################################
    study = datafax.Study()
    print('Loading Study Configuration Files...')
    study.loadFromFiles(studydir)

    #####################################################################
    # Create spreadsheet and formatting information
    #####################################################################
    print('Creating Spreadsheet...')
    email = config.get('email')
    if email:
        output = StringIO.StringIO()
        workbook = xlsxwriter.Workbook(output, {'in_memory': True})
    else:
        workbook = xlsxwriter.Workbook(xlsx)

    title_format = workbook.add_format(
        { 'font_color': 'white', 'bg_color': '#4f81bd', 'font_size': 36,
          'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'border': 1 })
    header_format = workbook.add_format(
        { 'bold': True, 'font_color': 'white', 'bg_color': '#244062',
          'align': 'center', 'valign': 'vcenter', 'border': 1 })
    category_format = workbook.add_format(
        { 'font_color': 'white', 'bg_color': '#4f81bd',
          'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'border': 1 })
    percent_format = workbook.add_format(
        { 'font_color': 'black', 'bg_color': 'white',
          'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'num_format': '0.0%', 'border': 1 })
    normal_format = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'num_format': '0', 'border': 1 })
    normal_format_str = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'num_format': '@', 'border': 1 })
    normal_format_date = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'num_format': 'yyyy-mm-dd hh:mm:ss', 'border': 1 })
    normal_format_red = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#9c0006', 'bg_color': '#ffc7ce',
          'text_wrap': True, 'num_format': '0', 'border': 1 })
    normal_format_str_red = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#9c0006', 'bg_color': '#ffc7ce',
          'text_wrap': True, 'num_format': '@', 'border': 1 })
    normal_format_date_red = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#9c0006', 'bg_color': '#ffc7ce',
          'text_wrap': True, 'num_format': 'yyyy-mm-dd hh:mm:ss', 'border': 1 })
    normal_format_orange = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#3f3f76', 'bg_color': '#ffcc99',
          'text_wrap': True, 'num_format': '0', 'border': 1 })
    normal_format_str_orange = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#3f3f76', 'bg_color': '#ffcc99',
          'text_wrap': True, 'num_format': '@', 'border': 1 })
    normal_format_date_orange = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#3f3f76', 'bg_color': '#ffcc99',
          'text_wrap': True, 'num_format': 'yyyy-mm-dd hh:mm:ss', 'border': 1 })
    normal_format_yellow = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#9c6500', 'bg_color': '#ffeb9c',
          'text_wrap': True, 'num_format': '0', 'border': 1 })
    normal_format_str_yellow = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#9c6500', 'bg_color': '#ffeb9c',
          'text_wrap': True, 'num_format': '@', 'border': 1 })
    normal_format_date_yellow = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#9c6500', 'bg_color': '#ffeb9c',
          'text_wrap': True, 'num_format': 'yyyy-mm-dd hh:mm:ss', 'border': 1 })
    normal_format_green = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#006100', 'bg_color': '#c6efce',
          'text_wrap': True, 'num_format': '0', 'border': 1 })
    normal_format_str_green = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#006100', 'bg_color': '#c6efce',
          'text_wrap': True, 'num_format': '@', 'border': 1 })
    normal_format_date_green = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter',
          'font_color': '#006100', 'bg_color': '#c6efce',
          'text_wrap': True, 'num_format': 'yyyy-mm-dd hh:mm:ss', 'border': 1 })
    shaded_format = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'num_format': '0', 'border': 1 })
    shaded_format_str = workbook.add_format(
        { 'align': 'center', 'valign': 'vcenter', 'text_wrap': True,
          'num_format': '@', 'border': 1 })

    sheet = workbook.add_worksheet('QCs')

    #####################################################################
    # Add title
    #####################################################################
    sheet.set_row(0,75)
    sheet.merge_range(0,0,0,23, 'QC Report for {0}'.format(study.studyName()),
                      title_format)
    sheet.merge_range(1,0,1,23, 'Generated on {0}'.format(
        datetime.date.today().isoformat()), header_format)

    #####################################################################
    # Add space for charts
    #####################################################################
    sheet.set_row(2,230)
    sheet.merge_range(2,0,2,23, '', normal_format)

    #####################################################################
    # Add QC listing headers
    #####################################################################
    row = 3
    table_row = row
    #sheet.set_row(table_row, 36)

    hidden = {'hidden': 1}
    extra_distribute = 0
    extra_width = 0
    if config.get('region'):
        sheet.set_column( 0, 0, 10)                     # Region
    else:
        sheet.set_column( 0, 0, 10, options=hidden)     # Region
        extra_distribute += 12
    if config.get('country'):
        sheet.set_column( 1, 1, 10)                     # Country
    else:
        sheet.set_column( 1, 1, 10, options=hidden)     # Country
        extra_distribute += 12
    if priority_file:
        sheet.set_column(10,10, 10)                     # Priority
    else:
        sheet.set_column(10,10, 10, options=hidden)     # Priority
        extra_distribute += 12
    if sitemode:
        sheet.set_column( 5, 5, 10, options=hidden)     # Visit
        sheet.set_column( 6, 6, 10, options=hidden)     # Plate
        sheet.set_column( 9, 9, 10, options=hidden)     # Field#
        sheet.set_column(11,11, 10, options=hidden)     # Age
        extra_distribute += 50      # Gross up 25% bit
    else:
        sheet.set_column( 5, 5, 10)                     # Visit
        sheet.set_column( 6, 6, 10)                     # Plate
        sheet.set_column( 9, 9, 10)                     # Field#
        sheet.set_column(11,11, 10)                     # Age

    # Creation, modification, resolution user/timestamp
    if timestamps:
        sheet.set_column(18,18, 12)
        sheet.set_column(19,19, 20)
        sheet.set_column(20,20, 12)
        sheet.set_column(21,21, 20)
        sheet.set_column(22,22, 12)
        sheet.set_column(23,23, 20)
        extra_width += 96
    else:
        sheet.set_column(18,18, 12, options=hidden)
        sheet.set_column(19,19, 20, options=hidden)
        sheet.set_column(20,20, 12, options=hidden)
        sheet.set_column(21,21, 20, options=hidden)
        sheet.set_column(22,22, 12, options=hidden)
        sheet.set_column(23,23, 20, options=hidden)

    sheet.set_column( 2, 2, 10)                         # Site
    sheet.set_column( 3, 3, 15)                         # Patient
    sheet.set_column( 4, 4, 25 + (extra_distribute/10)) # Assessment
    sheet.set_column( 7, 7, 25 + (extra_distribute/10)) # Page
    sheet.set_column( 8, 8, 25 + (extra_distribute/10)) # Field
    sheet.set_column(12,12, 10)                         # Age Bin
    sheet.set_column(13,13, 20)                         # Status
    sheet.set_column(14,14, 20)                         # Problem
    sheet.set_column(15,15, 20 + (extra_distribute/5))  # Value
    sheet.set_column(16,16, 20 + (extra_distribute/5))  # Query
    sheet.set_column(17,17, 20 + (extra_distribute/5))  # Reply

    row += 1
    start_table_row = row

    #####################################################################
    # Add QC listing
    #####################################################################
    today = datetime.date.today()
    print('Populating Data Table...')
    #stdin = codecs.getreader("utf-8")(sys.stdin)
    countries = study.Countries()
    for qc in sys.stdin:
        qc = to_unicode(qc)
        qcf = qc.split('|')
        center_num = int(qcf[8])
        visit_num = int(qcf[5])
        plate_num = int(qcf[4])
        plate = study.plate(plate_num)
        if plate == None:
            continue
        field = plate.fieldAt(int(qcf[7])+3)
        if field == None:
            continue
        fnum = field.number
        fname = field.description

        # If we have a plate/visit/center restrictions, enforce them now
        if centers_filter and not centers_filter.contains(center_num):
            continue
        if plates_filter and not plates_filter.contains(plate_num):
            continue
        if visits_filter and not visits_filter.contains(visit_num):
            continue

        # Check for external only
        if external and int(qcf[21]) == 2:
            continue

        status_code = int(qcf[0])

        # We don't count deleted records
        if status_code > 6:
            continue

        # Check whether this QC is resolved
        is_resolved = status_code >= 3 and status_code <= 5

        # If we're only interested in outstanding QCs
        if outstanding_only and is_resolved:
            continue
        if outstanding_only and sitemode and status_code == 0:
            continue

        problem_code = int(qcf[14])

        # Extract creation, modification, resolution users/timestamps
        cr_user, cr_date = extractDate(qcf[18])
        md_user, md_date = extractDate(qcf[19])
        rs_user, rs_date = extractDate(qcf[20])

        ######################################################
        # Calculate Age of QC
        ######################################################
        age = None
        agebin_str = None
        if not is_resolved and cr_date:
            age = (today-cr_date.date()).days
            agebin = int(age/30)
            if agebin > 6:
                agebin = 6
            agebin_count[agebin] += 1
            agebin_str = agebin_labels[agebin]

        ######################################################
        # Priority
        ######################################################
        priority = priorities.get((plate_num, fnum))
        if priority is None:
            priority = 5
        priority_count[priority-1] += 1

        (format_number, format_string, format_date) = \
            (normal_format, normal_format_str, normal_format_date)
        if color_by_priority and not is_resolved:
            if priority == 1:
                (format_number, format_string, format_date) = \
                    (normal_format_red, normal_format_str_red,
                     normal_format_date_red)
            if priority == 2:
                (format_number, format_string, format_date) = \
                    (normal_format_orange, normal_format_str_orange,
                     normal_format_date_orange)
            if priority == 3:
                (format_number, format_string, format_date) = \
                    (normal_format_yellow, normal_format_str_yellow,
                     normal_format_date_yellow)
            if priority == 4:
                (format_number, format_string, format_date) = \
                    (normal_format_green, normal_format_str_green,
                     normal_format_date_green)

        ######################################################
        # Problem code
        ######################################################
        if problem_code < 7:
            pname = problem_labels[problem_code-1]
            problem_count[problem_code-1] += 1
            value = qcf[13]
        else:
            # If simplify, map EC Missing Page -> Missing Page
            if simplify and problem_code == 23:
                problem_code = 21
            pname = problem_labels[problem_code-15]
            problem_count[problem_code-15] += 1
            value = None
            fnum = None         # Clear field number/name for output
            fname = None

        ######################################################
        # If simplify, map Outstanding* to Outstanding,
        # Resolved* to Resolved
        ######################################################
        if simplify and status_code != 0:
            if status_code in [1,2,6]:
                status_code = 1
            else:
                status_code = 2

        ######################################################
        # Output a row
        ######################################################
        sheet.write_string(row, 0, countries.region(center_num), format_string)
        sheet.write_string(row, 1, countries.country(center_num), format_string)
        sheet.write_number(row, 2, center_num, format_number)
        sheet.write_number(row, 3, int(qcf[6]), format_number)
        sheet.write_string(row, 4, study.visitLabel(int(qcf[5])), format_string)
        sheet.write_number(row, 5, visit_num, format_number)
        sheet.write_number(row, 6, plate_num, format_number)
        sheet.write(row, 7, study.pageLabel(visit_num, plate_num), format_string)
        sheet.write(row, 8, fname, format_string)
        sheet.write(row, 9, fnum, format_number)
        sheet.write(row, 10, priority, format_number)
        sheet.write(row, 11, age, format_number)
        sheet.write(row, 12, agebin_str, format_string)
        sheet.write_string(row, 13, status_labels[status_code], format_string)
        sheet.write_string(row, 14, pname, format_string)
        sheet.write(row, 15, value, format_string)
        sheet.write(row, 16, qcf[16], format_string)
        sheet.write(row, 17, qcf[11], format_string)
        sheet.write(row, 18, cr_user, format_string)
        sheet.write(row, 19, cr_date, format_date)
        sheet.write(row, 20, md_user, format_string)
        sheet.write(row, 21, md_date, format_date)
        sheet.write(row, 22, rs_user, format_string)
        sheet.write(row, 23, rs_date, format_date)

        status_count[status_code] += 1
        row += 1

    end_table_row = row
    qc_count = end_table_row - start_table_row

    # Make sure we have at least one row
    if qc_count == 0:
        end_table_row += 1
        row += 1
        sheet.merge_range(row,0,row,23, 'No Matching QC Records found',
                          normal_format_str)
    row += 1

    sheet.add_table(start_table_row-1, 0, end_table_row-1, 23,
                    {'autofilter': True,
                     'first_column': True,
                     'name': 'QC_Details',
                     'columns': [
                         {'header': 'Region', 'header_format': header_format},
                         {'header': 'Country', 'header_format': header_format},
                         {'header': 'Site', 'header_format': header_format},
                         {'header': 'Patient', 'header_format': header_format},
                         {'header': 'Assessment', 'header_format': header_format},
                         {'header': 'Visit', 'header_format': header_format},
                         {'header': 'Plate', 'header_format': header_format},
                         {'header': 'Page', 'header_format': header_format},
                         {'header': 'Field', 'header_format': header_format},
                         {'header': 'Fld #', 'header_format': header_format},
                         {'header': 'Priority', 'header_format': header_format},
                         {'header': 'Days', 'header_format': header_format},
                         {'header': 'Age', 'header_format': header_format},
                         {'header': 'Status', 'header_format': header_format},
                         {'header': 'Problem', 'header_format': header_format},
                         {'header': 'Value', 'header_format': header_format},
                         {'header': 'Query', 'header_format': header_format},
                         {'header': 'Reply', 'header_format': header_format},
                         {'header': 'Creator', 'header_format': header_format},
                         {'header': 'Created', 'header_format': header_format},
                         {'header': 'Modifier', 'header_format': header_format},
                         {'header': 'Modified', 'header_format': header_format},
                         {'header': 'Resolver', 'header_format': header_format},
                         {'header': 'Resolved', 'header_format': header_format}
                     ]})

    #####################################################################
    # Add data for charts
    #####################################################################
    print('Writing Charts...')
    row += 1

    # Totals
    sheet.merge_range(row,2,row,4, 'Total', header_format)
    row += 1
    sheet.write_formula(row, 3, '=SUBTOTAL(3,C{0}:C{1})'.format(
        start_table_row+1, end_table_row), shaded_format,
        end_table_row-start_table_row)
    sheet.write_string(row, 4, 'Selected Records', category_format)
    row += 2

    #####################################################################
    # Add charts
    #####################################################################
    charts = [
        { 'name': 'Status', 'column': 'N',
          'labels': status_labels, 'counts': status_count },
        { 'name': 'Problems', 'column': 'O',
          'labels': problem_labels, 'counts': problem_count },
        { 'name': 'Age', 'column': 'M',
          'labels': agebin_labels, 'counts': agebin_count },
    ]
    if priority_file:
        charts.append({ 'name': 'Priority', 'column': 'K',
                        'labels': priority_labels, 'counts': priority_count })

    chart_width = ((extra_width*8) + 2000)/len(charts)
    chart_x_offset = 5
    for chart in charts:
        sheet.merge_range(row,2,row,4, chart['name'], header_format)
        row += 1
        chart_start_row = row
        chart['start_row'] = chart_start_row
        sheet.write_column(row, 4, chart['labels'], category_format)
        column = chart['column']
        counts = chart['counts']
        for label in chart['labels']:
            l = label
            if type(label) is not int:
                l = '"'+label+'"'
            sheet.write_formula(row, 3,
                '=SUMPRODUCT(SUBTOTAL(3,OFFSET({0}{1}:{0}{2},ROW({0}{1}:{0}{2})-MIN(ROW({0}{1}:{0}{2})),,1)), --({0}{1}:{0}{2}={3}))'.format(
                    column, start_table_row+1, end_table_row, l),
                shaded_format, counts[row-chart_start_row])
            if end_table_row - start_table_row == 0:
                value = 0
            else:
                value = counts[row-chart_start_row]/(end_table_row-start_table_row)
            sheet.write_formula(row, 2, '=IFERROR(D{0}/SUBTOTAL(3,D{1}:D{2}), 0)'.format(
                row+1, start_table_row+1, end_table_row), percent_format, value)
            row += 1

        chart_gap = 1000//len(chart['labels'])

        if percent:
            data_column = 'C'
        else:
            data_column = 'D'

        #####################################################################
        # Add chart to worksheet
        #####################################################################
        excel_chart = workbook.add_chart({'type': 'bar'})
        excel_chart.add_series({
            'values': '=QCs!${0}${1}:${0}${2}'.format(data_column,
                chart_start_row+1, chart_start_row+len(chart['labels'])),
            'categories': '=QCs!$E${0}:$E${1}'.format(chart_start_row+1,
                chart_start_row+len(chart['labels'])),
            'data_labels': {'value': True},
            'gap': chart_gap})
        excel_chart.set_title({'name': chart['name']})
        excel_chart.set_legend({'none': True})
        excel_chart.set_size({'width': chart_width, 'x_offset': chart_x_offset,
                              'y_offset': 5})
        excel_chart.set_chartarea({'border': {'none': True}})
        sheet.insert_chart('A3', excel_chart)
        chart_x_offset += chart_width
        row += 1

    #####################################################################
    # Add print formatting setup
    #####################################################################
    sheet.set_header("&LQC Report&C{0}&R&P of &N".format(
        study.studyName().replace("&", "&&")))
    sheet.set_landscape()
    sheet.set_paper(5)
    sheet.fit_to_pages(1,0)
    sheet.repeat_rows(table_row)

    #####################################################################
    # Protect sheet from changes and save
    #####################################################################
    sheet.hide_gridlines(2)
    sheet.protect('', { 'autofilter': True, 'sort': True,
                        'select_locked_cells': True,
                        'select_unlocked_cells': True })
    sheet.set_zoom(90)
    if xlsx and not email:
        print('Saving Spreadsheet to', xlsx, '...')
    workbook.close()

    if row >= 1048567:
        print('************************************************************')
        print('* WARNING: WORKSHEET HAS MORE THAN 1,048,567 ROWS WHICH')
        print('*          EXCEEDS EXCEL LIMIT. EXCEL WILL NOT RENDER')
        print('*          THIS SPREADSHEET CORRECTLY.')
        print('************************************************************')
        sys.exit(2)

    if email:
        emailfrom = config.get('emailfrom')
        if emailfrom is None:
            emailfrom = 'PHRI.donotreply@phri.ca'
        output.seek(0, os.SEEK_END)
        if output.tell() > 10*1024*1024:
            print('Excel file too large to email')
            return 2

        print('Emailing spreadsheet to:', email, '...')
        output.seek(0)
        msg = MIMEMultipart()
        msg['Subject'] = 'QC Report for {0}'.format(study.studyName())
        msg['From'] = emailfrom
        msg['To'] = email
        msg['Date'] = formatdate(localtime = True)
        msg.attach(MIMEText('QC Report for {0} is attached.\n'.format(
            study.studyName())))
        part = MIMEBase('application',
                        'vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        part.set_payload(output.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition',
                        'attachment; filename="QC.xlsx"')
        msg.attach(part)
#        smtp = smtplib.SMTP('localhost', 25)
#        try:
#            smtp.sendmail('PHRI.donotreply@phri.ca', email, msg.as_string())
#        except:
#            print '*** EMAIL Could not be sent ***'
#        smtp.quit()
        sendmail = os.popen('/usr/sbin/sendmail -t', 'w')
        sendmail.write(msg.as_string())
        status = sendmail.close()
        if status is not None and status >> 8:
            print('*** EMAIL could not be sent ***', status)
            return 2

    return 1 if qc_count == 0 else 0

#####################################################################
# Convert data on stdin to Excel format
#####################################################################
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "s:c:p:v:o", [
            'studydir=', 'centers=', 'plates=', 'visits=',
            'outstanding', 'simplify', 'external', 'percent', 'site-mode',
            'email=', 'email-to=', 'email-from=', 'xlsx=',
            'include-country', 'include-region',
            'priority-file=', 'color-by-priority',
            'creation-date', 'timestamps',
            'help', 'version'])
    except getopt.GetoptError, err:
        print(str(err))
        sys.exit(2)

    config = {}
    for o, a in opts:
        if o in ("-s", "--studydir"):
            if config.get('studydir'):
                print('Study directory previously specified, skipping')
            else:
                config['studydir'] = a
        if o in ("-c", "--centers"):
            rl = datafax.rangelist.RangeList(0, 21460)
            rl.fromString(a)
            config['centers'] = rl
        if o in ("-p", "--plates"):
            rl = datafax.rangelist.RangeList(0, 500)
            rl.fromString(a)
            config['plates'] = rl
        if o in ("-v", "--visits"):
            rl = datafax.rangelist.RangeList(0, 65535)
            rl.fromString(a)
            config['visits'] = rl
        if o in ("-o", "--outstanding"):
            config['outstanding'] = True
        if o == "--simplify":
            config['simplify'] = True
        if o == "--external":
            config['external'] = True
        if o == "--percent":
            config['percent'] = True
        if o == "--site-mode":
            config['sitemode'] = True
        if o == "--include-country":
            config['country'] = True
        if o == "--include-region":
            config['region'] = True
        if o == "--priority-file":
            config['priority-file'] = a
        if o == "--color-by-priority":
            config['color_by_priority'] = True
        if o in ("--creation-date", "--timestamps"):
            config['timestamps'] = True
        if o in ("--email", "--email-to"):
            if config.get('email'):
                print('Email address previously specified, skipping')
            else:
                config['email'] = a
        if o == "--email-from":
            config['emailfrom'] = a
        if o == "--xlsx":
            if config.get('xlsx'):
                print('Output Excel previously specified, skipping')
            elif config.get('email'):
                print('Email previously selected, output Excel file name ignored')
            else:
                config['xlsx'] = a
        if o == "--help":
            print('QC2Excel Options')
            print('--centers range        Limit output to specified centers')
            print('--plates range         Limit output to specified plates')
            print('--visits range         Limit output to specified visits')
            print('--outstanding          Limit output to unresolved QCs')
            print('--simplify             Simplify QC states to pending/outstanding/resolved')
            print('                       and group Missing Page QCs (EC and QCupdate)')
            print('--external             Don\'t include internal QC notes')
            print('--percent              Show percentages in charts instead of counts')
            print('--site-mode            Simply for sites. Hides visit, plate, field')
            print('                       and age columns. If --outstanding')
            print('                       option also given, skip QCs in pending state')
            print('                       as those have been dealt with by site.')
            print('--include-country      Include country column based on DFcountries file')
            print('--include-region       Include region column based on DFcountries file')
            print('--priority-file name   Use file called name for field priority levels')
            print('--color-by-priority    Color the rows based on priority.')
            print('                       1=red, 2=orange, 3=yellow, 4=green, 5=blue')
            print('--timestamps           Show creation/modification/resolution user and timestamps')
            print('--email-to addr        Sets the email address to send report to.')
            print('--email-from addr      Sets the email address report will appear to come from.')
            print('--version              Print version number and exit')
            # Flush stdin if it is coming from a file or pipe
            if not sys.stdin.isatty():
                for line in sys.stdin:
                    pass
            sys.exit(0)
        if o == "--version":
            print(datafax.__version__)
            # Flush stdin if it is coming from a file or pipe
            if not sys.stdin.isatty():
                for line in sys.stdin:
                    pass
            sys.exit(0)

    if not config.get('studydir'):
        print('--studydir not specified')
        sys.exit(2)

    if len(args):
        print('unexpected extraneous arguments found:', ' '.join(args))
        # Flush stdin if it is coming from a file or pipe
        if not sys.stdin.isatty():
            for line in sys.stdin:
                pass
        sys.exit(2)

    ret = QC2Excel(config)
    sys.exit(ret)

if __name__ == "__main__":
    main()
mrenters/DFtoolkit
dfpython/qc2excel.py
Python
gpl-3.0
35,931
[ "VisIt" ]
ca45f69aabece36bc363608884e9379dd38575c8850f56a6fcf908072ff0c228
""" Unit tests for pipelines """ import logging import sys import unittest import numpy from astropy import units as u from astropy.coordinates import SkyCoord from data_models.polarisation import PolarisationFrame from workflows.serial.imaging.imaging_serial import zero_list_serial_workflow, \ predict_list_serial_workflow, invert_list_serial_workflow, subtract_list_serial_workflow, \ weight_list_serial_workflow, residual_list_serial_workflow from wrappers.serial.image.operations import export_image_to_fits, smooth_image, qa_image from wrappers.serial.imaging.base import predict_skycomponent_visibility from wrappers.serial.skycomponent.operations import find_skycomponents, find_nearest_skycomponent, \ insert_skycomponent from wrappers.serial.griddata.convolution_functions import apply_bounding_box_convolutionfunction from wrappers.serial.griddata.kernels import create_awterm_convolutionfunction from wrappers.serial.simulation.testing_support import ingest_unittest_visibility, \ create_unittest_model, insert_unittest_errors, create_unittest_components from processing_components.simulation.configurations import create_named_configuration log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) log.addHandler(logging.StreamHandler(sys.stdout)) log.addHandler(logging.StreamHandler(sys.stderr)) class TestImaging(unittest.TestCase): def setUp(self): from data_models.parameters import arl_path self.dir = arl_path('test_results') self.persist = False def tearDown(self): pass def actualSetUp(self, add_errors=False, freqwin=3, block=False, dospectral=True, dopol=False, zerow=False, makegcfcf=False): self.npixel = 256 self.low = create_named_configuration('LOWBD2', rmax=750.0) self.freqwin = freqwin self.vis_list = list() self.ntimes = 5 self.cellsize = 0.0005 # Choose the interval so that the maximum change in w is smallish integration_time = numpy.pi * (24 / (12 * 60)) self.times = numpy.linspace(-integration_time * (self.ntimes // 2), integration_time * (self.ntimes // 2), self.ntimes) if freqwin > 1: self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin) self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]]) else: self.frequency = numpy.array([1.0e8]) self.channelwidth = numpy.array([4e7]) if dopol: self.vis_pol = PolarisationFrame('linear') self.image_pol = PolarisationFrame('stokesIQUV') f = numpy.array([100.0, 20.0, -10.0, 1.0]) else: self.vis_pol = PolarisationFrame('stokesI') self.image_pol = PolarisationFrame('stokesI') f = numpy.array([100.0]) if dospectral: flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency]) else: flux = numpy.array([f]) self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000') self.vis_list = [ingest_unittest_visibility(self.low, [self.frequency[freqwin]], [self.channelwidth[freqwin]], self.times, self.vis_pol, self.phasecentre, block=block, zerow=zerow) for freqwin, _ in enumerate(self.frequency)] self.model_list = [create_unittest_model(self.vis_list[freqwin], self.image_pol, cellsize=self.cellsize, npixel=self.npixel) for freqwin, _ in enumerate(self.frequency)] self.components_list = [create_unittest_components(self.model_list[freqwin], flux[freqwin, :][numpy.newaxis, :], single=True) for freqwin, _ in enumerate(self.frequency)] self.model_list = [insert_skycomponent(self.model_list[freqwin], self.components_list[freqwin]) for freqwin, _ in enumerate(self.frequency)] self.vis_list = [predict_skycomponent_visibility(self.vis_list[freqwin], 
self.components_list[freqwin]) for freqwin, _ in enumerate(self.frequency)] centre = self.freqwin // 2 # Calculate the model convolved with a Gaussian. self.model = self.model_list[centre] self.cmodel = smooth_image(self.model) if self.persist: export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir) if self.persist: export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir) if add_errors and block: self.vis_list = [insert_unittest_errors(self.vis_list[i]) for i, _ in enumerate(self.frequency)] self.components = self.components_list[centre] if makegcfcf: self.gcfcf = [create_awterm_convolutionfunction(self.model, nw=61, wstep=16.0, oversampling=8, support=64, use_aaf=True)] self.gcfcf_clipped = [(self.gcfcf[0][0], apply_bounding_box_convolutionfunction(self.gcfcf[0][1], fractional_level=1e-3))] self.gcfcf_joint = [create_awterm_convolutionfunction(self.model, nw=11, wstep=16.0, oversampling=8, support=64, use_aaf=True)] else: self.gcfcf = None self.gcfcf_clipped = None self.gcfcf_joint = None def test_time_setup(self): self.actualSetUp() def _checkcomponents(self, dirty, fluxthreshold=0.6, positionthreshold=1.0): comps = find_skycomponents(dirty, fwhm=1.0, threshold=10 * fluxthreshold, npixels=5) assert len(comps) == len(self.components), "Different number of components found: original %d, recovered %d" % \ (len(self.components), len(comps)) cellsize = abs(dirty.wcs.wcs.cdelt[0]) for comp in comps: # Check for agreement in direction ocomp, separation = find_nearest_skycomponent(comp.direction, self.components) assert separation / cellsize < positionthreshold, "Component differs in position %.3f pixels" % \ separation / cellsize def _predict_base(self, context='2d', extra='', fluxthreshold=1.0, facets=1, vis_slices=1, gcfcf=None, **kwargs): centre = self.freqwin // 2 vis_list = zero_list_serial_workflow(self.vis_list) vis_list = predict_list_serial_workflow(vis_list, self.model_list, context=context, vis_slices=vis_slices, facets=facets, gcfcf=gcfcf, **kwargs) vis_list = subtract_list_serial_workflow(self.vis_list, vis_list) dirty = invert_list_serial_workflow(vis_list, self.model_list, context=context, dopsf=False, gcfcf=gcfcf, normalize=True, vis_slices=vis_slices)[centre] assert numpy.max(numpy.abs(dirty[0].data)), "Residual image is empty" if self.persist: export_image_to_fits(dirty[0], '%s/test_imaging_predict_%s%s_serial_dirty.fits' % (self.dir, context, extra)) maxabs = numpy.max(numpy.abs(dirty[0].data)) assert maxabs < fluxthreshold, "Error %.3f greater than fluxthreshold %.3f " % (maxabs, fluxthreshold) def _invert_base(self, context, extra='', fluxthreshold=1.0, positionthreshold=1.0, check_components=True, facets=1, vis_slices=1, gcfcf=None, **kwargs): centre = self.freqwin // 2 dirty = invert_list_serial_workflow(self.vis_list, self.model_list, context=context, dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices, gcfcf=gcfcf, **kwargs)[centre] if self.persist: export_image_to_fits(dirty[0], '%s/test_imaging_invert_%s%s_serial_dirty.fits' % (self.dir, context, extra)) assert numpy.max(numpy.abs(dirty[0].data)), "Image is empty" if check_components: self._checkcomponents(dirty[0], fluxthreshold, positionthreshold) def test_predict_2d(self): self.actualSetUp(zerow=True) self._predict_base(context='2d') @unittest.skip("Facets need overlap") def test_predict_facets(self): self.actualSetUp() self._predict_base(context='facets', fluxthreshold=17.0, facets=4) @unittest.skip("Timeslice predict needs better interpolation and facets need 
overlap") def test_predict_facets_timeslice(self): self.actualSetUp() self._predict_base(context='facets_timeslice', fluxthreshold=19.0, facets=8, vis_slices=self.ntimes) @unittest.skip("Facets need overlap") def test_predict_facets_wprojection(self, makegcfcf=True): self.actualSetUp() self._predict_base(context='facets', extra='_wprojection', facets=8, fluxthreshold=15.0, gcfcf=self.gcfcf_joint) @unittest.skip("Facets need overlap") def test_predict_facets_wstack(self): self.actualSetUp() self._predict_base(context='facets_wstack', fluxthreshold=15.0, facets=8, vis_slices=101) def test_predict_timeslice(self): self.actualSetUp() self._predict_base(context='timeslice', fluxthreshold=3.0, vis_slices=self.ntimes) def test_predict_wsnapshots(self): self.actualSetUp(makegcfcf=True) self._predict_base(context='wsnapshots', fluxthreshold=3.0, vis_slices=self.ntimes // 2, gcfcf=self.gcfcf_joint) def test_predict_wprojection(self): self.actualSetUp(makegcfcf=True) self._predict_base(context='2d', extra='_wprojection', fluxthreshold=1.0, gcfcf=self.gcfcf) def test_predict_wprojection_clip(self): self.actualSetUp(makegcfcf=True) self._predict_base(context='2d', extra='_wprojection_clipped', fluxthreshold=1.0, gcfcf=self.gcfcf_clipped) def test_predict_wstack(self): self.actualSetUp() self._predict_base(context='wstack', fluxthreshold=1.0, vis_slices=101) def test_predict_wstack_serial(self): self.actualSetUp() self._predict_base(context='wstack', fluxthreshold=1.0, vis_slices=101, use_serial_predict=True) def test_predict_wstack_wprojection(self): self.actualSetUp(makegcfcf=True) self._predict_base(context='wstack', extra='_wprojection', fluxthreshold=1.0, vis_slices=11, gcfcf=self.gcfcf_joint) def test_predict_wstack_spectral(self): self.actualSetUp(dospectral=True) self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=101) def test_predict_wstack_spectral_pol(self): self.actualSetUp(dospectral=True, dopol=True) self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=101) def test_invert_2d(self): self.actualSetUp(zerow=True) self._invert_base(context='2d', positionthreshold=2.0, check_components=False) def test_invert_2d_uniform(self): self.actualSetUp(zerow=True, makegcfcf=True) self.vis_list = weight_list_serial_workflow(self.vis_list, self.model_list, gcfcf=self.gcfcf, weighting='uniform') self._invert_base(context='2d', extra='_uniform', positionthreshold=2.0, check_components=False) @unittest.skip("Facets need overlap") def test_invert_facets(self): self.actualSetUp() self._invert_base(context='facets', positionthreshold=2.0, check_components=True, facets=8) @unittest.skip("Facets need overlap") def test_invert_facets_timeslice(self): self.actualSetUp() self._invert_base(context='facets_timeslice', check_components=True, vis_slices=self.ntimes, positionthreshold=5.0, flux_threshold=1.0, facets=8) @unittest.skip("Facets need overlap") def test_invert_facets_wprojection(self): self.actualSetUp(makegcfcf=True) self._invert_base(context='facets', extra='_wprojection', check_components=True, positionthreshold=2.0, facets=4, gcfcf=self.gcfcf) @unittest.skip("Facets need overlap") def test_invert_facets_wstack(self): self.actualSetUp() self._invert_base(context='facets_wstack', positionthreshold=1.0, check_components=False, facets=4, vis_slices=101) def test_invert_timeslice(self): self.actualSetUp() self._invert_base(context='timeslice', positionthreshold=1.0, check_components=True, vis_slices=self.ntimes) def 
test_invert_wsnapshots(self): self.actualSetUp(makegcfcf=True) self._invert_base(context='wsnapshots', positionthreshold=1.0, check_components=True, vis_slices=self.ntimes // 2, gcfcf=self.gcfcf_joint) def test_invert_wprojection(self): self.actualSetUp(makegcfcf=True) self._invert_base(context='2d', extra='_wprojection', positionthreshold=2.0, gcfcf=self.gcfcf) def test_invert_wprojection_clip(self): self.actualSetUp(makegcfcf=True) self._invert_base(context='2d', extra='_wprojection_clipped', positionthreshold=2.0, gcfcf=self.gcfcf_clipped) def test_invert_wprojection_wstack(self): self.actualSetUp(makegcfcf=True) self._invert_base(context='wstack', extra='_wprojection', positionthreshold=1.0, vis_slices=11, gcfcf=self.gcfcf_joint) def test_invert_wstack(self): self.actualSetUp() self._invert_base(context='wstack', positionthreshold=1.0, vis_slices=101) def test_invert_wstack_spectral(self): self.actualSetUp(dospectral=True) self._invert_base(context='wstack', extra='_spectral', positionthreshold=2.0, vis_slices=101) def test_invert_wstack_spectral_pol(self): self.actualSetUp(dospectral=True, dopol=True) self._invert_base(context='wstack', extra='_spectral_pol', positionthreshold=2.0, vis_slices=101) def test_zero_list(self): self.actualSetUp() centre = self.freqwin // 2 vis_list = zero_list_serial_workflow(self.vis_list) assert numpy.max(numpy.abs(vis_list[centre].vis)) < 1e-15, numpy.max(numpy.abs(vis_list[centre].vis)) predicted_vis_list = [predict_skycomponent_visibility(vis_list[freqwin], self.components_list[freqwin]) for freqwin, _ in enumerate(self.frequency)] assert numpy.max(numpy.abs(predicted_vis_list[centre].vis)) > 0.0, \ numpy.max(numpy.abs(predicted_vis_list[centre].vis)) diff_vis_list = subtract_list_serial_workflow(self.vis_list, predicted_vis_list) assert numpy.max(numpy.abs(diff_vis_list[centre].vis)) < 1e-15, numpy.max(numpy.abs(diff_vis_list[centre].vis)) def test_residual_list(self): self.actualSetUp(zerow=True) centre = self.freqwin // 2 residual_image_list = residual_list_serial_workflow(self.vis_list, self.model_list, context='2d') qa = qa_image(residual_image_list[centre][0]) assert numpy.abs(qa.data['max'] - 0.35139716991480785) < 1.0, str(qa) assert numpy.abs(qa.data['min'] + 0.7681701460717593) < 1.0, str(qa) if __name__ == '__main__': unittest.main()
SKA-ScienceDataProcessor/algorithm-reference-library
tests/workflows/test_imaging_serial.py
Python
apache-2.0
17,056
[ "Gaussian" ]
1ffa9f4b831401feaff54d7ee60bb65bcdf5d3c7b7f742dcb34ee666d47552eb
import os
import os.path
import inspect

import numpy as np
from astropy.io import fits
import yaml
import galsim

import bashes

class Observation(object):
    """
    Represents a GREAT3 observation specified by a branch, index (0-199) and epoch.
    """
    def __init__(self,branch,index,epoch):
        """
        Initializes an observation using a branch path of the form 'control/ground/constant'
        that should be present under $GREAT3_ROOT (and also under $GREAT3_ROOT/truth if truth
        info is required), together with an image index (0-199) and an epoch number. Raises a
        RuntimeError if any problems are detected. After initialization, the following
        attributes are defined: nFields, nSubfieldsPerField, nEpochs, pixelScale, stampSize.
        """
        # Lookup the GREAT3 filesystem root.
        if 'GREAT3_ROOT' not in os.environ:
            raise RuntimeError('$GREAT3_ROOT is not set.')
        g3root = os.environ['GREAT3_ROOT']
        # Check for a valid branch path.
        pathNames = branch.split('/')
        if (len(pathNames) != 3 or
            pathNames[0] not in ('control','real_galaxy','variable_psf','multiepoch','full') or
            pathNames[1] not in ('ground','space') or
            pathNames[2] not in ('constant','variable')):
            raise RuntimeError('Invalid branch path: %r' % branch)
        # Lookup the path to this observation's branch.
        self.branchPath = os.path.join(g3root,branch)
        if not os.path.isdir(self.branchPath):
            raise RuntimeError('No such branch path: %r' % self.branchPath)
        # Do we have truth info available?
        self.truthPath = os.path.join(g3root,'truth',branch)
        if not os.path.isdir(self.truthPath):
            self.truthPath = None
        # Specify this branch's parameters.
        if pathNames[0] in ('variable_psf','full') or pathNames[2] == 'variable':
            self.nFields = 10
            self.nSubfieldsPerField = 20
        else:
            self.nFields = 200
            self.nSubfieldsPerField = 1
        if pathNames[0] in ('multiepoch','full'):
            self.nEpochs = 6
        else:
            self.nEpochs = 1
        if pathNames[1] == 'space':
            if self.nEpochs == 1:
                self.pixelScale = 0.05
                self.stampSize = 96
            else:
                self.pixelScale = 0.1
                self.stampSize = 48
        else:
            self.pixelScale = 0.2
            self.stampSize = 48
        # Check the index and epoch parameters.
        try:
            self.index = int(index)
            assert self.index >= 0 and self.index < 200
        except (ValueError,AssertionError):
            raise RuntimeError('Invalid branch index: %r' % index)
        try:
            self.epoch = int(epoch)
            assert self.epoch >= 0 and self.epoch < self.nEpochs
        except (ValueError,AssertionError):
            raise RuntimeError('Invalid branch epoch index: %r' % epoch)
        # Our galaxy and star images are loaded on demand.
        self.image = None
        self.stars = None
        # Our truth params and catalog are loaded on demand.
        self.truthParams = None
        self.truthCatalog = None

    @staticmethod
    def addArgs(parser):
        """
        Add arguments to the provided command-line parser that support the fromArgs() method.
        """
        parser.add_argument('--branch', type = str, default = 'control/ground/constant',
            help = 'Name of branch to use relative to $GREAT3_ROOT')
        parser.add_argument('--index', type = int, default = 0,
            help = 'Index of field to analyze (0-199)')
        parser.add_argument('--epoch', type = int, default = 0,
            help = 'Epoch number to analyze')

    @staticmethod
    def fromArgs(args):
        """
        Returns a dictionary of constructor parameter values based on the parsed args provided.
        """
        # Look up the named Estimator constructor parameters.
        pnames = (inspect.getargspec(Observation.__init__)).args[1:]
        # Get a dictionary of the arguments provided.
        argsDict = vars(args)
        # Return a dictionary of constructor parameters provided in args.
        return { key:argsDict[key] for key in (set(pnames) & set(argsDict)) }

    @classmethod
    def getGSParams(cls):
        if not hasattr(cls,'GSParams'):
            cls.GSParams = galsim.GSParams(maximum_fft_size=2**16)
        return cls.GSParams

    def getImage(self):
        """
        Returns the array of postage stamp image data for this observation and initializes
        our stampSize data member.
        """
        if self.image is None:
            dataStampsPath = os.path.join(self.branchPath,'image-%03d-%d.fits' % (
                self.index,self.epoch))
            hduList = fits.open(dataStampsPath)
            dataStamps = hduList[0].data
            hduList.close()
            # Check for the expected image dimensions.
            assert dataStamps.shape[0] == dataStamps.shape[1], 'Image data is not square'
            assert dataStamps.shape[0] == 100*self.stampSize, 'Image has unexpected dimensions'
            self.image = bashes.tiled.Tiled(dataStamps,self.stampSize)
            self.image.scale = self.pixelScale
        return self.image

    def getStars(self):
        """
        Returns the array of postage stamp starfield data for this observation.
        """
        if self.stars is None:
            psfStampsPath = os.path.join(self.branchPath,'starfield_image-%03d-%d.fits' % (
                self.index,self.epoch))
            hduList = fits.open(psfStampsPath)
            psfStamps = hduList[0].data
            hduList.close()
            self.stars = bashes.tiled.Tiled(psfStamps,self.stampSize)
            self.stars.scale = self.pixelScale
        return self.stars

    def getTruthParams(self):
        """
        Returns a dictionary of truth parameter values for this observation.
        """
        if self.truthParams is None:
            # No cached value available, so fetch it now.
            if not self.truthPath:
                raise RuntimeError('No truth available for observation')
            truthParamsPath = os.path.join(self.truthPath,'epoch_parameters-%03d-%d.yaml' % (
                self.index,self.epoch))
            with open(truthParamsPath,'r') as f:
                self.truthParams = yaml.load(f)
        return self.truthParams

    def getTruthCatalog(self):
        """
        Returns the truth catalog for this observation.
        """
        if self.truthCatalog is None:
            # No cached value available, so fetch it now.
            if not self.truthPath:
                raise RuntimeError('No truth available for observation')
            truthCatalogPath = os.path.join(self.truthPath,'epoch_catalog-%03d-%d.fits' % (
                self.index,self.epoch))
            hduList = fits.open(truthCatalogPath)
            self.truthCatalog = hduList[1].data
            hduList.close()
        return self.truthCatalog

    def createSource(self,galaxyIndex,shifted = False,lensed = False):
        """
        Returns a GalSim model of the source for the specified galaxy index with optional
        centroid shifts and weak lensing distortion.
        """
        params = self.getTruthCatalog()[galaxyIndex]
        # Create the bulge component.
        bulge = galsim.Sersic(flux = params['bulge_flux'],
            half_light_radius = params['bulge_hlr'],
            n = params['bulge_n'],
            gsparams = Observation.getGSParams())
        bulge.applyShear(q = params['bulge_q'],
            beta = params['bulge_beta_radians']*galsim.radians)
        # Is there a disk component?
        if params['disk_flux'] > 0:
            disk = galsim.Exponential(flux = params['disk_flux'],
                half_light_radius = params['disk_hlr'],
                gsparams = Observation.getGSParams())
            disk.applyShear(q = params['disk_q'],
                beta = params['disk_beta_radians']*galsim.radians)
            source = galsim.Add(bulge,disk)
        else:
            source = bulge
        # Apply optional lensing.
        if lensed:
            source = source.lens(g1=params['g1'],g2=params['g2'],mu=params['mu'])
        # Apply optional centroid shift.
        if shifted:
            source = source.shift(
                dx=params['xshift']*self.pixelScale,
                dy=params['yshift']*self.pixelScale)
        return source

    def createPSF(self,galaxyIndex):
        """
        Returns a GalSim model of the PSF for the specified galaxy index.
        """
        catalog = self.getTruthCatalog()
        keys = catalog.columns.names
        params = catalog[galaxyIndex]
        # Create an empty list of models that will be convolved for the final PSF.
        models = [ ]
        # Add jitter contribution if provided.
        if 'opt_psf_jitter_sigma' in keys:
            jitterPSF = galsim.Gaussian(sigma=params['opt_psf_jitter_sigma']).shear(
                beta = params['opt_psf_jitter_beta']*galsim.degrees,
                e = params['opt_psf_jitter_e'])
            models.append(jitterPSF)
        # Add charge diffusion contribution if provided.
        if 'opt_psf_charge_sigma' in keys:
            chargePSF = galsim.Gaussian(sigma=params['opt_psf_charge_sigma']).shear(
                e1 = params['opt_psf_charge_e1'], e2 = 0.)
            models.append(chargePSF)
        # Create the optical component, which is always present.
        kmap = {
            'opt_psf_lam_over_diam':'lam_over_diam',
            'opt_psf_obscuration':'obscuration',
            'opt_psf_n_struts':'nstruts',
            'opt_psf_strut_angle':'strut_angle',
            'opt_psf_pad_factor':'pad_factor',
            'opt_psf_defocus':'defocus',
            'opt_psf_astig1':'astig1',
            'opt_psf_astig2':'astig2',
            'opt_psf_coma1':'coma1',
            'opt_psf_coma2':'coma2',
            'opt_psf_trefoil1':'trefoil1',
            'opt_psf_trefoil2':'trefoil2',
            'opt_psf_spher':'spher'}
        opticalPSFParams = { kmap[key]:params[key] for key in kmap }
        # Add units for the strut angle.
        opticalPSFParams['strut_angle'] *= galsim.degrees
        # Suppress warnings.
        opticalPSFParams['suppress_warning'] = True
        # Build the optical PSF from the params dictionary.
        models.append(galsim.OpticalPSF(**opticalPSFParams))
        # Add an atmospheric component if a FWHM value is provided.
        if 'atmos_psf_fwhm' in keys:
            atmosphericPSF = galsim.Kolmogorov(fwhm = params['atmos_psf_fwhm']).shear(
                beta = params['atmos_psf_beta']*galsim.degrees,
                e = params['atmos_psf_e'])
            models.append(atmosphericPSF)
        # Return the convolution of all PSF component models.
        return galsim.Convolve(models, gsparams = Observation.getGSParams())

    def createObject(self,galaxyIndex,shifted = True,lensed = True):
        """
        Returns a GalSim model of the object corresponding to the specified galaxy index,
        consisting of the source model with lensing distortion and centroid shift applied,
        and convolved with the appropriate PSF.
        """
        # Look up the component models.
        src = self.createSource(galaxyIndex,shifted,lensed)
        psf = self.createPSF(galaxyIndex)
        # Return their convolution.
        return galsim.Convolve(src,psf)

    def renderObject(self,galaxyIndex,shifted = True,lensed = True,addNoise = True):
        """
        Renders a postage stamp of the truth model for the specified galaxy index with
        optional noise (that will exactly match the noise used for GREAT3).
        """
        obj = self.createObject(galaxyIndex)
        stamp = bashes.utility.render(obj,self.pixelScale,size = self.stampSize)
        if addNoise:
            params = self.getTruthParams()
            seed = params['noise_seed']
            var = float(params['noise']['variance'])
            rng = galsim.BaseDeviate(seed = seed + galaxyIndex)
            noise = galsim.GaussianNoise(rng).withVariance(var)
            stamp.addNoise(noise)
        return stamp

def main():
    import argparse
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    Observation.addArgs(parser)
    parser.add_argument('--stamp', type = int, default = 0,
        help = 'index of stamp to use (0-99999)')
    parser.add_argument('--unlensed', action = 'store_true',
        help = 'do not include weak lensing effects')
    parser.add_argument('--test', action = 'store_true',
        help = 'test that stamp reconstructed from truth matches actual stamp')
    parser.add_argument('--ds9', action = 'store_true',
        help = 'display stamps in DS9')
    parser.add_argument('--save', type = str, default = None,
        help = 'save stamps to the specified FITS file')
    parser.add_argument('--truth', action = 'store_true',
        help = 'print catalog truth parameter values')
    args = parser.parse_args()
    # Initialize the requested observation.
    obs = Observation(**Observation.fromArgs(args))
    # Dump catalog truth info for this stamp if requested.
    if args.truth:
        import pprint
        catalog = obs.getTruthCatalog()
        truth = zip(catalog.columns.names,catalog[args.stamp])
        pprint.pprint(truth)
    # Render stamps if requested.
    if args.ds9 or args.save or args.test:
        lensed = not args.unlensed
        # Lookup the specified stamp's psf and source models.
        psfModel = obs.createPSF(args.stamp)
        srcModel = obs.createSource(args.stamp,shifted=True,lensed=lensed)
        # Render the PSF and source models separately.
        gsp = galsim.GSParams(maximum_fft_size = 2**16)
        psfStamp = bashes.utility.render(psfModel,obs.pixelScale,size=obs.stampSize)
        srcStamp = bashes.utility.render(srcModel,obs.pixelScale,size=obs.stampSize)
        # Render the combined object with and without noise.
        objStamp = obs.renderObject(args.stamp,shifted=True,lensed=lensed,addNoise=False)
        noiseStamp = obs.renderObject(args.stamp,shifted=True,lensed=lensed,addNoise=True)
        if args.test:
            dataStamp = obs.getImage().getStamp(args.stamp)
        if args.ds9:
            display = bashes.display.Display('cmap heat; scale sqrt')
            display.show(psfStamp)
            display.show(srcStamp)
            display.show(noiseStamp)
            if args.test:
                display.show(dataStamp,reuseLimits=True)
                display.show(objStamp,reuseLimits=True)
        if args.save:
            # Open this file using: ds9 -multiframe <filename> -zoom to fit -cmap heat
            stamps = [objStamp,psfStamp,srcStamp,noiseStamp]
            if args.test:
                stamps.append(dataStamp)
            galsim.fits.writeMulti(stamps, file_name = args.save)
        if args.test:
            delta = noiseStamp.array - dataStamp.array
            adiff = np.max(np.abs(delta))
            nonzero = dataStamp.array != 0
            rdiff = np.max(np.abs(delta[nonzero]/dataStamp.array[nonzero]))
            print 'Max difference between generated and saved stamps: %.3g (abs) %.3g (rel)' % (
                adiff,rdiff)
            noiseVar = obs.getTruthParams()['noise']['variance']
            print 'Std. deviation of differences / noise RMS = %.3g' % (
                np.std(delta)/np.sqrt(noiseVar))
            close = np.allclose(noiseStamp.array,dataStamp.array)
            print 'All pixels close?',close
            if not close:
                # Return a non-zero exit code to support scripting.
                return -1

if __name__ == "__main__":
    main()
deepzot/bashes
bashes/great3.py
Python
mit
15,737
[ "Galaxy", "Gaussian" ]
ee75b68cb033ebbadb2073ec46e8fd7f790a1aa19f9d4f6bd0b5f7ce1ec9d14d
#
#  ENVISIoN
#
#  Copyright (c) 2017-2019 Daniel Thomas
#  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#
#  1. Redistributions of source code must retain the above copyright notice, this
#  list of conditions and the following disclaimer.
#  2. Redistributions in binary form must reproduce the above copyright notice,
#  this list of conditions and the following disclaimer in the documentation
#  and/or other materials provided with the distribution.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#  ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################

# Preparations for testing
import os, sys, h5py
import pytest

# path to current directory
TEST_DIR = os.path.dirname(os.path.realpath(__file__))

sys.path.append(os.path.join(TEST_DIR, os.pardir))
import envisionpy.hdf5parser

########################################################################################
# Test of a VASP directory which is compatible with the DOS parser.

# Path to the vasp directory
PATH_TO_VASP_CALC = os.path.join(TEST_DIR, "resources/CuFeS2_band_CBT2")
# Path to the resulting hdf5 file
PATH_TO_HDF5 = os.path.join(TEST_DIR, "dos_demo.hdf5")

def test_parse_DOS():
    """Test correct DOS parsing of a VASP directory.

    Parameters
    ----------
    None

    Returns
    -------
    None

    """
    # Parse
    envisionpy.hdf5parser.dos(PATH_TO_HDF5, PATH_TO_VASP_CALC)
    envisionpy.hdf5parser.unitcell(PATH_TO_HDF5, PATH_TO_VASP_CALC)

    # Test if the generated HDF5-file contains correct information
    if os.path.isfile(PATH_TO_HDF5):
        with h5py.File(PATH_TO_HDF5, 'r') as h5:
            assert '/DOS' in h5
            assert '/FermiEnergy' in h5
            assert '/UnitCell' in h5
            assert '/basis' in h5
            assert '/incar' in h5
            assert '/scaling_factor' in h5

    # cleanup
    os.remove(PATH_TO_HDF5)
rartino/ENVISIoN
unit_testing/test_DOS_parsing.py
Python
bsd-2-clause
2,801
[ "VASP" ]
e1cf92ff2ed41fefcbea60216c80f5157385e1025168a64dc7e7acea6d78f8e7
# RPi Meteor Station
# Copyright (C) 2015 Dario Zubovic
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

""" Timings of compression algorithm with various cases. """

from __future__ import print_function, division, absolute_import

from RMS.Compression import Compressor
import RMS.ConfigReader as cr

import numpy as np
import time
import sys

config = cr.parse(".config")
comp = Compressor(None, None, None, None, None, config)

# IMAGE SIZE
WIDTH = 1280
HEIGHT = 720


def timing(img):
    t = time.time()
    comp.compress(img)
    return time.time() - t


def create(f):
    arr = np.empty((256, HEIGHT, WIDTH), np.uint8)
    for i in range(256):
        arr[i] = f()
    return arr


def black():
    return np.zeros((HEIGHT, WIDTH), np.uint8)


def white():
    return np.full((HEIGHT, WIDTH), 255, np.uint8)


def uniform():
    return np.random.uniform(0, 256, (HEIGHT, WIDTH))


def gauss():
    return np.random.normal(128, 2, (HEIGHT, WIDTH))


def test():
    func_list = [black, white, uniform, gauss]
    t = [0, 0, 0, 0]

    for i in range(4):
        arr = create(func_list[i])
        timing(arr)  # warmup

        for n in range(2):
            t[i] += timing(arr)

    print("Black:", t[0]/2)
    print("White:", t[1]/2)
    print("Uniform noise:", t[2]/2)
    print("Gaussian noise:", t[3]/2)


if __name__ == "__main__":
    test()
CroatianMeteorNetwork/RMS
Tests/CompressionTimings.py
Python
gpl-3.0
1,976
[ "Gaussian" ]
68e9a358332d96f7d0e41449cdb6edc770b034ece3ea181640d95b7d98444410
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

import collections
import os.path

from pymatgen.io.abinit.pseudos import Pseudo, PseudoTable
from pymatgen.util.testing import PymatgenTest

_test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit")


def ref_file(filename):
    return os.path.join(_test_dir, filename)


def ref_files(*filenames):
    return list(map(ref_file, filenames))


class PseudoTestCase(PymatgenTest):
    def setUp(self):
        nc_pseudo_fnames = collections.defaultdict(list)
        nc_pseudo_fnames["Si"] = ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi")

        self.nc_pseudos = collections.defaultdict(list)

        for symbol, fnames in nc_pseudo_fnames.items():
            for fname in fnames:
                root, ext = os.path.splitext(fname)
                pseudo = Pseudo.from_file(fname)
                self.nc_pseudos[symbol].append(pseudo)

                # Save the pseudo as instance attribute whose name
                # is constructed with the rule: symbol_ppformat
                attr_name = symbol + "_" + ext[1:]
                if hasattr(self, attr_name):
                    raise RuntimeError(f"self has already the attribute {attr_name}")

                setattr(self, attr_name, pseudo)

    def test_nc_pseudos(self):
        """Test norm-conserving pseudopotentials"""
        for symbol, pseudos in self.nc_pseudos.items():
            for pseudo in pseudos:
                assert repr(pseudo)
                assert str(pseudo)
                self.assertTrue(pseudo.isnc)
                self.assertFalse(pseudo.ispaw)
                self.assertEqual(pseudo.Z, 14)
                self.assertEqual(pseudo.symbol, symbol)
                self.assertEqual(pseudo.Z_val, 4)
                self.assertGreaterEqual(pseudo.nlcc_radius, 0.0)

                # Test pickle
                self.serialize_with_pickle(pseudo, test_eq=False)

                # Test MSONable
                self.assertMSONable(pseudo)

        # HGH pseudos
        pseudo = self.Si_hgh
        self.assertFalse(pseudo.has_nlcc)
        self.assertEqual(pseudo.l_max, 1)
        self.assertEqual(pseudo.l_local, 0)
        assert not pseudo.supports_soc
        assert self.Si_hgh.md5 is not None
        assert self.Si_hgh == self.Si_hgh

        # TM pseudos
        pseudo = self.Si_pspnc
        self.assertTrue(pseudo.has_nlcc)
        self.assertEqual(pseudo.l_max, 2)
        self.assertEqual(pseudo.l_local, 2)
        assert not pseudo.supports_soc
        assert self.Si_hgh != self.Si_pspnc

        # FHI pseudos
        pseudo = self.Si_fhi
        self.assertFalse(pseudo.has_nlcc)
        self.assertEqual(pseudo.l_max, 3)
        self.assertEqual(pseudo.l_local, 2)
        assert not pseudo.supports_soc

        # Test PseudoTable.
        table = PseudoTable(self.nc_pseudos["Si"])
        assert repr(table)
        assert str(table)
        self.assertTrue(table.allnc)
        self.assertTrue(not table.allpaw)
        self.assertFalse(not table.is_complete)
        assert len(table) == 3
        assert len(table[14]) == 3
        assert len(table.select_symbols("Si")) == 3
        assert table.zlist == [14]

        # Test pickle
        self.serialize_with_pickle(table, test_eq=False)

    def test_pawxml_pseudos(self):
        """Test O.GGA_PBE-JTH-paw.xml."""
        oxygen = Pseudo.from_file(ref_file("O.GGA_PBE-JTH-paw.xml"))
        assert repr(oxygen)
        assert str(oxygen)
        assert isinstance(oxygen.as_dict(), dict)

        self.assertTrue(oxygen.ispaw)
        self.assertTrue(
            oxygen.symbol == "O" and (oxygen.Z, oxygen.core, oxygen.valence) == (8, 2, 6),
            oxygen.Z_val == 6,
        )

        assert oxygen.xc.type == "GGA" and oxygen.xc.name == "PBE"
        assert oxygen.supports_soc
        assert oxygen.md5 is not None
        self.assertAlmostEqual(oxygen.paw_radius, 1.4146523028)

        # Test pickle
        new_objs = self.serialize_with_pickle(oxygen, test_eq=False)
        # Test MSONable
        self.assertMSONable(oxygen)

        for o in new_objs:
            self.assertTrue(o.ispaw)
            self.assertTrue(
                o.symbol == "O" and (o.Z, o.core, o.valence) == (8, 2, 6),
                o.Z_val == 6,
            )
            self.assertAlmostEqual(o.paw_radius, 1.4146523028)

    def test_oncvpsp_pseudo_sr(self):
        """
        Test the ONCVPSP Ge pseudo (scalar relativistic version).
        """
        ger = Pseudo.from_file(ref_file("ge.oncvpsp"))
        assert repr(ger)
        assert str(ger)
        assert isinstance(ger.as_dict(), dict)
        ger.as_tmpfile()

        self.assertTrue(ger.symbol == "Ge")
        self.assertEqual(ger.Z, 32.0)
        self.assertEqual(ger.Z_val, 4.0)
        self.assertTrue(ger.isnc)
        self.assertFalse(ger.ispaw)
        self.assertEqual(ger.l_max, 2)
        self.assertEqual(ger.l_local, 4)
        self.assertEqual(ger.rcore, None)
        assert not ger.supports_soc

        # Data persistence
        self.serialize_with_pickle(ger, test_eq=False)
        self.assertMSONable(ger)

    def test_oncvpsp_pseudo_fr(self):
        """
        Test the ONCVPSP Pb pseudo (relativistic version with SO).
        """
        pb = Pseudo.from_file(ref_file("Pb-d-3_r.psp8"))
        repr(pb)
        str(pb)

        # Data persistence
        self.serialize_with_pickle(pb, test_eq=False)
        self.assertMSONable(pb)

        self.assertTrue(pb.symbol == "Pb")
        self.assertEqual(pb.Z, 82.0)
        self.assertEqual(pb.Z_val, 14.0)
        self.assertTrue(pb.isnc)
        self.assertFalse(pb.ispaw)
        self.assertEqual(pb.l_max, 2)
        self.assertEqual(pb.l_local, 4)
        self.assertTrue(pb.supports_soc)


class PseudoTableTest(PymatgenTest):
    def test_methods(self):
        """Test PseudoTable methods"""
        table = PseudoTable(ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi"))
        assert str(table)
        assert len(table) == 3
        for pseudo in table:
            assert pseudo.isnc
        assert table.allnc and not table.allpaw
        assert table.zlist == [14]

        # Data persistence
        self.serialize_with_pickle(table, test_eq=False)

        d = table.as_dict()
        PseudoTable.from_dict(d)
        self.assertMSONable(table)

        selected = table.select_symbols("Si")
        assert len(selected) == len(table) and selected.__class__ is table.__class__

        with self.assertRaises(ValueError):
            table.pseudos_with_symbols("Si")
materialsproject/pymatgen
pymatgen/io/abinit/tests/test_pseudos.py
Python
mit
6,615
[ "ABINIT", "pymatgen" ]
4d411199b59c10791a2917b522a31f91cfe0228cc941e3d1b1b88bedf4d4488c
# Most of this code is: # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php # The server command includes the additional header: # For discussion of daemonizing: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731 # Code taken also from QP: # http://www.mems-exchange.org/software/qp/ # From lib/site.py # Galaxy originally used PasteScript and PasteDeploy for application # loading, to maintain compatibility we've internalized some of that # code here, stripping out uneeded functionality. # All top level imports from each package moved here and organized import atexit import configparser import errno import grp import logging import optparse import os import pwd import re import resource import signal import socket import subprocess import sys import textwrap import threading import time from gettext import gettext as _ from logging.config import fileConfig from typing import Optional from .loadwsgi import loadapp, loadserver difflib = None # ---- from paste.script.bool_optparse -------------------------------- """ A subclass of ``optparse.OptionParser`` that allows boolean long options (like ``--verbose``) to also take arguments (like ``--verbose=true``). Arguments *must* use ``=``. """ class BoolOptionParser(optparse.OptionParser): def _process_long_opt(self, rargs, values): arg = rargs.pop(0) # Value explicitly attached to arg? Pretend it's the next # argument. if "=" in arg: (opt, next_arg) = arg.split("=", 1) rargs.insert(0, next_arg) had_explicit_value = True else: opt = arg had_explicit_value = False opt = self._match_long_opt(opt) option = self._long_opt[opt] if option.takes_value(): nargs = option.nargs if len(rargs) < nargs: if nargs == 1: self.error(_("%s option requires an argument") % opt) else: self.error(_("%s option requires %d arguments") % (opt, nargs)) elif nargs == 1: value = rargs.pop(0) else: value = tuple(rargs[0:nargs]) del rargs[0:nargs] elif had_explicit_value: value = rargs[0].lower().strip() del rargs[0:1] if value in ('true', 'yes', 'on', '1', 'y', 't'): value = None elif value in ('false', 'no', 'off', '0', 'n', 'f'): # Don't process return else: self.error(_('%s option takes a boolean value only (true/false)') % opt) else: value = None option.process(opt, value, values, self) # ---- from paste.script.command -------------------------------------- # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php class BadCommand(Exception): def __init__(self, message, exit_code=2): self.message = message self.exit_code = exit_code Exception.__init__(self, message) def _get_message(self): """Getter for 'message'; needed only to override deprecation in BaseException.""" return self.__message def _set_message(self, value): """Setter for 'message'; needed only to override deprecation in BaseException.""" self.__message = value # BaseException.message has been deprecated since Python 2.6. # To prevent DeprecationWarning from popping up over this # pre-existing attribute, use a new property that takes lookup # precedence. 
message = property(_get_message, _set_message) class NoDefault: pass # run and invoke methods moved below ServeCommand class Command: def __init__(self, name): self.command_name = name max_args = None max_args_error = 'You must provide no more than %(max_args)s arguments' min_args: Optional[int] = None min_args_error = 'You must provide at least %(min_args)s arguments' required_args = None # If this command takes a configuration file, set this to 1 or -1 # Then if invoked through #! the config file will be put into the positional # arguments -- at the beginning with 1, at the end with -1 takes_config_file: Optional[int] = None # Grouped in help messages by this: group_name = '' required_args = () description: Optional[str] = None usage = '' hidden = False # This is the default verbosity level; --quiet subtracts, # --verbose adds: default_verbosity = 0 # This is the default interactive state: default_interactive = 0 return_code = 0 BadCommand = BadCommand # Must define: # parser # summary # command() def run(self, args): self.parse_args(args) # Setup defaults: for name, default in [('verbose', 0), ('quiet', 0), ('interactive', False), ('overwrite', False)]: if not hasattr(self.options, name): setattr(self.options, name, default) if getattr(self.options, 'simulate', False): self.options.verbose = max(self.options.verbose, 1) self.interactive = self.default_interactive if getattr(self.options, 'interactive', False): self.interactive += self.options.interactive if getattr(self.options, 'no_interactive', False): self.interactive = False self.verbose = self.default_verbosity self.verbose += self.options.verbose self.verbose -= self.options.quiet self.simulate = getattr(self.options, 'simulate', False) # For #! situations: if os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None: take = self.takes_config_file filename = os.environ.get('PASTE_CONFIG_FILE') if take == 1: self.args.insert(0, filename) elif take == -1: self.args.append(filename) else: assert 0, ( "Value takes_config_file must be None, 1, or -1 (not %r)" % take) if os.environ.get('PASTE_DEFAULT_QUIET'): self.verbose = 0 # Validate: if self.min_args is not None and len(self.args) < self.min_args: raise BadCommand( self.min_args_error % {'min_args': self.min_args, 'actual_args': len(self.args)}) if self.max_args is not None and len(self.args) > self.max_args: raise BadCommand( self.max_args_error % {'max_args': self.max_args, 'actual_args': len(self.args)}) for var_name, option_name in self.required_args: if not getattr(self.options, var_name, None): raise BadCommand( 'You must provide the option %s' % option_name) result = self.command() if result is None: return self.return_code else: return result def parse_args(self, args): if self.usage: usage = ' ' + self.usage else: usage = '' self.parser.usage = "%prog [options]{}\n{}".format( usage, self.summary) self.parser.prog = self._prog_name() if self.description: desc = self.description desc = textwrap.dedent(desc) self.parser.description = desc self.options, self.args = self.parser.parse_args(args) def _prog_name(self): return '{} {}'.format(os.path.basename(sys.argv[0]), self.command_name) ######################################## # Utility methods ######################################## def pad(self, s, length, dir='left'): if len(s) >= length: return s if dir == 'left': return s + ' ' * (length - len(s)) else: return ' ' * (length - len(s)) + s def _standard_parser(cls, verbose=True, interactive=False, no_interactive=False, simulate=False, quiet=False, 
overwrite=False): """ Create a standard ``OptionParser`` instance. Typically used like:: class MyCommand(Command): parser = Command.standard_parser() Subclasses may redefine ``standard_parser``, so use the nearest superclass's class method. """ parser = BoolOptionParser() if verbose: parser.add_option('-v', '--verbose', action='count', dest='verbose', default=0) if quiet: parser.add_option('-q', '--quiet', action='count', dest='quiet', default=0) if no_interactive: parser.add_option('--no-interactive', action="count", dest="no_interactive", default=0) if interactive: parser.add_option('-i', '--interactive', action='count', dest='interactive', default=0) if simulate: parser.add_option('-n', '--simulate', action='store_true', dest='simulate', default=False) if overwrite: parser.add_option('-f', '--overwrite', dest="overwrite", action="store_true", help="Overwrite files (warnings will be emitted for non-matching files otherwise)") return parser standard_parser = classmethod(_standard_parser) def quote_first_command_arg(self, arg): """ There's a bug in Windows when running an executable that's located inside a path with a space in it. This method handles that case, or on non-Windows systems or an executable with no spaces, it just leaves well enough alone. """ if sys.platform != 'win32' or ' ' not in arg: # Problem does not apply: return arg try: import win32api except ImportError: raise ValueError( "The executable %r contains a space, and in order to " "handle this issue you must have the win32api module " "installed" % arg) arg = win32api.GetShortPathName(arg) return arg def parse_vars(self, args): """ Given variables like ``['a=b', 'c=d']`` turns it into ``{'a': 'b', 'c': 'd'}`` """ result = {} for arg in args: if '=' not in arg: raise BadCommand( 'Variable assignment %r invalid (no "=")' % arg) name, value = arg.split('=', 1) result[name] = value return result def logging_file_config(self, config_file): """ Setup logging via the logging module's fileConfig function with the specified ``config_file``, if applicable. ConfigParser defaults are specified for the special ``__file__`` and ``here`` variables, similar to PasteDeploy config loading. """ parser = configparser.ConfigParser() parser.read([config_file]) if parser.has_section('loggers'): config_file = os.path.abspath(config_file) fileConfig(config_file, dict(__file__=config_file, here=os.path.dirname(config_file))) class NotFoundCommand(Command): def run(self, args): print('Command %r not known (you may need to run setup.py egg_info)' % self.command_name) commands = list() commands.sort() if not commands: print('No commands registered.') print('Have you installed Paste Script?') print('(try running python setup.py develop)') return 2 print('Known commands:') longest = max([len(n) for n, c in commands]) for name, command in commands: print(' {} {}'.format(self.pad(name, length=longest), command.load().summary)) return 2 # ---- From paste.script.serve ---------------------------------------- MAXFD = 1024 jython = sys.platform.startswith('java') class DaemonizeException(Exception): pass class ServeCommand(Command): min_args = 0 usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]' takes_config_file = 1 summary = "Serve the described application" description: Optional[str] = """\ This command serves a web application that uses a paste.deploy configuration file for the server and application. If start/stop/restart is given, then --daemon is implied, and it will start (normal operation), stop (--stop-daemon), or do both. 
You can also include variable assignments like 'http_port=8080' and then use %(http_port)s in your config files. """ # used by subclasses that configure apps and servers differently requires_config_file = True parser = Command.standard_parser(quiet=True) parser.add_option('-n', '--app-name', dest='app_name', metavar='NAME', help="Load the named application (default main)") parser.add_option('-s', '--server', dest='server', metavar='SERVER_TYPE', help="Use the named server.") parser.add_option('--server-name', dest='server_name', metavar='SECTION_NAME', help="Use the named server as defined in the configuration file (default: main)") if hasattr(os, 'fork'): parser.add_option('--daemon', dest="daemon", action="store_true", help="Run in daemon (background) mode") parser.add_option('--pid-file', dest='pid_file', metavar='FILENAME', help="Save PID to file (default to paster.pid if running in daemon mode)") parser.add_option('--log-file', dest='log_file', metavar='LOG_FILE', help="Save output to the given log file (redirects stdout)") parser.add_option('--reload', dest='reload', action='store_true', help="Use auto-restart file monitor") parser.add_option('--reload-interval', dest='reload_interval', default=1, help="Seconds between checking files (low number can cause significant CPU usage)") parser.add_option('--monitor-restart', dest='monitor_restart', action='store_true', help="Auto-restart server if it dies") parser.add_option('--status', action='store_true', dest='show_status', help="Show the status of the (presumably daemonized) server") if hasattr(os, 'setuid'): # I don't think these are available on Windows parser.add_option('--user', dest='set_user', metavar="USERNAME", help="Set the user (usually only possible when run as root)") parser.add_option('--group', dest='set_group', metavar="GROUP", help="Set the group (usually only possible when run as root)") parser.add_option('--stop-daemon', dest='stop_daemon', action='store_true', help='Stop a daemonized server (given a PID file, or default paster.pid file)') if jython: parser.add_option('--disable-jython-reloader', action='store_true', dest='disable_jython_reloader', help="Disable the Jython reloader") _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I) default_verbosity = 1 _reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN' _monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN' possible_subcommands = ('start', 'stop', 'restart', 'status') def command(self): if self.options.stop_daemon: return self.stop_daemon() if not hasattr(self.options, 'set_user'): # Windows case: self.options.set_user = self.options.set_group = None # @@: Is this the right stage to set the user at? 
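# Note: privileges are dropped here, before the config file is read and
# before any daemonizing, so the PID and log files created later in
# command() are owned by the target user rather than root.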
self.change_user_group( self.options.set_user, self.options.set_group) if self.requires_config_file: if not self.args: raise BadCommand('You must give a config file') app_spec = self.args[0] if len(self.args) > 1 and self.args[1] in self.possible_subcommands: cmd = self.args[1] restvars = self.args[2:] else: cmd = None restvars = self.args[1:] else: app_spec = "" if self.args and self.args[0] in self.possible_subcommands: cmd = self.args[0] restvars = self.args[1:] else: cmd = None restvars = self.args[:] if (getattr(self.options, 'daemon', False) and getattr(self.options, 'reload', False)): raise BadCommand('The --daemon and --reload options may not be used together') jython_monitor = False if self.options.reload: if jython and not self.options.disable_jython_reloader: # JythonMonitor raises the special SystemRestart # exception that'll cause the Jython interpreter to # reload in the existing Java process (avoiding # subprocess startup time) try: from paste.reloader import JythonMonitor except ImportError: pass else: jython_monitor = JythonMonitor(poll_interval=int( self.options.reload_interval)) if self.requires_config_file: jython_monitor.watch_file(self.args[0]) if not jython_monitor: if os.environ.get(self._reloader_environ_key): from paste import reloader if self.verbose > 1: print('Running reloading file monitor') reloader.install(int(self.options.reload_interval)) if self.requires_config_file: reloader.watch_file(self.args[0]) else: return self.restart_with_reloader() if cmd not in (None, 'start', 'stop', 'restart', 'status'): raise BadCommand( 'Error: must give start|stop|restart (not %s)' % cmd) if cmd == 'status' or self.options.show_status: return self.show_status() if cmd == 'restart' or cmd == 'stop': result = self.stop_daemon() if result: print("Could not stop daemon") # It's ok to continue trying to restart if stop_daemon returns # a 1, otherwise shortcut and return. 
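# (stop_daemon return codes, per its definition below: 0 = stopped
# cleanly, 1 = no usable PID file, 2 = a stale PID file could not be
# deleted, 3 = the process would not die after ten SIGTERMs)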
if cmd == 'restart' and result != 1: return result if cmd == 'stop': return result self.options.daemon = True if cmd == 'start': self.options.daemon = True app_name = self.options.app_name vars = self.parse_vars(restvars) if not self._scheme_re.search(app_spec): app_spec = 'config:' + app_spec server_name = self.options.server_name if self.options.server: server_spec = 'egg:PasteScript' assert server_name is None server_name = self.options.server else: server_spec = app_spec base = os.getcwd() if getattr(self.options, 'daemon', False): if not self.options.pid_file: self.options.pid_file = 'paster.pid' if not self.options.log_file: self.options.log_file = 'paster.log' # Ensure the log file is writeable if self.options.log_file: try: writeable_log_file = open(self.options.log_file, 'a') except OSError as ioe: msg = 'Error: Unable to write to log file: %s' % ioe raise BadCommand(msg) writeable_log_file.close() # Ensure the pid file is writeable if self.options.pid_file: try: writeable_pid_file = open(self.options.pid_file, 'a') except OSError as ioe: msg = 'Error: Unable to write to pid file: %s' % ioe raise BadCommand(msg) writeable_pid_file.close() if getattr(self.options, 'daemon', False): try: self.daemonize() except DaemonizeException as ex: if self.verbose > 0: print(str(ex)) return if (self.options.monitor_restart and not os.environ.get(self._monitor_environ_key)): return self.restart_with_monitor() if self.options.pid_file: self.record_pid(self.options.pid_file) if self.options.log_file: stdout_log = LazyWriter(self.options.log_file, 'a') sys.stdout = stdout_log sys.stderr = stdout_log logging.basicConfig(stream=stdout_log) log_fn = app_spec if log_fn.startswith('config:'): log_fn = app_spec[len('config:'):] elif log_fn.startswith('egg:'): log_fn = None if log_fn: log_fn = os.path.join(base, log_fn) self.logging_file_config(log_fn) server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars) app = loadapp(app_spec, name=app_name, relative_to=base, global_conf=vars) if self.verbose > 0: if hasattr(os, 'getpid'): msg = 'Starting server in PID %i.' % os.getpid() else: msg = 'Starting server.' print(msg) def serve(): try: server(app) except (SystemExit, KeyboardInterrupt) as e: if self.verbose > 1: raise if str(e): msg = ' ' + str(e) else: msg = '' print('Exiting%s (-v to see traceback)' % msg) except AttributeError as e: # Capturing bad error response from paste if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'": raise OSError(98, 'Address already in use') else: raise AttributeError(e) if jython_monitor: # JythonMonitor has to be ran from the main thread threading.Thread(target=serve).start() print('Starting Jython file monitor') jython_monitor.periodic_reload() else: serve() def daemonize(self): pid = live_pidfile(self.options.pid_file) if pid: raise DaemonizeException( "Daemon is already running (PID: %s from PID file %s)" % (pid, self.options.pid_file)) if self.verbose > 0: print('Entering daemon mode') pid = os.fork() if pid: # The forked process also has a handle on resources, so we # *don't* want proper termination of the process, we just # want to exit quick (which os._exit() does) os._exit(0) # Make this the session leader os.setsid() # Fork again for good measure! pid = os.fork() if pid: os._exit(0) # @@: Should we set the umask and cwd now? maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = MAXFD # Iterate through and close all file descriptors. 
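# (right after this loop, fd 0 is reopened on os.devnull and duplicated
# onto fds 1 and 2, so the daemon keeps valid standard streams)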
for fd in range(0, maxfd): try: os.close(fd) except OSError: # ERROR, fd wasn't open to begin with (ignored) pass if hasattr(os, "devnull"): REDIRECT_TO = os.devnull else: REDIRECT_TO = "/dev/null" os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) # Duplicate standard input to standard output and standard error. os.dup2(0, 1) # standard output (1) os.dup2(0, 2) # standard error (2) def record_pid(self, pid_file): pid = os.getpid() if self.verbose > 1: print(f'Writing PID {pid} to {pid_file}') f = open(pid_file, 'w') f.write(str(pid)) f.close() atexit.register(_remove_pid_file, pid, pid_file, self.verbose) def stop_daemon(self): pid_file = self.options.pid_file or 'paster.pid' if not os.path.exists(pid_file): print('No PID file exists in %s' % pid_file) return 1 pid = read_pidfile(pid_file) if not pid: print("Not a valid PID file in %s" % pid_file) return 1 pid = live_pidfile(pid_file) if not pid: print("PID in %s is not valid (deleting)" % pid_file) try: os.unlink(pid_file) except OSError as e: print("Could not delete: %s" % e) return 2 return 1 for _i in range(10): if not live_pidfile(pid_file): break os.kill(pid, signal.SIGTERM) time.sleep(1) else: print("failed to kill web process %s" % pid) return 3 if os.path.exists(pid_file): os.unlink(pid_file) return 0 def show_status(self): pid_file = self.options.pid_file or 'paster.pid' if not os.path.exists(pid_file): print('No PID file %s' % pid_file) return 1 pid = read_pidfile(pid_file) if not pid: print('No PID in file %s' % pid_file) return 1 pid = live_pidfile(pid_file) if not pid: print(f'PID {pid} in {pid_file} is not running') return 1 print('Server running in PID %s' % pid) return 0 def restart_with_reloader(self): self.restart_with_monitor(reloader=True) def restart_with_monitor(self, reloader=False): if self.verbose > 0: if reloader: print('Starting subprocess with file monitor') else: print('Starting subprocess with monitor parent') while 1: args = [self.quote_first_command_arg(sys.executable)] + sys.argv new_environ = os.environ.copy() if reloader: new_environ[self._reloader_environ_key] = 'true' else: new_environ[self._monitor_environ_key] = 'true' proc = None try: try: _turn_sigterm_into_systemexit() proc = subprocess.Popen(args, env=new_environ) exit_code = proc.wait() proc = None except KeyboardInterrupt: print('^C caught in monitor process') if self.verbose > 1: raise return 1 finally: if proc is not None and hasattr(os, 'kill'): try: os.kill(proc.pid, signal.SIGTERM) except OSError: pass if reloader: # Reloader always exits with code 3; but if we are # a monitor, any exit code will restart if exit_code != 3: return exit_code if self.verbose > 0: print('-' * 20, 'Restarting', '-' * 20) def change_user_group(self, user, group): if not user and not group: return uid = gid = None if group: try: gid = int(group) group = grp.getgrgid(gid).gr_name except ValueError: try: entry = grp.getgrnam(group) except KeyError: raise BadCommand( "Bad group: %r; no such group exists" % group) gid = entry.gr_gid try: uid = int(user) user = pwd.getpwuid(uid).pw_name except ValueError: try: entry = pwd.getpwnam(user) except KeyError: raise BadCommand( "Bad username: %r; no such user exists" % user) if not gid: gid = entry.pw_gid uid = entry.pw_uid if self.verbose > 0: print('Changing user to {}:{} ({}:{})'.format( user, group or '(unknown)', uid, gid)) if hasattr(os, 'initgroups'): os.initgroups(user, gid) else: os.setgroups([e.gr_gid for e in grp.getgrall() if user in e.gr_mem] + [gid]) if gid: os.setgid(gid) if uid: os.setuid(uid) class 
LazyWriter:
    """
    File-like object that opens a file lazily when it is first written to.
    """

    def __init__(self, filename, mode='w'):
        self.filename = filename
        self.fileobj = None
        self.lock = threading.Lock()
        self.mode = mode

    def open(self):
        if self.fileobj is None:
            self.lock.acquire()
            try:
                if self.fileobj is None:
                    self.fileobj = open(self.filename, self.mode)
            finally:
                self.lock.release()
        return self.fileobj

    def write(self, text):
        fileobj = self.open()
        fileobj.write(text)
        fileobj.flush()

    def writelines(self, text):
        fileobj = self.open()
        fileobj.writelines(text)
        fileobj.flush()

    def flush(self):
        self.open().flush()


def live_pidfile(pidfile):
    """(pidfile:str) -> int | None
    Returns an int found in the named file, if there is one,
    and if there is a running process with that process id.
    Return None if no such process exists.
    """
    pid = read_pidfile(pidfile)
    if pid:
        try:
            os.kill(int(pid), 0)
            return pid
        except OSError as e:
            if e.errno == errno.EPERM:
                return pid
    return None


def read_pidfile(filename):
    if os.path.exists(filename):
        try:
            f = open(filename)
            content = f.read()
            f.close()
            return int(content.strip())
        except (ValueError, OSError):
            return None
    else:
        return None


def _remove_pid_file(written_pid, filename, verbosity):
    current_pid = os.getpid()
    if written_pid != current_pid:
        # A forked process must be exiting, not the process that
        # wrote the PID file
        return
    if not os.path.exists(filename):
        return
    f = open(filename)
    content = f.read().strip()
    f.close()
    try:
        pid_in_file = int(content)
    except ValueError:
        pass
    else:
        if pid_in_file != current_pid:
            print("PID file {} contains {}, not expected PID {}".format(
                filename, pid_in_file, current_pid))
            return
    if verbosity > 0:
        print("Removing PID file %s" % filename)
    try:
        os.unlink(filename)
        return
    except OSError as e:
        # Record, but don't give traceback
        print("Cannot remove PID file: %s" % e)
    # well, at least let's not leave the invalid PID around...
    try:
        f = open(filename, 'w')
        f.write('')
        f.close()
    except OSError as e:
        print(f'Stale PID left in file: {filename} ({e})')
    else:
        print('Stale PID removed')


def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.

    Does this by connecting to them until they give connection
    refused.  Servers should call like::

        import paste.script
        ensure_port_cleanup([80, 443])
    """
    atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
                    sleeptime=sleeptime)


def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
    # Wait for the server to release each port: keep connecting until
    # the connection is refused.
    for bound_address in bound_addresses:
        for _i in range(maxtries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(bound_address)
            except OSError as e:
                if e.errno != errno.ECONNREFUSED:
                    raise
                break
            else:
                time.sleep(sleeptime)
        else:
            raise SystemExit('Timeout waiting for port.')
        sock.close()


def _turn_sigterm_into_systemexit():
    """
    Attempts to turn a SIGTERM exception into a SystemExit exception.
""" def handle_term(signo, frame): raise SystemExit signal.signal(signal.SIGTERM, handle_term) # ---- from paste.script.command -------------------------------------- python_version = sys.version.splitlines()[0].strip() parser = optparse.OptionParser(add_help_option=False, # version='%s from %s (python %s)' # % (dist, dist.location, python_version), usage='%prog [paster_options] COMMAND [command_options]') parser.add_option( '-h', '--help', action='store_true', dest='do_help', help="Show this help message") parser.disable_interspersed_args() # @@: Add an option to run this in another Python interpreter commands = { 'serve': ServeCommand } def run(args=None): if (not args and len(sys.argv) >= 2 and os.environ.get('_') and sys.argv[0] != os.environ['_'] and os.environ['_'] == sys.argv[1]): # probably it's an exe execution args = ['exe', os.environ['_']] + sys.argv[2:] if args is None: args = sys.argv[1:] options, args = parser.parse_args(args) options.base_parser = parser if options.do_help: args = ['help'] + args if not args: print('Usage: %s COMMAND' % sys.argv[0]) args = ['help'] command_name = args[0] if command_name not in commands: command = NotFoundCommand else: command = commands[command_name] invoke(command, command_name, options, args[1:]) def invoke(command, command_name, options, args): try: runner = command(command_name) exit_code = runner.run(args) except BadCommand as e: print(e) exit_code = e.exit_code sys.exit(exit_code)
natefoo/pulsar
pulsar/util/pastescript/serve.py
Python
apache-2.0
36,222
[ "Galaxy" ]
210cd7eca5affb655e7f57717f6530d8a1a74012847dfd9d7d59476b156d3f5a
'''<h1>Library for surface x-ray diffraction simulations</h1>
<p> The problem of modelling the sample is divided into four different classes:
Sample, Slab, UnitCell and Instrument. A Slab is the basic unit that builds up
a sample and can be seen as a quasi-unitcell for the sxrd problem. Strictly,
it is a 2D unit cell with a finite extension out-of-plane. The Sample is then
built from these Slabs: one slab for the bulk and a list of slabs for the
surface structure.
<p> The UnitCell holds the parameters of the unit cell and the Instrument
contains the instrument variables. See below for a full list.
<h2>Classes</h2>
<h3>Slab</h3>
<code> Slab(c = 1.0, slab_oc = 1.0)</code><br>
<dl>
<dt><code><b>c</b></code></dt>
<dd> A scale factor for the out-of-plane extension of the Slab. All z-positions
will be scaled with this factor.</dd>
<dt><code><b>slab_oc</b></code></dt>
<dd> A global scaling of the occupancy of all atoms in the slab.</dd>
</dl>
<code> [Slab].add_atom(id, el, x, y, z, u = 0, oc = 1.0, m = 1.0)</code><br>
<dl>
<dt><code><b>id</b></code></dt>
<dd>A unique string identifier</dd>
<dt><code><b>el</b></code></dt>
<dd>The element given as a string. Note that ions are denoted as "Sr2p" and
"O2m", where 2 is the oxidation number and p and m denote plus and minus
charge.</dd>
<dt><code><b>x</b></code></dt>
<dd> The x-position in Slab unit cell coords (same as given by the UnitCell)</dd>
<dt><code><b>y</b></code></dt>
<dd> The y-position in Slab unit cell coords (same as given by the UnitCell)</dd>
<dt><code><b>z</b></code></dt>
<dd> The z-position in Slab unit cell coords (the UnitCell c scaled by a factor
of the c-value for the slab)</dd>
<dt><code><b>u</b></code></dt>
<dd> The mean-square displacement for the atom</dd>
<dt><code><b>oc</b></code></dt>
<dd> The occupancy of the atom</dd>
<dt><code><b>m</b></code></dt>
<dd> The multiplicity of the site, defined as in the International Tables of
Crystallography. Note that it is plane groups and NOT space groups that will
produce valid results.</dd>
</dl>
<code> [Slab].copy()</code><br>
Creates a copy of object [Slab]. This decouples the new object returned by
copy from the original [Slab].
<code> [Slab].find_atoms(expression)</code><br>
Function to locate atoms in a slab in order to connect parameters between
them. Returns an AtomGroup.
<dl>
<dt><code><b>expression</b></code></dt>
<dd> Either a list of the same length as the number of atoms or a string that
will evaluate to true or false for each atom. Allowed variables are:
<code>x, y, z, id, el, u, oc, m</code></dd>
</dl>
<code> [Slab].all_atoms()</code><br>
Yields all atoms inside a slab as an AtomGroup. Returns an AtomGroup.
<code> [Slab][id]</code><br>
Locates the atom that has id <code>id</code>. Returns an AtomGroup.
<dl>
<dt><code><b>id</b></code></dt>
<dd>Unique string identifier for one atom</dd>
</dl>
<h3>Sample</h3>
<code> Sample(inst, bulk_slab, slabs, unit_cell, surface_sym = [], bulk_sym = []) </code><br>
<dl>
<dt><code><b>inst</b></code></dt>
<dd> Instrument object for the sample </dd>
<dt><code><b>bulk_slab</b></code></dt>
<dd>The Slab that describes the bulk structure </dd>
<dt><code><b>slabs</b></code></dt>
<dd>A list ([]) of slabs for the surface structure </dd>
<dt><code><b>unit_cell</b></code></dt>
<dd>A UnitCell object </dd>
<dt><code><b>surface_sym</b></code></dt>
<dd>A list ([]) of SymTrans objects describing the surface symmetry. Default
value - an empty list will implement a p1 symmetry, that is no symmetry
operations at all.
</dd>
<dt><code><b>bulk_sym</b></code></dt>
<dd>A list ([]) of SymTrans objects describing the bulk symmetry. Default
value - an empty list will implement a p1 symmetry, that is no symmetry
operations at all.
</dd>
</dl>
<code>[Sample].calc_f(h, k, l)</code><br>
Calculates the total structure factor (complex number) from the surface and
bulk structure. Returns an array of the same size as h, k, l. (h, k and l
should be of the same length and are given in coordinates of the reciprocal
lattice as defined by the unit_cell coords)
<code>[Sample].turbo_calc_f(h, k, l)</code><br>
A faster version of <code>calc_f</code> which uses inline C code to increase
the speed. Can be less stable than <code>calc_f</code>; use at your own risk.
<code>[Sample].calc_rhos(x, y, z, sb)</code><br>
Calculates the surface electron density of a model. The parameter sb is a
Gaussian convolution factor giving the width of the Gaussian in reciprocal
space. Used mainly for comparison with direct methods, i.e. DCAF. NOTE that
the transformation from the width of the window function given in
<code>dimes.py</code> is <code>sqrt(2)*pi*[]</code>
'''
#updated by Jackey Qiu 12/19/2011
"""
change log:

class Sample has been updated to consider domains in the frame of the surface
unit cell. The argument slabs is now a library (a dict) of domains in the form
{'domain_name':{'slab':slab_class,'wt':0.1}}, and therefore the vertical
stacking operation is disabled. The argument 'surface_parms' is a library of
delta1 and delta2, which are used to define the surface unit cell; if there is
no coordinate system change, just set them to 0. The argument 'coherence' is a
flag controlling how structure factors of different domains are added up:
True means adding up coherently, False means adding up incoherently.

class Slab was updated as follows:
dx was changed to three parameters dx1, dx2 and dx3, and the same applies to
dy and dz. It was changed this way to fit the symmetry operations in
AtomGroup; refer to the AtomGroup part for details. The function
_extract_value was changed accordingly. The argument T_factor is a switch
between different interpretations of the thermal factor, which can be either
'u' or 'B'. Some bugs in the function del_atom were fixed; now it works well.

AtomGroup was changed to move atoms on a symmetrical basis. In the original
version, a dx/dy/dz shift set in AtomGroup was applied in exactly the same way
to all member atoms. After considering symmetry operations, a dx shift in
AtomGroup will shift dx1, dy1 and dz1 simultaneously, a dy shift corresponds
to dx2, dy2 and dz2 (and dz to dx3, dy3 and dz3) in the associated slab;
that's why Slab has three sets of dx dy dz, which have been customized in the
slab class. The argument id_in_sym_file is a list of ids whose order
corresponds to their row order in the symmetry-operation datafile. use_sym is
a switch to use symmetry or not. filename is the file name of the symmetry
operations (txt file, data in the form of n by 9). set_par holds scaling
parameters for code extension in the future. _set_func and _get_func were
changed accordingly.
"""
##in version3##
#the symmetry-related domains are always added up incoherently. If coherence is True, the domainA's are added up coherently
#and the domainB's are added up coherently (the two sets are then combined incoherently).
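#Example (sketch) of the domain dictionary and coherence flag described in the change log above.
#inst, bulk, dom1A, dom1B and unit_cell stand for already-built Instrument, Slab and UnitCell
#objects; they are placeholders, not names defined in this module:
#    domains = {'domain1A': {'slab': dom1A, 'wt': 0.5},
#               'domain1B': {'slab': dom1B, 'wt': 0.5}}
#    sample = Sample(inst, bulk, domains, unit_cell,
#                    surface_parms={'delta1': 0, 'delta2': 0}, coherence=True)
#    F = sample.calc_f(h, k, l)   # returns |F| scaled by inst.inten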
##in version4##
#the sym_file argument was removed from AtomGroup; each time you add a new group member,
#you must specify the matrix list to define the symmetry relationship

import numpy as np
from utils import f, rho
import time,os
import pickle,copy

try:
    from scipy import weave
    _turbo_sim = True
except:
    print 'Info: Could not import weave, turbo off'
    _turbo_sim = False

__pars__ = ['Sample', 'UnitCell', 'Slab', 'AtomGroup', 'Instrument']

class Sample:
    def __init__(self, inst, bulk_slab, slabs, unit_cell,
                 surface_parms={'delta1':0,'delta2':0},
                 surface_sym=[], bulk_sym=[], coherence=True):
        self.set_bulk_slab(bulk_slab)
        #self.set_slabs(slabs)
        self.set_surface_sym(surface_sym)
        self.set_bulk_sym(bulk_sym)
        self.inst = inst
        self.set_unit_cell(unit_cell)
        self.delta1 = surface_parms['delta1']
        self.delta2 = surface_parms['delta2']
        self.domain = slabs
        self.coherence = coherence

    def set_bulk_slab(self, bulk_slab):
        '''Set the bulk unit cell to bulk_slab'''
        if type(bulk_slab) != type(Slab()):
            raise TypeError("The bulk slab has to be a member of class Slab")
        self.bulk_slab = bulk_slab

    def set_slabs(self, slabs):
        '''Set the slabs of the sample. slabs should be a list of objects from the class Slab'''
        if type(slabs) != type([]):
            raise TypeError("The surface slabs have to be contained in a list")
        if min([type(slab) == type(Slab()) for slab in slabs]) == 0:
            raise TypeError("All members of the slabs list have to be members of class Slab")
        self.slabs = slabs

    def set_surface_sym(self, sym_list):
        '''Sets the list of symmetry operations for the surface.
        sym_list has to be a list ([]) of symmetry elements from the class SymTrans
        '''
        # Type checking
        if type(sym_list) != type([]):
            raise TypeError("The surface symmetries have to be contained in a list")
        if sym_list == []:
            sym_list = [SymTrans()]
        if min([type(sym) == type(SymTrans()) for sym in sym_list]) == 0:
            raise TypeError("All members of the symmetry list have to be members of class SymTrans")
        self.surface_sym = sym_list

    def set_bulk_sym(self, sym_list):
        '''Sets the list of allowed symmetry operations for the bulk.
        sym_list has to be a list ([]) of symmetry elements from the class SymTrans
        '''
        # Type checking
        if type(sym_list) != type([]):
            raise TypeError("The bulk symmetries have to be contained in a list")
        if sym_list == []:
            sym_list = [SymTrans()]
        if min([type(sym) == type(SymTrans()) for sym in sym_list]) == 0:
            raise TypeError("All members of the symmetry list have to be members of class SymTrans")
        self.bulk_sym = sym_list

    def set_unit_cell(self, unit_cell):
        '''Sets the unitcell of the sample'''
        if unit_cell == None:
            # default to a unit cube if no unit cell is given
            unit_cell = UnitCell(1.0, 1.0, 1.0)
        if type(unit_cell) != type(UnitCell(1.0, 1.0, 1.0)):
            raise TypeError("The unit cell has to be a member of class UnitCell")
        self.unit_cell = unit_cell

    def calc_f(self, h, k, l):
        '''Calculate the structure factors for the sample.
        The chemically equivalent domains (the A set and the B set) are always
        added up incoherently with respect to each other.
        '''
        ftot=0
        ftot_A=0
        ftot_B=0
        keys_domainA=[]
        keys_domainB=[]
        fb = self.calc_fb(h, k, l)
        for i in self.domain.keys():
            if "A" in i:keys_domainA.append(i)
            if "B" in i:keys_domainB.append(i)
        if self.coherence==True:
            for i in keys_domainA:
                if self.domain[i]['wt']!=0:
                    ftot_A=ftot_A+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    pass
            for i in keys_domainB:
                if self.domain[i]['wt']!=0:
                    ftot_B=ftot_B+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    pass
        else:
            for i in keys_domainA:
                if self.domain[i]['wt']!=0:
                    ftot_A=ftot_A+abs(fb+self.calc_fs(h, k,
l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: pass for i in keys_domainB: if self.domain[i]['wt']!=0: ftot_B=ftot_B+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: pass ftot=abs(ftot_A)+abs(ftot_B) return abs(ftot)*self.inst.inten def calc_f2(self, h, k, l): #here incoherence means add up all domains in-coherently, and coherence means adding up all coherently ftot=0 fb = self.calc_fb(h, k, l) if self.coherence==True: for i in self.domain.keys(): ftot=ftot+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: for i in self.domain.keys(): ftot=ftot+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] return abs(ftot)*self.inst.inten def calc_f3(self, h, k, l): #here self.coherence is a list of True (add up coherently) or False (add up in-coherently) ftot=0 ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] fb = self.calc_fb(h, k, l) for i in self.domain.keys(): if "A" in i:keys_domainA.append(i) if "B" in i:keys_domainB.append(i) for i in keys_domainA: j=int(i[-2])-1 if self.coherence[j]: ftot_A_C=ftot_A_C+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] for i in keys_domainB: j=int(i[-2])-1 if self.coherence[j]: ftot_B_C=ftot_B_C+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] ftot=abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C) return abs(ftot)*self.inst.inten def calc_f4(self, h, k, l): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) f_surface=self.calc_fs for n in range(len(coherence)): ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] for i in coherence[n].values()[0]: keys_domainA.append('domain'+str(i+1)+'A') keys_domainB.append('domain'+str(i+1)+'B') for i in keys_domainA: if coherence[n].keys()[0]: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] for i in keys_domainB: if coherence[n].keys()[0]: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] ftot=ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C) #ftot=ftot+ftot_A_C+ftot_A_IC+ftot_B_IC+ftot_B_C return abs(ftot)*self.inst.inten def calc_f4_specular(self, h, k, l,raxr_el): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. 
After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) f_surface=self.calc_fs for n in range(len(coherence)): ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] for i in coherence[n].values()[0]: keys_domainA.append('domain'+str(i+1)+'A') keys_domainB.append('domain'+str(i+1)+'B') for i in keys_domainA: f_layered_water=0 f_layered_sorbate=0 if self.domain[i]['layered_water']!=[]: f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water']) if 'layered_sorbate' in self.domain[i].keys(): if self.domain[i]['layered_sorbate']!=[]: f_layered_sorbate=self.calc_f_layered_sorbate(h,k,l,raxr_el,*self.domain[i]['layered_sorbate']) if coherence[n].keys()[0]: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] for i in keys_domainB: f_layered_water=0 if self.domain[i]['layered_water']!=[]: f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water']) if 'layered_sorbate' in self.domain[i].keys(): if self.domain[i]['layered_sorbate']!=[]: f_layered_sorbate=self.calc_f_layered_sorbate(h,k,l,raxr_el,*self.domain[i]['layered_sorbate']) if coherence[n].keys()[0]: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] ftot=ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C) #ftot=ftot+ftot_A_C+ftot_A_IC+ftot_B_IC+ftot_B_C return abs(ftot)*self.inst.inten def calculate_structure_factor(self,h,k,x,y,index=None,fit_mode='MD',height_offset=0,version=1): if x[0]<100:#CTR data return self.calc_f4_muscovite_CTR(h,k,x,height_offset,version) else:#RAXR data if fit_mode=='MI': return self.calc_f4_muscovite_RAXR_MI(h,k,x,y,index,height_offset,version) elif fit_mode=='MD': return self.calc_f4_muscovite_RAXR_MD(h,k,x,y,index,height_offset,version) def calc_f4_muscovite_CTR(self, h, k, l,height_offset=0,version=1): #now the coherence is either true or force corresponding to coherent and incoherent summation of structure factor ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) if version==1: f_surface=self.calc_fs elif version>=1.1: f_surface=self.calc_fs_muscovite f_layered_water=self.calc_f_layered_water_muscovite(h,k,l,self.domain['layered_water_pars'],height_offset) if self.domain['freeze']:#the raxs el has no effect on the structure factor f_layered_sorbate=0 else: f_layered_sorbate=self.calc_f_layered_sorbate_muscovite(h,k,l,self.domain['layered_sorbate_pars'],height_offset) domains=self.domain['domains'] if coherence: for i in range(len(domains)): ftot=ftot+getattr(self.domain['global_vars'],'wt'+str(i+1))*(fb+f_surface(h,k,l,[domains[i]])+f_layered_water+f_layered_sorbate) else: for i in range(len(domains)): ftot=ftot+getattr(self.domain['global_vars'],'wt'+str(i+1))*abs(fb+f_surface(h,k,l,[domains[i]])+f_layered_water+f_layered_sorbate) return abs(ftot)*self.inst.inten def calc_f4_muscovite_RAXR_MI(self,h,k,x,y,index,height_offset=0,version=1): h, k, l, E, E0, f1f2, a, b, c, 
resonant_el=h,k,y,x,self.domain['E0'],self.domain['F1F2'],self.domain['raxs_vars']['a'+str(index)],self.domain['raxs_vars']['b'+str(index)],self.domain['raxs_vars']['c'+str(index)],self.domain['el'] ftot=0 def _extract_f1f2(f1f2,E): E_f1f2=np.around(f1f2[:,2],0)#make sure E in eV E=np.around(E,0) index=[] for each_E in E_f1f2: if each_E in E: index.append(np.where(E_f1f2==each_E)[0][0]) return f1f2[index,:] f1f2=_extract_f1f2(f1f2,E) coherence=self.coherence fb = self.calc_fb(h, k, l) if version==1: f_surface=self.calc_fs elif version>=1.1: f_surface=self.calc_fs_muscovite f_layered_water=self.calc_f_layered_water_muscovite(h,k,l,self.domain['layered_water_pars'],height_offset) f_layered_sorbate=self.calc_f_layered_sorbate_muscovite_RAXR(h,k,l,self.domain['layered_sorbate_pars'],height_offset,f1f2) #only consider one set of Fourier components in the whole strucutre A_list=[self.domain['raxs_vars']['A'+str(index)+'_D'+str(i+1)] for i in range(1)] P_list=[self.domain['raxs_vars']['P'+str(index)+'_D'+str(i+1)] for i in range(1)] domains=self.domain['domains'] if coherence: for i in range(len(domains)): ftot=ftot+getattr(self.domain['global_vars'],'wt'+str(i+1))*(fb+f_surface(h,k,l,[domains[i]])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[0]*np.exp(1.0J*np.pi*2*P_list[0])) else: for i in range(len(domains)): ftot=ftot+getattr(self.domain['global_vars'],'wt'+str(i+1))*abs(fb+f_surface(h,k,l,[domains[i]])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[0]*np.exp(1.0J*np.pi*2*P_list[0])) ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*abs(ftot) #linear attenuation of resonant element from liquid film if version>=1.2: #electron radius _re=self.domain['exp_factors'][2] #film thickness _mu=self.domain['exp_factors'][1] #ra element concentration _ra_con=self.domain['exp_factors'][4] _q=np.pi*2*self.unit_cell.abs_hkl(h,k,l) L_attn=np.array(16*np.pi*(_re/1e7)*6.02e23*_ra_con/1e6*(_mu/1e3)/(_q*1e7)) li_attn=(np.exp(-L_attn*f1f2[:,1]))**0.5 return ftot/li_attn*self.inst.inten else: return ftot*self.inst.inten def calc_f4_muscovite_RAXR_MD(self,h,k,x,y,index,height_offset=0,version=1): h, k, l, E, E0, f1f2, a, b, c, resonant_el=h,k,y,x,self.domain['E0'],self.domain['F1F2'],self.domain['raxs_vars']['a'+str(index)],self.domain['raxs_vars']['b'+str(index)],self.domain['raxs_vars']['c'+str(index)],self.domain['el'] ftot=0 def _extract_f1f2(f1f2,E): E_f1f2=np.around(f1f2[:,2],0)#make sure E in eV E=np.around(E,0) index=[] for each_E in E_f1f2: if each_E in E: index.append(np.where(E_f1f2==each_E)[0][0]) return f1f2[index,:] if len(f1f2)!=len(E): f1f2=_extract_f1f2(f1f2,E) coherence=self.coherence fb = self.calc_fb(h, k, l) if version==1: f_surface=self.calc_fs_RAXR elif version>=1.1: f_surface=self.calc_fs_RAXR_muscovite f_layered_water=self.calc_f_layered_water_muscovite(h,k,l,self.domain['layered_water_pars'],height_offset) f_layered_sorbate=self.calc_f_layered_sorbate_muscovite_RAXR(h,k,l,self.domain['layered_sorbate_pars'],height_offset,f1f2) domains=self.domain['domains'] if coherence: for i in range(len(domains)): ftot=ftot+getattr(self.domain['global_vars'],'wt'+str(i+1))*(fb+f_surface(h, k, l,[domains[i]],f1f2,resonant_el)+f_layered_water+f_layered_sorbate) else: for i in range(len(domains)): ftot=ftot+getattr(self.domain['global_vars'],'wt'+str(i+1))*abs(fb+f_surface(h, k, l,[domains[i]],f1f2,resonant_el)+f_layered_water+f_layered_sorbate) ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*abs(ftot) #linear attenuation of resonant element from liquid film if 
version>=1.2: #electron radius _re=self.domain['exp_factors'][2] #film thickness _mu=self.domain['exp_factors'][1] #ra element concentration _ra_con=self.domain['exp_factors'][4] _q=np.pi*2*self.unit_cell.abs_hkl(h,k,l) L_attn=np.array(16*np.pi*(_re/1e7)*6.02e23*_ra_con/1e6*(_mu/1e3)/(_q*1e7)) li_attn=(np.exp(-L_attn*f1f2[:,1]))**0.5 return ftot/li_attn*self.inst.inten else: return ftot*self.inst.inten def cal_structure_factor_hematite_RAXR(self,i,VARS,RAXR_FIT_MODE,RESONANT_EL_LIST,RAXR_EL,h, k, y, x, E0, F1F2,SCALES,rough): a=getattr(VARS['rgh_raxr'],'a'+str(i+1)) b=getattr(VARS['rgh_raxr'],'b'+str(i+1)) c=getattr(VARS['rgh_raxr'],'c'+str(i+1)) if RAXR_FIT_MODE=='MI': A_list,P_list=[],[] for index_resonant_el in range(len(RESONANT_EL_LIST)): A_list_domain=0 P_list_domain=0 if RESONANT_EL_LIST[index_resonant_el]!=0: A_list_domain=getattr(VARS['rgh_raxr'],'A_D'+str(index_resonant_el+1)+'_'+str(i+1)) P_list_domain=getattr(VARS['rgh_raxr'],'P_D'+str(index_resonant_el+1)+'_'+str(i+1)) A_list.append(A_list_domain) P_list.append(P_list_domain) if h[0]==0 and k[0]==0:#consider layered water only for specular rod if existent f = SCALES[0]*rough*self.calc_f4_specular_hematite_RAXR_MI(h, k, y, x, E0, F1F2, a, b, c, A_list, P_list, RESONANT_EL_LIST,RAXR_EL) else: f = rough*self.calc_f4_nonspecular_hematite_RAXR_MI(h, k, y, x, E0, F1F2, a, b, c, A_list, P_list, RESONANT_EL_LIST) elif RAXR_FIT_MODE=='MD': if h[0]==0 and k[0]==0:#consider layered water only for specular rod if existent f = SCALES[0]*rough*self.calc_f4_specular_hematite_RAXR_MD(h, k, y, x, E0, F1F2, a, b, c, RESONANT_EL_LIST,RAXR_EL) else: f = rough*self.calc_f4_nonspecular_hematite_RAXR_MD(h, k, y, x, E0, F1F2, a, b, c, RESONANT_EL_LIST) return f def calc_f4_specular_hematite_RAXR_MD(self, h, k, l,E,E0,f1f2,a,b,c,resonant_els=[1,0,0],raxr_el=''): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] #resonant_els:a list of True or False specifying whether or not considering the resonant scattering in each domain # so the len(resonant_els) is equal to the total domain numbers #E is the energy scan list, and make sure items in E is one-to-one corresponding to those in f1f2 #E0 is the center of the range of energy scan #f1f2 numpy array of anomalous correction items (n*2 shape) with the first column as f' and the second as f'' #a,b are fitting parameters for extrinsic factors #P_list and A_list are two lists of Fourier components. Depending on the total domains, you can consider different Fourier # components for chemically different domains.Note in P or A_list, the 0 item means no resonant element # so len(P_list)==len(resonant_els) #Resonant structure factor is calculated using equation (9) presented in paper of "Park, Changyong and Fenter, Paul A.(2007) J. Appl. 
Cryst.40, 290-301" ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) f_surface=self.calc_fs_hematite_RAXR_MD #(h, k, l,slabs,f1f2,raxr_el) for n in range(len(coherence)): ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] for i in coherence[n].values()[0]: keys_domainA.append('domain'+str(i+1)+'A') keys_domainB.append('domain'+str(i+1)+'B') for i in keys_domainA: ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index f_layered_water=0 f_layered_sorbate=0 if self.domain[i]['layered_water']!=[]:#consider layered water? f_layered_water=self.calc_f_layered_water_hematite(h,k,l,*self.domain[i]['layered_water']) if 'layered_sorbate' in self.domain[i].keys():#consider layered sorbate? if self.domain[i]['layered_sorbate']!=[]: f_layered_sorbate=self.calc_f_layered_sorbate_hematite_RAXR_MD(h,k,l,raxr_el,*self.domain[i]['layered_sorbate']) if coherence[n].keys()[0]: if resonant_els[ii]: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] else: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water)*self.domain[i]['wt'] else: if resonant_els[ii]: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water)*self.domain[i]['wt'] for i in keys_domainB: #in this specific case (rcut hematite, domainB is symmetricaly related to domainA with half unit cell step lower) #in light of that, the Fourier component A(amplitude) is same as that for the associated domainA, but the other one (phase) should be 0.5 off ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index f_layered_water=0 f_layered_sorbate=0 if self.domain[i]['layered_water']!=[]: f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water']) if 'layered_sorbate' in self.domain[i].keys(): if self.domain[i]['layered_sorbate']!=[]: f_layered_sorbate=self.calc_f_layered_sorbate_hematite_RAXR_MD(h,k,l,*self.domain[i]['layered_sorbate']) if coherence[n].keys()[0]: if resonant_els[ii]: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] else: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water)*self.domain[i]['wt'] else: if resonant_els[ii]: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el)+f_layered_water)*self.domain[i]['wt'] ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C)) return abs(ftot)*self.inst.inten def calc_f4_nonspecular_hematite_RAXR_MD(self, h, k, l,E,E0,f1f2,a,b,c,resonant_els=[1,0,0],raxr_el=''): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. 
After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] #resonant_els:a list of True or False specifying whether or not considering the resonant scattering in each domain # so the len(resonant_els) is equal to the total domain numbers #E is the energy scan list, and make sure items in E is one-to-one corresponding to those in f1f2 #E0 is the center of the range of energy scan #f1f2 numpy array of anomalous correction items (n*2 shape) with the first column as f' and the second as f'' #a,b are fitting parameters for extrinsic factors #P_list and A_list are two lists of Fourier components. Depending on the total domains, you can consider different Fourier # components for chemically different domains.Note in P or A_list, the 0 item means no resonant element # so len(P_list)==len(resonant_els) #Resonant structure factor is calculated using equation (9) presented in paper of "Park, Changyong and Fenter, Paul A.(2007) J. Appl. Cryst.40, 290-301" ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) f_surface=self.calc_fs_hematite_RAXR_MD #(h, k, l,slabs,f1f2,raxr_el) for n in range(len(coherence)): ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] for i in coherence[n].values()[0]: keys_domainA.append('domain'+str(i+1)+'A') keys_domainB.append('domain'+str(i+1)+'B') for i in keys_domainA: ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index if coherence[n].keys()[0]: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el))*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el))*self.domain[i]['wt'] for i in keys_domainB: #in this specific case (rcut hematite, domainB is symmetricaly related to domainA with half unit cell step lower) #in light of that, the Fourier component A(amplitude) is same as that for the associated domainA, but the other one (phase) should be 0.5 off ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index if coherence[n].keys()[0]: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el))*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,raxr_el))*self.domain[i]['wt'] ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C)) return abs(ftot)*self.inst.inten def calc_f4_specular_hematite_RAXR_MI(self, h, k, l,E,E0,f1f2,a,b,c,A_list=[],P_list=[],resonant_els=[1,0,0],raxr_el=''): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. 
After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] #resonant_els:a list of True or False specifying whether or not considering the resonant scattering in each domain # so the len(resonant_els) is equal to the total domain numbers #E is the energy scan list, and make sure items in E is one-to-one corresponding to those in f1f2 #E0 is the center of the range of energy scan #f1f2 numpy array of anomalous correction items (n*2 shape) with the first column as f' and the second as f'' #a,b are fitting parameters for extrinsic factors #P_list and A_list are two lists of Fourier components. Depending on the total domains, you can consider different Fourier # components for chemically different domains.Note in P or A_list, the 0 item means no resonant element # so len(P_list)==len(resonant_els) #Resonant structure factor is calculated using equation (9) presented in paper of "Park, Changyong and Fenter, Paul A.(2007) J. Appl. Cryst.40, 290-301" ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) f_surface=self.calc_fs for n in range(len(coherence)): ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] for i in coherence[n].values()[0]: keys_domainA.append('domain'+str(i+1)+'A') keys_domainB.append('domain'+str(i+1)+'B') for i in keys_domainA: ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index f_layered_water=0 f_layered_sorbate=0 if self.domain[i]['layered_water']!=[]:#consider layered water? f_layered_water=self.calc_f_layered_water_hematite(h,k,l,*self.domain[i]['layered_water']) if 'layered_sorbate' in self.domain[i].keys():#consider layered sorbate? 
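#(note: the resonant correction added just below, (f1f2[:,0]+1.0J*f1f2[:,1])*A*exp(2j*pi*P),
# is the model-independent Fourier-component form of eq. (9) in Park & Fenter (2007),
# J. Appl. Cryst. 40, 290-301, cited above; A and P are the amplitude and phase of the
# resonant-element Fourier component for the given domain)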
if self.domain[i]['layered_sorbate']!=[]: f_layered_sorbate=self.calc_f_layered_sorbate_hematite(h,k,l,raxr_el,*self.domain[i]['layered_sorbate']) if coherence[n].keys()[0]: if resonant_els[ii]: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt'] else: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt'] else: if resonant_els[ii]: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt'] for i in keys_domainB: #in this specific case (rcut hematite, domainB is symmetricaly related to domainA with half unit cell step lower) #in light of that, the Fourier component A(amplitude) is same as that for the associated domainA, but the other one (phase) should be 0.5 off ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index f_layered_water=0 f_layered_sorbate=0 if self.domain[i]['layered_water']!=[]: f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water']) if 'layered_sorbate' in self.domain[i].keys(): if self.domain[i]['layered_sorbate']!=[]: f_layered_sorbate=self.calc_f_layered_sorbate_hematite(h,k,l,*self.domain[i]['layered_sorbate']) if coherence[n].keys()[0]: if resonant_els[ii]: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt'] else: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt'] else: if resonant_els[ii]: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt'] ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C)) return abs(ftot)*self.inst.inten def calc_f4_nonspecular_hematite_RAXR_MI(self, h, k, l,E,E0,f1f2,a,b,c,A_list=[],P_list=[],resonant_els=[1,0,0],raxr_el=''): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] #resonant_els:a list of True or False specifying whether or not considering the resonant scattering in each domain # so the len(resonant_els) is equal to the total domain numbers #E is the energy scan list, and make sure items in E is one-to-one corresponding to those in f1f2 #E0 is the center of the range of energy scan #f1f2 numpy array of anomalous correction items (n*2 shape) with the first column as f' and the second as f'' #a,b are fitting parameters for extrinsic factors #P_list and A_list are two lists of Fourier components. 
Depending on the total domains, you can consider different Fourier # components for chemically different domains.Note in P or A_list, the 0 item means no resonant element # so len(P_list)==len(resonant_els) #Resonant structure factor is calculated using equation (9) presented in paper of "Park, Changyong and Fenter, Paul A.(2007) J. Appl. Cryst.40, 290-301" ftot=0 coherence=self.coherence fb = self.calc_fb(h, k, l) f_surface=self.calc_fs for n in range(len(coherence)): ftot_A_C, ftot_A_IC=0,0 ftot_B_C, ftot_B_IC=0,0 keys_domainA=[] keys_domainB=[] for i in coherence[n].values()[0]: keys_domainA.append('domain'+str(i+1)+'A') keys_domainB.append('domain'+str(i+1)+'B') for i in keys_domainA: ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index if coherence[n].keys()[0]: ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] for i in keys_domainB: #in this specific case (rcut hematite, domainB is symmetricaly related to domainA with half unit cell step lower) #in light of that, the Fourier component A(amplitude) is same as that for the associated domainA, but the other one (phase) should be 0.5 off ii=int(i[6:-1])-1#extract the domain index from the domain key, eg for "domain10A" will have a 9 as the domain index if coherence[n].keys()[0]: ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] else: ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt'] ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C)) return abs(ftot)*self.inst.inten def calc_f4_specular_RAXR(self, h, k, l,E,E0,f1f2,a,b,A_list=[],P_list=[],resonant_els=[1,0,0],raxr_el=''): #now the coherence looks like [{True:[0,1]},{False:[2,3]}] which means adding up first two domains coherently #and last two domains in-coherently. After calculation of structure factor for each item of the list, absolute #value of SF will be calculated followed by being summed up #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}] #resonant_els:a list of True or False specifying whether or not considering the resonant scattering in each domain # so the len(resonant_els) is equal to the total domain numbers #E is the energy scan list, and make sure items in E is one-to-one corresponding to those in f1f2 #E0 is the center of the range of energy scan #f1f2 numpy array of anomalous correction items (n*2 shape) with the first column as f' and the second as f'' #a,b are fitting parameters for extrinsic factors #P_list and A_list are two lists of Fourier components. Depending on the total domains, you can consider different Fourier # components for chemically different domains.Note in P or A_list, the 0 item means no resonant element # so len(P_list)==len(resonant_els) #Resonant structure factor is calculated using equation (9) presented in paper of "Park, Changyong and Fenter, Paul A.(2007) J. Appl. 
    def calc_f4_specular_RAXR(self, h, k, l,E,E0,f1f2,a,b,A_list=[],P_list=[],resonant_els=[1,0,0],raxr_el=''):
        #now the coherence looks like [{True:[0,1]},{False:[2,3]}], which means adding up the first two domains coherently
        #and the last two domains incoherently. After the structure factor is calculated for each item of the list, the
        #absolute value of the SF is taken and the results are summed up,
        #so [{True:[0,1]},{True:[2,3]}] is different from [{True:[0,1,2,3]}]
        #resonant_els: a list of True or False values specifying whether or not the resonant scattering is considered in each domain,
        #   so len(resonant_els) is equal to the total number of domains
        #E is the energy scan list; make sure the items in E correspond one-to-one to those in f1f2
        #E0 is the center of the energy scan range
        #f1f2: numpy array of anomalous correction terms (n*2 shape) with the first column f' and the second f''
        #a,b are fitting parameters for extrinsic factors
        #P_list and A_list are two lists of Fourier components. Depending on the total number of domains, you can consider different
        #   Fourier components for chemically different domains. Note that in P_list or A_list a 0 item means no resonant element,
        #   so len(P_list)==len(resonant_els)
        #the resonant structure factor is calculated using equation (9) presented in the paper "Park, Changyong and Fenter, Paul A. (2007) J. Appl. Cryst. 40, 290-301"
        ftot=0
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs
        for n in range(len(coherence)):
            ftot_A_C, ftot_A_IC=0,0
            ftot_B_C, ftot_B_IC=0,0
            keys_domainA=[]
            keys_domainB=[]
            for i in coherence[n].values()[0]:
                keys_domainA.append('domain'+str(i+1)+'A')
                keys_domainB.append('domain'+str(i+1)+'B')
            for i in keys_domainA:
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                f_layered_water=0
                f_layered_sorbate=0
                if self.domain[i]['layered_water']!=[]:#consider layered water?
                    f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                if 'layered_sorbate' in self.domain[i].keys():#consider layered sorbate?
                    if self.domain[i]['layered_sorbate']!=[]:
                        f_layered_sorbate=self.calc_f_layered_sorbate_RAXR(h,k,l,raxr_el,*self.domain[i]['layered_sorbate'])
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt']
                    else:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
            for i in keys_domainB:
                #in this specific case (rcut hematite), domainB is symmetrically related to domainA, sitting half a unit cell step lower;
                #in light of that, the Fourier component A (amplitude) is the same as that of the associated domainA, but the other one (phase) should be offset by 0.5
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                f_layered_water=0
                f_layered_sorbate=0
                if self.domain[i]['layered_water']!=[]:
                    f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                if 'layered_sorbate' in self.domain[i].keys():
                    if self.domain[i]['layered_sorbate']!=[]:
                        f_layered_sorbate=self.calc_f_layered_sorbate_RAXR(h,k,l,raxr_el,*self.domain[i]['layered_sorbate'])#raxr_el is passed here as well, matching the domainA branch above
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt']
                    else:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
            ftot=(a+b*(E-E0))*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
            #ftot=ftot+ftot_A_C+ftot_A_IC+ftot_B_IC+ftot_B_C
        return abs(ftot)*self.inst.inten
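    #Background models used by the *_MI and *_MD methods below (restating the per-method comments in one place):
    #   linear:    bkg = (a*(E-E0)+1)*b
    #   Victoreen: bkg = np.exp(-a*(E-E0)**2/E0**2 + b*(E-E0)/E0)*c
    #the choice is made from how many keyword arguments arrive in **abc (two -> linear, three -> Victoreen).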
    def calc_f4_specular_RAXR_MI(self, h, k, l,E,E0,f1f2,A_list=[],P_list=[],resonant_els=[1,0,0],**abc):
        #calculate the structure factor in the process of model-independent RAXR fitting
        #use a linear background function (abc.keys=['a','b']) or a Victoreen background function (abc.keys=['a','b','c'])
        #Linear func:    slope{n} = (a(n)*(E{n}-Eo)+1)*b(n)*norm_offset*1/q(n)^2;
        #Victoreen func: slope{n} = exp(-a(n)*(E{n}-Eo).^2/Eo^2 + b(n)*(E{n}-Eo)/Eo) * c(n);
        #see the comments of calc_f4_specular_RAXR above for the meaning of coherence, resonant_els, E, E0, f1f2 and A_list/P_list;
        #the resonant structure factor is calculated using equation (9) of "Park, Changyong and Fenter, Paul A. (2007) J. Appl. Cryst. 40, 290-301"
        if len(abc.keys())==3:
            a,b,c=abc['a'],abc['b'],abc['c']
        elif len(abc.keys())==2:
            a,b=abc['a'],abc['b']
        ftot=0
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs
        for n in range(len(coherence)):
            ftot_A_C, ftot_A_IC=0,0
            ftot_B_C, ftot_B_IC=0,0
            keys_domainA=[]
            keys_domainB=[]
            for i in coherence[n].values()[0]:
                keys_domainA.append('domain'+str(i+1)+'A')
                keys_domainB.append('domain'+str(i+1)+'B')
            for i in keys_domainA:
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                f_layered_water=0
                f_layered_sorbate=0
                if self.domain[i]['layered_water']!=[]:#consider layered water?
                    f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                if 'layered_sorbate' in self.domain[i].keys():#consider layered sorbate?
                    if self.domain[i]['layered_sorbate']!=[]:
                        f_layered_sorbate=self.calc_f_layered_sorbate_RAXR(h,k,l,*self.domain[i]['layered_sorbate'])
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt']
                    else:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
            for i in keys_domainB:
                #in this specific case (rcut hematite), domainB is symmetrically related to domainA, sitting half a unit cell step lower;
                #the Fourier component A (amplitude) is therefore the same as that of the associated domainA, but the phase should be offset by 0.5
                ii=int(i[6:-1])-1
                f_layered_water=0
                f_layered_sorbate=0
                if self.domain[i]['layered_water']!=[]:
                    f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                if 'layered_sorbate' in self.domain[i].keys():
                    if self.domain[i]['layered_sorbate']!=[]:
                        f_layered_sorbate=self.calc_f_layered_sorbate_RAXR(h,k,l,*self.domain[i]['layered_sorbate'])
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt']
                    else:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water+f_layered_sorbate+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
            if len(abc.keys())==2:
                ftot=(a*(E-E0)+1)*b*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
            else:
                ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
        return abs(ftot)*self.inst.inten

    def calc_f4_offspecular_RAXR_MI(self, h, k, l,E,E0,f1f2,A_list=[],P_list=[],resonant_els=[1,0,0],**abc):
        #calculate the structure factor in the process of model-independent RAXR fitting for offspecular rods (no influence from layered water and sorbates)
        #use a linear background function (abc.keys=['a','b']) or a Victoreen background function (abc.keys=['a','b','c'])
        #Linear func:    slope{n} = (a(n)*(E{n}-Eo)+1)*b(n)*norm_offset*1/q(n)^2;
        #Victoreen func: slope{n} = exp(-a(n)*(E{n}-Eo).^2/Eo^2 + b(n)*(E{n}-Eo)/Eo) * c(n);
        #see the comments of calc_f4_specular_RAXR above for the meaning of coherence, resonant_els, E, E0, f1f2 and A_list/P_list
        if len(abc.keys())==3:
            a,b,c=abc['a'],abc['b'],abc['c']
        elif len(abc.keys())==2:
            a,b=abc['a'],abc['b']
        ftot=0
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs
        for n in range(len(coherence)):
            ftot_A_C, ftot_A_IC=0,0
            ftot_B_C, ftot_B_IC=0,0
            keys_domainA=[]
            keys_domainB=[]
            for i in coherence[n].values()[0]:
                keys_domainA.append('domain'+str(i+1)+'A')
                keys_domainB.append('domain'+str(i+1)+'B')
            for i in keys_domainA:
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt']
                    else:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*P_list[ii]))*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
            for i in keys_domainB:
                #domainB is symmetrically related to domainA (see calc_f4_specular_RAXR above); the phase is offset by 0.5
                ii=int(i[6:-1])-1
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt']
                    else:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*A_list[ii]*np.exp(1.0J*np.pi*2*(P_list[ii]-0.5*l[0])))*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
            if len(abc.keys())==2:
                ftot=(a*(E-E0)+1)*b*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
            else:
                ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
        return abs(ftot)*self.inst.inten
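    #In the model-independent (MI) methods above the resonant contribution of domain ii enters the
    #structure factor as (f'+1J*f'')*A_list[ii]*np.exp(2*np.pi*1J*P_list[ii]), i.e. one (amplitude, phase)
    #Fourier pair per domain, following eq. (9) of Park & Fenter (2007) J. Appl. Cryst. 40, 290-301;
    #for a domainB the phase is shifted by 0.5 (0.5*l[0] on a rod) relative to its domainA partner.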
    def calc_f4_specular_RAXR_for_test_purpose(self, h, k, l,f1f2,res_el='Pb'):
        #this function is used to generate an arbitrary RAXR dataset for testing purposes
        #hkl is a list of hkl values
        #f1f2 is in the form [[f1_1,f2_1],[f1_2,f2_2]]
        #the return value is in the form [[],[]] with length len(f1f2), and the length of each item is len(hkl)
        #see the comments of calc_f4_specular_RAXR above for the meaning of the coherence spec
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs_test_purpose
        f_total_container=[]
        for each_f1f2 in f1f2:
            ftot=0
            for n in range(len(coherence)):
                ftot_A_C, ftot_A_IC=0,0
                ftot_B_C, ftot_B_IC=0,0
                keys_domainA=[]
                keys_domainB=[]
                for i in coherence[n].values()[0]:
                    keys_domainA.append('domain'+str(i+1)+'A')
                    keys_domainB.append('domain'+str(i+1)+'B')
                for i in keys_domainA:
                    ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                    f_layered_water=0
                    if self.domain[i]['layered_water']!=[]:
                        f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                    if coherence[n].keys()[0]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],each_f1f2,res_el)+f_layered_water)*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],each_f1f2,res_el)+f_layered_water)*self.domain[i]['wt']
                for i in keys_domainB:
                    #in this specific case (rcut hematite), domainB is symmetrically related to domainA, sitting half a unit cell step lower
                    ii=int(i[6:-1])-1
                    f_layered_water=0
                    if self.domain[i]['layered_water']!=[]:
                        f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                    if coherence[n].keys()[0]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],each_f1f2,res_el)+f_layered_water)*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],each_f1f2,res_el)+f_layered_water)*self.domain[i]['wt']
                ftot=ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C)
                #ftot=ftot+ftot_A_C+ftot_A_IC+ftot_B_IC+ftot_B_C
            f_total_container.append(abs(ftot)*self.inst.inten)
        return f_total_container
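    #In the model-dependent (MD) methods below the resonant element is handled structurally instead: its
    #atomic form factor is corrected inside calc_fs_RAXR (f0 -> f0+f'+1J*f''), so no (A, P) Fourier pair is
    #needed; calc_fs_test_purpose further down writes the same correction out explicitly.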
    def calc_f4_specular_RAXR_MD(self, h, k, l,E,E0,f1f2,resonant_els=[1,0,0],res_el='Zr',**abc):
        #calculate the structure factor in the process of model-dependent RAXR fitting
        #use a linear background function (abc.keys=['a','b']) or a Victoreen background function (abc.keys=['a','b','c'])
        #Linear func:    slope{n} = (a(n)*(E{n}-Eo)+1)*b(n)*norm_offset*1/q(n)^2;
        #Victoreen func: slope{n} = exp(-a(n)*(E{n}-Eo).^2/Eo^2 + b(n)*(E{n}-Eo)/Eo) * c(n);
        #see the comments of calc_f4_specular_RAXR above for the meaning of coherence, resonant_els, E, E0 and f1f2
        if len(abc.keys())==3:
            a,b,c=abc['a'],abc['b'],abc['c']
        elif len(abc.keys())==2:
            a,b=abc['a'],abc['b']
        ftot=0
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs_RAXR
        for n in range(len(coherence)):
            ftot_A_C, ftot_A_IC=0,0
            ftot_B_C, ftot_B_IC=0,0
            keys_domainA=[]
            keys_domainB=[]
            for i in coherence[n].values()[0]:
                keys_domainA.append('domain'+str(i+1)+'A')
                keys_domainB.append('domain'+str(i+1)+'B')
            for i in keys_domainA:
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                f_layered_water=0
                f_layered_sorbate=0
                if self.domain[i]['layered_water']!=[]:#consider layered water?
                    f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                if 'layered_sorbate' in self.domain[i].keys():#consider layered sorbate?
                    if self.domain[i]['layered_sorbate']!=[]:
                        f_layered_sorbate=self.calc_f_layered_sorbate_RAXR(h,k,l,*self.domain[i]['layered_sorbate'])
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt']
                    else:
                        ftot_A_C=ftot_A_C+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
            for i in keys_domainB:
                #in this specific case (rcut hematite), domainB is symmetrically related to domainA, sitting half a unit cell step lower
                ii=int(i[6:-1])-1
                f_layered_water=0
                f_layered_sorbate=0
                if self.domain[i]['layered_water']!=[]:
                    f_layered_water=self.calc_f_layered_water(h,k,l,*self.domain[i]['layered_water'])
                if 'layered_sorbate' in self.domain[i].keys():
                    if self.domain[i]['layered_sorbate']!=[]:
                        f_layered_sorbate=self.calc_f_layered_sorbate_RAXR(h,k,l,*self.domain[i]['layered_sorbate'])
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt']
                    else:
                        ftot_B_C=ftot_B_C+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el)+f_layered_water+f_layered_sorbate)*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']])+f_layered_water)*self.domain[i]['wt']
            if len(abc.keys())==2:
                ftot=(a*(E-E0)+1)*b*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
            else:
                ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
        return abs(ftot)*self.inst.inten

    def calc_f4_offspecular_RAXR_MD(self, h, k, l,E,E0,f1f2,resonant_els=[1,0,0],res_el='Zr',**abc):
        #calculate the structure factor in the process of model-dependent RAXR fitting for offspecular rods (no influence from layered water and sorbates)
        #use a linear background function (abc.keys=['a','b']) or a Victoreen background function (abc.keys=['a','b','c'])
        #Linear func:    slope{n} = (a(n)*(E{n}-Eo)+1)*b(n)*norm_offset*1/q(n)^2;
        #Victoreen func: slope{n} = exp(-a(n)*(E{n}-Eo).^2/Eo^2 + b(n)*(E{n}-Eo)/Eo) * c(n);
        #see the comments of calc_f4_specular_RAXR above for the meaning of coherence, resonant_els, E, E0 and f1f2
        if len(abc.keys())==3:
            a,b,c=abc['a'],abc['b'],abc['c']
        elif len(abc.keys())==2:
            a,b=abc['a'],abc['b']
        ftot=0
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs_RAXR
        for n in range(len(coherence)):
            ftot_A_C, ftot_A_IC=0,0
            ftot_B_C, ftot_B_IC=0,0
            keys_domainA=[]
            keys_domainB=[]
            for i in coherence[n].values()[0]:
                keys_domainA.append('domain'+str(i+1)+'A')
                keys_domainB.append('domain'+str(i+1)+'B')
            for i in keys_domainA:
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el))*self.domain[i]['wt']
                    else:
                        ftot_A_C=ftot_A_C+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el))*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
            for i in keys_domainB:
                #in this specific case (rcut hematite), domainB is symmetrically related to domainA, sitting half a unit cell step lower
                ii=int(i[6:-1])-1
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el))*self.domain[i]['wt']
                    else:
                        ftot_B_C=ftot_B_C+(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']],f1f2,res_el))*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+self.calc_fs(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
            if len(abc.keys())==2:
                ftot=(a*(E-E0)+1)*b*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
            else:
                ftot=np.exp(-a*(E-E0)**2/E0**2+b*(E-E0)/E0)*c*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
        return abs(ftot)*self.inst.inten
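    #The surface structure factor evaluated by calc_fs_test_purpose below (and its calc_fs siblings) follows
    #   F_s = sum_j oc_j*f_j*exp(-2*pi^2*u_j^2*dinv^2)*sum_sym exp(2*pi*1J*(h*x'_j+k*y'_j+l*(z_j+1)))
    #where exp(-2*pi^2*u^2*dinv^2)=exp(-q^2*u^2/2) (q=2*pi*dinv) is the isotropic Debye-Waller factor and
    #the z+1 offset places the surface slab on top of the bulk slab.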
    def calc_fs_test_purpose(self, h, k, l,slabs,single_f1f2,res_el):
        '''Calculate the structure factors from the surface
        '''
        #print single_f1f2
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars(slabs)
        f=self._get_f(el, dinv)
        shape=f.shape
        f_offset=np.zeros(shape=shape)+0J
        for i in range(shape[0]):
            for j in range(shape[1]):
                if res_el==el[j]:
                    f_offset[i][j]=single_f1f2[0]+1.0J*single_f1f2[1]
        f=f+f_offset
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:,np.newaxis]**2)\
             *np.sum([np.exp(2.0*np.pi*1.0J*(
                          h[:,np.newaxis]*(sym_op.trans_x(x, y)+self.delta1)+
                          k[:,np.newaxis]*(sym_op.trans_y(x, y)+self.delta2)+
                          l[:,np.newaxis]*(z[np.newaxis, :]+1)))
                      for sym_op in self.surface_sym], 0)
             ,1)
        return fs

    #calculate the Fourier components
    #this function will only consider the specular rod
    #it will calculate the Fourier components only for the domainA's
    def find_A_P(self,l,res_el,print_AP=False):
        keys=self.domain.keys()
        keys_domainA=[key for key in keys if "A" in key]
        keys_domainA.sort()
        A,P={},{}
        for each_key in keys_domainA:
            single_domain=self.domain[each_key]
            slabs=[single_domain['slab']]
            domain_wt=single_domain['wt']
            dinv = self.unit_cell.abs_hkl(np.zeros(len(l)), np.zeros(len(l)), np.array(l))
            x, y, z, u, oc, el = self._surf_pars(slabs)
            sorbate_index=[i for i in range(len(el)) if el[i]==res_el]
            A_container,P_container=[],[]
            for each_l in l:
                q=each_l*2*np.pi/self.unit_cell.c
                complex_sum=0.+1.0J*0.
                for i in sorbate_index:
                    complex_sum+=oc[i]*np.exp(-q**2*u[i]**2/2)*np.exp(1.0J*2*np.pi*each_l*(z[i]+1))#z should be offset by +1 to account for the surface slab sitting on top of the bulk slab
                A_container.append(domain_wt*abs(complex_sum))
                img_complex_sum, real_complex_sum=np.imag(complex_sum),np.real(complex_sum)
                if img_complex_sum==0.:
                    P_container.append(0)
                elif real_complex_sum==0 and img_complex_sum==1:
                    P_container.append(0.25)#(pi/2)/(2*pi)
                elif real_complex_sum==0 and img_complex_sum==-1:
                    P_container.append(0.75)#(3*pi/2)/(2*pi)
                else:#an adjustment is needed since np.arctan returns values in the range (-pi/2, pi/2)
                    if real_complex_sum>0 and img_complex_sum>0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.)
                    elif real_complex_sum>0 and img_complex_sum<0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+1.)
                    elif real_complex_sum<0 and img_complex_sum>0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
                    elif real_complex_sum<0 and img_complex_sum<0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
            A[each_key]=A_container
            P[each_key]=P_container
        if print_AP:
            print "l list=",l
            for each_key in keys_domainA:
                print '\n',each_key
                print "A list=",['%.4f' % each_A for each_A in A[each_key]]
                print "P list=",['%.4f' % each_P for each_P in P[each_key]]
        return l,A,P

    #calculate the Fourier components
    #this function will only consider the specular rod
    #it will calculate the Fourier components only for the domainA's
    def find_A_P_hematite(self,h,k,l,res_el):
        keys=self.domain.keys()
        keys_domainA=[key for key in keys if "A" in key]
        keys_domainA.sort()
        dinv = self.unit_cell.abs_hkl(np.array(h), np.array(k), np.array(l))
        Q=np.pi*2*dinv
        A,P={},{}
        for each_key in keys_domainA:
            single_domain=self.domain[each_key]
            slabs=[single_domain['slab']]
            #domain_wt=single_domain['wt']
            domain_wt=1
            dinv = self.unit_cell.abs_hkl(np.zeros(len(l)), np.zeros(len(l)), np.array(l))
            x, y, z, u, oc, el = self._surf_pars(slabs)
            sorbate_index=[i for i in range(len(el)) if el[i]==res_el]
            A_container,P_container=[],[]
            for q_index in range(len(Q)):
                q=Q[q_index]
                complex_sum=0.+1.0J*0.
                for i in sorbate_index:
                    complex_sum+=oc[i]*np.exp(-q**2*u[i]**2/2)*np.exp(1.0J*q*(z[i]+1)*self.unit_cell.c)#z should be offset by +1 to account for the surface slab sitting on top of the bulk slab
                A_container.append(domain_wt*abs(complex_sum))
                img_complex_sum, real_complex_sum=np.imag(complex_sum),np.real(complex_sum)
                if img_complex_sum==0.:
                    P_container.append(0)
                elif real_complex_sum==0 and img_complex_sum==1:
                    P_container.append(0.25)#(pi/2)/(2*pi)
                elif real_complex_sum==0 and img_complex_sum==-1:
                    P_container.append(0.75)#(3*pi/2)/(2*pi)
                else:#an adjustment is needed since np.arctan returns values in the range (-pi/2, pi/2)
                    if real_complex_sum>0 and img_complex_sum>0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.)
                    elif real_complex_sum>0 and img_complex_sum<0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+1.)
                    elif real_complex_sum<0 and img_complex_sum>0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
                    elif real_complex_sum<0 and img_complex_sum<0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
            A[each_key]=A_container
            P[each_key]=P_container
        return A,P,Q

    def find_A_P_muscovite_original(self,h,k,l):
        A,P=[],[]
        hs,ks,ls=np.array([h]*100),np.array([k]*100),np.arange(0,l,l/100.)
        dinv = self.unit_cell.abs_hkl(hs, ks, ls)
        Q=np.pi*2*dinv
        for i in range(len(self.domain['domains'])):
            single_domain=self.domain['domains'][i]
            slabs=[single_domain]
            x, y, z, u, oc, el = self._surf_pars(slabs)
            res_el=self.domain['el']
            sorbate_index=[ii for ii in range(len(el)) if el[ii]==res_el]
            A_container,P_container=[],[]
            for q_index in range(len(Q)):
                q=Q[q_index]
                h_single,k_single,l_single=hs[q_index],ks[q_index],ls[q_index]
                complex_sum=0.+1.0J*0.
                for j in sorbate_index:
                    complex_sum+=oc[j]*np.exp(-q**2*u[j]**2/2)*np.exp(1.0J*2*np.pi*(h_single*x[j]+k_single*y[j]+l_single*(z[j]+1)))#z should be offset by +1 to account for the surface slab sitting on top of the bulk slab
                A_container.append(abs(complex_sum))
                img_complex_sum, real_complex_sum=np.imag(complex_sum),np.real(complex_sum)
                if img_complex_sum==0.:
                    P_container.append(0)
                elif real_complex_sum==0 and img_complex_sum==1:
                    P_container.append(0.25)#(pi/2)/(2*pi)
                elif real_complex_sum==0 and img_complex_sum==-1:
                    P_container.append(0.75)#(3*pi/2)/(2*pi)
                else:#an adjustment is needed since np.arctan returns values in the range (-pi/2, pi/2)
                    if real_complex_sum>0 and img_complex_sum>0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.)
                    elif real_complex_sum>0 and img_complex_sum<0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+1.)
                    elif real_complex_sum<0 and img_complex_sum>0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
                    elif real_complex_sum<0 and img_complex_sum<0:
                        P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
            A.append(A_container)
            P.append(P_container)
        return np.transpose(A),np.transpose(P),Q
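    #The find_A_P_* methods report, per q point, the modulus A and the phase P (in turns, i.e. in [0,1)) of
    #the resonant-element partial structure factor; the arctan branch corrections above implement the
    #quadrant mapping by hand. Up to the special cases for purely real or imaginary sums, a compact
    #equivalent (a sketch only, not used by this module) would be
    #   P = np.mod(np.angle(complex_sum), 2*np.pi)/(2*np.pi)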
    def find_A_P_muscovite(self,h,k,l):
        if type(h)==type([]):
            hs,ks,ls=h,k,l
        else:
            hs,ks,ls=np.array([h]*100),np.array([k]*100),np.arange(0,l,l/100.)
        dinv = self.unit_cell.abs_hkl(np.array(hs), np.array(ks), np.array(ls))
        Q=np.pi*2*dinv
        A_container,P_container=[],[]
        for q_index in range(len(Q)):
            q=Q[q_index]
            h_single,k_single,l_single=hs[q_index],ks[q_index],ls[q_index]
            complex_sum=0.+1.0J*0.
            for i in range(len(self.domain['domains'])):
                single_domain=self.domain['domains'][i]
                slabs=[single_domain]
                x, y, z, u, oc, el = self._surf_pars(slabs)
                res_el=self.domain['el']
                sorbate_index=[ii for ii in range(len(el)) if el[ii]==res_el]
                for j in sorbate_index:
                    #complex_sum+=getattr(self.domain['global_vars'],'wt'+str(i+1))*oc[j]*np.exp(-q**2*u[j]**2/2)*np.exp(1.0J*2*np.pi*(h_single*x[j]+k_single*y[j]+l_single*(z[j]+1)))#z should be offset by +1 to account for the surface slab sitting on top of the bulk slab
                    #l is not necessarily perpendicular to the z direction, hence the q*z form below
                    complex_sum+=getattr(self.domain['global_vars'],'wt'+str(i+1))*oc[j]*np.exp(-q**2*u[j]**2/2)*np.exp(1.0J*q*(z[j]+1)*self.unit_cell.c)
            A_container.append(abs(complex_sum))
            img_complex_sum, real_complex_sum=np.imag(complex_sum),np.real(complex_sum)
            if img_complex_sum==0.:
                P_container.append(0)
            elif real_complex_sum==0 and img_complex_sum==1:
                P_container.append(0.25)#(pi/2)/(2*pi)
            elif real_complex_sum==0 and img_complex_sum==-1:
                P_container.append(0.75)#(3*pi/2)/(2*pi)
            else:#an adjustment is needed since np.arctan returns values in the range (-pi/2, pi/2)
                if real_complex_sum>0 and img_complex_sum>0:
                    P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.)
                elif real_complex_sum>0 and img_complex_sum<0:
                    P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+1.)
                elif real_complex_sum<0 and img_complex_sum>0:
                    P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
                elif real_complex_sum<0 and img_complex_sum<0:
                    P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
        return np.array(A_container),np.array(P_container),Q

    def calc_f4_nonspecular_RAXR(self, h, k, l,E,E0,f1f2,a,b,A_list=[],P_list=[],resonant_els=[1,1,0]):
        #resonant_els: a list of integer numbers (>=0) specifying whether or not the resonant scattering is considered in each
        #   domain, and how many resonant species there are on each domain, so len(resonant_els) equals the total number of domains;
        #   here A_list[ii]/P_list[ii] hold one component per species, which are summed up below
        #see the comments of calc_f4_specular_RAXR above for the meaning of the coherence spec, E, E0, f1f2 and a,b;
        #the resonant structure factor is calculated using equation (9) of "Park, Changyong and Fenter, Paul A. (2007) J. Appl. Cryst. 40, 290-301"
        ftot=0
        coherence=self.coherence
        fb = self.calc_fb(h, k, l)
        f_surface=self.calc_fs
        for n in range(len(coherence)):
            ftot_A_C, ftot_A_IC=0,0
            ftot_B_C, ftot_B_IC=0,0
            keys_domainA=[]
            keys_domainB=[]
            for i in coherence[n].values()[0]:
                keys_domainA.append('domain'+str(i+1)+'A')
                keys_domainB.append('domain'+str(i+1)+'B')
            for i in keys_domainA:
                ii=int(i[6:-1])-1#extract the domain index from the domain key, e.g. "domain10A" gives 9 as the domain index
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*np.sum(np.array(A_list[ii])*np.exp(1.0J*np.pi*2*np.array(P_list[ii]))))*self.domain[i]['wt']
                    else:
                        ftot_A_C=ftot_A_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*np.sum(np.array(A_list[ii])*np.exp(1.0J*np.pi*2*np.array(P_list[ii]))))*self.domain[i]['wt']
                    else:
                        ftot_A_IC=ftot_A_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
            for i in keys_domainB:
                #in this specific case (rcut hematite), domainB is symmetrically related to domainA, sitting half a unit cell step lower;
                #the Fourier component A (amplitude) is therefore the same as that of the associated domainA, but the phase should be offset by 0.5
                ii=int(i[6:-1])-1
                if coherence[n].keys()[0]:
                    if resonant_els[ii]:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*np.sum(np.array(A_list[ii])*np.exp(1.0J*np.pi*2*(np.array(P_list[ii])-0.5))))*self.domain[i]['wt']
                    else:
                        ftot_B_C=ftot_B_C+(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
                else:
                    if resonant_els[ii]:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']])+(f1f2[:,0]+1.0J*f1f2[:,1])*np.sum(np.array(A_list[ii])*np.exp(1.0J*np.pi*2*(np.array(P_list[ii])-0.5))))*self.domain[i]['wt']
                    else:
                        ftot_B_IC=ftot_B_IC+abs(fb+f_surface(h, k, l,[self.domain[i]['slab']]))*self.domain[i]['wt']
            ftot=(a+b*(E-E0))*(ftot+abs(ftot_A_C)+ftot_A_IC+ftot_B_IC+abs(ftot_B_C))
            #ftot=ftot+ftot_A_C+ftot_A_IC+ftot_B_IC+ftot_B_C
        return abs(ftot)*self.inst.inten

    def calc_f_layered_water(self,h,k,l,u0,ubar,d_w,first_layer_height,density_w=0.033):
        #contribution of layered water calculated as equation (29) in Reviews in Mineralogy and Geochemistry v. 49 no. 1 p. 149-221
        #note that here the height of the first atom layer is not at 0 as in that equation but is specified by first_layer_height, and the corrections were done accordingly
        #in addition, the occupancy of the layered water molecules is correctly calculated here by Auc*d_w*density_w
        #the u0 and ubar here are in A
        dinv = self.unit_cell.abs_hkl(h, k, l)
        f=self._get_f(np.array(['O']), dinv)[:,0]
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        q=2*np.pi*dinv
        F_layered_water=f*(Auc*d_w*density_w)*np.exp(-0.5*q**2*u0**2)*np.exp(q*first_layer_height*1.0J)\
                        /(1-np.exp(-0.5*q**2*ubar**2)*np.exp(q*d_w*1.0J))
        return F_layered_water
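    #Layered-water model used by the calc_f_layered_* methods (equation (29) in Reviews in Mineralogy and
    #Geochemistry v. 49 no. 1 p. 149-221): a semi-infinite stack of O layers spaced d_w whose widths grow
    #geometrically, summed in closed form as
    #   F_lw(q) = f_O(q)*(Auc*d_w*density_w)*exp(-q^2*u0^2/2)*exp(1J*q*z1) / (1-exp(-q^2*ubar^2/2)*exp(1J*q*d_w))
    #with z1 the height of the first water layer and Auc*d_w*density_w the per-layer occupancy.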
    def calc_f_layered_water_muscovite(self,h,k,l,args,height_offset=0):
        #same layered-water model as calc_f_layered_water above, but for a water molecule (O plus two H) and with the
        #parameters unpacked from the args dict; the u0 and ubar here are in A
        if h[0]==0 and k[0]==0:#the layered structure only has an effect on the specular rod
            u0,ubar,d_w,first_layer_height,density_w=args['u0_w'],args['ubar_w'],args['d_w'],args['first_layer_height_w'],args['density_w']
            dinv = self.unit_cell.abs_hkl(h, k, l)
            f=self._get_f(np.array(['O']), dinv)[:,0]
            f_H=self._get_f(np.array(['H']), dinv)[:,0]
            Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
            q=2*np.pi*dinv
            #here the first layer height is referenced to 0, which is the height of the topmost atom layer before relaxation in the surface slab
            F_layered_water=(f+2*f_H)*(Auc*d_w*density_w)*np.exp(-0.5*q**2*u0**2)*np.exp(q*(first_layer_height)*1.0J)\
                            /(1-np.exp(-0.5*q**2*ubar**2)*np.exp(q*d_w*1.0J))#54.3=20.1058*(1+1.6) offset height accounting for the bulk and surface slabs
            return F_layered_water
        else:
            return 0

    def calc_f_layered_sorbate(self,h,k,l,el,u0_s,ubar_s,d_s,first_layer_height_s,density_s,oc_damping_factor,f1f2=None):
        #contribution of the layered sorbate, calculated with a function modified from equation (29) in Reviews in Mineralogy and Geochemistry v. 49 no. 1 p. 149-221
        #note that here the height of the first atom layer is not at 0 as in that equation but is specified by first_layer_height_s, and the corrections were done accordingly
        #in addition, the occupancy of the layered sorbate molecules is correctly calculated here by Auc*1*density_s
        #the u0_s and ubar_s here are in A
        #note f1f2 is not used in this function; it is only there to make pasting argument lists in scripts easy
        dinv = self.unit_cell.abs_hkl(h, k, l)
        f=self._get_f(np.array([el]), dinv)[:,0]
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        q=2*np.pi*dinv
        F_layered_sorbate=f*(Auc*1*density_s)*np.exp(-0.5*q**2*u0_s**2)*np.exp(q*first_layer_height_s*1.0J)\
                          /(1-np.exp(-oc_damping_factor)*np.exp(-0.5*q**2*ubar_s**2)*np.exp(q*d_s*1.0J))
        return F_layered_sorbate

    def calc_f_layered_water_hematite(self,h,k,l,u0,ubar,d_w,first_layer_height,density_w=0.033):
        #same layered-water model as calc_f_layered_water above; the u0 and ubar here are in A
        dinv = self.unit_cell.abs_hkl(h, k, l)
        f=self._get_f(np.array(['O']), dinv)[:,0]
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        q=2*np.pi*dinv
        F_layered_water=f*(Auc*d_w*density_w)*np.exp(-0.5*q**2*u0**2)*np.exp(q*first_layer_height*1.0J)\
                        /(1-np.exp(-0.5*q**2*ubar**2)*np.exp(q*d_w*1.0J))
        return F_layered_water

    def calc_f_layered_sorbate_hematite(self,h,k,l,el,u0_s,ubar_s,d_s,first_layer_height_s,density_s,oc_damping_factor,f1f2=None):
        #same layered-sorbate model as calc_f_layered_sorbate above; the u0_s and ubar_s here are in A
        #note f1f2 is not used in this function; it is only there to make pasting argument lists in scripts easy
        dinv = self.unit_cell.abs_hkl(h, k, l)
        f=self._get_f(np.array([el]), dinv)[:,0]
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        q=2*np.pi*dinv
        F_layered_sorbate=f*(Auc*1*density_s)*np.exp(-0.5*q**2*u0_s**2)*np.exp(q*first_layer_height_s*1.0J)\
                          /(1-np.exp(-oc_damping_factor)*np.exp(-0.5*q**2*ubar_s**2)*np.exp(q*d_s*1.0J))
        return F_layered_sorbate

    def calc_f_layered_sorbate_hematite_RAXR_MD(self,h,k,l,el,u0_s,ubar_s,d_s,first_layer_height_s,density_s,oc_damping_factor,f1f2):
        #same layered-sorbate model as calc_f_layered_sorbate above, but with the atomic form factor corrected by the
        #anomalous terms f'+1J*f''; the u0_s and ubar_s here are in A
        dinv = self.unit_cell.abs_hkl(h, k, l)
        f=self._get_f(np.array([el]), dinv)[:,0]+(f1f2[:,0]+1.0J*f1f2[:,1])#atomic form factor corrected by the f1f2 correction terms
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        q=2*np.pi*dinv
        F_layered_sorbate=f*(Auc*1*density_s)*np.exp(-0.5*q**2*u0_s**2)*np.exp(q*first_layer_height_s*1.0J)\
                          /(1-np.exp(-oc_damping_factor)*np.exp(-0.5*q**2*ubar_s**2)*np.exp(q*d_s*1.0J))
        return F_layered_sorbate

    def calc_f_layered_sorbate_muscovite(self,h,k,l,args,height_offset=0):
        #same layered-sorbate model as calc_f_layered_sorbate above, with the parameters unpacked from the args dict
        if h[0]==0 and k[0]==0:#the layered structure only has an effect on the specular rod
            el,u0_s,ubar_s,d_s,first_layer_height_s,density_s=self.domain['el'],args['u0_s'],args['ubar_s'],args['d_s'],args['first_layer_height_s'],args['density_s']
            try:
                oc_bar=args['oc_damping_factor']
            except:
                oc_bar=0
            dinv = self.unit_cell.abs_hkl(h, k, l)
            f=self._get_f(np.array([el]), dinv)[:,0]
            Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
            q=2*np.pi*dinv
            F_layered_sorbate=f*(Auc*1*density_s)*np.exp(-0.5*q**2*u0_s**2)*np.exp(q*(first_layer_height_s)*1.0J)\
                              /(1-np.exp(-oc_bar)*np.exp(-0.5*q**2*ubar_s**2)*np.exp(q*d_s*1.0J))
            return F_layered_sorbate
        else:
            return 0
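    #The layered-sorbate variants add an occupancy damping term: the extra exp(-oc_damping_factor) in the
    #geometric-series denominator gives layer n the weight exp(-n*oc_damping_factor), so the first layer is
    #undamped and every following layer is attenuated by one more factor of exp(-oc_damping_factor).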
    def calc_f_layered_sorbate_RAXR(self,h,k,l,el,u0_s,ubar_s,d_s,first_layer_height_s,density_s,oc_damping_factor,f1f2):
        #same layered-sorbate model as calc_f_layered_sorbate above, but with the atomic form factor corrected by the
        #anomalous terms f'+1J*f''; the u0_s and ubar_s here are in A
        dinv = self.unit_cell.abs_hkl(h, k, l)
        f=self._get_f(np.array([el]), dinv)[:,0]+(f1f2[:,0]+1.0J*f1f2[:,1])#atomic form factor corrected by the f1f2 correction terms
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        q=2*np.pi*dinv
        F_layered_sorbate=f*(Auc*1*density_s)*np.exp(-0.5*q**2*u0_s**2)*np.exp(q*first_layer_height_s*1.0J)\
                          /(1-np.exp(-oc_damping_factor)*np.exp(-0.5*q**2*ubar_s**2)*np.exp(q*d_s*1.0J))
        return F_layered_sorbate

    def calc_f_layered_sorbate_muscovite_RAXR(self,h,k,l,args,height_offset=0,f1f2=None):
        #same layered-sorbate model as calc_f_layered_sorbate_muscovite above, with the atomic form factor corrected by the anomalous terms
        if h[0]==0 and k[0]==0:#the layered structure only has an effect on the specular rod
            el,u0_s,ubar_s,d_s,first_layer_height_s,density_s=self.domain['el'],args['u0_s'],args['ubar_s'],args['d_s'],args['first_layer_height_s'],args['density_s']
            try:
                oc_bar=args['oc_damping_factor']
            except:
                oc_bar=0
            if f1f2 is None:
                f1f2=self.domain['F1F2']
            else:
                pass
            dinv = self.unit_cell.abs_hkl(h, k, l)
            f=self._get_f(np.array([el]), dinv)[:,0]+(f1f2[:,0]+1.0J*f1f2[:,1])#atomic form factor corrected by the f1f2 correction terms
            Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
            q=2*np.pi*dinv
            F_layered_sorbate=f*(Auc*1*density_s)*np.exp(-0.5*q**2*u0_s**2)*np.exp(q*(first_layer_height_s)*1.0J)\
                              /(1-np.exp(-oc_bar)*np.exp(-0.5*q**2*ubar_s**2)*np.exp(q*d_s*1.0J))
            return F_layered_sorbate
        else:
            return 0

    def turbo_calc_f(self, h, k, l):
        '''Calculate the structure factors for the sample with inline c code
        for the surface.
        '''
        fs = self.turbo_calc_fs(h, k, l)
        fb = self.calc_fb(h, k, l)
        ftot = fs + fb
        return ftot*self.inst.inten

    def fourier_synthesis(self,HKL_list,P_list,A_list,z_min=0.,z_max=20.,el_lib={'O':8,'Fe':26,'As':33,'Pb':82,'Sb':51,'Zr':40,"Th":90,"Rb":37},resonant_el='Pb',resolution=1000,water_scaling=1):
        ZR=el_lib[resonant_el]
        q_list = self.unit_cell.abs_hkl(np.array(HKL_list[0]), np.array(HKL_list[1]), np.array(HKL_list[2]))#a list of 1/d for each hkl set
        q_list_sorted=copy.copy(q_list)
        q_list_sorted.sort()
        q_list_sorted=np.array(q_list_sorted)*np.pi*2#note that q=2*pi/d
        delta_q=np.average([q_list_sorted[i+1]-q_list_sorted[i] for i in range(len(q_list_sorted)-1)])
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        z_plot=[]
        eden_plot=[]
        eden_domain_plot=[]
        for i in range(resolution):
            z_each=float(z_max-z_min)/resolution*i+z_min
            z_plot.append(z_each)
            eden=0
            eden_domains=[]
            eden_each_domain=ZR/Auc/np.pi/2*np.sum(A_list*np.cos(2*np.pi*P_list-np.array(q_list_sorted)*z_each)*delta_q)/water_scaling
            eden_domains.append(eden_each_domain)
            eden+=eden_each_domain
            eden_plot.append(eden)
            eden_domain_plot.append(eden_domains)
        return z_plot,eden_plot,eden_domain_plot

    def fourier_synthesis_hematite(self,HKL_list,P_list,A_list,z_min=0.,z_max=20.,el_lib={'O':8,'Fe':26,'As':33,'Pb':82,'Sb':51,'Zr':40,"Th":90,"Rb":37},resonant_el='Pb',resolution=1000,water_scaling=1):
        ZR=el_lib[resonant_el]
        q_list = self.unit_cell.abs_hkl(np.array(HKL_list[0]), np.array(HKL_list[1]), np.array(HKL_list[2]))#a list of 1/d for each hkl set
        q_list_sorted=copy.copy(q_list)
        q_list_sorted.sort()
        q_list_sorted=np.array(q_list_sorted)*np.pi*2#note that q=2*pi/d
        delta_q=np.average([q_list_sorted[i+1]-q_list_sorted[i] for i in range(len(q_list_sorted)-1)])
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        z_plot=[]
        eden_plot=[]
        eden_domain_plot=[]
        keys=P_list.keys()
        keys.sort()
        for i in range(resolution):
            z_each=float(z_max-z_min)/resolution*i+z_min
            z_plot.append(z_each)
            eden=0
            eden_domains=[]
            for key in keys:
                eden_each_domain=ZR/Auc/np.pi/2*np.sum(np.array([A_list[key]])*np.cos(2*np.pi*np.array([P_list[key]])-np.array(q_list_sorted)*z_each)*delta_q)/water_scaling
                eden_domains.append(eden_each_domain)
                eden+=eden_each_domain
            eden_plot.append(eden)
            eden_domain_plot.append(eden_domains)
        return z_plot,eden_plot,eden_domain_plot

    def fourier_synthesis_original(self,HKL_list,P_list,A_list,z_min=0.,z_max=20.,el_lib={'O':8,'Fe':26,'As':33,'Pb':82,'Sb':51,'Zr':40},resonant_el='Pb',resolution=1000):
        ZR=el_lib[resonant_el]
        q_list = self.unit_cell.abs_hkl(np.array(HKL_list[0]), np.array(HKL_list[1]), np.array(HKL_list[2]))#a list of 1/d for each hkl set
        q_list_sorted=copy.copy(q_list)
        q_list_sorted.sort()
        q_list_sorted=np.array(q_list_sorted)*np.pi*2#note that q=2*pi/d
        delta_q=np.average([q_list_sorted[i+1]-q_list_sorted[i] for i in range(len(q_list_sorted)-1)])
        Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
        z_plot=[]
        eden_plot=[]
        eden_domain_plot=[]
        for i in range(resolution):
            z_each=float(z_max-z_min)/resolution*i+z_min
            z_plot.append(z_each)
            eden=0
            eden_domains=[]
            for j in range(len(P_list)):
                eden_each_domain=ZR/Auc/np.pi*np.sum(A_list[j]*np.cos(2*np.pi*P_list[j]-np.array(q_list_sorted)*z_each)*delta_q)
                eden_domains.append(eden_each_domain)
                eden+=eden_each_domain
            eden_plot.append(eden)
            eden_domain_plot.append(eden_domains)
        return z_plot,eden_plot,eden_domain_plot
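    #The fourier_synthesis* methods above invert the fitted (A, P) pairs into a resonant-element electron
    #density profile via a discretized inverse Fourier transform over the sampled q points, roughly
    #   rho(z) ~ Z_R/(2*pi*Auc) * sum_q A(q)*np.cos(2*np.pi*P(q)-q*z)*delta_q
    #(fourier_synthesis_original uses Z_R/(pi*Auc) as prefactor); delta_q is the average q spacing and
    #water_scaling an optional normalization.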
    def plot_electron_density(self,slabs,el_lib={'O':8,'Fe':26,'As':33,'Pb':82,'Sb':51,'P':15,'Cr':24,'Cd':48,'Cu':29,'Zn':30,'Al':13,'Si':14,'K':19},z_min=0.,z_max=28.,N_layered_water=10,resolution=1000,file_path="D:\\"):
        #print dinv
        e_data=[]
        labels=[]
        e_total=np.zeros(resolution)
        keys_sorted=[each for each in slabs.keys() if "A" in each]
        keys_sorted.sort()
        for key in keys_sorted:
            slab=[slabs[key]['slab']]
            x, y, z, u, oc, el = self._surf_pars(slab)
            z=(z+1.)*self.unit_cell.c#z is offset by 1 unit cell since this offset is explicitly considered in the calculation of the structure factor
            f=np.array([el_lib[each] for each in el])
            Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
            z_min,z_max=z_min,z_max
            eden=[]
            z_plot=[]
            layered_water,z_layered_water,sigma_layered_water,d_w,water_density=None,[],[],None,None
            if slabs[key]['layered_water']!=[]:
                #the items of the layered water are [u0,ubar,d_w(in A),first_layer_height(fractional),density_w(in # of waters/A^3)]
                layered_water=slabs[key]['layered_water']
                d_w=layered_water[2]
                water_density=layered_water[-1]
                for i in range(N_layered_water):
                    z_layered_water.append((layered_water[3]+1.)*self.unit_cell.c+i*layered_water[2])#first layer is offset by 1 accordingly
                    sigma_layered_water.append((layered_water[0]**2+i*layered_water[1]**2)**0.5)
            #consider the e density of the layered sorbate
            layered_sorbate,z_layered_sorbate,sigma_layered_sorbate,d_s,sorbate_density=None,[],[],None,None
            if 'layered_sorbate' in slabs[key].keys():
                if slabs[key]['layered_sorbate']!=[]:
                    #the items of the layered sorbate are [el,u0,ubar,d_s(in A),first_layer_height(fractional),density_s(in # of sorbates/A^3)]
                    layered_sorbate=slabs[key]['layered_sorbate']
                    d_s=layered_sorbate[3]
                    sorbate_density=layered_sorbate[-2]
                    for i in range(N_layered_water):#assume the number of sorbate layers equals that of the water layers
                        z_layered_sorbate.append((layered_sorbate[4]+1.)*self.unit_cell.c+i*layered_sorbate[3])#first layer is offset by 1 accordingly
                        sigma_layered_sorbate.append((layered_sorbate[1]**2+i*layered_sorbate[2]**2)**0.5)
            #print u,f,z
            for i in range(resolution):
                z_each=float(z_max-z_min)/resolution*i+z_min
                z_plot.append(z_each)
                #normalized with occupancy and weight factor (manually scaled by a factor 2 to account for the half/half split of domainA and domainB)
                #the e density of each atom layer is considered to be distributed within a volume of Auc*1, so the unit here is e/A^3
                eden.append(np.sum(slabs[key]['wt']*2*oc*f/Auc*(2*np.pi*u**2)**-0.5*np.exp(-0.5/u**2*(z_each-z)**2)))
                if slabs[key]['layered_water']!=[]:
                    #eden[-1]=eden[-1]+np.sum(8*slabs[key]['wt']*2*Auc*d_w*water_density*(2*np.pi*np.array(sigma_layered_water)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2))
                    eden[-1]=eden[-1]+np.sum(8*slabs[key]['wt']*2*water_density*(2*np.pi*np.array(sigma_layered_water)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2))
                if 'layered_sorbate' in slabs[key].keys():
                    if slabs[key]['layered_sorbate']!=[]:
                        eden[-1]=eden[-1]+np.sum(el_lib[slabs[key]['layered_sorbate'][0]]*slabs[key]['wt']*2*sorbate_density*(2*np.pi*np.array(sigma_layered_sorbate)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_sorbate)**2*(z_each-np.array(z_layered_sorbate))**2))
            labels.append(key)
            e_data.append(np.array([z_plot,eden]))
            e_total=e_total+np.array(eden)
        labels.append('Total electron density')
        e_data.append(np.array([list(e_data[0])[0],e_total]))
        pickle.dump([e_data,labels],open(os.path.join(file_path,"temp_plot_eden"),"wb"))
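    #The plot_electron_density* methods build rho(z) by smearing every atom over a Gaussian of width u,
    #   rho(z) = sum_j wt*oc_j*Z_j/(Auc*sqrt(2*pi*u_j^2))*exp(-(z-z_j)^2/(2*u_j^2))
    #plus analogous Gaussian stacks for the layered water/sorbate contributions; the hematite and muscovite
    #variants below also fold an effective broadening sig_eff into u and rescale to e-/A^3 with normalized_factor.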
    def plot_electron_density_hematite(self,slabs,el_lib={'O':8,'Fe':26,'As':33,'Pb':82,'Sb':51,'P':15,'Cr':24,'Cd':48,'Cu':29,'Zn':30,'Al':13,'Si':14,'K':19,'Zr':40,"Th":90,"Rb":37},z_min=0.,z_max=28.,N_layered_water=10,resolution=1000,file_path="D:\\",height_offset=0,version=1.0,freeze=False,raxs_el='Pb'):
        #print dinv
        z_min=z_min
        z_max=z_max
        e_data=[]
        labels=[]
        e_total=np.zeros(resolution)
        e_total_raxs=np.zeros(resolution)
        e_total_layer_water=np.zeros(resolution)
        keys_sorted=[each for each in slabs.keys() if "A" in each]
        keys_sorted.sort()
        for key in keys_sorted:
            slab=[slabs[key]['slab']]
            wt=slabs[key]['wt']
            raxs_el=raxs_el
            x, y, z, u, oc, el = self._surf_pars(slab)
            try:
                sig_eff=slabs[key]['sig_eff']
            except:
                sig_eff=0.203
            u=(u**2+sig_eff**2)**0.5
            index_raxs=np.where(np.array(el)==raxs_el)[0]
            z_raxs=np.array([(z[i]+1.)*self.unit_cell.c for i in index_raxs])
            u_raxs=np.array([u[i] for i in index_raxs])
            oc_raxs=np.array([oc[i] for i in index_raxs])
            f_raxs=el_lib[raxs_el]
            eden_raxs=[]
            eden_layer_water=[]
            z=(z+1.)*self.unit_cell.c#z is offset by 1 unit cell since this offset is explicitly considered in the calculation of the structure factor
            f=np.array([el_lib[each] for each in el])
            Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
            z_min,z_max=z_min,z_max
            eden=[]
            z_plot=[]
            layered_water,z_layered_water,sigma_layered_water,d_w,water_density=None,[],[],None,None
            layered_water=slabs[key]['layered_water']
            if layered_water!=[]:
                d_w=layered_water[2]
                water_density=layered_water[-1]
                for i in range(N_layered_water):
                    #z_layered_water.append(layered_water[3]+54.3+height_offset+i*layered_water[2])#first layer is offset by 1 accordingly
                    z_layered_water.append(7.3707+layered_water[3]+i*layered_water[2])#offset by one unit cell length (7.3707 A)
                    sigma_layered_water.append((layered_water[0]**2+i*layered_water[1]**2+sig_eff**2)**0.5)
            #consider the e density of the layered sorbate
            layered_sorbate,z_layered_sorbate,sigma_layered_sorbate,sorbate_damping_factors,d_s,sorbate_density=None,[],[],[],None,None
            layered_sorbate_keys=['u0_s','ubar_s','d_s','first_layer_height_s','density_s','oc_damping_factor','F1F2']
            layered_sorbate=slabs[key]['layered_sorbate']
            if layered_sorbate!=[]:
                d_s=layered_sorbate[2]
                sorbate_density=layered_sorbate[4]
                damping_factor=layered_sorbate[5]
                for i in range(N_layered_water):#assume the number of sorbate layers equals that of the water layers
                    z_layered_sorbate.append(7.3707+layered_sorbate[3]+i*layered_sorbate[2])#first layer is offset by one unit cell (7.3707 A) accordingly
                    sigma_layered_sorbate.append((layered_sorbate[0]**2+i*layered_sorbate[1]**2+sig_eff**2)**0.5)
                    sorbate_damping_factors.append(damping_factor*i)#first layer no damping, the second is damped by a factor exp(-damping_factor), the third by exp(-2*damping_factor), and so on
            #print u,f,z
            for i in range(resolution):
                z_each=float(z_max-z_min)/resolution*i+z_min
                z_plot.append(z_each)
                #normalized with occupancy and weight factor (thus normalized to the whole surface area containing multiple domains)
                #the e density of each atom layer is considered to be distributed within a volume of Auc*1, so the unit here is e/A^3
                eden.append(np.sum(wt*2*oc*f/Auc*(2*np.pi*u**2)**-0.5*np.exp(-0.5/u**2*(z_each-z)**2)))
                eden_raxs.append(np.sum(wt*2*oc_raxs*f_raxs/Auc*(2*np.pi*u_raxs**2)**-0.5*np.exp(-0.5/u_raxs**2*(z_each-z_raxs)**2)))
                if layered_water!=[]:
                    eden[-1]=eden[-1]+np.sum(10*wt*2*water_density*layered_water[2]*(2*np.pi*np.array(sigma_layered_water)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2))
                    eden_layer_water.append(np.sum(10*wt*2*water_density*layered_water[2]*(2*np.pi*np.array(sigma_layered_water)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2)))
                if layered_sorbate!=[]:
                    eden[-1]=eden[-1]+np.sum(el_lib[raxs_el]*wt*2*sorbate_density*np.exp(-np.array(sorbate_damping_factors))*(2*np.pi*np.array(sigma_layered_sorbate)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_sorbate)**2*(z_each-np.array(z_layered_sorbate))**2))
                    eden_raxs[-1]=eden_raxs[-1]+np.sum(el_lib[raxs_el]*wt*2*sorbate_density*np.exp(-np.array(sorbate_damping_factors))*(2*np.pi*np.array(sigma_layered_sorbate)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_sorbate)**2*(z_each-np.array(z_layered_sorbate))**2))
            labels.append(key)
            #e_data.append(np.array([z_plot,eden,eden_raxs,eden_layer_water]))
            normalized_factor=3.03#3.03: 1 electron per 3.03 cubic A
            e_data.append(np.array([np.array(z_plot)-height_offset,np.array(eden)*normalized_factor,np.array(eden_raxs)*normalized_factor,np.array(eden_layer_water)*normalized_factor]))
            e_total=e_total+np.array(eden)
            #if layered_sorbate!=[]:
            e_total_raxs=e_total_raxs+np.array(eden_raxs)
            #if layered_water!=[]:
            e_total_layer_water=e_total_layer_water+np.array(eden_layer_water)
        labels.append('Total electron density')
        #e_data.append(np.array([list(e_data[0])[0],e_total,e_total_raxs,e_total_layer_water]))
        e_data.append(np.array([list(e_data[0])[0],e_total*normalized_factor,e_total_raxs*normalized_factor,e_total_layer_water*normalized_factor]))
        water_scaling=0.33
        pickle.dump([e_data,labels],open(os.path.join(file_path,"temp_plot_eden"),"wb"))
        return water_scaling

    def plot_electron_density_muscovite(self,slabs,el_lib={'O':8,'Fe':26,'As':33,'Pb':82,'Sb':51,'P':15,'Cr':24,'Cd':48,'Cu':29,'Zn':30,'Al':13,'Si':14,'K':19,'Zr':40,"Th":90,"Rb":37},z_min=0.,z_max=28.,N_layered_water=10,resolution=1000,file_path="D:\\",height_offset=0,version=1.0,freeze=False):
        #print dinv
        e_data=[]
        labels=[]
        e_total=np.zeros(resolution)
        e_total_raxs=np.zeros(resolution)
        e_total_layer_water=np.zeros(resolution)
        for domain_index in range(len(slabs['domains'])):
            wt=getattr(slabs['global_vars'],'wt'+str(domain_index+1))
            raxs_el=slabs['el']
            slab=[slabs['domains'][domain_index]]
            x, y, z, u, oc, el = self._surf_pars(slab)
            try:
                sig_eff=slabs['sig_eff']
            except:
                sig_eff=0.203
            u=(u**2+sig_eff**2)**0.5
            index_raxs=np.where(np.array(el)==raxs_el)[0]
            z_raxs=np.array([(z[i]+1.)*self.unit_cell.c for i in index_raxs])
            u_raxs=np.array([u[i] for i in index_raxs])
            oc_raxs=np.array([oc[i] for i in index_raxs])
            f_raxs=el_lib[raxs_el]
            eden_raxs=[]
            eden_layer_water=[]
            z=(z+1.)*self.unit_cell.c#z is offset by 1 unit cell since this offset is explicitly considered in the calculation of the structure factor
            f=np.array([el_lib[each] for each in el])
            Auc=self.unit_cell.a*self.unit_cell.b*np.sin(self.unit_cell.gamma)
            z_min,z_max=z_min,z_max
            eden=[]
            z_plot=[]
            layered_water,z_layered_water,sigma_layered_water,d_w,water_density=None,[],[],None,None
            layered_water_keys=['u0_w','ubar_w','d_w','first_layer_height_w','density_w']
            layered_water=[slabs['layered_water_pars'][each_key] for each_key in layered_water_keys]
            d_w=layered_water[2]
            water_density=layered_water[-1]
            for i in range(N_layered_water):
                #z_layered_water.append(layered_water[3]+54.3+height_offset+i*layered_water[2])#first layer is offset by 1 accordingly
                z_layered_water.append(layered_water[3]+i*layered_water[2])
                sigma_layered_water.append((layered_water[0]**2+i*layered_water[1]**2+sig_eff**2)**0.5)
            #consider the e density of the layered sorbate
            layered_sorbate,z_layered_sorbate,sigma_layered_sorbate,sorbate_damping_factors,d_s,sorbate_density=None,[],[],[],None,None
            layered_sorbate_keys=['u0_s','ubar_s','d_s','first_layer_height_s','density_s','oc_damping_factor']
            layered_sorbate=[slabs['layered_sorbate_pars'][each_key] for each_key in layered_sorbate_keys]
            d_s=layered_sorbate[2]
            sorbate_density=layered_sorbate[-2]
            damping_factor=layered_sorbate[-1]
            for i in range(N_layered_water):#assume the number of sorbate layers equals that of the water layers
                z_layered_sorbate.append(layered_sorbate[3]+i*layered_sorbate[2])#first layer is offset by 1 accordingly
                sigma_layered_sorbate.append((layered_sorbate[0]**2+i*layered_sorbate[1]**2+sig_eff**2)**0.5)
                sorbate_damping_factors.append(damping_factor*i)#first layer no damping, the second is damped by a factor exp(-damping_factor), the third by exp(-2*damping_factor), and so on
            #print u,f,z
            for i in range(resolution):
                z_each=float(z_max-z_min)/resolution*i+z_min
                z_plot.append(z_each)
                #normalized with occupancy and weight factor (thus normalized to the whole surface area containing multiple domains)
                #the e density of each atom layer is considered to be distributed within a volume of Auc*1, so the unit here is e/A^3
                eden.append(np.sum(wt*oc*f/Auc*(2*np.pi*u**2)**-0.5*np.exp(-0.5/u**2*(z_each-z)**2)))
                eden_raxs.append(np.sum(wt*oc_raxs*f_raxs/Auc*(2*np.pi*u_raxs**2)**-0.5*np.exp(-0.5/u_raxs**2*(z_each-z_raxs)**2)))
                bulk_water=0
                if z_each>0:
                    bulk_water=1
                eden[-1]=eden[-1]+np.sum(10*wt*water_density*layered_water[2]*(2*np.pi*np.array(sigma_layered_water)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2))+(.33-0.16233394)*wt*bulk_water*0
                eden_layer_water.append(np.sum(10*wt*water_density*layered_water[2]*(2*np.pi*np.array(sigma_layered_water)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2))+(.33-0.16233394)*wt*bulk_water*0)
                #eden[-1]=eden[-1]+np.sum(10*wt*water_density*(np.exp(-0.5/np.array(sigma_layered_water)**2*(z_each-np.array(z_layered_water))**2)))
                eden[-1]=eden[-1]+np.sum(el_lib[raxs_el]*wt*sorbate_density*np.exp(-np.array(sorbate_damping_factors))*(2*np.pi*np.array(sigma_layered_sorbate)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_sorbate)**2*(z_each-np.array(z_layered_sorbate))**2))
                eden_raxs[-1]=eden_raxs[-1]+np.sum(el_lib[raxs_el]*wt*sorbate_density*np.exp(-np.array(sorbate_damping_factors))*(2*np.pi*np.array(sigma_layered_sorbate)**2)**-0.5*np.exp(-0.5/np.array(sigma_layered_sorbate)**2*(z_each-np.array(z_layered_sorbate))**2))
            labels.append('Domain'+str(domain_index+1))
            #e_data.append(np.array([z_plot,eden,eden_raxs,eden_layer_water]))
            normalized_factor=3.03#3.03: 1 electron per 3.03 cubic A
            if domain_index==0:#domain1 has a 0.25 weighting
factor e_data.append(np.array([z_plot,np.array(eden)*normalized_factor,np.array(eden_raxs)*normalized_factor,np.array(eden_layer_water)*normalized_factor])) elif domain_index==1:#domain2 has a 0.75 weighting factor e_data.append(np.array([z_plot,np.array(eden)*normalized_factor,np.array(eden_raxs)*normalized_factor,np.array(eden_layer_water)*normalized_factor])) if version==1.0: e_total=e_total+np.array(eden) elif version>=1.1: if freeze: e_total=e_total+np.array(np.array(eden)-np.array(eden_raxs)) else: e_total=e_total+np.array(eden) e_total_raxs=e_total_raxs+np.array(eden_raxs) e_total_layer_water=e_total_layer_water+np.array(eden_layer_water) labels.append('Total electron density') #e_data.append(np.array([list(e_data[0])[0],e_total,e_total_raxs,e_total_layer_water])) e_data.append(np.array([list(e_data[0])[0],e_total*normalized_factor,e_total_raxs*normalized_factor,e_total_layer_water*normalized_factor])) water_scaling=0.33 pickle.dump([e_data,labels],open(os.path.join(file_path,"temp_plot_eden"),"wb")) return water_scaling def calc_fs(self, h, k, l,slabs): '''Calculate the structure factors from the surface ''' dinv = self.unit_cell.abs_hkl(h, k, l) x, y, z, u, oc, el = self._surf_pars(slabs) #Note that the u here has been recalculated to represent for the Gaussian distribution width of the thermal vibration (ie sigma in Angstrom) f=self._get_f(el, dinv) #print x, y,z # Create all the atomic structure factors #print f.shape, h.shape, oc.shape, x.shape, y.shape, z.shape,el.shape #change mark 3 #delta_l=1 #if self.delta1==[]:delta_l=0 fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:,np.newaxis]**2)\ *np.sum([np.exp(2.0*np.pi*1.0J*( h[:,np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) + k[:,np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) + l[:,np.newaxis]*(z[np.newaxis, :]+1))) for sym_op in self.surface_sym], 0) ,1) """ for id in slabs[0].id: if "Pb" in str(id): print id, np.sum([np.exp(2.0*np.pi*1.0J*(\ 1*(sym_op.trans_x(x, y)+self.delta1) +\ 1*(sym_op.trans_y(x, y)+self.delta2) +\ 1.3*(z[np.newaxis, :]+1)))\ for sym_op in self.surface_sym][0][0])#[np.where(slabs[0].id==id)[0][0]] """ return fs def calc_fs_hematite_RAXR_MD(self, h, k, l,slabs,f1f2,res_el='Pb'): '''Calculate the structure factors from the surface ''' dinv = self.unit_cell.abs_hkl(h, k, l) x, y, z, u, oc, el = self._surf_pars(slabs) #Note that the u here has been recalculated to represent for the Gaussian distribution width of the thermal vibration (ie sigma in Angstrom) f=self._get_f(el, dinv) #print x, y,z # Create all the atomic structure factors #print f.shape, h.shape, oc.shape, x.shape, y.shape, z.shape,el.shape #change mark 3 #delta_l=1 #if self.delta1==[]:delta_l=0 shape=f.shape f_offset=np.zeros(shape=shape)+0J for i in range(shape[0]): for j in range(shape[1]): if res_el==el[j]: f_offset[i][j]=f1f2[i][0]+1.0J*f1f2[i][1] f=f+f_offset fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:,np.newaxis]**2)\ *np.sum([np.exp(2.0*np.pi*1.0J*( h[:,np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) + k[:,np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) + l[:,np.newaxis]*(z[np.newaxis, :]+1))) for sym_op in self.surface_sym], 0) ,1) """ for id in slabs[0].id: if "Pb" in str(id): print id, np.sum([np.exp(2.0*np.pi*1.0J*(\ 1*(sym_op.trans_x(x, y)+self.delta1) +\ 1*(sym_op.trans_y(x, y)+self.delta2) +\ 1.3*(z[np.newaxis, :]+1)))\ for sym_op in self.surface_sym][0][0])#[np.where(slabs[0].id==id)[0][0]] """ return fs def calc_fs_muscovite(self, h, k, l,slabs): '''Calculate the structure factors from the surface ''' dinv = 
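    # A minimal standalone sketch of the profile construction used by the two
    # plotting methods above (the names below are illustrative, not part of the
    # model): each atom layer at height z0 (AA) with occupancy oc, electron
    # count f and Gaussian width u contributes a unit-area Gaussian, in e/A^3,
    # evaluated on a regular z grid.
    #
    # import numpy as np
    # def eden_profile(z_grid, z0, f, oc, u, Auc):
    #     return oc*f/Auc*(2*np.pi*u**2)**-0.5*np.exp(-0.5*(z_grid-z0)**2/u**2)
    # z_grid = np.linspace(0., 28., 1000)
    # rho = eden_profile(z_grid, 7.37, 8., 1., 0.4, 30.0)  # one O layer, Auc ~ 30 A^2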
    def calc_fs(self, h, k, l, slabs):
        '''Calculate the structure factors from the surface
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars(slabs)
        # Note that u here has been recalculated to represent the Gaussian width
        # of the thermal vibration (i.e. sigma in Angstrom)
        f = self._get_f(el, dinv)
        # Create all the atomic structure factors
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                                h[:, np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) +
                                k[:, np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) +
                                l[:, np.newaxis]*(z[np.newaxis, :]+1)))
                             for sym_op in self.surface_sym], 0), 1)
        return fs

    def calc_fs_hematite_RAXR_MD(self, h, k, l, slabs, f1f2, res_el='Pb'):
        '''Calculate the structure factors from the surface, with the atomic
        form factor of the resonant element corrected by f1 and f2
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars(slabs)
        f = self._get_f(el, dinv)
        shape = f.shape
        f_offset = np.zeros(shape=shape)+0J
        for i in range(shape[0]):
            for j in range(shape[1]):
                if res_el == el[j]:
                    f_offset[i][j] = f1f2[i][0]+1.0J*f1f2[i][1]
        f = f+f_offset
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                                h[:, np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) +
                                k[:, np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) +
                                l[:, np.newaxis]*(z[np.newaxis, :]+1)))
                             for sym_op in self.surface_sym], 0), 1)
        return fs

    def calc_fs_muscovite(self, h, k, l, slabs):
        '''Calculate the structure factors from the surface
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars(slabs)
        try:
            if self.domain['freeze']:
                sub_space_index = [i for i in range(len(slabs[0].id)) if slabs[0].id[i][0:11] != 'Freezed_el_']
                x, y, z, u, oc, el = x[sub_space_index], y[sub_space_index], z[sub_space_index], u[sub_space_index], oc[sub_space_index], el[sub_space_index]
        except:
            pass
        f = self._get_f(el, dinv)
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                                h[:, np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) +
                                k[:, np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) +
                                l[:, np.newaxis]*(z[np.newaxis, :]+1)))
                             for sym_op in self.surface_sym], 0), 1)
        return fs

    def calc_fs_RAXR_muscovite(self, h, k, l, slabs, f1f2, res_el='Zr'):
        '''Calculate the structure factors from the surface with a resonant
        element. In the normal case hkl is an array of repeated numbers (eg
        h=[1]*10, k=[1]*10, l=[1.3]*10); f1f2 has the same length as hkl but
        changes as a function of E. The atomic form factor of res_el is
        corrected by the two correction terms f1 and f2.
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars(slabs)
        sub_space_index = [i for i in range(len(slabs[0].id)) if slabs[0].id[i][0:11] == 'Freezed_el_']
        f = self._get_f(el, dinv)
        shape = f.shape
        f_offset = np.zeros(shape=shape)+0J
        for i in range(shape[0]):
            for j in range(shape[1]):
                if res_el == el[j]:
                    try:
                        if j in sub_space_index and self.domain['freeze']:
                            # the frozen resonant element then has no effect on
                            # the non-resonant structure factor
                            f[:, j] = f[:, j]*0
                            f_offset[i][j] = f1f2[i][0]+1.0J*f1f2[i][1]
                        else:
                            f_offset[i][j] = f1f2[i][0]+1.0J*f1f2[i][1]
                    except:
                        f_offset[i][j] = f1f2[i][0]+1.0J*f1f2[i][1]
        f = f+f_offset
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                                h[:, np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) +
                                k[:, np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) +
                                l[:, np.newaxis]*(z[np.newaxis, :]+1)))
                             for sym_op in self.surface_sym], 0), 1)
        return fs

    def calc_fs_RAXR(self, h, k, l, slabs, f1f2, res_el='Zr'):
        '''Calculate the structure factors from the surface with a resonant
        element. In the normal case hkl is an array of repeated numbers (eg
        h=[1]*10, k=[1]*10, l=[1.3]*10); f1f2 has the same length as hkl but
        changes as a function of E. The atomic form factor of res_el is
        corrected by the two correction terms (f1 and f2).
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars(slabs)
        f = self._get_f(el, dinv)
        shape = f.shape
        f_offset = np.zeros(shape=shape)+0J
        for i in range(shape[0]):
            for j in range(shape[1]):
                if res_el == el[j]:
                    f_offset[i][j] = f1f2[i][0]+1.0J*f1f2[i][1]
        f = f+f_offset
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                                h[:, np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) +
                                k[:, np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) +
                                l[:, np.newaxis]*(z[np.newaxis, :]+1)))
                             for sym_op in self.surface_sym], 0), 1)
        return fs
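    # The per-element double loops above add the anomalous corrections one
    # entry at a time. A sketch of an equivalent vectorized form (assuming
    # f1f2 is an (N, 2) array matching the N hkl points and f has shape
    # (N, n_atoms)) would be:
    #
    # mask = (np.asarray(el) == res_el)                              # (n_atoms,)
    # fp = (np.asarray(f1f2)[:, 0]+1.0J*np.asarray(f1f2)[:, 1])      # (N,) f'+if''
    # f = f + fp[:, np.newaxis]*mask[np.newaxis, :]                  # broadcast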
    def calc_fs_offspecular(self, h, k, l, slabs):
        '''Calculate the structure factors from the surface
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars_offspecular(slabs)
        f = self._get_f(el, dinv)
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                                h[:, np.newaxis]*(sym_op.trans_x(x, y)+self.delta1) +
                                k[:, np.newaxis]*(sym_op.trans_y(x, y)+self.delta2) +
                                l[:, np.newaxis]*(z[np.newaxis, :]+1)))
                             for sym_op in self.surface_sym], 0), 1)
        return fs

    def turbo_calc_fs(self, h, k, l):
        '''Calculate the structure factors with weave (inline c code).
        Produces faster simulations of large structures.
        '''
        h = h.astype(np.float64)
        k = k.astype(np.float64)
        l = l.astype(np.float64)
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars()
        f = self._get_f(el, dinv)
        Pt = np.array([np.c_[so.P, so.t] for so in self.surface_sym])
        # Setup other stuff needed ...
        im = np.array([1.0J], dtype=np.complex128)
        fs = np.zeros(h.shape, dtype=np.complex128)
        tmp = np.array([0.0J], dtype=np.complex128)
        # Inline c-code goes here..
        code = '''
        double pi = 3.14159265358979311599796346854418516159057617187500;
        int ij = 0;
        int offset = 0;
        //printf("Atoms: %d, Points: %d, Symmetries: %d\\n", Noc[0], Nh[0], NPt[0]);
        // Loop over all data points
        for(int i = 0; i < Nh[0]; i++){
            // Loop over all atoms
            for(int j = 0; j < Noc[0]; j++){
                ij = i + j*Nh[0];
                // Loop over symmetry operations
                tmp[0] = 0.0*tmp[0];
                for(int m = 0; m < NPt[0]; m++){
                    offset = m*6;
                    tmp[0] += exp(2.0*pi*im[0]*(h[i]*(Pt[0 + offset]*x[j] +
                              Pt[1 + offset]*y[j] + Pt[2 + offset]) +
                              k[i]*(Pt[3 + offset]*x[j] + Pt[4 + offset]*y[j] +
                              Pt[5 + offset]) + l[i]*z[j]));
                } // End symmetry loop index m
                fs[i] += oc[j]*f[ij]*exp(-2.0*pow(pi*dinv[i], 2.0)*u[j])*tmp[0];
            } // End atom loop index j
        } // End data point (h,k,l) loop
        '''
        weave.inline(code, ['x', 'y', 'z', 'h', 'k', 'l', 'u', 'oc', 'f',
                            'Pt', 'im', 'fs', 'dinv', 'tmp'], compiler='gcc')
        return fs

    def calc_fb(self, h, k, l):
        '''Calculate the structure factors from the bulk
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, el, u, oc, c = self.bulk_slab._extract_values()
        oc = oc/float(len(self.bulk_sym))
        f = self._get_f(el, dinv)
        # Calculate the "shape factor" for the CTRs
        eff_thick = self.unit_cell.c/np.sin(self.inst.alpha*np.pi/180.0)
        alpha = (2.82e-5*self.inst.wavel*eff_thick/self.unit_cell.vol()*np.sum(f.imag, 1))
        denom = 1.0-np.exp(-2.0*np.pi*1.0J*(self.delta1*h+self.delta2*k+l))*np.exp(-alpha)
        # Delta functions to remove finite size effect in hk plane
        delta_funcs = (abs(h-np.round(h)) < 1e-12)*(abs(k-np.round(k)) < 1e-12)
        # Sum up the uc struct factors
        f_u = np.sum(oc*f*np.exp(-2*np.pi**2*u**2*dinv[:, np.newaxis]**2)
                     *np.sum([np.exp(2.0*np.pi*1.0J*(
                                 h[:, np.newaxis]*sym_op.trans_x(x, y) +
                                 k[:, np.newaxis]*sym_op.trans_y(x, y) +
                                 l[:, np.newaxis]*z[np.newaxis, :]))
                              for sym_op in self.bulk_sym], 0), 1)
        # Putting it all together
        fb = f_u/denom*delta_funcs
        return fb
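    # The denominator in calc_fb comes from summing the semi-infinite stack of
    # bulk unit cells as a geometric series, sum_n x**n = 1/(1-x), with
    # x = exp(-2*pi*i*(delta1*h+delta2*k+l))*exp(-alpha), where exp(-alpha)
    # models attenuation in each successive cell. A quick numeric check with
    # illustrative values:
    #
    # x = np.exp(-2.0*np.pi*1.0J*1.3)*np.exp(-0.01)
    # assert abs(sum(x**n for n in range(2000)) - 1.0/(1.0-x)) < 1e-6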
    def calc_rhos(self, x, y, z, sb=0.8):
        '''Calculate the electron density of the unit cell.
        Not working yet.
        '''
        px, py, pz, u, oc, el = self._surf_pars([self.domain['domain1A']['slab']])
        rhos = self._get_rho(el)
        rho = np.sum([np.sum([rho(self.unit_cell.dist(x, y, z,
                                                      sym_op.trans_x(xat, yat)%1.0,
                                                      sym_op.trans_y(xat, yat)%1.0,
                                                      zat),
                                  0.5*uat+0.5/sb**2, ocat)
                              for rho, xat, yat, zat, uat, ocat in
                              zip(rhos, px, py, pz, u, oc)], 0)
                      for sym_op in self.surface_sym], 0)
        return rho

    def _surf_pars(self, slabs):
        '''Extracts the necessary parameters for simulating the surface part
        '''
        # Extract the parameters we need; the star in zip(*...) transforms
        # the list elements to arguments
        xt, yt, zt, elt, ut, oct, ct = zip(*[slab._extract_values() for slab in slabs])
        # scale and shift the slabs with respect to each other
        cn = np.cumsum(np.r_[0, ct])[:-1]
        z = np.concatenate([zs*c_s+c_cum for zs, c_cum, c_s in zip(zt, cn, ct)])
        x = np.concatenate([xs+c_cum*self.delta1 for xs, c_cum, c_s in zip(xt, cn, ct)])
        y = np.concatenate([ys+c_cum*self.delta2 for ys, c_cum, c_s in zip(yt, cn, ct)])
        el = np.r_[elt]
        u = np.r_[ut]
        # Account for overlapping atoms
        oc = np.r_[oct]/float(len(self.surface_sym))
        return x, y, z, u, oc, el

    def _surf_pars_offspecular(self, slabs):
        '''Extracts the necessary parameters for simulating the surface part.
        The effect of interfacial molecules is not included in the structure
        factor for off-specular rods.
        '''
        # Extract the parameters we need; the star in zip(*...) transforms
        # the list elements to arguments
        xt, yt, zt, elt, ut, oct, ct = zip(*[slab._extract_values_offspecular() for slab in slabs])
        # scale and shift the slabs with respect to each other
        cn = np.cumsum(np.r_[0, ct])[:-1]
        z = np.concatenate([zs*c_s+c_cum for zs, c_cum, c_s in zip(zt, cn, ct)])
        x = np.concatenate([xs+c_cum*self.delta1 for xs, c_cum, c_s in zip(xt, cn, ct)])
        y = np.concatenate([ys+c_cum*self.delta2 for ys, c_cum, c_s in zip(yt, cn, ct)])
        el = np.r_[elt]
        u = np.r_[ut]
        # Account for overlapping atoms
        oc = np.r_[oct]/float(len(self.surface_sym))
        return x, y, z, u, oc, el

    def create_uc_output(self):
        '''Create atomic positions and such for output
        '''
        x, y, z, u, oc, el = self._surf_pars()
        ids = []
        [ids.extend(slab._extract_ids()) for slab in self.slabs]
        xout = np.array([])
        yout = np.array([])
        zout = np.array([])
        uout = np.array([])
        ocout = np.array([])
        elout = el[0:0].copy()
        idsout = []
        for sym_op in self.surface_sym:
            xout = np.r_[xout, sym_op.trans_x(x, y)]
            yout = np.r_[yout, sym_op.trans_y(x, y)]
            zout = np.r_[zout, z]
            uout = np.r_[uout, u]
            ocout = np.r_[ocout, oc]
            elout = np.r_[elout, el]
            idsout.extend(ids)
        return xout, yout, zout, uout, ocout, elout, idsout

    def _get_f(self, el, dinv):
        '''From the elements extract an array with atomic structure factors
        '''
        return _get_f(self.inst, el, dinv)

    def _get_rho(self, el):
        '''Returns the rho functions for all atoms in el
        '''
        return _get_rho(self.inst, el)

    def _fatom_eval(self, f, element, s):
        '''Smart (fast) evaluation of f_atom. Only evaluates f if not
        evaluated before.

        element - element string
        f - dictionary for lookup
        s - sintheta_over_lambda array
        '''
        return _fatom_eval(self.inst, f, element, s)

class UnitCell:
    '''Class containing the unit cell. Also allows for simple
    crystallographic computation of different properties.
    '''
    def __init__(self, a, b, c, alpha=90, beta=90, gamma=90):
        self.set_a(a)
        self.set_b(b)
        self.set_c(c)
        self.set_alpha(alpha)
        self.set_beta(beta)
        self.set_gamma(gamma)

    def set_a(self, a):
        self.a = a

    def set_b(self, b):
        self.b = b

    def set_c(self, c):
        self.c = c

    def set_alpha(self, alpha):
        self.alpha = alpha*np.pi/180.

    def set_beta(self, beta):
        self.beta = beta*np.pi/180.

    def set_gamma(self, gamma):
        self.gamma = gamma*np.pi/180.

    def vol(self):
        '''Calculate the volume of the unit cell in AA**3
        '''
        vol = self.a*self.b*self.c*np.sqrt(1 - np.cos(self.alpha)**2
                                           - np.cos(self.beta)**2 - np.cos(self.gamma)**2
                                           + 2*np.cos(self.alpha)*np.cos(self.beta)*np.cos(self.gamma))
        return vol

    def cart_coords(self, uc_x, uc_y, uc_z):
        '''Transform the uc coords uc_x, uc_y, uc_z to cartesian
        coordinates expressed in AA
        '''
        return (self.cart_coord_x(uc_x, uc_y, uc_z),
                self.cart_coord_y(uc_x, uc_y, uc_z),
                self.cart_coord_z(uc_x, uc_y, uc_z))

    def cart_coord_x(self, uc_x, uc_y, uc_z):
        '''Get the x-coord in the cart system
        '''
        return uc_x*self.a

    def cart_coord_y(self, uc_x, uc_y, uc_z):
        '''Get the y-coord in the cart system
        '''
        return uc_y*self.b

    def cart_coord_z(self, uc_x, uc_y, uc_z):
        '''Get the z-coord in the cart system
        '''
        return uc_z*self.c

    def dist(self, x1, y1, z1, x2, y2, z2):
        '''Calculate the distance in AA between the points (x1, y1, z1) and
        (x2, y2, z2). The coords have to be unit cell coordinates.
        NOTE: works only with orthogonal crystal systems!
        '''
        return np.sqrt(((x1-x2)*self.a)**2 + ((y1-y2)*self.b)**2 + ((z1-z2)*self.c)**2)

    def abs_hkl(self, h, k, l):
        '''Returns the absolute value of the (h,k,l) vector in units of
        1/AA. This is equal to the inverse lattice spacing 1/d_hkl.
        '''
        dinv = np.sqrt(((h/self.a*np.sin(self.alpha))**2
                        + (k/self.b*np.sin(self.beta))**2
                        + (l/self.c*np.sin(self.gamma))**2
                        + 2*k*l/self.b/self.c*(np.cos(self.beta)*np.cos(self.gamma) - np.cos(self.alpha))
                        + 2*l*h/self.c/self.a*(np.cos(self.gamma)*np.cos(self.alpha) - np.cos(self.beta))
                        + 2*h*k/self.a/self.b*(np.cos(self.alpha)*np.cos(self.beta) - np.cos(self.gamma)))
                       /(1 - np.cos(self.alpha)**2 - np.cos(self.beta)**2 - np.cos(self.gamma)**2
                         + 2*np.cos(self.alpha)*np.cos(self.beta)*np.cos(self.gamma)))
        return dinv
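# A small usage sketch for UnitCell.abs_hkl (cell parameters match the
# hematite cell set up in the test block at the bottom of this file):
#
# uc = UnitCell(5.038, 5.434, 7.3707, 90, 90, 90)
# h, k, l = np.array([0.]), np.array([0.]), np.array([2.])
# d_002 = 1.0/uc.abs_hkl(h, k, l)[0]   # ~3.685 AA, i.e. c/2 for this orthogonal cell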
class Slab:
    par_names = ['dx1','dx2','dx3','dx4','dy1','dy2','dy3','dy4',
                 'dz1','dz2','dz3','dz4','u','du','oc','doc','m']

    def __init__(self, name='', c=1.0, slab_oc=1.0, T_factor='u'):
        try:
            self.c = float(c)
        except:
            raise ValueError("Parameter c has to be a valid floating point number")
        try:
            self.slab_oc = float(slab_oc)
        except:
            raise ValueError("Parameter slab_oc has to be a valid floating point number")
        # Set the arrays to their default values
        self.x = np.array([], dtype=np.float64)
        self.y = np.array([], dtype=np.float64)
        self.z = np.array([], dtype=np.float64)
        self.dx1 = np.array([], dtype=np.float64)
        self.dx2 = np.array([], dtype=np.float64)
        self.dx3 = np.array([], dtype=np.float64)
        self.dx4 = np.array([], dtype=np.float64)
        self.dy1 = np.array([], dtype=np.float64)
        self.dy2 = np.array([], dtype=np.float64)
        self.dy3 = np.array([], dtype=np.float64)
        self.dy4 = np.array([], dtype=np.float64)
        self.dz1 = np.array([], dtype=np.float64)
        self.dz2 = np.array([], dtype=np.float64)
        self.dz3 = np.array([], dtype=np.float64)
        self.dz4 = np.array([], dtype=np.float64)
        self.u = np.array([], dtype=np.float64)
        self.oc = np.array([], dtype=np.float64)
        self.du = np.array([], dtype=np.float64)
        self.doc = np.array([], dtype=np.float64)
        self.m = np.array([], dtype=np.float64)
        self.id = np.array([], dtype=np.str)
        self.el = np.array([], dtype=np.str)
        self.T_factor = T_factor
        # TODO: Type checking and defaults!
        self.name = str(name)

    def copy(self):
        '''Returns a copy of the object.
        '''
        # T_factor of the copy must be 'u', no matter what the original used,
        # since the stored values have already been transferred to u
        cpy = Slab(c=self.c, slab_oc=self.slab_oc, T_factor=self.T_factor)
        for i in range(len(self.id)):
            cpy.add_atom(str(self.id[i]), str(self.el[i]),
                         self.x[i], self.y[i], self.z[i],
                         self.u[i], self.oc[i], self.m[i])
            cpy.dz1[-1] = self.dz1[i]
            cpy.dz2[-1] = self.dz2[i]
            cpy.dz3[-1] = self.dz3[i]
            cpy.dz4[-1] = self.dz4[i]
            cpy.dx1[-1] = self.dx1[i]
            cpy.dx2[-1] = self.dx2[i]
            cpy.dx3[-1] = self.dx3[i]
            cpy.dx4[-1] = self.dx4[i]
            cpy.dy1[-1] = self.dy1[i]
            cpy.dy2[-1] = self.dy2[i]
            cpy.dy3[-1] = self.dy3[i]
            cpy.dy4[-1] = self.dy4[i]
            cpy.du[-1] = self.du[i]
            cpy.doc[-1] = self.doc[i]
        return cpy

    def add_atom(self, id, element, x, y, z, u=0.0, oc=1.0, m=1.0):
        '''Add an atom to the slab.

        id - a unique id for this atom (string)
        element - the element of this atom, has to be found within the
                  scattering length table
        x, y, z - position in the asymmetric unit cell (floats)
        u - Debye-Waller parameter for the atom
        oc - occupancy of the atomic site
        '''
        if id in self.id:
            raise ValueError('The id %s is already defined in the slab'%(id))
        # TODO: Check the element as well...
        self.x = np.append(self.x, x)
        self.dx1 = np.append(self.dx1, 0.)
        self.dx2 = np.append(self.dx2, 0.)
        self.dx3 = np.append(self.dx3, 0.)
        self.dx4 = np.append(self.dx4, 0.)
        self.y = np.append(self.y, y)
        self.dy1 = np.append(self.dy1, 0.)
        self.dy2 = np.append(self.dy2, 0.)
        self.dy3 = np.append(self.dy3, 0.)
        self.dy4 = np.append(self.dy4, 0.)
        self.z = np.append(self.z, z)
        self.dz1 = np.append(self.dz1, 0.)
        self.dz2 = np.append(self.dz2, 0.)
        self.dz3 = np.append(self.dz3, 0.)
        self.dz4 = np.append(self.dz4, 0.)
        self.du = np.append(self.du, 0.)
        self.doc = np.append(self.doc, 0.)
        self.u = np.append(self.u, u)
        self.oc = np.append(self.oc, oc)
        self.m = np.append(self.m, m)
        self.id = np.append(self.id, id)
        self.el = np.append(self.el, str(element))
        item = len(self.id) - 1
        # Create the set and get functions dynamically
        for par in self.par_names:
            setattr(self, 'set' + id + par, self._make_set_func(par, item))
            setattr(self, 'get' + id + par, self._make_get_func(par, item))
        return AtomGroup(self, id)
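    # Usage sketch for the dynamically created accessors (illustrative id):
    #
    # slab = Slab()
    # ag = slab.add_atom('O1', 'O', 0.5, 0.5, 0.1, u=0.33)
    # slab.setO1dz1(0.02)   # generated 'set' + id + par function
    # slab.getO1dz1()       # -> 0.02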
    def insert_atom(self, index, id, element, x, y, z, u=0.0, oc=1.0, m=1.0):
        '''Insert an atom into the slab after position index.

        id - a unique id for this atom (string)
        element - the element of this atom, has to be found within the
                  scattering length table
        x, y, z - position in the asymmetric unit cell (floats)
        u - Debye-Waller parameter for the atom
        oc - occupancy of the atomic site
        '''
        if id in self.id:
            raise ValueError('The id %s is already defined in the slab'%(id))
        # TODO: Check the element as well...
        self.x = np.insert(self.x, [index+1], x)
        self.dx1 = np.insert(self.dx1, [index+1], 0.)
        self.dx2 = np.insert(self.dx2, [index+1], 0.)
        self.dx3 = np.insert(self.dx3, [index+1], 0.)
        self.dx4 = np.insert(self.dx4, [index+1], 0.)
        self.y = np.insert(self.y, [index+1], y)
        self.dy1 = np.insert(self.dy1, [index+1], 0.)
        self.dy2 = np.insert(self.dy2, [index+1], 0.)
        self.dy3 = np.insert(self.dy3, [index+1], 0.)
        self.dy4 = np.insert(self.dy4, [index+1], 0.)
        self.z = np.insert(self.z, [index+1], z)
        self.dz1 = np.insert(self.dz1, [index+1], 0.)
        self.dz2 = np.insert(self.dz2, [index+1], 0.)
        self.dz3 = np.insert(self.dz3, [index+1], 0.)
        self.dz4 = np.insert(self.dz4, [index+1], 0.)
        self.du = np.insert(self.du, [index+1], 0.)
        self.doc = np.insert(self.doc, [index+1], 0.)
        self.u = np.insert(self.u, [index+1], u)
        self.oc = np.insert(self.oc, [index+1], oc)
        self.m = np.insert(self.m, [index+1], m)
        self.id = np.insert(self.id, [index+1], id)
        self.el = np.insert(self.el, [index+1], str(element))
        item = len(self.id) - 1
        # Create the set and get functions dynamically
        for par in self.par_names:
            setattr(self, 'set' + id + par, self._make_set_func(par, item))
            setattr(self, 'get' + id + par, self._make_get_func(par, item))
        return AtomGroup(self, id)

    def del_atom(self, id):
        '''Remove atom identified with id
        '''
        if not id in self.id:
            raise ValueError('Can not remove atom with id %s - name does not exist'%(id))
        item = np.argwhere(self.id == id)[0][0]
        for par in self.par_names:
            for atom_id in self.id:
                delattr(self, 'set' + atom_id + par)
                delattr(self, 'get' + atom_id + par)
        if item < len(self.x) - 1:
            ar = getattr(self, 'id')
            setattr(self, 'id', np.r_[ar[:item], ar[item+1:]])
            ar = getattr(self, 'el')
            setattr(self, 'el', np.r_[ar[:item], ar[item+1:]])
            ar = getattr(self, 'x')
            setattr(self, 'x', np.r_[ar[:item], ar[item+1:]])
            ar = getattr(self, 'y')
            setattr(self, 'y', np.r_[ar[:item], ar[item+1:]])
            ar = getattr(self, 'z')
            setattr(self, 'z', np.r_[ar[:item], ar[item+1:]])
            for par in self.par_names:
                ar = getattr(self, par)
                setattr(self, par, np.r_[ar[:item], ar[item+1:]])
            # after deleting an atom the set/get functions must be rebuilt,
            # since the positions of the parameter values change
            for par in self.par_names:
                for atom_id in self.id:
                    setattr(self, 'set' + atom_id + par, self._make_set_func(par, np.where(self.id == atom_id)[0][0]))
                    setattr(self, 'get' + atom_id + par, self._make_get_func(par, np.where(self.id == atom_id)[0][0]))
        else:
            ar = getattr(self, 'id')
            setattr(self, 'id', ar[:-1])
            ar = getattr(self, 'el')
            setattr(self, 'el', ar[:-1])
            ar = getattr(self, 'x')
            setattr(self, 'x', ar[:-1])
            ar = getattr(self, 'y')
            setattr(self, 'y', ar[:-1])
            ar = getattr(self, 'z')
            setattr(self, 'z', ar[:-1])
            for par in self.par_names:
                ar = getattr(self, par)
                setattr(self, par, ar[:-1])
            for par in self.par_names:
                for atom_id in self.id:
                    setattr(self, 'set' + atom_id + par, self._make_set_func(par, np.where(self.id == atom_id)[0][0]))
                    setattr(self, 'get' + atom_id + par, self._make_get_func(par, np.where(self.id == atom_id)[0][0]))

    def find_atoms(self, expression):
        '''Find the atoms that satisfy the logical expression given in the
        string expression. Expression can also be a list or array of the
        same length as the number of atoms in the slab.
        Allowed variables in expression are: x, y, z, u, oc, id, el

        returns an AtomGroup
        '''
        if (type(expression) == type(np.array([])) or
                type(expression) == type(list([]))):
            if len(expression) != len(self.id):
                raise ValueError('The length of expression is wrong, it should match the number of atoms')
            ag = AtomGroup()
            [ag.add_atom(self, str(id)) for id, add in zip(self.id, expression) if add]
            return ag
        elif type(expression) == type(''):
            choose_list = [eval(expression) for x, y, z, u, oc, el, id in
                           zip(self.x, self.y, self.z, self.u, self.oc, self.el, self.id)]
            ag = AtomGroup()
            [ag.add_atom(self, str(name)) for name, add in zip(self.id, choose_list) if add]
            return ag
        else:
            raise ValueError('Expression has to be a string, array or list')

    def all_atoms(self):
        '''Puts all atoms in the slab into an AtomGroup.

        returns: AtomGroup
        '''
        return self.find_atoms([True]*len(self.id))

    def set_c(self, c):
        '''Set the out-of-plane extension of the slab. Note that this is in
        the UC coords defined in the corresponding sample.
        '''
        self.c = float(c)

    def get_c(self):
        '''Get the out-of-plane extension of the slab in UC coords.
        '''
        return self.c

    def set_oc(self, oc):
        '''Set a global occupation parameter for the entire slab; should be
        between 0 and 1. The real occupancy is this value multiplied by the
        occupancy of each atom.
        '''
        self.slab_oc = oc

    def get_oc(self):
        '''Get the global occupancy of the slab
        '''
        return self.slab_oc

    def __getitem__(self, id):
        '''Locate id in the slab with dictionary-style access.
        Returns an AtomGroup instance
        '''
        return AtomGroup(self, id)

    def __contains__(self, id):
        '''Makes it possible to check if id exists in this Slab with the in
        operator. It can also check whether all atoms in an AtomGroup belong
        to the slab.

        returns True or False
        '''
        if type(id) == type(''):
            return id in self.id
        elif isinstance(id, AtomGroup):
            return np.all([atid in self.id for atid in id.ids])
        else:
            raise ValueError('Can only check for membership for AtomGroups or string ids.')

    def _set_in(self, arr, pos, value):
        '''Sets a value in an array or list
        '''
        arr[pos] = value

    def _make_set_func(self, par, pos):
        '''Creates a set function for parameter par at position pos.
        Returns a function
        '''
        def set_par(val):
            getattr(self, par)[pos] = val
        return set_par

    def _make_get_func(self, par, pos):
        '''Creates a get function for member par at position pos.
        Returns a function.
        '''
        def get_par(scale=1.):
            return getattr(self, par)[pos]/scale
        return get_par

    def _extract_values(self):
        # B = 8*pi*pi*u*u in A^2, u in A
        if self.T_factor == 'B':
            return (self.x + self.dx1+self.dx2+self.dx3+self.dx4,
                    self.y + self.dy1+self.dy2+self.dy3+self.dy4,
                    self.z + self.dz1+self.dz2+self.dz3+self.dz4,
                    self.el, (self.u/(8*np.pi**2))**0.5+self.du,
                    (self.oc+self.doc)*self.m*self.slab_oc, self.c)
        elif self.T_factor == 'u':
            return (self.x + self.dx1+self.dx2+self.dx3+self.dx4,
                    self.y + self.dy1+self.dy2+self.dy3+self.dy4,
                    self.z + self.dz1+self.dz2+self.dz3+self.dz4,
                    self.el, (self.u)**0.5+self.du,
                    (self.oc+self.doc)*self.m*self.slab_oc, self.c)

    def _extract_values_offspecular(self):
        ids = self.id
        ii = None  # index of the first water molecule
        # water molecules are added at the very end and usually won't exceed 10
        for i in range(1, 30):
            if 'Os' not in ids[-i]:
                ii = len(ids) - i + 1
                break
            else:
                pass
        if self.T_factor == 'B':
            return (self.x[0:ii] + self.dx1[0:ii]+self.dx2[0:ii]+self.dx3[0:ii]+self.dx4[0:ii],
                    self.y[0:ii] + self.dy1[0:ii]+self.dy2[0:ii]+self.dy3[0:ii]+self.dy4[0:ii],
                    self.z[0:ii] + self.dz1[0:ii]+self.dz2[0:ii]+self.dz3[0:ii]+self.dz4[0:ii],
                    self.el[0:ii], (self.u[0:ii]/(8*np.pi**2))**0.5+self.du[0:ii],
                    (self.oc[0:ii]+self.doc[0:ii])*self.m[0:ii]*self.slab_oc, self.c)
        elif self.T_factor == 'u':
            return (self.x[0:ii] + self.dx1[0:ii]+self.dx2[0:ii]+self.dx3[0:ii]+self.dx4[0:ii],
                    self.y[0:ii] + self.dy1[0:ii]+self.dy2[0:ii]+self.dy3[0:ii]+self.dy4[0:ii],
                    self.z[0:ii] + self.dz1[0:ii]+self.dz2[0:ii]+self.dz3[0:ii]+self.dz4[0:ii],
                    self.el[0:ii], self.u[0:ii]+self.du[0:ii],
                    (self.oc[0:ii]+self.doc[0:ii])*self.m[0:ii]*self.slab_oc, self.c)

    def _extract_values2(self):
        return (self.x + self.dx1+self.dx2+self.dx3+self.dx4,
                self.y + self.dy1+self.dy2+self.dy3+self.dy4,
                self.z + self.dz1+self.dz2+self.dz3+self.dz4,
                self.el, self.u+self.du,
                (self.oc+self.doc)*self.m*self.slab_oc, self.c)

    def _extract_ids(self):
        '''Extract the ids of the atoms'''
        return [self.name + '.' + str(id) for id in self.id]
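# Note on _extract_values: with T_factor == 'B' the stored value is treated as
# the isotropic Debye-Waller factor B (A^2) and converted to a Gaussian width
# via B = 8*pi**2*u**2, i.e. u = (B/(8*pi**2))**0.5. A quick numeric check
# (illustrative value):
#
# B = 0.32
# u = (B/(8*np.pi**2))**0.5            # ~0.064 AA
# assert abs(8*np.pi**2*u**2 - B) < 1e-12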
class AtomGroup:
    par_names = ['dx', 'dy', 'dz', 'u', 'oc']

    def __init__(self, slab=None, id=None, matrix=[1,0,0,0,1,0,0,0,1]):
        self.ids = []
        self.slabs = []
        # Variables for composition ...
        self.comp = 1.0
        self.oc = 1.0
        self.sym = []
        if slab != None and id != None:
            self.add_atom(slab, id, matrix)

    def _set_func(self, par):
        '''Create a function that sets the atom parameter par for all atoms'''
        funcs = []
        # the id must be unique here, even across different slabs
        for i in range(len(self.ids)):
            # a change of dx, dy or dz can change dx, dy and dz at the same
            # time; to avoid overwriting, the changes go to the temporary dxn,
            # dyn and dzn slots. When the structure factor is calculated the
            # sums of dxn, dyn and dzn are added to x, y and z respectively.
            id = self.ids[i]
            if par == 'dx':
                funcx = getattr(self.slabs[i], 'set' + id + 'dx1')
                funcy = getattr(self.slabs[i], 'set' + id + 'dy1')
                funcz = getattr(self.slabs[i], 'set' + id + 'dz1')
                funcs.append([funcx, funcy, funcz])
            elif par == 'dy':
                funcx = getattr(self.slabs[i], 'set' + id + 'dx2')
                funcy = getattr(self.slabs[i], 'set' + id + 'dy2')
                funcz = getattr(self.slabs[i], 'set' + id + 'dz2')
                funcs.append([funcx, funcy, funcz])
            elif par == 'dz':
                funcx = getattr(self.slabs[i], 'set' + id + 'dx3')
                funcy = getattr(self.slabs[i], 'set' + id + 'dy3')
                funcz = getattr(self.slabs[i], 'set' + id + 'dz3')
                funcs.append([funcx, funcy, funcz])
            else:
                funcs.append(getattr(self.slabs[i], 'set' + id + par))

        def set_pars(val):
            # self.sym[i] holds the symmetry operation associated with atom i,
            # stored in the same order as the ids were added (which matches the
            # order of the symmetry operations in the sym files)
            for i in range(len(funcs)):
                if par == 'dx':
                    funcs[i][0](val*self.sym[i][0])
                    funcs[i][1](val*self.sym[i][1])
                    funcs[i][2](val*self.sym[i][2])
                elif par == 'dy':
                    funcs[i][0](val*self.sym[i][3])
                    funcs[i][1](val*self.sym[i][4])
                    funcs[i][2](val*self.sym[i][5])
                elif par == 'dz':
                    funcs[i][0](val*self.sym[i][6])
                    funcs[i][1](val*self.sym[i][7])
                    funcs[i][2](val*self.sym[i][8])
                else:
                    funcs[i](val)
        return set_pars

    def _get_func(self, par):
        '''Create a function that gets the atom parameter par for all atoms'''
        funcs = []
        for id, slab in zip(self.ids, self.slabs):
            if par == 'dx':
                funcs.append(getattr(slab, 'get' + id + 'dx1'))
            elif par == 'dy':
                funcs.append(getattr(slab, 'get' + id + 'dy2'))
            elif par == 'dz':
                funcs.append(getattr(slab, 'get' + id + 'dz3'))
            else:
                funcs.append(getattr(slab, 'get' + id + par))

        def get_pars():
            if par == 'dx':
                return np.mean([func(self.sym[funcs.index(func)][0] + 1.0e-30) for func in funcs])
            elif par == 'dy':
                return np.mean([func(self.sym[funcs.index(func)][4] + 1.0e-30) for func in funcs])
            elif par == 'dz':
                return np.mean([func(self.sym[funcs.index(func)][8] + 1.0e-30) for func in funcs])
            else:
                return np.mean([func() for func in funcs])
        return get_pars

    def update_setget_funcs(self):
        '''Update all the atomic set and get functions
        '''
        for par in self.par_names:
            setattr(self, 'set' + par, self._set_func(par))
            setattr(self, 'get' + par, self._get_func(par))
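    # Sketch of how the flattened 3x3 symmetry matrix routes a group move (the
    # rows feed dx1/dy1/dz1, dx2/dy2/dz2 and dx3/dy3/dz3 as in _set_func above,
    # ids are illustrative):
    #
    # ag = AtomGroup(slab, 'O1', matrix=[0,1,0, 1,0,0, 0,0,1])
    # ag.setdx(0.1)   # writes dx1=0.0, dy1=0.1, dz1=0.0 for O1, so an x move
    #                 # of the group becomes a y displacement of that atom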
    def add_atom(self, slab, id, matrix=[1,0,0,0,1,0,0,0,1]):
        '''Add an atom to the group.
        '''
        if not id in slab:
            raise ValueError('The id %s is not a member of the slab'%id)
        self.ids.append(id)
        self.slabs.append(slab)
        self.sym.append(matrix)
        self.update_setget_funcs()

    def _copy(self):
        '''Creates a copy of self that loses all connections to previously
        created composition couplings
        '''
        cpy = AtomGroup()
        cpy.ids = self.ids[:]
        cpy.slabs = self.slabs[:]
        cpy.sym = self.sym[:]
        cpy.update_setget_funcs()
        return cpy

    def comp_coupl(self, other, self_copy=False, exclusive=True):
        '''Method to create set-get methods to use compositions in the atom
        groups. Note that this does not affect the slabs' global occupancy.
        If self_copy is True the returned value will be a copy of self.
        If exclusive is True, all coupled methods are removed from the
        previous AtomGroups.
        '''
        if not type(self) == type(other):
            raise TypeError('To create a composition function both objects have to be of the type AtomGroup')
        if hasattr(other, '_setoc_'):
            raise AttributeError('The right hand side AtomGroup has already been coupled to another one before. Only one connection is allowed')
        if hasattr(self, '_setoc'):
            raise AttributeError('The left hand side AtomGroup has already been coupled to another one before. Only one connection is allowed')
        if self_copy:
            s = self._copy()
        else:
            s = self

        def set_comp(comp):
            s.comp = float(comp)
            s._setoc(comp*s.oc)
            other._setoc_((1.0 - comp)*s.oc)

        def set_oc(oc):
            s.oc = float(oc)
            s._setoc(s.comp*s.oc)
            other._setoc_((1 - s.comp)*s.oc)

        def get_comp():
            return s.comp

        def get_oc():
            return s.oc

        # Functions to couple the other parameters, set
        def create_set_func(par):
            sf_set = getattr(s, 'set' + par)
            of_set = getattr(other, 'set' + par)
            def _set_func(val):
                sf_set(val)
                of_set(val)
            return _set_func

        # Functions to couple the other parameters, get
        def create_get_func(par):
            sf_get = getattr(s, 'get' + par)
            of_get = getattr(other, 'get' + par)
            def _get_func():
                return (sf_get() + of_get())/2
            return _get_func

        # Do the coupling for all parameters except the occupancies
        if exclusive:
            for par in s.par_names:
                if not str(par) == 'oc':
                    setattr(s, 'set' + par, create_set_func(par))
                    setattr(s, 'get' + par, create_get_func(par))
        # Create new set and get methods for the composition
        setattr(s, 'setcomp', set_comp)
        setattr(s, 'getcomp', get_comp)
        # Store the original setoc safely for future use
        setattr(s, '_setoc', s.setoc)
        setattr(other, '_setoc_', getattr(other, 'setoc'))
        setattr(s, 'setoc', set_oc)
        setattr(s, 'getoc', get_oc)
        # Now remove all the coupled attributes from other.
        if exclusive:
            for par in s.par_names:
                delattr(other, 'set' + par)
        s.setcomp(1.0)
        return s

    def __xor__(self, other):
        '''Method to create set-get methods to use compositions in the atom
        groups. Note that this does not affect the slabs' global occupancy.
        Note that the first element (left hand side of ^) will be copied and
        lose all its previous connections. All the move methods that are not
        coupled will be removed.
        '''
        return self.comp_coupl(other, self_copy=True, exclusive=True)

    def __ixor__(self, other):
        '''Method to create set-get methods to use compositions in the atom
        groups. Note that this does not affect the slabs' global occupancy.
        All the move methods that are not coupled will be removed.
        '''
        self.comp_coupl(other, exclusive=True)
        return self

    def __or__(self, other):
        '''Method to create set-get methods to use compositions in the atom
        groups. Note that this does not affect the slabs' global occupancy.
        Note that the first element (left hand side of |) will be copied and
        lose all its previous connections.
        '''
        return self.comp_coupl(other, self_copy=True, exclusive=False)

    def __ior__(self, other):
        '''Method to create set-get methods to use compositions in the atom
        groups. Note that this does not affect the slabs' global occupancy.
        '''
        self.comp_coupl(other, exclusive=False)
        return self

    def __add__(self, other):
        '''Adds two AtomGroups together
        '''
        if not type(other) == type(self):
            raise TypeError('Adding a wrong type to an AtomGroup; it has to be an AtomGroup')
        ids = self.ids + other.ids
        slabs = self.slabs + other.slabs
        out = AtomGroup()
        [out.add_atom(slab, id) for slab, id in zip(slabs, ids)]
        s = self

        def set_oc(oc):
            s.oc = float(oc)
            s.setoc(s.oc)
            other.setoc(s.oc)

        def get_oc():
            return s.oc

        setattr(out, 'setoc', set_oc)
        setattr(out, 'getoc', get_oc)
        return out
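# Usage sketch for the composition coupling (the groups below are
# illustrative, assuming two slabs whose atoms share one site):
#
# ag_pb = slab_A.all_atoms()
# ag_sb = slab_B.all_atoms()
# ag = ag_pb ^ ag_sb    # couple; ag_pb is copied, ag_sb loses its setters
# ag.setcomp(0.3)       # 30% of the occupancy to ag_pb atoms, 70% to ag_sb
# ag.setoc(0.8)         # overall site occupancy, still split 0.3/0.7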
class Instrument:
    '''Class that keeps track of instrument settings.
    '''
    geometries = ['alpha_in fixed', 'alpha_in eq alpha_out', 'alpha_out fixed']

    def __init__(self, wavel, alpha, geom='alpha_in fixed', flib=f, rholib=rho):
        '''Inits the instrument with default parameters
        '''
        self.flib = flib
        self.rholib = rholib
        self.set_wavel(wavel)
        self.set_geometry(geom)
        self.alpha = alpha
        self.inten = 1.0

    def set_inten(self, inten):
        '''Set the incoming intensity
        '''
        self.inten = inten

    def get_inten(self):
        '''Retrieves the intensity
        '''
        return self.inten

    def set_wavel(self, wavel):
        '''Set the wavelength in AA
        '''
        try:
            self.wavel = float(wavel)
            self.flib.set_wavelength(wavel)
            self.rholib.set_wavelength(wavel)
        except ValueError:
            raise ValueError('%s is not a valid float number needed for the wavelength'%(wavel))

    def get_wavel(self):
        '''Returns the wavelength in AA
        '''
        return self.wavel

    def set_energy(self, energy):
        '''Set the energy in keV
        '''
        try:
            self.set_wavel(12.39842/float(energy))
        except ValueError:
            raise ValueError('%s is not a valid float number needed for the energy'%(energy))

    def get_energy(self):
        '''Returns the photon energy in keV
        '''
        return 12.39842/self.wavel

    def set_alpha(self, alpha):
        '''Sets the fixed angle. The meaning of this angle depends on the
        geometry parameter:
        geom = "alpha_in fixed": alpha = alpha_in
        geom = "alpha_in eq alpha_out": alpha = alpha_in = alpha_out
        geom = "alpha_out fixed": alpha = alpha_out
        '''
        self.alpha = alpha

    def get_alpha(self):
        '''Gets the fixed angle, see set_alpha.
        '''
        return self.alpha

    def set_geometry(self, geom):
        '''Set the measurement geometry.
        Should be one of the items in Instrument.geometries
        '''
        try:
            self.geom = self.geometries.index(geom)
        except ValueError:
            raise ValueError('The geometry %s does not exist, please choose one of the following:\n%s'%(geom, self.geometries))

    def set_flib(self, flib):
        '''Set the structure factor library
        '''
        self.flib = flib

    def set_rholib(self, rholib):
        '''Set the rho library (electron density shape of the atoms)
        '''
        self.rholib = rholib

class SymTrans:
    def __init__(self, P=[[1, 0], [0, 1]], t=[0, 0]):
        # TODO: Check size of arrays!
        self.P = np.array(P)
        self.t = np.array(t)

    def trans_x(self, x, y):
        '''Transformed x coord
        '''
        return self.P[0][0]*x + self.P[0][1]*y + self.t[0]

    def trans_y(self, x, y):
        '''Transformed y coord
        '''
        return self.P[1][0]*x + self.P[1][1]*y + self.t[1]

    def apply_symmetry(self, x, y):
        return np.dot(self.P, np.c_[x, y]) + self.t

#==============================================================================
# Utility functions
def scale_sim(data, sim_list, scale_func=None):
    '''Scale the data according to a minimization of
    sum (data - I_list)**2
    '''
    numerator = sum([(data[i].y*sim_list[i]).sum()
                     for i in range(len(data)) if data[i].use])
    denominator = sum([(sim_list[i]**2).sum()
                       for i in range(len(data)) if data[i].use])
    scale = numerator/denominator
    print scale
    scaled_sim_list = [sim*scale for sim in sim_list]
    if not scale_func == None:
        scale_func(scale)
    return scaled_sim_list

def scale_sqrt_sim(data, sim_list, scale_func=None):
    '''Scale the data according to a minimization of
    sum (sqrt(data) - sqrt(I_list))**2
    '''
    numerator = sum([(np.sqrt(data[i].y*sim_list[i])).sum()
                     for i in range(len(data)) if data[i].use])
    denominator = sum([(sim_list[i]).sum()
                       for i in range(len(data)) if data[i].use])
    scale = numerator/denominator
    scaled_sim_list = [sim*scale**2 for sim in sim_list]
    if not scale_func == None:
        scale_func(scale)
    return scaled_sim_list

## def scale_log_sim(data, sim_list):
##     '''Scale the data according to a minimization of
##     sum (log(data) - log(I_list))**2
##     '''
##     numerator = sum([(np.log10(data[i].y)*np.log10(sim_list[i])).sum()
##                      for i in range(len(data)) if data[i].use])
##     denominator = sum([(np.log10(sim_list[i])**2).sum()
##                        for i in range(len(data)) if data[i].use])
##     scale = numerator/denominator
##     print scale
##     scaled_sim_list = [sim*(10**-scale) for sim in sim_list]
##     return scaled_sim_list

def _get_f(inst, el, dinv):
    '''From the elements extract an array with atomic structure factors
    '''
    fdict = {}
    f = np.transpose(np.array([_fatom_eval(inst, fdict, elem, dinv/2.0)
                               for elem in el], dtype=np.complex128))
    return f

def _get_rho(inst, el):
    '''Returns the rho functions for all atoms in el
    '''
    rhos = [getattr(inst.rholib, elem) for elem in el]
    return rhos
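# The scale factor in scale_sim above minimizes sum((data - scale*sim)**2);
# setting the derivative with respect to scale to zero gives
# scale = sum(data*sim)/sum(sim**2). A quick numeric check with illustrative
# arrays:
#
# y = np.array([2., 4., 6.]); sim = np.array([1., 2., 3.])
# scale = np.sum(y*sim)/np.sum(sim**2)   # -> 2.0, and y - scale*sim == 0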
def _fatom_eval(inst, f, element, s):
    '''Smart (fast) evaluation of f_atom. Only evaluates f if not
    evaluated before.

    element - element string
    f - dictionary for lookup
    s - sintheta_over_lambda array
    '''
    try:
        fret = f[element]
    except KeyError:
        fret = getattr(inst.flib, element)(s)
        f[element] = fret
    return fret

#=============================================================================
if __name__ == '__main__':
    import models.sxrd_test5_sym_new_test_new66_2 as model
    from models.utils import UserVars
    import numpy as np
    from operator import mul
    from numpy.linalg import inv

    class domain_creator():
        def __init__(self, ref_domain, id_list, terminated_layer=0, domain_N=1, new_var_module=None, z_shift=0.):
            # id_list is a list of ids in the order of ref_domain; terminated_layer is the
            # index of the layer to be considered for termination; domain_N is the index
            # for this specific domain; new_var_module is a UserVars module used in
            # set_new_vars
            self.ref_domain = ref_domain
            self.id_list = id_list
            self.terminated_layer = terminated_layer
            self.domain_N = domain_N
            self.new_var_module = new_var_module
            self.z_shift = z_shift
            self.domain_A, self.domain_B = self.create_equivalent_domains()

        def create_equivalent_domains(self):
            new_domain_A = self.ref_domain.copy()
            new_domain_B = self.ref_domain.copy()
            for id in self.id_list[:self.terminated_layer]:
                if id != []:
                    new_domain_A.del_atom(id)
            # the number 5 here is crystal specific; this is the case for hematite
            for id in self.id_list[:self.terminated_layer + 5]:
                new_domain_B.del_atom(id)
            return new_domain_A, new_domain_B
        def add_sorbates(self, domain, attach_atm_id=[['id1','id2']], el=['Pb'], id=[1], O_id=['_A'], r1=0.1, r2=None, alpha1=1.7, alpha2=None):
            # this function can add multiple sorbates
            # domain is the slab under consideration
            # attach_atm_id is a (2 by n) list of ids to be attached by adsorbates
            # el is the list of element symbols for the first adsorbates
            # id is the list of index numbers appended to the element symbol to form the id
            # O_id is a list; each member is appended at the end of the ids of the other adsorbates
            # r1, alpha1 belong to the first adsorbates; r2, alpha2 to the other adsorbates
            for i in range(len(el)):
                point1_x = domain.x[np.where(domain.id == attach_atm_id[i][0])[0][0]]
                point1_y = domain.y[np.where(domain.id == attach_atm_id[i][0])[0][0]]
                point1_z = domain.z[np.where(domain.id == attach_atm_id[i][0])[0][0]]
                point2_x = domain.x[np.where(domain.id == attach_atm_id[i][1])[0][0]]
                point2_y = domain.y[np.where(domain.id == attach_atm_id[i][1])[0][0]]
                point2_z = domain.z[np.where(domain.id == attach_atm_id[i][1])[0][0]]
                point1 = [point1_x, point1_y, point1_z]
                point2 = [point2_x, point2_y, point2_z]
                point_sorbate = self._cal_xyz_single(point1, point2, r1, alpha1)
                domain.add_atom(id=el[i]+str(id[i]), element=el[i], x=point_sorbate[0], y=point_sorbate[1], z=point_sorbate[2], u=1.)
                if r2 != None:
                    point_sorbate_1, point_sorbate_2 = self._cal_xyz_double(point_sorbate, r2, alpha2)
                    domain.add_atom(id='Oi_1'+str(O_id[i]), element='O', x=point_sorbate_1[0], y=point_sorbate_1[1], z=point_sorbate_1[2], u=1.)
                    domain.add_atom(id='Oi_2'+str(O_id[i]), element='O', x=point_sorbate_2[0], y=point_sorbate_2[1], z=point_sorbate_2[2], u=1.)
            #return domain

        def add_oxygen_pair(self, domain, O_id, ref_point, r, alpha):
            # add a single oxygen pair around ref_point, which does not stand for an atom;
            # the xyz of this point is set by three fitting parameters. O_id is appended
            # at the end of each oxygen id
            x_shift = r*np.cos(alpha)
            y_shift = r*np.sin(alpha)
            point1 = ref_point[0]-x_shift, ref_point[1]-y_shift, ref_point[2]
            point2 = ref_point[0]+x_shift, ref_point[1]+y_shift, ref_point[2]
            domain.add_atom(id='Os_1'+str(O_id), element='O', x=point1[0], y=point1[1], z=point1[2], u=1.)
            domain.add_atom(id='Os_2'+str(O_id), element='O', x=point2[0], y=point2[1], z=point2[2], u=1.)

        def updata_oxygen_pair(self, domain, ids, ref_point, r, alpha):
            # update the position information of the oxygen pair; to be dropped inside
            # the sim function
            print 'sensor', np.where(domain.id == ids[0]), np.where(domain.id == ids[0])[0]
            index_1 = np.where(domain.id == ids[0])[0][0]
            index_2 = np.where(domain.id == ids[1])[0][0]
            x_shift = r*np.cos(alpha)
            y_shift = r*np.sin(alpha)
            domain.x[index_1] = ref_point[0]+x_shift
            domain.y[index_1] = ref_point[1]+y_shift
            domain.z[index_1] = ref_point[2]
            domain.x[index_2] = ref_point[0]-x_shift
            domain.y[index_2] = ref_point[1]-y_shift
            domain.z[index_2] = ref_point[2]

        def group_sorbates_2(self, domain, attach_atm_id, ids_to_be_attached, r, alpha, beta, gamma):
            # update the sorbate positions; to be dropped inside the sim function.
            # Same as group_sorbates except more freedom for the attached sorbates:
            # r is the distance between Pb and one of the O in this case; alpha is half
            # the opening angle between the sorbates; beta is the angle between the
            # normal line and the plane formed by the three sorbates; gamma is the angle
            # between the x axis and the first edge in the two-dimensional space.
            # alpha in (0, pi/2), beta in (0, pi/2), gamma in (0, 2pi)
            index_ref = np.where(domain.id == attach_atm_id)[0][0]
            index_1 = np.where(domain.id == ids_to_be_attached[0])[0][0]
            index_2 = np.where(domain.id == ids_to_be_attached[1])[0][0]
            ref_x = domain.x[index_ref]+domain.dx1[index_ref]+domain.dx2[index_ref]+domain.dx3[index_ref]
            ref_y = domain.y[index_ref]+domain.dy1[index_ref]+domain.dy2[index_ref]+domain.dy3[index_ref]
            ref_z = domain.z[index_ref]+domain.dz1[index_ref]+domain.dz2[index_ref]+domain.dz3[index_ref]
            z_shift = r*np.cos(alpha)*np.cos(beta)
            # r1 is the edge length of the triangle inscribed in the circle;
            # alpha1 is the half opening angle of that triangle
            r1 = (r**2-z_shift**2)**0.5
            alpha1 = np.arcsin(r*np.sin(alpha)/r1)
            point1_x_shift = r1*np.cos(gamma)
            point1_y_shift = r1*np.sin(gamma)
            point2_x_shift = r1*np.cos(gamma+2.*alpha1)
            point2_y_shift = r1*np.sin(gamma+2.*alpha1)
            domain.x[index_1] = ref_x+point1_x_shift
            domain.y[index_1] = ref_y+point1_y_shift
            domain.z[index_1] = ref_z+z_shift
            domain.x[index_2] = ref_x+point2_x_shift
            domain.y[index_2] = ref_y+point2_y_shift
            domain.z[index_2] = ref_z+z_shift

        def group_sorbates(self, domain, attach_atm_id, sorbate_ids, r1, alpha1, z_shift):
            # group the oxygen pair to the specified adsorbate, e.g.
            # attach_atm_id='Pb1', sorbate_ids=[]
            index_ref = np.where(domain.id == attach_atm_id)[0][0]
            index_1 = np.where(domain.id == sorbate_ids[0])[0][0]
            index_2 = np.where(domain.id == sorbate_ids[1])[0][0]
            ref_x = domain.x[index_ref]+domain.dx1[index_ref]+domain.dx2[index_ref]+domain.dx3[index_ref]
            ref_y = domain.y[index_ref]+domain.dy1[index_ref]+domain.dy2[index_ref]+domain.dy3[index_ref]
            ref_z = domain.z[index_ref]+domain.dz1[index_ref]+domain.dz2[index_ref]+domain.dz3[index_ref]
            O1_point, O2_point = self._cal_xyz_double(ref_point=[ref_x, ref_y, ref_z], r=r1, alpha=alpha1, z_shift=z_shift)
            domain.x[index_1], domain.y[index_1], domain.z[index_1] = O1_point[0], O1_point[1], O1_point[2]
            domain.x[index_2], domain.y[index_2], domain.z[index_2] = O2_point[0], O2_point[1], O2_point[2]

        def updata_sorbates(self, domain, id1, r1, alpha1, z_shift, attach_atm_id=['id1','id2'], id2=[], r2=None, alpha2=None):
            # old version of updating, with less freedom for the Pb sorbates;
            # group all sorbates to the first-layer oxygen pair.
            # domain is the slab under consideration
            # id1 is the id of the first adsorbate (Pb); r1 is a positive value and
            # alpha1 an angle below pi
            # attach_atm_id is a list of ids of the first atoms (oxygens)
            # id2 is a list of two paired adsorbates; r2 is a positive value and
            # alpha2 an angle below pi
            index_1 = np.where(domain.id == attach_atm_id[0])[0][0]
            index_2 = np.where(domain.id == attach_atm_id[1])[0][0]
            point1_x = domain.x[index_1]+domain.dx1[index_1]+domain.dx2[index_1]+domain.dx3[index_1]
            point1_y = domain.y[index_1]+domain.dy1[index_1]+domain.dy2[index_1]+domain.dy3[index_1]
            point1_z = domain.z[index_1]+domain.dz1[index_1]+domain.dz2[index_1]+domain.dz3[index_1]
            point2_x = domain.x[index_2]+domain.dx1[index_2]+domain.dx2[index_2]+domain.dx3[index_2]
            point2_y = domain.y[index_2]+domain.dy1[index_2]+domain.dy2[index_2]+domain.dy3[index_2]
            point2_z = domain.z[index_2]+domain.dz1[index_2]+domain.dz2[index_2]+domain.dz3[index_2]
            point1 = [point1_x, point1_y, point1_z]
            point2 = [point2_x, point2_y, point2_z]
            point_sorbate = self._cal_xyz_single(point1, point2, r1, alpha1)
            domain.x[np.where(domain.id == id1)[0][0]] = point_sorbate[0]
            domain.y[np.where(domain.id == id1)[0][0]] = point_sorbate[1]
            domain.z[np.where(domain.id == id1)[0][0]] = point_sorbate[2]
            if r2 != None:
                point_sorbate_1, point_sorbate_2 = self._cal_xyz_double(point_sorbate, r2, alpha2, z_shift)
                domain.x[np.where(domain.id == id2[0])[0][0]] = point_sorbate_1[0]
                domain.y[np.where(domain.id == id2[0])[0][0]] = point_sorbate_1[1]
                domain.z[np.where(domain.id == id2[0])[0][0]] = point_sorbate_1[2]
                domain.x[np.where(domain.id == id2[1])[0][0]] = point_sorbate_2[0]
                domain.y[np.where(domain.id == id2[1])[0][0]] = point_sorbate_2[1]
                domain.z[np.where(domain.id == id2[1])[0][0]] = point_sorbate_2[2]
            #return domain

        def _cal_xyz_single(self, point1, point2, r, alpha):
            # point1=[x1,y1,z1], point2=[x2,y2,z2]; r is a value, alpha an angle below pi
            slope_pt1_pt2 = (point1[1]-point2[1])/(point1[0]-point2[0])
            slope_new1 = -1./slope_pt1_pt2
            cent_point = [(point1[0]+point2[0])/2., (point1[1]+point2[1])/2.]
            dist_pt12 = ((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)**0.5
            tan_theta = r*np.cos(alpha)/(dist_pt12/2.)
            slope_new2 = (slope_pt1_pt2+tan_theta)/(1.-slope_pt1_pt2*tan_theta)
            # slope_new1 and cent_point define one line equation;
            # slope_new2 and point2 define another line equation
            A = np.array([[-slope_new1, 1.], [-slope_new2, 1.]])
            C = np.array([cent_point[1]-slope_new1*cent_point[0], point2[1]-slope_new2*point2[0]])
            xy = np.dot(inv(A), C)
            return [xy[0], xy[1], point1[2]+r*np.sin(alpha)]

        def _cal_xyz_double(self, ref_point, r, alpha, z_shift=0.1):
            # ref_point=[x1,y1,z1]; r is a positive value; alpha is an angle below pi;
            # z_shift is a positive value representing the shift in the z direction
            x_shift = r*np.cos(alpha)
            y_shift = r*np.sin(alpha)
            new_point1 = [ref_point[0]+x_shift, ref_point[1]+y_shift, ref_point[2]+z_shift]
            new_point2 = [2.*ref_point[0]-new_point1[0], 2.*ref_point[1]-new_point1[1], ref_point[2]+z_shift]
            return new_point1, new_point2

        def grouping_sequence_layer(self, domain=[], first_atom_id=[], sym_file={}, id_match_in_sym={}, layers_N=1, use_sym=False):
            # group the atoms of the same layer in one domain together with the
            # associated atoms in its chemically equivalent domain, so 4 atoms group
            # together when two chemically equivalent domains are considered.
            # domain is a list of two chemically equivalent domains
            # first_atom_id is a list of the first ids in the id arrays of the two domains
            # sym_file is a library of symmetry file names keyed by element symbol
            # id_match_in_sym is a library of ids whose order matches the symmetry
            # operations in the associated sym file
            # layers_N is the number of layers considered for the grouping operation
            # use_sym is a flag choosing the shifting rule (on a symmetry basis or not)
            atm_gp_list = []
            for i in range(layers_N):
                index_1 = np.where(domain[0].id == first_atom_id[0])[0][0]+i*2
                temp_atm_gp = model.AtomGroup(slab=domain[0], id=str(domain[0].id[index_1]), id_in_sym_file=id_match_in_sym[str(domain[0].el[index_1])], use_sym=use_sym, filename=sym_file[str(domain[0].el[index_1])])
                temp_atm_gp.add_atom(domain[0], str(domain[0].id[index_1+1]))
                index_2 = np.where(domain[1].id == first_atom_id[1])[0][0]+i*2
                temp_atm_gp.add_atom(domain[1], str(domain[1].id[index_2]))
                temp_atm_gp.add_atom(domain[1], str(domain[1].id[index_2+1]))
                atm_gp_list.append(temp_atm_gp)
            return atm_gp_list

        def grouping_discrete_layer(self, domain=[], atom_ids=[], sym_file=None, id_match_in_sym=[], use_sym=False):
            atm_gp = model.AtomGroup(id_in_sym_file=id_match_in_sym, filename=sym_file, use_sym=use_sym)
            for i in range(len(domain)):
                atm_gp.add_atom(domain[i], atom_ids[i])
            return atm_gp

        def scale_opt(self, atm_gp_list, scale_factor, sign_values=None, flag='u', ref_v=1.):
            # scale a parameter from the first-layer atom to deeper-layer atoms;
            # dx, dy, dz and u usually decrease inward while oc decreases outward.
            # Note that ref_v for oc and u is the value of the innermost atom, while
            # for the other parameters ref_v is the value of the outermost atom.
            # atm_gp_list is the list of atom groups considered in the scaling operation
            # scale_factor is a list of scale factors; note that the accumulated
            # product is used for the scaling
            # flag is the parameter symbol and ref_v the reference value to start from
            if sign_values == None:
                for i in range(len(atm_gp_list)):
                    atm_gp_list[i]._set_func(flag)(ref_v*reduce(mul, scale_factor[:i+1]))
            else:
                for i in range(len(atm_gp_list)):
                    atm_gp_list[i]._set_func(flag)(ref_v*sign_values[i]*reduce(mul, scale_factor[:i+1]))

        def set_new_vars(self, head_list=['u_Fe_'], N_list=[2]):
            # set new fit variables; head_list is a list of heading texts for the new
            # variables, N_list the number of variables created for each heading
            for head, N in zip(head_list, N_list):
                for i in range(N):
                    getattr(self.new_var_module, 'new_var')(head+str(i+1), 1.)
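    # Usage sketch for domain_creator (hypothetical ids; UserVars usage is
    # assumed to follow the models.utils convention of new_var(name, value)):
    #
    # creator = domain_creator(ref_domain=domain0, id_list=list(domain0.id),
    #                          terminated_layer=2, domain_N=1,
    #                          new_var_module=UserVars(), z_shift=0.)
    # creator.set_new_vars(head_list=['u_Fe_'], N_list=[2])  # makes u_Fe_1, u_Fe_2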
####################################################################
unitcell = model.UnitCell(5.038, 5.434, 7.3707, 90, 90, 90)
inst = model.Instrument(wavel = .833, alpha = 2.0)
bulk = model.Slab(T_factor='B')
domain0 = model.Slab(c = 1.0,T_factor='B')

bulk.add_atom( "Fe2", "Fe", 0.00000e+00 , 8.30000e-01 , 8.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe3", "Fe", 5.00000e-01 , 3.30000e-01 , 8.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe4", "Fe", 5.00000e-01 , 8.80000e-01 , 6.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe6", "Fe", 0.00000e+00 , 3.79000e-01 , 6.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe8", "Fe", 0.00000e+00 , 7.61000e-01 , 3.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe9", "Fe", 5.00000e-01 , 2.60000e-01 , 3.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe10", "Fe", 5.00000e-01 , 8.10000e-01 , 1.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "Fe12", "Fe", 0.00000e+00 , 3.10000e-01 , 1.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O1", "O", 6.53000e-01 , 9.73000e-01 , 9.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O2", "O", 8.47000e-01 , 4.73000e-01 , 9.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O3", "O", 3.06000e-01 , 6.05000e-01 , 7.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O4", "O", 1.94000e-01 , 1.04000e-01 , 7.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O5", "O", 8.47000e-01 , 7.37000e-01 , 5.97000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O6", "O", 6.53000e-01 , 2.36000e-01 , 5.97000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O7", "O", 3.47000e-01 , 9.04000e-01 , 4.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O8", "O", 1.53000e-01 , 4.03000e-01 , 4.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O9", "O", 6.94000e-01 , 5.35000e-01 , 2.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O10", "O", 8.06000e-01 , 3.50000e-02 , 2.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O11", "O", 1.53000e-01 , 6.67000e-01 , 9.70000e-02 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
bulk.add_atom( "O12", "O", 3.47000e-01 , 1.67000e-01 , 9.70000e-02 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )

#domain0 is a reference domain; the atoms are ordered according to height (z values)
#it is a surface superstructure built by stacking the surface slab on the bulk slab, with the repeat vector accounted for
domain0.add_atom( "O1_1_0", "O", 6.53000e-01 , 1.11210e+00 , 1.90300e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_2_0", "O", 8.47000e-01 , 6.12100e-01 , 1.90300e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_2_0", "Fe", 0.00000e+00 , 9.69100e-01 , 1.85500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_3_0", "Fe", 5.00000e-01 , 4.69100e-01 , 1.85500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_3_0", "O", 3.06000e-01 , 7.44100e-01 , 1.75000e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_4_0", "O", 1.94000e-01 , 2.43100e-01 , 1.75000e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_4_0", "Fe", 5.00000e-01 , 1.01910e+00 , 1.64500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_6_0", "Fe", 0.00000e+00 , 5.18100e-01 , 1.64500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_5_0", "O", 8.47000e-01 , 8.76100e-01 , 1.59700e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_6_0", "O", 6.53000e-01 , 3.75100e-01 , 1.59700e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_7_0", "O", 3.47000e-01 , 1.04310e+00 , 1.40300e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_8_0", "O", 1.53000e-01 , 5.42100e-01 , 1.40300e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_8_0", "Fe", 0.00000e+00 , 9.00100e-01 , 1.35500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_9_0", "Fe", 5.00000e-01 , 3.99100e-01 , 1.35500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_9_0", "O", 6.94000e-01 , 6.74100e-01 , 1.25000e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_10_0", "O", 8.06000e-01 , 1.74100e-01 , 1.25000e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_10_0", "Fe", 5.00000e-01 , 9.49100e-01 , 1.14500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe1_12_0", "Fe", 0.00000e+00 , 4.49100e-01 , 1.14500e+00 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_11_0", "O", 1.53000e-01 , 8.06100e-01 , 1.09700e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_12_0", "O", 3.47000e-01 , 3.06100e-01 , 1.09700e+00 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O1_0", "O", 6.53000e-01 , 9.73000e-01 , 9.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O2_0", "O", 8.47000e-01 , 4.73000e-01 , 9.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe2_0", "Fe", 0.00000e+00 , 8.30000e-01 , 8.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe3_0", "Fe", 5.00000e-01 , 3.30000e-01 , 8.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O3_0", "O", 3.06000e-01 , 6.05000e-01 , 7.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O4_0", "O", 1.94000e-01 , 1.04000e-01 , 7.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe4_0", "Fe", 5.00000e-01 , 8.80000e-01 , 6.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe6_0", "Fe", 0.00000e+00 , 3.79000e-01 , 6.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O5_0", "O", 8.47000e-01 , 7.37000e-01 , 5.97000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O6_0", "O", 6.53000e-01 , 2.36000e-01 , 5.97000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O7_0", "O", 3.47000e-01 , 9.04000e-01 , 4.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O8_0", "O", 1.53000e-01 , 4.03000e-01 , 4.03000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe8_0", "Fe", 0.00000e+00 , 7.61000e-01 , 3.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe9_0", "Fe", 5.00000e-01 , 2.60000e-01 , 3.55000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O9_0", "O", 6.94000e-01 , 5.35000e-01 , 2.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O10_0", "O", 8.06000e-01 , 3.50000e-02 , 2.50000e-01 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe10_0", "Fe", 5.00000e-01 , 8.10000e-01 , 1.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "Fe12_0", "Fe", 0.00000e+00 , 3.10000e-01 , 1.45000e-01 , 3.20000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O11_0", "O", 1.53000e-01 , 6.67000e-01 , 9.70000e-02 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )
domain0.add_atom( "O12_0", "O", 3.47000e-01 , 1.67000e-01 , 9.70000e-02 , 3.30000e-01 , 1.00000e+00 , 1.00000e+00 )

#id list according to the order in the reference domain
ref_id_list=["O1_1_0","O1_2_0","Fe1_2_0","Fe1_3_0","O1_3_0","O1_4_0","Fe1_4_0","Fe1_6_0","O1_5_0","O1_6_0","O1_7_0","O1_8_0","Fe1_8_0","Fe1_9_0","O1_9_0","O1_10_0","Fe1_10_0","Fe1_12_0","O1_11_0","O1_12_0",\
             "O_1_0","O_2_0","Fe_2_0","Fe_3_0","O_3_0","O_4_0","Fe_4_0","Fe_6_0","O_5_0","O_6_0","O_7_0","O_8_0","Fe_8_0","Fe_9_0","O_9_0","O_10_0","Fe_10_0","Fe_12_0","O_11_0","O_12_0"]
#the matching row id information in the sym file
sym_file_Fe=np.array(['Fe1_0','Fe2_0','Fe3_0','Fe4_0','Fe5_0','Fe6_0','Fe7_0','Fe8_0','Fe9_0','Fe10_0','Fe11_0','Fe12_0',\
                      'Fe1_1_0','Fe1_2_0','Fe1_3_0','Fe1_4_0','Fe1_5_0','Fe1_6_0','Fe1_7_0','Fe1_8_0','Fe1_9_0','Fe1_10_0','Fe1_11_0','Fe1_12_0'])
sym_file_O=np.array(['O1_0','O2_0','O3_0','O4_0','O5_0','O6_0','O7_0','O8_0','O9_0','O10_0','O11_0','O12_0',\
                     'O1_1_0','O1_2_0','O1_3_0','O1_4_0','O1_5_0','O1_6_0','O1_7_0','O1_8_0','O1_9_0','O1_10_0','O1_11_0','O1_12_0'])

#create a domain class and instantiate the chemically equivalent domains
rgh_domain1=UserVars()
domain_class_1=domain_creator(ref_domain=domain0,id_list=ref_id_list,terminated_layer=0,domain_N=1,new_var_module=rgh_domain1)
domain1A=domain_class_1.domain_A
domain1B=domain_class_1.domain_B

#add sorbates for the two domains
domain_class_1.add_sorbates(domain=domain1A,attach_atm_id=[['O1_1_0','O1_2_0'],['O1_3_0','O1_4_0']],el=['Pb','Pb'],id=[1,11],O_id=['_A','_AA'],r1=0.1,r2=0.1,alpha1=np.pi/2.,alpha2=0.)
domain_class_1.add_sorbates(domain=domain1B,attach_atm_id=[['O1_7_0','O1_8_0'],['O1_9_0','O1_10_0']],el=['Pb','Pb'],id=[2,22],O_id=['_B','_BB'],r1=0.1,r2=0.1,alpha1=np.pi/2.,alpha2=0.)
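
# Editorial sketch (hypothetical helper, not called by the model) reproducing the
# arithmetic of _cal_xyz_double above, which the sorbate/oxygen-pair placement
# relies on: two points are placed symmetrically about ref_point in the xy plane
# and both are lifted by z_shift. With alpha=0 the pair straddles ref_point along x.
def _demo_cal_xyz_double():
    import numpy as np
    ref_point,r,alpha,z_shift=[0.5,0.5,2.0],0.1,0.0,0.1
    p1=[ref_point[0]+r*np.cos(alpha),ref_point[1]+r*np.sin(alpha),ref_point[2]+z_shift]
    p2=[2.*ref_point[0]-p1[0],2.*ref_point[1]-p1[1],ref_point[2]+z_shift]
    return p1,p2  # -> ([0.6, 0.5, 2.1], [0.4, 0.5, 2.1])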
#add a lone oxygen pair on top
domain_class_1.add_oxygen_pair(domain1A,O_id='_A1',ref_point=[0.5,0.5,2.203],r=0.1,alpha=0.)
domain_class_1.add_oxygen_pair(domain1B,O_id='_B1',ref_point=[0.5,0.5,1.703],r=0.1,alpha=0.)

#set new variables
domain_class_1.set_new_vars(head_list=['u_o_n','u_Fe_n','dx_n','dy_n','dz_n','oc_n','dx_sign_n','dy_sign_n','dz_sign_n'],N_list=[4,3,7,7,7,7,7,7,7])

#some other parameters to be used
rgh_domain1.new_var('r_Pb_O', 0.1)
rgh_domain1.new_var('r_Pb_O2', 0.1)
rgh_domain1.new_var('r_O_pair1', 0.1)
rgh_domain1.new_var('alpha_O_pair1', np.pi/4.)
rgh_domain1.new_var('alpha_Pb_O', np.pi/4.)
rgh_domain1.new_var('beta_Pb_O', np.pi/4.)
rgh_domain1.new_var('gamma_Pb_O', np.pi/4.)
rgh_domain1.new_var('alpha_Pb_O2', np.pi/4.)
rgh_domain1.new_var('beta_Pb_O2', np.pi/4.)
rgh_domain1.new_var('gamma_Pb_O2', np.pi/4.)
rgh_domain1.new_var('ref_x_O_pair1', 0.5)
rgh_domain1.new_var('ref_y_O_pair1', 0.5)
rgh_domain1.new_var('ref_z_O_pair1', 2.203)
rgh_domain1.new_var('domain_wt', 0.)
rgh_domain1.new_var('beta', 0.)

#do the grouping for the top seven layers
atm_gp_list_domain1=domain_class_1.grouping_sequence_layer(domain=[domain1A,domain1B],first_atom_id=['O1_1_0','O1_7_0'],\
    sym_file={'Fe':'Fe0 output file for Genx reading.txt','O':'O0 output file for Genx reading.txt'},\
    id_match_in_sym={'Fe':sym_file_Fe,'O':sym_file_O},layers_N=7,use_sym=True)
#the first atom group is the reference group for the scaling operation of dx dy dz
ref_atm_gp_domain1=atm_gp_list_domain1[0]
#group the Pb sorbates
atm_gp_Pb_domain1=domain_class_1.grouping_discrete_layer(domain=[domain1A,domain1B],atom_ids=['Pb1','Pb2'])
atm_gp_Pb2_domain1=domain_class_1.grouping_discrete_layer(domain=[domain1A,domain1B],atom_ids=['Pb11','Pb22'])
#group the sorbates of the oxygen pairs
atm_gp_O_domain1=domain_class_1.grouping_discrete_layer(domain=[domain1A,domain1A,domain1B,domain1B],atom_ids=['Oi_1_A','Oi_2_A','Oi_1_B','Oi_2_B'])
atm_gp_O2_domain1=domain_class_1.grouping_discrete_layer(domain=[domain1A,domain1A,domain1B,domain1B],atom_ids=['Oi_1_AA','Oi_2_AA','Oi_1_BB','Oi_2_BB'])
atm_gp_Os1_domain1=domain_class_1.grouping_discrete_layer(domain=[domain1A,domain1A,domain1B,domain1B],atom_ids=['Os_1_A1','Os_2_A1','Os_1_B1','Os_2_B1'])

#make a domain library wrapping the two chemically equivalent domains
domain={'domain1A':{'slab':domain1A,'wt':1.},'domain1B':{'slab':domain1B,'wt':0.}}
sample = model.Sample(inst, bulk, domain, unitcell,coherence=False,surface_parms={'delta1':0.,'delta2':0.1391})

def extract_list(ref_list,extract_index):
    #pick the items of ref_list at the positions given in extract_index
    output_list=[]
    for i in extract_index:
        output_list.append(ref_list[i])
    return output_list

def norm_sign(value,scale=1.):
    #map a fit parameter in [0,1] to a signed scale: -scale at or below 0.5, +scale above
    if value<=0.5:
        return -scale
    elif value>0.5:
        return scale

def Sim(data):
    #scale the thermal factor (1-2); note the scaling is done from the deepest layer,
    #so the list extraction here runs from the inside out ([6,3,1])
    scale_values_Fe_u=[rgh_domain1.u_Fe_n1,rgh_domain1.u_Fe_n2,rgh_domain1.u_Fe_n3]
    scale_values_O_u=[rgh_domain1.u_o_n1,rgh_domain1.u_o_n2,rgh_domain1.u_o_n3,rgh_domain1.u_o_n4]
    domain_class_1.scale_opt(extract_list(atm_gp_list_domain1,[6,3,1]),scale_factor=scale_values_Fe_u,sign_values=None,flag='u',ref_v=0.32)
    domain_class_1.scale_opt(extract_list(atm_gp_list_domain1,[5,4,2,0]),scale_factor=scale_values_O_u,sign_values=None,flag='u',ref_v=0.4)

    #scale the occupancy (0.5-1); scaling is done outward, so the atom group list is reversed here
    scale_values_all_oc=[rgh_domain1.oc_n1,rgh_domain1.oc_n2,rgh_domain1.oc_n3,rgh_domain1.oc_n4,rgh_domain1.oc_n5,rgh_domain1.oc_n6,rgh_domain1.oc_n7]
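    # Editorial note: per the scale_opt docstring, occupancy usually decreases
    # toward the surface, so the group list is reversed with [::-1] and the
    # innermost layer takes ref_v=1.; the accumulated product of oc_n1..oc_n7
    # then damps each successively shallower layer.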
    domain_class_1.scale_opt(atm_gp_list_domain1[::-1],scale_factor=scale_values_all_oc,sign_values=None,flag='oc',ref_v=1.)

    #extract the reference dx dy dz from the reference atom group
    ref_dx_domain1=getattr(ref_atm_gp_domain1,'getdx')()
    ref_dy_domain1=getattr(ref_atm_gp_domain1,'getdy')()
    ref_dz_domain1=getattr(ref_atm_gp_domain1,'getdz')()

    #scale the dx values (0.1-1); the extra argument to norm_sign is a second scale
    #factor for dx/dy relative to dz, since dz is believed to be more likely to relax than dx/dy
    #the shift amount for the first-layer oxygen is fitted directly, and the shifts
    #for the deeper layers are scaled from n2 to n7
    scale_values_all_dx=[rgh_domain1.dx_n2,rgh_domain1.dx_n3,rgh_domain1.dx_n4,rgh_domain1.dx_n5,rgh_domain1.dx_n6,rgh_domain1.dx_n7]
    sign_values_all_dx=[norm_sign(rgh_domain1.dx_sign_n2,0.1),norm_sign(rgh_domain1.dx_sign_n3,0.05),\
                        norm_sign(rgh_domain1.dx_sign_n4,0.01),norm_sign(rgh_domain1.dx_sign_n5,0.001),norm_sign(rgh_domain1.dx_sign_n6,0.0001),norm_sign(rgh_domain1.dx_sign_n7,0.00001)]
    domain_class_1.scale_opt(atm_gp_list_domain1[1:],scale_factor=scale_values_all_dx,sign_values=sign_values_all_dx,flag='dx',ref_v=ref_dx_domain1)

    #scale the dy values (0.1-1)
    scale_values_all_dy=[rgh_domain1.dy_n2,rgh_domain1.dy_n3,rgh_domain1.dy_n4,rgh_domain1.dy_n5,rgh_domain1.dy_n6,rgh_domain1.dy_n7]
    sign_values_all_dy=[norm_sign(rgh_domain1.dy_sign_n2,0.1),norm_sign(rgh_domain1.dy_sign_n3,0.05),\
                        norm_sign(rgh_domain1.dy_sign_n4,0.01),norm_sign(rgh_domain1.dy_sign_n5,0.001),norm_sign(rgh_domain1.dy_sign_n6,0.0001),norm_sign(rgh_domain1.dy_sign_n7,0.00001)]
    domain_class_1.scale_opt(atm_gp_list_domain1[1:],scale_factor=scale_values_all_dy,sign_values=sign_values_all_dy,flag='dy',ref_v=ref_dy_domain1)

    #scale the dz values (0.1-1); the sign list matches the six scaled groups (n2 to n7), as for dx and dy
    scale_values_all_dz=[rgh_domain1.dz_n2,rgh_domain1.dz_n3,rgh_domain1.dz_n4,rgh_domain1.dz_n5,rgh_domain1.dz_n6,rgh_domain1.dz_n7]
    sign_values_all_dz=[norm_sign(rgh_domain1.dz_sign_n2),norm_sign(rgh_domain1.dz_sign_n3),norm_sign(rgh_domain1.dz_sign_n4),\
                        norm_sign(rgh_domain1.dz_sign_n5),norm_sign(rgh_domain1.dz_sign_n6),norm_sign(rgh_domain1.dz_sign_n7)]
    domain_class_1.scale_opt(atm_gp_list_domain1[1:],scale_factor=scale_values_all_dz,sign_values=sign_values_all_dz,flag='dz',ref_v=ref_dz_domain1)

    #update the sorbate xyz positions (bidentate configuration here)
    domain_class_1.group_sorbates_2(domain=domain1A,attach_atm_id='Pb1',ids_to_be_attached=['Oi_1_A','Oi_2_A'],r=rgh_domain1.r_Pb_O,alpha=rgh_domain1.alpha_Pb_O,beta=rgh_domain1.beta_Pb_O,gamma=rgh_domain1.gamma_Pb_O)
    domain_class_1.group_sorbates_2(domain=domain1B,attach_atm_id='Pb2',ids_to_be_attached=['Oi_1_B','Oi_2_B'],r=rgh_domain1.r_Pb_O,alpha=rgh_domain1.alpha_Pb_O,beta=rgh_domain1.beta_Pb_O,gamma=rgh_domain1.gamma_Pb_O)
    domain_class_1.group_sorbates_2(domain=domain1A,attach_atm_id='Pb11',ids_to_be_attached=['Oi_1_AA','Oi_2_AA'],r=rgh_domain1.r_Pb_O2,alpha=rgh_domain1.alpha_Pb_O2,beta=rgh_domain1.beta_Pb_O2,gamma=rgh_domain1.gamma_Pb_O2)
    domain_class_1.group_sorbates_2(domain=domain1B,attach_atm_id='Pb22',ids_to_be_attached=['Oi_1_BB','Oi_2_BB'],r=rgh_domain1.r_Pb_O2,alpha=rgh_domain1.alpha_Pb_O2,beta=rgh_domain1.beta_Pb_O2,gamma=rgh_domain1.gamma_Pb_O2)
    domain_class_1.updata_oxygen_pair(domain=domain1A,ids=['Os_1_A1','Os_2_A1'],ref_point=[rgh_domain1.ref_x_O_pair1,rgh_domain1.ref_y_O_pair1,rgh_domain1.ref_z_O_pair1],r=rgh_domain1.r_O_pair1,alpha=rgh_domain1.alpha_O_pair1)
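    # Editorial note: the same oxygen-pair update follows for domain1B, with the
    # reference z lowered by 0.5 in fractional c units; this matches the
    # half-cell offset between the A and B oxygen-pair reference points set up
    # above (2.203 vs 1.703).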
    domain_class_1.updata_oxygen_pair(domain=domain1B,ids=['Os_1_B1','Os_2_B1'],ref_point=[rgh_domain1.ref_x_O_pair1,rgh_domain1.ref_y_O_pair1,rgh_domain1.ref_z_O_pair1-0.5],r=rgh_domain1.r_O_pair1,alpha=rgh_domain1.alpha_O_pair1)

    #roughness parameter
    beta=rgh_domain1.beta
    F = []
    domain['domain1A']['wt']=1.-rgh_domain1.domain_wt
    domain['domain1B']['wt']=rgh_domain1.domain_wt

    #9.a loop through the data sets
    for data_set in data:
        #9.b create all the h,k,l values for the rod (data_set)
        h = data_set.extra_data['h']
        k = data_set.extra_data['k']
        l = data_set.x
        #9.c calculate the roughness using the beta model
        LB = data_set.extra_data['LB']
        dL = data_set.extra_data['dL']
        rough = (1-beta)/((1-beta)**2 + 4*beta*np.sin(np.pi*(l - LB)/dL)**2)**0.5
        #9.d calculate the structure factor
        f = rough*sample.calc_f(h, k, l)
        #9.e calculate |F|
        i = abs(f)
        #9.f append the calculated intensity to the list F
        F.append(i)
    return F
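
# Editorial sketch (hypothetical helper, not called by the model) of the beta
# roughness factor used in Sim above: at a Bragg condition (l == LB) the sine
# term vanishes and rough == 1 for any beta, while between Bragg peaks the
# intensity is damped. The sample beta, LB, dL and l values are illustrative.
def _demo_beta_roughness():
    import numpy as np
    beta,LB,dL=0.1,0.0,2.0
    l=np.array([0.0,0.5,1.0])  # on-Bragg, quarter-way, anti-Bragg
    rough=(1-beta)/((1-beta)**2+4*beta*np.sin(np.pi*(l-LB)/dL)**2)**0.5
    return rough  # -> approximately [1.0, 0.896, 0.818]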