text
stringlengths
12
1.05M
repo_name
stringlengths
5
86
path
stringlengths
4
191
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
12
1.05M
keyword
listlengths
1
23
text_hash
stringlengths
64
64
''' count reads or records in usearch-formatted fastq and tab files '''

__author__ = "Sonia Timberlake"

import os
import sys
import argparse
from os.path import basename, dirname, join
from collections import OrderedDict

# Globals
# Default delimiters for usearch/qiime-style headers, e.g. "readID;size=12".
default_delimiter = {'fields': ';', 'key-value': '='}


# Functions
def parseAnnotation(record, fields=None, delimiter=default_delimiter):
    """ Extracts annotations from a FASTA/FASTQ sequence description

    Arguments:
      record : Description string to extract annotations from
      fields : List of fields to subset the return dictionary to;
               if None return all fields
      delimiter : dict of delimiter strings with keys 'fields' and
                  'key-value' (same shape as ``default_delimiter``)

    Returns:
      OrderedDict : An OrderedDict of field/value pairs (keys upper-cased)
    """
    # BUGFIX: the default delimiter is a dict, so it must be indexed by key;
    # the previous positional indexing (delimiter[0]) raised KeyError with
    # the default argument.
    annotation = record.split(delimiter['fields'])
    field_dict = OrderedDict([('ID', annotation.pop(0))])
    for ann in annotation:
        vals = ann.split(delimiter['key-value'])
        field_dict[vals[0].upper()] = vals[1]

    # Subset return dictionary to requested fields
    if fields is not None:
        if not isinstance(fields, list):
            fields = [fields]
        for f in set(field_dict).difference(fields):
            del field_dict[f]

    return field_dict


def parse_usearch_header(header, delims=None):
    ''' Extracts annotations from a FASTA/FASTQ sequence description.

    Default delimiters are the usearch/qiime format, e.g. "id;size=12".
    Fields without a key-value separator are stored as their own value.
    '''
    if delims is None:  # avoid a shared mutable default argument
        delims = {'fields': ';', 'key-value': '='}
    fields = header.split(delims['fields'])
    field_dict = OrderedDict([('ID', fields.pop(0))])
    for field in fields:
        pair = field.split(delims['key-value'])
        if len(pair) > 1:  # in case not every field is a key-value pair
            field_dict[pair[0].lower()] = pair[1]
        else:
            field_dict[pair[0].lower()] = pair[0]
    return field_dict


def count_tab(infile, count_unit='reads', derep_field=None):
    ''' Count records, or reads, in a usearch tab file
    (when ``wc -l`` won't do because the reads are dereplicated).

    :param infile: open file handle (or any iterable of lines)
    :param count_unit: 'reads' or 'records'
    :param derep_field: header field holding the per-record read count
        (e.g. 'size'); used only when count_unit == 'reads'
    :return: (count, count_unit) tuple
    '''
    if count_unit == 'reads' and derep_field:
        count = sum(int(parse_usearch_header(line)[derep_field])
                    for line in infile)
    else:
        count = sum(1 for line in infile)
        # TODO add in ID dereplication ?
    return count, count_unit


def count_fastq(infile, count_unit='reads'):
    ''' Count records (or constituent reads) in a fasta/fastq file.

    The parse format is inferred from the filename extension.  For
    dereplicated files (filename contains 'derep') the per-record 'size'
    annotation is summed when counting reads.
    '''
    # Deferred import so that tab-file counting works without Biopython.
    from Bio import SeqIO

    prefix, suffix = basename(infile.name).split('.', 1)
    if len(suffix) == 2:
        suffix = {'fq': 'fastq', 'fa': 'fasta'}[suffix]
    # lazy exception handling
    if not suffix in ('fastq', 'fasta'):
        raise Exception("Encountered file {} in rule counts which doesn't end "
                        "with fastq, fasta, fa, fq.".format(infile.name))
    if 'derep' in prefix and count_unit == 'reads':
        # have to use the size field
        # TODO: maybe should look for it by default ?
        derep_field = 'size'
        count = sum(int(parse_usearch_header(record.description)[derep_field])
                    for record in SeqIO.parse(infile, suffix))
    else:
        count = sum(1 for record in SeqIO.parse(infile, suffix))
    return count, count_unit


def parse_args(argv):
    ''' Build the command-line parser and parse argv. '''
    parser = argparse.ArgumentParser(
        usage=__doc__,
        description=''' count reads in a fastq/fasta / usearch file ''')
    parser.add_argument('--infile', type=argparse.FileType('r'),
                        default=sys.stdin)
    parser.add_argument('--outfile', type=argparse.FileType('w'),
                        default=sys.stdout)
    parser.add_argument('--count_unit', choices=["reads", "records"],
                        default='reads',
                        help='in dereplicated fasta, reads!=records')
    parser.add_argument('--file_format', choices=["tab", "fasta", "fastq"],
                        default='fasta', help='tab or fast[aq]')
    parser.add_argument('--test', action='store_true')
    return parser.parse_args(argv)


def main(argv=sys.argv):
    ''' Entry point: count units in the input file and print a TSV line
    "count<TAB>count_unit<TAB>filename" to the output file. '''
    opts = parse_args(argv[1:])
    prefix, suffix = basename(opts.infile.name).split('.', 1)
    if opts.file_format == 'tab' or \
            suffix not in ('fastq', 'fasta', 'fa', 'fq', 'fst'):
        count, count_unit = count_tab(opts.infile, opts.count_unit,
                                      derep_field='size')
    else:
        count, count_unit = count_fastq(opts.infile, opts.count_unit)
    print("{}\t{}\t{}".format(count, count_unit, opts.infile.name),
          file=opts.outfile)
    return (count, opts.infile.name)


if __name__ == '__main__':
    main()
sonia-t/utils
count_reads.py
Python
mit
4,451
[ "ASE" ]
fc02c5ac2ef5944d8ecd8c4d95a5716ae163828181a4daa18c0bae8c6a7629a4
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np


def random_flips(X):
    '''
    Take random x-y flips of images.

    Input:
    - X: (N, C, H, W) array of image data.

    Output:
    - An array of the same shape as X, containing a copy of the data in X,
      but with half the examples (on average) flipped along the horizontal
      direction.
    '''
    N, C, H, W = X.shape
    mask = np.random.randint(2, size=N)
    out = np.zeros_like(X)
    out[mask == 1] = X[mask == 1, :, :, ::-1]
    out[mask == 0] = X[mask == 0]
    return out


def add_pad(X, pad):
    '''
    Pad each image with a constant border on all four sides.

    Input:
    - X: (N, C, H, W) array of image data
    - pad: Number of pixels to add on each side of each image.
      NOTE(review): the original docstring said "white pixels", but
      mode='constant' pads with zeros (black) — confirm intent.

    Output:
    - Array of shape (N, C, H + 2 * pad, W + 2 * pad)
    '''
    assert pad > 0
    return np.pad(X, ((0, 0), (0, 0), (pad, pad), (pad, pad)),
                  mode='constant')


def random_crops(X, crop_shape, pad=0):
    '''
    Take random crops of images. For each input image we generate a random
    crop of that image of the specified size.

    Input:
    - X: (N, C, H, W) array of image data
    - crop_shape: Tuple (HH, WW) to which each image will be cropped.
    - pad: if non-zero, pad the images first (see ``add_pad``).

    Output:
    - Array of shape (N, C, HH, WW)
    '''
    if pad:
        X = add_pad(X, pad)
    N, C, H, W = X.shape
    HH, WW = crop_shape
    assert HH < H and WW < W

    out = np.zeros((N, C, HH, WW), dtype=X.dtype)
    # Independent random offsets for each image.  (A stray duplicate RNG
    # call in the original was removed — it only burned random state.)
    y_start = np.random.randint(H - HH, size=N)
    x_start = np.random.randint(W - WW, size=N)
    # BUGFIX: xrange -> range (Python 3).
    for i in range(N):
        out[i] = X[i, :, y_start[i]:y_start[i] + HH,
                   x_start[i]:x_start[i] + WW]
    return out


def random_rotate(X, max_angle=10):
    '''Rotate each (C, H, W) image by a random integer angle drawn from
    [-max_angle, max_angle] degrees.'''
    # Deferred import: only this function needs scikit-image.
    from skimage.transform import rotate

    out = np.zeros_like(X)
    high = np.abs(max_angle) + 1
    low = -np.abs(max_angle)
    for i, x in enumerate(X):
        t = x.transpose(1, 2, 0)  # rotate() expects (H, W, C)
        t = rotate(t, np.random.randint(low, high), resize=False)
        out[i] = t.transpose(2, 0, 1)
    return out


def random_contrast(X, scale=(0.8, 1.2)):
    '''
    Randomly adjust the contrast of images. For each input image, choose a
    number uniformly at random from the range given by the scale parameter,
    and multiply each pixel of the image by that number.

    source: https://github.com/MyHumbleSelf/cs231n/blob/master/assignment3/cs231n/data_augmentation.py

    Inputs:
    - X: (N, C, H, W) array of image data
    - scale: Tuple (low, high). For each image we sample a scalar in the
      range (low, high) and multiply the image by that scalar.

    Output:
    - Rescaled array out of shape (N, C, H, W) where out[i] is a contrast
      adjusted version of X[i].
    '''
    low, high = scale
    N = X.shape[0]
    # One multiplier per image, broadcast over (C, H, W).
    factors = (high - low) * np.random.random_sample(N) + low
    return X * factors[:, None, None, None]


def random_tint(X, scale=(-10, 10)):
    '''
    Randomly tint images. For each input image, choose a random color whose
    red, green, and blue components are each drawn uniformly at random from
    the range given by scale. Add that color to each pixel of the image.

    source: https://github.com/MyHumbleSelf/cs231n/blob/master/assignment3/cs231n/data_augmentation.py

    Inputs:
    - X: (N, C, H, W) array of image data
    - scale: A tuple (low, high) giving the bounds for the random color that
      will be generated for each image.

    Output:
    - Tinted array out of shape (N, C, H, W) where out[i] is a tinted
      version of X[i].
    '''
    low, high = scale
    N, C = X.shape[:2]
    # One offset per (image, channel), broadcast over (H, W).
    tints = (high - low) * np.random.random_sample((N, C)) + low
    return X + tints[:, :, None, None]


def fixed_crops(X, crop_shape, crop_type):
    '''
    Take center or corner crops of images.

    source: https://github.com/MyHumbleSelf/cs231n/blob/master/assignment3/cs231n/data_augmentation.py

    Inputs:
    - X: Input data, of shape (N, C, H, W)
    - crop_shape: Tuple of integers (HH, WW) giving the size to which each
      image will be cropped.
    - crop_type: One of the following strings, giving the type of crop to
      compute:
      'center': Center crop
      'ul': Upper left corner
      'ur': Upper right corner
      'bl': Bottom left corner
      'br': Bottom right corner

    Returns:
    Array of cropped data of shape (N, C, HH, WW)
    '''
    N, C, H, W = X.shape
    HH, WW = crop_shape

    # BUGFIX: use integer division; '/' yields floats on Python 3, which
    # are invalid slice indices.
    x0 = (W - WW) // 2
    y0 = (H - HH) // 2
    x1 = x0 + WW
    y1 = y0 + HH

    if crop_type == 'center':
        return X[:, :, y0:y1, x0:x1]
    elif crop_type == 'ul':
        return X[:, :, :HH, :WW]
    elif crop_type == 'ur':
        return X[:, :, :HH, -WW:]
    elif crop_type == 'bl':
        return X[:, :, -HH:, :WW]
    elif crop_type == 'br':
        return X[:, :, -HH:, -WW:]
    else:
        raise ValueError('Unrecognized crop type %s' % crop_type)


def RGB_PCA(images):
    '''
    Source: https://github.com/Thrandis/ift6266h15/blob/1cc3fc6164dc6c54936971
    935027cd447e2cd81f/dataset_augmentation.py
    RGB PCA and variations from Alex's paper
    '''
    pixels = images.reshape(-1, images.shape[-1])
    # BUGFIX: np.random.random_integers is deprecated/removed and its
    # inclusive upper bound could index one past the end of the array;
    # randint's upper bound is exclusive, so indices stay in range.
    idx = np.random.randint(0, pixels.shape[0], 1000000)
    pixels = np.array([pixels[i] for i in idx], dtype=np.uint8).T
    m = np.mean(pixels) / 256.
    C = np.cov(pixels) / (256. * 256.)
    l, v = np.linalg.eig(C)
    return l, v, m


def RGB_variations(image, eig_val, eig_vec):
    '''
    Source: https://github.com/Thrandis/ift6266h15/blob/1cc3fc6164dc6c54936971
    935027cd447e2cd81f/dataset_augmentation.py
    '''
    a = np.random.randn(3)
    v = np.array([a[0] * eig_val[0], a[1] * eig_val[1], a[2] * eig_val[2]])
    variation = np.dot(eig_vec, v)
    return image + variation


def noise(x):
    '''
    Apply one of salt&pepper / gaussian / speckle noise, chosen at random.

    Source: https://github.com/Thrandis/ift6266h15/blob/1cc3fc6164dc6c54936971
    935027cd447e2cd81f/dataset_augmentation.py
    '''
    # Deferred import: only this function needs scikit-image.
    from skimage.util import random_noise

    r = np.random.rand(1)[0]
    # TODO randomize parameters of the noises; check how to init seed
    if r < 0.33:
        return random_noise(x, 's&p', seed=np.random.randint(1e6))
    if r < 0.66:
        return random_noise(x, 'gaussian', seed=np.random.randint(1e6))
    return random_noise(x, 'speckle', seed=np.random.randint(1e6))


def create_2d_gaussian(dim, sigma):
    """
    Source: https://github.com/vsvinayak/mnist-helper/blob/master/mnist_helpers.py

    This function creates a 2d gaussian kernel with the standard deviation
    denoted by sigma

    :param dim: integer denoting a side (1-d) of gaussian kernel
    :type dim: int
    :param sigma: the standard deviation of the gaussian kernel
    :type sigma: float

    :returns: a numpy 2d array, normalized to sum to 1
    """
    # check if the dimension is odd
    if dim % 2 == 0:
        raise ValueError("Kernel dimension should be odd")

    kernel = np.zeros((dim, dim), dtype=np.float16)
    center = dim / 2
    variance = sigma ** 2
    coeff = 1. / (2 * variance)  # normalization coefficient

    # fill the kernel with the (unnormalized) gaussian values
    for x in range(0, dim):
        for y in range(0, dim):
            x_val = abs(x - center)
            y_val = abs(y - center)
            numerator = x_val ** 2 + y_val ** 2
            denom = 2 * variance
            kernel[x, y] = coeff * np.exp(-1. * numerator / denom)

    # normalise it
    return kernel / sum(sum(kernel))


def elastic_transform(image, kernel_dim=13, sigma=6, alpha=36, negated=False):
    """
    Source: https://github.com/vsvinayak/mnist-helper/blob/master/mnist_helpers.py

    This method performs elastic transformations on an image by convolving
    with a gaussian kernel.

    NOTE: Image dimensions should be a sqaure image

    :param image: the input image
    :type image: a numpy nd array
    :param kernel_dim: dimension(1-D) of the gaussian kernel
    :type kernel_dim: int
    :param sigma: standard deviation of the kernel
    :type sigma: float
    :param alpha: a multiplicative factor for image after convolution
    :type alpha: float
    :param negated: a flag indicating whether the image is negated or not
    :type negated: boolean
    :returns: a nd array transformed image
    """
    # Deferred imports: cv2/scipy are only needed here.
    import math
    from scipy.signal import convolve2d
    import cv2

    # convert the image to single channel if it is multi channel one
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # work internally on the negated image
    if not negated:
        image = 255 - image

    # check if the image is a square one
    if image.shape[0] != image.shape[1]:
        raise ValueError("Image should be of sqaure form")

    # check if kernel dimension is odd
    if kernel_dim % 2 == 0:
        raise ValueError("Kernel dimension should be odd")

    result = np.zeros(image.shape)

    # random displacement fields with values in {-alpha, 0, alpha}.
    # BUGFIX: numpy.random.random_integers is removed in modern NumPy and
    # xrange does not exist on Python 3; randint(-1, 2) draws the same
    # inclusive {-1, 0, 1} values.
    displacement_field_x = np.random.randint(
        -1, 2, size=(image.shape[1], image.shape[0])) * alpha
    displacement_field_y = np.random.randint(
        -1, 2, size=(image.shape[1], image.shape[0])) * alpha

    # create and apply the gaussian smoothing kernel
    kernel = create_2d_gaussian(kernel_dim, sigma)
    displacement_field_x = convolve2d(displacement_field_x, kernel)
    displacement_field_y = convolve2d(displacement_field_y, kernel)

    # make the distorted image by averaging each pixel value to the
    # neighbouring four pixels based on displacement fields
    for row in range(image.shape[1]):
        for col in range(image.shape[0]):
            low_ii = row + int(math.floor(displacement_field_x[row, col]))
            high_ii = row + int(math.ceil(displacement_field_x[row, col]))
            low_jj = col + int(math.floor(displacement_field_y[row, col]))
            high_jj = col + int(math.ceil(displacement_field_y[row, col]))

            if low_ii < 0 or low_jj < 0 or high_ii >= image.shape[1] - 1 \
                    or high_jj >= image.shape[0] - 1:
                continue

            res = image[low_ii, low_jj] / 4 + image[low_ii, high_jj] / 4 + \
                image[high_ii, low_jj] / 4 + image[high_ii, high_jj] / 4
            result[row, col] = res

    # if the input image was not negated, make the output image also a non
    # negated one
    if not negated:
        result = 255 - result

    return result
dnlcrl/PyDatSet
pydatset/data_augmentation.py
Python
mit
11,052
[ "Gaussian" ]
54b5587449c2f67981aa68aca1f0d7fad0766159979bf6a3d4beaf7c4311b8de
import sys
import math
import logging
import argparse
from pathlib import Path
from typing import List

import cv2
import numpy as np
from skimage import draw


def alpha_blur(img, alpha_mask, kernel_size=10):
    """
    Blur image proportionally to the given mask.

    :param img: input image
    :param alpha_mask: blending mask in [0, 255]; where the mask is high the
        original image dominates, where it is low the blurred version shows
        through. May be single-channel or 3-channel. If None, the fully
        blurred image is returned.
    :param kernel_size: unused; kept for backward compatibility
    :return: blended image
    """
    blurred_img = cv2.GaussianBlur(img, (21, 21), 11)
    if alpha_mask is not None:
        if alpha_mask.ndim == 3 and alpha_mask.shape[-1] == 3:
            alpha = alpha_mask / 255.0
        else:
            alpha = cv2.cvtColor(alpha_mask, cv2.COLOR_GRAY2BGR) / 255.0
        blurred_img = cv2.convertScaleAbs(blurred_img * (1 - alpha)
                                          + img * alpha)
    return blurred_img


def positive_sharpen(i, overblur=False, coeff=8.):
    """Emphasize edges via unsharp masking, then soften the result with a
    small (or, if overblur, large) box blur."""
    blurred = cv2.blur(i, (5, 5))
    sharpened = i + (i - blurred) * coeff
    if overblur:
        return cv2.blur(np.maximum(sharpened, i), (11, 11))
    return cv2.blur(np.maximum(sharpened, i), (3, 3))


def sample_color(img, x, y, neighbor_size):
    """Mean color of the neighborhood around (x, y).

    Sampling an area from the image (rather than one pixel) converges
    faster in the painting loop."""
    color = img[max(0, y - neighbor_size):y + neighbor_size,
                max(0, x - neighbor_size):x + neighbor_size].mean(axis=(0, 1))
    return color


def get_phase_and_magnitude(img, sobel_kernel_size=7, magnitude_power=0.3):
    """
    Calculate phase (gradient orientation) and magnitude from the image
    gradient.

    :param img: image to compute phase from
    :param sobel_kernel_size: Sobel aperture size
    :param magnitude_power: exponent applied to the normalized magnitude to
        make its distribution more uniform
    :return: (phase in float32 radians, magnitude normalized to [0, 1])
    """
    img_gray = img.astype('float32')

    # gradient along the x and y axes; y is negated so angles follow the
    # usual mathematical orientation
    xg = cv2.Sobel(img_gray, cv2.CV_32F, 1, 0, ksize=sobel_kernel_size)
    yg = - cv2.Sobel(img_gray, cv2.CV_32F, 0, 1, ksize=sobel_kernel_size)

    # rotation angle of the 2D gradient vectors
    phase = cv2.phase(xg, yg)

    magnitude = cv2.magnitude(xg, yg)
    magnitude = magnitude / magnitude.max()  # normalize to [0, 1] range
    magnitude = np.power(magnitude, magnitude_power)  # flatten distribution
    return phase, magnitude


def get_point_angle_and_magnitude(x, y, phase_map, magnitude_map,
                                  phase_neighbor_size: int,
                                  magnitude_neighbor_size: int):
    """Average phase/magnitude around (x, y) and return a stroke angle
    (degrees, perpendicular to the gradient) and the local magnitude."""
    # average phase around the point, for an area proportional to brush size
    # (phase is expected in [0, 2*pi))
    phase = phase_map[max(0, y - phase_neighbor_size):y + phase_neighbor_size,
                      max(0, x - phase_neighbor_size):
                      x + phase_neighbor_size].mean()
    # choose direction perpendicular to gradient
    angle = (((phase / math.pi) * 180) + 90) % 360

    magnitude = magnitude_map[
        max(0, y - magnitude_neighbor_size):y + magnitude_neighbor_size,
        max(0, x - magnitude_neighbor_size):
        x + magnitude_neighbor_size].mean()
    return angle, magnitude


def get_edges(img, img_blur_size=5, min_hyst_val=100, max_hyst_val=200,
              edges_blur_size=5):
    """
    Detect image edges.

    :param img: input image with float values in [0, 1]
    :param img_blur_size: kernel for gaussian-blur on image
    :param min_hyst_val: hysteresis min threshold (canny edge detection)
    :param max_hyst_val: hysteresis max threshold (canny edge detection)
    :param edges_blur_size: blur size applied to edge results
    :return: norm blurred edges (a probability map summing to 1) and the
        uint8 original edges image
    """
    # remove noise to improve edge detection results
    blurred_img = cv2.GaussianBlur(img, (img_blur_size, img_blur_size), 0)

    # canny edge detection
    edges = cv2.Canny((blurred_img * 255).astype('uint8'),
                      min_hyst_val, max_hyst_val)

    # blur edges and normalize to probabilities
    blurred_edges = cv2.blur(edges, (edges_blur_size, edges_blur_size)) \
        .astype('float32') / 255
    norm_edges = blurred_edges / blurred_edges.sum()
    return norm_edges, edges


def get_distance_map(src_img):
    """
    Get distance values for given image. Distance is the distance to the
    closest zero pixel for each pixel of the source image.

    :param src_img: grayscale image
    :return: distance map normalized to [0, 1]
    """
    # use simple euclidean distance and a 3x3 mask for a fast, coarse
    # distance estimation
    dist = cv2.distanceTransform(255 - src_img, cv2.DIST_L2, 3)
    cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
    return dist


def get_min_radius_to_edge(dist_img, start_pos, end_pos, dist_threshold=0.01):
    """Walk the line from start_pos to end_pos over the distance map and
    return the index of the first point whose distance falls below
    dist_threshold."""
    # rasterize the line and keep only in-bounds coordinates
    line = np.transpose(np.array(draw.line(start_pos[0], start_pos[1],
                                           end_pos[0], end_pos[1])))
    line = np.array([[x, y] for [x, y] in line
                     if (0 <= x < dist_img.shape[1]
                         and 0 <= y < dist_img.shape[0])])

    # distance values along the line
    data = dist_img[line[:, 1], line[:, 0]]

    # first index below threshold (np.argmax returns the first True)
    radius = np.argmax(data < dist_threshold)
    return radius


def add_border_to_img(img, border_size: int):
    """Return (resized_img, bordered_img): the image with a white constant
    border of border_size, plus the original resized (with a half border)
    to the bordered image's shape. Both are float32 in [0, 1]."""
    border_img = cv2.copyMakeBorder(img, border_size, border_size,
                                    border_size, border_size,
                                    cv2.BORDER_CONSTANT, value=[255] * 3)
    border_img = border_img.astype('float32') / 255  # convert to float32

    img = cv2.copyMakeBorder(img, border_size // 2, border_size // 2,
                             border_size // 2, border_size // 2,
                             cv2.BORDER_CONSTANT, value=[255] * 3)
    img = cv2.resize(img, border_img.shape[:2][::-1])
    img = img.astype('float32') / 255  # convert to float32
    return img, border_img


def combine_salience_images(input_dir: Path, salience_paths: List[Path],
                            weights: List[float], output_dir: Path):
    """For each image in input_dir, compute a weighted combination of the
    same-named salience maps found under each of salience_paths and write
    the normalized result to output_dir."""
    main_imgs = list(input_dir.glob('*.png')) + list(input_dir.glob('*.jpg'))
    output_dir.mkdir(exist_ok=True, parents=True)

    # for each image in our target input folder, compute composed salience
    for img_path in main_imgs:
        main_img = cv2.imread(str(img_path),
                              cv2.IMREAD_GRAYSCALE).astype('float32') / 255
        composed_salience_img = np.zeros(main_img.shape, dtype='float32')
        salience_img_name = img_path.stem
        for i, salience_path in enumerate(salience_paths):
            # find the salience map for this image, whatever its extension
            salience_img_ext = None
            for ext in ['.jpg', '.png']:
                if (salience_path / (salience_img_name + ext)).exists():
                    salience_img_ext = ext
                    break
            if salience_img_ext is None:
                continue
            # BUGFIX: use the matched extension, not the for-loop's leaked
            # `ext` variable.
            salience_img_path = str(salience_path
                                    / (salience_img_name + salience_img_ext))
            salience_img = cv2.imread(str(salience_img_path),
                                      cv2.IMREAD_GRAYSCALE)
            salience_img = salience_img.astype('float32') / 255
            salience_img = salience_img.clip(0.)
            composed_salience_img = composed_salience_img \
                + (weights[i] * salience_img)
        composed_salience_img = composed_salience_img \
            / composed_salience_img.max()
        cv2.imwrite(str(output_dir / f'{salience_img_name}.png'),
                    composed_salience_img * 255)


def main(_=None):
    """CLI entry point: combine salience maps with uniform weights."""
    logging.getLogger().setLevel(logging.INFO)

    parser = argparse.ArgumentParser(description='Image Utils')
    parser.add_argument('-i', '--input-path', required=True)
    parser.add_argument('-o', '--output-path', required=True)
    parser.add_argument('-s', '--salience-paths', type=str, nargs='+',)

    args = parser.parse_args()
    input_path = Path(args.input_path)
    output_path = Path(args.output_path)
    # BUGFIX: -s is optional, so args.salience_paths may be None; treat a
    # missing flag as an empty list instead of raising TypeError.
    salience_paths = [Path(p) for p in (args.salience_paths or [])]
    weights = [1. for p in salience_paths]

    combine_salience_images(input_path, salience_paths=salience_paths,
                            weights=weights, output_dir=output_path)


if __name__ == "__main__":
    main(sys.argv[1:])
5agado/data-science-learning
graphics/learn_to_paint/image_utils.py
Python
apache-2.0
8,095
[ "Gaussian" ]
6c869365219d16dfe27852478b450e7e0f6847fd813bb07b779cb7f320ef0c47
'''
ModelGenie (c) University of Manchester 2015

ModelGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.

@author:  neilswainston
'''
import re


def parse_equation(equation, separator):
    '''Parses a chemical equation, returning the participants as a list of
    (name, stoichiometry) tuples. Negative stoichiometries define reactants,
    positive products.

    :param equation: equation string, e.g. '2 H2 + O2 = 2 H2O'
    :param separator: literal string separating the two equation sides
    :return: list of (name, stoichiometry) tuples
    '''
    # BUGFIX: escape the separator so regex metacharacters (e.g. in '<=>',
    # '.', '|') are matched literally rather than treated as pattern syntax.
    equation_terms = [re.split('\\s*\\+\\s*', equation_side)
                      for equation_side
                      in re.split('\\s*' + re.escape(separator) + '\\s*',
                                  equation)]

    participants = []

    # Add reactants:
    __add_reaction_participants(equation_terms[0], -1, participants)

    # Add products:
    __add_reaction_participants(equation_terms[1], 1, participants)

    return participants


def __add_reaction_participants(equation_term, stoich_factor, participants):
    '''Adds participants to reaction, scaling any leading numeric
    coefficient by stoich_factor (defaulting to +/-1 when absent).'''
    for participant in equation_term:
        terms = participant.split()
        participants.append((participant, stoich_factor)
                            if len(terms) == 1
                            else (terms[1], stoich_factor * float(terms[0])))
neilswainston/development-py
synbiochemdev/modelgenie/chem_utils.py
Python
mit
1,183
[ "VisIt" ]
c23d306ee6418460b221d07b2f054a11bef21e9809bb7cdbe497322a7adbcc12
from scipy.optimize import minimize
from scipy.optimize import basinhopping
import numpy as np


def optimizer(obj_func, initial_theta, bounds, gradient=True,
              minimizer='L-BFGS-B', hopping=0, **kwargs):
    """Substitute optimizer in scikit-learn Gaussian Process function.

    Note 'L-BFGS-B' is equivalent to the standard optimizer used in
    scikit-learn. This function allows for more direct control over the
    arguments. https://docs.scipy.org/doc/scipy/reference/optimize.html

    Parameters
    ----------
    obj_func : function
        scikit-learn objective function. Called as
        ``obj_func(theta, eval_gradient)``; with ``eval_gradient=True`` it
        must return ``(value, gradient)``.
    initial_theta : array (n,)
        Hyperparameters to be optimized against.
    bounds : list of tuples (n, 2)
        Lower and upper bounds for each hyper parameter.
    gradient : bool
        Include the gradient for the optimization function.
    minimizer : str
        A scipy minimization method.
    hopping : int
        Perform a number of basin hopping steps.

    Returns
    -------
    theta_opt : list (n,)
        Optimized hyperparameters.
    func_min : float
        Value of the minimized objective function.
    """
    margs = {
        'method': minimizer,
        'args': (gradient, ),
        'jac': gradient,
        'bounds': bounds,
    }

    if hopping:
        # global-ish search: repeated local minimizations from perturbed
        # starting points
        m = basinhopping(
            obj_func, initial_theta,
            minimizer_kwargs=margs,
            niter=hopping,
            T=1e2,
            stepsize=2,
        )
    else:
        m = minimize(obj_func, initial_theta, **margs)
    theta_opt = m.x
    func_min = m.fun

    return theta_opt, func_min


def online_learning(X, y, samples, factors=(1.0, 1.0), nsteps=40, plot=False):
    """A simple utility for performing online learning. The main components
    required are a regression method and a scoring technique.

    Currently, the scoring methodology and regressor are baked in.
    These need to be made modular.

    Minimum 3 samples are required for 3 fold cross validation.

    Parameters
    ----------
    X : array (m, d)
        Feature matrix for the full candidate pool.
    y : array (m,)
        Targets for the full candidate pool.
    samples : array of int
        Indices of the initially-labelled samples; grown by one per step.
    factors : sequence of 2 floats
        Relative importance of (predicted value, predicted uncertainty)
        in the acquisition score.
    nsteps : int
        Number of acquisition steps to perform.
    plot : bool
        If True, save a diagnostic figure per step.

    Returns
    -------
    samples : array of int
        The input indices extended with the acquired ones.
    """
    # Deferred imports: only this function needs scikit-learn/matplotlib,
    # so `optimizer` above stays usable without them installed.
    from sklearn.gaussian_process.kernels import WhiteKernel, DotProduct
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.metrics import mean_absolute_error
    from sklearn import preprocessing
    import matplotlib.pyplot as plt

    ids = np.arange(len(y))
    kernel = DotProduct() + WhiteKernel()
    regressor = GaussianProcessRegressor(
        kernel=kernel, n_restarts_optimizer=5, alpha=0)

    step = 0
    while step < nsteps:
        X0 = X[samples]
        y0 = y[samples]

        regressor.fit(X0, y0)
        yp, ys = regressor.predict(X, return_std=True)

        # Provides some form of normalization.
        # Multiples denote relative importance
        yp_scale = preprocessing.scale(yp) * factors[0]
        ys_scale = preprocessing.scale(ys) * factors[1]
        score = ys_scale - yp_scale

        # acquire the highest-scoring not-yet-sampled candidate
        srt = np.argsort(score)[::-1]
        for s in srt:
            if s not in samples:
                samples = np.concatenate([samples, [s]])
                break

        if plot:
            mae = np.round(mean_absolute_error(yp, y), 3)
            n = len(samples)

            fig, ax = plt.subplots(figsize=(6, 4))
            ax.plot(ids, y, 'o', zorder=0)
            ax.errorbar(ids, yp, yerr=ys, fmt='o', zorder=1)
            ax.plot(samples, y[samples], 'o', zorder=3)
            xlim = ax.get_xlim()
            ylim = ax.get_ylim()
            ax.text(xlim[0] / 9.0, ylim[0] / 9.0, mae)
            plt.tight_layout()
            plt.savefig('./online-learning-RBF-{}.png'.format(n))
            plt.close()

        step += 1

    return samples
jboes/CatKit
catkit/learn.py
Python
gpl-3.0
3,685
[ "Gaussian" ]
f7dc6486c442011f41273053bd028c3adc2582e68fd6abee8a252a9013921127
""" Test basic performance report functionality NOTE: To run these tests, browsermob-proxy-2.0-beta-9 must be installed These aren't real unittests, just some sample scenarios. """ from bok_choy.web_app_test import WebAppTest, with_cache from bok_choy.performance import MethodNotEnabledInCurrentMode from .pages import ButtonPage, TextFieldPage from nose.plugins.attrib import attr from unittest import expectedFailure import json import os # Set the default har capture method to 'error' os.environ['BOK_CHOY_HAR_MODE'] = 'error' os.environ['BROWSERMOB_PROXY_PORT'] = '8000' def har_files(): return os.listdir(os.environ.get('BOK_CHOY_HAR_DIR', '')) class HarCaptureTestBase(WebAppTest): """ CaptureHarOnErrorTest """ def visit_pages(self): page = ButtonPage(self.browser) page.visit() page2 = TextFieldPage(self.browser) page2.visit() def setUp(self): # Adding as extra 'Cleanup', because we have to wait for other # cleanup to happen before checking the har folder. Since cleanup # is LIFO, add the inspecting function first to ensure it is # executed last. self.should_capture = bool() self.addCleanup(self._inspect_har_files) self.inspect_har_content = False self.addCleanup(self._inspect_har_content) super(HarCaptureTestBase, self).setUp() def _inspect_har_files(self): # A list of booleans, each item representing if the file is a match. 
matched = [filename for filename in har_files() if self.id() in filename] self.assertEqual(self.should_capture, len(matched)) def _inspect_har_content(self): # Additional check for this one to make sure that data is actually captured if self.inspect_har_content: har_file = None for filename in har_files(): if self.id() in filename: har_file = filename break with open(os.path.join(os.environ.get('BOK_CHOY_HAR_DIR', ''), har_file)) as f: har_contents = json.load(f) self.assertTrue('status' in har_contents['log']['entries'][0]['response'].keys()) @attr(har_mode='explicit') class ExplicitHarCaptureTest(HarCaptureTestBase): """ How the har_mode is set: using the nose @attr decorator. This should override any environment setting. Note that this will only work if the `TestClass` is decorated, not the `test_case`. """ @expectedFailure def test_har_is_not_captured_in_explicit_mode(self): self.should_capture = 0 self.visit_pages() self.assertTrue(False) def test_capture_har_explicitly(self): self.should_capture = 1 self.har_capturer.add_page(self.browser, 'ButtonPage') page = ButtonPage(self.browser) page.visit() self.har_capturer.add_page(self.browser, 'TextFieldPage') page2 = TextFieldPage(self.browser) page2.visit() page2.enter_text('testing') self.har_capturer.save_har(self.browser) @with_cache def test_capture_har_explicitly_with_cache(self): self.should_capture = 4 self.har_capturer.add_page(self.browser, 'ButtonPage') page = ButtonPage(self.browser) page.visit() self.har_capturer.save_har(self.browser, self.id()+'_1') self.har_capturer.add_page(self.browser, 'TextFieldPage') page2 = TextFieldPage(self.browser) page2.visit() page2.enter_text('testing') self.har_capturer.save_har(self.browser, self.id()+'_2') @attr(har_mode='auto') class AutoHarCaptureTest(HarCaptureTestBase): """ How the har_mode is set: using the nose @attr decorator. This should override any environment setting. Note that this will only work if the `TestClass` is decorated, not the `test_case`. 
""" def test_har_is_captured_on_success_in_auto_mode(self): self.should_capture = 1 self.inspect_har_content = True self.visit_pages() self.assertTrue(True) @expectedFailure def test_har_is_captured_on_failure_in_auto_mode(self): self.should_capture = 1 self.visit_pages() self.assertTrue(False) @with_cache def test_two_hars_captured_on_success_in_auto_mode_with_cache(self): self.should_capture = 2 self.visit_pages() self.assertTrue(True) class ErrorHarCaptureTest(HarCaptureTestBase): """ How the har_mode is set: using environment var `BOK_CHOY_HAR_MODE`. This can be overridden for an individual test class using the @attr decorator from the nose.plugin.attrib module. """ @expectedFailure def test_har_is_captured_on_error_in_error_mode(self): self.should_capture = 1 self.visit_pages() raise Exception('Raising generic exception so that this test will error.') @expectedFailure def test_har_is_captured_on_failure_in_error_mode(self): self.should_capture = 1 self.visit_pages() self.assertTrue(False) def test_har_is_not_captured_on_success_in_error_mode(self): self.should_capture = 0 self.visit_pages() self.assertTrue(True) def test_explicit_har_capture_doesnt_work_in_error_mode(self): self.should_capture = 0 # Try to save one when we shouldn't be able to. with self.assertRaises(MethodNotEnabledInCurrentMode): self.har_capturer.add_page(self.browser, 'ButtonPage') # Try to save one when we shouldn't be able to. with self.assertRaises(MethodNotEnabledInCurrentMode): self.har_capturer.save_har(self.browser)
drptbl/bok-choy
tests/test_performance.py
Python
apache-2.0
5,710
[ "VisIt" ]
756c12bfa7c041aa0043d0b10b2217aeed6c7ad9f4595573554036ac1301f7a3
import numpy as np import matplotlib.pyplot as plt import dolfin_navier_scipy.stokes_navier_utils as snu import dolfin_navier_scipy.problem_setups as dnsps def conv_plot(abscissa, datalist, fit=None, markerl=None, xlabel=None, ylabel=None, fititem=0, fitfac=1., leglist=None, legloc=1, title='title not provided', fignum=None, ylims=None, xlims=None, yticks=None, logscale=False, logbase=10, tikzfile=None, showplot=True): """Universal function for convergence plots Parameters ---------- fititem : integer, optional to which item of the data the fit is aligned, defaults to `0` fitfac : float, optional to shift the fitting lines in y-direction, defaults to `1.0` """ try: import seaborn as sns sns.set(style="whitegrid") # mpilightgreen = '#BFDFDE' # mpigraygreen = '#7DA9A8' # sns.set_palette(sns.dark_palette(mpigraygreen, 4, reverse=True)) # sns.set_palette(sns.dark_palette(mpilightgreen, 6, reverse=True)) # sns.set_palette('cool', 3) sns.set_palette('ocean_r', 7) except ImportError: print('I recommend to install seaborn for nicer plots') lend = len(datalist) if markerl is None: markerl = ['']*lend if leglist is None: leglist = [None]*lend plt.figure(fignum) ax = plt.axes() for k, data in enumerate(datalist): plt.plot(abscissa, data, markerl[k], label=leglist[k]) if fit is not None: fls = [':', ':'] for i, cfit in enumerate(fit): abspow = [] for ela in abscissa: try: abspow.append((ela/abscissa[0])**(-cfit) * datalist[0][fititem]*fitfac) except TypeError: abspow.append((ela/abscissa[0])**(-cfit) * datalist[0][0][fititem]*fitfac) ax.plot(abscissa, abspow, 'k'+fls[i]) if logscale: ax.set_xscale('log', base=logbase) ax.set_yscale('log', base=logbase) if ylims is not None: plt.ylim(ylims) if xlims is not None: plt.xlim(xlims) if yticks is not None: plt.yticks(yticks) if title is not None: ax.set_title(title) plt.legend(loc=legloc) plt.grid(which='major') # _gohome(tikzfile, showplot) plt.show() return def cnvchk(meshprfx='mesh/karman2D-outlets', meshlevel=1, 
proutdir='results/', problem='drivencavity', N=None, nu=1e-2, Re=None, time_int_scheme='cnab', t0=0.0, tE=1.0, Nts=1e2+1, scheme='TH', dblng=2): meshfile = meshprfx + '_lvl{0}.xml.gz'.format(meshlevel) physregs = meshprfx + '_lvl{0}_facet_region.xml.gz'.format(meshlevel) geodata = meshprfx + '_geo_cntrlbc.json' femp, stokesmatsc, rhsd = \ dnsps.get_sysmats(problem='gen_bccont', Re=Re, bccontrol=False, scheme=scheme, mergerhs=True, meshparams=dict(strtomeshfile=meshfile, strtophysicalregions=physregs, strtobcsobs=geodata)) # setting some parameters if Re is not None: nu = femp['charlen']/Re soldict = stokesmatsc # containing A, J, JT soldict.update(femp) # adding V, Q, invinds, diribcs soldict.update(fv=rhsd['fv'], fp=rhsd['fp'], N=N, nu=nu, return_final_vp=True, start_ssstokes=True, get_datastring=None, verbose=True, treat_nonl_explicit=True, time_int_scheme=time_int_scheme, dbcinds=femp['dbcinds'], dbcvals=femp['dbcvals']) mmat = stokesmatsc['M'] cnts = Nts*2**dblng tips = dict(t0=t0, tE=tE, Nts=cnts) soldict.update(tips) # adding time integration params print('*** computing the ref solution ***') (vfref, pfref) = snu.solve_nse(**soldict) print('*** done with the ref solution ***') soldict.update(dict(verbose=False)) errlst, ntslst = [], [] for k in range(dblng): cnts = Nts*2**k tips = dict(t0=t0, tE=tE, Nts=cnts) soldict.update(tips) # adding time integration params (vf, pf) = snu.solve_nse(**soldict) difv = vf - vfref cnv = np.sqrt(difv.T @ mmat @ difv).flatten()[0] errlst.append(cnv) ntslst.append(cnts) print('Nts: {0} -- |v-vref|: {1:e}'.format(cnts, cnv)) conv_plot(ntslst, [errlst], logscale=True, fit=[2], markerl=['o'], leglist=[time_int_scheme], title='Check for 2nd order convergence') if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("--meshprefix", type=str, help="prefix for the mesh files", default='mesh/karman2D-outlets') parser.add_argument("--meshlevel", type=int, help="mesh level", default=1) 
parser.add_argument("--Re", type=int, help="Reynoldsnumber", default=100) parser.add_argument("--tE", type=float, help="final time of the simulation", default=.1) parser.add_argument("--Nts", type=float, help="number of time steps", default=100) parser.add_argument("--scaletest", type=float, help="scale the test size", default=1.) parser.add_argument("--paraviewframes", type=int, help="number of outputs for paraview", default=200) parser.add_argument("--doublings", type=int, help="how often we double the time steps", default=4) parser.add_argument("--tis", type=str, choices=['cnab', 'sbdf2'], help="scheme for time integration", default='sbdf2') args = parser.parse_args() print(args) scheme = 'TH' cnvchk(problem='gen_bccont', Re=args.Re, meshprfx=args.meshprefix, meshlevel=args.meshlevel, t0=0., tE=args.scaletest*args.tE, Nts=np.int(args.scaletest*args.Nts), dblng=args.doublings, scheme=scheme, time_int_scheme=args.tis)
highlando/dolfin_navier_scipy
tests/tdp_convcheck.py
Python
gpl-3.0
6,349
[ "ParaView" ]
9130bafc3019d89e9525e09d75c571a5af3e5ac709809f5ab3eb33dd44ba7f8d
# # Copyright (c) 2007, Novartis Institutes for BioMedical Research Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Novartis Institutes for BioMedical Research Inc. # nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """ Implementation of the RECAP algorithm from Lewell et al. JCICS *38* 511-522 (1998) The published algorithm is implemented more or less without modification. The results are returned as a hierarchy of nodes instead of just as a set of fragments. The hope is that this will allow a bit more flexibility in working with the results. 
For example: >>> from rdkit import Chem >>> from rdkit.Chem import Recap >>> m = Chem.MolFromSmiles('C1CC1Oc1ccccc1-c1ncc(OC)cc1') >>> res = Recap.RecapDecompose(m) >>> res <...Chem.Recap.RecapHierarchyNode object at ...> >>> sorted(res.children.keys()) ['*C1CC1', '*c1ccc(OC)cn1', '*c1ccccc1-c1ccc(OC)cn1', '*c1ccccc1OC1CC1'] >>> sorted(res.GetAllChildren().keys()) ['*C1CC1', '*c1ccc(OC)cn1', '*c1ccccc1*', '*c1ccccc1-c1ccc(OC)cn1', '*c1ccccc1OC1CC1'] To get the standard set of RECAP results, use GetLeaves(): >>> leaves=res.GetLeaves() >>> sorted(leaves.keys()) ['*C1CC1', '*c1ccc(OC)cn1', '*c1ccccc1*'] >>> leaf = leaves['*C1CC1'] >>> leaf.mol <...Chem.rdchem.Mol object at ...> """ import sys import weakref from rdkit import Chem from rdkit.Chem import rdChemReactions as Reactions # These are the definitions that will be applied to fragment molecules: reactionDefs = ( "[#7;+0;D2,D3:1]!@C(!@=O)!@[#7;+0;D2,D3:2]>>*[#7:1].[#7:2]*", # urea "[C;!$(C([#7])[#7]):1](=!@[O:2])!@[#7;+0;!D1:3]>>*[C:1]=[O:2].*[#7:3]", # amide "[C:1](=!@[O:2])!@[O;+0:3]>>*[C:1]=[O:2].[O:3]*", # ester "[N;!D1;+0;!$(N-C=[#7,#8,#15,#16])](-!@[*:1])-!@[*:2]>>*[*:1].[*:2]*", # amines # "[N;!D1](!@[*:1])!@[*:2]>>*[*:1].[*:2]*", # amines # again: what about aromatics? "[#7;R;D3;+0:1]-!@[*:2]>>*[#7:1].[*:2]*", # cyclic amines "[#6:1]-!@[O;+0]-!@[#6:2]>>[#6:1]*.*[#6:2]", # ether "[C:1]=!@[C:2]>>[C:1]*.*[C:2]", # olefin "[n;+0:1]-!@[C:2]>>[n:1]*.[C:2]*", # aromatic nitrogen - aliphatic carbon "[O:3]=[C:4]-@[N;+0:1]-!@[C:2]>>[O:3]=[C:4]-[N:1]*.[C:2]*", # lactam nitrogen - aliphatic carbon "[c:1]-!@[c:2]>>[c:1]*.*[c:2]", # aromatic carbon - aromatic carbon # aromatic nitrogen - aromatic carbon *NOTE* this is not part of the standard recap set. 
"[n;+0:1]-!@[c:2]>>[n:1]*.*[c:2]", "[#7;+0;D2,D3:1]-!@[S:2](=[O:3])=[O:4]>>[#7:1]*.*[S:2](=[O:3])=[O:4]", # sulphonamide ) reactions = tuple([Reactions.ReactionFromSmarts(x) for x in reactionDefs]) class RecapHierarchyNode(object): """ This class is used to hold the Recap hiearchy """ mol = None children = None parents = None smiles = None def __init__(self, mol): self.mol = mol self.children = {} self.parents = {} def GetAllChildren(self): " returns a dictionary, keyed by SMILES, of children " res = {} for smi, child in self.children.items(): res[smi] = child child._gacRecurse(res, terminalOnly=False) return res def GetLeaves(self): " returns a dictionary, keyed by SMILES, of leaf (terminal) nodes " res = {} for smi, child in self.children.items(): if not len(child.children): res[smi] = child else: child._gacRecurse(res, terminalOnly=True) return res def getUltimateParents(self): """ returns all the nodes in the hierarchy tree that contain this node as a child """ if not self.parents: res = [self] else: res = [] for p in self.parents.values(): for uP in p.getUltimateParents(): if uP not in res: res.append(uP) return res def _gacRecurse(self, res, terminalOnly=False): for smi, child in self.children.items(): if not terminalOnly or not len(child.children): res[smi] = child child._gacRecurse(res, terminalOnly=terminalOnly) def __del__(self): self.children = {} self.parents = {} self.mol = None def RecapDecompose(mol, allNodes=None, minFragmentSize=0, onlyUseReactions=None): """ returns the recap decomposition for a molecule """ mSmi = Chem.MolToSmiles(mol, 1) if allNodes is None: allNodes = {} if mSmi in allNodes: return allNodes[mSmi] res = RecapHierarchyNode(mol) res.smiles = mSmi activePool = {mSmi: res} allNodes[mSmi] = res while activePool: nSmi = next(iter(activePool)) node = activePool.pop(nSmi) if not node.mol: continue for rxnIdx, reaction in enumerate(reactions): if onlyUseReactions and rxnIdx not in onlyUseReactions: continue # print ' .',nSmi # print ' 
!!!!',rxnIdx,nSmi,reactionDefs[rxnIdx] ps = reaction.RunReactants((node.mol, )) # print ' ',len(ps) if ps: for prodSeq in ps: seqOk = True # we want to disqualify small fragments, so sort the product sequence by size # and then look for "forbidden" fragments tSeq = [(prod.GetNumAtoms(onlyExplicit=True), idx) for idx, prod in enumerate(prodSeq)] tSeq.sort() ts = [(x, prodSeq[y]) for x, y in tSeq] prodSeq = ts for nats, prod in prodSeq: try: Chem.SanitizeMol(prod) except Exception: continue pSmi = Chem.MolToSmiles(prod, 1) if minFragmentSize > 0: nDummies = pSmi.count('*') if nats - nDummies < minFragmentSize: seqOk = False break # don't forget after replacing dummy atoms to remove any empty # branches: elif pSmi.replace('*', '').replace('()', '') in ('', 'C', 'CC', 'CCC'): seqOk = False break prod.pSmi = pSmi if seqOk: for nats, prod in prodSeq: pSmi = prod.pSmi # print '\t',nats,pSmi if pSmi not in allNodes: pNode = RecapHierarchyNode(prod) pNode.smiles = pSmi pNode.parents[nSmi] = weakref.proxy(node) node.children[pSmi] = pNode activePool[pSmi] = pNode allNodes[pSmi] = pNode else: pNode = allNodes[pSmi] pNode.parents[nSmi] = weakref.proxy(node) node.children[pSmi] = pNode # print ' >>an:',allNodes.keys() return res # ------- ------- ------- ------- ------- ------- ------- ------- # Begin testing code if __name__ == '__main__': import unittest class TestCase(unittest.TestCase): def test1(self): m = Chem.MolFromSmiles('C1CC1Oc1ccccc1-c1ncc(OC)cc1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.children.keys()) == 4) self.assertTrue(len(res.GetAllChildren().keys()) == 5) self.assertTrue(len(res.GetLeaves().keys()) == 3) def test2(self): m = Chem.MolFromSmiles('CCCOCCC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(res.children == {}) def test3(self): allNodes = {} m = Chem.MolFromSmiles('c1ccccc1-c1ncccc1') res = RecapDecompose(m, allNodes=allNodes) self.assertTrue(res) self.assertTrue(len(res.children.keys()) == 2) 
self.assertTrue(len(allNodes.keys()) == 3) m = Chem.MolFromSmiles('COc1ccccc1-c1ncccc1') res = RecapDecompose(m, allNodes=allNodes) self.assertTrue(res) self.assertTrue(len(res.children.keys()) == 2) # we get two more nodes from that: self.assertTrue(len(allNodes.keys()) == 5) self.assertTrue('*c1ccccc1OC' in allNodes) self.assertTrue('*c1ccccc1' in allNodes) m = Chem.MolFromSmiles('C1CC1Oc1ccccc1-c1ncccc1') res = RecapDecompose(m, allNodes=allNodes) self.assertTrue(res) self.assertTrue(len(res.children.keys()) == 4) self.assertTrue(len(allNodes.keys()) == 10) def testSFNetIssue1801871(self): m = Chem.MolFromSmiles('c1ccccc1OC(Oc1ccccc1)Oc1ccccc1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertFalse('*C(*)*' in ks) self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*C(*)Oc1ccccc1' in ks) def testSFNetIssue1804418(self): m = Chem.MolFromSmiles('C1CCCCN1CCCC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*N1CCCCC1' in ks) self.assertTrue('*CCCC' in ks) def testMinFragmentSize(self): m = Chem.MolFromSmiles('CCCOCCC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(res.children == {}) res = RecapDecompose(m, minFragmentSize=3) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 1) ks = res.GetLeaves().keys() self.assertTrue('*CCC' in ks) m = Chem.MolFromSmiles('CCCOCC') res = RecapDecompose(m, minFragmentSize=3) self.assertTrue(res) self.assertTrue(res.children == {}) m = Chem.MolFromSmiles('CCCOCCOC') res = RecapDecompose(m, minFragmentSize=2) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*CCC' in ks) ks = res.GetLeaves().keys() self.assertTrue('*CCOC' in ks) def testAmideRxn(self): m = Chem.MolFromSmiles('C1CC1C(=O)NC1OC1') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) 
self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*C(=O)C1CC1' in ks) self.assertTrue('*NC1CO1' in ks) m = Chem.MolFromSmiles('C1CC1C(=O)N(C)C1OC1') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*C(=O)C1CC1' in ks) self.assertTrue('*N(C)C1CO1' in ks) m = Chem.MolFromSmiles('C1CC1C(=O)n1cccc1') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*C(=O)C1CC1' in ks) self.assertTrue('*n1cccc1' in ks) m = Chem.MolFromSmiles('C1CC1C(=O)CC1OC1') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('C1CCC(=O)NC1') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('CC(=O)NC') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() m = Chem.MolFromSmiles('CC(=O)N') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('C(=O)NCCNC(=O)CC') res = RecapDecompose(m, onlyUseReactions=[1]) self.assertTrue(res) self.assertTrue(len(res.children) == 4) self.assertTrue(len(res.GetLeaves()) == 3) def testEsterRxn(self): m = Chem.MolFromSmiles('C1CC1C(=O)OC1OC1') res = RecapDecompose(m, onlyUseReactions=[2]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*C(=O)C1CC1' in ks) self.assertTrue('*OC1CO1' in ks) m = Chem.MolFromSmiles('C1CC1C(=O)CC1OC1') res = RecapDecompose(m, onlyUseReactions=[2]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('C1CCC(=O)OC1') res = RecapDecompose(m, onlyUseReactions=[2]) self.assertTrue(res) 
self.assertTrue(len(res.GetLeaves()) == 0) def testUreaRxn(self): m = Chem.MolFromSmiles('C1CC1NC(=O)NC1OC1') res = RecapDecompose(m, onlyUseReactions=[0]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*NC1CC1' in ks) self.assertTrue('*NC1CO1' in ks) m = Chem.MolFromSmiles('C1CC1NC(=O)N(C)C1OC1') res = RecapDecompose(m, onlyUseReactions=[0]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*NC1CC1' in ks) self.assertTrue('*N(C)C1CO1' in ks) m = Chem.MolFromSmiles('C1CCNC(=O)NC1C') res = RecapDecompose(m, onlyUseReactions=[0]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('c1cccn1C(=O)NC1OC1') res = RecapDecompose(m, onlyUseReactions=[0]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*n1cccc1' in ks) self.assertTrue('*NC1CO1' in ks) m = Chem.MolFromSmiles('c1cccn1C(=O)n1c(C)ccc1') res = RecapDecompose(m, onlyUseReactions=[0]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*n1cccc1C' in ks) def testAmineRxn(self): m = Chem.MolFromSmiles('C1CC1N(C1NC1)C1OC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 3) ks = res.GetLeaves().keys() self.assertTrue('*C1CC1' in ks) self.assertTrue('*C1CO1' in ks) self.assertTrue('*C1CN1' in ks) m = Chem.MolFromSmiles('c1ccccc1N(C1NC1)C1OC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 3) ks = res.GetLeaves().keys() self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*C1CO1' in ks) self.assertTrue('*C1CN1' in ks) m = Chem.MolFromSmiles('c1ccccc1N(c1ncccc1)C1OC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 3) ks = res.GetLeaves().keys() self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*c1ccccn1' in ks) self.assertTrue('*C1CO1' in 
ks) m = Chem.MolFromSmiles('c1ccccc1N(c1ncccc1)c1ccco1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 3) ks = res.GetLeaves().keys() self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*c1ccccn1' in ks) self.assertTrue('*c1ccco1' in ks) m = Chem.MolFromSmiles('C1CCCCN1C1CC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*N1CCCCC1' in ks) self.assertTrue('*C1CC1' in ks) m = Chem.MolFromSmiles('C1CCC2N1CC2') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) def testEtherRxn(self): m = Chem.MolFromSmiles('C1CC1OC1OC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*C1CC1' in ks) self.assertTrue('*C1CO1' in ks) m = Chem.MolFromSmiles('C1CCCCO1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('c1ccccc1OC1OC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*C1CO1' in ks) m = Chem.MolFromSmiles('c1ccccc1Oc1ncccc1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*c1ccccn1' in ks) def testOlefinRxn(self): m = Chem.MolFromSmiles('ClC=CBr') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*CCl' in ks) self.assertTrue('*CBr' in ks) m = Chem.MolFromSmiles('C1CC=CC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) def testAromNAliphCRxn(self): m = Chem.MolFromSmiles('c1cccn1CCCC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() 
self.assertTrue('*n1cccc1' in ks) self.assertTrue('*CCCC' in ks) m = Chem.MolFromSmiles('c1ccc2n1CCCC2') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) def testLactamNAliphCRxn(self): m = Chem.MolFromSmiles('C1CC(=O)N1CCCC') res = RecapDecompose(m, onlyUseReactions=[8]) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*N1CCC1=O' in ks) self.assertTrue('*CCCC' in ks) m = Chem.MolFromSmiles('O=C1CC2N1CCCC2') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) def testAromCAromCRxn(self): m = Chem.MolFromSmiles('c1ccccc1c1ncccc1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*c1ccccc1' in ks) self.assertTrue('*c1ccccn1' in ks) m = Chem.MolFromSmiles('c1ccccc1C1CC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) def testAromNAromCRxn(self): m = Chem.MolFromSmiles('c1cccn1c1ccccc1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*n1cccc1' in ks) self.assertTrue('*c1ccccc1' in ks) def testSulfonamideRxn(self): m = Chem.MolFromSmiles('CCCNS(=O)(=O)CC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*NCCC' in ks) self.assertTrue('*S(=O)(=O)CC' in ks) m = Chem.MolFromSmiles('c1cccn1S(=O)(=O)CC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) ks = res.GetLeaves().keys() self.assertTrue('*n1cccc1' in ks) self.assertTrue('*S(=O)(=O)CC' in ks) m = Chem.MolFromSmiles('C1CNS(=O)(=O)CC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) def testSFNetIssue1881803(self): m = Chem.MolFromSmiles('c1ccccc1n1cccc1') res = RecapDecompose(m) self.assertTrue(res) 
self.assertTrue(len(res.GetLeaves()) == 2) m = Chem.MolFromSmiles('c1ccccc1[n+]1ccccc1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('C1CC1NC(=O)CC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) m = Chem.MolFromSmiles('C1CC1[NH+]C(=O)CC') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) m = Chem.MolFromSmiles('C1CC1NC(=O)NC1CCC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 2) m = Chem.MolFromSmiles('C1CC1[NH+]C(=O)[NH+]C1CCC1') res = RecapDecompose(m) self.assertTrue(res) self.assertTrue(len(res.GetLeaves()) == 0) unittest.main()
rdkit/rdkit
rdkit/Chem/Recap.py
Python
bsd-3-clause
24,160
[ "RDKit" ]
3a9c03e6c2c00efe556c937d04f439ab78f56146fdd95a53de41b7df546c385d
import typing as t from . import nodes from .visitor import NodeVisitor VAR_LOAD_PARAMETER = "param" VAR_LOAD_RESOLVE = "resolve" VAR_LOAD_ALIAS = "alias" VAR_LOAD_UNDEFINED = "undefined" def find_symbols( nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None ) -> "Symbols": sym = Symbols(parent=parent_symbols) visitor = FrameSymbolVisitor(sym) for node in nodes: visitor.visit(node) return sym def symbols_for_node( node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None ) -> "Symbols": sym = Symbols(parent=parent_symbols) sym.analyze_node(node) return sym class Symbols: def __init__( self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None ) -> None: if level is None: if parent is None: level = 0 else: level = parent.level + 1 self.level: int = level self.parent = parent self.refs: t.Dict[str, str] = {} self.loads: t.Dict[str, t.Any] = {} self.stores: t.Set[str] = set() def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None: visitor = RootVisitor(self) visitor.visit(node, **kwargs) def _define_ref( self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None ) -> str: ident = f"l_{self.level}_{name}" self.refs[name] = ident if load is not None: self.loads[ident] = load return ident def find_load(self, target: str) -> t.Optional[t.Any]: if target in self.loads: return self.loads[target] if self.parent is not None: return self.parent.find_load(target) return None def find_ref(self, name: str) -> t.Optional[str]: if name in self.refs: return self.refs[name] if self.parent is not None: return self.parent.find_ref(name) return None def ref(self, name: str) -> str: rv = self.find_ref(name) if rv is None: raise AssertionError( "Tried to resolve a name to a reference that was" f" unknown to the frame ({name!r})" ) return rv def copy(self) -> "Symbols": rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.refs = self.refs.copy() rv.loads = self.loads.copy() rv.stores = self.stores.copy() 
return rv def store(self, name: str) -> None: self.stores.add(name) # If we have not see the name referenced yet, we need to figure # out what to set it to. if name not in self.refs: # If there is a parent scope we check if the name has a # reference there. If it does it means we might have to alias # to a variable there. if self.parent is not None: outer_ref = self.parent.find_ref(name) if outer_ref is not None: self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref)) return # Otherwise we can just set it to undefined. self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None)) def declare_parameter(self, name: str) -> str: self.stores.add(name) return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None)) def load(self, name: str) -> None: if self.find_ref(name) is None: self._define_ref(name, load=(VAR_LOAD_RESOLVE, name)) def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None: stores: t.Dict[str, int] = {} for branch in branch_symbols: for target in branch.stores: if target in self.stores: continue stores[target] = stores.get(target, 0) + 1 for sym in branch_symbols: self.refs.update(sym.refs) self.loads.update(sym.loads) self.stores.update(sym.stores) for name, branch_count in stores.items(): if branch_count == len(branch_symbols): continue target = self.find_ref(name) # type: ignore assert target is not None, "should not happen" if self.parent is not None: outer_target = self.parent.find_ref(name) if outer_target is not None: self.loads[target] = (VAR_LOAD_ALIAS, outer_target) continue self.loads[target] = (VAR_LOAD_RESOLVE, name) def dump_stores(self) -> t.Dict[str, str]: rv: t.Dict[str, str] = {} node: t.Optional["Symbols"] = self while node is not None: for name in sorted(node.stores): if name not in rv: rv[name] = self.find_ref(name) # type: ignore node = node.parent return rv def dump_param_targets(self) -> t.Set[str]: rv = set() node: t.Optional["Symbols"] = self while node is not None: for target, (instr, _) in self.loads.items(): if instr 
== VAR_LOAD_PARAMETER: rv.add(target) node = node.parent return rv class RootVisitor(NodeVisitor): def __init__(self, symbols: "Symbols") -> None: self.sym_visitor = FrameSymbolVisitor(symbols) def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None: for child in node.iter_child_nodes(): self.sym_visitor.visit(child) visit_Template = _simple_visit visit_Block = _simple_visit visit_Macro = _simple_visit visit_FilterBlock = _simple_visit visit_Scope = _simple_visit visit_If = _simple_visit visit_ScopedEvalContextModifier = _simple_visit def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None: for child in node.body: self.sym_visitor.visit(child) def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None: for child in node.iter_child_nodes(exclude=("call",)): self.sym_visitor.visit(child) def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None: for child in node.body: self.sym_visitor.visit(child) def visit_For( self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any ) -> None: if for_branch == "body": self.sym_visitor.visit(node.target, store_as_param=True) branch = node.body elif for_branch == "else": branch = node.else_ elif for_branch == "test": self.sym_visitor.visit(node.target, store_as_param=True) if node.test is not None: self.sym_visitor.visit(node.test) return else: raise RuntimeError("Unknown for branch") if branch: for item in branch: self.sym_visitor.visit(item) def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None: for target in node.targets: self.sym_visitor.visit(target) for child in node.body: self.sym_visitor.visit(child) def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None: raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}") class FrameSymbolVisitor(NodeVisitor): """A visitor for `Frame.inspect`.""" def __init__(self, symbols: "Symbols") -> None: self.symbols = symbols def visit_Name( self, node: 
nodes.Name, store_as_param: bool = False, **kwargs: t.Any ) -> None: """All assignments to names go through this function.""" if store_as_param or node.ctx == "param": self.symbols.declare_parameter(node.name) elif node.ctx == "store": self.symbols.store(node.name) elif node.ctx == "load": self.symbols.load(node.name) def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None: self.symbols.load(node.name) def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None: self.visit(node.test, **kwargs) original_symbols = self.symbols def inner_visit(nodes: t.Iterable[nodes.Node]) -> "Symbols": self.symbols = rv = original_symbols.copy() for subnode in nodes: self.visit(subnode, **kwargs) self.symbols = original_symbols return rv body_symbols = inner_visit(node.body) elif_symbols = inner_visit(node.elif_) else_symbols = inner_visit(node.else_ or ()) self.symbols.branch_update([body_symbols, elif_symbols, else_symbols]) def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None: self.symbols.store(node.name) def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None: self.generic_visit(node, **kwargs) self.symbols.store(node.target) def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None: self.generic_visit(node, **kwargs) for name in node.names: if isinstance(name, tuple): self.symbols.store(name[1]) else: self.symbols.store(name) def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None: """Visit assignments in the correct order.""" self.visit(node.node, **kwargs) self.visit(node.target, **kwargs) def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None: """Visiting stops at for blocks. However the block sequence is visited as part of the outer scope. 
""" self.visit(node.iter, **kwargs) def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None: self.visit(node.call, **kwargs) def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None: self.visit(node.filter, **kwargs) def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None: for target in node.values: self.visit(target) def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None: """Stop visiting at block assigns.""" self.visit(node.target, **kwargs) def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None: """Stop visiting at scopes.""" def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None: """Stop visiting at blocks.""" def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None: """Do not visit into overlay scopes."""
pallets/jinja
src/jinja2/idtracking.py
Python
bsd-3-clause
10,704
[ "VisIt" ]
19f36669d8abe280c02d5c739f70cbf582278490ebebd79b5de036ca07ee0860
import os import unittest import ansigenome.constants as c import ansigenome.test_helpers as th import ansigenome.utils as utils class TestTemplates(unittest.TestCase): """ Integration tests for the templates. """ def setUp(self): self.test_path = os.getenv("ANSIGENOME_TEST_PATH", c.TEST_PATH) if not os.path.exists(self.test_path): os.makedirs(self.test_path) def tearDown(self): th.rmrf(self.test_path) def test_templates(self): role = th.create_roles(self.test_path, 1) role_name = role[0] role_path = os.path.join(self.test_path, role_name) readme_path = os.path.join(role_path, "README.rst") defaults_path = os.path.join(self.test_path, role_name, "defaults", "main.yml") tasks_path = os.path.join(self.test_path, role_name, "tasks", "main.yml") meta_path = os.path.join(self.test_path, role_name, "meta", "main.yml") utils.string_to_file(defaults_path, th.DEFAULTS_TEMPLATE) utils.string_to_file(tasks_path, th.TASKS_TEMPLATE) utils.string_to_file(meta_path, th.META_TEMPLATE_FULL) (out, err) = utils.capture_shell( "ansigenome gendoc {0}".format(self.test_path)) readme = utils.file_to_string(readme_path) print print "README compiled template:" print readme print self.assertIn("tear.drinker", readme) self.assertIn("Chuck Norris", readme) self.assertIn("Travis", readme) self.assertIn("poop", readme) self.assertIn("imposes", readme) self.assertIn("fists", readme) self.assertIn("telekinesis", readme) self.assertIn(role_name, readme) self.assertIn("testuser." + role_name, readme) self.assertIn("ansible-" + role_name, readme) self.assertIn("theuniverse", readme) self.assertIn("foo: bar", readme) self.assertIn("unix_is_cool", readme) self.assertIn("ansigenome", readme) self.assertIn("galaxy.ansible.com", readme) self.assertNotIn("BETA", readme) self.assertNotIn("beta", readme) self.assertNotIn("deprecated", readme) self.assertNotIn("galaxy-install", readme) self.assertNotIn("twitter", readme)
AlbanAndrieu/ansigenome
test/integration/test_templates.py
Python
gpl-3.0
2,387
[ "Galaxy" ]
fc3290f4af9453b8fae45f235ee739634faef2b602b4afe99a74ef1fa7578178
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved. # Revisions copyright 2008-2009 by Peter Cock. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # # Contact: Leighton Pritchard, Scottish Crop Research Institute, # Invergowrie, Dundee, Scotland, DD2 5DA, UK # L.Pritchard@scri.ac.uk ################################################################################ """ Graph module Provides: o GraphData - Contains data from which a graph will be drawn, and information about its presentation For drawing capabilities, this module uses reportlab to draw and write the diagram: http://www.reportlab.com For dealing with biological information, the package expects BioPython objects: http://www.biopython.org """ # ReportLab imports from __future__ import print_function from reportlab.lib import colors from math import sqrt class GraphData(object): """ GraphData Provides: Methods: o __init__(self, id=None, data=None, name=None, style='bar', color=colors.lightgreen, altcolor=colors.darkseagreen) Called on instantiation o set_data(self, data) Load the object with data to be plotted o get_data(self) Returns the data to be plotted as a list of (position, value) tuples o add_point(self, point) Add a single point to the data set o quartiles(self) Returns a tuple of the data quartiles o range(self) Returns a tuple of the base range covered by the graph data o mean(self) Returns a float of the mean data point value o stdev(self) Returns the sample standard deviation of the data values o __len__(self) Returns the length of sequence covered by the data o __getitem__(self, index) Returns the value at the base specified, or graph data in the base range o __str__(self) Returns a formatted string describing the graph data Attributes: o id Unique identifier for the data o data Dictionary of describing the data, keyed by position o name String describing the 
data o style String ('bar', 'heat', 'line') describing how to draw the data o poscolor colors.Color for drawing high (some styles) or all values o negcolor colors.Color for drawing low values (some styles) o linewidth Int, thickness to draw the line in 'line' styles """ def __init__(self, id=None, data=None, name=None, style='bar', color=colors.lightgreen, altcolor=colors.darkseagreen, center=None, colour=None, altcolour=None): """__init__(self, id=None, data=None, name=None, style='bar', color=colors.lightgreen, altcolor=colors.darkseagreen) o id Unique ID for the graph o data List of (position, value) tuples o name String describing the graph o style String describing the presentation style ('bar', 'line', 'heat') o color colors.Color describing the color to draw all or the 'high' (some styles) values (overridden by backwards compatible argument with UK spelling, colour). o altcolor colors.Color describing the color to draw the 'low' values (some styles only) (overridden by backwards compatible argument with UK spelling, colour). o center Value at which x-axis crosses y-axis. 
""" # Let the UK spelling (colour) override the USA spelling (color) if colour is not None: color = colour if altcolour is not None: altcolor = altcolour self.id = id # Unique identifier for the graph self.data = {} # holds values, keyed by sequence position if data is not None: self.set_data(data) self.name = name # Descriptive string # Attributes describing how the graph will be drawn self.style = style # One of 'bar', 'heat' or 'line' self.poscolor = color # Color to draw all, or 'high' values self.negcolor = altcolor # Color to draw 'low' values self.linewidth = 2 # linewidth to use in line graphs self.center = center # value at which x-axis crosses y-axis def set_data(self, data): """ set_data(self, data) o data List of (position, value) tuples Add data with a list of (position, value) tuples """ for (pos, val) in data: # Fill data dictionary self.data[pos] = val def get_data(self): """ get_data(self) -> [(int, float), (int, float), ...] Return data as a list of sorted (position, value) tuples """ data = [] for xval in self.data: yval = self.data[xval] data.append((xval, yval)) data.sort() return data def add_point(self, point): """ add_point(self, point) o point (position, value) tuple Add a single point to the set of data """ pos, val = point self.data[pos] = val def quartiles(self): """ quartiles(self) -> (float, float, float, float, float) Returns the (minimum, lowerQ, medianQ, upperQ, maximum) values as a tuple """ data = sorted(self.data.values()) datalen = len(data) return(data[0], data[datalen // 4], data[datalen // 2], data[3 * datalen // 4], data[-1]) def range(self): """ range(self) -> (int, int) Returns the range of the data, i.e. its start and end points on the genome as a (start, end) tuple """ positions = sorted(self.data) # i.e. 
dict keys # Return first and last positions in graph # print len(self.data) return (positions[0], positions[-1]) def mean(self): """ mean(self) -> Float Returns the mean value for the data points """ data = list(self.data.values()) sum = 0. for item in data: sum += float(item) return sum / len(data) def stdev(self): """ stdev(self) -> Float Returns the sample standard deviation for the data """ data = list(self.data.values()) m = self.mean() runtotal = 0. for entry in data: runtotal += float((entry - m) ** 2) # This is sample standard deviation; population stdev would involve # division by len(data), rather than len(data)-1 return sqrt(runtotal / (len(data) - 1)) def __len__(self): """ __len__(self) -> Int Returns the number of points in the data set """ return len(self.data) def __getitem__(self, index): """ __getitem__(self, index) -> Float or list of tuples Given an integer representing position on the sequence returns a float - the data value at the passed position. If a slice, returns graph data from the region as a list or (position, value) tuples. Slices with step are not supported. Returns the data value at the passed position """ if isinstance(index, int): return self.data[index] elif isinstance(index, slice): # TODO - Why does it treat the end points both as inclusive? # This doesn't match Python norms does it? 
low = index.start high = index.stop if index.step is not None and index.step != 1: raise ValueError outlist = [] for pos in sorted(self.data): if pos >= low and pos <= high: outlist.append((pos, self.data[pos])) return outlist else: raise TypeError("Need an integer or a slice") def __str__(self): """ __str__(self) -> "" Returns a string describing the graph data """ outstr = ["\nGraphData: %s, ID: %s" % (self.name, self.id)] outstr.append("Number of points: %d" % len(self.data)) outstr.append("Mean data value: %s" % self.mean()) outstr.append("Sample SD: %.3f" % self.stdev()) outstr.append("Minimum: %s\n1Q: %s\n2Q: %s\n3Q: %s\nMaximum: %s" % self.quartiles()) outstr.append("Sequence Range: %s..%s" % self.range()) return "\n".join(outstr)
zjuchenyuan/BioWeb
Lib/Bio/Graphics/GenomeDiagram/_Graph.py
Python
mit
8,910
[ "Biopython" ]
8b5e705b264feffafe26a2bdc86d99760e924cd9916b3b465c8e616fba724043
# Zeobuilder is an extensible GUI-toolkit for molecular model construction. # Copyright (C) 2007 - 2009 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center # for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights # reserved unless otherwise stated. # # This file is part of Zeobuilder. # # Zeobuilder is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # In addition to the regulations of the GNU General Public License, # publications and communications based in parts on this program or on # parts of this program are required to cite the following article: # # "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the # nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck # and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48 # (7), 1530-1541, 2008 # DOI:10.1021/ci8000748 # # Zeobuilder is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/> # # -- from zeobuilder import context from zeobuilder.actions.collections.menu import MenuInfo from zeobuilder.actions.composed import Immediate, UserError from zeobuilder.nodes.glcontainermixin import GLContainerMixin from zeobuilder.moltools import create_molecular_graph, create_molecule import zeobuilder.actions.primitive as primitive import zeobuilder.authors as authors from molmod.transformations import Translation, superpose from molmod.toyff import guess_geometry, tune_geometry from molmod.data.periodic import periodic from molmod.units import angstrom import numpy, gtk, tempfile, os def coords_to_zeobuilder(org_coords, opt_coords, atoms, parent): # Transform the guessed geometry as to overlap with the original geometry transf = superpose(org_coords, opt_coords) opt_coords = numpy.dot(opt_coords, transf.r.transpose()) + transf.t # Put coordinates of guess geometry back into Zeobuilder model for i in xrange(len(atoms)): translation = Translation() atom = atoms[i] # Make sure atoms in subframes are treated properly transf = atom.parent.get_frame_relative_to(parent) org_pos = atom.transformation.t opt_pos = transf.vector_apply_inverse(opt_coords[i]) translation.t = opt_pos - org_pos primitive.Transform(atom, translation) class GuessGeometry(Immediate): """Guess the geometry of the selected molecule based on the molecular graph""" description = "Guess the molecular geometry" menu_info = MenuInfo("default/_Object:tools/_Molecular:geometry","_Guess geometry", ord("g"), False, order=(0, 4, 1, 5, 4, 0)) authors = [authors.wouter_smet, authors.toon_verstraelen] @staticmethod def analyze_selection(): # A) calling ancestor if not Immediate.analyze_selection(): return False # B) validating if not isinstance(context.application.cache.node, GLContainerMixin): return False if len(context.application.cache.node.children) == 0: return 
False # C) passed all tests: return True def do(self): # Get the molecular graph of the molecule in the selection parent = context.application.cache.node graph = create_molecular_graph([parent], parent) if graph.molecule.size == 0: raise UserError("Could not get molecular graph.", "Make sure that the selected frame contains a molecule.") # Guessed and original geometry opt_coords = guess_geometry(graph).coordinates org_coords = graph.molecule.coordinates coords_to_zeobuilder(org_coords, opt_coords, graph.molecule.atoms, parent) class TuneGeometry(Immediate): """Tune the geometry of the selected molecule based on the molecular graph""" description = "Tune the molecular geometry" menu_info = MenuInfo("default/_Object:tools/_Molecular:geometry","_Tune geometry", ord("t"), False, order=(0, 4, 1, 5, 4, 1)) authors = [authors.wouter_smet, authors.toon_verstraelen] @staticmethod def analyze_selection(): # A) calling ancestor if not Immediate.analyze_selection(): return False # B) validating if not isinstance(context.application.cache.node, GLContainerMixin): return False if len(context.application.cache.node.children) == 0: return False # C) passed all tests: return True def do(self): # Get the molecular graph of the molecule in the selection parent = context.application.cache.node graph = create_molecular_graph([parent], parent) if graph.molecule.size == 0: raise UserError("Could not get molecular graph.", "Make sure that the selected frame contains a molecule.") # Guessed and original geometry opt_coords = tune_geometry(graph, graph.molecule).coordinates org_coords = graph.molecule.coordinates coords_to_zeobuilder(org_coords, opt_coords, graph.molecule.atoms, parent) class OptimizeMopacPM3(Immediate): """Plugin that calls Mopac to optimize the geometry at PM3 level""" description = "Optimize geometry at PM3 level with Mopac" menu_info = MenuInfo("default/_Object:tools/_Molecular:geometry","Optimize geometry (PM3, Mopac)", ord("p"), False, order=(0, 4, 1, 5, 4, 2)) 
authors = [authors.wouter_smet, authors.toon_verstraelen] @staticmethod def analyze_selection(): # A) calling ancestor if not Immediate.analyze_selection(): return False # B) validating if not isinstance(context.application.cache.node, GLContainerMixin): return False if len(context.application.cache.node.children) == 0: return False # C) passed all tests: return True def write_mopac_input(self, molecule, prefix): f = open('%s.dat' % prefix, 'w') print >> f, 'PM3 GNORM=0.01 XYZ' print >> f, 'comment1' print >> f, 'comment2' for i in xrange(molecule.size): symbol = periodic[molecule.numbers[i]].symbol c = molecule.coordinates[i]/angstrom print >> f, "% 2s % 8.5f 1 % 8.5f 1 % 8.5f 1" % (symbol, c[0], c[1], c[2]) f.close() def read_mopac_output(self, filename, num_atoms): if not os.path.isfile(filename): raise UserError("Could not find Mopac output file.", "Expected location of output file: %s" % filename) f = open(filename,'r') coordinates = numpy.zeros((num_atoms, 3), float) success = False for line in f: if line == " CARTESIAN COORDINATES \n": break for line in f: if line == " CARTESIAN COORDINATES \n": success = True break if success: for i in xrange(3): f.next() i = 0 for line in f: if i < num_atoms: words = line.split() coordinates[i,0] = float(words[2]) coordinates[i,1] = float(words[3]) coordinates[i,2] = float(words[4]) i +=1 else: break else: raise UserError("Could not find optimal coordinates.", "Check the file %s for more details." 
% filename) f.close() return coordinates*angstrom def do(self): parent = context.application.cache.node org_mol = create_molecule([parent], parent) org_coords = org_mol.coordinates if org_mol.size == 0: raise UserError("Could not get molecule.", "Make sure that the selected frame contains a molecule.") if org_mol.size == 3: raise UserError("For the moment three atoms are not supported.") # Make temp directory work = tempfile.mkdtemp("_zeobuilder_mopac") # Create mopac input file self.write_mopac_input(org_mol, os.path.join(work, 'mopac')) # Run input file through mopac and capture output in file object retcode = os.system('cd %s; run_mopac7 mopac > mopac.out' % work) if retcode != 0: raise UserError("Failed to run Mopac.", "Check that the run_mopac7 binary is in the path. The input file can be found here: %s." % work) opt_coords = self.read_mopac_output(os.path.join(work, 'mopac.out'), org_mol.size) # clean up def safe_remove(filename): filename = os.path.join(work, filename) if os.path.isfile(filename): os.remove(filename) safe_remove("mopac.dat") safe_remove("mopac.log") safe_remove("mopac.out") safe_remove("mopac.arc") os.rmdir(work) coords_to_zeobuilder(org_coords, opt_coords, org_mol.atoms, parent) actions = { "GuessGeometry": GuessGeometry, "TuneGeometry": TuneGeometry, "OptimizeMopacPM3": OptimizeMopacPM3 }
woutersmet/Zeo_thesis
share/plugins/molecular/geometry.py
Python
gpl-3.0
9,350
[ "MOPAC" ]
973d0f1b6468307c8e3644de6f5c57a01311d6796d1b71be215e6ec3be6001da
######################################################################## # $Id$ # File : TorqueComputingElement.py # Author : Stuart Paterson, Paul Szczypka ######################################################################## """ The simplest Computing Element instance that submits jobs locally. """ __RCSID__ = "$Id$" from DIRAC.Resources.Computing.ComputingElement import ComputingElement from DIRAC.Core.Utilities.Subprocess import shellCall, systemCall from DIRAC import S_OK, S_ERROR from DIRAC import rootPath from DIRAC import gConfig import os, re, socket import shutil, bz2, base64, tempfile CE_NAME = 'Torque' UsedParameters = [ 'ExecQueue', 'SharedArea', 'BatchOutput', 'BatchError', 'UserName' ] MandatoryParameters = [ 'Queue' ] class TorqueComputingElement( ComputingElement ): """ Direct Torque submission """ mandatoryParameters = MandatoryParameters ############################################################################# def __init__( self, ceUniqueID ): """ Standard constructor. 
""" ComputingElement.__init__( self, ceUniqueID ) self.submittedJobs = 0 self.queue = self.ceConfigDict['Queue'] self.execQueue = self.ceConfigDict['ExecQueue'] self.log.info( "Using queue: ", self.queue ) self.hostname = socket.gethostname() self.sharedArea = self.ceConfigDict['SharedArea'] self.batchOutput = self.ceConfigDict['BatchOutput'] self.batchError = self.ceConfigDict['BatchError'] self.userName = self.ceConfigDict['UserName'] self.removeOutput = True if 'RemoveOutput' in self.ceParameters: if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']: self.removeOutput = False ############################################################################# def _addCEConfigDefaults( self ): """Method to make sure all necessary Configuration Parameters are defined """ # First assure that any global parameters are loaded ComputingElement._addCEConfigDefaults( self ) # Now Torque specific ones if 'ExecQueue' not in self.ceConfigDict: self.ceConfigDict['ExecQueue'] = self.ceConfigDict['Queue'] if 'SharedArea' not in self.ceConfigDict: self.ceConfigDict['SharedArea'] = '' if 'UserName' not in self.ceConfigDict: self.ceConfigDict['UserName'] = '' if 'BatchOutput' not in self.ceConfigDict: self.ceConfigDict['BatchOutput'] = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'data' ) if 'BatchError' not in self.ceConfigDict: self.ceConfigDict['BatchError'] = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'data' ) ############################################################################# def makeProxyExecutableFile( self, executableFile, proxy ): """ Make a single executable bundling together executableFile and proxy """ compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy ) ).replace( '\n', '' ) compressedAndEncodedExecutable = base64.encodestring( bz2.compress( open( executableFile, "rb" ).read(), 9 ) ).replace( '\n', '' ) wrapperContent = """#!/usr/bin/env python # Wrapper script for executable 
and proxy import os, tempfile, sys, base64, bz2 try: workingDirectory = tempfile.mkdtemp( suffix = '_wrapper', prefix= 'TORQUE_' ) os.chdir( workingDirectory ) open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) ) open( '%(executable)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedExecutable)s" ) ) ) os.chmod('proxy',0600) os.chmod('%(executable)s',0700) os.environ["X509_USER_PROXY"]=os.path.join(workingDirectory, 'proxy') except Exception, x: print >> sys.stderr, x sys.exit(-1) cmd = "%(executable)s" print 'Executing: ', cmd sys.stdout.flush() os.system( cmd ) shutil.rmtree( workingDirectory ) """ % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, \ 'compressedAndEncodedExecutable': compressedAndEncodedExecutable, \ 'executable': os.path.basename( executableFile ) } fd, name = tempfile.mkstemp( suffix = '_wrapper.py', prefix = 'TORQUE_', dir = os.getcwd() ) wrapper = os.fdopen( fd, 'w' ) wrapper.write( wrapperContent ) wrapper.close() return name ############################################################################# def submitJob( self, executableFile, proxy, numberOfJobs = 1 ): """ Method to submit job, should be overridden in sub-class. 
""" self.log.info( "Executable file path: %s" % executableFile ) if not os.access( executableFile, 5 ): os.chmod( executableFile, 0755 ) #Perform any other actions from the site admin if self.ceParameters.has_key( 'AdminCommands' ): commands = self.ceParameters['AdminCommands'].split( ';' ) for command in commands: self.log.verbose( 'Executing site admin command: %s' % command ) result = shellCall( 30, command, callbackFunction = self.sendOutput ) if not result['OK'] or result['Value'][0]: self.log.error( 'Error during "%s":' % command, result ) return S_ERROR( 'Error executing %s CE AdminCommands' % CE_NAME ) # if no proxy is supplied, the executable can be submitted directly # otherwise a wrapper script is needed to get the proxy to the execution node # The wrapper script makes debugging more complicated and thus it is # recommended to transfer a proxy inside the executable if possible. if proxy: self.log.verbose( 'Setting up proxy for payload' ) submitFile = self.makeProxyExecutableFile( executableFile, proxy ) else: # no proxy submitFile = executableFile # submit submitFile to the batch system cmd = "qsub -o %(output)s -e %(error)s -q %(queue)s -N DIRACPilot %(executable)s" % \ {'output': self.batchOutput, \ 'error': self.batchError, \ 'queue': self.queue, \ 'executable': os.path.abspath( submitFile ) } self.log.verbose( 'CE submission command: %s' % ( cmd ) ) batchIDList = [] for i in range( numberOfJobs ): result = shellCall( 30, cmd ) if not result['OK'] or result['Value'][0]: self.log.warn( '===========>Torque CE result NOT OK' ) self.log.debug( result ) return S_ERROR( result['Value'] ) else: self.log.debug( 'Torque CE result OK' ) batchID = result['Value'][1].strip() batchIDList.append( batchID ) self.submittedJobs += 1 return S_OK( batchIDList ) ############################################################################# def getCEStatus( self ): """ Method to return information on running and pending jobs. 
""" result = S_OK() result['SubmittedJobs'] = self.submittedJobs cmd = ["qstat", "-Q" , self.execQueue ] if self.userName: cmd = [ "qstat", "-u", self.userName, self.execQueue ] ret = systemCall( 10, cmd ) if not ret['OK']: self.log.error( 'Timeout', ret['Message'] ) return ret status = ret['Value'][0] stdout = ret['Value'][1] stderr = ret['Value'][2] self.log.debug( "status:", status ) self.log.debug( "stdout:", stdout ) self.log.debug( "stderr:", stderr ) if status: self.log.error( 'Failed qstat execution:', stderr ) return S_ERROR( stderr ) if self.userName: # Parse qstat -u userName queueName runningJobs = 0 waitingJobs = 0 lines = stdout.replace( '\r', '' ).split( '\n' ) for line in lines: if not line: continue if line.find( self.userName ) != -1: if 'R' == line.split( ' ' )[-2]: runningJobs += 1 else: # every other status to assimilate to Waiting waitingJobs += 1 else: # parse qstat -Q queueName matched = re.search( self.queue + "\D+(\d+)\D+(\d+)\W+(\w+)\W+(\w+)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+)\W+(\w+)", stdout ) if matched.groups < 6: return S_ERROR( "Error retrieving information from qstat:" + stdout + stderr ) try: waitingJobs = int( matched.group( 5 ) ) runningJobs = int( matched.group( 6 ) ) except ValueError: return S_ERROR( "Error retrieving information from qstat:" + stdout + stderr ) result['WaitingJobs'] = waitingJobs result['RunningJobs'] = runningJobs self.log.verbose( 'Waiting Jobs: ', waitingJobs ) self.log.verbose( 'Running Jobs: ', runningJobs ) return result def getJobStatus( self, jobIDList ): """ Get the status information for the given list of jobs """ jobDict = {} for job in jobIDList: if not job: continue jobNumber = job.split( '.' 
)[0] jobDict[jobNumber] = job cmd = [ 'qstat' ] + jobIDList result = systemCall( 10, cmd ) if not result['OK']: return result resultDict = {} output = result['Value'][1].replace( '\r', '' ) lines = output.split( '\n' ) for job in jobDict: resultDict[jobDict[job]] = 'Unknown' for line in lines: if line.find( job ) != -1: if line.find( 'Unknown' ) != -1: resultDict[jobDict[job]] = 'Unknown' else: torqueStatus = line.split()[4] if torqueStatus in ['E', 'C']: resultDict[jobDict[job]] = 'Done' elif torqueStatus in ['R']: resultDict[jobDict[job]] = 'Running' elif torqueStatus in ['S', 'W', 'Q', 'H', 'T']: resultDict[jobDict[job]] = 'Waiting' return S_OK( resultDict ) def getJobOutput( self, jobID, localDir = None ): """ Get the specified job standard output and error files. If the localDir is provided, the output is returned as file in this directory. Otherwise, the output is returned as strings. """ jobNumber = jobID.split( '.' )[0] # Find the output files outFile = '' outNames = os.listdir( self.batchOutput ) for outName in outNames: if outName.find( jobNumber ) != -1: outFile = os.path.join( self.batchOutput, outName ) break errFile = '' errNames = os.listdir( self.batchError ) for errName in errNames: if errName.find( jobNumber ) != -1: errFile = os.path.join( self.batchError, errName ) break if localDir: if outFile: doutFile = os.path.join( localDir, os.path.basename( outFile ) ) shutil.copyfile( outFile, doutFile ) if errFile: derrFile = os.path.join( localDir, os.path.basename( errFile ) ) shutil.copyfile( errFile, derrFile ) # The result is OK, we can remove the output if self.removeOutput: result = os.system( 'rm -f %s/*%s* %s/*%s*' % ( self.batchOutput, jobNumber, self.batchError, jobNumber ) ) if localDir: if outFile and errFile: return S_OK( ( doutFile, derrFile ) ) else: return S_ERROR( 'Output files not found' ) else: # Return the output as a string output = '' error = '' if outFile: outputFile = open( outFile, 'r' ) output = outputFile.read() 
outputFile.close() if errFile: outputFile = open( errFile, 'r' ) error = outputFile.read() outputFile.close() return S_OK( ( output, error ) ) #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
sposs/DIRAC
Resources/Computing/TorqueComputingElement.py
Python
gpl-3.0
11,637
[ "DIRAC" ]
654867c2b658c9fa276d26c2d9f432c9938b6b9436b65d7db135b499b0a69f34
#!/usr/bin/env python # boards.txt python builder for esp8266/Arduino # Copyright (C) 2017 community # Permission is hereby granted, free of charge, to any person who buy it, # use it, break it, fix it, trash it, change it, mail - upgrade it, charge # it, point it, zoom it, press it, snap it, work it, quick - erase it, write # it, cut it, paste it, save it, load it, check it, quick - rewrite it, plug # it, play it, burn it, rip it, drag and drop it, zip - unzip it, lock it, # fill it, call it, find it, view it, code it, jam - unlock it, surf it, # scroll it, pause it, click it, cross it, crack it, switch - update it, # name it, rate it, tune it, print it, scan it, send it, fax - rename it, # touch it, bring it, pay it, watch it, turn it, leave it, start - format # it. # diff ldscripts after regeneration: # (cd tools/sdk/ld/backup/; for i in *; do diff -u $i ../$i|less; done) # board descriptor: # name display name # opts: specific entries dicts (overrides same entry in macros) # macro: common entries # unmodifiable parameters: # resetmethod_ck/_nodemcu/_none/_dtrset: fixed reset method # flashmode_qio/_dio/_qout/_dout: fixed flash mode # flashfreq_40/_80: fixed flash frequency # selection menu: # resetmethod_menu menus for reset method # resetmethod_menu_extra menus for additional reset methods # crystalfreq/flashfreq_menu: menus for crystal/flash frequency selection # flashmode_menu: menus for flashmode selection (dio/dout/qio/qout) # 512K/1M/2M/4M/8M/16M: menus for flash & SPIFFS size # lwip/lwip2 menus for available lwip versions import os import sys import collections import getopt import re import json # serial upload speed order in menu # default is 115 for every board unless specified with 'serial' in board # or by user command line speeds = collections.OrderedDict([ ( '9', [ 's9', 's57', 's115', 's230', 's256', 's460', 's512', 's921' ]), ( '57', [ 's57', 's9', 's115', 's230', 's256', 's460', 's512', 's921' ]), ( '115', [ 's115', 's9', 's57', 's230', 's256', 
's460', 's512', 's921' ]), ( '230', [ 's230', 's9', 's57', 's115', 's256', 's460', 's512', 's921' ]), ( '256', [ 's256', 's9', 's57', 's115', 's230', 's460', 's512', 's921' ]), ( '460', [ 's460', 's9', 's57', 's115', 's230', 's256', 's512', 's921' ]), ( '512', [ 's512', 's9', 's57', 's115', 's230', 's256', 's460', 's921' ]), ( '921', [ 's921', 's9', 's57', 's115', 's230', 's256', 's460', 's512' ]), ]) # boards list boards = collections.OrderedDict([ ( 'generic', { 'name': 'Generic ESP8266 Module', 'opts': { '.build.board': 'ESP8266_GENERIC', }, 'macro': [ 'resetmethod_menu', 'resetmethod_menu_extra', 'crystalfreq_menu', 'flashfreq_menu', 'flashmode_menu', '512K', '1M', '2M', '4M', '8M', '16M', 'led', 'sdk', ], 'desc': [ 'These modules come in different form factors and pinouts. See the page at ESP8266 community wiki for more info: `ESP8266 Module Family <http://www.esp8266.com/wiki/doku.php?id=esp8266-module-family>`__.', '', 'Usually these modules have no bootstapping resistors on board, insufficient decoupling capacitors, no voltage regulator, no reset circuit, and no USB-serial adapter. This makes using them somewhat tricky, compared to development boards which add these features.', '', 'In order to use these modules, make sure to observe the following:', '', '- **Provide sufficient power to the module.** For stable use of the ESP8266 a power supply with 3.3V and >= 250mA is required. Using the power available from USB to Serial adapter is not recommended, these adapters typically do not supply enough current to run ESP8266 reliably in every situation. An external supply or regulator alongwith filtering capacitors is preferred.', '', '- **Connect bootstapping resistors** to GPIO0, GPIO2, GPIO15 according to the schematics below.', '', '- **Put ESP8266 into bootloader mode** before uploading code.', '', 'Serial Adapter', '--------------', '', 'There are many different USB to Serial adapters / boards. 
To be able to put ESP8266 into bootloader mode using serial handshaking lines, you need the adapter which breaks out RTS and DTR outputs. CTS and DSR are not useful for upload (they are inputs). Make sure the adapter can work with 3.3V IO voltage: it should have a jumper or a switch to select between 5V and 3.3V, or be marked as 3.3V only.', '', 'Adapters based around the following ICs should work:', '', '- FT232RL', '- CP2102', '- CH340G', '', 'PL2303-based adapters are known not to work on Mac OS X. See https://github.com/igrr/esptool-ck/issues/9 for more info.', '', 'Minimal Hardware Setup for Bootloading and Usage', '------------------------------------------------', '', '+-----------------+------------+------------------+', '| PIN | Resistor | Serial Adapter |', '+=================+============+==================+', '| VCC | | VCC (3.3V) |', '+-----------------+------------+------------------+', '| GND | | GND |', '+-----------------+------------+------------------+', '| TX or GPIO2\* | | RX |', '+-----------------+------------+------------------+', '| RX | | TX |', '+-----------------+------------+------------------+', '| GPIO0 | PullUp | DTR |', '+-----------------+------------+------------------+', '| Reset\* | PullUp | RTS |', '+-----------------+------------+------------------+', '| GPIO15\* | PullDown | |', '+-----------------+------------+------------------+', '| CH\_PD | PullUp | |', '+-----------------+------------+------------------+', '', '- Note', '- GPIO15 is also named MTDO', '- Reset is also named RSBT or REST (adding PullUp improves the', ' stability of the module)', '- GPIO2 is alternative TX for the boot loader mode', '- **Directly connecting a pin to VCC or GND is not a substitute for a', ' PullUp or PullDown resistor, doing this can break upload management', ' and the serial console, instability has also been noted in some', ' cases.**', '', 'ESP to Serial', '-------------', '', '.. 
figure:: ESP_to_serial.png', ' :alt: ESP to Serial', '', ' ESP to Serial', '', 'Minimal Hardware Setup for Bootloading only', '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', '', 'ESPxx Hardware', '', '+---------------+------------+------------------+', '| PIN | Resistor | Serial Adapter |', '+===============+============+==================+', '| VCC | | VCC (3.3V) |', '+---------------+------------+------------------+', '| GND | | GND |', '+---------------+------------+------------------+', '| TX or GPIO2 | | RX |', '+---------------+------------+------------------+', '| RX | | TX |', '+---------------+------------+------------------+', '| GPIO0 | | GND |', '+---------------+------------+------------------+', '| Reset | | RTS\* |', '+---------------+------------+------------------+', '| GPIO15 | PullDown | |', '+---------------+------------+------------------+', '| CH\_PD | PullUp | |', '+---------------+------------+------------------+', '', '- Note', '- if no RTS is used a manual power toggle is needed', '', 'Minimal Hardware Setup for Running only', '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', '', 'ESPxx Hardware', '', '+----------+------------+----------------+', '| PIN | Resistor | Power supply |', '+==========+============+================+', '| VCC | | VCC (3.3V) |', '+----------+------------+----------------+', '| GND | | GND |', '+----------+------------+----------------+', '| GPIO0 | PullUp | |', '+----------+------------+----------------+', '| GPIO15 | PullDown | |', '+----------+------------+----------------+', '| CH\_PD | PullUp | |', '+----------+------------+----------------+', '', 'Minimal', '-------', '', '.. figure:: ESP_min.png', ' :alt: ESP min', '', ' ESP min', '', 'Improved Stability', '------------------', '', '.. figure:: ESP_improved_stability.png', ' :alt: ESP improved stability', '', ' ESP improved stability', '', 'Boot Messages and Modes', '-----------------------', '', 'The ESP module checks at every boot the Pins 0, 2 and 15. 
based on them its boots in different modes:', '', '+----------+---------+---------+------------------------------------+', '| GPIO15 | GPIO0 | GPIO2 | Mode |', '+==========+=========+=========+====================================+', '| 0V | 0V | 3.3V | Uart Bootloader |', '+----------+---------+---------+------------------------------------+', '| 0V | 3.3V | 3.3V | Boot sketch (SPI flash) |', '+----------+---------+---------+------------------------------------+', '| 3.3V | x | x | SDIO mode (not used for Arduino) |', '+----------+---------+---------+------------------------------------+', '', 'at startup the ESP prints out the current boot mode example:', '', '::', '', ' rst cause:2, boot mode:(3,6)', '', 'note: - GPIO2 is used as TX output and the internal Pullup is enabled on boot.', '', 'rst cause', '~~~~~~~~~', '', '+----------+------------------+', '| Number | Description |', '+==========+==================+', '| 0 | unknown |', '+----------+------------------+', '| 1 | normal boot |', '+----------+------------------+', '| 2 | reset pin |', '+----------+------------------+', '| 3 | software reset |', '+----------+------------------+', '| 4 | watchdog reset |', '+----------+------------------+', '', 'boot mode', '~~~~~~~~~', '', 'the first value respects the pin setup of the Pins 0, 2 and 15.', '', '+----------+----------+---------+---------+-------------+', '| Number | GPIO15 | GPIO0 | GPIO2 | Mode |', '+==========+==========+=========+=========+=============+', '| 0 | 0V | 0V | 0V | Not valid |', '+----------+----------+---------+---------+-------------+', '| 1 | 0V | 0V | 3.3V | Uart |', '+----------+----------+---------+---------+-------------+', '| 2 | 0V | 3.3V | 0V | Not valid |', '+----------+----------+---------+---------+-------------+', '| 3 | 0V | 3.3V | 3.3V | Flash |', '+----------+----------+---------+---------+-------------+', '| 4 | 3.3V | 0V | 0V | SDIO |', '+----------+----------+---------+---------+-------------+', '| 5 | 3.3V | 0V | 3.3V | 
SDIO |', '+----------+----------+---------+---------+-------------+', '| 6 | 3.3V | 3.3V | 0V | SDIO |', '+----------+----------+---------+---------+-------------+', '| 7 | 3.3V | 3.3V | 3.3V | SDIO |', '+----------+----------+---------+---------+-------------+', '', 'note: - number = ((GPIO15 << 2) \| (GPIO0 << 1) \| GPIO2);', ], }), ( 'esp8285', { 'name': 'Generic ESP8285 Module', 'opts': { '.build.board': 'ESP8266_ESP01', '.build.variant': 'esp8285' }, 'macro': [ 'resetmethod_menu', 'resetmethod_menu_extra', 'crystalfreq_menu', 'flashmode_dout', 'flashfreq_40', '1M', 'led', ], 'desc': [ 'ESP8285 (`datasheet <http://www.espressif.com/sites/default/files/0a-esp8285_datasheet_en_v1.0_20160422.pdf>`__) is a multi-chip package which contains ESP8266 and 1MB flash. All points related to bootstrapping resistors and recommended circuits listed above apply to ESP8285 as well.', '', 'Note that since ESP8285 has SPI flash memory internally connected in DOUT mode, pins 9 and 10 may be used as GPIO / I2C / PWM pins.', ], }), ( 'espduino', { 'name': 'ESPDuino (ESP-13 Module)', 'opts': collections.OrderedDict([ ( '.build.board', 'ESP8266_ESP13' ), ( '.build.variant', 'ESPDuino' ), ( '.menu.ResetMethod.v2', 'ESPduino-V2' ), ( '.menu.ResetMethod.v2.upload.resetmethod', 'nodemcu' ), ( '.menu.ResetMethod.v1', 'ESPduino-V1' ), ( '.menu.ResetMethod.v1.upload.resetmethod', 'ck' ), ( '.menu.UploadTool.esptool', 'Serial' ), ( '.menu.UploadTool.esptool.upload.tool', 'esptool' ), ( '.menu.UploadTool.esptool.upload.verbose', '-vv' ), ( '.menu.UploadTool.espota', 'OTA' ), ( '.menu.UploadTool.espota.upload.tool', 'espota' ), ]), 'macro': [ 'flashmode_dio', 'flashfreq_40', '4M', ], 'desc': [ '*TODO*' ], }), ( 'huzzah', { 'name': 'Adafruit Feather HUZZAH ESP8266', 'opts': { '.build.board': 'ESP8266_ESP12', '.build.variant': 'adafruit', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_qio', 'flashfreq_40', '4M', ], 'desc': [ 'The Adafruit Feather HUZZAH ESP8266 is an Arduino-compatible Wi-Fi 
development board powered by Ai-Thinker\'s ESP-12S, clocked at 80 MHz at 3.3V logic. A high-quality SiLabs CP2104 USB-Serial chip is included so that you can upload code at a blistering 921600 baud for fast development time. It also has auto-reset so no noodling with pins and reset button pressings. A 3.7V Lithium polymer battery connector is included, making it ideal for portable projects. The Adafruit Feather HUZZAH ESP8266 will automatically recharge a connected battery when USB power is available.', '', 'Product page: https://www.adafruit.com/product/2821' ], }), ( 'inventone', { 'name': 'Invent One', 'opts': { '.build.board': 'ESP8266_GENERIC', '.build.variant': 'inventone', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'desc': [ 'The Invent One is an Arduino-compatible Wi-Fi development board powered by Ai-Thinker\'s ESP-12F, clocked at 80 MHz at 3.3V logic. It has an onboard ADC (PCF8591) so that you can have multiple analog inputs to work with. More information can be found here: https://blog.inventone.ng', '', 'Product page: https://inventone.ng' ], }), ( 'cw01', { 'name': 'XinaBox CW01', 'opts': { '.build.board': 'ESP8266_GENERIC', '.build.variant': 'xinabox', }, 'macro': [ 'resetmethod_nodemcu', 'crystalfreq_menu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'desc': [ 'The XinaBox CW01(ESP8266) is an Arduino-compatible Wi-Fi development board powered by an ESP-12F, clocked at 80 MHz at 3.3V logic. The CW01 has an onboard RGB LED and 3 xBUS connection ports.', '', 'Product page: https://xinabox.cc/products/CW01' ], }), ( 'espresso_lite_v1', { 'name': 'ESPresso Lite 1.0', 'opts': { '.build.board': 'ESP8266_ESPRESSO_LITE_V1', '.build.variant': 'espresso_lite_v1', }, 'macro': [ 'flashmode_dio', 'flashfreq_40', '4M', 'resetmethod_menu', ], 'desc': [ 'ESPresso Lite 1.0 (beta version) is an Arduino-compatible Wi-Fi development board powered by Espressif System\'s own ESP8266 WROOM-02 module. 
It has breadboard-friendly breakout pins with in-built LED, two reset/flash buttons and a user programmable button . The operating voltage is 3.3VDC, regulated with 800mA maximum current. Special distinctive features include on-board I2C pads that allow direct connection to OLED LCD and sensor boards.', ] }), ( 'espresso_lite_v2', { 'name': 'ESPresso Lite 2.0', 'opts': { '.build.board': 'ESP8266_ESPRESSO_LITE_V2', '.build.variant': 'espresso_lite_v2', }, 'macro': [ 'flashmode_dio', 'flashfreq_40', '4M', 'resetmethod_menu', ], 'desc': [ 'ESPresso Lite 2.0 is an Arduino-compatible Wi-Fi development board based on an earlier V1 (beta version). Re-designed together with Cytron Technologies, the newly-revised ESPresso Lite V2.0 features the auto-load/auto-program function, eliminating the previous need to reset the board manually before flashing a new program. It also feature two user programmable side buttons and a reset button. The special distinctive features of on-board pads for I2C sensor and actuator is retained.', ] }), ( 'phoenix_v1', { 'name': 'Phoenix 1.0', 'opts': { '.build.board': 'ESP8266_PHOENIX_V1', '.build.variant': 'phoenix_v1', }, 'macro': [ 'flashmode_dio', 'flashfreq_40', '4M', 'resetmethod_menu', ], 'desc': [ 'Product page: http://www.espert.co', ], }), ( 'phoenix_v2', { 'name': 'Phoenix 2.0', 'opts': { '.build.board': 'ESP8266_PHOENIX_V2', '.build.variant': 'phoenix_v2', }, 'macro': [ 'flashmode_dio', 'flashfreq_40', '4M', 'resetmethod_menu', ], 'desc': [ 'Product page: http://www.espert.co', ], }), ( 'nodemcu', { 'name': 'NodeMCU 0.9 (ESP-12 Module)', 'opts': { '.build.board': 'ESP8266_NODEMCU', '.build.variant': 'nodemcu', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_qio', 'flashfreq_40', '4M', ], 'desc': [ 'Pin mapping', '~~~~~~~~~~~', '', 'Pin numbers written on the board itself do not correspond to ESP8266 GPIO pin numbers. Constants are defined to make using this board easier:', '', '.. 
code:: c++', '', ' static const uint8_t D0 = 16;', ' static const uint8_t D1 = 5;', ' static const uint8_t D2 = 4;', ' static const uint8_t D3 = 0;', ' static const uint8_t D4 = 2;', ' static const uint8_t D5 = 14;', ' static const uint8_t D6 = 12;', ' static const uint8_t D7 = 13;', ' static const uint8_t D8 = 15;', ' static const uint8_t D9 = 3;', ' static const uint8_t D10 = 1;', '', 'If you want to use NodeMCU pin 5, use D5 for pin number, and it will be translated to \'real\' GPIO pin 14.', ], }), ( 'nodemcuv2', { 'name': 'NodeMCU 1.0 (ESP-12E Module)', 'opts': { '.build.board': 'ESP8266_NODEMCU', '.build.variant': 'nodemcu', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'desc': [ 'This module is sold under many names for around $6.50 on AliExpress and it\'s one of the cheapest, fully integrated ESP8266 solutions.', '', 'It\'s an open hardware design with an ESP-12E core and 4 MB of SPI flash.', '', 'According to the manufacturer, "with a micro USB cable, you can connect NodeMCU devkit to your laptop and flash it without any trouble". This is more or less true: the board comes with a CP2102 onboard USB to serial adapter which just works, well, the majority of the time. Sometimes flashing fails and you have to reset the board by holding down FLASH +', 'RST, then releasing FLASH, then releasing RST. 
This forces the CP2102 device to power cycle and to be re-numbered by Linux.', '', 'The board also features a NCP1117 voltage regulator, a blue LED on GPIO16 and a 220k/100k Ohm voltage divider on the ADC input pin.', 'The ESP-12E usually has a led connected on GPIO2.', '', 'Full pinout and PDF schematics can be found `here <https://github.com/nodemcu/nodemcu-devkit-v1.0>`__', ], }), ( 'modwifi', { 'name': 'Olimex MOD-WIFI-ESP8266(-DEV)', 'opts': { '.build.board': 'MOD_WIFI_ESP8266', '.build.variant': 'modwifi', }, 'macro': [ 'resetmethod_ck', 'flashmode_qio', 'flashfreq_40', '2M', ], 'desc': [ 'This board comes with 2 MB of SPI flash and optional accessories (e.g. evaluation board ESP8266-EVB or BAT-BOX for batteries).', '', 'The basic module has three solder jumpers that allow you to switch the operating mode between SDIO, UART and FLASH.', '', 'The board is shipped for FLASH operation mode, with jumpers TD0JP=0, IO0JP=1, IO2JP=1.', '', 'Since jumper IO0JP is tied to GPIO0, which is PIN 21, you\'ll have to ground it before programming with a USB to serial adapter and reset the board by power cycling it.', '', 'UART pins for programming and serial I/O are GPIO1 (TXD, pin 3) and GPIO3 (RXD, pin 4).', '', 'You can find the board schematics `here <https://github.com/OLIMEX/ESP8266/blob/master/HARDWARE/MOD-WIFI-ESP8266-DEV/MOD-WIFI-ESP8266-DEV_schematic.pdf>`__', ], }), ( 'thing', { 'name': 'SparkFun ESP8266 Thing', 'opts': { '.build.board': 'ESP8266_THING', '.build.variant': 'thing', }, 'macro': [ 'resetmethod_ck', 'flashmode_qio', 'flashfreq_40', '512K', ], 'desc': [ 'Product page: https://www.sparkfun.com/products/13231' ], }), ( 'thingdev', { 'name': 'SparkFun ESP8266 Thing Dev', 'opts': { '.build.board': 'ESP8266_THING_DEV', '.build.variant': 'thing', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '512K', ], 'desc': [ 'Product page: https://www.sparkfun.com/products/13711' ], }), ( 'esp210', { 'name': 'SweetPea ESP-210', 'opts': { 
'.build.board': 'ESP8266_ESP210', }, 'macro': [ 'resetmethod_ck', 'flashmode_qio', 'flashfreq_40', '4M', ], 'serial': '57', 'desc': [ '*TODO*' ], }), ( 'd1_mini', { 'name': 'LOLIN(WEMOS) D1 R2 & mini', 'opts': { '.build.board': 'ESP8266_WEMOS_D1MINI', '.build.variant': 'd1_mini', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'serial': '921', 'desc': [ 'Product page: https://www.wemos.cc/' ], }), ( 'd1_mini_pro', { 'name': 'LOLIN(WEMOS) D1 mini Pro', 'opts': { '.build.board': 'ESP8266_WEMOS_D1MINIPRO', '.build.variant': 'd1_mini', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '16M', ], 'serial': '921', 'desc': [ 'Product page: https://www.wemos.cc/' ], }), ( 'd1_mini_lite', { 'name': 'LOLIN(WEMOS) D1 mini Lite', 'opts': { '.build.board': 'ESP8266_WEMOS_D1MINILITE', '.build.variant': 'd1_mini', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dout', 'flashfreq_40', '1M', ], 'serial': '921', 'desc': [ 'Parameters in Arduino IDE:', '~~~~~~~~~~~~~~~~~~~~~~~~~~', '', '- Card: "WEMOS D1 Mini Lite"', '- Flash Size: "1M (512K SPIFFS)"', '- CPU Frequency: "80 Mhz"', '- Upload Speed: "230400"', '', 'Power:', '~~~~~~', '', '- 5V pin : 4.7V 500mA output when the board is powered by USB ; 3.5V-6V input', '- 3V3 pin : 3.3V 500mA regulated output', '- Digital pins : 3.3V 30mA.', '', 'links:', '~~~~~~', '', '- Product page: https://www.wemos.cc/', '- Board schematic: https://wiki.wemos.cc/_media/products:d1:sch_d1_mini_lite_v1.0.0.pdf', '- ESP8285 datasheet: https://www.espressif.com/sites/default/files/0a-esp8285_datasheet_en_v1.0_20160422.pdf', '- Voltage regulator datasheet: http://pdf-datasheet.datasheet.netdna-cdn.com/pdf-down/M/E/6/ME6211-Microne.pdf', ], }), ( 'd1', { 'name': 'WeMos D1 R1', 'opts': { '.build.board': 'ESP8266_WEMOS_D1R1', '.build.variant': 'd1', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'serial': '921', 'desc': [ 'Product page: https://www.wemos.cc/' ], }), ( 
'espino', { 'name': 'ESPino (ESP-12 Module)', 'opts': { '.build.board': 'ESP8266_ESP12', '.build.variant': 'espino', }, 'macro': [ 'resetmethod_menu', 'flashmode_qio', 'flashfreq_40', '4M', ], 'desc': [ 'ESPino integrates the ESP-12 module with a 3.3v regulator, CP2104 USB-Serial bridge and a micro USB connector for easy programming. It is designed for fitting in a breadboard and has an RGB Led and two buttons for easy prototyping.', '', 'For more information about the hardware, pinout diagram and programming procedures, please see the `datasheet <https://github.com/makerlabmx/ESPino-tools/raw/master/Docs/ESPino-Datasheet-EN.pdf>`__.', '', 'Product page: http://www.espino.io/en', ], }), ( 'espinotee', { 'name': 'ThaiEasyElec\'s ESPino', 'opts': { '.build.board': 'ESP8266_ESP13', '.build.variant': 'espinotee', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_qio', 'flashfreq_40', '4M', ], 'desc': [ 'ESPino by ThaiEasyElec using WROOM-02 module from Espressif Systems with 4 MB Flash.', '', 'We will update an English description soon. - Product page:', 'http://thaieasyelec.com/products/wireless-modules/wifi-modules/espino-wifi-development-board-detail.html', '- Schematics:', 'www.thaieasyelec.com/downloads/ETEE052/ETEE052\_ESPino\_Schematic.pdf -', 'Dimensions:', 'http://thaieasyelec.com/downloads/ETEE052/ETEE052\_ESPino\_Dimension.pdf', '- Pinouts:', 'http://thaieasyelec.com/downloads/ETEE052/ETEE052\_ESPino\_User\_Manual\_TH\_v1\_0\_20160204.pdf (Please see pg. 
8)', ], }), ( 'wifinfo', { 'name': 'WifInfo', 'opts': collections.OrderedDict([ ( '.build.board', 'WIFINFO' ), ( '.build.variant', 'wifinfo' ), ( '.menu.ESPModule.ESP07192', 'ESP07 (1M/192K SPIFFS)' ), ( '.menu.ESPModule.ESP07192.build.board', 'ESP8266_ESP07' ), ( '.menu.ESPModule.ESP07192.build.flash_size', '1M' ), ( '.menu.ESPModule.ESP07192.build.flash_ld', 'eagle.flash.1m192.ld' ), ( '.menu.ESPModule.ESP07192.build.spiffs_start', '0xCB000' ), ( '.menu.ESPModule.ESP07192.build.spiffs_end', '0xFB000' ), ( '.menu.ESPModule.ESP07192.build.spiffs_blocksize', '4096' ), ( '.menu.ESPModule.ESP07192.upload.maximum_size', '827376' ), ( '.menu.ESPModule.ESP12', 'ESP12 (4M/1M SPIFFS)' ), ( '.menu.ESPModule.ESP12.build.board', 'ESP8266_ESP12' ), ( '.menu.ESPModule.ESP12.build.flash_size', '4M' ), ( '.menu.ESPModule.ESP12.build.flash_ld', 'eagle.flash.4m1m.ld' ), ( '.menu.ESPModule.ESP12.build.spiffs_start', '0x300000' ), ( '.menu.ESPModule.ESP12.build.spiffs_end', '0x3FB000' ), ( '.menu.ESPModule.ESP12.build.spiffs_blocksize', '8192' ), ( '.menu.ESPModule.ESP12.build.spiffs_pagesize', '256' ), ( '.menu.ESPModule.ESP12.upload.maximum_size', '1044464' ), ]), 'macro': [ 'resetmethod_nodemcu', 'flashmode_qio', 'flashfreq_menu', '1M', ], 'desc': [ 'WifInfo integrates the ESP-12 or ESP-07+Ext antenna module with a 3.3v regulator and the hardware to be able to measure French telemetry issue from ERDF powering meter serial output. 
It has a USB connector for powering, an RGB WS2812 Led, 4 pins I2C connector to fit OLED or sensor, and two buttons + FTDI connector and auto reset feature.', '', 'For more information, please see WifInfo related `blog <http://hallard.me/category/wifinfo/>`__ entries, `github <https://github.com/hallard/WifInfo>`__ and `community <https://community.hallard.me/category/16/wifinfo>`__ forum.', ], }), ( 'arduino-esp8266', { 'name': 'Arduino', 'opts': collections.OrderedDict([ ( '.build.board', 'ESP8266_ARDUINO' ), ( '.menu.BoardModel.primo', 'Primo' ), ( '.menu.BoardModel.primo.build.board', 'ESP8266_ARDUINO_PRIMO' ), ( '.menu.BoardModel.primo.build.variant', 'arduino_spi' ), ( '.menu.BoardModel.primo.build.extra_flags', '-DF_CRYSTAL=40000000 -DESP8266' ), ( '.menu.BoardModel.unowifideved', 'Uno WiFi' ), ( '.menu.BoardModel.unowifideved.build.board', 'ESP8266_ARDUINO_UNOWIFI' ), ( '.menu.BoardModel.unowifideved.build.variant', 'arduino_uart' ), ( '.menu.BoardModel.unowifideved.build.extra_flags=-DF_CRYSTAL', '40000000 -DESP8266' ), ( '.menu.BoardModel.starottodeved', 'Star OTTO' ), ( '.menu.BoardModel.starottodeved.build.variant', 'arduino_uart' ), ( '.menu.BoardModel.starottodeved.build.board', 'ESP8266_ARDUINO_STAR_OTTO' ), ( '.menu.BoardModel.starottodeved.build.extra_flags', '-DF_CRYSTAL=40000000 -DESP8266' ), ]), 'macro': [ 'resetmethod_ck', 'flashmode_qio', 'flashfreq_40', '4M', ], 'desc': [ '*TODO*' ], }), ( 'gen4iod', { 'name': '4D Systems gen4 IoD Range', 'opts': { '.build.board': 'GEN4_IOD', '.build.f_cpu': '160000000L', '.build.variant': 'generic', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_80', '512K', ], 'desc': [ 'gen4-IoD Range of ESP8266 powered Display Modules by 4D Systems.', '', '2.4", 2.8" and 3.2" TFT LCD with uSD card socket and Resistive Touch. 
Chip Antenna + uFL Connector.', '', 'Datasheet and associated downloads can be found on the 4D Systems product page.', '', 'The gen4-IoD range can be programmed using the Arduino IDE and also the 4D Systems Workshop4 IDE, which incorporates many additional graphics benefits. GFX4d library is available, along with a number of demo applications.', '', '- Product page: http://www.4dsystems.com.au/product/gen4-IoD', ], }), ( 'oak', { 'name': 'Digistump Oak', 'opts': { '.build.board': 'ESP8266_OAK', '.build.variant': 'oak', '.upload.maximum_size': '1040368', }, 'macro': [ 'resetmethod_none', 'flashmode_dio', 'flashfreq_40', '4M', ], 'serial': '921', 'desc': [ 'The Oak requires an `Serial Adapter`_ for a serial connection or flashing; its micro USB port is only for power.', '', 'To make a serial connection, wire the adapter\'s **TX to P3**, **RX to P4**, and **GND** to **GND**. Supply 3.3v from the serial adapter if not already powered via USB.', '', 'To put the board into bootloader mode, configure a serial connection as above, connect **P2 to GND**, then re-apply power. 
Once flashing is complete, remove the connection from P2 to GND, then re-apply power to boot into normal mode.', ], }), ( 'wifiduino', { 'name': 'WiFiduino', 'opts': { '.build.board': 'WIFIDUINO_ESP8266', '.build.variant': 'wifiduino', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'serial': '921', 'desc': [ 'Product page: https://wifiduino.com/esp8266' ], }), ( 'wifi_slot', { 'name': 'Amperka WiFi Slot', 'opts': { '.build.board': 'AMPERKA_WIFI_SLOT', '.build.variant': 'wifi_slot', }, 'macro': [ 'resetmethod_nodemcu', 'flashfreq_menu', 'flashmode_menu', '1M', '2M', ], 'desc': [ 'Product page: http://wiki.amperka.ru/wifi-slot' ], }), ( 'wiolink', { 'name': 'Seeed Wio Link', 'opts': { '.build.board': 'ESP8266_WIO_LINK', '.build.variant': 'wiolink', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_qio', 'flashfreq_40', '4M', ], 'desc': [ 'Wio Link is designed to simplify your IoT development. It is an ESP8266 based open-source Wi-Fi development board to create IoT applications by virtualizing plug-n-play modules to RESTful APIs with mobile APPs. Wio Link is also compatible with the Arduino IDE.', '', 'Please DO NOTICE that you MUST pull up pin 15 to enable the power for Grove ports, the board is designed like this for the purpose of peripherals power management.', '', 'Product page: https://www.seeedstudio.com/Wio-Link-p-2604.html' ], }), ('espectro', { 'name': 'ESPectro Core', 'opts': { '.build.board': 'ESP8266_ESPECTRO_CORE', '.build.variant': 'espectro', }, 'macro': [ 'resetmethod_nodemcu', 'flashmode_dio', 'flashfreq_40', '4M', ], 'desc': [ 'ESPectro Core is ESP8266 development board as the culmination of our 3+ year experience in exploring and developing products with ESP8266 MCU.', '', 'Initially designed for kids in mind, everybody should be able to use it. 
Yet it\'s still hacker-friendly as we break out all ESP8266 ESP-12F pins.', '', 'More details at https://shop.makestro.com/product/espectrocore/', ], }) ]) ################################################################ macros = { 'defaults': collections.OrderedDict([ ( '.upload.tool', 'esptool' ), ( '.upload.maximum_data_size', '81920' ), ( '.upload.wait_for_upload_port', 'true' ), ( '.upload.erase_cmd', 'version'), ( '.serial.disableDTR', 'true' ), ( '.serial.disableRTS', 'true' ), ( '.build.mcu', 'esp8266' ), ( '.build.core', 'esp8266' ), ( '.build.variant', 'generic' ), ( '.build.spiffs_pagesize', '256' ), ( '.build.debug_port', '' ), ( '.build.debug_level', '' ), ]), ####################### 'cpufreq_menu': collections.OrderedDict([ ( '.menu.xtal.80', '80 MHz' ), ( '.menu.xtal.80.build.f_cpu', '80000000L' ), ( '.menu.xtal.160', '160 MHz' ), ( '.menu.xtal.160.build.f_cpu', '160000000L' ), ]), 'vtable_menu': collections.OrderedDict([ ( '.menu.vt.flash', 'Flash'), ( '.menu.vt.flash.build.vtable_flags', '-DVTABLES_IN_FLASH'), ( '.menu.vt.heap', 'Heap'), ( '.menu.vt.heap.build.vtable_flags', '-DVTABLES_IN_DRAM'), ( '.menu.vt.iram', 'IRAM'), ( '.menu.vt.iram.build.vtable_flags', '-DVTABLES_IN_IRAM'), ]), 'exception_menu': collections.OrderedDict([ ( '.menu.exception.disabled', 'Disabled' ), ( '.menu.exception.disabled.build.exception_flags', '-fno-exceptions' ), ( '.menu.exception.disabled.build.stdcpp_lib', '-lstdc++' ), ( '.menu.exception.enabled', 'Enabled' ), ( '.menu.exception.enabled.build.exception_flags', '-fexceptions' ), ( '.menu.exception.enabled.build.stdcpp_lib', '-lstdc++-exc' ), ]), 'crystalfreq_menu': collections.OrderedDict([ ( '.menu.CrystalFreq.26', '26 MHz' ), ( '.menu.CrystalFreq.40', '40 MHz' ), ( '.menu.CrystalFreq.40.build.extra_flags', '-DF_CRYSTAL=40000000 -DESP8266' ), ]), 'flashfreq_menu': collections.OrderedDict([ ( '.menu.FlashFreq.40', '40MHz' ), ( '.menu.FlashFreq.40.build.flash_freq', '40' ), ( '.menu.FlashFreq.80', '80MHz' ), ( 
'.menu.FlashFreq.80.build.flash_freq', '80' ), ]), 'flashfreq_40': collections.OrderedDict([ ( '.build.flash_freq', '40' ), ]), 'flashfreq_80': collections.OrderedDict([ ( '.build.flash_freq', '80' ), ]), ####################### menu.resetmethod 'resetmethod_menu': collections.OrderedDict([ ( '.menu.ResetMethod.ck', 'ck' ), ( '.menu.ResetMethod.ck.upload.resetmethod', 'ck' ), ( '.menu.ResetMethod.nodemcu', 'nodemcu' ), ( '.menu.ResetMethod.nodemcu.upload.resetmethod', 'nodemcu' ), ]), 'resetmethod_menu_extra': collections.OrderedDict([ ( '.menu.ResetMethod.none', 'none' ), ( '.menu.ResetMethod.none.upload.resetmethod', 'none' ), ( '.menu.ResetMethod.dtrset', 'dtrset' ), ( '.menu.ResetMethod.dtrset.upload.resetmethod', 'dtrset' ), ]), ####################### upload.resetmethod 'resetmethod_ck': collections.OrderedDict([ ( '.upload.resetmethod', 'ck' ), ]), 'resetmethod_nodemcu': collections.OrderedDict([ ( '.upload.resetmethod', 'nodemcu' ), ]), 'resetmethod_none': collections.OrderedDict([ ( '.upload.resetmethod', 'none' ), ]), 'resetmethod_dtrset': collections.OrderedDict([ ( '.upload.resetmethod', 'dtrset' ), ]), ####################### menu.FlashMode 'flashmode_menu': collections.OrderedDict([ ( '.menu.FlashMode.dout', 'DOUT (compatible)' ), ( '.menu.FlashMode.dout.build.flash_mode', 'dout' ), ( '.menu.FlashMode.dout.build.flash_flags', '-DFLASHMODE_DOUT' ), ( '.menu.FlashMode.dio', 'DIO' ), ( '.menu.FlashMode.dio.build.flash_mode', 'dio' ), ( '.menu.FlashMode.dio.build.flash_flags', '-DFLASHMODE_DIO' ), ( '.menu.FlashMode.qout', 'QOUT' ), ( '.menu.FlashMode.qout.build.flash_mode', 'qout' ), ( '.menu.FlashMode.qout.build.flash_flags', '-DFLASHMODE_QOUT' ), ( '.menu.FlashMode.qio', 'QIO (fast)' ), ( '.menu.FlashMode.qio.build.flash_mode', 'qio' ), ( '.menu.FlashMode.qio.build.flash_flags', '-DFLASHMODE_QIO' ), ]), ####################### default flash_mode 'flashmode_dio': collections.OrderedDict([ ( '.build.flash_mode', 'dio' ), ( '.build.flash_flags', 
'-DFLASHMODE_DIO' ), ]), 'flashmode_qio': collections.OrderedDict([ ( '.build.flash_mode', 'qio' ), ( '.build.flash_flags', '-DFLASHMODE_QIO' ), ]), 'flashmode_dout': collections.OrderedDict([ ( '.build.flash_mode', 'dout' ), ( '.build.flash_flags', '-DFLASHMODE_DOUT' ), ]), 'flashmode_qout': collections.OrderedDict([ ( '.build.flash_mode', 'qout' ), ( '.build.flash_flags', '-DFLASHMODE_QOUT' ), ]), ####################### lwip 'lwip2': collections.OrderedDict([ ( '.menu.ip.lm2f', 'v2 Lower Memory' ), ( '.menu.ip.lm2f.build.lwip_include', 'lwip2/include' ), ( '.menu.ip.lm2f.build.lwip_lib', '-llwip2-536-feat' ), ( '.menu.ip.lm2f.build.lwip_flags', '-DLWIP_OPEN_SRC -DTCP_MSS=536 -DLWIP_FEATURES=1 -DLWIP_IPV6=0' ), ( '.menu.ip.hb2f', 'v2 Higher Bandwidth' ), ( '.menu.ip.hb2f.build.lwip_include', 'lwip2/include' ), ( '.menu.ip.hb2f.build.lwip_lib', '-llwip2-1460-feat' ), ( '.menu.ip.hb2f.build.lwip_flags', '-DLWIP_OPEN_SRC -DTCP_MSS=1460 -DLWIP_FEATURES=1 -DLWIP_IPV6=0' ), ( '.menu.ip.lm2n', 'v2 Lower Memory (no features)' ), ( '.menu.ip.lm2n.build.lwip_include', 'lwip2/include' ), ( '.menu.ip.lm2n.build.lwip_lib', '-llwip2-536' ), ( '.menu.ip.lm2n.build.lwip_flags', '-DLWIP_OPEN_SRC -DTCP_MSS=536 -DLWIP_FEATURES=0 -DLWIP_IPV6=0' ), ( '.menu.ip.hb2n', 'v2 Higher Bandwidth (no features)' ), ( '.menu.ip.hb2n.build.lwip_include', 'lwip2/include' ), ( '.menu.ip.hb2n.build.lwip_lib', '-llwip2-1460' ), ( '.menu.ip.hb2n.build.lwip_flags', '-DLWIP_OPEN_SRC -DTCP_MSS=1460 -DLWIP_FEATURES=0 -DLWIP_IPV6=0' ), ( '.menu.ip.lm6f', 'v2 IPv6 Lower Memory' ), ( '.menu.ip.lm6f.build.lwip_include', 'lwip2/include' ), ( '.menu.ip.lm6f.build.lwip_lib', '-llwip6-536-feat' ), ( '.menu.ip.lm6f.build.lwip_flags', '-DLWIP_OPEN_SRC -DTCP_MSS=536 -DLWIP_FEATURES=1 -DLWIP_IPV6=1' ), ( '.menu.ip.hb6f', 'v2 IPv6 Higher Bandwidth' ), ( '.menu.ip.hb6f.build.lwip_include', 'lwip2/include' ), ( '.menu.ip.hb6f.build.lwip_lib', '-llwip6-1460-feat' ), ( '.menu.ip.hb6f.build.lwip_flags', '-DLWIP_OPEN_SRC 
-DTCP_MSS=1460 -DLWIP_FEATURES=1 -DLWIP_IPV6=1' ), ]), 'lwip': collections.OrderedDict([ ( '.menu.ip.hb1', 'v1.4 Higher Bandwidth' ), ( '.menu.ip.hb1.build.lwip_lib', '-llwip_gcc' ), ( '.menu.ip.hb1.build.lwip_flags', '-DLWIP_OPEN_SRC' ), #( '.menu.ip.Espressif', 'v1.4 Espressif (xcc)' ), #( '.menu.ip.Espressif.build.lwip_lib', '-llwip' ), #( '.menu.ip.Espressif.build.lwip_flags', '-DLWIP_MAYBE_XCC' ), ( '.menu.ip.src', 'v1.4 Compile from source' ), ( '.menu.ip.src.build.lwip_lib', '-llwip_src' ), ( '.menu.ip.src.build.lwip_flags', '-DLWIP_OPEN_SRC' ), ( '.menu.ip.src.recipe.hooks.sketch.prebuild.1.pattern', 'make -C "{runtime.platform.path}/tools/sdk/lwip/src" install TOOLS_PATH="{runtime.tools.xtensa-lx106-elf-gcc.path}/bin/xtensa-lx106-elf-"' ), ]), ####################### serial 's9': collections.OrderedDict([ ( '.menu.baud.9600', '9600' ), ( '.menu.baud.9600.upload.speed', '9600' ), ]), 's57': collections.OrderedDict([ ( '.menu.baud.57600', '57600' ), ( '.menu.baud.57600.upload.speed', '57600' ), ]), 's115': collections.OrderedDict([ ( '.menu.baud.115200', '115200' ), ( '.menu.baud.115200.upload.speed', '115200' ), ]), 's256': collections.OrderedDict([ ( '.menu.baud.256000.windows', '256000' ), ( '.menu.baud.256000.upload.speed', '256000' ), ]), 's230': collections.OrderedDict([ ( '.menu.baud.230400.linux', '230400' ), ( '.menu.baud.230400.macosx', '230400' ), ( '.menu.baud.230400.upload.speed', '230400' ), ]), 's460': collections.OrderedDict([ ( '.menu.baud.460800.linux', '460800' ), ( '.menu.baud.460800.macosx', '460800' ), ( '.menu.baud.460800.upload.speed', '460800' ), ]), 's512': collections.OrderedDict([ ( '.menu.baud.512000.windows', '512000' ), ( '.menu.baud.512000.upload.speed', '512000' ), ]), 's921': collections.OrderedDict([ ( '.menu.baud.921600', '921600' ), ( '.menu.baud.921600.upload.speed', '921600' ), ]), ####################### flash erase 'flash_erase_menu': collections.OrderedDict([ ( '.menu.wipe.none', 'Only Sketch' ), ( 
'.menu.wipe.none.upload.erase_cmd', 'version' ), ( '.menu.wipe.sdk', 'Sketch + WiFi Settings' ), ( '.menu.wipe.sdk.upload.erase_cmd', 'erase_region "{build.rfcal_addr}" 0x4000' ), ( '.menu.wipe.all', 'All Flash Contents' ), ( '.menu.wipe.all.upload.erase_cmd', 'erase_flash' ), ]), } ################################################################ # defs def checkdir (): if not os.path.isfile("boards.txt"): print("please run me from boards.txt directory (like: ./tools/boards.txt.py -...)") sys.exit(1) ################################################################ # debug options # https://rosettacode.org/wiki/Combinations#Python def comb (m, lst): if m == 0: return [[]] return [[x] + suffix for i, x in enumerate(lst) for suffix in comb(m - 1, lst[i + 1:])] def combn (lst): all = [] for i in range(0, len(lst)): all += comb(i + 1, lst) return all def comb1 (lst): all = [] for i in range(0, len(lst)): all += [ [ lst[i] ] ] all += [ lst ] return all def all_debug (): listcomb = [ 'SSL', 'TLS_MEM', 'HTTP_CLIENT', 'HTTP_SERVER' ] listnocomb = [ 'CORE', 'WIFI', 'HTTP_UPDATE', 'UPDATER', 'OTA', 'OOM' ] listsingle = [ 'NoAssert-NDEBUG' ] options = combn(listcomb) options += comb1(listnocomb) options += [ listcomb + listnocomb ] options += [ listsingle ] debugmenu = collections.OrderedDict([ ( '.menu.dbg.Disabled', 'Disabled' ), ( '.menu.dbg.Disabled.build.debug_port', '' ), ( '.menu.dbg.Serial', 'Serial' ), ( '.menu.dbg.Serial.build.debug_port', '-DDEBUG_ESP_PORT=Serial' ), ( '.menu.dbg.Serial1', 'Serial1' ), ( '.menu.dbg.Serial1.build.debug_port', '-DDEBUG_ESP_PORT=Serial1' ), ( '.menu.lvl.None____', 'None' ), ( '.menu.lvl.None____.build.debug_level', '' ), ]) for optlist in options: debugname = '' debugmenuname = '' debugdefs = '' for opt in optlist: space = opt.find(" ") if space > 0: # remove subsequent associated gcc cmdline option simpleopt = opt[0:space] else: simpleopt = opt debugname += simpleopt if debugmenuname != '': debugmenuname += '+' debugmenuname += 
simpleopt if opt == 'NoAssert-NDEBUG': debugdefs += ' -DNDEBUG' else: debugdefs += ' -DDEBUG_ESP_' + opt debugmenu.update(collections.OrderedDict([ ( '.menu.lvl.' + debugname, debugmenuname ), ( '.menu.lvl.' + debugname + '.build.debug_level', debugdefs ) ])) return { 'debug_menu': debugmenu } ################################################################ # flash size def flash_map (flashsize_kb, spiffs_kb = 0): # mapping: # flash | reserved | empty | spiffs | eeprom | rf-cal | sdk-wifi-settings spi = 0x40200000 # https://github.com/esp8266/esp8266-wiki/wiki/Memory-Map reserved = 4112 eeprom_size_kb = 4 rfcal_size_kb = 4 sdkwifi_size_kb = 12 spiffs_end = (flashsize_kb - sdkwifi_size_kb - rfcal_size_kb - eeprom_size_kb) * 1024 rfcal_addr = (flashsize_kb - sdkwifi_size_kb - rfcal_size_kb) * 1024 if flashsize_kb <= 1024: max_upload_size = (flashsize_kb - (spiffs_kb + eeprom_size_kb + rfcal_size_kb + sdkwifi_size_kb)) * 1024 - reserved spiffs_start = spiffs_end - spiffs_kb * 1024 spiffs_blocksize = 4096 else: max_upload_size = 1024 * 1024 - reserved spiffs_start = (flashsize_kb - spiffs_kb) * 1024 if spiffs_kb < 512: spiffs_blocksize = 4096 else: spiffs_blocksize = 8192 strsize = str(int(flashsize_kb / 1024)) + 'M' if (flashsize_kb >= 1024) else str(flashsize_kb) + 'K' strspiffs = str(int(spiffs_kb / 1024)) + 'M' if (spiffs_kb >= 1024) else str(spiffs_kb) + 'K' strspiffs_strip = str(int(spiffs_kb / 1024)) + 'M' if (spiffs_kb >= 1024) else str(spiffs_kb) if (spiffs_kb > 0) else '' ld = 'eagle.flash.' + strsize.lower() + strspiffs_strip.lower() + '.ld' menu = '.menu.eesz.' + strsize + strspiffs_strip menub = menu + '.build.' 
desc = 'no' if (spiffs_kb == 0) else strspiffs d = collections.OrderedDict([ ( menu, strsize + ' (' + desc + ' SPIFFS)' ), ( menub + 'flash_size', strsize ), ( menub + 'flash_size_bytes', "0x%X" % (flashsize_kb * 1024)), ( menub + 'flash_ld', ld ), ( menub + 'spiffs_pagesize', '256' ), ( menu + '.upload.maximum_size', "%i" % max_upload_size ), ( menub + 'rfcal_addr', "0x%X" % rfcal_addr) ]) if spiffs_kb > 0: d.update(collections.OrderedDict([ ( menub + 'spiffs_start', "0x%05X" % spiffs_start ), ( menub + 'spiffs_end', "0x%05X" % spiffs_end ), ( menub + 'spiffs_blocksize', "%i" % spiffs_blocksize ), ])) if ldshow: if ldgen: checkdir() ldbackupdir = lddir + "backup/" if not os.path.isdir(ldbackupdir): os.mkdir(ldbackupdir) if os.path.isfile(lddir + ld) and not os.path.isfile(ldbackupdir + ld): os.rename(lddir + ld, ldbackupdir + ld) realstdout = sys.stdout sys.stdout = open(lddir + ld, 'w') if spiffs_kb == 0: spiffs_start = spiffs_end page = 0 block = 0 elif spiffs_kb < 0x80000 / 1024: page = 0x100 block = 0x1000 else: page = 0x100 block = 0x2000 print("/* Flash Split for %s chips */" % strsize) print("/* sketch @0x%X (~%dKB) (%dB) */" % (spi, (max_upload_size / 1024), max_upload_size)) empty_size = spiffs_start - max_upload_size if empty_size > 0: print("/* empty @0x%X (~%dKB) (%dB) */" % (spi + max_upload_size, empty_size / 1024, empty_size)) print("/* spiffs @0x%X (~%dKB) (%dB) */" % (spi + spiffs_start, ((spiffs_end - spiffs_start) / 1024), spiffs_end - spiffs_start)) print("/* eeprom @0x%X (%dKB) */" % (spi + rfcal_addr - eeprom_size_kb * 1024, eeprom_size_kb)) print("/* rfcal @0x%X (%dKB) */" % (spi + rfcal_addr, rfcal_size_kb)) print("/* wifi @0x%X (%dKB) */" % (spi + rfcal_addr + rfcal_size_kb * 1024, sdkwifi_size_kb)) print("") print("MEMORY") print("{") print(" dport0_0_seg : org = 0x3FF00000, len = 0x10") print(" dram0_0_seg : org = 0x3FFE8000, len = 0x14000") print(" iram1_0_seg : org = 0x40100000, len = 0x8000") print(" irom0_0_seg : org = 0x40201010, 
len = 0x%x" % max_upload_size) print("}") print("") print("PROVIDE ( _SPIFFS_start = 0x%08X );" % (0x40200000 + spiffs_start)) print("PROVIDE ( _SPIFFS_end = 0x%08X );" % (0x40200000 + spiffs_end)) print("PROVIDE ( _SPIFFS_page = 0x%X );" % page) print("PROVIDE ( _SPIFFS_block = 0x%X );" % block) print("") print('INCLUDE "local.eagle.app.v6.common.ld"') if ldgen: sys.stdout.close() sys.stdout = realstdout return d def all_flash_map (): f512 = collections.OrderedDict([]) f1m = collections.OrderedDict([]) f2m = collections.OrderedDict([]) f4m = collections.OrderedDict([]) f8m = collections.OrderedDict([]) f16m = collections.OrderedDict([]) # flash(KB) spiffs(KB) f512.update(flash_map( 512)) f512.update(flash_map( 512, 32 )) f512.update(flash_map( 512, 64 )) f512.update(flash_map( 512, 128 )) f1m.update( flash_map( 1024)) f1m.update( flash_map( 1024, 64 )) f1m.update( flash_map( 1024, 128 )) f1m.update( flash_map( 1024, 144 )) f1m.update( flash_map( 1024, 160 )) f1m.update( flash_map( 1024, 192 )) f1m.update( flash_map( 1024, 256 )) f1m.update( flash_map( 1024, 512 )) f2m.update( flash_map( 2*1024)) f2m.update( flash_map( 2*1024, 128 )) f2m.update( flash_map( 2*1024, 256 )) f2m.update( flash_map( 2*1024, 512 )) f2m.update( flash_map( 2*1024, 1024 )) f4m.update( flash_map( 4*1024)) f4m.update( flash_map( 4*1024, 1024 )) f4m.update( flash_map( 4*1024, 2*1024 )) f4m.update( flash_map( 4*1024, 3*1024 )) f8m.update( flash_map( 8*1024, 6*1024 )) f8m.update( flash_map( 8*1024, 7*1024 )) f16m.update(flash_map( 16*1024, 14*1024 )) f16m.update(flash_map( 16*1024, 15*1024 )) if ldgen: print("generated: ldscripts (in %s)" % lddir) return { '512K': f512, '1M': f1m, '2M': f2m, '4M': f4m, '8M': f8m, '16M': f16m } ################################################################ # builtin led def led (default,max): led = collections.OrderedDict([ ('.menu.led.' + str(default), str(default)), ('.menu.led.' 
+ str(default) + '.build.led', '-DLED_BUILTIN=' + str(default)), ]); for i in range(0,max): if not i == default: led.update( collections.OrderedDict([ ('.menu.led.' + str(i), str(i)), ('.menu.led.' + str(i) + '.build.led', '-DLED_BUILTIN=' + str(i)), ])) return { 'led': led } ################################################################ # sdk selection def sdk (): return { 'sdk': collections.OrderedDict([ ('.menu.sdk.nonosdk221', 'nonos-sdk 2.2.1 (legacy)'), ('.menu.sdk.nonosdk221.build.sdk', 'NONOSDK221'), ('.menu.sdk.nonosdk222', 'nonos-sdk 2.2.2-190313 (testing)'), ('.menu.sdk.nonosdk222.build.sdk', 'NONOSDK22x'), ('.menu.sdk.nonosdk3v0', 'nonos-sdk pre-3 (known issues)'), ('.menu.sdk.nonosdk3v0.build.sdk', 'NONOSDK3V0'), ]) } ################################################################ def all_boards (): if boardsgen: checkdir() # check if backup already exists if not os.path.isfile("boards.txt.orig"): os.rename("boards.txt", "boards.txt.orig") realstdout = sys.stdout sys.stdout = open("boards.txt", 'w') macros.update(all_flash_map()) macros.update(all_debug()) macros.update(led(led_default, led_max)) macros.update(sdk()) print('#') print('# Do not create pull-requests for this file only, CI will not accept them.') print('# You *must* edit/modify/run ' + os.path.basename(sys.argv[0]) + ' to regenerate boards.txt.') print('# All modified files after running with option "--allgen" must be included in the pull-request.') print('#') print('') print('menu.BoardModel=Model') print('menu.baud=Upload Speed') print('menu.xtal=CPU Frequency') print('menu.CrystalFreq=Crystal Frequency') print('menu.eesz=Flash Size') print('menu.FlashMode=Flash Mode') print('menu.FlashFreq=Flash Frequency') print('menu.ResetMethod=Reset Method') print('menu.ESPModule=Module') print('menu.dbg=Debug port') print('menu.lvl=Debug Level') print('menu.ip=lwIP Variant') print('menu.vt=VTables') print('menu.exception=Exceptions') print('menu.led=Builtin Led') print('menu.wipe=Erase Flash') 
print('menu.sdk=Espressif FW') print('') for id in boards: print('##############################################################') board = boards[id] print(id + '.name=' + board['name']) # standalone options if 'opts' in board: for optname in board['opts']: print(id + optname + '=' + board['opts'][optname]) # macros macrolist = [ 'defaults', 'cpufreq_menu', 'vtable_menu', 'exception_menu' ] if 'macro' in board: macrolist += board['macro'] if lwip == 2: macrolist += [ 'lwip2', 'lwip' ] else: macrolist += [ 'lwip', 'lwip2' ] macrolist += [ 'debug_menu', 'flash_erase_menu' ] for cs in customspeeds: print(id + cs) if 'serial' in board: macrolist += speeds[board['serial']] else: macrolist += speeds[default_speed] for block in macrolist: for optname in macros[block]: if not ('opts' in board) or not (optname in board['opts']): print(id + optname + '=' + macros[block][optname]) if nofloat: print(id + '.build.float=') print('') if boardsgen: sys.stdout.close() sys.stdout = realstdout print("generated: boards.txt") ################################################################ def package (): pkgfname = "package/package_esp8266com_index.template.json" pkgfname_read = pkgfname checkdir() if packagegen: pkgfname_read = pkgfname + '.orig' if os.path.isfile(pkgfname_read): os.remove(pkgfname_read) os.rename(pkgfname, pkgfname_read) # read package file with open (pkgfname_read, "r") as package_file: filestr = package_file.read() substitution = '"boards": [\n' board_items = [' {\n "name": "%s"\n }' % boards[id]['name'] for id in boards] substitution += ',\n'.join(board_items) substitution += '\n ],' newfilestr = re.sub(r'"boards":[^\]]*\],', substitution, filestr, re.MULTILINE) # To get consistent indent/formatting read the JSON and write it out programattically if packagegen: with open(pkgfname, 'w') as package_file: filejson = json.loads(filestr, object_pairs_hook=collections.OrderedDict) package_file.write(json.dumps(filejson, indent=3, separators=(',',': '))) print("updated: 
%s" % pkgfname) else: sys.stdout.write(newfilestr) ################################################################ def doc (): if docgen: checkdir() # check if backup already exists if not os.path.isfile("doc/boards.rst.orig"): os.rename("doc/boards.rst", "doc/boards.rst.orig") realstdout = sys.stdout sys.stdout = open("doc/boards.rst", 'w') print('Boards') print('======') print('') for id in boards: board = boards[id] print(board['name']) dash = "" for i in range(len(board['name'])): dash += '-' print(dash) print('') if 'desc' in board: for line in board['desc']: print(line) else: print('No description') print('') if docgen: sys.stdout.close() sys.stdout = realstdout print("generated: doc/boards.rst") ################################################################ # help / usage def usage (name,ret): print("") print("boards.txt generator for esp8266/Arduino") print("") print("usage: %s [options]" % name) print("") print(" -h, --help") print(" --lwip - preferred default lwIP version (default %d)" % lwip) print(" --led - preferred default builtin led for generic boards (default %d)" % led_default) print(" --board <b> - board to modify:") print(" --speed <s> - change default serial speed") print(" --customspeed <s> - new serial speed for all boards") print(" --nofloat - disable float support in printf/scanf") print("") print(" mandatory option (at least one):") print("") print(" --boards - show boards.txt") print(" --boardsgen - replace boards.txt") print(" --ld - show ldscripts") print(" --ldgen - replace ldscripts") print(" --package - show package") print(" --packagegen - replace board:[] in package") print(" --doc - shows doc/boards.rst") print(" --docgen - replace doc/boards.rst") print(" --allgen - generate and replace everything") print(" (useful for pushing on github)") print("") out = "" for s in speeds: out += s + ' ' print("available serial speed options (kbps):", out) out = "" for b in boards: out += b + '(' if 'serial' in boards[b]: out += 
boards[b]['serial'] else: out += default_speed out += 'k) ' print("available board names:", out) print("") sys.exit(ret) ################################################################ ################################################################ # entry point lwip = 2 default_speed = '115' led_default = 2 led_max = 16 nofloat = False ldgen = False ldshow = False boardsgen = False boardsshow = False packageshow = False packagegen = False docshow = False docgen = False customspeeds = [] lddir = "tools/sdk/ld/" #### vvvv cmdline parsing starts try: opts, args = getopt.getopt(sys.argv[1:], "h", [ "help", "lwip=", "led=", "speed=", "board=", "customspeed=", "nofloat", "noextra4kheap", "allowWPS", "ld", "ldgen", "boards", "boardsgen", "package", "packagegen", "doc", "docgen", "allgen"] ) except getopt.GetoptError as err: print(str(err)) # will print something like "option -a not recognized" usage(sys.argv[0], 1) no = '(not set)' board = no for o, a in opts: if o in ("-h", "--help"): usage(sys.argv[0], 0) elif o in ("--lwip"): lwip = a elif o in ("--led"): led_default = int(a) elif o in ("--customspeed"): customspeeds += [ '.menu.baud.' + a + '=' + a, '.menu.baud.' 
+ a + '.upload.speed' + '=' + a ] elif o in ("--board"): if not a in boards: print("board %s not available" % a) usage(sys.argv[0], 1) board = a elif o in ("--speed"): if board == no: print("board not set") usage(sys.argv[0], 1) if not a in speeds: print("speed %s not available" % a) usage(sys.argv[0], 1) boards[board]['serial'] = a elif o in ("--nofloat"): nofloat=True elif o in ("--noextra4kheap", "--allowWPS"): print('option ' + o + ' is now deprecated, without effect, and will be removed') elif o in ("--ld"): ldshow = True elif o in ("--ldgen"): ldshow = True ldgen = True elif o in ("--boardsshow"): boardsshow = True elif o in ("--boardsgen"): boardsshow = True boardsgen = True elif o in ("--package"): packageshow = True elif o in ("--packagegen"): packageshow = True packagegen = True elif o in ("--doc"): docshow = True elif o in ("--docgen"): docshow = True docgen = True elif o in ("--allgen"): ldshow = True ldgen = True boardsshow = True boardsgen = True packageshow = True packagegen = True docshow = True docgen = True else: assert False, "unhandled option" #### ^^^^ cmdline parsing ends did = False if ldshow: all_flash_map() did = True if boardsshow: ldshow = False ldgen = False all_boards() did = True if packageshow: package() did = True if docshow: doc() did = True if not did: usage(sys.argv[0], 0)
Lan-Hekary/Arduino
tools/boards.txt.py
Python
lgpl-2.1
69,429
[ "CRYSTAL", "ESPResSo" ]
87f34de12e9d6b0e5e532cfce5da797716e3a4a3fe5c9d0684451db3cb68f7a8
import itertools import math import re import time from collections import defaultdict as ddict, deque from lib.tree import Tree from common import log from common.hgraph.hgraph import Hgraph from common.cfg import Chart from vo_item import CfgItem, HergItem, SynchronousItem from vo_rule import VoRule import pprint class Parser: ''' A deductive style parser for hypergraphs and strings that matches parts of the input hypergraph according to an arbitrary visit order for edges. (or left-to-right for strings, in which case this is essentially a CKY parser). ''' def __init__(self, grammar): self.grammar = grammar self.nodelabels = grammar.nodelabels def parse_graphs(self, graph_iterator): """ Parse all the graphs in graph_iterator. This is a generator. """ for graph in graph_iterator: raw_chart = self.parse(None, graph) # The raw chart contains parser operations, need to decode the parse forest from this yield cky_chart(raw_chart) def parse_strings(self, string_iterator): """ Parse all strings in the string iterator. This is a generator. """ for string in string_iterator: raw_chart = self.parse(string, None) yield cky_chart(raw_chart) def parse_bitexts(self, pair_iterator): """ Parse all pairs of input objects returned by the pair iterator. This is a generator. """ for line1, line2 in pair_iterator: if self.grammar.rhs1_type == "hypergraph": obj1 = Hgraph.from_string(line1) else: obj1 = line1.strip().split() if self.grammar.rhs2_type == "hypergraph": obj2 = Hgraph.from_string(line2) else: obj2 = line2.strip().split() raw_chart = self.parse_bitext(obj1, obj2) yield cky_chart(raw_chart) def parse_bitext(self, obj1, obj2): """ Parse a single pair of objects (two strings, two graphs, or string/graph). 
""" rhs1type, rhs2type = self.grammar.rhs1_type, self.grammar.rhs2_type assert rhs1type in ["string","hypergraph"] and rhs2type in ["string","hypergraph"] # Remember size of input objects and figure out Item subclass if rhs1type == "string": obj1size = len(obj1) elif rhs1type == "hypergraph": obj1size = len(obj1.triples()) if rhs2type == "string": obj2size = len(obj2) elif rhs2type == "hypergraph": obj2size = len(obj2.triples()) grammar = self.grammar start_time = time.clock() log.chatter('parse...') # initialize data structures and lookups # we use various tables to provide constant-time lookup of fragments available # for shifting, completion, etc. chart = ddict(set) #TODO: command line filter to switch rule filter on/off pgrammar = [grammar[r] for r in grammar.reachable_rules(obj1, obj2)] #grammar.values() queue = deque() # the items left to be visited pending = set() # a copy of queue with constant-time lookup attempted = set() # a cache of previously-attempted item combinations visited = set() # a cache of already-visited items nonterminal_lookup = ddict(set) # a mapping from labels to graph edges reverse_lookup = ddict(set) # a mapping from outside symbols to open items # mapping from words to string indices for each string word_terminal_lookup1 = ddict(set) word_terminal_lookup2 = ddict(set) if rhs1type == "string": for i in range(len(obj1)): word_terminal_lookup1[obj1[i]].add(i) if rhs2type == "string": for i in range(len(obj2)): word_terminal_lookup2[obj2[i]].add(i) # mapping from edge labels to graph edges for each graph edge_terminal_lookup1 = ddict(set) edge_terminal_lookup2 = ddict(set) if rhs1type == "hypergraph": for edge in obj1.triples(nodelabels = self.nodelabels): edge_terminal_lookup1[edge[1]].add(edge) if rhs2type == "hypergraph": for edge in obj2.triples(nodelabels = self.nodelabels): edge_terminal_lookup2[edge[1]].add(edge) for rule in pgrammar: item1class = CfgItem if rhs1type == "string" else HergItem item2class = CfgItem if rhs2type == 
"string" else HergItem axiom = SynchronousItem(rule, item1class, item2class, nodelabels = self.nodelabels) queue.append(axiom) pending.add(axiom) if axiom.outside_is_nonterminal: reverse_lookup[axiom.outside_symbol].add(axiom) # keep track of whether we found any complete derivation success = False # parse while queue: item = queue.popleft() pending.remove(item) visited.add(item) log.debug('handling', item) if item.closed: log.debug(' is closed.') # check if it's a complete derivation if self.successful_biparse(obj1, obj2, item, obj1size, obj2size): chart['START'].add((item,)) success = True # add to nonterminal lookup nonterminal_lookup[item.rule.symbol].add(item) # wake up any containing rules # Unlike in ordinary state-space search, it's possible that we will have # to re-visit items which couldn't be merged with anything the first time # we saw them, and are waiting for the current item. The reverse_lookup # indexes all items by their outside symbol, so we re-append to the queue # all items looking for something with the current item's symbol. 
for ritem in reverse_lookup[item.rule.symbol]: if ritem not in pending: queue.append(ritem) pending.add(ritem) else: if item.outside_is_nonterminal: # complete reverse_lookup[item.outside_symbol].add(item) for oitem in nonterminal_lookup[item.outside_symbol]: log.debug(" oitem:", oitem) if (item, oitem) in attempted: # don't repeat combinations we've tried before continue attempted.add((item, oitem)) if not item.can_complete(oitem): log.debug(" fail") continue log.debug(" ok") nitem = item.complete(oitem) chart[nitem].add((item, oitem)) if nitem not in pending and nitem not in visited: queue.append(nitem) pending.add(nitem) else: # shift ; this depends on the configuration (string/graph -> string/graph) if not item.outside1_is_nonterminal and not item.item1.closed: if rhs1type == "string": new_items = [item.shift_word1(item.outside_object1, index) for index in word_terminal_lookup1[item.outside_object1] if item.can_shift_word1(item.outside_object1, index)] else: assert rhs1type is "hypergraph" new_items = [item.shift_edge1(edge) for edge in edge_terminal_lookup1[item.outside_object1] if item.can_shift_edge1(edge)] else: assert not item.outside2_is_nonterminal # Otherwise shift would not be called if rhs2type == "string": new_items = [item.shift_word2(item.outside_object2, index) for index in word_terminal_lookup2[item.outside_object2] if item.can_shift_word2(item.outside_object2, index)] else: assert rhs2type is "hypergraph" new_items = [item.shift_edge2(edge) for edge in edge_terminal_lookup2[item.outside_object2] if item.can_shift_edge2(edge)] for nitem in new_items: log.debug(' shift', nitem, nitem.shifted) chart[nitem].add((item,)) if nitem not in pending and nitem not in visited: queue.append(nitem) pending.add(nitem) if success: log.chatter(' success!') etime = time.clock() - start_time log.chatter('done in %.2fs' % etime) # TODO return partial chart return chart def parse(self, string, graph): """ Parses the given string and/or graph. 
""" # This is a long function, so let's start with a high-level overview. This is # a "deductive-proof-style" parser: We begin with one "axiomatic" chart item # for each rule, and combine these items with each other and with fragments of # the object(s) being parsed to deduce new items. We can think of these items # as defining a search space in which we need to find a path to the goal item. # The parser implemented here performs a BFS of this search space. grammar = self.grammar # remember when we started start_time = time.clock() log.chatter('parse...') # specify what kind of items we're working with if string and graph: axiom_class = CfgHergItem elif string: axiom_class = CfgItem else: axiom_class = HergItem # remember the size of the example if string: string_size = len(string) else: string_size = -1 if graph: graph_size = len(graph.triples(nodelabels = self.nodelabels)) else: graph_size = -1 # initialize data structures and lookups # we use various tables to provide constant-time lookup of fragments available # for shifting, completion, etc. 
chart = ddict(set) # TODO: Command line option to switch grammar filter on/off if string: pgrammar = [grammar[r] for r in grammar.reachable_rules(string, None)] #grammar.values() if graph: pgrammar = [grammar[r] for r in grammar.reachable_rules(graph, None)] #grammar.values() queue = deque() # the items left to be visited pending = set() # a copy of queue with constant-time lookup attempted = set() # a cache of previously-attempted item combinations visited = set() # a cache of already-visited items word_terminal_lookup = ddict(set) nonterminal_lookup = ddict(set) # a mapping from labels to graph edges reverse_lookup = ddict(set) # a mapping from outside symbols open items if string: word_terminal_lookup = ddict(set) # mapping from words to string indices for i in range(len(string)): word_terminal_lookup[string[i]].add(i) if graph: edge_terminal_lookup = ddict(set) # mapping from edge labels to graph edges for edge in graph.triples(nodelabels = self.nodelabels): edge_terminal_lookup[edge[1]].add(edge) for rule in pgrammar: axiom = axiom_class(rule, nodelabels = self.nodelabels) queue.append(axiom) pending.add(axiom) if axiom.outside_is_nonterminal: reverse_lookup[axiom.outside_symbol].add(axiom) # keep track of whether we found any complete derivation success = False # parse while queue: item = queue.popleft() pending.remove(item) visited.add(item) log.debug('handling', item) if item.closed: log.debug(' is closed.') # check if it's a complete derivation if self.successful_parse(string, graph, item, string_size, graph_size): chart['START'].add((item,)) success = True # add to nonterminal lookup nonterminal_lookup[item.rule.symbol].add(item) # wake up any containing rules # Unlike in ordinary state-space search, it's possible that we will have # to re-visit items which couldn't be merged with anything the first time # we saw them, and are waiting for the current item. 
The reverse_lookup # indexes all items by their outside symbol, so we re-append to the queue # all items looking for something with the current item's symbol. for ritem in reverse_lookup[item.rule.symbol]: if ritem not in pending: queue.append(ritem) pending.add(ritem) else: if item.outside_is_nonterminal: # complete reverse_lookup[item.outside_symbol].add(item) for oitem in nonterminal_lookup[item.outside_symbol]: log.debug(" oitem:", oitem) if (item, oitem) in attempted: # don't repeat combinations we've tried before continue attempted.add((item, oitem)) if not item.can_complete(oitem): log.debug(" fail") continue log.debug(" ok") nitem = item.complete(oitem) chart[nitem].add((item, oitem)) if nitem not in pending and nitem not in visited: queue.append(nitem) pending.add(nitem) else: # shift if string and graph: if not item.outside_word_is_nonterminal: new_items = [item.shift_word(item.outside_word, index) for index in word_terminal_lookup[item.outside_word] if item.can_shift_word(item.outside_word, index)] else: assert not item.outside_edge_is_nonterminal new_items = [item.shift_edge(edge) for edge in edge_terminal_lookup[item.outside_edge] if item.can_shift_edge(edge)] elif string: new_items = [item.shift(item.outside_word, index) for index in word_terminal_lookup[item.outside_word] if item.can_shift(item.outside_word, index)] else: assert graph new_items = [item.shift(edge) for edge in edge_terminal_lookup[item.outside_edge] if item.can_shift(edge)] for nitem in new_items: log.debug(' shift', nitem, nitem.shifted) chart[nitem].add((item,)) if nitem not in pending and nitem not in visited: queue.append(nitem) pending.add(nitem) if success: log.chatter(' success!') etime = time.clock() - start_time log.chatter('done in %.2fs' % etime) # TODO return partial chart return chart def successful_parse(self, string, graph, item, string_size, graph_size): """ Determines whether the given item represents a complete derivation of the object(s) being parsed. 
""" # make sure the right start symbol is used if self.grammar.start_symbol != item.rule.symbol: return False # make sure the item spans the whole object if string and graph: whole_string = item.cfg_item.j - item.cfg_item.i == string_size whole_graph = len(item.herg_item.shifted) == graph_size return whole_string and whole_graph elif string: return item.j - item.i == string_size else: # graph return len(item.shifted) == graph_size def successful_biparse(self, obj1, obj2, item, obj1size, obj2size): """ Determines whether the given item represents a complete derivation of the object(s) being parsed. """ # make sure the right start symbol is used if self.grammar.start_symbol != item.rule.symbol: return False # make sure the item spans the whole object if item.item1class is CfgItem: if item.item1.j - item.item1.i != obj1size: return False else: if len(item.item1.shifted) != obj1size: return False if item.item2class is CfgItem: if item.item2.j - item.item2.i != obj2size: return False else: if len(item.item2.shifted) != obj2size: return False return True #def output_bolinas(charts, grammar, prefix): # """ # Prints given in native bolinas format. # """ # raise InvocationException("Output format 'bolinas' is unsupported") #def output_carmel(charts, grammar, prefix): # """ # Prints given charts in carmel format, suitable for use with forest-em. # Will produce two files: prefix.carmel.norm (the RHS normalizer groups) and # prefix.carmel.charts (the charts). 
# """ # # # we need an explicit id for the start rule # # forest-em irritatingly expects rules to be 1-indexed rather than 0-indexed, # # so we have to increase all rule ids by 1 # start_rule_id = max(grammar.keys()) + 2 # # # create the set of all normalization groups, and write them # normgroups = ddict(set) # normgroups['START'].add(start_rule_id) # for rule_id in grammar: # rule = grammar[rule_id] # normgroups[rule.symbol].add(rule.rule_id + 1) # with open('%s.carmel.norm' % prefix, 'w') as ofile: # print >>ofile, '(', # for group in normgroups.values(): # print >>ofile, '(%s)' % (' '.join([str(rule) for rule in group])), # print >>ofile, ')' # # # unlike the other formats, all carmel charts go in one file # with open('%s.carmel.charts' % prefix, 'w') as ofile: # for chart in charts: # # chart items we've already seen, and the labels assigned to them # seen = dict() # # python scoping weirdness requires us to store this variable with an # # extra layer of reference so that it can be reassigned by the inner # # method # next_id = [1] # # def format_inner(item): # if item in seen: # return '#d' % seen[item] # my_id = next_id[0] # next_id[0] += 1 # if item == 'START': # sym = start_rule_id # else: # # see note above on rule ids # sym = item.rule.rule_id + 1 # if item in chart: # parts = [] # for production in chart[item]: # prod_parts = [] # for pitem in production: # prod_parts.append(format_inner(pitem)) # parts.append('(%s %s)' % (sym, ' '.join(prod_parts))) # if len(parts) > 1: # return '#%d(OR %s)' % (my_id, ' '.join(parts)) # else: # return '#%d%s' % (my_id, parts[0]) # else: # return '#%d(%s)' % (my_id, sym) # # print >>ofile, format_inner('START') #def output_tiburon(charts, grammar, prefix): # """ # Prints given charts in tiburon format, for finding n-best AMRs. 
# """ # # def start_stringifier(rhs_item): # return 'START -> %s # 1.0' % rhs_item.uniq_str() # # def nt_stringifier(item, rhs): # nrhs = ' '.join([i for i in item.rule.string if i[0] == '#']) # # strip indices # nrhs = re.sub(r'\[\d+\]', '', nrhs) # for ritem in rhs: # # replace only one occurrence, in case we have a repeated NT symbol # nrhs = re.sub('#' + ritem.rule.symbol, ritem.uniq_str(), nrhs, count=1) # nrhs = '%s(%d(%s))' % (item.rule.symbol, item.rule.rule_id, nrhs) # return '%s -> %s # %f' % (item.uniq_str(), nrhs, item.rule.weight) # # def t_stringifier(item): # return '%s -> %s(%d) # %f' % (item.uniq_str(), item.rule.symbol, # item.rule.rule_id, item.rule.weight) # # # for i, chart in zip(range(len(charts)), charts): # if chart: # with open('%s%d.tiburon' % (prefix, i), 'w') as ofile: # rules = ['START'] + strings_for_items(chart, start_stringifier, # nt_stringifier, t_stringifier) # print >>ofile, '\n'.join(rules) #def output_cdec(charts, grammar, prefix): # """ # Prints given charts in cdec format, for finding n-best strings. 
#  """
#
#  def start_stringifier(rhs_item):
#    return '[START] ||| [%s] ||| Rule=0.0' % rhs_item.uniq_str()
#
#  def nt_stringifier(item, rhs):
#    nrhs = ' '.join(item.rule.string)
#    # strip indices
#    nrhs = re.sub(r'\[\d+\]', '', nrhs)
#    for ritem in rhs:
#      # replace only one occurrence, in case we have a repeated NT symbol
#      nrhs = re.sub('#' + ritem.rule.symbol, '[%s]' % ritem.uniq_str(), nrhs)
#    return '[%s] ||| %s ||| Rule=%f' % (item.uniq_str(), nrhs,
#        math.log(item.rule.weight))
#
#  def t_stringifier(item):
#    return '[%s] ||| %s ||| Rule=%f' % (item.uniq_str(),
#        ' '.join(item.rule.string), math.log(item.rule.weight))
#
#  for i, chart in zip(range(len(charts)), charts):
#    with open('%s%d.cdec' % (prefix, i), 'w') as ofile:
#      rules = ['[S] ||| [START]'] + strings_for_items(chart, start_stringifier,
#          nt_stringifier, t_stringifier)
#      print >>ofile, '\n'.join(rules)


def strings_for_items(chart, start_stringifier, nt_stringifier, t_stringifier):
    """Serialize every reachable chart entry to a string.

    Walks the chart top-down from the 'START' entry and renders each item
    with one of three caller-supplied formatting callbacks:
      start_stringifier : called on the single RHS item of 'START'
      nt_stringifier    : called on (item, rhs) for nonterminal productions
      t_stringifier     : called on items not present in the chart
                          (these must be terminal rules — asserted below)

    Returns the list of rendered strings, one per visited production.
    """
    strings = []
    # DFS over chart entries; `visited` guards against re-emitting an item
    # that is reachable through several productions.
    stack = ['START']
    visited = set()
    while stack:
        item = stack.pop()
        if item in visited:
            continue
        visited.add(item)
        if item in chart:
            for rhs in chart[item]:
                if item == 'START':
                    # the goal entry always rewrites to exactly one item
                    assert len(rhs) == 1
                    strings.append(start_stringifier(rhs[0]))
                    stack.append(rhs[0])
                else:
                    strings.append(nt_stringifier(item, rhs))
                    for ritem in rhs:
                        # every child is either terminal or itself derivable
                        assert ritem.rule.is_terminal or ritem in chart
                        stack.append(ritem)
        else:
            # items absent from the chart must come from terminal rules
            assert item.rule.is_terminal
            strings.append(t_stringifier(item))
    return strings


def cky_chart(chart):
    """
    Convert the chart returned by the parser into a standard parse chart.

    The raw chart maps each item to the set of parser operations (tuples of
    items) that produced it; the returned Chart maps each *closed* item to a
    list of dicts, each dict mapping a nonterminal key
    (outside_symbol, outside_nt_index) to the item that filled it.
    """
    def search_productions(citem, chart):
        # Recursively collect, for `citem`, all combinations of completed
        # nonterminals that could have produced it.
        if len(chart[citem]) == 0:
            return []
        if citem == "START":
            return [{"START": child[0]} for child in chart[citem]]
        prodlist = list(chart[citem])
        lefts = set(x[0] for x in prodlist)
        # All productions for one item must have the same arity
        # (1 = shift step, 2 = complete step).
        lengths = set(len(x) for x in prodlist)
        assert len(lengths) == 1
        split_len = lengths.pop()
        # figure out all items that could have been used to complete this
        # nonterminal
        if split_len != 1:
            assert split_len == 2
            # key identifying which open nonterminal slot was filled
            symbol = prodlist[0][0].outside_symbol, prodlist[0][0].outside_nt_index
            result = []
            for child in prodlist:
                other_nts = search_productions(child[0], chart)
                if other_nts:
                    # extend every partial assignment with this completion
                    for option in other_nts:
                        d = dict(option)
                        d[symbol] = child[1]
                        result.append(d)
                else:
                    result.append(dict([(symbol, child[1])]))
            return result
        else:
            # shift step: recurse through to the item it came from
            return search_productions(prodlist[0][0], chart)

    # First pass: find every item reachable from 'START'.
    stack = ['START']
    visit_items = set()
    while stack:
        item = stack.pop()
        if item in visit_items:
            continue
        visit_items.add(item)
        for production in chart[item]:
            for citem in production:
                stack.append(citem)

    cky_chart = Chart()
    for item in visit_items:
        # we only care about complete steps, so only add closed items to the
        # chart
        if not (item == 'START' or item.closed):
            continue
        prods = search_productions(item, chart)
        if prods:
            cky_chart[item] = prods
    return cky_chart
GullyAPCBurns/bolinas
parser/parser.py
Python
mit
23,547
[ "VisIt" ]
610b431a7f826439789a9cf41a129dba7824cc3e44d43712e8236f8aa8b5c0e0
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes continue statements by de-sugaring into a control boolean."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno


class ContinueCanonicalizationTransformer(transformer.Base):
  """Canonicalizes continue statements into additional conditionals."""

  def __init__(self, context):
    super(ContinueCanonicalizationTransformer, self).__init__(context)
    # This is a stack structure, to correctly process nested loops.
    # Each entry is a mutable pair [continue_seen, control_var_name]; the
    # boolean is flipped in place while the statements of a loop body are
    # visited, so it always reflects the innermost loop being processed.
    self.continuation_uses = []

  def _create_continuation_check(self):
    """Build an empty `if not <control_var>: pass` guard node.

    The `pass` body is immediately cleared; subsequent statements of the
    block are re-parented into `cond.body` by the caller.
    """
    template = """
      if not var_name:
        pass
    """
    cond, = templates.replace(template, var_name=self.continuation_uses[-1][1])
    cond.body = []
    return cond

  def _create_continuation_trigger(self):
    """Build the `<control_var> = True` assignment that replaces `continue`."""
    template = """
      var_name = True
    """
    assign, = templates.replace(
        template, var_name=self.continuation_uses[-1][1])
    return assign

  def _create_continuation_init(self):
    """Build the `<control_var> = False` initializer for the loop body."""
    template = """
      var_name = False
    """
    assign, = templates.replace(
        template, var_name=self.continuation_uses[-1][1])
    return assign

  def _visit_and_reindent_if_necessary(self, nodes):
    """Visit a statement block, guarding statements that follow a continue.

    After any statement that may have triggered the control variable, the
    remaining statements are nested under an `if not <control_var>:` guard,
    so they are skipped at runtime when a continue was requested.
    Leaves continuation_uses[-1][0] set to whether a continue was used
    anywhere in this block.
    """
    reorganized_nodes = []
    # `current_dest` is where visited statements are appended; it moves
    # into the body of each newly created guard conditional.
    current_dest = reorganized_nodes
    continue_used_in_block = False
    for i, n in enumerate(nodes):
      # TODO(mdan): This could be optimized if control structures are simple.
      self.continuation_uses[-1][0] = False
      n = self.visit(n)
      current_dest.append(n)
      if self.continuation_uses[-1][0]:
        continue_used_in_block = True
        if i < len(nodes) - 1:  # Last statement in block needs no protection.
          cond = self._create_continuation_check()
          current_dest.append(cond)
          current_dest = cond.body
    self.continuation_uses[-1][0] = continue_used_in_block
    return reorganized_nodes

  def _process_loop_block(self, block, scope):
    """Rewrite a loop body, allocating a fresh control variable for it."""
    # New symbol name must not collide with anything referenced in scope.
    cont_var = self.context.namer.new_symbol('cont_requested', scope.referenced)
    self.continuation_uses.append([False, cont_var])
    block = self._visit_and_reindent_if_necessary(block)
    if self.continuation_uses[-1][0]:
      # Only emit the `= False` initializer if a continue was actually seen.
      block.insert(0, self._create_continuation_init())
    self.continuation_uses.pop()
    return block

  def visit_While(self, node):
    self.generic_visit(node.test)
    node.body = self._process_loop_block(node.body,
                                         anno.getanno(node,
                                                      NodeAnno.BODY_SCOPE))
    for n in node.orelse:
      self.generic_visit(n)
    return node

  def visit_For(self, node):
    self.generic_visit(node.target)
    self.generic_visit(node.iter)
    node.body = self._process_loop_block(node.body,
                                         anno.getanno(node,
                                                      NodeAnno.BODY_SCOPE))
    for n in node.orelse:
      self.generic_visit(n)
    return node

  def visit_If(self, node):
    # Only rewrite conditionals that appear inside a loop being processed;
    # a continue in either branch marks the enclosing loop's flag.
    if self.continuation_uses:
      self.generic_visit(node.test)
      node.body = self._visit_and_reindent_if_necessary(node.body)
      continue_used_in_body = self.continuation_uses[-1][0]
      node.orelse = self._visit_and_reindent_if_necessary(node.orelse)
      # A continue in *either* branch counts for the enclosing block.
      self.continuation_uses[-1][0] = (
          continue_used_in_body or self.continuation_uses[-1][0])
    else:
      node = self.generic_visit(node)
    return node

  def visit_Continue(self, node):
    # Replace `continue` with `<control_var> = True` and record its use.
    self.continuation_uses[-1][0] = True
    return self._create_continuation_trigger()

  def visit_Break(self, node):
    # Break canonicalization is a separate, earlier pass.
    assert False, 'break statement should be desugared at this point'


def transform(node, namer):
  # NOTE(review): the parameter is named `namer` but is forwarded as the
  # transformer's `context` (which is expected to expose `.namer`) —
  # presumably an EntityContext; confirm at call sites.
  return ContinueCanonicalizationTransformer(namer).visit(node)
nburn42/tensorflow
tensorflow/contrib/autograph/converters/continue_statements.py
Python
apache-2.0
4,655
[ "VisIt" ]
2ade4cd519e102c6cc30b22ecb6bd7a8913f06efa8fdbd7ee70c29ce4f77620b
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# ace2fasta.py
# From ACE format to FASTA format

# Eric Normandeau
# 2010 04 15

# Create an output_file.fasta containing contig sequences alignements from
# an input_file.ace
# Clean each contig by removing insertions '*' in the contig consensus and
# all the sequences in the contig alignment

__version_info__ = ('0', '0', '2')
__version__ = '.'.join(__version_info__)

# Module imports

import getopt
import sys
import platform

# NOTE: the Biopython imports are done lazily inside ace2fasta() so that
# `-h/--help` and the pure string helpers below work without Biopython.


# Function definitions

def cut_ends(read, start, end):
    """Replace residues on either end of a sequence with gaps.

    Cut out the sections of each read which the assembler has decided are
    not good enough to include in the contig and replace them with gaps,
    so the read keeps its position within the alignment.
    """
    return (start - 1) * '-' + read[start - 1:end] + (len(read) - end) * '-'

def pad_read(read, start, conlength):
    """Pad out either end of a read so it fits into an alignment.

    The start argument is the position of the first base of the read's
    sequence in the contig it is part of. If the start value is lower than 1
    (since ACE files count from 1, not 0) we take part of the sequence off
    the start, otherwise each end is padded to the length of the consensus
    with gaps.
    """
    if start < 1:
        seq = read[-1 * start + 1:]
    else:
        seq = (start - 1) * '-' + read
    seq = seq + (conlength - len(seq)) * '-'
    return seq

def ace2fasta(in_file, out_file):
    """Write every contig of an ACE file as a FASTA alignment."""
    # Imported here so the rest of the module stays importable without
    # Biopython installed.
    from Bio.Sequencing import Ace
    from Bio.Align.Generic import Alignment
    from Bio.Alphabet import IUPAC, Gapped

    # Context managers close both handles; the original left the input open
    # and a bare `except` silently hid real parse errors.
    with open(in_file) as ace_handle, open(out_file, "w") as output_file:
        for contig in Ace.parse(ace_handle):
            align = Alignment(Gapped(IUPAC.ambiguous_dna, "-"))
            # Add consensus sequence first, then each clipped, padded read.
            align.add_sequence(contig.name, contig.sequence)
            for readn in range(len(contig.reads)):
                clipst = contig.reads[readn].qa.qual_clipping_start
                clipe = contig.reads[readn].qa.qual_clipping_end
                start = contig.af[readn].padded_start
                seq = cut_ends(contig.reads[readn].rd.sequence, clipst, clipe)
                seq = pad_read(seq, start, len(contig.sequence))
                # "pseudo" reads are assembler artifacts, not real reads.
                if "pseudo" not in contig.reads[readn].rd.name:
                    align.add_sequence(contig.reads[readn].rd.name, seq)
            output_file.write(align.format("fasta"))
    print("All contigs treated")

def help():
    """Print the manual page; ANSI codes are stripped on Windows."""
    _platform = platform.system()
    name = sys.argv[0]
    text = """
%s(1)                        User Commands                        %s(1)

\033[1mNAME\033[0m
\t%s - From ACE format to FASTA format

\033[1mSYNOPSIS\033[0m
\t\033[1mpython %s \033[0m[\033[4mOPTION\033[0m]... [\033[4mFILE\033[0m]...

\033[1mDESCRIPTION\033[0m
\tCreate FASTA alignment file from an ACE file containing contigs.

\t%s uses the Biopython library to parse an ACE file containing
\tNext Generation Sequencing contig alignments. It then writes a FASTA
\tfile containing all the contigs, with both the consensus sequence
\tand the aligned sequences for each contig.

\t\033[1m-h, --help\033[0m
\t\tDisplay the manual of this program

\t\033[1m-i, --input\033[0m
\t\tInput file in .ACE format

\t\033[1m-o, --output\033[0m
\t\tOutput file in .FASTA format

\033[1mAUTHORS\033[0m
\tWritten by Eric Normandeau and Nicolas Maillet.
""" % (name, name, name, name, name)
    if _platform != 'Windows':
        print(text)
    else:
        # Windows consoles of that era did not render ANSI escape codes.
        for code in ["\033[1m", "\033[0m", "\033[4m"]:
            text = text.replace(code, "")
        print(text)

def main():
    """Parse command line options and run the conversion."""
    input_ace = None
    output_fasta = None
    # Parse command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:o:",
                                   ["help", "input=", "output="])
    except getopt.GetoptError:
        print("Input error. Use -h for help")
        sys.exit(0)
    # Process command line options
    for option, value in opts:
        if option in ('-h', '--help'):
            help()
            sys.exit(0)
        elif option in ('-i', '--input'):
            input_ace = value
        elif option in ('-o', '--output'):
            output_fasta = value
    # Fixes two bugs in the original: a NameError when -i was missing, and
    # -o being silently ignored when it appeared before -i on the line.
    if input_ace is None:
        print("Input Error: No input file specified or file not found.")
        print("Use -h for help.")
        sys.exit(0)
    if output_fasta is None:
        output_fasta = input_ace.replace(".ace", "") + ".fasta"
    try:
        open(input_ace).close()
    except IOError:
        print("Input Error: No input file specified or file not found.")
        print("Use -h for help.")
        sys.exit(0)
    ace2fasta(input_ace, output_fasta)

if __name__ == "__main__":
    main()
wkh124/wkh124
ace2fasta.py
Python
gpl-3.0
4,772
[ "Biopython" ]
997e2b55484077a0af77695b174006a236c5ffc914525836b757f1edb89436d3
'''
===========================================
Using the Kalman Filter and Kalman Smoother
===========================================

This simple example shows how one may apply the Kalman Filter and Kalman
Smoother to some randomly generated data.

The Kalman Filter and Kalman Smoother are two algorithms for predicting the
hidden state of Linear-Gaussian system. In this script, all model parameters
are specified beforehand, so there is no need to fit the Kalman Filter's
parameters to the measurements. However, this is not essential; sensible
defaults will be used for unspecified parameters, and they may be learned using
:func:`KalmanFilter.em`.

The figure drawn shows the true, hidden state, the state estimates given by the
Kalman Filter, and the state estimates given by the Kalman Smoother.
'''
import numpy as np
# NOTE(review): `pylab` is a deprecated matplotlib entry point — consider
# `matplotlib.pyplot` if this example is modernized.
import pylab as pl
from pykalman import KalmanFilter

# specify parameters
# Fixed seed so the sampled trajectory (and the figure) is reproducible.
random_state = np.random.RandomState(0)
transition_matrix = [[1, 0.1], [0, 1]]
transition_offset = [-0.1, 0.1]
# Observation model: identity perturbed by small random entries.
observation_matrix = np.eye(2) + random_state.randn(2, 2) * 0.1
observation_offset = [1.0, -1.0]
transition_covariance = np.eye(2)
observation_covariance = np.eye(2) + random_state.randn(2, 2) * 0.1
initial_state_mean = [5, -5]
initial_state_covariance = [[1, 0.1], [-0.1, 1]]

# sample from model
kf = KalmanFilter(
    transition_matrix, observation_matrix, transition_covariance,
    observation_covariance, transition_offset, observation_offset,
    initial_state_mean, initial_state_covariance,
    random_state=random_state
)
# Draw a 50-step trajectory of hidden states and their noisy observations.
states, observations = kf.sample(
    n_timesteps=50,
    initial_state=initial_state_mean
)

# estimate state with filtering and smoothing
# filter() is causal (uses observations up to t); smooth() also uses future
# observations, so its estimates are typically closer to the true states.
filtered_state_estimates = kf.filter(observations)[0]
smoothed_state_estimates = kf.smooth(observations)[0]

# draw estimates
pl.figure()
lines_true = pl.plot(states, color='b')
lines_filt = pl.plot(filtered_state_estimates, color='r')
lines_smooth = pl.plot(smoothed_state_estimates, color='g')
pl.legend(
    (lines_true[0], lines_filt[0], lines_smooth[0]),
    ('true', 'filt', 'smooth'),
    loc='lower right'
)
pl.show()
PierrotLC/pykalman
examples/standard/plot_filter.py
Python
bsd-3-clause
2,121
[ "Gaussian" ]
23f3158391c7663e98e54d291c26424212382dabd47cf532984548e9a9bd9aa5
# -*- coding: UTF-8 -*-

"""
weblogin by Anarchintosh @ xbmcforums
Copyleft (GNU GPL v3) 2011 onwards

this example is configured for Fantasti.cc login
See for the full guide please visit:
http://forum.xbmc.org/showthread.php?p=772597#post772597

USAGE:

in your default.py put:

import weblogin
logged_in = weblogin.doLogin('a-path-to-save-the-cookie-to','the-username','the-password')

logged_in will then be either True or False depending on whether the login
was successful.
"""

import os
import urllib.parse
import urllib.request
import http.cookiejar


def check_login(source, username):
    """Return True when the HTML in *source* shows a logged-in session.

    *username* is kept for compatibility: the marker string may be changed
    to the username for sites that do not show a generic sign-out link.
    """
    # the string used to check whether the login was successful
    logged_in_string = 'SIGN OUT'
    # plain substring search (the page uses this exact capitalization)
    return source.find(logged_in_string) >= 0


def doLogin(cookiepath, username, password, debug=False):
    """Log in to hockeystreams.com and save the session cookie.

    Returns True when the login succeeded, False otherwise.
    """
    # Imported lazily: these modules only exist inside a Kodi/XBMC addon
    # environment; importing them at module level would make this module
    # unusable (and untestable) anywhere else.
    import xbmc
    import gethtml

    # make sure the directory that will hold the cookie file exists
    if not os.path.isdir(os.path.dirname(cookiepath)):
        try:
            os.makedirs(os.path.dirname(cookiepath))
        except OSError:
            print("hockeystreams: cant make cookiepath " + cookiepath)
            return False

    # delete any old version of the cookie file
    try:
        if os.path.isfile(cookiepath):
            os.remove(cookiepath)
    except OSError:
        print("hockeystreams: cant clear cookiepath file" + cookiepath)
        return False

    if not (username and password):
        return False

    # the url we will post the login form to
    login_url = 'http://www5.hockeystreams.com/verify/login'
    # the header used to pretend we are a browser
    header_string = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'

    # build the form data necessary for the login
    login_data = urllib.parse.urlencode({'username': username,
                                         'password': password,
                                         'submit': 'Sign In'})

    # build the request; POST bodies must be bytes under Python 3
    req = urllib.request.Request(login_url, login_data.encode('utf-8'))
    req.add_header('User-Agent', header_string)

    # cookie jar + opener so that the session cookie is captured
    cj = http.cookiejar.LWPCookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))

    # do the login, then persist the resulting cookie
    response = opener.open(req)
    if debug:
        print(response)
        print(response.read())
    response.close()
    cj.save(xbmc.translatePath(cookiepath))

    # re-fetch the front page with the session cookie and look for the
    # logged-in marker to decide whether the login actually worked
    url = "http://www.hockeystreams.com"
    page = gethtml.get(url, cj=cj, debug=debug)
    if debug:
        print("index + " + str(page.find('SIGN OUT')) + "/" + str(len(page)))
    return check_login(page, username)


# code to enable running the .py independent of the addon, for testing
if __name__ == "__main__":
    # Fill these in for a manual test run. (The original referenced
    # commented-out globals here and crashed with a NameError, and compared
    # them with `is ''` — an identity check against a literal.)
    myusername = ''
    mypassword = ''
    if not myusername or not mypassword:
        print('YOU HAVE NOT SET THE USERNAME OR PASSWORD!')
    else:
        logged_in = doLogin(os.getcwd(), myusername, mypassword)
        print('LOGGED IN:', logged_in)
jlongman/xbmc-hockeystreams-plugin
weblogin.py
Python
gpl-3.0
4,283
[ "VisIt" ]
63ba12abaaec2b75af97768eaaf9698f98d3ae6fa4cbff46f8f334eb30ecfd6e
#!/usr/bin/env python """ ir.py - parse c declarations (c) 2002, 2003, 2004, 2005 Simon Burton <simon@arrowtheory.com> Released under GNU LGPL license. version 0.xx """ import sys #import cPickle as pickle import pickle #from lexer import Lexer from parse_core import Symbols #, Parser import node as node_module import cparse import genpyx class Node(genpyx.Node, node_module.Node): """ tree structure """ def __init__( self, *args, **kw ): node_module.Node.__init__( self, *args, **kw ) self._marked = False def get_marked( self ): return self._marked def set_marked( self, marked ): # if marked: # print "MARK", self self._marked = marked marked = property( get_marked, set_marked ) # def __getstate__( self ): # return self.__class__, tuple( [ item.__getstate__() for item in self ] ) # def __setstate__( self, state ): # cls, states = state # states = list(states) # for idx, state in enumerate(states): # items[idx] = items[idx].__setstate__( def __getstate__(self): return str(self) def __setstate__(self, state): Node.__init__(self) self[:] = eval(state) # _unique_id = 0 # def get_unique_id(cls): # Node._unique_id += 1 # return Node._unique_id # get_unique_id = classmethod(get_unique_id) def __hash__( self ): return hash( tuple([hash(type(self))]+[hash(item) for item in self]) ) def clone(self): l = [] for item in self: if isinstance(item,Node): item = item.clone() l.append(item) return self.__class__(*l, **self.__dict__) def init_from( self, other ): # class method ? 
# Warning: shallow init self[:] = other self.__dict__.update( other.__dict__ ) return self # def is_struct(self): # for x in self: # if isinstance(x,Node): # if x.is_struct(): # return 1 # return 0 #def explain(self): #l = [] #for x in self: #if isinstance(x,Node): #l.append(x.explain()) #else: #l.append(str(x)) #return string.join(l," ") ##(self.__class__.__name__,string.join(l) ) def psource(self): if hasattr(self,'lines'): # print "# "+string.join(self.lines,"\n# ")+"\n" print "# "+"\n# ".join(self.lines)+"\n" def cstr(self,l=None): """ Build a list of tokens; return the joined tokens string """ if l is None: l = [] for x in self: if isinstance(x,Node): x.cstr(l) else: l.insert(0,str(x)+' ') s = ''.join(l) return s def ctype(self): # anon_clone " return clone of self without identifiers " #print "%s.ctype()"%self l=[] for x in self: if isinstance(x,Node): l.append(x.ctype()) else: l.append(x) #print "%s.__class__(*%s)"%(self,l) return self.__class__(*l, **self.__dict__) # XX **self.__dict__ ? def cbasetype(self): " return ctype with all TypeAlias's replaced " # WARNING: we cache results (so do not mutate self!!) l=[] for x in self: if isinstance(x,Node): l.append(x.cbasetype()) else: l.append(x) #print "%s.__class__(*%s)"%(self,l) return self.__class__(*l, **self.__dict__) # XX **self.__dict__ ? 
def signature( self, tank=None ): if tank is None: tank = {} for node in self.nodes(): if not tank.has_key( type(node) ): tank[ type(node) ] = {} type(node).tank = tank[type(node)] shape = tuple( [ type(_node).__name__ for _node in node ] ) if not tank[type(node)].has_key(shape): tank[type(node)][shape] = [] tank[type(node)][shape].append( node ) return tank def psig( self, tank=None ): if tank is None: tank = {} tank = self.signature(tank) for key in tank.keys(): print key.__name__ for shape in tank[key].keys(): print " ", shape # ################################################# class Named(genpyx.Named, Node): " has a .name property " def get_name(self): if self: assert type(self[0])==str return self[0] return None def set_name(self, name): if self: self[0] = name else: self.append(name) name = property(get_name,set_name) class BasicType(genpyx.BasicType, Named): "float double void char int" pass class Qualifier(genpyx.Qualifier, Named): "register signed unsigned short long const volatile inline" pass class StorageClass(genpyx.StorageClass, Named): "extern static auto" pass class Ellipses(genpyx.Ellipses, Named): "..." pass class GCCBuiltin(genpyx.GCCBuiltin, BasicType): "things with __builtin prefix" pass class Identifier(genpyx.Identifier, Named): """ shape = +( str, +ConstExpr ) """ #def explain(self): #if len(self)==1: #return "%s"%self.name #else: #return "%s initialized to %s"%(self.name, #Node(self[1]).explain()) # will handle Initializer # def ctype(self): # return self.__class__(*self[1:]) #.clone() ? # def get_name(self): # if self: # return self[0] # def set_name(self, name): # if self: # self[0] = name # else: # self.append(name) # name = property(get_name,set_name) def cstr(self,l=None): if l is None: l=[] if len(self)>1: assert len(self)==2 l.append( '%s = %s'%(self[0],self[1]) ) elif len(self)==1: l.append( str(self[0]) ) return " ".join(l) class TypeAlias(genpyx.TypeAlias, Named): """ typedefed things, eg. 
size_t """ def cbasetype( self ): node = self.typedef.cbasetype().get_rest() return node class Function(genpyx.Function, Node): """ """ #def explain(self): #if len(self): #return "function (%s), returning"%\ #", ".join( map(lambda x:x.explain(),self) ) #else: #return "function returning" def cstr(self,l): #print '%s.cstr(%s)'%(self,l) _l=[] assert len(self) i=0 while isinstance(self[i],Declarator): _l.append( self[i].cstr() ) i=i+1 l.append( '(%s)'% ', '.join(_l) ) while i<len(self): self[i].cstr(l) i=i+1 return " ".join(l) def return_type(self): node = self[-1] #assert isinstance(node,DeclarationSpecifiers) return Declarator( Identifier(), node ) ret = property(return_type) def get_args(self): args = [ arg for arg in self[:-1] if not arg.is_void() ] return args args = property(get_args) def arg_types(self): return [ AbstractDeclarator().init_from( arg.ctype() ) for arg in self[:-1]] def is_varargs(self): for node in self.nodes(): if isinstance(node,Ellipses) or 'va_list' in node: # print self, 'is_varargs' return True # print self, 'is_varargs' return False # return fn.deepfind(Ellipses) or fn.deepfind('va_list') def ctype(self): return Function(*self.arg_types()+[self[-1]]) # XX self[-1].ctype class Pointer(genpyx.Pointer, Node): """ """ def get_spec(self): if type(self[0])==TypeSpecifiers: # isinstance ?? 
return self[0] spec = property(get_spec) #def explain(self): #return "pointer to" def cstr(self,l): assert len(self) node=self[0] l.insert(0,'*') if isinstance(node,Function): l.insert(0,'(') l.append(')') elif isinstance(node,Array): l.insert(0,'(') l.append(')') return Node.cstr(self,l) class Array(genpyx.Array, Node): """ """ #def explain(self): #s='' #if len(self): #if type(self[0])==int: #s='0 to %s '%(self[0]-1) #return "array %sof"%s def has_size(self): try: int(self.size) return True except: return False def get_size(self): if type(self[-1])==str: try: return int(self[-1]) except: return self[-1] return self[-1] # None size = property(get_size) def get_spec(self): if type(self[0])==TypeSpecifiers: # isinstance ?? return self[0] spec = property(get_spec) def to_pointer(self): node = Pointer() node.init_from( self.clone() ) node.pop() # pop the size element return node def cstr(self,l): if self.size is None: l.append('[]') else: l.append('[%s]'%self.size) return Node( *self[:-1] ).cstr( l ) class Tag(genpyx.Tag, Named): " the tag of a Struct, Union or Enum " pass class Taged(genpyx.Taged, Node): "Struct, Union or Enum " def get_tag(self): if len(self): tag = self[0] assert type(tag)==Tag # isinstance ?? else: tag = None return tag def set_tag(self,tag): if len(self): self[0] = tag else: self.append(tag) tag = property( get_tag, set_tag ) def has_members(self): return len(self)>1 # more than just a tag def get_members(self): return self[1:] members = property(get_members) # fields ? def ctype(self): if not self.tag.name: #print "# WARNING : anonymous struct " # OK i think return self.clone() # self = self.clone() # return self[:1] # just the tag return self.__class__( self.tag, **self.__dict__ ) # just the Tag # return self.__class__( *self, **self.__dict__ ) def cbasetype(self): return self.ctype() # is this enough ??? # return Node.cbasetype(self) # XX lookup my tag if i am empty ..? 
class Compound(genpyx.Compound, Taged): "Struct or Union" def cstr(self,_l=None): assert isinstance( self[0], Tag ) tag='' if len(self[0]): tag=' '+self[0][0] if isinstance(self,Struct): l=[ 'struct%s '%tag ] elif isinstance(self,Union): l=[ 'union%s '%tag ] if len(self)>1: l.append(' { ') for decl in self[1:]: l.append( decl.cstr()+"; " ) l.append('} ') if _l is None: _l=[] while l: _l.insert( 0, l.pop() ) # XX empty struct with no tag -> "struct" XX return "".join( _l ) def ctype(self): tp = Taged.ctype(self) for i in range(1,len(tp)): tp[i] = StructDeclarator().init_from( tp[i] ) return tp class Struct(genpyx.Struct, Compound): """ """ pass class Union(genpyx.Union, Compound): """ """ pass class Enum(genpyx.Enum, Taged): """ """ def cstr(self,_l=None): assert isinstance( self[0], Tag ) tag='' if len(self[0]): tag=' '+self[0][0] l=[ 'enum%s '%tag ] if len(self)>1: l.append(' { ') for node in self[1:]: l.append( node.cstr()+', ' ) l.append('} ') if _l is None: _l=[] while l: _l.insert( 0, l.pop() ) return ''.join( _l ) class Declarator(genpyx.Declarator, Node): """ """ def __eq__(self,other): " unordered equality " # ordering sometimes gets lost when we do a cbasetype if not isinstance(other,Node): return False a, b = self[:], other[:] a.sort() b.sort() return a == b def __hash__( self ): hs = [hash(item) for item in self] hs.sort() return hash( tuple([hash(type(self))]+hs) ) def transform(self): return def get_identifier(self): if len(self)>1: return self[0] def set_identifier(self, identifier): if len(self)>1: self[0] = identifier else: self.insert(0,identifier) identifier = property(get_identifier,set_identifier) def get_spec(self): spec = self[-1] if type(spec)==TypeSpecifiers: # isinstance ?? 
return spec spec = property(get_spec) def get_type_alias(self): if self.spec: if isinstance(self.spec[0], TypeAlias): return self.spec[0] type_alias = property(get_type_alias) def get_tagged(self): if self.spec: return self.spec.tagged # i am a tagged tagged = property(get_tagged) def get_compound(self): if self.spec: return self.spec.compound # i am a compound compound = property(get_compound) def get_struct(self): if self.spec: return self.spec.struct # i am a struct struct = property(get_struct) def get_union(self): if self.spec: return self.spec.union # i am a union union = property(get_union) def get_enum(self): if self.spec: return self.spec.enum # i am an enum enum = property(get_enum) def get_function(self): if len(self)>1 and type(self[1])==Function: # isinstance ?? return self[1] function = property(get_function) def get_pointer(self): if len(self)>1 and type(self[1])==Pointer: # isinstance ?? return self[1] pointer = property(get_pointer) def get_array(self): if len(self)>1 and type(self[1])==Array: # isinstance ?? 
return self[1] array = property(get_array) def get_name(self): if self.identifier: return self.identifier.name def set_name(self, name): assert self.identifier is not None self.identifier.name = name name = property(get_name, set_name) def get_rest(self): # XX needs a better name if len(self)>1: return self[1] return self[0] def pointer_to( self ): " return Declarator pointing to self's type " decl = Declarator(Identifier(), Pointer(self.get_rest().clone())) return decl def deref( self ): " return (clone of) Declarator that self is pointing to " node = self.ctype() # clone pointer = node.pointer or node.array assert pointer, "cannot dereference non-pointer" node[1:2] = pointer return node def is_void(self): return self.spec and BasicType('void') in self.spec def is_pointer_to_fn(self): return self.pointer and self.deref().function def is_pointer_to_char(self): # return self.ctype() == TransUnit("char *a;").transform()[0].ctype() node = self.pointer or self.array if node: spec = node.spec if spec and BasicType('char') in spec and not BasicType('unsigned') in spec: return True return False def is_callback(self): " i am a pointer to a function whose last arg is void* " if self.is_pointer_to_fn(): fn = self.deref().function if fn.args: arg = fn.args[-1] if arg.pointer and arg.deref().is_void(): return True def is_complete( self, tag_lookup ): if self.tagged and self.tagged.tag.name in tag_lookup and not tag_lookup[self.tagged.tag.name].has_members(): return False return True def is_primative( self ): "i am a char,short,int,float,double... " spec = self.cbasetype().spec return spec and spec.find(BasicType) def is_pyxnative( self ): # pyrex handles char* too # but i don't know if we should make this the default # sometimes we want to send a NULL, so ... 
XXX self = self.cbasetype() if self.is_void(): return False if self.is_primative(): return True if self.enum: return True # pointer = None # if self.pointer: # pointer = self.pointer # elif self.array: # pointer = self.array # if pointer and pointer.spec: # spec = pointer.spec # if BasicType("char") in spec and not Qualifier("unsigned") in spec: # # char*, const char* ## print self.deepstr() # return True return False def cstr(self,l=None): return Node.cstr(self,l).strip() def ctype(self): decl=Declarator() decl.init_from( self.clone() ) decl.identifier = Identifier() for i in range(1,len(decl)): decl[i]=decl[i].ctype() return decl def cbasetype(self): # WARNING: we cache results (so do not mutate self!!) try: # this cache improves performance by 50% return self.__cbasetype.clone() except AttributeError: pass decl = self.ctype() # gets rid of Identifier names for i, node in enumerate(decl): decl[i] = decl[i].cbasetype() # return decl.get_rest() done = False while not done: done = True nodes = decl.deepfilter( TypeSpecifiers ) for node in nodes: if node.deepfind( TypeSpecifiers ) != node: # this node has another TypeSpecifier; decl.expose_node( node ) done = False break # start again... # each TypeSpecifier needs to absorb primitive siblings (StorageClass, BasicType etc.) nodes = decl.deepfilter( TypeSpecifiers ) for node in nodes: parent = decl.get_parent(node) i = 0 while i < len(parent): assert not type(parent[i]) in (TypeAlias, Enum, Struct, Union) if type(parent[i]) in (StorageClass, BasicType, Qualifier): node.append( parent.pop(i) ) else: i = i + 1 self.__cbasetype = decl.clone() return decl def invalidate(self): # flush cache, etc. 
try: del self.__cbasetype except AttributeError: pass def declare_str(self,name): " return c string declaring name with same type as self " tp = self.ctype() tp.name = name return tp.cstr()+";" class Typedef(genpyx.Typedef, Declarator): def cstr(self,l=None): return 'typedef ' + Declarator.cstr(self,l) #.strip() class AbstractDeclarator(genpyx.AbstractDeclarator, Declarator): """ used in Function; may lack an identifier """ #def cstr(self,l=None): #return Node.cstr(self,l) # def ctype(self): # # _type_ ignores the name of our identifier # return Node.ctype(self) class FieldLength(genpyx.FieldLength, Node): """ """ #def explain(self): #return "" def cstr(self,l): l.append(':%s'%self[0]) class StructDeclarator(genpyx.StructDeclarator, Declarator): # also used in Union """ """ #def explain(self): #flen = self.find(FieldLength) #if flen is not None: #i = self.index(flen) #self.pop(i) #s = Declarator.explain(self) #self.insert(i,flen) #width = flen[0] #if width > 0: #return s+" bitfield %s wide"%width #else: #return s+" alignment bitfield" #else: #return Declarator.explain(self) # def ctype(self): # return self def get_field_length(self): if len(self)>1 and isinstance( self[1], FieldLength ): return self[1] field_length = property(get_field_length) class DeclarationSpecifiers(genpyx.DeclarationSpecifiers, Node): #class TypeSpecifiers(Node): """ """ def __eq__(self,other): " unordered equality " if not isinstance(other,Node): return False a, b = self[:], other[:] a.sort() b.sort() return a == b def __hash__( self ): hs = [hash(item) for item in self] hs.sort() return hash( tuple([hash(type(self))]+hs) ) # def is_struct(self): # return self.find(Struct) is not None class TypeSpecifiers(genpyx.TypeSpecifiers, DeclarationSpecifiers): """ """ def get_tagged(self): if self and isinstance(self[0],Taged): return self[0] tagged = property(get_tagged) def get_compound(self): if self and isinstance(self[0],Compound): return self[0] compound = property(get_compound) def 
get_struct(self): if self and isinstance(self[0],Struct): return self[0] struct = property(get_struct) def get_union(self): if self and isinstance(self[0],Union): return self[0] union = property(get_union) def get_enum(self): if self and isinstance(self[0],Enum): return self[0] enum = property(get_enum) def cbasetype(self): node = Node.cbasetype(self) # node.expose( TypeSpecifiers ) # if node.deepfind(TypeSpecifiers) != node: return node class Initializer(genpyx.Initializer, Node): """ """ pass class Declaration(genpyx.Declaration, Node): """ """ def do_spec(self): " distribute DeclarationSpecifiers over each Declarator " spec=self[0] assert isinstance(spec,DeclarationSpecifiers), spec.deepstr() self.pop(0) for declarator in self: assert isinstance(declarator,Declarator) #if isinstance(declarator,DeclarationSpecifiers #huh? ##for node in spec: ##declarator.append(node.clone()) declarator.append(spec) def transform(self): # children go first for node in self.nodes(): if isinstance(node,Declaration): node.do_spec() node.file = self.file # overkill ? self.expose(Declaration) #def explain(self): #return string.join([x.explain() for x in self],", ") #return string.join(map(lambda x:x.explain(),self),", ") class ParameterDeclaration(genpyx.ParameterDeclaration, Declaration): """ """ pass class StructDeclaration(genpyx.StructDeclaration, Declaration): """ """ pass class TransUnit(genpyx.TransUnit, Node): """ Top level node. """ def __init__( self, item ): # XX __init__ uses different signature ! 
XX if type(item)==str: node = cparse.TransUnit() node.parse(item) else: node = item assert isinstance( node, cparse.TransUnit ), str(node) Node.__init__(self) self[:] = [ self.convert(child) for child in node ] self.__dict__.update( node.__dict__ ) assert "name" not in node.__dict__ self.syms = {} # map identifier names to their Declarator's self.typedefs = {} # map names to Typedef's self.tag_lookup = {} # map struct, union, enum tags to Taged's # XX should call transform here XX # print self.deepstr() def __getstate__( self ): nodes = tuple( [ repr(node) for node in self ] ) typedefs = tuple( [ (key,repr(val)) for key,val in self.typedefs.items() ] ) return nodes, typedefs def __setstate__( self, state ): Node.__init__(self) nodes, typedefs = state nodes = [ eval(node) for node in nodes ] self[:] = nodes typedefs = [ (key,eval(val)) for key,val in typedefs ] self.typedefs = dict(typedefs) def convert( self, node ): # name = node.__class__.__name__ # cls = globals()[ name ] cls = cls_lookup[ type(node) ] _node = cls() for child in node: if isinstance(child, node_module.Node): child = self.convert( child ) else: assert child is None or type(child) in (str, int), type(child) _node.append( child ) _node.__dict__.update( node.__dict__ ) return _node def strip(self,files): " leave only the declarations from <files> " i=0 while i<len(self): if self[i].file in files: i=i+1 else: self.pop(i) def mark(self,cb,verbose=False): " mark our child nodes such that cb(node).. mark dependants too. prune unmarked objects. 
" # mark the nodes: for node in self: node.marked = cb(self, node) if verbose and node.marked: print '1:', node.cstr() # propagate dependancy: i=len(self) while i: i-=1 # we go backwards for node in self[i].nodes(): # bottom-up search if verbose and self[i].marked and not node.marked: print '2:', str(node), '<--', self[i].cstr() node.marked = self[i].marked or node.marked if type(node)==TypeAlias: if verbose and node.marked and not node.typedef.marked: print '3:', node.typedef.cstr(), '<--', node.cstr() node.typedef.marked = node.typedef.marked or node.marked if isinstance(node, Taged): if node.tag.name in self.tag_lookup: _node = self.tag_lookup[ node.tag.name ] # look-up the def'n if verbose and node.marked and not _node.marked: print '4:', _node.cstr(), '<--', self[i].cstr() # _node.marked = _node.marked or self[i].marked _node.marked = _node.marked or node.marked # else: # # this guy has no tag # print "lost tag:", self[i].cstr() # XX struct defs acquire marks from members, but XX # XX ordinary definitions do not XX # if node.marked and not self[i].marked: # # one of my descendants is marked # if verbose: # print '5:', self[i].cstr(), '<--', node.cstr() # self[i].marked = True # if verbose: # for node in self: # print '-'*79 # if node.enum: # print str(node.marked) + ': ' + node.cstr() # prune: f = open(".tmp/pruned.txt","w") f.write("// This file autogenerated by '%s' .\n"%__file__) f.write("// List of functions pruned from parse tree, for various reasons.\n\n") i=0 while i<len(self): if not self[i].marked: if verbose: print 'pop:', self[i].cstr() f.write( self[i].cstr() + "\n" ) self.pop(i) # elif self[i].compound: # # XXXX for now, rip out all struct members XXXX # self[i].compound[1:] = [] # XX encapsulation # i = i + 1 else: i = i + 1 for key, value in self.syms.items(): if not value.marked: del self.syms[key] for key, value in self.typedefs.items(): if not value.marked: del self.typedefs[key] for key, value in self.tag_lookup.items(): if not value.marked: 
del self.tag_lookup[key] # sys.exit(1) def assert_no_dups(self): check={} for node in self.nodes(): assert not check.has_key(id(node)) check[id(node)]=1 def transform(self, verbose=False, test_parse=False, test_types=False ): i=0 while i < len(self): if verbose: print "##"*25 declaration=self[i] if verbose: declaration.psource() if verbose: print declaration.deepstr(),'\n' assert isinstance(declaration,Declaration) if verbose: print "# expose declarators from declaration" # STAGE 1 declaration.transform() if verbose: print declaration.deepstr(),'\n' self[i:i+1] = declaration # expose declarators from declaration for j in range(len(declaration)): declarator=self[i] assert isinstance(declarator,Declarator) if verbose: print "# declarator.transform()" # STAGE 2 declarator.transform() if verbose: print declarator.deepstr(),'\n' if verbose: print "# self.visit_declarator(declarator)" # STAGE 3 self[i] = declarator = self.visit_declarator(declarator) # STAGE 4 if declarator.name: if isinstance(declarator, Typedef): if verbose: print "# typedef %s" % declarator.name self.typedefs[ declarator.name ] = declarator else: if verbose: print "# sym %s" % declarator.name self.syms[ declarator.name ] = declarator for node in declarator.nodes(): if isinstance(node,Taged) and node.tag.name: assert type(node.tag.name)==str, node.deepstr() taged = self.tag_lookup.get( node.tag.name, None ) if taged is None: if verbose: print "# tag lookup %s = %s" % (declarator.name, node.tag.name) self.tag_lookup[ node.tag.name ] = node elif not taged.has_members(): # this is (maybe) the definition of this tag if verbose: print "# definition %s = %s" % (declarator.name, node.tag.name) self.tag_lookup[ node.tag.name ] = node # Annotate the TypeAlias's for node in declarator.deepfilter( TypeAlias ): name = node[0] assert type( name ) == str node.typedef = self.typedefs[ name ] if verbose: print declarator.deepstr(),'\n' #print declarator.ctype().deepstr(),'\n' #assert declarator.clone() == declarator 
################################################### # TESTS: if test_parse: # test that parse of cstr gives same answer cstr = declarator.cstr()+';\n' if verbose: print '# '+cstr.replace('\n','\n# ') #print if isinstance(declarator,Typedef): name = declarator[0][0] assert type(name)==str self.lexer.rmtypedef( name ) declaration = cparse.Declaration() self.lexer.lex( cstr ) #print self.lexer.err_string() declaration.parse( self.lexer, Symbols() ) # use new name-space #declaration.parse( Lexer( cstr ), Symbols() ) declaration = self.convert(declaration) declaration.transform() assert len(declaration)==1 decl=declaration[0] decl.transform() decl = self.visit_declarator(decl) if decl!=declarator: if verbose: print "#???????????" if verbose: print decl.deepstr(),'\n\n' #if verbose: print declaration.deepstr(),'\n\n' #assert 0 elif verbose: print '# OK\n' if test_types: node = declarator.ctype() declare_str= node.declare_str("my_name") if verbose: print "# declarator.ctype() " if verbose: print node.deepstr(),"\n" if verbose: print "#",declare_str.replace('\n','\n# '), '\n' i=i+1 return self def visit(self,node): #print 'visit(%s)'%node for _node in node: if isinstance(_node,Declarator): _node = self.visit_declarator(_node) # XX replace _node elif isinstance(_node,Node): _node = self.visit(_node) # XX replace _node return node def visit_declarator(self,decl): assert isinstance(decl,Declarator) # STAGE 3.a tp = decl.deepfind(Typedef) if tp is not None: decl.deeprm(tp) tp.init_from( decl ) # warning: shallow init decl = tp # STAGE 3.b i=len(decl) # accumulate nodes (they become the children of decl) children=[] while i: i=i-1 node=decl.pop(i) if isinstance(node,Declarator): node = self.visit_declarator(node) # replace node else: node = self.visit(node) # replace node if isinstance(node,Pointer): node+=children children=[node] elif isinstance(node,Function): node+=children children=[node] elif isinstance(node,Array): while children: node.insert(0,children.pop()) 
children=[node] # array size (if any) at end #elif isinstance(node,Identifier): #node+=children #children=[node] else: # accumulate children.insert(0,node) decl[:]=children return decl cstr = None ctype = None cbasetype = None # remap the global class definitions in genpyx to # point to the definitions in this module gbl = globals() for key, val in gbl.items(): if type(val)==type: if issubclass(val,Node): setattr( genpyx, key, val ) assert genpyx.Node == Node cls_lookup = { # Node : Node , cparse.BasicType : BasicType , cparse.Qualifier : Qualifier , cparse.StorageClass : StorageClass , cparse.Ellipses : Ellipses , cparse.GCCBuiltin : GCCBuiltin , cparse.Identifier : Identifier , cparse.TypeAlias : TypeAlias , cparse.Function : Function , cparse.Pointer : Pointer , cparse.Array : Array , cparse.Tag : Tag , cparse.Compound : Compound , cparse.Struct : Struct , cparse.Union : Union , cparse.Enum : Enum , cparse.Declarator : Declarator , cparse.Typedef : Typedef , cparse.AbstractDeclarator : AbstractDeclarator , cparse.FieldLength : FieldLength , cparse.StructDeclarator : StructDeclarator , cparse.DeclarationSpecifiers : TypeSpecifiers , cparse.TypeSpecifiers : TypeSpecifiers , cparse.Initializer : Initializer , cparse.Declaration : Declaration , cparse.ParameterDeclaration : ParameterDeclaration , cparse.StructDeclaration : StructDeclaration , cparse.TransUnit : TransUnit , }
jpflori/mpir
yasm/tools/python-yasm/pyxelator/ir.py
Python
gpl-3.0
35,563
[ "VisIt" ]
8aab32227dc40876799ebfc4b7261128b3dca2a5d5239f10c2bb74fcc7ac6f16
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2010 Brian G. Matherly # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ The User class provides basic interaction with the user. """ import sys from abc import ABCMeta, abstractmethod from contextlib import contextmanager class UserBase(metaclass=ABCMeta): """ This class provides a means to interact with the user in an abstract way. This class should be overridden by each respective user interface to provide the appropriate interaction (eg. dialogs for GTK, prompts for CLI). """ def __init__(self, callback=None, error=None, uistate=None, dbstate=None): self.callback_function = callback self.error_function = error self._fileout = sys.stderr # redirected to mocks by unit tests self.uistate = uistate self.dbstate = dbstate @abstractmethod def begin_progress(self, title, message, steps): """ Start showing a progress indicator to the user. Don't use this method directly, use progress instead. :param title: the title of the progress meter :type title: str :param message: the message associated with the progress meter :type message: str :param steps: the total number of steps for the progress meter. a value of 0 indicates that the ending is unknown and the meter should just show activity. 
:type steps: int :returns: none """ @abstractmethod def step_progress(self): """ Advance the progress meter. Don't use this method directly, use progress instead. """ def callback(self, percentage, text=None): """ Display the precentage. """ if self.callback_function: if text: self.callback_function(percentage, text) else: self.callback_function(percentage) else: self._default_callback(percentage, text) def _default_callback(self, percentage, text): if text is None: self._fileout.write("\r%02d%%" % percentage) else: self._fileout.write("\r%02d%% %s" % (percentage, text)) @abstractmethod def end_progress(self): """ Stop showing the progress indicator to the user. Don't use this method directly, use progress instead. """ # Context-manager wrapper of the begin/step/end_progress above @contextmanager def progress(self, *args, **kwargs): """ Preferred form of progress reporting. Parameters: same as for begin_progress. Usage example (see gramps/cli/test/user_test.py):: with self.user.progress("Foo", "Bar", 0) as step: for i in range(10): step() Ensures end_progress will be called even if an exception was thrown. """ self.begin_progress(*args, **kwargs) try: yield self.step_progress except: raise finally: self.end_progress() @abstractmethod def prompt(self, title, message, accept_label, reject_label, parent=None, default_label=None): """ Prompt the user with a message to select an alternative. :param title: the title of the question, e.g.: "Undo history warning" :type title: str :param message: the message, e.g.: "Proceeding with the tool will erase the undo history. If you think you may want to revert running this tool, please stop here and make a backup of the DB." 
:type question: str :param accept_label: what to call the positive choice, e.g.: "Proceed" :type accept_label: str :param reject_label: what to call the negative choice, e.g.: "Stop" :type reject_label: str :param default_label: the label of the default :type default_label: str or None :returns: the user's answer to the question :rtype: bool """ @abstractmethod def warn(self, title, warning=""): """ Warn the user. :param title: the title of the warning :type title: str :param warning: the warning :type warning: str :returns: none """ @abstractmethod def notify_error(self, title, error=""): """ Notify the user of an error. :param title: the title of the error :type title: str :param error: the error message :type error: str :returns: none """ @abstractmethod def notify_db_error(self, error): """ Notify the user of a DB error. :param error: the error message :type error: str :returns: none """ @abstractmethod def notify_db_repair(self, error): """ Notify the user their DB might need repair. :param error: the error message :type error: str :returns: none """ @abstractmethod def info(self, msg1, infotext, parent=None, monospaced=False): """ Displays information to the user """ class User(UserBase): """ An implementation of the :class:`.gen.user.UserBase` class which supresses output and accepts prompts. This is useful for unit tests. """ def __init__(self, callback=None, error=None, uistate=None, dbstate=None): UserBase.__init__(self, callback=self.__cb) def __cb(self, percent, text=None): return def begin_progress(self, title, message, steps): pass def step_progress(self): pass def end_progress(self): pass def prompt(self, title, message, accept_label, reject_label, parent=None, default_label=None): return True def warn(self, title, warning=""): pass def notify_error(self, title, error=""): pass def notify_db_error(self, error): pass def notify_db_repair(self, error): pass def info(self, msg1, infotext, parent=None, monospaced=False): pass
gramps-project/gramps
gramps/gen/user.py
Python
gpl-2.0
6,849
[ "Brian" ]
831f2c16a22262904030cc982a8bb396ceaf5db27f34da54485c6ffd9c0ea357
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START mod0_baseline] import os import webapp2 from google.appengine.ext import ndb from google.appengine.ext.webapp import template class Visit(ndb.Model): 'Visit entity registers visitor IP address & timestamp' visitor = ndb.StringProperty() timestamp = ndb.DateTimeProperty(auto_now_add=True) def store_visit(remote_addr, user_agent): 'create new Visit entity in Datastore' Visit(visitor='{}: {}'.format(remote_addr, user_agent)).put() def fetch_visits(limit): 'get most recent visits' return Visit.query().order(-Visit.timestamp).fetch(limit) class MainHandler(webapp2.RequestHandler): 'main application (GET) handler' def get(self): store_visit(self.request.remote_addr, self.request.user_agent) visits = fetch_visits(10) tmpl = os.path.join(os.path.dirname(__file__), 'index.html') self.response.out.write(template.render(tmpl, {'visits': visits})) app = webapp2.WSGIApplication([ ('/', MainHandler), ], debug=True) # [END mod0_baseline]
googlecodelabs/migrate-python2-appengine
mod0-baseline/main.py
Python
apache-2.0
1,599
[ "VisIt" ]
fce9d1eac95bf25f1814953ec357cbe26aafb43f67eeaeea80c15cba15fc3a97
# -*- coding: utf-8 -*- """Unit tests for IOs """ ################################################################## # Copyright 2018 Open Source Geospatial Foundation and others # # licensed under MIT, Please consult LICENSE.txt for details # ################################################################## from __future__ import absolute_import import requests import os import tempfile import datetime import unittest import json import base64 from pywps import Format from pywps.validator import get_validator from pywps import NAMESPACES from pywps.inout.basic import IOHandler, SOURCE_TYPE, SimpleHandler, BBoxInput, BBoxOutput, \ ComplexInput, ComplexOutput, LiteralOutput, LiteralInput, _is_textfile from pywps.inout import BoundingBoxInput as BoundingBoxInputXML from pywps.inout.literaltypes import convert, AllowedValue from pywps._compat import StringIO, text_type, urlparse from pywps.validator.base import emptyvalidator from pywps.exceptions import InvalidParameterValue from pywps.validator.mode import MODE from pywps.inout.basic import UOM from pywps.inout.storage import FileStorage from pywps._compat import PY2 from lxml import etree DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data') def get_data_format(mime_type): return Format(mime_type=mime_type, validate=get_validator(mime_type)) class IOHandlerTest(unittest.TestCase): """IOHandler test cases""" def setUp(self): tmp_dir = tempfile.mkdtemp() self.iohandler = IOHandler(workdir=tmp_dir) self._value = 'lalala' def tearDown(self): pass def test_basic_IOHandler(self): """Test basic IOHandler""" self.assertTrue(os.path.isdir(self.iohandler.workdir)) def test_validator(self): """Test available validation function """ self.assertEqual(self.iohandler.validator, emptyvalidator) def _test_outout(self, source_type, suffix=''): """Test all outputs""" self.assertEqual(source_type, self.iohandler.source_type, 'Source type properly set') self.assertEqual(self._value, self.iohandler.data, 'Data 
obtained') if self.iohandler.source_type == SOURCE_TYPE.URL: self.assertEqual('http', urlparse(self.iohandler.url).scheme) else: self.assertEqual('file', urlparse(self.iohandler.url).scheme) if self.iohandler.source_type == SOURCE_TYPE.STREAM: source = StringIO(text_type(self._value)) self.iohandler.stream = source file_path = self.iohandler.file self.assertTrue(file_path.endswith(suffix)) file_handler = open(file_path) self.assertEqual(self._value, file_handler.read(), 'File obtained') file_handler.close() if self.iohandler.source_type == SOURCE_TYPE.STREAM: source = StringIO(text_type(self._value)) self.iohandler.stream = source stream_val = self.iohandler.stream.read() self.iohandler.stream.close() if PY2 and isinstance(stream_val, str): self.assertEqual(self._value, stream_val.decode('utf-8'), 'Stream obtained') elif not PY2 and isinstance(stream_val, bytes): self.assertEqual(self._value, stream_val.decode('utf-8'), 'Stream obtained') else: self.assertEqual(self._value, stream_val, 'Stream obtained') if self.iohandler.source_type == SOURCE_TYPE.STREAM: source = StringIO(text_type(self._value)) self.iohandler.stream = source # self.assertEqual(stream_val, self.iohandler.memory_object, # 'Memory object obtained') def test_data(self): """Test data input IOHandler""" self.iohandler.data = self._value self.iohandler.data_format = Format('foo', extension='.foo') self._test_outout(SOURCE_TYPE.DATA, '.foo') def test_stream(self): """Test stream input IOHandler""" source = StringIO(text_type(self._value)) self.iohandler.stream = source self._test_outout(SOURCE_TYPE.STREAM) def test_file(self): """Test file input IOHandler""" (fd, tmp_file) = tempfile.mkstemp() source = tmp_file file_handler = open(tmp_file, 'w') file_handler.write(self._value) file_handler.close() self.iohandler.file = source self._test_outout(SOURCE_TYPE.FILE) def test_url(self): wfsResource = 'http://demo.mapserver.org/cgi-bin/wfs?' 
\ 'service=WFS&version=1.1.0&' \ 'request=GetFeature&' \ 'typename=continents&maxfeatures=2' self._value = requests.get(wfsResource).text self.iohandler.url = wfsResource self._test_outout(SOURCE_TYPE.URL) def test_workdir(self): """Test workdir""" workdir = tempfile.mkdtemp() self.iohandler.workdir = workdir self.assertTrue(os.path.isdir(self.iohandler.workdir)) # make another workdir = tempfile.mkdtemp() self.iohandler.workdir = workdir self.assertTrue(os.path.isdir(self.iohandler.workdir)) def test_memory(self): """Test data input IOHandler""" self.skipTest('Memory object not implemented') def test_data_bytes(self): self._value = b'aa' self.iohandler.data = self._value self.assertEqual(self.iohandler.source_type, SOURCE_TYPE.DATA, 'Source type properly set') # test the data handle self.assertEqual(self._value, self.iohandler.data, 'Data obtained') # test the file handle file_handler = open(self.iohandler.file, 'rb') self.assertEqual(self._value, file_handler.read(), 'File obtained') file_handler.close() # test the stream handle stream_data = self.iohandler.stream.read() self.iohandler.stream.close() self.assertEqual(self._value, stream_data, 'Stream obtained') def test_is_textfile(self): geotiff = os.path.join(DATA_DIR, 'geotiff', 'dem.tiff') self.assertFalse(_is_textfile(geotiff)) gml = os.path.join(DATA_DIR, 'gml', 'point.gml') self.assertTrue(_is_textfile(gml)) geojson = os.path.join(DATA_DIR, 'json', 'point.geojson') self.assertTrue(_is_textfile(geojson)) class ComplexInputTest(unittest.TestCase): """ComplexInput test cases""" def setUp(self): self.tmp_dir = tempfile.mkdtemp() data_format = get_data_format('application/json') self.complex_in = ComplexInput(identifier="complexinput", title='MyComplex', abstract='My complex input', keywords=['kw1', 'kw2'], workdir=self.tmp_dir, supported_formats=[data_format]) self.complex_in.data = "Hallo world!" 
def test_validator(self): self.assertEqual(self.complex_in.data_format.validate, get_validator('application/json')) self.assertEqual(self.complex_in.validator, get_validator('application/json')) frmt = get_data_format('application/json') def my_validate(): return True frmt.validate = my_validate self.assertNotEqual(self.complex_in.validator, frmt.validate) def test_contruct(self): self.assertIsInstance(self.complex_in, ComplexInput) def test_data_format(self): self.assertIsInstance(self.complex_in.supported_formats[0], Format) def test_json_out(self): self.skipTest('json property now in pywps.inout.inputs.ComplexInput') out = self.complex_in.json self.assertEqual(out['workdir'], self.tmp_dir, 'Workdir defined') self.assertTrue(out['file'], 'There is no file') self.assertTrue(out['supported_formats'], 'There are some formats') self.assertEqual(len(out['supported_formats']), 1, 'There is one formats') self.assertEqual(out['title'], 'MyComplex', 'Title not set but existing') self.assertEqual(out['abstract'], 'My complex input', 'Abstract not set but existing') self.assertEqual(out['keywords'], ['kw1', 'kw2'], 'Keywords not set but existing') self.assertEqual(out['identifier'], 'complexinput', 'identifier set') self.assertEqual(out['type'], 'complex', 'it is complex input') self.assertTrue(out['data_format'], 'data_format set') self.assertEqual(out['data_format']['mime_type'], 'application/json', 'data_format set') class DodsComplexInputTest(unittest.TestCase): """ComplexInput test cases""" def setUp(self): self.tmp_dir = tempfile.mkdtemp() data_format = get_data_format('application/x-ogc-dods') self.complex_in = ComplexInput(identifier="complexinput", title='MyComplex', abstract='My complex input', keywords=['kw1', 'kw2'], workdir=self.tmp_dir, data_format=data_format, supported_formats=[data_format, get_data_format('application/x-netcdf')]) self.complex_in.href = "http://test.opendap.org:80/opendap/netcdf/examples/sresa1b_ncar_ccsm3_0_run1_200001.nc" def 
test_validator(self): self.assertEqual(self.complex_in.data_format.validate, get_validator('application/x-ogc-dods')) self.assertEqual(self.complex_in.validator, get_validator('application/x-ogc-dods')) frmt = get_data_format('application/x-ogc-dods') def my_validate(): return True frmt.validate = my_validate self.assertNotEqual(self.complex_in.validator, frmt.validate) def test_contruct(self): self.assertIsInstance(self.complex_in, ComplexInput) class ComplexOutputTest(unittest.TestCase): """ComplexOutput test cases""" def setUp(self): tmp_dir = tempfile.mkdtemp() data_format = get_data_format('application/json') self.complex_out = ComplexOutput(identifier="complexinput", workdir=tmp_dir, data_format=data_format, supported_formats=[data_format], mode=MODE.NONE) self.complex_out_nc = ComplexOutput(identifier="netcdf", workdir=tmp_dir, data_format=get_data_format('application/x-netcdf'), supported_formats=[get_data_format('application/x-netcdf')], mode=MODE.NONE) self.data = json.dumps({'a': 1, 'unicodé': u'éîïç', }) self.ncfile = os.path.join(DATA_DIR, 'netcdf', 'time.nc') self.test_fn = os.path.join(self.complex_out.workdir, 'test.json') with open(self.test_fn, 'w') as f: f.write(self.data) def test_contruct(self): self.assertIsInstance(self.complex_out, ComplexOutput) def test_data_format(self): self.assertIsInstance(self.complex_out.data_format, Format) def test_storage(self): class Storage(object): pass storage = Storage() self.complex_out.store = storage self.assertEqual(self.complex_out.store, storage) def test_validator(self): self.assertEqual(self.complex_out.validator, get_validator('application/json')) self.assertEqual(self.complex_out_nc.validator, get_validator('application/x-netcdf')) def test_file_handler(self): self.complex_out.file = self.test_fn self.assertEqual(self.complex_out.data, self.data) if PY2: self.assertEqual(self.complex_out.stream.read(), self.data) else: with self.complex_out.stream as s: self.assertEqual(s.read(), bytes(self.data, 
encoding='utf8')) with open(urlparse(self.complex_out.url).path) as f: self.assertEqual(f.read(), self.data) def test_file_handler_netcdf(self): self.complex_out_nc.file = self.ncfile data = self.complex_out_nc.base64 def test_data_handler(self): self.complex_out.data = self.data with open(self.complex_out.file) as f: self.assertEqual(f.read(), self.data) def test_base64(self): self.complex_out.data = self.data b = self.complex_out.base64 if PY2: self.assertEqual(base64.b64decode(b), self.data) else: self.assertEqual(base64.b64decode(b).decode(), self.data) def test_url_handler(self): wfsResource = 'http://demo.mapserver.org/cgi-bin/wfs?' \ 'service=WFS&version=1.1.0&' \ 'request=GetFeature&' \ 'typename=continents&maxfeatures=2' self.complex_out.url = wfsResource storage = FileStorage() self.complex_out.storage = storage url = self.complex_out.get_url() self.assertEqual('file', urlparse(url).scheme) class SimpleHandlerTest(unittest.TestCase): """SimpleHandler test cases""" def setUp(self): data_type = 'integer' self.simple_handler = SimpleHandler(data_type=data_type) def test_contruct(self): self.assertIsInstance(self.simple_handler, SimpleHandler) def test_data_type(self): self.assertEqual(convert(self.simple_handler.data_type, '1'), 1) class LiteralInputTest(unittest.TestCase): """LiteralInput test cases""" def setUp(self): self.literal_input = LiteralInput( identifier="literalinput", mode=2, allowed_values=(1, 2, (3, 3, 12)), default=6, uoms=(UOM("metre"),)) def test_contruct(self): self.assertIsInstance(self.literal_input, LiteralInput) self.assertEqual(len(self.literal_input.allowed_values), 3) self.assertIsInstance(self.literal_input.allowed_values[0], AllowedValue) self.assertIsInstance(self.literal_input.allowed_values[2], AllowedValue) self.assertEqual(self.literal_input.allowed_values[2].spacing, 3) self.assertEqual(self.literal_input.allowed_values[2].minval, 3) self.assertEqual(self.literal_input.data, 6, "Default value set to 6") def test_valid(self): 
self.assertEqual(self.literal_input.data, 6) self.literal_input.data = 1 self.assertEqual(self.literal_input.data, 1) with self.assertRaises(InvalidParameterValue): self.literal_input.data = 5 with self.assertRaises(InvalidParameterValue): self.literal_input.data = "a" with self.assertRaises(InvalidParameterValue): self.literal_input.data = 15 self.literal_input.data = 6 self.assertEqual(self.literal_input.data, 6) def test_json_out(self): self.literal_input.data = 9 out = self.literal_input.json self.assertTrue('uoms' in out, 'UOMs does not exist') self.assertTrue('uom' in out, 'uom exists') self.assertFalse(out['workdir'], 'Workdir exist') self.assertEqual(out['data_type'], 'integer', 'Data type is integer') self.assertFalse(out['abstract'], 'abstract exist') self.assertFalse(out['keywords'], 'keywords exist') self.assertFalse(out['title'], 'title exist') self.assertEqual(out['data'], 9, 'data set') self.assertEqual(out['mode'], MODE.STRICT, 'Mode set') self.assertEqual(out['identifier'], 'literalinput', 'identifier set') self.assertEqual(out['type'], 'literal', 'it\'s literal input') self.assertEqual(len(out['allowed_values']), 3, '3 allowed values') self.assertEqual(out['allowed_values'][0]['value'], 1, 'allowed value 1') def test_json_out_datetime(self): inpt = LiteralInput( identifier="datetime", mode=2, data_type='dateTime') inpt.data = "2017-04-20T12:30:00" out = inpt.json self.assertEqual(out['data'], datetime.datetime(2017, 4, 20, 12, 30, 0), 'datetime set') def test_json_out_time(self): inpt = LiteralInput( identifier="time", mode=2, data_type='time') inpt.data = "12:30:00" out = inpt.json self.assertEqual(out['data'], datetime.time(12, 30, 0), 'time set') def test_json_out_date(self): inpt = LiteralInput( identifier="date", mode=2, data_type='date') inpt.data = "2017-04-20" out = inpt.json self.assertEqual(out['data'], datetime.date(2017, 4, 20), 'date set') class LiteralOutputTest(unittest.TestCase): """LiteralOutput test cases""" def setUp(self): 
self.literal_output = LiteralOutput("literaloutput", data_type="integer") def test_contruct(self): self.assertIsInstance(self.literal_output, LiteralOutput) def test_storage(self): class Storage(object): pass storage = Storage() self.literal_output.store = storage self.assertEqual(self.literal_output.store, storage) class BoxInputTest(unittest.TestCase): """BBoxInput test cases""" def setUp(self): self.bbox_input = BBoxInput("bboxinput", dimensions=2) self.bbox_input.ll = [0, 1] self.bbox_input.ur = [2, 4] def test_contruct(self): self.assertIsInstance(self.bbox_input, BBoxInput) def test_json_out(self): out = self.bbox_input.json self.assertTrue(out['identifier'], 'identifier exists') self.assertFalse(out['title'], 'title exists') self.assertFalse(out['abstract'], 'abstract set') self.assertEqual(out['type'], 'bbox', 'type set') self.assertTupleEqual(out['bbox'], ([0, 1], [2, 4]), 'data are tehre') self.assertEqual(out['dimensions'], 2, 'Dimensions set') class BoxOutputTest(unittest.TestCase): """BoundingBoxOutput test cases""" def setUp(self): self.bbox_out = BBoxOutput("bboxoutput") def test_contruct(self): self.assertIsInstance(self.bbox_out, BBoxOutput) def test_storage(self): class Storage(object): pass storage = Storage() self.bbox_out.store = storage self.assertEqual(self.bbox_out.store, storage) def load_tests(loader=None, tests=None, pattern=None): if not loader: loader = unittest.TestLoader() suite_list = [ loader.loadTestsFromTestCase(IOHandlerTest), loader.loadTestsFromTestCase(ComplexInputTest), loader.loadTestsFromTestCase(DodsComplexInputTest), loader.loadTestsFromTestCase(ComplexOutputTest), loader.loadTestsFromTestCase(SimpleHandlerTest), loader.loadTestsFromTestCase(LiteralInputTest), loader.loadTestsFromTestCase(LiteralOutputTest), loader.loadTestsFromTestCase(BoxInputTest), loader.loadTestsFromTestCase(BoxOutputTest) ] return unittest.TestSuite(suite_list)
tomkralidis/pywps
tests/test_inout.py
Python
mit
19,534
[ "NetCDF" ]
accff82708850719b8ad05258d27d8a65a1519cf600b2d922127e6fbac2ab75e
import numpy as np import scipy as sci from scipy.io import loadmat from itertools import chain,izip_longest from numpy.random import rand from scipy.stats import scoreatpercentile from matplotlib.pyplot import * import matplotlib from collections import defaultdict from zlib import compress matplotlib.rcParams['xtick.direction'] = 'out' matplotlib.rcParams['ytick.direction'] = 'out' #Smooth from Scipy Cookbook import numpy def LZC(data): #Data must be zscored for this and the periodic trend removed; otherwise measurement confounded return len(data)/float(len(compress(data))) def grouper(iterable, n, fillvalue=None): args = [iter(iterable)] * n return izip_longest(*args, fillvalue=fillvalue) def size(nestedList): rowcount = 0 colcount = [] for row,column in enumerate(nestedList): rowcount += 1 colcount.append(len(column)) return (rowcount,tuple(colcount)) def xcorr(first,second,dt=0.001,window=0.2,savename=None): length = min(len(first),len(second)) diffs = array([first[i]-second[j] for i in xrange(length) for j in xrange(length)]) bins = arange(-window,window,dt) ''' counts = [] for i in bins: print (i-.5)*dt,(i+.5)*dt counts.append(sum((diffs>((i-.5)*dt)) & (diffs<((i+.5)*dt)))) ''' counts = bincount(digitize(diffs,bins)) #counts -= len(diffs)*dt/float(max(first[-1],second[-1])) #counts /= float(length) if savename: cPickle.dump((bins,counts),open(savename,'wb')) return (bins[1:],counts[1:-1]) def delete(nestedList, indices): for index in indices: del nestedList[index] return nestedList def prints(toPrint, isIndex): if type(toPrint) is list: if isIndex: for key,value in enumerate(toPrint): print key, ' : ', value elif not isIndex: for _,value in enumerate(toPrint): print value else: print 'Argument type not recognized.' 
def spiketime2binary(spikeTimes, binSize=0.001): if len(spikeTimes) is 0: return -1 spikeTimes = np.around(spikeTimes/binSize).astype(int) answer = np.zeros((max(spikeTimes),), dtype=int) answer[spikeTimes-1]=1 return answer def binary2spiketime(spikeTimes,binSize=0.001,neuron_census=0): if len(spikeTimes) is 0: return -1 else: #return np.array([[timepoint for neuron,timepoint in np.where(spikeTimes==1)]]) neuron_count = spikeTimes.shape[0] if neuron_census==0 else neuron_census if neuron_count == 1: return np.squeeze(np.array(np.where(spikeTimes==1))) else: answer= [[] for neuron in range(neuron_count)] for neuron,spikeTime in np.transpose(np.array(np.where(spikeTimes==1))): answer[neuron].append(spikeTime) return answer def flatten(aList): return [item for sublist in aList for item in sublist] def toString(spikeTrain): return ''.join([str(spike) for spike in spikeTrain]) def fromFileRasters(filename): data = loadmat(filename) def makeKeysStrings(data): for key,_ in data.iteritems(): data[str(key)]=data.pop(key) return data def raster_plot(spiketimes): trains = map(spiketime2binary,spiketimes) cutoff = map(min,trains) trains = array([train[:cutoff] for train in trains]) fig = figure() ax = fig.add_subplot(111) ax.imshow(self.images, aspect='auto', cmap=cm.bone_r, interpolation='nearest') ax.set_bgcolor('none') adjust_spines(ax,['bottom','left']) ax.set_xlabel('Time (ms)') ax.set_ylabel('Neuron') return fig def toArray(dictionary): rowCount =max(dictionary)+1 colCount=max(max(x) for x in dictionary.values())+1 for rowIndex,row in dictionary.iteritems(): for colIndex, value in row.iteritems(): answer[rowIndex,colIndex] = value return answer def iqr(data): return scoreatpercentile(data,75)-scoreatpercentile(data,25) def sequences(length, alphabet): return [''.join(sequence) for sequence in product(alphabet,repeat=length)] def allSequences(maxLength,alphabet): return [sequences(length,alphabet) for length in range(1,maxLength+1)] def permutations(iterable, r=None): # 
permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC # permutations(range(3)) --> 012 021 102 120 201 210 pool = tuple(iterable) n = len(pool) r = n if r is None else r if r > n: return indices = range(n) cycles = range(n, n-r, -1) yield tuple(pool[i] for i in indices[:r]) while n: for i in reversed(range(r)): cycles[i] -= 1 if cycles[i] == 0: indices[i:] = indices[i+1:] + indices[i:i+1] cycles[i] = n - i else: j = cycles[i] indices[i], indices[-j] = indices[-j], indices[i] yield tuple(pool[i] for i in indices[:r]) break else: return def product(*args, **kwds): # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111 pools = map(tuple, args) * kwds.get('repeat', 1) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] for prod in result: yield tuple(prod) def combinations(iterable, r): pool = tuple(iterable) n = len(pool) for indices in permutations(range(n), r): if sorted(indices) == list(indices): yield tuple(pool[i] for i in indices) def make_step_rewards(step_length,padding_length): padding = tile(array([0]),(1,padding_length)) return hstack((padding,tile(array([1]),(1, step_length)),padding)) def sigmoid(arg): return 1/1+exp(-arg) def activation_function(associability, predicted_reward,actual_reward, global_inhibition,global_excitation): devRate = 0.2 decayDevRate = 0.5 maxDev = 0.4 delta = associability*(actual_reward-predicted_reward)+global_excitation+global_inhibition + (isDruggie)*((1-decayDevRate)*devRate + decayDevRate*maxDev) return 1/(1+exp(delta-1)) def powerset(iterable): "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" s = list(iterable) return from_iterable(combinations(s, r) for r in range(len(s)+1)) def kleene(alphabet, baseAlphabet, seqLength): if seqLength is None: return alphabet else: return kleene(alphabet.extend(concatenate(alphabet, baseAlphabet)),seqLength-1) def test(): base = 'abc' test = permutations(list('abc'),2) test 
= [''.join(item) for item in test] test = [[item+thing for thing in base] for item in test] test = flatten(test) print test def conditionalProbability(past, future, data): return data.count(past+future)/float(data.count(future)) def bin(n): '''convert denary integer n to binary string bStr''' bStr = '' if n < 0: raise ValueError, "must be a positive integer" if n == 0: return '0' while n > 0: bStr = str(n % 2) + bStr n = n >> 1 return bStr def ternary(ifTrue,ifFalse,condition): if condition: return ifTrue else: return ifFalse def flatten(aList): if type(aList) is list: return [item for sublist in aList for item in sublist] else: print 'Cannot flatten type: ', type(aList) def group2(iterator, count): return itertools.imap(None, *([ iter(iterator) ] * count)) def evenProcess(dataPoints, isNoise = False): #generate sample data for CSSR stateSequence = '' for step in range(dataPoints): stateSequence += ternary('A','BA',rand()>0.5) observationSequence = '' for state in stateSequence: if state is 'A' : observationSequence += ternary('1','0',rand()>(0.5 + isNoise*0.1*(rand()-.5))) elif state is 'B' : observationSequence += '1' else: print 'System in unknown state ', state return observationSequence def nCorr(one,two): print 'One : ',one one /= sqrt(dot(one,one.conj())) print 'One normalized : ',one two /= sqrt(dot(two,two.conj())) return dot(one,two) def combine(aList): answer = defaultdict(list) for key, value in aList: answer[key].append(value) return answer.items() def STA(stimulus, response, maxDelay, isPlot = False): #assume each row of response is a trial estimate= array([[nCorr(stimulus[0:-delay],row[delay:]) for delay in range(1,maxDelay+1)] for row in response]) answer= [(iqr(datum), median(datum)) for datum in transpose(estimate)] if isPlot: figure() errorbar(range(1,maxDelay+1), zip(*answer)[1], yerr=0.5*array(zip(*answer)[0]),fmt='--o') #have to plot backwards in time ax = gca() xlim((0, maxDelay+1)) _,xMax = xlim() _,yMax = ylim() ylim(ymax=1.2*yMax) 
xticks(arange(xMax),[str(x) for x in range(int(xMax))]) ax.set_xlim(ax.get_xlim()[::-1]) xlabel('Time Before Spike (ms)') ylabel('Correlation') title('Spike Triggered Average') show() return answer if __name__=='__main__': dataPoints = 100 print evenProcess(dataPoints)
mac389/brainpy
lib/neuroTools.py
Python
gpl-3.0
8,658
[ "NEURON" ]
3f3916240b9ab044fdd337705aeb54e610f064f5bfdf4e4c3cb1782cb126ef3b
import numpy as np import pytest from numpy import linalg import numpy.testing as npt import itertools from utils import get_rstate, get_printing import dynesty # noqa from dynesty import utils as dyfunc # noqa """ Run a series of basic tests to check whether anything huge is broken. """ nlive = 500 printing = get_printing() def bootstrap_tol(results, rstate): """ Compute the uncertainty of means/covs by doing bootstrapping """ n = len(results.logz) niter = 50 pos = results.samples wts = np.exp(results.logwt - results.logz[-1]) means = [] covs = [] for i in range(niter): # curpos = dyfunc.resample_equal(pos, wts) # xid = np.random.randint(len(curpos), size=len(curpos)) sub = rstate.uniform(size=n) < wts / wts.max() ind0 = np.nonzero(sub)[0] ind1 = rstate.choice(ind0, size=len(ind0), replace=True) mean = pos[ind1].mean(axis=0) cov = np.cov(pos[ind1].T) means.append(mean) covs.append(cov) return np.std(means, axis=0), np.std(covs, axis=0) def check_results(results, mean_truth, cov_truth, logz_truth, mean_tol, cov_tol, logz_tol, sig=5): """ Check if means and covariances match match expectations within the tolerances """ pos = results.samples wts = np.exp(results.logwt - results.logz[-1]) mean, cov = dyfunc.mean_and_cov(pos, wts) logz = results.logz[-1] npt.assert_array_less(np.abs(mean - mean_truth), sig * mean_tol) npt.assert_array_less(np.abs(cov - cov_truth), sig * cov_tol) npt.assert_array_less(np.abs((logz_truth - logz)), sig * logz_tol) # GAUSSIAN TEST class Gaussian: def __init__(self, corr=.95, prior_win=10): self.ndim = 3 self.mean = np.linspace(-1, 1, self.ndim) self.cov = np.identity(self.ndim) # set covariance to identity matrix self.cov[self.cov == 0] = corr # set off-diagonal terms (strongly correlated) self.cov_inv = linalg.inv(self.cov) # precision matrix self.lnorm = -0.5 * (np.log(2 * np.pi) * self.ndim + np.log(linalg.det(self.cov))) self.prior_win = prior_win # +/- on both sides self.logz_truth = self.ndim * (-np.log(2 * self.prior_win)) # 3-D 
correlated multivariate normal log-likelihood def loglikelihood(self, x): """Multivariate normal log-likelihood.""" return -0.5 * np.dot( (x - self.mean), np.dot(self.cov_inv, (x - self.mean))) + self.lnorm # prior transform def prior_transform(self, u): """Flat prior between -10. and 10.""" return self.prior_win * (2. * u - 1.) # gradient (no jacobian) def grad_x(self, x): """Multivariate normal log-likelihood gradient.""" return -np.dot(self.cov_inv, (x - self.mean)) # gradient (with jacobian) def grad_u(self, x): """Multivariate normal log-likelihood gradient.""" return -np.dot(self.cov_inv, x - self.mean) * 2 * self.prior_win def check_results_gau(results, g, rstate, sig=5, logz_tol=None): if logz_tol is None: logz_tol = sig * results.logzerr[-1] mean_tol, cov_tol = bootstrap_tol(results, rstate) # just check that resample_equal works dyfunc.resample_equal(results.samples, np.exp(results.logwt - results.logz[-1])) check_results(results, g.mean, g.cov, g.logz_truth, mean_tol, cov_tol, logz_tol, sig=sig) def test_gaussian(): sig = 5 rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, rstate=rstate) sampler.run_nested(print_progress=printing) # check that jitter/resample/simulate_run work # for not dynamic sampler dyfunc.jitter_run(sampler.results, rstate=rstate) dyfunc.resample_run(sampler.results, rstate=rstate) dyfunc.simulate_run(sampler.results, rstate=rstate) # add samples # check continuation behavior sampler.run_nested(dlogz=0.1, print_progress=printing) # get errors nerr = 3 result_list = [] for i in range(nerr): sampler.reset() sampler.run_nested(print_progress=False) results = sampler.results result_list.append(results) pos = results.samples wts = np.exp(results.logwt - results.logz[-1]) mean, cov = dyfunc.mean_and_cov(pos, wts) logz = results.logz[-1] assert (np.abs(logz - g.logz_truth) < sig * results.logzerr[-1]) res_comb = dyfunc.merge_runs(result_list) assert 
(np.abs(res_comb.logz[-1] - g.logz_truth) < sig * results.logzerr[-1]) # check summary res = sampler.results res.summary() # try all combinations excepte none/unif @pytest.mark.parametrize( "bound,sample", list( itertools.product(['single', 'multi', 'balls', 'cubes', 'none'], ['unif', 'rwalk', 'slice', 'rslice']))) def test_bounding_sample(bound, sample): # check various bounding methods rstate = get_rstate() if bound == 'none': if sample != 'unif': g = Gaussian(0.1) else: g = Gaussian(corr=0., prior_win=3) # make live easy if bound is none else: g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, bound=bound, sample=sample, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) @pytest.mark.parametrize("bound,sample", itertools.product( ['single', 'multi', 'balls', 'cubes'], ['unif'])) def test_bounding_bootstrap(bound, sample): # check various bounding methods rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, bound=bound, sample=sample, bootstrap=5, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) # extra checks for gradients def test_slice_nograd(): rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, sample='hslice', rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) def test_slice_grad(): rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, sample='hslice', gradient=g.grad_x, compute_jac=True, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) def test_slice_grad1(): rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, 
nlive=nlive, sample='hslice', gradient=g.grad_u, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) def test_dynamic(): # check dynamic nested sampling behavior rstate = get_rstate() g = Gaussian() dsampler = dynesty.DynamicNestedSampler(g.loglikelihood, g.prior_transform, g.ndim, rstate=rstate) dsampler.run_nested(print_progress=printing) # chechk explicit adding batches dsampler.add_batch(mode='auto') dsampler.add_batch(mode='weight') dsampler.add_batch(mode='full') dsampler.add_batch(logl_bounds=(-10, 0), mode='manual') dsampler.add_batch(logl_bounds=(-10000000, -1000), mode='manual') check_results_gau(dsampler.results, g, rstate) # check error analysis functions dres = dyfunc.jitter_run(dsampler.results, rstate=rstate) check_results_gau(dres, g, rstate) dres = dyfunc.resample_run(dsampler.results, rstate=rstate) check_results_gau(dres, g, rstate) dres = dyfunc.simulate_run(dsampler.results, rstate=rstate) check_results_gau(dres, g, rstate) dyfunc.kld_error(dsampler.results, rstate=rstate) def test_ravel_unravel(): """ Here I test that ravel/unravel preserves things correctly """ rstate = get_rstate() g = Gaussian() dsampler = dynesty.DynamicNestedSampler(g.loglikelihood, g.prior_transform, g.ndim, bound='single', sample='unif', rstate=rstate, nlive=nlive) maxiter = 1800 dsampler.run_nested(maxiter=maxiter, use_stop=False, nlive_batch=100) dres = dsampler.results dres_list = dyfunc.unravel_run(dres) dres_merge = dyfunc.merge_runs(dres_list) assert np.abs(dres.logz[-1] - dres_merge.logz[-1]) < 0.01
joshspeagle/dynesty
tests/test_gau.py
Python
mit
10,528
[ "Gaussian" ]
cd49e5c2a2b65ba7c7acc9164756ef4db63314fb1ffaaec82089f7b10d169ff8
def gaussianQuadrature(function, a, b): """ Perform a Gaussian quadrature approximation of the integral of a function from a to b. """ # Coefficient values can be found at pomax.github.io/bezierinfo/legendre-gauss.html A = (b - a)/2 B = (b + a)/2 return A * ( 0.2955242247147529*function(-A*0.1488743389816312 + B) + \ 0.2955242247147529*function(+A*0.1488743389816312 + B) + \ 0.2692667193099963*function(-A*0.4333953941292472 + B) + \ 0.2692667193099963*function(+A*0.4333953941292472 + B) + \ 0.2190863625159820*function(-A*0.6794095682990244 + B) + \ 0.2190863625159820*function(+A*0.6794095682990244 + B) + \ 0.1494513491505806*function(-A*0.8650633666889845 + B) + \ 0.1494513491505806*function(+A*0.8650633666889845 + B) + \ 0.0666713443086881*function(-A*0.9739065285171717 + B) + \ 0.0666713443086881*function(+A*0.9739065285171717 + B) )
FCDM/py-dml
dml/maths/gquad.py
Python
mit
877
[ "Gaussian" ]
e704acdc1369cf4335fae0e34706f4b4eed62073dc1aa5c4f79e7242a0f54c07
from test import test_support import unittest import codecs import StringIO class Queue(object): """ queue: write bytes at one end, read bytes from the other end """ def __init__(self): self._buffer = "" def write(self, chars): self._buffer += chars def read(self, size=-1): if size<0: s = self._buffer self._buffer = "" return s else: s = self._buffer[:size] self._buffer = self._buffer[size:] return s class ReadTest(unittest.TestCase): def test_seek(self): # all codecs should be able to encode these s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456") encoding = self.encoding reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding))) for t in xrange(5): # Test that calling seek resets the internal codec state and buffers reader.seek(0, 0) line = reader.readline() self.assertEqual(s[:len(line)], line) def check_partial(self, input, partialresults): # get a StreamReader for the encoding and feed the bytestring version # of input to the reader byte by byte. Read every available from # the StreamReader and check that the results equal the appropriate # entries from partialresults. 
q = Queue() r = codecs.getreader(self.encoding)(q) result = u"" for (c, partialresult) in zip(input.encode(self.encoding), partialresults): q.write(c) result += r.read() self.assertEqual(result, partialresult) # check that there's nothing left in the buffers self.assertEqual(r.read(), u"") self.assertEqual(r.bytebuffer, "") self.assertEqual(r.charbuffer, u"") def test_readline(self): def getreader(input): stream = StringIO.StringIO(input.encode(self.encoding)) return codecs.getreader(self.encoding)(stream) def readalllines(input, keepends=True): reader = getreader(input) lines = [] while True: line = reader.readline(keepends=keepends) if not line: break lines.append(line) return "".join(lines) s = u"foo\nbar\r\nbaz\rspam\u2028eggs" self.assertEqual(readalllines(s, True), s) self.assertEqual(readalllines(s, False), u"foobarbazspameggs") # Test long lines (multiple calls to read() in readline()) vw = [] vwo = [] for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()): vw.append((i*200)*u"\3042" + lineend) vwo.append((i*200)*u"\3042") self.assertEqual(readalllines("".join(vw), True), "".join(vw)) self.assertEqual(readalllines("".join(vw), False),"".join(vwo)) # Test lines where the first read might end with \r, so the # reader has to look ahead whether this is a lone \r or a \r\n for size in xrange(80): for lineend in u"\n \r\n \r \u2028".split(): s = size*u"a" + lineend + u"xxx\n" self.assertEqual( getreader(s).readline(keepends=True), size*u"a" + lineend, ) self.assertEqual( getreader(s).readline(keepends=False), size*u"a", ) def test_readlinequeue(self): q = Queue() writer = codecs.getwriter(self.encoding)(q) reader = codecs.getreader(self.encoding)(q) # No lineends writer.write(u"foo\r") self.assertEqual(reader.readline(keepends=False), u"foo") writer.write(u"\nbar\r") self.assertEqual(reader.readline(keepends=False), u"bar") writer.write(u"baz") self.assertEqual(reader.readline(keepends=False), u"baz") self.assertEqual(reader.readline(keepends=False), u"") # 
Lineends writer.write(u"foo\r") self.assertEqual(reader.readline(keepends=True), u"foo\r") writer.write(u"\nbar\r") self.assertEqual(reader.readline(keepends=True), u"bar\r") writer.write(u"baz") self.assertEqual(reader.readline(keepends=True), u"baz") self.assertEqual(reader.readline(keepends=True), u"") writer.write(u"foo\r\n") self.assertEqual(reader.readline(keepends=True), u"foo\r\n") def test_bug1098990_a(self): s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n" s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n" s3 = u"next line.\r\n" s = (s1+s2+s3).encode(self.encoding) stream = StringIO.StringIO(s) reader = codecs.getreader(self.encoding)(stream) self.assertEqual(reader.readline(), s1) self.assertEqual(reader.readline(), s2) self.assertEqual(reader.readline(), s3) self.assertEqual(reader.readline(), u"") def test_bug1098990_b(self): s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n" s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n" s3 = u"stillokay:bbbbxx\r\n" s4 = u"broken!!!!badbad\r\n" s5 = u"againokay.\r\n" s = (s1+s2+s3+s4+s5).encode(self.encoding) stream = StringIO.StringIO(s) reader = codecs.getreader(self.encoding)(stream) self.assertEqual(reader.readline(), s1) self.assertEqual(reader.readline(), s2) self.assertEqual(reader.readline(), s3) self.assertEqual(reader.readline(), s4) self.assertEqual(reader.readline(), s5) self.assertEqual(reader.readline(), u"") class UTF16Test(ReadTest): encoding = "utf-16" spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00' spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m' def test_only_one_bom(self): _,_,reader,writer = codecs.lookup(self.encoding) # encode some stream s = StringIO.StringIO() f = writer(s) f.write(u"spam") f.write(u"spam") d = s.getvalue() # check whether there is exactly one BOM in it self.assert_(d == self.spamle or d == self.spambe) # try to read it back s = StringIO.StringIO(d) f = reader(s) self.assertEquals(f.read(), 
u"spamspam") def test_partial(self): self.check_partial( u"\x00\xff\u0100\uffff", [ u"", # first byte of BOM read u"", # second byte of BOM read => byteorder known u"", u"\x00", u"\x00", u"\x00\xff", u"\x00\xff", u"\x00\xff\u0100", u"\x00\xff\u0100", u"\x00\xff\u0100\uffff", ] ) class UTF16LETest(ReadTest): encoding = "utf-16-le" def test_partial(self): self.check_partial( u"\x00\xff\u0100\uffff", [ u"", u"\x00", u"\x00", u"\x00\xff", u"\x00\xff", u"\x00\xff\u0100", u"\x00\xff\u0100", u"\x00\xff\u0100\uffff", ] ) class UTF16BETest(ReadTest): encoding = "utf-16-be" def test_partial(self): self.check_partial( u"\x00\xff\u0100\uffff", [ u"", u"\x00", u"\x00", u"\x00\xff", u"\x00\xff", u"\x00\xff\u0100", u"\x00\xff\u0100", u"\x00\xff\u0100\uffff", ] ) class UTF8Test(ReadTest): encoding = "utf-8" def test_partial(self): self.check_partial( u"\x00\xff\u07ff\u0800\uffff", [ u"\x00", u"\x00", u"\x00\xff", u"\x00\xff", u"\x00\xff\u07ff", u"\x00\xff\u07ff", u"\x00\xff\u07ff", u"\x00\xff\u07ff\u0800", u"\x00\xff\u07ff\u0800", u"\x00\xff\u07ff\u0800", u"\x00\xff\u07ff\u0800\uffff", ] ) class EscapeDecodeTest(unittest.TestCase): def test_empty_escape_decode(self): self.assertEquals(codecs.escape_decode(""), ("", 0)) class RecodingTest(unittest.TestCase): def test_recoding(self): f = StringIO.StringIO() f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8") f2.write(u"a") f2.close() # Python used to crash on this at exit because of a refcount # bug in _codecsmodule.c # From RFC 3492 punycode_testcases = [ # A Arabic (Egyptian): (u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", "egbpdaj6bu4bxfgehfvwxn"), # B Chinese (simplified): (u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", "ihqwcrb4cv8a8dqg056pqjye"), # C Chinese (traditional): (u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", "ihqwctvzc91f659drss3x8bo0yb"), # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky 
(u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" u"\u0065\u0073\u006B\u0079", "Proprostnemluvesky-uyb24dma41a"), # E Hebrew: (u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" u"\u05D1\u05E8\u05D9\u05EA", "4dbcagdahymbxekheh6e0a7fei0b"), # F Hindi (Devanagari): (u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" u"\u0939\u0948\u0902", "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"), #(G) Japanese (kanji and hiragana): (u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"), # (H) Korean (Hangul syllables): (u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" "psd879ccm6fea98c"), # (I) Russian (Cyrillic): (u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" u"\u0438", "b1abfaaepdrnnbgefbaDotcwatmq2g4l"), # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol (u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" u"\u0061\u00F1\u006F\u006C", "PorqunopuedensimplementehablarenEspaol-fmd56a"), # (K) Vietnamese: # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\ # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t (u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" 
u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" u"\u0056\u0069\u1EC7\u0074", "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"), #(L) 3<nen>B<gumi><kinpachi><sensei> (u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", "3B-ww4c5e180e575a65lsy2b"), # (M) <amuro><namie>-with-SUPER-MONKEYS (u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" u"\u004F\u004E\u004B\u0045\u0059\u0053", "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"), # (N) Hello-Another-Way-<sorezore><no><basho> (u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240", "Hello-Another-Way--fc4qua05auwb3674vfr0b"), # (O) <hitotsu><yane><no><shita>2 (u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", "2-u9tlzr9756bt3uc0v"), # (P) Maji<de>Koi<suru>5<byou><mae> (u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" u"\u308B\u0035\u79D2\u524D", "MajiKoi5-783gue6qz075azm5e"), # (Q) <pafii>de<runba> (u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", "de-jg4avhby1noc0d"), # (R) <sono><supiido><de> (u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", "d9juau41awczczp"), # (S) -> $1.00 <- (u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" u"\u003C\u002D", "-> $1.00 <--") ] for i in punycode_testcases: if len(i)!=2: print repr(i) class PunycodeTest(unittest.TestCase): def test_encode(self): for uni, puny in punycode_testcases: # Need to convert both strings to lower case, since # some of the extended encodings use upper case, but our # code produces only lower case. Converting just puny to # lower is also insufficient, since some of the input characters # are upper case. 
self.assertEquals(uni.encode("punycode").lower(), puny.lower()) def test_decode(self): for uni, puny in punycode_testcases: self.assertEquals(uni, puny.decode("punycode")) # From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html nameprep_tests = [ # 3.1 Map to nothing. ('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar' '\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef' '\xb8\x8f\xef\xbb\xbf', 'foobarbaz'), # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045. ('CAFE', 'cafe'), # 3.3 Case folding 8bit U+00DF (german sharp s). # The original test case is bogus; it says \xc3\xdf ('\xc3\x9f', 'ss'), # 3.4 Case folding U+0130 (turkish capital I with dot). ('\xc4\xb0', 'i\xcc\x87'), # 3.5 Case folding multibyte U+0143 U+037A. ('\xc5\x83\xcd\xba', '\xc5\x84 \xce\xb9'), # 3.6 Case folding U+2121 U+33C6 U+1D7BB. # XXX: skip this as it fails in UCS-2 mode #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb', # 'telc\xe2\x88\x95kg\xcf\x83'), (None, None), # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA. ('j\xcc\x8c\xc2\xa0\xc2\xaa', '\xc7\xb0 a'), # 3.8 Case folding U+1FB7 and normalization. ('\xe1\xbe\xb7', '\xe1\xbe\xb6\xce\xb9'), # 3.9 Self-reverting case folding U+01F0 and normalization. # The original test case is bogus, it says `\xc7\xf0' ('\xc7\xb0', '\xc7\xb0'), # 3.10 Self-reverting case folding U+0390 and normalization. ('\xce\x90', '\xce\x90'), # 3.11 Self-reverting case folding U+03B0 and normalization. ('\xce\xb0', '\xce\xb0'), # 3.12 Self-reverting case folding U+1E96 and normalization. ('\xe1\xba\x96', '\xe1\xba\x96'), # 3.13 Self-reverting case folding U+1F56 and normalization. ('\xe1\xbd\x96', '\xe1\xbd\x96'), # 3.14 ASCII space character U+0020. (' ', ' '), # 3.15 Non-ASCII 8bit space character U+00A0. ('\xc2\xa0', ' '), # 3.16 Non-ASCII multibyte space character U+1680. ('\xe1\x9a\x80', None), # 3.17 Non-ASCII multibyte space character U+2000. ('\xe2\x80\x80', ' '), # 3.18 Zero Width Space U+200b. 
('\xe2\x80\x8b', ''), # 3.19 Non-ASCII multibyte space character U+3000. ('\xe3\x80\x80', ' '), # 3.20 ASCII control characters U+0010 U+007F. ('\x10\x7f', '\x10\x7f'), # 3.21 Non-ASCII 8bit control character U+0085. ('\xc2\x85', None), # 3.22 Non-ASCII multibyte control character U+180E. ('\xe1\xa0\x8e', None), # 3.23 Zero Width No-Break Space U+FEFF. ('\xef\xbb\xbf', ''), # 3.24 Non-ASCII control character U+1D175. ('\xf0\x9d\x85\xb5', None), # 3.25 Plane 0 private use character U+F123. ('\xef\x84\xa3', None), # 3.26 Plane 15 private use character U+F1234. ('\xf3\xb1\x88\xb4', None), # 3.27 Plane 16 private use character U+10F234. ('\xf4\x8f\x88\xb4', None), # 3.28 Non-character code point U+8FFFE. ('\xf2\x8f\xbf\xbe', None), # 3.29 Non-character code point U+10FFFF. ('\xf4\x8f\xbf\xbf', None), # 3.30 Surrogate code U+DF42. ('\xed\xbd\x82', None), # 3.31 Non-plain text character U+FFFD. ('\xef\xbf\xbd', None), # 3.32 Ideographic description character U+2FF5. ('\xe2\xbf\xb5', None), # 3.33 Display property character U+0341. ('\xcd\x81', '\xcc\x81'), # 3.34 Left-to-right mark U+200E. ('\xe2\x80\x8e', None), # 3.35 Deprecated U+202A. ('\xe2\x80\xaa', None), # 3.36 Language tagging character U+E0001. ('\xf3\xa0\x80\x81', None), # 3.37 Language tagging character U+E0042. ('\xf3\xa0\x81\x82', None), # 3.38 Bidi: RandALCat character U+05BE and LCat characters. ('foo\xd6\xbebar', None), # 3.39 Bidi: RandALCat character U+FD50 and LCat characters. ('foo\xef\xb5\x90bar', None), # 3.40 Bidi: RandALCat character U+FB38 and LCat characters. ('foo\xef\xb9\xb6bar', 'foo \xd9\x8ebar'), # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031. ('\xd8\xa71', None), # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628. ('\xd8\xa71\xd8\xa8', '\xd8\xa71\xd8\xa8'), # 3.43 Unassigned code point U+E0002. # Skip this test as we allow unassigned #('\xf3\xa0\x80\x82', # None), (None, None), # 3.44 Larger test (shrinking). 
# Original test case reads \xc3\xdf ('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2' '\xaa\xce\xb0\xe2\x80\x80', 'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '), # 3.45 Larger test (expanding). # Original test case reads \xc3\x9f ('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c' '\x80', 'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3' '\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82' '\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88') ] class NameprepTest(unittest.TestCase): def test_nameprep(self): from encodings.idna import nameprep for pos, (orig, prepped) in enumerate(nameprep_tests): if orig is None: # Skipped continue # The Unicode strings are given in UTF-8 orig = unicode(orig, "utf-8") if prepped is None: # Input contains prohibited characters self.assertRaises(UnicodeError, nameprep, orig) else: prepped = unicode(prepped, "utf-8") try: self.assertEquals(nameprep(orig), prepped) except Exception,e: raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e))) class CodecTest(unittest.TestCase): def test_builtin(self): self.assertEquals(unicode("python.org", "idna"), u"python.org") class CodecsModuleTest(unittest.TestCase): def test_decode(self): self.assertEquals(codecs.decode('\xe4\xf6\xfc', 'latin-1'), u'\xe4\xf6\xfc') self.assertRaises(TypeError, codecs.decode) self.assertEquals(codecs.decode('abc'), u'abc') self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii') def test_encode(self): self.assertEquals(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'), '\xe4\xf6\xfc') self.assertRaises(TypeError, codecs.encode) self.assertEquals(codecs.encode(u'abc'), 'abc') self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii') def test_register(self): self.assertRaises(TypeError, codecs.register) def test_lookup(self): self.assertRaises(TypeError, codecs.lookup) self.assertRaises(LookupError, codecs.lookup, "__spam__") class StreamReaderTest(unittest.TestCase): def setUp(self): self.reader = codecs.getreader('utf-8') 
self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80') def test_readlines(self): f = self.reader(self.stream) self.assertEquals(f.readlines(), [u'\ud55c\n', u'\uae00']) def test_main(): test_support.run_unittest( UTF16Test, UTF16LETest, UTF16BETest, UTF8Test, EscapeDecodeTest, RecodingTest, PunycodeTest, NameprepTest, CodecTest, CodecsModuleTest, StreamReaderTest ) if __name__ == "__main__": test_main()
mancoast/CPythonPyc_test
cpython/241_test_codecs.py
Python
gpl-3.0
21,080
[ "FEFF" ]
2bd6267132b52b4fb554c83c318b4dce45b1f64b5a338fcde7d60641ac702503
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main BERT model and related functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six import tensorflow as tf class BertConfig(object): """Configuration for `BertModel`.""" def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02): """Constructs BertConfig. Args: vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. 
max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. """ self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with tf.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class BertModel(object): """BERT model ("Bidirectional Encoder Representations from Transformers"). 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) config = modeling.BertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) model = modeling.BertModel(config=config, is_training=True, input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) label_embeddings = tf.get_variable(...) pooled_output = model.get_pooled_output() logits = tf.matmul(pooled_output, label_embeddings) ... ``` """ def __init__(self, config, is_training, input_ids, input_mask=None, token_type_ids=None, use_one_hot_embeddings=False, scope=None, adapter_fn="feedforward_adapter"): """Constructor for BertModel. Args: config: `BertConfig` instance. is_training: bool. true for training model, false for eval model. Controls whether dropout will be applied. input_ids: int32 Tensor of shape [batch_size, seq_length]. input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. use_one_hot_embeddings: (optional) bool. Whether to use one-hot word embeddings or tf.embedding_lookup() for the word embeddings. scope: (optional) variable scope. Defaults to "bert". adapter_fn: (optional) string identifying trainable adapter that takes as input a Tensor and returns a Tensor of the same shape. Raises: ValueError: The config is invalid or one of the input tensor shapes is invalid. 
""" config = copy.deepcopy(config) if not is_training: config.hidden_dropout_prob = 0.0 config.attention_probs_dropout_prob = 0.0 input_shape = get_shape_list(input_ids, expected_rank=2) batch_size = input_shape[0] seq_length = input_shape[1] if input_mask is None: input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32) if token_type_ids is None: token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32) with tf.variable_scope(scope, default_name="bert"): with tf.variable_scope("embeddings"): # Perform embedding lookup on the word ids. (self.embedding_output, self.embedding_table) = embedding_lookup( input_ids=input_ids, vocab_size=config.vocab_size, embedding_size=config.hidden_size, initializer_range=config.initializer_range, word_embedding_name="word_embeddings", use_one_hot_embeddings=use_one_hot_embeddings) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. self.embedding_output = embedding_postprocessor( input_tensor=self.embedding_output, use_token_type=True, token_type_ids=token_type_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with tf.variable_scope("encoder"): # This converts a 2D mask of shape [batch_size, seq_length] to a 3D # mask of shape [batch_size, seq_length, seq_length] which is used # for the attention scores. attention_mask = create_attention_mask_from_input_mask( input_ids, input_mask) # Run the stacked transformer. # `sequence_output` shape = [batch_size, seq_length, hidden_size]. 
self.all_encoder_layers = transformer_model( input_tensor=self.embedding_output, attention_mask=attention_mask, hidden_size=config.hidden_size, num_hidden_layers=config.num_hidden_layers, num_attention_heads=config.num_attention_heads, intermediate_size=config.intermediate_size, intermediate_act_fn=get_activation(config.hidden_act), hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, initializer_range=config.initializer_range, do_return_all_layers=True, adapter_fn=get_adapter(adapter_fn)) self.sequence_output = self.all_encoder_layers[-1] # The "pooler" converts the encoded sequence tensor of shape # [batch_size, seq_length, hidden_size] to a tensor of shape # [batch_size, hidden_size]. This is necessary for segment-level # (or segment-pair-level) classification tasks where we need a fixed # dimensional representation of the segment. with tf.variable_scope("pooler"): # We "pool" the model by simply taking the hidden state corresponding # to the first token. We assume that this has been pre-trained first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1) self.pooled_output = tf.layers.dense( first_token_tensor, config.hidden_size, activation=tf.tanh, kernel_initializer=create_initializer(config.initializer_range)) def get_pooled_output(self): return self.pooled_output def get_sequence_output(self): """Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder. """ return self.sequence_output def get_all_encoder_layers(self): return self.all_encoder_layers def get_embedding_output(self): """Gets output of the embedding lookup (i.e., input to the transformer). 
Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the embedding layer, after summing the word embeddings with the positional embeddings and the token type embeddings, then performing layer normalization. This is the input to the transformer. """ return self.embedding_output def get_embedding_table(self): return self.embedding_table def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. """ # We assume that anything that"s not a string is already an activation # function, so we just return it. if not isinstance(activation_string, six.string_types): return activation_string if not activation_string: return None act = activation_string.lower() if act == "linear": return None elif act == "relu": return tf.nn.relu elif act == "gelu": return gelu elif act == "tanh": return tf.tanh else: raise ValueError("Unsupported activation: %s" % act) def feedforward_adapter(input_tensor, hidden_size=64, init_scale=1e-3): """A feedforward adapter layer with a bottleneck. Implements a bottleneck layer with a user-specified nonlinearity and an identity residual connection. All variables created are added to the "adapters" collection. 
Args: input_tensor: input Tensor of shape [batch size, hidden dimension] hidden_size: dimension of the bottleneck layer. init_scale: Scale of the initialization distribution used for weights. Returns: Tensor of the same shape as x. """ with tf.variable_scope("adapters"): in_size = input_tensor.get_shape().as_list()[1] w1 = tf.get_variable( "weights1", [in_size, hidden_size], initializer=tf.truncated_normal_initializer(stddev=init_scale), collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES]) b1 = tf.get_variable( "biases1", [1, hidden_size], initializer=tf.zeros_initializer(), collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES]) net = tf.tensordot(input_tensor, w1, [[1], [0]]) + b1 net = gelu(net) w2 = tf.get_variable( "weights2", [hidden_size, in_size], initializer=tf.truncated_normal_initializer(stddev=init_scale), collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES]) b2 = tf.get_variable( "biases2", [1, in_size], initializer=tf.zeros_initializer(), collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES]) net = tf.tensordot(net, w2, [[1], [0]]) + b2 return net + input_tensor def get_adapter(function_string): """Maps a string to a Python function. Args: function_string: String name of the adapter function. Returns: A Python function corresponding to the adatper function. `function_string` is None or empty, will return None. If `function_string` is not a string, it will return `function_string`. Raises: ValueError: The `function_string` does not correspond to a known adapter. """ # We assume that anything that"s not a string is already an adapter # function, so we just return it. 
if not isinstance(function_string, six.string_types): return function_string if not function_string: return None fn = function_string.lower() if fn == "feedforward_adapter": return feedforward_adapter else: raise ValueError("Unsupported adapters: %s" % fn) def get_assignment_map_from_checkpoint(tvars, init_checkpoint): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) assignment_map = collections.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) if name not in name_to_variable: continue assignment_map[name] = name initialized_variable_names[name] = 1 initialized_variable_names[name + ":0"] = 1 return (assignment_map, initialized_variable_names) def dropout(input_tensor, dropout_prob): """Perform dropout. Args: input_tensor: float Tensor. dropout_prob: Python float. The probability of dropping out a value (NOT of *keeping* a dimension as in `tf.nn.dropout`). Returns: A version of `input_tensor` with dropout applied. 
""" if dropout_prob is None or dropout_prob == 0.0: return input_tensor output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob) return output def layer_norm(input_tensor, name=None): """Run layer normalization on the last dimension of the tensor.""" return tf.contrib.layers.layer_norm( inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name, variables_collections=["layer_norm", tf.GraphKeys.GLOBAL_VARIABLES]) def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): """Runs layer normalization followed by dropout.""" output_tensor = layer_norm(input_tensor, name) output_tensor = dropout(output_tensor, dropout_prob) return output_tensor def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return tf.truncated_normal_initializer(stddev=initializer_range) def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.gather()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. 
if input_ids.shape.ndims == 2: input_ids = tf.expand_dims(input_ids, axis=[-1]) embedding_table = tf.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range)) flat_input_ids = tf.reshape(input_ids, [-1]) if use_one_hot_embeddings: one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) output = tf.matmul(one_hot_input_ids, embedding_table) else: output = tf.gather(embedding_table, flat_input_ids) input_shape = get_shape_list(input_ids) output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return (output, embedding_table) def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. 
Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError("`token_type_ids` must be specified if" "`use_token_type` is True.") token_type_table = tf.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary. flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. The actual # sequence length might be shorter than this, for faster training of # tasks that do not have long sequences. # # So `full_position_embeddings` is effectively an embedding table # for position [0, 1, 2, ..., max_position_embeddings-1], and the current # sequence has positions [0, 1, 2, ... seq_length-1], so we can just # perform a slice. 
position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) # Only the last two dimensions are relevant (`seq_length` and `width`), so # we broadcast among the first dimensions, which is typically just # the batch size. position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output = layer_norm_and_dropout(output, dropout_prob) return output def create_attention_mask_from_input_mask(from_tensor, to_mask): """Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) batch_size = from_shape[0] from_seq_length = from_shape[1] to_shape = get_shape_list(to_mask, expected_rank=2) to_seq_length = to_shape[1] to_mask = tf.cast( tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32) # We don't assume that `from_tensor` is a mask (although it could be). We # don't actually care if we attend *from* padding tokens (only *to* padding) # tokens so we create a tensor of all ones. # # `broadcast_ones` = [batch_size, from_seq_length, 1] broadcast_ones = tf.ones( shape=[batch_size, from_seq_length, 1], dtype=tf.float32) # Here we broadcast along two dimensions to create the mask. 
mask = broadcast_ones * to_mask return mask def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, size_per_head=512, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, do_return_2d_tensor=False, batch_size=None, from_seq_length=None, to_seq_length=None): """Performs multi-headed attention from `from_tensor` to `to_tensor`. This is an implementation of multi-headed attention based on "Attention is all you Need". If `from_tensor` and `to_tensor` are the same, then this is self-attention. Each timestep in `from_tensor` attends to the corresponding sequence in `to_tensor`, and returns a fixed-with vector. This function first projects `from_tensor` into a "query" tensor and `to_tensor` into "key" and "value" tensors. These are (effectively) a list of tensors of length `num_attention_heads`, where each tensor is of shape [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor and returned. In practice, the multi-headed attention are done with transposes and reshapes rather than actual separate tensors. Args: from_tensor: float Tensor of shape [batch_size, from_seq_length, from_width]. to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. size_per_head: int. Size of each attention head. query_act: (optional) Activation function for the query transform. key_act: (optional) Activation function for the key transform. 
value_act: (optional) Activation function for the value transform. attention_probs_dropout_prob: (optional) float. Dropout probability of the attention probabilities. initializer_range: float. Range of the weight initializer. do_return_2d_tensor: bool. If True, the output will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]. If False, the output will be of shape [batch_size, from_seq_length, num_attention_heads * size_per_head]. batch_size: (Optional) int. If the input is 2D, this might be the batch size of the 3D version of the `from_tensor` and `to_tensor`. from_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `from_tensor`. to_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `to_tensor`. Returns: float Tensor of shape [batch_size, from_seq_length, num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is true, this will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]). Raises: ValueError: Any of the arguments or tensor shapes are invalid. 
""" def transpose_for_scores(input_tensor, batch_size, num_attention_heads, seq_length, width): output_tensor = tf.reshape( input_tensor, [batch_size, seq_length, num_attention_heads, width]) output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3]) return output_tensor from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) if len(from_shape) != len(to_shape): raise ValueError( "The rank of `from_tensor` must match the rank of `to_tensor`.") if len(from_shape) == 3: batch_size = from_shape[0] from_seq_length = from_shape[1] to_seq_length = to_shape[1] elif len(from_shape) == 2: if (batch_size is None or from_seq_length is None or to_seq_length is None): raise ValueError( "When passing in rank 2 tensors to attention_layer, the values " "for `batch_size`, `from_seq_length`, and `to_seq_length` " "must all be specified.") # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` from_tensor_2d = reshape_to_matrix(from_tensor) to_tensor_2d = reshape_to_matrix(to_tensor) # `query_layer` = [B*F, N*H] query_layer = tf.layers.dense( from_tensor_2d, num_attention_heads * size_per_head, activation=query_act, name="query", kernel_initializer=create_initializer(initializer_range)) # `key_layer` = [B*T, N*H] key_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=key_act, name="key", kernel_initializer=create_initializer(initializer_range)) # `value_layer` = [B*T, N*H] value_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name="value", kernel_initializer=create_initializer(initializer_range)) # `query_layer` = [B, N, F, H] query_layer = transpose_for_scores(query_layer, batch_size, num_attention_heads, from_seq_length, size_per_head) # `key_layer` = [B, N, T, H] key_layer = 
transpose_for_scores(key_layer, batch_size, num_attention_heads, to_seq_length, size_per_head) # Take the dot product between "query" and "key" to get the raw # attention scores. # `attention_scores` = [B, N, F, T] attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head))) if attention_mask is not None: # `attention_mask` = [B, 1, F, T] attention_mask = tf.expand_dims(attention_mask, axis=[1]) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_scores += adder # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] attention_probs = tf.nn.softmax(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = dropout(attention_probs, attention_probs_dropout_prob) # `value_layer` = [B, T, N, H] value_layer = tf.reshape( value_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head]) # `value_layer` = [B, N, T, H] value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) # `context_layer` = [B, N, F, H] context_layer = tf.matmul(attention_probs, value_layer) # `context_layer` = [B, F, N, H] context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) if do_return_2d_tensor: # `context_layer` = [B*F, N*H] context_layer = tf.reshape( context_layer, [batch_size * from_seq_length, num_attention_heads * size_per_head]) else: # `context_layer` = [B, F, N*H] context_layer = tf.reshape( context_layer, [batch_size, from_seq_length, num_attention_heads * size_per_head]) return context_layer def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False, adapter_fn=None): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. 
The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. adapter_fn: (optional) trainable adapter function that takes as input a Tensor and returns a Tensor of the same shape. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. """ if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = int(hidden_size / num_attention_heads) input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] input_width = input_shape[2] # The Transformer performs sum residuals on all layers so the input needs # to be the same as the hidden size. if input_width != hidden_size: raise ValueError("The width of the input tensor (%d) != hidden size (%d)" % (input_width, hidden_size)) # We keep the representation as a 2D tensor to avoid re-shaping it back and # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on # the GPU/CPU but may not be free on the TPU, so we want to minimize them to # help the optimizer. 
prev_output = reshape_to_matrix(input_tensor) all_layer_outputs = [] for layer_idx in range(num_hidden_layers): with tf.variable_scope("layer_%d" % layer_idx): layer_input = prev_output with tf.variable_scope("attention"): attention_heads = [] with tf.variable_scope("self"): attention_head = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, size_per_head=attention_head_size, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range, do_return_2d_tensor=True, batch_size=batch_size, from_seq_length=seq_length, to_seq_length=seq_length) attention_heads.append(attention_head) attention_output = None if len(attention_heads) == 1: attention_output = attention_heads[0] else: # In the case where we have other sequences, we just concatenate # them to the self-attention head before the projection. attention_output = tf.concat(attention_heads, axis=-1) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. with tf.variable_scope("output"): attention_output = tf.layers.dense( attention_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) attention_output = dropout(attention_output, hidden_dropout_prob) if adapter_fn: attention_output = adapter_fn(attention_output) attention_output = layer_norm(attention_output + layer_input) # The activation is only applied to the "intermediate" hidden layer. with tf.variable_scope("intermediate"): intermediate_output = tf.layers.dense( attention_output, intermediate_size, activation=intermediate_act_fn, kernel_initializer=create_initializer(initializer_range)) # Down-project back to `hidden_size` then add the residual. 
with tf.variable_scope("output"): layer_output = tf.layers.dense( intermediate_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) layer_output = dropout(layer_output, hidden_dropout_prob) if adapter_fn: layer_output = adapter_fn(layer_output) layer_output = layer_norm(layer_output + attention_output) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: final_outputs = [] for layer_output in all_layer_outputs: final_output = reshape_from_matrix(layer_output, input_shape) final_outputs.append(final_output) return final_outputs else: final_output = reshape_from_matrix(prev_output, input_shape) return final_output def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. 
Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return tf.reshape(output_tensor, orig_dims + [width]) def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A tf.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. """ if name is None: name = tensor.name expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: scope_name = tf.get_variable_scope().name raise ValueError( "For the tensor `%s` in scope `%s`, the actual rank " "`%d` (shape = %s) is not equal to the expected rank `%s`" % (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
google-research/adapter-bert
modeling.py
Python
apache-2.0
40,966
[ "Gaussian" ]
2e80e9b6f8e05e369bbf68e0fc035722c3bd3b0735b608afd1122ddad5da17aa
#!/usr/bin/python import numpy as np import scipy.stats as stats #Variable explanation #I believe maleSamples and femaleSamples are meant to be lists of the required samples to use for processing #numBins is likely archaic to my system but included in case compatability is needed def checkXcounts(maleSamples, femaleSamples, numBins=25): def basicCheck(sampleNames, expected, allCounts, gender): IQRs = [] for j,i in enumerate(sampleNames): ##########Add the correct filepath #filename = bincountsFolder + i + '.' + str(numBins) + 'k.bincounts.txt' binCounts = np.loadtxt(filename, usecols=[3]) if expected == 0.5: allCounts[j, :] = binCounts normCounts = np.log2( binCounts / np.median(binCounts) ) normXbins = normCounts[xLoc] thisIQR = np.percentile(normXbins, 75) - np.percentile(normXbins, 25) IQRs.append(thisIQR) if expected == 0.5: return IQRs, allCounts else: return IQRs ##########Add the correct filepath for your system # chromList = list(np.loadtxt('filepath/example/' + cfg.species + '/' + cfg.species + '.varbin.gc.content.' 
+ str(numBins) + 'k.bowtie.k36.txt', dtype='S5', usecols=[0])) xLoc = [x for x,y in enumerate(chromList) if y == 'chrX'] autoLoc = range(xLoc[0]) allCounts = np.zeros([len(maleSamples), len(chromList)]) maleIQR, allCounts = basicCheck(maleSamples, 0.5, allCounts, 'male') femaleIQR = basicCheck(femaleSamples, 1, False, 'female') sums = [sum(allCounts[x,:]) for x in range(allCounts.shape[0])] rankData = map(int,list(stats.rankdata(sums,method='ordinal'))) rankDict = {rankData[x]:x for x,y in enumerate(sums)} male2IQR = [] for i in range(1,len(rankData)): testData = allCounts[rankDict[i], :] addData = allCounts[rankDict[i+1], :] testData[xLoc] = testData[xLoc] + addData[xLoc] normCounts = np.log2( testData / np.median(testData) ) normXbins = normCounts[xLoc] thisIQR = np.percentile(normXbins, 75) - np.percentile(normXbins, 25) male2IQR.append(thisIQR) #print out a brief stats comparison report# data = [maleIQR, femaleIQR, male2IQR] medians = [np.median(maleIQR), np.median(femaleIQR), np.median(male2IQR)] lowIQR = [medians[0] - np.percentile(maleIQR, 25), medians[1] - np.percentile(femaleIQR, 25), medians[2] - np.percentile(male2IQR, 25)] highIQR = [np.percentile(maleIQR, 75) - medians[0], np.percentile(femaleIQR, 75) - medians[1], np.percentile(male2IQR, 75) - medians[2]] names = ['Male', 'Female', 'Male+Male'] diffDict = {} for i,j in enumerate(medians): if names[i] != names[-1]: for k,l in enumerate(names[i+1:]): thisMWU = 2 * stats.mannwhitneyu(data[i], data[k+i+1])[1] diffDict[(i, k+i+1)] = thisMWU ###correction to p-value for multiple comparisons being made### numDiff = sum([1. for x in diffDict if diffDict[x] < 0.05]) diffDict = {x: numDiff*diffDict[x] for x in diffDict} for i,j in enumerate(medians): print '\tAssessing:', names[i] print '\t\tMedian (IQR):', j, '(', np.percentile(data[i], 25) , '-', np.percentile(data[i], 75), ')', lowIQR[i], highIQR[i] if names[i] != names[-1]: for k,l in enumerate(names[i+1:]): print '\t\tMWU vs', l, ':', diffDict[(i, k+i+1)]
suzannerohrback/somaticCNVpipeline
bin/simulations/copynumbernoise.py
Python
mit
3,193
[ "Bowtie" ]
1209f4ef60a82ec66d364c0395d87fa69404da760ed480cfd8b4e746e0afea95
import theano import theano.tensor as T from theano.sandbox.rng_mrg import MRG_RandomStreams from theano.tensor.nnet.conv import conv2d from theano.tensor.signal.downsample import max_pool_2d from theano.tensor.shared_randomstreams import RandomStreams import numpy as np import scipy.io import time import sys import logging import cPickle as pickle import copy from toolbox import * from modelbase import * class Vae1(ModelULBase): def __init__(self, data, hp): super(Vae1, self).__init__(self.__class__.__name__, data, hp) self.n_h = 800 self.n_z = 20 self.n_t = 1 self.gaussian = False self.params = Parameters() n_x = self.data['n_x'] n_h = self.n_h n_z = self.n_z n_t = self.n_t scale = hp.init_scale if hp.load_model and os.path.isfile(self.filename): self.params.load(self.filename) else: with self.params: W1 = shared_normal((n_x, n_h), scale=scale) W11 = shared_normal((n_h, n_h), scale=scale) W111 = shared_normal((n_h, n_h), scale=scale) W2 = shared_normal((n_h, n_z), scale=scale) W3 = shared_normal((n_h, n_z), scale=scale) W4 = shared_normal((n_h, n_h), scale=scale) W44 = shared_normal((n_h, n_h), scale=scale) W444 = shared_normal((n_z, n_h), scale=scale) W5 = shared_normal((n_h, n_x), scale=scale) b1 = shared_zeros((n_h,)) b11 = shared_zeros((n_h,)) b111 = shared_zeros((n_h,)) b2 = shared_zeros((n_z,)) b3 = shared_zeros((n_z,)) b4 = shared_zeros((n_h,)) b44 = shared_zeros((n_h,)) b444 = shared_zeros((n_h,)) b5 = shared_zeros((n_x,)) def encoder(x, p): h_encoder = T.tanh(T.dot(x,p.W1) + p.b1) h_encoder2 = T.tanh(T.dot(h_encoder,p.W11) + p.b11) h_encoder3 = T.tanh(T.dot(h_encoder2,p.W111) + p.b111) mu_encoder = T.dot(h_encoder3,p.W2) + p.b2 log_sigma_encoder = 0.5*(T.dot(h_encoder3,p.W3) + p.b3) log_qpz = -0.5* T.sum(1 + 2*log_sigma_encoder - mu_encoder**2 - T.exp(2*log_sigma_encoder)) eps = srnd.normal(mu_encoder.shape, dtype=theano.config.floatX) z = mu_encoder + eps*T.exp(log_sigma_encoder) return z, log_qpz def decoder(z, p, x=None): h_decoder3 = 
T.tanh(T.dot(z,p.W444) + p.b444) h_decoder2 = T.tanh(T.dot(h_decoder3,p.W44) + p.b44) h_decoder = T.tanh(T.dot(h_decoder2,p.W4) + p.b4) if self.gaussian: pxz = T.tanh(T.dot(h_decoder,p.W5) + p.b5) else: pxz = T.nnet.sigmoid(T.dot(h_decoder,p.W5) + p.b5) if not x is None: if self.gaussian: log_sigma_decoder = 0 log_pxz = 0.5 * np.log(2 * np.pi) + log_sigma_decoder + 0.5 * T.sum(T.sqr(x - pxz)) else: log_pxz = T.nnet.binary_crossentropy(pxz,x).sum() return pxz, log_pxz else: return pxz x = binomial(self.X) z, log_qpz = encoder(x, self.params) pxz, log_pxz = decoder(z, self.params, x) cost = log_pxz + log_qpz s_pxz = decoder(self.Z, self.params) a_pxz = T.zeros((self.n_t, s_pxz.shape[0], s_pxz.shape[1])) a_pxz = T.set_subtensor(a_pxz[0,:,:], s_pxz) self.compile(log_pxz, log_qpz, cost, a_pxz)
tigerneil/Theano-Lights
models/vae1.py
Python
mit
3,725
[ "Gaussian" ]
c7c8b3f9326b1a544990a834e69e933da423b6d50c18bda836afe19b834a37b2
import os from collections import defaultdict from os.path import join as pjoin import numpy.testing as npt from dipy.data import read_viz_icons, fetch_viz_icons from dipy.viz import ui from dipy.viz import window from dipy.data import DATA_DIR from dipy.testing.decorators import xvfb_it # Conditional import machinery for vtk from dipy.utils.optpkg import optional_package # Allow import, but disable doctests if we don't have vtk from dipy.viz.ui import UI vtk, have_vtk, setup_module = optional_package('vtk') use_xvfb = os.environ.get('TEST_WITH_XVFB', False) if use_xvfb == 'skip': skip_it = True else: skip_it = False @npt.dec.skipif(not have_vtk or skip_it) @xvfb_it def test_ui(recording=False): print("Using VTK {}".format(vtk.vtkVersion.GetVTKVersion())) filename = "test_ui.log.gz" recording_filename = pjoin(DATA_DIR, filename) # Define some counter callback. states = defaultdict(lambda: 0) # Broken UI Element class BrokenUI(UI): def __init__(self): self.actor = vtk.vtkActor() super(BrokenUI, self).__init__() def add_callback(self, event_type, callback): """ Adds events to an actor. Parameters ---------- event_type : string event code callback : function callback function """ super(BrokenUI, self).add_callback(self.actor, event_type, callback) broken_ui = BrokenUI() npt.assert_raises(NotImplementedError, broken_ui.get_actors) npt.assert_raises(NotImplementedError, broken_ui.set_center, (1, 2)) # /Broken UI Element # Rectangle rectangle_test = ui.Rectangle2D(size=(10, 10)) rectangle_test.get_actors() another_rectangle_test = ui.Rectangle2D(size=(1, 1)) # /Rectangle # Button fetch_viz_icons() icon_files = dict() icon_files['stop'] = read_viz_icons(fname='stop2.png') icon_files['play'] = read_viz_icons(fname='play3.png') button_test = ui.Button2D(icon_fnames=icon_files) button_test.set_center((20, 20)) def counter(i_ren, obj, button): states[i_ren.event.name] += 1 # Assign the counter callback to every possible event. 
for event in ["CharEvent", "MouseMoveEvent", "KeyPressEvent", "KeyReleaseEvent", "LeftButtonPressEvent", "LeftButtonReleaseEvent", "RightButtonPressEvent", "RightButtonReleaseEvent", "MiddleButtonPressEvent", "MiddleButtonReleaseEvent"]: button_test.add_callback(event, counter) def make_invisible(i_ren, obj, button): # i_ren: CustomInteractorStyle # obj: vtkActor picked # button: Button2D button.set_visibility(False) i_ren.force_render() i_ren.event.abort() def modify_button_callback(i_ren, obj, button): # i_ren: CustomInteractorStyle # obj: vtkActor picked # button: Button2D button.next_icon() i_ren.force_render() button_test.on_right_mouse_button_pressed = make_invisible button_test.on_left_mouse_button_pressed = modify_button_callback button_test.scale((2, 2)) button_color = button_test.color button_test.color = button_color # /Button # Panel panel = ui.Panel2D(center=(440, 90), size=(300, 150), color=(1, 1, 1), align="right") panel.add_element(rectangle_test, 'absolute', (580, 150)) panel.add_element(button_test, 'relative', (0.2, 0.2)) npt.assert_raises(ValueError, panel.add_element, another_rectangle_test, 'error_string', (1, 2)) # /Panel current_size = (600, 600) show_manager = window.ShowManager(size=current_size, title="DIPY UI Example") show_manager.ren.add(panel) if recording: show_manager.record_events_to_file(recording_filename) print(list(states.items())) else: show_manager.play_events_from_file(recording_filename) msg = "Wrong count for '{}'." expected = [('CharEvent', 0), ('KeyPressEvent', 0), ('KeyReleaseEvent', 0), ('MouseMoveEvent', 161), ('LeftButtonPressEvent', 12), ('RightButtonPressEvent', 3), ('MiddleButtonPressEvent', 0), ('LeftButtonReleaseEvent', 12), ('MouseWheelForwardEvent', 0), ('MouseWheelBackwardEvent', 0), ('MiddleButtonReleaseEvent', 0), ('RightButtonReleaseEvent', 3)] # Useful loop for debugging. for event, count in expected: if states[event] != count: print("{}: {} vs. 
{} (expected)".format(event, states[event], count)) for event, count in expected: npt.assert_equal(states[event], count, err_msg=msg.format(event)) # Dummy Show Manager dummy_renderer = window.Renderer() dummy_show_manager = window.ShowManager(dummy_renderer, size=(800, 800), reset_camera=False, interactor_style='trackball') npt.assert_raises(TypeError, button_test.add_to_renderer, dummy_renderer) # /Dummy Show Manager
villalonreina/dipy
dipy/viz/tests/test_ui.py
Python
bsd-3-clause
5,390
[ "VTK" ]
dc7e930928190bda7da986180fc5381ebc733bb9a86fda92d0bccf4ae76b7fef
#!/usr/bin/python import pylab import sys import argparse import numpy as np import matplotlib.pyplot as plt import readpng as rpng import tikhonov as tik import scipy.sparse.linalg import time import random_light as rl import sklearn.linear_model as lm if __name__ == "__main__": parser = argparse.ArgumentParser(description='Inverse Problem test program by Hajime Kawahara using sklearn.Ridge') parser.add_argument('-f', nargs=1, required=True, help='png file') parser.add_argument('-n', nargs=1, default=[1000], help='number of light beams', type=int) parser.add_argument('-l', nargs='+', default=[0.001], help='lambda. if you specify multiple lambdas, then the cross validation is performed. ', type=float) parser.add_argument('-w', nargs=1, default=[20.0], help='mean beam diameter', type=float) parser.add_argument('-p', nargs=1, default=[1.0], help='beams probe on upper 100 p percent area ', type=float) parser.add_argument('-s', nargs=1, help='STD of gaussian noise', type=float) parser.add_argument('-save', nargs=1, help='save G and d of search light', type=str) parser.add_argument('-load', nargs=1, help='load G and d of search light', type=str) parser.add_argument('-solver', nargs=1, default=["sklearn"], help='SVD solver. 
sklearn.Ridge, fullsvd, lasso', type=str) args = parser.parse_args() solver=args.solver[0] img=rpng.get_bwimg(args.f[0]) if len(args.l)==1: lamb=args.l[0] else: lamb=0.0 lamblist=args.l width=args.w[0] N=args.n[0] p=args.p[0] Mx=img.shape[0] My=img.shape[1] M=Mx*My print("# of Data = ", N, "# of Model = ", M) if args.load: #load d,g,imgext print("Load G and d from ",args.load[0]) data=np.load(args.load[0]+".npz") d=data["arr_0"] g=data["arr_1"] imgext=data["arr_2"] else: #create eclipse curves deltaa=np.array(list(map(int,np.random.rand(Mx*My)*width))).reshape(Mx,My) #width of beams ixrand=np.array(list(map(int,np.random.rand(N)*Mx*p))) # x-position of beams iyrand=np.array(list(map(int,np.random.rand(N)*My))) # y-position of beams d,g,imgext=rl.create_data_and_designmatrix(N,Mx,My,ixrand,iyrand,deltaa,img) if args.s: sigma=args.s[0] print("noise injection to data: sigma=",sigma) d=d+np.random.normal(0.0,sigma,N) if args.save: print("Save G and d to ",args.save[0]) np.savez(args.save[0],d,g,imgext) if solver == "fullsvd": start = time.time() U,S,VT=np.linalg.svd(g) p=None mprior=np.zeros(M) mest,dpre,residual,modelnorm,curv_lcurve=tik.tikhonov_regularization(g,d,mprior,U,VT,S,lamb,p=p) imgest=mest.reshape(Mx,My) elapsed_time = time.time() - start print("solved by np.linalg.svd: time=",elapsed_time) method="Tikhonov Regularization" elif solver == "sklearn": start = time.time() if lamb > 0.0: clf = lm.Ridge(alpha = lamb) clf.fit(g,d) else: print("Cross Validation between ",lamblist) clf = lm.RidgeCV(alphas = lamblist) print(lamblist) clf.fit(g,d) lamb=clf.alpha_ print("Result: lambda=",lamb) mest=clf.coef_ dpre=np.dot(g,mest)+clf.intercept_ imgest=mest.reshape(Mx,My) elapsed_time = time.time() - start print("solved by scikit_learn.Ridge: time=",elapsed_time) method="Tikhonov Regularization" elif solver == "lasso": start = time.time() if lamb > 0.0: clf = lm.Lasso(alpha = lamb) clf.fit(g,d) else: print("Cross Validation between ",lamblist) clf = lm.LassoCV(alphas = 
lamblist) print(lamblist) clf.fit(g,d) lamb=clf.alpha_ print("Result: lambda=",lamb) mest=clf.coef_ dpre=np.dot(g,mest)+clf.intercept_ imgest=mest.reshape(Mx,My) elapsed_time = time.time() - start print("solved by scikit_learn.Lasso: time=",elapsed_time) method="LASSO" else: sys.exit("invalid solver option. specify sklearn or fullsvd. EXIT.") cap="# of beams = "+str(N)+", Mean beam diameter = "+str(width)+" pixels, $\lambda$ = "+str(lamb) fig =plt.figure() ax=fig.add_subplot(131) ax.imshow(img,cmap="gray") pylab.title("input") ax=fig.add_subplot(132) ax.imshow(imgext,cmap="gray") pylab.title("data (averaged)") ax.annotate(method, xy=(0.5, 1.15), xycoords='axes fraction',horizontalalignment="center", fontsize=16) ax.annotate(cap, xy=(0.5, -0.2), xycoords='axes fraction',horizontalalignment="center", fontsize=12,color="gray") ax=fig.add_subplot(133) ax.imshow(imgest,cmap="gray") pylab.title("estimate") plt.show()
HajimeKawahara/pinvprob
pinvprob/random_light_fast.py
Python
gpl-2.0
4,980
[ "Gaussian" ]
f7f1cd62cec87d33ce70ba6535f0772d4a3a1f174f11830e13533d17f0f6913a
# TODO: Add tests based on taxonomy, once we know how to mock mysql. import six import platform from six.moves import builtins from copy import deepcopy from json import dumps from unittest import TestCase, skip try: from unittest.mock import patch, mock_open except ImportError: from mock import patch from .sample_data import PARAMS, RECORD0, RECORD1, RECORD2, RECORD3, RECORD4 from dark.reads import Read, Reads, AAReadWithX from dark.hsp import HSP, LSP from dark.score import LowerIsBetterScore from dark.diamond.alignments import ( DiamondReadsAlignments, ZERO_EVALUE_UPPER_RANDOM_INCREMENT) from dark.titles import TitlesAlignments from dark.utils import StringIO class TestDiamondReadsAlignments(TestCase): """ Test the DiamondReadsAlignments class. """ def testEmptyJSONInput(self): """ When a JSON input file is empty, a C{ValueError} must be raised on trying to read it. """ mockOpener = mock_open() with patch.object(builtins, 'open', mockOpener): reads = Reads() error = "JSON file 'file\\.json' was empty\\." six.assertRaisesRegex(self, ValueError, error, DiamondReadsAlignments, reads, 'file.json', databaseFilename='database.fasta') def testNonJSONInput(self): """ When given a file whose contents are not JSON, attempting to read the DIAMOND hits from it must raise a C{ValueError}. """ pypy = platform.python_implementation() == 'PyPy' mockOpener = mock_open(read_data='not JSON\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() if six.PY3: error = ( "^Could not convert first line of 'file\\.json' to JSON " "\\(Expecting value: line 1 column 1 \\(char 0\\)\\)\\. " "Line is 'not JSON'\\.$") else: if pypy: error = ( "^Could not convert first line of 'file\\.json' to " "JSON \\(Error when decoding null at char 1\\)\\. " "Line is 'not JSON'\\.$") else: error = ( "^Could not convert first line of 'file\\.json' to " "JSON \\(No JSON object could be decoded\\)\\. 
Line " "is 'not JSON'\\.$") six.assertRaisesRegex(self, ValueError, error, DiamondReadsAlignments, reads, 'file.json', databaseFilename='database.fasta') def testScoreTitle_Bits(self): """ The score title must be correct when we are using bit scores. """ mockOpener = mock_open(read_data=dumps(PARAMS) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') self.assertEqual('Bit score', readsAlignments.params.scoreTitle) def testScoreTitle_EValue(self): """ The score title must be correct when we are using e values. """ mockOpener = mock_open(read_data=dumps(PARAMS) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta', scoreClass=LowerIsBetterScore) self.assertEqual('$- log_{10}(e)$', readsAlignments.params.scoreTitle) def testNucleotides(self): """ The nucleotide type of the subject must be correct. """ params = deepcopy(PARAMS) mockOpener = mock_open(read_data=dumps(params) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') self.assertFalse(readsAlignments.params.subjectIsNucleotides) def testApplicationParams(self): """ DIAMOND parameters must be extracted from the input JSON file and stored correctly. """ mockOpener = mock_open(read_data=dumps(PARAMS) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') self.assertEqual(PARAMS, readsAlignments.params.applicationParams) def testJSONParamsButNoHits(self): """ When DIAMOND parameters are present in the input but there are no records, the __iter__ method of a L{DiamondReadsAlignments} instance must not yield anything. 
""" mockOpener = mock_open(read_data=dumps(PARAMS) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') self.assertEqual([], list(readsAlignments)) def testNotEnoughReads(self): """ If a JSON file contains a parameters section and one hit, but there is no read to go with the hit, a C{ValueError} must be raised. """ mockOpener = mock_open( read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() error = ("Read generator failed to yield a read with id 'id0' as " "found in record number 1 during parsing of DIAMOND " "output file 'file\\.json'\\.") readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') six.assertRaisesRegex(self, ValueError, error, list, readsAlignments) def testIncorrectReadId(self): """ If the query id of a hit does not match the id of the corresponding input read, a C{ValueError} must be raised. """ mockOpener = mock_open( read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('not id0', 'A' * 70)) error = ("Read generator failed to yield a read with id 'id0' as " "found in record number 1 during parsing of DIAMOND " "output file 'file.json'.") readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') six.assertRaisesRegex(self, ValueError, error, list, readsAlignments) def testMoreReadsThanRecords(self): """ If a JSON file contains a parameters section and one hit, but there are two query reads, the second read must still be returned, but have no alignments (i.e., length zero). 
""" mockOpener = mock_open( read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'G' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments) self.assertEqual(2, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual('id1', result[1].read.id) self.assertEqual(0, len(result[1])) def testOneJSONInput(self): """ If a JSON file contains a parameters section and one record, it must be read correctly. """ result = StringIO(dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') with patch.object(builtins, 'open') as mockMethod: mockMethod.return_value = result reads = Reads() reads.add(Read('id0', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') self.assertEqual(1, len(list(readsAlignments))) def testTwoJSONInputs(self): """ If two JSON files are passed to L{DiamondReadsAlignments} each with a parameters section and one record, both records must be read correctly and the result should have 2 records. 
""" class SideEffect(object): def __init__(self): self.first = True def sideEffect(self, _ignoredFilename, **kwargs): if self.first: self.first = False return StringIO(dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') else: return StringIO(dumps(PARAMS) + '\n' + dumps(RECORD1) + '\n') sideEffect = SideEffect() with patch.object(builtins, 'open') as mockMethod: mockMethod.side_effect = sideEffect.sideEffect reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, ['file1.json', 'file2.json'], databaseFilename='database.fasta') result = list(readsAlignments) self.assertEqual(2, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual('id1', result[1].read.id) def testThreeJSONInputs(self): """ If three JSON files are passed to L{DiamondReadsAlignments} with names that have a numeric prefix and each with a parameters section and one record, all records must be read correctly and the result should have 3 records in the correct order. """ class SideEffect(object): def __init__(self, test): self.test = test self.count = 0 def sideEffect(self, filename, **kwargs): if self.count == 0: self.test.assertEqual('1.json', filename) self.count += 1 return StringIO(dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') elif self.count == 1: self.test.assertEqual('2.json', filename) self.count += 1 return StringIO(dumps(PARAMS) + '\n' + dumps(RECORD1) + '\n') else: self.test.assertEqual('3.json', filename) return StringIO(dumps(PARAMS) + '\n' + dumps(RECORD2) + '\n') sideEffect = SideEffect(self) with patch.object(builtins, 'open') as mockMethod: mockMethod.side_effect = sideEffect.sideEffect reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) # Note the files are given out of order. Their names will be # sorted before they are opened (due to sortFilenames=True). # The sorting of the names is verified in the SideEffect class, # above. 
readsAlignments = DiamondReadsAlignments( reads, ['3.json', '1.json', '2.json'], sortFilenames=True, databaseFilename='database.fasta') result = list(readsAlignments) self.assertEqual(3, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual('id1', result[1].read.id) self.assertEqual('id2', result[2].read.id) @skip('Some tests are broken and skipped under latest BioPython') def testGetSubjectSequence(self): """ The getSubjectSequence function must return an AAReadWithX instance with a string sequence. """ class SideEffect(object): def __init__(self, test): self.test = test self.count = 0 def sideEffect(self, filename, mode='r'): if self.count == 0: self.test.assertEqual('file.json', filename) self.count += 1 return StringIO(dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') elif self.count == 1: self.count += 1 return StringIO('>id1 Description\nAA\n') else: self.test.fail('Unexpected third call to open.') sideEffect = SideEffect(self) with patch.object(builtins, 'open') as mockMethod: mockMethod.side_effect = sideEffect.sideEffect reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') subject = readsAlignments.getSubjectSequence('id1 Description') self.assertIsInstance(subject, AAReadWithX) self.assertIsInstance(subject.sequence, str) self.assertEqual('id1 Description', subject.id) self.assertEqual('AA', subject.sequence) def testHsps(self): """ The hsps function must yield the HSPs. """ # adjustHspsForPlotting changes HSPs in place, so we pass copied # records so we don't mess up other tests. 
mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' + dumps(RECORD3) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) reads.add(Read('id3', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') self.assertEqual( sorted([HSP(20), HSP(25), HSP(20), HSP(20), HSP(20), HSP(20)]), sorted(readsAlignments.hsps())) def testAdjustHspsForPlotting_EValueNoZero(self): """ The adjustHspsForPlotting function must alter HSPs so that non-zero evalues are converted to the positive value of their negative exponent. """ def result(a): return StringIO( dumps(PARAMS) + '\n' + dumps(deepcopy(RECORD0)) + '\n' + dumps(deepcopy(RECORD1)) + '\n' + dumps(deepcopy(RECORD2)) + '\n' + dumps(deepcopy(RECORD3)) + '\n') with patch.object(builtins, 'open') as mockMethod: mockMethod.side_effect = result reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) reads.add(Read('id3', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta', scoreClass=LowerIsBetterScore) titlesAlignments = TitlesAlignments(readsAlignments) title = 'gi|887699|gb|DQ37780 Cowpox virus 15' titleAlignments = titlesAlignments[title] readsAlignments.adjustHspsForPlotting(titleAlignments) hsps = sorted(titleAlignments.hsps()) self.assertEqual([6.0, 5.0], [hsp.score.score for hsp in hsps]) def testAdjustHspsForPlotting_EValueWithZero(self): """ The adjustHspsForPlotting function must alter HSPs so that zero evalues are set randomly high. 
""" def result(a): return StringIO( dumps(PARAMS) + '\n' + dumps(deepcopy(RECORD0)) + '\n' + dumps(deepcopy(RECORD1)) + '\n' + dumps(deepcopy(RECORD2)) + '\n' + dumps(deepcopy(RECORD3)) + '\n' + dumps(deepcopy(RECORD4)) + '\n') with patch.object(builtins, 'open') as mockMethod: mockMethod.side_effect = result reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) reads.add(Read('id3', 'A' * 70)) reads.add(Read('id4', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta', scoreClass=LowerIsBetterScore) titlesAlignments = TitlesAlignments(readsAlignments) title = 'gi|887699|gb|DQ37780 Cowpox virus 15' titleAlignments = titlesAlignments[title] readsAlignments.adjustHspsForPlotting(titleAlignments) hsps = sorted(titleAlignments.hsps()) # All we really know is that the first HSP will have a randomly # high value whose bounds we can check. The other values are # predictable. self.assertTrue(LSP(6.0 + 2) > hsps[0] > LSP(6.0 + 2 + ZERO_EVALUE_UPPER_RANDOM_INCREMENT)) self.assertEqual([6.0, 5.0, 3.0, 2.0], [hsp.score.score for hsp in hsps[1:]]) class TestDiamondReadsAlignmentsFiltering(TestCase): """ Test the DiamondReadsAlignments class filter function. """ def testNoResultNoFilteringArgs(self): """ If the L{DiamondReadsAlignments} filter function is called with no arguments, and there are no hits, it should produce a generator that yields no result. """ mockOpener = mock_open(read_data=dumps(PARAMS) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter()) self.assertEqual(0, len(result)) def testOneHitNoFilteringArgs(self): """ If the L{DiamondReadsAlignments} filter function is called with no arguments, and there is one hit, it should produce a generator that yields that hit. 
""" mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter()) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) def testLimitZero(self): """ If L{DiamondReadsAlignments} is limited to zero result, that limit must be respected. """ mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(limit=0)) self.assertEqual(0, len(result)) def testLimitOne(self): """ If L{DiamondReadsAlignments} is limited to one hit, that limit must be respected. """ mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(limit=1)) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) def testOneAlignmentPerRead(self): """ If L{DiamondReadsAlignments} is asked to deliver only the best alignment for each read, that must be respected. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362 } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(oneAlignmentPerRead=True)) self.assertEqual(1, len(result)) self.assertEqual(1, len(result[0])) self.assertEqual('Merkel2', result[0][0].subjectTitle) def testMaxZeroAlignmentsPerRead(self): """ If L{BlastReadsAlignments} is asked to deliver only reads that have at most zero alignments, a read with no alignments must be allowed through but a read with one alignment must be filtered out. 
""" record1 = { "query": "read1", "alignments": [], } record2 = { "query": "read2", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, ], "title": "Merkel1" }, ], } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record1) + '\n' + dumps(record2) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('read1', 'A' * 500)) reads.add(Read('read2', 'G' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(maxAlignmentsPerRead=0)) self.assertEqual(1, len(result)) self.assertEqual('read1', result[0].read.id) def testMaxOneAlignmentPerRead(self): """ If L{BlastReadsAlignments} is asked to deliver only reads that have at most one alignment, a read with two alignments must be filtered out. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362 } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(maxAlignmentsPerRead=1)) self.assertEqual(0, len(result)) def testScoreCutoffRemovesEntireAlignment_Bits(self): """ If the L{DiamondReadsAlignments} filter function is supposed to filter on a scoreCutoff (bit score) and the cut-off value results in an alignment with no HSPs, then the alignment must be removed entirely. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362 } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(scoreCutoff=160)) self.assertEqual(1, len(result)) self.assertEqual(1, len(result[0])) self.assertEqual('Merkel2', result[0][0].subjectTitle) def testScoreCutoffRemovesEntireAlignment_EValue(self): """ If the L{DiamondReadsAlignments} filter function is supposed to filter on a scoreCutoff (bit score) and the cut-off value results in an alignment with no HSPs, then the alignment must be removed entirely. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362 } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta', scoreClass=LowerIsBetterScore) result = list(readsAlignments.filter(scoreCutoff=1e-20)) self.assertEqual(1, len(result)) self.assertEqual(1, len(result[0])) self.assertEqual('Merkel2', result[0][0].subjectTitle) def testPercentIdentityCutoffRemovesEntireAlignment(self): """ If the L{DiamondReadsAlignments} filter function is supposed to filter on a percent identity cut-off and the cut-off value results in an alignment with no HSPs, then the alignment must be removed entirely. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentIdentical": 40.0, }, ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentIdentical": 45.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentageIdenticalCutoff=41.0)) self.assertEqual(1, len(result)) self.assertEqual(1, len(result[0])) self.assertEqual('Merkel2', result[0][0].subjectTitle) def testPercentPositiveCutoffRemovesEntireAlignment(self): """ If the L{DiamondReadsAlignments} filter function is supposed to filter on a percent positive cut-off and the cut-off value results in an alignment with no HSPs, then the alignment must be removed entirely. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentPositive": 40.0, }, ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentPositive": 45.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentagePositiveCutoff=41.0)) self.assertEqual(1, len(result)) self.assertEqual(1, len(result[0])) self.assertEqual('Merkel2', result[0][0].subjectTitle) def testScoreCutoffRemovesHsps_Bits(self): """ If the L{DiamondRecords} records function is supposed to filter on scoreCutoff (bit score) and the cut-off value results in some HSPs being invalid, then those HSPs must be removed entirely. 
""" record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362 } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362 } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(scoreCutoff=160)) # There should only be one HSP left in the alignments for the # first read, and it should have the right score (bit score). self.assertEqual(1, len(result[0][0].hsps)) self.assertEqual(HSP(170), result[0][0].hsps[0]) # The second alignment should also be present. 
self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(HSP(180), result[0][1].hsps[0]) def testScoreCutoffRemovesHsps_EValue(self): """ If the L{DiamondRecords} records function is supposed to filter on scoreCutoff (bit score) and the cut-off value results in some HSPs being invalid, then those HSPs must be removed entirely. """ record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362 }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362 } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362 } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta', scoreClass=LowerIsBetterScore) result = list(readsAlignments.filter(scoreCutoff=1e-15)) # There should only be one HSP left in the alignments for the # first read, and it should have the right score (e-value). 
self.assertEqual(1, len(result[0][0].hsps)) self.assertEqual(LSP(1.25e-20), result[0][0].hsps[0]) # The second alignment should also be present. self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(LSP(1.25e-43), result[0][1].hsps[0]) def testPercentIdentityCutoffRemovesHsps(self): """ If the L{DiamondRecords} records function is supposed to filter on percentage identical cut-off and the cut-off value results in some HSPs being invalid, then those HSPs must be removed entirely. """ record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentIdentical": 30.0, }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362, "percentIdentical": 70.0, } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentIdentical": 80.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentageIdenticalCutoff=50.0)) # 
There should only be one HSP left in the alignments for the # first read, and it should have the right score (bit score). self.assertEqual(1, len(result[0][0].hsps)) self.assertEqual(70.0, result[0][0].hsps[0].percentIdentical) # The second alignment should also be present. self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(80.0, result[0][1].hsps[0].percentIdentical) def testPercentIdentityCutoffIgnoresNoneValues(self): """ If the L{DiamondRecords} records function is supposed to filter on percentage identical cut-off and the cut-off value in some HSPs are None, then those HSPs must not be removed. """ record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentIdentical": None, }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362, "percentIdentical": 70.0, } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentIdentical": 80.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( 
reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentageIdenticalCutoff=75.0)) # Only one HSP should be left in the alignments for the first # read, and it should have a None percent identity score. self.assertEqual(1, len(result[0][0].hsps)) self.assertIs(None, result[0][0].hsps[0].percentIdentical) # The second alignment should also be present. self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(80.0, result[0][1].hsps[0].percentIdentical) def testPercentPositiveCutoffRemovesHsps(self): """ If the L{DiamondRecords} records function is supposed to filter on percentage positive cut-off and the cut-off value results in some HSPs being invalid, then those HSPs must be removed entirely. """ record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentPositive": 30.0, }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362, "percentPositive": 70.0, } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentPositive": 80.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with 
patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentagePositiveCutoff=50.0)) # There should only be one HSP left in the alignments for the # first read, and it should have the right score (bit score). self.assertEqual(1, len(result[0][0].hsps)) self.assertEqual(70.0, result[0][0].hsps[0].percentPositive) # The second alignment should also be present. self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(80.0, result[0][1].hsps[0].percentPositive) def testPercentPositiveCutoffIgnoresNoneValues(self): """ If the L{DiamondRecords} records function is supposed to filter on percentage positive cut-off and the cut-off value in some HSPs are None, then those HSPs must not be removed. """ record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentPositive": None, }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362, "percentPositive": 70.0, } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentPositive": 
80.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentagePositiveCutoff=75.0)) # Only one HSP should be left in the alignments for the first # read, and it should have a None percent identity score. self.assertEqual(1, len(result[0][0].hsps)) self.assertIs(None, result[0][0].hsps[0].percentPositive) # The second alignment should also be present. self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(80.0, result[0][1].hsps[0].percentPositive) def testPercentPositiveFilterWithPercentIdenticalValues(self): """ If the L{DiamondRecords} records function is supposed to filter on percentage positive cut-offs, the result must be as excpected when a percentIdentical value is also present in the HSPs. """ # This is a failing test for a cut & paste bug that was encountered # on Dec 04, 2019. 
record = { "query": "H6E8I1T01BFUH9", "alignments": [ { "length": 961, "hsps": [ { "sbjct_end": 869, "expect": 1.25854e-10, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 150, "btop": "", "query_start": 362, "percentIdentical": 33.0, "percentPositive": None, }, { "sbjct_end": 869, "expect": 1.25e-20, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 836, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 170, "btop": "", "query_start": 362, "percentIdentical": 33.0, "percentPositive": 70.0, } ], "title": "Merkel1" }, { "length": 740, "hsps": [ { "sbjct_end": 647, "expect": 1.25e-43, "sbjct": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "sbjct_start": 614, "query": ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAA"), "frame": 1, "query_end": 462, "bits": 180, "btop": "", "query_start": 362, "percentIdentical": 33.0, "percentPositive": 80.0, } ], "title": "Merkel2" } ] } mockOpener = mock_open(read_data=dumps(PARAMS) + '\n' + dumps(record) + '\n') with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('H6E8I1T01BFUH9', 'A' * 500)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter( percentagePositiveCutoff=75.0)) # Only one HSP should be left in the alignments for the first # read, and it should have a None percent identity score. self.assertEqual(1, len(result[0][0].hsps)) self.assertIs(None, result[0][0].hsps[0].percentPositive) # The second alignment should also be present. 
self.assertEqual(1, len(result[0][1].hsps)) self.assertEqual(80.0, result[0][1].hsps[0].percentPositive) def testTitleByRegexCaseInvariant(self): """ Filtering with a title regex must work independent of case. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(titleRegex='sqUIRRel')) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', result[0][0].subjectTitle) def testTitleByRegexAllAlignments(self): """ Filtering with a title regex must work in the case that all alignments for a hit match the regex. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(titleRegex='squirrel')) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', result[0][0].subjectTitle) def testTitleByRegexOneAlignments(self): """ Filtering with a title regex must work in the case that only some alignments for a hit match the regex. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(titleRegex='Mummy')) self.assertEqual(1, len(result)) self.assertEqual('id1', result[0].read.id) self.assertEqual('gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', result[0][0].subjectTitle) def testTitleByNegativeRegexOneAlignment(self): """ Filtering with a negative title regex must work in the case that only some alignments for a hit are ruled out (in which case only those alignments must be removed but the hit is still valid). """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(negativeTitleRegex='Mummy')) self.assertEqual(3, len(result)) self.assertEqual('id1', result[1].read.id) self.assertEqual(1, len(result[1])) self.assertEqual('gi|887699|gb|DQ37780 Monkeypox virus 456', result[1][0].subjectTitle) def testTitleByNegativeRegexMatchesAll(self): """ Filtering with a negative title regex that matches all alignments must remove everything and return an empty result. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(negativeTitleRegex='pox')) self.assertEqual(0, len(result)) def testTitleByNegativeRegexMatchingAllWithWhitelist(self): """ Filtering with a negative title regex that matches all alignments must remove everything and result in no hits, except for any whitelisted titles. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99' result = list(readsAlignments.filter(negativeTitleRegex='pox', whitelist=[title])) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual(1, len(result[0])) self.assertEqual(title, result[0][0].subjectTitle) def testTitleByRegexMatchingAllWithBlacklist(self): """ Filtering with a title regex that matches all alignments must keep everything, except for any blacklisted titles. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') blacklist = ['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'] result = list(readsAlignments.filter(titleRegex='pox', blacklist=blacklist)) self.assertEqual(2, len(result)) self.assertEqual('id1', result[0].read.id) self.assertEqual('id2', result[1].read.id) def testTitleTruncation(self): """ When truncating titles, if a set of matched sequences has titles that are identical up to the truncation word, only the first found is returned. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = readsAlignments.filter(truncateTitlesAfter='virus') result = list(result) self.assertEqual(3, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual(1, len(result[0])) # The Squirrelpox virus 55 hit in RECORD0 is not returned. self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', result[0][0].subjectTitle) def testMinTitleSequenceLength(self): """ It must be possible to filter alignments based on minimum hit sequence length. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(minSequenceLen=37500)) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual(1, len(result[0])) self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 55', result[0][0].subjectTitle) def testMinTitleSequenceLengthNoHits(self): """ It must be possible to filter alignments based on minimum hit sequence length and if nothing sufficiently long matches, an empty list of alignments must be returned. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(minSequenceLen=1000000)) self.assertEqual(0, len(result)) def testMaxTitleSequenceLength(self): """ It must be possible to filter alignments based on maximum hit sequence length. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(maxSequenceLen=31000)) self.assertEqual(1, len(result)) self.assertEqual('id2', result[0].read.id) self.assertEqual(1, len(result[0])) self.assertEqual('gi|887699|gb|DQ37780 Cowpox virus 15', result[0][0].subjectTitle) def testMaxTitleSequenceLengthNoHits(self): """ It must be possible to filter alignments based on maximum hit sequence length and if no sufficiently short sequences match, an empty list of alignments must be returned. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(maxSequenceLen=10000)) self.assertEqual(0, len(result)) def testMinAndMaxTitleSequenceLength(self): """ It must be possible to filter alignments simultaneously on minimum and maximum hit sequence length. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(minSequenceLen=37000, maxSequenceLen=38000)) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual(2, len(result[0])) self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', result[0][0].subjectTitle) self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 55', result[0][1].subjectTitle) def testMinStart(self): """ It must be possible to filter alignments based on minimum offset in the hit sequence. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(minStart=15300)) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual(1, len(result[0])) self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', result[0][0].subjectTitle) def testMinStartNoHits(self): """ It must be possible to filter alignments based on minimum offset in the hit sequence, and if no hsps match then an empty result set must be returned. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(minStart=100000)) self.assertEqual(0, len(result)) def testMaxStop(self): """ It must be possible to filter alignments based on maximum offset in the hit sequence. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(maxStop=1500)) self.assertEqual(1, len(result)) self.assertEqual('id2', result[0].read.id) self.assertEqual(1, len(result[0])) self.assertEqual('gi|887699|gb|DQ37780 Cowpox virus 15', result[0][0].subjectTitle) def testMaxStopNoHits(self): """ It must be possible to filter alignments based on maximum offset in the hit sequence, and if no hsps match then an empty result set must be returned. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(maxStop=100)) self.assertEqual(0, len(result)) def testMinStartAndMaxstop(self): """ It must be possible to filter alignments based simultaneously on mininum and maximum offset in the hit sequence. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(minStart=9000, maxStop=12000)) self.assertEqual(1, len(result)) self.assertEqual('id1', result[0].read.id) self.assertEqual(2, len(result[0])) def testRepeatedFilter_MinStartThenMinStart(self): """ It must be possible to filter alignments multiple times using the same filter parameters. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') readsAlignments.filter(minStart=9000) readsAlignments.filter(minStart=9000) result = list(readsAlignments) self.assertEqual(2, len(result)) self.assertEqual('id0', result[0].read.id) self.assertEqual('id1', result[1].read.id) def testRepeatedFilter_MinStartThenMaxstop(self): """ It must be possible to filter alignments multiple times using different filter parameters. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') readsAlignments.filter(minStart=9000) readsAlignments.filter(maxStop=12000) result = list(readsAlignments) self.assertEqual(1, len(result)) self.assertEqual('id1', result[0].read.id) self.assertEqual(2, len(result[0])) def testReadIdNoMatches(self): """ When filtering on alignments based on a regex for read ids that matches no ids, an empty generator must be returned. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(readIdRegex='blah')) self.assertEqual(0, len(result)) def testReadId(self): """ It must be possible to filter alignments based on a regex for read ids. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(readIdRegex='id[12]')) self.assertEqual(2, len(result)) self.assertEqual('id1', result[0].read.id) self.assertEqual('id2', result[1].read.id) def testReadIdAnchored(self): """ It must be possible to filter alignments based on a regex for read ids that is anchored at start and end. """ mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(readIdRegex='^id0$')) self.assertEqual(1, len(result)) self.assertEqual('id0', result[0].read.id) def testReadIdCaseSensitive(self): """ Filtering alignments based on a regex for read ids must be case sensitive. 
""" mockOpener = mock_open(read_data=( dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' + dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n')) with patch.object(builtins, 'open', mockOpener): reads = Reads() reads.add(Read('id0', 'A' * 70)) reads.add(Read('id1', 'A' * 70)) reads.add(Read('id2', 'A' * 70)) readsAlignments = DiamondReadsAlignments( reads, 'file.json', databaseFilename='database.fasta') result = list(readsAlignments.filter(readIdRegex='^ID0$')) self.assertEqual(0, len(result))
terrycojones/dark-matter
test/diamond/test_alignments.py
Python
mit
90,520
[ "Biopython" ]
4e862168c2ed92dddca792fb2e27e9bacd5554c3d0b3eeedc2707740ce0e4b3b
####################################################################### # # Filelistmod for VU+ by markusw and schomi (c) 2013 # www.vuplus-support.org # # This plugin is licensed under the Creative Commons # Attribution-NonCommercial-ShareAlike 3.0 Unported License. # To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ # or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA. # # # This plugin is NOT free software. It is open source, you are allowed to # modify it (if you keep the license), but it may not be commercially # distributed other than under the conditions noted above. # # Filelistmod 20140512 v1.2-r1 ####################################################################### from re import compile as re_compile from os import path as os_path, listdir from Components.MenuList import MenuList from Components.Harddisk import harddiskmanager from Components.config import config, ConfigInteger from Tools.Directories import SCOPE_CURRENT_SKIN, resolveFilename, fileExists from enigma import RT_HALIGN_LEFT, eListboxPythonMultiContent, \ eServiceReference, eServiceCenter, gFont from Tools.LoadPixmap import LoadPixmap from os import listdir, remove, rename, system, path, symlink, chdir import os EXTENSIONS = { "m4a": "music", "mp2": "music", "mp3": "music", "wav": "music", "wma": "music", "ogg": "music", "flac": "music", "dts": "dts", "jpg": "picture", "jpeg": "picture", "png": "picture", "bmp": "picture", "ts": "movie", "avi": "movie", "divx": "movie", "m4v": "movie", "mpg": "movie", "mpeg": "movie", "mkv": "movie", "mp4": "movie", "mov": "movie", "flv": "movie", "m2ts": "movie", "mts": "movie", "wmv": "movie", "3gp": "movie", "3g2": "movie", "txt": "txt", "py": "py", "sh": "sh", "html": "html", "xml": "xml", "cfg": "cfg", "lst": "lst", "ipk": "ipk", "zip": "zip", "tar": "tar", "gz": "gz", "rar": "rar", "r\d+$": "rar", } def FileEntryComponent(name, absolute = None, isDir = False, isLink = 
False): res = [ (absolute, isDir, isLink) ] res.append((eListboxPythonMultiContent.TYPE_TEXT, 55, 1, 1175, 25, 0, RT_HALIGN_LEFT, name)) if isDir == True and isLink == False: png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/directory.png") elif isLink: png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/link.png") else: extension = name.split('.') extension = extension[-1].lower() print extension if EXTENSIONS.has_key(extension): png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/" + EXTENSIONS[extension] + ".png") else: png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/file.png") if png is not None: res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 10, 4, 20, 20, png)) return res class FileList(MenuList): def __init__(self, directory, showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = None, useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None): MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent) self.additional_extensions = additionalExtensions self.mountpoints = [] self.current_directory = None self.current_mountpoint = None self.useServiceRef = useServiceRef self.showDirectories = showDirectories self.showMountpoints = showMountpoints self.showFiles = showFiles self.isTop = isTop # example: matching .nfi and .ts files: "^.*\.(nfi|ts)" self.matchingPattern = matchingPattern self.inhibitDirs = inhibitDirs or [] self.inhibitMounts = inhibitMounts or [] self.refreshMountpoints() self.changeDir(directory) self.l.setFont(0, gFont("Regular", 20)) self.l.setItemHeight(25) self.serviceHandler = eServiceCenter.getInstance() def refreshMountpoints(self): self.mountpoints = [os_path.join(p.mountpoint, "") for p in 
harddiskmanager.getMountedPartitions()] self.mountpoints.sort(reverse = True) def getMountpoint(self, file): file = os_path.join(os_path.realpath(file), "") for m in self.mountpoints: if file.startswith(m): return m return False def getMountpointLink(self, file): if os_path.realpath(file) == file: return self.getMountpoint(file) else: if file[-1] == "/": file = file[:-1] mp = self.getMountpoint(file) last = file file = os_path.dirname(file) while last != "/" and mp == self.getMountpoint(file): last = file file = os_path.dirname(file) return os_path.join(last, "") def getSelection(self): if self.l.getCurrentSelection() is None: return None return self.l.getCurrentSelection()[0] def getCurrentEvent(self): l = self.l.getCurrentSelection() if not l or l[0][1] == True: return None else: return self.serviceHandler.info(l[0][0]).getEvent(l[0][0]) def getFileList(self): return self.list def inParentDirs(self, dir, parents): dir = os_path.realpath(dir) for p in parents: if dir.startswith(p): return True return False def changeDir(self, directory, select = None): self.list = [] # if we are just entering from the list of mount points: if self.current_directory is None: if directory and self.showMountpoints: self.current_mountpoint = self.getMountpointLink(directory) else: self.current_mountpoint = None self.current_directory = directory directories = [] files = [] if directory is None and self.showMountpoints: # present available mountpoints for p in harddiskmanager.getMountedPartitions(): path = os_path.join(p.mountpoint, "") if path not in self.inhibitMounts and not self.inParentDirs(path, self.inhibitDirs): self.list.append(FileEntryComponent(name = p.description, absolute = path, isDir = True, isLink = False)) files = [ ] directories = [ ] elif directory is None: files = [ ] directories = [ ] elif self.useServiceRef: root = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + directory) if self.additional_extensions: root.setName(self.additional_extensions) serviceHandler = 
eServiceCenter.getInstance() list = serviceHandler.list(root) while 1: s = list.getNext() if not s.valid(): del list break if s.flags & s.mustDescent: directories.append(s.getPath()) else: files.append(s) directories.sort() files.sort() else: if fileExists(directory): try: files = listdir(directory) except: files = [] files.sort() tmpfiles = files[:] for x in tmpfiles: if os_path.isdir(directory + x): directories.append(directory + x + "/") files.remove(x) if directory is not None and self.showDirectories and not self.isTop: if directory == self.current_mountpoint and self.showMountpoints: self.list.append(FileEntryComponent(name = "<" +_("List of Storage Devices") + ">", absolute = None, isDir = True, isLink = False)) elif (directory != "/") and not (self.inhibitMounts and self.getMountpoint(directory) in self.inhibitMounts): self.list.append(FileEntryComponent(name = "<" +_("Parent Directory") + ">", absolute = '/'.join(directory.split('/')[:-2]) + '/', isDir = True, isLink = False)) if self.showDirectories: for x in directories: if not (self.inhibitMounts and self.getMountpoint(x) in self.inhibitMounts) and not self.inParentDirs(x, self.inhibitDirs): name = x.split('/')[-2] testname = x[:-1] if os_path.islink(testname): self.list.append(FileEntryComponent(name = name, absolute = x, isDir = True, isLink = True)) else: self.list.append(FileEntryComponent(name = name, absolute = x, isDir = True, isLink = False)) if self.showFiles: for x in files: if self.useServiceRef: path = x.getPath() name = path.split('/')[-1] else: path = directory + x name = x if (self.matchingPattern is None) or re_compile(self.matchingPattern).search(path): self.list.append(FileEntryComponent(name = name, absolute = x , isDir = False, isLink = False)) if self.showMountpoints and len(self.list) == 0: self.list.append(FileEntryComponent(name = _("nothing connected"), absolute = None, isDir = False, isLink = False)) self.l.setList(self.list) if select is not None: i = 0 self.moveToIndex(0) for 
x in self.list: p = x[0][0] if isinstance(p, eServiceReference): p = p.getPath() if p == select: self.moveToIndex(i) i += 1 # Sort Functions if config.plugins.filebrowservti.sorted.value == 0: self.list.sort(key = lambda x: x[0]) # sort by name normal else: self.list.sort(key = lambda x: x[0], reverse=True) # sort by name recursiv def getCurrentDirectory(self): return self.current_directory def canDescent(self): if self.getSelection() is None: return False return self.getSelection()[1] def descent(self): if self.getSelection() is None: return self.changeDir(self.getSelection()[0], select = self.current_directory) def getFilename(self): if self.getSelection() is None: return None x = self.getSelection()[0] if isinstance(x, eServiceReference): x = x.getPath() return x def getServiceRef(self): if self.getSelection() is None: return None x = self.getSelection()[0] if isinstance(x, eServiceReference): return x return None def execBegin(self): harddiskmanager.on_partition_list_change.append(self.partitionListChanged) def execEnd(self): harddiskmanager.on_partition_list_change.remove(self.partitionListChanged) def refresh(self): self.changeDir(self.current_directory, self.getFilename()) def partitionListChanged(self, action, device): self.refreshMountpoints() if self.current_directory is None: self.refresh() def getSelectionID(self): idx = self.l.getCurrentSelectionIndex() return idx def MultiFileSelectEntryComponent(name, absolute = None, isDir = False, isLink = False, selected = False): res = [ (absolute, isDir, isLink, selected, name) ] res.append((eListboxPythonMultiContent.TYPE_TEXT, 55, 1, 1175, 25, 0, RT_HALIGN_LEFT, name)) if isDir == True and isLink == False: png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/directory.png") elif isLink: png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/link.png") else: extension = name.split('.') extension = extension[-1].lower() 
if EXTENSIONS.has_key(extension): png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/" + EXTENSIONS[extension] + ".png") # png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "extensions/" + EXTENSIONS[extension] + ".png")) else: png = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/file.png") # png = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/file.png") if png is not None: res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 30, 4, 20, 20, png)) if not name.startswith('<'): if selected is False: icon = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/lock_off.png") res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 4, 0, 25, 25, icon)) else: icon = LoadPixmap(cached=True, path="/usr/lib/enigma2/python/Plugins/Extensions/FilebrowserVTi/images/lock_on.png") res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 4, 0, 25, 25, icon)) return res class MultiFileSelectList(FileList): def __init__(self, preselectedFiles, directory, showMountpoints = False, matchingPattern = None, showDirectories = True, showFiles = True, useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None): self.selectedFiles = preselectedFiles if self.selectedFiles is None: self.selectedFiles = [] FileList.__init__(self, directory, showMountpoints = showMountpoints, matchingPattern = matchingPattern, showDirectories = showDirectories, showFiles = showFiles, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions) self.changeDir(directory) self.l.setItemHeight(25) self.l.setFont(0, gFont("Regular", 20)) self.onSelectionChanged = [ ] def selectionChanged(self): for f in self.onSelectionChanged: 
f() def changeSelectionState(self): idx = self.l.getCurrentSelectionIndex() # os.system('echo %s >> /tmp/test1.log' % ("- xxx - ")) count = 0 newList = [] for x in self.list: # os.system('echo %s >> /tmp/test1.log' % ("- state0 - ")) if idx == count: if x[0][4].startswith('<'): newList.append(x) else: if x[0][1] is True: realPathname = x[0][0] else: realPathname = self.current_directory + x[0][0] if x[0][3] == True: SelectState = False for entry in self.selectedFiles: if entry == realPathname: self.selectedFiles.remove(entry) else: SelectState = True alreadyinList = False for entry in self.selectedFiles: if entry == realPathname: alreadyinList = True if not alreadyinList: self.selectedFiles.append(realPathname) newList.append(MultiFileSelectEntryComponent(name = x[0][4], absolute = x[0][0], isDir = x[0][1], isLink = x[0][2], selected = SelectState )) else: newList.append(x) count += 1 self.list = newList self.l.setList(self.list) def getSelectedList(self): return self.selectedFiles def changeDir(self, directory, select = None): self.list = [] # if we are just entering from the list of mount points: if self.current_directory is None: if directory and self.showMountpoints: self.current_mountpoint = self.getMountpointLink(directory) else: self.current_mountpoint = None self.current_directory = directory directories = [] files = [] if directory is None and self.showMountpoints: # present available mountpoints for p in harddiskmanager.getMountedPartitions(): path = os_path.join(p.mountpoint, "") if path not in self.inhibitMounts and not self.inParentDirs(path, self.inhibitDirs): self.list.append(MultiFileSelectEntryComponent(name = p.description, absolute = path, isDir = True)) files = [ ] directories = [ ] elif directory is None: files = [ ] directories = [ ] elif self.useServiceRef: root = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + directory) if self.additional_extensions: root.setName(self.additional_extensions) serviceHandler = eServiceCenter.getInstance() list = 
serviceHandler.list(root) while 1: s = list.getNext() if not s.valid(): del list break if s.flags & s.mustDescent: directories.append(s.getPath()) else: files.append(s) directories.sort() files.sort() else: if fileExists(directory): try: files = listdir(directory) except: files = [] files.sort() tmpfiles = files[:] for x in tmpfiles: if os_path.isdir(directory + x): directories.append(directory + x + "/") files.remove(x) if directory is not None and self.showDirectories and not self.isTop: if directory == self.current_mountpoint and self.showMountpoints: self.list.append(MultiFileSelectEntryComponent(name = "<" +_("List of Storage Devices") + ">", absolute = None, isDir = True)) elif (directory != "/") and not (self.inhibitMounts and self.getMountpoint(directory) in self.inhibitMounts): self.list.append(MultiFileSelectEntryComponent(name = "<" +_("Parent Directory") + ">", absolute = '/'.join(directory.split('/')[:-2]) + '/', isDir = True)) if self.showDirectories: for x in directories: if not (self.inhibitMounts and self.getMountpoint(x) in self.inhibitMounts) and not self.inParentDirs(x, self.inhibitDirs): name = x.split('/')[-2] alreadySelected = False testname = x[:-1] if os_path.islink(testname): my_isLink =True else: my_isLink = False for entry in self.selectedFiles: if entry == x: alreadySelected = True if alreadySelected: self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x, isDir = True, isLink = my_isLink, selected = True)) else: self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x, isDir = True, isLink = my_isLink, selected = False)) if self.showFiles: for x in files: if self.useServiceRef: path = x.getPath() name = path.split('/')[-1] else: path = directory + x name = x if (self.matchingPattern is None) or re_compile(self.matchingPattern).search(path): alreadySelected = False for entry in self.selectedFiles: if os_path.basename(entry) == x: alreadySelected = True if alreadySelected: 
self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x , isDir = False, selected = True)) else: self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x , isDir = False, selected = False)) self.l.setList(self.list) if select is not None: i = 0 self.moveToIndex(0) for x in self.list: p = x[0][0] if isinstance(p, eServiceReference): p = p.getPath() if p == select: self.moveToIndex(i) i += 1 # Sort Functions if config.plugins.filebrowservti.sorted.value == 0: self.list.sort(key = lambda x: x[0]) # sort by name normal else: self.list.sort(key = lambda x: x[0], reverse=True) # sort by name recursiv
popazerty/e2-gui
lib/python/Plugins/Extensions/FilebrowserVTi/FileListmod.py
Python
gpl-2.0
17,967
[ "VisIt" ]
4f318b255e36bcebc4b14754d160a0e2d838b632d67c60aa4db1dc588917b4ac
""" # Notes: - This simulation seeks to emulate the COBAHH benchmark simulations of (Brette et al. 2007) using the Brian2 simulator for speed benchmark comparison to DynaSim. However, this simulation includes CLOCK-DRIVEN synapses, for direct comparison to DynaSim's clock-driven architecture. The synaptic connections are "low-density", with only a 2% probability of connection. - The time taken to simulate will be indicated in the stdout log file '~/batchdirs/brian_benchmark_COBAHH_clocksyn_lodens_1/pbsout/brian_benchmark_COBAHH_clocksyn_lodens_1.out' - Note that this code has been slightly modified from the original (Brette et al. 2007) benchmarking code, available here on ModelDB: https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319 in order to work with version 2 of the Brian simulator (aka Brian2), and also modified to change the model being benchmarked, etc. # References: - Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al. Simulation of networks of spiking neurons: A review of tools and strategies. Journal of Computational Neuroscience 2007;23:349–98. doi:10.1007/s10827-007-0038-6. - Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python. Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008. 
""" from brian2 import * # Parameters cells = 1 defaultclock.dt = 0.01*ms area = 20000*umetre**2 Cm = (1*ufarad*cmetre**-2) * area gl = (5e-5*siemens*cmetre**-2) * area El = -60*mV EK = -90*mV ENa = 50*mV g_na = (100*msiemens*cmetre**-2) * area g_kd = (30*msiemens*cmetre**-2) * area VT = -63*mV # Synaptic strengths gAMPA = (0.1*msiemens*cmetre**-2)* area gGABAA = (0.06*msiemens*cmetre**-2)* area # Synaptic time constants tauAMPA = 2 tauGABAA = 5 # Synaptic reversal potentials EAMPA = 1*mV EGABAA = -80*mV # The model eqs = Equations(''' dv/dt = (gl*(El-v)- gAMPA/cells*sAMPAtotal*(v-EAMPA)- gGABAA/cells*sGABAAtotal*(v-EGABAA)- g_na*(m*m*m)*h*(v-ENa)- g_kd*(n*n*n*n)*(v-EK))/Cm : volt dm/dt = alpha_m*(1-m)-beta_m*m : 1 dn/dt = alpha_n*(1-n)-beta_n*n : 1 dh/dt = alpha_h*(1-h)-beta_h*h : 1 alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/ (exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/ (exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/ (exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz sAMPAtotal : 1 sGABAAtotal : 1 ''') # Construct intrinsic cells P = NeuronGroup(cells, model=eqs, method='euler') Pe = 1 # proportion=int(0.8*cells) # Pe = P[:proportion] # Pi = P[proportion:] # Contruct synaptic network sAMPA=Synapses(Pe,P, model='''ds/dt=1000.*5.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - (s)/(2*ms) : 1 (clock-driven) sAMPAtotal_post = s : 1 (summed) ''') sAMPA.connect(p=0.02) # sGABAA_RETC=Synapses(Pi,P, # model='''ds/dt=1000.*2.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - s/(5*ms) : 1 (clock-driven) # sGABAAtotal_post = s : 1 (summed) # ''') # sGABAA_RETC.connect(p=0.02) # Initialization P.v = 'El + (randn() * 5 - 5)*mV' # Record a few traces trace = StateMonitor(P, 'v', record=[1, 10, 100]) totaldata = StateMonitor(P, 'v', record=True) run(0.5 * second, report='text') # # If you want to plot: # plot(trace.t/ms, 
trace[1].v/mV) # plot(trace.t/ms, trace[10].v/mV) # plot(trace.t/ms, trace[100].v/mV) # xlabel('t (ms)') # ylabel('v (mV)') # show() # # If you want to save data: # print("Saving TC cell voltages!") # numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
asoplata/dynasim-benchmark-brette-2007
output/Brian2/brian2_benchmark_COBAHH_clocksyn_lodens_0001/brian2_benchmark_COBAHH_clocksyn_lodens_0001.py
Python
gpl-3.0
3,795
[ "Brian" ]
78c70a8c85c78f58c3f960f41a581dae5630fb471db8a66475bb41e3bae44707
# vim: set expandtab: # vim: set tabstop=4: import MDAnalysis as mda import numpy as np import pytim from pytim.datafiles import * from pytim import observables ########################## sampling_frequency = 50 # change this to 1 to sample each frame ########################## u = mda.Universe(WATER_GRO, WATER_XTC) L = np.min(u.dimensions[:3]) oxygens = u.select_atoms("name OW") radii = pytim_data.vdwradii(G43A1_TOP) rdf = observables.RDF2D(u, max_radius='full', nbins=120) interface = pytim.ITIM(u, alpha=2., group=oxygens, max_layers=4, radii_dict=radii, cluster_cut=3.5) for ts in u.trajectory[::sampling_frequency]: print("frame " + str(ts.frame) + " / " + str(len(u.trajectory))) layer = interface.layers[0, 0] rdf.sample(layer, layer) rdf.rdf[0] = 0.0 np.savetxt('RDF.dat', np.column_stack((rdf.bins, rdf.rdf))) print('RDF saved to RDF.dat') if sampling_frequency > 1: print('set sampling_frequency = 1 in order to sample each frame in the trajectory')
balazsfabian/pytim
pytim/examples/example_rdf.py
Python
gpl-3.0
1,012
[ "MDAnalysis" ]
5ba32f99fa81a6d5f5c9c4f75f4ec79da97c88c40489a186fea360198536c606
r"""Plot figure: Different outcomes of a Gaussian kernel approximation.""" import matplotlib import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import pairwise_kernels def phi(x, w, D): r"""RFF map.""" Z = np.dot(x, w) return np.hstack((np.cos(Z), np.sin(Z))) / np.sqrt(D) def createColorbar(lwr, upr, fig, axes): r"""Create colorbar for multiple Pyplot plot.""" cax = fig.add_axes([.92, 0.1, 0.01, 0.8]) norm = matplotlib.colors.LogNorm(vmin=lwr, vmax=upr, clip=False) c = matplotlib.colorbar.ColorbarBase(cax, cmap=plt.get_cmap('rainbow'), norm=norm, label='D=') plt.title(r'$\widetilde{K}$') return c def main(): r"""Plot figure: Different outcomes of a Gaussian kernel approximation.""" T = 25 # Number of curves cm_subsection = np.linspace(0, 1, T + 1) colors = [matplotlib.cm.rainbow(x) for x in cm_subsection] d = 1 # Dimension of the input N = 250 # Number of points per curves # Generate N data in (-1, 1) and exact Gram matrix np.random.seed(0) X = np.linspace(-1, 1, N).reshape((N, d)) K = pairwise_kernels(X, metric='rbf', gamma=1. / (2. * .1 ** 2)) # A Matrix for the decomposable kernel. 
Link the outputs to some mean value c = np.random.randn(N, 2) A = .5 * np.eye(2) + .5 * np.ones((2, 2)) plt.close() plt.rc('text', usetex=True) plt.rc('font', family='serif') f, axes = plt.subplots(2, 2, figsize=(12, 8), sharex=True, sharey=True) # For each curve with different D for k, D in enumerate(np.logspace(0, 4, T)): D = int(D) np.random.seed(0) w = np.random.randn(d, D) / .1 phiX = phi(X, w, D) Kt = np.dot(phiX, phiX.T) # Generate outputs with the exact Gram matrix pred = np.dot(np.dot(Kt, c), A) axes[0, 0].plot(X, pred[:, 0], c=colors[k], lw=.5, linestyle='-') axes[0, 0].set_ylabel(r'$y_1$') axes[0, 1].plot(X, pred[:, 1], c=colors[k], lw=.5, linestyle='-') axes[0, 1].set_ylabel(r'$y_2$') # Generate outputs with the a realization of the random Gram matrix w = np.random.randn(d, D) / .1 phiX = phi(X, w, D) Kt = np.dot(phiX, phiX.T) pred = np.dot(np.dot(Kt, c), A) axes[1, 0].plot(X, pred[:, 0], c=colors[k], lw=.5, linestyle='-') axes[1, 0].set_xlabel(r'$x$') axes[1, 0].set_ylabel(r'$y_1$') axes[1, 1].plot(X, pred[:, 1], c=colors[k], lw=.5, linestyle='-') axes[1, 1].set_xlabel(r'$x$') axes[1, 1].set_ylabel(r'$y_2$') axes[0, 0].plot(X, np.dot(np.dot(K, c), A)[:, 0], c='k', lw=.5, label='K') axes[0, 1].plot(X, np.dot(np.dot(K, c), A)[:, 1], c='k', lw=.5, label='K') axes[1, 0].plot(X, np.dot(np.dot(K, c), A)[:, 0], c='k', lw=.5, label='K') axes[1, 1].plot(X, np.dot(np.dot(K, c), A)[:, 1], c='k', lw=.5, label='K') axes[0, 0].set_title(r'$\widetilde{K}u \approx Ku$, realization 1', x=1.1) axes[1, 0].set_title(r'$\widetilde{K}u \approx Ku$, realization 2', x=1.1) for xx in axes.ravel(): xx.legend(loc=4) createColorbar(1, D, f, axes) plt.savefig('not_Mercer.pgf', bbox_inches='tight') if __name__ == "__main__": main()
RomainBrault/JMLR-ORFF
src/not_mercer.py
Python
unlicense
3,271
[ "Gaussian" ]
5f457b893bf84567d9eb939ad0a3e39be8421f35114b05f9ee8d5c6fb3055917
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import math import unittest from collections import namedtuple from gi.repository import GLib from gi.repository import Hkl from numpy import (array, cross, dot, empty, hstack, reshape, vstack) from numpy.linalg import inv, norm ######### # Types # ######### Detector = namedtuple('Detector', ['type']) Diffractometer = namedtuple('Diffractometer', ['dtype', 'sample', 'detector', 'source', 'engine']) Engine = namedtuple('Engine', ['name', 'mode']) Lattice = namedtuple('Lattice', ['a', 'b', 'c', 'alpha', 'beta', 'gamma']) Reflection = namedtuple('Reflection', ['hkl', 'values']) Sample = namedtuple('Sample', ['lattice', 'or0', 'or1', 'ux', 'uy', 'uz']) Source = namedtuple('Source', ['wavelength', 'energy']) HklDiffractometer = namedtuple('HklDiffractometer', ['sample', 'detector', 'geometry', 'engines']) ################## # Helper methods # ################## def hkl_matrix_to_numpy(m): M = empty((3, 3)) for i in range(3): for j in range(3): M[i, j] = m.get(i, j) return M def from_numpy_to_hkl_vector(v): V = Hkl.Vector() V.init(v[0], v[1], v[2]) return V ######################### # Hkl Type constructors # ######################### def new_hkl_sample(config): # sample sample = Hkl.Sample.new("test") lattice = Hkl.Lattice.new(config.lattice.a, config.lattice.b, config.lattice.c, math.radians(config.lattice.alpha), math.radians(config.lattice.beta), math.radians(config.lattice.gamma)) sample.lattice_set(lattice) parameter = sample.ux_get() parameter.value_set(config.ux, Hkl.UnitEnum.USER) sample.ux_set(parameter) parameter = sample.uy_get() parameter.value_set(config.uy, Hkl.UnitEnum.USER) sample.uy_set(parameter) parameter = sample.uz_get() parameter.value_set(config.uz, Hkl.UnitEnum.USER) sample.uz_set(parameter) return sample def new_hkl_detector(config): return Hkl.Detector.factory_new(config.type) def new_hkl_geometry(dtype, wavelength, init_values): factory = Hkl.factories()[dtype] geometry = 
factory.create_new_geometry() geometry.axis_values_set(init_values, Hkl.UnitEnum.USER) geometry.wavelength_set(wavelength, Hkl.UnitEnum.USER) return geometry def new_hkl_engines(dtype): factory = Hkl.factories()[dtype] engines = factory.create_new_engine_list() return engines def new_hkl_diffractometer(config): sample = new_hkl_sample(config.sample) detector = new_hkl_detector(config.detector) # add reflection or0 geometry = new_hkl_geometry(config.dtype, config.source.wavelength, config.sample.or0.values) or0 = sample.add_reflection(geometry, detector, config.sample.or0.hkl[0], config.sample.or0.hkl[1], config.sample.or0.hkl[2]) # add reflection or1 geometry.axis_values_set(config.sample.or1.values, Hkl.UnitEnum.USER) or1 = sample.add_reflection(geometry, detector, config.sample.or1.hkl[0], config.sample.or1.hkl[1], config.sample.or1.hkl[2]) # compute UB with or0 and or1 # sample.compute_UB_busing_levy(or0, or1) # UB = hkl_matrix_to_numpy(sample.UB_get()) # compute angles for reciprocal lattice vector h, k, l engines = new_hkl_engines(config.dtype) # set the engine mode engine = engines.engine_get_by_name(config.engine.name) engine.current_mode_set(config.engine.mode) return HklDiffractometer(sample, geometry, detector, engines) ################# # Config parser # ################# def find(filename): if os.path.exists(filename): return filename else: datadir = os.getenv('DATADIR') if datadir: filename = os.path.join(datadir, filename) if os.path.exists(filename): return filename else: raise Exception("Can not find: " + filename) else: raise Exception("Cannot find: " + filename) def parse_reflection(line): ref = line.split() hkl = [float(x) for x in ref[2:5]] angles = [float(x) for x in ref[7:13]] return Reflection(hkl, angles) def parse(filename, dtype): with open(filename, 'r') as f: for line in f: if str(line).find('Wavelength') != -1: wavelength = float(line.split()[1]) energy = 12.39842 / wavelength if str(line).find('A') != -1 and str(line).find('B') != -1 
and str(line).find('C') != -1: abc = line.split() a = float(abc[1]) b = float(abc[3]) c = float(abc[5]) if str(line).find('Alpha') != -1 and str(line).find('Beta') != -1 and str(line).find('Gamma') != -1: abg = line.split() alpha = float(abg[1]) beta = float(abg[3]) gamma = float(abg[5]) if str(line).find('R0') != -1: or0 = parse_reflection(line) if str(line).find('R1') != -1: or1 = parse_reflection(line) if str(line).find('Ux') != -1 and str(line).find('Uy') != -1 and str(line).find('Uz') != -1: uxuyuz = line.split() ux = float(uxuyuz[1]) uy = float(uxuyuz[3]) uz = float(uxuyuz[5]) if str(line).find('Engine') != -1: engine_name = line.split()[1] if str(line).find('Mode') != -1: mode_name = line.split()[1] lattice = Lattice(a, b, c, alpha, beta, gamma) sample = Sample(lattice, or0, or1, ux, uy, uz) detector = Detector(0) source = Source(wavelength, energy) engine = Engine(engine_name, mode_name) return Diffractometer(dtype, sample, detector, source, engine) ###################### # hight level method # ###################### def ca(config, hkl, engine_name='hkl'): sample, geometry, detector, engines = new_hkl_diffractometer(config) engines.init(geometry, detector, sample) engine = engines.engine_get_by_name(engine_name) solutions = engine.pseudo_axis_values_set(hkl, Hkl.UnitEnum.USER) first_solution = solutions.items()[0] values = first_solution.geometry_get().axis_values_get(Hkl.UnitEnum.USER) return Reflection(hkl, values) def get_UB(config): sample, geometry, detector, engines = new_hkl_diffractometer(config) return hkl_matrix_to_numpy(sample.UB_get()) def get_R_and_P(config, values): sample, geometry, detector, engines = new_hkl_diffractometer(config) geometry.axis_values_set(values, Hkl.UnitEnum.USER) R = hkl_matrix_to_numpy(geometry.sample_rotation_get(sample).to_matrix()) P = hkl_matrix_to_numpy(geometry.detector_rotation_get(detector).to_matrix()) return R, P ############## # Unit tests # ############## class Polarisation(unittest.TestCase): def 
test_petraIII(self): dtype = "E6C" # RUBh = kf - ki = (P ki - ki) = (P - I) ki config = parse(find('crystal.ini'), dtype) print config gaga = ca(config, [0.5, 14.5, 0.43]) print gaga UB = get_UB(config) print "UB: " print UB # the hkl vectors a*, b*, c* expressed in the laboratory basis for the current Q # transformation matrix T reciprocal space --> laboratory # values_w = [0, 34.16414, 79.52420, 0, 0, 38.29633] # values_w = [0, 35.06068, 80.78517, 0, 0, 43.91934] # values_w = [0, 36.45961, 81.75533, 0, 0, 49.7139] # values_w = [0, 38.24551, 82.52447, 0, 0, 55.68957] # values_w = [0, 40.34955, 83.14900, 0, 0, 61.86603] # values_w = [0, 42.73321, 83.66607, 0, 0, 68.27333] # values_w = [0, 45.37981, 84.10117, 0, 0, 74.95312] # values_w = [0, 48.29079, 84.47230, 0, 0, 81.96226] values_w = [0, 51.48568, 84.79259, 0, 0, 89.37964] # mu, omega, chi, phi, gamma, delta R, P = get_R_and_P(config, values_w) print "R: " print R print "P: " print P RUB = dot(R, UB) print "RUB: " print RUB astar = dot(RUB, [1, 0, 0]) bstar = dot(RUB, [0, 1, 0]) cstar = dot(RUB, [0, 0, 1]) # transformation matrix: reciprocal space --> laboratory T = hstack((reshape(astar, (3, 1)), reshape(bstar, (3, 1)), reshape(cstar, (3, 1)))) Tbis = vstack((reshape(astar, (1, 3)), reshape(bstar, (1, 3)), reshape(cstar, (1, 3)))) # transformation matrix: laboratory --> reciprocal space Tinv = inv(T) print '' # print 'cstar in laboratory frame :',cstar # print 'cstar in laboratory frame from T:',dot(T, hkl) # print 'cstar in rec. 
space from Tinv :',dot(Tinv, dot(T, hkl)) # compute kf ki = array([1, 0, 0]) * math.pi * 2 / config.source.wavelength kf = dot(P, ki) # compute Q Q = kf - ki print '' print 'Energy (keV):', config.source print 'Lattice parameters:', config.sample.lattice print '1st orienting reflection:', config.sample.or0.hkl, 'with angles: ', config.sample.or0.values print '2nd orienting reflection:', config.sample.or1.hkl, 'with angles: ', config.sample.or1.values print '' print 'UB matrix:' print UB print '' print 'Transformation matrix T(reciprocal space)--> laboratory frame:' print T, print '' print '' print 'Transformation matrix T(laboratory frame)--> reciprocal space :' print Tinv # compute Q # hkl = [0.5, 6.5, 0.43] # hkl = [0.5, 7.5, 0.43] # hkl = [0.5, 8.5, 0.43] # hkl = [0.5, 9.5, 0.43] # hkl = [0.5, 10.5, 0.43] # hkl = [0.5, 11.5, 0.43] # hkl = [0.5, 12.5, 0.43] # hkl = [0.5, 13.5, 0.43] hkl = [0.5, 14.5, 0.43] ''' print '' print 'Q in lab. frame from code :', dot(dot(R, UB), hkl), ', normalized:',(dot(dot(R, UB), hkl))/norm(dot(dot(R, UB), hkl)), ', norm:', norm(dot(dot(R, UB), hkl)) print 'Q in lab. frame from T :', dot(T, hkl), ', normalized:', (dot(T, hkl))/norm(dot(T, hkl)), ', norm:', norm(dot(T, hkl)) print 'Q in lab. frame from ki, kf :', Q, ', normalized:', Q/norm(Q),', norm:', norm(Q) print '' print 'Q in rec. space from Tinv of T :', dot(Tinv, dot(T, hkl)) print '' print 'difference factor:',(norm(dot(T, hkl)))/(norm(Q)) print '' print 'kf',kf,', norm:',norm(kf) print 'ki',ki,', norm:',norm(ki) ''' print '' print 'Q in rec. 
space from Tinv of T :', dot(Tinv, dot(T, hkl)) print '' # # compute u1, u2, u3 in reciprocal space coordinates # u1,u2,u3 in laboratory frame u1xyz = ki+kf u2xyz = cross(ki, kf) u3xyz = ki-kf # print '(u1,u2,u3) in laboratory frame:',u1xyz,u2xyz,u3xyz # u1,u2,u3 in reciprocal space u1 = dot(Tinv, u1xyz) / norm(dot(Tinv, u1xyz)) u2 = dot(Tinv, u2xyz) / norm(dot(Tinv, u2xyz)) u3 = dot(Tinv, u3xyz) / norm(dot(Tinv, u3xyz)) u1 = dot(Tinv, u1xyz) u2 = dot(Tinv, u2xyz) u3 = dot(Tinv, u3xyz) print '(u1,u2,u3) in reciprocal space from Tinv, unnormalized:',\ u1, u2, u3 print '(u1,u2,u3) in reciprocal space from Tinv, normalized to 1:',\ dot(Tinv, u1xyz) / norm(dot(Tinv, u1xyz)), \ dot(Tinv, u2xyz) / norm(dot(Tinv, u2xyz)), \ dot(Tinv, u3xyz) / norm(dot(Tinv, u3xyz)) # print '(u1,u2,u3) in reciprocal space from Tinv:', \ # dot(Tinv, u1xyz), dot(Tinv, u2xyz), dot(Tinv, u3xyz) print '' # TRANSFORMATION MATRIX reciprocal lattice basis to u1 u2 u3 basis ABC = hstack((reshape([1, 0, 0], (3, 1)), reshape([0, 1, 0], (3, 1)), reshape([0, 0, 1], (3, 1)))) U = hstack((reshape(u1, (3, 1)), reshape(u2, (3, 1)), reshape(u3, (3, 1)))) M = dot(ABC, inv(U)) print 'Transformation matrix reciprocal lattice basis to u1 u2 u3 basis:' print M # keep in order to pass the test self.assertTrue(True) if __name__ == '__main__': unittest.main(verbosity=2)
picca/hkl
tests/bindings/polarisation.py
Python
gpl-3.0
13,010
[ "CRYSTAL" ]
02e23791bd2de73a1182c33d19a7d667c8c33428171c25751a8b143dee929792
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Or visit http://www.gnu.org/licenses/gpl.html. # ''' Multiprocesser Worker ''' __author__ = 'wildi.markus@bluewin.ch' import numpy as np from multiprocessing import Process,Event,current_process,Queue class Worker(Process): def __init__(self, work_queue=None,cmd_queue=None,next_queue=None,lock=None, dbg=None, lg=None, anl=None): Process.__init__(self) self.exit=Event() self.work_queue=work_queue self.cmd_queue=cmd_queue self.next_queue=next_queue self.anl=anl self.lock=lock self.dbg=dbg self.lg=lg def append_position(self,sky=None): self.lock.acquire() self.anl.append_position(sky=sky,analyzed=True) self.lock.release() def run(self): sky=acq_image_fn=None while not self.exit.is_set(): if self.cmd_queue is not None: try: cmd=self.cmd_queue.get() # ToDO Empty is not correct except Queue.Empty: self.lg.info('{}: cmd queue empty, returning'.format(current_process().name)) return # if isinstance(cmd, str): if 'dl' in cmd: self.lg.info('{}: got \'Delete\', delete last: {}'.format(current_process().name,sky.image_fn)) acq_image_fn=sky.image_fn sky=None elif 'q' in cmd: self.append_position(sky=sky) self.lg.error('{}: got {}, call shutdown'.format(current_process().name, cmd)) self.shutdown() return else: self.lg.warn('{}: got {}, 
continue'.format(current_process().name, cmd)) if sky: self.append_position(sky=sky) elif acq_image_fn is not None: self.lg.info('{}: not storing {}'.format(current_process().name, acq_image_fn)) acq_image_fn=None sky=None try: sky=self.work_queue.get() except Queue.Empty: self.lg.info('{}: queue empty, returning'.format(current_process().name)) return except Exception as e: self.lg.error('{}: queue error: {}, returning'.format(current_process().name, e)) return # 'STOP' if isinstance(sky, str): self.lg.error('{}: got {}, call shutdown_on_STOP'.format(current_process().name, sky)) self.shutdown_on_STOP() return # analysis part self.anl.catalog_to_apparent(sky=sky,pcn=current_process().name) x,y=self.anl.sextract(sky=sky,pcn=current_process().name) if x is not None and y is not None: self.anl.xy2lonlat_apparent(px=x,py=y,sky=sky,pcn=current_process().name) self.anl.astrometry(sky=sky,pcn=current_process().name) # ToDo why this condition if self.cmd_queue is None: self.append_position(sky=sky) else: self.next_queue.put('c') # end analysis part self.lg.info('{}: got shutdown event, exiting'.format(current_process().name)) def shutdown(self): if self.next_queue is not None: self.next_queue.put('c') self.lg.info('{}: shutdown event, initiate exit'.format(current_process().name)) self.exit.set() return def shutdown_on_STOP(self): if self.next_queue is not None: self.next_queue.put('c') self.lg.info('{}: shutdown on STOP, initiate exit'.format(current_process().name)) self.exit.set() return
RTS2/rts2
scripts/u_point/u_point/worker.py
Python
lgpl-3.0
4,078
[ "VisIt" ]
759d2ffeb0943da108f34bf853a2d564e82128d8b12caae807709a6359b2664c
# UNICODE CHARACTER DATABASE # This file was generated with the command: # ./generate_unicodedb.py --unidata_version=6.0.0 --output=unicodedb_6_0_0 --base=unicodedb_5_2_0 from rpython.rlib.rarithmetic import r_longlong import unicodedb_5_2_0 as base_mod version = '6.0.0' _names = { 128673: 'AERIAL TRAMWAY', 9200: 'ALARM CLOCK', 128769: 'ALCHEMICAL SYMBOL FOR AIR', 128874: 'ALCHEMICAL SYMBOL FOR ALEMBIC', 128822: 'ALCHEMICAL SYMBOL FOR ALKALI', 128823: 'ALCHEMICAL SYMBOL FOR ALKALI-2', 128837: 'ALCHEMICAL SYMBOL FOR ALUM', 128859: 'ALCHEMICAL SYMBOL FOR AMALGAM', 128811: 'ALCHEMICAL SYMBOL FOR ANTIMONY ORE', 128774: 'ALCHEMICAL SYMBOL FOR AQUA REGIA', 128775: 'ALCHEMICAL SYMBOL FOR AQUA REGIA-2', 128776: 'ALCHEMICAL SYMBOL FOR AQUA VITAE', 128777: 'ALCHEMICAL SYMBOL FOR AQUA VITAE-2', 128773: 'ALCHEMICAL SYMBOL FOR AQUAFORTIS', 128826: 'ALCHEMICAL SYMBOL FOR ARSENIC', 128855: 'ALCHEMICAL SYMBOL FOR ASHES', 128829: 'ALCHEMICAL SYMBOL FOR AURIPIGMENT', 128875: 'ALCHEMICAL SYMBOL FOR BATH OF MARY', 128876: 'ALCHEMICAL SYMBOL FOR BATH OF VAPOURS', 128830: 'ALCHEMICAL SYMBOL FOR BISMUTH ORE', 128783: 'ALCHEMICAL SYMBOL FOR BLACK SULFUR', 128834: 'ALCHEMICAL SYMBOL FOR BORAX', 128835: 'ALCHEMICAL SYMBOL FOR BORAX-2', 128836: 'ALCHEMICAL SYMBOL FOR BORAX-3', 128857: 'ALCHEMICAL SYMBOL FOR BRICK', 128848: 'ALCHEMICAL SYMBOL FOR CADUCEUS', 128844: 'ALCHEMICAL SYMBOL FOR CALX', 128846: 'ALCHEMICAL SYMBOL FOR CAPUT MORTUUM', 128787: 'ALCHEMICAL SYMBOL FOR CINNABAR', 128805: 'ALCHEMICAL SYMBOL FOR COPPER ANTIMONIATE', 128800: 'ALCHEMICAL SYMBOL FOR COPPER ORE', 128803: 'ALCHEMICAL SYMBOL FOR CROCUS OF COPPER', 128804: 'ALCHEMICAL SYMBOL FOR CROCUS OF COPPER-2', 128798: 'ALCHEMICAL SYMBOL FOR CROCUS OF IRON', 128869: 'ALCHEMICAL SYMBOL FOR CRUCIBLE', 128870: 'ALCHEMICAL SYMBOL FOR CRUCIBLE-2', 128871: 'ALCHEMICAL SYMBOL FOR CRUCIBLE-3', 128872: 'ALCHEMICAL SYMBOL FOR CRUCIBLE-4', 128873: 'ALCHEMICAL SYMBOL FOR CRUCIBLE-5', 128880: 'ALCHEMICAL SYMBOL FOR DAY-NIGHT', 128865: 
'ALCHEMICAL SYMBOL FOR DISSOLVE', 128866: 'ALCHEMICAL SYMBOL FOR DISSOLVE-2', 128864: 'ALCHEMICAL SYMBOL FOR DISTILL', 128771: 'ALCHEMICAL SYMBOL FOR EARTH', 128770: 'ALCHEMICAL SYMBOL FOR FIRE', 128794: 'ALCHEMICAL SYMBOL FOR GOLD', 128841: 'ALCHEMICAL SYMBOL FOR GUM', 128882: 'ALCHEMICAL SYMBOL FOR HALF DRAM', 128883: 'ALCHEMICAL SYMBOL FOR HALF OUNCE', 128854: 'ALCHEMICAL SYMBOL FOR HORSE DUNG', 128878: 'ALCHEMICAL SYMBOL FOR HOUR', 128796: 'ALCHEMICAL SYMBOL FOR IRON ORE', 128797: 'ALCHEMICAL SYMBOL FOR IRON ORE-2', 128801: 'ALCHEMICAL SYMBOL FOR IRON-COPPER ORE', 128810: 'ALCHEMICAL SYMBOL FOR LEAD ORE', 128851: 'ALCHEMICAL SYMBOL FOR LODESTONE', 128824: 'ALCHEMICAL SYMBOL FOR MARCASITE', 128784: 'ALCHEMICAL SYMBOL FOR MERCURY SUBLIMATE', 128785: 'ALCHEMICAL SYMBOL FOR MERCURY SUBLIMATE-2', 128786: 'ALCHEMICAL SYMBOL FOR MERCURY SUBLIMATE-3', 128881: 'ALCHEMICAL SYMBOL FOR MONTH', 128879: 'ALCHEMICAL SYMBOL FOR NIGHT', 128789: 'ALCHEMICAL SYMBOL FOR NITRE', 128838: 'ALCHEMICAL SYMBOL FOR OIL', 128782: 'ALCHEMICAL SYMBOL FOR PHILOSOPHERS SULFUR', 128856: 'ALCHEMICAL SYMBOL FOR POT ASHES', 128843: 'ALCHEMICAL SYMBOL FOR POWDER', 128858: 'ALCHEMICAL SYMBOL FOR POWDERED BRICK', 128863: 'ALCHEMICAL SYMBOL FOR PRECIPITATE', 128867: 'ALCHEMICAL SYMBOL FOR PURIFY', 128868: 'ALCHEMICAL SYMBOL FOR PUTREFACTION', 128833: 'ALCHEMICAL SYMBOL FOR QUICK LIME', 128768: 'ALCHEMICAL SYMBOL FOR QUINTESSENCE', 128827: 'ALCHEMICAL SYMBOL FOR REALGAR', 128828: 'ALCHEMICAL SYMBOL FOR REALGAR-2', 128818: 'ALCHEMICAL SYMBOL FOR REGULUS', 128816: 'ALCHEMICAL SYMBOL FOR REGULUS OF ANTIMONY', 128817: 'ALCHEMICAL SYMBOL FOR REGULUS OF ANTIMONY-2', 128799: 'ALCHEMICAL SYMBOL FOR REGULUS OF IRON', 128819: 'ALCHEMICAL SYMBOL FOR REGULUS-2', 128820: 'ALCHEMICAL SYMBOL FOR REGULUS-3', 128821: 'ALCHEMICAL SYMBOL FOR REGULUS-4', 128877: 'ALCHEMICAL SYMBOL FOR RETORT', 128792: 'ALCHEMICAL SYMBOL FOR ROCK SALT', 128793: 'ALCHEMICAL SYMBOL FOR ROCK SALT-2', 128825: 'ALCHEMICAL SYMBOL FOR 
SAL-AMMONIAC', 128788: 'ALCHEMICAL SYMBOL FOR SALT', 128813: 'ALCHEMICAL SYMBOL FOR SALT OF ANTIMONY', 128806: 'ALCHEMICAL SYMBOL FOR SALT OF COPPER ANTIMONIATE', 128847: 'ALCHEMICAL SYMBOL FOR SCEPTER OF JOVE', 128795: 'ALCHEMICAL SYMBOL FOR SILVER', 128852: 'ALCHEMICAL SYMBOL FOR SOAP', 128839: 'ALCHEMICAL SYMBOL FOR SPIRIT', 128850: 'ALCHEMICAL SYMBOL FOR STARRED TRIDENT', 128860: 'ALCHEMICAL SYMBOL FOR STRATUM SUPER STRATUM', 128861: 'ALCHEMICAL SYMBOL FOR STRATUM SUPER STRATUM-2', 128812: 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF ANTIMONY', 128802: 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF COPPER', 128814: 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF SALT OF ANTIMONY', 128807: 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF SALT OF COPPER', 128862: 'ALCHEMICAL SYMBOL FOR SUBLIMATION', 128781: 'ALCHEMICAL SYMBOL FOR SULFUR', 128831: 'ALCHEMICAL SYMBOL FOR TARTAR', 128832: 'ALCHEMICAL SYMBOL FOR TARTAR-2', 128809: 'ALCHEMICAL SYMBOL FOR TIN ORE', 128840: 'ALCHEMICAL SYMBOL FOR TINCTURE', 128849: 'ALCHEMICAL SYMBOL FOR TRIDENT', 128845: 'ALCHEMICAL SYMBOL FOR TUTTY', 128853: 'ALCHEMICAL SYMBOL FOR URINE', 128808: 'ALCHEMICAL SYMBOL FOR VERDIGRIS', 128778: 'ALCHEMICAL SYMBOL FOR VINEGAR', 128815: 'ALCHEMICAL SYMBOL FOR VINEGAR OF ANTIMONY', 128779: 'ALCHEMICAL SYMBOL FOR VINEGAR-2', 128780: 'ALCHEMICAL SYMBOL FOR VINEGAR-3', 128790: 'ALCHEMICAL SYMBOL FOR VITRIOL', 128791: 'ALCHEMICAL SYMBOL FOR VITRIOL-2', 128772: 'ALCHEMICAL SYMBOL FOR WATER', 128842: 'ALCHEMICAL SYMBOL FOR WAX', 128126: 'ALIEN MONSTER', 128657: 'AMBULANCE', 127944: 'AMERICAN FOOTBALL', 128162: 'ANGER SYMBOL', 128544: 'ANGRY FACE', 128028: 'ANT', 128246: 'ANTENNA WITH BARS', 128260: 'ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS', 1568: 'ARABIC LETTER KASHMIRI YEH', 64434: 'ARABIC SYMBOL DOT ABOVE', 64435: 'ARABIC SYMBOL DOT BELOW', 64444: 'ARABIC SYMBOL DOUBLE VERTICAL BAR BELOW', 64442: 'ARABIC SYMBOL FOUR DOTS ABOVE', 64443: 'ARABIC SYMBOL FOUR DOTS BELOW', 64447: 'ARABIC SYMBOL RING', 64448: 'ARABIC SYMBOL SMALL 
TAH ABOVE', 64449: 'ARABIC SYMBOL SMALL TAH BELOW', 64438: 'ARABIC SYMBOL THREE DOTS ABOVE', 64439: 'ARABIC SYMBOL THREE DOTS BELOW', 64440: 'ARABIC SYMBOL THREE DOTS POINTING DOWNWARDS ABOVE', 64441: 'ARABIC SYMBOL THREE DOTS POINTING DOWNWARDS BELOW', 64436: 'ARABIC SYMBOL TWO DOTS ABOVE', 64437: 'ARABIC SYMBOL TWO DOTS BELOW', 64445: 'ARABIC SYMBOL TWO DOTS VERTICALLY ABOVE', 64446: 'ARABIC SYMBOL TWO DOTS VERTICALLY BELOW', 1631: 'ARABIC WAVY HAMZA BELOW', 128667: 'ARTICULATED LORRY', 127912: 'ARTIST PALETTE', 128562: 'ASTONISHED FACE', 9954: 'ASTRONOMICAL SYMBOL FOR URANUS', 128095: 'ATHLETIC SHOE', 127814: 'AUBERGINE', 127975: 'AUTOMATED TELLER MACHINE', 128663: 'AUTOMOBILE', 128118: 'BABY', 128124: 'BABY ANGEL', 127868: 'BABY BOTTLE', 128036: 'BABY CHICK', 128700: 'BABY SYMBOL', 128281: 'BACK WITH LEFTWARDS ARROW ABOVE', 128043: 'BACTRIAN CAMEL', 128708: 'BAGGAGE CLAIM', 127880: 'BALLOON', 92217: 'BAMUM LETTER PHASE-A FIRI', 92161: 'BAMUM LETTER PHASE-A GBIEE FON', 92193: 'BAMUM LETTER PHASE-A GHEUAEGHEUAE', 92188: 'BAMUM LETTER PHASE-A GHEUAERAE', 92199: 'BAMUM LETTER PHASE-A KAFA', 92240: 'BAMUM LETTER PHASE-A KAQ', 92211: 'BAMUM LETTER PHASE-A KET', 92179: 'BAMUM LETTER PHASE-A KEUKEUTNDA', 92219: 'BAMUM LETTER PHASE-A KPOQ', 92213: 'BAMUM LETTER PHASE-A KUOQ', 92183: 'BAMUM LETTER PHASE-A LAPAQ', 92184: 'BAMUM LETTER PHASE-A LET KUT', 92216: 'BAMUM LETTER PHASE-A LOMMAE', 92243: 'BAMUM LETTER PHASE-A LU', 92207: 'BAMUM LETTER PHASE-A LUAEP', 92186: 'BAMUM LETTER PHASE-A MAEKEUP', 92238: 'BAMUM LETTER PHASE-A MAEM', 92171: 'BAMUM LETTER PHASE-A MAEMBGBIEE', 92203: 'BAMUM LETTER PHASE-A MAEMKPEN', 92174: 'BAMUM LETTER PHASE-A MAEMVEUX', 92210: 'BAMUM LETTER PHASE-A MAENYI', 92230: 'BAMUM LETTER PHASE-A MAESI', 92175: 'BAMUM LETTER PHASE-A MANSUAE', 92221: 'BAMUM LETTER PHASE-A MAP PIEET', 92232: 'BAMUM LETTER PHASE-A MBANYI', 92246: 'BAMUM LETTER PHASE-A MBAQ', 92197: 'BAMUM LETTER PHASE-A MEUNJOMNDEUQ', 92196: 'BAMUM LETTER PHASE-A MGBASA', 92190: 'BAMUM 
LETTER PHASE-A MON NGGEUAET', 92214: 'BAMUM LETTER PHASE-A MOOMEUT', 92198: 'BAMUM LETTER PHASE-A MOOMPUQ', 92176: 'BAMUM LETTER PHASE-A MVEUAENGAM', 92164: 'BAMUM LETTER PHASE-A NAA MFON', 92245: 'BAMUM LETTER PHASE-A NAQ', 92201: 'BAMUM LETTER PHASE-A NDA LEERAEWA', 92212: 'BAMUM LETTER PHASE-A NDAANGGEUAET', 92244: 'BAMUM LETTER PHASE-A NEN', 92173: 'BAMUM LETTER PHASE-A NGANGU', 92229: 'BAMUM LETTER PHASE-A NGGEN', 92160: 'BAMUM LETTER PHASE-A NGKUE MFON', 92182: 'BAMUM LETTER PHASE-A NGKUENZEUM', 92204: 'BAMUM LETTER PHASE-A NIKA', 92231: 'BAMUM LETTER PHASE-A NJAM', 92227: 'BAMUM LETTER PHASE-A NKAARAE', 92180: 'BAMUM LETTER PHASE-A NKINDI', 92241: 'BAMUM LETTER PHASE-A NSHA', 92237: 'BAMUM LETTER PHASE-A NSHIEE', 92223: 'BAMUM LETTER PHASE-A NTAP', 92185: 'BAMUM LETTER PHASE-A NTAP MFAA', 92194: 'BAMUM LETTER PHASE-A NTAP NTAA', 92178: 'BAMUM LETTER PHASE-A NTOQPEN', 92233: 'BAMUM LETTER PHASE-A NYET', 92239: 'BAMUM LETTER PHASE-A NYI', 92225: 'BAMUM LETTER PHASE-A NYIT MONGKEUAEQ', 92167: 'BAMUM LETTER PHASE-A NZA MFON', 92191: 'BAMUM LETTER PHASE-A NZUN MEUT', 92200: 'BAMUM LETTER PHASE-A PA LEERAEWA', 92236: 'BAMUM LETTER PHASE-A PAAM', 92226: 'BAMUM LETTER PHASE-A PAARAE', 92189: 'BAMUM LETTER PHASE-A PAMSHAE', 92187: 'BAMUM LETTER PHASE-A PASHAE', 92202: 'BAMUM LETTER PHASE-A PET', 92163: 'BAMUM LETTER PHASE-A PON MFON PIPAEMBA', 92162: 'BAMUM LETTER PHASE-A PON MFON PIPAEMGBIEE', 92170: 'BAMUM LETTER PHASE-A PON PA NJI PIPAEMBA', 92169: 'BAMUM LETTER PHASE-A PON PA NJI PIPAEMGBIEE', 92205: 'BAMUM LETTER PHASE-A PUP', 92218: 'BAMUM LETTER PHASE-A ROM', 92177: 'BAMUM LETTER PHASE-A SEUNYAM', 92168: 'BAMUM LETTER PHASE-A SHINDA PA NJI', 92222: 'BAMUM LETTER PHASE-A SHIRAE', 92224: 'BAMUM LETTER PHASE-A SHOQ NSHUT YUM', 92165: 'BAMUM LETTER PHASE-A SHUENSHUET', 92215: 'BAMUM LETTER PHASE-A SHUM', 92195: 'BAMUM LETTER PHASE-A SISA', 92208: 'BAMUM LETTER PHASE-A SONJAM', 92220: 'BAMUM LETTER PHASE-A SOQ', 92235: 'BAMUM LETTER PHASE-A SOT', 92181: 'BAMUM 
LETTER PHASE-A SUU', 92234: 'BAMUM LETTER PHASE-A TEUAEN', 92209: 'BAMUM LETTER PHASE-A TEUTEUWEN', 92166: 'BAMUM LETTER PHASE-A TITA MFON', 92172: 'BAMUM LETTER PHASE-A TU MAEMBA', 92206: 'BAMUM LETTER PHASE-A TUAEP', 92192: 'BAMUM LETTER PHASE-A U YUQ NAE', 92228: 'BAMUM LETTER PHASE-A UNKNOWN', 92242: 'BAMUM LETTER PHASE-A VEE', 92293: 'BAMUM LETTER PHASE-B FEE', 92291: 'BAMUM LETTER PHASE-B FEUX', 92271: 'BAMUM LETTER PHASE-B GHEUGHEN', 92262: 'BAMUM LETTER PHASE-B GHEUGHEUAEM', 92256: 'BAMUM LETTER PHASE-B KAM', 92294: 'BAMUM LETTER PHASE-B KEUAEM', 92270: 'BAMUM LETTER PHASE-B KEUPUQ', 92272: 'BAMUM LETTER PHASE-B KEUYEUX', 92253: 'BAMUM LETTER PHASE-B KIEEM', 92301: 'BAMUM LETTER PHASE-B KIQ', 92273: 'BAMUM LETTER PHASE-B LAANAE', 92259: 'BAMUM LETTER PHASE-B LAM NSHUT NYAM', 92297: 'BAMUM LETTER PHASE-B LET', 92251: 'BAMUM LETTER PHASE-B LOM NTEUM', 92300: 'BAMUM LETTER PHASE-B MA', 92295: 'BAMUM LETTER PHASE-B MA NJEUAENA', 92296: 'BAMUM LETTER PHASE-B MA NJUQA', 92252: 'BAMUM LETTER PHASE-B MBA MAELEE', 92255: 'BAMUM LETTER PHASE-B MBAARAE', 92286: 'BAMUM LETTER PHASE-B MBEURI', 92268: 'BAMUM LETTER PHASE-B MBIT MBAAKET', 92292: 'BAMUM LETTER PHASE-B MBUOQ', 92281: 'BAMUM LETTER PHASE-B MEUQ', 92290: 'BAMUM LETTER PHASE-B MEUT NGGEET', 92284: 'BAMUM LETTER PHASE-B MFIYAQ', 92267: 'BAMUM LETTER PHASE-B MFON TEUAEQ', 92287: 'BAMUM LETTER PHASE-B MONTIEEN', 92261: 'BAMUM LETTER PHASE-B NDU NJAA', 92298: 'BAMUM LETTER PHASE-B NGGAAM', 92277: 'BAMUM LETTER PHASE-B NGGEU MBU', 92282: 'BAMUM LETTER PHASE-B NGGUOQ', 92283: 'BAMUM LETTER PHASE-B NGGUOQ LARGE', 92276: 'BAMUM LETTER PHASE-B NGKINDI MVOP', 92302: 'BAMUM LETTER PHASE-B NGOM', 92299: 'BAMUM LETTER PHASE-B NSEN', 92247: 'BAMUM LETTER PHASE-B NSHUET', 92260: 'BAMUM LETTER PHASE-B NTIEE SHEUOQ', 92288: 'BAMUM LETTER PHASE-B NYAEMAE', 92269: 'BAMUM LETTER PHASE-B NYI NTEUM', 92274: 'BAMUM LETTER PHASE-B PARUM', 92257: 'BAMUM LETTER PHASE-B PEESHI', 92263: 'BAMUM LETTER PHASE-B PIT', 92289: 'BAMUM LETTER 
PHASE-B PUNGAAM', 92279: 'BAMUM LETTER PHASE-B SAKEUAE', 92250: 'BAMUM LETTER PHASE-B SET TU', 92265: 'BAMUM LETTER PHASE-B SHET NJAQ', 92266: 'BAMUM LETTER PHASE-B SHEUAEQTU', 92249: 'BAMUM LETTER PHASE-B SIEE', 92285: 'BAMUM LETTER PHASE-B SUE', 92280: 'BAMUM LETTER PHASE-B TAAM', 92248: 'BAMUM LETTER PHASE-B TU MAEMGBIEE', 92264: 'BAMUM LETTER PHASE-B TU NSIEE', 92275: 'BAMUM LETTER PHASE-B VEUM', 92278: 'BAMUM LETTER PHASE-B WUAET', 92258: 'BAMUM LETTER PHASE-B YAFU LEERAEWA', 92254: 'BAMUM LETTER PHASE-B YEURAE', 92352: 'BAMUM LETTER PHASE-C BUNG', 92348: 'BAMUM LETTER PHASE-C FUE', 92312: 'BAMUM LETTER PHASE-C GBAYI', 92320: 'BAMUM LETTER PHASE-C GHAP', 92310: 'BAMUM LETTER PHASE-C GHARAE', 92329: 'BAMUM LETTER PHASE-C KAA', 92394: 'BAMUM LETTER PHASE-C KEN FATIGUE', 92393: 'BAMUM LETTER PHASE-C KEN LAW', 92361: 'BAMUM LETTER PHASE-C KET', 92321: 'BAMUM LETTER PHASE-C KEUKAQ', 92386: 'BAMUM LETTER PHASE-C KEUM', 92384: 'BAMUM LETTER PHASE-C KEUSEUX', 92319: 'BAMUM LETTER PHASE-C KEUSHEUAEP', 92328: 'BAMUM LETTER PHASE-C KPARAQ', 92373: 'BAMUM LETTER PHASE-C KUOP NKAARAE', 92366: 'BAMUM LETTER PHASE-C KUT', 92357: 'BAMUM LETTER PHASE-C LAM', 92342: 'BAMUM LETTER PHASE-C LAP', 92397: 'BAMUM LETTER PHASE-C LIQ', 92365: 'BAMUM LETTER PHASE-C LU', 92339: 'BAMUM LETTER PHASE-C MA KEUAERI', 92376: 'BAMUM LETTER PHASE-C MA NSIEE', 92382: 'BAMUM LETTER PHASE-C MAEMBA', 92363: 'BAMUM LETTER PHASE-C MAESI', 92318: 'BAMUM LETTER PHASE-C MBAA CABBAGE-TREE', 92387: 'BAMUM LETTER PHASE-C MBAA PICKET', 92383: 'BAMUM LETTER PHASE-C MBANYI', 92311: 'BAMUM LETTER PHASE-C MBEEKEET', 92354: 'BAMUM LETTER PHASE-C MBERAE', 92315: 'BAMUM LETTER PHASE-C MBEUM', 92385: 'BAMUM LETTER PHASE-C MBEUX', 92381: 'BAMUM LETTER PHASE-C MBI', 92343: 'BAMUM LETTER PHASE-C MBIRIEEN', 92326: 'BAMUM LETTER PHASE-C MBIT', 92364: 'BAMUM LETTER PHASE-C MBUAEM', 92324: 'BAMUM LETTER PHASE-C MBUE', 92344: 'BAMUM LETTER PHASE-C MGBASAQ', 92390: 'BAMUM LETTER PHASE-C MIEE', 92391: 'BAMUM LETTER PHASE-C 
MUAE', 92338: 'BAMUM LETTER PHASE-C NANSANAQ', 92396: 'BAMUM LETTER PHASE-C NAQ', 92375: 'BAMUM LETTER PHASE-C NDAM', 92378: 'BAMUM LETTER PHASE-C NDAP', 92308: 'BAMUM LETTER PHASE-C NDEUAEREE', 92349: 'BAMUM LETTER PHASE-C NDEUT', 92331: 'BAMUM LETTER PHASE-C NDIDA', 92317: 'BAMUM LETTER PHASE-C NDOMBU', 92395: 'BAMUM LETTER PHASE-C NGAQ', 92307: 'BAMUM LETTER PHASE-C NGGEN', 92362: 'BAMUM LETTER PHASE-C NGGU', 92336: 'BAMUM LETTER PHASE-C NGGUAEN NYAM', 92370: 'BAMUM LETTER PHASE-C NGGUEET', 92347: 'BAMUM LETTER PHASE-C NGGUM', 92341: 'BAMUM LETTER PHASE-C NGGUON', 92309: 'BAMUM LETTER PHASE-C NGKAQ', 92303: 'BAMUM LETTER PHASE-C NGKUE MAEMBA', 92368: 'BAMUM LETTER PHASE-C NGOM', 92356: 'BAMUM LETTER PHASE-C NJAEM', 92367: 'BAMUM LETTER PHASE-C NJAM', 92360: 'BAMUM LETTER PHASE-C NJEEEE', 92389: 'BAMUM LETTER PHASE-C NJEUX', 92333: 'BAMUM LETTER PHASE-C NJUEQ', 92350: 'BAMUM LETTER PHASE-C NSA', 92325: 'BAMUM LETTER PHASE-C NSEUAEN', 92351: 'BAMUM LETTER PHASE-C NSHAQ', 92371: 'BAMUM LETTER PHASE-C NSOM', 92374: 'BAMUM LETTER PHASE-C NSUN', 92359: 'BAMUM LETTER PHASE-C NSUOT NGOM', 92340: 'BAMUM LETTER PHASE-C NTAA', 92372: 'BAMUM LETTER PHASE-C NTEN', 92345: 'BAMUM LETTER PHASE-C NTEUNGBA', 92314: 'BAMUM LETTER PHASE-C NTU MBIT', 92313: 'BAMUM LETTER PHASE-C NYIR MKPARAQ MEUN', 92304: 'BAMUM LETTER PHASE-C NZA', 92323: 'BAMUM LETTER PHASE-C NZEUM', 92399: 'BAMUM LETTER PHASE-C PEN', 92398: 'BAMUM LETTER PHASE-C PIN', 92316: 'BAMUM LETTER PHASE-C PIRIEEN', 92355: 'BAMUM LETTER PHASE-C RU', 92380: 'BAMUM LETTER PHASE-C SETFON', 92330: 'BAMUM LETTER PHASE-C SEUX', 92392: 'BAMUM LETTER PHASE-C SHIQ', 92379: 'BAMUM LETTER PHASE-C SHUEQ', 92335: 'BAMUM LETTER PHASE-C SUAET', 92332: 'BAMUM LETTER PHASE-C TAASHAE', 92400: 'BAMUM LETTER PHASE-C TET', 92346: 'BAMUM LETTER PHASE-C TEUTEUX', 92334: 'BAMUM LETTER PHASE-C TITA YUE', 92358: 'BAMUM LETTER PHASE-C TITUAEP', 92353: 'BAMUM LETTER PHASE-C VEUAEPEN', 92337: 'BAMUM LETTER PHASE-C VEUX', 92306: 'BAMUM LETTER PHASE-C 
WANGKUOQ', 92369: 'BAMUM LETTER PHASE-C WUP', 92377: 'BAMUM LETTER PHASE-C YAA', 92327: 'BAMUM LETTER PHASE-C YEUQ', 92322: 'BAMUM LETTER PHASE-C YU MUOMAE', 92305: 'BAMUM LETTER PHASE-C YUM', 92388: 'BAMUM LETTER PHASE-C YUWOQ', 92517: 'BAMUM LETTER PHASE-D FAA', 92436: 'BAMUM LETTER PHASE-D FEUFEUAET', 92434: 'BAMUM LETTER PHASE-D GHAA', 92488: 'BAMUM LETTER PHASE-D GHEUAE', 92420: 'BAMUM LETTER PHASE-D KET', 92415: 'BAMUM LETTER PHASE-D KEUAETMEUN', 92454: 'BAMUM LETTER PHASE-D KEUM', 92431: 'BAMUM LETTER PHASE-D KEUOT MBUAE', 92460: 'BAMUM LETTER PHASE-D KEUP', 92489: 'BAMUM LETTER PHASE-D KU', 92474: 'BAMUM LETTER PHASE-D KUN', 92422: 'BAMUM LETTER PHASE-D KUOM', 92479: 'BAMUM LETTER PHASE-D KUQ', 92449: 'BAMUM LETTER PHASE-D KWAET', 92502: 'BAMUM LETTER PHASE-D KYEE', 92495: 'BAMUM LETTER PHASE-D LEEEE', 92464: 'BAMUM LETTER PHASE-D LET', 92439: 'BAMUM LETTER PHASE-D LEUAEP', 92484: 'BAMUM LETTER PHASE-D LEUM', 92406: 'BAMUM LETTER PHASE-D LIEE', 92511: 'BAMUM LETTER PHASE-D LOQ', 92446: 'BAMUM LETTER PHASE-D LUM', 92497: 'BAMUM LETTER PHASE-D M', 92482: 'BAMUM LETTER PHASE-D MAENJET', 92426: 'BAMUM LETTER PHASE-D MALEERI', 92448: 'BAMUM LETTER PHASE-D MBAA', 92515: 'BAMUM LETTER PHASE-D MBAA SEVEN', 92401: 'BAMUM LETTER PHASE-D MBUO', 92496: 'BAMUM LETTER PHASE-D MEEEE', 92478: 'BAMUM LETTER PHASE-D MEUN', 92427: 'BAMUM LETTER PHASE-D MEUT', 92458: 'BAMUM LETTER PHASE-D MFEUAE', 92424: 'BAMUM LETTER PHASE-D MFEUT', 92466: 'BAMUM LETTER PHASE-D MFIEE', 92445: 'BAMUM LETTER PHASE-D MFO', 92404: 'BAMUM LETTER PHASE-D MFON', 92442: 'BAMUM LETTER PHASE-D MGBEUN', 92444: 'BAMUM LETTER PHASE-D MGBIEE', 92438: 'BAMUM LETTER PHASE-D MGBOFUM', 92441: 'BAMUM LETTER PHASE-D MONI', 92499: 'BAMUM LETTER PHASE-D MU', 92510: 'BAMUM LETTER PHASE-D MVOP', 92471: 'BAMUM LETTER PHASE-D NDAM', 92437: 'BAMUM LETTER PHASE-D NDEE', 92425: 'BAMUM LETTER PHASE-D NDEUX', 92440: 'BAMUM LETTER PHASE-D NDON', 92465: 'BAMUM LETTER PHASE-D NGGAAM', 92409: 'BAMUM LETTER PHASE-D NGGAAMAE', 
92483: 'BAMUM LETTER PHASE-D NGGAP', 92475: 'BAMUM LETTER PHASE-D NGGEUX', 92485: 'BAMUM LETTER PHASE-D NGGUOM', 92467: 'BAMUM LETTER PHASE-D NGGWAEN', 92414: 'BAMUM LETTER PHASE-D NGKAP', 92457: 'BAMUM LETTER PHASE-D NGKEUAEQ', 92432: 'BAMUM LETTER PHASE-D NGKEURI', 92476: 'BAMUM LETTER PHASE-D NGKIEE', 92412: 'BAMUM LETTER PHASE-D NGKUN', 92435: 'BAMUM LETTER PHASE-D NGKYEE', 92507: 'BAMUM LETTER PHASE-D NI', 92418: 'BAMUM LETTER PHASE-D NJAP', 92430: 'BAMUM LETTER PHASE-D NJEUAEM', 92407: 'BAMUM LETTER PHASE-D NJEUT', 92403: 'BAMUM LETTER PHASE-D NJI', 92405: 'BAMUM LETTER PHASE-D NJIEE', 92487: 'BAMUM LETTER PHASE-D NJUEQ', 92408: 'BAMUM LETTER PHASE-D NSHEE', 92486: 'BAMUM LETTER PHASE-D NSHUT', 92447: 'BAMUM LETTER PHASE-D NSIEEP', 92459: 'BAMUM LETTER PHASE-D NSIEET', 92480: 'BAMUM LETTER PHASE-D NSUM', 92505: 'BAMUM LETTER PHASE-D NTEE', 92472: 'BAMUM LETTER PHASE-D NTEUM', 92514: 'BAMUM LETTER PHASE-D NTUU', 92503: 'BAMUM LETTER PHASE-D NU', 92410: 'BAMUM LETTER PHASE-D NYAM', 92450: 'BAMUM LETTER PHASE-D NYET', 92493: 'BAMUM LETTER PHASE-D NYI', 92463: 'BAMUM LETTER PHASE-D NYUE', 92469: 'BAMUM LETTER PHASE-D PAP', 92506: 'BAMUM LETTER PHASE-D PEE', 92462: 'BAMUM LETTER PHASE-D PEUTAE', 92461: 'BAMUM LETTER PHASE-D PIP', 92509: 'BAMUM LETTER PHASE-D PUQ', 92443: 'BAMUM LETTER PHASE-D PUUT', 92455: 'BAMUM LETTER PHASE-D RAEM', 92512: 'BAMUM LETTER PHASE-D REN MUCH', 92490: 'BAMUM LETTER PHASE-D REN OLD', 92494: 'BAMUM LETTER PHASE-D RII', 92423: 'BAMUM LETTER PHASE-D SAP', 92516: 'BAMUM LETTER PHASE-D SAQ', 92428: 'BAMUM LETTER PHASE-D SEUAEQ', 92413: 'BAMUM LETTER PHASE-D SHEE', 92417: 'BAMUM LETTER PHASE-D SHEUAE', 92501: 'BAMUM LETTER PHASE-D SHEUX', 92500: 'BAMUM LETTER PHASE-D SHII', 92508: 'BAMUM LETTER PHASE-D SHOQ', 92504: 'BAMUM LETTER PHASE-D SHU', 92452: 'BAMUM LETTER PHASE-D SOT', 92473: 'BAMUM LETTER PHASE-D SUAE', 92419: 'BAMUM LETTER PHASE-D SUE', 92498: 'BAMUM LETTER PHASE-D SUU', 92491: 'BAMUM LETTER PHASE-D TAE', 92456: 'BAMUM LETTER 
PHASE-D TEEEE', 92451: 'BAMUM LETTER PHASE-D TEUAEN', 92481: 'BAMUM LETTER PHASE-D TEUN', 92416: 'BAMUM LETTER PHASE-D TEUT', 92513: 'BAMUM LETTER PHASE-D TI', 92492: 'BAMUM LETTER PHASE-D TOQ', 92433: 'BAMUM LETTER PHASE-D TU', 92477: 'BAMUM LETTER PHASE-D TUOT', 92402: 'BAMUM LETTER PHASE-D WAP', 92411: 'BAMUM LETTER PHASE-D WUAEN', 92421: 'BAMUM LETTER PHASE-D YAEMMAE', 92429: 'BAMUM LETTER PHASE-D YEN', 92468: 'BAMUM LETTER PHASE-D YUOM', 92470: 'BAMUM LETTER PHASE-D YUOP', 92453: 'BAMUM LETTER PHASE-D YUWOQ', 92629: 'BAMUM LETTER PHASE-E A', 92603: 'BAMUM LETTER PHASE-E FA', 92673: 'BAMUM LETTER PHASE-E FAQ', 92651: 'BAMUM LETTER PHASE-E FEE', 92627: 'BAMUM LETTER PHASE-E FOM', 92626: 'BAMUM LETTER PHASE-E FU CALL', 92616: 'BAMUM LETTER PHASE-E FU I', 92661: 'BAMUM LETTER PHASE-E FU REMEDY', 92596: 'BAMUM LETTER PHASE-E FUE', 92612: 'BAMUM LETTER PHASE-E FUET', 92585: 'BAMUM LETTER PHASE-E GBET', 92597: 'BAMUM LETTER PHASE-E GBEUX', 92578: 'BAMUM LETTER PHASE-E GHAAMAE', 92602: 'BAMUM LETTER PHASE-E GHET', 92615: 'BAMUM LETTER PHASE-E GHEUAE', 92565: 'BAMUM LETTER PHASE-E GHEUN', 92552: 'BAMUM LETTER PHASE-E GHEUX', 92674: 'BAMUM LETTER PHASE-E GHOM', 92632: 'BAMUM LETTER PHASE-E I', 92599: 'BAMUM LETTER PHASE-E KET', 92570: 'BAMUM LETTER PHASE-E KEUAE', 92646: 'BAMUM LETTER PHASE-E KEUX', 92670: 'BAMUM LETTER PHASE-E KI', 92665: 'BAMUM LETTER PHASE-E KO', 92532: 'BAMUM LETTER PHASE-E KPEUX', 92587: 'BAMUM LETTER PHASE-E KUET', 92538: 'BAMUM LETTER PHASE-E KUOP', 92620: 'BAMUM LETTER PHASE-E KUT', 92575: 'BAMUM LETTER PHASE-E LAAM', 92521: 'BAMUM LETTER PHASE-E LAP', 92633: 'BAMUM LETTER PHASE-E LAQ', 92595: 'BAMUM LETTER PHASE-E LEUAEM', 92539: 'BAMUM LETTER PHASE-E LOM', 92523: 'BAMUM LETTER PHASE-E LOON', 92556: 'BAMUM LETTER PHASE-E LOOT', 92664: 'BAMUM LETTER PHASE-E LOQ', 92653: 'BAMUM LETTER PHASE-E LU', 92667: 'BAMUM LETTER PHASE-E MA', 92600: 'BAMUM LETTER PHASE-E MAE', 92542: 'BAMUM LETTER PHASE-E MAEM', 92555: 'BAMUM LETTER PHASE-E MAP', 92668: 
'BAMUM LETTER PHASE-E MAQ', 92582: 'BAMUM LETTER PHASE-E MBEE', 92520: 'BAMUM LETTER PHASE-E MBEUM', 92666: 'BAMUM LETTER PHASE-E MEN', 92591: 'BAMUM LETTER PHASE-E MFEUQ', 92551: 'BAMUM LETTER PHASE-E MGBA', 92581: 'BAMUM LETTER PHASE-E MGBEN', 92654: 'BAMUM LETTER PHASE-E MI', 92611: 'BAMUM LETTER PHASE-E MIEE', 92671: 'BAMUM LETTER PHASE-E MON', 92614: 'BAMUM LETTER PHASE-E MUAE', 92617: 'BAMUM LETTER PHASE-E MVI', 92662: 'BAMUM LETTER PHASE-E NA', 92613: 'BAMUM LETTER PHASE-E NAE', 92637: 'BAMUM LETTER PHASE-E NDAA MY HOUSE', 92562: 'BAMUM LETTER PHASE-E NDAA SOFTNESS', 92518: 'BAMUM LETTER PHASE-E NDAP', 92592: 'BAMUM LETTER PHASE-E NDIAQ', 92558: 'BAMUM LETTER PHASE-E NDIQ', 92528: 'BAMUM LETTER PHASE-E NDUN', 92658: 'BAMUM LETTER PHASE-E NGA', 92579: 'BAMUM LETTER PHASE-E NGEUREUT', 92557: 'BAMUM LETTER PHASE-E NGGEEEE', 92607: 'BAMUM LETTER PHASE-E NGGEUAE', 92535: 'BAMUM LETTER PHASE-E NGGEUAET', 92563: 'BAMUM LETTER PHASE-E NGGUAESHAE NYAM', 92624: 'BAMUM LETTER PHASE-E NGGUP', 92550: 'BAMUM LETTER PHASE-E NGGURAE', 92531: 'BAMUM LETTER PHASE-E NGKA', 92601: 'BAMUM LETTER PHASE-E NGKAAMI', 92553: 'BAMUM LETTER PHASE-E NGKEUAEM', 92543: 'BAMUM LETTER PHASE-E NGKEUX', 92619: 'BAMUM LETTER PHASE-E NGKUM', 92598: 'BAMUM LETTER PHASE-E NGKUP', 92541: 'BAMUM LETTER PHASE-E NGOP', 92544: 'BAMUM LETTER PHASE-E NGOQ', 92640: 'BAMUM LETTER PHASE-E NGUAE', 92657: 'BAMUM LETTER PHASE-E NGUAET', 92554: 'BAMUM LETTER PHASE-E NJAEMLI', 92628: 'BAMUM LETTER PHASE-E NJEE', 92648: 'BAMUM LETTER PHASE-E NJEE EPOCH', 92547: 'BAMUM LETTER PHASE-E NJEUX', 92584: 'BAMUM LETTER PHASE-E NKOM', 92540: 'BAMUM LETTER PHASE-E NSHIEE', 92545: 'BAMUM LETTER PHASE-E NSHUE', 92527: 'BAMUM LETTER PHASE-E NSHUOP', 92622: 'BAMUM LETTER PHASE-E NTAP', 92604: 'BAMUM LETTER PHASE-E NTUM', 92608: 'BAMUM LETTER PHASE-E NYI BETWEEN', 92589: 'BAMUM LETTER PHASE-E NYI CLEAVER', 92583: 'BAMUM LETTER PHASE-E NZAQ', 92609: 'BAMUM LETTER PHASE-E NZUQ', 92631: 'BAMUM LETTER PHASE-E O', 92625: 'BAMUM 
LETTER PHASE-E PA PEOPLE', 92634: 'BAMUM LETTER PHASE-E PA PLURAL', 92524: 'BAMUM LETTER PHASE-E PAA', 92536: 'BAMUM LETTER PHASE-E PAAM', 92548: 'BAMUM LETTER PHASE-E PEEM', 92605: 'BAMUM LETTER PHASE-E PEUT', 92647: 'BAMUM LETTER PHASE-E PEUX', 92663: 'BAMUM LETTER PHASE-E PI', 92593: 'BAMUM LETTER PHASE-E PIEEQ', 92621: 'BAMUM LETTER PHASE-E PIET', 92568: 'BAMUM LETTER PHASE-E PO', 92610: 'BAMUM LETTER PHASE-E POON', 92576: 'BAMUM LETTER PHASE-E PU', 92529: 'BAMUM LETTER PHASE-E PUAE', 92618: 'BAMUM LETTER PHASE-E PUAQ', 92649: 'BAMUM LETTER PHASE-E PUE', 92561: 'BAMUM LETTER PHASE-E PUM', 92656: 'BAMUM LETTER PHASE-E RAE', 92526: 'BAMUM LETTER PHASE-E RAQ', 92655: 'BAMUM LETTER PHASE-E REUX', 92546: 'BAMUM LETTER PHASE-E RIMGBA', 92549: 'BAMUM LETTER PHASE-E SAA', 92534: 'BAMUM LETTER PHASE-E SEE', 92560: 'BAMUM LETTER PHASE-E SET', 92580: 'BAMUM LETTER PHASE-E SHEUAEQ', 92638: 'BAMUM LETTER PHASE-E SHIQ', 92659: 'BAMUM LETTER PHASE-E SHO', 92660: 'BAMUM LETTER PHASE-E SHOQ', 92525: 'BAMUM LETTER PHASE-E SOM', 92571: 'BAMUM LETTER PHASE-E SUAEN', 92635: 'BAMUM LETTER PHASE-E TAA', 92577: 'BAMUM LETTER PHASE-E TAAQ', 92559: 'BAMUM LETTER PHASE-E TAEN NTEUM', 92530: 'BAMUM LETTER PHASE-E TAM', 92636: 'BAMUM LETTER PHASE-E TAQ', 92672: 'BAMUM LETTER PHASE-E TEN', 92669: 'BAMUM LETTER PHASE-E TEU', 92572: 'BAMUM LETTER PHASE-E TEUAEQ', 92537: 'BAMUM LETTER PHASE-E TOO', 92519: 'BAMUM LETTER PHASE-E TOON', 92630: 'BAMUM LETTER PHASE-E TOQ', 92566: 'BAMUM LETTER PHASE-E TUAE', 92586: 'BAMUM LETTER PHASE-E TUM', 92569: 'BAMUM LETTER PHASE-E TUMAE', 92652: 'BAMUM LETTER PHASE-E VEE', 92573: 'BAMUM LETTER PHASE-E VEUAE', 92522: 'BAMUM LETTER PHASE-E VOM', 92574: 'BAMUM LETTER PHASE-E WEUX', 92650: 'BAMUM LETTER PHASE-E WUE', 92533: 'BAMUM LETTER PHASE-E WUO', 92588: 'BAMUM LETTER PHASE-E YAP', 92567: 'BAMUM LETTER PHASE-E YEUAE', 92623: 'BAMUM LETTER PHASE-E YEUAET', 92606: 'BAMUM LETTER PHASE-E YEUM', 92639: 'BAMUM LETTER PHASE-E YEUX', 92564: 'BAMUM LETTER PHASE-E 
YIEE', 92590: 'BAMUM LETTER PHASE-E YIT', 92643: 'BAMUM LETTER PHASE-E YOQ COVER', 92642: 'BAMUM LETTER PHASE-E YOQ SWIMMING', 92641: 'BAMUM LETTER PHASE-E YUAEN', 92594: 'BAMUM LETTER PHASE-E YUEQ', 92645: 'BAMUM LETTER PHASE-E YUN', 92644: 'BAMUM LETTER PHASE-E YUQ', 92678: 'BAMUM LETTER PHASE-F EE', 92715: 'BAMUM LETTER PHASE-F FOM', 92675: 'BAMUM LETTER PHASE-F KA', 92710: 'BAMUM LETTER PHASE-F KEN', 92695: 'BAMUM LETTER PHASE-F KET', 92719: 'BAMUM LETTER PHASE-F KO', 92726: 'BAMUM LETTER PHASE-F KPA', 92677: 'BAMUM LETTER PHASE-F KU', 92694: 'BAMUM LETTER PHASE-F KYEE', 92682: 'BAMUM LETTER PHASE-F LA', 92717: 'BAMUM LETTER PHASE-F LI', 92718: 'BAMUM LETTER PHASE-F LOQ', 92689: 'BAMUM LETTER PHASE-F M', 92722: 'BAMUM LETTER PHASE-F MA', 92724: 'BAMUM LETTER PHASE-F MBAA', 92720: 'BAMUM LETTER PHASE-F MBEN', 92685: 'BAMUM LETTER PHASE-F MEEEE', 92723: 'BAMUM LETTER PHASE-F MO', 92687: 'BAMUM LETTER PHASE-F NDAA', 92712: 'BAMUM LETTER PHASE-F NGGA', 92711: 'BAMUM LETTER PHASE-F NGKWAEN', 92708: 'BAMUM LETTER PHASE-F NI', 92688: 'BAMUM LETTER PHASE-F NJAEM', 92698: 'BAMUM LETTER PHASE-F NJUAE', 92702: 'BAMUM LETTER PHASE-F NSHA', 92704: 'BAMUM LETTER PHASE-F NTEE', 92697: 'BAMUM LETTER PHASE-F NU', 92696: 'BAMUM LETTER PHASE-F NUAE', 92681: 'BAMUM LETTER PHASE-F NYI', 92706: 'BAMUM LETTER PHASE-F PEE', 92703: 'BAMUM LETTER PHASE-F PEUX', 92714: 'BAMUM LETTER PHASE-F PUAE', 92679: 'BAMUM LETTER PHASE-F REE', 92721: 'BAMUM LETTER PHASE-F REN', 92709: 'BAMUM LETTER PHASE-F REUX', 92684: 'BAMUM LETTER PHASE-F RIEE', 92683: 'BAMUM LETTER PHASE-F RII', 92707: 'BAMUM LETTER PHASE-F RU', 92727: 'BAMUM LETTER PHASE-F SAMBA', 92693: 'BAMUM LETTER PHASE-F SEUX', 92691: 'BAMUM LETTER PHASE-F SHII', 92713: 'BAMUM LETTER PHASE-F SHO', 92700: 'BAMUM LETTER PHASE-F SHU', 92692: 'BAMUM LETTER PHASE-F SI', 92690: 'BAMUM LETTER PHASE-F SUU', 92686: 'BAMUM LETTER PHASE-F TAA', 92680: 'BAMUM LETTER PHASE-F TAE', 92725: 'BAMUM LETTER PHASE-F TET', 92676: 'BAMUM LETTER PHASE-F U', 
92728: 'BAMUM LETTER PHASE-F VUEQ', 92716: 'BAMUM LETTER PHASE-F WA', 92705: 'BAMUM LETTER PHASE-F WUE', 92701: 'BAMUM LETTER PHASE-F YA', 92699: 'BAMUM LETTER PHASE-F YOQ', 127820: 'BANANA', 127974: 'BANK', 128181: 'BANKNOTE WITH DOLLAR SIGN', 128182: 'BANKNOTE WITH EURO SIGN', 128183: 'BANKNOTE WITH POUND SIGN', 128180: 'BANKNOTE WITH YEN SIGN', 128202: 'BAR CHART', 128136: 'BARBER POLE', 127936: 'BASKETBALL AND HOOP', 7153: 'BATAK CONSONANT SIGN H', 7152: 'BATAK CONSONANT SIGN NG', 7104: 'BATAK LETTER A', 7109: 'BATAK LETTER BA', 7137: 'BATAK LETTER CA', 7121: 'BATAK LETTER DA', 7118: 'BATAK LETTER GA', 7106: 'BATAK LETTER HA', 7140: 'BATAK LETTER I', 7120: 'BATAK LETTER JA', 7110: 'BATAK LETTER KARO BA', 7134: 'BATAK LETTER LA', 7124: 'BATAK LETTER MA', 7108: 'BATAK LETTER MANDAILING HA', 7114: 'BATAK LETTER MANDAILING NA', 7130: 'BATAK LETTER MANDAILING SA', 7139: 'BATAK LETTER MBA', 7113: 'BATAK LETTER NA', 7138: 'BATAK LETTER NDA', 7133: 'BATAK LETTER NGA', 7127: 'BATAK LETTER NORTHERN TA', 7136: 'BATAK LETTER NYA', 7111: 'BATAK LETTER PA', 7117: 'BATAK LETTER PAKPAK WA', 7122: 'BATAK LETTER RA', 7128: 'BATAK LETTER SA', 7105: 'BATAK LETTER SIMALUNGUN A', 7119: 'BATAK LETTER SIMALUNGUN GA', 7107: 'BATAK LETTER SIMALUNGUN HA', 7135: 'BATAK LETTER SIMALUNGUN LA', 7125: 'BATAK LETTER SIMALUNGUN MA', 7112: 'BATAK LETTER SIMALUNGUN PA', 7123: 'BATAK LETTER SIMALUNGUN RA', 7129: 'BATAK LETTER SIMALUNGUN SA', 7116: 'BATAK LETTER SIMALUNGUN WA', 7132: 'BATAK LETTER SIMALUNGUN YA', 7126: 'BATAK LETTER SOUTHERN TA', 7141: 'BATAK LETTER U', 7115: 'BATAK LETTER WA', 7131: 'BATAK LETTER YA', 7154: 'BATAK PANGOLAT', 7155: 'BATAK PANONGONAN', 7142: 'BATAK SIGN TOMPI', 7166: 'BATAK SYMBOL BINDU JUDUL', 7164: 'BATAK SYMBOL BINDU NA METEK', 7167: 'BATAK SYMBOL BINDU PANGOLAT', 7165: 'BATAK SYMBOL BINDU PINARBORAS', 7143: 'BATAK VOWEL SIGN E', 7145: 'BATAK VOWEL SIGN EE', 7146: 'BATAK VOWEL SIGN I', 7147: 'BATAK VOWEL SIGN KARO I', 7149: 'BATAK VOWEL SIGN KARO O', 7148: 'BATAK 
VOWEL SIGN O', 7144: 'BATAK VOWEL SIGN PAKPAK E', 7150: 'BATAK VOWEL SIGN U', 7151: 'BATAK VOWEL SIGN U FOR SIMALUNGUN SA', 128704: 'BATH', 128705: 'BATHTUB', 128267: 'BATTERY', 128059: 'BEAR FACE', 128147: 'BEATING HEART', 127866: 'BEER MUG', 128276: 'BELL', 128277: 'BELL WITH CANCELLATION STROKE', 983621: 'BENGALI LETTER KHINYA', 127857: 'BENTO BOX', 128690: 'BICYCLE', 128692: 'BICYCLIST', 128089: 'BIKINI', 127921: 'BILLIARDS', 128038: 'BIRD', 127874: 'BIRTHDAY CAKE', 9196: 'BLACK DOWN-POINTING DOUBLE TRIANGLE', 9194: 'BLACK LEFT-POINTING DOUBLE TRIANGLE', 9198: 'BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR', 10067: 'BLACK QUESTION MARK ORNAMENT', 9193: 'BLACK RIGHT-POINTING DOUBLE TRIANGLE', 9197: 'BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR', 9199: 'BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR', 128306: 'BLACK SQUARE BUTTON', 9195: 'BLACK UP-POINTING DOUBLE TRIANGLE', 127804: 'BLOSSOM', 128033: 'BLOWFISH', 128216: 'BLUE BOOK', 128153: 'BLUE HEART', 128023: 'BOAR', 128163: 'BOMB', 128278: 'BOOKMARK', 128209: 'BOOKMARK TABS', 128218: 'BOOKS', 12728: 'BOPOMOFO LETTER GH', 12729: 'BOPOMOFO LETTER LH', 12730: 'BOPOMOFO LETTER ZY', 128144: 'BOUQUET', 127923: 'BOWLING', 128102: 'BOY', 69703: 'BRAHMI DANDA', 69742: 'BRAHMI DIGIT EIGHT', 69739: 'BRAHMI DIGIT FIVE', 69738: 'BRAHMI DIGIT FOUR', 69743: 'BRAHMI DIGIT NINE', 69735: 'BRAHMI DIGIT ONE', 69741: 'BRAHMI DIGIT SEVEN', 69740: 'BRAHMI DIGIT SIX', 69737: 'BRAHMI DIGIT THREE', 69736: 'BRAHMI DIGIT TWO', 69734: 'BRAHMI DIGIT ZERO', 69704: 'BRAHMI DOUBLE DANDA', 69637: 'BRAHMI LETTER A', 69638: 'BRAHMI LETTER AA', 69648: 'BRAHMI LETTER AI', 69650: 'BRAHMI LETTER AU', 69673: 'BRAHMI LETTER BA', 69674: 'BRAHMI LETTER BHA', 69656: 'BRAHMI LETTER CA', 69657: 'BRAHMI LETTER CHA', 69668: 'BRAHMI LETTER DA', 69663: 'BRAHMI LETTER DDA', 69664: 'BRAHMI LETTER DDHA', 69669: 'BRAHMI LETTER DHA', 69647: 'BRAHMI LETTER E', 69653: 'BRAHMI LETTER GA', 69654: 'BRAHMI LETTER GHA', 69683: 'BRAHMI LETTER 
HA', 69639: 'BRAHMI LETTER I', 69640: 'BRAHMI LETTER II', 69658: 'BRAHMI LETTER JA', 69659: 'BRAHMI LETTER JHA', 69651: 'BRAHMI LETTER KA', 69652: 'BRAHMI LETTER KHA', 69678: 'BRAHMI LETTER LA', 69684: 'BRAHMI LETTER LLA', 69675: 'BRAHMI LETTER MA', 69670: 'BRAHMI LETTER NA', 69655: 'BRAHMI LETTER NGA', 69665: 'BRAHMI LETTER NNA', 69660: 'BRAHMI LETTER NYA', 69649: 'BRAHMI LETTER O', 69685: 'BRAHMI LETTER OLD TAMIL LLLA', 69687: 'BRAHMI LETTER OLD TAMIL NNNA', 69686: 'BRAHMI LETTER OLD TAMIL RRA', 69671: 'BRAHMI LETTER PA', 69672: 'BRAHMI LETTER PHA', 69677: 'BRAHMI LETTER RA', 69682: 'BRAHMI LETTER SA', 69680: 'BRAHMI LETTER SHA', 69681: 'BRAHMI LETTER SSA', 69666: 'BRAHMI LETTER TA', 69667: 'BRAHMI LETTER THA', 69661: 'BRAHMI LETTER TTA', 69662: 'BRAHMI LETTER TTHA', 69641: 'BRAHMI LETTER U', 69642: 'BRAHMI LETTER UU', 69679: 'BRAHMI LETTER VA', 69645: 'BRAHMI LETTER VOCALIC L', 69646: 'BRAHMI LETTER VOCALIC LL', 69643: 'BRAHMI LETTER VOCALIC R', 69644: 'BRAHMI LETTER VOCALIC RR', 69676: 'BRAHMI LETTER YA', 69721: 'BRAHMI NUMBER EIGHT', 69730: 'BRAHMI NUMBER EIGHTY', 69727: 'BRAHMI NUMBER FIFTY', 69718: 'BRAHMI NUMBER FIVE', 69726: 'BRAHMI NUMBER FORTY', 69717: 'BRAHMI NUMBER FOUR', 69722: 'BRAHMI NUMBER NINE', 69731: 'BRAHMI NUMBER NINETY', 69714: 'BRAHMI NUMBER ONE', 69732: 'BRAHMI NUMBER ONE HUNDRED', 69733: 'BRAHMI NUMBER ONE THOUSAND', 69720: 'BRAHMI NUMBER SEVEN', 69729: 'BRAHMI NUMBER SEVENTY', 69719: 'BRAHMI NUMBER SIX', 69728: 'BRAHMI NUMBER SIXTY', 69723: 'BRAHMI NUMBER TEN', 69725: 'BRAHMI NUMBER THIRTY', 69716: 'BRAHMI NUMBER THREE', 69724: 'BRAHMI NUMBER TWENTY', 69715: 'BRAHMI NUMBER TWO', 69708: 'BRAHMI PUNCTUATION CRESCENT BAR', 69705: 'BRAHMI PUNCTUATION DOT', 69706: 'BRAHMI PUNCTUATION DOUBLE DOT', 69707: 'BRAHMI PUNCTUATION LINE', 69709: 'BRAHMI PUNCTUATION LOTUS', 69633: 'BRAHMI SIGN ANUSVARA', 69632: 'BRAHMI SIGN CANDRABINDU', 69635: 'BRAHMI SIGN JIHVAMULIYA', 69636: 'BRAHMI SIGN UPADHMANIYA', 69634: 'BRAHMI SIGN VISARGA', 69702: 'BRAHMI 
VIRAMA', 69688: 'BRAHMI VOWEL SIGN AA', 69699: 'BRAHMI VOWEL SIGN AI', 69701: 'BRAHMI VOWEL SIGN AU', 69689: 'BRAHMI VOWEL SIGN BHATTIPROLU AA', 69698: 'BRAHMI VOWEL SIGN E', 69690: 'BRAHMI VOWEL SIGN I', 69691: 'BRAHMI VOWEL SIGN II', 69700: 'BRAHMI VOWEL SIGN O', 69692: 'BRAHMI VOWEL SIGN U', 69693: 'BRAHMI VOWEL SIGN UU', 69696: 'BRAHMI VOWEL SIGN VOCALIC L', 69697: 'BRAHMI VOWEL SIGN VOCALIC LL', 69694: 'BRAHMI VOWEL SIGN VOCALIC R', 69695: 'BRAHMI VOWEL SIGN VOCALIC RR', 127838: 'BREAD', 128112: 'BRIDE WITH VEIL', 127753: 'BRIDGE AT NIGHT', 128188: 'BRIEFCASE', 128148: 'BROKEN HEART', 128027: 'BUG', 128652: 'BUS', 128655: 'BUS STOP', 128100: 'BUST IN SILHOUETTE', 128101: 'BUSTS IN SILHOUETTE', 127797: 'CACTUS', 128197: 'CALENDAR', 128247: 'CAMERA', 127852: 'CANDY', 128199: 'CARD INDEX', 127904: 'CAROUSEL HORSE', 127887: 'CARP STREAMER', 128008: 'CAT', 128049: 'CAT FACE', 128569: 'CAT FACE WITH TEARS OF JOY', 128572: 'CAT FACE WITH WRY SMILE', 128201: 'CHART WITH DOWNWARDS TREND', 128200: 'CHART WITH UPWARDS TREND', 128185: 'CHART WITH UPWARDS TREND AND YEN SIGN', 128227: 'CHEERING MEGAPHONE', 127937: 'CHEQUERED FLAG', 127826: 'CHERRIES', 127800: 'CHERRY BLOSSOM', 127792: 'CHESTNUT', 128020: 'CHICKEN', 128696: 'CHILDREN CROSSING', 127851: 'CHOCOLATE BAR', 127876: 'CHRISTMAS TREE', 127910: 'CINEMA', 127569: 'CIRCLED IDEOGRAPH ACCEPT', 127568: 'CIRCLED IDEOGRAPH ADVANTAGE', 127914: 'CIRCUS TENT', 127750: 'CITYSCAPE AT DUSK', 127916: 'CLAPPER BOARD', 128079: 'CLAPPING HANDS SIGN', 127867: 'CLINKING BEER MUGS', 128203: 'CLIPBOARD', 128343: 'CLOCK FACE EIGHT OCLOCK', 128355: 'CLOCK FACE EIGHT-THIRTY', 128346: 'CLOCK FACE ELEVEN OCLOCK', 128358: 'CLOCK FACE ELEVEN-THIRTY', 128340: 'CLOCK FACE FIVE OCLOCK', 128352: 'CLOCK FACE FIVE-THIRTY', 128339: 'CLOCK FACE FOUR OCLOCK', 128351: 'CLOCK FACE FOUR-THIRTY', 128344: 'CLOCK FACE NINE OCLOCK', 128356: 'CLOCK FACE NINE-THIRTY', 128336: 'CLOCK FACE ONE OCLOCK', 128348: 'CLOCK FACE ONE-THIRTY', 128342: 'CLOCK FACE SEVEN 
OCLOCK', 128354: 'CLOCK FACE SEVEN-THIRTY', 128341: 'CLOCK FACE SIX OCLOCK', 128353: 'CLOCK FACE SIX-THIRTY', 128345: 'CLOCK FACE TEN OCLOCK', 128357: 'CLOCK FACE TEN-THIRTY', 128338: 'CLOCK FACE THREE OCLOCK', 128350: 'CLOCK FACE THREE-THIRTY', 128347: 'CLOCK FACE TWELVE OCLOCK', 128359: 'CLOCK FACE TWELVE-THIRTY', 128337: 'CLOCK FACE TWO OCLOCK', 128349: 'CLOCK FACE TWO-THIRTY', 128259: 'CLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS', 128257: 'CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS', 128258: 'CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS WITH CIRCLED ONE OVERLAY', 128213: 'CLOSED BOOK', 128272: 'CLOSED LOCK WITH KEY', 128234: 'CLOSED MAILBOX WITH LOWERED FLAG', 128235: 'CLOSED MAILBOX WITH RAISED FLAG', 127746: 'CLOSED UMBRELLA', 127864: 'COCKTAIL GLASS', 128165: 'COLLISION SYMBOL', 7676: 'COMBINING DOUBLE INVERTED BREVE BELOW', 127882: 'CONFETTI BALL', 128534: 'CONFOUNDED FACE', 128679: 'CONSTRUCTION SIGN', 128119: 'CONSTRUCTION WORKER', 127978: 'CONVENIENCE STORE', 127834: 'COOKED RICE', 127850: 'COOKIE', 127859: 'COOKING', 128145: 'COUPLE WITH HEART', 128004: 'COW', 128046: 'COW FACE', 128179: 'CREDIT CARD', 127769: 'CRESCENT MOON', 128010: 'CROCODILE', 10060: 'CROSS MARK', 127884: 'CROSSED FLAGS', 128081: 'CROWN', 128575: 'CRYING CAT FACE', 128546: 'CRYING FACE', 128302: 'CRYSTAL BALL', 10160: 'CURLY LOOP', 128177: 'CURRENCY EXCHANGE', 127835: 'CURRY AND RICE', 127854: 'CUSTARD', 128707: 'CUSTOMS', 127744: 'CYCLONE', 42592: 'CYRILLIC CAPITAL LETTER REVERSED TSE', 1318: 'CYRILLIC CAPITAL LETTER SHHA WITH DESCENDER', 42593: 'CYRILLIC SMALL LETTER REVERSED TSE', 1319: 'CYRILLIC SMALL LETTER SHHA WITH DESCENDER', 128131: 'DANCER', 127841: 'DANGO', 128168: 'DASH SYMBOL', 127795: 'DECIDUOUS TREE', 128666: 'DELIVERY TRUCK', 127980: 'DEPARTMENT STORE', 2421: 'DEVANAGARI LETTER AW', 2419: 'DEVANAGARI LETTER OE', 2420: 'DEVANAGARI LETTER OOE', 2422: 'DEVANAGARI LETTER UE', 2423: 'DEVANAGARI LETTER UUE', 2383: 'DEVANAGARI VOWEL SIGN AW', 
2362: 'DEVANAGARI VOWEL SIGN OE', 2363: 'DEVANAGARI VOWEL SIGN OOE', 2390: 'DEVANAGARI VOWEL SIGN UE', 2391: 'DEVANAGARI VOWEL SIGN UUE', 128160: 'DIAMOND SHAPE WITH A DOT INSIDE', 127919: 'DIRECT HIT', 128549: 'DISAPPOINTED BUT RELIEVED FACE', 128542: 'DISAPPOINTED FACE', 128565: 'DIZZY FACE', 128171: 'DIZZY SYMBOL', 128687: 'DO NOT LITTER SYMBOL', 128021: 'DOG', 128054: 'DOG FACE', 128044: 'DOLPHIN', 128682: 'DOOR', 10175: 'DOUBLE CURLY LOOP', 127849: 'DOUGHNUT', 128315: 'DOWN-POINTING RED TRIANGLE', 128317: 'DOWN-POINTING SMALL RED TRIANGLE', 128009: 'DRAGON', 128050: 'DRAGON FACE', 128087: 'DRESS', 128042: 'DROMEDARY CAMEL', 128167: 'DROPLET', 128192: 'DVD', 128231: 'E-MAIL SYMBOL', 128066: 'EAR', 127805: 'EAR OF MAIZE', 127806: 'EAR OF RICE', 127758: 'EARTH GLOBE AMERICAS', 127759: 'EARTH GLOBE ASIA-AUSTRALIA', 127757: 'EARTH GLOBE EUROPE-AFRICA', 128161: 'ELECTRIC LIGHT BULB', 128268: 'ELECTRIC PLUG', 128294: 'ELECTRIC TORCH', 128024: 'ELEPHANT', 128282: 'END WITH LEFTWARDS ARROW ABOVE', 128233: 'ENVELOPE WITH DOWNWARDS ARROW ABOVE', 4957: 'ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK', 4958: 'ETHIOPIC COMBINING VOWEL LENGTH MARK', 43816: 'ETHIOPIC SYLLABLE BBA', 43819: 'ETHIOPIC SYLLABLE BBAA', 43821: 'ETHIOPIC SYLLABLE BBE', 43820: 'ETHIOPIC SYLLABLE BBEE', 43818: 'ETHIOPIC SYLLABLE BBI', 43822: 'ETHIOPIC SYLLABLE BBO', 43817: 'ETHIOPIC SYLLABLE BBU', 43808: 'ETHIOPIC SYLLABLE CCHHA', 43811: 'ETHIOPIC SYLLABLE CCHHAA', 43813: 'ETHIOPIC SYLLABLE CCHHE', 43812: 'ETHIOPIC SYLLABLE CCHHEE', 43810: 'ETHIOPIC SYLLABLE CCHHI', 43814: 'ETHIOPIC SYLLABLE CCHHO', 43809: 'ETHIOPIC SYLLABLE CCHHU', 43787: 'ETHIOPIC SYLLABLE DDHAA', 43789: 'ETHIOPIC SYLLABLE DDHE', 43788: 'ETHIOPIC SYLLABLE DDHEE', 43786: 'ETHIOPIC SYLLABLE DDHI', 43790: 'ETHIOPIC SYLLABLE DDHO', 43785: 'ETHIOPIC SYLLABLE DDHU', 43795: 'ETHIOPIC SYLLABLE DZAA', 43797: 'ETHIOPIC SYLLABLE DZE', 43796: 'ETHIOPIC SYLLABLE DZEE', 43794: 'ETHIOPIC SYLLABLE DZI', 43798: 'ETHIOPIC SYLLABLE DZO', 43793: 
'ETHIOPIC SYLLABLE DZU', 43779: 'ETHIOPIC SYLLABLE TTHAA', 43781: 'ETHIOPIC SYLLABLE TTHE', 43780: 'ETHIOPIC SYLLABLE TTHEE', 43778: 'ETHIOPIC SYLLABLE TTHI', 43782: 'ETHIOPIC SYLLABLE TTHO', 43777: 'ETHIOPIC SYLLABLE TTHU', 127984: 'EUROPEAN CASTLE', 127972: 'EUROPEAN POST OFFICE', 127794: 'EVERGREEN TREE', 128125: 'EXTRATERRESTRIAL ALIEN', 128083: 'EYEGLASSES', 128064: 'EYES', 128134: 'FACE MASSAGE', 128523: 'FACE SAVOURING DELICIOUS FOOD', 128561: 'FACE SCREAMING IN FEAR', 128536: 'FACE THROWING A KISS', 128531: 'FACE WITH COLD SWEAT', 128548: 'FACE WITH LOOK OF TRIUMPH', 128567: 'FACE WITH MEDICAL MASK', 128581: 'FACE WITH NO GOOD GESTURE', 128582: 'FACE WITH OK GESTURE', 128560: 'FACE WITH OPEN MOUTH AND COLD SWEAT', 128541: 'FACE WITH STUCK-OUT TONGUE AND TIGHTLY-CLOSED EYES', 128540: 'FACE WITH STUCK-OUT TONGUE AND WINKING EYE', 128514: 'FACE WITH TEARS OF JOY', 128566: 'FACE WITHOUT MOUTH', 127981: 'FACTORY', 127810: 'FALLEN LEAF', 128106: 'FAMILY', 127877: 'FATHER CHRISTMAS', 128224: 'FAX MACHINE', 128552: 'FEARFUL FACE', 127905: 'FERRIS WHEEL', 128193: 'FILE FOLDER', 128293: 'FIRE', 128658: 'FIRE ENGINE', 127879: 'FIREWORK SPARKLER', 127878: 'FIREWORKS', 127763: 'FIRST QUARTER MOON SYMBOL', 127771: 'FIRST QUARTER MOON WITH FACE', 128031: 'FISH', 127845: 'FISH CAKE WITH SWIRL DESIGN', 127907: 'FISHING POLE AND FISH', 128074: 'FISTED HAND SIGN', 128170: 'FLEXED BICEPS', 128190: 'FLOPPY DISK', 127924: 'FLOWER PLAYING CARDS', 128563: 'FLUSHED FACE', 127745: 'FOGGY', 128099: 'FOOTPRINTS', 127860: 'FORK AND KNIFE', 127808: 'FOUR LEAF CLOVER', 127839: 'FRENCH FRIES', 127844: 'FRIED SHRIMP', 128056: 'FROG FACE', 128037: 'FRONT-FACING BABY CHICK', 127765: 'FULL MOON SYMBOL', 127773: 'FULL MOON WITH FACE', 127922: 'GAME DIE', 128142: 'GEM STONE', 983912: 'GEORGIAN LETTER U-BRJGU', 128123: 'GHOST', 128103: 'GIRL', 127760: 'GLOBE WITH MERIDIANS', 127775: 'GLOWING STAR', 128016: 'GOAT', 127891: 'GRADUATION CAP', 127815: 'GRAPES', 127823: 'GREEN APPLE', 128215: 'GREEN 
BOOK', 128154: 'GREEN HEART', 128568: 'GRINNING CAT FACE WITH SMILING EYES', 128513: 'GRINNING FACE WITH SMILING EYES', 128151: 'GROWING HEART', 128130: 'GUARDSMAN', 127928: 'GUITAR', 128135: 'HAIRCUT', 127828: 'HAMBURGER', 128296: 'HAMMER', 128057: 'HAMSTER FACE', 128092: 'HANDBAG', 128587: 'HAPPY PERSON RAISING ONE HAND', 128035: 'HATCHING CHICK', 127911: 'HEADPHONE', 128585: 'HEAR-NO-EVIL MONKEY', 128159: 'HEART DECORATION', 128152: 'HEART WITH ARROW', 128157: 'HEART WITH RIBBON', 10135: 'HEAVY DIVISION SIGN', 128178: 'HEAVY DOLLAR SIGN', 10080: 'HEAVY LOW DOUBLE COMMA QUOTATION MARK ORNAMENT', 10079: 'HEAVY LOW SINGLE COMMA QUOTATION MARK ORNAMENT', 10134: 'HEAVY MINUS SIGN', 10133: 'HEAVY PLUS SIGN', 128641: 'HELICOPTER', 127807: 'HERB', 127802: 'HIBISCUS', 128262: 'HIGH BRIGHTNESS SYMBOL', 128096: 'HIGH-HEELED SHOE', 128644: 'HIGH-SPEED TRAIN', 128645: 'HIGH-SPEED TRAIN WITH BULLET NOSE', 110593: 'HIRAGANA LETTER ARCHAIC YE', 983954: 'HIRAGANA LETTER BIDAKUON NGA', 983957: 'HIRAGANA LETTER BIDAKUON NGE', 983955: 'HIRAGANA LETTER BIDAKUON NGI', 983958: 'HIRAGANA LETTER BIDAKUON NGO', 983956: 'HIRAGANA LETTER BIDAKUON NGU', 128298: 'HOCHO', 127855: 'HONEY POT', 128029: 'HONEYBEE', 128677: 'HORIZONTAL TRAFFIC LIGHT', 128014: 'HORSE', 128052: 'HORSE FACE', 127943: 'HORSE RACING', 127973: 'HOSPITAL', 127976: 'HOTEL', 9203: 'HOURGLASS WITH FLOWING SAND', 127968: 'HOUSE BUILDING', 127969: 'HOUSE WITH GARDEN', 128175: 'HUNDRED POINTS SYMBOL', 127848: 'ICE CREAM', 128127: 'IMP', 128229: 'INBOX TRAY', 128232: 'INCOMING ENVELOPE', 8377: 'INDIAN RUPEE SIGN', 128129: 'INFORMATION DESK PERSON', 128288: 'INPUT SYMBOL FOR LATIN CAPITAL LETTERS', 128292: 'INPUT SYMBOL FOR LATIN LETTERS', 128289: 'INPUT SYMBOL FOR LATIN SMALL LETTERS', 128290: 'INPUT SYMBOL FOR NUMBERS', 128291: 'INPUT SYMBOL FOR SYMBOLS', 9959: 'INVERTED PENTAGRAM', 127982: 'IZAKAYA LANTERN', 127875: 'JACK-O-LANTERN', 127983: 'JAPANESE CASTLE', 127886: 'JAPANESE DOLLS', 128122: 'JAPANESE GOBLIN', 128121: 
'JAPANESE OGRE', 127971: 'JAPANESE POST OFFICE', 128304: 'JAPANESE SYMBOL FOR BEGINNER', 128086: 'JEANS', 983964: 'KATAKANA LETTER AINU CE', 983967: 'KATAKANA LETTER AINU P', 983966: 'KATAKANA LETTER AINU TO', 983965: 'KATAKANA LETTER AINU TU', 110592: 'KATAKANA LETTER ARCHAIC E', 983959: 'KATAKANA LETTER BIDAKUON NGA', 983962: 'KATAKANA LETTER BIDAKUON NGE', 983960: 'KATAKANA LETTER BIDAKUON NGI', 983963: 'KATAKANA LETTER BIDAKUON NGO', 983961: 'KATAKANA LETTER BIDAKUON NGU', 128273: 'KEY', 128287: 'KEYCAP TEN', 983933: 'KHMER CONSONANT SIGN COENG BA', 983918: 'KHMER CONSONANT SIGN COENG CA', 983919: 'KHMER CONSONANT SIGN COENG CHA', 983921: 'KHMER CONSONANT SIGN COENG CHO', 983920: 'KHMER CONSONANT SIGN COENG CO', 983923: 'KHMER CONSONANT SIGN COENG DA', 983925: 'KHMER CONSONANT SIGN COENG DO', 983945: 'KHMER CONSONANT SIGN COENG HA', 983913: 'KHMER CONSONANT SIGN COENG KA', 983914: 'KHMER CONSONANT SIGN COENG KHA', 983916: 'KHMER CONSONANT SIGN COENG KHO', 983915: 'KHMER CONSONANT SIGN COENG KO', 983946: 'KHMER CONSONANT SIGN COENG LA', 983940: 'KHMER CONSONANT SIGN COENG LO', 983937: 'KHMER CONSONANT SIGN COENG MO', 983927: 'KHMER CONSONANT SIGN COENG NA', 983917: 'KHMER CONSONANT SIGN COENG NGO', 983932: 'KHMER CONSONANT SIGN COENG NO', 983922: 'KHMER CONSONANT SIGN COENG NYO', 983934: 'KHMER CONSONANT SIGN COENG PHA', 983936: 'KHMER CONSONANT SIGN COENG PHO', 983935: 'KHMER CONSONANT SIGN COENG PO', 983939: 'KHMER CONSONANT SIGN COENG RO', 983944: 'KHMER CONSONANT SIGN COENG SA', 983942: 'KHMER CONSONANT SIGN COENG SHA', 983943: 'KHMER CONSONANT SIGN COENG SSA', 983928: 'KHMER CONSONANT SIGN COENG TA', 983929: 'KHMER CONSONANT SIGN COENG THA', 983931: 'KHMER CONSONANT SIGN COENG THO', 983930: 'KHMER CONSONANT SIGN COENG TO', 983924: 'KHMER CONSONANT SIGN COENG TTHA', 983926: 'KHMER CONSONANT SIGN COENG TTHO', 983941: 'KHMER CONSONANT SIGN COENG VO', 983938: 'KHMER CONSONANT SIGN COENG YO', 983951: 'KHMER INDEPENDENT VOWEL SIGN COENG QE', 983948: 'KHMER 
INDEPENDENT VOWEL SIGN COENG QU', 983949: 'KHMER INDEPENDENT VOWEL SIGN COENG RY', 983950: 'KHMER INDEPENDENT VOWEL SIGN COENG RYY', 983953: 'KHMER VOWEL SIGN AAM', 983947: 'KHMER VOWEL SIGN COENG QA', 983952: 'KHMER VOWEL SIGN OM', 128088: 'KIMONO', 128143: 'KISS', 128139: 'KISS MARK', 128573: 'KISSING CAT FACE WITH CLOSED EYES', 128538: 'KISSING FACE WITH CLOSED EYES', 128040: 'KOALA', 128030: 'LADY BEETLE', 128309: 'LARGE BLUE CIRCLE', 128311: 'LARGE BLUE DIAMOND', 128310: 'LARGE ORANGE DIAMOND', 128308: 'LARGE RED CIRCLE', 127767: 'LAST QUARTER MOON SYMBOL', 127772: 'LAST QUARTER MOON WITH FACE', 42912: 'LATIN CAPITAL LETTER G WITH OBLIQUE STROKE', 42914: 'LATIN CAPITAL LETTER K WITH OBLIQUE STROKE', 42896: 'LATIN CAPITAL LETTER N WITH DESCENDER', 42916: 'LATIN CAPITAL LETTER N WITH OBLIQUE STROKE', 42918: 'LATIN CAPITAL LETTER R WITH OBLIQUE STROKE', 42920: 'LATIN CAPITAL LETTER S WITH OBLIQUE STROKE', 42893: 'LATIN CAPITAL LETTER TURNED H', 43002: 'LATIN LETTER SMALL CAPITAL TURNED M', 983612: 'LATIN SMALL LETTER AE WITH GRAVE', 42913: 'LATIN SMALL LETTER G WITH OBLIQUE STROKE', 983620: 'LATIN SMALL LETTER HOOKED SCHWA WITH ACUTE', 983619: 'LATIN SMALL LETTER HOOKED SCHWA WITH GRAVE', 42915: 'LATIN SMALL LETTER K WITH OBLIQUE STROKE', 42894: 'LATIN SMALL LETTER L WITH RETROFLEX HOOK AND BELT', 42897: 'LATIN SMALL LETTER N WITH DESCENDER', 42917: 'LATIN SMALL LETTER N WITH OBLIQUE STROKE', 983614: 'LATIN SMALL LETTER OPEN O WITH ACUTE', 983613: 'LATIN SMALL LETTER OPEN O WITH GRAVE', 42919: 'LATIN SMALL LETTER R WITH OBLIQUE STROKE', 42921: 'LATIN SMALL LETTER S WITH OBLIQUE STROKE', 983618: 'LATIN SMALL LETTER SCHWA WITH ACUTE', 983617: 'LATIN SMALL LETTER SCHWA WITH GRAVE', 983616: 'LATIN SMALL LETTER TURNED V WITH ACUTE', 983615: 'LATIN SMALL LETTER TURNED V WITH GRAVE', 8341: 'LATIN SUBSCRIPT SMALL LETTER H', 8342: 'LATIN SUBSCRIPT SMALL LETTER K', 8343: 'LATIN SUBSCRIPT SMALL LETTER L', 8344: 'LATIN SUBSCRIPT SMALL LETTER M', 8345: 'LATIN SUBSCRIPT SMALL 
LETTER N', 8346: 'LATIN SUBSCRIPT SMALL LETTER P', 8347: 'LATIN SUBSCRIPT SMALL LETTER S', 8348: 'LATIN SUBSCRIPT SMALL LETTER T', 127811: 'LEAF FLUTTERING IN WIND', 128210: 'LEDGER', 128709: 'LEFT LUGGAGE', 9958: 'LEFT-HANDED INTERLACED PENTAGRAM', 128269: 'LEFT-POINTING MAGNIFYING GLASS', 127819: 'LEMON', 128006: 'LEOPARD', 128648: 'LIGHT RAIL', 128279: 'LINK SYMBOL', 128132: 'LIPSTICK', 128274: 'LOCK', 128271: 'LOCK WITH INK PEN', 127853: 'LOLLIPOP', 128557: 'LOUDLY CRYING FACE', 127977: 'LOVE HOTEL', 128140: 'LOVE LETTER', 128261: 'LOW BRIGHTNESS SYMBOL', 3406: 'MALAYALAM LETTER DOT REPH', 3369: 'MALAYALAM LETTER NNNA', 3386: 'MALAYALAM LETTER TTTA', 128104: 'MAN', 128107: 'MAN AND WOMAN HOLDING HANDS', 128114: 'MAN WITH GUA PI MAO', 128115: 'MAN WITH TURBAN', 2137: 'MANDAIC AFFRICATION MARK', 2139: 'MANDAIC GEMINATION MARK', 2113: 'MANDAIC LETTER AB', 2115: 'MANDAIC LETTER AD', 2114: 'MANDAIC LETTER AG', 2116: 'MANDAIC LETTER AH', 2136: 'MANDAIC LETTER AIN', 2122: 'MANDAIC LETTER AK', 2121: 'MANDAIC LETTER AKSA', 2123: 'MANDAIC LETTER AL', 2124: 'MANDAIC LETTER AM', 2125: 'MANDAIC LETTER AN', 2128: 'MANDAIC LETTER AP', 2130: 'MANDAIC LETTER AQ', 2131: 'MANDAIC LETTER AR', 2126: 'MANDAIC LETTER AS', 2132: 'MANDAIC LETTER ASH', 2129: 'MANDAIC LETTER ASZ', 2133: 'MANDAIC LETTER AT', 2120: 'MANDAIC LETTER ATT', 2118: 'MANDAIC LETTER AZ', 2134: 'MANDAIC LETTER DUSHENNA', 2112: 'MANDAIC LETTER HALQA', 2127: 'MANDAIC LETTER IN', 2119: 'MANDAIC LETTER IT', 2135: 'MANDAIC LETTER KAD', 2117: 'MANDAIC LETTER USHENNA', 2142: 'MANDAIC PUNCTUATION', 2138: 'MANDAIC VOCALIZATION MARK', 128094: 'MANS SHOE', 127809: 'MAPLE LEAF', 127830: 'MEAT ON BONE', 127816: 'MELON', 128221: 'MEMO', 128697: 'MENS SYMBOL', 128647: 'METRO', 127908: 'MICROPHONE', 128300: 'MICROSCOPE', 127756: 'MILKY WAY', 128656: 'MINIBUS', 128189: 'MINIDISC', 128241: 'MOBILE PHONE', 128244: 'MOBILE PHONE OFF', 128242: 'MOBILE PHONE WITH RIGHTWARDS ARROW AT LEFT', 983968: 'MODIFIER LETTER EXTRA-HIGH EXTRA-LOW 
CONTOUR TONE BAR', 983969: 'MODIFIER LETTER EXTRA-LOW EXTRA-HIGH CONTOUR TONE BAR', 128176: 'MONEY BAG', 128184: 'MONEY WITH WINGS', 128018: 'MONKEY', 128053: 'MONKEY FACE', 128669: 'MONORAIL', 127889: 'MOON VIEWING CEREMONY', 128507: 'MOUNT FUJI', 128693: 'MOUNTAIN BICYCLIST', 128672: 'MOUNTAIN CABLEWAY', 128670: 'MOUNTAIN RAILWAY', 128001: 'MOUSE', 128045: 'MOUSE FACE', 128068: 'MOUTH', 127909: 'MOVIE CAMERA', 128511: 'MOYAI', 127926: 'MULTIPLE MUSICAL NOTES', 127812: 'MUSHROOM', 127929: 'MUSICAL KEYBOARD', 127925: 'MUSICAL NOTE', 127932: 'MUSICAL SCORE', 128133: 'NAIL POLISH', 128219: 'NAME BADGE', 128084: 'NECKTIE', 127312: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER A', 127313: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER B', 127314: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER C', 127315: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER D', 127316: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER E', 127317: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER F', 127318: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER G', 127320: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER I', 127321: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER J', 127322: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER K', 127323: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER L', 127324: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER M', 127325: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER N', 127326: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER O', 127328: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER Q', 127329: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER R', 127330: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER S', 127331: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER T', 127332: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER U', 127333: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER V', 127334: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER W', 127335: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER X', 127336: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER Y', 127337: 'NEGATIVE CIRCLED LATIN CAPITAL LETTER Z', 127374: 'NEGATIVE SQUARED AB', 10062: 'NEGATIVE SQUARED CROSS MARK', 127344: 'NEGATIVE SQUARED LATIN CAPITAL LETTER A', 127345: 'NEGATIVE SQUARED 
LATIN CAPITAL LETTER B', 127346: 'NEGATIVE SQUARED LATIN CAPITAL LETTER C', 127347: 'NEGATIVE SQUARED LATIN CAPITAL LETTER D', 127348: 'NEGATIVE SQUARED LATIN CAPITAL LETTER E', 127349: 'NEGATIVE SQUARED LATIN CAPITAL LETTER F', 127350: 'NEGATIVE SQUARED LATIN CAPITAL LETTER G', 127351: 'NEGATIVE SQUARED LATIN CAPITAL LETTER H', 127352: 'NEGATIVE SQUARED LATIN CAPITAL LETTER I', 127354: 'NEGATIVE SQUARED LATIN CAPITAL LETTER K', 127357: 'NEGATIVE SQUARED LATIN CAPITAL LETTER N', 127358: 'NEGATIVE SQUARED LATIN CAPITAL LETTER O', 127360: 'NEGATIVE SQUARED LATIN CAPITAL LETTER Q', 127361: 'NEGATIVE SQUARED LATIN CAPITAL LETTER R', 127362: 'NEGATIVE SQUARED LATIN CAPITAL LETTER S', 127363: 'NEGATIVE SQUARED LATIN CAPITAL LETTER T', 127364: 'NEGATIVE SQUARED LATIN CAPITAL LETTER U', 127365: 'NEGATIVE SQUARED LATIN CAPITAL LETTER V', 127366: 'NEGATIVE SQUARED LATIN CAPITAL LETTER W', 127367: 'NEGATIVE SQUARED LATIN CAPITAL LETTER X', 127368: 'NEGATIVE SQUARED LATIN CAPITAL LETTER Y', 127369: 'NEGATIVE SQUARED LATIN CAPITAL LETTER Z', 127375: 'NEGATIVE SQUARED WC', 128528: 'NEUTRAL FACE', 127761: 'NEW MOON SYMBOL', 127770: 'NEW MOON WITH FACE', 128240: 'NEWSPAPER', 127747: 'NIGHT WITH STARS', 128691: 'NO BICYCLES', 128683: 'NO ENTRY SIGN', 128245: 'NO MOBILE PHONES', 128286: 'NO ONE UNDER EIGHTEEN SYMBOL', 128695: 'NO PEDESTRIANS', 128685: 'NO SMOKING SYMBOL', 128689: 'NON-POTABLE WATER SYMBOL', 128067: 'NOSE', 128211: 'NOTEBOOK', 128212: 'NOTEBOOK WITH DECORATIVE COVER', 128297: 'NUT AND BOLT', 128025: 'OCTOPUS', 127842: 'ODEN', 127970: 'OFFICE BUILDING', 128076: 'OK HAND SIGN', 128116: 'OLDER MAN', 128117: 'OLDER WOMAN', 128283: 'ON WITH EXCLAMATION MARK WITH LEFT RIGHT ARROW ABOVE', 128664: 'ONCOMING AUTOMOBILE', 128653: 'ONCOMING BUS', 128660: 'ONCOMING POLICE CAR', 128662: 'ONCOMING TAXI', 128214: 'OPEN BOOK', 128194: 'OPEN FILE FOLDER', 128080: 'OPEN HANDS SIGN', 128275: 'OPEN LOCK', 128237: 'OPEN MAILBOX WITH LOWERED FLAG', 128236: 'OPEN MAILBOX WITH RAISED FLAG', 
9934: 'OPHIUCHUS', 128191: 'OPTICAL DISC', 128217: 'ORANGE BOOK', 2934: 'ORIYA FRACTION ONE EIGHTH', 2931: 'ORIYA FRACTION ONE HALF', 2930: 'ORIYA FRACTION ONE QUARTER', 2933: 'ORIYA FRACTION ONE SIXTEENTH', 2932: 'ORIYA FRACTION THREE QUARTERS', 2935: 'ORIYA FRACTION THREE SIXTEENTHS', 128228: 'OUTBOX TRAY', 128002: 'OX', 128230: 'PACKAGE', 128196: 'PAGE FACING UP', 128195: 'PAGE WITH CURL', 128223: 'PAGER', 127796: 'PALM TREE', 128060: 'PANDA FACE', 128206: 'PAPERCLIP', 127881: 'PARTY POPPER', 128706: 'PASSPORT CONTROL', 128062: 'PAW PRINTS', 127825: 'PEACH', 127824: 'PEAR', 128694: 'PEDESTRIAN', 128039: 'PENGUIN', 128532: 'PENSIVE FACE', 9956: 'PENTAGRAM', 127917: 'PERFORMING ARTS', 128547: 'PERSEVERING FACE', 128583: 'PERSON BOWING DEEPLY', 128589: 'PERSON FROWNING', 128588: 'PERSON RAISING BOTH HANDS IN CELEBRATION', 128113: 'PERSON WITH BLOND HAIR', 128591: 'PERSON WITH FOLDED HANDS', 128590: 'PERSON WITH POUTING FACE', 128187: 'PERSONAL COMPUTER', 128022: 'PIG', 128055: 'PIG FACE', 128061: 'PIG NOSE', 128169: 'PILE OF POO', 128138: 'PILL', 127885: 'PINE DECORATION', 127821: 'PINEAPPLE', 128299: 'PISTOL', 127185: 'PLAYING CARD ACE OF CLUBS', 127169: 'PLAYING CARD ACE OF DIAMONDS', 127153: 'PLAYING CARD ACE OF HEARTS', 127137: 'PLAYING CARD ACE OF SPADES', 127136: 'PLAYING CARD BACK', 127183: 'PLAYING CARD BLACK JOKER', 127192: 'PLAYING CARD EIGHT OF CLUBS', 127176: 'PLAYING CARD EIGHT OF DIAMONDS', 127160: 'PLAYING CARD EIGHT OF HEARTS', 127144: 'PLAYING CARD EIGHT OF SPADES', 127189: 'PLAYING CARD FIVE OF CLUBS', 127173: 'PLAYING CARD FIVE OF DIAMONDS', 127157: 'PLAYING CARD FIVE OF HEARTS', 127141: 'PLAYING CARD FIVE OF SPADES', 127188: 'PLAYING CARD FOUR OF CLUBS', 127172: 'PLAYING CARD FOUR OF DIAMONDS', 127156: 'PLAYING CARD FOUR OF HEARTS', 127140: 'PLAYING CARD FOUR OF SPADES', 127195: 'PLAYING CARD JACK OF CLUBS', 127179: 'PLAYING CARD JACK OF DIAMONDS', 127163: 'PLAYING CARD JACK OF HEARTS', 127147: 'PLAYING CARD JACK OF SPADES', 127198: 'PLAYING 
CARD KING OF CLUBS', 127182: 'PLAYING CARD KING OF DIAMONDS', 127166: 'PLAYING CARD KING OF HEARTS', 127150: 'PLAYING CARD KING OF SPADES', 127196: 'PLAYING CARD KNIGHT OF CLUBS', 127180: 'PLAYING CARD KNIGHT OF DIAMONDS', 127164: 'PLAYING CARD KNIGHT OF HEARTS', 127148: 'PLAYING CARD KNIGHT OF SPADES', 127193: 'PLAYING CARD NINE OF CLUBS', 127177: 'PLAYING CARD NINE OF DIAMONDS', 127161: 'PLAYING CARD NINE OF HEARTS', 127145: 'PLAYING CARD NINE OF SPADES', 127197: 'PLAYING CARD QUEEN OF CLUBS', 127181: 'PLAYING CARD QUEEN OF DIAMONDS', 127165: 'PLAYING CARD QUEEN OF HEARTS', 127149: 'PLAYING CARD QUEEN OF SPADES', 127191: 'PLAYING CARD SEVEN OF CLUBS', 127175: 'PLAYING CARD SEVEN OF DIAMONDS', 127159: 'PLAYING CARD SEVEN OF HEARTS', 127143: 'PLAYING CARD SEVEN OF SPADES', 127190: 'PLAYING CARD SIX OF CLUBS', 127174: 'PLAYING CARD SIX OF DIAMONDS', 127158: 'PLAYING CARD SIX OF HEARTS', 127142: 'PLAYING CARD SIX OF SPADES', 127194: 'PLAYING CARD TEN OF CLUBS', 127178: 'PLAYING CARD TEN OF DIAMONDS', 127162: 'PLAYING CARD TEN OF HEARTS', 127146: 'PLAYING CARD TEN OF SPADES', 127187: 'PLAYING CARD THREE OF CLUBS', 127171: 'PLAYING CARD THREE OF DIAMONDS', 127155: 'PLAYING CARD THREE OF HEARTS', 127139: 'PLAYING CARD THREE OF SPADES', 127186: 'PLAYING CARD TWO OF CLUBS', 127170: 'PLAYING CARD TWO OF DIAMONDS', 127154: 'PLAYING CARD TWO OF HEARTS', 127138: 'PLAYING CARD TWO OF SPADES', 127199: 'PLAYING CARD WHITE JOKER', 128659: 'POLICE CAR', 128680: 'POLICE CARS REVOLVING LIGHT', 128110: 'POLICE OFFICER', 128041: 'POODLE', 128239: 'POSTAL HORN', 128238: 'POSTBOX', 127858: 'POT OF FOOD', 128688: 'POTABLE WATER SYMBOL', 128093: 'POUCH', 127831: 'POULTRY LEG', 128574: 'POUTING CAT FACE', 128545: 'POUTING FACE', 128120: 'PRINCESS', 128226: 'PUBLIC ADDRESS LOUDSPEAKER', 128156: 'PURPLE HEART', 128091: 'PURSE', 128204: 'PUSHPIN', 128686: 'PUT LITTER IN ITS PLACE SYMBOL', 128007: 'RABBIT', 128048: 'RABBIT FACE', 128251: 'RADIO', 128280: 'RADIO BUTTON', 128643: 'RAILWAY CAR', 
127752: 'RAINBOW', 9994: 'RAISED FIST', 9995: 'RAISED HAND', 128015: 'RAM', 128000: 'RAT', 128665: 'RECREATIONAL VEHICLE', 127822: 'RED APPLE', 127462: 'REGIONAL INDICATOR SYMBOL LETTER A', 127463: 'REGIONAL INDICATOR SYMBOL LETTER B', 127464: 'REGIONAL INDICATOR SYMBOL LETTER C', 127465: 'REGIONAL INDICATOR SYMBOL LETTER D', 127466: 'REGIONAL INDICATOR SYMBOL LETTER E', 127467: 'REGIONAL INDICATOR SYMBOL LETTER F', 127468: 'REGIONAL INDICATOR SYMBOL LETTER G', 127469: 'REGIONAL INDICATOR SYMBOL LETTER H', 127470: 'REGIONAL INDICATOR SYMBOL LETTER I', 127471: 'REGIONAL INDICATOR SYMBOL LETTER J', 127472: 'REGIONAL INDICATOR SYMBOL LETTER K', 127473: 'REGIONAL INDICATOR SYMBOL LETTER L', 127474: 'REGIONAL INDICATOR SYMBOL LETTER M', 127475: 'REGIONAL INDICATOR SYMBOL LETTER N', 127476: 'REGIONAL INDICATOR SYMBOL LETTER O', 127477: 'REGIONAL INDICATOR SYMBOL LETTER P', 127478: 'REGIONAL INDICATOR SYMBOL LETTER Q', 127479: 'REGIONAL INDICATOR SYMBOL LETTER R', 127480: 'REGIONAL INDICATOR SYMBOL LETTER S', 127481: 'REGIONAL INDICATOR SYMBOL LETTER T', 127482: 'REGIONAL INDICATOR SYMBOL LETTER U', 127483: 'REGIONAL INDICATOR SYMBOL LETTER V', 127484: 'REGIONAL INDICATOR SYMBOL LETTER W', 127485: 'REGIONAL INDICATOR SYMBOL LETTER X', 127486: 'REGIONAL INDICATOR SYMBOL LETTER Y', 127487: 'REGIONAL INDICATOR SYMBOL LETTER Z', 128524: 'RELIEVED FACE', 128699: 'RESTROOM', 128158: 'REVOLVING HEARTS', 127872: 'RIBBON', 127833: 'RICE BALL', 127832: 'RICE CRACKER', 9957: 'RIGHT-HANDED INTERLACED PENTAGRAM', 128270: 'RIGHT-POINTING MAGNIFYING GLASS', 128141: 'RING', 127840: 'ROASTED SWEET POTATO', 128640: 'ROCKET', 127906: 'ROLLER COASTER', 128019: 'ROOSTER', 127801: 'ROSE', 128205: 'ROUND PUSHPIN', 128675: 'ROWBOAT', 127945: 'RUGBY FOOTBALL', 127939: 'RUNNER', 127933: 'RUNNING SHIRT WITH SASH', 127862: 'SAKE BOTTLE AND CUP', 128225: 'SATELLITE ANTENNA', 127927: 'SAXOPHONE', 127979: 'SCHOOL', 127890: 'SCHOOL SATCHEL', 128220: 'SCROLL', 128186: 'SEAT', 128584: 'SEE-NO-EVIL 
MONKEY', 127793: 'SEEDLING', 127847: 'SHAVED ICE', 128017: 'SHEEP', 128674: 'SHIP', 127776: 'SHOOTING STAR', 127856: 'SHORTCAKE', 128703: 'SHOWER', 128510: 'SILHOUETTE OF JAPAN', 128303: 'SIX POINTED STAR WITH MIDDLE DOT', 127935: 'SKI AND SKI BOOT', 128128: 'SKULL', 128164: 'SLEEPING SYMBOL', 128554: 'SLEEPY FACE', 127829: 'SLICE OF PIZZA', 127920: 'SLOT MACHINE', 128313: 'SMALL BLUE DIAMOND', 128312: 'SMALL ORANGE DIAMOND', 128571: 'SMILING CAT FACE WITH HEART-SHAPED EYES', 128570: 'SMILING CAT FACE WITH OPEN MOUTH', 128519: 'SMILING FACE WITH HALO', 128525: 'SMILING FACE WITH HEART-SHAPED EYES', 128520: 'SMILING FACE WITH HORNS', 128515: 'SMILING FACE WITH OPEN MOUTH', 128517: 'SMILING FACE WITH OPEN MOUTH AND COLD SWEAT', 128516: 'SMILING FACE WITH OPEN MOUTH AND SMILING EYES', 128518: 'SMILING FACE WITH OPEN MOUTH AND TIGHTLY-CLOSED EYES', 128522: 'SMILING FACE WITH SMILING EYES', 128526: 'SMILING FACE WITH SUNGLASSES', 128527: 'SMIRKING FACE', 128684: 'SMOKING SYMBOL', 128012: 'SNAIL', 128013: 'SNAKE', 127938: 'SNOWBOARDER', 127846: 'SOFT ICE CREAM', 128284: 'SOON WITH RIGHTWARDS ARROW ABOVE', 127837: 'SPAGHETTI', 10024: 'SPARKLES', 128150: 'SPARKLING HEART', 128586: 'SPEAK-NO-EVIL MONKEY', 128264: 'SPEAKER', 128263: 'SPEAKER WITH CANCELLATION STROKE', 128265: 'SPEAKER WITH ONE SOUND WAVE', 128266: 'SPEAKER WITH THREE SOUND WAVES', 128172: 'SPEECH BALLOON', 128676: 'SPEEDBOAT', 128026: 'SPIRAL SHELL', 128166: 'SPLASHING SWEAT SYMBOL', 128051: 'SPOUTING WHALE', 127545: 'SQUARED CJK UNIFIED IDEOGRAPH-5272', 127540: 'SQUARED CJK UNIFIED IDEOGRAPH-5408', 127546: 'SQUARED CJK UNIFIED IDEOGRAPH-55B6', 127543: 'SQUARED CJK UNIFIED IDEOGRAPH-6708', 127542: 'SQUARED CJK UNIFIED IDEOGRAPH-6709', 127541: 'SQUARED CJK UNIFIED IDEOGRAPH-6E80', 127544: 'SQUARED CJK UNIFIED IDEOGRAPH-7533', 127538: 'SQUARED CJK UNIFIED IDEOGRAPH-7981', 127539: 'SQUARED CJK UNIFIED IDEOGRAPH-7A7A', 127377: 'SQUARED CL', 127378: 'SQUARED COOL', 127379: 'SQUARED FREE', 127380: 'SQUARED ID', 
127489: 'SQUARED KATAKANA KOKO', 127490: 'SQUARED KATAKANA SA', 127280: 'SQUARED LATIN CAPITAL LETTER A', 127282: 'SQUARED LATIN CAPITAL LETTER C', 127283: 'SQUARED LATIN CAPITAL LETTER D', 127284: 'SQUARED LATIN CAPITAL LETTER E', 127285: 'SQUARED LATIN CAPITAL LETTER F', 127286: 'SQUARED LATIN CAPITAL LETTER G', 127287: 'SQUARED LATIN CAPITAL LETTER H', 127288: 'SQUARED LATIN CAPITAL LETTER I', 127289: 'SQUARED LATIN CAPITAL LETTER J', 127290: 'SQUARED LATIN CAPITAL LETTER K', 127291: 'SQUARED LATIN CAPITAL LETTER L', 127292: 'SQUARED LATIN CAPITAL LETTER M', 127294: 'SQUARED LATIN CAPITAL LETTER O', 127296: 'SQUARED LATIN CAPITAL LETTER Q', 127297: 'SQUARED LATIN CAPITAL LETTER R', 127299: 'SQUARED LATIN CAPITAL LETTER T', 127300: 'SQUARED LATIN CAPITAL LETTER U', 127301: 'SQUARED LATIN CAPITAL LETTER V', 127303: 'SQUARED LATIN CAPITAL LETTER X', 127304: 'SQUARED LATIN CAPITAL LETTER Y', 127305: 'SQUARED LATIN CAPITAL LETTER Z', 10190: 'SQUARED LOGICAL AND', 10191: 'SQUARED LOGICAL OR', 127381: 'SQUARED NEW', 127382: 'SQUARED NG', 127383: 'SQUARED OK', 127384: 'SQUARED SOS', 127385: 'SQUARED UP WITH EXCLAMATION MARK', 127386: 'SQUARED VS', 127311: 'SQUARED WC', 128649: 'STATION', 128509: 'STATUE OF LIBERTY', 128642: 'STEAM LOCOMOTIVE', 127836: 'STEAMING BOWL', 9201: 'STOPWATCH', 128207: 'STRAIGHT RULER', 127827: 'STRAWBERRY', 127774: 'SUN WITH FACE', 127803: 'SUNFLOWER', 127749: 'SUNRISE', 127748: 'SUNRISE OVER MOUNTAINS', 127751: 'SUNSET OVER BUILDINGS', 127940: 'SURFER', 127843: 'SUSHI', 128671: 'SUSPENSION RAILWAY', 127946: 'SWIMMER', 128137: 'SYRINGE', 128085: 'T-SHIRT', 983624: 'TAMIL CONSONANT C', 983644: 'TAMIL CONSONANT H', 983640: 'TAMIL CONSONANT J', 983622: 'TAMIL CONSONANT K', 983645: 'TAMIL CONSONANT KSS', 983634: 'TAMIL CONSONANT L', 983637: 'TAMIL CONSONANT LL', 983636: 'TAMIL CONSONANT LLL', 983631: 'TAMIL CONSONANT M', 983629: 'TAMIL CONSONANT N', 983623: 'TAMIL CONSONANT NG', 983627: 'TAMIL CONSONANT NN', 983639: 'TAMIL CONSONANT NNN', 983625: 
'TAMIL CONSONANT NY', 983630: 'TAMIL CONSONANT P', 983633: 'TAMIL CONSONANT R', 983638: 'TAMIL CONSONANT RR', 983643: 'TAMIL CONSONANT S', 983641: 'TAMIL CONSONANT SH', 983642: 'TAMIL CONSONANT SS', 983628: 'TAMIL CONSONANT T', 983626: 'TAMIL CONSONANT TT', 983635: 'TAMIL CONSONANT V', 983632: 'TAMIL CONSONANT Y', 983668: 'TAMIL SYLLABLE CAA', 983675: 'TAMIL SYLLABLE CAI', 983678: 'TAMIL SYLLABLE CAU', 983673: 'TAMIL SYLLABLE CE', 983674: 'TAMIL SYLLABLE CEE', 983669: 'TAMIL SYLLABLE CI', 983670: 'TAMIL SYLLABLE CII', 983676: 'TAMIL SYLLABLE CO', 983677: 'TAMIL SYLLABLE COO', 983671: 'TAMIL SYLLABLE CU', 983672: 'TAMIL SYLLABLE CUU', 983888: 'TAMIL SYLLABLE HAA', 983895: 'TAMIL SYLLABLE HAI', 983898: 'TAMIL SYLLABLE HAU', 983893: 'TAMIL SYLLABLE HE', 983894: 'TAMIL SYLLABLE HEE', 983889: 'TAMIL SYLLABLE HI', 983890: 'TAMIL SYLLABLE HII', 983896: 'TAMIL SYLLABLE HO', 983897: 'TAMIL SYLLABLE HOO', 983891: 'TAMIL SYLLABLE HU', 983892: 'TAMIL SYLLABLE HUU', 983844: 'TAMIL SYLLABLE JAA', 983851: 'TAMIL SYLLABLE JAI', 983854: 'TAMIL SYLLABLE JAU', 983849: 'TAMIL SYLLABLE JE', 983850: 'TAMIL SYLLABLE JEE', 983845: 'TAMIL SYLLABLE JI', 983846: 'TAMIL SYLLABLE JII', 983852: 'TAMIL SYLLABLE JO', 983853: 'TAMIL SYLLABLE JOO', 983847: 'TAMIL SYLLABLE JU', 983848: 'TAMIL SYLLABLE JUU', 983646: 'TAMIL SYLLABLE KAA', 983653: 'TAMIL SYLLABLE KAI', 983656: 'TAMIL SYLLABLE KAU', 983651: 'TAMIL SYLLABLE KE', 983652: 'TAMIL SYLLABLE KEE', 983647: 'TAMIL SYLLABLE KI', 983648: 'TAMIL SYLLABLE KII', 983654: 'TAMIL SYLLABLE KO', 983655: 'TAMIL SYLLABLE KOO', 983899: 'TAMIL SYLLABLE KSSA', 983900: 'TAMIL SYLLABLE KSSAA', 983907: 'TAMIL SYLLABLE KSSAI', 983910: 'TAMIL SYLLABLE KSSAU', 983905: 'TAMIL SYLLABLE KSSE', 983906: 'TAMIL SYLLABLE KSSEE', 983901: 'TAMIL SYLLABLE KSSI', 983902: 'TAMIL SYLLABLE KSSII', 983908: 'TAMIL SYLLABLE KSSO', 983909: 'TAMIL SYLLABLE KSSOO', 983903: 'TAMIL SYLLABLE KSSU', 983904: 'TAMIL SYLLABLE KSSUU', 983649: 'TAMIL SYLLABLE KU', 983650: 'TAMIL SYLLABLE KUU', 
983778: 'TAMIL SYLLABLE LAA', 983785: 'TAMIL SYLLABLE LAI', 983788: 'TAMIL SYLLABLE LAU', 983783: 'TAMIL SYLLABLE LE', 983784: 'TAMIL SYLLABLE LEE', 983779: 'TAMIL SYLLABLE LI', 983780: 'TAMIL SYLLABLE LII', 983811: 'TAMIL SYLLABLE LLAA', 983818: 'TAMIL SYLLABLE LLAI', 983821: 'TAMIL SYLLABLE LLAU', 983816: 'TAMIL SYLLABLE LLE', 983817: 'TAMIL SYLLABLE LLEE', 983812: 'TAMIL SYLLABLE LLI', 983813: 'TAMIL SYLLABLE LLII', 983800: 'TAMIL SYLLABLE LLLAA', 983807: 'TAMIL SYLLABLE LLLAI', 983810: 'TAMIL SYLLABLE LLLAU', 983805: 'TAMIL SYLLABLE LLLE', 983806: 'TAMIL SYLLABLE LLLEE', 983801: 'TAMIL SYLLABLE LLLI', 983802: 'TAMIL SYLLABLE LLLII', 983808: 'TAMIL SYLLABLE LLLO', 983809: 'TAMIL SYLLABLE LLLOO', 983803: 'TAMIL SYLLABLE LLLU', 983804: 'TAMIL SYLLABLE LLLUU', 983819: 'TAMIL SYLLABLE LLO', 983820: 'TAMIL SYLLABLE LLOO', 983814: 'TAMIL SYLLABLE LLU', 983815: 'TAMIL SYLLABLE LLUU', 983786: 'TAMIL SYLLABLE LO', 983787: 'TAMIL SYLLABLE LOO', 983781: 'TAMIL SYLLABLE LU', 983782: 'TAMIL SYLLABLE LUU', 983745: 'TAMIL SYLLABLE MAA', 983752: 'TAMIL SYLLABLE MAI', 983755: 'TAMIL SYLLABLE MAU', 983750: 'TAMIL SYLLABLE ME', 983751: 'TAMIL SYLLABLE MEE', 983746: 'TAMIL SYLLABLE MI', 983747: 'TAMIL SYLLABLE MII', 983753: 'TAMIL SYLLABLE MO', 983754: 'TAMIL SYLLABLE MOO', 983748: 'TAMIL SYLLABLE MU', 983749: 'TAMIL SYLLABLE MUU', 983723: 'TAMIL SYLLABLE NAA', 983730: 'TAMIL SYLLABLE NAI', 983733: 'TAMIL SYLLABLE NAU', 983728: 'TAMIL SYLLABLE NE', 983729: 'TAMIL SYLLABLE NEE', 983657: 'TAMIL SYLLABLE NGAA', 983664: 'TAMIL SYLLABLE NGAI', 983667: 'TAMIL SYLLABLE NGAU', 983662: 'TAMIL SYLLABLE NGE', 983663: 'TAMIL SYLLABLE NGEE', 983658: 'TAMIL SYLLABLE NGI', 983659: 'TAMIL SYLLABLE NGII', 983665: 'TAMIL SYLLABLE NGO', 983666: 'TAMIL SYLLABLE NGOO', 983660: 'TAMIL SYLLABLE NGU', 983661: 'TAMIL SYLLABLE NGUU', 983724: 'TAMIL SYLLABLE NI', 983725: 'TAMIL SYLLABLE NII', 983701: 'TAMIL SYLLABLE NNAA', 983708: 'TAMIL SYLLABLE NNAI', 983711: 'TAMIL SYLLABLE NNAU', 983706: 'TAMIL SYLLABLE 
NNE', 983707: 'TAMIL SYLLABLE NNEE', 983702: 'TAMIL SYLLABLE NNI', 983703: 'TAMIL SYLLABLE NNII', 983833: 'TAMIL SYLLABLE NNNAA', 983840: 'TAMIL SYLLABLE NNNAI', 983843: 'TAMIL SYLLABLE NNNAU', 983838: 'TAMIL SYLLABLE NNNE', 983839: 'TAMIL SYLLABLE NNNEE', 983834: 'TAMIL SYLLABLE NNNI', 983835: 'TAMIL SYLLABLE NNNII', 983841: 'TAMIL SYLLABLE NNNO', 983842: 'TAMIL SYLLABLE NNNOO', 983836: 'TAMIL SYLLABLE NNNU', 983837: 'TAMIL SYLLABLE NNNUU', 983709: 'TAMIL SYLLABLE NNO', 983710: 'TAMIL SYLLABLE NNOO', 983704: 'TAMIL SYLLABLE NNU', 983705: 'TAMIL SYLLABLE NNUU', 983731: 'TAMIL SYLLABLE NO', 983732: 'TAMIL SYLLABLE NOO', 983726: 'TAMIL SYLLABLE NU', 983727: 'TAMIL SYLLABLE NUU', 983679: 'TAMIL SYLLABLE NYAA', 983686: 'TAMIL SYLLABLE NYAI', 983689: 'TAMIL SYLLABLE NYAU', 983684: 'TAMIL SYLLABLE NYE', 983685: 'TAMIL SYLLABLE NYEE', 983680: 'TAMIL SYLLABLE NYI', 983681: 'TAMIL SYLLABLE NYII', 983687: 'TAMIL SYLLABLE NYO', 983688: 'TAMIL SYLLABLE NYOO', 983682: 'TAMIL SYLLABLE NYU', 983683: 'TAMIL SYLLABLE NYUU', 983734: 'TAMIL SYLLABLE PAA', 983741: 'TAMIL SYLLABLE PAI', 983744: 'TAMIL SYLLABLE PAU', 983739: 'TAMIL SYLLABLE PE', 983740: 'TAMIL SYLLABLE PEE', 983735: 'TAMIL SYLLABLE PI', 983736: 'TAMIL SYLLABLE PII', 983742: 'TAMIL SYLLABLE PO', 983743: 'TAMIL SYLLABLE POO', 983737: 'TAMIL SYLLABLE PU', 983738: 'TAMIL SYLLABLE PUU', 983767: 'TAMIL SYLLABLE RAA', 983774: 'TAMIL SYLLABLE RAI', 983777: 'TAMIL SYLLABLE RAU', 983772: 'TAMIL SYLLABLE RE', 983773: 'TAMIL SYLLABLE REE', 983768: 'TAMIL SYLLABLE RI', 983769: 'TAMIL SYLLABLE RII', 983775: 'TAMIL SYLLABLE RO', 983776: 'TAMIL SYLLABLE ROO', 983822: 'TAMIL SYLLABLE RRAA', 983829: 'TAMIL SYLLABLE RRAI', 983832: 'TAMIL SYLLABLE RRAU', 983827: 'TAMIL SYLLABLE RRE', 983828: 'TAMIL SYLLABLE RREE', 983823: 'TAMIL SYLLABLE RRI', 983824: 'TAMIL SYLLABLE RRII', 983830: 'TAMIL SYLLABLE RRO', 983831: 'TAMIL SYLLABLE RROO', 983825: 'TAMIL SYLLABLE RRU', 983826: 'TAMIL SYLLABLE RRUU', 983770: 'TAMIL SYLLABLE RU', 983771: 'TAMIL 
SYLLABLE RUU', 983877: 'TAMIL SYLLABLE SAA', 983884: 'TAMIL SYLLABLE SAI', 983887: 'TAMIL SYLLABLE SAU', 983882: 'TAMIL SYLLABLE SE', 983883: 'TAMIL SYLLABLE SEE', 983855: 'TAMIL SYLLABLE SHAA', 983862: 'TAMIL SYLLABLE SHAI', 983865: 'TAMIL SYLLABLE SHAU', 983860: 'TAMIL SYLLABLE SHE', 983861: 'TAMIL SYLLABLE SHEE', 983856: 'TAMIL SYLLABLE SHI', 983857: 'TAMIL SYLLABLE SHII', 983863: 'TAMIL SYLLABLE SHO', 983864: 'TAMIL SYLLABLE SHOO', 983911: 'TAMIL SYLLABLE SHRII', 983858: 'TAMIL SYLLABLE SHU', 983859: 'TAMIL SYLLABLE SHUU', 983878: 'TAMIL SYLLABLE SI', 983879: 'TAMIL SYLLABLE SII', 983885: 'TAMIL SYLLABLE SO', 983886: 'TAMIL SYLLABLE SOO', 983866: 'TAMIL SYLLABLE SSAA', 983873: 'TAMIL SYLLABLE SSAI', 983876: 'TAMIL SYLLABLE SSAU', 983871: 'TAMIL SYLLABLE SSE', 983872: 'TAMIL SYLLABLE SSEE', 983867: 'TAMIL SYLLABLE SSI', 983868: 'TAMIL SYLLABLE SSII', 983874: 'TAMIL SYLLABLE SSO', 983875: 'TAMIL SYLLABLE SSOO', 983869: 'TAMIL SYLLABLE SSU', 983870: 'TAMIL SYLLABLE SSUU', 983880: 'TAMIL SYLLABLE SU', 983881: 'TAMIL SYLLABLE SUU', 983712: 'TAMIL SYLLABLE TAA', 983719: 'TAMIL SYLLABLE TAI', 983722: 'TAMIL SYLLABLE TAU', 983717: 'TAMIL SYLLABLE TE', 983718: 'TAMIL SYLLABLE TEE', 983713: 'TAMIL SYLLABLE TI', 983714: 'TAMIL SYLLABLE TII', 983720: 'TAMIL SYLLABLE TO', 983721: 'TAMIL SYLLABLE TOO', 983690: 'TAMIL SYLLABLE TTAA', 983697: 'TAMIL SYLLABLE TTAI', 983700: 'TAMIL SYLLABLE TTAU', 983695: 'TAMIL SYLLABLE TTE', 983696: 'TAMIL SYLLABLE TTEE', 983691: 'TAMIL SYLLABLE TTI', 983692: 'TAMIL SYLLABLE TTII', 983698: 'TAMIL SYLLABLE TTO', 983699: 'TAMIL SYLLABLE TTOO', 983693: 'TAMIL SYLLABLE TTU', 983694: 'TAMIL SYLLABLE TTUU', 983715: 'TAMIL SYLLABLE TU', 983716: 'TAMIL SYLLABLE TUU', 983789: 'TAMIL SYLLABLE VAA', 983796: 'TAMIL SYLLABLE VAI', 983799: 'TAMIL SYLLABLE VAU', 983794: 'TAMIL SYLLABLE VE', 983795: 'TAMIL SYLLABLE VEE', 983790: 'TAMIL SYLLABLE VI', 983791: 'TAMIL SYLLABLE VII', 983797: 'TAMIL SYLLABLE VO', 983798: 'TAMIL SYLLABLE VOO', 983792: 'TAMIL 
SYLLABLE VU', 983793: 'TAMIL SYLLABLE VUU', 983756: 'TAMIL SYLLABLE YAA', 983763: 'TAMIL SYLLABLE YAI', 983766: 'TAMIL SYLLABLE YAU', 983761: 'TAMIL SYLLABLE YE', 983762: 'TAMIL SYLLABLE YEE', 983757: 'TAMIL SYLLABLE YI', 983758: 'TAMIL SYLLABLE YII', 983764: 'TAMIL SYLLABLE YO', 983765: 'TAMIL SYLLABLE YOO', 983759: 'TAMIL SYLLABLE YU', 983760: 'TAMIL SYLLABLE YUU', 127883: 'TANABATA TREE', 127818: 'TANGERINE', 128661: 'TAXI', 127861: 'TEACUP WITHOUT HANDLE', 128198: 'TEAR-OFF CALENDAR', 128222: 'TELEPHONE RECEIVER', 128301: 'TELESCOPE', 128250: 'TELEVISION', 127934: 'TENNIS RACQUET AND BALL', 128173: 'THOUGHT BALLOON', 128078: 'THUMBS DOWN SIGN', 128077: 'THUMBS UP SIGN', 4057: 'TIBETAN MARK LEADING MCHAN RTAGS', 4058: 'TIBETAN MARK TRAILING MCHAN RTAGS', 3980: 'TIBETAN SIGN INVERTED MCHU CAN', 3983: 'TIBETAN SUBJOINED SIGN INVERTED MCHU CAN', 3981: 'TIBETAN SUBJOINED SIGN LCE TSA CAN', 3982: 'TIBETAN SUBJOINED SIGN MCHU CAN', 127915: 'TICKET', 11647: 'TIFINAGH CONSONANT JOINER', 11632: 'TIFINAGH SEPARATOR MARK', 128005: 'TIGER', 128047: 'TIGER FACE', 9202: 'TIMER CLOCK', 128555: 'TIRED FACE', 128701: 'TOILET', 128508: 'TOKYO TOWER', 127813: 'TOMATO', 128069: 'TONGUE', 127913: 'TOP HAT', 128285: 'TOP WITH UPWARDS ARROW ABOVE', 128668: 'TRACTOR', 128646: 'TRAIN', 128650: 'TRAM', 128651: 'TRAM CAR', 128681: 'TRIANGULAR FLAG ON POST', 128208: 'TRIANGULAR RULER', 128305: 'TRIDENT EMBLEM', 128654: 'TROLLEYBUS', 127942: 'TROPHY', 127865: 'TROPICAL DRINK', 128032: 'TROPICAL FISH', 127930: 'TRUMPET', 127799: 'TULIP', 128034: 'TURTLE', 128256: 'TWISTED RIGHTWARDS ARROWS', 128149: 'TWO HEARTS', 128108: 'TWO MEN HOLDING HANDS', 128109: 'TWO WOMEN HOLDING HANDS', 128530: 'UNAMUSED FACE', 128314: 'UP-POINTING RED TRIANGLE', 128316: 'UP-POINTING SMALL RED TRIANGLE', 128678: 'VERTICAL TRAFFIC LIGHT', 128243: 'VIBRATION MODE', 128249: 'VIDEO CAMERA', 127918: 'VIDEO GAME', 128252: 'VIDEOCASSETTE', 127931: 'VIOLIN', 127755: 'VOLCANO', 127768: 'WANING CRESCENT MOON SYMBOL', 127766: 
'WANING GIBBOUS MOON SYMBOL', 128003: 'WATER BUFFALO', 128702: 'WATER CLOSET', 127754: 'WATER WAVE', 127817: 'WATERMELON', 128075: 'WAVING HAND SIGN', 127762: 'WAXING CRESCENT MOON SYMBOL', 127764: 'WAXING GIBBOUS MOON SYMBOL', 128576: 'WEARY CAT FACE', 128553: 'WEARY FACE', 128146: 'WEDDING', 128011: 'WHALE', 128071: 'WHITE DOWN POINTING BACKHAND INDEX', 10069: 'WHITE EXCLAMATION MARK ORNAMENT', 128174: 'WHITE FLOWER', 9989: 'WHITE HEAVY CHECK MARK', 128072: 'WHITE LEFT POINTING BACKHAND INDEX', 10068: 'WHITE QUESTION MARK ORNAMENT', 128073: 'WHITE RIGHT POINTING BACKHAND INDEX', 128307: 'WHITE SQUARE BUTTON', 128070: 'WHITE UP POINTING BACKHAND INDEX', 127888: 'WIND CHIME', 127863: 'WINE GLASS', 128521: 'WINKING FACE', 128058: 'WOLF FACE', 128105: 'WOMAN', 128111: 'WOMAN WITH BUNNY EARS', 128098: 'WOMANS BOOTS', 128090: 'WOMANS CLOTHES', 128082: 'WOMANS HAT', 128097: 'WOMANS SANDAL', 128698: 'WOMENS SYMBOL', 127873: 'WRAPPED PRESENT', 128295: 'WRENCH', 128155: 'YELLOW HEART', } _names_corrected = { } _code_by_name = { 'AERIAL TRAMWAY': 128673, 'ALARM CLOCK': 9200, 'ALCHEMICAL SYMBOL FOR AIR': 128769, 'ALCHEMICAL SYMBOL FOR ALEMBIC': 128874, 'ALCHEMICAL SYMBOL FOR ALKALI': 128822, 'ALCHEMICAL SYMBOL FOR ALKALI-2': 128823, 'ALCHEMICAL SYMBOL FOR ALUM': 128837, 'ALCHEMICAL SYMBOL FOR AMALGAM': 128859, 'ALCHEMICAL SYMBOL FOR ANTIMONY ORE': 128811, 'ALCHEMICAL SYMBOL FOR AQUA REGIA': 128774, 'ALCHEMICAL SYMBOL FOR AQUA REGIA-2': 128775, 'ALCHEMICAL SYMBOL FOR AQUA VITAE': 128776, 'ALCHEMICAL SYMBOL FOR AQUA VITAE-2': 128777, 'ALCHEMICAL SYMBOL FOR AQUAFORTIS': 128773, 'ALCHEMICAL SYMBOL FOR ARSENIC': 128826, 'ALCHEMICAL SYMBOL FOR ASHES': 128855, 'ALCHEMICAL SYMBOL FOR AURIPIGMENT': 128829, 'ALCHEMICAL SYMBOL FOR BATH OF MARY': 128875, 'ALCHEMICAL SYMBOL FOR BATH OF VAPOURS': 128876, 'ALCHEMICAL SYMBOL FOR BISMUTH ORE': 128830, 'ALCHEMICAL SYMBOL FOR BLACK SULFUR': 128783, 'ALCHEMICAL SYMBOL FOR BORAX': 128834, 'ALCHEMICAL SYMBOL FOR BORAX-2': 128835, 'ALCHEMICAL 
SYMBOL FOR BORAX-3': 128836, 'ALCHEMICAL SYMBOL FOR BRICK': 128857, 'ALCHEMICAL SYMBOL FOR CADUCEUS': 128848, 'ALCHEMICAL SYMBOL FOR CALX': 128844, 'ALCHEMICAL SYMBOL FOR CAPUT MORTUUM': 128846, 'ALCHEMICAL SYMBOL FOR CINNABAR': 128787, 'ALCHEMICAL SYMBOL FOR COPPER ANTIMONIATE': 128805, 'ALCHEMICAL SYMBOL FOR COPPER ORE': 128800, 'ALCHEMICAL SYMBOL FOR CROCUS OF COPPER': 128803, 'ALCHEMICAL SYMBOL FOR CROCUS OF COPPER-2': 128804, 'ALCHEMICAL SYMBOL FOR CROCUS OF IRON': 128798, 'ALCHEMICAL SYMBOL FOR CRUCIBLE': 128869, 'ALCHEMICAL SYMBOL FOR CRUCIBLE-2': 128870, 'ALCHEMICAL SYMBOL FOR CRUCIBLE-3': 128871, 'ALCHEMICAL SYMBOL FOR CRUCIBLE-4': 128872, 'ALCHEMICAL SYMBOL FOR CRUCIBLE-5': 128873, 'ALCHEMICAL SYMBOL FOR DAY-NIGHT': 128880, 'ALCHEMICAL SYMBOL FOR DISSOLVE': 128865, 'ALCHEMICAL SYMBOL FOR DISSOLVE-2': 128866, 'ALCHEMICAL SYMBOL FOR DISTILL': 128864, 'ALCHEMICAL SYMBOL FOR EARTH': 128771, 'ALCHEMICAL SYMBOL FOR FIRE': 128770, 'ALCHEMICAL SYMBOL FOR GOLD': 128794, 'ALCHEMICAL SYMBOL FOR GUM': 128841, 'ALCHEMICAL SYMBOL FOR HALF DRAM': 128882, 'ALCHEMICAL SYMBOL FOR HALF OUNCE': 128883, 'ALCHEMICAL SYMBOL FOR HORSE DUNG': 128854, 'ALCHEMICAL SYMBOL FOR HOUR': 128878, 'ALCHEMICAL SYMBOL FOR IRON ORE': 128796, 'ALCHEMICAL SYMBOL FOR IRON ORE-2': 128797, 'ALCHEMICAL SYMBOL FOR IRON-COPPER ORE': 128801, 'ALCHEMICAL SYMBOL FOR LEAD ORE': 128810, 'ALCHEMICAL SYMBOL FOR LODESTONE': 128851, 'ALCHEMICAL SYMBOL FOR MARCASITE': 128824, 'ALCHEMICAL SYMBOL FOR MERCURY SUBLIMATE': 128784, 'ALCHEMICAL SYMBOL FOR MERCURY SUBLIMATE-2': 128785, 'ALCHEMICAL SYMBOL FOR MERCURY SUBLIMATE-3': 128786, 'ALCHEMICAL SYMBOL FOR MONTH': 128881, 'ALCHEMICAL SYMBOL FOR NIGHT': 128879, 'ALCHEMICAL SYMBOL FOR NITRE': 128789, 'ALCHEMICAL SYMBOL FOR OIL': 128838, 'ALCHEMICAL SYMBOL FOR PHILOSOPHERS SULFUR': 128782, 'ALCHEMICAL SYMBOL FOR POT ASHES': 128856, 'ALCHEMICAL SYMBOL FOR POWDER': 128843, 'ALCHEMICAL SYMBOL FOR POWDERED BRICK': 128858, 'ALCHEMICAL SYMBOL FOR PRECIPITATE': 128863, 
'ALCHEMICAL SYMBOL FOR PURIFY': 128867, 'ALCHEMICAL SYMBOL FOR PUTREFACTION': 128868, 'ALCHEMICAL SYMBOL FOR QUICK LIME': 128833, 'ALCHEMICAL SYMBOL FOR QUINTESSENCE': 128768, 'ALCHEMICAL SYMBOL FOR REALGAR': 128827, 'ALCHEMICAL SYMBOL FOR REALGAR-2': 128828, 'ALCHEMICAL SYMBOL FOR REGULUS': 128818, 'ALCHEMICAL SYMBOL FOR REGULUS OF ANTIMONY': 128816, 'ALCHEMICAL SYMBOL FOR REGULUS OF ANTIMONY-2': 128817, 'ALCHEMICAL SYMBOL FOR REGULUS OF IRON': 128799, 'ALCHEMICAL SYMBOL FOR REGULUS-2': 128819, 'ALCHEMICAL SYMBOL FOR REGULUS-3': 128820, 'ALCHEMICAL SYMBOL FOR REGULUS-4': 128821, 'ALCHEMICAL SYMBOL FOR RETORT': 128877, 'ALCHEMICAL SYMBOL FOR ROCK SALT': 128792, 'ALCHEMICAL SYMBOL FOR ROCK SALT-2': 128793, 'ALCHEMICAL SYMBOL FOR SAL-AMMONIAC': 128825, 'ALCHEMICAL SYMBOL FOR SALT': 128788, 'ALCHEMICAL SYMBOL FOR SALT OF ANTIMONY': 128813, 'ALCHEMICAL SYMBOL FOR SALT OF COPPER ANTIMONIATE': 128806, 'ALCHEMICAL SYMBOL FOR SCEPTER OF JOVE': 128847, 'ALCHEMICAL SYMBOL FOR SILVER': 128795, 'ALCHEMICAL SYMBOL FOR SOAP': 128852, 'ALCHEMICAL SYMBOL FOR SPIRIT': 128839, 'ALCHEMICAL SYMBOL FOR STARRED TRIDENT': 128850, 'ALCHEMICAL SYMBOL FOR STRATUM SUPER STRATUM': 128860, 'ALCHEMICAL SYMBOL FOR STRATUM SUPER STRATUM-2': 128861, 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF ANTIMONY': 128812, 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF COPPER': 128802, 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF SALT OF ANTIMONY': 128814, 'ALCHEMICAL SYMBOL FOR SUBLIMATE OF SALT OF COPPER': 128807, 'ALCHEMICAL SYMBOL FOR SUBLIMATION': 128862, 'ALCHEMICAL SYMBOL FOR SULFUR': 128781, 'ALCHEMICAL SYMBOL FOR TARTAR': 128831, 'ALCHEMICAL SYMBOL FOR TARTAR-2': 128832, 'ALCHEMICAL SYMBOL FOR TIN ORE': 128809, 'ALCHEMICAL SYMBOL FOR TINCTURE': 128840, 'ALCHEMICAL SYMBOL FOR TRIDENT': 128849, 'ALCHEMICAL SYMBOL FOR TUTTY': 128845, 'ALCHEMICAL SYMBOL FOR URINE': 128853, 'ALCHEMICAL SYMBOL FOR VERDIGRIS': 128808, 'ALCHEMICAL SYMBOL FOR VINEGAR': 128778, 'ALCHEMICAL SYMBOL FOR VINEGAR OF ANTIMONY': 128815, 'ALCHEMICAL SYMBOL FOR 
VINEGAR-2': 128779, 'ALCHEMICAL SYMBOL FOR VINEGAR-3': 128780, 'ALCHEMICAL SYMBOL FOR VITRIOL': 128790, 'ALCHEMICAL SYMBOL FOR VITRIOL-2': 128791, 'ALCHEMICAL SYMBOL FOR WATER': 128772, 'ALCHEMICAL SYMBOL FOR WAX': 128842, 'ALIEN MONSTER': 128126, 'AMBULANCE': 128657, 'AMERICAN FOOTBALL': 127944, 'ANGER SYMBOL': 128162, 'ANGRY FACE': 128544, 'ANT': 128028, 'ANTENNA WITH BARS': 128246, 'ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS': 128260, 'ARABIC LETTER KASHMIRI YEH': 1568, 'ARABIC SYMBOL DOT ABOVE': 64434, 'ARABIC SYMBOL DOT BELOW': 64435, 'ARABIC SYMBOL DOUBLE VERTICAL BAR BELOW': 64444, 'ARABIC SYMBOL FOUR DOTS ABOVE': 64442, 'ARABIC SYMBOL FOUR DOTS BELOW': 64443, 'ARABIC SYMBOL RING': 64447, 'ARABIC SYMBOL SMALL TAH ABOVE': 64448, 'ARABIC SYMBOL SMALL TAH BELOW': 64449, 'ARABIC SYMBOL THREE DOTS ABOVE': 64438, 'ARABIC SYMBOL THREE DOTS BELOW': 64439, 'ARABIC SYMBOL THREE DOTS POINTING DOWNWARDS ABOVE': 64440, 'ARABIC SYMBOL THREE DOTS POINTING DOWNWARDS BELOW': 64441, 'ARABIC SYMBOL TWO DOTS ABOVE': 64436, 'ARABIC SYMBOL TWO DOTS BELOW': 64437, 'ARABIC SYMBOL TWO DOTS VERTICALLY ABOVE': 64445, 'ARABIC SYMBOL TWO DOTS VERTICALLY BELOW': 64446, 'ARABIC WAVY HAMZA BELOW': 1631, 'ARTICULATED LORRY': 128667, 'ARTIST PALETTE': 127912, 'ASTONISHED FACE': 128562, 'ASTRONOMICAL SYMBOL FOR URANUS': 9954, 'ATHLETIC SHOE': 128095, 'AUBERGINE': 127814, 'AUTOMATED TELLER MACHINE': 127975, 'AUTOMOBILE': 128663, 'BABY': 128118, 'BABY ANGEL': 128124, 'BABY BOTTLE': 127868, 'BABY CHICK': 128036, 'BABY SYMBOL': 128700, 'BACK WITH LEFTWARDS ARROW ABOVE': 128281, 'BACTRIAN CAMEL': 128043, 'BAGGAGE CLAIM': 128708, 'BALLOON': 127880, 'BAMUM LETTER PHASE-A FIRI': 92217, 'BAMUM LETTER PHASE-A GBIEE FON': 92161, 'BAMUM LETTER PHASE-A GHEUAEGHEUAE': 92193, 'BAMUM LETTER PHASE-A GHEUAERAE': 92188, 'BAMUM LETTER PHASE-A KAFA': 92199, 'BAMUM LETTER PHASE-A KAQ': 92240, 'BAMUM LETTER PHASE-A KET': 92211, 'BAMUM LETTER PHASE-A KEUKEUTNDA': 92179, 'BAMUM LETTER PHASE-A KPOQ': 
92219, 'BAMUM LETTER PHASE-A KUOQ': 92213, 'BAMUM LETTER PHASE-A LAPAQ': 92183, 'BAMUM LETTER PHASE-A LET KUT': 92184, 'BAMUM LETTER PHASE-A LOMMAE': 92216, 'BAMUM LETTER PHASE-A LU': 92243, 'BAMUM LETTER PHASE-A LUAEP': 92207, 'BAMUM LETTER PHASE-A MAEKEUP': 92186, 'BAMUM LETTER PHASE-A MAEM': 92238, 'BAMUM LETTER PHASE-A MAEMBGBIEE': 92171, 'BAMUM LETTER PHASE-A MAEMKPEN': 92203, 'BAMUM LETTER PHASE-A MAEMVEUX': 92174, 'BAMUM LETTER PHASE-A MAENYI': 92210, 'BAMUM LETTER PHASE-A MAESI': 92230, 'BAMUM LETTER PHASE-A MANSUAE': 92175, 'BAMUM LETTER PHASE-A MAP PIEET': 92221, 'BAMUM LETTER PHASE-A MBANYI': 92232, 'BAMUM LETTER PHASE-A MBAQ': 92246, 'BAMUM LETTER PHASE-A MEUNJOMNDEUQ': 92197, 'BAMUM LETTER PHASE-A MGBASA': 92196, 'BAMUM LETTER PHASE-A MON NGGEUAET': 92190, 'BAMUM LETTER PHASE-A MOOMEUT': 92214, 'BAMUM LETTER PHASE-A MOOMPUQ': 92198, 'BAMUM LETTER PHASE-A MVEUAENGAM': 92176, 'BAMUM LETTER PHASE-A NAA MFON': 92164, 'BAMUM LETTER PHASE-A NAQ': 92245, 'BAMUM LETTER PHASE-A NDA LEERAEWA': 92201, 'BAMUM LETTER PHASE-A NDAANGGEUAET': 92212, 'BAMUM LETTER PHASE-A NEN': 92244, 'BAMUM LETTER PHASE-A NGANGU': 92173, 'BAMUM LETTER PHASE-A NGGEN': 92229, 'BAMUM LETTER PHASE-A NGKUE MFON': 92160, 'BAMUM LETTER PHASE-A NGKUENZEUM': 92182, 'BAMUM LETTER PHASE-A NIKA': 92204, 'BAMUM LETTER PHASE-A NJAM': 92231, 'BAMUM LETTER PHASE-A NKAARAE': 92227, 'BAMUM LETTER PHASE-A NKINDI': 92180, 'BAMUM LETTER PHASE-A NSHA': 92241, 'BAMUM LETTER PHASE-A NSHIEE': 92237, 'BAMUM LETTER PHASE-A NTAP': 92223, 'BAMUM LETTER PHASE-A NTAP MFAA': 92185, 'BAMUM LETTER PHASE-A NTAP NTAA': 92194, 'BAMUM LETTER PHASE-A NTOQPEN': 92178, 'BAMUM LETTER PHASE-A NYET': 92233, 'BAMUM LETTER PHASE-A NYI': 92239, 'BAMUM LETTER PHASE-A NYIT MONGKEUAEQ': 92225, 'BAMUM LETTER PHASE-A NZA MFON': 92167, 'BAMUM LETTER PHASE-A NZUN MEUT': 92191, 'BAMUM LETTER PHASE-A PA LEERAEWA': 92200, 'BAMUM LETTER PHASE-A PAAM': 92236, 'BAMUM LETTER PHASE-A PAARAE': 92226, 'BAMUM LETTER PHASE-A PAMSHAE': 92189, 'BAMUM 
LETTER PHASE-A PASHAE': 92187, 'BAMUM LETTER PHASE-A PET': 92202, 'BAMUM LETTER PHASE-A PON MFON PIPAEMBA': 92163, 'BAMUM LETTER PHASE-A PON MFON PIPAEMGBIEE': 92162, 'BAMUM LETTER PHASE-A PON PA NJI PIPAEMBA': 92170, 'BAMUM LETTER PHASE-A PON PA NJI PIPAEMGBIEE': 92169, 'BAMUM LETTER PHASE-A PUP': 92205, 'BAMUM LETTER PHASE-A ROM': 92218, 'BAMUM LETTER PHASE-A SEUNYAM': 92177, 'BAMUM LETTER PHASE-A SHINDA PA NJI': 92168, 'BAMUM LETTER PHASE-A SHIRAE': 92222, 'BAMUM LETTER PHASE-A SHOQ NSHUT YUM': 92224, 'BAMUM LETTER PHASE-A SHUENSHUET': 92165, 'BAMUM LETTER PHASE-A SHUM': 92215, 'BAMUM LETTER PHASE-A SISA': 92195, 'BAMUM LETTER PHASE-A SONJAM': 92208, 'BAMUM LETTER PHASE-A SOQ': 92220, 'BAMUM LETTER PHASE-A SOT': 92235, 'BAMUM LETTER PHASE-A SUU': 92181, 'BAMUM LETTER PHASE-A TEUAEN': 92234, 'BAMUM LETTER PHASE-A TEUTEUWEN': 92209, 'BAMUM LETTER PHASE-A TITA MFON': 92166, 'BAMUM LETTER PHASE-A TU MAEMBA': 92172, 'BAMUM LETTER PHASE-A TUAEP': 92206, 'BAMUM LETTER PHASE-A U YUQ NAE': 92192, 'BAMUM LETTER PHASE-A UNKNOWN': 92228, 'BAMUM LETTER PHASE-A VEE': 92242, 'BAMUM LETTER PHASE-B FEE': 92293, 'BAMUM LETTER PHASE-B FEUX': 92291, 'BAMUM LETTER PHASE-B GHEUGHEN': 92271, 'BAMUM LETTER PHASE-B GHEUGHEUAEM': 92262, 'BAMUM LETTER PHASE-B KAM': 92256, 'BAMUM LETTER PHASE-B KEUAEM': 92294, 'BAMUM LETTER PHASE-B KEUPUQ': 92270, 'BAMUM LETTER PHASE-B KEUYEUX': 92272, 'BAMUM LETTER PHASE-B KIEEM': 92253, 'BAMUM LETTER PHASE-B KIQ': 92301, 'BAMUM LETTER PHASE-B LAANAE': 92273, 'BAMUM LETTER PHASE-B LAM NSHUT NYAM': 92259, 'BAMUM LETTER PHASE-B LET': 92297, 'BAMUM LETTER PHASE-B LOM NTEUM': 92251, 'BAMUM LETTER PHASE-B MA': 92300, 'BAMUM LETTER PHASE-B MA NJEUAENA': 92295, 'BAMUM LETTER PHASE-B MA NJUQA': 92296, 'BAMUM LETTER PHASE-B MBA MAELEE': 92252, 'BAMUM LETTER PHASE-B MBAARAE': 92255, 'BAMUM LETTER PHASE-B MBEURI': 92286, 'BAMUM LETTER PHASE-B MBIT MBAAKET': 92268, 'BAMUM LETTER PHASE-B MBUOQ': 92292, 'BAMUM LETTER PHASE-B MEUQ': 92281, 'BAMUM LETTER PHASE-B MEUT 
NGGEET': 92290, 'BAMUM LETTER PHASE-B MFIYAQ': 92284, 'BAMUM LETTER PHASE-B MFON TEUAEQ': 92267, 'BAMUM LETTER PHASE-B MONTIEEN': 92287, 'BAMUM LETTER PHASE-B NDU NJAA': 92261, 'BAMUM LETTER PHASE-B NGGAAM': 92298, 'BAMUM LETTER PHASE-B NGGEU MBU': 92277, 'BAMUM LETTER PHASE-B NGGUOQ': 92282, 'BAMUM LETTER PHASE-B NGGUOQ LARGE': 92283, 'BAMUM LETTER PHASE-B NGKINDI MVOP': 92276, 'BAMUM LETTER PHASE-B NGOM': 92302, 'BAMUM LETTER PHASE-B NSEN': 92299, 'BAMUM LETTER PHASE-B NSHUET': 92247, 'BAMUM LETTER PHASE-B NTIEE SHEUOQ': 92260, 'BAMUM LETTER PHASE-B NYAEMAE': 92288, 'BAMUM LETTER PHASE-B NYI NTEUM': 92269, 'BAMUM LETTER PHASE-B PARUM': 92274, 'BAMUM LETTER PHASE-B PEESHI': 92257, 'BAMUM LETTER PHASE-B PIT': 92263, 'BAMUM LETTER PHASE-B PUNGAAM': 92289, 'BAMUM LETTER PHASE-B SAKEUAE': 92279, 'BAMUM LETTER PHASE-B SET TU': 92250, 'BAMUM LETTER PHASE-B SHET NJAQ': 92265, 'BAMUM LETTER PHASE-B SHEUAEQTU': 92266, 'BAMUM LETTER PHASE-B SIEE': 92249, 'BAMUM LETTER PHASE-B SUE': 92285, 'BAMUM LETTER PHASE-B TAAM': 92280, 'BAMUM LETTER PHASE-B TU MAEMGBIEE': 92248, 'BAMUM LETTER PHASE-B TU NSIEE': 92264, 'BAMUM LETTER PHASE-B VEUM': 92275, 'BAMUM LETTER PHASE-B WUAET': 92278, 'BAMUM LETTER PHASE-B YAFU LEERAEWA': 92258, 'BAMUM LETTER PHASE-B YEURAE': 92254, 'BAMUM LETTER PHASE-C BUNG': 92352, 'BAMUM LETTER PHASE-C FUE': 92348, 'BAMUM LETTER PHASE-C GBAYI': 92312, 'BAMUM LETTER PHASE-C GHAP': 92320, 'BAMUM LETTER PHASE-C GHARAE': 92310, 'BAMUM LETTER PHASE-C KAA': 92329, 'BAMUM LETTER PHASE-C KEN FATIGUE': 92394, 'BAMUM LETTER PHASE-C KEN LAW': 92393, 'BAMUM LETTER PHASE-C KET': 92361, 'BAMUM LETTER PHASE-C KEUKAQ': 92321, 'BAMUM LETTER PHASE-C KEUM': 92386, 'BAMUM LETTER PHASE-C KEUSEUX': 92384, 'BAMUM LETTER PHASE-C KEUSHEUAEP': 92319, 'BAMUM LETTER PHASE-C KPARAQ': 92328, 'BAMUM LETTER PHASE-C KUOP NKAARAE': 92373, 'BAMUM LETTER PHASE-C KUT': 92366, 'BAMUM LETTER PHASE-C LAM': 92357, 'BAMUM LETTER PHASE-C LAP': 92342, 'BAMUM LETTER PHASE-C LIQ': 92397, 'BAMUM LETTER 
PHASE-C LU': 92365, 'BAMUM LETTER PHASE-C MA KEUAERI': 92339, 'BAMUM LETTER PHASE-C MA NSIEE': 92376, 'BAMUM LETTER PHASE-C MAEMBA': 92382, 'BAMUM LETTER PHASE-C MAESI': 92363, 'BAMUM LETTER PHASE-C MBAA CABBAGE-TREE': 92318, 'BAMUM LETTER PHASE-C MBAA PICKET': 92387, 'BAMUM LETTER PHASE-C MBANYI': 92383, 'BAMUM LETTER PHASE-C MBEEKEET': 92311, 'BAMUM LETTER PHASE-C MBERAE': 92354, 'BAMUM LETTER PHASE-C MBEUM': 92315, 'BAMUM LETTER PHASE-C MBEUX': 92385, 'BAMUM LETTER PHASE-C MBI': 92381, 'BAMUM LETTER PHASE-C MBIRIEEN': 92343, 'BAMUM LETTER PHASE-C MBIT': 92326, 'BAMUM LETTER PHASE-C MBUAEM': 92364, 'BAMUM LETTER PHASE-C MBUE': 92324, 'BAMUM LETTER PHASE-C MGBASAQ': 92344, 'BAMUM LETTER PHASE-C MIEE': 92390, 'BAMUM LETTER PHASE-C MUAE': 92391, 'BAMUM LETTER PHASE-C NANSANAQ': 92338, 'BAMUM LETTER PHASE-C NAQ': 92396, 'BAMUM LETTER PHASE-C NDAM': 92375, 'BAMUM LETTER PHASE-C NDAP': 92378, 'BAMUM LETTER PHASE-C NDEUAEREE': 92308, 'BAMUM LETTER PHASE-C NDEUT': 92349, 'BAMUM LETTER PHASE-C NDIDA': 92331, 'BAMUM LETTER PHASE-C NDOMBU': 92317, 'BAMUM LETTER PHASE-C NGAQ': 92395, 'BAMUM LETTER PHASE-C NGGEN': 92307, 'BAMUM LETTER PHASE-C NGGU': 92362, 'BAMUM LETTER PHASE-C NGGUAEN NYAM': 92336, 'BAMUM LETTER PHASE-C NGGUEET': 92370, 'BAMUM LETTER PHASE-C NGGUM': 92347, 'BAMUM LETTER PHASE-C NGGUON': 92341, 'BAMUM LETTER PHASE-C NGKAQ': 92309, 'BAMUM LETTER PHASE-C NGKUE MAEMBA': 92303, 'BAMUM LETTER PHASE-C NGOM': 92368, 'BAMUM LETTER PHASE-C NJAEM': 92356, 'BAMUM LETTER PHASE-C NJAM': 92367, 'BAMUM LETTER PHASE-C NJEEEE': 92360, 'BAMUM LETTER PHASE-C NJEUX': 92389, 'BAMUM LETTER PHASE-C NJUEQ': 92333, 'BAMUM LETTER PHASE-C NSA': 92350, 'BAMUM LETTER PHASE-C NSEUAEN': 92325, 'BAMUM LETTER PHASE-C NSHAQ': 92351, 'BAMUM LETTER PHASE-C NSOM': 92371, 'BAMUM LETTER PHASE-C NSUN': 92374, 'BAMUM LETTER PHASE-C NSUOT NGOM': 92359, 'BAMUM LETTER PHASE-C NTAA': 92340, 'BAMUM LETTER PHASE-C NTEN': 92372, 'BAMUM LETTER PHASE-C NTEUNGBA': 92345, 'BAMUM LETTER PHASE-C NTU MBIT': 
92314, 'BAMUM LETTER PHASE-C NYIR MKPARAQ MEUN': 92313, 'BAMUM LETTER PHASE-C NZA': 92304, 'BAMUM LETTER PHASE-C NZEUM': 92323, 'BAMUM LETTER PHASE-C PEN': 92399, 'BAMUM LETTER PHASE-C PIN': 92398, 'BAMUM LETTER PHASE-C PIRIEEN': 92316, 'BAMUM LETTER PHASE-C RU': 92355, 'BAMUM LETTER PHASE-C SETFON': 92380, 'BAMUM LETTER PHASE-C SEUX': 92330, 'BAMUM LETTER PHASE-C SHIQ': 92392, 'BAMUM LETTER PHASE-C SHUEQ': 92379, 'BAMUM LETTER PHASE-C SUAET': 92335, 'BAMUM LETTER PHASE-C TAASHAE': 92332, 'BAMUM LETTER PHASE-C TET': 92400, 'BAMUM LETTER PHASE-C TEUTEUX': 92346, 'BAMUM LETTER PHASE-C TITA YUE': 92334, 'BAMUM LETTER PHASE-C TITUAEP': 92358, 'BAMUM LETTER PHASE-C VEUAEPEN': 92353, 'BAMUM LETTER PHASE-C VEUX': 92337, 'BAMUM LETTER PHASE-C WANGKUOQ': 92306, 'BAMUM LETTER PHASE-C WUP': 92369, 'BAMUM LETTER PHASE-C YAA': 92377, 'BAMUM LETTER PHASE-C YEUQ': 92327, 'BAMUM LETTER PHASE-C YU MUOMAE': 92322, 'BAMUM LETTER PHASE-C YUM': 92305, 'BAMUM LETTER PHASE-C YUWOQ': 92388, 'BAMUM LETTER PHASE-D FAA': 92517, 'BAMUM LETTER PHASE-D FEUFEUAET': 92436, 'BAMUM LETTER PHASE-D GHAA': 92434, 'BAMUM LETTER PHASE-D GHEUAE': 92488, 'BAMUM LETTER PHASE-D KET': 92420, 'BAMUM LETTER PHASE-D KEUAETMEUN': 92415, 'BAMUM LETTER PHASE-D KEUM': 92454, 'BAMUM LETTER PHASE-D KEUOT MBUAE': 92431, 'BAMUM LETTER PHASE-D KEUP': 92460, 'BAMUM LETTER PHASE-D KU': 92489, 'BAMUM LETTER PHASE-D KUN': 92474, 'BAMUM LETTER PHASE-D KUOM': 92422, 'BAMUM LETTER PHASE-D KUQ': 92479, 'BAMUM LETTER PHASE-D KWAET': 92449, 'BAMUM LETTER PHASE-D KYEE': 92502, 'BAMUM LETTER PHASE-D LEEEE': 92495, 'BAMUM LETTER PHASE-D LET': 92464, 'BAMUM LETTER PHASE-D LEUAEP': 92439, 'BAMUM LETTER PHASE-D LEUM': 92484, 'BAMUM LETTER PHASE-D LIEE': 92406, 'BAMUM LETTER PHASE-D LOQ': 92511, 'BAMUM LETTER PHASE-D LUM': 92446, 'BAMUM LETTER PHASE-D M': 92497, 'BAMUM LETTER PHASE-D MAENJET': 92482, 'BAMUM LETTER PHASE-D MALEERI': 92426, 'BAMUM LETTER PHASE-D MBAA': 92448, 'BAMUM LETTER PHASE-D MBAA SEVEN': 92515, 'BAMUM LETTER PHASE-D 
MBUO': 92401, 'BAMUM LETTER PHASE-D MEEEE': 92496, 'BAMUM LETTER PHASE-D MEUN': 92478, 'BAMUM LETTER PHASE-D MEUT': 92427, 'BAMUM LETTER PHASE-D MFEUAE': 92458, 'BAMUM LETTER PHASE-D MFEUT': 92424, 'BAMUM LETTER PHASE-D MFIEE': 92466, 'BAMUM LETTER PHASE-D MFO': 92445, 'BAMUM LETTER PHASE-D MFON': 92404, 'BAMUM LETTER PHASE-D MGBEUN': 92442, 'BAMUM LETTER PHASE-D MGBIEE': 92444, 'BAMUM LETTER PHASE-D MGBOFUM': 92438, 'BAMUM LETTER PHASE-D MONI': 92441, 'BAMUM LETTER PHASE-D MU': 92499, 'BAMUM LETTER PHASE-D MVOP': 92510, 'BAMUM LETTER PHASE-D NDAM': 92471, 'BAMUM LETTER PHASE-D NDEE': 92437, 'BAMUM LETTER PHASE-D NDEUX': 92425, 'BAMUM LETTER PHASE-D NDON': 92440, 'BAMUM LETTER PHASE-D NGGAAM': 92465, 'BAMUM LETTER PHASE-D NGGAAMAE': 92409, 'BAMUM LETTER PHASE-D NGGAP': 92483, 'BAMUM LETTER PHASE-D NGGEUX': 92475, 'BAMUM LETTER PHASE-D NGGUOM': 92485, 'BAMUM LETTER PHASE-D NGGWAEN': 92467, 'BAMUM LETTER PHASE-D NGKAP': 92414, 'BAMUM LETTER PHASE-D NGKEUAEQ': 92457, 'BAMUM LETTER PHASE-D NGKEURI': 92432, 'BAMUM LETTER PHASE-D NGKIEE': 92476, 'BAMUM LETTER PHASE-D NGKUN': 92412, 'BAMUM LETTER PHASE-D NGKYEE': 92435, 'BAMUM LETTER PHASE-D NI': 92507, 'BAMUM LETTER PHASE-D NJAP': 92418, 'BAMUM LETTER PHASE-D NJEUAEM': 92430, 'BAMUM LETTER PHASE-D NJEUT': 92407, 'BAMUM LETTER PHASE-D NJI': 92403, 'BAMUM LETTER PHASE-D NJIEE': 92405, 'BAMUM LETTER PHASE-D NJUEQ': 92487, 'BAMUM LETTER PHASE-D NSHEE': 92408, 'BAMUM LETTER PHASE-D NSHUT': 92486, 'BAMUM LETTER PHASE-D NSIEEP': 92447, 'BAMUM LETTER PHASE-D NSIEET': 92459, 'BAMUM LETTER PHASE-D NSUM': 92480, 'BAMUM LETTER PHASE-D NTEE': 92505, 'BAMUM LETTER PHASE-D NTEUM': 92472, 'BAMUM LETTER PHASE-D NTUU': 92514, 'BAMUM LETTER PHASE-D NU': 92503, 'BAMUM LETTER PHASE-D NYAM': 92410, 'BAMUM LETTER PHASE-D NYET': 92450, 'BAMUM LETTER PHASE-D NYI': 92493, 'BAMUM LETTER PHASE-D NYUE': 92463, 'BAMUM LETTER PHASE-D PAP': 92469, 'BAMUM LETTER PHASE-D PEE': 92506, 'BAMUM LETTER PHASE-D PEUTAE': 92462, 'BAMUM LETTER PHASE-D PIP': 
92461, 'BAMUM LETTER PHASE-D PUQ': 92509, 'BAMUM LETTER PHASE-D PUUT': 92443, 'BAMUM LETTER PHASE-D RAEM': 92455, 'BAMUM LETTER PHASE-D REN MUCH': 92512, 'BAMUM LETTER PHASE-D REN OLD': 92490, 'BAMUM LETTER PHASE-D RII': 92494, 'BAMUM LETTER PHASE-D SAP': 92423, 'BAMUM LETTER PHASE-D SAQ': 92516, 'BAMUM LETTER PHASE-D SEUAEQ': 92428, 'BAMUM LETTER PHASE-D SHEE': 92413, 'BAMUM LETTER PHASE-D SHEUAE': 92417, 'BAMUM LETTER PHASE-D SHEUX': 92501, 'BAMUM LETTER PHASE-D SHII': 92500, 'BAMUM LETTER PHASE-D SHOQ': 92508, 'BAMUM LETTER PHASE-D SHU': 92504, 'BAMUM LETTER PHASE-D SOT': 92452, 'BAMUM LETTER PHASE-D SUAE': 92473, 'BAMUM LETTER PHASE-D SUE': 92419, 'BAMUM LETTER PHASE-D SUU': 92498, 'BAMUM LETTER PHASE-D TAE': 92491, 'BAMUM LETTER PHASE-D TEEEE': 92456, 'BAMUM LETTER PHASE-D TEUAEN': 92451, 'BAMUM LETTER PHASE-D TEUN': 92481, 'BAMUM LETTER PHASE-D TEUT': 92416, 'BAMUM LETTER PHASE-D TI': 92513, 'BAMUM LETTER PHASE-D TOQ': 92492, 'BAMUM LETTER PHASE-D TU': 92433, 'BAMUM LETTER PHASE-D TUOT': 92477, 'BAMUM LETTER PHASE-D WAP': 92402, 'BAMUM LETTER PHASE-D WUAEN': 92411, 'BAMUM LETTER PHASE-D YAEMMAE': 92421, 'BAMUM LETTER PHASE-D YEN': 92429, 'BAMUM LETTER PHASE-D YUOM': 92468, 'BAMUM LETTER PHASE-D YUOP': 92470, 'BAMUM LETTER PHASE-D YUWOQ': 92453, 'BAMUM LETTER PHASE-E A': 92629, 'BAMUM LETTER PHASE-E FA': 92603, 'BAMUM LETTER PHASE-E FAQ': 92673, 'BAMUM LETTER PHASE-E FEE': 92651, 'BAMUM LETTER PHASE-E FOM': 92627, 'BAMUM LETTER PHASE-E FU CALL': 92626, 'BAMUM LETTER PHASE-E FU I': 92616, 'BAMUM LETTER PHASE-E FU REMEDY': 92661, 'BAMUM LETTER PHASE-E FUE': 92596, 'BAMUM LETTER PHASE-E FUET': 92612, 'BAMUM LETTER PHASE-E GBET': 92585, 'BAMUM LETTER PHASE-E GBEUX': 92597, 'BAMUM LETTER PHASE-E GHAAMAE': 92578, 'BAMUM LETTER PHASE-E GHET': 92602, 'BAMUM LETTER PHASE-E GHEUAE': 92615, 'BAMUM LETTER PHASE-E GHEUN': 92565, 'BAMUM LETTER PHASE-E GHEUX': 92552, 'BAMUM LETTER PHASE-E GHOM': 92674, 'BAMUM LETTER PHASE-E I': 92632, 'BAMUM LETTER PHASE-E KET': 92599, 
'BAMUM LETTER PHASE-E KEUAE': 92570, 'BAMUM LETTER PHASE-E KEUX': 92646, 'BAMUM LETTER PHASE-E KI': 92670, 'BAMUM LETTER PHASE-E KO': 92665, 'BAMUM LETTER PHASE-E KPEUX': 92532, 'BAMUM LETTER PHASE-E KUET': 92587, 'BAMUM LETTER PHASE-E KUOP': 92538, 'BAMUM LETTER PHASE-E KUT': 92620, 'BAMUM LETTER PHASE-E LAAM': 92575, 'BAMUM LETTER PHASE-E LAP': 92521, 'BAMUM LETTER PHASE-E LAQ': 92633, 'BAMUM LETTER PHASE-E LEUAEM': 92595, 'BAMUM LETTER PHASE-E LOM': 92539, 'BAMUM LETTER PHASE-E LOON': 92523, 'BAMUM LETTER PHASE-E LOOT': 92556, 'BAMUM LETTER PHASE-E LOQ': 92664, 'BAMUM LETTER PHASE-E LU': 92653, 'BAMUM LETTER PHASE-E MA': 92667, 'BAMUM LETTER PHASE-E MAE': 92600, 'BAMUM LETTER PHASE-E MAEM': 92542, 'BAMUM LETTER PHASE-E MAP': 92555, 'BAMUM LETTER PHASE-E MAQ': 92668, 'BAMUM LETTER PHASE-E MBEE': 92582, 'BAMUM LETTER PHASE-E MBEUM': 92520, 'BAMUM LETTER PHASE-E MEN': 92666, 'BAMUM LETTER PHASE-E MFEUQ': 92591, 'BAMUM LETTER PHASE-E MGBA': 92551, 'BAMUM LETTER PHASE-E MGBEN': 92581, 'BAMUM LETTER PHASE-E MI': 92654, 'BAMUM LETTER PHASE-E MIEE': 92611, 'BAMUM LETTER PHASE-E MON': 92671, 'BAMUM LETTER PHASE-E MUAE': 92614, 'BAMUM LETTER PHASE-E MVI': 92617, 'BAMUM LETTER PHASE-E NA': 92662, 'BAMUM LETTER PHASE-E NAE': 92613, 'BAMUM LETTER PHASE-E NDAA MY HOUSE': 92637, 'BAMUM LETTER PHASE-E NDAA SOFTNESS': 92562, 'BAMUM LETTER PHASE-E NDAP': 92518, 'BAMUM LETTER PHASE-E NDIAQ': 92592, 'BAMUM LETTER PHASE-E NDIQ': 92558, 'BAMUM LETTER PHASE-E NDUN': 92528, 'BAMUM LETTER PHASE-E NGA': 92658, 'BAMUM LETTER PHASE-E NGEUREUT': 92579, 'BAMUM LETTER PHASE-E NGGEEEE': 92557, 'BAMUM LETTER PHASE-E NGGEUAE': 92607, 'BAMUM LETTER PHASE-E NGGEUAET': 92535, 'BAMUM LETTER PHASE-E NGGUAESHAE NYAM': 92563, 'BAMUM LETTER PHASE-E NGGUP': 92624, 'BAMUM LETTER PHASE-E NGGURAE': 92550, 'BAMUM LETTER PHASE-E NGKA': 92531, 'BAMUM LETTER PHASE-E NGKAAMI': 92601, 'BAMUM LETTER PHASE-E NGKEUAEM': 92553, 'BAMUM LETTER PHASE-E NGKEUX': 92543, 'BAMUM LETTER PHASE-E NGKUM': 92619, 'BAMUM LETTER 
PHASE-E NGKUP': 92598, 'BAMUM LETTER PHASE-E NGOP': 92541, 'BAMUM LETTER PHASE-E NGOQ': 92544, 'BAMUM LETTER PHASE-E NGUAE': 92640, 'BAMUM LETTER PHASE-E NGUAET': 92657, 'BAMUM LETTER PHASE-E NJAEMLI': 92554, 'BAMUM LETTER PHASE-E NJEE': 92628, 'BAMUM LETTER PHASE-E NJEE EPOCH': 92648, 'BAMUM LETTER PHASE-E NJEUX': 92547, 'BAMUM LETTER PHASE-E NKOM': 92584, 'BAMUM LETTER PHASE-E NSHIEE': 92540, 'BAMUM LETTER PHASE-E NSHUE': 92545, 'BAMUM LETTER PHASE-E NSHUOP': 92527, 'BAMUM LETTER PHASE-E NTAP': 92622, 'BAMUM LETTER PHASE-E NTUM': 92604, 'BAMUM LETTER PHASE-E NYI BETWEEN': 92608, 'BAMUM LETTER PHASE-E NYI CLEAVER': 92589, 'BAMUM LETTER PHASE-E NZAQ': 92583, 'BAMUM LETTER PHASE-E NZUQ': 92609, 'BAMUM LETTER PHASE-E O': 92631, 'BAMUM LETTER PHASE-E PA PEOPLE': 92625, 'BAMUM LETTER PHASE-E PA PLURAL': 92634, 'BAMUM LETTER PHASE-E PAA': 92524, 'BAMUM LETTER PHASE-E PAAM': 92536, 'BAMUM LETTER PHASE-E PEEM': 92548, 'BAMUM LETTER PHASE-E PEUT': 92605, 'BAMUM LETTER PHASE-E PEUX': 92647, 'BAMUM LETTER PHASE-E PI': 92663, 'BAMUM LETTER PHASE-E PIEEQ': 92593, 'BAMUM LETTER PHASE-E PIET': 92621, 'BAMUM LETTER PHASE-E PO': 92568, 'BAMUM LETTER PHASE-E POON': 92610, 'BAMUM LETTER PHASE-E PU': 92576, 'BAMUM LETTER PHASE-E PUAE': 92529, 'BAMUM LETTER PHASE-E PUAQ': 92618, 'BAMUM LETTER PHASE-E PUE': 92649, 'BAMUM LETTER PHASE-E PUM': 92561, 'BAMUM LETTER PHASE-E RAE': 92656, 'BAMUM LETTER PHASE-E RAQ': 92526, 'BAMUM LETTER PHASE-E REUX': 92655, 'BAMUM LETTER PHASE-E RIMGBA': 92546, 'BAMUM LETTER PHASE-E SAA': 92549, 'BAMUM LETTER PHASE-E SEE': 92534, 'BAMUM LETTER PHASE-E SET': 92560, 'BAMUM LETTER PHASE-E SHEUAEQ': 92580, 'BAMUM LETTER PHASE-E SHIQ': 92638, 'BAMUM LETTER PHASE-E SHO': 92659, 'BAMUM LETTER PHASE-E SHOQ': 92660, 'BAMUM LETTER PHASE-E SOM': 92525, 'BAMUM LETTER PHASE-E SUAEN': 92571, 'BAMUM LETTER PHASE-E TAA': 92635, 'BAMUM LETTER PHASE-E TAAQ': 92577, 'BAMUM LETTER PHASE-E TAEN NTEUM': 92559, 'BAMUM LETTER PHASE-E TAM': 92530, 'BAMUM LETTER PHASE-E TAQ': 92636, 
'BAMUM LETTER PHASE-E TEN': 92672, 'BAMUM LETTER PHASE-E TEU': 92669, 'BAMUM LETTER PHASE-E TEUAEQ': 92572, 'BAMUM LETTER PHASE-E TOO': 92537, 'BAMUM LETTER PHASE-E TOON': 92519, 'BAMUM LETTER PHASE-E TOQ': 92630, 'BAMUM LETTER PHASE-E TUAE': 92566, 'BAMUM LETTER PHASE-E TUM': 92586, 'BAMUM LETTER PHASE-E TUMAE': 92569, 'BAMUM LETTER PHASE-E VEE': 92652, 'BAMUM LETTER PHASE-E VEUAE': 92573, 'BAMUM LETTER PHASE-E VOM': 92522, 'BAMUM LETTER PHASE-E WEUX': 92574, 'BAMUM LETTER PHASE-E WUE': 92650, 'BAMUM LETTER PHASE-E WUO': 92533, 'BAMUM LETTER PHASE-E YAP': 92588, 'BAMUM LETTER PHASE-E YEUAE': 92567, 'BAMUM LETTER PHASE-E YEUAET': 92623, 'BAMUM LETTER PHASE-E YEUM': 92606, 'BAMUM LETTER PHASE-E YEUX': 92639, 'BAMUM LETTER PHASE-E YIEE': 92564, 'BAMUM LETTER PHASE-E YIT': 92590, 'BAMUM LETTER PHASE-E YOQ COVER': 92643, 'BAMUM LETTER PHASE-E YOQ SWIMMING': 92642, 'BAMUM LETTER PHASE-E YUAEN': 92641, 'BAMUM LETTER PHASE-E YUEQ': 92594, 'BAMUM LETTER PHASE-E YUN': 92645, 'BAMUM LETTER PHASE-E YUQ': 92644, 'BAMUM LETTER PHASE-F EE': 92678, 'BAMUM LETTER PHASE-F FOM': 92715, 'BAMUM LETTER PHASE-F KA': 92675, 'BAMUM LETTER PHASE-F KEN': 92710, 'BAMUM LETTER PHASE-F KET': 92695, 'BAMUM LETTER PHASE-F KO': 92719, 'BAMUM LETTER PHASE-F KPA': 92726, 'BAMUM LETTER PHASE-F KU': 92677, 'BAMUM LETTER PHASE-F KYEE': 92694, 'BAMUM LETTER PHASE-F LA': 92682, 'BAMUM LETTER PHASE-F LI': 92717, 'BAMUM LETTER PHASE-F LOQ': 92718, 'BAMUM LETTER PHASE-F M': 92689, 'BAMUM LETTER PHASE-F MA': 92722, 'BAMUM LETTER PHASE-F MBAA': 92724, 'BAMUM LETTER PHASE-F MBEN': 92720, 'BAMUM LETTER PHASE-F MEEEE': 92685, 'BAMUM LETTER PHASE-F MO': 92723, 'BAMUM LETTER PHASE-F NDAA': 92687, 'BAMUM LETTER PHASE-F NGGA': 92712, 'BAMUM LETTER PHASE-F NGKWAEN': 92711, 'BAMUM LETTER PHASE-F NI': 92708, 'BAMUM LETTER PHASE-F NJAEM': 92688, 'BAMUM LETTER PHASE-F NJUAE': 92698, 'BAMUM LETTER PHASE-F NSHA': 92702, 'BAMUM LETTER PHASE-F NTEE': 92704, 'BAMUM LETTER PHASE-F NU': 92697, 'BAMUM LETTER PHASE-F NUAE': 
92696, 'BAMUM LETTER PHASE-F NYI': 92681, 'BAMUM LETTER PHASE-F PEE': 92706, 'BAMUM LETTER PHASE-F PEUX': 92703, 'BAMUM LETTER PHASE-F PUAE': 92714, 'BAMUM LETTER PHASE-F REE': 92679, 'BAMUM LETTER PHASE-F REN': 92721, 'BAMUM LETTER PHASE-F REUX': 92709, 'BAMUM LETTER PHASE-F RIEE': 92684, 'BAMUM LETTER PHASE-F RII': 92683, 'BAMUM LETTER PHASE-F RU': 92707, 'BAMUM LETTER PHASE-F SAMBA': 92727, 'BAMUM LETTER PHASE-F SEUX': 92693, 'BAMUM LETTER PHASE-F SHII': 92691, 'BAMUM LETTER PHASE-F SHO': 92713, 'BAMUM LETTER PHASE-F SHU': 92700, 'BAMUM LETTER PHASE-F SI': 92692, 'BAMUM LETTER PHASE-F SUU': 92690, 'BAMUM LETTER PHASE-F TAA': 92686, 'BAMUM LETTER PHASE-F TAE': 92680, 'BAMUM LETTER PHASE-F TET': 92725, 'BAMUM LETTER PHASE-F U': 92676, 'BAMUM LETTER PHASE-F VUEQ': 92728, 'BAMUM LETTER PHASE-F WA': 92716, 'BAMUM LETTER PHASE-F WUE': 92705, 'BAMUM LETTER PHASE-F YA': 92701, 'BAMUM LETTER PHASE-F YOQ': 92699, 'BANANA': 127820, 'BANK': 127974, 'BANKNOTE WITH DOLLAR SIGN': 128181, 'BANKNOTE WITH EURO SIGN': 128182, 'BANKNOTE WITH POUND SIGN': 128183, 'BANKNOTE WITH YEN SIGN': 128180, 'BAR CHART': 128202, 'BARBER POLE': 128136, 'BASKETBALL AND HOOP': 127936, 'BATAK CONSONANT SIGN H': 7153, 'BATAK CONSONANT SIGN NG': 7152, 'BATAK LETTER A': 7104, 'BATAK LETTER BA': 7109, 'BATAK LETTER CA': 7137, 'BATAK LETTER DA': 7121, 'BATAK LETTER GA': 7118, 'BATAK LETTER HA': 7106, 'BATAK LETTER I': 7140, 'BATAK LETTER JA': 7120, 'BATAK LETTER KARO BA': 7110, 'BATAK LETTER LA': 7134, 'BATAK LETTER MA': 7124, 'BATAK LETTER MANDAILING HA': 7108, 'BATAK LETTER MANDAILING NA': 7114, 'BATAK LETTER MANDAILING SA': 7130, 'BATAK LETTER MBA': 7139, 'BATAK LETTER NA': 7113, 'BATAK LETTER NDA': 7138, 'BATAK LETTER NGA': 7133, 'BATAK LETTER NORTHERN TA': 7127, 'BATAK LETTER NYA': 7136, 'BATAK LETTER PA': 7111, 'BATAK LETTER PAKPAK WA': 7117, 'BATAK LETTER RA': 7122, 'BATAK LETTER SA': 7128, 'BATAK LETTER SIMALUNGUN A': 7105, 'BATAK LETTER SIMALUNGUN GA': 7119, 'BATAK LETTER SIMALUNGUN HA': 7107, 
'BATAK LETTER SIMALUNGUN LA': 7135, 'BATAK LETTER SIMALUNGUN MA': 7125, 'BATAK LETTER SIMALUNGUN PA': 7112, 'BATAK LETTER SIMALUNGUN RA': 7123, 'BATAK LETTER SIMALUNGUN SA': 7129, 'BATAK LETTER SIMALUNGUN WA': 7116, 'BATAK LETTER SIMALUNGUN YA': 7132, 'BATAK LETTER SOUTHERN TA': 7126, 'BATAK LETTER U': 7141, 'BATAK LETTER WA': 7115, 'BATAK LETTER YA': 7131, 'BATAK PANGOLAT': 7154, 'BATAK PANONGONAN': 7155, 'BATAK SIGN TOMPI': 7142, 'BATAK SYMBOL BINDU JUDUL': 7166, 'BATAK SYMBOL BINDU NA METEK': 7164, 'BATAK SYMBOL BINDU PANGOLAT': 7167, 'BATAK SYMBOL BINDU PINARBORAS': 7165, 'BATAK VOWEL SIGN E': 7143, 'BATAK VOWEL SIGN EE': 7145, 'BATAK VOWEL SIGN I': 7146, 'BATAK VOWEL SIGN KARO I': 7147, 'BATAK VOWEL SIGN KARO O': 7149, 'BATAK VOWEL SIGN O': 7148, 'BATAK VOWEL SIGN PAKPAK E': 7144, 'BATAK VOWEL SIGN U': 7150, 'BATAK VOWEL SIGN U FOR SIMALUNGUN SA': 7151, 'BATH': 128704, 'BATHTUB': 128705, 'BATTERY': 128267, 'BEAR FACE': 128059, 'BEATING HEART': 128147, 'BEER MUG': 127866, 'BELL': 128276, 'BELL WITH CANCELLATION STROKE': 128277, 'BENGALI LETTER KHINYA': 983621, 'BENTO BOX': 127857, 'BICYCLE': 128690, 'BICYCLIST': 128692, 'BIKINI': 128089, 'BILLIARDS': 127921, 'BIRD': 128038, 'BIRTHDAY CAKE': 127874, 'BLACK DOWN-POINTING DOUBLE TRIANGLE': 9196, 'BLACK LEFT-POINTING DOUBLE TRIANGLE': 9194, 'BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR': 9198, 'BLACK QUESTION MARK ORNAMENT': 10067, 'BLACK RIGHT-POINTING DOUBLE TRIANGLE': 9193, 'BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR': 9197, 'BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR': 9199, 'BLACK SQUARE BUTTON': 128306, 'BLACK UP-POINTING DOUBLE TRIANGLE': 9195, 'BLOSSOM': 127804, 'BLOWFISH': 128033, 'BLUE BOOK': 128216, 'BLUE HEART': 128153, 'BOAR': 128023, 'BOMB': 128163, 'BOOKMARK': 128278, 'BOOKMARK TABS': 128209, 'BOOKS': 128218, 'BOPOMOFO LETTER GH': 12728, 'BOPOMOFO LETTER LH': 12729, 'BOPOMOFO LETTER ZY': 12730, 'BOUQUET': 128144, 'BOWLING': 127923, 'BOY': 128102, 'BRAHMI DANDA': 
69703, 'BRAHMI DIGIT EIGHT': 69742, 'BRAHMI DIGIT FIVE': 69739, 'BRAHMI DIGIT FOUR': 69738, 'BRAHMI DIGIT NINE': 69743, 'BRAHMI DIGIT ONE': 69735, 'BRAHMI DIGIT SEVEN': 69741, 'BRAHMI DIGIT SIX': 69740, 'BRAHMI DIGIT THREE': 69737, 'BRAHMI DIGIT TWO': 69736, 'BRAHMI DIGIT ZERO': 69734, 'BRAHMI DOUBLE DANDA': 69704, 'BRAHMI LETTER A': 69637, 'BRAHMI LETTER AA': 69638, 'BRAHMI LETTER AI': 69648, 'BRAHMI LETTER AU': 69650, 'BRAHMI LETTER BA': 69673, 'BRAHMI LETTER BHA': 69674, 'BRAHMI LETTER CA': 69656, 'BRAHMI LETTER CHA': 69657, 'BRAHMI LETTER DA': 69668, 'BRAHMI LETTER DDA': 69663, 'BRAHMI LETTER DDHA': 69664, 'BRAHMI LETTER DHA': 69669, 'BRAHMI LETTER E': 69647, 'BRAHMI LETTER GA': 69653, 'BRAHMI LETTER GHA': 69654, 'BRAHMI LETTER HA': 69683, 'BRAHMI LETTER I': 69639, 'BRAHMI LETTER II': 69640, 'BRAHMI LETTER JA': 69658, 'BRAHMI LETTER JHA': 69659, 'BRAHMI LETTER KA': 69651, 'BRAHMI LETTER KHA': 69652, 'BRAHMI LETTER LA': 69678, 'BRAHMI LETTER LLA': 69684, 'BRAHMI LETTER MA': 69675, 'BRAHMI LETTER NA': 69670, 'BRAHMI LETTER NGA': 69655, 'BRAHMI LETTER NNA': 69665, 'BRAHMI LETTER NYA': 69660, 'BRAHMI LETTER O': 69649, 'BRAHMI LETTER OLD TAMIL LLLA': 69685, 'BRAHMI LETTER OLD TAMIL NNNA': 69687, 'BRAHMI LETTER OLD TAMIL RRA': 69686, 'BRAHMI LETTER PA': 69671, 'BRAHMI LETTER PHA': 69672, 'BRAHMI LETTER RA': 69677, 'BRAHMI LETTER SA': 69682, 'BRAHMI LETTER SHA': 69680, 'BRAHMI LETTER SSA': 69681, 'BRAHMI LETTER TA': 69666, 'BRAHMI LETTER THA': 69667, 'BRAHMI LETTER TTA': 69661, 'BRAHMI LETTER TTHA': 69662, 'BRAHMI LETTER U': 69641, 'BRAHMI LETTER UU': 69642, 'BRAHMI LETTER VA': 69679, 'BRAHMI LETTER VOCALIC L': 69645, 'BRAHMI LETTER VOCALIC LL': 69646, 'BRAHMI LETTER VOCALIC R': 69643, 'BRAHMI LETTER VOCALIC RR': 69644, 'BRAHMI LETTER YA': 69676, 'BRAHMI NUMBER EIGHT': 69721, 'BRAHMI NUMBER EIGHTY': 69730, 'BRAHMI NUMBER FIFTY': 69727, 'BRAHMI NUMBER FIVE': 69718, 'BRAHMI NUMBER FORTY': 69726, 'BRAHMI NUMBER FOUR': 69717, 'BRAHMI NUMBER NINE': 69722, 'BRAHMI NUMBER 
NINETY': 69731, 'BRAHMI NUMBER ONE': 69714, 'BRAHMI NUMBER ONE HUNDRED': 69732, 'BRAHMI NUMBER ONE THOUSAND': 69733, 'BRAHMI NUMBER SEVEN': 69720, 'BRAHMI NUMBER SEVENTY': 69729, 'BRAHMI NUMBER SIX': 69719, 'BRAHMI NUMBER SIXTY': 69728, 'BRAHMI NUMBER TEN': 69723, 'BRAHMI NUMBER THIRTY': 69725, 'BRAHMI NUMBER THREE': 69716, 'BRAHMI NUMBER TWENTY': 69724, 'BRAHMI NUMBER TWO': 69715, 'BRAHMI PUNCTUATION CRESCENT BAR': 69708, 'BRAHMI PUNCTUATION DOT': 69705, 'BRAHMI PUNCTUATION DOUBLE DOT': 69706, 'BRAHMI PUNCTUATION LINE': 69707, 'BRAHMI PUNCTUATION LOTUS': 69709, 'BRAHMI SIGN ANUSVARA': 69633, 'BRAHMI SIGN CANDRABINDU': 69632, 'BRAHMI SIGN JIHVAMULIYA': 69635, 'BRAHMI SIGN UPADHMANIYA': 69636, 'BRAHMI SIGN VISARGA': 69634, 'BRAHMI VIRAMA': 69702, 'BRAHMI VOWEL SIGN AA': 69688, 'BRAHMI VOWEL SIGN AI': 69699, 'BRAHMI VOWEL SIGN AU': 69701, 'BRAHMI VOWEL SIGN BHATTIPROLU AA': 69689, 'BRAHMI VOWEL SIGN E': 69698, 'BRAHMI VOWEL SIGN I': 69690, 'BRAHMI VOWEL SIGN II': 69691, 'BRAHMI VOWEL SIGN O': 69700, 'BRAHMI VOWEL SIGN U': 69692, 'BRAHMI VOWEL SIGN UU': 69693, 'BRAHMI VOWEL SIGN VOCALIC L': 69696, 'BRAHMI VOWEL SIGN VOCALIC LL': 69697, 'BRAHMI VOWEL SIGN VOCALIC R': 69694, 'BRAHMI VOWEL SIGN VOCALIC RR': 69695, 'BREAD': 127838, 'BRIDE WITH VEIL': 128112, 'BRIDGE AT NIGHT': 127753, 'BRIEFCASE': 128188, 'BROKEN HEART': 128148, 'BUG': 128027, 'BUS': 128652, 'BUS STOP': 128655, 'BUST IN SILHOUETTE': 128100, 'BUSTS IN SILHOUETTE': 128101, 'CACTUS': 127797, 'CALENDAR': 128197, 'CAMERA': 128247, 'CANDY': 127852, 'CARD INDEX': 128199, 'CAROUSEL HORSE': 127904, 'CARP STREAMER': 127887, 'CAT': 128008, 'CAT FACE': 128049, 'CAT FACE WITH TEARS OF JOY': 128569, 'CAT FACE WITH WRY SMILE': 128572, 'CHART WITH DOWNWARDS TREND': 128201, 'CHART WITH UPWARDS TREND': 128200, 'CHART WITH UPWARDS TREND AND YEN SIGN': 128185, 'CHEERING MEGAPHONE': 128227, 'CHEQUERED FLAG': 127937, 'CHERRIES': 127826, 'CHERRY BLOSSOM': 127800, 'CHESTNUT': 127792, 'CHICKEN': 128020, 'CHILDREN CROSSING': 
128696, 'CHOCOLATE BAR': 127851, 'CHRISTMAS TREE': 127876, 'CINEMA': 127910, 'CIRCLED IDEOGRAPH ACCEPT': 127569, 'CIRCLED IDEOGRAPH ADVANTAGE': 127568, 'CIRCUS TENT': 127914, 'CITYSCAPE AT DUSK': 127750, 'CLAPPER BOARD': 127916, 'CLAPPING HANDS SIGN': 128079, 'CLINKING BEER MUGS': 127867, 'CLIPBOARD': 128203, 'CLOCK FACE EIGHT OCLOCK': 128343, 'CLOCK FACE EIGHT-THIRTY': 128355, 'CLOCK FACE ELEVEN OCLOCK': 128346, 'CLOCK FACE ELEVEN-THIRTY': 128358, 'CLOCK FACE FIVE OCLOCK': 128340, 'CLOCK FACE FIVE-THIRTY': 128352, 'CLOCK FACE FOUR OCLOCK': 128339, 'CLOCK FACE FOUR-THIRTY': 128351, 'CLOCK FACE NINE OCLOCK': 128344, 'CLOCK FACE NINE-THIRTY': 128356, 'CLOCK FACE ONE OCLOCK': 128336, 'CLOCK FACE ONE-THIRTY': 128348, 'CLOCK FACE SEVEN OCLOCK': 128342, 'CLOCK FACE SEVEN-THIRTY': 128354, 'CLOCK FACE SIX OCLOCK': 128341, 'CLOCK FACE SIX-THIRTY': 128353, 'CLOCK FACE TEN OCLOCK': 128345, 'CLOCK FACE TEN-THIRTY': 128357, 'CLOCK FACE THREE OCLOCK': 128338, 'CLOCK FACE THREE-THIRTY': 128350, 'CLOCK FACE TWELVE OCLOCK': 128347, 'CLOCK FACE TWELVE-THIRTY': 128359, 'CLOCK FACE TWO OCLOCK': 128337, 'CLOCK FACE TWO-THIRTY': 128349, 'CLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS': 128259, 'CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS': 128257, 'CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS WITH CIRCLED ONE OVERLAY': 128258, 'CLOSED BOOK': 128213, 'CLOSED LOCK WITH KEY': 128272, 'CLOSED MAILBOX WITH LOWERED FLAG': 128234, 'CLOSED MAILBOX WITH RAISED FLAG': 128235, 'CLOSED UMBRELLA': 127746, 'COCKTAIL GLASS': 127864, 'COLLISION SYMBOL': 128165, 'COMBINING DOUBLE INVERTED BREVE BELOW': 7676, 'CONFETTI BALL': 127882, 'CONFOUNDED FACE': 128534, 'CONSTRUCTION SIGN': 128679, 'CONSTRUCTION WORKER': 128119, 'CONVENIENCE STORE': 127978, 'COOKED RICE': 127834, 'COOKIE': 127850, 'COOKING': 127859, 'COUPLE WITH HEART': 128145, 'COW': 128004, 'COW FACE': 128046, 'CREDIT CARD': 128179, 'CRESCENT MOON': 127769, 'CROCODILE': 128010, 'CROSS MARK': 10060, 'CROSSED FLAGS': 127884, 
'CROWN': 128081, 'CRYING CAT FACE': 128575, 'CRYING FACE': 128546, 'CRYSTAL BALL': 128302, 'CURLY LOOP': 10160, 'CURRENCY EXCHANGE': 128177, 'CURRY AND RICE': 127835, 'CUSTARD': 127854, 'CUSTOMS': 128707, 'CYCLONE': 127744, 'CYRILLIC CAPITAL LETTER REVERSED TSE': 42592, 'CYRILLIC CAPITAL LETTER SHHA WITH DESCENDER': 1318, 'CYRILLIC SMALL LETTER REVERSED TSE': 42593, 'CYRILLIC SMALL LETTER SHHA WITH DESCENDER': 1319, 'DANCER': 128131, 'DANGO': 127841, 'DASH SYMBOL': 128168, 'DECIDUOUS TREE': 127795, 'DELIVERY TRUCK': 128666, 'DEPARTMENT STORE': 127980, 'DEVANAGARI LETTER AW': 2421, 'DEVANAGARI LETTER OE': 2419, 'DEVANAGARI LETTER OOE': 2420, 'DEVANAGARI LETTER UE': 2422, 'DEVANAGARI LETTER UUE': 2423, 'DEVANAGARI VOWEL SIGN AW': 2383, 'DEVANAGARI VOWEL SIGN OE': 2362, 'DEVANAGARI VOWEL SIGN OOE': 2363, 'DEVANAGARI VOWEL SIGN UE': 2390, 'DEVANAGARI VOWEL SIGN UUE': 2391, 'DIAMOND SHAPE WITH A DOT INSIDE': 128160, 'DIRECT HIT': 127919, 'DISAPPOINTED BUT RELIEVED FACE': 128549, 'DISAPPOINTED FACE': 128542, 'DIZZY FACE': 128565, 'DIZZY SYMBOL': 128171, 'DO NOT LITTER SYMBOL': 128687, 'DOG': 128021, 'DOG FACE': 128054, 'DOLPHIN': 128044, 'DOOR': 128682, 'DOUBLE CURLY LOOP': 10175, 'DOUGHNUT': 127849, 'DOWN-POINTING RED TRIANGLE': 128315, 'DOWN-POINTING SMALL RED TRIANGLE': 128317, 'DRAGON': 128009, 'DRAGON FACE': 128050, 'DRESS': 128087, 'DROMEDARY CAMEL': 128042, 'DROPLET': 128167, 'DVD': 128192, 'E-MAIL SYMBOL': 128231, 'EAR': 128066, 'EAR OF MAIZE': 127805, 'EAR OF RICE': 127806, 'EARTH GLOBE AMERICAS': 127758, 'EARTH GLOBE ASIA-AUSTRALIA': 127759, 'EARTH GLOBE EUROPE-AFRICA': 127757, 'ELECTRIC LIGHT BULB': 128161, 'ELECTRIC PLUG': 128268, 'ELECTRIC TORCH': 128294, 'ELEPHANT': 128024, 'END WITH LEFTWARDS ARROW ABOVE': 128282, 'ENVELOPE WITH DOWNWARDS ARROW ABOVE': 128233, 'ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK': 4957, 'ETHIOPIC COMBINING VOWEL LENGTH MARK': 4958, 'ETHIOPIC SYLLABLE BBA': 43816, 'ETHIOPIC SYLLABLE BBAA': 43819, 'ETHIOPIC SYLLABLE BBE': 
43821, 'ETHIOPIC SYLLABLE BBEE': 43820, 'ETHIOPIC SYLLABLE BBI': 43818, 'ETHIOPIC SYLLABLE BBO': 43822, 'ETHIOPIC SYLLABLE BBU': 43817, 'ETHIOPIC SYLLABLE CCHHA': 43808, 'ETHIOPIC SYLLABLE CCHHAA': 43811, 'ETHIOPIC SYLLABLE CCHHE': 43813, 'ETHIOPIC SYLLABLE CCHHEE': 43812, 'ETHIOPIC SYLLABLE CCHHI': 43810, 'ETHIOPIC SYLLABLE CCHHO': 43814, 'ETHIOPIC SYLLABLE CCHHU': 43809, 'ETHIOPIC SYLLABLE DDHAA': 43787, 'ETHIOPIC SYLLABLE DDHE': 43789, 'ETHIOPIC SYLLABLE DDHEE': 43788, 'ETHIOPIC SYLLABLE DDHI': 43786, 'ETHIOPIC SYLLABLE DDHO': 43790, 'ETHIOPIC SYLLABLE DDHU': 43785, 'ETHIOPIC SYLLABLE DZAA': 43795, 'ETHIOPIC SYLLABLE DZE': 43797, 'ETHIOPIC SYLLABLE DZEE': 43796, 'ETHIOPIC SYLLABLE DZI': 43794, 'ETHIOPIC SYLLABLE DZO': 43798, 'ETHIOPIC SYLLABLE DZU': 43793, 'ETHIOPIC SYLLABLE TTHAA': 43779, 'ETHIOPIC SYLLABLE TTHE': 43781, 'ETHIOPIC SYLLABLE TTHEE': 43780, 'ETHIOPIC SYLLABLE TTHI': 43778, 'ETHIOPIC SYLLABLE TTHO': 43782, 'ETHIOPIC SYLLABLE TTHU': 43777, 'EUROPEAN CASTLE': 127984, 'EUROPEAN POST OFFICE': 127972, 'EVERGREEN TREE': 127794, 'EXTRATERRESTRIAL ALIEN': 128125, 'EYEGLASSES': 128083, 'EYES': 128064, 'FACE MASSAGE': 128134, 'FACE SAVOURING DELICIOUS FOOD': 128523, 'FACE SCREAMING IN FEAR': 128561, 'FACE THROWING A KISS': 128536, 'FACE WITH COLD SWEAT': 128531, 'FACE WITH LOOK OF TRIUMPH': 128548, 'FACE WITH MEDICAL MASK': 128567, 'FACE WITH NO GOOD GESTURE': 128581, 'FACE WITH OK GESTURE': 128582, 'FACE WITH OPEN MOUTH AND COLD SWEAT': 128560, 'FACE WITH STUCK-OUT TONGUE AND TIGHTLY-CLOSED EYES': 128541, 'FACE WITH STUCK-OUT TONGUE AND WINKING EYE': 128540, 'FACE WITH TEARS OF JOY': 128514, 'FACE WITHOUT MOUTH': 128566, 'FACTORY': 127981, 'FALLEN LEAF': 127810, 'FAMILY': 128106, 'FATHER CHRISTMAS': 127877, 'FAX MACHINE': 128224, 'FEARFUL FACE': 128552, 'FERRIS WHEEL': 127905, 'FILE FOLDER': 128193, 'FIRE': 128293, 'FIRE ENGINE': 128658, 'FIREWORK SPARKLER': 127879, 'FIREWORKS': 127878, 'FIRST QUARTER MOON SYMBOL': 127763, 'FIRST QUARTER MOON WITH FACE': 
127771, 'FISH': 128031, 'FISH CAKE WITH SWIRL DESIGN': 127845, 'FISHING POLE AND FISH': 127907, 'FISTED HAND SIGN': 128074, 'FLEXED BICEPS': 128170, 'FLOPPY DISK': 128190, 'FLOWER PLAYING CARDS': 127924, 'FLUSHED FACE': 128563, 'FOGGY': 127745, 'FOOTPRINTS': 128099, 'FORK AND KNIFE': 127860, 'FOUR LEAF CLOVER': 127808, 'FRENCH FRIES': 127839, 'FRIED SHRIMP': 127844, 'FROG FACE': 128056, 'FRONT-FACING BABY CHICK': 128037, 'FULL MOON SYMBOL': 127765, 'FULL MOON WITH FACE': 127773, 'GAME DIE': 127922, 'GEM STONE': 128142, 'GEORGIAN LETTER U-BRJGU': 983912, 'GHOST': 128123, 'GIRL': 128103, 'GLOBE WITH MERIDIANS': 127760, 'GLOWING STAR': 127775, 'GOAT': 128016, 'GRADUATION CAP': 127891, 'GRAPES': 127815, 'GREEN APPLE': 127823, 'GREEN BOOK': 128215, 'GREEN HEART': 128154, 'GRINNING CAT FACE WITH SMILING EYES': 128568, 'GRINNING FACE WITH SMILING EYES': 128513, 'GROWING HEART': 128151, 'GUARDSMAN': 128130, 'GUITAR': 127928, 'HAIRCUT': 128135, 'HAMBURGER': 127828, 'HAMMER': 128296, 'HAMSTER FACE': 128057, 'HANDBAG': 128092, 'HAPPY PERSON RAISING ONE HAND': 128587, 'HATCHING CHICK': 128035, 'HEADPHONE': 127911, 'HEAR-NO-EVIL MONKEY': 128585, 'HEART DECORATION': 128159, 'HEART WITH ARROW': 128152, 'HEART WITH RIBBON': 128157, 'HEAVY DIVISION SIGN': 10135, 'HEAVY DOLLAR SIGN': 128178, 'HEAVY LOW DOUBLE COMMA QUOTATION MARK ORNAMENT': 10080, 'HEAVY LOW SINGLE COMMA QUOTATION MARK ORNAMENT': 10079, 'HEAVY MINUS SIGN': 10134, 'HEAVY PLUS SIGN': 10133, 'HELICOPTER': 128641, 'HERB': 127807, 'HIBISCUS': 127802, 'HIGH BRIGHTNESS SYMBOL': 128262, 'HIGH-HEELED SHOE': 128096, 'HIGH-SPEED TRAIN': 128644, 'HIGH-SPEED TRAIN WITH BULLET NOSE': 128645, 'HIRAGANA LETTER ARCHAIC YE': 110593, 'HIRAGANA LETTER BIDAKUON NGA': 983954, 'HIRAGANA LETTER BIDAKUON NGE': 983957, 'HIRAGANA LETTER BIDAKUON NGI': 983955, 'HIRAGANA LETTER BIDAKUON NGO': 983958, 'HIRAGANA LETTER BIDAKUON NGU': 983956, 'HOCHO': 128298, 'HONEY POT': 127855, 'HONEYBEE': 128029, 'HORIZONTAL TRAFFIC LIGHT': 128677, 'HORSE': 
128014, 'HORSE FACE': 128052, 'HORSE RACING': 127943, 'HOSPITAL': 127973, 'HOTEL': 127976, 'HOURGLASS WITH FLOWING SAND': 9203, 'HOUSE BUILDING': 127968, 'HOUSE WITH GARDEN': 127969, 'HUNDRED POINTS SYMBOL': 128175, 'ICE CREAM': 127848, 'IMP': 128127, 'INBOX TRAY': 128229, 'INCOMING ENVELOPE': 128232, 'INDIAN RUPEE SIGN': 8377, 'INFORMATION DESK PERSON': 128129, 'INPUT SYMBOL FOR LATIN CAPITAL LETTERS': 128288, 'INPUT SYMBOL FOR LATIN LETTERS': 128292, 'INPUT SYMBOL FOR LATIN SMALL LETTERS': 128289, 'INPUT SYMBOL FOR NUMBERS': 128290, 'INPUT SYMBOL FOR SYMBOLS': 128291, 'INVERTED PENTAGRAM': 9959, 'IZAKAYA LANTERN': 127982, 'JACK-O-LANTERN': 127875, 'JAPANESE CASTLE': 127983, 'JAPANESE DOLLS': 127886, 'JAPANESE GOBLIN': 128122, 'JAPANESE OGRE': 128121, 'JAPANESE POST OFFICE': 127971, 'JAPANESE SYMBOL FOR BEGINNER': 128304, 'JEANS': 128086, 'KATAKANA LETTER AINU CE': 983964, 'KATAKANA LETTER AINU P': 983967, 'KATAKANA LETTER AINU TO': 983966, 'KATAKANA LETTER AINU TU': 983965, 'KATAKANA LETTER ARCHAIC E': 110592, 'KATAKANA LETTER BIDAKUON NGA': 983959, 'KATAKANA LETTER BIDAKUON NGE': 983962, 'KATAKANA LETTER BIDAKUON NGI': 983960, 'KATAKANA LETTER BIDAKUON NGO': 983963, 'KATAKANA LETTER BIDAKUON NGU': 983961, 'KEY': 128273, 'KEYCAP TEN': 128287, 'KHMER CONSONANT SIGN COENG BA': 983933, 'KHMER CONSONANT SIGN COENG CA': 983918, 'KHMER CONSONANT SIGN COENG CHA': 983919, 'KHMER CONSONANT SIGN COENG CHO': 983921, 'KHMER CONSONANT SIGN COENG CO': 983920, 'KHMER CONSONANT SIGN COENG DA': 983923, 'KHMER CONSONANT SIGN COENG DO': 983925, 'KHMER CONSONANT SIGN COENG HA': 983945, 'KHMER CONSONANT SIGN COENG KA': 983913, 'KHMER CONSONANT SIGN COENG KHA': 983914, 'KHMER CONSONANT SIGN COENG KHO': 983916, 'KHMER CONSONANT SIGN COENG KO': 983915, 'KHMER CONSONANT SIGN COENG LA': 983946, 'KHMER CONSONANT SIGN COENG LO': 983940, 'KHMER CONSONANT SIGN COENG MO': 983937, 'KHMER CONSONANT SIGN COENG NA': 983927, 'KHMER CONSONANT SIGN COENG NGO': 983917, 'KHMER CONSONANT SIGN COENG NO': 
983932, 'KHMER CONSONANT SIGN COENG NYO': 983922, 'KHMER CONSONANT SIGN COENG PHA': 983934, 'KHMER CONSONANT SIGN COENG PHO': 983936, 'KHMER CONSONANT SIGN COENG PO': 983935, 'KHMER CONSONANT SIGN COENG RO': 983939, 'KHMER CONSONANT SIGN COENG SA': 983944, 'KHMER CONSONANT SIGN COENG SHA': 983942, 'KHMER CONSONANT SIGN COENG SSA': 983943, 'KHMER CONSONANT SIGN COENG TA': 983928, 'KHMER CONSONANT SIGN COENG THA': 983929, 'KHMER CONSONANT SIGN COENG THO': 983931, 'KHMER CONSONANT SIGN COENG TO': 983930, 'KHMER CONSONANT SIGN COENG TTHA': 983924, 'KHMER CONSONANT SIGN COENG TTHO': 983926, 'KHMER CONSONANT SIGN COENG VO': 983941, 'KHMER CONSONANT SIGN COENG YO': 983938, 'KHMER INDEPENDENT VOWEL SIGN COENG QE': 983951, 'KHMER INDEPENDENT VOWEL SIGN COENG QU': 983948, 'KHMER INDEPENDENT VOWEL SIGN COENG RY': 983949, 'KHMER INDEPENDENT VOWEL SIGN COENG RYY': 983950, 'KHMER VOWEL SIGN AAM': 983953, 'KHMER VOWEL SIGN COENG QA': 983947, 'KHMER VOWEL SIGN OM': 983952, 'KIMONO': 128088, 'KISS': 128143, 'KISS MARK': 128139, 'KISSING CAT FACE WITH CLOSED EYES': 128573, 'KISSING FACE WITH CLOSED EYES': 128538, 'KOALA': 128040, 'LADY BEETLE': 128030, 'LARGE BLUE CIRCLE': 128309, 'LARGE BLUE DIAMOND': 128311, 'LARGE ORANGE DIAMOND': 128310, 'LARGE RED CIRCLE': 128308, 'LAST QUARTER MOON SYMBOL': 127767, 'LAST QUARTER MOON WITH FACE': 127772, 'LATIN CAPITAL LETTER G WITH OBLIQUE STROKE': 42912, 'LATIN CAPITAL LETTER K WITH OBLIQUE STROKE': 42914, 'LATIN CAPITAL LETTER N WITH DESCENDER': 42896, 'LATIN CAPITAL LETTER N WITH OBLIQUE STROKE': 42916, 'LATIN CAPITAL LETTER R WITH OBLIQUE STROKE': 42918, 'LATIN CAPITAL LETTER S WITH OBLIQUE STROKE': 42920, 'LATIN CAPITAL LETTER TURNED H': 42893, 'LATIN LETTER SMALL CAPITAL TURNED M': 43002, 'LATIN SMALL LETTER AE WITH GRAVE': 983612, 'LATIN SMALL LETTER G WITH OBLIQUE STROKE': 42913, 'LATIN SMALL LETTER HOOKED SCHWA WITH ACUTE': 983620, 'LATIN SMALL LETTER HOOKED SCHWA WITH GRAVE': 983619, 'LATIN SMALL LETTER K WITH OBLIQUE STROKE': 42915, 
'LATIN SMALL LETTER L WITH RETROFLEX HOOK AND BELT': 42894, 'LATIN SMALL LETTER N WITH DESCENDER': 42897, 'LATIN SMALL LETTER N WITH OBLIQUE STROKE': 42917, 'LATIN SMALL LETTER OPEN O WITH ACUTE': 983614, 'LATIN SMALL LETTER OPEN O WITH GRAVE': 983613, 'LATIN SMALL LETTER R WITH OBLIQUE STROKE': 42919, 'LATIN SMALL LETTER S WITH OBLIQUE STROKE': 42921, 'LATIN SMALL LETTER SCHWA WITH ACUTE': 983618, 'LATIN SMALL LETTER SCHWA WITH GRAVE': 983617, 'LATIN SMALL LETTER TURNED V WITH ACUTE': 983616, 'LATIN SMALL LETTER TURNED V WITH GRAVE': 983615, 'LATIN SUBSCRIPT SMALL LETTER H': 8341, 'LATIN SUBSCRIPT SMALL LETTER K': 8342, 'LATIN SUBSCRIPT SMALL LETTER L': 8343, 'LATIN SUBSCRIPT SMALL LETTER M': 8344, 'LATIN SUBSCRIPT SMALL LETTER N': 8345, 'LATIN SUBSCRIPT SMALL LETTER P': 8346, 'LATIN SUBSCRIPT SMALL LETTER S': 8347, 'LATIN SUBSCRIPT SMALL LETTER T': 8348, 'LEAF FLUTTERING IN WIND': 127811, 'LEDGER': 128210, 'LEFT LUGGAGE': 128709, 'LEFT-HANDED INTERLACED PENTAGRAM': 9958, 'LEFT-POINTING MAGNIFYING GLASS': 128269, 'LEMON': 127819, 'LEOPARD': 128006, 'LIGHT RAIL': 128648, 'LINK SYMBOL': 128279, 'LIPSTICK': 128132, 'LOCK': 128274, 'LOCK WITH INK PEN': 128271, 'LOLLIPOP': 127853, 'LOUDLY CRYING FACE': 128557, 'LOVE HOTEL': 127977, 'LOVE LETTER': 128140, 'LOW BRIGHTNESS SYMBOL': 128261, 'MALAYALAM LETTER DOT REPH': 3406, 'MALAYALAM LETTER NNNA': 3369, 'MALAYALAM LETTER TTTA': 3386, 'MAN': 128104, 'MAN AND WOMAN HOLDING HANDS': 128107, 'MAN WITH GUA PI MAO': 128114, 'MAN WITH TURBAN': 128115, 'MANDAIC AFFRICATION MARK': 2137, 'MANDAIC GEMINATION MARK': 2139, 'MANDAIC LETTER AB': 2113, 'MANDAIC LETTER AD': 2115, 'MANDAIC LETTER AG': 2114, 'MANDAIC LETTER AH': 2116, 'MANDAIC LETTER AIN': 2136, 'MANDAIC LETTER AK': 2122, 'MANDAIC LETTER AKSA': 2121, 'MANDAIC LETTER AL': 2123, 'MANDAIC LETTER AM': 2124, 'MANDAIC LETTER AN': 2125, 'MANDAIC LETTER AP': 2128, 'MANDAIC LETTER AQ': 2130, 'MANDAIC LETTER AR': 2131, 'MANDAIC LETTER AS': 2126, 'MANDAIC LETTER ASH': 2132, 'MANDAIC 
LETTER ASZ': 2129, 'MANDAIC LETTER AT': 2133, 'MANDAIC LETTER ATT': 2120, 'MANDAIC LETTER AZ': 2118, 'MANDAIC LETTER DUSHENNA': 2134, 'MANDAIC LETTER HALQA': 2112, 'MANDAIC LETTER IN': 2127, 'MANDAIC LETTER IT': 2119, 'MANDAIC LETTER KAD': 2135, 'MANDAIC LETTER USHENNA': 2117, 'MANDAIC PUNCTUATION': 2142, 'MANDAIC VOCALIZATION MARK': 2138, 'MANS SHOE': 128094, 'MAPLE LEAF': 127809, 'MEAT ON BONE': 127830, 'MELON': 127816, 'MEMO': 128221, 'MENS SYMBOL': 128697, 'METRO': 128647, 'MICROPHONE': 127908, 'MICROSCOPE': 128300, 'MILKY WAY': 127756, 'MINIBUS': 128656, 'MINIDISC': 128189, 'MOBILE PHONE': 128241, 'MOBILE PHONE OFF': 128244, 'MOBILE PHONE WITH RIGHTWARDS ARROW AT LEFT': 128242, 'MODIFIER LETTER EXTRA-HIGH EXTRA-LOW CONTOUR TONE BAR': 983968, 'MODIFIER LETTER EXTRA-LOW EXTRA-HIGH CONTOUR TONE BAR': 983969, 'MONEY BAG': 128176, 'MONEY WITH WINGS': 128184, 'MONKEY': 128018, 'MONKEY FACE': 128053, 'MONORAIL': 128669, 'MOON VIEWING CEREMONY': 127889, 'MOUNT FUJI': 128507, 'MOUNTAIN BICYCLIST': 128693, 'MOUNTAIN CABLEWAY': 128672, 'MOUNTAIN RAILWAY': 128670, 'MOUSE': 128001, 'MOUSE FACE': 128045, 'MOUTH': 128068, 'MOVIE CAMERA': 127909, 'MOYAI': 128511, 'MULTIPLE MUSICAL NOTES': 127926, 'MUSHROOM': 127812, 'MUSICAL KEYBOARD': 127929, 'MUSICAL NOTE': 127925, 'MUSICAL SCORE': 127932, 'NAIL POLISH': 128133, 'NAME BADGE': 128219, 'NECKTIE': 128084, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER A': 127312, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER B': 127313, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER C': 127314, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER D': 127315, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER E': 127316, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER F': 127317, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER G': 127318, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER I': 127320, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER J': 127321, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER K': 127322, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER L': 127323, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER M': 127324, 'NEGATIVE CIRCLED 
LATIN CAPITAL LETTER N': 127325, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER O': 127326, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER Q': 127328, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER R': 127329, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER S': 127330, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER T': 127331, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER U': 127332, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER V': 127333, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER W': 127334, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER X': 127335, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER Y': 127336, 'NEGATIVE CIRCLED LATIN CAPITAL LETTER Z': 127337, 'NEGATIVE SQUARED AB': 127374, 'NEGATIVE SQUARED CROSS MARK': 10062, 'NEGATIVE SQUARED LATIN CAPITAL LETTER A': 127344, 'NEGATIVE SQUARED LATIN CAPITAL LETTER B': 127345, 'NEGATIVE SQUARED LATIN CAPITAL LETTER C': 127346, 'NEGATIVE SQUARED LATIN CAPITAL LETTER D': 127347, 'NEGATIVE SQUARED LATIN CAPITAL LETTER E': 127348, 'NEGATIVE SQUARED LATIN CAPITAL LETTER F': 127349, 'NEGATIVE SQUARED LATIN CAPITAL LETTER G': 127350, 'NEGATIVE SQUARED LATIN CAPITAL LETTER H': 127351, 'NEGATIVE SQUARED LATIN CAPITAL LETTER I': 127352, 'NEGATIVE SQUARED LATIN CAPITAL LETTER K': 127354, 'NEGATIVE SQUARED LATIN CAPITAL LETTER N': 127357, 'NEGATIVE SQUARED LATIN CAPITAL LETTER O': 127358, 'NEGATIVE SQUARED LATIN CAPITAL LETTER Q': 127360, 'NEGATIVE SQUARED LATIN CAPITAL LETTER R': 127361, 'NEGATIVE SQUARED LATIN CAPITAL LETTER S': 127362, 'NEGATIVE SQUARED LATIN CAPITAL LETTER T': 127363, 'NEGATIVE SQUARED LATIN CAPITAL LETTER U': 127364, 'NEGATIVE SQUARED LATIN CAPITAL LETTER V': 127365, 'NEGATIVE SQUARED LATIN CAPITAL LETTER W': 127366, 'NEGATIVE SQUARED LATIN CAPITAL LETTER X': 127367, 'NEGATIVE SQUARED LATIN CAPITAL LETTER Y': 127368, 'NEGATIVE SQUARED LATIN CAPITAL LETTER Z': 127369, 'NEGATIVE SQUARED WC': 127375, 'NEUTRAL FACE': 128528, 'NEW MOON SYMBOL': 127761, 'NEW MOON WITH FACE': 127770, 'NEWSPAPER': 128240, 'NIGHT WITH STARS': 127747, 'NO BICYCLES': 128691, 'NO ENTRY SIGN': 128683, 'NO 
MOBILE PHONES': 128245, 'NO ONE UNDER EIGHTEEN SYMBOL': 128286, 'NO PEDESTRIANS': 128695, 'NO SMOKING SYMBOL': 128685, 'NON-POTABLE WATER SYMBOL': 128689, 'NOSE': 128067, 'NOTEBOOK': 128211, 'NOTEBOOK WITH DECORATIVE COVER': 128212, 'NUT AND BOLT': 128297, 'OCTOPUS': 128025, 'ODEN': 127842, 'OFFICE BUILDING': 127970, 'OK HAND SIGN': 128076, 'OLDER MAN': 128116, 'OLDER WOMAN': 128117, 'ON WITH EXCLAMATION MARK WITH LEFT RIGHT ARROW ABOVE': 128283, 'ONCOMING AUTOMOBILE': 128664, 'ONCOMING BUS': 128653, 'ONCOMING POLICE CAR': 128660, 'ONCOMING TAXI': 128662, 'OPEN BOOK': 128214, 'OPEN FILE FOLDER': 128194, 'OPEN HANDS SIGN': 128080, 'OPEN LOCK': 128275, 'OPEN MAILBOX WITH LOWERED FLAG': 128237, 'OPEN MAILBOX WITH RAISED FLAG': 128236, 'OPHIUCHUS': 9934, 'OPTICAL DISC': 128191, 'ORANGE BOOK': 128217, 'ORIYA FRACTION ONE EIGHTH': 2934, 'ORIYA FRACTION ONE HALF': 2931, 'ORIYA FRACTION ONE QUARTER': 2930, 'ORIYA FRACTION ONE SIXTEENTH': 2933, 'ORIYA FRACTION THREE QUARTERS': 2932, 'ORIYA FRACTION THREE SIXTEENTHS': 2935, 'OUTBOX TRAY': 128228, 'OX': 128002, 'PACKAGE': 128230, 'PAGE FACING UP': 128196, 'PAGE WITH CURL': 128195, 'PAGER': 128223, 'PALM TREE': 127796, 'PANDA FACE': 128060, 'PAPERCLIP': 128206, 'PARTY POPPER': 127881, 'PASSPORT CONTROL': 128706, 'PAW PRINTS': 128062, 'PEACH': 127825, 'PEAR': 127824, 'PEDESTRIAN': 128694, 'PENGUIN': 128039, 'PENSIVE FACE': 128532, 'PENTAGRAM': 9956, 'PERFORMING ARTS': 127917, 'PERSEVERING FACE': 128547, 'PERSON BOWING DEEPLY': 128583, 'PERSON FROWNING': 128589, 'PERSON RAISING BOTH HANDS IN CELEBRATION': 128588, 'PERSON WITH BLOND HAIR': 128113, 'PERSON WITH FOLDED HANDS': 128591, 'PERSON WITH POUTING FACE': 128590, 'PERSONAL COMPUTER': 128187, 'PIG': 128022, 'PIG FACE': 128055, 'PIG NOSE': 128061, 'PILE OF POO': 128169, 'PILL': 128138, 'PINE DECORATION': 127885, 'PINEAPPLE': 127821, 'PISTOL': 128299, 'PLAYING CARD ACE OF CLUBS': 127185, 'PLAYING CARD ACE OF DIAMONDS': 127169, 'PLAYING CARD ACE OF HEARTS': 127153, 'PLAYING CARD 
ACE OF SPADES': 127137, 'PLAYING CARD BACK': 127136, 'PLAYING CARD BLACK JOKER': 127183, 'PLAYING CARD EIGHT OF CLUBS': 127192, 'PLAYING CARD EIGHT OF DIAMONDS': 127176, 'PLAYING CARD EIGHT OF HEARTS': 127160, 'PLAYING CARD EIGHT OF SPADES': 127144, 'PLAYING CARD FIVE OF CLUBS': 127189, 'PLAYING CARD FIVE OF DIAMONDS': 127173, 'PLAYING CARD FIVE OF HEARTS': 127157, 'PLAYING CARD FIVE OF SPADES': 127141, 'PLAYING CARD FOUR OF CLUBS': 127188, 'PLAYING CARD FOUR OF DIAMONDS': 127172, 'PLAYING CARD FOUR OF HEARTS': 127156, 'PLAYING CARD FOUR OF SPADES': 127140, 'PLAYING CARD JACK OF CLUBS': 127195, 'PLAYING CARD JACK OF DIAMONDS': 127179, 'PLAYING CARD JACK OF HEARTS': 127163, 'PLAYING CARD JACK OF SPADES': 127147, 'PLAYING CARD KING OF CLUBS': 127198, 'PLAYING CARD KING OF DIAMONDS': 127182, 'PLAYING CARD KING OF HEARTS': 127166, 'PLAYING CARD KING OF SPADES': 127150, 'PLAYING CARD KNIGHT OF CLUBS': 127196, 'PLAYING CARD KNIGHT OF DIAMONDS': 127180, 'PLAYING CARD KNIGHT OF HEARTS': 127164, 'PLAYING CARD KNIGHT OF SPADES': 127148, 'PLAYING CARD NINE OF CLUBS': 127193, 'PLAYING CARD NINE OF DIAMONDS': 127177, 'PLAYING CARD NINE OF HEARTS': 127161, 'PLAYING CARD NINE OF SPADES': 127145, 'PLAYING CARD QUEEN OF CLUBS': 127197, 'PLAYING CARD QUEEN OF DIAMONDS': 127181, 'PLAYING CARD QUEEN OF HEARTS': 127165, 'PLAYING CARD QUEEN OF SPADES': 127149, 'PLAYING CARD SEVEN OF CLUBS': 127191, 'PLAYING CARD SEVEN OF DIAMONDS': 127175, 'PLAYING CARD SEVEN OF HEARTS': 127159, 'PLAYING CARD SEVEN OF SPADES': 127143, 'PLAYING CARD SIX OF CLUBS': 127190, 'PLAYING CARD SIX OF DIAMONDS': 127174, 'PLAYING CARD SIX OF HEARTS': 127158, 'PLAYING CARD SIX OF SPADES': 127142, 'PLAYING CARD TEN OF CLUBS': 127194, 'PLAYING CARD TEN OF DIAMONDS': 127178, 'PLAYING CARD TEN OF HEARTS': 127162, 'PLAYING CARD TEN OF SPADES': 127146, 'PLAYING CARD THREE OF CLUBS': 127187, 'PLAYING CARD THREE OF DIAMONDS': 127171, 'PLAYING CARD THREE OF HEARTS': 127155, 'PLAYING CARD THREE OF SPADES': 127139, 'PLAYING 
CARD TWO OF CLUBS': 127186, 'PLAYING CARD TWO OF DIAMONDS': 127170, 'PLAYING CARD TWO OF HEARTS': 127154, 'PLAYING CARD TWO OF SPADES': 127138, 'PLAYING CARD WHITE JOKER': 127199, 'POLICE CAR': 128659, 'POLICE CARS REVOLVING LIGHT': 128680, 'POLICE OFFICER': 128110, 'POODLE': 128041, 'POSTAL HORN': 128239, 'POSTBOX': 128238, 'POT OF FOOD': 127858, 'POTABLE WATER SYMBOL': 128688, 'POUCH': 128093, 'POULTRY LEG': 127831, 'POUTING CAT FACE': 128574, 'POUTING FACE': 128545, 'PRINCESS': 128120, 'PUBLIC ADDRESS LOUDSPEAKER': 128226, 'PURPLE HEART': 128156, 'PURSE': 128091, 'PUSHPIN': 128204, 'PUT LITTER IN ITS PLACE SYMBOL': 128686, 'RABBIT': 128007, 'RABBIT FACE': 128048, 'RADIO': 128251, 'RADIO BUTTON': 128280, 'RAILWAY CAR': 128643, 'RAINBOW': 127752, 'RAISED FIST': 9994, 'RAISED HAND': 9995, 'RAM': 128015, 'RAT': 128000, 'RECREATIONAL VEHICLE': 128665, 'RED APPLE': 127822, 'REGIONAL INDICATOR SYMBOL LETTER A': 127462, 'REGIONAL INDICATOR SYMBOL LETTER B': 127463, 'REGIONAL INDICATOR SYMBOL LETTER C': 127464, 'REGIONAL INDICATOR SYMBOL LETTER D': 127465, 'REGIONAL INDICATOR SYMBOL LETTER E': 127466, 'REGIONAL INDICATOR SYMBOL LETTER F': 127467, 'REGIONAL INDICATOR SYMBOL LETTER G': 127468, 'REGIONAL INDICATOR SYMBOL LETTER H': 127469, 'REGIONAL INDICATOR SYMBOL LETTER I': 127470, 'REGIONAL INDICATOR SYMBOL LETTER J': 127471, 'REGIONAL INDICATOR SYMBOL LETTER K': 127472, 'REGIONAL INDICATOR SYMBOL LETTER L': 127473, 'REGIONAL INDICATOR SYMBOL LETTER M': 127474, 'REGIONAL INDICATOR SYMBOL LETTER N': 127475, 'REGIONAL INDICATOR SYMBOL LETTER O': 127476, 'REGIONAL INDICATOR SYMBOL LETTER P': 127477, 'REGIONAL INDICATOR SYMBOL LETTER Q': 127478, 'REGIONAL INDICATOR SYMBOL LETTER R': 127479, 'REGIONAL INDICATOR SYMBOL LETTER S': 127480, 'REGIONAL INDICATOR SYMBOL LETTER T': 127481, 'REGIONAL INDICATOR SYMBOL LETTER U': 127482, 'REGIONAL INDICATOR SYMBOL LETTER V': 127483, 'REGIONAL INDICATOR SYMBOL LETTER W': 127484, 'REGIONAL INDICATOR SYMBOL LETTER X': 127485, 'REGIONAL 
INDICATOR SYMBOL LETTER Y': 127486, 'REGIONAL INDICATOR SYMBOL LETTER Z': 127487, 'RELIEVED FACE': 128524, 'RESTROOM': 128699, 'REVOLVING HEARTS': 128158, 'RIBBON': 127872, 'RICE BALL': 127833, 'RICE CRACKER': 127832, 'RIGHT-HANDED INTERLACED PENTAGRAM': 9957, 'RIGHT-POINTING MAGNIFYING GLASS': 128270, 'RING': 128141, 'ROASTED SWEET POTATO': 127840, 'ROCKET': 128640, 'ROLLER COASTER': 127906, 'ROOSTER': 128019, 'ROSE': 127801, 'ROUND PUSHPIN': 128205, 'ROWBOAT': 128675, 'RUGBY FOOTBALL': 127945, 'RUNNER': 127939, 'RUNNING SHIRT WITH SASH': 127933, 'SAKE BOTTLE AND CUP': 127862, 'SATELLITE ANTENNA': 128225, 'SAXOPHONE': 127927, 'SCHOOL': 127979, 'SCHOOL SATCHEL': 127890, 'SCROLL': 128220, 'SEAT': 128186, 'SEE-NO-EVIL MONKEY': 128584, 'SEEDLING': 127793, 'SHAVED ICE': 127847, 'SHEEP': 128017, 'SHIP': 128674, 'SHOOTING STAR': 127776, 'SHORTCAKE': 127856, 'SHOWER': 128703, 'SILHOUETTE OF JAPAN': 128510, 'SIX POINTED STAR WITH MIDDLE DOT': 128303, 'SKI AND SKI BOOT': 127935, 'SKULL': 128128, 'SLEEPING SYMBOL': 128164, 'SLEEPY FACE': 128554, 'SLICE OF PIZZA': 127829, 'SLOT MACHINE': 127920, 'SMALL BLUE DIAMOND': 128313, 'SMALL ORANGE DIAMOND': 128312, 'SMILING CAT FACE WITH HEART-SHAPED EYES': 128571, 'SMILING CAT FACE WITH OPEN MOUTH': 128570, 'SMILING FACE WITH HALO': 128519, 'SMILING FACE WITH HEART-SHAPED EYES': 128525, 'SMILING FACE WITH HORNS': 128520, 'SMILING FACE WITH OPEN MOUTH': 128515, 'SMILING FACE WITH OPEN MOUTH AND COLD SWEAT': 128517, 'SMILING FACE WITH OPEN MOUTH AND SMILING EYES': 128516, 'SMILING FACE WITH OPEN MOUTH AND TIGHTLY-CLOSED EYES': 128518, 'SMILING FACE WITH SMILING EYES': 128522, 'SMILING FACE WITH SUNGLASSES': 128526, 'SMIRKING FACE': 128527, 'SMOKING SYMBOL': 128684, 'SNAIL': 128012, 'SNAKE': 128013, 'SNOWBOARDER': 127938, 'SOFT ICE CREAM': 127846, 'SOON WITH RIGHTWARDS ARROW ABOVE': 128284, 'SPAGHETTI': 127837, 'SPARKLES': 10024, 'SPARKLING HEART': 128150, 'SPEAK-NO-EVIL MONKEY': 128586, 'SPEAKER': 128264, 'SPEAKER WITH CANCELLATION 
STROKE': 128263, 'SPEAKER WITH ONE SOUND WAVE': 128265, 'SPEAKER WITH THREE SOUND WAVES': 128266, 'SPEECH BALLOON': 128172, 'SPEEDBOAT': 128676, 'SPIRAL SHELL': 128026, 'SPLASHING SWEAT SYMBOL': 128166, 'SPOUTING WHALE': 128051, 'SQUARED CJK UNIFIED IDEOGRAPH-5272': 127545, 'SQUARED CJK UNIFIED IDEOGRAPH-5408': 127540, 'SQUARED CJK UNIFIED IDEOGRAPH-55B6': 127546, 'SQUARED CJK UNIFIED IDEOGRAPH-6708': 127543, 'SQUARED CJK UNIFIED IDEOGRAPH-6709': 127542, 'SQUARED CJK UNIFIED IDEOGRAPH-6E80': 127541, 'SQUARED CJK UNIFIED IDEOGRAPH-7533': 127544, 'SQUARED CJK UNIFIED IDEOGRAPH-7981': 127538, 'SQUARED CJK UNIFIED IDEOGRAPH-7A7A': 127539, 'SQUARED CL': 127377, 'SQUARED COOL': 127378, 'SQUARED FREE': 127379, 'SQUARED ID': 127380, 'SQUARED KATAKANA KOKO': 127489, 'SQUARED KATAKANA SA': 127490, 'SQUARED LATIN CAPITAL LETTER A': 127280, 'SQUARED LATIN CAPITAL LETTER C': 127282, 'SQUARED LATIN CAPITAL LETTER D': 127283, 'SQUARED LATIN CAPITAL LETTER E': 127284, 'SQUARED LATIN CAPITAL LETTER F': 127285, 'SQUARED LATIN CAPITAL LETTER G': 127286, 'SQUARED LATIN CAPITAL LETTER H': 127287, 'SQUARED LATIN CAPITAL LETTER I': 127288, 'SQUARED LATIN CAPITAL LETTER J': 127289, 'SQUARED LATIN CAPITAL LETTER K': 127290, 'SQUARED LATIN CAPITAL LETTER L': 127291, 'SQUARED LATIN CAPITAL LETTER M': 127292, 'SQUARED LATIN CAPITAL LETTER O': 127294, 'SQUARED LATIN CAPITAL LETTER Q': 127296, 'SQUARED LATIN CAPITAL LETTER R': 127297, 'SQUARED LATIN CAPITAL LETTER T': 127299, 'SQUARED LATIN CAPITAL LETTER U': 127300, 'SQUARED LATIN CAPITAL LETTER V': 127301, 'SQUARED LATIN CAPITAL LETTER X': 127303, 'SQUARED LATIN CAPITAL LETTER Y': 127304, 'SQUARED LATIN CAPITAL LETTER Z': 127305, 'SQUARED LOGICAL AND': 10190, 'SQUARED LOGICAL OR': 10191, 'SQUARED NEW': 127381, 'SQUARED NG': 127382, 'SQUARED OK': 127383, 'SQUARED SOS': 127384, 'SQUARED UP WITH EXCLAMATION MARK': 127385, 'SQUARED VS': 127386, 'SQUARED WC': 127311, 'STATION': 128649, 'STATUE OF LIBERTY': 128509, 'STEAM LOCOMOTIVE': 128642, 
'STEAMING BOWL': 127836, 'STOPWATCH': 9201, 'STRAIGHT RULER': 128207, 'STRAWBERRY': 127827, 'SUN WITH FACE': 127774, 'SUNFLOWER': 127803, 'SUNRISE': 127749, 'SUNRISE OVER MOUNTAINS': 127748, 'SUNSET OVER BUILDINGS': 127751, 'SURFER': 127940, 'SUSHI': 127843, 'SUSPENSION RAILWAY': 128671, 'SWIMMER': 127946, 'SYRINGE': 128137, 'T-SHIRT': 128085, 'TAMIL CONSONANT C': 983624, 'TAMIL CONSONANT H': 983644, 'TAMIL CONSONANT J': 983640, 'TAMIL CONSONANT K': 983622, 'TAMIL CONSONANT KSS': 983645, 'TAMIL CONSONANT L': 983634, 'TAMIL CONSONANT LL': 983637, 'TAMIL CONSONANT LLL': 983636, 'TAMIL CONSONANT M': 983631, 'TAMIL CONSONANT N': 983629, 'TAMIL CONSONANT NG': 983623, 'TAMIL CONSONANT NN': 983627, 'TAMIL CONSONANT NNN': 983639, 'TAMIL CONSONANT NY': 983625, 'TAMIL CONSONANT P': 983630, 'TAMIL CONSONANT R': 983633, 'TAMIL CONSONANT RR': 983638, 'TAMIL CONSONANT S': 983643, 'TAMIL CONSONANT SH': 983641, 'TAMIL CONSONANT SS': 983642, 'TAMIL CONSONANT T': 983628, 'TAMIL CONSONANT TT': 983626, 'TAMIL CONSONANT V': 983635, 'TAMIL CONSONANT Y': 983632, 'TAMIL SYLLABLE CAA': 983668, 'TAMIL SYLLABLE CAI': 983675, 'TAMIL SYLLABLE CAU': 983678, 'TAMIL SYLLABLE CE': 983673, 'TAMIL SYLLABLE CEE': 983674, 'TAMIL SYLLABLE CI': 983669, 'TAMIL SYLLABLE CII': 983670, 'TAMIL SYLLABLE CO': 983676, 'TAMIL SYLLABLE COO': 983677, 'TAMIL SYLLABLE CU': 983671, 'TAMIL SYLLABLE CUU': 983672, 'TAMIL SYLLABLE HAA': 983888, 'TAMIL SYLLABLE HAI': 983895, 'TAMIL SYLLABLE HAU': 983898, 'TAMIL SYLLABLE HE': 983893, 'TAMIL SYLLABLE HEE': 983894, 'TAMIL SYLLABLE HI': 983889, 'TAMIL SYLLABLE HII': 983890, 'TAMIL SYLLABLE HO': 983896, 'TAMIL SYLLABLE HOO': 983897, 'TAMIL SYLLABLE HU': 983891, 'TAMIL SYLLABLE HUU': 983892, 'TAMIL SYLLABLE JAA': 983844, 'TAMIL SYLLABLE JAI': 983851, 'TAMIL SYLLABLE JAU': 983854, 'TAMIL SYLLABLE JE': 983849, 'TAMIL SYLLABLE JEE': 983850, 'TAMIL SYLLABLE JI': 983845, 'TAMIL SYLLABLE JII': 983846, 'TAMIL SYLLABLE JO': 983852, 'TAMIL SYLLABLE JOO': 983853, 'TAMIL SYLLABLE JU': 
983847, 'TAMIL SYLLABLE JUU': 983848, 'TAMIL SYLLABLE KAA': 983646, 'TAMIL SYLLABLE KAI': 983653, 'TAMIL SYLLABLE KAU': 983656, 'TAMIL SYLLABLE KE': 983651, 'TAMIL SYLLABLE KEE': 983652, 'TAMIL SYLLABLE KI': 983647, 'TAMIL SYLLABLE KII': 983648, 'TAMIL SYLLABLE KO': 983654, 'TAMIL SYLLABLE KOO': 983655, 'TAMIL SYLLABLE KSSA': 983899, 'TAMIL SYLLABLE KSSAA': 983900, 'TAMIL SYLLABLE KSSAI': 983907, 'TAMIL SYLLABLE KSSAU': 983910, 'TAMIL SYLLABLE KSSE': 983905, 'TAMIL SYLLABLE KSSEE': 983906, 'TAMIL SYLLABLE KSSI': 983901, 'TAMIL SYLLABLE KSSII': 983902, 'TAMIL SYLLABLE KSSO': 983908, 'TAMIL SYLLABLE KSSOO': 983909, 'TAMIL SYLLABLE KSSU': 983903, 'TAMIL SYLLABLE KSSUU': 983904, 'TAMIL SYLLABLE KU': 983649, 'TAMIL SYLLABLE KUU': 983650, 'TAMIL SYLLABLE LAA': 983778, 'TAMIL SYLLABLE LAI': 983785, 'TAMIL SYLLABLE LAU': 983788, 'TAMIL SYLLABLE LE': 983783, 'TAMIL SYLLABLE LEE': 983784, 'TAMIL SYLLABLE LI': 983779, 'TAMIL SYLLABLE LII': 983780, 'TAMIL SYLLABLE LLAA': 983811, 'TAMIL SYLLABLE LLAI': 983818, 'TAMIL SYLLABLE LLAU': 983821, 'TAMIL SYLLABLE LLE': 983816, 'TAMIL SYLLABLE LLEE': 983817, 'TAMIL SYLLABLE LLI': 983812, 'TAMIL SYLLABLE LLII': 983813, 'TAMIL SYLLABLE LLLAA': 983800, 'TAMIL SYLLABLE LLLAI': 983807, 'TAMIL SYLLABLE LLLAU': 983810, 'TAMIL SYLLABLE LLLE': 983805, 'TAMIL SYLLABLE LLLEE': 983806, 'TAMIL SYLLABLE LLLI': 983801, 'TAMIL SYLLABLE LLLII': 983802, 'TAMIL SYLLABLE LLLO': 983808, 'TAMIL SYLLABLE LLLOO': 983809, 'TAMIL SYLLABLE LLLU': 983803, 'TAMIL SYLLABLE LLLUU': 983804, 'TAMIL SYLLABLE LLO': 983819, 'TAMIL SYLLABLE LLOO': 983820, 'TAMIL SYLLABLE LLU': 983814, 'TAMIL SYLLABLE LLUU': 983815, 'TAMIL SYLLABLE LO': 983786, 'TAMIL SYLLABLE LOO': 983787, 'TAMIL SYLLABLE LU': 983781, 'TAMIL SYLLABLE LUU': 983782, 'TAMIL SYLLABLE MAA': 983745, 'TAMIL SYLLABLE MAI': 983752, 'TAMIL SYLLABLE MAU': 983755, 'TAMIL SYLLABLE ME': 983750, 'TAMIL SYLLABLE MEE': 983751, 'TAMIL SYLLABLE MI': 983746, 'TAMIL SYLLABLE MII': 983747, 'TAMIL SYLLABLE MO': 983753, 'TAMIL 
SYLLABLE MOO': 983754, 'TAMIL SYLLABLE MU': 983748, 'TAMIL SYLLABLE MUU': 983749, 'TAMIL SYLLABLE NAA': 983723, 'TAMIL SYLLABLE NAI': 983730, 'TAMIL SYLLABLE NAU': 983733, 'TAMIL SYLLABLE NE': 983728, 'TAMIL SYLLABLE NEE': 983729, 'TAMIL SYLLABLE NGAA': 983657, 'TAMIL SYLLABLE NGAI': 983664, 'TAMIL SYLLABLE NGAU': 983667, 'TAMIL SYLLABLE NGE': 983662, 'TAMIL SYLLABLE NGEE': 983663, 'TAMIL SYLLABLE NGI': 983658, 'TAMIL SYLLABLE NGII': 983659, 'TAMIL SYLLABLE NGO': 983665, 'TAMIL SYLLABLE NGOO': 983666, 'TAMIL SYLLABLE NGU': 983660, 'TAMIL SYLLABLE NGUU': 983661, 'TAMIL SYLLABLE NI': 983724, 'TAMIL SYLLABLE NII': 983725, 'TAMIL SYLLABLE NNAA': 983701, 'TAMIL SYLLABLE NNAI': 983708, 'TAMIL SYLLABLE NNAU': 983711, 'TAMIL SYLLABLE NNE': 983706, 'TAMIL SYLLABLE NNEE': 983707, 'TAMIL SYLLABLE NNI': 983702, 'TAMIL SYLLABLE NNII': 983703, 'TAMIL SYLLABLE NNNAA': 983833, 'TAMIL SYLLABLE NNNAI': 983840, 'TAMIL SYLLABLE NNNAU': 983843, 'TAMIL SYLLABLE NNNE': 983838, 'TAMIL SYLLABLE NNNEE': 983839, 'TAMIL SYLLABLE NNNI': 983834, 'TAMIL SYLLABLE NNNII': 983835, 'TAMIL SYLLABLE NNNO': 983841, 'TAMIL SYLLABLE NNNOO': 983842, 'TAMIL SYLLABLE NNNU': 983836, 'TAMIL SYLLABLE NNNUU': 983837, 'TAMIL SYLLABLE NNO': 983709, 'TAMIL SYLLABLE NNOO': 983710, 'TAMIL SYLLABLE NNU': 983704, 'TAMIL SYLLABLE NNUU': 983705, 'TAMIL SYLLABLE NO': 983731, 'TAMIL SYLLABLE NOO': 983732, 'TAMIL SYLLABLE NU': 983726, 'TAMIL SYLLABLE NUU': 983727, 'TAMIL SYLLABLE NYAA': 983679, 'TAMIL SYLLABLE NYAI': 983686, 'TAMIL SYLLABLE NYAU': 983689, 'TAMIL SYLLABLE NYE': 983684, 'TAMIL SYLLABLE NYEE': 983685, 'TAMIL SYLLABLE NYI': 983680, 'TAMIL SYLLABLE NYII': 983681, 'TAMIL SYLLABLE NYO': 983687, 'TAMIL SYLLABLE NYOO': 983688, 'TAMIL SYLLABLE NYU': 983682, 'TAMIL SYLLABLE NYUU': 983683, 'TAMIL SYLLABLE PAA': 983734, 'TAMIL SYLLABLE PAI': 983741, 'TAMIL SYLLABLE PAU': 983744, 'TAMIL SYLLABLE PE': 983739, 'TAMIL SYLLABLE PEE': 983740, 'TAMIL SYLLABLE PI': 983735, 'TAMIL SYLLABLE PII': 983736, 'TAMIL SYLLABLE PO': 
983742, 'TAMIL SYLLABLE POO': 983743, 'TAMIL SYLLABLE PU': 983737, 'TAMIL SYLLABLE PUU': 983738, 'TAMIL SYLLABLE RAA': 983767, 'TAMIL SYLLABLE RAI': 983774, 'TAMIL SYLLABLE RAU': 983777, 'TAMIL SYLLABLE RE': 983772, 'TAMIL SYLLABLE REE': 983773, 'TAMIL SYLLABLE RI': 983768, 'TAMIL SYLLABLE RII': 983769, 'TAMIL SYLLABLE RO': 983775, 'TAMIL SYLLABLE ROO': 983776, 'TAMIL SYLLABLE RRAA': 983822, 'TAMIL SYLLABLE RRAI': 983829, 'TAMIL SYLLABLE RRAU': 983832, 'TAMIL SYLLABLE RRE': 983827, 'TAMIL SYLLABLE RREE': 983828, 'TAMIL SYLLABLE RRI': 983823, 'TAMIL SYLLABLE RRII': 983824, 'TAMIL SYLLABLE RRO': 983830, 'TAMIL SYLLABLE RROO': 983831, 'TAMIL SYLLABLE RRU': 983825, 'TAMIL SYLLABLE RRUU': 983826, 'TAMIL SYLLABLE RU': 983770, 'TAMIL SYLLABLE RUU': 983771, 'TAMIL SYLLABLE SAA': 983877, 'TAMIL SYLLABLE SAI': 983884, 'TAMIL SYLLABLE SAU': 983887, 'TAMIL SYLLABLE SE': 983882, 'TAMIL SYLLABLE SEE': 983883, 'TAMIL SYLLABLE SHAA': 983855, 'TAMIL SYLLABLE SHAI': 983862, 'TAMIL SYLLABLE SHAU': 983865, 'TAMIL SYLLABLE SHE': 983860, 'TAMIL SYLLABLE SHEE': 983861, 'TAMIL SYLLABLE SHI': 983856, 'TAMIL SYLLABLE SHII': 983857, 'TAMIL SYLLABLE SHO': 983863, 'TAMIL SYLLABLE SHOO': 983864, 'TAMIL SYLLABLE SHRII': 983911, 'TAMIL SYLLABLE SHU': 983858, 'TAMIL SYLLABLE SHUU': 983859, 'TAMIL SYLLABLE SI': 983878, 'TAMIL SYLLABLE SII': 983879, 'TAMIL SYLLABLE SO': 983885, 'TAMIL SYLLABLE SOO': 983886, 'TAMIL SYLLABLE SSAA': 983866, 'TAMIL SYLLABLE SSAI': 983873, 'TAMIL SYLLABLE SSAU': 983876, 'TAMIL SYLLABLE SSE': 983871, 'TAMIL SYLLABLE SSEE': 983872, 'TAMIL SYLLABLE SSI': 983867, 'TAMIL SYLLABLE SSII': 983868, 'TAMIL SYLLABLE SSO': 983874, 'TAMIL SYLLABLE SSOO': 983875, 'TAMIL SYLLABLE SSU': 983869, 'TAMIL SYLLABLE SSUU': 983870, 'TAMIL SYLLABLE SU': 983880, 'TAMIL SYLLABLE SUU': 983881, 'TAMIL SYLLABLE TAA': 983712, 'TAMIL SYLLABLE TAI': 983719, 'TAMIL SYLLABLE TAU': 983722, 'TAMIL SYLLABLE TE': 983717, 'TAMIL SYLLABLE TEE': 983718, 'TAMIL SYLLABLE TI': 983713, 'TAMIL SYLLABLE TII': 983714, 
'TAMIL SYLLABLE TO': 983720, 'TAMIL SYLLABLE TOO': 983721, 'TAMIL SYLLABLE TTAA': 983690, 'TAMIL SYLLABLE TTAI': 983697, 'TAMIL SYLLABLE TTAU': 983700, 'TAMIL SYLLABLE TTE': 983695, 'TAMIL SYLLABLE TTEE': 983696, 'TAMIL SYLLABLE TTI': 983691, 'TAMIL SYLLABLE TTII': 983692, 'TAMIL SYLLABLE TTO': 983698, 'TAMIL SYLLABLE TTOO': 983699, 'TAMIL SYLLABLE TTU': 983693, 'TAMIL SYLLABLE TTUU': 983694, 'TAMIL SYLLABLE TU': 983715, 'TAMIL SYLLABLE TUU': 983716, 'TAMIL SYLLABLE VAA': 983789, 'TAMIL SYLLABLE VAI': 983796, 'TAMIL SYLLABLE VAU': 983799, 'TAMIL SYLLABLE VE': 983794, 'TAMIL SYLLABLE VEE': 983795, 'TAMIL SYLLABLE VI': 983790, 'TAMIL SYLLABLE VII': 983791, 'TAMIL SYLLABLE VO': 983797, 'TAMIL SYLLABLE VOO': 983798, 'TAMIL SYLLABLE VU': 983792, 'TAMIL SYLLABLE VUU': 983793, 'TAMIL SYLLABLE YAA': 983756, 'TAMIL SYLLABLE YAI': 983763, 'TAMIL SYLLABLE YAU': 983766, 'TAMIL SYLLABLE YE': 983761, 'TAMIL SYLLABLE YEE': 983762, 'TAMIL SYLLABLE YI': 983757, 'TAMIL SYLLABLE YII': 983758, 'TAMIL SYLLABLE YO': 983764, 'TAMIL SYLLABLE YOO': 983765, 'TAMIL SYLLABLE YU': 983759, 'TAMIL SYLLABLE YUU': 983760, 'TANABATA TREE': 127883, 'TANGERINE': 127818, 'TAXI': 128661, 'TEACUP WITHOUT HANDLE': 127861, 'TEAR-OFF CALENDAR': 128198, 'TELEPHONE RECEIVER': 128222, 'TELESCOPE': 128301, 'TELEVISION': 128250, 'TENNIS RACQUET AND BALL': 127934, 'THOUGHT BALLOON': 128173, 'THUMBS DOWN SIGN': 128078, 'THUMBS UP SIGN': 128077, 'TIBETAN MARK LEADING MCHAN RTAGS': 4057, 'TIBETAN MARK TRAILING MCHAN RTAGS': 4058, 'TIBETAN SIGN INVERTED MCHU CAN': 3980, 'TIBETAN SUBJOINED SIGN INVERTED MCHU CAN': 3983, 'TIBETAN SUBJOINED SIGN LCE TSA CAN': 3981, 'TIBETAN SUBJOINED SIGN MCHU CAN': 3982, 'TICKET': 127915, 'TIFINAGH CONSONANT JOINER': 11647, 'TIFINAGH SEPARATOR MARK': 11632, 'TIGER': 128005, 'TIGER FACE': 128047, 'TIMER CLOCK': 9202, 'TIRED FACE': 128555, 'TOILET': 128701, 'TOKYO TOWER': 128508, 'TOMATO': 127813, 'TONGUE': 128069, 'TOP HAT': 127913, 'TOP WITH UPWARDS ARROW ABOVE': 128285, 'TRACTOR': 
128668, 'TRAIN': 128646, 'TRAM': 128650, 'TRAM CAR': 128651, 'TRIANGULAR FLAG ON POST': 128681, 'TRIANGULAR RULER': 128208, 'TRIDENT EMBLEM': 128305, 'TROLLEYBUS': 128654, 'TROPHY': 127942, 'TROPICAL DRINK': 127865, 'TROPICAL FISH': 128032, 'TRUMPET': 127930, 'TULIP': 127799, 'TURTLE': 128034, 'TWISTED RIGHTWARDS ARROWS': 128256, 'TWO HEARTS': 128149, 'TWO MEN HOLDING HANDS': 128108, 'TWO WOMEN HOLDING HANDS': 128109, 'UNAMUSED FACE': 128530, 'UP-POINTING RED TRIANGLE': 128314, 'UP-POINTING SMALL RED TRIANGLE': 128316, 'VERTICAL TRAFFIC LIGHT': 128678, 'VIBRATION MODE': 128243, 'VIDEO CAMERA': 128249, 'VIDEO GAME': 127918, 'VIDEOCASSETTE': 128252, 'VIOLIN': 127931, 'VOLCANO': 127755, 'WANING CRESCENT MOON SYMBOL': 127768, 'WANING GIBBOUS MOON SYMBOL': 127766, 'WATER BUFFALO': 128003, 'WATER CLOSET': 128702, 'WATER WAVE': 127754, 'WATERMELON': 127817, 'WAVING HAND SIGN': 128075, 'WAXING CRESCENT MOON SYMBOL': 127762, 'WAXING GIBBOUS MOON SYMBOL': 127764, 'WEARY CAT FACE': 128576, 'WEARY FACE': 128553, 'WEDDING': 128146, 'WHALE': 128011, 'WHITE DOWN POINTING BACKHAND INDEX': 128071, 'WHITE EXCLAMATION MARK ORNAMENT': 10069, 'WHITE FLOWER': 128174, 'WHITE HEAVY CHECK MARK': 9989, 'WHITE LEFT POINTING BACKHAND INDEX': 128072, 'WHITE QUESTION MARK ORNAMENT': 10068, 'WHITE RIGHT POINTING BACKHAND INDEX': 128073, 'WHITE SQUARE BUTTON': 128307, 'WHITE UP POINTING BACKHAND INDEX': 128070, 'WIND CHIME': 127888, 'WINE GLASS': 127863, 'WINKING FACE': 128521, 'WOLF FACE': 128058, 'WOMAN': 128105, 'WOMAN WITH BUNNY EARS': 128111, 'WOMANS BOOTS': 128098, 'WOMANS CLOTHES': 128090, 'WOMANS HAT': 128082, 'WOMANS SANDAL': 128097, 'WOMENS SYMBOL': 128698, 'WRAPPED PRESENT': 127873, 'WRENCH': 128295, 'YELLOW HEART': 128155, } _code_by_name_corrected = { } _cjk_prefix = "CJK UNIFIED IDEOGRAPH-" _hangul_prefix = 'HANGUL SYLLABLE ' _hangul_L = ['G', 'GG', 'N', 'D', 'DD', 'R', 'M', 'B', 'BB', 'S', 'SS', '', 'J', 'JJ', 'C', 'K', 'T', 'P', 'H'] _hangul_V = ['A', 'AE', 'YA', 'YAE', 'EO', 'E', 
'YEO', 'YE', 'O', 'WA', 'WAE', 'OE', 'YO', 'U', 'WEO', 'WE', 'WI',
    'YU', 'EU', 'YI', 'I']
# Trailing-consonant (jongseong) romanizations; index 0 is the empty
# string, i.e. "no trailing consonant".
_hangul_T = ['', 'G', 'GG', 'GS', 'N', 'NJ', 'NH', 'D', 'L', 'LG', 'LM',
             'LB', 'LS', 'LT', 'LP', 'LH', 'M', 'B', 'BS', 'S', 'SS',
             'NG', 'J', 'C', 'K', 'T', 'P', 'H']


def _lookup_hangul(syllables):
    """Map a Hangul syllable name suffix (e.g. ``'GAG'``) to its code point.

    Greedily matches the longest leading-consonant (L), vowel (V) and
    trailing-consonant (T) jamo romanizations in turn, then composes the
    code point with the standard Unicode Hangul formula.

    Raises:
        KeyError: if *syllables* is not a valid L+V+T decomposition.
    """
    l_code = v_code = t_code = -1
    # Longest leading-consonant romanization that prefixes the name.
    for i in range(len(_hangul_L)):
        jamo = _hangul_L[i]
        if (syllables[:len(jamo)] == jamo and
                (l_code < 0 or len(jamo) > len(_hangul_L[l_code]))):
            l_code = i
    if l_code < 0:
        raise KeyError
    start = len(_hangul_L[l_code])
    # Longest vowel romanization following the leading consonant.
    for i in range(len(_hangul_V)):
        jamo = _hangul_V[i]
        if (syllables[start:start + len(jamo)] == jamo and
                (v_code < 0 or len(jamo) > len(_hangul_V[v_code]))):
            v_code = i
    if v_code < 0:
        raise KeyError
    start += len(_hangul_V[v_code])
    # Longest trailing-consonant romanization; the empty string at index 0
    # always matches, so t_code falls back to 0 (no trailing consonant).
    for i in range(len(_hangul_T)):
        jamo = _hangul_T[i]
        if (syllables[start:start + len(jamo)] == jamo and
                (t_code < 0 or len(jamo) > len(_hangul_T[t_code]))):
            t_code = i
    if t_code < 0:
        raise KeyError
    start += len(_hangul_T[t_code])
    if len(syllables[start:]):
        # Leftover characters: not a pure L+V+T syllable name.
        raise KeyError
    # Unicode Hangul composition: 0xAC00 + (L*21 + V)*28 + T.
    return 0xAC00 + (l_code * 21 + v_code) * 28 + t_code


def _lookup_cjk(cjk_code):
    """Map the hex tail of a ``CJK UNIFIED IDEOGRAPH-XXXX`` name to its code.

    Accepts only 4- or 5-digit uppercase hex strings whose value lies in
    one of the CJK unified ideograph ranges checked below; raises KeyError
    otherwise.
    """
    if len(cjk_code) != 4 and len(cjk_code) != 5:
        raise KeyError
    for c in cjk_code:
        # Uppercase hex digits only; lowercase or other characters fail.
        if not ('0' <= c <= '9' or 'A' <= c <= 'F'):
            raise KeyError
    code = int(cjk_code, 16)
    if (0x3400 <= code <= 0x4DB5 or
            0x4E00 <= code <= 0x9FCB or
            0x20000 <= code <= 0x2A6D6 or
            0x2A700 <= code <= 0x2B734 or
            0x2B740 <= code <= 0x2B81D):
        return code
    raise KeyError


def lookup(name, with_named_sequence=False):
    """Return the code point for the Unicode character *name*.

    CJK ideograph and Hangul syllable names are computed algorithmically;
    all other names go through the name table.  When ``base_mod`` is set,
    this module holds only corrections and falls back to the base module's
    trie for names it does not override.

    Raises:
        KeyError: for unknown names and, unless *with_named_sequence* is
            true, for codes in the named-sequence range (see below).
    """
    if name[:len(_cjk_prefix)] == _cjk_prefix:
        return _lookup_cjk(name[len(_cjk_prefix):])
    if name[:len(_hangul_prefix)] == _hangul_prefix:
        return _lookup_hangul(name[len(_hangul_prefix):])
    if not base_mod:
        code = trie_lookup(name)
    else:
        try:
            code = _code_by_name[name]
        except KeyError:
            # Not overridden here: defer to the base module unless the
            # name is explicitly listed as corrected (then it truly fails).
            if name not in _code_by_name_corrected:
                code = base_mod.trie_lookup(name)
            else:
                raise
    # 0xF0200-0xF03FF appear to be private codes backing named sequences
    # -- NOTE(review): range meaning inferred from this check; confirm
    # against the generator.
    if not with_named_sequence and 0xF0200 <= code < 0xF0400:
        raise KeyError
    return code


def name(code):
    """Return the Unicode character name for *code* (inverse of lookup)."""
    if (0x3400 <= code <= 0x4DB5 or
            0x4E00 <= code <= 0x9FCB or
            0x20000 <= code <= 0x2A6D6 or
            0x2A700 <= code <= 0x2B734 or
            0x2B740 <= code <= 0x2B81D):
        # CJK unified ideographs: name is derived from the hex code.
        return "CJK UNIFIED IDEOGRAPH-" + hex(code)[2:].upper()
    if 0xAC00 <= code <= 0xD7A3:
        # Decompose the Hangul syllable into L/V/T jamo indices.
        # vl_code, t_code = divmod(code - 0xAC00, len(_hangul_T))
        vl_code = (code - 0xAC00) // len(_hangul_T)
        t_code = (code - 0xAC00) % len(_hangul_T)
        # l_code, v_code = divmod(vl_code, len(_hangul_V))
        l_code = vl_code // len(_hangul_V)
        v_code = vl_code % len(_hangul_V)
        return ("HANGUL SYLLABLE " + _hangul_L[l_code] +
                _hangul_V[v_code] + _hangul_T[t_code])
    if 0xF0000 <= code < 0xF0400:
        # Private codes used internally (corrections / named sequences)
        # have no public name.
        raise KeyError
    if not base_mod:
        return lookup_charcode(code)
    else:
        try:
            return _names[code]
        except KeyError:
            # Not overridden here: defer to the base module unless the
            # code is explicitly listed as corrected.
            if code not in _names_corrected:
                return base_mod.lookup_charcode(code)
            else:
                raise


# (category, bidirectional class, east-asian width, flags) records;
# code points map into this table via the _db_pgtbl page indirection
# defined below.
_db_records = [
    ('Cc', 'B', 'N', 5), ('Cc', 'BN', 'N', 0), ('Cc', 'S', 'N', 1),
    ('Cc', 'S', 'N', 5), ('Cc', 'WS', 'N', 5), ('Cf', 'AN', 'N', 8192),
    ('Cf', 'BN', 'A', 8192), ('Cf', 'BN', 'N', 8192),
    ('Cf', 'L', 'N', 8192), ('Cf', 'LRE', 'N', 8192),
    ('Cf', 'LRO', 'N', 8192), ('Cf', 'ON', 'N', 8192),
    ('Cf', 'PDF', 'N', 8192), ('Cf', 'R', 'N', 8192),
    ('Cf', 'RLE', 'N', 8192), ('Cf', 'RLO', 'N', 8192),
    ('Cn', '', 'N', 0), ('Cn', '', 'W', 0), ('Co', 'L', 'A', 0),
    ('Cs', 'L', 'N', 0), ('Ll', 'L', 'A', 7202), ('Ll', 'L', 'F', 7202),
    ('Ll', 'L', 'N', 7202), ('Ll', 'L', 'Na', 7202),
    ('Lm', 'AL', 'N', 15362), ('Lm', 'L', 'A', 15362),
    ('Lm', 'L', 'H', 14338), ('Lm', 'L', 'H', 15362),
    ('Lm', 'L', 'N', 12322), ('Lm', 'L', 'N', 15362),
    ('Lm', 'L', 'N', 15394), ('Lm', 'L', 'W', 15362),
    ('Lm', 'ON', 'A', 15362), ('Lm', 'ON', 'N', 12290),
    ('Lm', 'ON', 'N', 15362), ('Lm', 'R', 'N', 15362),
    ('Lo', 'AL', 'N', 4098), ('Lo', 'AL', 'N', 7170),
    ('Lo', 'L', 'H', 7170), ('Lo', 'L', 'N', 6146),
    ('Lo', 'L', 'N', 7170), ('Lo', 'L', 'W', 7170),
    ('Lo', 'L', 'W', 7234), ('Lo', 'R', 'N', 7170),
    ('Lt', 'L', 'N', 7186), ('Lu', 'L', 'A', 7178),
    ('Lu', 'L', 'F', 7178), ('Lu', 'L', 'N', 7178),
    ('Lu', 'L', 'Na', 7178), ('Mc', 'L', 'N', 6144),
('Me', 'NSM', 'N', 12288), ('Mn', 'L', 'N', 14336), ('Mn', 'NSM', 'A', 14336), ('Mn', 'NSM', 'A', 14368), ('Mn', 'NSM', 'N', 14336), ('Mn', 'NSM', 'W', 14336), ('Nd', 'AN', 'N', 6592), ('Nd', 'EN', 'F', 6592), ('Nd', 'EN', 'N', 6592), ('Nd', 'EN', 'Na', 6592), ('Nd', 'L', 'N', 6592), ('Nd', 'R', 'N', 6592), ('Nl', 'L', 'A', 7240), ('Nl', 'L', 'A', 7264), ('Nl', 'L', 'N', 7168), ('Nl', 'L', 'N', 7232), ('Nl', 'L', 'N', 7240), ('Nl', 'L', 'N', 7264), ('Nl', 'L', 'W', 7232), ('Nl', 'ON', 'N', 7232), ('No', 'AN', 'N', 4160), ('No', 'AN', 'N', 4288), ('No', 'EN', 'A', 4160), ('No', 'EN', 'A', 4288), ('No', 'EN', 'N', 4288), ('No', 'L', 'N', 4160), ('No', 'L', 'N', 6336), ('No', 'L', 'W', 4160), ('No', 'ON', 'A', 4160), ('No', 'ON', 'A', 4288), ('No', 'ON', 'N', 4160), ('No', 'ON', 'N', 4288), ('No', 'ON', 'W', 4160), ('No', 'R', 'N', 4160), ('No', 'R', 'N', 4288), ('Pc', 'ON', 'F', 6144), ('Pc', 'ON', 'N', 6144), ('Pc', 'ON', 'Na', 6144), ('Pc', 'ON', 'W', 6144), ('Pd', 'ES', 'F', 4096), ('Pd', 'ES', 'Na', 4096), ('Pd', 'ES', 'W', 4096), ('Pd', 'ON', 'A', 4096), ('Pd', 'ON', 'N', 4096), ('Pd', 'ON', 'W', 4096), ('Pd', 'R', 'N', 4096), ('Pe', 'ON', 'F', 4608), ('Pe', 'ON', 'H', 4608), ('Pe', 'ON', 'N', 4096), ('Pe', 'ON', 'N', 4608), ('Pe', 'ON', 'Na', 4608), ('Pe', 'ON', 'W', 4096), ('Pe', 'ON', 'W', 4608), ('Pf', 'ON', 'A', 4096), ('Pf', 'ON', 'A', 12288), ('Pf', 'ON', 'N', 4608), ('Pi', 'ON', 'A', 4096), ('Pi', 'ON', 'A', 12288), ('Pi', 'ON', 'N', 4096), ('Pi', 'ON', 'N', 4608), ('Po', 'AL', 'N', 4096), ('Po', 'AN', 'N', 4096), ('Po', 'CS', 'F', 4096), ('Po', 'CS', 'F', 12288), ('Po', 'CS', 'N', 4096), ('Po', 'CS', 'Na', 4096), ('Po', 'CS', 'Na', 12288), ('Po', 'CS', 'W', 4096), ('Po', 'CS', 'W', 12288), ('Po', 'ET', 'A', 4096), ('Po', 'ET', 'F', 4096), ('Po', 'ET', 'N', 4096), ('Po', 'ET', 'Na', 4096), ('Po', 'ET', 'W', 4096), ('Po', 'L', 'N', 4096), ('Po', 'ON', 'A', 4096), ('Po', 'ON', 'A', 12288), ('Po', 'ON', 'A', 14336), ('Po', 'ON', 'F', 4096), ('Po', 'ON', 
'F', 12288), ('Po', 'ON', 'H', 4096), ('Po', 'ON', 'N', 4096), ('Po', 'ON', 'N', 14336), ('Po', 'ON', 'Na', 4096), ('Po', 'ON', 'Na', 12288), ('Po', 'ON', 'W', 4096), ('Po', 'ON', 'W', 12288), ('Po', 'R', 'N', 4096), ('Po', 'R', 'N', 12288), ('Ps', 'ON', 'F', 4608), ('Ps', 'ON', 'H', 4608), ('Ps', 'ON', 'N', 4096), ('Ps', 'ON', 'N', 4608), ('Ps', 'ON', 'Na', 4608), ('Ps', 'ON', 'W', 4096), ('Ps', 'ON', 'W', 4608), ('Sc', 'AL', 'N', 4096), ('Sc', 'ET', 'A', 4096), ('Sc', 'ET', 'F', 4096), ('Sc', 'ET', 'H', 4096), ('Sc', 'ET', 'N', 4096), ('Sc', 'ET', 'Na', 4096), ('Sc', 'ET', 'W', 4096), ('Sk', 'AL', 'N', 12288), ('Sk', 'L', 'N', 12288), ('Sk', 'ON', 'A', 12288), ('Sk', 'ON', 'F', 12288), ('Sk', 'ON', 'N', 12288), ('Sk', 'ON', 'Na', 12288), ('Sk', 'ON', 'W', 12288), ('Sm', 'AL', 'N', 4096), ('Sm', 'CS', 'N', 4096), ('Sm', 'ES', 'F', 4096), ('Sm', 'ES', 'N', 4096), ('Sm', 'ES', 'Na', 4096), ('Sm', 'ES', 'W', 4096), ('Sm', 'ET', 'A', 4096), ('Sm', 'ET', 'N', 4096), ('Sm', 'L', 'N', 4096), ('Sm', 'ON', 'A', 4096), ('Sm', 'ON', 'A', 4608), ('Sm', 'ON', 'F', 4096), ('Sm', 'ON', 'F', 4608), ('Sm', 'ON', 'H', 4096), ('Sm', 'ON', 'N', 4096), ('Sm', 'ON', 'N', 4608), ('Sm', 'ON', 'N', 7168), ('Sm', 'ON', 'Na', 4096), ('Sm', 'ON', 'Na', 4608), ('Sm', 'ON', 'W', 4096), ('Sm', 'ON', 'W', 4608), ('So', 'AL', 'N', 4096), ('So', 'ET', 'A', 4096), ('So', 'ET', 'N', 4096), ('So', 'ET', 'N', 7168), ('So', 'L', 'A', 4096), ('So', 'L', 'A', 4104), ('So', 'L', 'A', 4128), ('So', 'L', 'N', 4096), ('So', 'L', 'W', 4096), ('So', 'ON', 'A', 4096), ('So', 'ON', 'F', 4096), ('So', 'ON', 'H', 4096), ('So', 'ON', 'N', 4096), ('So', 'ON', 'Na', 4096), ('So', 'ON', 'W', 4096), ('Zl', 'WS', 'N', 5), ('Zp', 'B', 'N', 5), ('Zs', 'CS', 'N', 1), ('Zs', 'WS', 'F', 1), ('Zs', 'WS', 'N', 1), ('Zs', 'WS', 'Na', 4097), ] _db_pgtbl = ( '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123455565575555' 
'555555555555589:5;5<55=5>55555?@55AB555C5555555D555E55F555555555' 'G555H5555555IJ55555555K55555555LM555N\x15OPQRST55555555555555555555' '55555555555555555555555UVVVVVVVVWWWWWWWWWWWWWWWWWWWWWWWWWXYZ[\\]^' '_`abcdddefghidjdkddddddddddddddd\x15\x15\x15lmddddddddddd\x15\x15\x15\x15nddddddddddd' 'dddddddddddddddddddddddddddddddddddddddd\x15\x15oddddddddddddddddddddd' 'ddddddddddddddddddddddddddddddddddddddddddddddddpddddddddddddddd' 'ddddddddddddddddqrstuvwxddddddddddddddddddddddddyz{|}~\x7f\x80dddddddd' '\x81\x825555555\x83\x84\x8555555555555555555555555\x8655555\x8755555555555555555\x885555' '5555555555555555555555555555555555\x8955555555555555555555555555555' '55555555555555555555555555555555555555\x8a5555555555555555\x8b\x8c\x8d\x8d\x8d\x8d\x8d\x8d\x8d' '\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x865\x8c\x8d\x8d\x8d\x8d\x8e' '\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d' '\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d' '\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d' 
'\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8d\x8e' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 
'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' '\x8f\x90dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW\x91' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW' 'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW\x91' ) _db_pages = ( 
'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x02\x00\x03\x04\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x02\xc9\x85\x85z\x97z\x85\x86\x8fd\x85\xa4sZts;;;;;;;;;;t\x85\xb2\xb1\xb2\x85' '\x8500000000000000000000000000\x8f\x85d\x9eW\x9e\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x8f\xb1d\xb1\x01' '\x01\x01\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\xc6}\x97\x97\x93\x97\xc2\xbe\x9b\xc1\x14m\xb1\x06\xbe\x9e\xb6\xa6II\x9b\x16\xbe\x7f\x9bI\x14iNNN}' '//////-/////////-//////\xa9-/////-\x14\x14\x14\x16\x16\x16\x16\x14\x16\x14\x14\x14\x16\x14\x14\x16\x16\x14\x16\x14\x14\x16\x16\x16\xa9\x14\x14\x14\x16\x14\x16\x14\x16' '/\x14/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x14/\x14/\x16/\x16/\x16/\x14/\x16/\x16/\x16/\x16/\x16-\x14/\x16/\x14/\x16/\x16/\x14-\x14/\x16/\x16\x14/\x16/\x16/\x16-' '\x14-\x14/\x14/\x16/\x14\x14-\x14/\x14/\x16/\x16-\x14/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16-\x14/\x16/\x14/\x16/\x16/\x16/\x16/\x16/\x16//\x16/\x16/\x16\x16' '\x16//\x16/\x16//\x16///\x16\x16////\x16//\x16///\x16\x16\x16//\x16//\x16/\x16/\x16//\x16/\x16\x16/\x16//\x16///\x16/\x16//\x16\x16(/\x16\x16\x16' '((((/,\x16/,\x16/,\x16/\x14/\x14/\x14/\x14/\x14/\x14/\x14/\x14\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16/,\x16/\x16///\x16/\x16/\x16/\x16' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16\x16\x16\x16\x16\x16//\x16//\x16' '\x16/\x16////\x16/\x16/\x16/\x16/\x16\x16\x14\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x14\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' 
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16(\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e""\x1d\x1d\x1d\x1d\x1d' '\x1e\x1e\x9d\x9d\x9b\x9d" " " ""\x19\x1d\x9d\x9d\x9d\x9d\x9d\x9d\x9b\x9b\x9b\x9b\x9d\x9b\x9d\x9b\x1e\x1e\x1e\x1e\x1e\x9d\x9d\x9d\x9d\x9d\x9d\x9d"\x9d\x1d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d' '4444444444444444444444444444444444444444444444444444444444444444' '444445444444444444444444444444444444444444444444/\x16/\x16"\x9d/\x16\x10\x10\x1c\x16\x16\x16\x83\x10' '\x10\x10\x10\x10\x9d\x9d/\x84///\x10/\x10//\x16-----------------\x10-------//\x16\x16\x16\x16\x16\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14' '\x14\x14\x16\x14\x14\x14\x14\x14\x14\x14\x16\x16\x16\x16\x16/\x16\x16///\x16\x16\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16\x16\x16\x16/\x16\xae/\x16//\x16\x16///' '/-//////////////--------------------------------\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14' '\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x14\x16\x14\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '/\x16\xbc6666622/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '//\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x10\x10\x10\x10\x10\x10\x10\x10\x10///////////////' '///////////////////////\x10\x10\x1d||||||\x10\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' 
'\x16\x16\x16\x16\x16\x16\x16\x16\x10|]\x10\x10\x10\x10\x10\x10666666666666666666666666666666666666666666666_6' '\x8966\x8966\x896\x10\x10\x10\x10\x10\x10\x10\x10+++++++++++++++++++++++++++\x10\x10\x10\x10\x10+++\x89\x8a\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x05\x05\x05\x05\x10\x10\xae\xae\xa0yy\x92rn\xc1\xc166666666666n\x10\x10nn%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '\x18%%%%%%%%%%6666666666666666666668888888888yoon%%6%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%n%6666666\x05\xc1666666\x18\x1866\xc16666%%::::::::::%%%\xb5\xb5%' 'nnnnnnnnnnnnnn\x10\x05%6%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%6666666666666666' '66666666666\x10\x10%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%66666666666%\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '==========+++++++++++++++++++++++++++++++++666666666##\xc1\x83\x83\x83#\x10\x10\x10\x10\x10' '++++++++++++++++++++++6666#666666666#666#66666\x10\x10\x89\x89\x89\x89\x89\x89\x89\x89\x89\x89\x89\x89\x89\x89\x89\x10' '+++++++++++++++++++++++++666\x10\x10\x89\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '6661((((((((((((((((((((((((((((((((((((((((((((((((((((((616(11' '1666666661111611(6666666((((((((((66||<<<<<<<<<<|\x1d((((((\x10(((((((' 
'\x10611\x10((((((((\x10\x10((\x10\x10((((((((((((((((((((((\x10(((((((\x10(\x10\x10\x10((((\x10\x106(11' '16666\x10\x1011\x10\x10116(\x10\x10\x10\x10\x10\x10\x10\x101\x10\x10\x10\x10((\x10(((66\x10\x10<<<<<<<<<<((\x96\x96KKKKKK\xbc\x96\x10\x10\x10\x10' '\x10661\x10((((((\x10\x10\x10\x10((\x10\x10((((((((((((((((((((((\x10(((((((\x10((\x10((\x10((\x10\x106\x1011' '166\x10\x10\x10\x1066\x10\x10666\x10\x10\x106\x10\x10\x10\x10\x10\x10\x10((((\x10(\x10\x10\x10\x10\x10\x10\x10<<<<<<<<<<66(((6\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10661\x10(((((((((\x10(((\x10((((((((((((((((((((((\x10(((((((\x10((\x10(((((\x10\x106(11' '166666\x10661\x10116\x10\x10(\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10((66\x10\x10<<<<<<<<<<\x10\x96\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10611\x10((((((((\x10\x10((\x10\x10((((((((((((((((((((((\x10(((((((\x10((\x10(((((\x10\x106(16' '16666\x10\x1011\x10\x10116\x10\x10\x10\x10\x10\x10\x10\x1061\x10\x10\x10\x10((\x10(((66\x10\x10<<<<<<<<<<\xbc(KKKKKK\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x106(\x10((((((\x10\x10\x10(((\x10((((\x10\x10\x10((\x10(\x10((\x10\x10\x10((\x10\x10\x10(((\x10\x10\x10((((((((((((\x10\x10\x10\x1011' '611\x10\x10\x10111\x101116\x10\x10(\x10\x10\x10\x10\x10\x101\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10<<<<<<<<<<KKK\xc1\xc1\xc1\xc1\xc1\xc1\x96\xc1\x10\x10\x10\x10\x10' '\x10111\x10((((((((\x10(((\x10(((((((((((((((((((((((\x10((((((((((\x10(((((\x10\x10\x10(66' '61111\x10666\x106666\x10\x10\x10\x10\x10\x10\x1066\x10((\x10\x10\x10\x10\x10\x10((66\x10\x10<<<<<<<<<<\x10\x10\x10\x10\x10\x10\x10\x10PPPPPPP\xbc' '\x10\x1011\x10((((((((\x10(((\x10(((((((((((((((((((((((\x10((((((((((\x10(((((\x10\x106(13' '11111\x10311\x101166\x10\x10\x10\x10\x10\x10\x1011\x10\x10\x10\x10\x10\x10\x10(\x10((66\x10\x10<<<<<<<<<<\x10((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x1011\x10((((((((\x10(((\x10(((((((((((((((((((((((((((((((((((((((((\x10\x10(11' 
'16666\x10111\x101116(\x10\x10\x10\x10\x10\x10\x10\x101\x10\x10\x10\x10\x10\x10\x10\x10((66\x10\x10<<<<<<<<<<KKKKKK\x10\x10\x10\xbc((((((' '\x10\x1011\x10((((((((((((((((((\x10\x10\x10((((((((((((((((((((((((\x10(((((((((\x10(\x10\x10' '(((((((\x10\x10\x106\x10\x10\x10\x10111666\x106\x1011111111\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x1011|\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' "\x10((((((((((((((((((((((((((((((((((((((((((((((((6('6666666\x10\x10\x10\x10\x96" '((((((\x1d66666666|<<<<<<<<<<||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' "\x10((\x10(\x10\x10((\x10(\x10\x10(\x10\x10\x10\x10\x10\x10((((\x10(((((((\x10(((\x10(\x10(\x10\x10((\x10((((6('666666\x1066(\x10\x10" '(((((\x10\x1d\x10666666\x10\x10<<<<<<<<<<\x10\x10((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(\xbc\xbc\xbc|||||||||||||||\xbc\xbc\xbc\xbc\xbc66\xbc\xbc\xbc\xbc\xbc\xbc<<<<<<<<<<KKKKKKKKKK\xbc6\xbc6\xbc6\x8ec\x8ec11' '((((((((\x10((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10666666666666661' '66666|66(((((66666666666\x10666666666666666666666666666666666666\x10\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc6\xbc\xbc\xbc\xbc\xbc\xbc\x10\xbc\xbc|||||\xbc\xbc\xbc\xbc||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((((((((((((((11666616666661661166(' '<<<<<<<<<<||||||((((((1166((((666(111((1111111(((6666(((((((((((' '((611661111116(1<<<<<<<<<<1116\xbc\xbc////////////////////////////////' '//////\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10(((((((((((((((((((((((((((((((((((((((((((|\x1d\x10\x10\x10' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))((((((((((((((((((((((((((((((((' 
'((((((((((((((((((((((((((((((((((()))))((((((((((((((((((((((((' '(((((((((((((((((((((((((((((((((((((((((((((((((((((((((())))))' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '(((((((((\x10((((\x10\x10(((((((\x10(\x10((((\x10\x10((((((((((((((((((((((((((((((((' '(((((((((\x10((((\x10\x10(((((((((((((((((((((((((((((((((\x10((((\x10\x10(((((((\x10' '(\x10((((\x10\x10(((((((((((((((\x10((((((((((((((((((((((((((((((((((((((((' '(((((((((((((((((\x10((((\x10\x10((((((((((((((((((((((((((((((((((((((((' '(((((((((((((((((((((((((((\x10\x10666\xbc||||||||LLLLLLLLLKKKKKKKKKKK\x10\x10\x10' '((((((((((((((((\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10((((((((((((((((((((((((((((((((' '(((((((((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '](((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '(((((((((((((((((((((((((((((((((((((((((((((||(((((((((((((((((' '\xc8((((((((((((((((((((((((((\x8ec\x10\x10\x10((((((((((((((((((((((((((((((((' '(((((((((((((((((((((((((((((((((((((((((((|||AAA\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((\x10((((666\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10((((((((((((((((((666||\x10\x10\x10\x10\x10\x10\x10\x10\x10' '((((((((((((((((((66\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10(((((((((((((\x10(((\x1066\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'((((((((((((((((((((((((((((((((((((((((((((((((((((\x08\x081666666611' '11111161166666666666|||\x1d|||\x96(6\x10\x10<<<<<<<<<<\x10\x10\x10\x10\x10\x10PPPPPPPPPP\x10\x10\x10\x10\x10\x10' '\x83\x83\x83\x83\x83\x83]\x83\x83\x83\x83666\xc8\x10<<<<<<<<<<\x10\x10\x10\x10\x10\x10((((((((((((((((((((((((((((((((' '(((\x1d((((((((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((((((((((((6(\x10\x10\x10\x10\x10((((((((((((((((' '((((((((((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((\x10\x10\x10666111166111\x10\x10\x10\x10116111111666\x10\x10\x10\x10' '\xc1\x10\x10\x10\x83\x83<<<<<<<<<<((((((((((((((((((((((((((((((\x10\x10(((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x101111111111111111' '1(((((((11\x10\x10\x10\x10\x10\x10<<<<<<<<<<L\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '(((((((((((((((((((((((66111\x10\x10||((((((((((((((((((((((((((((((((' '(((((((((((((((((((((1616666666\x1061611666666661111116666666666\x10\x106' '<<<<<<<<<<\x10\x10\x10\x10\x10\x10<<<<<<<<<<\x10\x10\x10\x10\x10\x10|||||||\x1d||||||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '66661(((((((((((((((((((((((((((((((((((((((((((((((616666616111' '11611(((((((\x10\x10\x10\x10<<<<<<<<<<|||||||\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc666666666\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\x10\x10\x10' '661((((((((((((((((((((((((((((((1666611661\x10\x10\x10((<<<<<<<<<<\x10\x10\x10\x10\x10\x10' 
'((((((((((((((((((((((((((((((((((((((61661116166611\x10\x10\x10\x10\x10\x10\x10\x10||||' '((((((((((((((((((((((((((((((((((((11111111666666661166\x10\x10\x10|||||' '<<<<<<<<<<\x10\x10\x10(((<<<<<<<<<<((((((((((((((((((((((((((((((\x1d\x1d\x1d\x1d\x1d\x1d||' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10666|666666666666616666666((((6((((1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e' '\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x1e\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e' '666666666666666666666666666666666666666\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x106666' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' 
'/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16\x16\x16\x16\x16\x16\x16\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '\x16\x16\x16\x16\x16\x16\x16\x16////////\x16\x16\x16\x16\x16\x16\x10\x10//////\x10\x10\x16\x16\x16\x16\x16\x16\x16\x16////////\x16\x16\x16\x16\x16\x16\x16\x16////////' '\x16\x16\x16\x16\x16\x16\x10\x10//////\x10\x10\x16\x16\x16\x16\x16\x16\x16\x16\x10/\x10/\x10/\x10/\x16\x16\x16\x16\x16\x16\x16\x16////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x10\x10' '\x16\x16\x16\x16\x16\x16\x16\x16,,,,,,,,\x16\x16\x16\x16\x16\x16\x16\x16,,,,,,,,\x16\x16\x16\x16\x16\x16\x16\x16,,,,,,,,\x16\x16\x16\x16\x16\x10\x16\x16////,\x9d\x16\x9d' '\x9d\x9d\x16\x16\x16\x10\x16\x16////,\x9d\x9d\x9d\x16\x16\x16\x16\x10\x10\x16\x16////\x10\x9d\x9d\x9d\x16\x16\x16\x16\x16\x16\x16\x16/////\x9d\x9d\x9d\x10\x10\x16\x16\x16\x10\x16\x16////,\x9d\x9d\x10' '\xc8\xc8\xc8\xc8\xc8\xc8\xc8\xc8\xc8\xc8\xc8\x07\x07\x07\x08\r\\]]\\\\\\}\x83kh\x8dljg\x8dl}}}\x83~}}~\xc4\xc5\t\x0e\x0c\n\x0f\xc6wywwy}\x83\x83\x83mi}\x83\x83}V' 'V\x83\x83\x83\xa1\x8ec\x83\x83\x83\x83\x83\x83\x83\x83\x83\x83\x83\xae\x83V\x83\x83\x83\x83\x83\x83\x83\x83\x83\x83\xc8\x07\x07\x07\x07\x07\x10\x10\x10\x10\x10\x07\x07\x07\x07\x07\x07J\x1d\x10\x10IJJJJJ\xa3\xa3\xae\x8ec\x19' 'JIIIIJJJJJ\xa3\xa3\xae\x8ec\x10\x1e\x1e\x1e\x1e\x1e\x1d\x1d\x1d\x1d\x1d\x1d\x1d\x1d\x10\x10\x10\x96\x96\x96\x96\x96\x96\x96\x96\x96\x95\x96\x96\x93\x96\x96\x96\x96\x96\x96\x96\x96\x96\x96\x96\x96\x96\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10666666666666622226222666666666666\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\xc1\xc1/\xbe\xc1\xbe\xc1/\xc1\xbe\x16///\x16\x16///\x14\xc1/\xbe\xc1\xb0/////\xc1\xc1\xc1\xbe\xbe\xc1/\xc1-\xc1/\xc1/-//\xb8\x16////\x16((((\x16\xc1\xc1\x16\x16//' '\xaf\xae\xae\xae\xae/\x16\x16\x16\x16\xc1\xae\xc1\xc1\x16\xbcPPPNNPPPPPPNNNNP>>>>>>>>>>>>BBBB??????????CCCCCC' 'AAA/\x16AAAAN\x10\x10\x10\x10\x10\x10\xa9\xa9\xa9\xa9\xa9\xbe\xbe\xbe\xbe\xbe\xae\xae\xc1\xc1\xc1\xc1\xae\xc1\xc1\xae\xc1\xc1\xae\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xae\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xae\xae\xc1\xc1\xa9\xc1\xa9\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae' '\xa9\xaf\xaa\xaa\xaf\xae\xae\xa9\xaa\xaf\xaf\xaa\xaf\xaf\xae\xa9\xae\xaa\xa3\xa7\xae\xaa\xaf\xae\xae\xae\xaa\xaf\xaf\xaa\xa9\xaa\xaa\xaf\xaf\xa9\xaf\xa9\xaf\xa9\xa9\xa9\xa9\xaa\xaa\xaf\xaa\xaf\xaf\xaf\xaf\xaf\xa9\xa9\xa9\xa9\xae\xaf\xae\xaf\xaa\xaa\xaf\xaf' '\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaa\xaf\xaf\xaf\xaa\xae\xae\xae\xae\xae\xaa\xaf\xaf\xaf\xae\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaa\xa9\xaf\xae\xaa\xaa\xaa\xaa\xaf\xaf\xaa\xaa\xae\xae\xaa\xaa\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf' '\xaf\xaf\xaa\xaa\xaf\xaf\xaa\xaa\xaf\xaf\xaf\xaf\xaf\xae\xae\xaf\xaf\xaf\xaf\xae\xae\xa9\xae\xae\xaf\xa9\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaf\xae\xa9\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\xae\xae\xae\xaf\xaa' '\xae\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xae\xae\xaf\xaf\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf' 
'\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xaf\xaf\xaf\xaf\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xaf\xaf\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x91f\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xc1\xae\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbc\xc1\xc1\xc1\xc1\xc1\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xae\xae\xae\xae\xae\xae\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10OOOOOOOOONNNNNNNNNNNOOOOOOOOONNN' 'NNNNNNNNIIIIIIIIIHHHHHHHHHHH\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba' '\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbbQNNNNNNNNNNOOOOOOOOONO' 
'\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe' '\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xc1\xc1\xc1\xc1\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xc1\xc1\xbe\xbe\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1\xbe\xa9\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1' '\xbe\xa9\xc1\xc1\xc1\xc1\xbe\xbe\xbe\xc1\xc1\xbe\xc1\xc1\xbe\xbe\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xae\xae\xae\xae\xae\xae\xae\xae' '\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1\xbe\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xc1\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xbe\xc1\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xbe\xbe\xbe\xc1\xbe\xbe\xbe\xbe\xc1\xbe\xbe\xc1\xa9\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbc\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xbe' 
'\xc1\xc1\xc1\xc1\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xc1\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xc1\xbe\xc1\xc1\xc1\xc1\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe\xbe' '\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbe\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x8ec\x8ec\x8ec\x8ec\x8ec\x8ec\x8ecOOOOOOOOON' 'QQQQQQQQQPQQQQQQQQQP\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xaf\xae\xae\xaf\xaf\x8ec\xae\xaf\xaf\xae\x10\xaf\x10\xae\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xae\xae\xae\xae\xae\xaf\xaf\xaf\xae\xae\xae\xaf\xaf\xaf\xaf\x8fd\x8fd\x8fd\x8fd\x8ec\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' 
'\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae' '\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae' '\xae\xae\xae\x8ec\x8fd\x8ec\x8ec\x8ec\x8ec\x8ec\x8ec\x8ec\x8ec\x8ec\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xae\xae\xae\xae\xae\xae\xae' '\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\xae\xaf\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xae\xaf\xaf\xae\xae\x8ec\x8ec\xaf\xae\xae\xae\xae\xaf\xae\xaf\xaf\xaf\xae\xae\xaf\xaf\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\x8ec\xae\xae' '\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xaf\xaf\xaf\xaf\xae\xae\xaf\xae\xaf\xae\xae\xaf\xae\xaf\xaf\xaf\xaf\xae\xae\xae\xae\xae\xaf\xaf\xae\xae\xae\xae\xae\xae\xaf\xaf\xaf\xae' '\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaf\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xaf\xaf\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xae\xaf\xaf\xae\xae\xaf\xaf\xae\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf' 
'\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf' '\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xaf\xae\xae\xae\xae\xae\xaf\xae\xaf\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xae\xae\xae\xae\xae\xaf\xaf\xaf\xae\xae\xae\xae\xaf\xae\xae\xae\xaf\xaf\xaf\xaf\xaf\xae\xaf\xae\xae' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae\xae' '\xae\xae\xae\xae\xae\xc1\xc1\xae\xae\xae\xae\xae\xae\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xbe\xbe\xbe\xbe\xbe\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '///////////////////////////////////////////////\x10\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x10/\x16///\x16\x16/\x16/\x16/\x16////\x16/\x16\x16/\x16\x16\x16\x16\x16\x16\x16\x1e//' 
'/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16\xc1\xc1\xc1\xc1\xc1\xc1/\x16/\x16666\x10\x10\x10\x10\x10\x10\x10\x83\x83\x83\x83P\x83\x83' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10((((((((((((((((' '((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x1d|\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x106' '(((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10(((((((\x10(((((((\x10(((((((\x10(((((((\x10' '(((((((\x10(((((((\x10(((((((\x10(((((((\x1066666666666666666666666666666666' '\x83\x83mimi\x83\x83\x83mi\x83mi\x83\x83\x83\x83\x83\x83\x83\x83\x83]\x83\x83]\x83mi\x83\x83mi\x8ec\x8ec\x8ec\x8ec\x83\x83\x83\x83\x83!\x83\x83\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\x10\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\x10\x10\x10\x10' '\xc7\x87\x87\x87\xc3\x1f)D\x91f\x91f\x91f\x91f\x91f\xc3\xc3\x91f\x91f\x91f\x91f^\x90ee\xc3DDDDDDDDD777777^\x1f\x1f\x1f\x1f\x1f\xc3\xc3DDD\x1f)\x87\xc3\xc1' '\x10)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))\x10\x1077\x9f\x9f\x1f\x1f)^)))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))))))))))))))))))))))))))))))))\x87\x1f\x1f\x1f)' '\x10\x10\x10\x10\x10)))))))))))))))))))))))))))))))))))))))))\x10\x10\x10)))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))\x10\xbd\xbdMMMM\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd)))))))))))))))))))))))))))\x10\x10\x10\x10\x10' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10))))))))))))))))' 
'\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xc3\xc3\x10MMMMMMMMMM\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xc3RRRRRRRRRRRRRRR\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xc3\xc3\xc3\xbd' 'MMMMMMMMMM\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbdRRRRRRRRRRRRRRR' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xc3\xc3\xc3\xc3\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\x10' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xc3\xc3\xc3\xc3\xbd\xbd\xbd\xbd\xbd' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xc3\xc3\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xc3' ')))))*))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' 
'))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))*))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))*)))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))*))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '*))*)))*)*))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))*))))))))))))))))))))))))))))))))))' '))))))))))))*)))))))*)*))))))))))))))))))))))))))))))))))))))))*' '*))))))))))))))))))))))))))))))*))))))))*)))))))))))))))))))))))' ')))))))))))))*))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))*)))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' 
'))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))*)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))*))))))))))))))))))))))))))))))))))*)*)*))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')*)***))))))*)))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')****)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))*))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))))))))))))))))))))))*)))))))*))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))*)))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))**' '))))))))))))***)*)))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))*)' 
'))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))*)))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))*)))))))))))))))))))))))))))))))))))))))))))))' '))))))*)))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))*)))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))*)' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))*)))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))*)))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' 
'))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))*))))*))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))*)))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))))))*))))))))))))))))))))))))))))))' '))))))*)))))*)))))))))))))))))))))))))))))))))))))))))))*)))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))*)))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' ')))))))))))))))))))))\x1f))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))\x10\x10\x10\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3\xc3' '\xc3\xc3\xc3\xc3\xc3\xc3\xc3\x10\x10\x10\x10\x10\x10\x10\x10\x10((((((((((((((((((((((((((((((((((((((((\x1d\x1d\x1d\x1d\x1d\x1d||' 
'((((((((((((\x1d\x83\x83\x83((((((((((((((((<<<<<<<<<<((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16(6222\x83\x10\x10\x10\x10\x10\x10\x10\x1066\x83"' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x10\x10\x10\x10\x10\x10\x10\x10((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((((((((((AAAAAAAAAA66||||||\x10\x10\x10\x10\x10\x10\x10\x10' '\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d\x9d"""""""""\x9d\x9d/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x16\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16' '/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16/\x16\x1e\x16\x16\x16\x16\x16\x16\x16\x16/\x16/\x16//\x16' '/\x16/\x16/\x16/\x16"\x9a\x9a/\x16/\x16\x10/\x16\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10/\x16/\x16/\x16/\x16/\x16\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x16(((((' '((6(((6((((6(((((((((((((((((((((((11661\xc1\xc1\xc1\xc1\x10\x10\x10\x10KKKKKK\xbc\xbc\x96\xb7\x10\x10\x10\x10\x10\x10' '((((((((((((((((((((((((((((((((((((((((((((((((((((\x83\x83\x83\x83\x10\x10\x10\x10\x10\x10\x10\x10' '11((((((((((((((((((((((((((((((((((((((((((((((((((111111111111' '11116\x10\x10\x10\x10\x10\x10\x10\x10\x10||<<<<<<<<<<\x10\x10\x10\x10\x10\x10666666666666666666((((((|||(\x10\x10\x10\x10' '<<<<<<<<<<((((((((((((((((((((((((((((66666666||((((((((((((((((' '(((((((6666666666611\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10|)))))))))))))))))))))))))))))\x10\x10\x10' 
'6661(((((((((((((((((((((((((((((((((((((((((((((((6116666116111' '1|||||||||||||\x10\x1d<<<<<<<<<<\x10\x10\x10\x10||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((((((((((((66666611661166\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((6((((((((61\x10\x10<<<<<<<<<<\x10\x10||||((((((((((((((((\x1d((((((\xbc\xbc\xbc(1\x10\x10\x10\x10' '((((((((((((((((((((((((((((((((((((((((((((((((6(666((66(((((66' '(6(\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10((\x1d||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10((((((\x10\x10((((((\x10\x10((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10(((((((\x10(((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((((((11611611|16\x10\x10<<<<<<<<<<\x10\x10\x10\x10\x10\x10' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10))))))))))))))))' ')))))))\x10\x10\x10\x10)))))))))))))))))))))))))))))))))))))))))))))))))\x10\x10\x10\x10' 
'\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13' '\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13' '\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13' '\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13\x13' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' 
'\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))))))))))))))))*)))))))*))))*)))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))*)))))))))))))' ')))))))))))))))))*)*)))))))))))))))))))))))))))))))))))))))))*))' '))))))))))))))))))))))))))))))))))))))))))))))\x11\x11))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))\x11\x11))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x16\x16\x16\x16\x16\x16\x16\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x16\x16\x16\x16\x16\x10\x10\x10\x10\x10+6++++++++++\xa3+++++++++++++\x10+++++\x10+\x10' '++\x10++\x10++++++++++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99' '\x99\x99\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%$$$$$$%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\x8db' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%\x10\x10%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' 
'%%%%%%%%\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10%%%%%%%%%%$$\x92\xc1\x10\x10' '4444444444444444\x87\x87\x87\x88\x87\x87\x87\x90e\x87\x10\x10\x10\x10\x10\x106666666\x10\x10\x10\x10\x10\x10\x10\x10\x10\x87^^XX\x90e\x90e\x90e\x90e\x90e\x90' 'e\x90e\x90e\x87\x87\x90e\x87\x87\x87\x87XXXu\x87v\x10\x87v\x87\x87^\x91f\x91f\x91f{\x87\x87\xa5[\xb4\xb4\xb3\x10\x87\x98{\x87\x10\x10\x10\x10$%$%$\x10$%$%$%$%$%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%' '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\x10\x10\x07' '\x10\x80\x80x\x94x\x80\x81\x8b`\x80\xa2pYqp9999999999q\x80\xac\xab\xac\x80\x80..........................\x8b\x80`\x9cU' '\x9c\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x8b\xab`\xab\x8b`\x82\x8ca\x82\x82&&&&&&&&&&\x1b&&&&&&&&&&&&&&&' '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\x1a\x1a&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\x10' '\x10\x10&&&&&&\x10\x10&&&&&&\x10\x10&&&&&&\x10\x10&&&\x10\x10\x10\x94\x94\xab\x9c\xbf\x94\x94\x10\xc0\xad\xad\xad\xad\xc0\xc0\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x0b\x0b\x0b\xc1\xbe\x10\x10' '((((((((((((\x10((((((((((((((((((((((((((\x10(((((((((((((((((((\x10((\x10(' '((((((((((((((\x10\x10((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10' '|\x83\xbc\x10\x10\x10\x10KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK\x10\x10\x10\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' 'EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEPPPP\xc1\xc1\xc1\xc1\xc1\xc1\xc1' 
'\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1P\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc6\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((\x10\x10\x10((((((((((((((((((((((((((((((((' '(((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((\x10KKKK\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10((((((((((((((((' '(A((((((((A\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '((((((((((((((((((((((((((((((\x10|((((((((((((((((((((((((((((((((' '((((\x10\x10\x10\x10((((((((|AAAAA\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'////////////////////////////////////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16((((((((((((((((((((((((((((((((((((((((((((((((' '((((((((((((((((((((((((((((((\x10\x10<<<<<<<<<<\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '++++++\x10\x10+\x10++++++++++++++++++++++++++++++++++++++++++++\x10++\x10\x10\x10+\x10\x10+' '++++++++++++++++++++++\x10\x89SSSSSSSS\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '++++++++++++++++++++++SSSSSS\x10\x10\x10\x83++++++++++++++++++++++++++\x10\x10\x10\x10\x10\x89' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '+666\x1066\x10\x10\x10\x10\x106666++++\x10+++\x10+++++++++++++++++++++++++++\x10\x10\x10\x10666\x10\x10\x10\x106' 'TTTTSSSS\x10\x10\x10\x10\x10\x10\x10\x10\x89\x89\x89\x89\x89\x89\x89\x89\x89\x10\x10\x10\x10\x10\x10\x10+++++++++++++++++++++++++++++SS\x89' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '++++++++++++++++++++++++++++++++++++++++++++++++++++++\x10\x10\x10\x83\x83\x83\x83\x83\x83\x83' '++++++++++++++++++++++\x10\x10SSSSSSSS+++++++++++++++++++\x10\x10\x10\x10\x10SSSSSSSS' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' '+++++++++\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10GGGGGGGGGFFFFFFFFFFFFFFFFFFFFFF\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '161(((((((((((((((((((((((((((((((((((((((((((((((((((((66666666' '6666666|||||||\x10\x10\x10\x10QQQQQQQQQPPPPPPPPPPP<<<<<<<<<<\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '661(((((((((((((((((((((((((((((((((((((((((((((11166661166||\x08||' '||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((' '(((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA@@AAAAAAAAAAAA' 'AAAAAAAAAAAAAAAAAAAAAA@@AAAAAAAAAAA\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10||||\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '(((((((((((((((((((((((((((((((((((((((((((((((((((((((((\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '))\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\x10\x10\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc11666\xbc\xbc\xbc111111\x07\x07\x07\x07\x07\x07\x07\x0766666' '666\xbc\xbc6666666\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc6666\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1666\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10KKKKKKKKKKKKKKKKKK\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '//////////////////////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16////////////' '//////////////\x16\x16\x16\x16\x16\x16\x16\x10\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16////////////////////////' '//\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16/\x10//\x10\x10/\x10\x10//\x10\x10////\x10////////\x16\x16\x16\x16\x10\x16\x10\x16\x16\x16' '\x16\x16\x16\x16\x10\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16//////////////////////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16//\x10////\x10\x10////////\x10///////\x10\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16//\x10////\x10' '/////\x10/\x10\x10\x10///////\x10\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16////////////////////' '//////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16//////////////////////////\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16//////////////////////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16//////////////////////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16////' '//////////////////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16////////////////' 
'//////////\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x10\x10////////////////////////' '/\xa8\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\xaf\x16\x16\x16\x16\x16\x16/////////////////////////\xa8\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\xaf\x16\x16\x16\x16\x16\x16/////////////////////////\xa8\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\xaf\x16\x16\x16\x16\x16\x16/////////////////////////\xa8\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\x16\x16\x16\x16\x16\x16\xaf\x16\x16\x16\x16\x16\x16/////////////////////////\xa8\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16' '\x16\x16\x16\xaf\x16\x16\x16\x16\x16\x16/\x16\x10\x10::::::::::::::::::::::::::::::::::::::::::::::::::' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10' 
'\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 'IIIIIIIIIII\x10\x10\x10\x10\x10\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xbc\x10\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9' '\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\x10\x10\x10\x10\x10\x10\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9' '\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc\xbc' '\xbd\xbd\xbd\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\x10\x10\x10\x10\x10' '\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\x10\x10\x10\x10\x10\x10\x10\xbd\xbd\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\x10\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10' 
'\xc1\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbc\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\xc1\xc1\xc1\xc1\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xbc\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1' 
'\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\xc1\xc1\xc1\x10\xc1\x10\xc1\x10\xc1\x10\xc1\xc1\xc1\x10\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\xc1\xc1\xc1\xc1\x10\xc1\x10\x10\xc1\xc1\xc1\xc1\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\x10\x10\x10\x10\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1' '\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' 
'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' ')*))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))*)))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))*)))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))))))*))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))*)))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))*))))))))*)))))))))))))))*)))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))*))))))))))))))))))*))' ')))))))))))))))))))))))))*))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))*)))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' 
'))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))*)))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))*))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))))))))))))))))))))))))*))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' ')))))))))))))))))))))))\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' ')))))))))))))))))))))))))))))))))))))))))))))))))))))\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))' '))))))))))))))))))))))))))))))\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' 
'\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' 
'\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11' '\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x10\x10' '\x10\x07\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07' '\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '4444444444444444444444444444444444444444444444444444444444444444' '4444444444444444444444444444444444444444444444444444444444444444' 
'4444444444444444444444444444444444444444444444444444444444444444' '444444444444444444444444444444444444444444444444\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12' '\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x12\x10\x10' ) def _get_record(code): return _db_records[ord(_db_pages[(ord(_db_pgtbl[code >> 8]) << 8) + (code & 255)])] def category(code): return _get_record(code)[0] def bidirectional(code): return _get_record(code)[1] def east_asian_width(code): return _get_record(code)[2] def isspace(code): return _get_record(code)[3] & 1 != 0 def isalpha(code): return _get_record(code)[3] & 2 != 0 def islinebreak(code): return _get_record(code)[3] & 4 != 0 def isnumeric(code): return _get_record(code)[3] & 64 != 0 def isdigit(code): return _get_record(code)[3] & 128 != 0 def isdecimal(code): return _get_record(code)[3] & 256 != 0 def isalnum(code): return _get_record(code)[3] & 66 != 0 def isupper(code): return _get_record(code)[3] & 8 != 0 def istitle(code): return _get_record(code)[3] & 16 
!= 0 def islower(code): return _get_record(code)[3] & 32 != 0 def iscased(code): return _get_record(code)[3] & 56 != 0 def isxidstart(code): return _get_record(code)[3] & 1024 != 0 def isxidcontinue(code): return _get_record(code)[3] & 2048 != 0 def isprintable(code): return _get_record(code)[3] & 4096 != 0 def mirrored(code): return _get_record(code)[3] & 512 != 0 def iscaseignorable(code): return _get_record(code)[3] & 8192 != 0 _decimal = { 69734: 0, 69735: 1, 69736: 2, 69737: 3, 69738: 4, 69739: 5, 69740: 6, 69741: 7, 69742: 8, 69743: 9, } _decimal_corrected = { 6618: None, } _digit = { 69714: 1, 69715: 2, 69716: 3, 69717: 4, 69718: 5, 69719: 6, 69720: 7, 69721: 8, 69722: 9, 69734: 0, 69735: 1, 69736: 2, 69737: 3, 69738: 4, 69739: 5, 69740: 6, 69741: 7, 69742: 8, 69743: 9, } _digit_corrected = { } _numeric = { 188: 1.0 / 4.0, 189: 1.0 / 2.0, 190: 3.0 / 4.0, 2548: 1.0 / 16.0, 2549: 1.0 / 8.0, 2550: 3.0 / 16.0, 2551: 1.0 / 4.0, 2552: 3.0 / 4.0, 2930: 1.0 / 4.0, 2931: 1.0 / 2.0, 2932: 3.0 / 4.0, 2933: 1.0 / 16.0, 2934: 1.0 / 8.0, 2935: 3.0 / 16.0, 3443: 1.0 / 4.0, 3444: 1.0 / 2.0, 3445: 3.0 / 4.0, 3882: 1.0 / 2.0, 3883: 3.0 / 2.0, 3884: 5.0 / 2.0, 3885: 7.0 / 2.0, 3886: 9.0 / 2.0, 3887: 11.0 / 2.0, 3888: 13.0 / 2.0, 3889: 15.0 / 2.0, 3890: 17.0 / 2.0, 3891: -1.0 / 2.0, 8528: 1.0 / 7.0, 8529: 1.0 / 9.0, 8530: 1.0 / 10.0, 8531: 1.0 / 3.0, 8532: 2.0 / 3.0, 8533: 1.0 / 5.0, 8534: 2.0 / 5.0, 8535: 3.0 / 5.0, 8536: 4.0 / 5.0, 8537: 1.0 / 6.0, 8538: 5.0 / 6.0, 8539: 1.0 / 8.0, 8540: 3.0 / 8.0, 8541: 5.0 / 8.0, 8542: 7.0 / 8.0, 11517: 1.0 / 2.0, 43056: 1.0 / 4.0, 43057: 1.0 / 2.0, 43058: 3.0 / 4.0, 43059: 1.0 / 16.0, 43060: 1.0 / 8.0, 43061: 3.0 / 16.0, 65856: 1.0 / 4.0, 65857: 1.0 / 2.0, 65909: 1.0 / 2.0, 65910: 1.0 / 2.0, 65911: 2.0 / 3.0, 65912: 3.0 / 4.0, 69243: 1.0 / 2.0, 69244: 1.0 / 4.0, 69245: 1.0 / 3.0, 69246: 2.0 / 3.0, 69714: 1.0, 69715: 2.0, 69716: 3.0, 69717: 4.0, 69718: 5.0, 69719: 6.0, 69720: 7.0, 69721: 8.0, 69722: 9.0, 69723: 10.0, 69724: 20.0, 69725: 
30.0, 69726: 40.0, 69727: 50.0, 69728: 60.0, 69729: 70.0, 69730: 80.0, 69731: 90.0, 69732: 100.0, 69733: 1000.0, 69734: 0.0, 69735: 1.0, 69736: 2.0, 69737: 3.0, 69738: 4.0, 69739: 5.0, 69740: 6.0, 69741: 7.0, 69742: 8.0, 69743: 9.0, 74842: 1.0 / 3.0, 74843: 2.0 / 3.0, 74844: 5.0 / 6.0, 74845: 1.0 / 3.0, 74846: 2.0 / 3.0, 74847: 1.0 / 8.0, 74848: 1.0 / 4.0, 74849: 1.0 / 6.0, 74850: 1.0 / 4.0, } _numeric_corrected = { } def decimal(code): try: return _decimal[code] except KeyError: if base_mod is not None and code not in _decimal_corrected: return base_mod._decimal[code] else: raise def digit(code): try: return _digit[code] except KeyError: if base_mod is not None and code not in _digit_corrected: return base_mod._digit[code] else: raise def numeric(code): try: return _numeric[code] except KeyError: if base_mod is not None and code not in _numeric_corrected: return base_mod._numeric[code] else: raise _toupper = { 613: 42893, 1319: 1318, 42593: 42592, 42897: 42896, 42913: 42912, 42915: 42914, 42917: 42916, 42919: 42918, 42921: 42920, } _toupper_corrected = { } _tolower = { 1318: 1319, 42592: 42593, 42893: 613, 42896: 42897, 42912: 42913, 42914: 42915, 42916: 42917, 42918: 42919, 42920: 42921, } _tolower_corrected = { } _totitle = { 613: 42893, 1319: 1318, 42593: 42592, 42897: 42896, 42913: 42912, 42915: 42914, 42917: 42916, 42919: 42918, 42921: 42920, } _totitle_corrected = { } _special_casing = { } _special_casing_corrected = { } def toupper(code): try: return _toupper[code] except KeyError: if base_mod is not None and code not in _toupper_corrected: return base_mod._toupper.get(code, code) else: return code def tolower(code): try: return _tolower[code] except KeyError: if base_mod is not None and code not in _tolower_corrected: return base_mod._tolower.get(code, code) else: return code def totitle(code): try: return _totitle[code] except KeyError: if base_mod is not None and code not in _totitle_corrected: return base_mod._totitle.get(code, code) else: return code 
def toupper_full(code): try: return _special_casing[code][2] except KeyError: if base_mod is not None and code not in _special_casing_corrected: try: return base_mod._special_casing[code][2] except KeyError: pass return [toupper(code)] def tolower_full(code): try: return _special_casing[code][0] except KeyError: if base_mod is not None and code not in _special_casing_corrected: try: return base_mod._special_casing[code][0] except KeyError: pass return [tolower(code)] def totitle_full(code): try: return _special_casing[code][1] except KeyError: if base_mod is not None and code not in _special_casing_corrected: try: return base_mod._special_casing[code][1] except KeyError: pass return [totitle(code)] _raw_decomposition = { 8341: '<sub> 0068', 8342: '<sub> 006B', 8343: '<sub> 006C', 8344: '<sub> 006D', 8345: '<sub> 006E', 8346: '<sub> 0070', 8347: '<sub> 0073', 8348: '<sub> 0074', 127280: '<square> 0041', 127282: '<square> 0043', 127283: '<square> 0044', 127284: '<square> 0045', 127285: '<square> 0046', 127286: '<square> 0047', 127287: '<square> 0048', 127288: '<square> 0049', 127289: '<square> 004A', 127290: '<square> 004B', 127291: '<square> 004C', 127292: '<square> 004D', 127294: '<square> 004F', 127296: '<square> 0051', 127297: '<square> 0052', 127299: '<square> 0054', 127300: '<square> 0055', 127301: '<square> 0056', 127303: '<square> 0058', 127304: '<square> 0059', 127305: '<square> 005A', 127311: '<square> 0057 0043', 127489: '<square> 30B3 30B3', 127490: '<square> 30B5', 127538: '<square> 7981', 127539: '<square> 7A7A', 127540: '<square> 5408', 127541: '<square> 6E80', 127542: '<square> 6709', 127543: '<square> 6708', 127544: '<square> 7533', 127545: '<square> 5272', 127546: '<square> 55B6', 127568: '<circle> 5F97', 127569: '<circle> 53EF', } _raw_decomposition_corrected = { } def decomposition(code): try: return _raw_decomposition[code] except KeyError: if base_mod is not None and code not in _raw_decomposition_corrected: return 
base_mod._raw_decomposition.get(code, '') else: return '' _composition = { r_longlong( 65 << 32 | 768): 192, r_longlong( 65 << 32 | 769): 193, r_longlong( 65 << 32 | 770): 194, r_longlong( 65 << 32 | 771): 195, r_longlong( 65 << 32 | 776): 196, r_longlong( 65 << 32 | 778): 197, r_longlong( 67 << 32 | 807): 199, r_longlong( 69 << 32 | 768): 200, r_longlong( 69 << 32 | 769): 201, r_longlong( 69 << 32 | 770): 202, r_longlong( 69 << 32 | 776): 203, r_longlong( 73 << 32 | 768): 204, r_longlong( 73 << 32 | 769): 205, r_longlong( 73 << 32 | 770): 206, r_longlong( 73 << 32 | 776): 207, r_longlong( 78 << 32 | 771): 209, r_longlong( 79 << 32 | 768): 210, r_longlong( 79 << 32 | 769): 211, r_longlong( 79 << 32 | 770): 212, r_longlong( 79 << 32 | 771): 213, r_longlong( 79 << 32 | 776): 214, r_longlong( 85 << 32 | 768): 217, r_longlong( 85 << 32 | 769): 218, r_longlong( 85 << 32 | 770): 219, r_longlong( 85 << 32 | 776): 220, r_longlong( 89 << 32 | 769): 221, r_longlong( 97 << 32 | 768): 224, r_longlong( 97 << 32 | 769): 225, r_longlong( 97 << 32 | 770): 226, r_longlong( 97 << 32 | 771): 227, r_longlong( 97 << 32 | 776): 228, r_longlong( 97 << 32 | 778): 229, r_longlong( 99 << 32 | 807): 231, r_longlong( 101 << 32 | 768): 232, r_longlong( 101 << 32 | 769): 233, r_longlong( 101 << 32 | 770): 234, r_longlong( 101 << 32 | 776): 235, r_longlong( 105 << 32 | 768): 236, r_longlong( 105 << 32 | 769): 237, r_longlong( 105 << 32 | 770): 238, r_longlong( 105 << 32 | 776): 239, r_longlong( 110 << 32 | 771): 241, r_longlong( 111 << 32 | 768): 242, r_longlong( 111 << 32 | 769): 243, r_longlong( 111 << 32 | 770): 244, r_longlong( 111 << 32 | 771): 245, r_longlong( 111 << 32 | 776): 246, r_longlong( 117 << 32 | 768): 249, r_longlong( 117 << 32 | 769): 250, r_longlong( 117 << 32 | 770): 251, r_longlong( 117 << 32 | 776): 252, r_longlong( 121 << 32 | 769): 253, r_longlong( 121 << 32 | 776): 255, r_longlong( 65 << 32 | 772): 256, r_longlong( 97 << 32 | 772): 257, r_longlong( 65 << 32 | 774): 258, 
r_longlong( 97 << 32 | 774): 259, r_longlong( 65 << 32 | 808): 260, r_longlong( 97 << 32 | 808): 261, r_longlong( 67 << 32 | 769): 262, r_longlong( 99 << 32 | 769): 263, r_longlong( 67 << 32 | 770): 264, r_longlong( 99 << 32 | 770): 265, r_longlong( 67 << 32 | 775): 266, r_longlong( 99 << 32 | 775): 267, r_longlong( 67 << 32 | 780): 268, r_longlong( 99 << 32 | 780): 269, r_longlong( 68 << 32 | 780): 270, r_longlong( 100 << 32 | 780): 271, r_longlong( 69 << 32 | 772): 274, r_longlong( 101 << 32 | 772): 275, r_longlong( 69 << 32 | 774): 276, r_longlong( 101 << 32 | 774): 277, r_longlong( 69 << 32 | 775): 278, r_longlong( 101 << 32 | 775): 279, r_longlong( 69 << 32 | 808): 280, r_longlong( 101 << 32 | 808): 281, r_longlong( 69 << 32 | 780): 282, r_longlong( 101 << 32 | 780): 283, r_longlong( 71 << 32 | 770): 284, r_longlong( 103 << 32 | 770): 285, r_longlong( 71 << 32 | 774): 286, r_longlong( 103 << 32 | 774): 287, r_longlong( 71 << 32 | 775): 288, r_longlong( 103 << 32 | 775): 289, r_longlong( 71 << 32 | 807): 290, r_longlong( 103 << 32 | 807): 291, r_longlong( 72 << 32 | 770): 292, r_longlong( 104 << 32 | 770): 293, r_longlong( 73 << 32 | 771): 296, r_longlong( 105 << 32 | 771): 297, r_longlong( 73 << 32 | 772): 298, r_longlong( 105 << 32 | 772): 299, r_longlong( 73 << 32 | 774): 300, r_longlong( 105 << 32 | 774): 301, r_longlong( 73 << 32 | 808): 302, r_longlong( 105 << 32 | 808): 303, r_longlong( 73 << 32 | 775): 304, r_longlong( 74 << 32 | 770): 308, r_longlong( 106 << 32 | 770): 309, r_longlong( 75 << 32 | 807): 310, r_longlong( 107 << 32 | 807): 311, r_longlong( 76 << 32 | 769): 313, r_longlong( 108 << 32 | 769): 314, r_longlong( 76 << 32 | 807): 315, r_longlong( 108 << 32 | 807): 316, r_longlong( 76 << 32 | 780): 317, r_longlong( 108 << 32 | 780): 318, r_longlong( 78 << 32 | 769): 323, r_longlong( 110 << 32 | 769): 324, r_longlong( 78 << 32 | 807): 325, r_longlong( 110 << 32 | 807): 326, r_longlong( 78 << 32 | 780): 327, r_longlong( 110 << 32 | 780): 328, 
r_longlong( 79 << 32 | 772): 332, r_longlong( 111 << 32 | 772): 333, r_longlong( 79 << 32 | 774): 334, r_longlong( 111 << 32 | 774): 335, r_longlong( 79 << 32 | 779): 336, r_longlong( 111 << 32 | 779): 337, r_longlong( 82 << 32 | 769): 340, r_longlong( 114 << 32 | 769): 341, r_longlong( 82 << 32 | 807): 342, r_longlong( 114 << 32 | 807): 343, r_longlong( 82 << 32 | 780): 344, r_longlong( 114 << 32 | 780): 345, r_longlong( 83 << 32 | 769): 346, r_longlong( 115 << 32 | 769): 347, r_longlong( 83 << 32 | 770): 348, r_longlong( 115 << 32 | 770): 349, r_longlong( 83 << 32 | 807): 350, r_longlong( 115 << 32 | 807): 351, r_longlong( 83 << 32 | 780): 352, r_longlong( 115 << 32 | 780): 353, r_longlong( 84 << 32 | 807): 354, r_longlong( 116 << 32 | 807): 355, r_longlong( 84 << 32 | 780): 356, r_longlong( 116 << 32 | 780): 357, r_longlong( 85 << 32 | 771): 360, r_longlong( 117 << 32 | 771): 361, r_longlong( 85 << 32 | 772): 362, r_longlong( 117 << 32 | 772): 363, r_longlong( 85 << 32 | 774): 364, r_longlong( 117 << 32 | 774): 365, r_longlong( 85 << 32 | 778): 366, r_longlong( 117 << 32 | 778): 367, r_longlong( 85 << 32 | 779): 368, r_longlong( 117 << 32 | 779): 369, r_longlong( 85 << 32 | 808): 370, r_longlong( 117 << 32 | 808): 371, r_longlong( 87 << 32 | 770): 372, r_longlong( 119 << 32 | 770): 373, r_longlong( 89 << 32 | 770): 374, r_longlong( 121 << 32 | 770): 375, r_longlong( 89 << 32 | 776): 376, r_longlong( 90 << 32 | 769): 377, r_longlong( 122 << 32 | 769): 378, r_longlong( 90 << 32 | 775): 379, r_longlong( 122 << 32 | 775): 380, r_longlong( 90 << 32 | 780): 381, r_longlong( 122 << 32 | 780): 382, r_longlong( 79 << 32 | 795): 416, r_longlong( 111 << 32 | 795): 417, r_longlong( 85 << 32 | 795): 431, r_longlong( 117 << 32 | 795): 432, r_longlong( 65 << 32 | 780): 461, r_longlong( 97 << 32 | 780): 462, r_longlong( 73 << 32 | 780): 463, r_longlong( 105 << 32 | 780): 464, r_longlong( 79 << 32 | 780): 465, r_longlong( 111 << 32 | 780): 466, r_longlong( 85 << 32 | 780): 467, 
r_longlong( 117 << 32 | 780): 468, r_longlong( 220 << 32 | 772): 469, r_longlong( 252 << 32 | 772): 470, r_longlong( 220 << 32 | 769): 471, r_longlong( 252 << 32 | 769): 472, r_longlong( 220 << 32 | 780): 473, r_longlong( 252 << 32 | 780): 474, r_longlong( 220 << 32 | 768): 475, r_longlong( 252 << 32 | 768): 476, r_longlong( 196 << 32 | 772): 478, r_longlong( 228 << 32 | 772): 479, r_longlong( 550 << 32 | 772): 480, r_longlong( 551 << 32 | 772): 481, r_longlong( 198 << 32 | 772): 482, r_longlong( 230 << 32 | 772): 483, r_longlong( 71 << 32 | 780): 486, r_longlong( 103 << 32 | 780): 487, r_longlong( 75 << 32 | 780): 488, r_longlong( 107 << 32 | 780): 489, r_longlong( 79 << 32 | 808): 490, r_longlong( 111 << 32 | 808): 491, r_longlong( 490 << 32 | 772): 492, r_longlong( 491 << 32 | 772): 493, r_longlong( 439 << 32 | 780): 494, r_longlong( 658 << 32 | 780): 495, r_longlong( 106 << 32 | 780): 496, r_longlong( 71 << 32 | 769): 500, r_longlong( 103 << 32 | 769): 501, r_longlong( 78 << 32 | 768): 504, r_longlong( 110 << 32 | 768): 505, r_longlong( 197 << 32 | 769): 506, r_longlong( 229 << 32 | 769): 507, r_longlong( 198 << 32 | 769): 508, r_longlong( 230 << 32 | 769): 509, r_longlong( 216 << 32 | 769): 510, r_longlong( 248 << 32 | 769): 511, r_longlong( 65 << 32 | 783): 512, r_longlong( 97 << 32 | 783): 513, r_longlong( 65 << 32 | 785): 514, r_longlong( 97 << 32 | 785): 515, r_longlong( 69 << 32 | 783): 516, r_longlong( 101 << 32 | 783): 517, r_longlong( 69 << 32 | 785): 518, r_longlong( 101 << 32 | 785): 519, r_longlong( 73 << 32 | 783): 520, r_longlong( 105 << 32 | 783): 521, r_longlong( 73 << 32 | 785): 522, r_longlong( 105 << 32 | 785): 523, r_longlong( 79 << 32 | 783): 524, r_longlong( 111 << 32 | 783): 525, r_longlong( 79 << 32 | 785): 526, r_longlong( 111 << 32 | 785): 527, r_longlong( 82 << 32 | 783): 528, r_longlong( 114 << 32 | 783): 529, r_longlong( 82 << 32 | 785): 530, r_longlong( 114 << 32 | 785): 531, r_longlong( 85 << 32 | 783): 532, r_longlong( 117 << 32 
| 783): 533, r_longlong( 85 << 32 | 785): 534, r_longlong( 117 << 32 | 785): 535, r_longlong( 83 << 32 | 806): 536, r_longlong( 115 << 32 | 806): 537, r_longlong( 84 << 32 | 806): 538, r_longlong( 116 << 32 | 806): 539, r_longlong( 72 << 32 | 780): 542, r_longlong( 104 << 32 | 780): 543, r_longlong( 65 << 32 | 775): 550, r_longlong( 97 << 32 | 775): 551, r_longlong( 69 << 32 | 807): 552, r_longlong( 101 << 32 | 807): 553, r_longlong( 214 << 32 | 772): 554, r_longlong( 246 << 32 | 772): 555, r_longlong( 213 << 32 | 772): 556, r_longlong( 245 << 32 | 772): 557, r_longlong( 79 << 32 | 775): 558, r_longlong( 111 << 32 | 775): 559, r_longlong( 558 << 32 | 772): 560, r_longlong( 559 << 32 | 772): 561, r_longlong( 89 << 32 | 772): 562, r_longlong( 121 << 32 | 772): 563, r_longlong( 168 << 32 | 769): 901, r_longlong( 913 << 32 | 769): 902, r_longlong( 917 << 32 | 769): 904, r_longlong( 919 << 32 | 769): 905, r_longlong( 921 << 32 | 769): 906, r_longlong( 927 << 32 | 769): 908, r_longlong( 933 << 32 | 769): 910, r_longlong( 937 << 32 | 769): 911, r_longlong( 970 << 32 | 769): 912, r_longlong( 921 << 32 | 776): 938, r_longlong( 933 << 32 | 776): 939, r_longlong( 945 << 32 | 769): 940, r_longlong( 949 << 32 | 769): 941, r_longlong( 951 << 32 | 769): 942, r_longlong( 953 << 32 | 769): 943, r_longlong( 971 << 32 | 769): 944, r_longlong( 953 << 32 | 776): 970, r_longlong( 965 << 32 | 776): 971, r_longlong( 959 << 32 | 769): 972, r_longlong( 965 << 32 | 769): 973, r_longlong( 969 << 32 | 769): 974, r_longlong( 978 << 32 | 769): 979, r_longlong( 978 << 32 | 776): 980, r_longlong( 1045 << 32 | 768): 1024, r_longlong( 1045 << 32 | 776): 1025, r_longlong( 1043 << 32 | 769): 1027, r_longlong( 1030 << 32 | 776): 1031, r_longlong( 1050 << 32 | 769): 1036, r_longlong( 1048 << 32 | 768): 1037, r_longlong( 1059 << 32 | 774): 1038, r_longlong( 1048 << 32 | 774): 1049, r_longlong( 1080 << 32 | 774): 1081, r_longlong( 1077 << 32 | 768): 1104, r_longlong( 1077 << 32 | 776): 1105, r_longlong( 
1075 << 32 | 769): 1107, r_longlong( 1110 << 32 | 776): 1111, r_longlong( 1082 << 32 | 769): 1116, r_longlong( 1080 << 32 | 768): 1117, r_longlong( 1091 << 32 | 774): 1118, r_longlong( 1140 << 32 | 783): 1142, r_longlong( 1141 << 32 | 783): 1143, r_longlong( 1046 << 32 | 774): 1217, r_longlong( 1078 << 32 | 774): 1218, r_longlong( 1040 << 32 | 774): 1232, r_longlong( 1072 << 32 | 774): 1233, r_longlong( 1040 << 32 | 776): 1234, r_longlong( 1072 << 32 | 776): 1235, r_longlong( 1045 << 32 | 774): 1238, r_longlong( 1077 << 32 | 774): 1239, r_longlong( 1240 << 32 | 776): 1242, r_longlong( 1241 << 32 | 776): 1243, r_longlong( 1046 << 32 | 776): 1244, r_longlong( 1078 << 32 | 776): 1245, r_longlong( 1047 << 32 | 776): 1246, r_longlong( 1079 << 32 | 776): 1247, r_longlong( 1048 << 32 | 772): 1250, r_longlong( 1080 << 32 | 772): 1251, r_longlong( 1048 << 32 | 776): 1252, r_longlong( 1080 << 32 | 776): 1253, r_longlong( 1054 << 32 | 776): 1254, r_longlong( 1086 << 32 | 776): 1255, r_longlong( 1256 << 32 | 776): 1258, r_longlong( 1257 << 32 | 776): 1259, r_longlong( 1069 << 32 | 776): 1260, r_longlong( 1101 << 32 | 776): 1261, r_longlong( 1059 << 32 | 772): 1262, r_longlong( 1091 << 32 | 772): 1263, r_longlong( 1059 << 32 | 776): 1264, r_longlong( 1091 << 32 | 776): 1265, r_longlong( 1059 << 32 | 779): 1266, r_longlong( 1091 << 32 | 779): 1267, r_longlong( 1063 << 32 | 776): 1268, r_longlong( 1095 << 32 | 776): 1269, r_longlong( 1067 << 32 | 776): 1272, r_longlong( 1099 << 32 | 776): 1273, r_longlong( 1575 << 32 | 1619): 1570, r_longlong( 1575 << 32 | 1620): 1571, r_longlong( 1608 << 32 | 1620): 1572, r_longlong( 1575 << 32 | 1621): 1573, r_longlong( 1610 << 32 | 1620): 1574, r_longlong( 1749 << 32 | 1620): 1728, r_longlong( 1729 << 32 | 1620): 1730, r_longlong( 1746 << 32 | 1620): 1747, r_longlong( 2344 << 32 | 2364): 2345, r_longlong( 2352 << 32 | 2364): 2353, r_longlong( 2355 << 32 | 2364): 2356, r_longlong( 2503 << 32 | 2494): 2507, r_longlong( 2503 << 32 | 2519): 2508, 
r_longlong( 2887 << 32 | 2902): 2888, r_longlong( 2887 << 32 | 2878): 2891, r_longlong( 2887 << 32 | 2903): 2892, r_longlong( 2962 << 32 | 3031): 2964, r_longlong( 3014 << 32 | 3006): 3018, r_longlong( 3015 << 32 | 3006): 3019, r_longlong( 3014 << 32 | 3031): 3020, r_longlong( 3142 << 32 | 3158): 3144, r_longlong( 3263 << 32 | 3285): 3264, r_longlong( 3270 << 32 | 3285): 3271, r_longlong( 3270 << 32 | 3286): 3272, r_longlong( 3270 << 32 | 3266): 3274, r_longlong( 3274 << 32 | 3285): 3275, r_longlong( 3398 << 32 | 3390): 3402, r_longlong( 3399 << 32 | 3390): 3403, r_longlong( 3398 << 32 | 3415): 3404, r_longlong( 3545 << 32 | 3530): 3546, r_longlong( 3545 << 32 | 3535): 3548, r_longlong( 3548 << 32 | 3530): 3549, r_longlong( 3545 << 32 | 3551): 3550, r_longlong( 4133 << 32 | 4142): 4134, r_longlong( 6917 << 32 | 6965): 6918, r_longlong( 6919 << 32 | 6965): 6920, r_longlong( 6921 << 32 | 6965): 6922, r_longlong( 6923 << 32 | 6965): 6924, r_longlong( 6925 << 32 | 6965): 6926, r_longlong( 6929 << 32 | 6965): 6930, r_longlong( 6970 << 32 | 6965): 6971, r_longlong( 6972 << 32 | 6965): 6973, r_longlong( 6974 << 32 | 6965): 6976, r_longlong( 6975 << 32 | 6965): 6977, r_longlong( 6978 << 32 | 6965): 6979, r_longlong( 65 << 32 | 805): 7680, r_longlong( 97 << 32 | 805): 7681, r_longlong( 66 << 32 | 775): 7682, r_longlong( 98 << 32 | 775): 7683, r_longlong( 66 << 32 | 803): 7684, r_longlong( 98 << 32 | 803): 7685, r_longlong( 66 << 32 | 817): 7686, r_longlong( 98 << 32 | 817): 7687, r_longlong( 199 << 32 | 769): 7688, r_longlong( 231 << 32 | 769): 7689, r_longlong( 68 << 32 | 775): 7690, r_longlong( 100 << 32 | 775): 7691, r_longlong( 68 << 32 | 803): 7692, r_longlong( 100 << 32 | 803): 7693, r_longlong( 68 << 32 | 817): 7694, r_longlong( 100 << 32 | 817): 7695, r_longlong( 68 << 32 | 807): 7696, r_longlong( 100 << 32 | 807): 7697, r_longlong( 68 << 32 | 813): 7698, r_longlong( 100 << 32 | 813): 7699, r_longlong( 274 << 32 | 768): 7700, r_longlong( 275 << 32 | 768): 7701, 
r_longlong( 274 << 32 | 769): 7702, r_longlong( 275 << 32 | 769): 7703, r_longlong( 69 << 32 | 813): 7704, r_longlong( 101 << 32 | 813): 7705, r_longlong( 69 << 32 | 816): 7706, r_longlong( 101 << 32 | 816): 7707, r_longlong( 552 << 32 | 774): 7708, r_longlong( 553 << 32 | 774): 7709, r_longlong( 70 << 32 | 775): 7710, r_longlong( 102 << 32 | 775): 7711, r_longlong( 71 << 32 | 772): 7712, r_longlong( 103 << 32 | 772): 7713, r_longlong( 72 << 32 | 775): 7714, r_longlong( 104 << 32 | 775): 7715, r_longlong( 72 << 32 | 803): 7716, r_longlong( 104 << 32 | 803): 7717, r_longlong( 72 << 32 | 776): 7718, r_longlong( 104 << 32 | 776): 7719, r_longlong( 72 << 32 | 807): 7720, r_longlong( 104 << 32 | 807): 7721, r_longlong( 72 << 32 | 814): 7722, r_longlong( 104 << 32 | 814): 7723, r_longlong( 73 << 32 | 816): 7724, r_longlong( 105 << 32 | 816): 7725, r_longlong( 207 << 32 | 769): 7726, r_longlong( 239 << 32 | 769): 7727, r_longlong( 75 << 32 | 769): 7728, r_longlong( 107 << 32 | 769): 7729, r_longlong( 75 << 32 | 803): 7730, r_longlong( 107 << 32 | 803): 7731, r_longlong( 75 << 32 | 817): 7732, r_longlong( 107 << 32 | 817): 7733, r_longlong( 76 << 32 | 803): 7734, r_longlong( 108 << 32 | 803): 7735, r_longlong( 7734 << 32 | 772): 7736, r_longlong( 7735 << 32 | 772): 7737, r_longlong( 76 << 32 | 817): 7738, r_longlong( 108 << 32 | 817): 7739, r_longlong( 76 << 32 | 813): 7740, r_longlong( 108 << 32 | 813): 7741, r_longlong( 77 << 32 | 769): 7742, r_longlong( 109 << 32 | 769): 7743, r_longlong( 77 << 32 | 775): 7744, r_longlong( 109 << 32 | 775): 7745, r_longlong( 77 << 32 | 803): 7746, r_longlong( 109 << 32 | 803): 7747, r_longlong( 78 << 32 | 775): 7748, r_longlong( 110 << 32 | 775): 7749, r_longlong( 78 << 32 | 803): 7750, r_longlong( 110 << 32 | 803): 7751, r_longlong( 78 << 32 | 817): 7752, r_longlong( 110 << 32 | 817): 7753, r_longlong( 78 << 32 | 813): 7754, r_longlong( 110 << 32 | 813): 7755, r_longlong( 213 << 32 | 769): 7756, r_longlong( 245 << 32 | 769): 7757, 
r_longlong( 213 << 32 | 776): 7758, r_longlong( 245 << 32 | 776): 7759, r_longlong( 332 << 32 | 768): 7760, r_longlong( 333 << 32 | 768): 7761, r_longlong( 332 << 32 | 769): 7762, r_longlong( 333 << 32 | 769): 7763, r_longlong( 80 << 32 | 769): 7764, r_longlong( 112 << 32 | 769): 7765, r_longlong( 80 << 32 | 775): 7766, r_longlong( 112 << 32 | 775): 7767, r_longlong( 82 << 32 | 775): 7768, r_longlong( 114 << 32 | 775): 7769, r_longlong( 82 << 32 | 803): 7770, r_longlong( 114 << 32 | 803): 7771, r_longlong( 7770 << 32 | 772): 7772, r_longlong( 7771 << 32 | 772): 7773, r_longlong( 82 << 32 | 817): 7774, r_longlong( 114 << 32 | 817): 7775, r_longlong( 83 << 32 | 775): 7776, r_longlong( 115 << 32 | 775): 7777, r_longlong( 83 << 32 | 803): 7778, r_longlong( 115 << 32 | 803): 7779, r_longlong( 346 << 32 | 775): 7780, r_longlong( 347 << 32 | 775): 7781, r_longlong( 352 << 32 | 775): 7782, r_longlong( 353 << 32 | 775): 7783, r_longlong( 7778 << 32 | 775): 7784, r_longlong( 7779 << 32 | 775): 7785, r_longlong( 84 << 32 | 775): 7786, r_longlong( 116 << 32 | 775): 7787, r_longlong( 84 << 32 | 803): 7788, r_longlong( 116 << 32 | 803): 7789, r_longlong( 84 << 32 | 817): 7790, r_longlong( 116 << 32 | 817): 7791, r_longlong( 84 << 32 | 813): 7792, r_longlong( 116 << 32 | 813): 7793, r_longlong( 85 << 32 | 804): 7794, r_longlong( 117 << 32 | 804): 7795, r_longlong( 85 << 32 | 816): 7796, r_longlong( 117 << 32 | 816): 7797, r_longlong( 85 << 32 | 813): 7798, r_longlong( 117 << 32 | 813): 7799, r_longlong( 360 << 32 | 769): 7800, r_longlong( 361 << 32 | 769): 7801, r_longlong( 362 << 32 | 776): 7802, r_longlong( 363 << 32 | 776): 7803, r_longlong( 86 << 32 | 771): 7804, r_longlong( 118 << 32 | 771): 7805, r_longlong( 86 << 32 | 803): 7806, r_longlong( 118 << 32 | 803): 7807, r_longlong( 87 << 32 | 768): 7808, r_longlong( 119 << 32 | 768): 7809, r_longlong( 87 << 32 | 769): 7810, r_longlong( 119 << 32 | 769): 7811, r_longlong( 87 << 32 | 776): 7812, r_longlong( 119 << 32 | 776): 
7813, r_longlong( 87 << 32 | 775): 7814, r_longlong( 119 << 32 | 775): 7815, r_longlong( 87 << 32 | 803): 7816, r_longlong( 119 << 32 | 803): 7817, r_longlong( 88 << 32 | 775): 7818, r_longlong( 120 << 32 | 775): 7819, r_longlong( 88 << 32 | 776): 7820, r_longlong( 120 << 32 | 776): 7821, r_longlong( 89 << 32 | 775): 7822, r_longlong( 121 << 32 | 775): 7823, r_longlong( 90 << 32 | 770): 7824, r_longlong( 122 << 32 | 770): 7825, r_longlong( 90 << 32 | 803): 7826, r_longlong( 122 << 32 | 803): 7827, r_longlong( 90 << 32 | 817): 7828, r_longlong( 122 << 32 | 817): 7829, r_longlong( 104 << 32 | 817): 7830, r_longlong( 116 << 32 | 776): 7831, r_longlong( 119 << 32 | 778): 7832, r_longlong( 121 << 32 | 778): 7833, r_longlong( 383 << 32 | 775): 7835, r_longlong( 65 << 32 | 803): 7840, r_longlong( 97 << 32 | 803): 7841, r_longlong( 65 << 32 | 777): 7842, r_longlong( 97 << 32 | 777): 7843, r_longlong( 194 << 32 | 769): 7844, r_longlong( 226 << 32 | 769): 7845, r_longlong( 194 << 32 | 768): 7846, r_longlong( 226 << 32 | 768): 7847, r_longlong( 194 << 32 | 777): 7848, r_longlong( 226 << 32 | 777): 7849, r_longlong( 194 << 32 | 771): 7850, r_longlong( 226 << 32 | 771): 7851, r_longlong( 7840 << 32 | 770): 7852, r_longlong( 7841 << 32 | 770): 7853, r_longlong( 258 << 32 | 769): 7854, r_longlong( 259 << 32 | 769): 7855, r_longlong( 258 << 32 | 768): 7856, r_longlong( 259 << 32 | 768): 7857, r_longlong( 258 << 32 | 777): 7858, r_longlong( 259 << 32 | 777): 7859, r_longlong( 258 << 32 | 771): 7860, r_longlong( 259 << 32 | 771): 7861, r_longlong( 7840 << 32 | 774): 7862, r_longlong( 7841 << 32 | 774): 7863, r_longlong( 69 << 32 | 803): 7864, r_longlong( 101 << 32 | 803): 7865, r_longlong( 69 << 32 | 777): 7866, r_longlong( 101 << 32 | 777): 7867, r_longlong( 69 << 32 | 771): 7868, r_longlong( 101 << 32 | 771): 7869, r_longlong( 202 << 32 | 769): 7870, r_longlong( 234 << 32 | 769): 7871, r_longlong( 202 << 32 | 768): 7872, r_longlong( 234 << 32 | 768): 7873, r_longlong( 202 << 32 | 
777): 7874, r_longlong( 234 << 32 | 777): 7875, r_longlong( 202 << 32 | 771): 7876, r_longlong( 234 << 32 | 771): 7877, r_longlong( 7864 << 32 | 770): 7878, r_longlong( 7865 << 32 | 770): 7879, r_longlong( 73 << 32 | 777): 7880, r_longlong( 105 << 32 | 777): 7881, r_longlong( 73 << 32 | 803): 7882, r_longlong( 105 << 32 | 803): 7883, r_longlong( 79 << 32 | 803): 7884, r_longlong( 111 << 32 | 803): 7885, r_longlong( 79 << 32 | 777): 7886, r_longlong( 111 << 32 | 777): 7887, r_longlong( 212 << 32 | 769): 7888, r_longlong( 244 << 32 | 769): 7889, r_longlong( 212 << 32 | 768): 7890, r_longlong( 244 << 32 | 768): 7891, r_longlong( 212 << 32 | 777): 7892, r_longlong( 244 << 32 | 777): 7893, r_longlong( 212 << 32 | 771): 7894, r_longlong( 244 << 32 | 771): 7895, r_longlong( 7884 << 32 | 770): 7896, r_longlong( 7885 << 32 | 770): 7897, r_longlong( 416 << 32 | 769): 7898, r_longlong( 417 << 32 | 769): 7899, r_longlong( 416 << 32 | 768): 7900, r_longlong( 417 << 32 | 768): 7901, r_longlong( 416 << 32 | 777): 7902, r_longlong( 417 << 32 | 777): 7903, r_longlong( 416 << 32 | 771): 7904, r_longlong( 417 << 32 | 771): 7905, r_longlong( 416 << 32 | 803): 7906, r_longlong( 417 << 32 | 803): 7907, r_longlong( 85 << 32 | 803): 7908, r_longlong( 117 << 32 | 803): 7909, r_longlong( 85 << 32 | 777): 7910, r_longlong( 117 << 32 | 777): 7911, r_longlong( 431 << 32 | 769): 7912, r_longlong( 432 << 32 | 769): 7913, r_longlong( 431 << 32 | 768): 7914, r_longlong( 432 << 32 | 768): 7915, r_longlong( 431 << 32 | 777): 7916, r_longlong( 432 << 32 | 777): 7917, r_longlong( 431 << 32 | 771): 7918, r_longlong( 432 << 32 | 771): 7919, r_longlong( 431 << 32 | 803): 7920, r_longlong( 432 << 32 | 803): 7921, r_longlong( 89 << 32 | 768): 7922, r_longlong( 121 << 32 | 768): 7923, r_longlong( 89 << 32 | 803): 7924, r_longlong( 121 << 32 | 803): 7925, r_longlong( 89 << 32 | 777): 7926, r_longlong( 121 << 32 | 777): 7927, r_longlong( 89 << 32 | 771): 7928, r_longlong( 121 << 32 | 771): 7929, r_longlong( 
945 << 32 | 787): 7936, r_longlong( 945 << 32 | 788): 7937, r_longlong( 7936 << 32 | 768): 7938, r_longlong( 7937 << 32 | 768): 7939, r_longlong( 7936 << 32 | 769): 7940, r_longlong( 7937 << 32 | 769): 7941, r_longlong( 7936 << 32 | 834): 7942, r_longlong( 7937 << 32 | 834): 7943, r_longlong( 913 << 32 | 787): 7944, r_longlong( 913 << 32 | 788): 7945, r_longlong( 7944 << 32 | 768): 7946, r_longlong( 7945 << 32 | 768): 7947, r_longlong( 7944 << 32 | 769): 7948, r_longlong( 7945 << 32 | 769): 7949, r_longlong( 7944 << 32 | 834): 7950, r_longlong( 7945 << 32 | 834): 7951, r_longlong( 949 << 32 | 787): 7952, r_longlong( 949 << 32 | 788): 7953, r_longlong( 7952 << 32 | 768): 7954, r_longlong( 7953 << 32 | 768): 7955, r_longlong( 7952 << 32 | 769): 7956, r_longlong( 7953 << 32 | 769): 7957, r_longlong( 917 << 32 | 787): 7960, r_longlong( 917 << 32 | 788): 7961, r_longlong( 7960 << 32 | 768): 7962, r_longlong( 7961 << 32 | 768): 7963, r_longlong( 7960 << 32 | 769): 7964, r_longlong( 7961 << 32 | 769): 7965, r_longlong( 951 << 32 | 787): 7968, r_longlong( 951 << 32 | 788): 7969, r_longlong( 7968 << 32 | 768): 7970, r_longlong( 7969 << 32 | 768): 7971, r_longlong( 7968 << 32 | 769): 7972, r_longlong( 7969 << 32 | 769): 7973, r_longlong( 7968 << 32 | 834): 7974, r_longlong( 7969 << 32 | 834): 7975, r_longlong( 919 << 32 | 787): 7976, r_longlong( 919 << 32 | 788): 7977, r_longlong( 7976 << 32 | 768): 7978, r_longlong( 7977 << 32 | 768): 7979, r_longlong( 7976 << 32 | 769): 7980, r_longlong( 7977 << 32 | 769): 7981, r_longlong( 7976 << 32 | 834): 7982, r_longlong( 7977 << 32 | 834): 7983, r_longlong( 953 << 32 | 787): 7984, r_longlong( 953 << 32 | 788): 7985, r_longlong( 7984 << 32 | 768): 7986, r_longlong( 7985 << 32 | 768): 7987, r_longlong( 7984 << 32 | 769): 7988, r_longlong( 7985 << 32 | 769): 7989, r_longlong( 7984 << 32 | 834): 7990, r_longlong( 7985 << 32 | 834): 7991, r_longlong( 921 << 32 | 787): 7992, r_longlong( 921 << 32 | 788): 7993, r_longlong( 7992 << 32 | 
768): 7994, r_longlong( 7993 << 32 | 768): 7995, r_longlong( 7992 << 32 | 769): 7996, r_longlong( 7993 << 32 | 769): 7997, r_longlong( 7992 << 32 | 834): 7998, r_longlong( 7993 << 32 | 834): 7999, r_longlong( 959 << 32 | 787): 8000, r_longlong( 959 << 32 | 788): 8001, r_longlong( 8000 << 32 | 768): 8002, r_longlong( 8001 << 32 | 768): 8003, r_longlong( 8000 << 32 | 769): 8004, r_longlong( 8001 << 32 | 769): 8005, r_longlong( 927 << 32 | 787): 8008, r_longlong( 927 << 32 | 788): 8009, r_longlong( 8008 << 32 | 768): 8010, r_longlong( 8009 << 32 | 768): 8011, r_longlong( 8008 << 32 | 769): 8012, r_longlong( 8009 << 32 | 769): 8013, r_longlong( 965 << 32 | 787): 8016, r_longlong( 965 << 32 | 788): 8017, r_longlong( 8016 << 32 | 768): 8018, r_longlong( 8017 << 32 | 768): 8019, r_longlong( 8016 << 32 | 769): 8020, r_longlong( 8017 << 32 | 769): 8021, r_longlong( 8016 << 32 | 834): 8022, r_longlong( 8017 << 32 | 834): 8023, r_longlong( 933 << 32 | 788): 8025, r_longlong( 8025 << 32 | 768): 8027, r_longlong( 8025 << 32 | 769): 8029, r_longlong( 8025 << 32 | 834): 8031, r_longlong( 969 << 32 | 787): 8032, r_longlong( 969 << 32 | 788): 8033, r_longlong( 8032 << 32 | 768): 8034, r_longlong( 8033 << 32 | 768): 8035, r_longlong( 8032 << 32 | 769): 8036, r_longlong( 8033 << 32 | 769): 8037, r_longlong( 8032 << 32 | 834): 8038, r_longlong( 8033 << 32 | 834): 8039, r_longlong( 937 << 32 | 787): 8040, r_longlong( 937 << 32 | 788): 8041, r_longlong( 8040 << 32 | 768): 8042, r_longlong( 8041 << 32 | 768): 8043, r_longlong( 8040 << 32 | 769): 8044, r_longlong( 8041 << 32 | 769): 8045, r_longlong( 8040 << 32 | 834): 8046, r_longlong( 8041 << 32 | 834): 8047, r_longlong( 945 << 32 | 768): 8048, r_longlong( 949 << 32 | 768): 8050, r_longlong( 951 << 32 | 768): 8052, r_longlong( 953 << 32 | 768): 8054, r_longlong( 959 << 32 | 768): 8056, r_longlong( 965 << 32 | 768): 8058, r_longlong( 969 << 32 | 768): 8060, r_longlong( 7936 << 32 | 837): 8064, r_longlong( 7937 << 32 | 837): 8065, 
r_longlong( 7938 << 32 | 837): 8066, r_longlong( 7939 << 32 | 837): 8067, r_longlong( 7940 << 32 | 837): 8068, r_longlong( 7941 << 32 | 837): 8069, r_longlong( 7942 << 32 | 837): 8070, r_longlong( 7943 << 32 | 837): 8071, r_longlong( 7944 << 32 | 837): 8072, r_longlong( 7945 << 32 | 837): 8073, r_longlong( 7946 << 32 | 837): 8074, r_longlong( 7947 << 32 | 837): 8075, r_longlong( 7948 << 32 | 837): 8076, r_longlong( 7949 << 32 | 837): 8077, r_longlong( 7950 << 32 | 837): 8078, r_longlong( 7951 << 32 | 837): 8079, r_longlong( 7968 << 32 | 837): 8080, r_longlong( 7969 << 32 | 837): 8081, r_longlong( 7970 << 32 | 837): 8082, r_longlong( 7971 << 32 | 837): 8083, r_longlong( 7972 << 32 | 837): 8084, r_longlong( 7973 << 32 | 837): 8085, r_longlong( 7974 << 32 | 837): 8086, r_longlong( 7975 << 32 | 837): 8087, r_longlong( 7976 << 32 | 837): 8088, r_longlong( 7977 << 32 | 837): 8089, r_longlong( 7978 << 32 | 837): 8090, r_longlong( 7979 << 32 | 837): 8091, r_longlong( 7980 << 32 | 837): 8092, r_longlong( 7981 << 32 | 837): 8093, r_longlong( 7982 << 32 | 837): 8094, r_longlong( 7983 << 32 | 837): 8095, r_longlong( 8032 << 32 | 837): 8096, r_longlong( 8033 << 32 | 837): 8097, r_longlong( 8034 << 32 | 837): 8098, r_longlong( 8035 << 32 | 837): 8099, r_longlong( 8036 << 32 | 837): 8100, r_longlong( 8037 << 32 | 837): 8101, r_longlong( 8038 << 32 | 837): 8102, r_longlong( 8039 << 32 | 837): 8103, r_longlong( 8040 << 32 | 837): 8104, r_longlong( 8041 << 32 | 837): 8105, r_longlong( 8042 << 32 | 837): 8106, r_longlong( 8043 << 32 | 837): 8107, r_longlong( 8044 << 32 | 837): 8108, r_longlong( 8045 << 32 | 837): 8109, r_longlong( 8046 << 32 | 837): 8110, r_longlong( 8047 << 32 | 837): 8111, r_longlong( 945 << 32 | 774): 8112, r_longlong( 945 << 32 | 772): 8113, r_longlong( 8048 << 32 | 837): 8114, r_longlong( 945 << 32 | 837): 8115, r_longlong( 940 << 32 | 837): 8116, r_longlong( 945 << 32 | 834): 8118, r_longlong( 8118 << 32 | 837): 8119, r_longlong( 913 << 32 | 774): 8120, 
r_longlong( 913 << 32 | 772): 8121, r_longlong( 913 << 32 | 768): 8122, r_longlong( 913 << 32 | 837): 8124, r_longlong( 168 << 32 | 834): 8129, r_longlong( 8052 << 32 | 837): 8130, r_longlong( 951 << 32 | 837): 8131, r_longlong( 942 << 32 | 837): 8132, r_longlong( 951 << 32 | 834): 8134, r_longlong( 8134 << 32 | 837): 8135, r_longlong( 917 << 32 | 768): 8136, r_longlong( 919 << 32 | 768): 8138, r_longlong( 919 << 32 | 837): 8140, r_longlong( 8127 << 32 | 768): 8141, r_longlong( 8127 << 32 | 769): 8142, r_longlong( 8127 << 32 | 834): 8143, r_longlong( 953 << 32 | 774): 8144, r_longlong( 953 << 32 | 772): 8145, r_longlong( 970 << 32 | 768): 8146, r_longlong( 953 << 32 | 834): 8150, r_longlong( 970 << 32 | 834): 8151, r_longlong( 921 << 32 | 774): 8152, r_longlong( 921 << 32 | 772): 8153, r_longlong( 921 << 32 | 768): 8154, r_longlong( 8190 << 32 | 768): 8157, r_longlong( 8190 << 32 | 769): 8158, r_longlong( 8190 << 32 | 834): 8159, r_longlong( 965 << 32 | 774): 8160, r_longlong( 965 << 32 | 772): 8161, r_longlong( 971 << 32 | 768): 8162, r_longlong( 961 << 32 | 787): 8164, r_longlong( 961 << 32 | 788): 8165, r_longlong( 965 << 32 | 834): 8166, r_longlong( 971 << 32 | 834): 8167, r_longlong( 933 << 32 | 774): 8168, r_longlong( 933 << 32 | 772): 8169, r_longlong( 933 << 32 | 768): 8170, r_longlong( 929 << 32 | 788): 8172, r_longlong( 168 << 32 | 768): 8173, r_longlong( 8060 << 32 | 837): 8178, r_longlong( 969 << 32 | 837): 8179, r_longlong( 974 << 32 | 837): 8180, r_longlong( 969 << 32 | 834): 8182, r_longlong( 8182 << 32 | 837): 8183, r_longlong( 927 << 32 | 768): 8184, r_longlong( 937 << 32 | 768): 8186, r_longlong( 937 << 32 | 837): 8188, r_longlong( 8592 << 32 | 824): 8602, r_longlong( 8594 << 32 | 824): 8603, r_longlong( 8596 << 32 | 824): 8622, r_longlong( 8656 << 32 | 824): 8653, r_longlong( 8660 << 32 | 824): 8654, r_longlong( 8658 << 32 | 824): 8655, r_longlong( 8707 << 32 | 824): 8708, r_longlong( 8712 << 32 | 824): 8713, r_longlong( 8715 << 32 | 824): 8716, 
r_longlong( 8739 << 32 | 824): 8740, r_longlong( 8741 << 32 | 824): 8742, r_longlong( 8764 << 32 | 824): 8769, r_longlong( 8771 << 32 | 824): 8772, r_longlong( 8773 << 32 | 824): 8775, r_longlong( 8776 << 32 | 824): 8777, r_longlong( 61 << 32 | 824): 8800, r_longlong( 8801 << 32 | 824): 8802, r_longlong( 8781 << 32 | 824): 8813, r_longlong( 60 << 32 | 824): 8814, r_longlong( 62 << 32 | 824): 8815, r_longlong( 8804 << 32 | 824): 8816, r_longlong( 8805 << 32 | 824): 8817, r_longlong( 8818 << 32 | 824): 8820, r_longlong( 8819 << 32 | 824): 8821, r_longlong( 8822 << 32 | 824): 8824, r_longlong( 8823 << 32 | 824): 8825, r_longlong( 8826 << 32 | 824): 8832, r_longlong( 8827 << 32 | 824): 8833, r_longlong( 8834 << 32 | 824): 8836, r_longlong( 8835 << 32 | 824): 8837, r_longlong( 8838 << 32 | 824): 8840, r_longlong( 8839 << 32 | 824): 8841, r_longlong( 8866 << 32 | 824): 8876, r_longlong( 8872 << 32 | 824): 8877, r_longlong( 8873 << 32 | 824): 8878, r_longlong( 8875 << 32 | 824): 8879, r_longlong( 8828 << 32 | 824): 8928, r_longlong( 8829 << 32 | 824): 8929, r_longlong( 8849 << 32 | 824): 8930, r_longlong( 8850 << 32 | 824): 8931, r_longlong( 8882 << 32 | 824): 8938, r_longlong( 8883 << 32 | 824): 8939, r_longlong( 8884 << 32 | 824): 8940, r_longlong( 8885 << 32 | 824): 8941, r_longlong(12363 << 32 | 12441): 12364, r_longlong(12365 << 32 | 12441): 12366, r_longlong(12367 << 32 | 12441): 12368, r_longlong(12369 << 32 | 12441): 12370, r_longlong(12371 << 32 | 12441): 12372, r_longlong(12373 << 32 | 12441): 12374, r_longlong(12375 << 32 | 12441): 12376, r_longlong(12377 << 32 | 12441): 12378, r_longlong(12379 << 32 | 12441): 12380, r_longlong(12381 << 32 | 12441): 12382, r_longlong(12383 << 32 | 12441): 12384, r_longlong(12385 << 32 | 12441): 12386, r_longlong(12388 << 32 | 12441): 12389, r_longlong(12390 << 32 | 12441): 12391, r_longlong(12392 << 32 | 12441): 12393, r_longlong(12399 << 32 | 12441): 12400, r_longlong(12399 << 32 | 12442): 12401, r_longlong(12402 << 32 | 
12441): 12403, r_longlong(12402 << 32 | 12442): 12404, r_longlong(12405 << 32 | 12441): 12406, r_longlong(12405 << 32 | 12442): 12407, r_longlong(12408 << 32 | 12441): 12409, r_longlong(12408 << 32 | 12442): 12410, r_longlong(12411 << 32 | 12441): 12412, r_longlong(12411 << 32 | 12442): 12413, r_longlong(12358 << 32 | 12441): 12436, r_longlong(12445 << 32 | 12441): 12446, r_longlong(12459 << 32 | 12441): 12460, r_longlong(12461 << 32 | 12441): 12462, r_longlong(12463 << 32 | 12441): 12464, r_longlong(12465 << 32 | 12441): 12466, r_longlong(12467 << 32 | 12441): 12468, r_longlong(12469 << 32 | 12441): 12470, r_longlong(12471 << 32 | 12441): 12472, r_longlong(12473 << 32 | 12441): 12474, r_longlong(12475 << 32 | 12441): 12476, r_longlong(12477 << 32 | 12441): 12478, r_longlong(12479 << 32 | 12441): 12480, r_longlong(12481 << 32 | 12441): 12482, r_longlong(12484 << 32 | 12441): 12485, r_longlong(12486 << 32 | 12441): 12487, r_longlong(12488 << 32 | 12441): 12489, r_longlong(12495 << 32 | 12441): 12496, r_longlong(12495 << 32 | 12442): 12497, r_longlong(12498 << 32 | 12441): 12499, r_longlong(12498 << 32 | 12442): 12500, r_longlong(12501 << 32 | 12441): 12502, r_longlong(12501 << 32 | 12442): 12503, r_longlong(12504 << 32 | 12441): 12505, r_longlong(12504 << 32 | 12442): 12506, r_longlong(12507 << 32 | 12441): 12508, r_longlong(12507 << 32 | 12442): 12509, r_longlong(12454 << 32 | 12441): 12532, r_longlong(12527 << 32 | 12441): 12535, r_longlong(12528 << 32 | 12441): 12536, r_longlong(12529 << 32 | 12441): 12537, r_longlong(12530 << 32 | 12441): 12538, r_longlong(12541 << 32 | 12441): 12542, r_longlong(69785 << 32 | 69818): 69786, r_longlong(69787 << 32 | 69818): 69788, r_longlong(69797 << 32 | 69818): 69803, } _canon_decomposition = { } _canon_decomposition_corrected = { } _compat_decomposition = { 8341: [104], 8342: [107], 8343: [108], 8344: [109], 8345: [110], 8346: [112], 8347: [115], 8348: [116], 127280: [65], 127282: [67], 127283: [68], 127284: [69], 127285: 
[70], 127286: [71], 127287: [72], 127288: [73], 127289: [74], 127290: [75], 127291: [76], 127292: [77], 127294: [79], 127296: [81], 127297: [82], 127299: [84], 127300: [85], 127301: [86], 127303: [88], 127304: [89], 127305: [90], 127311: [87, 67], 127489: [12467, 12467], 127490: [12469], 127538: [31105], 127539: [31354], 127540: [21512], 127541: [28288], 127542: [26377], 127543: [26376], 127544: [30003], 127545: [21106], 127546: [21942], 127568: [24471], 127569: [21487], } _compat_decomposition_corrected = { } def canon_decomposition(code): try: return _canon_decomposition[code] except KeyError: if base_mod is not None and code not in _canon_decomposition_corrected: return base_mod._canon_decomposition.get(code, []) else: return [] def compat_decomposition(code): try: return _compat_decomposition[code] except KeyError: if base_mod is not None and code not in _compat_decomposition_corrected: return base_mod._compat_decomposition.get(code, []) else: return [] _named_sequences = [ u'\u0100\u0300', u'\u0101\u0300', u'E\u0329', u'e\u0329', u'\xc8\u0329', u'\xe8\u0329', u'\xc9\u0329', u'\xe9\u0329', u'\xca\u0304', u'\xea\u0304', u'\xca\u030c', u'\xea\u030c', u'\u012a\u0300', u'\u012b\u0300', u'i\u0307\u0301', u'n\u0360g', u'O\u0329', u'o\u0329', u'\xd2\u0329', u'\xf2\u0329', u'\xd3\u0329', u'\xf3\u0329', u'S\u0329', u's\u0329', u'\u016a\u0300', u'\u016b\u0300', u'\u0104\u0301', u'\u0105\u0301', u'\u0104\u0303', u'\u0105\u0303', u'\u0118\u0301', u'\u0119\u0301', u'\u0118\u0303', u'\u0119\u0303', u'\u0116\u0301', u'\u0117\u0301', u'\u0116\u0303', u'\u0117\u0303', u'i\u0307\u0300', u'i\u0307\u0303', u'\u012e\u0301', u'\u012f\u0307\u0301', u'\u012e\u0303', u'\u012f\u0307\u0303', u'J\u0303', u'j\u0307\u0303', u'L\u0303', u'l\u0303', u'M\u0303', u'm\u0303', u'R\u0303', u'r\u0303', u'\u0172\u0301', u'\u0173\u0301', u'\u0172\u0303', u'\u0173\u0303', u'\u016a\u0301', u'\u016b\u0301', u'\u016a\u0303', u'\u016b\u0303', u'\xe6\u0300', u'\u0254\u0300', u'\u0254\u0301', 
u'\u028c\u0300', u'\u028c\u0301', u'\u0259\u0300', u'\u0259\u0301', u'\u025a\u0300', u'\u025a\u0301', u'\u0995\u09cd\u09b7', u'\u0b95\u0bcd', u'\u0b99\u0bcd', u'\u0b9a\u0bcd', u'\u0b9e\u0bcd', u'\u0b9f\u0bcd', u'\u0ba3\u0bcd', u'\u0ba4\u0bcd', u'\u0ba8\u0bcd', u'\u0baa\u0bcd', u'\u0bae\u0bcd', u'\u0baf\u0bcd', u'\u0bb0\u0bcd', u'\u0bb2\u0bcd', u'\u0bb5\u0bcd', u'\u0bb4\u0bcd', u'\u0bb3\u0bcd', u'\u0bb1\u0bcd', u'\u0ba9\u0bcd', u'\u0b9c\u0bcd', u'\u0bb6\u0bcd', u'\u0bb7\u0bcd', u'\u0bb8\u0bcd', u'\u0bb9\u0bcd', u'\u0b95\u0bcd\u0bb7\u0bcd', u'\u0b95\u0bbe', u'\u0b95\u0bbf', u'\u0b95\u0bc0', u'\u0b95\u0bc1', u'\u0b95\u0bc2', u'\u0b95\u0bc6', u'\u0b95\u0bc7', u'\u0b95\u0bc8', u'\u0b95\u0bca', u'\u0b95\u0bcb', u'\u0b95\u0bcc', u'\u0b99\u0bbe', u'\u0b99\u0bbf', u'\u0b99\u0bc0', u'\u0b99\u0bc1', u'\u0b99\u0bc2', u'\u0b99\u0bc6', u'\u0b99\u0bc7', u'\u0b99\u0bc8', u'\u0b99\u0bca', u'\u0b99\u0bcb', u'\u0b99\u0bcc', u'\u0b9a\u0bbe', u'\u0b9a\u0bbf', u'\u0b9a\u0bc0', u'\u0b9a\u0bc1', u'\u0b9a\u0bc2', u'\u0b9a\u0bc6', u'\u0b9a\u0bc7', u'\u0b9a\u0bc8', u'\u0b9a\u0bca', u'\u0b9a\u0bcb', u'\u0b9a\u0bcc', u'\u0b9e\u0bbe', u'\u0b9e\u0bbf', u'\u0b9e\u0bc0', u'\u0b9e\u0bc1', u'\u0b9e\u0bc2', u'\u0b9e\u0bc6', u'\u0b9e\u0bc7', u'\u0b9e\u0bc8', u'\u0b9e\u0bca', u'\u0b9e\u0bcb', u'\u0b9e\u0bcc', u'\u0b9f\u0bbe', u'\u0b9f\u0bbf', u'\u0b9f\u0bc0', u'\u0b9f\u0bc1', u'\u0b9f\u0bc2', u'\u0b9f\u0bc6', u'\u0b9f\u0bc7', u'\u0b9f\u0bc8', u'\u0b9f\u0bca', u'\u0b9f\u0bcb', u'\u0b9f\u0bcc', u'\u0ba3\u0bbe', u'\u0ba3\u0bbf', u'\u0ba3\u0bc0', u'\u0ba3\u0bc1', u'\u0ba3\u0bc2', u'\u0ba3\u0bc6', u'\u0ba3\u0bc7', u'\u0ba3\u0bc8', u'\u0ba3\u0bca', u'\u0ba3\u0bcb', u'\u0ba3\u0bcc', u'\u0ba4\u0bbe', u'\u0ba4\u0bbf', u'\u0ba4\u0bc0', u'\u0ba4\u0bc1', u'\u0ba4\u0bc2', u'\u0ba4\u0bc6', u'\u0ba4\u0bc7', u'\u0ba4\u0bc8', u'\u0ba4\u0bca', u'\u0ba4\u0bcb', u'\u0ba4\u0bcc', u'\u0ba8\u0bbe', u'\u0ba8\u0bbf', u'\u0ba8\u0bc0', u'\u0ba8\u0bc1', u'\u0ba8\u0bc2', u'\u0ba8\u0bc6', u'\u0ba8\u0bc7', u'\u0ba8\u0bc8', 
u'\u0ba8\u0bca', u'\u0ba8\u0bcb', u'\u0ba8\u0bcc', u'\u0baa\u0bbe', u'\u0baa\u0bbf', u'\u0baa\u0bc0', u'\u0baa\u0bc1', u'\u0baa\u0bc2', u'\u0baa\u0bc6', u'\u0baa\u0bc7', u'\u0baa\u0bc8', u'\u0baa\u0bca', u'\u0baa\u0bcb', u'\u0baa\u0bcc', u'\u0bae\u0bbe', u'\u0bae\u0bbf', u'\u0bae\u0bc0', u'\u0bae\u0bc1', u'\u0bae\u0bc2', u'\u0bae\u0bc6', u'\u0bae\u0bc7', u'\u0bae\u0bc8', u'\u0bae\u0bca', u'\u0bae\u0bcb', u'\u0bae\u0bcc', u'\u0baf\u0bbe', u'\u0baf\u0bbf', u'\u0baf\u0bc0', u'\u0baf\u0bc1', u'\u0baf\u0bc2', u'\u0baf\u0bc6', u'\u0baf\u0bc7', u'\u0baf\u0bc8', u'\u0baf\u0bca', u'\u0baf\u0bcb', u'\u0baf\u0bcc', u'\u0bb0\u0bbe', u'\u0bb0\u0bbf', u'\u0bb0\u0bc0', u'\u0bb0\u0bc1', u'\u0bb0\u0bc2', u'\u0bb0\u0bc6', u'\u0bb0\u0bc7', u'\u0bb0\u0bc8', u'\u0bb0\u0bca', u'\u0bb0\u0bcb', u'\u0bb0\u0bcc', u'\u0bb2\u0bbe', u'\u0bb2\u0bbf', u'\u0bb2\u0bc0', u'\u0bb2\u0bc1', u'\u0bb2\u0bc2', u'\u0bb2\u0bc6', u'\u0bb2\u0bc7', u'\u0bb2\u0bc8', u'\u0bb2\u0bca', u'\u0bb2\u0bcb', u'\u0bb2\u0bcc', u'\u0bb5\u0bbe', u'\u0bb5\u0bbf', u'\u0bb5\u0bc0', u'\u0bb5\u0bc1', u'\u0bb5\u0bc2', u'\u0bb5\u0bc6', u'\u0bb5\u0bc7', u'\u0bb5\u0bc8', u'\u0bb5\u0bca', u'\u0bb5\u0bcb', u'\u0bb5\u0bcc', u'\u0bb4\u0bbe', u'\u0bb4\u0bbf', u'\u0bb4\u0bc0', u'\u0bb4\u0bc1', u'\u0bb4\u0bc2', u'\u0bb4\u0bc6', u'\u0bb4\u0bc7', u'\u0bb4\u0bc8', u'\u0bb4\u0bca', u'\u0bb4\u0bcb', u'\u0bb4\u0bcc', u'\u0bb3\u0bbe', u'\u0bb3\u0bbf', u'\u0bb3\u0bc0', u'\u0bb3\u0bc1', u'\u0bb3\u0bc2', u'\u0bb3\u0bc6', u'\u0bb3\u0bc7', u'\u0bb3\u0bc8', u'\u0bb3\u0bca', u'\u0bb3\u0bcb', u'\u0bb3\u0bcc', u'\u0bb1\u0bbe', u'\u0bb1\u0bbf', u'\u0bb1\u0bc0', u'\u0bb1\u0bc1', u'\u0bb1\u0bc2', u'\u0bb1\u0bc6', u'\u0bb1\u0bc7', u'\u0bb1\u0bc8', u'\u0bb1\u0bca', u'\u0bb1\u0bcb', u'\u0bb1\u0bcc', u'\u0ba9\u0bbe', u'\u0ba9\u0bbf', u'\u0ba9\u0bc0', u'\u0ba9\u0bc1', u'\u0ba9\u0bc2', u'\u0ba9\u0bc6', u'\u0ba9\u0bc7', u'\u0ba9\u0bc8', u'\u0ba9\u0bca', u'\u0ba9\u0bcb', u'\u0ba9\u0bcc', u'\u0b9c\u0bbe', u'\u0b9c\u0bbf', u'\u0b9c\u0bc0', u'\u0b9c\u0bc1', 
u'\u0b9c\u0bc2', u'\u0b9c\u0bc6', u'\u0b9c\u0bc7', u'\u0b9c\u0bc8', u'\u0b9c\u0bca', u'\u0b9c\u0bcb', u'\u0b9c\u0bcc', u'\u0bb6\u0bbe', u'\u0bb6\u0bbf', u'\u0bb6\u0bc0', u'\u0bb6\u0bc1', u'\u0bb6\u0bc2', u'\u0bb6\u0bc6', u'\u0bb6\u0bc7', u'\u0bb6\u0bc8', u'\u0bb6\u0bca', u'\u0bb6\u0bcb', u'\u0bb6\u0bcc', u'\u0bb7\u0bbe', u'\u0bb7\u0bbf', u'\u0bb7\u0bc0', u'\u0bb7\u0bc1', u'\u0bb7\u0bc2', u'\u0bb7\u0bc6', u'\u0bb7\u0bc7', u'\u0bb7\u0bc8', u'\u0bb7\u0bca', u'\u0bb7\u0bcb', u'\u0bb7\u0bcc', u'\u0bb8\u0bbe', u'\u0bb8\u0bbf', u'\u0bb8\u0bc0', u'\u0bb8\u0bc1', u'\u0bb8\u0bc2', u'\u0bb8\u0bc6', u'\u0bb8\u0bc7', u'\u0bb8\u0bc8', u'\u0bb8\u0bca', u'\u0bb8\u0bcb', u'\u0bb8\u0bcc', u'\u0bb9\u0bbe', u'\u0bb9\u0bbf', u'\u0bb9\u0bc0', u'\u0bb9\u0bc1', u'\u0bb9\u0bc2', u'\u0bb9\u0bc6', u'\u0bb9\u0bc7', u'\u0bb9\u0bc8', u'\u0bb9\u0bca', u'\u0bb9\u0bcb', u'\u0bb9\u0bcc', u'\u0b95\u0bcd\u0bb7', u'\u0b95\u0bcd\u0bb7\u0bbe', u'\u0b95\u0bcd\u0bb7\u0bbf', u'\u0b95\u0bcd\u0bb7\u0bc0', u'\u0b95\u0bcd\u0bb7\u0bc1', u'\u0b95\u0bcd\u0bb7\u0bc2', u'\u0b95\u0bcd\u0bb7\u0bc6', u'\u0b95\u0bcd\u0bb7\u0bc7', u'\u0b95\u0bcd\u0bb7\u0bc8', u'\u0b95\u0bcd\u0bb7\u0bca', u'\u0b95\u0bcd\u0bb7\u0bcb', u'\u0b95\u0bcd\u0bb7\u0bcc', u'\u0bb6\u0bcd\u0bb0\u0bc0', u'\u10e3\u0302', u'\u17d2\u1780', u'\u17d2\u1781', u'\u17d2\u1782', u'\u17d2\u1783', u'\u17d2\u1784', u'\u17d2\u1785', u'\u17d2\u1786', u'\u17d2\u1787', u'\u17d2\u1788', u'\u17d2\u1789', u'\u17d2\u178a', u'\u17d2\u178b', u'\u17d2\u178c', u'\u17d2\u178d', u'\u17d2\u178e', u'\u17d2\u178f', u'\u17d2\u1790', u'\u17d2\u1791', u'\u17d2\u1792', u'\u17d2\u1793', u'\u17d2\u1794', u'\u17d2\u1795', u'\u17d2\u1796', u'\u17d2\u1797', u'\u17d2\u1798', u'\u17d2\u1799', u'\u17d2\u179a', u'\u17d2\u179b', u'\u17d2\u179c', u'\u17d2\u179d', u'\u17d2\u179e', u'\u17d2\u179f', u'\u17d2\u17a0', u'\u17d2\u17a1', u'\u17d2\u17a2', u'\u17d2\u17a7', u'\u17d2\u17ab', u'\u17d2\u17ac', u'\u17d2\u17af', u'\u17bb\u17c6', u'\u17b6\u17c6', u'\u304b\u309a', u'\u304d\u309a', 
u'\u304f\u309a', u'\u3051\u309a', u'\u3053\u309a', u'\u30ab\u309a', u'\u30ad\u309a', u'\u30af\u309a', u'\u30b1\u309a', u'\u30b3\u309a', u'\u30bb\u309a', u'\u30c4\u309a', u'\u30c8\u309a', u'\u31f7\u309a', u'\u02e5\u02e9', u'\u02e9\u02e5', ] def lookup_named_sequence(code): if 0 <= code - 983552 < len(_named_sequences): return _named_sequences[code - 983552] else: return None _name_aliases = [ 418, 419, 3294, 3741, 3743, 3747, 3749, 4048, 40981, 65048, 118981, ] def lookup_with_alias(name, with_named_sequence=False): code = lookup(name, with_named_sequence=with_named_sequence) if 0 <= code - 983040 < len(_name_aliases): return _name_aliases[code - 983040] else: return code _casefolds = { } _casefolds_corrected = { } def casefold_lookup(code): try: return _casefolds[code] except KeyError: if base_mod is not None and code not in _casefolds_corrected: return base_mod._casefolds.get(code, None) else: return None _combining = { 1631: 220, 2137: 220, 2138: 220, 2139: 220, 4957: 230, 4958: 230, 7142: 7, 7154: 9, 7155: 9, 7676: 233, 11647: 9, 69702: 9, } _combining_corrected = { } def combining(code): try: return _combining[code] except KeyError: if base_mod is not None and code not in _combining_corrected: return base_mod._combining.get(code, 0) else: return 0
oblique-labs/pyVM
rpython/rlib/unicodedata/unicodedb_6_0_0.py
Python
mit
306,402
[ "CRYSTAL", "Octopus" ]
bdcd08d3b2319d36d581f213b2b517da1a231170d59b2cf8c71fa76660fb8f20
# Note this also shows the final datastructure for ontology.json ntr_assays = { "NTR:0000612": { "assay": ['RNA binding'], "category": [], "developmental": [], "name": "Switchgear", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000762": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "shRNA knockdown followed by RNA-seq", "objectives": [], "organs": [], "preferred_name": "shRNA RNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000763": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "siRNA knockdown followed by RNA-seq", "objectives": [], "organs": [], "preferred_name": "siRNA RNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0001132": { "assay": ['RNA binding'], "category": [], "developmental": [], "name": "RNA Bind-N-Seq", "objectives": [], "organs": [], "preferred_name": "RNA Bind-N-Seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0004774": { "assay": ['DNA accessibility'], "category": [], "developmental": [], "name": "genetic modification followed by DNase-seq", "objectives": [], "organs": [], "preferred_name": "GM DNase-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0003814": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "CRISPR genome editing followed by RNA-seq", "objectives": [], "organs": [], "preferred_name": "CRISPR RNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0004619": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "CRISPRi followed by RNA-seq", "objectives": [], "organs": [], "preferred_name": "CRISPRi RNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000433": { "assay": ['RNA structure'], "category": [], "developmental": [], "name": "in vivo click selective 2-hydroxyl acylation and profiling experiment", "objectives": [], 
"organs": [], "preferred_name": "icSHAPE", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000445": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "long read RNA sequencing", "objectives": [], "organs": [], "preferred_name": "long read RNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000458": { "assay": ['DNA sequencing'], "category": [], "developmental": [], "name": "Clone-seq", "objectives": [], "organs": [], "preferred_name": "Clone-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000513": { "assay": ['3D chromatin structure'], "category": [], "developmental": [], "name": "proximity ligation-assisted ChIP-seq", "objectives": [], "organs": [], "preferred_name": "PLAC-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000520": { "assay": ['CRISPR screen'], "category": [], "developmental": [], "name": "CRISPR screen", "objectives": [], "organs": [], "preferred_name": "CRISPR screen", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000538": { "assay": ['RNA structure'], "category": [], "developmental": [], "name": "in vivo click light activated structural examination of RNA", "objectives": [], "organs": [], "preferred_name": "icLASER", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000544": { "assay": ['3D chromatin structure'], "category": [], "developmental": [], "name": "split-pool recognition of interactions by tag extension", "objectives": [], "organs": [], "preferred_name": "SPRITE", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000562": { "assay": ['3D chromatin structure'], "category": [], "developmental": [], "name": "split-pool recognition of interactions by tag extension with immunoprecipitation", "objectives": [], "organs": [], "preferred_name": "SPRITE-IP", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000642": { "assay": ['Single cell'], "category": [], "developmental": 
[], "name": "long read single-cell RNA-seq", "objectives": [], "organs": [], "preferred_name": "long read scRNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000641": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "GRO-cap", "objectives": [], "organs": [], "preferred_name": "GRO-cap", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000640": { "assay": ['Transcription'], "category": [], "developmental": [], "name": "GRO-seq", "objectives": [], "organs": [], "preferred_name": "GRO-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000643": { "assay": ['Single cell'], "category": [], "developmental": [], "name": "perturbation followed by scRNA-seq", "objectives": [], "organs": [], "preferred_name": "perturbation followed by scRNA-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000644": { "assay": ['Single cell'], "category": [], "developmental": [], "name": "perturbation followed by snATAC-seq", "objectives": [], "organs": [], "preferred_name": "perturbation followed by snATAC-seq", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000649": { "assay": ['Proteomics'], "category": [], "developmental": [], "name": "LC/MS label-free quantitative proteomics", "objectives": [], "organs": [], "preferred_name": "LC/MS label-free quantitative proteomics", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000650": { "assay": ['Proteomics'], "category": [], "developmental": [], "name": "LC-MS/MS isobaric label quantitative proteomics", "objectives": [], "organs": [], "preferred_name": "LC-MS/MS isobaric label quantitative proteomics", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000657": { "assay": ['CRISPR screen'], "category": [], "developmental": [], "name": "proliferation CRISPR screen", "objectives": [], "organs": [], "preferred_name": "proliferation CRISPR screen", "slims": [], "synonyms": [], "systems": [], 
"types": [] }, "NTR:0000658": { "assay": ['CRISPR screen'], "category": [], "developmental": [], "name": "FACS CRISPR screen", "objectives": [], "organs": [], "preferred_name": "FACS CRISPR screen", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000659": { "assay": ['CRISPR screen'], "category": [], "developmental": [], "name": "Flow-FISH CRISPR screen", "objectives": [], "organs": [], "preferred_name": "Flow-FISH CRISPR screen", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000675": { "assay": ['Ribosome activity'], "category": [], "developmental": [], "name": "Ribosome activity", "objectives": [], "organs": [], "preferred_name": "Ribosome activity", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000676": { "assay": ['element activity validation'], "category": [], "developmental": [], "name": "genomic perturbation followed by RT-qPCR", "objectives": [], "organs": [], "preferred_name": "genomic perturbation followed by RT-qPCR", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000677": { "assay": ['element activity validation'], "category": [], "developmental": [], "name": "element activity validation", "objectives": [], "organs": [], "preferred_name": "element activity validation", "slims": [], "synonyms": [], "systems": [], "types": [] }, } ntr_biosamples = { "NTR:0000491": { "category": [], "cells": [], "developmental": ["mesoderm"], "name": "Right ventricle myocardium inferior", "objectives": [], "organs": ["heart"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["circulatory system"], "types": [] }, "NTR:0000492": { "category": [], "cells": [], "developmental": ["mesoderm"], "name": "Right ventricle myocardium superior", "objectives": [], "organs": ["heart"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["circulatory system"], "types": [] }, "NTR:0000493": { "category": [], "cells": [], "developmental": ["mesoderm"], "name": "left ventricle myocardium 
inferior", "objectives": [], "organs": ["heart"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["circulatory system"], "types": [] }, "NTR:0000494": { "category": [], "cells": [], "developmental": ["mesoderm"], "name": "left ventricle myocardium superior", "objectives": [], "organs": ["heart"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["circulatory system"], "types": [] }, "NTR:0001226": { "category": [], "cells": [], "developmental": ["endoderm"], "name": "parathyroid adenoma", "objectives": [], "organs": ["paraythroid gland"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["endocrine system"], "types": [] }, "NTR:0001407": { "category": [], "cells": [], "developmental": ["ectoderm"], "name": "germinal matrix", "objectives": [], "organs": ["brain"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous system"], "types": [] }, "NTR:0002929": { "category": [], "cells": [], "developmental": ["endoderm"], "name": "posterior foregut", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["digestive system"], "types": [] }, "NTR:0003013": { "category": [], "cells": [], "developmental": ["mesoderm"], "name": "placental basal plate", "objectives": [], "organs": ["extraembryonic component", "placenta"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["reproductive system"], "types": [] }, "NTR:0005063": { "category": [], "cells": ["stem cell"], "developmental": [], "name": "dedifferentiated amniotic fluid mesenchymal stem cell", "objectives": [], "organs": ["connective tissue", "extraembryonic component"], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0004218": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell"], "developmental": [], "name": "inflammation-experienced regulatory T-cells", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], 
"synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0004148": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated CD4-positive, CD25-positive, alpha-beta regulatory T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0003079": { "category": [], "cells": ["fibroblast"], "developmental": [], "name": "fibroblast of breast", "objectives": [], "organs": ["breast"], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0001801": { "category": [], "cells": ["neural cell", "retinal cell"], "developmental": ["ectoderm"], "name": "retinal alpha ganglion cell", "objectives": [], "organs": [ "eye" ], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["sensory system", "central nervous system"], "types": [] }, "NTR:0000741": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "B cell"], "developmental": [], "name": "splenic B cell", "objectives": [], "organs": ["spleen", "blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000722": { "category": [], "cells": ["hematopoietic cell", "myeloid cell", "progenitor cell"], "developmental": [], "name": "c-Kit-positive CD71-positive TER-119-positive erythroid progenitor cells", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000721": { "category": [], "cells": ["hematopoietic cell", "myeloid cell", "progenitor cell"], "developmental": [], "name": "c-Kit-positive CD71-positive TER-119-negative erythroid progenitor cells", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000720": { "category": [], "cells": ["hematopoietic cell", "myeloid cell", "progenitor cell"], 
"developmental": [], "name": "c-Kit-positive CD71-negative TER-119-negative erythroid progenitor cells", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000719": { "category": [], "cells": ["hematopoietic cell", "myeloid cell", "progenitor cell"], "developmental": [], "name": "c-Kit-negative CD71-positive TER-119-positive erythroid progenitor cells", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000714": { "category": [], "cells": ["stem cell"], "developmental": [], "name": "leukemia stem cell", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000537": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell"], "developmental": [], "name": "activated T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000535": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "NK cell"], "developmental": [], "name": "CD14-negative natural killer cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000525": { "category": [], "cells": ["fibroblast"], "developmental": ["ectoderm"], "name": "fibroblast of skin of right biceps", "objectives": [], "organs": ["skin of body"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["integumental system"], "types": [] }, "NTR:0000524": { "category": [], "cells": ["fibroblast"], "developmental": ["ectoderm"], "name": "fibroblast of skin of scalp", "objectives": [], "organs": ["skin of body"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["integumental system"], "types": [] }, "NTR:0000523": { "category": [], "cells": ["fibroblast"], "developmental": 
["ectoderm"], "name": "fibroblast of skin of right quadriceps", "objectives": [], "organs": ["skin of body"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["integumental system"], "types": [] }, "NTR:0000522": { "category": [], "cells": ["fibroblast"], "developmental": [ "ectoderm" ], "name": "fibroblast of skin of left quadriceps", "objectives": [], "organs": ["skin of body"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["integumental system"], "types": [] }, "NTR:0000521": { "category": [], "cells": ["fibroblast"], "developmental": ["ectoderm"], "name": "fibroblast of skin of left biceps", "objectives": [], "organs": ["skin of body"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["integumental system"], "types": [] }, "NTR:0000506": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "B cell"], "developmental": [], "name": "B cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000505": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "dendritic cell", "myeloid cell"], "developmental": [], "name": "monocyte-derived dendritic cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000504": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "activated CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000503": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "CD8-positive, alpha-beta memory T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], 
"systems": ["immune system"], "types": [] }, "NTR:0000502": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "activated naive CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000501": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "CD8-positive naive resting alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000500": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "activated CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000499": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated CD4-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000498": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "CD4-positive, alpha-beta memory T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000497": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated naive CD4-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": 
["immune system"], "types": [] }, "NTR:0000496": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "CD4-positive naive resting alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000495": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated CD4-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000427": { "category": [], "cells": ["stem cell"], "developmental": ["ectoderm"], "name": "neurosphere", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous system"], "types": [] }, "NTR:0005192": { "category": [], "cells": [], "developmental": ["endoderm"], "name": "primitive gut cell", "objectives": [], "organs": ["embryo"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["digestive system"], "types": [] }, "NTR:0005188": { "category": [], "cells": [], "developmental": ["endoderm"], "name": "definitive endodermal cell", "objectives": [], "organs": ["embryo"], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0003830": { "category": [], "cells": ["neural cell"], "developmental": ["ectoderm"], "name": "mid-neurogenesis radial glial cells", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous system"], "types": [] }, "NTR:0001484": { "category": [], "cells": [], "developmental": ["endoderm"], "name": "islet precursor cell", "objectives": [], "organs": ["pancreas"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["endocrine system"], "types": [] }, "NTR:0000856": { "category": [], "cells": [], 
"developmental": ["mesoderm", "endoderm"], "name": "mesendoderm", "objectives": [], "organs": ["embryo"], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000561": { "category": [], "cells": [], "developmental": ["endoderm"], "name": "hepatic endoderm", "objectives": [], "organs": ["exocrine gland", "liver", "endocrine gland"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["endocrine system", "digestive system"], "types": [] }, "NTR:0000560": { "category": [], "cells": ["neural cell"], "developmental": ["ectoderm"], "name": "bipolar spindle neuron", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous system"], "types": [] }, "NTR:0000514": { "category": [], "cells": ["progenitor cell"], "developmental": [], "name": "cardiovascular progenitor cell", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["circulatory system"], "types": [] }, "NTR:0000512": { "category": [], "cells": ["progenitor cell"], "developmental": [], "name": "nephron progenitor cell", "objectives": [], "organs": ["kidney"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["excretory system"], "types": [] }, "NTR:0000474": { "category": [], "cells": ["progenitor cell"], "developmental": ["ectoderm"], "name": "ecto neural progenitor cell", "objectives": [], "organs": [], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous system"], "types": [] }, "NTR:0000428": { "category": [], "cells": ["stem cell"], "developmental": [], "name": "embryoid body cell", "objectives": [], "organs": ["embryo"], "preferred_name": "", "slims": [], "synonyms": [], "systems": [], "types": [] }, "NTR:0000646": { "category": [], "cells": [], "developmental": ["ectoderm"], "name": "left cerebral cortex", "objectives": [], "organs": ["brain"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous 
system"], "types": [] }, "NTR:0000647": { "category": [], "cells": [], "developmental": ["ectoderm"], "name": "right cerebral cortex", "objectives": [], "organs": ["brain"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["central nervous system"], "types": [] }, "NTR:0000629": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "activated effector memory CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000661": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "B cell"], "developmental": ["mesoderm", "endoderm"], "name": "stimulated activated naive B cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000662": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "B cell"], "developmental": ["mesoderm", "endoderm"], "name": "stimulated activated memory B cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000663": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": ["mesoderm", "endoderm"], "name": "stimulated activated CD4-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000664": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": ["mesoderm", "endoderm"], "name": "stimulated activated naive CD4-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], 
"types": [], }, "NTR:0000665": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": ["mesoderm", "endoderm"], "name": "stimulated activated CD4-positive, alpha-beta memory T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000666": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "stimulated activated CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000667": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "stimulated activated naive CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000668": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "stimulated activated effector memory CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000669": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "stimulated activated CD8-positive, alpha-beta memory T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000633": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated T-helper 1 cell", "objectives": [], "organs": ["blood", "bodily fluid"], 
"preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000634": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated T-helper 2 cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000635": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated T-helper 17 cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000670": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated T-helper 9 cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000671": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "NK cell"], "developmental": [], "name": "stimulated natural killer cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000631": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "NK cell"], "developmental": [], "name": "activated CD16-positive, CD56-dim natural killer cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000632": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "NK cell"], "developmental": [], "name": "activated CD16-negative, CD56-bright natural killer cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": 
[], }, "NTR:0000652": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "B cell"], "developmental": [], "name": "stimulated activated naive B cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000660": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD4+ T cell"], "developmental": [], "name": "activated effector memory CD4-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000672": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell"], "developmental": [], "name": "activated gamma-delta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000673": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell"], "developmental": [], "name": "stimulated activated gamma-delta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [] }, "NTR:0000630": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "T cell", "CD8+ T cell"], "developmental": [], "name": "activated central memory CD8-positive, alpha-beta T cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], }, "NTR:0000636": { "category": [], "cells": ["hematopoietic cell", "leukocyte", "B cell"], "developmental": ["mesoderm", "endoderm"], "name": "activated memory B cell", "objectives": [], "organs": ["blood", "bodily fluid"], "preferred_name": "", "slims": [], "synonyms": [], "systems": ["immune system"], "types": [], } }
ENCODE-DCC/encoded
src/encoded/commands/ntr_terms.py
Python
mit
39,761
[ "NEURON" ]
1d67dcdbb4d30616bc7af8ad680536ad42dbc1b3605655677186d3e5c83c5c0f
"""Read basis in CP2K format.""" from pathlib import Path import h5py import pkg_resources from qmflows.type_hints import PathLike from nanoqm.workflows.initialization import store_cp2k_basis def test_read_cp2k_basis(tmp_path: PathLike) -> None: """Read Basis set in CP2K format.""" tmp_hdf5 = Path(tmp_path) / 'cp2k_basis.hdf5' tmp_hdf5.touch() path_basis = pkg_resources.resource_filename( "nanoqm", "basis/BASIS_MOLOPT") coefficients_format_carbon_DZVP_MOLOPT_GTH = '[2, 0, 2, 7, 2, 2, 1]' store_cp2k_basis(tmp_hdf5, path_basis) with h5py.File(tmp_hdf5, 'a') as f5: dset = f5["cp2k/basis/c/DZVP-MOLOPT-GTH/coefficients"] # Check that the format is store assert dset.attrs['basisFormat'] == coefficients_format_carbon_DZVP_MOLOPT_GTH # Check Shape of the coefficients assert dset.shape == (5, 7)
SCM-NV/qmworks-namd
test/test_read_cp2k_basis.py
Python
mit
881
[ "CP2K" ]
786bc759caa5d6c6bef584d7ce9975a5aba97e309dfc66d2c0d217a4be513737
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import logging import re import pyparsing as pp from td.constants import EV2NM, HARTREE2EV, HARTREE2NM from td.ExcitedState import ExcitedState def parse_ricc2(text): num_sym_spin_re = "symmetry, multiplicity:\s*(\d+)\s*([\w\"\']+)\s*(\d+)" ids_syms_spins = re.findall(num_sym_spin_re, text) ids, syms, spins = zip(*ids_syms_spins) exc_energy_re = "frequency\s*:.+?([\d\.]+)\s*e\.V." ees = [float(ee) for ee in re.findall(exc_energy_re, text)] #osc_strength_re = "\(mixed gauge\)\s*:\s*([\d\.]+)" osc_strength_re = "oscillator strength.+?length gauge\)\s*:\s*([\d\.]+)" oscs = [float(osc) for osc in re.findall(osc_strength_re, text)] mo_contribs = list() mo_contrib_re = "occ\. orb\..+?\%\s*\|(.+?)\s*norm" # Get the blocks containing the MO contributions for every state mo_contrib_blocks = re.findall(mo_contrib_re, text, re.DOTALL) for mcb in mo_contrib_blocks: block_lines = mcb.strip().split("\n")[1:-1] split_lines = [re.sub("[\|\(\)]", "", mol).split() for mol in block_lines] for sl in split_lines: if len(sl) == 8: sl.insert(3, "a") sl.insert(7, "a") mo_contribs.append(split_lines) # When excited state properties are requested the lists # 'syms', 'spins', 'ees' will be twice as long as 'oscs' # and 'mo_contribs', but they got the same data in both # halves. So we drop the second half. 
assert(len(syms) == len(spins) == len(ees)) if len(syms) == (2 * len(oscs)): first_half = slice(len(oscs)) syms = syms[first_half] spins = spins[first_half] ees = ees[first_half] #import pdb; pdb.set_trace() assert(len(syms) == len(spins) == len(ees) == len(oscs) == len(mo_contribs)) excited_states = list() for id, spin, sym, ee, osc, moc in zip(ids, spins, syms, ees, oscs, mo_contribs): l = EV2NM / ee exc_state = ExcitedState(id, spin, sym, ee, l, osc, "???") excited_states.append(exc_state) for (start_mo, start_irrep, _, start_spin, final_mo, final_irrep, _, final_spin, coeff, percent) in moc: to_or_from = "->" exc_state.add_mo_transition(start_mo, to_or_from, final_mo, ci_coeff=coeff, contrib=float(percent)/100, start_spin=start_spin, final_spin=final_spin, start_irrep=start_irrep, final_irrep=final_irrep) if "SUMMARY OF RELAXED EXCITATIONS WITH COSMO" in text: logging.warning("Using COSMO energies!") lines = cosmo_ricc2 = parse_cosmo_ricc2(text) # Using E(exc(OCC)) / eV to update the energies, # skipping the GS (first line) for i, (line, exc) in enumerate(zip(lines[1:], excited_states), 1): dE = line[6] l = EV2NM / dE print(f"State {i} shifted by {dE - exc.dE:+.2f} eV") exc.dE = dE exc.l = l # Print corrections # corr_re = "\| Correction(.+)" # corrs = [float(c.split("|")[-2]) for c in re.findall(corr_re, text)] # for i, _ in enumerate(excited_states): return excited_states def parse_cosmo_ricc2(text): def to_float(s, loc, toks): try: return float(toks[0]) except ValueError: return 0. 
float_ = pp.Word(pp.nums + ".-").setParseAction(to_float) int_ = pp.Word(pp.nums).setParseAction(lambda t: int(t[0])) big_sep = pp.Suppress(pp.Word("+=")) small_sep = pp.Suppress(pp.Word("+-")) bar = pp.Suppress(pp.Literal("|")) sym = pp.Word(pp.alphanums + "'" + '"' + "*") multi = int_ state = int_ E_tot = float_ E_diff = float_ E_exci = float_ E_exc= float_ line = pp.Group( bar + sym + bar + multi + bar + state + bar + E_tot + bar + E_diff + bar + E_exci + bar + E_exc + bar ) parser = ( pp.Suppress(pp.SkipTo("E(exc(OCC))/eV|", include=True)) + big_sep + pp.OneOrMore(line + small_sep) ) res = parser.parseString(text) return res def parse_escf(text): # In openshell calculations TURBOMOLE omits the multiplicity in # the string. sym = "(\d+)\s+(singlet|doublet|triplet|quartet|quintet|sextet)?" \ "\s*([\w'\"]+)\s+excitation" sym_re = re.compile(sym) syms = sym_re.findall(text) syms = [(int(id_), spin, spat) for id_, spin, spat in syms] exc_energy = "Excitation energy:\s*([\d\.E\+-]+)" exc_energy_re = re.compile(exc_energy) ees = exc_energy_re.findall(text) ees = [float(ee) for ee in ees] osc_strength = "mixed representation:\s*([\d\.E\+-]+)" osc_strength_re = re.compile(osc_strength) oscs = osc_strength_re.findall(text) oscs = [float(osc) for osc in oscs] dom_contrib = "2\*100(.*?)Change of electron number" dom_contrib_re = re.compile(dom_contrib, flags=re.MULTILINE | re.DOTALL) dcs = dom_contrib_re.findall(text) dc_str = "(\d+) ([\w'\"]+)\s*(beta|alpha)?\s+([-\d\.]+)\s*" \ "(\d+) ([\w'\"]+)\s*(beta|alpha)?\s+([-\d\.]+)\s*" \ "([\d\.]+)" dc_re = re.compile(dc_str) dcs_parsed = [dc_re.findall(exc) for exc in dcs] excited_states = list() for sym, ee, osc, dc in zip(syms, ees, oscs, dcs_parsed): id_, spin, spat = sym dE = ee * HARTREE2EV l = HARTREE2NM / ee exc_state = ExcitedState(id_, spin, spat, dE, l, osc, "???") excited_states.append(exc_state) for d in dc: start_mo = d[0] start_irrep = d[1] start_spin = d[2] final_mo = d[4] final_irrep = d[5] final_spin = d[6] 
to_or_from = "->" contrib = float(d[8]) / 100 exc_state.add_mo_transition(start_mo, to_or_from, final_mo, ci_coeff=-0, contrib=contrib, start_spin=start_spin, final_spin=final_spin, start_irrep=start_irrep, final_irrep=final_irrep) return excited_states
eljost/td
td/parser/turbomole.py
Python
gpl-3.0
6,601
[ "TURBOMOLE" ]
b9d628a79c8824e6f08fee90020f62ad687442d9c0365c43f7932bf56a735501
#!/usr/bin/env python ######################################################################## # # (C) 2013, James Cammarata <jcammarata@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json import urllib from urllib2 import quote as urlquote, HTTPError from urlparse import urlparse import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.galaxy.token import GalaxyToken try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class GalaxyAPI(object): ''' This class is meant to be used as a API client for an Ansible Galaxy server ''' SUPPORTED_VERSIONS = ['v1'] def __init__(self, galaxy): self.galaxy = galaxy self.token = GalaxyToken() self._api_server = C.GALAXY_SERVER self._validate_certs = not C.GALAXY_IGNORE_CERTS # set validate_certs if galaxy.options.ignore_certs: self._validate_certs = False display.vvv('Validate TLS certificates: %s' % self._validate_certs) # set the API server if galaxy.options.api_server != C.GALAXY_SERVER: self._api_server = galaxy.options.api_server display.vvv("Connecting to galaxy_server: %s" % self._api_server) server_version = 
self.get_server_api_version() if not server_version in self.SUPPORTED_VERSIONS: raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) self.baseurl = '%s/api/%s' % (self._api_server, server_version) self.version = server_version # for future use display.vvv("Base API: %s" % self.baseurl) def __auth_header(self): token = self.token.get() if token is None: raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.") return {'Authorization': 'Token ' + token} def __call_galaxy(self, url, args=None, headers=None, method=None): if args and not headers: headers = self.__auth_header() try: display.vvv(url) resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method) data = json.load(resp) except HTTPError as e: res = json.load(e) raise AnsibleError(res['detail']) return data @property def api_server(self): return self._api_server @property def validate_certs(self): return self._validate_certs def get_server_api_version(self): """ Fetches the Galaxy API current version to ensure the API server is up and reachable. """ try: url = '%s/api/' % self._api_server data = json.load(open_url(url, validate_certs=self._validate_certs)) return data['current_version'] except Exception as e: raise AnsibleError("The API server (%s) is not responding, please try again later." 
% url) def authenticate(self, github_token): """ Retrieve an authentication token """ url = '%s/tokens/' % self.baseurl args = urllib.urlencode({"github_token": github_token}) resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST") data = json.load(resp) return data def create_import_task(self, github_user, github_repo, reference=None): """ Post an import request """ url = '%s/imports/' % self.baseurl args = urllib.urlencode({ "github_user": github_user, "github_repo": github_repo, "github_reference": reference if reference else "" }) data = self.__call_galaxy(url, args=args) if data.get('results', None): return data['results'] return data def get_import_task(self, task_id=None, github_user=None, github_repo=None): """ Check the status of an import task. """ url = '%s/imports/' % self.baseurl if not task_id is None: url = "%s?id=%d" % (url,task_id) elif not github_user is None and not github_repo is None: url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo) else: raise AnsibleError("Expected task_id or github_user and github_repo") data = self.__call_galaxy(url) return data['results'] def lookup_role_by_name(self, role_name, notify=True): """ Find a role by name. """ role_name = urlquote(role_name) try: parts = role_name.split(".") user_name = ".".join(parts[0:-1]) role_name = parts[-1] if notify: display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) except: raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name) url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name) data = self.__call_galaxy(url) if len(data["results"]) != 0: return data["results"][0] return None def fetch_role_related(self, related, role_id): """ Fetch the list of related items for the given role. The url comes from the 'related' field of the role. 
""" try: url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related) data = self.__call_galaxy(url) results = data['results'] done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results except: return None def get_list(self, what): """ Fetch the list of items specified. """ try: url = '%s/%s/?page_size' % (self.baseurl, what) data = self.__call_galaxy(url) if "results" in data: results = data['results'] else: results = data done = True if "next" in data: done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results except Exception as error: raise AnsibleError("Failed to download the %s list: %s" % (what, str(error))) def search_roles(self, search, **kwargs): search_url = self.baseurl + '/search/roles/?' 
if search: search_url += '&autocomplete=' + urlquote(search) tags = kwargs.get('tags',None) platforms = kwargs.get('platforms', None) page_size = kwargs.get('page_size', None) author = kwargs.get('author', None) if tags and isinstance(tags, basestring): tags = tags.split(',') search_url += '&tags_autocomplete=' + '+'.join(tags) if platforms and isinstance(platforms, basestring): platforms = platforms.split(',') search_url += '&platforms_autocomplete=' + '+'.join(platforms) if page_size: search_url += '&page_size=%s' % page_size if author: search_url += '&username_autocomplete=%s' % author data = self.__call_galaxy(search_url) return data def add_secret(self, source, github_user, github_repo, secret): url = "%s/notification_secrets/" % self.baseurl args = urllib.urlencode({ "source": source, "github_user": github_user, "github_repo": github_repo, "secret": secret }) data = self.__call_galaxy(url, args=args) return data def list_secrets(self): url = "%s/notification_secrets" % self.baseurl data = self.__call_galaxy(url, headers=self.__auth_header()) return data def remove_secret(self, secret_id): url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id) data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') return data def delete_role(self, github_user, github_repo): url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo) data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') return data
goozbach/ansible
lib/ansible/galaxy/api.py
Python
gpl-3.0
9,665
[ "Galaxy" ]
85d86f2aa2f15c61e97a22744cb6c49d4b9ab363ba56a4a73ab99aed44734fa3
#!/usr/bin/env python """ zonal_stats - Calculates statistics of a raster variable within a region. Reads array from netCDF file and regions from ashapefile. Usage: zonal_stats raster_file variable_name region_file results_file raster_file - filename of netCDF file. If a directory is given all the netcdf files will be processed variable_name - name of netCDF variable region_file - shapefile name containing region(s) results_file - filename of csv file containing the results example: zonal_stats.py analysed_spim_199801.nc SMMean CP2_lv2.shp results.csv """ # Modified: # v0 2016-02-10 TAMS # v1 2016-02-15 TAMS # Reads all nc files in directory # v2 2016-02-16 TAMS # For each polygon lops over all raster to avoid recalculating mask # TODO # Use better arguments module # import time,sys,os import numpy as np import netCDF4 as nc import shapefile as shp import shapely.geometry as shply import matplotlib.pyplot as plt import csv #DEFINITONS # set individual parameters here stats=['mean','median','std','count'] #****************************************** def prelude(argv): #****************************************** # get input file name if len(sys.argv) <> 5: print ('ERROR: Must receive 4 arguments') print __doc__ sys.exit() #raise Exception('Must receive 5 arguments') ncPath = sys.argv[1] ncVname = sys.argv[2] shpFname = sys.argv[3] resultsFname = sys.argv[4] options={'ncPath':ncPath,'ncVname':ncVname,'shpFname':shpFname, 'resultsFname':resultsFname} return options #****************************************** def readNC(ncFname,ncVname): #****************************************** try: nc1 = nc.Dataset(ncFname, 'r') except: raise Exception('ERROR reading file '+ncFname) lat = nc1.variables['lat'] lon = nc1.variables['lon'] try: arrayVal = np.squeeze(nc1.variables[ncVname]) except: print('Available variables:') print(nc1.variables) raise Exception('ERROR cannot find var '+ncVname) # Masking _FillValue if missing_value is not being used in netcdf file FillValue = 
nc1.variables[ncVname]._FillValue arrayVal =np.ma.masked_equal(arrayVal,FillValue) [Nrows,Ncols] = arrayVal.shape if len(lat.shape) == 1: arrayX,arrayY = np.meshgrid(lon,lat) return arrayX,arrayY,arrayVal #********************************************** def make_mask(polygon,arrayX,arrayY): #********************************************** [Nrows,Nlines] = arrayX.shape mask = np.ma.zeros([Nrows,Nlines],dtype=bool) start=time.time() for row in range(Nrows): for col in range(Nlines): x = arrayX[row,col] y = arrayY[row,col] p=shply.Point(x,y) if p.within(polygon): mask[row,col] = True stop=time.time() elapsed= stop-start elapseds="%.2f"%elapsed print('make_mask: ' + elapseds + ' s') return mask #****************************************** def zonal_stats(poly,arrayX,arrayY,arrayVal,stats,mask): #****************************************** if len(mask)==0: mask = make_mask(poly,arrayX,arrayY) maskedVal = np.ma.masked_where(np.logical_not(mask),arrayVal) mean=[] median=[] std=[] max=[] min=[] count=[] for stat in stats: if stat == 'mean': mean = np.ma.mean(maskedVal) elif stat == 'median': # for some reason median returns masked_array: # masked_array(data = [1.0,mask = False,fill_value = 1e+20) median = float(np.ma.median(maskedVal)) elif stat == 'std': std = np.ma.std(maskedVal) elif stat == 'max': max = np.ma.max(maskedVal) elif stat == 'min': min = np.ma.min(maskedVal) elif stat == 'count': count = np.ma.count(maskedVal) results={'mean':mean,'median':median,'std':std,'max':max,'min':min,'count':count} return results, mask #****************************************** def area_overlap(bbox1,bbox2): #****************************************** if ( ( ( bbox1[0] > bbox2[2] ) or ( bbox1[2] < bbox2[0] ) ) or ( ( bbox1[1] > bbox2[3] ) or ( bbox1[3] < bbox2[1] ) ) ): overlap = False else: overlap = True return overlap #******************************************************** def write_results(ncFiles,resultsll,resultsFname,shprecs): 
#******************************************************** NPoly = len(resultsll) NFiles = len(resultsll[0]) f = open(resultsFname,'w') writer = csv.writer(f) writer.writerow(shprecs) rec = ['FName'] for iPoly in range(NPoly): for stat in stats: rec.append(stat) writer.writerow(rec) for iFile in range(NFiles): ncFile = ncFiles[iFile] rec=[ncFile] for iPoly in range(NPoly): for stat in stats: rec.append(resultsll[iPoly][iFile][stat]) writer.writerow(rec) #****************************************** def main(argv=None): #****************************************** start1 = time.time() if argv is None: argv = sys.argv options = prelude(argv) shpread = shp.Reader(options['shpFname']) shapes = shpread.shapes() #shapefile has no useful records describing the polygon try: shprecords = shpread.records() print(shprecords) shprecs=[] for rec in shprecords: # record 0 is OBJECTID shprecs=rec.append(rec[0]) print('shprecs') print(shprecs) except: print('ERROR: no records in shapefile') if os.path.isdir(options['ncPath']): ncFiles = [options['ncPath']+f for f in os.listdir(options['ncPath']) if f.endswith('.nc')] ncFiles.sort() elif os.path.isfile(options['ncPath']): ncFiles = [options['ncPath']] else: print('ERROR ncPath does not exist'+ncPath) sys.exit() resultsll = [] #for each polygon in shapefile for shape in shapes: points = shape.points poly = shply.Polygon(points) prevArrayX = [] prevArrayY = [] mask=[] resultsl=[] for ncFile in ncFiles: print('reading: '+os.path.basename(ncFile)) [arrayX,arrayY,arrayVal] = readNC(ncFile,options['ncVname']) array_bbox = [arrayX.min(),arrayY.min(),arrayX.max(),arrayY.max()] if True: #if area_overlap(poly.bounds,array_bbox): if ( np.array_equal(arrayX,prevArrayX) and np.array_equal(arrayY,prevArrayY) ): [result,mask] = zonal_stats(poly,arrayX,arrayY,arrayVal,stats,mask) else: [result,mask] = zonal_stats(poly,arrayX,arrayY,arrayVal,stats,[]) prevArrayX = arrayX prevArrayY = arrayY resultsl.append(result) resultsll.append(resultsl) 
write_results(ncFiles,resultsll,options['resultsFname'],shprecs) stop1 = time.time() elapsed = stop1-start1 elapseds="%.2f"% elapsed print('Time elapsed: ' + elapseds + ' s') if __name__ == "__main__": main()
tiagoams/python_tools
zonal_stats.py
Python
gpl-3.0
7,383
[ "NetCDF" ]
667041301185de8862e8fc19829e7c30aafb966b5c31bd15a63ffa6bcb72d7a3
""" JobEfficiencyPolicy Policy that calculates the efficiency following the formula:: ( completed + done ) / ( completed + done + failed ) if the denominator is smaller than 10, it does not take any decision. """ from DIRAC import S_OK from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase class JobEfficiencyPolicy(PolicyBase): """ The JobEfficiencyPolicy class is a policy that checks the efficiency of the jobs according to what is on JobDB. Evaluates the JobEfficiency results given by the JobCommand.JobCommand """ @staticmethod def _evaluate(commandResult): """_evaluate efficiency < 0.5 :: Banned efficiency < 0.9 :: Degraded """ result = {"Status": None, "Reason": None} if not commandResult["OK"]: result["Status"] = "Error" result["Reason"] = commandResult["Message"] return S_OK(result) commandResult = commandResult["Value"] if not commandResult: result["Status"] = "Unknown" result["Reason"] = "No values to take a decision" return S_OK(result) commandResult = commandResult[0] if not commandResult: result["Status"] = "Unknown" result["Reason"] = "No values to take a decision" return S_OK(result) completed = commandResult["Completed"] done = commandResult["Done"] failed = commandResult["Failed"] total = completed + done + failed # we want a minimum amount of jobs to take a decision ( at least 10 pilots ) if total < 10: result["Status"] = "Unknown" result["Reason"] = "Not enough jobs to take a decision" return S_OK(result) efficiency = (done + completed) / total if efficiency < 0.5: result["Status"] = "Banned" elif efficiency < 0.90: result["Status"] = "Degraded" else: result["Status"] = "Active" result["Reason"] = "Jobs Efficiency of %.2f" % efficiency return S_OK(result)
DIRACGrid/DIRAC
src/DIRAC/ResourceStatusSystem/Policy/JobEfficiencyPolicy.py
Python
gpl-3.0
2,134
[ "DIRAC" ]
be785e08a2c6bc7455612d30eb15b5f7fd64c89eec9516c85f3c7166ab7a9623
""" This module gathers tree-based methods, including decision, regression and randomized trees. Single and multi-output problems are both handled. """ # Authors: Gilles Louppe <g.louppe@gmail.com> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Brian Holt <bdholt1@gmail.com> # Noel Dawe <noel@dawe.me> # Satrajit Gosh <satrajit.ghosh@gmail.com> # Joly Arnaud <arnaud.v.joly@gmail.com> # Fares Hedayati <fares.hedayati@gmail.com> # Nelson Liu <nelson@nelsonliu.me> # # License: BSD 3 clause from __future__ import division import numbers from abc import ABCMeta from abc import abstractmethod from math import ceil import numpy as np from scipy.sparse import issparse from ..base import BaseEstimator from ..base import ClassifierMixin from ..base import RegressorMixin from ..externals import six from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import check_array from ..utils import check_random_state from ..utils import compute_sample_weight from ..utils.multiclass import check_classification_targets from ..exceptions import NotFittedError from ._criterion import Criterion from ._splitter import Splitter from ._tree import DepthFirstTreeBuilder from ._tree import BestFirstTreeBuilder from ._tree import Tree from . 
import _tree, _splitter, _criterion __all__ = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] # ============================================================================= # Types and constants # ============================================================================= DTYPE = _tree.DTYPE DOUBLE = _tree.DOUBLE CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy} CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE, "mae": _criterion.MAE} DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter} SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter, "random": _splitter.RandomSparseSplitter} # ============================================================================= # Base decision tree # ============================================================================= class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator, _LearntSelectorMixin)): """Base class for decision trees. Warning: This class should not be used directly. Use derived classes instead. 
""" @abstractmethod def __init__(self, criterion, splitter, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, max_leaf_nodes, random_state, min_impurity_split, class_weight=None, presort=False): self.criterion = criterion self.splitter = splitter self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.random_state = random_state self.max_leaf_nodes = max_leaf_nodes self.min_impurity_split = min_impurity_split self.class_weight = class_weight self.presort = presort self.n_features_ = None self.n_outputs_ = None self.classes_ = None self.n_classes_ = None self.tree_ = None self.max_features_ = None def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None): """Build a decision tree from the training set (X, y). Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels in classification, real numbers in regression). In the regression case, use ``dtype=np.float64`` and ``order='C'`` for maximum efficiency. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. 
X_idx_sorted : array-like, shape = [n_samples, n_features], optional The indexes of the sorted training input samples. If many tree are grown on the same dataset, this allows the ordering to be cached between trees. If None, the data will be sorted here. Don't use this parameter unless you know what to do. Returns ------- self : object Returns self. """ random_state = check_random_state(self.random_state) if check_input: X = check_array(X, dtype=DTYPE, accept_sparse="csc") y = check_array(y, ensure_2d=False, dtype=None) if issparse(X): X.sort_indices() if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: raise ValueError("No support for np.int64 index based " "sparse matrices") # Determine output settings n_samples, self.n_features_ = X.shape is_classification = isinstance(self, ClassifierMixin) y = np.atleast_1d(y) expanded_class_weight = None if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if is_classification: check_classification_targets(y) y = np.copy(y) self.classes_ = [] self.n_classes_ = [] if self.class_weight is not None: y_original = np.copy(y) y_encoded = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_encoded if self.class_weight is not None: expanded_class_weight = compute_sample_weight( self.class_weight, y_original) else: self.classes_ = [None] * self.n_outputs_ self.n_classes_ = [1] * self.n_outputs_ self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) # Check parameters max_depth = ((2 ** 31) - 1 if self.max_depth is None else self.max_depth) max_leaf_nodes = (-1 if self.max_leaf_nodes is None else self.max_leaf_nodes) if 
isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)): if not 1 <= self.min_samples_leaf: raise ValueError("min_samples_leaf must be at least 1 " "or in (0, 0.5], got %s" % self.min_samples_leaf) min_samples_leaf = self.min_samples_leaf else: # float if not 0. < self.min_samples_leaf <= 0.5: raise ValueError("min_samples_leaf must be at least 1 " "or in (0, 0.5], got %s" % self.min_samples_leaf) min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples)) if isinstance(self.min_samples_split, (numbers.Integral, np.integer)): if not 2 <= self.min_samples_split: raise ValueError("min_samples_split must be at least 2 " "or in (0, 1], got %s" % self.min_samples_split) min_samples_split = self.min_samples_split else: # float if not 0. < self.min_samples_split <= 1.: raise ValueError("min_samples_split must be at least 2 " "or in (0, 1], got %s" % self.min_samples_split) min_samples_split = int(ceil(self.min_samples_split * n_samples)) min_samples_split = max(2, min_samples_split) min_samples_split = max(min_samples_split, 2 * min_samples_leaf) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": if is_classification: max_features = max(1, int(np.sqrt(self.n_features_))) else: max_features = self.n_features_ elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_))) else: raise ValueError( 'Invalid value for max_features. 
Allowed string ' 'values are "auto", "sqrt" or "log2".') elif self.max_features is None: max_features = self.n_features_ elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float if self.max_features > 0.0: max_features = max(1, int(self.max_features * self.n_features_)) else: max_features = 0 self.max_features_ = max_features if len(y) != n_samples: raise ValueError("Number of labels=%d does not match " "number of samples=%d" % (len(y), n_samples)) if not 0 <= self.min_weight_fraction_leaf <= 0.5: raise ValueError("min_weight_fraction_leaf must in [0, 0.5]") if max_depth <= 0: raise ValueError("max_depth must be greater than zero. ") if not (0 < max_features <= self.n_features_): raise ValueError("max_features must be in (0, n_features]") if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)): raise ValueError("max_leaf_nodes must be integral number but was " "%r" % max_leaf_nodes) if -1 < max_leaf_nodes < 2: raise ValueError(("max_leaf_nodes {0} must be either smaller than " "0 or larger than 1").format(max_leaf_nodes)) if sample_weight is not None: if (getattr(sample_weight, "dtype", None) != DOUBLE or not sample_weight.flags.contiguous): sample_weight = np.ascontiguousarray( sample_weight, dtype=DOUBLE) if len(sample_weight.shape) > 1: raise ValueError("Sample weights array has more " "than one dimension: %d" % len(sample_weight.shape)) if len(sample_weight) != n_samples: raise ValueError("Number of weights=%d does not match " "number of samples=%d" % (len(sample_weight), n_samples)) if expanded_class_weight is not None: if sample_weight is not None: sample_weight = sample_weight * expanded_class_weight else: sample_weight = expanded_class_weight # Set min_weight_leaf from min_weight_fraction_leaf if sample_weight is None: min_weight_leaf = (self.min_weight_fraction_leaf * n_samples) else: min_weight_leaf = (self.min_weight_fraction_leaf * np.sum(sample_weight)) if self.min_impurity_split < 0.: 
raise ValueError("min_impurity_split must be greater than " "or equal to 0") presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. if self.presort == 'auto' and issparse(X): presort = False elif self.presort == 'auto': presort = True if presort is True and issparse(X): raise ValueError("Presorting is not supported for sparse " "matrices.") # If multiple trees are built on the same dataset, we only want to # presort once. Splitters now can accept presorted indices if desired, # but do not handle any presorting themselves. Ensemble algorithms # which desire presorting must do presorting themselves and pass that # matrix into each tree. if X_idx_sorted is None and presort: X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0), dtype=np.int32) if presort and X_idx_sorted.shape != X.shape: raise ValueError("The shape of X (X.shape = {}) doesn't match " "the shape of X_idx_sorted (X_idx_sorted" ".shape = {})".format(X.shape, X_idx_sorted.shape)) # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): if is_classification: criterion = CRITERIA_CLF[self.criterion](self.n_outputs_, self.n_classes_) else: criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples) SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS splitter = self.splitter if not isinstance(self.splitter, Splitter): splitter = SPLITTERS[self.splitter](criterion, self.max_features_, min_samples_leaf, min_weight_leaf, random_state, self.presort) self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_) # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise if max_leaf_nodes < 0: builder = DepthFirstTreeBuilder(splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, self.min_impurity_split) else: builder = BestFirstTreeBuilder(splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, max_leaf_nodes, self.min_impurity_split) 
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted) if self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] return self def _validate_X_predict(self, X, check_input): """Validate X whenever one tries to predict, apply, predict_proba""" if self.tree_ is None: raise NotFittedError("Estimator not fitted, " "call `fit` before exploiting the model.") if check_input: X = check_array(X, dtype=DTYPE, accept_sparse="csr") if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc): raise ValueError("No support for np.int64 index based " "sparse matrices") n_features = X.shape[1] if self.n_features_ != n_features: raise ValueError("Number of features of the model must " "match the input. Model n_features is %s and " "input n_features is %s " % (self.n_features_, n_features)) return X def predict(self, X, check_input=True): """Predict class or regression value for X. For a classification model, the predicted class for each sample in X is returned. For a regression model, the predicted value based on X is returned. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] or [n_samples, n_outputs] The predicted classes, or the predict values. 
""" X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) n_samples = X.shape[0] # Classification if isinstance(self, ClassifierMixin): if self.n_outputs_ == 1: return self.classes_.take(np.argmax(proba, axis=1), axis=0) else: predictions = np.zeros((n_samples, self.n_outputs_)) for k in range(self.n_outputs_): predictions[:, k] = self.classes_[k].take( np.argmax(proba[:, k], axis=1), axis=0) return predictions # Regression else: if self.n_outputs_ == 1: return proba[:, 0] else: return proba[:, :, 0] def apply(self, X, check_input=True): """ Returns the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : array_like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- X_leaves : array_like, shape = [n_samples,] For each datapoint x in X, return the index of the leaf x ends up in. Leaves are numbered within ``[0; self.tree_.node_count)``, possibly with gaps in the numbering. """ X = self._validate_X_predict(X, check_input) return self.tree_.apply(X) def decision_path(self, X, check_input=True): """Return the decision path in the tree .. versionadded:: 0.18 Parameters ---------- X : array_like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- indicator : sparse csr array, shape = [n_samples, n_nodes] Return a node indicator matrix where non zero elements indicates that the samples goes through the nodes. 
""" X = self._validate_X_predict(X, check_input) return self.tree_.decision_path(X) @property def feature_importances_(self): """Return the feature importances. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Returns ------- feature_importances_ : array, shape = [n_features] """ if self.tree_ is None: raise NotFittedError("Estimator not fitted, call `fit` before" " `feature_importances_`.") return self.tree_.compute_feature_importances() # ============================================================================= # Public estimators # ============================================================================= class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin): """A decision tree classifier. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : string, optional (default="gini") The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. splitter : string, optional (default="best") The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. 
max_depth : int or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. class_weight : dict, list of dicts, "balanced" or None, optional (default=None) Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` For multi-output, the weights of each column of y will be multiplied. 
Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. min_impurity_split : float, optional (default=1e-7) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. versionadded:: 0.18 presort : bool, optional (default=False) Whether to presort the data to speed up the finding of best splits in fitting. For the default settings of a decision tree on large datasets, setting this to true may slow down the training process. When using either a smaller dataset or a restricted depth, this may speed up the training. Attributes ---------- classes_ : array of shape = [n_classes] or a list of such arrays The classes labels (single output problem), or a list of arrays of class labels (multi-output problem). feature_importances_ : array of shape = [n_features] The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [4]_. max_features_ : int, The inferred value of max_features. n_classes_ : int or list The number of classes (for single output problems), or a list containing the number of classes for each output (for multi-output problems). n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree object The underlying Tree object. See also -------- DecisionTreeRegressor References ---------- .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning .. [2] L. Breiman, J. Friedman, R. 
Olshen, and C. Stone, "Classification and Regression Trees", Wadsworth, Belmont, CA, 1984. .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical Learning", Springer, 2009. .. [4] L. Breiman, and A. Cutler, "Random Forests", http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.model_selection import cross_val_score >>> from sklearn.tree import DecisionTreeClassifier >>> clf = DecisionTreeClassifier(random_state=0) >>> iris = load_iris() >>> cross_val_score(clf, iris.data, iris.target, cv=10) ... # doctest: +SKIP ... array([ 1. , 0.93..., 0.86..., 0.93..., 0.93..., 0.93..., 0.93..., 1. , 0.93..., 1. ]) """ def __init__(self, criterion="gini", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_split=1e-7, class_weight=None, presort=False): super(DecisionTreeClassifier, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, random_state=random_state, min_impurity_split=min_impurity_split, presort=presort) def predict_proba(self, X, check_input=True): """Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. 
Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) if self.n_outputs_ == 1: proba = proba[:, :self.n_classes_] normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba else: all_proba = [] for k in range(self.n_outputs_): proba_k = proba[:, k, :self.n_classes_[k]] normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer all_proba.append(proba_k) return all_proba def predict_log_proba(self, X): """Predict class log-probabilities of the input samples X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: for k in range(self.n_outputs_): proba[k] = np.log(proba[k]) return proba class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin): """A decision tree regressor. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : string, optional (default="mse") The function to measure the quality of a split. Supported criteria are "mse" for the mean squared error, which is equal to variance reduction as feature selection criterion, and "mae" for the mean absolute error. .. versionadded:: 0.18 Mean Absolute Error (MAE) criterion. 
splitter : string, optional (default="best") The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : int or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) 
The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. min_impurity_split : float, optional (default=1e-7) Threshold for early stopping in tree growth. If the impurity of a node is below the threshold, the node is a leaf. .. versionadded:: 0.18 presort : bool, optional (default=False) Whether to presort the data to speed up the finding of best splits in fitting. For the default settings of a decision tree on large datasets, setting this to true may slow down the training process. When using either a smaller dataset or a restricted depth, this may speed up the training. Attributes ---------- feature_importances_ : array of shape = [n_features] The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [4]_. max_features_ : int, The inferred value of max_features. n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree object The underlying Tree object. See also -------- DecisionTreeClassifier References ---------- .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning .. [2] L. Breiman, J. Friedman, R. Olshen, and C. 
Stone, "Classification and Regression Trees", Wadsworth, Belmont, CA, 1984. .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical Learning", Springer, 2009. .. [4] L. Breiman, and A. Cutler, "Random Forests", http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm Examples -------- >>> from sklearn.datasets import load_boston >>> from sklearn.model_selection import cross_val_score >>> from sklearn.tree import DecisionTreeRegressor >>> boston = load_boston() >>> regressor = DecisionTreeRegressor(random_state=0) >>> cross_val_score(regressor, boston.data, boston.target, cv=10) ... # doctest: +SKIP ... array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75..., 0.07..., 0.29..., 0.33..., -1.42..., -1.77...]) """ def __init__(self, criterion="mse", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_split=1e-7, presort=False): super(DecisionTreeRegressor, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, random_state=random_state, min_impurity_split=min_impurity_split, presort=presort) class ExtraTreeClassifier(DecisionTreeClassifier): """An extremely randomized tree classifier. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. 
See also -------- ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. """ def __init__(self, criterion="gini", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", random_state=None, max_leaf_nodes=None, min_impurity_split=1e-7, class_weight=None): super(ExtraTreeClassifier, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, min_impurity_split=min_impurity_split, random_state=random_state) class ExtraTreeRegressor(DecisionTreeRegressor): """An extremely randomized tree regressor. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. See also -------- ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. 
""" def __init__(self, criterion="mse", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", random_state=None, min_impurity_split=1e-7, max_leaf_nodes=None): super(ExtraTreeRegressor, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, min_impurity_split=min_impurity_split, random_state=random_state)
glennq/scikit-learn
sklearn/tree/tree.py
Python
bsd-3-clause
42,704
[ "Brian" ]
e71958ef86efccec7df61ed8fd577ae6d2cbdd98e76e142bca016931ffa06cd3
# -*- coding: utf-8 -*- """ *************************************************************************** translate.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.PyQt.QtGui import QIcon from qgis.core import (QgsProcessingException, QgsProcessingParameterDefinition, QgsProcessingParameterRasterLayer, QgsProcessingParameterEnum, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingOutputRasterLayer) from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class gdaladdo(GdalAlgorithm): INPUT = 'INPUT' LEVELS = 'LEVELS' CLEAN = 'CLEAN' RESAMPLING = 'RESAMPLING' FORMAT = 'FORMAT' OUTPUT = 'OUTPUT' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.methods = ((self.tr('Nearest neighbour'), 'nearest'), (self.tr('Average'), 'average'), (self.tr('Gaussian'), 'gauss'), (self.tr('Cubic convolution.'), 'cubic'), (self.tr('B-Spline convolution'), 'cubicspline'), (self.tr('Lanczos windowed sinc'), 'lanczos'), (self.tr('Average MP'), 'average_mp'), (self.tr('Average in mag/phase space'), 'average_magphase'), (self.tr('Mode'), 'mode')) self.formats = (self.tr('Internal (if possible)'), self.tr('External (GTiff 
.ovr)'), self.tr('External (ERDAS Imagine .aux)')) self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer'))) self.addParameter(QgsProcessingParameterString(self.LEVELS, self.tr('Overview levels'), defaultValue='2 4 8 16')) self.addParameter(QgsProcessingParameterBoolean(self.CLEAN, self.tr('Remove all existing overviews'), defaultValue=False)) params = [] params.append(QgsProcessingParameterEnum(self.RESAMPLING, self.tr('Resampling method'), options=[i[0] for i in self.methods], allowMultiple=False, defaultValue=0, optional=True)) params.append(QgsProcessingParameterEnum(self.FORMAT, self.tr('Overviews format'), options=self.formats, allowMultiple=False, defaultValue=0, optional=True)) for p in params: p.setFlags(p.flags() | QgsProcessingParameterDefinition.FlagAdvanced) self.addParameter(p) self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT, self.tr('Pyramidized'))) def name(self): return 'overviews' def displayName(self): return self.tr('Build overviews (pyramids)') def group(self): return self.tr('Raster miscellaneous') def groupId(self): return 'rastermiscellaneous' def icon(self): return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-overview.png')) def commandName(self): return 'gdaladdo' def getConsoleCommands(self, parameters, context, feedback, executing=True): inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context) if inLayer is None: raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT)) fileName = inLayer.source() arguments = [] arguments.append(fileName) arguments.append('-r') arguments.append(self.methods[self.parameterAsEnum(parameters, self.RESAMPLING, context)][1]) ovrFormat = self.parameterAsEnum(parameters, self.FORMAT, context) if ovrFormat == 1: arguments.append('-ro') elif ovrFormat == 2: arguments.extend('--config USE_RRD YES'.split(' ')) if self.parameterAsBoolean(parameters, self.CLEAN, context): arguments.append('-clean') 
arguments.extend(self.parameterAsString(parameters, self.LEVELS, context).split(' ')) self.setOutputValue(self.OUTPUT, fileName) return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
m-kuhn/QGIS
python/plugins/processing/algs/gdal/gdaladdo.py
Python
gpl-2.0
5,974
[ "Gaussian" ]
23a42f22b0c988b7f0360ddb3c3d8a252882ff237f5ca84b6c11ba0d6169fedb
# -*- coding: utf-8 -*- # Copyright 2012 splinter authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. import tempfile import time import re import sys from contextlib import contextmanager from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.action_chains import ActionChains from splinter.driver import DriverAPI, ElementAPI from splinter.element_list import ElementList from splinter.utils import warn_deprecated if sys.version_info[0] > 2: _meth_func = '__func__' _func_name = '__name__' else: _meth_func = 'im_func' _func_name = 'func_name' class BaseWebDriver(DriverAPI): def __init__(self, wait_time=2): self.wait_time = wait_time def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.quit() @property def title(self): if sys.version_info[0] > 2: return self.driver.title.encode('utf-8') else: return self.driver.title @property def html(self): if sys.version_info[0] > 2: return self.driver.page_source.encode('utf-8') else: return self.driver.page_source @property def url(self): return self.driver.current_url def visit(self, url): self.connect(url) self.ensure_success_response() self.driver.get(url) def back(self): self.driver.back() def forward(self): self.driver.forward() def reload(self): self.driver.refresh() def execute_script(self, script): self.driver.execute_script(script) def evaluate_script(self, script): return self.driver.execute_script("return %s" % script) def is_element_present(self, finder, selector, wait_time=None): wait_time = wait_time or self.wait_time end_time = time.time() + wait_time while time.time() < end_time: if finder(selector): return True return False def is_element_not_present(self, finder, selector, wait_time=None): wait_time = wait_time or self.wait_time end_time = time.time() + wait_time while time.time() < end_time: if not finder(selector): return True return False def 
is_element_present_by_css(self, css_selector, wait_time=None): return self.is_element_present(self.find_by_css, css_selector, wait_time) def is_element_not_present_by_css(self, css_selector, wait_time=None): return self.is_element_not_present(self.find_by_css, css_selector, wait_time) def is_element_present_by_xpath(self, xpath, wait_time=None): return self.is_element_present(self.find_by_xpath, xpath, wait_time) def is_element_not_present_by_xpath(self, xpath, wait_time=None): return self.is_element_not_present(self.find_by_xpath, xpath, wait_time) def is_element_present_by_tag(self, tag, wait_time=None): return self.is_element_present(self.find_by_tag, tag, wait_time) def is_element_not_present_by_tag(self, tag, wait_time=None): return self.is_element_not_present(self.find_by_tag, tag, wait_time) def is_element_present_by_name(self, name, wait_time=None): return self.is_element_present(self.find_by_name, name, wait_time) def is_element_not_present_by_name(self, name, wait_time=None): return self.is_element_not_present(self.find_by_name, name, wait_time) def is_element_present_by_value(self, value, wait_time=None): return self.is_element_present(self.find_by_value, value, wait_time) def is_element_not_present_by_value(self, value, wait_time=None): return self.is_element_not_present(self.find_by_value, value, wait_time) def is_element_present_by_id(self, id, wait_time=None): return self.is_element_present(self.find_by_id, id, wait_time) def is_element_not_present_by_id(self, id, wait_time=None): return self.is_element_not_present(self.find_by_id, id, wait_time) def get_alert(self): return AlertElement(self.driver.switch_to_alert()) def is_text_present(self, text, wait_time=None): wait_time = wait_time or self.wait_time end_time = time.time() + wait_time while time.time() < end_time: try: self.driver.find_element_by_tag_name('body').text.index(text) return True except ValueError: pass except NoSuchElementException: # This exception will be thrown if the body tag 
isn't present # This has occasionally been observed. Assume that the # page isn't fully loaded yet pass return False def is_text_not_present(self, text, wait_time=None): wait_time = wait_time or self.wait_time end_time = time.time() + wait_time while time.time() < end_time: try: self.driver.find_element_by_tag_name('body').text.index(text) except ValueError: return True except NoSuchElementException: # This exception will be thrown if the body tag isn't present # This has occasionally been observed. Assume that the # page isn't fully loaded yet pass return False @contextmanager def get_iframe(self, id): self.driver.switch_to_frame(id) try: yield self finally: self.driver.switch_to_frame(None) def find_option_by_value(self, value): return self.find_by_xpath('//option[@value="%s"]' % value, original_find="option by value", original_query=value) def find_option_by_text(self, text): return self.find_by_xpath('//option[normalize-space(text())="%s"]' % text, original_find="option by text", original_query=text) def find_link_by_href(self, href): return self.find_by_xpath('//a[@href="%s"]' % href, original_find="link by href", original_query=href) def find_link_by_partial_href(self, partial_href): return self.find_by_xpath('//a[contains(@href, "%s")]' % partial_href, original_find="link by partial href", original_query=partial_href) def find_link_by_partial_text(self, partial_text): return self.find_by_xpath('//a[contains(normalize-space(.), "%s")]' % partial_text, original_find="link by partial text", original_query=partial_text) def find_link_by_text(self, text): return self.find_by_xpath('//a[text()="%s"]' % text, original_find="link by text", original_query=text) def find_by(self, finder, selector, original_find=None, original_query=None): elements = None end_time = time.time() + self.wait_time func_name = getattr(getattr(finder, _meth_func), _func_name) find_by = original_find or func_name[func_name.rfind('_by_') + 4:] query = original_query or selector while 
time.time() < end_time: try: elements = finder(selector) if not isinstance(elements, list): elements = [elements] except NoSuchElementException: pass if elements: return ElementList([self.element_class(element, self) for element in elements], find_by=find_by, query=query) return ElementList([], find_by=find_by, query=query) def find_by_css(self, css_selector): return self.find_by(self.driver.find_elements_by_css_selector, css_selector, original_find='css', original_query=css_selector) def find_by_xpath(self, xpath, original_find=None, original_query=None): original_find = original_find or "xpath" original_query = original_query or xpath return self.find_by(self.driver.find_elements_by_xpath, xpath, original_find=original_find, original_query=original_query) def find_by_name(self, name): return self.find_by(self.driver.find_elements_by_name, name) def find_by_tag(self, tag): return self.find_by(self.driver.find_elements_by_tag_name, tag) def find_by_value(self, value): return self.find_by_xpath('//*[@value="%s"]' % value, original_find='value', original_query=value) def find_by_id(self, id): return self.find_by(self.driver.find_element_by_id, id) def fill(self, name, value): field = self.find_by_name(name).first field.value = value attach_file = fill def fill_form(self, field_values): for name, value in field_values.items(): elements = self.find_by_name(name) element = elements.first if element['type'] in ['text', 'password', 'tel'] or element.tag_name == 'textarea': element.value = value elif element['type'] == 'checkbox': if value: element.check() else: element.uncheck() elif element['type'] == 'radio': for field in elements: if field.value == value: field.click() elif element._element.tag_name == 'select': element.find_by_value(value).first._element.click() else: element.value = value def type(self, name, value, slowly=False): element = self.driver.find_element_by_css_selector('[name="%s"]' % name) if slowly: return TypeIterator(element, value) 
element.send_keys(value) return value def choose(self, name, value): fields = self.find_by_name(name) for field in fields: if field.value == value: field.click() def check(self, name): self.find_by_name(name).first.check() def uncheck(self, name): self.find_by_name(name).first.uncheck() def screenshot(self, name=None, suffix='.png'): name = name or '' (fd, filename) = tempfile.mkstemp(prefix=name, suffix=suffix) self.driver.get_screenshot_as_file(filename) return filename def select(self, name, value): self.find_by_xpath('//select[@name="%s"]/option[@value="%s"]' % (name, value)).first._element.click() def quit(self): self.driver.quit() @property def cookies(self): return self._cookie_manager @property def current_window(self): """ Returns the handle of the current window. """ return self.driver.current_window_handle @property def windows(self): """ Returns the handles of all windows within the current session. """ return self.driver.window_handles def switch_to_window(self, window_name): """ Switches focus to the specified window. 
""" return self.driver.switch_to_window(window_name) class TypeIterator(object): def __init__(self, element, keys): self._element = element self._keys = keys def __iter__(self): for key in self._keys: self._element.send_keys(key) yield key class WebDriverElement(ElementAPI): def __init__(self, element, parent): self._element = element self.parent = parent self.action_chains = ActionChains(parent.driver) def _get_value(self): return self['value'] or self._element.text def _set_value(self, value): if self._element.get_attribute('type') != 'file': self._element.clear() self._element.send_keys(value) value = property(_get_value, _set_value) @property def text(self): return self._element.text @property def tag_name(self): return self._element.tag_name def fill(self, value): self.value = value def select(self, value): self.find_by_xpath('//select[@name="%s"]/option[@value="%s"]' % (self["name"], value))._element.click() def type(self, value, slowly=False): if slowly: return TypeIterator(self._element, value) self._element.send_keys(value) return value def click(self): self._element.click() def check(self): if not self.checked: self._element.click() def uncheck(self): if self.checked: self._element.click() @property def checked(self): return self._element.is_selected() selected = checked @property def visible(self): return self._element.is_displayed() @property def html(self): return self['innerHTML'] @property def outer_html(self): return self['outerHTML'] def find_by_css(self, selector, original_find=None, original_query=None): find_by = original_find or 'css' query = original_query or selector elements = self._element.find_elements_by_css_selector(selector) return ElementList([self.__class__(element, self.parent) for element in elements], find_by=find_by, query=query) def find_by_xpath(self, selector): elements = ElementList(self._element.find_elements_by_xpath(selector)) return ElementList([self.__class__(element, self.parent) for element in elements], 
find_by='xpath', query=selector) def find_by_name(self, name): elements = ElementList(self._element.find_elements_by_name(name)) return ElementList([self.__class__(element, self.parent) for element in elements], find_by='name', query=name) def find_by_tag(self, tag): elements = ElementList(self._element.find_elements_by_tag_name(tag)) return ElementList([self.__class__(element, self.parent) for element in elements], find_by='tag', query=tag) def find_by_value(self, value): selector = '[value="%s"]' % value return self.find_by_css(selector, original_find='value', original_query=value) def find_by_id(self, id): elements = ElementList(self._element.find_elements_by_id(id)) return ElementList([self.__class__(element, self.parent) for element in elements], find_by='id', query=id) def has_class(self, class_name): return bool(re.search(r'(?:^|\s)' + re.escape(class_name) + r'(?:$|\s)', self['class'])) def mouse_over(self): """ Performs a mouse over the element. Currently works only on Chrome driver. """ self.action_chains.move_to_element(self._element) self.action_chains.perform() def mouse_out(self): """ Performs a mouse out the element. Currently works only on Chrome driver. """ self.action_chains.move_by_offset(5000, 5000) self.action_chains.perform() mouseover = warn_deprecated(mouse_over, 'mouseover') mouseout = warn_deprecated(mouse_out, 'mouseout') def double_click(self): """ Performs a double click in the element. Currently works only on Chrome driver. """ self.action_chains.double_click(self._element) self.action_chains.perform() def right_click(self): """ Performs a right click in the element. Currently works only on Chrome driver. """ self.action_chains.context_click(self._element) self.action_chains.perform() def drag_and_drop(self, droppable): """ Performs drag a element to another elmenet. Currently works only on Chrome driver. 
""" self.action_chains.drag_and_drop(self._element, droppable._element) self.action_chains.perform() def __getitem__(self, attr): return self._element.get_attribute(attr) class AlertElement(object): def __init__(self, alert): self._alert = alert self.text = alert.text def accept(self): self._alert.accept() def dismiss(self): self._alert.dismiss() def fill_with(self, text): self._alert.send_keys(text) def __enter__(self): return self def __exit__(self, type, value, traceback): pass
devenbansod/SWD-Query
splinter/driver/webdriver/__init__.py
Python
gpl-2.0
16,225
[ "VisIt" ]
7658e298de7d6b03bffeee6946e6d8c83fc9c6013c70e4a1751a972104ef7fd1
import unittest import numpy as np import bayesnet as bn class TestGaussian(unittest.TestCase): def test_gaussian(self): self.assertRaises(ValueError, bn.random.Gaussian, 0, -1) self.assertRaises(ValueError, bn.random.Gaussian, 0, np.array([1, -1])) if __name__ == '__main__': unittest.main()
ctgk/BayesianNetwork
test/random/test_gaussian.py
Python
mit
322
[ "Gaussian" ]
6a744a2ca26330801d8a5d390a665e382cd5fee50ad5d6b77fdd2292446e9062
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2002 Gary Shao # Copyright (C) 2007 Brian G. Matherly # Copyright (C) 2009 Benny Malengier # Copyright (C) 2009 Gary Burton # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """ Provide base interface to text based documents. Specific document interfaces should be derived from the core classes. """ #------------------------------------------------------------------------- # # standard python modules # #------------------------------------------------------------------------- #------------------------------------------------------------------------- # # GRAMPS modules # #------------------------------------------------------------------------- from stylesheet import StyleSheet #------------------------------------------------------------------------- # # set up logging # #------------------------------------------------------------------------- import logging log = logging.getLogger(".basedoc") #------------------------------------------------------------------------ # # BaseDoc # #------------------------------------------------------------------------ class BaseDoc(object): """ Base class for document generators. 
Different output formats, such as OpenOffice, AbiWord, and LaTeX are derived from this base class, providing a common interface to all document generators. """ def __init__(self, styles, paper_style): """ Create a BaseDoc instance, which provides a document generation interface. This class should never be instantiated directly, but only through a derived class. @param styles: StyleSheet containing the styles used. @param paper_style: PaperStyle instance containing information about the paper. If set to None, then the document is not a page oriented document (e.g. HTML) """ self.paper = paper_style self._style_sheet = styles self._creator = "" self.init_called = False def init(self): self.init_called = True def set_creator(self, name): "Set the owner name" self._creator = name def get_creator(self): "Return the owner name" return self._creator def get_style_sheet(self): """ Return the StyleSheet of the document. """ return StyleSheet(self._style_sheet) def set_style_sheet(self, style_sheet): """ Set the StyleSheet of the document. @param style_sheet: The new style sheet for the document @type style_sheet: StyleSheet """ self._style_sheet = StyleSheet(style_sheet) def open(self, filename): """ Opens the file so that it can be generated. @param filename: path name of the file to create """ raise NotImplementedError def close(self): "Closes the generated file." raise NotImplementedError
arunkgupta/gramps
gramps/gen/plug/docgen/basedoc.py
Python
gpl-2.0
3,715
[ "Brian" ]
7595ed8d32d4b7dca6068a260e00c26ff95ffd70e2d562327a64ab978ebde96a
######################################################################## # $HeadURL $ # File: ReplicateAndRegister.py # Author: Krzysztof.Ciba@NOSPAMgmail.com # Date: 2013/03/13 18:49:12 ######################################################################## """ :mod: ReplicateAndRegister ========================== .. module: ReplicateAndRegister :synopsis: ReplicateAndRegister operation handler .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com ReplicateAndRegister operation handler """ __RCSID__ = "$Id $" # # # @file ReplicateAndRegister.py # @author Krzysztof.Ciba@NOSPAMgmail.com # @date 2013/03/13 18:49:28 # @brief Definition of ReplicateAndRegister class. # # imports import re # # from DIRAC from DIRAC import S_OK, S_ERROR, gMonitor, gLogger from DIRAC.Core.Utilities.Adler import compareAdler from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient from DIRAC.DataManagementSystem.Client.DataManager import DataManager from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase from DIRAC.Resources.Storage.StorageElement import StorageElement from DIRAC.Resources.Catalog.FileCatalog import FileCatalog def filterReplicas( opFile, logger = None, dataManager = None, seCache = None ): """ filter out banned/invalid source SEs """ if not logger: logger = gLogger if not dataManager: dataManager = DataManager() if not seCache: seCache = {} log = logger.getSubLogger( "filterReplicas" ) ret = { "Valid" : [], "NoMetadata" : [], "Bad" : [], 'NoReplicas':[], 'NoPFN':[] } replicas = dataManager.getActiveReplicas( opFile.LFN ) if not replicas["OK"]: log.error( 'Failed to get active replicas', replicas["Message"] ) return replicas reNotExists = re.compile( "not such file or directory" ) replicas = replicas["Value"] failed = replicas["Failed"].get( opFile.LFN , "" ) if reNotExists.match( failed.lower() ): opFile.Status = "Failed" opFile.Error = failed return S_ERROR( failed ) replicas = replicas["Successful"].get( 
opFile.LFN, {} ) for repSEName in replicas: repSE = seCache[repSEName] if repSEName in seCache else \ seCache.setdefault( repSEName, StorageElement( repSEName ) ) repSEMetadata = repSE.getFileMetadata( opFile.LFN ) error = repSEMetadata.get( 'Message', repSEMetadata.get( 'Value', {} ).get( 'Failed', {} ).get( opFile.LFN ) ) if error: log.warn( 'unable to get metadata at %s for %s' % ( repSEName, opFile.LFN ), error.replace( '\n', '' ) ) if 'File does not exist' in error: ret['NoReplicas'].append( repSEName ) else: ret["NoMetadata"].append( repSEName ) else: repSEMetadata = repSEMetadata['Value']['Successful'][opFile.LFN] seChecksum = repSEMetadata.get( "Checksum" ) if opFile.Checksum and seChecksum and not compareAdler( seChecksum, opFile.Checksum ) : # The checksum in the request may be wrong, check with FC fcMetadata = FileCatalog().getFileMetadata( opFile.LFN ) fcChecksum = fcMetadata.get( 'Value', {} ).get( 'Successful', {} ).get( opFile.LFN, {} ).get( 'Checksum' ) if fcChecksum and fcChecksum != opFile.Checksum and compareAdler( fcChecksum , seChecksum ): opFile.Checksum = fcChecksum ret['Valid'].append( repSEName ) else: log.warn( " %s checksum mismatch, request: %s @%s: %s" % ( opFile.LFN, opFile.Checksum, repSEName, seChecksum ) ) ret["Bad"].append( repSEName ) else: # # if we're here repSE is OK ret["Valid"].append( repSEName ) return S_OK( ret ) ######################################################################## class ReplicateAndRegister( DMSRequestOperationsBase ): """ .. 
class:: ReplicateAndRegister ReplicateAndRegister operation handler """ def __init__( self, operation = None, csPath = None ): """c'tor :param self: self reference :param Operation operation: Operation instance :param str csPath: CS path for this handler """ super( ReplicateAndRegister, self ).__init__( operation, csPath ) # # own gMonitor stuff for files gMonitor.registerActivity( "ReplicateAndRegisterAtt", "Replicate and register attempted", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "ReplicateOK", "Replications successful", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "ReplicateFail", "Replications failed", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RegisterOK", "Registrations successful", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RegisterFail", "Registrations failed", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) # # for FTS gMonitor.registerActivity( "FTSScheduleAtt", "Files schedule attempted", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "FTSScheduleOK", "File schedule successful", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "FTSScheduleFail", "File schedule failed", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM ) # # SE cache self.seCache = {} # Clients self.fc = FileCatalog() if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ): self.ftsClient = FTSClient() def __call__( self ): """ call me maybe """ # # check replicas first checkReplicas = self.__checkReplicas() if not checkReplicas["OK"]: self.log.error( 'Failed to check replicas', checkReplicas["Message"] ) if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ): bannedGroups = getattr( self, "FTSBannedGroups" ) if hasattr( self, "FTSBannedGroups" ) else () if self.request.OwnerGroup in bannedGroups: self.log.verbose( "usage of FTS system 
is banned for request's owner" ) return self.dmTransfer() return self.ftsTransfer() return self.dmTransfer() def __checkReplicas( self ): """ check done replicas and update file states """ waitingFiles = dict( [ ( opFile.LFN, opFile ) for opFile in self.operation if opFile.Status in ( "Waiting", "Scheduled" ) ] ) targetSESet = set( self.operation.targetSEList ) replicas = self.fc.getReplicas( waitingFiles.keys() ) if not replicas["OK"]: self.log.error( 'Failed to get replicas', replicas["Message"] ) return replicas reMissing = re.compile( "no such file or directory" ) for failedLFN, errStr in replicas["Value"]["Failed"].items(): waitingFiles[failedLFN].Error = errStr if reMissing.search( errStr.lower() ): self.log.error( "File does not exists", failedLFN ) gMonitor.addMark( "ReplicateFail", len( targetSESet ) ) waitingFiles[failedLFN].Status = "Failed" for successfulLFN, reps in replicas["Value"]["Successful"].items(): if targetSESet.issubset( set( reps ) ): self.log.info( "file %s has been replicated to all targets" % successfulLFN ) waitingFiles[successfulLFN].Status = "Done" return S_OK() def _addMetadataToFiles( self, toSchedule ): """ Add metadata to those files that need to be scheduled through FTS toSchedule is a dictionary: {'lfn1': [opFile, validReplicas, validTargets], 'lfn2': [opFile, validReplicas, validTargets]} """ if toSchedule: self.log.info( "found %s files to schedule, getting metadata from FC" % len( toSchedule ) ) lfns = toSchedule.keys() else: self.log.info( "No files to schedule" ) return S_OK() res = self.fc.getFileMetadata( lfns ) if not res['OK']: return res else: if res['Value']['Failed']: self.log.warn( "Can't schedule %d files: problems getting the metadata: %s" % ( len( res['Value']['Failed'] ), ', '.join( res['Value']['Failed'] ) ) ) metadata = res['Value']['Successful'] filesToScheduleList = [] for lfnsToSchedule, lfnMetadata in metadata.items(): opFileToSchedule = toSchedule[lfnsToSchedule][0] opFileToSchedule.GUID = 
lfnMetadata['GUID'] opFileToSchedule.Checksum = metadata[lfnsToSchedule]['Checksum'] opFileToSchedule.ChecksumType = metadata[lfnsToSchedule]['ChecksumType'] opFileToSchedule.Size = metadata[lfnsToSchedule]['Size'] filesToScheduleList.append( ( opFileToSchedule.toJSON()['Value'], toSchedule[lfnsToSchedule][1], toSchedule[lfnsToSchedule][2] ) ) return S_OK( filesToScheduleList ) def _filterReplicas( self, opFile ): """ filter out banned/invalid source SEs """ return filterReplicas( opFile, logger = self.log, dataManager = self.dm, seCache = self.seCache ) def ftsTransfer( self ): """ replicate and register using FTS """ self.log.info( "scheduling files in FTS..." ) bannedTargets = self.checkSEsRSS() if not bannedTargets['OK']: gMonitor.addMark( "FTSScheduleAtt" ) gMonitor.addMark( "FTSScheduleFail" ) return bannedTargets if bannedTargets['Value']: return S_OK( "%s targets are banned for writing" % ",".join( bannedTargets['Value'] ) ) # Can continue now self.log.verbose( "No targets banned for writing" ) toSchedule = {} for opFile in self.getWaitingFilesList(): opFile.Error = '' gMonitor.addMark( "FTSScheduleAtt" ) # # check replicas replicas = self._filterReplicas( opFile ) if not replicas["OK"]: continue replicas = replicas["Value"] validReplicas = replicas["Valid"] noMetaReplicas = replicas["NoMetadata"] noReplicas = replicas['NoReplicas'] badReplicas = replicas['Bad'] noPFN = replicas['NoPFN'] if validReplicas: validTargets = list( set( self.operation.targetSEList ) - set( validReplicas ) ) if not validTargets: self.log.info( "file %s is already present at all targets" % opFile.LFN ) opFile.Status = "Done" else: toSchedule[opFile.LFN] = [ opFile, validReplicas, validTargets ] else: gMonitor.addMark( "FTSScheduleFail" ) if noMetaReplicas: self.log.warn( "unable to schedule '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) ) opFile.Error = "Couldn't get metadata" elif noReplicas: self.log.error( "Unable to schedule transfer", "File %s 
doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) ) opFile.Error = 'No replicas found' opFile.Status = 'Failed' elif badReplicas: self.log.error( "Unable to schedule transfer", "File %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) ) opFile.Error = 'All replicas have a bad checksum' opFile.Status = 'Failed' elif noPFN: self.log.warn( "unable to schedule %s, could not get a PFN at %s" % ( opFile.LFN, ','.join( noPFN ) ) ) res = self._addMetadataToFiles( toSchedule ) if not res['OK']: return res else: filesToScheduleList = res['Value'] if filesToScheduleList: ftsSchedule = FTSClient().ftsSchedule( self.request.RequestID, self.operation.OperationID, filesToScheduleList ) if not ftsSchedule["OK"]: self.log.error( "Completely failed to schedule to FTS:", ftsSchedule["Message"] ) return ftsSchedule # might have nothing to schedule ftsSchedule = ftsSchedule["Value"] if not ftsSchedule: return S_OK() self.log.info( "%d files have been scheduled to FTS" % len( ftsSchedule['Successful'] ) ) for opFile in self.operation: fileID = opFile.FileID if fileID in ftsSchedule["Successful"]: gMonitor.addMark( "FTSScheduleOK", 1 ) opFile.Status = "Scheduled" self.log.debug( "%s has been scheduled for FTS" % opFile.LFN ) elif fileID in ftsSchedule["Failed"]: gMonitor.addMark( "FTSScheduleFail", 1 ) opFile.Error = ftsSchedule["Failed"][fileID] if 'sourceSURL equals to targetSURL' in opFile.Error: # In this case there is no need to continue opFile.Status = 'Failed' self.log.warn( "unable to schedule %s for FTS: %s" % ( opFile.LFN, opFile.Error ) ) else: self.log.info( "No files to schedule after metadata checks" ) # Just in case some transfers could not be scheduled, try them with RM return self.dmTransfer( fromFTS = True ) def dmTransfer( self, fromFTS = False ): """ replicate and register using dataManager """ # # get waiting files. 
If none just return # # source SE sourceSE = self.operation.SourceSE if self.operation.SourceSE else None if sourceSE: # # check source se for read bannedSource = self.checkSEsRSS( sourceSE, 'ReadAccess' ) if not bannedSource["OK"]: gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) ) gMonitor.addMark( "ReplicateFail", len( self.operation ) ) return bannedSource if bannedSource["Value"]: self.operation.Error = "SourceSE %s is banned for reading" % sourceSE self.log.info( self.operation.Error ) return S_OK( self.operation.Error ) # # check targetSEs for write bannedTargets = self.checkSEsRSS() if not bannedTargets['OK']: gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) ) gMonitor.addMark( "ReplicateFail", len( self.operation ) ) return bannedTargets if bannedTargets['Value']: self.operation.Error = "%s targets are banned for writing" % ",".join( bannedTargets['Value'] ) return S_OK( self.operation.Error ) # Can continue now self.log.verbose( "No targets banned for writing" ) waitingFiles = self.getWaitingFilesList() if not waitingFiles: return S_OK() # # loop over files if fromFTS: self.log.info( "Trying transfer using replica manager as FTS failed" ) else: self.log.info( "Transferring files using Data manager..." 
) for opFile in waitingFiles: gMonitor.addMark( "ReplicateAndRegisterAtt", 1 ) opFile.Error = '' lfn = opFile.LFN # Check if replica is at the specified source replicas = self._filterReplicas( opFile ) if not replicas["OK"]: self.log.error( 'Failed to check replicas', replicas["Message"] ) continue replicas = replicas["Value"] validReplicas = replicas["Valid"] noMetaReplicas = replicas["NoMetadata"] noReplicas = replicas['NoReplicas'] badReplicas = replicas['Bad'] noPFN = replicas['NoPFN'] if not validReplicas: gMonitor.addMark( "ReplicateFail" ) if noMetaReplicas: self.log.warn( "unable to replicate '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) ) opFile.Error = "Couldn't get metadata" elif noReplicas: self.log.error( "Unable to replicate", "File %s doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) ) opFile.Error = 'No replicas found' opFile.Status = 'Failed' elif badReplicas: self.log.error( "Unable to replicate", "%s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) ) opFile.Error = 'All replicas have a bad checksum' opFile.Status = 'Failed' elif noPFN: self.log.warn( "unable to replicate %s, could not get a PFN" % opFile.LFN ) continue # # get the first one in the list if sourceSE not in validReplicas: if sourceSE: self.log.warn( "%s is not at specified sourceSE %s, changed to %s" % ( lfn, sourceSE, validReplicas[0] ) ) sourceSE = validReplicas[0] # # loop over targetSE catalog = self.operation.Catalog for targetSE in self.operation.targetSEList: # # call DataManager if targetSE in validReplicas: self.log.warn( "Request to replicate %s to an existing location: %s" % ( lfn, targetSE ) ) opFile.Status = 'Done' continue res = self.dm.replicateAndRegister( lfn, targetSE, sourceSE = sourceSE, catalog = catalog ) if res["OK"]: if lfn in res["Value"]["Successful"]: if "replicate" in res["Value"]["Successful"][lfn]: repTime = res["Value"]["Successful"][lfn]["replicate"] prString = "file %s 
replicated at %s in %s s." % ( lfn, targetSE, repTime ) gMonitor.addMark( "ReplicateOK", 1 ) if "register" in res["Value"]["Successful"][lfn]: gMonitor.addMark( "RegisterOK", 1 ) regTime = res["Value"]["Successful"][lfn]["register"] prString += ' and registered in %s s.' % regTime self.log.info( prString ) else: gMonitor.addMark( "RegisterFail", 1 ) prString += " but failed to register" self.log.warn( prString ) opFile.Error = "Failed to register" # # add register replica operation registerOperation = self.getRegisterOperation( opFile, targetSE, type = 'RegisterReplica' ) self.request.insertAfter( registerOperation, self.operation ) else: self.log.error( "Failed to replicate", "%s to %s" % ( lfn, targetSE ) ) gMonitor.addMark( "ReplicateFail", 1 ) opFile.Error = "Failed to replicate" else: gMonitor.addMark( "ReplicateFail", 1 ) reason = res["Value"]["Failed"][lfn] self.log.error( "Failed to replicate and register", "File %s at %s:" % ( lfn, targetSE ), reason ) opFile.Error = reason else: gMonitor.addMark( "ReplicateFail", 1 ) opFile.Error = "DataManager error: %s" % res["Message"] self.log.error( "DataManager error", res["Message"] ) if not opFile.Error: if len( self.operation.targetSEList ) > 1: self.log.info( "file %s has been replicated to all targetSEs" % lfn ) opFile.Status = "Done" return S_OK()
Sbalbp/DIRAC
DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py
Python
gpl-3.0
19,137
[ "DIRAC" ]
6678666899e473e30003142cf6ccff45fe976e230513cff3cb2779a91a67b71e
# Copyright (c) 2008-2011, Jan Gasthaus # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Various functions and classes related to plotting""" from pylab import * import matplotlib as M import matplotlib.cm as cm from matplotlib.colors import no_norm from matplotlib.patches import Ellipse from numpy import * from optparse import OptionParser import cPickle from utils import * markers = ['+','x','o','d','^','>' ,'v' ,'<' ,'s','p' ,'h' ,'8']*10 def plot_scatter_2d(data,labels): unique_labels = unique(labels) label_markers = array(markers)[unique_labels % len(markers)] label_colors = array(unique_labels,dtype=float64)/max(unique_labels + 1) for i in range(len(unique_labels)): l = unique_labels[i] colors = ones(sum(labels==l))*label_colors[i] scatter(data[0,labels==l],data[1,labels==l], marker=str(label_markers[i]), c=colors, cmap=matplotlib.cm.jet, norm=no_norm(), linewidths=(0.3,) ) def plot_pcs_against_time(data,time): num_dims = data.shape[0] for n in range(num_dims): subplot(num_dims,1,n+1) scatter(time,data[n,:]) grid() def plot_pcs_against_time_labeled(data,time,labels): num_dims = data.shape[0] for n in range(num_dims): subplot(num_dims,1,n+1) plot_scatter_2d(vstack([time,data[n,:]]),labels) grid() axis([0,max(time),-5,5]) def plot_pcs_against_time_labeled_with_particle(data,time,labels,particle): num_dims = data.shape[0] mstore = particle.mstore.to_array() mean_cluster_size = mean(mstore[mstore>0]) for n in range(num_dims): subplot(num_dims,1,n+1) plot_scatter_2d(vstack([time,data[n,:]]),labels) for c in range(particle.K): start = particle.birthtime[c] stop = particle.deathtime[c] if stop == 0: stop = particle.T length = stop-start mus = N.zeros(length) lams = N.zeros(length) for i in range(length): t = range(start,stop)[i] mus[i] = particle.U.get_array(t)[c].mu[n] lams[i] = particle.U.get_array(t)[c].lam[n] #plot(time[arange(start,stop)],mus) lw = mean(mstore[c,start:stop])/mean_cluster_size*0.5 errorbar(time[arange(start,stop)],mus,sqrt(1/lams), linewidth=lw,elinewidth=lw) xlabel("Time") ylabel("PC " + str(n+1)) grid() def 
plot_sampler_params(state): active = where(sum(state.mstore,1)>0)[0] num_dims = state.aux_vars.shape[2] mean_cluster_size = mean(state.mstore[state.mstore>0]) for n in range(num_dims): subplot(num_dims,1,n+1) for c in active: #start = state.birthtime[c] start = 0 stop = state.deathtime[c] if stop == 0: stop = state.T length = stop-start mus = N.zeros(length) lams = N.zeros(length) for i in range(length): t = range(start,stop)[i] mus[i] = state.U[c,t].mu[n] lams[i] = state.U[c,t].lam[n] lw = mean(state.mstore[c,start:stop])/mean_cluster_size*0.5 plot(arange(start,stop),mus,linewidth=0.3) #errorbar(arange(start,stop),mus,sqrt(1/lams), # linewidth=lw,elinewidth=lw) xlabel("Time") ylabel("PC " + str(n+1)) grid() def plot_mstore_against_time(particle): mstore = particle.mstore.to_array() for c in range(particle.K): subplot(particle.K,1,c+1) start = particle.birthtime[c] stop = particle.deathtime[c] if stop == 0: stop = particle.T plot(mstore[c,start:stop]) grid() def matlab_plot_3d(data,data_time,labeling): from mlabwrap import mlab mlab.scatter3(data_time,data[0,:],data[1,:],20,labeling) def plot_lifespan_histogram(particle_fn,rho=0.975): p = cPickle.load(open(particle_fn,'rb')) a = array(range(p.T)) ls = p.d - a hist(ls,bins=100,normed=True) plot(a,rho**a*(1-rho)) def plot_geometric(rhos,xaxis=(0,500)): """Plot the geometric distribution for the given values of rho. 
p(k|rho) = (1-rho)**(k) x rho """ x = range(xaxis[0],xaxis[1]) f = figure(figsize=(5.3,3.5)) for r in rhos: plot(x,(1-r)**x*r) grid() title("Geometric Distribution $(1-p)^k p$") legend(["p=%3.3f" % r for r in rhos]) savefig("geometric_distribution.pdf") def plot_gaussian(mu,sigma): """Plot the contour of a general bivariate gaussian with mean mu and covariance matrix sigma.""" t = arange(-pi,pi,0.01) x = sin(t) y = cos(t) dd,vv = eig(sigma) A = vv*sqrt(dd) z = dot(vstack([x,y]).T,A) plot(z[:,0]+mu[0],z[:,1]+mu[1]); def plot_diagonal_gaussian(mu,lam,color=get_cmap()(0)): """Plot a gaussian with mean mu and diagonal precision lam.""" t = arange(-pi,pi,0.01) x = sin(t) y = cos(t) A = eye(2)*sqrt(1/lam) z = dot(vstack([x,y]).T,A) plot(z[:,0]+mu[0],z[:,1]+mu[1],'-',color=color); def plot_state(particle,t): active = where(particle.mstore.get_array(t)>0)[0] for c in active: U = particle.U.get(t,c) plot_diagonal_gaussian(U.mu,U.lam) def plot_state_with_data(particle,data,data_time,t): # TODO: Fix color plotting so Gaussian contours and data points ahve the # same color active = where(particle.mstore.get_array(t)>0)[0] for c in active: idx = where( logical_and( particle.c==c, logical_and( t >= arange(particle.T), particle.d>t ) ) )[0] color = get_cmap("flag")(c*3) plot(data[0,idx],data[1,idx],'x',color=color) U = particle.U.get(t,c) # print t,c,U.mu, U.lam plot_diagonal_gaussian(U.mu[0:2],U.lam[0:2],color=color) axis([-5, 5, -5, 5]) def main(): HAVE_LABELS = False parser = OptionParser() parser.add_option("-l", "--labels", dest="label_fn", help="load labels from FILE", default=None, metavar="FILE") parser.add_option("-n", "--use-particle", dest="particle_idx", help="load lables of particle IDX", default=0, metavar="IDX",type="int") parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print status messages to stdout") options,args = parser.parse_args() fn = args[0] if options.label_fn != None: HAVE_LABELS = True labels = 
array(load_file(options.label_fn)[options.particle_idx],dtype=int32) data_file = load_file(fn) data_raw = data_file[:,2:].T data_time = data_file[:,1].T*1000 num_dims = data_raw.shape[0] ion() # clf() # p = cPickle.load(open("aparticle.pkl",'rb')) # for t in range(1,500): # ioff() # clf() # plot_state_with_data(p,data_raw,data_time,t) # axis([-6,6,-3,3]) # draw() # raw_input() # return # for t in range(1,500): # ioff() # clf() # plot_scatter_2d(data_raw[:,max(0,t-10):t],labels[max(0,t-10):t]) # plot_state(p,t) # axis([-5,1,-3,3]) # draw() # raw_input() # #plot_geometric(array([0.03,0.02,0.015,0.010,0.005,0.001]),(0,300)) # #show() # return # plot_lifespan_histogram("aparticle.pkl") # show() if HAVE_LABELS: plot_pcs_against_time_labeled(data_raw,data_time,labels) matlab_plot_3d(data_raw,data_time,labels) else: plot_pcs_against_time(data_raw,data_time) show() if __name__ == "__main__": main()
jgasthaus/gpu_python
plotting.py
Python
bsd-2-clause
9,120
[ "Gaussian" ]
64d66a84bcebed33f385286bb559c9da8bb0562e88fcf4015b39fa9e8999e320
#-------------------------------------------------------------------------------
# Name:        xlwsf_parse
# Purpose:     Parse Microsoft webpage source for Excel functions and page
#              addresses
#
# Author:      Brian Skinn
#              bskinn@alum.mit.edu
#
# Created:     24 May 2016
# Copyright:   (c) Brian Skinn 2016
# License:     The MIT License; see "license.txt" for full license terms
#              and contributor agreement.
#
# This file is a helper script for rapid generation of an objects.inv
# inventory file for intersphinx cross-referencing of Excel worksheet
# functions.
#
# http://www.github.com/bskinn/intersphinx-xlwsf
#
#-------------------------------------------------------------------------------

import re
import sys
import time

# Anchor tags of the form:
#   a href="<uri>" class="..." title="<FNAME> ...">
# as found in the Microsoft Excel worksheet-function index page.
# Compiled once at module level instead of on every main() call.
_LINK_PATTERN = re.compile('a href="(?P<uri>[^"]+)" class="[^"]+" '
                           'title="(?P<fname>[^ ]+) [^"]+">', re.I)


def _read_source_text():
    """Return the contents of the file named in sys.argv[1].

    Prints a message and exits the process with status 1 when no filename
    was passed (IndexError from sys.argv) or the file cannot be opened
    (OSError, which covers FileNotFoundError).
    """
    try:
        # Explicit encoding: the original relied on the locale default,
        # which breaks on non-ASCII page sources under some platforms.
        with open(sys.argv[1], 'r', encoding='utf-8') as f:
            return f.read()
    except OSError:
        print("\nIndicated file not found. Exiting...\n")
        sys.exit(1)
    except IndexError:
        print("\nNo input filename passed. Exiting...\n")
        sys.exit(1)


def _extract_functions(datastr):
    """Return a dict mapping worksheet-function names to their page URIs.

    Only links whose title token is entirely uppercase are kept — Excel
    worksheet functions are always written in caps (SUM, VLOOKUP, ...),
    which filters out unrelated links on the page.
    """
    return {m.group("fname"): m.group("uri")
            for m in _LINK_PATTERN.finditer(datastr)
            if m.group("fname") == m.group("fname").upper()}


def _write_inventory(dic):
    """Write 'objects.txt', the plain-text precursor of a Sphinx objects.inv.

    Emits the standard four-line version-2 inventory header followed by one
    data line per function, sorted by function name for stable output.
    """
    with open('objects.txt', 'w', encoding='utf-8') as f:
        # First header line
        f.write("# Sphinx inventory version 2\n")
        # Second header line, with project name
        f.write("# Project: Excel WSF\n")
        # Third header line, with timestamp
        f.write("# Version: {0}\n".format(time.strftime('%Y-%m-%d %H:%M:%S')))
        # Fourth header line, verbatim
        f.write("# The remainder of this file is compressed using zlib.\n")
        # Dump the data lines
        for func, uri in sorted(dic.items()):
            f.write("{0} py:function 1 article/{1} -\n".format(func, uri))


def main():
    """Parse the page source named on the command line and emit objects.txt."""
    datastr = _read_source_text()
    _write_inventory(_extract_functions(datastr))


if __name__ == '__main__':
    main()
bskinn/intersphinx-xlwsf
xlwsf_parse.py
Python
mit
2,136
[ "Brian" ]
060dcd9972b6b57d2e8e87b94f96c286a88d7ffd4b9f8fbd9cab5b1f4ad07abd
# -*- coding: utf-8 -*-
# Tests for chempy.util.stoich: stoichiometric coefficient matrices and
# decomposition of radiolytic yields onto sets of reactions.

try:
    import numpy as np
except ImportError:
    # numpy is optional; the @requires decorator below skips tests without it.
    np = None
else:
    from ..stoich import (
        get_coeff_mtx,
        decompose_yields,
    )

from chempy.units import units_library, allclose, _sum
from ..testing import requires


@requires("numpy")
def test_get_coeff_mtx():
    """Coefficient matrix: rows are substances ('A','B','C'), columns reactions.

    Reactants enter with negative coefficients, products positive.
    """
    r = [({"A": 1}, {"B": 1}), ({"A": 1, "B": 1}, {"C": 2})]
    A = get_coeff_mtx("ABC", r)
    Aref = np.array([[-1, -1], [1, -1], [0, 2]])
    assert np.allclose(A, Aref)


@requires("numpy")
def test_decompose_yields_1():
    """Decompose water-radiolysis G-values onto six primary reactions.

    Reference rates and the net H2O consumption (-4.64) are taken from the
    literature fixture this test was built from.
    """
    from chempy import Reaction

    gamma_yields = {
        "OH-": 0.5,
        "H2O2": 0.7,
        "OH": 2.7,
        "H2": 0.45,
        "H": 0.66,
        "H+": 3.1,
        "HO2": 0.02,
        "e-(aq)": 2.6,
    }

    rxns = [
        Reaction({"H2O": 1}, {"H+": 1, "OH-": 1}),
        Reaction({"H2O": 1}, {"H+": 1, "e-(aq)": 1, "OH": 1}),
        Reaction({"H2O": 1}, {"H": 2, "H2O2": 1}, inact_reac={"H2O": 1}),
        Reaction({"H2O": 1}, {"H2": 1, "H2O2": 1}, inact_reac={"H2O": 1}),
        Reaction({"H2O": 1}, {"H2": 1, "OH": 2}, inact_reac={"H2O": 1}),
        Reaction({"H2O": 1}, {"H2": 3, "HO2": 2}, inact_reac={"H2O": 3}),
    ]

    k = decompose_yields(gamma_yields, rxns)
    k_ref = [0.5, 2.6, 0.33, 0.37, 0.05, 0.01]
    assert np.allclose(k, k_ref)

    # Net water balance implied by the fitted rates.
    G_H2O = sum(rxn.net_stoich(["H2O"])[0] * k[i] for i, rxn in enumerate(rxns))
    assert abs(G_H2O + 4.64) < 1e-3


@requires(units_library)
def test_decompose_yields__units_1():
    """Same decomposition as test_decompose_yields_1, but with per-100-eV units."""
    from chempy import Reaction
    from chempy.units import default_units as u

    gamma_yields = {
        "OH-": 0.5 * u.per100eV,
        "H2O2": 0.7 * u.per100eV,
        "OH": 2.7 * u.per100eV,
        "H2": 0.45 * u.per100eV,
        "H": 0.66 * u.per100eV,
        "H+": 3.1 * u.per100eV,
        "HO2": 0.02 * u.per100eV,
        "e-(aq)": 2.6 * u.per100eV,
    }

    rxns = [
        Reaction({"H2O": 1}, {"H+": 1, "OH-": 1}),
        Reaction({"H2O": 1}, {"H+": 1, "e-(aq)": 1, "OH": 1}),
        Reaction({"H2O": 1}, {"H": 2, "H2O2": 1}, inact_reac={"H2O": 1}),
        Reaction({"H2O": 1}, {"H2": 1, "H2O2": 1}, inact_reac={"H2O": 1}),
        Reaction({"H2O": 1}, {"H2": 1, "OH": 2}, inact_reac={"H2O": 1}),
        Reaction({"H2O": 1}, {"H2": 3, "HO2": 2}, inact_reac={"H2O": 3}),
    ]

    k = decompose_yields(gamma_yields, rxns)
    k_ref = [0.5, 2.6, 0.33, 0.37, 0.05, 0.01] * u.per100eV
    # units-aware comparison helpers from chempy.units
    assert allclose(k, k_ref)

    G_H2O = [rxn.net_stoich(["H2O"])[0] * k[i] for i, rxn in enumerate(rxns)]
    ref = 4.64 * u.per100eV
    assert abs((_sum(G_H2O) + ref) / ref) < 1e-3


@requires("numpy")
def test_decompose_yields_2():
    """Small exactly-solvable system: two reactions, two observed yields."""
    from chempy import Reaction

    yields = {"B": 3.0, "C": 24.0}
    rxns = [
        Reaction({"A": 1}, {"B": 1, "C": 1}, inact_reac={"A": 1}),
        Reaction({"A": 1}, {"C": 3}),
    ]
    k = decompose_yields(yields, rxns)
    k_ref = [3, 7]
    # element-wise relative comparison against the exact solution
    rtol = 1e-12
    for a, b in zip(k, k_ref):
        assert abs(a - b) < abs(a * rtol)
bjodah/aqchem
chempy/util/tests/test_stoich.py
Python
bsd-2-clause
2,987
[ "ChemPy" ]
c0f40e0754ad85b793832bad09dd72e11906343033b481607279117ab56ee715
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>

'''
Density fitting
===============

This module provides the fundamental functions to handle the 3-index tensors
(including the 3-center 2-electron AO and MO integrals, the Cholesky
decomposed integrals) required by the density fitting method or the
RI (resolution of identity) approximation.

Simple usage::

    >>> from pyscf import gto, dft
    >>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz')
    >>> mf = dft.RKS(mol).density_fit().run()
'''

from . import incore
from . import outcore
from . import addons
from .addons import load, aug_etb, DEFAULT_AUXBASIS, make_auxbasis, make_auxmol
from .df import DF, GDF, DF4C, GDF4C
from . import r_incore

def density_fit(obj, *args, **kwargs):
    '''Enable the density-fitting (RI) approximation of the 2e integrals on
    an SCF/MCSCF or post-HF object.

    This is a thin convenience wrapper: it simply delegates to the
    ``density_fit`` method the object itself provides, forwarding all
    positional and keyword arguments unchanged, and returns its result.
    '''
    df_method = obj.density_fit
    return df_method(*args, **kwargs)
gkc1000/pyscf
pyscf/df/__init__.py
Python
apache-2.0
1,540
[ "PySCF" ]
b12f7d8466fafcb118df874fb874267e07dbed93ca4cf5803c3934137b7bb2cd
"""Food and Animal entities for the evolution/reflex simulation.

Animals carry a DNA string that encodes gender (first digit) and the
weights of a NeuralNetwork brain (remaining digits); they move, eat,
and reproduce inside a `world` object that supplies all tuning
constants via `world.constants`.
"""
from __future__ import division
import math
from random import random, randint
from threading import Lock

from NeuralNetwork.NeuralNetwork import NeuralNetwork

# Full circle in radians; used to spread sensors evenly around the body.
TWO_PI = math.pi * 2


class Food(object):
    """A depletable food source with a smell radius proportional to its size."""

    def __init__(self, world, x, y, size):
        self._world = world
        self.x = x
        self.y = y
        self._size = size
        # RGB-like smell signature for food (green channel).
        self._smell = (0, 1, 0,)
        self._smell_size = self._size * self._world.constants.FOOD_SMELL_SIZE_RATIO
        # self.lock = Lock()

    def beating(self, value):
        """Take a bite of up to `value` from this food; shrinks the food.

        NOTE(review): returns the *requested* `value`, not `real_value`
        (the amount actually removed, clipped to the remaining size) —
        confirm whether callers should be credited only what was removed.
        """
        # self.lock.acquire()
        real_value = min(self.size, value)
        self.size -= real_value
        # self.lock.release()
        return value

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        # Size never goes negative; smell radius tracks size.
        self._size = max(0, value)
        self._smell_size = self._size * self._world.constants.FOOD_SMELL_SIZE_RATIO

    @property
    def smell_size(self):
        return self._smell_size

    @property
    def smell(self):
        return self._smell


class Gender:
    # Simple enumeration; gender is derived from the first DNA digit.
    FEMALE = 0
    MALE = 1


class Animal(object):
    """A simulated creature driven by a DNA-encoded neural network."""

    def __init__(self, world, dna=""):
        self.world = world
        self._dna = dna
        # Random spawn position within world bounds.
        self._x = randint(0, self.world.width)
        self._y = randint(0, self.world.height)
        self.size = self.world.constants.ANIMAL_SIZE
        self.angle = 0
        # RGB-like smell signature for animals (blue channel).
        self._smell = (0.0, 0.0, 1.0, )
        self.smell_size = 0
        self.sensor_values = []
        self._sensors_positions = []
        # Lazily-recomputed cache flag for sensor positions.
        self._sensors_positions_calculated = False
        self.energy = self.world.constants.ENERGY_FOR_BIRTH
        self.readiness_to_sex = 0
        self.close_females = []
        # Guards be_requested_for_sex() against concurrent suitors.
        self.lock = Lock()
        self.answer = []
        if not self._dna:
            # No DNA supplied: generate a random genome of DNA_LEN digits
            # in base DNA_BASE.
            self._dna = "".join([
                str(randint(0, self.world.constants.DNA_BASE-1))
                for _ in range(self.world.constants.DNA_LEN)
            ])
            print(self._dna)
        # NOTE(review): base is hard-coded to 4 here while the rest of the
        # class uses world.constants.DNA_BASE — confirm they always agree.
        self.gender = int(self._dna[0], base=4) % 2
        # All digits after the gender digit encode the brain weights.
        self.brain = Animal.create_brain(self._dna[1:], self.world.constants)

    @property
    def sensors_positions(self):
        # Recompute only when position/angle changed since the last read.
        if not self._sensors_positions_calculated:
            self._calculate_sensor_positions()
            self._sensors_positions_calculated = True
        return self._sensors_positions

    def _calculate_sensor_positions(self):
        """Place ANIMAL_SENSOR_COUNT sensors evenly on the body's perimeter."""
        self._sensors_positions = []
        angle_between_sensors = TWO_PI / self.world.constants.ANIMAL_SENSOR_COUNT
        sensor_angle = self.angle
        for i in range(self.world.constants.ANIMAL_SENSOR_COUNT):
            x = math.cos(sensor_angle) * self.size + self._x
            y = math.sin(sensor_angle) * self.size + self._y
            self._sensors_positions.append((x, y))
            sensor_angle += angle_between_sensors

    def update(self):
        """One simulation tick: think, pay upkeep, maybe mate, emit smell, move.

        The brain output `answer` is interpreted as
        [0]=forward speed, [1]=rotation, [2]=smell intensity in [-1, 1].
        """
        self.answer = self.brain.calculate(self.sensor_values)
        self.energy -= self.world.constants.ENERGY_FOR_EXIST
        if self.energy_fullness > self.world.constants.ENERGY_FULLNESS_TO_INCREASE_READINESS_TO_SEX:
            self.readiness_to_sex += self.world.constants.READINESS_TO_SEX_INCREMENT
        if self.can_request_for_sex():
            self._search_partner_and_try_to_sex()
        # Map answer[2] from [-1, 1] onto [0, MAX_ANIMAL_SMELL_SIZE].
        self.smell_size = (max(-1, self.answer[2]) + 1) / 2.0 * self.world.constants.MAX_ANIMAL_SMELL_SIZE
        self.move(self.answer[0], self.answer[1])

    def can_request_for_sex(self):
        # Only males initiate; both sides must be ready.
        return self.gender == Gender.MALE and self.is_ready_do_sex()

    def is_ready_do_sex(self):
        return self.readiness_to_sex >= self.world.constants.READINESS_TO_SEX_THRESHOLD

    def _search_partner_and_try_to_sex(self):
        # Try nearby females in order; stop at the first acceptance.
        for female in self.close_females:
            success = self._thread_safe_request_for_sex(female)
            if success:
                break

    def _thread_safe_request_for_sex(self, female):
        # Hold the female's lock so two males cannot mate with her at once.
        with female.lock:
            success = female.be_requested_for_sex(self)
        return success

    def be_requested_for_sex(self, male):
        """Accept the male if this animal is ready; returns True on mating."""
        if self.is_ready_do_sex():
            self.sex(male)
            return True
        return False

    def sex(mother, father):
        # NOTE: `mother` plays the role of `self` here (called on the female).
        child_count = randint(mother.world.constants.MIN_AMOUNT_OF_CHILDREN,
                              mother.world.constants.MAX_AMOUNT_OF_CHILDREN)
        # if it tries to birth more child than it can - bud so many as it can and die.
        if not mother.can_make_n_children(child_count):
            child_count = int(mother.energy / mother.world.constants.ENERGY_FOR_BIRTH)
            mother.energy = 0
        if not father.can_make_n_children(child_count):
            child_count = int(father.energy / mother.world.constants.ENERGY_FOR_BIRTH)
            father.energy = 0
        print("{}\n{}\n{}".format("="*10, mother.dna, father.dna))
        for _ in range(child_count):
            mother.make_child(father)
        # Both parents must recharge before mating again.
        mother.readiness_to_sex = 0
        father.readiness_to_sex = 0

    def can_make_n_children(self, child_count):
        return child_count * self.world.constants.ENERGY_FOR_BIRTH <= self.energy

    def make_child(mother, father):
        # NOTE: `mother` plays the role of `self` here.
        mother.energy -= mother.world.constants.ENERGY_FOR_BIRTH
        father.energy -= mother.world.constants.ENERGY_FOR_BIRTH
        child = Animal(mother.world, Animal.mix_dna(mother.dna, father.dna, mother.world.constants))
        print(child.dna)
        # Spawn the child near the mother.
        child.x = mother.x + randint(-30, 30)
        child.y = mother.y + randint(-30, 30)
        mother.world.add_animal(child)

    def eat(self, food):
        """Consume food up to EATING_VALUE, capped by remaining energy capacity."""
        value = min(self.world.constants.EATING_VALUE,
                    max(0, self.world.constants.ANIMAL_MAX_ENERGY - self.energy))
        value = food.beating(value)
        self.energy += value

    def move(self, move, rotate):
        """Advance by `move` along the current heading after turning by `rotate`.

        Movement costs energy proportional to total effort; it also
        invalidates the cached sensor positions.
        """
        self.energy -= (abs(move) + abs(rotate)) * self.world.constants.MOVE_DISTANCE_TO_CONSUMED_ENERGY_RATIO
        self._sensors_positions_calculated = False
        self.angle += rotate
        self._x += math.cos(self.angle) * move * 2.0
        self._y += math.sin(self.angle) * move * 2.0

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        self._x = value
        self._sensors_positions_calculated = False

    @property
    def y(self):
        return self._y

    @y.setter
    def y(self, value):
        self._y = value
        self._sensors_positions_calculated = False

    @property
    def smell(self):
        return self._smell

    @property
    def energy_fullness(self):
        # Fraction of maximum energy, in [0, 1] under normal operation.
        return self.energy / self.world.constants.ANIMAL_MAX_ENERGY

    @property
    def dna(self):
        return self._dna

    @staticmethod
    def create_brain(dna, world_constants):
        """Decode DNA digits into neural-network weights in [-1, 1].

        Each DNA_BRAIN_VALUE_LEN-digit group (base DNA_BASE) becomes one
        weight, centred and scaled by DNA_HALF_MAX_VALUE.
        """
        def dna_iter(dna):
            for i in range(0, len(dna), world_constants.DNA_BRAIN_VALUE_LEN):
                cur = dna[i:i+world_constants.DNA_BRAIN_VALUE_LEN]
                yield (int(cur, world_constants.DNA_BASE) - world_constants.DNA_HALF_MAX_VALUE) / world_constants.DNA_HALF_MAX_VALUE
        dna = dna_iter(dna)
        brain = NeuralNetwork(world_constants.NEURAL_NETWORK_SHAPE)
        for layer in brain:
            for neuron in layer:
                # NOTE(review): `dna.next()` is Python-2-only generator
                # syntax; Python 3 requires next(dna).
                neuron.w = [dna.next() for _ in range(len(neuron.w))]
        return brain

    # for debug
    @staticmethod
    def brain_to_dna(brain, world_constants):
        """Inverse of create_brain: serialize brain weights back to DNA digits."""
        def val_to_dna(x):
            x = max(0, int((x*world_constants.DNA_HALF_MAX_VALUE) + world_constants.DNA_HALF_MAX_VALUE))
            res = []
            while x:
                res.insert(0, str(x % world_constants.DNA_BASE))
                # NOTE(review): with `from __future__ import division`, `/=`
                # is true division and makes x a float — this looks like it
                # should be floor division `//=`; verify before relying on it.
                x /= world_constants.DNA_BASE
            return "".join(res)
        dna = []
        for layer in brain:
            for neuron in layer:
                for w in neuron.w:
                    dna.append(val_to_dna(w))
        return "".join(dna)

    @staticmethod
    def mutate_dna(dna, world_constants):
        """Flip each DNA digit with probability MUTATE_CHANCE.

        NOTE(review): bytearray(str) / str(bytearray) round-trip relies on
        Python 2 byte-string semantics; under Python 3 this raises/produces
        a repr string — confirm the target interpreter.
        """
        dna_ba = bytearray(dna)
        for i in range(len(dna_ba)):
            if random() < world_constants.MUTATE_CHANCE:
                dna_ba[i] = ord(str(randint(0, world_constants.DNA_BASE-1)))
        return str(dna_ba)

    @staticmethod
    def mix_dna(dna1, dna2, world_constants):
        """Single-point crossover of two genomes, followed by mutation."""
        m = randint(0, len(dna1))
        if randint(0, 1):
            return Animal.mutate_dna(dna1[:m] + dna2[m:], world_constants)
        else:
            return Animal.mutate_dna(dna2[:m] + dna1[m:], world_constants)
zshimanchik/iron-unconditioned-reflexes
iron-unconditioned-reflexes/animal.py
Python
mit
8,376
[ "NEURON" ]
90819dcd11ccfb69348760adedcf75e6b40d7021511e753ec19b884cea15ddf7
import os

from secomo.convRBM import *
from secomo.sequences import *
from secomo.utils import *


def tutorial(path):
    """Run the end-to-end SECOMO demo workflow.

    Trains a tiny convolutional RBM on the bundled Oct4 sample sequences,
    round-trips the model through disk, and writes every artifact (PFMs,
    sequence logos, density/violin plots, t-SNE scatters) under ``path``.
    """

    def _out(name):
        # Every output file lives below the caller-supplied directory.
        return os.path.join(path, name)

    # One-hot encoded sample sequences shipped with the package
    # (originally Biopython sequences).
    encoded = load_sample()

    # A single epoch is nowhere near enough to learn meaningful motifs,
    # but it keeps automated test runs fast.
    crbm = CRBM(num_motifs = 10, motif_length = 15, epochs = 1)
    crbm.fit(encoded)

    # Persist parameters + hyper-parameters, then reload to demonstrate
    # the save/load round trip.
    crbm.saveModel(_out('oct4_model_params.pkl'))
    crbm = CRBM.loadModel(_out('oct4_model_params.pkl'))

    # Position frequency matrices as a list of numpy arrays.
    crbm.getPFMs()

    # Dump all PFMs (default 'jaspar' format) and render one logo per motif.
    saveMotifs(crbm, path = _out('pfms'))
    createSeqLogos(crbm, path = _out('logos'))

    # A single logo can also be rendered from one PFM.
    first_pfm = crbm.getPFMs()[0]
    createSeqLogo(first_pfm, filename = _out('logo1.png'), fformat = "png")

    # Per-position motif match probabilities for the first 100 sequences.
    hit_probs = crbm.motifHitProbs(encoded[:100])

    # Positional enrichment of every motif across the same test sequences.
    positionalDensityPlot(crbm, encoded[:100], filename = _out('densityplot.png'))

    # t-SNE embedding of the sequences, plain and motif-abundance-augmented.
    embedding = runTSNE(crbm, encoded)
    tsneScatter({'Oct4': embedding}, filename = _out('tsnescatter.png'))
    tsneScatterWithPies(crbm, encoded, embedding,
                        filename = _out('tsnescatter_pies.png'))

    # Compare motif matches between two (here: arbitrary) sequence subsets.
    datasets = {'set1': encoded[:1500], 'set2': encoded[1500:]}
    violinPlotMotifMatches(crbm, datasets, filename = _out('violinplot.png'))
schulter/crbm
secomo/tutorial.py
Python
gpl-3.0
2,161
[ "Biopython" ]
010a3832529c82282169cd9a8e7c2f2ca8a82f66404b4f94e030dadbfa745885
# Read .Mat Files
#
# Plots cross-validation accuracy of single-contact HMM classifiers
# (Force vs. Area observations) as six figures, sweeping three factors:
# number of hidden states, skin taxel resolution, and time window.
# All accuracy values are hard-coded results from prior experiments.
#
# NOTE(review): most of the imports below (pylab, numpy, scipy, rospy,
# hrl_lib.viz/util, pickle, ghmm) are unused in this script — presumably
# kept for parity with sibling analysis scripts; verify before pruning.
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab

import scipy as scp
import scipy.ndimage as ni
import scipy.io

import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle

import ghmm

# Force HMM with No. of States
states = [5, 10, 15, 20]
acc_force_states = [87.14, 83.57, 89.29, 91.43]
acc_force_rf_states = [97.14, 94.29, 94.29, 94.29]
acc_force_rm_states = [80, 82.86, 94.29, 88.57]
acc_force_sf_states = [94.29, 80, 97.14, 91.43]
acc_force_sm_states = [77.14, 77.14, 71.43, 91.43]

mpu.figure(1)
pp.title('Performance of HMM with Force as Observations',fontsize='24')
pp.xlabel('No. of States',fontsize='24')
pp.ylabel('Cross-Validation Accuracy (%)',fontsize='24')
# One line per curve: total accuracy plus the four object categories.
pp.plot(states, acc_force_states, states, acc_force_rf_states, states, acc_force_rm_states, states, acc_force_sf_states, states, acc_force_sm_states, linewidth=3.0)
pp.legend(["Total", "Rigid-Fixed Category", "Rigid-Movable Category", "Soft-Fixed Category", "Soft-Movable Category"], loc=3)
pp.ylim([70,100])
# NOTE(review): 'True' is a *string* here (and below); any non-empty string
# is truthy so the grid is enabled, but the boolean True was likely intended.
pp.grid('True')

# Area HMM with No. of States
states = [5, 10, 15, 20]
acc_area_states = [68.57, 84.99, 86.43, 87.85]
acc_area_rf_states = [91.43, 94.29, 97.14, 94.29]
acc_area_rm_states = [57.14, 80, 85.71, 85.71]
acc_area_sf_states = [40, 74.29, 74.29, 82.86]
acc_area_sm_states = [85.71, 91.43, 88.57, 88.57]

mpu.figure(2)
pp.title('Performance of HMM with Area as Observations',fontsize='24')
pp.xlabel('No. of States',fontsize='24')
pp.ylabel('Cross-Validation Accuracy (%)',fontsize='24')
pp.plot(states, acc_area_states, states, acc_area_rf_states, states, acc_area_rm_states, states, acc_area_sf_states, states, acc_area_sm_states, linewidth=3.0)
pp.legend(["Total", "Rigid-Fixed Category", "Rigid-Movable Category", "Soft-Fixed Category", "Soft-Movable Category"], loc=4)
pp.ylim([40,100])
pp.grid('True')

# Force HMM with Resolution
resolution = [1, 6, 24, 96, 384]
acc_force_resolution = [78.57, 80, 73.57, 78.57, 83.57]
acc_force_rf_resolution = [91.43, 91.43, 91.43, 88.57, 94.29]
acc_force_rm_resolution = [80, 80, 65.71, 77.14, 82.86]
acc_force_sf_resolution = [51.43, 51.43, 48.57, 62.86, 80]
acc_force_sm_resolution = [91.43, 97.14, 88.57, 85.71, 77.14]

mpu.figure(3)
pp.title('Performance of HMM with Force as Observations',fontsize='24')
pp.xlabel('Skin Resolution (# of Taxels)',fontsize='24')
pp.ylabel('Cross-Validation Accuracy (%)',fontsize='24')
pp.plot(resolution, acc_force_resolution, resolution, acc_force_rf_resolution, resolution, acc_force_rm_resolution, resolution, acc_force_sf_resolution, resolution, acc_force_sm_resolution, linewidth=3.0)
pp.legend(["Total", "Rigid-Fixed Category", "Rigid-Movable Category", "Soft-Fixed Category", "Soft-Movable Category"], loc=4)
pp.ylim([60,100])
pp.grid('True')

# Area HMM with Resolution
resolution = [1, 6, 24, 96, 384]
acc_area_resolution = [25, 40.71, 60.71, 67.86, 84.99]
acc_area_rf_resolution = [0, 62.85, 74.29, 77.14, 94.29]
acc_area_rm_resolution = [25, 42.86, 28.57, 65.71, 80]
acc_area_sf_resolution = [0, 57.14, 51.43, 48.57, 74.29]
acc_area_sm_resolution = [0, 0, 88.57, 80, 91.43]

mpu.figure(4)
pp.title('Performance of HMM with Area as Observations',fontsize='24')
pp.xlabel('Skin Resolution (# of Taxels)',fontsize='24')
pp.ylabel('Cross-Validation Accuracy (%)',fontsize='24')
pp.plot(resolution, acc_area_resolution, resolution, acc_area_rf_resolution, resolution, acc_area_rm_resolution, resolution, acc_area_sf_resolution, resolution, acc_area_sm_resolution, linewidth=3.0)
pp.legend(["Total", "Rigid-Fixed Category", "Rigid-Movable Category", "Soft-Fixed Category", "Soft-Movable Category"], loc=4)
pp.ylim([0,100])
pp.grid('True')

# Force HMM with Time-Window
window = [400, 800, 1200]
acc_force_window = [80, 80, 83.57]
acc_force_rf_window = [88.57, 88.57, 94.29]
acc_force_rm_window = [71.43, 85.71, 82.86]
acc_force_sf_window = [88.57, 77.14, 80]
acc_force_sm_window = [71.43, 68.57, 77.14]

mpu.figure(5)
pp.title('Performance of HMM with Force as Observations',fontsize='24')
pp.xlabel('Time-Window (ms)',fontsize='24')
pp.ylabel('Cross-Validation Accuracy (%)',fontsize='24')
pp.plot(window, acc_force_window, window, acc_force_rf_window, window, acc_force_rm_window, window, acc_force_sf_window, window, acc_force_sm_window, linewidth=3.0)
pp.legend(["Total", "Rigid-Fixed Category", "Rigid-Movable Category", "Soft-Fixed Category", "Soft-Movable Category"], loc=2)
pp.ylim([65,100])
pp.grid('True')

# Area HMM with Time-Window
window = [400, 800, 1200]
acc_area_window = [72.14, 70.71, 84.99]
acc_area_rf_window = [80, 88.57, 94.29]
acc_area_rm_window = [77.14, 48.57, 80]
acc_area_sf_window = [42.86, 59.99, 74.29]
acc_area_sm_window = [88.57, 85.71, 91.43]

mpu.figure(6)
pp.title('Performance of HMM with Area as Observations',fontsize='24')
pp.xlabel('Time-Window (ms)',fontsize='24')
pp.ylabel('Cross-Validation Accuracy (%)',fontsize='24')
pp.plot(window, acc_area_window, window, acc_area_rf_window, window, acc_area_rm_window, window, acc_area_sf_window, window, acc_area_sm_window, linewidth=3.0)
pp.legend(["Total", "Rigid-Fixed Category", "Rigid-Movable Category", "Soft-Fixed Category", "Soft-Movable Category"], loc=4)
pp.ylim([40,100])
pp.grid('True')

# Display all six figures at once.
pp.show()
tapomayukh/projects_in_python
classification/Classification_with_HMM/Single_Contact_Classification/paper_related/state_accuracy_variation.py
Python
mit
5,407
[ "Mayavi" ]
f73d93c60a479e5acac641857005b986e672576824232bcbebe8918a8a33db41
# $Id$ # # Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """unit testing code for the EState fingerprinting validation values are from the paper (JCICS _35_ 1039-1045 (1995)) """ from __future__ import print_function import unittest import numpy from six import StringIO from rdkit import Chem from rdkit.Chem.EState import Fingerprinter class TestCase(unittest.TestCase): def _validate(self, vals, tol=1e-2, show=False): for smi, c, v in vals: mol = Chem.MolFromSmiles(smi) counts, vals = Fingerprinter.FingerprintMol(mol) counts = counts[numpy.nonzero(counts)] vals = vals[numpy.nonzero(vals)] if show: print(counts) print(vals) assert len(c) == len(counts), 'bad count len for smiles: %s' % (smi) assert len(v) == len(vals), 'bad val len for smiles: %s' % (smi) c = numpy.array(c) assert max(abs(c - counts)) < tol, 'bad count for SMILES: %s' % (smi) v = numpy.array(v) assert max(abs(v - vals)) < tol, 'bad val for SMILES: %s' % (smi) def test1_molecules(self): data = [ ('c1[nH]cnc1CC(N)C(O)=O', [1, 2, 1, 1, 1, 1, 1, 1, 1, 1], [0.26, 3.12, -0.86, -1.01, 0.67, 5.25, 2.71, 3.84, 8.42, 10.26]), ('NCCc1ccc(O)c(O)c1', [2, 3, 3, 1, 2], [1.26, 4.71, 0.75, 5.30, 17.97]), ] self._validate(data, show=False) def test_exampleCode(self): # We make sure that the example code runs from rdkit.TestRunner import redirect_stdout f = StringIO() with redirect_stdout(f): Fingerprinter._exampleCode() s = f.getvalue() self.assertIn('NCCc1ccc(O)c(O)c1', s) if __name__ == '__main__': unittest.main()
jandom/rdkit
rdkit/Chem/EState/UnitTestFingerprints.py
Python
bsd-3-clause
1,898
[ "RDKit" ]
a3670e2fbbcc00fdf93727fc9afb8504bb933bfca78468e26dc12d5944655fcb
import os import sys from bisect import bisect_left import neuron import numpy as np import CurrentGenerator import cPickle as pickle def data_records(dictionaryofvalues, path): import h5py import time timestr = time.strftime("%m.%d.%H:%M:%S") PATH_FLAT = '### FILL IN ###' EXPERIMENT_PATH = '### FILL IN ###' H5_PATH = PATH_FLAT + EXPERIMENT_PATH + \ '### FILL IN ###/{0}/data.{1}.{2}'.format(path, timestr, 'hdf5') print("Saving to: {0}".format(H5_PATH)) data_file = h5py.File('{}'.format(H5_PATH), 'w') for keys, values in dictionaryofvalues.iteritems(): saved = data_file.create_dataset('{0}'.format(keys), data=np.array(values), compression='gzip') data_file.close() def data_print_static(data): """ :rtype: Prints one line to Terminal """ sys.stdout.write("\r\x1b[K" + data) sys.stdout.flush() def take_closest(my_list, my_number): """ Assumes my_list is sorted. Returns closest value to my_number. If two numbers are equally close, return the smallest number. """ pos = bisect_left(my_list, my_number) if pos == 0: return my_list[0] if pos == len(my_list): return my_list[-1] before = my_list[pos - 1] after = my_list[pos] print("Before: {0}. Before Pos-1: {1}".format(before, pos - 1)) print("After: {0}. 
After Pos: {1}".format(after, pos)) if after - my_number < my_number - before: return pos else: return pos - 1 def find_opt(input_list, val): return min(range(len(input_list)), key=lambda i: abs(input_list[i] - val)) def init_simulation(): """Initialise simulation environment""" neuron.h.load_file("stdrun.hoc") neuron.h.load_file("import3d.hoc") print('Loading constants') neuron.h.load_file('constants.hoc') class Simulator: def __init__(self): # Creation Variables self.currentFlag = False self.recordings = [] self.stimuli = [] self.cell = [] """ :param time: simulation time :param sigmamax: sigmaMax used to determine Sigma and DeltaSigma :param sigmamin: sigmaMin used to determine Sigma and DeltaSigma :param i_e0: Injected current without noise """ self.time = 3000.0 self.sigmamax = 0.325 self.sigmamin = 0.215 self.i_e0 = 0.16 self.dt = 0.025 # Injection current self.playVector = [] self.current = [] # Recorded values self.rvoltage = [] self.rtime = [] self.rcurrent = [] # Optimization self.optimize = False self.sigmaopt = 0.15 self.variance = [] self.varPlot = [] self.sigmaoptPlot = [] self.deltasigma = 0.005 self.spks = [] self.hz = 0.0 self.RandomSeed = 777 # Current generating class self.cg = CurrentGenerator.CurrentGenerator def create_cell(self, add_synapses=True): # Load morphology """ Creates the cell in Neuron :return: Cell :rtype: Hoc """ neuron.h.load_file("morphology.hoc") # Load biophysics neuron.h.load_file("biophysics.hoc") # Load main cell template neuron.h.load_file("template.hoc") # Instantiate the cell from the template self.cell = neuron.h.cADpyr232_L5_TTPC1_0fb1ca4724(1 if add_synapses else 0) return self.cell def create_stimuli(self): """ Create stimulus input :return: Current Clamp :rtype: Neuron <HOC> Object """ self.stimuli = neuron.h.IClamp(0.5, sec=self.cell.soma[0]) self.stimuli.delay = 0 self.stimuli.dur = 1e9 return self.stimuli def create_current(self): """ Generate the noisy current needed for injection """ cg = 
CurrentGenerator.CurrentGenerator(time=self.time, i_e0=self.i_e0, sigmaMax=self.sigmamax, sigmaMin=self.sigmamin, sigmaOpt=self.sigmaopt, seed=self.RandomSeed, optimize_flag=False) self.current = [x for x in cg.generate_current()] self.playVector = neuron.h.Vector(np.size(self.current)) for k in xrange(np.size(self.current)): self.playVector.set(k, self.current[k]) self.currentFlag = False def create_recordings(self): """ Generates the Dictionary and Vectors used to store Neuron data :return: Time, Voltage, Current :rtype: Dictionary ['time', 'voltage', 'current'] """ self.recordings = {'time': neuron.h.Vector(), 'voltage': neuron.h.Vector(), 'current': neuron.h.Vector()} self.recordings['current'].record(self.stimuli._ref_amp, 0.1) self.recordings['time'].record(neuron.h._ref_t, 0.1) self.recordings['voltage'].record(self.cell.soma[0](0.5)._ref_v, 0.1) return self.recordings def record_recordings(self): self.rtime = np.array(self.recordings['time']) self.rvoltage = np.array(self.recordings['voltage']) self.rcurrent = np.array(self.recordings['current']) recordings_dir = 'python_recordings' soma_voltage_filename = os.path.join( recordings_dir, 'soma_voltage_step.dat') np.savetxt( soma_voltage_filename, np.transpose( np.vstack(( self.rtime, self.rvoltage, self.rcurrent)))) def run_step(self, time): self.time = time neuron.h.tstop = self.time self.create_current() self.playVector.play(self.stimuli._ref_amp, neuron.h.dt) print('Running for %f ms' % neuron.h.tstop) neuron.h.run() self.rvoltage = np.array(self.recordings['voltage']) self.rcurrent = np.array(self.recordings['current']) if not self.optimize: if self.time >= 50000: data_records(self.recordings, "Train") else: data_records(self.recordings, "Test") def brute_optimize_ie(self): while self.hz < 3.5 or self.hz > 5.5: self.optmize_ie() self.spks = self.cg( voltage=self.rvoltage[1000 / 0.1:]).detect_spikes() if self.spks.size: self.hz = len(self.spks) / (self.time / 1000.0) else: self.hz = 0.0 
data_print_static("i_e0: {0}, Hz: {1}" .format(self.i_e0, self.hz)) if self.hz <= 3.5: self.i_e0 += 0.05 elif self.hz > 5.5: self.i_e0 -= 0.05 current_paras = {"i_e0": self.i_e0} pickle.dump(current_paras, open( "### FILL IN ###", "wb")) CurrentGenerator.plotcurrent(self.current) def optmize_ie(self): self.time = 15000 self.run_step(self.time) def run_optimize_sigma(self): self.optimize_play_vector() self.playVector.play(self.stimuli._ref_amp, neuron.h.dt) neuron.h.run() self.rvoltage = np.array(self.recordings['voltage']) self.variance = self.cg(voltage=self.rvoltage[ 1000 / 0.1:]).sub_threshold_var() def optimize_play_vector(self): self.time = 10000 neuron.h.tstop = self.time self.i_e0 = 0.0 # Be sure to set the flag here cg = CurrentGenerator.CurrentGenerator(time=self.time, sigmaOpt=self.sigmaopt, optimize_flag=True) self.current = [x for x in cg.generate_current()] assert (np.size(self.current) == self.time / self.dt) self.playVector = neuron.h.Vector(np.size(self.current)) for k in xrange(np.size(self.current)): self.playVector.set(k, self.current[k]) return self.playVector def brute_optimize_sigma(self): n = 1 while self.variance < 7 or not self.variance: self.run_optimize_sigma() data_print_static("Optimizing Sigma: {0}. " "Current Sigma: {1}. Current Var: {2}." .format(n, self.sigmaopt, self.variance)) print("") self.varPlot.append(self.variance) self.sigmaoptPlot.append(self.sigmaopt) self.sigmaopt += self.deltasigma n += 1 sminIndex = find_opt(self.varPlot, 3) smaxIndex = find_opt(self.varPlot, 7) self.sigmamin = self.sigmaoptPlot[sminIndex] self.sigmamax = self.sigmaoptPlot[smaxIndex] self.plot_trace(self.rvoltage[1000 / 0.1:]) if self.varPlot[sminIndex] > 4: raise Exception("Sigma Minimum is above acceptable range. " "Initiate fitting with smaller Sigma") elif self.varPlot[sminIndex] < 2: raise Exception("Sigma Minimum is below acceptable range. 
" "Initiate fitting with smaller d_sigma") if 5 > self.varPlot[smaxIndex] > 9: raise Exception("Sigma Maximum is out of bounds. " "Initiate fitting with smaller d_sigma.") print("") print("Optimization Complete: Sigma Min: {0}. Sigma Max {1}.".format( self.sigmamin, self.sigmamax)) sigmas = {"sigmamin": self.sigmamin, "sigmamax": self.sigmamax} pickle.dump(sigmas, open( "### FILL IN ###", "wb")) def plot_trace(self, val): plot_traces = True if plot_traces: import pylab pylab.figure() pylab.plot(val) pylab.ylabel('Vm (mV)') pylab.show() def main(self, optimize=False): """Main""" self.optimize = optimize init_simulation() self.cell = self.create_cell(add_synapses=False) self.stimuli = self.create_stimuli() self.recordings = self.create_recordings() neuron.h.tstop = self.time neuron.h.cvode_active(0) if optimize: self.brute_optimize_sigma() self.brute_optimize_ie() self.plot_trace(np.array(self.recordings['voltage'])) self.plot_trace(np.array(self.recordings['current'])) else: self.run_step(130000) n = 0 while n < 5: self.run_step(21000) n += 1 Simulator().main(optimize=False)
mjaquier/NestModelSimplification
Simulator.py
Python
mit
11,090
[ "NEURON" ]
a71488cbcd97a2434e087517b24c0c9f2abcc97f6461135aded2641f8da86c6f
"""milkyway URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from .views import index urlpatterns = [ url(r'^$', index, name='index'), url(r'^admin/', admin.site.urls), url(r'^stars/', include('stars.urls', namespace='stars')), url(r'^galaxies/', include('galaxies.urls', namespace='galaxy')), url(r'^stellar_objects/', include('stellar_objects.urls', namespace='stellar')), url(r'^constellations/', include('constellations.urls', namespace='constellation')), url(r'^accounts/', include('registration.backends.default.urls')), ]
craigderington/SagittariusA
milkyway/milkyway/urls.py
Python
gpl-3.0
1,213
[ "Galaxy" ]
04670d6125d90c7dcc4388c6ba5d2bfeee6b0fc3e088f29ebd2408d86ffa4c24
# coding: utf8 # Copyright 2014-2017 CERN. This software is distributed under the # terms of the GNU General Public Licence version 3 (GPL Version 3), # copied verbatim in the file LICENCE.md. # In applying this licence, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization or # submit itself to any jurisdiction. # Project website: http://blond.web.cern.ch/ """ Unittest for llrf.filters :Authors: **Helga Timko** """ import unittest import numpy as np from scipy.constants import e from blond.llrf.signal_processing import moving_average, modulator from blond.llrf.signal_processing import polar_to_cartesian, cartesian_to_polar from blond.llrf.signal_processing import comb_filter, low_pass_filter from blond.llrf.signal_processing import rf_beam_current, feedforward_filter from blond.llrf.signal_processing import feedforward_filter_TWC3, \ feedforward_filter_TWC4, feedforward_filter_TWC5 from blond.llrf.impulse_response import SPS3Section200MHzTWC, \ SPS4Section200MHzTWC, SPS5Section200MHzTWC from blond.input_parameters.ring import Ring from blond.beam.beam import Beam, Proton from blond.beam.profile import Profile, CutOptions from blond.beam.distributions import bigaussian from blond.input_parameters.rf_parameters import RFStation class TestIQ(unittest.TestCase): # Run before every test def setUp(self, f_rf=200.1e6, T_s=5e-10, n=1000): self.f_rf = f_rf # initial frequency in Hz self.T_s = T_s # sampling time self.n = n # number of points # Run after every test def tearDown(self): del self.f_rf del self.T_s del self.n def test_1(self): # Define signal in range (-pi, pi) phases = np.pi*(np.fmod(2*np.arange(self.n)*self.f_rf*self.T_s, 2) - 1) signal = np.cos(phases) + 1j*np.sin(phases) # From IQ to polar amplitude, phase = cartesian_to_polar(signal) # Drop some digits to avoid rounding errors amplitude = np.around(amplitude, 12) phase = np.around(phase, 12) phases = np.around(phases, 12) 
self.assertSequenceEqual(amplitude.tolist(), np.ones(self.n).tolist(), msg="In TestIQ test_1, amplitude is not correct") self.assertSequenceEqual(phase.tolist(), phases.tolist(), msg="In TestIQ test_1, phase is not correct") def test_2(self): # Define signal in range (-pi, pi) phase = np.pi*(np.fmod(2*np.arange(self.n)*self.f_rf*self.T_s, 2) - 1) amplitude = np.ones(self.n) # From polar to IQ signal = polar_to_cartesian(amplitude, phase) # Drop some digits to avoid rounding errors signal_real = np.around(signal.real, 12) signal_imag = np.around(signal.imag, 12) theor_real = np.around(np.cos(phase), 12) # what it should be theor_imag = np.around(np.sin(phase), 12) # what it should be self.assertSequenceEqual(signal_real.tolist(), theor_real.tolist(), msg="In TestIQ test_2, real part is not correct") self.assertSequenceEqual(signal_imag.tolist(), theor_imag.tolist(), msg="In TestIQ test_2, imaginary part is not correct") def test_3(self): # Define signal in range (-pi, pi) phase = np.pi*(np.fmod(2*np.arange(self.n)*self.f_rf*self.T_s, 2) - 1) amplitude = np.ones(self.n) # Forwards and backwards transform signal = polar_to_cartesian(amplitude, phase) amplitude_new, phase_new = cartesian_to_polar(signal) # Drop some digits to avoid rounding errors phase = np.around(phase, 11) amplitude = np.around(amplitude, 11) amplitude_new = np.around(amplitude_new, 11) phase_new = np.around(phase_new, 11) self.assertSequenceEqual(phase.tolist(), phase_new.tolist(), msg="In TestIQ test_3, phase is not correct") self.assertSequenceEqual(amplitude.tolist(), amplitude_new.tolist(), msg="In TestIQ test_3, amplitude is not correct") def test_4(self): # Define signal in range (-pi, pi) phase = np.pi*(np.fmod(2*np.arange(self.n)*self.f_rf*self.T_s, 2) - 1) signal = np.cos(phase) + 1j*np.sin(phase) # Forwards and backwards transform amplitude, phase = cartesian_to_polar(signal) signal_new = polar_to_cartesian(amplitude, phase) # Drop some digits to avoid rounding errors signal_real = 
np.around(signal.real, 11) signal_imag = np.around(signal.imag, 11) signal_real_2 = np.around(np.real(signal_new), 11) signal_imag_2 = np.around(np.imag(signal_new), 11) self.assertSequenceEqual(signal_real.tolist(), signal_real_2.tolist(), msg="In TestIQ test_4, real part is not correct") self.assertSequenceEqual(signal_imag.tolist(), signal_imag_2.tolist(), msg="In TestIQ test_4, imaginary part is not correct") class TestModulator(unittest.TestCase): def setUp(self, f_rf=200.1e6, f_0=200.222e6, T_s=5e-10, n=1000): self.f_rf = f_rf # initial frequency in Hz self.f_0 = f_0 # final frequency in Hz self.T_s = T_s # sampling time self.n = n # number of points def test_v1(self): # Forwards and backwards transformation of a sine wave signal = np.cos(2*np.pi*np.arange(self.n)*self.f_rf*self.T_s) \ + 1j*np.sin(2*np.pi*np.arange(self.n)*self.f_rf*self.T_s) signal_1 = modulator(signal, self.f_rf, self.f_0, self.T_s) signal_2 = modulator(signal_1, self.f_0, self.f_rf, self.T_s) # Drop some digits to avoid rounding errors signal = np.around(signal, 12) signal_2 = np.around(signal_2, 12) self.assertSequenceEqual(signal.tolist(), signal_2.tolist(), msg="In TestModulator, initial and final signals do not match") def test_v2(self): signal = np.array([42]) with self.assertRaises(RuntimeError, msg="In TestModulator, no exception for wrong signal length"): modulator(signal, self.f_rf, self.f_0, self.T_s) class TestRFCurrent(unittest.TestCase): def setUp(self): C = 2*np.pi*1100.009 # Ring circumference [m] gamma_t = 18.0 # Gamma at transition alpha = 1/gamma_t**2 # Momentum compaction factor p_s = 25.92e9 # Synchronous momentum at injection [eV] N_m = 1e5 # Number of macro-particles for tracking N_b = 1.0e11 # Bunch intensity [ppb] # Set up machine parameters self.ring = Ring(C, alpha, p_s, Proton(), n_turns=1) self.rf = RFStation(self.ring, 4620, 4.5e6, 0) # RF-frequency at which to compute beam current self.omega = 2*np.pi*200.222e6 # Create Gaussian beam self.beam = 
Beam(self.ring, N_m, N_b) self.profile = Profile(self.beam, CutOptions=CutOptions(cut_left=-1e-9, cut_right=6e-9, n_slices=100)) # Test charge distribution with analytic functions # Compare with theoretical value def test_1(self): t = self.profile.bin_centers self.profile.n_macroparticles \ = 2600*np.exp(-(t-2.5e-9)**2 / (2*0.5e-9)**2) rf_current = rf_beam_current(self.profile, self.omega, self.ring.t_rev[0], lpf=False) rf_current_real = np.around(rf_current.real, 12) rf_current_imag = np.around(rf_current.imag, 12) rf_theo_real = 2*self.beam.ratio*self.profile.Beam.Particle.charge*e\ * 2600*np.exp(-(t-2.5e-9)**2/(2*0.5*1e-9)**2)\ * np.cos(self.omega*t) rf_theo_real = np.around(rf_theo_real, 12) rf_theo_imag = 2*self.beam.ratio*self.profile.Beam.Particle.charge*e\ * 2600*np.exp(-(t-2.5e-9)**2/(2*0.5*1e-9)**2)\ * np.sin(self.omega*t) rf_theo_imag = np.around(rf_theo_imag, 12) self.assertListEqual(rf_current_real.tolist(), rf_theo_real.tolist(), msg="In TestRfCurrent test_1, mismatch in real part of RF current") self.assertListEqual(rf_current_imag.tolist(), rf_theo_imag.tolist(), msg="In TestRfCurrent test_1, mismatch in real part of RF current") # Test charge distribution of a bigaussian profile, without LPF # Compare to simulation data def test_2(self): bigaussian(self.ring, self.rf, self.beam, 3.2e-9/4, seed=1234, reinsertion=True) self.profile.track() rf_current = rf_beam_current(self.profile, self.omega, self.ring.t_rev[0], lpf=False) Iref_real = np.array( [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 4.17276535e-13, 4.58438681e-13, 2.48023976e-13, 5.29812878e-13, 2.79735891e-13, 0.00000000e+00, 1.21117141e-12, 9.32525023e-13, 3.16481489e-13, 6.39337176e-13, 0.00000000e+00, 0.00000000e+00, 4.08671434e-12, 4.92294314e-12, 6.56965575e-12, 1.06279981e-11, 1.36819774e-11, 2.16648778e-11, 3.09847740e-11, 3.52971849e-11, 4.70378842e-11, 4.53538351e-11, 4.87255679e-11, 5.36705228e-11, 5.13609263e-11, 4.32833543e-11, 3.41417624e-11, 1.57452091e-11, 
-1.09005668e-11, -4.60465929e-11, -9.12872553e-11, -1.48257171e-10, -2.08540597e-10, -2.77630608e-10, -3.72157667e-10, -4.56272786e-10, -5.57978710e-10, -6.46554672e-10, -7.48006839e-10, -8.21493943e-10, -9.37522966e-10, -1.03729659e-09, -1.06159943e-09, -1.08434837e-09, -1.15738771e-09, -1.17887328e-09, -1.17146946e-09, -1.10964397e-09, -1.10234198e-09, -1.08852433e-09, -9.85866185e-10, -9.11727492e-10, -8.25604179e-10, -7.34122902e-10, -6.47294094e-10, -5.30372699e-10, -4.40357820e-10, -3.61273445e-10, -2.76871612e-10, -2.02227691e-10, -1.45430219e-10, -8.88675652e-11, -4.28984525e-11, -8.85451321e-12, 1.79026289e-11, 3.48384211e-11, 4.50190278e-11, 5.62413467e-11, 5.27322593e-11, 4.98163111e-11, 4.83288193e-11, 4.18200848e-11, 3.13334266e-11, 2.44082106e-11, 2.12572803e-11, 1.37397871e-11, 1.00879346e-11, 7.78502206e-12, 4.00790815e-12, 2.51830412e-12, 1.91301488e-12, 0.00000000e+00, 9.58518921e-13, 3.16123806e-13, 1.24116545e-12, 1.20821671e-12, 5.82952178e-13, 8.35917228e-13, 5.27285250e-13, 4.93205915e-13, 0.00000000e+00, 2.06937011e-13, 1.84618141e-13, 1.60868490e-13, 0.00000000e+00, 1.09822742e-13]) I_real = np.around(rf_current.real, 14) # round Iref_real = np.around(Iref_real, 14) self.assertSequenceEqual(I_real.tolist(), Iref_real.tolist(), msg="In TestRFCurrent test_2, mismatch in real part of RF current") Iref_imag = np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -4.86410815e-13, -4.47827158e-13, -2.02886432e-13, -3.60573852e-13, -1.56290206e-13, 0.00000000e+00, -4.19433613e-13, -2.33465744e-13, -5.01823105e-14, -4.43075921e-14, 0.00000000e+00, 0.00000000e+00, 8.07144709e-13, 1.43192280e-12, 2.55659168e-12, 5.25480064e-12, 8.33669524e-12, 1.59729353e-11, 2.73609511e-11, 3.71844853e-11, 5.92134758e-11, 6.87376280e-11, 9.02226570e-11, 1.24465616e-10, 1.55478762e-10, 1.84035433e-10, 2.37241518e-10, 2.86677989e-10, 3.28265272e-10, 3.77882012e-10, 4.29727720e-10, 4.83759029e-10, 5.13978173e-10, 5.41841031e-10, 5.91537968e-10, 
6.00658643e-10, 6.13928028e-10, 5.96367636e-10, 5.76920099e-10, 5.25297875e-10, 4.89104065e-10, 4.29776324e-10, 3.33901906e-10, 2.38690921e-10, 1.49673305e-10, 4.78223853e-11, -5.57081558e-11, -1.51374774e-10, -2.50724894e-10, -3.50731761e-10, -4.16547058e-10, -4.83765618e-10, -5.36075032e-10, -5.74421794e-10, -6.05459147e-10, -5.91794283e-10, -5.88179055e-10, -5.83222843e-10, -5.49774151e-10, -5.08571646e-10, -4.86623358e-10, -4.33179012e-10, -3.73737133e-10, -3.37622742e-10, -2.89119788e-10, -2.30660798e-10, -1.85597518e-10, -1.66348322e-10, -1.19981335e-10, -9.07232680e-11, -7.21467862e-11, -5.18977454e-11, -3.25510912e-11, -2.12524272e-11, -1.54447488e-11, -8.24107056e-12, -4.90052047e-12, -2.96720377e-12, -1.13551262e-12, -4.79152734e-13, -1.91861296e-13, 0.00000000e+00, 7.31481456e-14, 5.23883203e-14, 3.19951675e-13, 4.27870459e-13, 2.66236636e-13, 4.74712082e-13, 3.64260145e-13, 4.09222572e-13, 0.00000000e+00, 2.44654594e-13, 2.61906356e-13, 2.77128356e-13, 0.00000000e+00, 3.01027843e-13]) I_imag = np.around(rf_current.imag, 14) # round Iref_imag = np.around(Iref_imag, 14) self.assertSequenceEqual(I_imag.tolist(), Iref_imag.tolist(), msg="In TestRFCurrent test_2, mismatch in imaginary part of" + " RF current") # Test charge distribution of a bigaussian profile, with LPF # Compare to simulation data def test_3(self): bigaussian(self.ring, self.rf, self.beam, 3.2e-9/4, seed=1234, reinsertion=True) self.profile.track() self.assertEqual(len(self.beam.dt), np.sum(self.profile.n_macroparticles), "In" + " TestBeamCurrent: particle number mismatch in Beam vs Profile") # RF current calculation with low-pass filter rf_current = rf_beam_current(self.profile, self.omega, self.ring.t_rev[0], lpf=True) Iref_real = np.array([-7.1511909689e-12, -7.1512708858e-12, -7.1513482919e-12, -7.1514232388e-12, -7.1514957777e-12, -7.1515659593e-12, -7.1516338342e-12, -7.1516994523e-12, -7.1517628634e-12, -7.1518241168e-12, -7.1518832613e-12, -7.1519403454e-12, -7.1519954170e-12, 
-7.1520485239e-12, -7.1520997131e-12, -7.1521490313e-12, -7.1521965247e-12, -7.1522422392e-12, -7.1522862199e-12, -7.1523285117e-12, -7.1523691587e-12, -7.1524082048e-12, -7.1524456933e-12, -7.1524816668e-12, -7.1525161676e-12, -7.1525492372e-12, -7.1525809169e-12, -7.1526112471e-12, -7.1526402679e-12, -7.1526680187e-12, -7.1526945383e-12, -7.1527198650e-12, -7.1527440365e-12, -7.1527670898e-12, -7.1527890615e-12, -7.1528099874e-12, -7.1528299028e-12, -7.1528488424e-12, -7.1528668402e-12, -7.1528839295e-12, -7.1529001433e-12, -7.1529155136e-12, -7.1529300719e-12, -7.1529438491e-12, -7.1529568755e-12, -7.1529691807e-12, -7.1529807935e-12, -7.1529917422e-12, -7.1530020545e-12, -7.1530117574e-12, -7.1530208772e-12, -7.1530294395e-12, -7.1530374694e-12, -7.1530449913e-12, -7.1530520287e-12, -7.1530586049e-12, -7.1530647421e-12, -7.1530704621e-12, -7.1530757860e-12, -7.1530807343e-12, -7.1530853267e-12, -7.1530895824e-12, -7.1530935199e-12, -7.1530971572e-12, -7.1531005114e-12, -7.1531035991e-12, -7.1531064365e-12, -7.1531090389e-12, -7.1531114211e-12, -7.1531135972e-12, -7.1531155809e-12, -7.1531173853e-12, -7.1531190226e-12, -7.1531205049e-12, -7.1531218433e-12, -7.1531230488e-12, -7.1531241314e-12, -7.1531251010e-12, -7.1531259666e-12, -7.1531267370e-12, -7.1531274203e-12, -7.1531280242e-12, -7.1531285560e-12, -7.1531290223e-12, -7.1531294297e-12, -7.1531297839e-12, -7.1531300904e-12, -7.1531303544e-12, -7.1531305805e-12, -7.1531307730e-12, -7.1531309360e-12, -7.1531310731e-12, -7.1531311875e-12, -7.1531312824e-12, -7.1531313603e-12, -7.1531314238e-12, -7.1531314750e-12, -7.1531315159e-12, -7.1531315482e-12, -7.1531315733e-12]) np.testing.assert_allclose(rf_current.real, Iref_real, rtol=1e-7, atol=0, err_msg="In TestRFCurrent test_3, mismatch in real part of RF current") Iref_imag = np.array([-2.1797211489e-12, -2.1796772456e-12, -2.1796347792e-12, -2.1795937182e-12, -2.1795540314e-12, -2.1795156879e-12, -2.1794786570e-12, -2.1794429085e-12, -2.1794084122e-12, 
-2.1793751384e-12, -2.1793430575e-12, -2.1793121404e-12, -2.1792823581e-12, -2.1792536822e-12, -2.1792260843e-12, -2.1791995365e-12, -2.1791740112e-12, -2.1791494811e-12, -2.1791259193e-12, -2.1791032992e-12, -2.1790815944e-12, -2.1790607792e-12, -2.1790408280e-12, -2.1790217154e-12, -2.1790034169e-12, -2.1789859077e-12, -2.1789691639e-12, -2.1789531618e-12, -2.1789378779e-12, -2.1789232894e-12, -2.1789093736e-12, -2.1788961083e-12, -2.1788834718e-12, -2.1788714425e-12, -2.1788599995e-12, -2.1788491222e-12, -2.1788387903e-12, -2.1788289840e-12, -2.1788196838e-12, -2.1788108708e-12, -2.1788025262e-12, -2.1787946320e-12, -2.1787871702e-12, -2.1787801236e-12, -2.1787734750e-12, -2.1787672079e-12, -2.1787613061e-12, -2.1787557538e-12, -2.1787505357e-12, -2.1787456369e-12, -2.1787410427e-12, -2.1787367390e-12, -2.1787327121e-12, -2.1787289486e-12, -2.1787254356e-12, -2.1787221605e-12, -2.1787191111e-12, -2.1787162758e-12, -2.1787136430e-12, -2.1787112020e-12, -2.1787089419e-12, -2.1787068527e-12, -2.1787049244e-12, -2.1787031475e-12, -2.1787015131e-12, -2.1787000122e-12, -2.1786986365e-12, -2.1786973779e-12, -2.1786962288e-12, -2.1786951818e-12, -2.1786942299e-12, -2.1786933662e-12, -2.1786925846e-12, -2.1786918789e-12, -2.1786912433e-12, -2.1786906724e-12, -2.1786901610e-12, -2.1786897043e-12, -2.1786892977e-12, -2.1786889367e-12, -2.1786886175e-12, -2.1786883361e-12, -2.1786880890e-12, -2.1786878729e-12, -2.1786876847e-12, -2.1786875215e-12, -2.1786873806e-12, -2.1786872597e-12, -2.1786871564e-12, -2.1786870686e-12, -2.1786869946e-12, -2.1786869325e-12, -2.1786868808e-12, -2.1786868381e-12, -2.1786868031e-12, -2.1786867746e-12, -2.1786867517e-12, -2.1786867335e-12, -2.1786867192e-12, -2.1786867081e-12]) np.testing.assert_allclose(rf_current.imag, Iref_imag, rtol=1e-7, atol=0, err_msg="In TestRFCurrent test_3, mismatch in imaginary part of RF current") # Test RF beam current on coarse grid integrated from fine grid # Compare to simulation data for peak RF current def 
test_4(self): # Create a batch of 100 equal, short bunches bunches = 100 T_s = 5*self.rf.t_rev[0]/self.rf.harmonic[0, 0] N_m = int(1e5) N_b = 2.3e11 bigaussian(self.ring, self.rf, self.beam, 0.1e-9, seed=1234, reinsertion=True) beam2 = Beam(self.ring, bunches*N_m, bunches*N_b) bunch_spacing = 5*self.rf.t_rf[0, 0] buckets = 5*bunches for i in range(bunches): beam2.dt[i*N_m:(i+1)*N_m] = self.beam.dt + i*bunch_spacing beam2.dE[i*N_m:(i+1)*N_m] = self.beam.dE profile2 = Profile(beam2, CutOptions=CutOptions(cut_left=0, cut_right=bunches*bunch_spacing, n_slices=1000*buckets)) profile2.track() tot_charges = np.sum(profile2.n_macroparticles)/\ beam2.n_macroparticles*beam2.intensity self.assertAlmostEqual(tot_charges, 2.3000000000e+13, 9) # Calculate fine- and coarse-grid RF current rf_current_fine, rf_current_coarse = rf_beam_current(profile2, self.rf.omega_rf[0, 0], self.ring.t_rev[0], lpf=False, downsample={'Ts': T_s, 'points': self.rf.harmonic[0, 0]/5}) rf_current_coarse /= T_s # Peak RF current on coarse grid peak_rf_current = np.max(np.absolute(rf_current_coarse)) self.assertAlmostEqual(peak_rf_current, 2.9285808008, 7) class TestComb(unittest.TestCase): def test_1(self): y = np.random.rand(42) self.assertListEqual(y.tolist(), comb_filter(y, y, 15/16).tolist(), msg="In TestComb test_1, filtered signal not correct") def test_2(self): t = np.arange(0, 2*np.pi, 2*np.pi/120) y = np.cos(t) # Shift cosine by quarter period x = np.roll(y, int(len(t)/4)) # Drop some digits to avoid rounding errors result = np.around(comb_filter(y, x, 0.5), 12) result_theo = np.around(np.sin(np.pi/4 + t)/np.sqrt(2), 12) self.assertListEqual(result.tolist(), result_theo.tolist(), msg="In TestComb test_2, filtered signal not correct") class TestLowPass(unittest.TestCase): def test_1(self): # Example based on SciPy.org filtfilt t = np.linspace(0, 1.0, 2001) xlow = np.sin(2 * np.pi * 5 * t) xhigh = np.sin(2 * np.pi * 250 * t) x = xlow + xhigh y = low_pass_filter(x, cutoff_frequency=1/8) # Test for 
difference between filtered signal and xlow; # using signal.butter(8, 0.125) and filtfilt(b, a, x, padlen=15) # from the SciPy documentation of filtfilt gives the stated # value 9.10862958....e-6 self.assertAlmostEqual(np.abs(y - xlow).max(), 0.0230316365, places=10) class TestMovingAverage(unittest.TestCase): # Run before every test def setUp(self, N=3, x_prev=None): self.x = np.array([0, 3, 6, 3, 0, 3, 6, 3, 0], dtype=float) self.y = moving_average(self.x, N, x_prev) # Run after every test def tearDown(self): del self.x del self.y def test_1(self): self.setUp(N=3) self.assertEqual(len(self.x), len(self.y) + 3 - 1, msg="In TestMovingAverage, test_1: wrong array length") self.assertSequenceEqual(self.y.tolist(), np.array([3, 4, 3, 2, 3, 4, 3], dtype=float).tolist(), msg="In TestMovingAverage, test_1: arrays differ") def test_2(self): self.setUp(N=4) self.assertEqual(len(self.x), len(self.y) + 4 - 1, msg="In TestMovingAverage, test_2: wrong array length") self.assertSequenceEqual(self.y.tolist(), np.array([3, 3, 3, 3, 3, 3], dtype=float).tolist(), msg="In TestMovingAverage, test_2: arrays differ") def test_3(self): self.setUp(N=3, x_prev=np.array([0, 3])) self.assertEqual(len(self.x), len(self.y), msg="In TestMovingAverage, test_3: wrong array length") self.assertSequenceEqual(self.y.tolist(), np.array([1, 2, 3, 4, 3, 2, 3, 4, 3], dtype=float).tolist(), msg="In TestMovingAverage, test_3: arrays differ") class TestFeedforwardFilter(unittest.TestCase): # Run before every test def setUp(self): # Ring and RF definitions ring = Ring(2*np.pi*1100.009, 1/18**2, 25.92e9, Particle=Proton()) rf = RFStation(ring, [4620], [4.5e6], [0.], n_rf=1) self.T_s = 5*rf.t_rf[0, 0] def test_1(self): # Modified filling time to match reference case TWC = SPS3Section200MHzTWC() TWC.tau = 420e-9 filter, n_taps, n_filling, n_fit = feedforward_filter(TWC, 4/125*1e-6, debug=False, taps=31, opt_output=True) self.assertEqual(n_taps, 31, msg="In TestFeedforwardFilter, test_1: n_taps incorrect") 
self.assertEqual(n_filling, 13, msg="In TestFeedforwardFilter, test_1: n_filling incorrect") self.assertEqual(n_fit, 44, msg="In TestFeedforwardFilter, test_1: n_fit incorrect") filter_ref = np.array( [-0.0227533635, 0.0211514102, 0.0032929202, -0.0026111554, 0.0119559316, 0.0043905603, 0.0043905603, 0.0040101282, -0.0241480816, -0.0237676496, 0.0043905603, 0.0043905603, 0.0043905603, -0.0107783487, 0.0184915005, 0.0065858404, -0.0052223108, 0.0239118633, 0.0087811206, 0.0087811206, 0.0080202564, 0.0295926259, 0.0237676496, -0.0043905603, -0.0043905603, -0.0043905603, -0.0119750148, 0.0026599098, -0.0032929202, -0.021005147, 0.022696114]) np.testing.assert_allclose(filter, filter_ref, rtol=1e-8, atol=1e-9, err_msg="In TestFeedforwardFilter, test_1: filter array incorrect") del TWC def test_2(self): TWC = SPS3Section200MHzTWC() filter, n_taps, n_filling, n_fit = feedforward_filter(TWC, self.T_s, debug=False, opt_output=True) self.assertEqual(n_taps, 31, msg="In TestFeedforwardFilter, test_2: n_taps incorrect") self.assertEqual(n_filling, 18, msg="In TestFeedforwardFilter, test_2: n_filling incorrect") self.assertEqual(n_fit, 49, msg="In TestFeedforwardFilter, test_2: n_fit incorrect") # filter_ref = np.array( # [-0.0070484734, 0.0161859736, 0.0020289928, 0.0020289928, # 0.0020289928, -0.0071641302, -0.0162319424, -0.0070388194, # 0.0020289928, 0.0020289928, 0.0020289928, - 0.0050718734, # 0.0065971343, 0.0030434892, 0.0030434892, 0.0030434892, # 0.0030434892, 0.0030434892, -0.0004807475, 0.011136476, # 0.0040579856, 0.0040579856, 0.0040579856, 0.0132511086, # 0.019651364, 0.0074147518, -0.0020289928, -0.0020289928, # -0.0020289928, -0.0162307252, 0.0071072903]) filter_ref = np.copy(feedforward_filter_TWC3) np.testing.assert_allclose(filter, filter_ref, rtol=1e-8, atol=1e-9, err_msg="In TestFeedforwardFilter, test_2: filter array incorrect") del TWC def test_3(self): TWC = SPS4Section200MHzTWC() filter, n_taps, n_filling, n_fit = feedforward_filter(TWC, self.T_s, 
debug=False, opt_output=True) self.assertEqual(n_taps, 37, msg="In TestFeedforwardFilter, test_3: n_taps incorrect") self.assertEqual(n_filling, 24, msg="In TestFeedforwardFilter, test_3: n_filling incorrect") self.assertEqual(n_fit, 61, msg="In TestFeedforwardFilter, test_3: n_fit incorrect") # filter_ref = np.array( # [ 0.0048142895, 0.0035544775, 0.0011144336, 0.0011144336, # 0.0011144336, -0.0056984584, -0.0122587698, -0.0054458778, # 0.0011144336, 0.0011144336, 0.0011144336, -0.0001684528, # -0.000662115, 0.0016716504, 0.0016716504, 0.0016716504, # 0.0016716504, 0.0016716504, 0.0016716504, 0.0016716504, # 0.0016716504, 0.0016716504, 0.0016716504, 0.0016716504, # 0.0040787952, 0.0034488892, 0.0022288672, 0.0022288672, # 0.0022288672, 0.0090417593, 0.0146881621, 0.0062036196, # -0.0011144336, -0.0011144336, -0.0011144336, -0.0036802064, # -0.0046675309]) filter_ref = np.copy(feedforward_filter_TWC4) np.testing.assert_allclose(filter, filter_ref, rtol=1e-8, atol=1e-9, err_msg="In TestFeedforwardFilter, test_3: filter array incorrect") del TWC def test_4(self): TWC = SPS5Section200MHzTWC() filter, n_taps, n_filling, n_fit = feedforward_filter(TWC, self.T_s, debug=False, opt_output=True) self.assertEqual(n_taps, 43, msg="In TestFeedforwardFilter, test_4: n_taps incorrect") self.assertEqual(n_filling, 31, msg="In TestFeedforwardFilter, test_4: n_filling incorrect") self.assertEqual(n_fit, 74, msg="In TestFeedforwardFilter, test_4: n_fit incorrect") # filter_ref = np.array( # [ 0.0189205535, -0.0105637125, 0.0007262783, 0.0007262783, # 0.0006531768, -0.0105310359, -0.0104579343, 0.0007262783, # 0.0007262783, 0.0007262783, 0.0063272331, -0.0083221785, # 0.0010894175, 0.0010894175, 0.0010894175, 0.0010894175, # 0.0010894175, 0.0010894175, 0.0010894175, 0.0010894175, # 0.0010894175, 0.0010894175, 0.0010894175, 0.0010894175, # 0.0010894175, 0.0010894175, 0.0010894175, 0.0010894175, # 0.0010894175, 0.0010894175, 0.0010894175, 0.0105496942, # -0.0041924387, 0.0014525567, 
0.0014525567, 0.0013063535, # 0.0114011487, 0.0104579343, -0.0007262783, -0.0007262783, # -0.0007262783, 0.0104756312, -0.018823192]) filter_ref = np.copy(feedforward_filter_TWC5) np.testing.assert_allclose(filter, filter_ref, rtol=1e-8, atol=1e-9, err_msg="In TestFeedforwardFilter, test_4: filter array incorrect") del TWC # TWC4 = SPS4Section200MHzTWC() # FF_4 = feedforward_filter(TWC4, 25e-9, debug=True) # TWC5 = SPS5Section200MHzTWC() # FF_5 = feedforward_filter(TWC5, 25e-9, debug=True) if __name__ == '__main__': unittest.main()
blond-admin/BLonD
unittests/llrf/test_signal_processing.py
Python
gpl-3.0
30,820
[ "Gaussian" ]
98b422c7c53c3f5b1f1e0b99acecf68fcc269dbb73b5fb73bbd6f08e809068cb
######################################################################## # $HeadURL$ # File: FileTest.py # Author: Krzysztof.Ciba@NOSPAMgmail.com # Date: 2012/08/06 13:48:54 ######################################################################## """ :mod: FileTest ======================= .. module: FileTest :synopsis: test cases for Files .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com test cases for Files """ __RCSID__ = "$Id$" # # # @file FileTest.py # @author Krzysztof.Ciba@NOSPAMgmail.com # @date 2012/08/06 13:49:05 # @brief Definition of FileTest class. # # imports import unittest # # from DIRAC from DIRAC.RequestManagementSystem.Client.Operation import Operation # # SUT from DIRAC.RequestManagementSystem.Client.File import File ######################################################################## class FileTests( unittest.TestCase ): """ .. class:: FileTest """ def setUp( self ): """ test setup """ self.fromDict = { "Size" : 1, "LFN" : "/test/lfn", "ChecksumType" : "ADLER32", "Checksum" : "123456", "Status" : "Waiting" } def tearDown( self ): """ test tear down """ del self.fromDict def test01ctors( self ): """ File construction and (de)serialisation """ # # empty default ctor theFile = File() self.assertEqual( isinstance( theFile, File ), True ) # # fromDict try: theFile = File( self.fromDict ) except AttributeError, error: print "AttributeError: %s" % str( error ) self.assertEqual( isinstance( theFile, File ), True ) for key, value in self.fromDict.items(): self.assertEqual( getattr( theFile, key ), value ) toJSON = theFile.toJSON() self.assertEqual( toJSON["OK"], True, "JSON serialization error" ) def test02props( self ): """ test props and attributes """ theFile = File() # valid props theFile.FileID = 1 self.assertEqual( theFile.FileID, 1 ) theFile.Status = "Done" self.assertEqual( theFile.Status, "Done" ) theFile.LFN = "/some/path/somewhere" self.assertEqual( theFile.LFN, "/some/path/somewhere" ) theFile.PFN = "/some/path/somewhere" 
self.assertEqual( theFile.PFN, "/some/path/somewhere" ) theFile.Attempt = 1 self.assertEqual( theFile.Attempt, 1 ) theFile.Size = 1 self.assertEqual( theFile.Size, 1 ) theFile.GUID = "2bbabe80-e2f1-11e1-9b23-0800200c9a66" self.assertEqual( theFile.GUID, "2bbabe80-e2f1-11e1-9b23-0800200c9a66" ) theFile.ChecksumType = "adler32" self.assertEqual( theFile.ChecksumType, "ADLER32" ) theFile.Checksum = "123456" self.assertEqual( theFile.Checksum, "123456" ) # # theFile.Checksum = None theFile.ChecksumType = None self.assertEqual( theFile.Checksum, "" ) self.assertEqual( theFile.ChecksumType, "" ) # # invalid props # FileID try: theFile.FileID = "foo" except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) # parent parent = Operation( { "OperationID" : 99999 } ) parent += theFile theFile.FileID = 0 self.assertEqual( parent.OperationID, theFile.OperationID ) try: theFile.OperationID = 111111 except Exception, error: self.assertEqual( isinstance( error, AttributeError ), True ) self.assertEqual( str( error ), "can't set attribute" ) # LFN try: theFile.LFN = 1 except Exception, error: self.assertEqual( isinstance( error, TypeError ), True ) self.assertEqual( str( error ), "LFN has to be a string!" ) try: theFile.LFN = "../some/path" except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) self.assertEqual( str( error ), "LFN should be an absolute path!" ) # PFN try: theFile.PFN = 1 except Exception, error: self.assertEqual( isinstance( error, TypeError ), True ) self.assertEqual( str( error ), "PFN has to be a string!" ) try: theFile.PFN = "snafu" except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) self.assertEqual( str( error ), "Wrongly formatted PFN!" 
) # Size try: theFile.Size = "snafu" except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) try: theFile.Size = -1 except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) self.assertEqual( str( error ), "Size should be a positive integer!" ) # GUID try: theFile.GUID = "snafuu-uuu-uuu-uuu-uuu-u" except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) self.assertEqual( str( error ), "'snafuu-uuu-uuu-uuu-uuu-u' is not a valid GUID!" ) try: theFile.GUID = 2233345 except Exception, error: self.assertEqual( isinstance( error, TypeError ), True ) self.assertEqual( str( error ), "GUID should be a string!" ) # Attempt try: theFile.Attempt = "snafu" except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) try: theFile.Attempt = -1 except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) self.assertEqual( str( error ), "Attempt should be a positive integer!" ) # Status try: theFile.Status = None except Exception, error: self.assertEqual( isinstance( error, ValueError ), True ) self.assertEqual( str( error ), "Unknown Status: None!" ) # Error try: theFile.Error = Exception( "test" ) except Exception, error: self.assertEqual( isinstance( error, TypeError ), True ) self.assertEqual( str( error ), "Error has to be a string!" ) # # test execution if __name__ == "__main__": testLoader = unittest.TestLoader() fileTests = testLoader.loadTestsFromTestCase( FileTests ) suite = unittest.TestSuite( [ fileTests ] ) unittest.TextTestRunner( verbosity = 3 ).run( suite )
sposs/DIRAC
RequestManagementSystem/test/FileTests.py
Python
gpl-3.0
6,001
[ "DIRAC" ]
4ce20cdb626867045b4618f103bdfe262fb794f5875073b6f5c1fd31af888428
from __future__ import unicode_literals from django.db import models from django.contrib.auth.models import User import random, string # Create your models here. class Patient(models.Model): GENDER_CHOICES = ( ('M', 'Male'), ('F', 'Female'), ) identifier = models.CharField(max_length=255) first_name = models.CharField(max_length=127) last_name = models.CharField(max_length=127) gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default='M') birthdate = models.DateField() address = models.CharField(max_length=255) address2 = models.CharField(max_length=255, null=True) city = models.CharField(max_length=63) province = models.CharField(max_length=63) country = models.CharField(max_length=63) phone_number = models.CharField(max_length=63) date_created = models.DateTimeField(auto_now_add=True) user = models.ForeignKey(User, on_delete=models.PROTECT) class Visit(models.Model): patient = models.ForeignKey(Patient, on_delete=models.CASCADE) diagnoses = models.CharField(max_length=255) notes = models.TextField() creator = models.ForeignKey(User) date_created = models.DateTimeField(auto_now_add=True) class Vitals(models.Model): height = models.DecimalField(max_digits=5, decimal_places=2) weight = models.DecimalField(max_digits=5, decimal_places=2) temperature = models.IntegerField() pulse = models.IntegerField() respiratory_rate = models.IntegerField() bp_systole = models.IntegerField() bp_diastole = models.IntegerField() patient = models.ForeignKey(Patient, on_delete=models.CASCADE) creator = models.ForeignKey(User) date_created = models.DateTimeField(auto_now_add=True) class LabResult(models.Model): patient = models.ForeignKey(Patient, on_delete=models.CASCADE) test_name = models.CharField(max_length=127) img = models.CharField(max_length=255) result = models.CharField(max_length=127) creator = models.ForeignKey(User) date_created = models.DateTimeField(auto_now_add=True) class Treatment(models.Model): DRUG_CHOICES = ( ('HRZE', 'FDC 4 combination (HRZE)'), 
('HR', 'FDC 2 combination (HR)'), ('H', 'Isoniazid (H)'), ('R', 'Rifampisin (R)'), ('Z', 'Pirazinamid (Z)'), ('E', 'Etambutol (E)'), ('S', 'Streptomisin (S)'), ('HRZ', 'FDC for children (HRZ)'), ('Km', 'Kanamysin (Km)'), ('Cm', 'Capreomysin (Cm)'), ('Lfx', 'Levofloksasin (Lfx)'), ('Mfx', 'Moksifloksasin (Mfx)'), ('Etio', 'Ethionamide (Etio)'), ('Cs', 'Cycloserine (Cs)'), ('PAS', 'Para-aminosalicyclic Acid (PAS)'), ) medication = models.CharField(max_length=7, choices=DRUG_CHOICES) dosage = models.DecimalField(max_digits=15, decimal_places=2) freq_day = models.IntegerField() freq_week = models.IntegerField() start_date = models.DateField() end_date = models.DateField() patient = models.ForeignKey(Patient, on_delete=models.CASCADE) creator = models.ForeignKey(User) date_created = models.DateTimeField(auto_now_add=True) class Appointment(models.Model): SERVICE_CHOICES = ( ('C', 'Consultation'), ('L', 'Lab test'), ('M', 'Medical check up'), ) patient = models.ForeignKey(Patient, on_delete=models.CASCADE) service_type = models.CharField(max_length=255, choices=SERVICE_CHOICES) agenda = models.TextField() date = models.DateTimeField() creator = models.ForeignKey(User) date_created = models.DateTimeField(auto_now_add=True) class Screening(models.Model): HIV_CHOICES = ( ('1', 'Yes'), ('0', 'No'), ('99', 'Unknown'), ) TB_EXPOSURE_CHOICES = ( ('1', 'One member of family in the same house had previous TB'), ('2', 'One member of family in the same house has active TB infection'), ('3', 'Contact with person infected with TB'), ('99', 'Unknown'), ) DIAGNOSIS_CHOICES = ( ('1', 'Suspect TB'), ('2', 'Confirm Positive TB'), ('0', 'Negative TB'), ) TB_PATIENT_STATUS_CHOICES = ( ('1', 'New'), ('2', 'Chronic'), ('3', 'Relapse'), ('4', 'Drop out'), ('99', 'NA'), ) cough = models.BooleanField() haemoptysis = models.BooleanField() chest_pain = models.BooleanField() weight_loss = models.BooleanField() fatigue = models.BooleanField() fever = models.BooleanField() night_sweats = 
models.BooleanField() chills = models.BooleanField() other_symptoms = models.BooleanField() diagnosis = models.CharField(max_length=7, choices=DIAGNOSIS_CHOICES) tb_patient_status = models.CharField(max_length=7, choices=TB_PATIENT_STATUS_CHOICES, default=0) meningitis = models.BooleanField() pregnant = models.BooleanField() immunocompromised = models.BooleanField() malnutrition = models.BooleanField() coinfection = models.BooleanField() comorbid = models.BooleanField() hiv = models.CharField(max_length=15, choices=HIV_CHOICES) tb_exposure = models.CharField(max_length=7, choices=TB_EXPOSURE_CHOICES) patient = models.ForeignKey(Patient, on_delete=models.CASCADE) creator = models.ForeignKey(User, default=1) date_created = models.DateTimeField(auto_now_add=True)
aliakbars/tbdc
patient/models.py
Python
apache-2.0
5,381
[ "VisIt" ]
9df4f0ede9f657ad40f738698b98557776d952f600516bd0802eba7333371063
#!/usr/bin/env python # -*- coding: utf-8 -*- # relevant imports import sys import time import espressopp import mpi4py.MPI as MPI import Tetracryst # Preparation of tetrahedral crystal and constuctions of bonds in tetrahedral liquid from espressopp import Real3D, Int3D from espressopp.tools import decomp from espressopp.tools import timers # integration steps, cutoff, skin, AdResS specifications steps = 10000 timestep = 0.0005 intervals = 1000 rc = 4.5 # cutoff coarse-grained potential rca = 1.122462048309373 # cutoff atomistic potential (cutoff (2^(1/6)), WCA) skin = 0.4 # Parameters for the thermostat #gamma = 2.0 #temp = 1.0 # Parameters for size of AdResS dimensions ex_size = 5.0 hy_size = 5.0 # read equilibrated configuration file pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espressopp.tools.readxyz("equilibrated_conf.xyz") # Table for coarse-grained potential tabCG = "table_potential.dat" # number of CG particles num_particlesCG = len(x)/4 # number of AT particles num_particles = len(x) # set up the system sys.stdout.write('Setting up simulation ...\n') density = num_particles / (Lx * Ly * Lz) size = (Lx, Ly, Lz) system = espressopp.System() system.rng = espressopp.esutil.RNG() system.bc = espressopp.bc.OrthorhombicBC(system.rng, size) system.skin = skin comm = MPI.COMM_WORLD nodeGrid = decomp.nodeGrid(comm.size) cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin) # (H-)AdResS domain decomposition system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid) # prepare AT particles allParticlesAT = [] allParticles = [] tuples = [] for pidAT in range(num_particles): allParticlesAT.append([pidAT, # add here these particles just temporarily Real3D(x[pidAT], y[pidAT], z[pidAT]), # position Real3D(vx[pidAT], vy[pidAT], vz[pidAT]), # velocity Real3D(0, 0, 0), # force 1, 1.0, 1]) # type, mass, is AT particle # create CG particles for pidCG in range(num_particlesCG): # we put CG molecule in first atom, later CG molecules will be 
positioned in the center cmp = espressopp.tools.AdressSetCG(4, pidCG, allParticlesAT) # Preparation of tuples (tuples define, which atoms belong to which CG molecules) tmptuple = [pidCG+num_particles] for pidAT2 in range(4): pid = pidCG*4+pidAT2 tmptuple.append(pid) # append CG particles allParticles.append([pidCG+num_particles, # CG particle has to be added first! Real3D(cmp[0], cmp[1], cmp[2]), # pos Real3D(0, 0, 0), # vel Real3D(0, 0, 0), # force 0, 4.0, 0]) # type, mass, is not AT particle # append AT particles for pidAT in range(4): pid = pidCG*4+pidAT allParticles.append([pid, # now the AT particles can be added (allParticlesAT[pid])[1], # pos (allParticlesAT[pid])[2], # vel (allParticlesAT[pid])[3], # force (allParticlesAT[pid])[4], # type (allParticlesAT[pid])[5], # mass (allParticlesAT[pid])[6]]) # is AT particle # append tuple to tuplelist tuples.append(tmptuple) # add particles to system system.storage.addParticles(allParticles, "id", "pos", "v", "f", "type", "mass", "adrat") # create FixedTupleList object ftpl = espressopp.FixedTupleListAdress(system.storage) # and add the tuples ftpl.addTuples(tuples) system.storage.setFixedTuplesAdress(ftpl) # add bonds between AT particles fpl = espressopp.FixedPairListAdress(system.storage, ftpl) bonds = Tetracryst.makebonds(len(x)) fpl.addBonds(bonds) # decompose after adding tuples and bonds print "Added tuples and bonds, decomposing now ..." 
system.storage.decompose() print "done decomposing" # AdResS Verlet list vl = espressopp.VerletListAdress(system, cutoff=rc, adrcut=rc, dEx=ex_size, dHy=hy_size, adrCenter=[Lx/2, Ly/2, Lz/2]) # non-bonded potentials # LJ Capped WCA between AT and tabulated potential between CG particles interNB = espressopp.interaction.VerletListHadressLennardJones(vl, ftpl) # Here we need specific (H-)AdResS interaction type potWCA = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=rca) potCG = espressopp.interaction.Tabulated(itype=3, filename=tabCG, cutoff=rc) # CG interNB.setPotentialAT(type1=1, type2=1, potential=potWCA) # AT interNB.setPotentialCG(type1=0, type2=0, potential=potCG) # CG system.addInteraction(interNB) # bonded potentials # Quartic potential between AT particles potQuartic = espressopp.interaction.Quartic(K=75.0, r0=1.0) interQuartic = espressopp.interaction.FixedPairListQuartic(system, fpl, potQuartic) system.addInteraction(interQuartic) # VelocityVerlet integrator integrator = espressopp.integrator.VelocityVerlet(system) integrator.dt = timestep # add AdResS extension adress = espressopp.integrator.Adress(system, vl, ftpl) integrator.addExtension(adress) # add Langevin thermostat extension #langevin = espressopp.integrator.LangevinThermostat(system) #langevin.gamma = gamma #langevin.temperature = temp #langevin.adress = True # enable AdResS! 
#integrator.addExtension(langevin) # distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass espressopp.tools.AdressDecomp(system, integrator) # system information print '' print 'AdResS Center =', [Lx/2, Ly/2, Lz/2] print 'number of AT particles =', num_particles print 'number of CG particles =', num_particlesCG print 'density = %.4f' % (density) print 'rc =', rc print 'dt =', integrator.dt print 'skin =', system.skin print 'steps =', steps print 'NodeGrid = %s' % (nodeGrid,) print 'CellGrid = %s' % (cellGrid,) print '' # analysis temperature = espressopp.analysis.Temperature(system) fmt = '%5d %8.4f %12.3f %12.3f %12.3f %12.3f\n' T = temperature.compute() Ek = 0.5 * T * (3 * num_particles) Ep = interNB.computeEnergy() Eb = interQuartic.computeEnergy() sys.stdout.write(' step Temp etotal enonbonded ebonded ekinetic\n') sys.stdout.write(fmt % (0, T, Ek + Ep + Eb, Ep, Eb, Ek)) # Timer, Steps start_time = time.clock() nsteps = steps / intervals # write the start configuration to trajectory pdb-file dump_conf_gro = espressopp.io.DumpGRO(system, integrator, filename='trajCG.gro') dump_conf_gro_adr = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename='trajAT.gro') # integration and on the fly analysis for s in range(1, intervals + 1): integrator.run(nsteps) step = nsteps * s T = temperature.compute() Ek = 0.5 * T * (3 * num_particles) Ep = interNB.computeEnergy() Eb = interQuartic.computeEnergy() sys.stdout.write(fmt % (step, T, Ek + Ep + Eb, Ep, Eb, Ek)) dump_conf_gro.dump() dump_conf_gro_adr.dump() # simulation information end_time = time.clock() timers.show(integrator.getTimers(), precision=3) sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize()) sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles))) sys.stdout.write('Neighbor list builds = %d\n' % vl.builds) sys.stdout.write('Integration steps = %d\n' % integrator.step) sys.stdout.write('CPU time = 
%.1f\n' % (end_time - start_time))
junghans/espressopp
examples/hadress/hadressPlain/hadress.py
Python
gpl-3.0
7,455
[ "CRYSTAL" ]
b33f9adb8bce105c5443b52f1766f121f54c19c4cab90a4d51c1aa24e993d81a
from django.shortcuts import render from django.http import HttpResponse from rango.models import Category,Page from rango.forms import CategoryForm from django.contrib.auth.decorators import login_required from datetime import datetime def index(request): category_list = Category.objects.order_by('-likes')[:15] page_list = Page.objects.order_by('-views')[:15] context_dict = {'categories': category_list, 'pages': page_list} visits = request.session.get('visits') if not visits: visits = 1 reset_last_visit_time = False last_visit = request.session.get('last_visit') if last_visit: last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S") if (datetime.now() - last_visit_time).seconds > 0: # ...reassign the value of the cookie to +1 of what it was before... visits = visits + 1 # ...and update the last visit cookie, too. reset_last_visit_time = True else: # Cookie last_visit doesn't exist, so create it to the current date/time. reset_last_visit_time = True if reset_last_visit_time: request.session['last_visit'] = str(datetime.now()) request.session['visits'] = visits context_dict['visits'] = visits response = render(request,'rango/index.html', context_dict) return response def about(request): context_dict = {} visits = request.session.get('visits') last_visit = request.session.get('last_visit') context_dict['visits'] = visits; context_dict['last_visit'] = last_visit; return render(request,'rango/about.html',context_dict) def category(request, category_name_slug): # Create a context dictionary which we can pass to the template rendering engine. context_dict = {} try: # Can we find a category name slug with the given name? # If we can't, the .get() method raises a DoesNotExist exception. # So the .get() method returns one model instance or raises an exception. category = Category.objects.get(slug=category_name_slug) context_dict['category_name'] = category.name context_dict['category_name_slug'] = category_name_slug # Retrieve all of the associated pages. 
# Note that filter returns >= 1 model instance. pages = Page.objects.filter(category=category) # Adds our results list to the template context under name pages. context_dict['pages'] = pages # We also add the category object from the database to the context dictionary. # We'll use this in the template to verify that the category exists. context_dict['category'] = category except Category.DoesNotExist: # We get here if we didn't find the specified category. # Don't do anything - the template displays the "no category" message for us. pass # Go render the response and return it to the client. return render(request, 'rango/category.html', context_dict) @login_required def add_category(request): # A HTTP POST? if request.method == 'POST': form = CategoryForm(request.POST) # Have we been provided with a valid form? if form.is_valid(): # Save the new category to the database. form.save(commit=True) # Now call the index() view. # The user will be shown the homepage. return index(request) else: # The supplied form contained errors - just print them to the terminal. print form.errors else: # If the request was not a POST, display the form to enter details. form = CategoryForm() # Bad form (or form details), no form supplied... # Render the form with error messages (if any). return render(request, 'rango/add_category.html', {'form': form}) from rango.forms import PageForm @login_required def add_page(request, category_name_slug): try: cat = Category.objects.get(slug=category_name_slug) except Category.DoesNotExist: cat = None if request.method == 'POST': form = PageForm(request.POST) if form.is_valid(): if cat: page = form.save(commit=False) if not Page.objects.filter(title = page.title,category=cat): page.category = cat page.views = 0 page.save() # probably better to use a redirect here. 
return category(request, category_name_slug) else: print form.errors else: form = PageForm() context_dict = {'form':form, 'category': cat} return render(request, 'rango/add_page.html', context_dict) from rango.forms import UserForm, UserProfileForm def register(request): # A boolean value for telling the template whether the registration was successful. # Set to False initially. Code changes value to True when registration succeeds. registered = False # If it's a HTTP POST, we're interested in processing form data. if request.method == 'POST': # Attempt to grab information from the raw form information. # Note that we make use of both UserForm and UserProfileForm. user_form = UserForm(data=request.POST) profile_form = UserProfileForm(data=request.POST) # If the two forms are valid... if user_form.is_valid() and profile_form.is_valid(): # Save the user's form data to the database. user = user_form.save() # Now we hash the password with the set_password method. # Once hashed, we can update the user object. user.set_password(user.password) user.save() # Now sort out the UserProfile instance. # Since we need to set the user attribute ourselves, we set commit=False. # This delays saving the model until we're ready to avoid integrity problems. profile = profile_form.save(commit=False) profile.user = user # Did the user provide a profile picture? # If so, we need to get it from the input form and put it in the UserProfile model. if 'picture' in request.FILES: profile.picture = request.FILES['picture'] # Now we save the UserProfile model instance. profile.save() # Update our variable to tell the template registration was successful. registered = True # Invalid form or forms - mistakes or something else? # Print problems to the terminal. # They'll also be shown to the user. else: print user_form.errors, profile_form.errors # Not a HTTP POST, so we render our form using two ModelForm instances. # These forms will be blank, ready for user input. 
else: user_form = UserForm() profile_form = UserProfileForm() # Render the template depending on the context. return render(request, 'rango/register.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} ) from django.contrib.auth import authenticate, login from django.http import HttpResponseRedirect, HttpResponse def user_login(request): # If the request is a HTTP POST, try to pull out the relevant information. if request.method == 'POST': # Gather the username and password provided by the user. # This information is obtained from the login form. # We use request.POST.get('<variable>') as opposed to request.POST['<variable>'], # because the request.POST.get('<variable>') returns None, if the value does not exist, # while the request.POST['<variable>'] will raise key error exception username = request.POST.get('username') password = request.POST.get('password') # Use Django's machinery to attempt to see if the username/password # combination is valid - a User object is returned if it is. user = authenticate(username=username, password=password) # If we have a User object, the details are correct. # If None (Python's way of representing the absence of a value), no user # with matching credentials was found. if user: # Is the account active? It could have been disabled. if user.is_active: # If the account is valid and active, we can log the user in. # We'll send the user back to the homepage. login(request, user) return HttpResponseRedirect('/rango/') else: # An inactive account was used - no logging in! return HttpResponse("Your Rango account is disabled.") else: # Bad login details were provided. So we can't log the user in. print "Invalid login details: {0}, {1}".format(username, password) return HttpResponse("Invalid login details supplied.") # The request is not a HTTP POST, so display the login form. # This scenario would most likely be a HTTP GET. 
else: # No context variables to pass to the template system, hence the # blank dictionary object... return render(request, 'rango/login.html', {}) @login_required def restricted(request): return HttpResponse("Since you're logged in, you can see this text!") from django.contrib.auth import logout # Use the login_required() decorator to ensure only those logged in can access the view. @login_required def user_logout(request): # Since we know the user is logged in, we can now just log them out. logout(request) # Take the user back to the homepage. return HttpResponseRedirect('/rango/') from rango.bing_search import run_query def search(request): result_list = [] if request.method == 'POST': query = request.POST['query'].strip() if query: # Run our Bing function to get the results list! result_list = run_query(query) return render(request, 'rango/search.html', {'result_list': result_list})
ZheJiuShiMing/tango_with_django_project
tango_with_django_project/rango/views.py
Python
gpl-2.0
10,530
[ "VisIt" ]
3576436965b3649bf5168bced152aaf8c9bd3d6dbae2b4b759f026f261b3df6f
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2012 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## from kiwi.currency import currency from stoqlib.gui.editors.costcentereditor import CostCenterEditor from stoqlib.gui.test.uitestutils import GUITest class TestCostCenterEditor(GUITest): def test_show(self): model = self.create_cost_center() model.budget = currency('10000') editor = CostCenterEditor(self.store, model) self.check_editor(editor, 'editor-cost-center-show') def test_create(self): editor = CostCenterEditor(self.store) self.check_editor(editor, 'editor-cost-center-create')
andrebellafronte/stoq
stoqlib/gui/test/test_costcentereditor.py
Python
gpl-2.0
1,441
[ "VisIt" ]
13124ed61f53820c8f3a3959fd47f6af10515acb6d093804448fc74bb051ee9c
#!/usr/bin/env python import glob import sys import numpy as np from setuptools import setup, find_packages try: from Cython.Build import cythonize have_cython = True except ImportError: have_cython = False # Determine shared library suffix if sys.platform == 'darwin': suffix = 'dylib' else: suffix = 'so' # Get version information from __init__.py. This is ugly, but more reliable than # using an import. with open('openmc/__init__.py', 'r') as f: version = f.readlines()[-1].split()[-1].strip("'") kwargs = { 'name': 'openmc', 'version': version, 'packages': find_packages(exclude=['tests*']), 'scripts': glob.glob('scripts/openmc-*'), # Data files and librarries 'package_data': { 'openmc.lib': ['libopenmc.{}'.format(suffix)], 'openmc.data': ['mass16.txt', 'BREMX.DAT', '*.h5'], 'openmc.data.effective_dose': ['*.txt'] }, # Metadata 'author': 'The OpenMC Development Team', 'author_email': 'openmc-dev@googlegroups.com', 'description': 'OpenMC', 'url': 'https://openmc.org', 'download_url': 'https://github.com/openmc-dev/openmc/releases', 'project_urls': { 'Issue Tracker': 'https://github.com/openmc-dev/openmc/issues', 'Documentation': 'https://docs.openmc.org', 'Source Code': 'https://github.com/openmc-dev/openmc', }, 'classifiers': [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Topic :: Scientific/Engineering' 'Programming Language :: C++', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], # Dependencies 'python_requires': '>=3.5', 'install_requires': [ 'numpy>=1.9', 'h5py', 'scipy', 'ipython', 'matplotlib', 'pandas', 'lxml', 'uncertainties' ], 'extras_require': { 'depletion-mpi': ['mpi4py'], 'docs': ['sphinx', 'sphinxcontrib-katex', 
'sphinx-numfig', 'jupyter', 'sphinxcontrib-svg2pdfconverter', 'sphinx-rtd-theme'], 'test': ['pytest', 'pytest-cov', 'colorama'], 'vtk': ['vtk'], }, } # If Cython is present, add resonance reconstruction and fast float_endf if have_cython: kwargs.update({ 'ext_modules': cythonize('openmc/data/*.pyx'), 'include_dirs': [np.get_include()] }) setup(**kwargs)
liangjg/openmc
setup.py
Python
mit
2,715
[ "VTK" ]
7d074e9b39927c73e56eb31cb6769ebc343b6b53c6332c1f2f2b00a311c9a272
# Copyright (C) 2012,2013,2016 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ********************************** espressopp.analysis.Configurations ********************************** * `gather()` add configuration to trajectory * `clear()` clear trajectory * `back()` get last configuration of trajectory * `capacity` maximum number of configurations in trajectory further adding (`gather()`) configurations results in erasing oldest configuration before adding new one capacity=0 means: infinite capacity (until memory is full) * `size` number of stored configurations usage: storing trajectory >>> configurations = espressopp.Configurations(system) >>> configurations.gather() >>> for k in xrange(100): >>> integrator.run(100) >>> configurations.gather() accessing trajectory data: iterate over all stored configurations: >>> for conf in configurations: iterate over all particles stored in configuration: >>> for pid in conf >>> particle_coords = conf[pid] >>> print pid, particle_coords access particle with id <pid> of stored configuration <n>: >>> print "particle coord: ",configurations[n][pid] .. function:: espressopp.analysis.Configurations(system) :param system: :type system: .. 
function:: espressopp.analysis.Configurations.back() :rtype: .. function:: espressopp.analysis.Configurations.clear() :rtype: .. function:: espressopp.analysis.Configurations.gather() :rtype: """ from espressopp.esutil import cxxinit from espressopp import pmi from espressopp.analysis.Observable import * from _espressopp import analysis_Configurations class ConfigurationsLocal(ObservableLocal, analysis_Configurations): def __init__(self, system, pos=True, vel=False, force=False, radius=False, folded=True): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, analysis_Configurations, system, pos, vel, force, radius, folded) def gather(self): return self.cxxclass.gather(self) def clear(self): return self.cxxclass.clear(self) def __iter__(self): return self.cxxclass.all(self).__iter__() def back(self): return self.cxxclass.back(self) if pmi.isController : class Configurations(Observable): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.analysis.ConfigurationsLocal', pmicall = [ "gather", "clear", "back" ], localcall = ["__getitem__", "__iter__"], pmiproperty = ["capacity", "size"] )
govarguz/espressopp
src/analysis/Configurations.py
Python
gpl-3.0
3,386
[ "ESPResSo" ]
2ae051fad35708f8ad660b816428464dd37c88e457277a9e409eccc4f2287a88
""" ResourceManagementClient Client to interact with the ResourceManagementDB. """ __RCSID__ = '$Id$' from DIRAC.Core.DISET.RPCClient import RPCClient def uppercase_first_letter(key): """ a method that makes the first letter uppercase only (and leaves the remaining letters unaffected) """ return key[0].upper() + key[1:] class ResourceManagementClient(object): """ The :class:`ResourceManagementClient` class exposes the :mod:`DIRAC.ResourceManagement` API. All functions you need are on this client. It has the 'direct-db-access' functions, the ones of the type: - insert - update - select - delete that return parts of the RSSConfiguration stored on the CS, and used everywhere on the RSS module. Finally, and probably more interesting, it exposes a set of functions, badly called 'boosters'. They are 'home made' functions using the basic database functions that are interesting enough to be exposed. The client will ALWAYS try to connect to the DB, and in case of failure, to the XML-RPC server ( namely :mod:`~DIRAC.ResourceStatusSystem.DB.ResourceManagementDB` and :mod:`~DIRAC.ResourceStatusSystem.Service.ResourceManagementHandler` ). You can use this client on this way >>> from DIRAC.ResourceManagementSystem.Client.ResourceManagementClient import ResourceManagementClient >>> rsClient = ResourceManagementClient() All functions calling methods exposed on the database or on the booster are making use of some syntactic sugar, in this case a decorator that simplifies the client considerably. """ def _prepare(self, sendDict): # remove unnecessary key generated by locals() del sendDict['self'] # make each key name uppercase to match database column names (case sensitive) for key, value in sendDict.items(): del sendDict[key] if value: sendDict.update({uppercase_first_letter(key): value}) return sendDict # AccountingCache Methods .................................................... 
def selectAccountingCache(self, name=None, plotType=None, plotName=None, result=None, dateEffective=None, lastCheckTime=None, meta=None): ''' Gets from PolicyResult all rows that match the parameters given. :param name: name of an individual of the grid topology :type name: string, list :param plotType: the plotType name (e.g. 'Pilot') :type plotType: string, list :param plotName: the plot name :type plotName: string, list :param result: command result :type result: string, list :param dateEffective: time-stamp from which the result is effective :type dateEffective: datetime, list :param lastCheckTime: time-stamp setting last time the result was checked :type lastCheckTime: datetime, list :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('AccountingCache', self._prepare(locals())) def addOrModifyAccountingCache(self, name=None, plotType=None, plotName=None, result=None, dateEffective=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to AccountingCache. Using `name`, `plotType` and `plotName` to query the database, decides whether to insert or update the table. :param str name: name of an individual of the grid topology :param str plotType: name (e.g. 'Pilot') :param str plotName: the plot name :param str result: command result :param datetime dateEffective: timestamp from which the result is effective :param datetime lastCheckTime: timestamp setting last time the result was checked :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('AccountingCache', self._prepare(locals())) def deleteAccountingCache(self, name=None, plotType=None, plotName=None, result=None, dateEffective=None, lastCheckTime=None): ''' Deletes from AccountingCache all rows that match the parameters given. 
:param str name: name of an individual of the grid topology :param str plotType: the plotType name (e.g. 'Pilot') :param str plotName: the plot name :param str result: command result :param datetime dateEffective: timestamp from which the result is effective :param datetime lastCheckTime: timestamp setting last time the result was checked :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('AccountingCache', self._prepare(locals())) # GGUSTicketsCache Methods ................................................... def selectGGUSTicketsCache(self, gocSite=None, link=None, openTickets=None, tickets=None, lastCheckTime=None, meta=None): ''' Gets from GGUSTicketsCache all rows that match the parameters given. :param str gocSite: :param str link: url to the details :param int openTickets: :param str tickets: :param datetime lastCheckTime: timestamp setting last time the result was checked :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('GGUSTicketsCache', self._prepare(locals())) def deleteGGUSTicketsCache(self, gocSite=None, link=None, openTickets=None, tickets=None, lastCheckTime=None): ''' Deletes from GGUSTicketsCache all rows that match the parameters given. :param str gocSite: :param str link: url to the details :param int openTickets: :param str tickets: :param datetime lastCheckTime: timestamp setting last time the result was checked :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('GGUSTicketsCache', self._prepare(locals())) def addOrModifyGGUSTicketsCache(self, gocSite=None, link=None, openTickets=None, tickets=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to GGUSTicketsCache all rows that match the parameters given. 
:param str gocSite: :param str link: url to the details :param int openTickets: :param str tickets: :param datetime lastCheckTime: timestamp setting last time the result was checked :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('GGUSTicketsCache', self._prepare(locals())) # DowntimeCache Methods ...................................................... def selectDowntimeCache(self, downtimeID=None, element=None, name=None, startDate=None, endDate=None, severity=None, description=None, link=None, dateEffective=None, lastCheckTime=None, gOCDBServiceType=None, meta=None): ''' Gets from DowntimeCache all rows that match the parameters given. :param downtimeID: unique id for the downtime :type downtimeID: string, list :param element: valid element in the topology (Site, Resource, Node) :type element: string, list :param name: name of the element where the downtime applies :type name: string, list :param startDate: starting time for the downtime :type startDate: datetime, list :param endDate: ending time for the downtime :type endDate: datetime, list :param severity: severity assigned by the gocdb :type severity: string, list :param description: brief description of the downtime :type description: string, list :param link: url to the details :type link: string, list :param dateEffective: time when the entry was created in this database :type dateEffective: datetime, list :param lastCheckTime: timestamp setting last time the result was checked :type lastCheckTime: datetime, list :param str gOCDBServiceType: service type assigned by gocdb :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. 
:return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('DowntimeCache', self._prepare(locals())) def deleteDowntimeCache(self, downtimeID=None, element=None, name=None, startDate=None, endDate=None, severity=None, description=None, link=None, dateEffective=None, lastCheckTime=None, gOCDBServiceType=None): ''' Deletes from DowntimeCache all rows that match the parameters given. :param downtimeID: unique id for the downtime :type downtimeID: string, list :param element: valid element in the topology ( Site, Resource, Node ) :type element: string, list :param name: name of the element where the downtime applies :type name: string, list :param startDate: starting time for the downtime :type startDate: datetime, list :param endDate: ending time for the downtime :type endDate: datetime, list :param severity: severity assigned by the gocdb :type severity: string, list :param description: brief description of the downtime :type description: string, list :param link: url to the details :type link: string, list :param dateEffective: time when the entry was created in this database :type dateEffective: datetime, list :param lastCheckTime: time-stamp setting last time the result was checked :type lastCheckTime: datetime, list :param str gOCDBServiceType: service type assigned by gocdb :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('DowntimeCache', self._prepare(locals())) def addOrModifyDowntimeCache(self, downtimeID=None, element=None, name=None, startDate=None, endDate=None, severity=None, description=None, link=None, dateEffective=None, lastCheckTime=None, gOCDBServiceType=None): ''' Adds or updates-if-duplicated to DowntimeCache. Using `downtimeID` to query the database, decides whether to insert or update the table. 
:param str downtimeID: unique id for the downtime :param str element: valid element in the topology ( Site, Resource, Node ) :param str name: name of the element where the downtime applies :param datetime startDate: starting time for the downtime :param datetime endDate: ending time for the downtime :param str severity: severity assigned by the gocdb :param str description: brief description of the downtime :param str link: url to the details :param datetime dateEffective: time when the entry was created in this database :param datetime lastCheckTime: timestamp setting last time the result was checked :param str gOCDBServiceType: service type assigned by gocdb :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('DowntimeCache', self._prepare(locals())) # JobCache Methods ........................................................... def selectJobCache(self, site=None, maskStatus=None, efficiency=None, status=None, lastCheckTime=None, meta=None): ''' Gets from JobCache all rows that match the parameters given. :param site: name of the site element :type site: string, list :param maskStatus: maskStatus for the site :type maskStatus: string, list :param efficiency: job efficiency ( successful / total ) :type efficiency: float, list :param status: status for the site computed :type status: string, list :param lastCheckTime: timestamp setting last time the result was checked :type lastCheckTime: datetime, list :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('JobCache', self._prepare(locals())) def deleteJobCache(self, site=None, maskStatus=None, efficiency=None, status=None, lastCheckTime=None): ''' Deletes from JobCache all rows that match the parameters given. 
:param site: name of the site element :type site: string, list :param maskStatus: maskStatus for the site :type maskStatus: string, list :param efficiency: job efficiency ( successful / total ) :type efficiency: float, list :param status: status for the site computed :type status: string, list :param lastCheckTime: timestamp setting last time the result was checked :type lastCheckTime: datetime, list :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('JobCache', self._prepare(locals())) def addOrModifyJobCache(self, site=None, maskStatus=None, efficiency=None, status=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to JobCache. Using `site` to query the database, decides whether to insert or update the table. :param site: name of the site element :type site: string, list :param maskStatus: maskStatus for the site :type maskStatus: string, list :param efficiency: job efficiency ( successful / total ) :type efficiency: float, list :param status: status for the site computed :type status: string, list :param lastCheckTime: time-stamp setting last time the result was checked :type lastCheckTime: datetime, list :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('JobCache', self._prepare(locals())) # TransferCache Methods ...................................................... def selectTransferCache(self, sourceName=None, destinationName=None, metric=None, value=None, lastCheckTime=None, meta=None): ''' Gets from TransferCache all rows that match the parameters given. 
:param elementName: name of the element :type elementName: string, list :param direction: the element taken as Source or Destination of the transfer :type direction: string, list :param metric: measured quality of failed transfers :type metric: string, list :param value: percentage :type value: float, list :param lastCheckTime: time-stamp setting last time the result was checked :type lastCheckTime: float, list :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('TransferCache', self._prepare(locals())) def deleteTransferCache(self, sourceName=None, destinationName=None, metric=None, value=None, lastCheckTime=None): ''' Deletes from TransferCache all rows that match the parameters given. :param elementName: name of the element :type elementName: string, list :param direction: the element taken as Source or Destination of the transfer :type direction: string, list :param metric: measured quality of failed transfers :type metric: string, list :param value: percentage :type value: float, list :param lastCheckTime: time-stamp setting last time the result was checked :type lastCheckTime: float, list :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('TransferCache', self._prepare(locals())) def addOrModifyTransferCache(self, sourceName=None, destinationName=None, metric=None, value=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to TransferCache. Using `elementName`, `direction` and `metric` to query the database, decides whether to insert or update the table. 
:param str elementName: name of the element :param str direction: the element taken as Source or Destination of the transfer :param str metric: measured quality of failed transfers :param float value: percentage :param datetime lastCheckTime: time-stamp setting last time the result was checked :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('TransferCache', self._prepare(locals())) # PilotCache Methods ......................................................... def selectPilotCache(self, site=None, cE=None, pilotsPerJob=None, pilotJobEff=None, status=None, lastCheckTime=None, meta=None): ''' Gets from TransferCache all rows that match the parameters given. :param site: name of the site :type site: string, list :param cE: name of the CE of 'Multiple' if all site CEs are considered :type cE: string, list :param pilotsPerJob: measure calculated :type pilotsPerJob: float, list :param pilotJobEff: percentage :type pilotJobEff: float, list :param status: status of the CE / Site :type status: float, list :param lastCheckTime: measure calculated :type lastCheckTime: datetime, list :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('PilotCache', self._prepare(locals())) def deletePilotCache(self, site=None, cE=None, pilotsPerJob=None, pilotJobEff=None, status=None, lastCheckTime=None): ''' Deletes from TransferCache all rows that match the parameters given. 
:param site: name of the site :type site: string, list :param cE: name of the CE of 'Multiple' if all site CEs are considered :type cE: string, list :param pilotsPerJob: measure calculated :type pilotsPerJob: float, list :param pilotJobEff: percentage :type pilotJobEff: float, list :param status: status of the CE / Site :type status: float, list :param lastCheckTime: measure calculated :type lastCheckTime: datetime, list :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('PilotCache', self._prepare(locals())) def addOrModifyPilotCache(self, site=None, cE=None, pilotsPerJob=None, pilotJobEff=None, status=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to PilotCache. Using `site` and `cE` to query the database, decides whether to insert or update the table. :param str site: name of the site :param str cE: name of the CE of 'Multiple' if all site CEs are considered :param float pilotsPerJob: measure calculated :param flaot pilotJobEff: percentage :param str status: status of the CE / Site :param datetime lastCheckTime: measure calculated :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('PilotCache', self._prepare(locals())) # PolicyResult Methods ....................................................... def selectPolicyResult(self, element=None, name=None, policyName=None, statusType=None, status=None, reason=None, lastCheckTime=None, meta=None): ''' Gets from PolicyResult all rows that match the parameters given. 
:param granularity: it has to be a valid element ( ValidElement ), any of the defaults: 'Site' | 'Service' | 'Resource' | 'StorageElement' :type granularity: string, list :param name: name of the element :type name: string, list :param policyName: name of the policy :type policyName: string, list :param statusType: it has to be a valid status type for the given granularity :type statusType: string, list :param status: it has to be a valid status, any of the defaults: 'Active' | 'Degraded' | 'Probing' | 'Banned' :type status: string, list :param reason: decision that triggered the assigned status :type reason: string, list :param lastCheckTime: time-stamp setting last time the policy result was checked :type lastCheckTime: datetime, list :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('PolicyResult', self._prepare(locals())) def deletePolicyResult(self, element=None, name=None, policyName=None, statusType=None, status=None, reason=None, dateEffective=None, lastCheckTime=None): ''' Deletes from PolicyResult all rows that match the parameters given. 
:param granularity: it has to be a valid element ( ValidElement ), any of the defaults: 'Site' | 'Service' | 'Resource' | 'StorageElement' :type granularity: string, list :param name: name of the element :type name: string, list :param policyName: name of the policy :type policyName: string, list :param statusType: it has to be a valid status type for the given granularity :type statusType: string, list :param status: it has to be a valid status, any of the defaults: 'Active' | 'Degraded' | 'Probing' | 'Banned' :type status: string, list :param reason: decision that triggered the assigned status :type reason: string, list :param datetime dateEffective: time-stamp from which the policy result is effective :param lastCheckTime: time-stamp setting last time the policy result was checked :type lastCheckTime: datetime, list :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('PolicyResult', self._prepare(locals())) def addOrModifyPolicyResult(self, element=None, name=None, policyName=None, statusType=None, status=None, reason=None, dateEffective=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to PolicyResult. Using `name`, `policyName` and `statusType` to query the database, decides whether to insert or update the table. 
:param str element: it has to be a valid element ( ValidElement ), any of the defaults: 'Site' | 'Service' | 'Resource' | 'StorageElement' :param str name: name of the element :param str policyName: name of the policy :param str statusType: it has to be a valid status type for the given element :param str status: it has to be a valid status, any of the defaults: 'Active' | 'Degraded' | 'Probing' | 'Banned' :param str reason: decision that triggered the assigned status :param datetime dateEffective: time-stamp from which the policy result is effective :param datetime lastCheckTime: time-stamp setting last time the policy result was checked :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('PolicyResult', self._prepare(locals())) # SpaceTokenOccupancyCache Methods ........................................... def selectSpaceTokenOccupancyCache(self, endpoint=None, token=None, total=None, guaranteed=None, free=None, lastCheckTime=None, meta=None): ''' Gets from SpaceTokenOccupancyCache all rows that match the parameters given. :param endpoint: srm endpoint :type endpoint: string, list :param token: name of the token :type token: string, list :param total: total terabytes :type total: integer, list :param guaranteed: guaranteed terabytes :type guaranteed: integer, list :param free: free terabytes :type free: integer, list :param lastCheckTime: time-stamp from which the result is effective :type lastCheckTime: datetime, list :param dict meta: metadata for the mysql query. Currently it is being used only for column selection. For example: meta={'columns': ['Name']} will return only the 'Name' column. 
:return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").select('SpaceTokenOccupancyCache', self._prepare(locals())) def deleteSpaceTokenOccupancyCache(self, endpoint=None, token=None, total=None, guaranteed=None, free=None, lastCheckTime=None): ''' Deletes from SpaceTokenOccupancyCache all rows that match the parameters given. :param endpoint: srm endpoint :type endpoint: string, list :param token: name of the token :type token: string, list :param total: total terabytes :type total: integer, list :param guaranteed: guaranteed terabytes :type guaranteed: integer, list :param free: free terabytes :type free: integer, list :param lastCheckTime: time-stamp from which the result is effective :type lastCheckTime: datetime, list :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").delete('SpaceTokenOccupancyCache', self._prepare(locals())) def addOrModifySpaceTokenOccupancyCache(self, endpoint=None, token=None, total=None, guaranteed=None, free=None, lastCheckTime=None): ''' Adds or updates-if-duplicated to SpaceTokenOccupancyCache. Using `site` and `token` to query the database, decides whether to insert or update the table. :param endpoint: srm endpoint :type endpoint: string, list :param str token: name of the token :param int total: total terabytes :param int guaranteed: guaranteed terabytes :param int free: free terabytes :param datetime lastCheckTime: time-stamp from which the result is effective :return: S_OK() || S_ERROR() ''' return RPCClient("ResourceStatus/ResourceManagement").addOrModify('SpaceTokenOccupancyCache', self._prepare(locals())) # EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
petricm/DIRAC
ResourceStatusSystem/Client/ResourceManagementClient.py
Python
gpl-3.0
26,914
[ "DIRAC" ]
f16e4c0d4c3e973100c2cfd1d7d8eed8673467220b3d2d6bebfcc5b046827974
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RPreprocesscore(RPackage): """A library of core preprocessing routines""" homepage = "https://bioconductor.org/packages/preprocessCore/" url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/preprocessCore_1.38.1.tar.gz" list_url = homepage version('1.38.1', '5085ba98cbab4686f1eb86971f1eecd6') depends_on('r@3.4.0:3.4.9', when='@1.38.1')
lgarren/spack
var/spack/repos/builtin/packages/r-preprocesscore/package.py
Python
lgpl-2.1
1,640
[ "Bioconductor" ]
e7df437590cacf3bee250fd741fc324a77e350b3cd0a455f950a9e066de4b570
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """AST conversion templates. Adapted from Tangent. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import textwrap import gast from tensorflow.contrib.autograph.pyct import anno from tensorflow.contrib.autograph.pyct import ast_util from tensorflow.contrib.autograph.pyct import parser from tensorflow.contrib.autograph.pyct import qual_names class ReplaceTransformer(gast.NodeTransformer): """Replace AST nodes.""" def __init__(self, replacements): """Create a new ReplaceTransformer. Args: replacements: A mapping from placeholder names to (lists of) AST nodes that these placeholders will be replaced by. """ self.replacements = replacements self.in_replacements = False self.preserved_annos = { anno.Basic.ORIGIN, anno.Basic.SKIP_PROCESSING, anno.Static.ORIG_DEFINITIONS, } def _prepare_replacement(self, replaced, key): """Prepares a replacement AST that's safe to swap in for a node. 
Args: replaced: ast.AST, the node being replaced key: Hashable, the key of the replacement AST Returns: ast.AST, the replacement AST """ repl = self.replacements[key] new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos) if isinstance(new_nodes, gast.AST): new_nodes = [new_nodes] return new_nodes def visit_Expr(self, node): # When replacing a placeholder with an entire statement, the replacement # must stand on its own and not be wrapped in an Expr. new_value = self.visit(node.value) if new_value is node.value: return node return new_value def visit_keyword(self, node): if node.arg not in self.replacements: return self.generic_visit(node) repl = self._prepare_replacement(node, node.arg) if isinstance(repl, gast.keyword): return repl elif (repl and isinstance(repl, (list, tuple)) and all(isinstance(r, gast.keyword) for r in repl)): return repl # TODO(mdan): We may allow replacing with a string as well. # For example, if one wanted to replace foo with bar in foo=baz, then # we could allow changing just node arg, so that we end up with bar=baz. raise ValueError( 'a keyword argument may only be replaced by another keyword or a ' 'non-empty list of keywords. Found: %s' % repl) def visit_FunctionDef(self, node): node = self.generic_visit(node) if node.name not in self.replacements: return node repl = self.replacements[node.name] if not isinstance(repl, (gast.Name, ast.Name)): raise ValueError( 'a function name can only be replaced by a Name node. 
Found: %s' % repl) node.name = repl.id return node def _check_has_context(self, node): if not node.ctx: raise ValueError('node %s is missing ctx value' % node) def _check_inner_children_have_context(self, node): if isinstance(node, gast.Attribute): self._check_inner_children_have_context(node.value) self._check_has_context(node) elif isinstance(node, gast.Tuple): for e in node.elts: self._check_inner_children_have_context(e) self._check_has_context(node) elif isinstance(node, gast.Dict): for e in node.keys: self._check_inner_children_have_context(e) for e in node.values: self._check_inner_children_have_context(e) elif isinstance(node, gast.Subscript): self._check_inner_children_have_context(node.value) self._check_inner_children_have_context(node.slice) elif isinstance(node, gast.Slice): self._check_inner_children_have_context(node.lower) if node.upper: self._check_inner_children_have_context(node.upper) if node.step: self._check_inner_children_have_context(node.step) elif isinstance(node, gast.Name): self._check_has_context(node) elif isinstance(node, (gast.Str, gast.Num)): pass else: raise ValueError('unexpected node type "%s"' % node) def _set_inner_child_context(self, node, ctx): if isinstance(node, gast.Attribute): self._set_inner_child_context(node.value, gast.Load()) node.ctx = ctx elif isinstance(node, gast.Tuple): for e in node.elts: self._set_inner_child_context(e, ctx) node.ctx = ctx elif isinstance(node, gast.Name): node.ctx = ctx elif isinstance(node, gast.Call): self._set_inner_child_context(node.func, ctx) # We may be able to override these to Load(), but for now it's simpler # to just assert that they're set. for a in node.args: self._check_inner_children_have_context(a) for k in node.keywords: self._check_inner_children_have_context(k.value) elif isinstance(node, gast.Dict): # We may be able to override these to Load(), but for now it's simpler # to just assert that they're set. 
for e in node.keys: self._check_inner_children_have_context(e) for e in node.values: self._check_inner_children_have_context(e) elif isinstance(node, gast.Subscript): self._set_inner_child_context(node.value, ctx) self._check_inner_children_have_context(node.slice) elif isinstance(node, (gast.Str, gast.Num)): pass else: raise ValueError('unexpected node type "%s"' % node) def visit_Attribute(self, node): node = self.generic_visit(node) if node.attr not in self.replacements: return node repl = self.replacements[node.attr] if not isinstance(repl, gast.Name): raise ValueError( 'An attribute can only be replaced by a Name node. Found: %s' % repl) node.attr = repl.id return node def visit_Name(self, node): if node.id not in self.replacements: return node new_nodes = self._prepare_replacement(node, node.id) # Preserve the target context. for n in new_nodes: if isinstance(n, gast.Tuple): for e in n.elts: self._set_inner_child_context(e, node.ctx) if isinstance(n, gast.Attribute): # For attributes, the inner Name node receives the context, while the # outer ones have it set to Load. self._set_inner_child_context(n, node.ctx) else: n.ctx = node.ctx if len(new_nodes) == 1: new_nodes, = new_nodes return new_nodes def _convert_to_ast(n): """Converts from a known data type to AST.""" if isinstance(n, str): # Note: the node will receive the ctx value from the template, see # ReplaceTransformer.visit_Name. return gast.Name(id=n, ctx=None, annotation=None) if isinstance(n, qual_names.QN): return n.ast() if isinstance(n, list): return [_convert_to_ast(e) for e in n] if isinstance(n, tuple): return tuple(_convert_to_ast(e) for e in n) return n def replace(template, **replacements): """Replaces placeholders in a Python template. AST Name and Tuple nodes always receive the context that inferred from the template. However, when replacing more complex nodes (that can potentially contain Name children), then the caller is responsible for setting the appropriate context. 
Args: template: A string representing Python code. Any symbol name can be used that appears in the template code can be used as placeholder. **replacements: A mapping from placeholder names to (lists of) AST nodes that these placeholders will be replaced by. String values are also supported as a shorthand for AST Name nodes with the respective ID. Returns: An AST node or list of AST nodes with the replacements made. If the template was a function, a list will be returned. If the template was a node, the same node will be returned. If the template was a string, an AST node will be returned (a `Module` node in the case of a multi-line string, an `Expr` node otherwise). Raises: ValueError: if the arguments are incorrect. """ if not isinstance(template, str): raise ValueError('Expected string template, got %s' % type(template)) tree = parser.parse_str(textwrap.dedent(template)) for k in replacements: replacements[k] = _convert_to_ast(replacements[k]) results = ReplaceTransformer(replacements).visit(tree).body if isinstance(results, list): return [qual_names.resolve(r) for r in results] return qual_names.resolve(results) def replace_as_expression(template, **replacements): """Variant of replace that generates expressions, instead of code blocks.""" replacement = replace(template, **replacements) if len(replacement) != 1: raise ValueError( 'single expression expected; for more general templates use replace') node = replacement[0] node = qual_names.resolve(node) if isinstance(node, gast.Expr): return node.value elif isinstance(node, gast.Name): return node raise ValueError( 'the template is expected to generate an expression or a name node;' ' instead found %s' % node)
aselle/tensorflow
tensorflow/contrib/autograph/pyct/templates.py
Python
apache-2.0
9,765
[ "VisIt" ]
5f6ec6aa64273efb45911069244ebe25b274342c3b29413bd68c6e2724708657
""" This modules defines two functions that can be used in place of the ``RPCClient`` and ``TransferClient`` to transparently switch to ``https``. Example:: from DIRAC.Core.Tornado.Client.ClientSelector import RPCClientSelector as RPCClient myService = RPCClient("Framework/MyService") myService.doSomething() """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = "$Id$" import functools from DIRAC import gLogger from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceURL from DIRAC.Core.DISET.RPCClient import RPCClient from DIRAC.Core.DISET.TransferClient import TransferClient from DIRAC.Core.Tornado.Client.TornadoClient import TornadoClient sLog = gLogger.getSubLogger(__name__) def ClientSelector(disetClient, *args, **kwargs): # We use same interface as RPCClient """ Select the correct Client (either RPC or Transfer ), instantiate it, and return it. The selection is based on the URL: * either it contains the protocol, in which case we make a choice * or it is in the form <Component/Service>, in which case we resolve first This is a generic function. You should rather use :py:class:`.RPCClientSelector` or :py:class:`.TransferClientSelector` In principle, the only place for this class to be used is in :py:class:`DIRAC.Core.Base.Client.Client`, since it is the only one supposed to instantiate an :py:class:`DIRAC.Core.Base.DISET.RPCClient.RPCClient` :params disetClient: the DISET class to be instantiated, so either :py:class:`DIRAC.Core.Base.DISET.RPCClient.RPCClient` or :py:class:`DIRAC.Core.Base.DISET.TransferClient.TransferClient` :param args: Whatever ``disetClient`` takes as args, but the first one is always the URL we want to rely on. It can be either "system/service" or "dips://domain:port/system/service" :param kwargs: This can contain: * Whatever ``disetClient`` takes. 
* httpsClient: specific class inheriting from TornadoClient """ # We detect if we need to use a specific class for the HTTPS client tornadoClient = kwargs.pop('httpsClient', TornadoClient) # We have to make URL resolution BEFORE the RPCClient or TornadoClient to determine which one we want to use # URL is defined as first argument (called serviceName) in RPCClient try: serviceName = args[0] sLog.debug("Trying to autodetect client for %s" % serviceName) # If we are not already given a URL, resolve it if serviceName.startswith(('http', 'dip')): completeUrl = serviceName else: completeUrl = getServiceURL(serviceName) sLog.debug("URL resolved: %s" % completeUrl) if completeUrl.startswith("http"): sLog.verbose("Using HTTPS for service %s" % serviceName) rpc = tornadoClient(*args, **kwargs) else: rpc = disetClient(*args, **kwargs) except Exception as e: # pylint: disable=broad-except # If anything went wrong in the resolution, we return default RPCClient # So the behaviour is exactly the same as before implementation of Tornado sLog.warn("Could not select DISET or Tornado client", "%s" % repr(e)) rpc = disetClient(*args, **kwargs) return rpc # Client to use for RPC selection RPCClientSelector = functools.partial(ClientSelector, RPCClient) # Client to use for Transfer selection TransferClientSelector = functools.partial(ClientSelector, TransferClient)
yujikato/DIRAC
src/DIRAC/Core/Tornado/Client/ClientSelector.py
Python
gpl-3.0
3,489
[ "DIRAC" ]
b80d6f65cfb5d1ea5ff92d4a1e79498d19fba8b92bf4a291d91a746065e47a0b
# Copyright (C) 2001 greg Landrum and Rational Discovery LLC """basic unit testing code for query mols """ from rdkit import RDConfig import unittest, os, sys class TestCase(unittest.TestCase): def setUp(self): print('\n%s: ' % self.shortDescription(), end='') # decipher the name of the executable if (sys.platform == 'win32'): exe = 'QueryMolTest___Win32_Debug/QueryMolTest.exe' else: exe = 'querytest.exe' # update to use the full path self.exe = '%s/Code/GraphMol/%s' % (RDConfig.RDBaseDir, exe) def test1(self): """ the basic test """ res = os.system(self.exe) assert res == 0, 'test failed' if __name__ == '__main__': unittest.main()
ptosco/rdkit
Code/GraphMol/UnitTestQueryMol.py
Python
bsd-3-clause
703
[ "RDKit" ]
7502f1f1ae617fd3d92685b31103cb6c642f3704a0eccb98db32b8ce09d64632
############################## # # # Instructions # # # ############################## # To run, use the following command: # $ python longest_distance.py <input_file> # where <input_file> is the filename with the question's input import sys import re # Check to make sure correct number of arguments supplied if (len(sys.argv) != 2): print('Invalid number of arguments!') sys.exit() # Read the input from the file provided as argument input_file = open(sys.argv[1]) puzzle_input = input_file.readlines() input_file.close() # Regex to find special character sequences city_regex = re.compile(r'(\w+) to (\w+)') distance_regex = re.compile(r'= (\d+)') # Intialize empty dictionary of distances distances = {} # Gets the longest distance from one city through those not yet visited def get_longest_distance_from_start(distances, start_dest, visited): # For every city that start_dest connects to, get the longest distance for it to travel # through the rest of the cities, then add the distance to travel to that city. 
longest_path = 0 visited.append(start_dest) for next_dest in distances[start_dest]: if not next_dest in visited: distance = get_longest_distance_from_start(distances, next_dest, visited[:]) + \ distances[start_dest][next_dest] if distance > longest_path: longest_path = distance # If all cities were already visited, return 0, otherwise return distance to visit return longest_path # For each line in the input for line in puzzle_input: # Get the names of the cities cities = re.search(city_regex, line) distance = re.search(distance_regex, line) # Add cities and distances to dictionary if cities.group(1) in distances: distances[cities.group(1)][cities.group(2)] = int(distance.group(1)) else: distances[cities.group(1)] = {cities.group(2): int(distance.group(1))} if cities.group(2) in distances: distances[cities.group(2)][cities.group(1)] = int(distance.group(1)) else: distances[cities.group(2)] = {cities.group(1): int(distance.group(1))} # Find the longest path from each starting city and compare to get the longest overall longest_path = 0 for starting_point in distances: longest_from_starting = get_longest_distance_from_start(distances, starting_point, []) if longest_from_starting > longest_path: longest_path = longest_from_starting # Print the longest path print('The longest distance Santa can travel is', longest_path, 'km')
joseph-roque/advent-of-code
day_09/longest_distance.py
Python
mit
2,625
[ "VisIt" ]
675e8df0c818e7ce24bcc8dcadefb2d43751897793401edf912af94321c1cf6a
# -*- coding: utf-8 -*- """ Initial AST validation and normalization. """ from __future__ import print_function, division, absolute_import import ast import copy from numba import error from numba import nodes from numba import visitors from numba import typesystem class NormalizeAST(visitors.NumbaTransformer): "Normalize AST" function_level = 0 #------------------------------------------------------------------------ # Validation #------------------------------------------------------------------------ def visit_GeneratorExp(self, node): raise error.NumbaError( node, "Generator comprehensions are not yet supported") def visit_SetComp(self, node): raise error.NumbaError( node, "Set comprehensions are not yet supported") def visit_DictComp(self, node): raise error.NumbaError( node, "Dict comprehensions are not yet supported") def visit_Raise(self, node): raise error.NumbaError(node, "Raise statement not implemented yet") #------------------------------------------------------------------------ # Normalization #------------------------------------------------------------------------ def visit_FunctionDef(self, node): if self.function_level: return self.handle_inner_function(node) self.function_level += 1 self.visitchildren(node) self.function_level -= 1 return node def handle_inner_function(self, node): "Create assignment code for inner functions and mark the assignment" lhs = ast.Name(node.name, ast.Store()) ast.copy_location(lhs, node) rhs = FuncDefExprNode(func_def=node) ast.copy_location(rhs, node) fields = rhs._fields rhs._fields = [] assmnt = ast.Assign(targets=[lhs], value=rhs) result = self.visit(assmnt) rhs._fields = fields return result def visit_FunctionDef(self, node): #for arg in node.args: # if arg.default: # self.visitchildren(arg) if self.function_level: return self.handle_inner_function(node) self.visitchildren(node) return node def visit_ListComp(self, node): """ Rewrite list comprehensions to the equivalent for loops. 
AST syntax: ListComp(expr elt, comprehension* generators) comprehension = (expr target, expr iter, expr* ifs) 'ifs' represent a chain of ANDs """ assert len(node.generators) > 0 # Create innermost body, i.e. list.append(expr) # TODO: size hint for PyList_New list_create = ast.List(elts=[], ctx=ast.Load()) list_create.type = typesystem.object_ # typesystem.ListType() list_create = nodes.CloneableNode(list_create) list_value = nodes.CloneNode(list_create) list_append = ast.Attribute(list_value, "append", ast.Load()) append_call = ast.Call(func=list_append, args=[node.elt], keywords=[], starargs=None, kwargs=None) # Build up the loops from inwards to outwards body = append_call for comprehension in reversed(node.generators): # Hanlde the 'if' clause ifs = comprehension.ifs if len(ifs) > 1: make_boolop = lambda op1_op2: ast.BoolOp(op=ast.And(), values=op1_op2) if_test = reduce(make_boolop, ifs) elif len(ifs) == 1: if_test, = ifs else: if_test = None if if_test is not None: body = ast.If(test=if_test, body=[body], orelse=[]) # Wrap list.append() call or inner loops body = ast.For(target=comprehension.target, iter=comprehension.iter, body=[body], orelse=[]) expr = nodes.ExpressionNode(stmts=[list_create, body], expr=list_value) return self.visit(expr) def visit_AugAssign(self, node): """ Inplace assignment. Resolve a += b to a = a + b. Set 'inplace_op' attribute of the Assign node so later stages may recognize inplace assignment. Do this now, so that we can correctly mark the RHS reference. 
""" target = node.target rhs_target = copy.deepcopy(target) rhs_target.ctx = ast.Load() ast.fix_missing_locations(rhs_target) bin_op = ast.BinOp(rhs_target, node.op, node.value) assignment = ast.Assign([target], bin_op) assignment.inplace_op = node.op return self.visit(assignment) #------------------------------------------------------------------------ # Nodes #------------------------------------------------------------------------ class FuncDefExprNode(nodes.Node): """ Wraps an inner function node until the closure code kicks in. """ _fields = ['func_def']
shiquanwang/numba
numba/normalize.py
Python
bsd-2-clause
5,040
[ "VisIt" ]
c0f4af9b3ae45a08aae78e2d920c34484cf6addeeb70917db457254b3fb68d67
# Copyright (c) 2015-2018, 2020 Claudiu Popa <pcmanticore@gmail.com> # Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com> # Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net> # Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com> # Copyright (c) 2019 Ashley Whetter <ashley@awhetter.co.uk> # Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com> # Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com> # Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html # For details: https://github.com/PyCQA/astroid/blob/master/LICENSE import contextlib import time import unittest from astroid import builder, nodes, parse, transforms @contextlib.contextmanager def add_transform(manager, node, transform, predicate=None): manager.register_transform(node, transform, predicate) try: yield finally: manager.unregister_transform(node, transform, predicate) class TestTransforms(unittest.TestCase): def setUp(self): self.transformer = transforms.TransformVisitor() def parse_transform(self, code): module = parse(code, apply_transforms=False) return self.transformer.visit(module) def test_function_inlining_transform(self): def transform_call(node): # Let's do some function inlining inferred = next(node.infer()) return inferred self.transformer.register_transform(nodes.Call, transform_call) module = self.parse_transform( """ def test(): return 42 test() #@ """ ) self.assertIsInstance(module.body[1], nodes.Expr) self.assertIsInstance(module.body[1].value, nodes.Const) self.assertEqual(module.body[1].value.value, 42) def test_recursive_transforms_into_astroid_fields(self): # Test that the transformer walks properly the tree # by going recursively into the _astroid_fields per each node. def transform_compare(node): # Let's check the values of the ops _, right = node.ops[0] # Assume they are Consts and they were transformed before # us. 
return nodes.const_factory(node.left.value < right.value) def transform_name(node): # Should be Consts return next(node.infer()) self.transformer.register_transform(nodes.Compare, transform_compare) self.transformer.register_transform(nodes.Name, transform_name) module = self.parse_transform( """ a = 42 b = 24 a < b """ ) self.assertIsInstance(module.body[2], nodes.Expr) self.assertIsInstance(module.body[2].value, nodes.Const) self.assertFalse(module.body[2].value.value) def test_transform_patches_locals(self): def transform_function(node): assign = nodes.Assign() name = nodes.AssignName() name.name = "value" assign.targets = [name] assign.value = nodes.const_factory(42) node.body.append(assign) self.transformer.register_transform(nodes.FunctionDef, transform_function) module = self.parse_transform( """ def test(): pass """ ) func = module.body[0] self.assertEqual(len(func.body), 2) self.assertIsInstance(func.body[1], nodes.Assign) self.assertEqual(func.body[1].as_string(), "value = 42") def test_predicates(self): def transform_call(node): inferred = next(node.infer()) return inferred def should_inline(node): return node.func.name.startswith("inlineme") self.transformer.register_transform(nodes.Call, transform_call, should_inline) module = self.parse_transform( """ def inlineme_1(): return 24 def dont_inline_me(): return 42 def inlineme_2(): return 2 inlineme_1() dont_inline_me() inlineme_2() """ ) values = module.body[-3:] self.assertIsInstance(values[0], nodes.Expr) self.assertIsInstance(values[0].value, nodes.Const) self.assertEqual(values[0].value.value, 24) self.assertIsInstance(values[1], nodes.Expr) self.assertIsInstance(values[1].value, nodes.Call) self.assertIsInstance(values[2], nodes.Expr) self.assertIsInstance(values[2].value, nodes.Const) self.assertEqual(values[2].value.value, 2) def test_transforms_are_separated(self): # Test that the transforming is done at a separate # step, which means that we are not doing inference # on a partially constructed 
tree anymore, which was the # source of crashes in the past when certain inference rules # were used in a transform. def transform_function(node): if node.decorators: for decorator in node.decorators.nodes: inferred = next(decorator.infer()) if inferred.qname() == "abc.abstractmethod": return next(node.infer_call_result()) return None manager = builder.MANAGER with add_transform(manager, nodes.FunctionDef, transform_function): module = builder.parse( """ import abc from abc import abstractmethod class A(object): @abc.abstractmethod def ala(self): return 24 @abstractmethod def bala(self): return 42 """ ) cls = module["A"] ala = cls.body[0] bala = cls.body[1] self.assertIsInstance(ala, nodes.Const) self.assertEqual(ala.value, 24) self.assertIsInstance(bala, nodes.Const) self.assertEqual(bala.value, 42) def test_transforms_are_called_for_builtin_modules(self): # Test that transforms are called for builtin modules. def transform_function(node): name = nodes.AssignName() name.name = "value" node.args.args = [name] return node manager = builder.MANAGER predicate = lambda node: node.root().name == "time" with add_transform(manager, nodes.FunctionDef, transform_function, predicate): builder_instance = builder.AstroidBuilder() module = builder_instance.module_build(time) asctime = module["asctime"] self.assertEqual(len(asctime.args.args), 1) self.assertIsInstance(asctime.args.args[0], nodes.AssignName) self.assertEqual(asctime.args.args[0].name, "value") def test_builder_apply_transforms(self): def transform_function(node): return nodes.const_factory(42) manager = builder.MANAGER with add_transform(manager, nodes.FunctionDef, transform_function): astroid_builder = builder.AstroidBuilder(apply_transforms=False) module = astroid_builder.string_build("""def test(): pass""") # The transform wasn't applied. 
self.assertIsInstance(module.body[0], nodes.FunctionDef) def test_transform_crashes_on_is_subtype_of(self): # Test that we don't crash when having is_subtype_of # in a transform, as per issue #188. This happened # before, when the transforms weren't in their own step. def transform_class(cls): if cls.is_subtype_of("django.db.models.base.Model"): return cls return cls self.transformer.register_transform(nodes.ClassDef, transform_class) self.parse_transform( """ # Change environ to automatically call putenv() if it exists import os putenv = os.putenv try: # This will fail if there's no putenv putenv except NameError: pass else: import UserDict """ ) if __name__ == "__main__": unittest.main()
ruchee/vimrc
vimfiles/bundle/vim-python/submodules/astroid/tests/unittest_transforms.py
Python
mit
8,215
[ "VisIt" ]
bde535d4094ba78e32b500f182fd83a02bc942b960a7f0bbf38c707a1a6aaeda
""" Module to set up run time parameters for Clawpack. The values set in the function setrun are then written out to data files that will be read in by the Fortran code. """ import os import numpy as np #------------------------------ def setrun(claw_pkg='amrclaw'): #------------------------------ """ Define the parameters used for running Clawpack. INPUT: claw_pkg expected to be "amrclaw" for this setrun. OUTPUT: rundata - object of class ClawRunData """ from clawpack.clawutil import data assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'" num_dim = 2 rundata = data.ClawRunData(claw_pkg, num_dim) #------------------------------------------------------------------ # Problem-specific parameters to be written to setprob.data: #------------------------------------------------------------------ probdata = rundata.new_UserData(name='probdata',fname='setprob.data') probdata.add_param('rho', 1., 'density of medium') probdata.add_param('bulk', 4., 'bulk modulus') #------------------------------------------------------------------ # Standard Clawpack parameters to be written to claw.data: # (or to amrclaw.data for AMR) #------------------------------------------------------------------ clawdata = rundata.clawdata # initialized when rundata instantiated # Set single grid parameters first. # See below for AMR parameters. 
# --------------- # Spatial domain: # --------------- # Number of space dimensions: clawdata.num_dim = num_dim # Lower and upper edge of computational domain: clawdata.lower[0] = -4.000000e+00 # xlower clawdata.upper[0] = 8.000000e+00 # xupper clawdata.lower[1] = -1.000000e+00 # ylower clawdata.upper[1] = 11.000000e+00 # yupper # Number of grid cells: clawdata.num_cells[0] = 50 # mx clawdata.num_cells[1] = 50 # my # --------------- # Size of system: # --------------- # Number of equations in the system: clawdata.num_eqn = 3 # Number of auxiliary variables in the aux array (initialized in setaux) clawdata.num_aux = 0 # Index of aux array corresponding to capacity function, if there is one: clawdata.capa_index = 0 # ------------- # Initial time: # ------------- clawdata.t0 = 0.000000 # Restart from checkpoint file of a previous run? # If restarting, t0 above should be from original run, and the # restart_file 'fort.chkNNNNN' specified below should be in # the OUTDIR indicated in Makefile. clawdata.restart = False # True to restart from prior results clawdata.restart_file = 'fort.chk00006' # File to use for restart data # ------------- # Output times: #-------------- # Specify at what times the results should be written to fort.q files. # Note that the time integration stops after the final output time. clawdata.output_style = 1 if clawdata.output_style==1: # Output ntimes frames at equally spaced times up to tfinal: # Can specify num_output_times = 0 for no output clawdata.num_output_times = 30 clawdata.tfinal = 1.5 clawdata.output_t0 = True # output at initial (or restart) time? elif clawdata.output_style == 2: # Specify a list or numpy array of output times: # Include t0 if you want output at the initial time. clawdata.output_times = [0., 0.1] elif clawdata.output_style == 3: # Output every step_interval timesteps over total_steps timesteps: clawdata.output_step_interval = 2 clawdata.total_steps = 4 clawdata.output_t0 = True # output at initial (or restart) time? 
clawdata.output_format = 'binary' # 'ascii', 'binary', 'netcdf' clawdata.output_q_components = 'all' # could be list such as [True,True] clawdata.output_aux_components = 'none' # could be list clawdata.output_aux_onlyonce = True # output aux arrays only at t0 # --------------------------------------------------- # Verbosity of messages to screen during integration: # --------------------------------------------------- # The current t, dt, and cfl will be printed every time step # at AMR levels <= verbosity. Set verbosity = 0 for no printing. # (E.g. verbosity == 2 means print only on levels 1 and 2.) clawdata.verbosity = 0 # -------------- # Time stepping: # -------------- # if dt_variable==True: variable time steps used based on cfl_desired, # if dt_variable==False: fixed time steps dt = dt_initial always used. clawdata.dt_variable = True # Initial time step for variable dt. # (If dt_variable==0 then dt=dt_initial for all steps) clawdata.dt_initial = 1.00000e-02 # Max time step to be allowed if variable dt used: clawdata.dt_max = 1.000000e+99 # Desired Courant number if variable dt used clawdata.cfl_desired = 0.900000 # max Courant number to allow without retaking step with a smaller dt: clawdata.cfl_max = 1.000000 # Maximum number of time steps to allow between output times: clawdata.steps_max = 50000 # ------------------ # Method to be used: # ------------------ # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters clawdata.order = 2 # Use dimensional splitting? 
(not yet available for AMR) clawdata.dimensional_split = 'unsplit' # For unsplit method, transverse_waves can be # 0 or 'none' ==> donor cell (only normal solver used) # 1 or 'increment' ==> corner transport of waves # 2 or 'all' ==> corner transport of 2nd order corrections too clawdata.transverse_waves = 2 # Number of waves in the Riemann solution: clawdata.num_waves = 2 # List of limiters to use for each wave family: # Required: len(limiter) == num_waves # Some options: # 0 or 'none' ==> no limiter (Lax-Wendroff) # 1 or 'minmod' ==> minmod # 2 or 'superbee' ==> superbee # 3 or 'vanleer' ==> van Leer # 4 or 'mc' ==> MC limiter clawdata.limiter = ['vanleer','vanleer'] clawdata.use_fwaves = False # True ==> use f-wave version of algorithms # Source terms splitting: # src_split == 0 or 'none' ==> no source term (src routine never called) # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used, # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended. clawdata.source_split = 0 # -------------------- # Boundary conditions: # -------------------- # Number of ghost cells (usually 2) clawdata.num_ghost = 2 # Choice of BCs at xlower and xupper: # 0 or 'user' => user specified (must modify bcNamr.f to use this option) # 1 or 'extrap' => extrapolation (non-reflecting outflow) # 2 or 'periodic' => periodic (must specify this at both boundaries) # 3 or 'wall' => solid wall for systems where q(2) is normal velocity clawdata.bc_lower[0] = 'wall' # at xlower clawdata.bc_upper[0] = 'wall' # at xupper clawdata.bc_lower[1] = 'wall' # at ylower clawdata.bc_upper[1] = 'wall' # at yupper # --------------- # Gauges: # --------------- rundata.gaugedata.gauges = [] # for gauges append lines of the form [gaugeno, x, y, t1, t2] rundata.gaugedata.gauges.append([0, 3.5, 0.5, 1.22, 2.85]) # -------------- # Checkpointing: # -------------- # Specify when checkpoint files should be created that can be # used to restart a computation. 
clawdata.checkpt_style = 0 if clawdata.checkpt_style == 0: # Do not checkpoint at all pass elif clawdata.checkpt_style == 1: # Checkpoint only at tfinal. pass elif clawdata.checkpt_style == 2: # Specify a list of checkpoint times. clawdata.checkpt_times = [0.1,0.15] elif clawdata.checkpt_style == 3: # Checkpoint every checkpt_interval timesteps (on Level 1) # and at the final time. clawdata.checkpt_interval = 5 # --------------- # AMR parameters: # --------------- amrdata = rundata.amrdata # max number of refinement levels: amrdata.amr_levels_max = 3 # List of refinement ratios at each level (length at least amr_level_max-1) amrdata.refinement_ratios_x = [2, 2] amrdata.refinement_ratios_y = [2, 2] amrdata.refinement_ratios_t = [2, 2] # Specify type of each aux variable in clawdata.auxtype. # This must be a list of length num_aux, each element of which is one of: # 'center', 'capacity', 'xleft', or 'yleft' (see documentation). amrdata.aux_type = [] # Flag for refinement based on Richardson error estimater: amrdata.flag_richardson = False # use Richardson? amrdata.flag_richardson_tol = 0.001000e+00 # Richardson tolerance # Flag for refinement using routine flag2refine: amrdata.flag2refine = True # use this? amrdata.flag2refine_tol = 0.1 # tolerance used in this routine # User can modify flag2refine to change the criterion for flagging. # steps to take on each level L between regriddings of level L+1: amrdata.regrid_interval = 2 # width of buffer zone around flagged points: # (typically the same as regrid_interval so waves don't escape): amrdata.regrid_buffer_width = 2 # clustering alg. 
cutoff for (# flagged pts) / (total # of cells refined) # (closer to 1.0 => more small grids may be needed to cover flagged cells) amrdata.clustering_cutoff = 0.7 # print info about each regridding up to this level: amrdata.verbosity_regrid = 0 # --------------- # Regions: # --------------- rundata.regiondata.regions = [] # to specify regions of refinement append lines of the form # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2] # ----- For developers ----- # Toggle debugging print statements: amrdata.dprint = False # print domain flags amrdata.eprint = False # print err est flags amrdata.edebug = False # even more err est flags amrdata.gprint = False # grid bisection/clustering amrdata.nprint = False # proper nesting output amrdata.pprint = False # proj. of tagged points amrdata.rprint = False # print regridding summary amrdata.sprint = False # space/memory output amrdata.tprint = False # time step reporting each level amrdata.uprint = False # update/upbnd reporting return rundata # end of function setrun # ---------------------- if __name__ == '__main__': # Set up run-time parameters and write all data files. import sys rundata = setrun(*sys.argv[1:]) rundata.write()
clawpack/adjoint
examples/acoustics_2d_radial_timepoint/compare/setrun_pflag.py
Python
bsd-2-clause
11,189
[ "NetCDF" ]
645bee7570f9dad41d8c75b1cba09098d0c0a3d6263b2d02312964418784ed84
""" Defines the SX127x class and a few utility functions. """ # -*- coding: utf-8 -*- # Copyright 2015-2018 Mayer Analytics Ltd. # # This file is part of pySX127x. # # pySX127x is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public # License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later # version. # # pySX127x is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You can be released from the requirements of the license by obtaining a commercial license. Such a license is # mandatory as soon as you develop commercial activities involving pySX127x without disclosing the source code of your # own applications, or shipping pySX127x with a closed source product. # # You should have received a copy of the GNU General Public License along with pySX127. If not, see # <http://www.gnu.org/licenses/>. import sys from .constants import * from .board_config import BOARD ################################################## Some utility functions ############################################## def set_bit(value, index, new_bit): """ Set the index'th bit of value to new_bit, and return the new value. :param value: The integer to set the new_bit in :type value: int :param index: 0-based index :param new_bit: New value the bit shall have (0 or 1) :return: Changed value :rtype: int """ mask = 1 << index value &= ~mask if new_bit: value |= mask return value def getter(register_address): """ The getter decorator reads the register content and calls the decorated function to do post-processing. 
:param register_address: Register address :return: Register value :rtype: int """ def decorator(func): def wrapper(self): return func(self, self.spi.xfer([register_address, 0])[1]) return wrapper return decorator def setter(register_address): """ The setter decorator calls the decorated function for pre-processing and then writes the result to the register :param register_address: Register address :return: New register value :rtype: int """ def decorator(func): def wrapper(self, val): return self.spi.xfer([register_address | 0x80, func(self, val)])[1] return wrapper return decorator ############################################### Definition of the LoRa class ########################################### class LoRa(object): spi = BOARD.SpiDev() # init and get the baord's SPI mode = None # the mode is backed up here backup_registers = [] verbose = True dio_mapping = [None] * 6 # store the dio mapping here def __init__(self, verbose=True, do_calibration=True, calibration_freq=868): """ Init the object Send the device to sleep, read all registers, and do the calibration (if do_calibration=True) :param verbose: Set the verbosity True/False :param calibration_freq: call rx_chain_calibration with this parameter. Default is 868 :param do_calibration: Call rx_chain_calibration, default is True. """ self.verbose = verbose # set the callbacks for DIO0..5 IRQs. 
BOARD.add_events(self._dio0, self._dio1, self._dio2, self._dio3, self._dio4, self._dio5) # set mode to sleep and read all registers self.set_mode(MODE.SLEEP) self.backup_registers = self.get_all_registers() # more setup work: if do_calibration: self.rx_chain_calibration(calibration_freq) # the FSK registers are set up exactly as modtronix do it: lookup_fsk = [ #[REG.FSK.LNA , 0x23], #[REG.FSK.RX_CONFIG , 0x1E], #[REG.FSK.RSSI_CONFIG , 0xD2], #[REG.FSK.PREAMBLE_DETECT, 0xAA], #[REG.FSK.OSC , 0x07], #[REG.FSK.SYNC_CONFIG , 0x12], #[REG.FSK.SYNC_VALUE_1 , 0xC1], #[REG.FSK.SYNC_VALUE_2 , 0x94], #[REG.FSK.SYNC_VALUE_3 , 0xC1], #[REG.FSK.PACKET_CONFIG_1, 0xD8], #[REG.FSK.FIFO_THRESH , 0x8F], #[REG.FSK.IMAGE_CAL , 0x02], #[REG.FSK.DIO_MAPPING_1 , 0x00], #[REG.FSK.DIO_MAPPING_2 , 0x30] ] self.set_mode(MODE.FSK_STDBY) for register_address, value in lookup_fsk: self.set_register(register_address, value) self.set_mode(MODE.SLEEP) # set the dio_ mapping by calling the two get_dio_mapping_* functions self.get_dio_mapping_1() self.get_dio_mapping_2() # Overridable functions: def on_rx_done(self): pass def on_tx_done(self): pass def on_cad_done(self): pass def on_rx_timeout(self): pass def on_valid_header(self): pass def on_payload_crc_error(self): pass def on_fhss_change_channel(self): pass # Internal callbacks for add_events() def _dio0(self, channel): # DIO0 00: RxDone # DIO0 01: TxDone # DIO0 10: CadDone if self.dio_mapping[0] == 0: self.on_rx_done() elif self.dio_mapping[0] == 1: self.on_tx_done() elif self.dio_mapping[0] == 2: self.on_cad_done() else: raise RuntimeError("unknown dio0mapping!") def _dio1(self, channel): # DIO1 00: RxTimeout # DIO1 01: FhssChangeChannel # DIO1 10: CadDetected if self.dio_mapping[1] == 0: self.on_rx_timeout() elif self.dio_mapping[1] == 1: self.on_fhss_change_channel() elif self.dio_mapping[1] == 2: self.on_CadDetected() else: raise RuntimeError("unknown dio1mapping!") def _dio2(self, channel): # DIO2 00: FhssChangeChannel # DIO2 01: 
FhssChangeChannel # DIO2 10: FhssChangeChannel self.on_fhss_change_channel() def _dio3(self, channel): # DIO3 00: CadDone # DIO3 01: ValidHeader # DIO3 10: PayloadCrcError if self.dio_mapping[3] == 0: self.on_cad_done() elif self.dio_mapping[3] == 1: self.on_valid_header() elif self.dio_mapping[3] == 2: self.on_payload_crc_error() else: raise RuntimeError("unknown dio3 mapping!") def _dio4(self, channel): raise RuntimeError("DIO4 is not used") def _dio5(self, channel): raise RuntimeError("DIO5 is not used") # All the set/get/read/write functions def get_mode(self): """ Get the mode :return: New mode """ self.mode = self.spi.xfer([REG.LORA.OP_MODE, 0])[1] return self.mode def set_mode(self, mode): """ Set the mode :param mode: Set the mode. Use constants.MODE class :return: New mode """ # the mode is backed up in self.mode if mode == self.mode: return mode if self.verbose: sys.stderr.write("Mode <- %s\n" % MODE.lookup[mode]) self.mode = mode return self.spi.xfer([REG.LORA.OP_MODE | 0x80, mode])[1] def write_payload(self, payload): """ Get FIFO ready for TX: Set FifoAddrPtr to FifoTxBaseAddr. The transceiver is put into STDBY mode. :param payload: Payload to write (list) :return: Written payload """ payload_size = len(payload) self.set_payload_length(payload_size) self.set_mode(MODE.STDBY) base_addr = self.get_fifo_tx_base_addr() self.set_fifo_addr_ptr(base_addr) return self.spi.xfer([REG.LORA.FIFO | 0x80] + payload)[1:] def reset_ptr_rx(self): """ Get FIFO ready for RX: Set FifoAddrPtr to FifoRxBaseAddr. The transceiver is put into STDBY mode. 
""" self.set_mode(MODE.STDBY) base_addr = self.get_fifo_rx_base_addr() self.set_fifo_addr_ptr(base_addr) def rx_is_good(self): """ Check the IRQ flags for RX errors :return: True if no errors :rtype: bool """ flags = self.get_irq_flags() return not any([flags[s] for s in ['valid_header', 'crc_error', 'rx_done', 'rx_timeout']]) def read_payload(self , nocheck = False): """ Read the payload from FIFO :param nocheck: If True then check rx_is_good() :return: Payload :rtype: list[int] """ if not nocheck and not self.rx_is_good(): return None rx_nb_bytes = self.get_rx_nb_bytes() fifo_rx_current_addr = self.get_fifo_rx_current_addr() self.set_fifo_addr_ptr(fifo_rx_current_addr) payload = self.spi.xfer([REG.LORA.FIFO] + [0] * rx_nb_bytes)[1:] return payload def get_freq(self): """ Get the frequency (MHz) :return: Frequency in MHz :rtype: float """ msb, mid, lsb = self.spi.xfer([REG.LORA.FR_MSB, 0, 0, 0])[1:] f = lsb + 256*(mid + 256*msb) return f / 16384. def set_freq(self, f): """ Set the frequency (MHz) :param f: Frequency in MHz "type f: float :return: New register settings (3 bytes [msb, mid, lsb]) :rtype: list[int] """ assert self.mode == MODE.SLEEP or self.mode == MODE.STDBY or self.mode == MODE.FSK_STDBY i = int(f * 16384.) 
# choose floor msb = i // 65536 i -= msb * 65536 mid = i // 256 i -= mid * 256 lsb = i return self.spi.xfer([REG.LORA.FR_MSB | 0x80, msb, mid, lsb]) def get_pa_config(self, convert_dBm=False): v = self.spi.xfer([REG.LORA.PA_CONFIG, 0])[1] pa_select = v >> 7 max_power = v >> 4 & 0b111 output_power = v & 0b1111 if convert_dBm: max_power = max_power * .6 + 10.8 output_power = max_power - (15 - output_power) return dict( pa_select = pa_select, max_power = max_power, output_power = output_power ) def set_pa_config(self, pa_select=None, max_power=None, output_power=None): """ Configure the PA :param pa_select: Selects PA output pin, 0->RFO, 1->PA_BOOST :param max_power: Select max output power Pmax=10.8+0.6*MaxPower :param output_power: Output power Pout=Pmax-(15-OutputPower) if PaSelect = 0, Pout=17-(15-OutputPower) if PaSelect = 1 (PA_BOOST pin) :return: new register value """ loc = locals() current = self.get_pa_config() loc = {s: current[s] if loc[s] is None else loc[s] for s in loc} val = (loc['pa_select'] << 7) | (loc['max_power'] << 4) | (loc['output_power']) return self.spi.xfer([REG.LORA.PA_CONFIG | 0x80, val])[1] @getter(REG.LORA.PA_RAMP) def get_pa_ramp(self, val): return val & 0b1111 @setter(REG.LORA.PA_RAMP) def set_pa_ramp(self, val): return val & 0b1111 def get_ocp(self, convert_mA=False): v = self.spi.xfer([REG.LORA.OCP, 0])[1] ocp_on = v >> 5 & 0x01 ocp_trim = v & 0b11111 if convert_mA: if ocp_trim <= 15: ocp_trim = 45. + 5. * ocp_trim elif ocp_trim <= 27: ocp_trim = -30. + 10. 
* ocp_trim else: assert ocp_trim <= 27 return dict( ocp_on = ocp_on, ocp_trim = ocp_trim ) def set_ocp_trim(self, I_mA): assert(I_mA >= 45 and I_mA <= 240) ocp_on = self.spi.xfer([REG.LORA.OCP, 0])[1] >> 5 & 0x01 if I_mA <= 120: v = int(round((I_mA-45.)/5.)) else: v = int(round((I_mA+30.)/10.)) v = set_bit(v, 5, ocp_on) return self.spi.xfer([REG.LORA.OCP | 0x80, v])[1] def get_lna(self): v = self.spi.xfer([REG.LORA.LNA, 0])[1] return dict( lna_gain = v >> 5, lna_boost_lf = v >> 3 & 0b11, lna_boost_hf = v & 0b11 ) def set_lna(self, lna_gain=None, lna_boost_lf=None, lna_boost_hf=None): assert lna_boost_hf is None or lna_boost_hf == 0b00 or lna_boost_hf == 0b11 self.set_mode(MODE.STDBY) if lna_gain is not None: # Apparently agc_auto_on must be 0 in order to set lna_gain self.set_agc_auto_on(lna_gain == GAIN.NOT_USED) loc = locals() current = self.get_lna() loc = {s: current[s] if loc[s] is None else loc[s] for s in loc} val = (loc['lna_gain'] << 5) | (loc['lna_boost_lf'] << 3) | (loc['lna_boost_hf']) retval = self.spi.xfer([REG.LORA.LNA | 0x80, val])[1] if lna_gain is not None: # agc_auto_on must track lna_gain: GAIN=NOT_USED -> agc_auto=ON, otherwise =OFF self.set_agc_auto_on(lna_gain == GAIN.NOT_USED) return retval def set_lna_gain(self, lna_gain): self.set_lna(lna_gain=lna_gain) def get_fifo_addr_ptr(self): return self.spi.xfer([REG.LORA.FIFO_ADDR_PTR, 0])[1] def set_fifo_addr_ptr(self, ptr): return self.spi.xfer([REG.LORA.FIFO_ADDR_PTR | 0x80, ptr])[1] def get_fifo_tx_base_addr(self): return self.spi.xfer([REG.LORA.FIFO_TX_BASE_ADDR, 0])[1] def set_fifo_tx_base_addr(self, ptr): return self.spi.xfer([REG.LORA.FIFO_TX_BASE_ADDR | 0x80, ptr])[1] def get_fifo_rx_base_addr(self): return self.spi.xfer([REG.LORA.FIFO_RX_BASE_ADDR, 0])[1] def set_fifo_rx_base_addr(self, ptr): return self.spi.xfer([REG.LORA.FIFO_RX_BASE_ADDR | 0x80, ptr])[1] def get_fifo_rx_current_addr(self): return self.spi.xfer([REG.LORA.FIFO_RX_CURR_ADDR, 0])[1] def get_fifo_rx_byte_addr(self): return 
self.spi.xfer([REG.LORA.FIFO_RX_BYTE_ADDR, 0])[1] def get_irq_flags_mask(self): v = self.spi.xfer([REG.LORA.IRQ_FLAGS_MASK, 0])[1] return dict( rx_timeout = v >> 7 & 0x01, rx_done = v >> 6 & 0x01, crc_error = v >> 5 & 0x01, valid_header = v >> 4 & 0x01, tx_done = v >> 3 & 0x01, cad_done = v >> 2 & 0x01, fhss_change_ch = v >> 1 & 0x01, cad_detected = v >> 0 & 0x01, ) def set_irq_flags_mask(self, rx_timeout=None, rx_done=None, crc_error=None, valid_header=None, tx_done=None, cad_done=None, fhss_change_ch=None, cad_detected=None): loc = locals() v = self.spi.xfer([REG.LORA.IRQ_FLAGS_MASK, 0])[1] for i, s in enumerate(['cad_detected', 'fhss_change_ch', 'cad_done', 'tx_done', 'valid_header', 'crc_error', 'rx_done', 'rx_timeout']): this_bit = locals()[s] if this_bit is not None: v = set_bit(v, i, this_bit) return self.spi.xfer([REG.LORA.IRQ_FLAGS_MASK | 0x80, v])[1] def get_irq_flags(self): v = self.spi.xfer([REG.LORA.IRQ_FLAGS, 0])[1] return dict( rx_timeout = v >> 7 & 0x01, rx_done = v >> 6 & 0x01, crc_error = v >> 5 & 0x01, valid_header = v >> 4 & 0x01, tx_done = v >> 3 & 0x01, cad_done = v >> 2 & 0x01, fhss_change_ch = v >> 1 & 0x01, cad_detected = v >> 0 & 0x01, ) def set_irq_flags(self, rx_timeout=None, rx_done=None, crc_error=None, valid_header=None, tx_done=None, cad_done=None, fhss_change_ch=None, cad_detected=None): v = self.spi.xfer([REG.LORA.IRQ_FLAGS, 0])[1] for i, s in enumerate(['cad_detected', 'fhss_change_ch', 'cad_done', 'tx_done', 'valid_header', 'crc_error', 'rx_done', 'rx_timeout']): this_bit = locals()[s] if this_bit is not None: v = set_bit(v, i, this_bit) return self.spi.xfer([REG.LORA.IRQ_FLAGS | 0x80, v])[1] def clear_irq_flags(self, RxTimeout=None, RxDone=None, PayloadCrcError=None, ValidHeader=None, TxDone=None, CadDone=None, FhssChangeChannel=None, CadDetected=None): v = 0 for i, s in enumerate(['CadDetected', 'FhssChangeChannel', 'CadDone', 'TxDone', 'ValidHeader', 'PayloadCrcError', 'RxDone', 'RxTimeout']): this_bit = locals()[s] if 
this_bit is not None: v = set_bit(v, eval('MASK.IRQ_FLAGS.' + s), this_bit) return self.spi.xfer([REG.LORA.IRQ_FLAGS | 0x80, v])[1] def get_rx_nb_bytes(self): return self.spi.xfer([REG.LORA.RX_NB_BYTES, 0])[1] def get_rx_header_cnt(self): msb, lsb = self.spi.xfer([REG.LORA.RX_HEADER_CNT_MSB, 0, 0])[1:] return lsb + 256 * msb def get_rx_packet_cnt(self): msb, lsb = self.spi.xfer([REG.LORA.RX_PACKET_CNT_MSB, 0, 0])[1:] return lsb + 256 * msb def get_modem_status(self): status = self.spi.xfer([REG.LORA.MODEM_STAT, 0])[1] return dict( rx_coding_rate = status >> 5 & 0x03, modem_clear = status >> 4 & 0x01, header_info_valid = status >> 3 & 0x01, rx_ongoing = status >> 2 & 0x01, signal_sync = status >> 1 & 0x01, signal_detected = status >> 0 & 0x01 ) def get_pkt_snr_value(self): v = self.spi.xfer([REG.LORA.PKT_SNR_VALUE, 0])[1] return (float(v-256) if v > 127 else float(v)) / 4. def get_pkt_rssi_value(self): v = self.spi.xfer([REG.LORA.PKT_RSSI_VALUE, 0])[1] return v - (164 if BOARD.low_band else 157) # See datasheet 5.5.5. p. 87 def get_rssi_value(self): v = self.spi.xfer([REG.LORA.RSSI_VALUE, 0])[1] return v - (164 if BOARD.low_band else 157) # See datasheet 5.5.5. p. 87 def get_hop_channel(self): v = self.spi.xfer([REG.LORA.HOP_CHANNEL, 0])[1] return dict( pll_timeout = v >> 7, crc_on_payload = v >> 6 & 0x01, fhss_present_channel = v >> 5 & 0b111111 ) def get_modem_config_1(self): val = self.spi.xfer([REG.LORA.MODEM_CONFIG_1, 0])[1] return dict( bw = val >> 4 & 0x0F, coding_rate = val >> 1 & 0x07, implicit_header_mode = val & 0x01 ) def set_modem_config_1(self, bw=None, coding_rate=None, implicit_header_mode=None): loc = locals() current = self.get_modem_config_1() loc = {s: current[s] if loc[s] is None else loc[s] for s in loc} val = loc['implicit_header_mode'] | (loc['coding_rate'] << 1) | (loc['bw'] << 4) return self.spi.xfer([REG.LORA.MODEM_CONFIG_1 | 0x80, val])[1] def set_bw(self, bw): """ Set the bandwidth 0=7.8kHz ... 
9=500kHz :param bw: A number 0,2,3,...,9 :return: """ self.set_modem_config_1(bw=bw) def set_coding_rate(self, coding_rate): """ Set the coding rate 4/5, 4/6, 4/7, 4/8 :param coding_rate: A number 1,2,3,4 :return: New register value """ self.set_modem_config_1(coding_rate=coding_rate) def set_implicit_header_mode(self, implicit_header_mode): self.set_modem_config_1(implicit_header_mode=implicit_header_mode) def get_modem_config_2(self, include_symb_timout_lsb=False): val = self.spi.xfer([REG.LORA.MODEM_CONFIG_2, 0])[1] d = dict( spreading_factor = val >> 4 & 0x0F, tx_cont_mode = val >> 3 & 0x01, rx_crc = val >> 2 & 0x01, ) if include_symb_timout_lsb: d['symb_timout_lsb'] = val & 0x03 return d def set_modem_config_2(self, spreading_factor=None, tx_cont_mode=None, rx_crc=None): loc = locals() # RegModemConfig2 contains the SymbTimout MSB bits. We tack the back on when writing this register. current = self.get_modem_config_2(include_symb_timout_lsb=True) loc = {s: current[s] if loc[s] is None else loc[s] for s in loc} val = (loc['spreading_factor'] << 4) | (loc['tx_cont_mode'] << 3) | (loc['rx_crc'] << 2) | current['symb_timout_lsb'] return self.spi.xfer([REG.LORA.MODEM_CONFIG_2 | 0x80, val])[1] def set_spreading_factor(self, spreading_factor): self.set_modem_config_2(spreading_factor=spreading_factor) def set_rx_crc(self, rx_crc): self.set_modem_config_2(rx_crc=rx_crc) def get_modem_config_3(self): val = self.spi.xfer([REG.LORA.MODEM_CONFIG_3, 0])[1] return dict( low_data_rate_optim = val >> 3 & 0x01, agc_auto_on = val >> 2 & 0x01 ) def set_modem_config_3(self, low_data_rate_optim=None, agc_auto_on=None): loc = locals() current = self.get_modem_config_3() loc = {s: current[s] if loc[s] is None else loc[s] for s in loc} val = (loc['low_data_rate_optim'] << 3) | (loc['agc_auto_on'] << 2) return self.spi.xfer([REG.LORA.MODEM_CONFIG_3 | 0x80, val])[1] @setter(REG.LORA.INVERT_IQ) def set_invert_iq(self, invert): """ Invert the LoRa I and Q signals :param invert: 0: normal 
mode, 1: I and Q inverted :return: New value of register """ return 0x27 | (invert & 0x01) << 6 @getter(REG.LORA.INVERT_IQ) def get_invert_iq(self, val): """ Get the invert the I and Q setting :return: 0: normal mode, 1: I and Q inverted """ return (val >> 6) & 0x01 def get_agc_auto_on(self): return self.get_modem_config_3()['agc_auto_on'] def set_agc_auto_on(self, agc_auto_on): self.set_modem_config_3(agc_auto_on=agc_auto_on) def get_low_data_rate_optim(self): return self.set_modem_config_3()['low_data_rate_optim'] def set_low_data_rate_optim(self, low_data_rate_optim): self.set_modem_config_3(low_data_rate_optim=low_data_rate_optim) def get_symb_timeout(self): SYMB_TIMEOUT_MSB = REG.LORA.MODEM_CONFIG_2 msb, lsb = self.spi.xfer([SYMB_TIMEOUT_MSB, 0, 0])[1:] # the MSB bits are stored in REG.LORA.MODEM_CONFIG_2 msb = msb & 0b11 return lsb + 256 * msb def set_symb_timeout(self, timeout): bkup_reg_modem_config_2 = self.spi.xfer([REG.LORA.MODEM_CONFIG_2, 0])[1] msb = timeout >> 8 & 0b11 # bits 8-9 lsb = timeout - 256 * msb # bits 0-7 reg_modem_config_2 = bkup_reg_modem_config_2 & 0xFC | msb # bits 2-7 of bkup_reg_modem_config_2 ORed with the two msb bits old_msb = self.spi.xfer([REG.LORA.MODEM_CONFIG_2 | 0x80, reg_modem_config_2])[1] & 0x03 old_lsb = self.spi.xfer([REG.LORA.SYMB_TIMEOUT_LSB | 0x80, lsb])[1] return old_lsb + 256 * old_msb def get_preamble(self): msb, lsb = self.spi.xfer([REG.LORA.PREAMBLE_MSB, 0, 0])[1:] return lsb + 256 * msb def set_preamble(self, preamble): msb = preamble >> 8 lsb = preamble - msb * 256 old_msb, old_lsb = self.spi.xfer([REG.LORA.PREAMBLE_MSB | 0x80, msb, lsb])[1:] return old_lsb + 256 * old_msb @getter(REG.LORA.PAYLOAD_LENGTH) def get_payload_length(self, val): return val @setter(REG.LORA.PAYLOAD_LENGTH) def set_payload_length(self, payload_length): return payload_length @getter(REG.LORA.MAX_PAYLOAD_LENGTH) def get_max_payload_length(self, val): return val @setter(REG.LORA.MAX_PAYLOAD_LENGTH) def set_max_payload_length(self, 
max_payload_length): return max_payload_length @getter(REG.LORA.HOP_PERIOD) def get_hop_period(self, val): return val @setter(REG.LORA.HOP_PERIOD) def set_hop_period(self, hop_period): return hop_period def get_fei(self): msb, mid, lsb = self.spi.xfer([REG.LORA.FEI_MSB, 0, 0, 0])[1:] msb &= 0x0F freq_error = lsb + 256 * (mid + 256 * msb) return freq_error @getter(REG.LORA.DETECT_OPTIMIZE) def get_detect_optimize(self, val): """ Get LoRa detection optimize setting :return: detection optimize setting 0x03: SF7-12, 0x05: SF6 """ return val & 0b111 @setter(REG.LORA.DETECT_OPTIMIZE) def set_detect_optimize(self, detect_optimize): """ Set LoRa detection optimize :param detect_optimize 0x03: SF7-12, 0x05: SF6 :return: New register value """ assert detect_optimize == 0x03 or detect_optimize == 0x05 return detect_optimize & 0b111 @getter(REG.LORA.DETECTION_THRESH) def get_detection_threshold(self, val): """ Get LoRa detection threshold setting :return: detection threshold 0x0A: SF7-12, 0x0C: SF6 """ return val @setter(REG.LORA.DETECTION_THRESH) def set_detection_threshold(self, detect_threshold): """ Set LoRa detection optimize :param detect_threshold 0x0A: SF7-12, 0x0C: SF6 :return: New register value """ assert detect_threshold == 0x0A or detect_threshold == 0x0C return detect_threshold @getter(REG.LORA.SYNC_WORD) def get_sync_word(self, sync_word): return sync_word @setter(REG.LORA.SYNC_WORD) def set_sync_word(self, sync_word): return sync_word @getter(REG.LORA.DIO_MAPPING_1) def get_dio_mapping_1(self, mapping): """ Get mapping of pins DIO0 to DIO3. Object variable dio_mapping will be set. :param mapping: Register value :type mapping: int :return: Value of the mapping list :rtype: list[int] """ self.dio_mapping = [mapping>>6 & 0x03, mapping>>4 & 0x03, mapping>>2 & 0x03, mapping>>0 & 0x03] \ + self.dio_mapping[4:6] return self.dio_mapping @setter(REG.LORA.DIO_MAPPING_1) def set_dio_mapping_1(self, mapping): """ Set mapping of pins DIO0 to DIO3. 
Object variable dio_mapping will be set. :param mapping: Register value :type mapping: int :return: New value of the register :rtype: int """ self.dio_mapping = [mapping>>6 & 0x03, mapping>>4 & 0x03, mapping>>2 & 0x03, mapping>>0 & 0x03] \ + self.dio_mapping[4:6] return mapping @getter(REG.LORA.DIO_MAPPING_2) def get_dio_mapping_2(self, mapping): """ Get mapping of pins DIO4 to DIO5. Object variable dio_mapping will be set. :param mapping: Register value :type mapping: int :return: Value of the mapping list :rtype: list[int] """ self.dio_mapping = self.dio_mapping[0:4] + [mapping>>6 & 0x03, mapping>>4 & 0x03] return self.dio_mapping @setter(REG.LORA.DIO_MAPPING_2) def set_dio_mapping_2(self, mapping): """ Set mapping of pins DIO4 to DIO5. Object variable dio_mapping will be set. :param mapping: Register value :type mapping: int :return: New value of the register :rtype: int """ assert mapping & 0b00001110 == 0 self.dio_mapping = self.dio_mapping[0:4] + [mapping>>6 & 0x03, mapping>>4 & 0x03] return mapping def get_dio_mapping(self): """ Utility function that returns the list of current DIO mappings. Object variable dio_mapping will be set. :return: List of current DIO mappings :rtype: list[int] """ self.get_dio_mapping_1() return self.get_dio_mapping_2() def set_dio_mapping(self, mapping): """ Utility function that returns the list of current DIO mappings. Object variable dio_mapping will be set. :param mapping: DIO mapping list :type mapping: list[int] :return: New DIO mapping list :rtype: list[int] """ mapping_1 = (mapping[0] & 0x03) << 6 | (mapping[1] & 0x03) << 4 | (mapping[2] & 0x3) << 2 | mapping[3] & 0x3 mapping_2 = (mapping[4] & 0x03) << 6 | (mapping[5] & 0x03) << 4 self.set_dio_mapping_1(mapping_1) return self.set_dio_mapping_2(mapping_2) @getter(REG.LORA.VERSION) def get_version(self, version): """ Version code of the chip. Bits 7-4 give the full revision number; bits 3-0 give the metal mask revision number. 
:return: Version code :rtype: int """ return version @getter(REG.LORA.TCXO) def get_tcxo(self, tcxo): """ Get TCXO or XTAL input setting 0 -> "XTAL": Crystal Oscillator with external Crystal 1 -> "TCXO": External clipped sine TCXO AC-connected to XTA pin :param tcxo: 1=TCXO or 0=XTAL input setting :return: TCXO or XTAL input setting :type: int (0 or 1) """ return tcxo & 0b00010000 @setter(REG.LORA.TCXO) def set_tcxo(self, tcxo): """ Make TCXO or XTAL input setting. 0 -> "XTAL": Crystal Oscillator with external Crystal 1 -> "TCXO": External clipped sine TCXO AC-connected to XTA pin :param tcxo: 1=TCXO or 0=XTAL input setting :return: new TCXO or XTAL input setting """ return (tcxo >= 1) << 4 | 0x09 # bits 0-3 must be 0b1001 @getter(REG.LORA.PA_DAC) def get_pa_dac(self, pa_dac): """ Enables the +20dBm option on PA_BOOST pin False -> Default value True -> +20dBm on PA_BOOST when OutputPower=1111 :return: True/False if +20dBm option on PA_BOOST on/off :rtype: bool """ pa_dac &= 0x07 # only bits 0-2 if pa_dac == 0x04: return False elif pa_dac == 0x07: return True else: raise RuntimeError("Bad PA_DAC value %s" % hex(pa_dac)) @setter(REG.LORA.PA_DAC) def set_pa_dac(self, pa_dac): """ Enables the +20dBm option on PA_BOOST pin False -> Default value True -> +20dBm on PA_BOOST when OutputPower=1111 :param pa_dac: 1/0 if +20dBm option on PA_BOOST on/off :return: New pa_dac register value :rtype: int """ return 0x87 if pa_dac else 0x84 def rx_chain_calibration(self, freq=868.): """ Run the image calibration (see Semtech documentation section 4.2.3.8) :param freq: Frequency for the HF calibration :return: None """ # backup some registers op_mode_bkup = self.get_mode() pa_config_bkup = self.get_register(REG.LORA.PA_CONFIG) freq_bkup = self.get_freq() # for image calibration device must be in FSK standby mode self.set_mode(MODE.FSK_STDBY) # cut the PA self.set_register(REG.LORA.PA_CONFIG, 0x00) # calibration for the LF band image_cal = (self.get_register(REG.FSK.IMAGE_CAL) & 
0xBF) | 0x40 self.set_register(REG.FSK.IMAGE_CAL, image_cal) while (self.get_register(REG.FSK.IMAGE_CAL) & 0x20) == 0x20: pass # Set a Frequency in HF band self.set_freq(freq) # calibration for the HF band image_cal = (self.get_register(REG.FSK.IMAGE_CAL) & 0xBF) | 0x40 self.set_register(REG.FSK.IMAGE_CAL, image_cal) while (self.get_register(REG.FSK.IMAGE_CAL) & 0x20) == 0x20: pass # put back the saved parameters self.set_mode(op_mode_bkup) self.set_register(REG.LORA.PA_CONFIG, pa_config_bkup) self.set_freq(freq_bkup) def dump_registers(self): """ Returns a list of [reg_addr, reg_name, reg_value] tuples. Chip is put into mode SLEEP. :return: List of [reg_addr, reg_name, reg_value] tuples :rtype: list[tuple] """ self.set_mode(MODE.SLEEP) values = self.get_all_registers() skip_set = set([REG.LORA.FIFO]) result_list = [] for i, s in REG.LORA.lookup.iteritems(): if i in skip_set: continue v = values[i] result_list.append((i, s, v)) return result_list def get_register(self, register_address): return self.spi.xfer([register_address & 0x7F, 0])[1] def set_register(self, register_address, val): return self.spi.xfer([register_address | 0x80, val])[1] def get_all_registers(self): # read all registers reg = [0] + self.spi.xfer([1]+[0]*0x3E)[1:] self.mode = reg[1] return reg def __del__(self): self.set_mode(MODE.SLEEP) if self.verbose: sys.stderr.write("MODE=SLEEP\n") def __str__(self): # don't use __str__ while in any mode other that SLEEP or STDBY assert(self.mode == MODE.SLEEP or self.mode == MODE.STDBY) onoff = lambda i: 'ON' if i else 'OFF' f = self.get_freq() cfg1 = self.get_modem_config_1() cfg2 = self.get_modem_config_2() cfg3 = self.get_modem_config_3() pa_config = self.get_pa_config(convert_dBm=True) ocp = self.get_ocp(convert_mA=True) lna = self.get_lna() s = "SX127x LoRa registers:\n" s += " mode %s\n" % MODE.lookup[self.get_mode()] s += " freq %f MHz\n" % f s += " coding_rate %s\n" % CODING_RATE.lookup[cfg1['coding_rate']] s += " bw %s\n" % BW.lookup[cfg1['bw']] s 
+= " spreading_factor %s chips/symb\n" % (1 << cfg2['spreading_factor']) s += " implicit_hdr_mode %s\n" % onoff(cfg1['implicit_header_mode']) s += " rx_payload_crc %s\n" % onoff(cfg2['rx_crc']) s += " tx_cont_mode %s\n" % onoff(cfg2['tx_cont_mode']) s += " preamble %d\n" % self.get_preamble() s += " low_data_rate_opti %s\n" % onoff(cfg3['low_data_rate_optim']) s += " agc_auto_on %s\n" % onoff(cfg3['agc_auto_on']) s += " symb_timeout %s\n" % self.get_symb_timeout() s += " freq_hop_period %s\n" % self.get_hop_period() s += " hop_channel %s\n" % self.get_hop_channel() s += " payload_length %s\n" % self.get_payload_length() s += " max_payload_length %s\n" % self.get_max_payload_length() s += " irq_flags_mask %s\n" % self.get_irq_flags_mask() s += " irq_flags %s\n" % self.get_irq_flags() s += " rx_nb_byte %d\n" % self.get_rx_nb_bytes() s += " rx_header_cnt %d\n" % self.get_rx_header_cnt() s += " rx_packet_cnt %d\n" % self.get_rx_packet_cnt() s += " pkt_snr_value %f\n" % self.get_pkt_snr_value() s += " pkt_rssi_value %d\n" % self.get_pkt_rssi_value() s += " rssi_value %d\n" % self.get_rssi_value() s += " fei %d\n" % self.get_fei() s += " pa_select %s\n" % PA_SELECT.lookup[pa_config['pa_select']] s += " max_power %f dBm\n" % pa_config['max_power'] s += " output_power %f dBm\n" % pa_config['output_power'] s += " ocp %s\n" % onoff(ocp['ocp_on']) s += " ocp_trim %f mA\n" % ocp['ocp_trim'] s += " lna_gain %s\n" % GAIN.lookup[lna['lna_gain']] s += " lna_boost_lf %s\n" % bin(lna['lna_boost_lf']) s += " lna_boost_hf %s\n" % bin(lna['lna_boost_hf']) s += " detect_optimize %#02x\n" % self.get_detect_optimize() s += " detection_thresh %#02x\n" % self.get_detection_threshold() s += " sync_word %#02x\n" % self.get_sync_word() s += " dio_mapping 0..5 %s\n" % self.get_dio_mapping() s += " tcxo %s\n" % ['XTAL', 'TCXO'][self.get_tcxo()] s += " pa_dac %s\n" % ['default', 'PA_BOOST'][self.get_pa_dac()] s += " fifo_addr_ptr %#02x\n" % self.get_fifo_addr_ptr() s += " fifo_tx_base_addr 
%#02x\n" % self.get_fifo_tx_base_addr() s += " fifo_rx_base_addr %#02x\n" % self.get_fifo_rx_base_addr() s += " fifo_rx_curr_addr %#02x\n" % self.get_fifo_rx_current_addr() s += " fifo_rx_byte_addr %#02x\n" % self.get_fifo_rx_byte_addr() s += " status %s\n" % self.get_modem_status() s += " version %#02x\n" % self.get_version() return s
mayeranalytics/pySX127x
SX127x/LoRa.py
Python
agpl-3.0
37,104
[ "CRYSTAL" ]
22bad47a7d0279b7526c7132998e63bd8cf108e86d72be55e6ef10867e481a4b
# -*- coding: utf-8 -*- # Copyright 2013-2014 Victor Amin, http://vamin.net/ """MESS.DB import module This module contains the import tool class and load function. """ from __future__ import print_function from __future__ import unicode_literals import codecs import math import os from collections import OrderedDict import pybel from mess.decorators import decorate, UnicodeDecorator from mess.method import AbstractMethod from mess.source import Source from mess.tool import AbstractTool from mess.utils import get_inchikey_dir, is_inchikey, setup_dir, touch class Import(AbstractTool): """This tool imports molecules into MESS.DB from a source directory.""" def __init__(self): """Set description of tool.""" self.description = 'Import molecules into MESS.DB' self.epilog = '' def subparse(self, subparser): """Set tool-specific argparse arguments.""" subparser.add_argument('source', help='a molecule source file or directory') subparser.add_argument('-k', '--skip-fragments', action='store_true', help=('do not attempt to separate and import ' 'non-covalently bound fragments')) def execute(self, args): """Run import method for every molecule in source.""" source = Source() source.setup(args.source) self.log_console.info('reading molecules') molecules = OrderedDict() # not required, but useful for debugging # crashing imports threedee = False pybel.ob.obErrorLog.SetOutputLevel(-1) for source_file in source.files(): for mol in pybel.readfile(source_file.split('.')[-1], os.path.join(source.source_dir, source_file)): if not threedee and mol.dim == 3: threedee = True try: decorate(mol, UnicodeDecorator) except IndexError: self.log_console.error('Unexpected error importing %s.', mol.title) continue inchikey = mol.write('inchikey').rstrip() if not is_inchikey(inchikey): self.log_console.info( ("'%s' is not an importable molecule."), mol.title) continue molecules[inchikey] = (mol, source) if not args.skip_fragments: cansmi = mol.write('can').split()[0] if cansmi.count('.') > 0: for 
fragment in cansmi.split('.'): fragmol = pybel.readstring('can', fragment) decorate(fragmol, UnicodeDecorator) inchikey = fragmol.write('inchikey').rstrip() if not is_inchikey(inchikey): self.log_console.info( ("'%s' fragment in %s " "is not an importable molecule."), fragment, mol.title) else: fragmol.title = mol.title molecules[inchikey] = (fragmol, source) import0d = Import0D() import0d.setup() if threedee: import3d = Import3D() import3d.shortdesc = source.dirname import3d.setup() self.log_console.info('setting up molecule dirs') queries = {} for inchikey, (mol, source) in molecules.iteritems(): for query, values in import0d.map(mol, source): try: queries[query].append(values) except KeyError: queries[query] = [values] if mol.dim == 3: import3d.map(mol, source) self.log_console.info('loading simple properties') for query, values in queries.iteritems(): import0d.reduce(query, values) class Import0D(AbstractMethod): """This class adds an individual 0D molecule to MESS.DB.""" # method info description = 'import0d' geop = 0 # program info prog_name = 'Open Babel' prog_version = pybel.ob.OBReleaseVersion() prog_url = 'http://openbabel.org/wiki/Main_Page' prog_citation = ('Noel M. O’Boyle, Michael Banck, Craig A. James, ' 'Chris Morley, Tim Vandermeersch, Geoffrey R. Hutchison ' 'Open Babel: An open chemical toolbox. J. Cheminf. 
' '2011, 3, 33.') # parameters parameters = {} def check_dependencies(self): """Return True, no external dependencies to check.""" return True def map(self, mol, source): """Import molecule into MESS.DB.""" # setup local variables self.inchikey = mol.write('inchikey').rstrip() inchikey_dir = get_inchikey_dir(self.inchikey) inchikey_basename = os.path.join(inchikey_dir, self.inchikey) identifier = unicode(mol.title, 'utf-8', 'replace') # setup directory setup_dir(inchikey_dir) if not self.check(): mol.title = b'' mol.write('inchi', (inchikey_basename + '.inchi'), overwrite=True) if not os.path.exists(inchikey_basename + '.png'): mol.write('_png2', (inchikey_basename + '.png')) touch(inchikey_basename + '.log') touch(inchikey_basename + '.notes') touch(os.path.join(inchikey_dir, '%s.sources.tsv' % inchikey_basename)) self.log_all.info('%s molecule directory initialized', self.inchikey) source.update_source_tsv(self.inchikey, identifier) yield source.update_molecule_source_query(self.inchikey, identifier) yield self.insert_molecule_query(self.inchikey, mol) for query, values in self.get_insert_moldata_queries( self.inchikey, mol, description='molecule data from %s input' % source.dirname): yield query, values for query, values in self.get_openbabel_property_queries(self.inchikey, mol): yield query, values def check(self): """Check that a valid molecule folder was created and that there is a matching molecule in the database. Args: inchikey: The valid InChIKey for the molecule. inchikey_dir: The full path to the molecule's dir. Returns: True if everything is fine, False otherwise. 
""" inchikey_dir = get_inchikey_dir(self.inchikey) inchi = os.path.join(inchikey_dir, '%s.inchi' % self.inchikey) log = os.path.join(inchikey_dir, '%s.log' % self.inchikey) notes = os.path.join(inchikey_dir, '%s.notes' % self.inchikey) png = os.path.join(inchikey_dir, '%s.png' % self.inchikey) sources = os.path.join(inchikey_dir, '%s.sources.tsv' % self.inchikey) try: with codecs.open(inchi, encoding='utf-8') as file_: inchi_str = file_.readline().split('=')[1].strip() query = 'SELECT inchikey FROM molecule WHERE inchi=?' row = self.db.execute(query, (inchi_str,)).fetchone() try: if row.inchikey != self.inchikey: return False except AttributeError: return False with codecs.open(log, encoding='utf-8'): pass with codecs.open(notes, encoding='utf-8'): pass with codecs.open(png, encoding='utf-8'): pass with codecs.open(sources, encoding='utf-8'): pass return True except IOError: return False def insert_molecule_query(self, inchikey, mol): """Load basic molecule attributes into mess.db. Args: inchikey: The molecule InChIKey. mol: A pybel mol object for the molecule. """ inchi = mol.write('inchi').rstrip().split('=')[1] smiles = mol.write('can').rstrip() # canonical smiles formula = mol.formula # insert molecule identifiers query = ('INSERT OR IGNORE INTO molecule ' '(inchikey, inchi, smiles, formula) ' 'VALUES (?, ?, ?, ?)') return (query, (inchikey, inchi, smiles, formula)) def get_openbabel_property_queries(self, inchikey, mol): """Load properties available in Open Babel into mess.db. Args: inchikey: The molecule InChIKey. method_path_id: Path id for import. mol: A pybel mol object for the molecule. 
""" # insert Open Babel molecule attributes yield self.get_insert_property_query( inchikey, 'charge', 'Open Babel molecule attribute', type(mol.charge).__name__, mol.charge) yield self.get_insert_property_query( inchikey, 'exactmass', 'Open Babel molecule attribute', type(mol.exactmass).__name__, mol.exactmass, 'g/mol') yield self.get_insert_property_query( inchikey, 'molwt', 'Open Babel descriptor value', type(mol.molwt).__name__, mol.molwt, 'g/mol') yield self.get_insert_property_query( inchikey, 'spin', 'Open Babel descriptor value', type(mol.spin).__name__, mol.spin) # insert Open Babel descriptors for property_name, property_value in mol.calcdesc().iteritems(): if math.isnan(property_value): continue yield self.get_insert_property_query( inchikey, property_name, 'Open Babel descriptor value', type(property_value).__name__, property_value) class Import3D(AbstractMethod): """This class adds an individual 3D molecule to MESS.DB.""" # method info description = 'import3d' geop = 1 # program info prog_name = 'Open Babel' prog_version = pybel.ob.OBReleaseVersion() prog_url = 'http://openbabel.org/wiki/Main_Page' prog_citation = ('Noel M. O’Boyle, Michael Banck, Craig A. James, ' 'Chris Morley, Tim Vandermeersch, Geoffrey R. Hutchison ' 'Open Babel: An open chemical toolbox. J. Cheminf. 
' '2011, 3, 33.') # parameters parameters = {} def check_dependencies(self): """Return True, no external dependencies to check.""" return True def map(self, mol, source): """Import molecule into MESS.DB.""" self.inchikey = mol.write('inchikey').rstrip() if not self.check(): inchikey_dir = get_inchikey_dir(self.inchikey) setup_dir(os.path.join(inchikey_dir, self.method_dir)) mol.write('xyz', os.path.join(inchikey_dir, self.method_dir, '%s.xyz' % self.inchikey), overwrite=True) self.log_all.info('%s 3D structure from %s added', self.inchikey, source.dirname) def check(self): inchikey_dir = get_inchikey_dir(self.inchikey) try: mol = pybel.readfile('xyz', os.path.join(inchikey_dir, self.method_dir, '%s.xyz' % self.inchikey)).next() except IOError: return False decorate(mol, UnicodeDecorator) if not mol.write('inchikey').rstrip() == self.inchikey: self.log_console.warning('inconsistent 3D geometry in %s (%s)', self.inchikey, self.method_dir) return False return True def load(): """Load Import().""" return Import()
vamin/MESS.DB
mess/tools/import.py
Python
agpl-3.0
12,410
[ "Open Babel", "Pybel" ]
6bed2bffb2476cd9b611282e3b6e9170f8d88692161be9fda074cc0c824f2e7a
#!/usr/bin/env python from __future__ import print_function import urllib2 import re from pymatgen.core.periodic_table import PeriodicTable #pattern = r'elemParams[%d] = ['%s','%s','%s']' #'\definecolor{atom-H}{rgb}{1.000000,1.000000,1.000000}' def atoms_style_jmol(): url = 'http://jmol.sourceforge.net/jscolors/jmol_constants.js' js = urllib2.urlopen(url) lines = js.readlines() for line in lines: r = re.search("elemParams\[(\d+)\].*?=.*?\['(.*?)','(.*?)','(.*?)'\]",line) if r: val1 = int('0x' + r.group(3)[0:2],0)/255. val2 = int('0x' + r.group(3)[2:4],0)/255. val3 = int('0x' + r.group(3)[4:6],0)/255. print('\\definecolor{atom-%s}{%f,%f,%f}' % (r.group(2),val1,val2,val3)) print('\\definecolor{atom-%s}{%f,%f,%f}' % ('X',0.,0.,0.)) def atoms_style_bytype(): pt = PeriodicTable() for element in pt.all_elements: if element.is_noble_gas: print('\\colorlet{atom-%s}{green!50}' % element.symbol) elif element.is_transition_metal: print('\\colorlet{atom-%s}{blue!50}' % element.symbol) elif element.is_metalloid: print('\\colorlet{atom-%s}{yellow!50}' % element.symbol) elif element.is_alkali: print('\\colorlet{atom-%s}{orange!50}' % element.symbol) elif element.is_alkaline: print('\\colorlet{atom-%s}{purple!50}' % element.symbol) elif element.is_lanthanoid: print('\\colorlet{atom-%s}{brown!50}' % element.symbol) elif element.is_actinoid: print('\\colorlet{atom-%s}{brown!50}' % element.symbol) else: print('\\colorlet{atom-%s}{red!50}' % element.symbol) print('\\colorlet{atom-X}{black}')
ldamewood/figures
scripts/tikzlibraryatoms/tikblibraryatoms.style.py
Python
mit
1,763
[ "Jmol", "pymatgen" ]
723d3042733314f5d09f2635cbcee763141754d3615cfe479d8ce4188a809028
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import errno import sys import re import os import shlex import yaml import copy import optparse import operator from ansible import errors from ansible import __version__ from ansible.utils.display_functions import * from ansible.utils.plugins import * from ansible.utils.su_prompts import * from ansible.callbacks import display from ansible.module_utils.splitter import split_args, unquote import ansible.constants as C import ast import time import StringIO import stat import termios import tty import pipes import random import difflib import warnings import traceback import getpass import sys import json import subprocess import contextlib import jinja2.exceptions from vault import VaultLib VERBOSITY=0 MAX_FILE_SIZE_FOR_DIFF=1*1024*1024 # caching the compilation of the regex used # to check for lookup calls within data LOOKUP_REGEX = re.compile(r'lookup\s*\(') PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})') CODE_REGEX = re.compile(r'(?:{%|%})') try: import json except ImportError: import simplejson as json try: from hashlib import md5 as _md5 except ImportError: from md5 import md5 as _md5 PASSLIB_AVAILABLE = False try: import passlib.hash PASSLIB_AVAILABLE = True except: pass try: import builtin except ImportError: import __builtin__ as builtin 
KEYCZAR_AVAILABLE=False try: try: # some versions of pycrypto may not have this? from Crypto.pct_warnings import PowmInsecureWarning except ImportError: PowmInsecureWarning = RuntimeWarning with warnings.catch_warnings(record=True) as warning_handler: warnings.simplefilter("error", PowmInsecureWarning) try: import keyczar.errors as key_errors from keyczar.keys import AesKey except PowmInsecureWarning: system_warning( "The version of gmp you have installed has a known issue regarding " + \ "timing vulnerabilities when used with pycrypto. " + \ "If possible, you should update it (i.e. yum update gmp)." ) warnings.resetwarnings() warnings.simplefilter("ignore") import keyczar.errors as key_errors from keyczar.keys import AesKey KEYCZAR_AVAILABLE=True except ImportError: pass ############################################################### # Abstractions around keyczar ############################################################### def key_for_hostname(hostname): # fireball mode is an implementation of ansible firing up zeromq via SSH # to use no persistent daemons or key management if not KEYCZAR_AVAILABLE: raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes") key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR) if not os.path.exists(key_path): os.makedirs(key_path, mode=0700) os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8)) elif not os.path.isdir(key_path): raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.') if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8): raise errors.AnsibleError('Incorrect permissions on the private key directory. 
Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))) key_path = os.path.join(key_path, hostname) # use new AES keys every 2 hours, which means fireball must not allow running for longer either if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2): key = AesKey.Generate() fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)) fh = os.fdopen(fd, 'w') fh.write(str(key)) fh.close() return key else: if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8): raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path)) fh = open(key_path) key = AesKey.Read(fh.read()) fh.close() return key def encrypt(key, msg): return key.Encrypt(msg) def decrypt(key, msg): try: return key.Decrypt(msg) except key_errors.InvalidSignatureError: raise errors.AnsibleError("decryption failed") ############################################################### # UTILITY FUNCTIONS FOR COMMAND LINE TOOLS ############################################################### def read_vault_file(vault_password_file): """Read a vault password from a file or if executable, execute the script and retrieve password from STDOUT """ if vault_password_file: this_path = os.path.realpath(os.path.expanduser(vault_password_file)) if is_executable(this_path): try: # STDERR not captured to make it easier for users to prompt for input in their scripts p = subprocess.Popen(this_path, stdout=subprocess.PIPE) except OSError, e: raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e)) stdout, stderr = p.communicate() vault_pass = stdout.strip('\r\n') else: try: f = open(this_path, "rb") vault_pass=f.read().strip() f.close() except 
(OSError, IOError), e: raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) return vault_pass else: return None def err(msg): ''' print an error message to stderr ''' print >> sys.stderr, msg def exit(msg, rc=1): ''' quit with an error to stdout and a failure code ''' err(msg) sys.exit(rc) def jsonify(result, format=False): ''' format JSON output (uncompressed or uncompressed) ''' if result is None: return "{}" result2 = result.copy() for key, value in result2.items(): if type(value) is str: result2[key] = value.decode('utf-8', 'ignore') indent = None if format: indent = 4 try: return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False) except UnicodeDecodeError: return json.dumps(result2, sort_keys=True, indent=indent) def write_tree_file(tree, hostname, buf): ''' write something into treedir/hostname ''' # TODO: might be nice to append playbook runs per host in a similar way # in which case, we'd want append mode. path = os.path.join(tree, hostname) fd = open(path, "w+") fd.write(buf) fd.close() def is_failed(result): ''' is a given JSON result a failed result? ''' return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true'])) def is_changed(result): ''' is a given JSON result a changed result? 
''' return (result.get('changed', False) in [ True, 'True', 'true']) def check_conditional(conditional, basedir, inject, fail_on_undefined=False): from ansible.utils import template if conditional is None or conditional == '': return True if isinstance(conditional, list): for x in conditional: if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined): return False return True if not isinstance(conditional, basestring): return conditional conditional = conditional.replace("jinja2_compare ","") # allow variable names if conditional in inject and '-' not in str(inject[conditional]): conditional = inject[conditional] conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined) original = str(conditional).replace("jinja2_compare ","") # a Jinja2 evaluation that results in something Python can eval! presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional conditional = template.template(basedir, presented, inject) val = conditional.strip() if val == presented: # the templating failed, meaning most likely a # variable was undefined. 
If we happened to be # looking for an undefined variable, return True, # otherwise fail if "is undefined" in conditional: return True elif "is defined" in conditional: return False else: raise errors.AnsibleError("error while evaluating conditional: %s" % original) elif val == "True": return True elif val == "False": return False else: raise errors.AnsibleError("unable to evaluate conditional: %s" % original) def is_executable(path): '''is the given path executable?''' return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE]) def unfrackpath(path): ''' returns a path that is free of symlinks, environment variables, relative path traversals and symbols (~) example: '$HOME/../../var/mail' becomes '/var/spool/mail' ''' return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path)))) def prepare_writeable_dir(tree,mode=0777): ''' make sure a directory exists and is writeable ''' # modify the mode to ensure the owner at least # has read/write access to this directory mode |= 0700 # make sure the tree path is always expanded # and normalized and free of symlinks tree = unfrackpath(tree) if not os.path.exists(tree): try: os.makedirs(tree, mode) except (IOError, OSError), e: raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e)) if not os.access(tree, os.W_OK): raise errors.AnsibleError("Cannot write to path %s" % tree) return tree def path_dwim(basedir, given): ''' make relative paths work like folks expect. ''' if given.startswith("'"): given = given[1:-1] if given.startswith("/"): return os.path.abspath(given) elif given.startswith("~"): return os.path.abspath(os.path.expanduser(given)) else: if basedir is None: basedir = "." 
return os.path.abspath(os.path.join(basedir, given)) def path_dwim_relative(original, dirname, source, playbook_base, check=True): ''' find one file in a directory one level up in a dir named dirname relative to current ''' # (used by roles code) from ansible.utils import template basedir = os.path.dirname(original) if os.path.islink(basedir): basedir = unfrackpath(basedir) template2 = os.path.join(basedir, dirname, source) else: template2 = os.path.join(basedir, '..', dirname, source) source2 = path_dwim(basedir, template2) if os.path.exists(source2): return source2 obvious_local_path = path_dwim(playbook_base, source) if os.path.exists(obvious_local_path): return obvious_local_path if check: raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path)) return source2 # which does not exist def repo_url_to_role_name(repo_url): # gets the role name out of a repo like # http://git.example.com/repos/repo.git" => "repo" if '://' not in repo_url and '@' not in repo_url: return repo_url trailing_path = repo_url.split('/')[-1] if trailing_path.endswith('.git'): trailing_path = trailing_path[:-4] if trailing_path.endswith('.tar.gz'): trailing_path = trailing_path[:-7] if ',' in trailing_path: trailing_path = trailing_path.split(',')[0] return trailing_path def role_spec_parse(role_spec): # takes a repo and a version like # git+http://git.example.com/repos/repo.git,v1.0 # and returns a list of properties such as: # { # 'scm': 'git', # 'src': 'http://git.example.com/repos/repo.git', # 'version': 'v1.0', # 'name': 'repo' # } role_spec = role_spec.strip() role_version = '' default_role_versions = dict(git='master', hg='tip') if role_spec == "" or role_spec.startswith("#"): return (None, None, None, None) tokens = [s.strip() for s in role_spec.split(',')] # assume https://github.com URLs are git+https:// URLs and not # tarballs unless they end in '.zip' if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not 
tokens[0].endswith('.tar.gz'): tokens[0] = 'git+' + tokens[0] if '+' in tokens[0]: (scm, role_url) = tokens[0].split('+') else: scm = None role_url = tokens[0] if len(tokens) >= 2: role_version = tokens[1] if len(tokens) == 3: role_name = tokens[2] else: role_name = repo_url_to_role_name(tokens[0]) if scm and not role_version: role_version = default_role_versions.get(scm, '') return dict(scm=scm, src=role_url, version=role_version, name=role_name) def role_yaml_parse(role): if 'role' in role: # Old style: {role: "galaxy.role,version,name", other_vars: "here" } role_info = role_spec_parse(role['role']) if isinstance(role_info, dict): # Warning: Slight change in behaviour here. name may be being # overloaded. Previously, name was only a parameter to the role. # Now it is both a parameter to the role and the name that # ansible-galaxy will install under on the local system. if 'name' in role and 'name' in role_info: del role_info['name'] role.update(role_info) else: # New style: { src: 'galaxy.role,version,name', other_vars: "here" } if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'): role["src"] = "git+" + role["src"] if '+' in role["src"]: (scm, src) = role["src"].split('+') role["scm"] = scm role["src"] = src if 'name' not in role: role["name"] = repo_url_to_role_name(role["src"]) if 'version' not in role: role['version'] = '' if 'scm' not in role: role['scm'] = None return role def json_loads(data): ''' parse a JSON string and return a data structure ''' return json.loads(data) def _clean_data(orig_data, from_remote=False, from_inventory=False): ''' remove jinja2 template tags from a string ''' if not isinstance(orig_data, basestring): return orig_data # when the data is marked as having come from a remote, we always # replace any print blocks (ie. {{var}}), however when marked as coming # from inventory we only replace print blocks that contain a call to # a lookup plugin (ie. 
{{lookup('foo','bar'))}}) replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None) regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX with contextlib.closing(StringIO.StringIO(orig_data)) as data: # these variables keep track of opening block locations, as we only # want to replace matched pairs of print/block tags print_openings = [] block_openings = [] for mo in regex.finditer(orig_data): token = mo.group(0) token_start = mo.start(0) if token[0] == '{': if token == '{%': block_openings.append(token_start) elif token == '{{': print_openings.append(token_start) elif token[1] == '}': prev_idx = None if token == '%}' and block_openings: prev_idx = block_openings.pop() elif token == '}}' and print_openings: prev_idx = print_openings.pop() if prev_idx is not None: # replace the opening data.seek(prev_idx, os.SEEK_SET) data.write('{#') # replace the closing data.seek(token_start, os.SEEK_SET) data.write('#}') else: assert False, 'Unhandled regex match' return data.getvalue() def _clean_data_struct(orig_data, from_remote=False, from_inventory=False): ''' walk a complex data structure, and use _clean_data() to remove any template tags that may exist ''' if not from_remote and not from_inventory: raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory") if isinstance(orig_data, dict): data = orig_data.copy() for key in data: new_key = _clean_data_struct(key, from_remote, from_inventory) new_val = _clean_data_struct(data[key], from_remote, from_inventory) if key != new_key: del data[key] data[new_key] = new_val elif isinstance(orig_data, list): data = orig_data[:] for i in range(0, len(data)): data[i] = _clean_data_struct(data[i], from_remote, from_inventory) elif isinstance(orig_data, basestring): data = _clean_data(orig_data, from_remote, from_inventory) else: data = orig_data return data def parse_json(raw_data, from_remote=False, from_inventory=False, 
no_exceptions=False): ''' this version for module return data only ''' orig_data = raw_data # ignore stuff like tcgetattr spewage or other warnings data = filter_leading_non_json_lines(raw_data) try: results = json.loads(data) except: if no_exceptions: return dict(failed=True, parsed=False, msg=raw_data) else: raise if from_remote: results = _clean_data_struct(results, from_remote, from_inventory) return results def serialize_args(args): ''' Flattens a dictionary args to a k=v string ''' module_args = "" for (k,v) in args.iteritems(): if isinstance(v, basestring): module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) elif isinstance(v, bool): module_args = "%s=%s %s" % (k, str(v), module_args) return module_args.strip() def merge_module_args(current_args, new_args): ''' merges either a dictionary or string of k=v pairs with another string of k=v pairs, and returns a new k=v string without duplicates. ''' if not isinstance(current_args, basestring): raise errors.AnsibleError("expected current_args to be a basestring") # we use parse_kv to split up the current args into a dictionary final_args = parse_kv(current_args) if isinstance(new_args, dict): final_args.update(new_args) elif isinstance(new_args, basestring): new_args_kv = parse_kv(new_args) final_args.update(new_args_kv) return serialize_args(final_args) def parse_yaml(data, path_hint=None): ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' stripped_data = data.lstrip() loaded = None if stripped_data.startswith("{") or stripped_data.startswith("["): # since the line starts with { or [ we can infer this is a JSON document. 
try: loaded = json.loads(data) except ValueError, ve: if path_hint: raise errors.AnsibleError(path_hint + ": " + str(ve)) else: raise errors.AnsibleError(str(ve)) else: # else this is pretty sure to be a YAML document loaded = yaml.safe_load(data) return loaded def process_common_errors(msg, probline, column): replaced = probline.replace(" ","") if ":{{" in replaced and "}}" in replaced: msg = msg + """ This one looks easy to fix. YAML thought it was looking for the start of a hash/dictionary and was confused to see a second "{". Most likely this was meant to be an ansible template evaluation instead, so we have to give the parser a small hint that we wanted a string instead. The solution here is to just quote the entire value. For instance, if the original line was: app_path: {{ base_path }}/foo It should be written as: app_path: "{{ base_path }}/foo" """ return msg elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1: msg = msg + """ This one looks easy to fix. There seems to be an extra unquoted colon in the line and this is confusing the parser. It was only expecting to find one free colon. The solution is just add some quotes around the colon, or quote the entire line after the first colon. For instance, if the original line was: copy: src=file.txt dest=/path/filename:with_colon.txt It can be written as: copy: src=file.txt dest='/path/filename:with_colon.txt' Or: copy: 'src=file.txt dest=/path/filename:with_colon.txt' """ return msg else: parts = probline.split(":") if len(parts) > 1: middle = parts[1].strip() match = False unbalanced = False if middle.startswith("'") and not middle.endswith("'"): match = True elif middle.startswith('"') and not middle.endswith('"'): match = True if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2: unbalanced = True if match: msg = msg + """ This one looks easy to fix. 
It seems that there is a value started with a quote, and the YAML parser is expecting to see the line ended with the same kind of quote. For instance: when: "ok" in result.stdout Could be written as: when: '"ok" in result.stdout' or equivalently: when: "'ok' in result.stdout" """ return msg if unbalanced: msg = msg + """ We could be wrong, but this one looks like it might be an issue with unbalanced quotes. If starting a value with a quote, make sure the line ends with the same set of quotes. For instance this arbitrary example: foo: "bad" "wolf" Could be written as: foo: '"bad" "wolf"' """ return msg return msg def process_yaml_error(exc, data, path=None, show_content=True): if hasattr(exc, 'problem_mark'): mark = exc.problem_mark if show_content: if mark.line -1 >= 0: before_probline = data.split("\n")[mark.line-1] else: before_probline = '' probline = data.split("\n")[mark.line] arrow = " " * mark.column + "^" msg = """Syntax Error while loading YAML script, %s Note: The error may actually appear before this position: line %s, column %s %s %s %s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow) unquoted_var = None if '{{' in probline and '}}' in probline: if '"{{' not in probline or "'{{" not in probline: unquoted_var = True if not unquoted_var: msg = process_common_errors(msg, probline, mark.column) else: msg = msg + """ We could be wrong, but this one looks like it might be an issue with missing quotes. Always quote template expression brackets when they start a value. For instance: with_items: - {{ foo }} Should be written as: with_items: - "{{ foo }}" """ else: # most likely displaying a file with sensitive content, # so don't show any of the actual lines of yaml just the # line number itself msg = """Syntax error while loading YAML script, %s The error appears to have been on line %s, column %s, but may actually be before there depending on the exact syntax problem. 
""" % (path, mark.line + 1, mark.column + 1) else: # No problem markers means we have to throw a generic # "stuff messed up" type message. Sry bud. if path: msg = "Could not parse YAML. Check over %s again." % path else: msg = "Could not parse YAML." raise errors.AnsibleYAMLValidationFailed(msg) def parse_yaml_from_file(path, vault_password=None): ''' convert a yaml file to a data structure ''' data = None show_content = True try: data = open(path).read() except IOError: raise errors.AnsibleError("file could not read: %s" % path) vault = VaultLib(password=vault_password) if vault.is_encrypted(data): # if the file is encrypted and no password was specified, # the decrypt call would throw an error, but we check first # since the decrypt function doesn't know the file name if vault_password is None: raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path) data = vault.decrypt(data) show_content = False try: return parse_yaml(data, path_hint=path) except yaml.YAMLError, exc: process_yaml_error(exc, data, path, show_content) def parse_kv(args): ''' convert a string of key/value items to a dict ''' options = {} if args is not None: try: vargs = split_args(args) except ValueError, ve: if 'no closing quotation' in str(ve).lower(): raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") else: raise for x in vargs: if "=" in x: k, v = x.split("=",1) options[k.strip()] = unquote(v.strip()) return options def _validate_both_dicts(a, b): if not (isinstance(a, dict) and isinstance(b, dict)): raise errors.AnsibleError( "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__) ) def merge_hash(a, b): ''' recursively merges hash b into a keys from b take precedence over keys from a ''' result = {} # we check here as well as in combine_vars() since this # function can work recursively with nested dicts _validate_both_dicts(a, b) for dicts in a, b: # next, iterate over b 
keys and values for k, v in dicts.iteritems(): # if there's already such key in a # and that key contains dict if k in result and isinstance(result[k], dict): # merge those dicts recursively result[k] = merge_hash(a[k], v) else: # otherwise, just copy a value from b to a result[k] = v return result def md5s(data): ''' Return MD5 hex digest of data. ''' digest = _md5() try: digest.update(data) except UnicodeEncodeError: digest.update(data.encode('utf-8')) return digest.hexdigest() def md5(filename): ''' Return MD5 hex digest of local file, None if file is not present or a directory. ''' if not os.path.exists(filename) or os.path.isdir(filename): return None digest = _md5() blocksize = 64 * 1024 try: infile = open(filename, 'rb') block = infile.read(blocksize) while block: digest.update(block) block = infile.read(blocksize) infile.close() except IOError, e: raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) return digest.hexdigest() def default(value, function): ''' syntactic sugar around lazy evaluation of defaults ''' if value is None: return function() return value def _git_repo_info(repo_path): ''' returns a string containing git branch, commit id and commit date ''' result = None if os.path.exists(repo_path): # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. if os.path.isfile(repo_path): try: gitdir = yaml.safe_load(open(repo_path)).get('gitdir') # There is a possibility the .git file to have an absolute path. 
if os.path.isabs(gitdir): repo_path = gitdir else: repo_path = os.path.join(repo_path[:-4], gitdir) except (IOError, AttributeError): return '' f = open(os.path.join(repo_path, "HEAD")) branch = f.readline().split('/')[-1].rstrip("\n") f.close() branch_path = os.path.join(repo_path, "refs", "heads", branch) if os.path.exists(branch_path): f = open(branch_path) commit = f.readline()[:10] f.close() else: # detached HEAD commit = branch[:10] branch = 'detached HEAD' branch_path = os.path.join(repo_path, "HEAD") date = time.localtime(os.stat(branch_path).st_mtime) if time.daylight == 0: offset = time.timezone else: offset = time.altzone result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36) else: result = '' return result def _gitinfo(): basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') repo_path = os.path.join(basedir, '.git') result = _git_repo_info(repo_path) submodules = os.path.join(basedir, '.gitmodules') if not os.path.exists(submodules): return result f = open(submodules) for line in f: tokens = line.strip().split(' ') if tokens[0] == 'path': submodule_path = tokens[2] submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) if not submodule_info: submodule_info = ' not found - use git submodule update --init ' + submodule_path result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result def version(prog): result = "{0} {1}".format(prog, __version__) gitinfo = _gitinfo() if gitinfo: result = result + " {0}".format(gitinfo) result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH return result def version_info(gitinfo=False): if gitinfo: # expensive call, user with care ansible_version_string = version('') else: ansible_version_string = __version__ ansible_version = ansible_version_string.split()[0] ansible_versions = ansible_version.split('.') for counter in range(len(ansible_versions)): if 
ansible_versions[counter] == "": ansible_versions[counter] = 0 try: ansible_versions[counter] = int(ansible_versions[counter]) except: pass if len(ansible_versions) < 3: for counter in range(len(ansible_versions), 3): ansible_versions.append(0) return {'string': ansible_version_string.strip(), 'full': ansible_version, 'major': ansible_versions[0], 'minor': ansible_versions[1], 'revision': ansible_versions[2]} def getch(): ''' read in a single character ''' fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch def sanitize_output(str): ''' strips private info out of a string ''' private_keys = ['password', 'login_password'] filter_re = [ # filter out things like user:pass@foo/whatever # and http://username:pass@wherever/foo re.compile('^(?P<before>.*:)(?P<password>.*)(?P<after>\@.*)$'), ] parts = str.split() output = '' for part in parts: try: (k,v) = part.split('=', 1) if k in private_keys: output += " %s=VALUE_HIDDEN" % k else: found = False for filter in filter_re: m = filter.match(v) if m: d = m.groupdict() output += " %s=%s" % (k, d['before'] + "********" + d['after']) found = True break if not found: output += " %s" % part except: output += " %s" % part return output.strip() #################################################################### # option handling code for /usr/bin/ansible and ansible-playbook # below this line class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' def format_help(self, formatter=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) return optparse.OptionParser.format_help(self, formatter=None) def increment_debug(option, opt, value, parser): global VERBOSITY VERBOSITY += 1 def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, async_opts=False, connect_opts=False, subset_opts=False, 
check_opts=False, diff_opts=False): ''' create an options parser for any ansible script ''' parser = SortedOptParser(usage, version=version("%prog")) parser.add_option('-v','--verbose', default=False, action="callback", callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int', help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS) parser.add_option('-i', '--inventory-file', dest='inventory', help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST, default=constants.DEFAULT_HOST_LIST) parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', help='ask for SSH password') parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection') parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', help='ask for sudo password') parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', help='ask for su password') parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file") parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') parser.add_option('-M', '--module-path', dest='module_path', help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH, default=None) if subset_opts: parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') parser.add_option('-T', '--timeout', 
default=constants.DEFAULT_TIMEOUT, type='int', dest='timeout', help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT) if output_opts: parser.add_option('-o', '--one-line', dest='one_line', action='store_true', help='condense output') parser.add_option('-t', '--tree', dest='tree', default=None, help='log output to this directory') if runas_opts: parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user', help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER) parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true', help='run operations with su') parser.add_option('-R', '--su-user', help='run operations with su as this ' 'user (default=%s)' % constants.DEFAULT_SU_USER) if connect_opts: parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) if async_opts: parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval', help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL) parser.add_option('-B', '--background', dest='seconds', type='int', default=0, help='run asynchronously, failing after X seconds (default=N/A)') if check_opts: parser.add_option("-C", "--check", default=False, dest='check', action='store_true', help="don't make any changes; instead, try to predict some of the changes that may occur" ) if diff_opts: parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', help="when changing (small) files and templates, show the 
differences in those files; works great with --check" ) return parser def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): vault_pass = None new_vault_pass = None if ask_vault_pass: vault_pass = getpass.getpass(prompt="Vault password: ") if ask_vault_pass and confirm_vault: vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") if vault_pass != vault_pass2: raise errors.AnsibleError("Passwords do not match") if ask_new_vault_pass: new_vault_pass = getpass.getpass(prompt="New Vault password: ") if ask_new_vault_pass and confirm_new: new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") if new_vault_pass != new_vault_pass2: raise errors.AnsibleError("Passwords do not match") # enforce no newline chars at the end of passwords if vault_pass: vault_pass = vault_pass.strip() if new_vault_pass: new_vault_pass = new_vault_pass.strip() return vault_pass, new_vault_pass def ask_passwords(ask_pass=False, ask_sudo_pass=False, ask_su_pass=False, ask_vault_pass=False): sshpass = None sudopass = None su_pass = None vault_pass = None sudo_prompt = "sudo password: " su_prompt = "su password: " if ask_pass: sshpass = getpass.getpass(prompt="SSH password: ") sudo_prompt = "sudo password [defaults to SSH password]: " if ask_sudo_pass: sudopass = getpass.getpass(prompt=sudo_prompt) if ask_pass and sudopass == '': sudopass = sshpass if ask_su_pass: su_pass = getpass.getpass(prompt=su_prompt) if ask_vault_pass: vault_pass = getpass.getpass(prompt="Vault password: ") return (sshpass, sudopass, su_pass, vault_pass) def do_encrypt(result, encrypt, salt_size=None, salt=None): if PASSLIB_AVAILABLE: try: crypt = getattr(passlib.hash, encrypt) except: raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt) if salt_size: result = crypt.encrypt(result, salt_size=salt_size) elif salt: result = crypt.encrypt(result, salt=salt) else: result = crypt.encrypt(result) else: raise 
errors.AnsibleError("passlib must be installed to encrypt vars_prompt values") return result def last_non_blank_line(buf): all_lines = buf.splitlines() all_lines.reverse() for line in all_lines: if (len(line) > 0): return line # shouldn't occur unless there's no output return "" def filter_leading_non_json_lines(buf): ''' used to avoid random output from SSH at the top of JSON output, like messages from tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). need to filter anything which starts not with '{', '[', ', '=' or is an empty line. filter only leading lines since multiline JSON is valid. ''' filtered_lines = StringIO.StringIO() stop_filtering = False for line in buf.splitlines(): if stop_filtering or line.startswith('{') or line.startswith('['): stop_filtering = True filtered_lines.write(line + '\n') return filtered_lines.getvalue() def boolean(value): val = str(value) if val.lower() in [ "true", "t", "y", "1", "yes" ]: return True else: return False def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd): """ helper function for connection plugins to create sudo commands """ # Rather than detect if sudo wants a password this time, -k makes # sudo always ask for a password if one is required. # Passing a quoted compound command to sudo (or sudo -s) # directly doesn't work, so we shellquote it with pipes.quote() # and pass the quoted string to the user's shell. We loop reading # output until we see the randomly-generated sudo prompt set with # the -p option. 
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) prompt = '[sudo via ansible, key=%s] password: ' % randbits success_key = 'SUDO-SUCCESS-%s' % randbits sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd))) return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) def make_su_cmd(su_user, executable, cmd): """ Helper function for connection plugins to create direct su commands """ # TODO: work on this function randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) success_key = 'SUDO-SUCCESS-%s' % randbits sudocmd = '%s %s %s -c "%s -c %s"' % ( C.DEFAULT_SU_EXE, C.DEFAULT_SU_FLAGS, su_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd)) ) return ('/bin/sh -c ' + pipes.quote(sudocmd), None, success_key) _TO_UNICODE_TYPES = (unicode, type(None)) def to_unicode(value): if isinstance(value, _TO_UNICODE_TYPES): return value return value.decode("utf-8") def get_diff(diff): # called by --diff usage in playbook and runner via callbacks # include names in diffs 'before' and 'after' and do diff -U 10 try: with warnings.catch_warnings(): warnings.simplefilter('ignore') ret = [] if 'dst_binary' in diff: ret.append("diff skipped: destination file appears to be binary\n") if 'src_binary' in diff: ret.append("diff skipped: source file appears to be binary\n") if 'dst_larger' in diff: ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger']) if 'src_larger' in diff: ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger']) if 'before' in diff and 'after' in diff: if 'before_header' in diff: before_header = "before: %s" % diff['before_header'] else: before_header = 'before' if 'after_header' in diff: after_header = "after: %s" % diff['after_header'] else: after_header = 'after' differ = 
difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10) for line in list(differ): ret.append(line) return u"".join(ret) except UnicodeDecodeError: return ">> the files are different, but the diff library cannot compare unicode strings" def is_list_of_strings(items): for x in items: if not isinstance(x, basestring): return False return True def list_union(a, b): result = [] for x in a: if x not in result: result.append(x) for x in b: if x not in result: result.append(x) return result def list_intersection(a, b): result = [] for x in a: if x in b and x not in result: result.append(x) return result def list_difference(a, b): result = [] for x in a: if x not in b and x not in result: result.append(x) for x in b: if x not in a and x not in result: result.append(x) return result def contains_vars(data): ''' returns True if the data contains a variable pattern ''' return "$" in data or "{{" in data def safe_eval(expr, locals={}, include_exceptions=False): ''' This is intended for allowing things like: with_items: a_list_variable Where Jinja2 would return a string but we do not want to allow it to call functions (outside of Jinja2, where the env is constrained). If the input data to this function came from an untrusted (remote) source, it should first be run through _clean_data_struct() to ensure the data is further sanitized prior to evaluation. Based on: http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe ''' # this is the whitelist of AST nodes we are going to # allow in the evaluation. Any node type other than # those listed here will raise an exception in our custom # visitor class defined below. 
SAFE_NODES = set( ( ast.Add, ast.BinOp, ast.Call, ast.Compare, ast.Dict, ast.Div, ast.Expression, ast.List, ast.Load, ast.Mult, ast.Num, ast.Name, ast.Str, ast.Sub, ast.Tuple, ast.UnaryOp, ) ) # AST node types were expanded after 2.6 if not sys.version.startswith('2.6'): SAFE_NODES.union( set( (ast.Set,) ) ) filter_list = [] for filter in filter_loader.all(): filter_list.extend(filter.filters().keys()) CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list class CleansingNodeVisitor(ast.NodeVisitor): def generic_visit(self, node, inside_call=False): if type(node) not in SAFE_NODES: raise Exception("invalid expression (%s)" % expr) elif isinstance(node, ast.Call): inside_call = True elif isinstance(node, ast.Name) and inside_call: if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST: raise Exception("invalid function: %s" % node.id) # iterate over all child nodes for child_node in ast.iter_child_nodes(node): self.generic_visit(child_node, inside_call) if not isinstance(expr, basestring): # already templated to a datastructure, perhaps? 
if include_exceptions: return (expr, None) return expr cnv = CleansingNodeVisitor() try: parsed_tree = ast.parse(expr, mode='eval') cnv.visit(parsed_tree) compiled = compile(parsed_tree, expr, 'eval') result = eval(compiled, {}, locals) if include_exceptions: return (result, None) else: return result except SyntaxError, e: # special handling for syntax errors, we just return # the expression string back as-is if include_exceptions: return (expr, None) return expr except Exception, e: if include_exceptions: return (expr, e) return expr def listify_lookup_plugin_terms(terms, basedir, inject): from ansible.utils import template if isinstance(terms, basestring): # someone did: # with_items: alist # OR # with_items: {{ alist }} stripped = terms.strip() if not (stripped.startswith('{') or stripped.startswith('[')) and \ not stripped.startswith("/") and \ not stripped.startswith('set([') and \ not LOOKUP_REGEX.search(terms): # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: new_terms = template.template(basedir, terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: terms = new_terms except jinja2.exceptions.UndefinedError, e: raise errors.AnsibleUndefinedVariable('undefined variable in items: %s' % e) except: pass if '{' in terms or '[' in terms: # Jinja2 already evaluated a variable to a list. 
# Jinja2-ified list needs to be converted back to a real type # TODO: something a bit less heavy than eval return safe_eval(terms) if isinstance(terms, basestring): terms = [ terms ] return terms def combine_vars(a, b): _validate_both_dicts(a, b) if C.DEFAULT_HASH_BEHAVIOUR == "merge": return merge_hash(a, b) else: return dict(a.items() + b.items()) def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS): '''Return a random password string of length containing only chars.''' password = [] while len(password) < length: new_char = os.urandom(1) if new_char in chars: password.append(new_char) return ''.join(password) def before_comment(msg): ''' what's the part of a string before a comment? ''' msg = msg.replace("\#","**NOT_A_COMMENT**") msg = msg.split("#")[0] msg = msg.replace("**NOT_A_COMMENT**","#") return msg def load_vars(basepath, results, vault_password=None): """ Load variables from any potential yaml filename combinations of basepath, returning result. """ paths_to_check = [ "".join([basepath, ext]) for ext in C.YAML_FILENAME_EXTENSIONS ] found_paths = [] for path in paths_to_check: found, results = _load_vars_from_path(path, results, vault_password=vault_password) if found: found_paths.append(path) # disallow the potentially confusing situation that there are multiple # variable files for the same name. For example if both group_vars/all.yml # and group_vars/all.yaml if len(found_paths) > 1: raise errors.AnsibleError("Multiple variable files found. " "There should only be one. %s" % ( found_paths, )) return results ## load variables from yaml files/dirs # e.g. host/group_vars # def _load_vars_from_path(path, results, vault_password=None): """ Robustly access the file at path and load variables, carefully reporting errors in a friendly/informative way. 
Return the tuple (found, new_results, ) """ try: # in the case of a symbolic link, we want the stat of the link itself, # not its target pathstat = os.lstat(path) except os.error, err: # most common case is that nothing exists at that path. if err.errno == errno.ENOENT: return False, results # otherwise this is a condition we should report to the user raise errors.AnsibleError( "%s is not accessible: %s." " Please check its permissions." % ( path, err.strerror)) # symbolic link if stat.S_ISLNK(pathstat.st_mode): try: target = os.path.realpath(path) except os.error, err2: raise errors.AnsibleError("The symbolic link at %s " "is not readable: %s. Please check its permissions." % (path, err2.strerror, )) # follow symbolic link chains by recursing, so we repeat the same # permissions checks above and provide useful errors. return _load_vars_from_path(target, results, vault_password) # directory if stat.S_ISDIR(pathstat.st_mode): # support organizing variables across multiple files in a directory return True, _load_vars_from_folder(path, results, vault_password=vault_password) # regular file elif stat.S_ISREG(pathstat.st_mode): data = parse_yaml_from_file(path, vault_password=vault_password) if data and type(data) != dict: raise errors.AnsibleError( "%s must be stored as a dictionary/hash" % path) elif data is None: data = {} # combine vars overrides by default but can be configured to do a # hash merge in settings results = combine_vars(results, data) return True, results # something else? could be a fifo, socket, device, etc. else: raise errors.AnsibleError("Expected a variable file or directory " "but found a non-file object at path %s" % (path, )) def _load_vars_from_folder(folder_path, results, vault_password=None): """ Load all variables within a folder recursively. """ # this function and _load_vars_from_path are mutually recursive try: names = os.listdir(folder_path) except os.error, err: raise errors.AnsibleError( "This folder cannot be listed: %s: %s." 
% ( folder_path, err.strerror)) # evaluate files in a stable order rather than whatever order the # filesystem lists them. names.sort() # do not parse hidden files or dirs, e.g. .svn/ paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] for path in paths: _found, results = _load_vars_from_path(path, results, vault_password=vault_password) return results def update_hash(hash, key, new_value): ''' used to avoid nested .update calls on the parent ''' value = hash.get(key, {}) value.update(new_value) hash[key] = value def censor_unlogged_data(data): ''' used when the no_log: True attribute is passed to a task to keep data from a callback. NOT intended to prevent variable registration, but only things from showing up on screen ''' new_data = {} for (x,y) in data.iteritems(): if x in [ 'skipped', 'changed', 'failed', 'rc' ]: new_data[x] = y new_data['censored'] = 'results hidden due to no_log parameter' return new_data
iambocai/ansible
lib/ansible/utils/__init__.py
Python
gpl-3.0
56,744
[ "Galaxy", "VisIt" ]
0c3a6ce52b54fce273f3f0414ff16d3fdd1399be472276acabb42457b21be6ff
""" Module for handling state variables. """ from __future__ import absolute_import import numpy as nm from sfepy.base.base import Struct import six class State(Struct): """ Class holding/manipulating the state variables and corresponding DOF vectors. Manipulating the state class changes the underlying variables, and hence also the corresponding equations/terms (if any). Notes ----- This class allows working with LCBC conditions in time-dependent problems, as it keeps track of the reduced DOF vector that cannot be reconstructed from the full DOF vector by using the usual `variables.strip_state_vector()`. """ @staticmethod def from_variables(variables): """ Create a State instance for the given variables. The DOF vector is created using the DOF data in `variables`. Parameters ---------- variables : Variables instance The variables. """ parts = variables.get_state_parts() vec = variables.create_state_vector() for key, part in six.iteritems(parts): indx = variables.get_indx(key) vec[indx] = part return State(variables, vec) def __init__(self, variables, vec=None, preserve_caches=False): """ Create a State instance for the given variables. Parameters ---------- variables : Variables instance The variables. vec : array, optional The (initial) DOF vector corresponding to the variables. preserve_caches : bool If True, do not invalidate evaluate caches of variables. """ Struct.__init__(self, variables=variables, vec=vec, r_vec=None) if self.vec is None: self.vec = variables.create_state_vector() self.variables.set_data(self.vec, preserve_caches=preserve_caches) def copy(self, deep=False, preserve_caches=False): """ Copy the state. By default, the new state contains the same variables, and creates new DOF vectors. If `deep` is True, also the DOF vectors are copied. Parameters ---------- deep : bool If True, make a copy of the DOF vectors. preserve_caches : bool If True, do not invalidate evaluate caches of variables. 
""" if deep: other = State(self.variables, self.vec.copy(), preserve_caches=True) if self.r_vec is not None: other.r_vec = self.r_vec.copy() else: other = State(self.variables, preserve_caches=True) return other def fill(self, value): """ Fill the DOF vector with given value. """ if self.r_vec is not None: self.r_vec.fill(value) self.vec.fill(value) def init_history(self): """ Initialize variables with history. """ self.variables.init_history() def apply_ebc(self, force_values=None): """ Apply essential (Dirichlet) boundary conditions to the state. """ self.variables.apply_ebc(self.vec, force_values=force_values) def has_ebc(self): """ Test whether the essential (Dirichlet) boundary conditions have been applied to the DOF vector. """ return self.variables.has_ebc(self.vec) def apply_ic(self, force_values=None): """ Apply initial conditions to the state. """ if self.r_vec is not None: raise ValueError('cannot re-apply initial conditions with LCBCs!') self.variables.apply_ic(self.vec, force_values=force_values) def get_reduced(self, follow_epbc=False): """ Get the reduced DOF vector, with EBC and PBC DOFs removed. """ strip = self.variables.strip_state_vector if self.variables.has_lcbc: if self.r_vec is None: r_vec = strip(self.vec, follow_epbc=follow_epbc) # This just sets the correct vector size (wrong values)! r_vec = self.variables.mtx_lcbc.T * r_vec else: r_vec = self.r_vec else: r_vec = strip(self.vec, follow_epbc=follow_epbc) return r_vec def set_reduced(self, r_vec, preserve_caches=False): """ Set the reduced DOF vector, with EBC and PBC DOFs removed. Parameters ---------- r_vec : array The reduced DOF vector corresponding to the variables. preserve_caches : bool If True, do not invalidate evaluate caches of variables. 
""" self.vec = self.variables.make_full_vec(r_vec) if self.variables.has_lcbc: self.r_vec = r_vec self.variables.set_data(self.vec, preserve_caches=preserve_caches) def set_full(self, vec, var_name=None, force=False): """ Set the full DOF vector (including EBC and PBC DOFs). If `var_name` is given, set only the DOF sub-vector corresponding to the given variable. If `force` is True, setting variables with LCBC DOFs is allowed. """ if var_name is None: if self.variables.has_lcbc and not force: raise ValueError('cannot set full DOF vector with LCBCs!') self.vec = vec self.variables.set_data(self.vec) else: var = self.variables[var_name] if var.has_lcbc and not force: raise ValueError('cannot set full DOF vector with LCBCs!') self.variables.set_state_part(self.vec, vec, var_name) var.set_data(self.vec, self.variables.get_indx(var_name)) def __call__(self, var_name=None): """ Get the full DOF vector (including EBC and PBC DOFs). If `var_name` is given, return only the DOF vector corresponding to the given variable. """ if var_name is None: out = self.vec else: out = self.variables.get_state_part_view(self.vec, var_name) return out def set_parts(self, parts, force=False): """ Set parts of the DOF vector corresponding to individual state variables. Parameters ---------- parts : dict The dictionary of the DOF vector parts. """ if self.variables.has_lcbc and not force: raise ValueError('cannot set full DOF vector with LCBCs!') self.variables.set_data(parts) for key, part in six.iteritems(parts): indx = self.variables.get_indx(key) self.vec[indx] = part def get_parts(self): """ Return parts of the DOF vector corresponding to individual state variables. Returns ------- out : dict The dictionary of the DOF vector parts. 
""" return self.variables.get_state_parts(self.vec) def get_vec(self, active_only): if active_only: vec = self.get_reduced() else: vec = self() return vec def set_vec(self, vec, active_only): if active_only: self.set_reduced(vec, preserve_caches=True) else: self.set_full(vec) def create_output_dict(self, fill_value=None, var_info=None, extend=True, linearization=None): """ Transforms state to an output dictionary, that can be passed as 'out' kwarg to Mesh.write(). Then the dictionary entries are formed by components of the state vector corresponding to unknown variables according to kind of linearization given by `linearization`. Examples -------- >>> out = state.create_output_dict() >>> problem.save_state('file.vtk', out=out) """ return self.variables.state_to_output(self.vec, fill_value, var_info, extend, linearization=linearization) def get_weighted_norm(self, vec, weights=None, return_weights=False): """ Return the weighted norm of DOF vector `vec`. By default, each component of `vec` is weighted by the 1/norm of the corresponding state part, or 1 if the norm is zero. Alternatively, the weights can be provided explicitly using `weights` argument. Parameters ---------- vec : array The DOF vector corresponding to the variables. weights : dict, optional If given, the weights are used instead of the norms of the state parts. Keys of the dictionary must be equal to the names of variables comprising the DOF vector. return_weights: bool If True, return also the used weights. Returns ------- norm : float The weighted norm. weights : dict, optional If `return_weights` is True, the used weights. 
Examples -------- >>> err = state0.get_weighted_norm(state() - state0()) """ if weights is None: parts = self.get_parts() weights = {} for key, part in six.iteritems(parts): pnorm = nm.linalg.norm(part) if pnorm < 10.0 * nm.finfo(nm.float64).eps: pnorm = 1.0 weights[key] = 1.0 / pnorm else: if set(weights.keys()) != self.variables.state: raise ValueError('weights keys have to be in %s!' % self.variables.state) wvec = vec.copy() for key in six.iterkeys(weights): indx = self.variables.get_indx(key) wvec[indx] *= weights[key] norm = nm.linalg.norm(wvec) if return_weights: return norm, weights else: return norm
vlukes/sfepy
sfepy/discrete/state.py
Python
bsd-3-clause
10,040
[ "VTK" ]
8a20d734ea986527166e8edea1638b5cf0e22ab209ce5350091eaf4bdb11ffb3
from WebAppDIRAC.Lib.WebHandler import WebHandler, asyncGen
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import Time
from hashlib import md5


class FileCatalogHandler( WebHandler ):
  """
  Web handler exposing DIRAC FileCatalog metadata queries to the web app.

  All ``web_*`` methods are asynchronous request handlers (decorated with
  ``asyncGen``) that reply by calling ``self.finish()`` with a JSON-style
  dict containing ``success`` plus either ``result`` or ``error``.
  """

  AUTH_PROPS = "authenticated"  # any authenticated user may call this handler

  def __init__(self, *args, **kwargs ):
    # Bind the handler to the requesting user's identity and build a
    # FileCatalog client scoped to the user's VO.
    super( FileCatalogHandler, self ).__init__( *args, **kwargs )
    sessionData = self.getSessionData()
    self.user = sessionData['user'].get( 'username', '' )
    self.group = sessionData['user'].get( 'group', '' )
    self.vo = getVOForGroup( self.group )
    self.fc = FileCatalog( vo = self.vo )

  ''' Method to read all the available fields possible for defining a query '''
  @asyncGen
  def web_getMetadataFields(self):
    # Reset paging state (used later by web_getFilesData via __request()).
    self.L_NUMBER = 0
    self.S_NUMBER = 0
    result = yield self.threadTask( self.fc.getMetadataFields )
    gLogger.debug( "request: %s" % result )
    if not result[ "OK" ] :
      gLogger.error( "getSelectorGrid: %s" % result[ "Message" ] )
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    result = result["Value"]
    # Map each metadata field name to the type shown by the UI:
    # file-level fields are always labelled "label", directory-level fields
    # use their own (lower-cased) type string.
    callback = {}
    if not result.has_key( "FileMetaFields" ):
      error = "Service response has no FileMetaFields key"
      gLogger.error( "getSelectorGrid: %s" % error )
      self.finish({ "success" : "false" , "error" : error })
      return
    if not result.has_key( "DirectoryMetaFields" ):
      error = "Service response has no DirectoryMetaFields key"
      gLogger.error( "getSelectorGrid: %s" % error )
      self.finish({ "success" : "false" , "error" : error })
      return
    filemeta = result[ "FileMetaFields" ]
    if len( filemeta ) > 0 :
      for key , value in filemeta.items():
        callback[key]= "label"
    gLogger.debug( "getSelectorGrid: FileMetaFields callback %s" % callback )
    dirmeta = result[ "DirectoryMetaFields" ]
    if len( dirmeta ) > 0 :
      for key , value in dirmeta.items():
        callback[key]= value.lower()
    gLogger.debug( "getSelectorGrid: Resulting callback %s" % callback )
    self.finish({ "success" : "true" , "result" : callback})

  ''' Method to read all the available options for a metadata field '''
  @asyncGen
  def web_getQueryData( self ):
    try:
      # Parse request arguments of the form "<prefix>.<name>.<sign>" whose
      # value is "v|<value>" (scalar) or "s|<v1>:::<v2>..." (list) into
      # {name: {sign: value-or-list}}.
      compat = dict()
      for key in self.request.arguments:
        parts = str( key ).split(".")
        if len(parts)!=3:
          continue
        key = str( key )
        name = parts[1]
        sign = parts[2]
        if not len( name ) > 0:
          continue
        value = str( self.request.arguments[ key ][0] ).split("|")
        #check existence of the 'name' section
        if not compat.has_key(name):
          compat[name] = dict()
        #check existence of the 'sign' section
        if not compat[name].has_key(sign):
          if value[0]=="v":
            compat[name][sign] = ""
          elif value[0]=="s":
            compat[name][sign] = []
        if value[0]=="v":
          compat[name][sign] = value[1]
        elif value[0]=="s":
          compat[name][sign] += value[1].split(":::")
    # NOTE(review): Python 2 except syntax; `e` is unused and any parsing
    # error is collapsed into a generic message.
    except Exception, e:
      self.finish({ "success" : "false" , "error" : "Metadata query error" })
      return
    path = "/"
    if self.request.arguments.has_key("path") :
      path = self.request.arguments["path"][0]
    gLogger.always( compat )
    result = yield self.threadTask( self.fc.getCompatibleMetadata, compat, path )
    gLogger.always( result )
    if not result[ "OK" ]:
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    self.finish({ "success" : "true" , "result" : result["Value"] })

  @asyncGen
  def web_getFilesData( self ) :
    # Paged metadata search; selection/paging come from __request().
    req = self.__request()
    gLogger.always(req)
    gLogger.debug( "submit: incoming request %s" % req )
    result = yield self.threadTask( self.fc.findFilesByMetadataWeb,
                                    req["selection"] , req["path"] ,
                                    self.S_NUMBER , self.L_NUMBER)
    gLogger.debug( "submit: result of findFilesByMetadataDetailed %s" % result )
    if not result[ "OK" ] :
      gLogger.error( "submit: %s" % result[ "Message" ] )
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    result = result[ "Value" ]
    if not len(result) > 0:
      self.finish({ "success" : "true" , "result" : [] , "total" : 0,
                    "date":"-" })
      return
    total = result[ "TotalRecords" ]
    result = result[ "Records" ]
    # Flatten each record into the row shape the web grid expects.
    callback = list()
    for key , value in result.items() :
      size = ""
      if "Size" in value:
        size = value[ "Size" ]
      date = ""
      if "CreationDate" in value:
        date = str( value[ "CreationDate" ] )
      meta = ""
      if "Metadata" in value:
        m = value[ "Metadata" ]
        meta = '; '.join( [ '%s: %s' % ( i , j ) for ( i , j ) in m.items() ] )
      dirnameList = key.split("/")
      dirname = "/".join(dirnameList[:len(dirnameList)-1])
      # NOTE(review): this slice yields a one-element *list*, not a string —
      # confirm the UI expects that.
      filename = dirnameList[len(dirnameList)-1:]
      callback.append({"fullfilename":key, "dirname": dirname,
                       "filename" : filename , "date" : date ,
                       "size" : size , "metadata" : meta })
    timestamp = Time.dateTime().strftime("%Y-%m-%d %H:%M [UTC]")
    self.finish({ "success" : "true" , "result" : callback , "total" : total,
                  "date":timestamp})

  def __request(self):
    """
    Build a metadata-query dict {"selection": {...}, "path": ...} from the
    HTTP request arguments, and set the paging attributes ``self.L_NUMBER``
    (limit, default 25) and ``self.S_NUMBER`` (start offset, default 0).
    """
    req = { "selection" : {} , "path" : "/" }
    self.L_NUMBER = 25
    if self.request.arguments.has_key( "limit" ) and \
       len( self.request.arguments[ "limit" ][0] ) > 0:
      self.L_NUMBER = int( self.request.arguments[ "limit" ][0] )
    self.S_NUMBER = 0
    if self.request.arguments.has_key( "start" ) and \
       len( self.request.arguments[ "start" ][0] ) > 0:
      self.S_NUMBER = int( self.request.arguments[ "start" ][0] )
    # NOTE(review): `separator` is computed but never used below.
    result = gConfig.getOption( "/WebApp/ListSeparator" )
    if result[ "OK" ] :
      separator = result[ "Value" ]
    else:
      separator = ":::"
    result = self.fc.getMetadataFields()
    gLogger.debug( "request: %s" % result )
    if not result["OK"]:
      gLogger.error( "request: %s" % result[ "Message" ] )
      return req
    result = result["Value"]
    if not result.has_key( "FileMetaFields" ):
      error = "Service response has no FileMetaFields key. Return empty dict"
      gLogger.error( "request: %s" % error )
      return req
    if not result.has_key( "DirectoryMetaFields" ):
      error = "Service response has no DirectoryMetaFields key. Return empty dict"
      gLogger.error( "request: %s" % error )
      return req
    filemeta = result[ "FileMetaFields" ]
    dirmeta = result[ "DirectoryMetaFields" ]
    # Only directory metadata field names are accepted in selections.
    meta = []
    for key,value in dirmeta.items() :
      meta.append( key )
    gLogger.always( "request: metafields: %s " % meta )
    # Arguments look like "<prefix>.<name>.<logic>" with value
    # "v|<value>" (scalar) or "s|<v1>:::<v2>..." (list).
    for param in self.request.arguments :
      tmp = str( param ).split( '.' )
      if len( tmp ) != 3 :
        continue
      name = tmp[1]
      logic = tmp[2]
      value = self.request.arguments[param][0].split("|")
      if not logic in ["in","nin", "=" , "!=" , ">=" , "<=" , ">" , "<" ] :
        gLogger.always( "Operand '%s' is not supported " % logic )
        continue
      if name in meta :
        #check existence of the 'name' section
        if not req[ "selection" ].has_key(name):
          req[ "selection" ][name] = dict()
        #check existence of the 'sign' section
        if not req[ "selection" ][name].has_key(logic):
          if value[0]=="v":
            req[ "selection" ][name][logic] = ""
          elif value[0]=="s":
            req[ "selection" ][name][logic] = []
        if value[0]=="v":
          req[ "selection" ][name][logic] = value[1]
        elif value[0]=="s":
          req[ "selection" ][name][logic] += value[1].split(":::")
    if self.request.arguments.has_key("path") :
      req["path"] = self.request.arguments["path"][0]
    gLogger.always("REQ: ",req)
    return req

  def __request_file(self):
    """
    Like ``__request()``, but parses the selection from a single "selection"
    argument whose elements are separated by "<|>" and whose fields are
    "name|logic|kind|value" ('|'-separated). No paging attributes are set.
    """
    req = { "selection" : {} , "path" : "/" }
    # NOTE(review): `separator` and `filemeta` are assigned but unused here.
    separator = ":::"
    result = self.fc.getMetadataFields()
    gLogger.debug( "request: %s" % result )
    if not result["OK"]:
      gLogger.error( "request: %s" % result[ "Message" ] )
      return req
    result = result["Value"]
    if not result.has_key( "FileMetaFields" ):
      error = "Service response has no FileMetaFields key. Return empty dict"
      gLogger.error( "request: %s" % error )
      return req
    if not result.has_key( "DirectoryMetaFields" ):
      error = "Service response has no DirectoryMetaFields key. Return empty dict"
      gLogger.error( "request: %s" % error )
      return req
    filemeta = result[ "FileMetaFields" ]
    dirmeta = result[ "DirectoryMetaFields" ]
    meta = []
    for key,value in dirmeta.items() :
      meta.append( key )
    gLogger.always( "request: metafields: %s " % meta )
    selectionElems=self.request.arguments["selection"][0].split("<|>")
    gLogger.always( "request: THISSSS %s " % self.request.arguments["selection"][0] )
    for param in selectionElems:
      tmp = str( param ).split( '|' )
      if len( tmp ) != 4 :
        continue
      name = tmp[0]
      logic = tmp[1]
      if not logic in ["in","nin", "=" , "!=" , ">=" , "<=" , ">" , "<" ] :
        gLogger.always( "Operand '%s' is not supported " % logic )
        continue
      if name in meta :
        #check existence of the 'name' section
        if not req[ "selection" ].has_key(name):
          req[ "selection" ][name] = dict()
        #check existence of the 'sign' section
        if not req[ "selection" ][name].has_key(logic):
          if tmp[2]=="v":
            req[ "selection" ][name][logic] = ""
          elif tmp[2]=="s":
            req[ "selection" ][name][logic] = []
        if tmp[2]=="v":
          req[ "selection" ][name][logic] = tmp[3]
        elif tmp[2]=="s":
          req[ "selection" ][name][logic] += tmp[3].split(":::")
    if self.request.arguments.has_key("path") :
      req["path"] = self.request.arguments["path"][0]
    gLogger.always("REQ: ",req)
    return req

  @asyncGen
  def web_getMetadataFilesInFile( self ):
    # Preset headers so that an error reply is still downloaded as a file.
    self.set_header('Content-type','text/plain')
    self.set_header('Content-Disposition', 'attachment; filename="error.txt"')
    req = self.__request_file()
    gLogger.always(req)
    gLogger.debug( "submit: incoming request %s" % req )
    result = yield self.threadTask( self.fc.findFilesByMetadata,
                                    req["selection"] , req["path"])
    if not result[ "OK" ] :
      gLogger.error( "submit: %s" % result[ "Message" ] )
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    result = result[ "Value" ]
    # One file name per line; the download name is an md5 of the request so
    # repeated identical queries produce the same file name.
    retStrLines = []
    if len(result)>0:
      for fileName in result:
        retStrLines.append(fileName)
    strData = "\n".join(retStrLines)
    self.set_header('Content-type','text/plain')
    self.set_header('Content-Disposition',
                    'attachment; filename="%s.txt"' % md5( str( req ) ).hexdigest())
    self.set_header('Content-Length', len( strData ))
    self.finish(strData)

  @asyncGen
  def web_getSubnodeFiles( self ):
    # List one catalog directory and return tree nodes: subdirectories as
    # expandable nodes, files as leaves, sorted case-insensitively by name.
    path = self.request.arguments["path"][0]
    # print path
    # path = "/vo.cta.in2p3.fr"
    result = yield self.threadTask( self.fc.listDirectory, path, False)
    if not result[ "OK" ] :
      gLogger.error( "submit: %s" % result[ "Message" ] )
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    # print result
    # NOTE(review): assumes `path` is in the "Successful" dict — a failed
    # lookup would raise KeyError here; confirm upstream guarantees.
    filesData = result["Value"]["Successful"][path]["Files"]
    dirData = result["Value"]["Successful"][path]["SubDirs"]
    retData = []
    for entryName in dirData:
      nodeDef = { 'text' : entryName.split("/")[-1] }
      nodeDef[ 'leaf' ] = False
      nodeDef[ 'expanded' ] = False
      retData.append(nodeDef)
    for entryName in filesData:
      nodeDef = { 'text' : entryName.split("/")[-1] }
      nodeDef[ 'leaf' ] = True
      retData.append(nodeDef)
    retData = sorted(retData, key=lambda node: node['text'].upper())
    self.finish({"success" : "true", "nodes":retData})
zmathe/WebAppDIRAC
WebApp/handler/FileCatalogHandler.py
Python
gpl-3.0
12,082
[ "DIRAC" ]
1ddcd5a57dbadd264466dc664b5ef6deb883c7b90c089edbf548eca2cf44e380
# GPAW regression test: for LDA and PBE, check that the PAW XC correction
# evaluated on the radial grid (E1) agrees with the XC energy of the same
# density evaluated on a 3D real-space grid (E2).
import numpy as np
import numpy.random as ra
from gpaw.test import equal
from gpaw.setup import create_setup
from gpaw.grid_descriptor import GridDescriptor
from gpaw.localized_functions import create_localized_functions
from gpaw.spline import Spline
from gpaw.xc import XC
from gpaw.utilities import pack
from gpaw.mpi import serial_comm

ra.seed(8)  # fixed seed so the random density matrix is reproducible
for name in ['LDA', 'PBE']:
    xc = XC(name)
    s = create_setup('N', xc)
    ni = s.ni
    niAO = s.niAO
    wt0_j = s.phit_j

    rcut = s.xc_correction.rgd.r_g[-1]

    # Re-sample each pseudo partial wave on 121 radial points and rebuild it
    # as a spline with an enlarged cutoff (1.2 * rcut); the last sample is
    # forced to zero so the spline vanishes at its cutoff.
    wt_j = []
    for wt0 in wt0_j:
        data = [wt0(r) for r in np.arange(121) * rcut / 100]
        data[-1] = 0.0
        l = wt0.get_angular_momentum_number()
        wt_j.append(Spline(l, 1.2 * rcut, data))

    # Box large enough to contain the enlarged cutoff sphere.
    a = rcut * 1.2 * 2 + 1.0
    ## n = 120
    n = 70
    n = 90  # grid points per axis (earlier choices kept above for reference)
    gd = GridDescriptor((n, n, n), (a, a, a), comm=serial_comm)

    # Place the basis functions at the box center and realize each one on
    # the 3D grid (identity coefficient matrix -> one function per slot).
    pr = create_localized_functions(wt_j, gd, (0.5, 0.5, 0.5))

    coefs = np.identity(niAO, float)
    psit_ig = np.zeros((niAO, n, n, n))
    pr.add(psit_ig, coefs)

    nii = ni * (ni + 1) // 2  # number of packed (upper-triangular) pairs
    D_p = np.zeros(nii)
    H_p = np.zeros(nii)

    e_g = np.zeros((n, n, n))
    n_g = np.zeros((1, n, n, n))
    v_g = np.zeros((1, n, n, n))

    # Random positive-semidefinite atomic density matrix, restricted to the
    # first niAO partial waves (the rest are zeroed).
    P_ni = 0.2 * ra.random((20, ni))
    P_ni[:, niAO:] = 0.0
    D_ii = np.dot(np.transpose(P_ni), P_ni)
    D_p = pack(D_ii)

    # Accumulate the grid density from packed density-matrix elements; the
    # inner `p` skips the packed entries belonging to i2 >= niAO.
    p = 0
    for i1 in range(niAO):
        for i2 in range(i1, niAO):
            n_g += D_p[p] * psit_ig[i1] * psit_ig[i2]
            p += 1
        p += ni - niAO

    # Add the smooth core density at the same center.
    p = create_localized_functions([s.nct], gd, (0.5, 0.5, 0.5))
    p.add(n_g[0], np.ones(1))

    e_g = gd.zeros()
    xc.calculate(gd, n_g, v_g, e_g)

    # Integrate -e_g only inside the cutoff sphere (dv_g masks grid points
    # with r^2 beyond (rcut/a * n)^2, in grid-index units).
    r2_g = np.sum((np.indices((n, n, n)) - n / 2)**2, axis=0)
    dv_g = gd.dv * np.less(r2_g, (rcut / a * n)**2)

    E2 = -np.dot(e_g.ravel(), dv_g.ravel())

    # Zero the setup's compensation/core densities so the PAW correction is
    # computed for the bare partial-wave density only.
    s.xc_correction.n_qg[:] = 0.0
    s.xc_correction.nc_g[:] = 0.0

    E1 = (xc.calculate_paw_correction(s, D_p.reshape(1, -1))
          + s.xc_correction.Exc0)

    print name, E1, E2, E1 - E2  # Python 2 print statement (kept as-is)
    equal(E1, E2, 0.0013)
ajylee/gpaw-rtxs
gpaw/test/gga_atom.py
Python
gpl-3.0
2,043
[ "GPAW" ]
2470e03e248a7a48ef0332e700e9560955a9b3697fd4a4270670218523fb90b2
# ---------------------------------------------------------------------- # LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator # http://lammps.sandia.gov, Sandia National Laboratories # Steve Plimpton, sjplimp@sandia.gov # # Copyright (2003) Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. This software is distributed under # the GNU General Public License. # # See the README file in the top-level LAMMPS directory. # ------------------------------------------------------------------------- # Python wrappers on LAMMPS library via ctypes # for python3 compatibility from __future__ import print_function # imports for simple LAMMPS python wrapper module "lammps" import sys,traceback,types from ctypes import * from os.path import dirname,abspath,join from inspect import getsourcefile # imports for advanced LAMMPS python wrapper modules "PyLammps" and "IPyLammps" from collections import namedtuple import os import select import re import sys def get_ctypes_int(size): if size == 4: return c_int32 elif size == 8: return c_int64 return c_int class MPIAbortException(Exception): def __init__(self, message): self.message = message def __str__(self): return repr(self.message) class lammps(object): # detect if Python is using version of mpi4py that can pass a communicator has_mpi4py = False try: from mpi4py import MPI from mpi4py import __version__ as mpi4py_version if mpi4py_version.split('.')[0] in ['2','3']: has_mpi4py = True except: pass # create instance of LAMMPS def __init__(self,name="",cmdargs=None,ptr=None,comm=None): self.comm = comm self.opened = 0 # determine module location modpath = dirname(abspath(getsourcefile(lambda:0))) self.lib = None # if a pointer to a LAMMPS object is handed in, # all symbols should already be available try: if ptr: self.lib = CDLL("",RTLD_GLOBAL) except: self.lib = None # load liblammps.so unless name is given # if 
name = "g++", load liblammps_g++.so # try loading the LAMMPS shared object from the location # of lammps.py with an absolute path, # so that LD_LIBRARY_PATH does not need to be set for regular install # fall back to loading with a relative path, # typically requires LD_LIBRARY_PATH to be set appropriately if any([f.startswith('liblammps') and f.endswith('.dylib') for f in os.listdir(modpath)]): lib_ext = ".dylib" else: lib_ext = ".so" if not self.lib: try: if not name: self.lib = CDLL(join(modpath,"liblammps" + lib_ext),RTLD_GLOBAL) else: self.lib = CDLL(join(modpath,"liblammps_%s" % name + lib_ext), RTLD_GLOBAL) except: if not name: self.lib = CDLL("liblammps" + lib_ext,RTLD_GLOBAL) else: self.lib = CDLL("liblammps_%s" % name + lib_ext,RTLD_GLOBAL) # define ctypes API for each library method # NOTE: should add one of these for each lib function self.lib.lammps_extract_box.argtypes = \ [c_void_p,POINTER(c_double),POINTER(c_double), POINTER(c_double),POINTER(c_double),POINTER(c_double), POINTER(c_int),POINTER(c_int)] self.lib.lammps_extract_box.restype = None self.lib.lammps_reset_box.argtypes = \ [c_void_p,POINTER(c_double),POINTER(c_double),c_double,c_double,c_double] self.lib.lammps_reset_box.restype = None self.lib.lammps_gather_atoms.argtypes = \ [c_void_p,c_char_p,c_int,c_int,c_void_p] self.lib.lammps_gather_atoms.restype = None self.lib.lammps_gather_atoms_concat.argtypes = \ [c_void_p,c_char_p,c_int,c_int,c_void_p] self.lib.lammps_gather_atoms_concat.restype = None self.lib.lammps_gather_atoms_subset.argtypes = \ [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p] self.lib.lammps_gather_atoms_subset.restype = None self.lib.lammps_scatter_atoms.argtypes = \ [c_void_p,c_char_p,c_int,c_int,c_void_p] self.lib.lammps_scatter_atoms.restype = None self.lib.lammps_scatter_atoms_subset.argtypes = \ [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p] self.lib.lammps_scatter_atoms_subset.restype = None # if no ptr provided, create an instance of 
LAMMPS # don't know how to pass an MPI communicator from PyPar # but we can pass an MPI communicator from mpi4py v2.0.0 and later # no_mpi call lets LAMMPS use MPI_COMM_WORLD # cargs = array of C strings from args # if ptr, then are embedding Python in LAMMPS input script # ptr is the desired instance of LAMMPS # just convert it to ctypes ptr and store in self.lmp if not ptr: # with mpi4py v2, can pass MPI communicator to LAMMPS # need to adjust for type of MPI communicator object # allow for int (like MPICH) or void* (like OpenMPI) if comm: if not lammps.has_mpi4py: raise Exception('Python mpi4py version is not 2 or 3') if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int): MPI_Comm = c_int else: MPI_Comm = c_void_p narg = 0 cargs = 0 if cmdargs: cmdargs.insert(0,"lammps.py") narg = len(cmdargs) for i in range(narg): if type(cmdargs[i]) is str: cmdargs[i] = cmdargs[i].encode() cargs = (c_char_p*narg)(*cmdargs) self.lib.lammps_open.argtypes = [c_int, c_char_p*narg, \ MPI_Comm, c_void_p()] else: self.lib.lammps_open.argtypes = [c_int, c_int, \ MPI_Comm, c_void_p()] self.lib.lammps_open.restype = None self.opened = 1 self.lmp = c_void_p() comm_ptr = lammps.MPI._addressof(comm) comm_val = MPI_Comm.from_address(comm_ptr) self.lib.lammps_open(narg,cargs,comm_val,byref(self.lmp)) else: if lammps.has_mpi4py: from mpi4py import MPI self.comm = MPI.COMM_WORLD self.opened = 1 if cmdargs: cmdargs.insert(0,"lammps.py") narg = len(cmdargs) for i in range(narg): if type(cmdargs[i]) is str: cmdargs[i] = cmdargs[i].encode() cargs = (c_char_p*narg)(*cmdargs) self.lmp = c_void_p() self.lib.lammps_open_no_mpi(narg,cargs,byref(self.lmp)) else: self.lmp = c_void_p() self.lib.lammps_open_no_mpi(0,None,byref(self.lmp)) # could use just this if LAMMPS lib interface supported it # self.lmp = self.lib.lammps_open_no_mpi(0,None) else: # magic to convert ptr to ctypes ptr if sys.version_info >= (3, 0): # Python 3 (uses PyCapsule API) pythonapi.PyCapsule_GetPointer.restype = c_void_p 
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p] self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None)) else: # Python 2 (uses PyCObject API) pythonapi.PyCObject_AsVoidPtr.restype = c_void_p pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object] self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr)) # optional numpy support (lazy loading) self._numpy = None # set default types self.c_bigint = get_ctypes_int(self.extract_setting("bigint")) self.c_tagint = get_ctypes_int(self.extract_setting("tagint")) self.c_imageint = get_ctypes_int(self.extract_setting("imageint")) self._installed_packages = None # add way to insert Python callback for fix external self.callback = {} self.FIX_EXTERNAL_CALLBACK_FUNC = CFUNCTYPE(None, c_void_p, self.c_bigint, c_int, POINTER(self.c_tagint), POINTER(POINTER(c_double)), POINTER(POINTER(c_double))) self.lib.lammps_set_fix_external_callback.argtypes = [c_void_p, c_char_p, self.FIX_EXTERNAL_CALLBACK_FUNC, c_void_p] self.lib.lammps_set_fix_external_callback.restype = None # shut-down LAMMPS instance def __del__(self): if self.lmp and self.opened: self.lib.lammps_close(self.lmp) self.opened = 0 def close(self): if self.opened: self.lib.lammps_close(self.lmp) self.lmp = None self.opened = 0 def version(self): return self.lib.lammps_version(self.lmp) def file(self,file): if file: file = file.encode() self.lib.lammps_file(self.lmp,file) # send a single command def command(self,cmd): if cmd: cmd = cmd.encode() self.lib.lammps_command(self.lmp,cmd) if self.has_exceptions and self.lib.lammps_has_error(self.lmp): sb = create_string_buffer(100) error_type = self.lib.lammps_get_last_error_message(self.lmp, sb, 100) error_msg = sb.value.decode().strip() if error_type == 2: raise MPIAbortException(error_msg) raise Exception(error_msg) # send a list of commands def commands_list(self,cmdlist): cmds = [x.encode() for x in cmdlist if type(x) is str] args = (c_char_p * len(cmdlist))(*cmds) 
self.lib.lammps_commands_list(self.lmp,len(cmdlist),args) # send a string of commands def commands_string(self,multicmd): if type(multicmd) is str: multicmd = multicmd.encode() self.lib.lammps_commands_string(self.lmp,c_char_p(multicmd)) # extract lammps type byte sizes def extract_setting(self, name): if name: name = name.encode() self.lib.lammps_extract_setting.restype = c_int return int(self.lib.lammps_extract_setting(self.lmp,name)) # extract global info def extract_global(self,name,type): if name: name = name.encode() if type == 0: self.lib.lammps_extract_global.restype = POINTER(c_int) elif type == 1: self.lib.lammps_extract_global.restype = POINTER(c_double) else: return None ptr = self.lib.lammps_extract_global(self.lmp,name) return ptr[0] # extract global info def extract_box(self): boxlo = (3*c_double)() boxhi = (3*c_double)() xy = c_double() yz = c_double() xz = c_double() periodicity = (3*c_int)() box_change = c_int() self.lib.lammps_extract_box(self.lmp,boxlo,boxhi, byref(xy),byref(yz),byref(xz), periodicity,byref(box_change)) boxlo = boxlo[:3] boxhi = boxhi[:3] xy = xy.value yz = yz.value xz = xz.value periodicity = periodicity[:3] box_change = box_change.value return boxlo,boxhi,xy,yz,xz,periodicity,box_change # extract per-atom info # NOTE: need to insure are converting to/from correct Python type # e.g. 
for Python list or NumPy or ctypes def extract_atom(self,name,type): if name: name = name.encode() if type == 0: self.lib.lammps_extract_atom.restype = POINTER(c_int) elif type == 1: self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int)) elif type == 2: self.lib.lammps_extract_atom.restype = POINTER(c_double) elif type == 3: self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double)) else: return None ptr = self.lib.lammps_extract_atom(self.lmp,name) return ptr @property def numpy(self): if not self._numpy: import numpy as np class LammpsNumpyWrapper: def __init__(self, lmp): self.lmp = lmp def _ctype_to_numpy_int(self, ctype_int): if ctype_int == c_int32: return np.int32 elif ctype_int == c_int64: return np.int64 return np.intc def extract_atom_iarray(self, name, nelem, dim=1): if name in ['id', 'molecule']: c_int_type = self.lmp.c_tagint elif name in ['image']: c_int_type = self.lmp.c_imageint else: c_int_type = c_int np_int_type = self._ctype_to_numpy_int(c_int_type) if dim == 1: tmp = self.lmp.extract_atom(name, 0) ptr = cast(tmp, POINTER(c_int_type * nelem)) else: tmp = self.lmp.extract_atom(name, 1) ptr = cast(tmp[0], POINTER(c_int_type * nelem * dim)) a = np.frombuffer(ptr.contents, dtype=np_int_type) a.shape = (nelem, dim) return a def extract_atom_darray(self, name, nelem, dim=1): if dim == 1: tmp = self.lmp.extract_atom(name, 2) ptr = cast(tmp, POINTER(c_double * nelem)) else: tmp = self.lmp.extract_atom(name, 3) ptr = cast(tmp[0], POINTER(c_double * nelem * dim)) a = np.frombuffer(ptr.contents) a.shape = (nelem, dim) return a self._numpy = LammpsNumpyWrapper(self) return self._numpy # extract compute info def extract_compute(self,id,style,type): if id: id = id.encode() if type == 0: if style > 0: return None self.lib.lammps_extract_compute.restype = POINTER(c_double) ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type) return ptr[0] if type == 1: self.lib.lammps_extract_compute.restype = POINTER(c_double) ptr = 
self.lib.lammps_extract_compute(self.lmp,id,style,type) return ptr if type == 2: if style == 0: self.lib.lammps_extract_compute.restype = POINTER(c_int) ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type) return ptr[0] else: self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double)) ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type) return ptr return None # extract fix info # in case of global datum, free memory for 1 double via lammps_free() # double was allocated by library interface function def extract_fix(self,id,style,type,i=0,j=0): if id: id = id.encode() if style == 0: self.lib.lammps_extract_fix.restype = POINTER(c_double) ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j) result = ptr[0] self.lib.lammps_free(ptr) return result elif (style == 1) or (style == 2): if type == 1: self.lib.lammps_extract_fix.restype = POINTER(c_double) elif type == 2: self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double)) else: return None ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j) return ptr else: return None # extract variable info # free memory for 1 double or 1 vector of doubles via lammps_free() # for vector, must copy nlocal returned values to local c_double vector # memory was allocated by library interface function def extract_variable(self,name,group,type): if name: name = name.encode() if group: group = group.encode() if type == 0: self.lib.lammps_extract_variable.restype = POINTER(c_double) ptr = self.lib.lammps_extract_variable(self.lmp,name,group) result = ptr[0] self.lib.lammps_free(ptr) return result if type == 1: self.lib.lammps_extract_global.restype = POINTER(c_int) nlocalptr = self.lib.lammps_extract_global(self.lmp,"nlocal".encode()) nlocal = nlocalptr[0] result = (c_double*nlocal)() self.lib.lammps_extract_variable.restype = POINTER(c_double) ptr = self.lib.lammps_extract_variable(self.lmp,name,group) for i in range(nlocal): result[i] = ptr[i] self.lib.lammps_free(ptr) return result 
return None # return current value of thermo keyword def get_thermo(self,name): if name: name = name.encode() self.lib.lammps_get_thermo.restype = c_double return self.lib.lammps_get_thermo(self.lmp,name) # return total number of atoms in system def get_natoms(self): return self.lib.lammps_get_natoms(self.lmp) # set variable value # value is converted to string # returns 0 for success, -1 if failed def set_variable(self,name,value): if name: name = name.encode() if value: value = str(value).encode() return self.lib.lammps_set_variable(self.lmp,name,value) # reset simulation box size def reset_box(self,boxlo,boxhi,xy,yz,xz): cboxlo = (3*c_double)(*boxlo) cboxhi = (3*c_double)(*boxhi) self.lib.lammps_reset_box(self.lmp,cboxlo,cboxhi,xy,yz,xz) # return vector of atom properties gathered across procs # 3 variants to match src/library.cpp # name = atom property recognized by LAMMPS in atom->extract() # type = 0 for integer values, 1 for double values # count = number of per-atom valus, 1 for type or charge, 3 for x or f # returned data is a 1d vector - doc how it is ordered? # NOTE: need to insure are converting to/from correct Python type # e.g. 
for Python list or NumPy or ctypes def gather_atoms(self,name,type,count): if name: name = name.encode() natoms = self.lib.lammps_get_natoms(self.lmp) if type == 0: data = ((count*natoms)*c_int)() self.lib.lammps_gather_atoms(self.lmp,name,type,count,data) elif type == 1: data = ((count*natoms)*c_double)() self.lib.lammps_gather_atoms(self.lmp,name,type,count,data) else: return None return data def gather_atoms_concat(self,name,type,count): if name: name = name.encode() natoms = self.lib.lammps_get_natoms(self.lmp) if type == 0: data = ((count*natoms)*c_int)() self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data) elif type == 1: data = ((count*natoms)*c_double)() self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data) else: return None return data def gather_atoms_subset(self,name,type,count,ndata,ids): if name: name = name.encode() if type == 0: data = ((count*ndata)*c_int)() self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data) elif type == 1: data = ((count*ndata)*c_double)() self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data) else: return None return data # scatter vector of atom properties across procs # 2 variants to match src/library.cpp # name = atom property recognized by LAMMPS in atom->extract() # type = 0 for integer values, 1 for double values # count = number of per-atom valus, 1 for type or charge, 3 for x or f # assume data is of correct type and length, as created by gather_atoms() # NOTE: need to insure are converting to/from correct Python type # e.g. 
for Python list or NumPy or ctypes def scatter_atoms(self,name,type,count,data): if name: name = name.encode() self.lib.lammps_scatter_atoms(self.lmp,name,type,count,data) def scatter_atoms_subset(self,name,type,count,ndata,ids,data): if name: name = name.encode() self.lib.lammps_scatter_atoms_subset(self.lmp,name,type,count,ndata,ids,data) # create N atoms on all procs # N = global number of atoms # id = ID of each atom (optional, can be None) # type = type of each atom (1 to Ntypes) (required) # x = coords of each atom as (N,3) array (required) # v = velocity of each atom as (N,3) array (optional, can be None) # NOTE: how could we insure are passing correct type to LAMMPS # e.g. for Python list or NumPy, etc # ditto for gather_atoms() above def create_atoms(self,n,id,type,x,v,image=None,shrinkexceed=False): if id: id_lmp = (c_int * n)() id_lmp[:] = id else: id_lmp = id if image: image_lmp = (c_int * n)() image_lmp[:] = image else: image_lmp = image type_lmp = (c_int * n)() type_lmp[:] = type self.lib.lammps_create_atoms(self.lmp,n,id_lmp,type_lmp,x,v,image_lmp, shrinkexceed) @property def has_exceptions(self): """ Return whether the LAMMPS shared library was compiled with C++ exceptions handling enabled """ return self.lib.lammps_config_has_exceptions() != 0 @property def has_gzip_support(self): return self.lib.lammps_config_has_gzip_support() != 0 @property def has_png_support(self): return self.lib.lammps_config_has_png_support() != 0 @property def has_jpeg_support(self): return self.lib.lammps_config_has_jpeg_support() != 0 @property def has_ffmpeg_support(self): return self.lib.lammps_config_has_ffmpeg_support() != 0 @property def installed_packages(self): if self._installed_packages is None: self._installed_packages = [] npackages = self.lib.lammps_config_package_count() sb = create_string_buffer(100) for idx in range(npackages): self.lib.lammps_config_package_name(idx, sb, 100) self._installed_packages.append(sb.value.decode()) return 
self._installed_packages def set_fix_external_callback(self, fix_name, callback, caller=None): import numpy as np def _ctype_to_numpy_int(ctype_int): if ctype_int == c_int32: return np.int32 elif ctype_int == c_int64: return np.int64 return np.intc def callback_wrapper(caller_ptr, ntimestep, nlocal, tag_ptr, x_ptr, fext_ptr): if cast(caller_ptr,POINTER(py_object)).contents: pyCallerObj = cast(caller_ptr,POINTER(py_object)).contents.value else: pyCallerObj = None tptr = cast(tag_ptr, POINTER(self.c_tagint * nlocal)) tag = np.frombuffer(tptr.contents, dtype=_ctype_to_numpy_int(self.c_tagint)) tag.shape = (nlocal) xptr = cast(x_ptr[0], POINTER(c_double * nlocal * 3)) x = np.frombuffer(xptr.contents) x.shape = (nlocal, 3) fptr = cast(fext_ptr[0], POINTER(c_double * nlocal * 3)) f = np.frombuffer(fptr.contents) f.shape = (nlocal, 3) callback(pyCallerObj, ntimestep, nlocal, tag, x, f) cFunc = self.FIX_EXTERNAL_CALLBACK_FUNC(callback_wrapper) cCaller = cast(pointer(py_object(caller)), c_void_p) self.callback[fix_name] = { 'function': cFunc, 'caller': caller } self.lib.lammps_set_fix_external_callback(self.lmp, fix_name.encode(), cFunc, cCaller) # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- ################################################################################ # Alternative Python Wrapper # Written by Richard Berger <richard.berger@temple.edu> ################################################################################ class OutputCapture(object): """ Utility class to capture LAMMPS library output """ def __init__(self): self.stdout_pipe_read, self.stdout_pipe_write = os.pipe() self.stdout_fd = 1 def __enter__(self): self.stdout = os.dup(self.stdout_fd) os.dup2(self.stdout_pipe_write, self.stdout_fd) return self def __exit__(self, type, value, tracebac): os.dup2(self.stdout, 
self.stdout_fd) os.close(self.stdout) os.close(self.stdout_pipe_read) os.close(self.stdout_pipe_write) # check if we have more to read from the pipe def more_data(self, pipe): r, _, _ = select.select([pipe], [], [], 0) return bool(r) # read the whole pipe def read_pipe(self, pipe): out = "" while self.more_data(pipe): out += os.read(pipe, 1024).decode() return out @property def output(self): return self.read_pipe(self.stdout_pipe_read) class Variable(object): def __init__(self, lammps_wrapper_instance, name, style, definition): self.wrapper = lammps_wrapper_instance self.name = name self.style = style self.definition = definition.split() @property def value(self): if self.style == 'atom': return list(self.wrapper.lmp.extract_variable(self.name, "all", 1)) else: value = self.wrapper.lmp_print('"${%s}"' % self.name).strip() try: return float(value) except ValueError: return value class AtomList(object): def __init__(self, lammps_wrapper_instance): self.lmp = lammps_wrapper_instance self.natoms = self.lmp.system.natoms self.dimensions = self.lmp.system.dimensions def __getitem__(self, index): if self.dimensions == 2: return Atom2D(self.lmp, index + 1) return Atom(self.lmp, index + 1) class Atom(object): def __init__(self, lammps_wrapper_instance, index): self.lmp = lammps_wrapper_instance self.index = index @property def id(self): return int(self.lmp.eval("id[%d]" % self.index)) @property def type(self): return int(self.lmp.eval("type[%d]" % self.index)) @property def mol(self): return self.lmp.eval("mol[%d]" % self.index) @property def mass(self): return self.lmp.eval("mass[%d]" % self.index) @property def position(self): return (self.lmp.eval("x[%d]" % self.index), self.lmp.eval("y[%d]" % self.index), self.lmp.eval("z[%d]" % self.index)) @position.setter def position(self, value): self.lmp.set("atom", self.index, "x", value[0]) self.lmp.set("atom", self.index, "y", value[1]) self.lmp.set("atom", self.index, "z", value[2]) @property def velocity(self): return 
(self.lmp.eval("vx[%d]" % self.index), self.lmp.eval("vy[%d]" % self.index), self.lmp.eval("vz[%d]" % self.index)) @velocity.setter def velocity(self, value): self.lmp.set("atom", self.index, "vx", value[0]) self.lmp.set("atom", self.index, "vy", value[1]) self.lmp.set("atom", self.index, "vz", value[2]) @property def force(self): return (self.lmp.eval("fx[%d]" % self.index), self.lmp.eval("fy[%d]" % self.index), self.lmp.eval("fz[%d]" % self.index)) @property def charge(self): return self.lmp.eval("q[%d]" % self.index) class Atom2D(Atom): def __init__(self, lammps_wrapper_instance, index): super(Atom2D, self).__init__(lammps_wrapper_instance, index) @property def position(self): return (self.lmp.eval("x[%d]" % self.index), self.lmp.eval("y[%d]" % self.index)) @position.setter def position(self, value): self.lmp.set("atom", self.index, "x", value[0]) self.lmp.set("atom", self.index, "y", value[1]) @property def velocity(self): return (self.lmp.eval("vx[%d]" % self.index), self.lmp.eval("vy[%d]" % self.index)) @velocity.setter def velocity(self, value): self.lmp.set("atom", self.index, "vx", value[0]) self.lmp.set("atom", self.index, "vy", value[1]) @property def force(self): return (self.lmp.eval("fx[%d]" % self.index), self.lmp.eval("fy[%d]" % self.index)) class variable_set: def __init__(self, name, variable_dict): self._name = name array_pattern = re.compile(r"(?P<arr>.+)\[(?P<index>[0-9]+)\]") for key, value in variable_dict.items(): m = array_pattern.match(key) if m: g = m.groupdict() varname = g['arr'] idx = int(g['index']) if varname not in self.__dict__: self.__dict__[varname] = {} self.__dict__[varname][idx] = value else: self.__dict__[key] = value def __str__(self): return "{}({})".format(self._name, ','.join(["{}={}".format(k, self.__dict__[k]) for k in self.__dict__.keys() if not k.startswith('_')])) def __repr__(self): return self.__str__() def get_thermo_data(output): """ traverse output of runs and extract thermo data columns """ if 
isinstance(output, str): lines = output.splitlines() else: lines = output runs = [] columns = [] in_run = False current_run = {} for line in lines: if line.startswith("Per MPI rank memory allocation"): in_run = True elif in_run and len(columns) == 0: # first line after memory usage are column names columns = line.split() current_run = {} for col in columns: current_run[col] = [] elif line.startswith("Loop time of "): in_run = False columns = None thermo_data = variable_set('ThermoData', current_run) r = {'thermo' : thermo_data } runs.append(namedtuple('Run', list(r.keys()))(*list(r.values()))) elif in_run and len(columns) > 0: values = [float(x) for x in line.split()] for i, col in enumerate(columns): current_run[col].append(values[i]) return runs class PyLammps(object): """ More Python-like wrapper for LAMMPS (e.g., for iPython) See examples/ipython for usage """ def __init__(self,name="",cmdargs=None,ptr=None,comm=None): if ptr: if isinstance(ptr,PyLammps): self.lmp = ptr.lmp elif isinstance(ptr,lammps): self.lmp = ptr else: self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm) else: self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=None,comm=comm) print("LAMMPS output is captured by PyLammps wrapper") self._cmd_history = [] self.runs = [] def __del__(self): if self.lmp: self.lmp.close() self.lmp = None def close(self): if self.lmp: self.lmp.close() self.lmp = None def version(self): return self.lmp.version() def file(self,file): self.lmp.file(file) def write_script(self,filename): """ Write LAMMPS script file containing all commands executed up until now """ with open(filename, "w") as f: for cmd in self._cmd_history: f.write("%s\n" % cmd) def command(self,cmd): self.lmp.command(cmd) self._cmd_history.append(cmd) def run(self, *args, **kwargs): output = self.__getattr__('run')(*args, **kwargs) if(lammps.has_mpi4py): output = self.lmp.comm.bcast(output, root=0) self.runs += get_thermo_data(output) return output @property def last_run(self): if 
len(self.runs) > 0: return self.runs[-1] return None @property def atoms(self): return AtomList(self) @property def system(self): output = self.info("system") d = self._parse_info_system(output) return namedtuple('System', d.keys())(*d.values()) @property def communication(self): output = self.info("communication") d = self._parse_info_communication(output) return namedtuple('Communication', d.keys())(*d.values()) @property def computes(self): output = self.info("computes") return self._parse_element_list(output) @property def dumps(self): output = self.info("dumps") return self._parse_element_list(output) @property def fixes(self): output = self.info("fixes") return self._parse_element_list(output) @property def groups(self): output = self.info("groups") return self._parse_groups(output) @property def variables(self): output = self.info("variables") vars = {} for v in self._parse_element_list(output): vars[v['name']] = Variable(self, v['name'], v['style'], v['def']) return vars def eval(self, expr): value = self.lmp_print('"$(%s)"' % expr).strip() try: return float(value) except ValueError: return value def _split_values(self, line): return [x.strip() for x in line.split(',')] def _get_pair(self, value): return [x.strip() for x in value.split('=')] def _parse_info_system(self, output): lines = output[6:-2] system = {} for line in lines: if line.startswith("Units"): system['units'] = self._get_pair(line)[1] elif line.startswith("Atom style"): system['atom_style'] = self._get_pair(line)[1] elif line.startswith("Atom map"): system['atom_map'] = self._get_pair(line)[1] elif line.startswith("Atoms"): parts = self._split_values(line) system['natoms'] = int(self._get_pair(parts[0])[1]) system['ntypes'] = int(self._get_pair(parts[1])[1]) system['style'] = self._get_pair(parts[2])[1] elif line.startswith("Kspace style"): system['kspace_style'] = self._get_pair(line)[1] elif line.startswith("Dimensions"): system['dimensions'] = int(self._get_pair(line)[1]) elif 
line.startswith("Orthogonal box"): system['orthogonal_box'] = [float(x) for x in self._get_pair(line)[1].split('x')] elif line.startswith("Boundaries"): system['boundaries'] = self._get_pair(line)[1] elif line.startswith("xlo"): keys, values = [self._split_values(x) for x in self._get_pair(line)] for key, value in zip(keys, values): system[key] = float(value) elif line.startswith("ylo"): keys, values = [self._split_values(x) for x in self._get_pair(line)] for key, value in zip(keys, values): system[key] = float(value) elif line.startswith("zlo"): keys, values = [self._split_values(x) for x in self._get_pair(line)] for key, value in zip(keys, values): system[key] = float(value) elif line.startswith("Molecule type"): system['molecule_type'] = self._get_pair(line)[1] elif line.startswith("Bonds"): parts = self._split_values(line) system['nbonds'] = int(self._get_pair(parts[0])[1]) system['nbondtypes'] = int(self._get_pair(parts[1])[1]) system['bond_style'] = self._get_pair(parts[2])[1] elif line.startswith("Angles"): parts = self._split_values(line) system['nangles'] = int(self._get_pair(parts[0])[1]) system['nangletypes'] = int(self._get_pair(parts[1])[1]) system['angle_style'] = self._get_pair(parts[2])[1] elif line.startswith("Dihedrals"): parts = self._split_values(line) system['ndihedrals'] = int(self._get_pair(parts[0])[1]) system['ndihedraltypes'] = int(self._get_pair(parts[1])[1]) system['dihedral_style'] = self._get_pair(parts[2])[1] elif line.startswith("Impropers"): parts = self._split_values(line) system['nimpropers'] = int(self._get_pair(parts[0])[1]) system['nimpropertypes'] = int(self._get_pair(parts[1])[1]) system['improper_style'] = self._get_pair(parts[2])[1] return system def _parse_info_communication(self, output): lines = output[6:-3] comm = {} for line in lines: if line.startswith("MPI library"): comm['mpi_version'] = line.split(':')[1].strip() elif line.startswith("Comm style"): parts = self._split_values(line) comm['comm_style'] = 
self._get_pair(parts[0])[1] comm['comm_layout'] = self._get_pair(parts[1])[1] elif line.startswith("Processor grid"): comm['proc_grid'] = [int(x) for x in self._get_pair(line)[1].split('x')] elif line.startswith("Communicate velocities for ghost atoms"): comm['ghost_velocity'] = (self._get_pair(line)[1] == "yes") elif line.startswith("Nprocs"): parts = self._split_values(line) comm['nprocs'] = int(self._get_pair(parts[0])[1]) comm['nthreads'] = int(self._get_pair(parts[1])[1]) return comm def _parse_element_list(self, output): lines = output[6:-3] elements = [] for line in lines: element_info = self._split_values(line.split(':')[1].strip()) element = {'name': element_info[0]} for key, value in [self._get_pair(x) for x in element_info[1:]]: element[key] = value elements.append(element) return elements def _parse_groups(self, output): lines = output[6:-3] groups = [] group_pattern = re.compile(r"(?P<name>.+) \((?P<type>.+)\)") for line in lines: m = group_pattern.match(line.split(':')[1].strip()) group = {'name': m.group('name'), 'type': m.group('type')} groups.append(group) return groups def lmp_print(self, s): """ needed for Python2 compatibility, since print is a reserved keyword """ return self.__getattr__("print")(s) def __dir__(self): return ['angle_coeff', 'angle_style', 'atom_modify', 'atom_style', 'atom_style', 'bond_coeff', 'bond_style', 'boundary', 'change_box', 'communicate', 'compute', 'create_atoms', 'create_box', 'delete_atoms', 'delete_bonds', 'dielectric', 'dihedral_coeff', 'dihedral_style', 'dimension', 'dump', 'fix', 'fix_modify', 'group', 'improper_coeff', 'improper_style', 'include', 'kspace_modify', 'kspace_style', 'lattice', 'mass', 'minimize', 'min_style', 'neighbor', 'neigh_modify', 'newton', 'nthreads', 'pair_coeff', 'pair_modify', 'pair_style', 'processors', 'read', 'read_data', 'read_restart', 'region', 'replicate', 'reset_timestep', 'restart', 'run', 'run_style', 'thermo', 'thermo_modify', 'thermo_style', 'timestep', 'undump', 'unfix', 
'units', 'variable', 'velocity', 'write_restart'] def __getattr__(self, name): def handler(*args, **kwargs): cmd_args = [name] + [str(x) for x in args] with OutputCapture() as capture: self.command(' '.join(cmd_args)) output = capture.output if 'verbose' in kwargs and kwargs['verbose']: print(output) lines = output.splitlines() if len(lines) > 1: return lines elif len(lines) == 1: return lines[0] return None return handler class IPyLammps(PyLammps): """ iPython wrapper for LAMMPS which adds embedded graphics capabilities """ def __init__(self,name="",cmdargs=None,ptr=None,comm=None): super(IPyLammps, self).__init__(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm) def image(self, filename="snapshot.png", group="all", color="type", diameter="type", size=None, view=None, center=None, up=None, zoom=1.0): cmd_args = [group, "image", filename, color, diameter] if size: width = size[0] height = size[1] cmd_args += ["size", width, height] if view: theta = view[0] phi = view[1] cmd_args += ["view", theta, phi] if center: flag = center[0] Cx = center[1] Cy = center[2] Cz = center[3] cmd_args += ["center", flag, Cx, Cy, Cz] if up: Ux = up[0] Uy = up[1] Uz = up[2] cmd_args += ["up", Ux, Uy, Uz] if zoom: cmd_args += ["zoom", zoom] cmd_args.append("modify backcolor white") self.write_dump(*cmd_args) from IPython.core.display import Image return Image('snapshot.png') def video(self, filename): from IPython.display import HTML return HTML("<video controls><source src=\"" + filename + "\"></video>")
Pakketeretet2/lammps
python/lammps.py
Python
gpl-2.0
38,678
[ "LAMMPS" ]
3c28be0c75d71d317e71912efc6bd8ea022861842dfec8ffcc8ad5355e1144a2
# coding: utf-8 """ Works for Abinit: """ from __future__ import unicode_literals, division, print_function import os import shutil import time import abc import collections import numpy as np import six import copy from six.moves import filter from monty.collections import AttrDict from monty.itertools import chunks from monty.functools import lazy_property from monty.fnmatch import WildCard from pydispatch import dispatcher from pymatgen.core.units import EnergyArray from . import wrappers from .nodes import Dependency, Node, NodeError, NodeResults, check_spectator from .tasks import (Task, AbinitTask, ScfTask, NscfTask, PhononTask, DdkTask, BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask) from .utils import Directory from .netcdf import ETSF_Reader, NetcdfReader from .abitimer import AbinitTimerParser import logging logger = logging.getLogger(__name__) __author__ = "Matteo Giantomassi" __copyright__ = "Copyright 2013, The Materials Project" __version__ = "0.1" __maintainer__ = "Matteo Giantomassi" __all__ = [ "Work", "BandStructureWork", "RelaxWork", "G0W0Work", "QptdmWork", "SigmaConvWork", "BseMdfWork", "PhononWork", ] class WorkResults(NodeResults): JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy() @classmethod def from_node(cls, work): """Initialize an instance from a :class:`Work` instance.""" new = super(WorkResults, cls).from_node(work) # Will put all files found in outdir in GridFs # Warning: assuming binary files. d = {os.path.basename(f): f for f in work.outdir.list_filepaths()} new.register_gridfs_files(**d) return new class WorkError(NodeError): """Base class for the exceptions raised by Work objects.""" class BaseWork(six.with_metaclass(abc.ABCMeta, Node)): Error = WorkError Results = WorkResults # interface modeled after subprocess.Popen @abc.abstractproperty def processes(self): """Return a list of objects that support the `subprocess.Popen` protocol.""" def poll(self): """ Check if all child processes have terminated. 
Set and return returncode attribute. """ return [task.poll() for task in self] def wait(self): """ Wait for child processed to terminate. Set and return returncode attribute. """ return [task.wait() for task in self] def communicate(self, input=None): """ Interact with processes: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child processed, or None, if no data should be sent to the children. communicate() returns a list of tuples (stdoutdata, stderrdata). """ return [task.communicate(input) for task in self] @property def returncodes(self): """ The children return codes, set by poll() and wait() (and indirectly by communicate()). A None value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (Unix only). """ return [task.returncode for task in self] @property def ncores_reserved(self): """ Returns the number of cores reserved in this moment. A core is reserved if it's still not running but we have submitted the task to the queue manager. """ return sum(task.manager.num_cores for task in self if task.status == task.S_SUB) @property def ncores_allocated(self): """ Returns the number of CPUs allocated in this moment. A core is allocated if it's running a task or if we have submitted a task to the queue manager but the job is still pending. """ return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN]) @property def ncores_used(self): """ Returns the number of cores used in this moment. A core is used if there's a job that is running on it. """ return sum(task.manager.num_cores for task in self if task.status == task.S_RUN) def fetch_task_to_run(self): """ Returns the first task that is ready to run or None if no task can be submitted at present" Raises: `StopIteration` if all tasks are done. 
""" # All the tasks are done so raise an exception # that will be handled by the client code. if all(task.is_completed for task in self): raise StopIteration("All tasks completed.") for task in self: if task.can_run: return task # No task found, this usually happens when we have dependencies. # Beware of possible deadlocks here! logger.warning("Possible deadlock in fetch_task_to_run!") return None def fetch_alltasks_to_run(self): """ Returns a list with all the tasks that can be submitted. Empty list if not task has been found. """ return [task for task in self if task.can_run] @abc.abstractmethod def setup(self, *args, **kwargs): """Method called before submitting the calculations.""" def _setup(self, *args, **kwargs): self.setup(*args, **kwargs) def connect_signals(self): """ Connect the signals within the work. The :class:`Work` is responsible for catching the important signals raised from its task and raise new signals when some particular condition occurs. """ for task in self: dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task) def disconnect_signals(self): """ Disable the signals within the work. This function reverses the process of `connect_signals` """ for task in self: try: dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task) except dispatcher.errors.DispatcherKeyError as exc: logger.debug(str(exc)) @property def all_ok(self): return all(task.status == task.S_OK for task in self) #@check_spectator def on_ok(self, sender): """ This callback is called when one task reaches status `S_OK`. It executes on_all_ok when all task in self have reached `S_OK`. """ logger.debug("in on_ok with sender %s" % sender) if self.all_ok: if self.finalized: return AttrDict(returncode=0, message="Work has been already finalized") else: # Set finalized here, because on_all_ok might change it (e.g. 
Relax + EOS in a single work) self.finalized = True try: results = AttrDict(**self.on_all_ok()) except Exception as exc: self.history.critical("on_all_ok raises %s" % str(exc)) self.finalized = False raise # Signal to possible observers that the `Work` reached S_OK self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self)) if self._finalized: self.send_signal(self.S_OK) return results return AttrDict(returncode=1, message="Not all tasks are OK!") #@check_spectator def on_all_ok(self): """ This method is called once the `Work` is completed i.e. when all the tasks have reached status S_OK. Subclasses should provide their own implementation Returns: Dictionary that must contain at least the following entries: returncode: 0 on success. message: a string that should provide a human-readable description of what has been performed. """ return dict(returncode=0, message="Calling on_all_ok of the base class!") def get_results(self, **kwargs): """ Method called once the calculations are completed. The base version returns a dictionary task_name: TaskResults for each task in self. """ results = self.Results.from_node(self) return results class NodeContainer(six.with_metaclass(abc.ABCMeta)): """ Mixin classes for `Work` and `Flow` objects providing helperf functions to register tasks in the container. The helperfunctios call the `register` method of the container. """ # TODO: Abstract protocol for containers @abc.abstractmethod def register_task(self, *args, **kwargs): """ Register a task in the container. """ # TODO: shall flow.register_task return a Task or a Work? 
# Helper functions def register_scf_task(self, *args, **kwargs): """Register a Scf task.""" kwargs["task_class"] = ScfTask return self.register_task(*args, **kwargs) def register_nscf_task(self, *args, **kwargs): """Register a nscf task.""" kwargs["task_class"] = NscfTask return self.register_task(*args, **kwargs) def register_relax_task(self, *args, **kwargs): """Register a task for structural optimization.""" kwargs["task_class"] = RelaxTask return self.register_task(*args, **kwargs) def register_phonon_task(self, *args, **kwargs): """Register a phonon task.""" kwargs["task_class"] = PhononTask return self.register_task(*args, **kwargs) def register_ddk_task(self, *args, **kwargs): """Register a ddk task.""" kwargs["task_class"] = DdkTask return self.register_task(*args, **kwargs) def register_scr_task(self, *args, **kwargs): """Register a screening task.""" kwargs["task_class"] = ScrTask return self.register_task(*args, **kwargs) def register_sigma_task(self, *args, **kwargs): """Register a sigma task.""" kwargs["task_class"] = SigmaTask return self.register_task(*args, **kwargs) # TODO: Remove def register_dde_task(self, *args, **kwargs): """Register a Dde task.""" kwargs["task_class"] = DdeTask return self.register_task(*args, **kwargs) def register_bec_task(self, *args, **kwargs): """Register a BEC task.""" kwargs["task_class"] = BecTask return self.register_task(*args, **kwargs) def register_bse_task(self, *args, **kwargs): """Register a nscf task.""" kwargs["task_class"] = BseTask return self.register_task(*args, **kwargs) class Work(BaseWork, NodeContainer): """ A Work is a list of (possibly connected) tasks. """ def __init__(self, workdir=None, manager=None): """ Args: workdir: Path to the working directory. manager: :class:`TaskManager` object. 
""" super(Work, self).__init__() self._tasks = [] if workdir is not None: self.set_workdir(workdir) if manager is not None: self.set_manager(manager) def set_manager(self, manager): """Set the :class:`TaskManager` to use to launch the :class:`Task`.""" self.manager = manager.deepcopy() for task in self: task.set_manager(manager) @property def flow(self): """The flow containing this :class:`Work`.""" return self._flow def set_flow(self, flow): """Set the flow associated to this :class:`Work`.""" if not hasattr(self, "_flow"): self._flow = flow else: if self._flow != flow: raise ValueError("self._flow != flow") @lazy_property def pos(self): """The position of self in the :class:`Flow`""" for i, work in enumerate(self.flow): if self == work: return i raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow)) @property def pos_str(self): """String representation of self.pos""" return "w" + str(self.pos) def set_workdir(self, workdir, chroot=False): """Set the working directory. Cannot be set more than once unless chroot is True""" if not chroot and hasattr(self, "workdir") and self.workdir != workdir: raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir)) self.workdir = os.path.abspath(workdir) # Directories with (input|output|temporary) data. # The work will use these directories to connect # itself to other works and/or to produce new data # that will be used by its children. 
self.indir = Directory(os.path.join(self.workdir, "indata")) self.outdir = Directory(os.path.join(self.workdir, "outdata")) self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata")) def chroot(self, new_workdir): self.set_workdir(new_workdir, chroot=True) for i, task in enumerate(self): new_tdir = os.path.join(self.workdir, "t" + str(i)) task.set_workdir(new_tdir, chroot=True) def __len__(self): return len(self._tasks) def __iter__(self): return self._tasks.__iter__() def __getitem__(self, slice): return self._tasks[slice] def chunks(self, chunk_size): """Yield successive chunks of tasks of lenght chunk_size.""" for tasks in chunks(self, chunk_size): yield tasks def opath_from_ext(self, ext): """ Returns the path of the output file with extension ext. Use it when the file does not exist yet. """ return self.indir.path_in("in_" + ext) def opath_from_ext(self, ext): """ Returns the path of the output file with extension ext. Use it when the file does not exist yet. """ return self.outdir.path_in("out_" + ext) @property def processes(self): return [task.process for task in self] @property def all_done(self): """True if all the :class:`Task` objects in the :class:`Work` are done.""" return all(task.status >= task.S_DONE for task in self) @property def isnc(self): """True if norm-conserving calculation.""" return all(task.isnc for task in self) @property def ispaw(self): """True if PAW calculation.""" return all(task.ispaw for task in self) @property def status_counter(self): """ Returns a `Counter` object that counts the number of task with given status (use the string representation of the status as key). """ counter = collections.Counter() for task in self: counter[str(task.status)] += 1 return counter def allocate(self, manager=None): """ This function is called once we have completed the initialization of the :class:`Work`. It sets the manager of each task (if not already done) and defines the working directories of the tasks. 
Args: manager: :class:`TaskManager` object or None """ for i, task in enumerate(self): if not hasattr(task, "manager"): # Set the manager # Use the one provided in input else the one of the work. task.set_manager(manager) if manager is not None else task.set_manager(self.manager) task_workdir = os.path.join(self.workdir, "t" + str(i)) if not hasattr(task, "workdir"): task.set_workdir(task_workdir) else: if task.workdir != task_workdir: raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir)) def register(self, obj, deps=None, required_files=None, manager=None, task_class=None): """ Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies. Args: obj: :class:`AbinitInput` instance. deps: Dictionary specifying the dependency of this node. None means that this obj has no dependency. required_files: List of strings with the path of the files used by the task. Note that the files must exist when the task is registered. Use the standard approach based on Works, Tasks and deps if the files will be produced in the future. manager: The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use the `TaskManager` specified during the creation of the :class:`Work`. task_class: Task subclass to instantiate. Default: :class:`AbinitTask` Returns: :class:`Task` object """ task_workdir = None if hasattr(self, "workdir"): task_workdir = os.path.join(self.workdir, "t" + str(len(self))) if isinstance(obj, Task): task = obj else: # Set the class if task_class is None: task_class = AbinitTask #from .strategies import HtcStrategy #if isinstance(obj, HtcStrategy): # # Create the new task (note the factory so that we create subclasses easily). # raise NotImplementedError("HtcStrategy") # task = task_class(obj, task_workdir, manager) # #else: task = task_class.from_input(obj, task_workdir, manager) self._tasks.append(task) # Handle possible dependencies. 
if deps is not None: deps = [Dependency(node, exts) for node, exts in deps.items()] task.add_deps(deps) # Handle possible dependencies. if required_files is not None: task.add_required_files(required_files) return task # Needed by NodeContainer register_task = register def path_in_workdir(self, filename): """Create the absolute path of filename in the working directory.""" return os.path.join(self.workdir, filename) def setup(self, *args, **kwargs): """ Method called before running the calculations. The default implementation is empty. """ def build(self, *args, **kwargs): """Creates the top level directory.""" # Create the directories of the work. self.indir.makedirs() self.outdir.makedirs() self.tmpdir.makedirs() # Build dirs and files of each task. for task in self: task.build(*args, **kwargs) # Connect signals within the work. self.connect_signals() @property def status(self): """ Returns the status of the work i.e. the minimum of the status of the tasks. """ return self.get_all_status(only_min=True) def get_all_status(self, only_min=False): """ Returns a list with the status of the tasks in self. Args: only_min: If True, the minimum of the status is returned. """ if len(self) == 0: # The work will be created in the future. if only_min: return self.S_INIT else: return [self.S_INIT] self.check_status() status_list = [task.status for task in self] if only_min: return min(status_list) else: return status_list def check_status(self): """Check the status of the tasks.""" # Recompute the status of the tasks for task in self: if task.status == task.S_LOCKED: continue task.check_status() # Take into account possible dependencies. 
Use a list instead of generators for task in self: if task.status == task.S_LOCKED: continue if task.status < task.S_SUB and all([status == task.S_OK for status in task.deps_status]): task.set_status(task.S_READY, "Status set to Ready") def rmtree(self, exclude_wildcard=""): """ Remove all files and directories in the working directory Args: exclude_wildcard: Optional string with regular expressions separated by `|`. Files matching one of the regular expressions will be preserved. example: exclude_wildard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"]. """ if not exclude_wildcard: shutil.rmtree(self.workdir) else: w = WildCard(exclude_wildcard) for dirpath, dirnames, filenames in os.walk(self.workdir): for fname in filenames: path = os.path.join(dirpath, fname) if not w.match(fname): os.remove(path) def rm_indatadir(self): """Remove all the indata directories.""" for task in self: task.rm_indatadir() def rm_outdatadir(self): """Remove all the indata directories.""" for task in self: task.rm_outatadir() def rm_tmpdatadir(self): """Remove all the tmpdata directories.""" for task in self: task.rm_tmpdatadir() def move(self, dest, isabspath=False): """ Recursively move self.workdir to another location. This is similar to the Unix "mv" command. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. Be default, dest is located in the parent directory of self.workdir, use isabspath=True to specify an absolute path. """ if not isabspath: dest = os.path.join(os.path.dirname(self.workdir), dest) shutil.move(self.workdir, dest) def submit_tasks(self, wait=False): """ Submits the task in self and wait. TODO: change name. """ for task in self: task.start() if wait: for task in self: task.wait() def start(self, *args, **kwargs): """ Start the work. Calls build and _setup first, then submit the tasks. 
Non-blocking call unless wait is set to True """ wait = kwargs.pop("wait", False) # Initial setup self._setup(*args, **kwargs) # Build dirs and files. self.build(*args, **kwargs) # Submit tasks (does not block) self.submit_tasks(wait=wait) def read_etotals(self, unit="Ha"): """ Reads the total energy from the GSR file produced by the task. Return a numpy array with the total energies in Hartree The array element is set to np.inf if an exception is raised while reading the GSR file. """ if not self.all_done: raise self.Error("Some task is still in running/submitted state") etotals = [] for task in self: # Open the GSR file and read etotal (Hartree) gsr_path = task.outdir.has_abiext("GSR") etot = np.inf if gsr_path: with ETSF_Reader(gsr_path) as r: etot = r.read_value("etotal") etotals.append(etot) return EnergyArray(etotals, "Ha").to(unit) def parse_timers(self): """ Parse the TIMER section reported in the ABINIT output files. Returns: :class:`AbinitTimerParser` object """ filenames = list(filter(os.path.exists, [task.output_file.path for task in self])) parser = AbinitTimerParser() parser.parse(filenames) return parser class BandStructureWork(Work): """Work for band structure calculations.""" def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None): """ Args: scf_input: Input for the SCF run nscf_input: Input for the NSCF run defining the band structure calculation. dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None. workdir: Working directory. manager: :class:`TaskManager` object. """ super(BandStructureWork, self).__init__(workdir=workdir, manager=manager) # Register the GS-SCF run. self.scf_task = self.register_scf_task(scf_input) # Register the NSCF run and its dependency. self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"}) # Add DOS computation(s) if requested. 
self.dos_tasks = [] if dos_inputs is not None: if not isinstance(dos_inputs, (list, tuple)): dos_inputs = [dos_inputs] for dos_input in dos_inputs: dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"}) self.dos_tasks.append(dos_task) def plot_ebands(self, **kwargs): """ Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`. Returns: `matplotlib` figure """ with self.nscf_task.open_gsr() as gsr: return gsr.ebands.plot(**kwargs) def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs): """ Plot the band structure and the DOS. Args: dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task). method: String defining the method for the computation of the DOS. step: Energy step (eV) of the linear mesh. width: Standard deviation (eV) of the gaussian. kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot. Returns: `matplotlib` figure. """ with self.nscf_task.open_gsr() as gsr: gs_ebands = gsr.ebands with self.dos_tasks[dos_pos].open_gsr() as gsr: dos_ebands = gsr.ebands edos = dos_ebands.get_edos(method=method, step=step, width=width) return gs_ebands.plot_with_edos(edos, **kwargs) def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs): """ Plot the band structure and the DOS. Args: dos_pos: Index of the task from which the DOS should be obtained. None is all DOSes should be displayed. Accepts integer or list of integers. method: String defining the method for the computation of the DOS. step: Energy step (eV) of the linear mesh. width: Standard deviation (eV) of the gaussian. kwargs: Keyword arguments passed to `plot` method to customize the plot. Returns: `matplotlib` figure. 
""" if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos] from abipy.electrons.ebands import ElectronDosPlotter plotter = ElectronDosPlotter() for i, task in enumerate(self.dos_tasks): if dos_pos is not None and i not in dos_pos: continue with task.open_gsr() as gsr: edos = gsr.ebands.get_edos(method=method, step=step, width=width) ngkpt = task.get_inpvar("ngkpt") plotter.add_edos("ngkpt %s" % str(ngkpt), edos) return plotter.plot(**kwargs) class RelaxWork(Work): """ Work for structural relaxations. The first task relaxes the atomic position while keeping the unit cell parameters fixed. The second task uses the final structure to perform a structural relaxation in which both the atomic positions and the lattice parameters are optimized. """ def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None): """ Args: ion_input: Input for the relaxation of the ions (cell is fixed) ioncell_input: Input for the relaxation of the ions and the unit cell. workdir: Working directory. manager: :class:`TaskManager` object. """ super(RelaxWork, self).__init__(workdir=workdir, manager=manager) self.ion_task = self.register_relax_task(ion_input) # Note: # 1) It would be nice to restart from the WFK file but ABINIT crashes due to the # different unit cell parameters if paral_kgb == 1 #paral_kgb = ion_input[0]["paral_kgb"] #if paral_kgb == 1: #deps = {self.ion_task: "WFK"} # --> FIXME: Problem in rwwf #deps = {self.ion_task: "DEN"} deps = None self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps) # Lock ioncell_task as ion_task should communicate to ioncell_task that # the calculation is OK and pass the final structure. self.ioncell_task.lock(source_node=self) self.transfer_done = False self.target_dilatmx = target_dilatmx #@check_spectator def on_ok(self, sender): """ This callback is called when one task reaches status S_OK. 
If sender == self.ion_task, we update the initial structure used by self.ioncell_task and we unlock it so that the job can be submitted. """ logger.debug("in on_ok with sender %s" % sender) if sender == self.ion_task and not self.transfer_done: # Get the relaxed structure from ion_task ion_structure = self.ion_task.get_final_structure() # Transfer it to the ioncell task (we do it only once). self.ioncell_task._change_structure(ion_structure) self.transfer_done = True # Unlock ioncell_task so that we can submit it. self.ioncell_task.unlock(source_node=self) elif sender == self.ioncell_task and self.target_dilatmx: actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.) if self.target_dilatmx < actual_dilatmx: self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx) logger.info('Converging dilatmx. Value reduce from {} to {}.' .format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx'))) self.ioncell_task.reset_from_scratch() return super(RelaxWork, self).on_ok(sender) def plot_ion_relaxation(self, **kwargs): """ Plot the history of the ion-cell relaxation. kwargs are passed to the plot method of :class:`HistFile` Return `matplotlib` figure or None if hist file is not found. """ with self.ion_task.open_hist() as hist: return hist.plot(**kwargs) if hist else None def plot_ioncell_relaxation(self, **kwargs): """ Plot the history of the ion-cell relaxation. kwargs are passed to the plot method of :class:`HistFile` Return `matplotlib` figure or None if hist file is not found. """ with self.ioncell_task.open_hist() as hist: return hist.plot(**kwargs) if hist else None class G0W0Work(Work): """ Work for G0W0 calculations. """ def __init__(self, scf_input, nscf_input, scr_input, sigma_inputs, workdir=None, manager=None, spread_scr=False, nksmall=None): """ Args: scf_input: Input for the SCF run nscf_input: Input for the NSCF run scr_input: Input for the screening run sigma_inputs: List of :class:AbinitInput`for the self-energy run. 
workdir: Working directory of the calculation. manager: :class:`TaskManager` object. spread_scr: Attach a screening task to every sigma task if false only one screening task with the max ecuteps and nbands for all sigma tasks nksmall: if not None add a dos and bands calculation to the Work """ super(G0W0Work, self).__init__(workdir=workdir, manager=manager) # Register the GS-SCF run. # register all scf_inputs but link the nscf only the last scf in the list #MG: FIXME Why this? if isinstance(scf_input, (list, tuple)): for single_scf_input in scf_input: self.scf_task = self.register_scf_task(single_scf_input) else: self.scf_task = self.register_scf_task(scf_input) nogw = False if nksmall: raise NotImplementedError("with nksmall but strategies have been removed") # if nksmall add bandstructure and dos calculations as well from abiobjects import KSampling if nksmall < 0: nksmall = -nksmall nogw = True scf_in = scf_input[-1] if isinstance(scf_input, (list, tuple)) else scf_input logger.info('added band structure calculation') bands_input = NscfStrategy(scf_strategy=scf_in, ksampling=KSampling.path_from_structure(ndivsm=nksmall, structure=scf_in.structure), nscf_nband=scf_in.electrons.nband, ecut=scf_in.ecut, chksymbreak=0, tolwfr=1e-18) self.bands_task = self.register_nscf_task(bands_input, deps={self.scf_task: "DEN"}) # note we don not let abinit print the dos, since this is inconpatible with parakgb # the dos will be evaluated later using abipy dos_input = NscfStrategy(scf_strategy=scf_in, ksampling=KSampling.automatic_density(kppa=nksmall**3, structure=scf_in.structure, shifts=(0.0, 0.0, 0.0)), nscf_nband=scf_in.electrons.nband, ecut=scf_in.ecut, chksymbreak=0) self.dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"}) #from abiobjects import KSampling #if nksmall < 0: # nksmall = -nksmall # nogw = True #scf_in = scf_input[-1] if isinstance(scf_input, (list, tuple)) else scf_input #logger.info('added band structure calculation') #bands_input = 
NscfStrategy(scf_strategy=scf_in, # ksampling=KSampling.path_from_structure(ndivsm=nksmall, structure=scf_in.structure), # nscf_nband=scf_in.electrons.nband, ecut=scf_in.ecut, chksymbreak=0) #self.bands_task = self.register_nscf_task(bands_input, deps={self.scf_task: "DEN"}) ## note we don not let abinit print the dos, since this is inconpatible with parakgb ## the dos will be evaluated later using abipy #dos_input = NscfStrategy(scf_strategy=scf_in, # ksampling=KSampling.automatic_density(kppa=nksmall**3, structure=scf_in.structure, # shifts=(0.0, 0.0, 0.0)), # nscf_nband=scf_in.electrons.nband, ecut=scf_in.ecut, chksymbreak=0) #self.dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"}) # Register the SIGMA runs. if not nogw: # Construct the input for the NSCF run. self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"}) # Register the SCREENING run. if not spread_scr: self.scr_task = scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"}) else: self.scr_tasks = [] if not isinstance(sigma_inputs, (list, tuple)): sigma_inputs = [sigma_inputs] self.sigma_tasks = [] for sigma_input in sigma_inputs: if spread_scr: new_scr_input = copy.deepcopy(scr_input) new_scr_input.screening.ecuteps = sigma_input.sigma.ecuteps new_scr_input.screening.nband = sigma_input.sigma.nband new_scr_input.electrons.nband = sigma_input.sigma.nband scr_task = self.register_scr_task(new_scr_input, deps={nscf_task: "WFK"}) task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"}) self.sigma_tasks.append(task) class SigmaConvWork(Work): """ Work for self-energy convergence studies. """ def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None): """ Args: wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file. scr_node: The node who has produced the SCR file or filepath pointing to the SCR file. 
sigma_inputs: List of :class:`AbinitInput` for the self-energy runs. workdir: Working directory of the calculation. manager: :class:`TaskManager` object. """ # Cast to node instances. wfk_node, scr_node = Node.as_node(wfk_node), Node.as_node(scr_node) super(SigmaConvWork, self).__init__(workdir=workdir, manager=manager) # Register the SIGMA runs. if not isinstance(sigma_inputs, (list, tuple)): sigma_inputs = [sigma_inputs] for sigma_input in sigma_inputs: self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"}) class BseMdfWork(Work): """ Work for simple BSE calculations in which the self-energy corrections are approximated by the scissors operator and the screening is modeled with the model dielectric function. """ def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None): """ Args: scf_input: Input for the SCF run. nscf_input: Input for the NSCF run. bse_inputs: List of Inputs for the BSE run. workdir: Working directory of the calculation. manager: :class:`TaskManager`. """ super(BseMdfWork, self).__init__(workdir=workdir, manager=manager) # Register the GS-SCF run. self.scf_task = self.register_scf_task(scf_input) # Construct the input for the NSCF run. self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"}) # Construct the input(s) for the BSE run. 
if not isinstance(bse_inputs, (list, tuple)): bse_inputs = [bse_inputs] for bse_input in bse_inputs: self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"}) def get_mdf_robot(self): """Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files.""" from abilab.robots import MdfRobot robot = MdfRobot() for task in self[2:]: mdf_path = task.outdir.has_abiext(robot.EXT) if mdf_path: robot.add_file(str(task), mdf_path) return robot #def plot_conv_mdf(self, **kwargs) # with self.get_mdf_robot() as robot: # robot.get_mdf_plooter() # plotter.plot(**kwargs) class QptdmWork(Work): """ This work parallelizes the calculation of the q-points of the screening. It also provides the callback `on_all_ok` that calls mrgscr to merge all the partial screening files produced. """ def create_tasks(self, wfk_file, scr_input): """ Create the SCR tasks and register them in self. Args: wfk_file: Path to the ABINIT WFK file to use for the computation of the screening. scr_input: Input for the screening calculation. """ assert len(self) == 0 wfk_file = self.wfk_file = os.path.abspath(wfk_file) # Build a temporary work in the tmpdir that will use a shell manager # to run ABINIT in order to get the list of q-points for the screening. shell_manager = self.manager.to_shell_manager(mpi_procs=1) w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager) fake_input = scr_input.deepcopy() fake_task = w.register(fake_input) w.allocate() w.build() # Create the symbolic link and add the magic value # nqpdm = -1 to the input to get the list of q-points. 
fake_task.inlink_file(wfk_file) fake_task._set_inpvars({"nqptdm": -1}) fake_task.start_and_wait() # Parse the section with the q-points with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader: qpoints = reader.read_value("reduced_coordinates_of_kpoints") #print("qpoints) #w.rmtree() # Now we can register the task for the different q-points for qpoint in qpoints: qptdm_input = scr_input.deepcopy() qptdm_input.set_vars(nqptdm=1, qptdm=qpoint) new_task = self.register_scr_task(qptdm_input, manager=self.manager) # Add the garbage collector. if self.flow.gc is not None: new_task.set_gc(self.flow.gc) self.allocate() def merge_scrfiles(self, remove_scrfiles=True): """ This method is called when all the q-points have been computed. It runs `mrgscr` in sequential on the local machine to produce the final SCR file in the outdir of the `Work`. If remove_scrfiles is True, the partial SCR files are removed after the merge. """ scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self])) logger.debug("will call mrgscr to merge %s:\n" % str(scr_files)) assert len(scr_files) == len(self) mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1) final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out") if remove_scrfiles: for scr_file in scr_files: try: os.remove(scr_file) except IOError: pass return final_scr #@check_spectator def on_all_ok(self): """ This method is called when all the q-points have been computed. It runs `mrgscr` in sequential on the local machine to produce the final SCR file in the outdir of the `Work`. 
""" final_scr = self.merge_scrfiles() return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr) def build_oneshot_phononwork(scf_input, ph_inputs, workdir=None, manager=None, work_class=None): """ Returns a work for the computation of phonon frequencies ph_inputs is a list of input for Phonon calculation in which all the independent perturbations are explicitly computed i.e. * rfdir 1 1 1 * rfatpol 1 natom .. warning:: This work is mainly used for simple calculations, e.g. convergence studies. Use :class:`PhononWork` for better efficiency. """ work_class = OneShotPhononWork if work_class is None else work_class work = work_class(workdir=workdir, manager=manager) scf_task = work.register_scf_task(scf_input) ph_inputs = [ph_inputs] if not isinstance(ph_inputs, (list, tuple)) else ph_inputs for phinp in ph_inputs: # Check rfdir and rfatpol. rfdir = np.array(phinp.get("rfdir", [0, 0, 0])) if len(rfdir) != 3 or any(rfdir != (1, 1, 1)): raise ValueError("Expecting rfdir == (1, 1, 1), got %s" % rfdir) rfatpol = np.array(phinp.get("rfatpol", [1, 1])) if len(rfatpol) != 2 or any(rfatpol != (1, len(phinp.structure))): raise ValueError("Expecting rfatpol == (1, natom), got %s" % rfatpol) # cannot use PhononTaks here because the Task is not able to deal with multiple phonon calculations ph_task = work.register(phinp, deps={scf_task: "WFK"}) return work class OneShotPhononWork(Work): """ Simple and very inefficient work for the computation of the phonon frequencies It consists of a GS task and a DFPT calculations for all the independent perturbations. The main advantage is that one has direct access to the phonon frequencies that can be computed at the end of the second task without having to call anaddb. Use ``build_oneshot_phononwork`` to construct this work from the input files. """ def read_phonons(self): """ Read phonon frequencies from the output file. Return: List of namedtuples. 
Each `namedtuple` has the following attributes: - qpt: ndarray with the q-point in reduced coordinates. - freqs: ndarray with 3 x Natom phonon frequencies in meV """ # # Phonon wavevector (reduced coordinates) : 0.00000 0.00000 0.00000 # Phonon energies in Hartree : # 1.089934E-04 4.990512E-04 1.239177E-03 1.572715E-03 1.576801E-03 # 1.579326E-03 # Phonon frequencies in cm-1 : # - 2.392128E+01 1.095291E+02 2.719679E+02 3.451711E+02 3.460677E+02 # - 3.466221E+02 BEGIN = " Phonon wavevector (reduced coordinates) :" END = " Phonon frequencies in cm-1 :" ph_tasks, qpts, phfreqs = self[1:], [], [] for task in ph_tasks: # Parse output file. with open(task.output_file.path, "r") as fh: qpt, inside = None, 0 for line in fh: if line.startswith(BEGIN): qpts.append([float(s) for s in line[len(BEGIN):].split()]) inside, omegas = 1, [] elif line.startswith(END): break elif inside: inside += 1 if inside > 2: omegas.extend((float(s) for s in line.split())) else: raise ValueError("Cannot find %s in file %s" % (END, task.output_file.path)) phfreqs.append(omegas) # Use namedtuple to store q-point and frequencies in meV phonon = collections.namedtuple("phonon", "qpt freqs") return [phonon(qpt=qpt, freqs=freqs_meV) for qpt, freqs_meV in zip(qpts, EnergyArray(phfreqs, "Ha").to("meV") )] def get_results(self, **kwargs): results = super(OneShotPhononWork, self).get_results() phonons = self.read_phonons() results.update(phonons=phonons) return results class MergeDdb(object): """Mixin classes for Works that have to merge the DDB files produced by the tasks.""" def merge_ddb_files(self): """ This method is called when all the q-points have been computed. It runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. 
Returns: path to the output DDB file """ ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in self])) self.history.info("Will call mrgddb to merge %s:\n" % str(ddb_files)) # assert len(ddb_files) == len(self) #if len(ddb_files) == 1: # Avoid the merge. Just move the DDB file to the outdir of the work # Final DDB file will be produced in the outdir of the work. out_ddb = self.outdir.path_in("out_DDB") desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime()) mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0) mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc) return out_ddb class PhononWork(Work, MergeDdb): """ This work usually consists of nirred Phonon tasks where nirred is the number of irreducible perturbations for a given q-point. It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced """ @classmethod def from_scf_task(cls, scf_task, qpt, tolerance=None): """ Construct a `PhononWork` from a :class:`ScfTask` object. The input file for phonons is automatically generated from the input of the ScfTask. Each phonon task depends on the WFK file produced by scf_task. Args: scf_task: ScfTask object. qpt: q-point for phonons in reduced coordinates. """ if not isinstance(scf_task, ScfTask): raise TypeError("task %s does not inherit from ScfTask" % scf_task) new = cls() #manager=scf_task.manager) multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance) for ph_inp in multi: new.register_phonon_task(ph_inp, deps={scf_task: "WFK"}) return new # TODO #def compute_phonons(self) # """ # Call anaddb to compute the phonon frequencies for this q-point and # store the results in the outdir of the work. 
# """ # #atask = AnaddbTask(anaddb_input, ddb_node, # # gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None) # #atask.start_and_wait() # return phonons #@check_spectator def on_all_ok(self): """ This method is called when all the q-points have been computed. Ir runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. """ # Merge DDB files. out_ddb = self.merge_ddb_files() results = self.Results(node=self, returncode=0, message="DDB merge done") results.register_gridfs_files(DDB=(out_ddb, "t")) return results class BecWork(Work, MergeDdb): """ Work for the computation of the Born effective charges. This work consists of DDK tasks and phonon + electric fiel perturbation It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced """ @classmethod def from_scf_task(cls, scf_task, ddk_tolerance=None): """Build a BecWork from a ground-state task.""" if not isinstance(scf_task, ScfTask): raise TypeError("task %s does not inherit from GsTask" % scf_task) new = cls() #manager=scf_task.manager) # DDK calculations multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance) ddk_tasks = [] for ddk_inp in multi_ddk: ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"}) ddk_tasks.append(ddk_task) # Build the list of inputs for electric field perturbation and phonons # Each bec task is connected to all the previous DDK task and to the scf_task. bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks} bec_deps.update({scf_task: "WFK"}) bec_inputs = scf_task.input.make_bec_inputs() #tolerance=efile for bec_inp in bec_inputs: new.register_bec_task(bec_inp, deps=bec_deps) return new def on_all_ok(self): """ This method is called when all the task reach S_OK Ir runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. """ # Merge DDB files. 
out_ddb = self.merge_ddb_files() results = self.Results(node=self, returncode=0, message="DDB merge done") results.register_gridfs_files(DDB=(out_ddb, "t")) return results
rousseab/pymatgen
pymatgen/io/abinitio/works.py
Python
mit
52,370
[ "ABINIT", "Gaussian", "NetCDF", "pymatgen" ]
a2dc0cbef396c50c3159f61fec5cfd20eb82102af153a2de79f46fef78322e8f
""" @name: Modules/House/Security/garage_door.py @author: D. Brian Kimmel @contact: D.BrianKimmel@gmail.com @copyright: (c) 2019-2019 by D. Brian Kimmel @license: MIT License @note: Created on aug 26, 2019 @Summary: """ __updated__ = '2019-12-29' __version_info__ = (19, 10, 2) __version__ = '.'.join(map(str, __version_info__)) # Import system type stuff # Import PyMh files from Modules.Core.Config.config_tools import Api as configApi from Modules.Core import logging_pyh as Logger LOG = Logger.getLogger('PyHouse.GarageDoor ') CONFIG_NAME = 'garagedoors' class GarageDoorInformation: """ ==> PyHouse.House.Security.Garage_Doors.xxx as in the def below """ def __init__(self): self.Name = None self.Comment = None self.DeviceType = 'Security' self.DeviceSubType = 'GarageDoor' self.Family = None # FamilyInformation() self.Room = None # RoomInformation() self.Status = None # Open | Closed class LocalConfig: """ """ m_config = None m_pyhouse_obj = None def __init__(self, p_pyhouse_obj): self.m_pyhouse_obj = p_pyhouse_obj self.m_config = configApi(p_pyhouse_obj) def _extract_one_garage_door(self, p_config) -> dict: """ Extract the config info for one button. - Name: Button 1 Comment: This is _test button 1 Family: Name: Insteon Address: 11.44.33 Dimmable: true # Optional Room: Name: Living Room @param p_config: is the config fragment containing one button's information. @return: a ButtonInformation() obj filled in. """ l_obj = GarageDoorInformation() l_required = ['Name', 'Family'] for l_key, l_value in p_config.items(): if l_key == 'Family': l_obj.Family = self.m_config.extract_family_group(l_value) elif l_key == 'Room': l_obj.Room = self.m_config.extract_room_group(l_value) pass else: setattr(l_obj, l_key, l_value) # Check for required data missing from the config file. 
for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]: if getattr(l_obj, l_key) == None and l_key in l_required: LOG.warning('Location Yaml is missing an entry for "{}"'.format(l_key)) LOG.info('Extracted Garage Door "{}"'.format(l_obj.Name)) return l_obj def _extract_all_garage_doors(self, p_config): """ Get all of the button sets configured A Button set is a (mini-remote) with 4 or 8 buttons in the set The set has one insteon address and each button is in a group """ l_dict = {} for l_ix, l_button in enumerate(p_config): # print('Light: {}'.format(l_light)) l_gdo_obj = self._extract_one_garage_door(l_button) l_dict[l_ix] = l_gdo_obj return l_dict def load_yaml_config(self): """ Read the lights.yaml file if it exists. No file = no lights. It must contain 'Lights:' All the lights are a list. """ l_yaml = self.m_config.read_config_file(CONFIG_NAME) if l_yaml == None: LOG.error('{}.yaml is missing.'.format(CONFIG_NAME)) return None try: l_yaml = l_yaml['Garage_Doors'] except: LOG.warning('The config file does not start with "Garage_Doors:"') return None l_gdo = self._extract_all_garage_doors(l_yaml) return l_gdo class Api: """ """ m_pyhouse_obj = None m_local_config = None def __init__(self, p_pyhouse_obj): self.m_pyhouse_obj = p_pyhouse_obj self._add_storage() self.m_local_config = LocalConfig(p_pyhouse_obj) LOG.info("Initialized - Version:{}".format(__version__)) def _add_storage(self) -> None: """ """ self.m_pyhouse_obj.House.Security.Garage_Doors = {} def LoadConfig(self): """ """ LOG.info('Loading Config') self.m_pyhouse_obj.House.Security.Garage_Doors = self.m_local_config.load_yaml_config() LOG.info('Loaded {} Garage Door(s).'.format(len(self.m_pyhouse_obj.House.Security.Garage_Doors))) def Start(self): """ """ def SaveConfig(self): """ """ pass def Stop(self): """ """ pass # ## END DBK
DBrianKimmel/PyHouse
Project/src/Modules/House/Security/Garagedoors/garagedoors.py
Python
mit
4,586
[ "Brian" ]
f3595fb1cd19f26c65862d792942803f6548cd70b7fccae155a84373750208a0
import math from ..libmp.backend import xrange class QuadratureRule(object): """ Quadrature rules are implemented using this class, in order to simplify the code and provide a common infrastructure for tasks such as error estimation and node caching. You can implement a custom quadrature rule by subclassing :class:`QuadratureRule` and implementing the appropriate methods. The subclass can then be used by :func:`~mpmath.quad` by passing it as the *method* argument. :class:`QuadratureRule` instances are supposed to be singletons. :class:`QuadratureRule` therefore implements instance caching in :func:`~mpmath.__new__`. """ def __init__(self, ctx): self.ctx = ctx self.standard_cache = {} self.transformed_cache = {} self.interval_count = {} def clear(self): """ Delete cached node data. """ self.standard_cache = {} self.transformed_cache = {} self.interval_count = {} def calc_nodes(self, degree, prec, verbose=False): r""" Compute nodes for the standard interval `[-1, 1]`. Subclasses should probably implement only this method, and use :func:`~mpmath.get_nodes` method to retrieve the nodes. """ raise NotImplementedError def get_nodes(self, a, b, degree, prec, verbose=False): """ Return nodes for given interval, degree and precision. The nodes are retrieved from a cache if already computed; otherwise they are computed by calling :func:`~mpmath.calc_nodes` and are then cached. Subclasses should probably not implement this method, but just implement :func:`~mpmath.calc_nodes` for the actual node computation. 
""" key = (a, b, degree, prec) if key in self.transformed_cache: return self.transformed_cache[key] orig = self.ctx.prec try: self.ctx.prec = prec+20 # Get nodes on standard interval if (degree, prec) in self.standard_cache: nodes = self.standard_cache[degree, prec] else: nodes = self.calc_nodes(degree, prec, verbose) self.standard_cache[degree, prec] = nodes # Transform to general interval nodes = self.transform_nodes(nodes, a, b, verbose) if key in self.interval_count: self.transformed_cache[key] = nodes else: self.interval_count[key] = True finally: self.ctx.prec = orig return nodes def transform_nodes(self, nodes, a, b, verbose=False): r""" Rescale standardized nodes (for `[-1, 1]`) to a general interval `[a, b]`. For a finite interval, a simple linear change of variables is used. Otherwise, the following transformations are used: .. math :: [a, \infty] : t = \frac{1}{x} + (a-1) [-\infty, b] : t = (b+1) - \frac{1}{x} [-\infty, \infty] : t = \frac{x}{\sqrt{1-x^2}} """ ctx = self.ctx a = ctx.convert(a) b = ctx.convert(b) one = ctx.one if (a, b) == (-one, one): return nodes half = ctx.mpf(0.5) new_nodes = [] if ctx.isinf(a) or ctx.isinf(b): if (a, b) == (ctx.ninf, ctx.inf): p05 = -half for x, w in nodes: x2 = x*x px1 = one-x2 spx1 = px1**p05 x = x*spx1 w *= spx1/px1 new_nodes.append((x, w)) elif a == ctx.ninf: b1 = b+1 for x, w in nodes: u = 2/(x+one) x = b1-u w *= half*u**2 new_nodes.append((x, w)) elif b == ctx.inf: a1 = a-1 for x, w in nodes: u = 2/(x+one) x = a1+u w *= half*u**2 new_nodes.append((x, w)) elif a == ctx.inf or b == ctx.ninf: return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)] else: raise NotImplementedError else: # Simple linear change of variables C = (b-a)/2 D = (b+a)/2 for x, w in nodes: new_nodes.append((D+C*x, C*w)) return new_nodes def guess_degree(self, prec): """ Given a desired precision `p` in bits, estimate the degree `m` of the quadrature required to accomplish full accuracy for typical integrals. 
By default, :func:`~mpmath.quad` will perform up to `m` iterations. The value of `m` should be a slight overestimate, so that "slightly bad" integrals can be dealt with automatically using a few extra iterations. On the other hand, it should not be too big, so :func:`~mpmath.quad` can quit within a reasonable amount of time when it is given an "unsolvable" integral. The default formula used by :func:`~mpmath.guess_degree` is tuned for both :class:`TanhSinh` and :class:`GaussLegendre`. The output is roughly as follows: +---------+---------+ | `p` | `m` | +=========+=========+ | 50 | 6 | +---------+---------+ | 100 | 7 | +---------+---------+ | 500 | 10 | +---------+---------+ | 3000 | 12 | +---------+---------+ This formula is based purely on a limited amount of experimentation and will sometimes be wrong. """ # Expected degree # XXX: use mag g = int(4 + max(0, self.ctx.log(prec/30.0, 2))) # Reasonable "worst case" g += 2 return g def estimate_error(self, results, prec, epsilon): r""" Given results from integrations `[I_1, I_2, \ldots, I_k]` done with a quadrature of rule of degree `1, 2, \ldots, k`, estimate the error of `I_k`. For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`. For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|` from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption that each degree increment roughly doubles the accuracy of the quadrature rule (this is true for both :class:`TanhSinh` and :class:`GaussLegendre`). The extrapolation formula is given by Borwein, Bailey & Girgensohn. Although not very conservative, this method seems to be very robust in practice. 
""" if len(results) == 2: return abs(results[0]-results[1]) try: if results[-1] == results[-2] == results[-3]: return self.ctx.zero D1 = self.ctx.log(abs(results[-1]-results[-2]), 10) D2 = self.ctx.log(abs(results[-1]-results[-3]), 10) except ValueError: return epsilon D3 = -prec D4 = min(0, max(D1**2/D2, 2*D1, D3)) return self.ctx.mpf(10) ** int(D4) def summation(self, f, points, prec, epsilon, max_degree, verbose=False): """ Main integration function. Computes the 1D integral over the interval specified by *points*. For each subinterval, performs quadrature of degree from 1 up to *max_degree* until :func:`~mpmath.estimate_error` signals convergence. :func:`~mpmath.summation` transforms each subintegration to the standard interval and then calls :func:`~mpmath.sum_next`. """ ctx = self.ctx I = err = ctx.zero for i in xrange(len(points)-1): a, b = points[i], points[i+1] if a == b: continue # XXX: we could use a single variable transformation, # but this is not good in practice. We get better accuracy # by having 0 as an endpoint. if (a, b) == (ctx.ninf, ctx.inf): _f = f f = lambda x: _f(-x) + _f(x) a, b = (ctx.zero, ctx.inf) results = [] for degree in xrange(1, max_degree+1): nodes = self.get_nodes(a, b, degree, prec, verbose) if verbose: print("Integrating from %s to %s (degree %s of %s)" % \ (ctx.nstr(a), ctx.nstr(b), degree, max_degree)) results.append(self.sum_next(f, nodes, degree, prec, results, verbose)) if degree > 1: err = self.estimate_error(results, prec, epsilon) if err <= epsilon: break if verbose: print("Estimated error:", ctx.nstr(err)) I += results[-1] if err > epsilon: if verbose: print("Failed to reach full accuracy. Estimated error:", ctx.nstr(err)) return I, err def sum_next(self, f, nodes, degree, prec, previous, verbose=False): r""" Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list contains the `(w_k, x_k)` pairs. 
:func:`~mpmath.summation` will supply the list *results* of values computed by :func:`~mpmath.sum_next` at previous degrees, in case the quadrature rule is able to reuse them. """ return self.ctx.fdot((w, f(x)) for (x,w) in nodes) class TanhSinh(QuadratureRule): r""" This class implements "tanh-sinh" or "doubly exponential" quadrature. This quadrature rule is based on the Euler-Maclaurin integral formula. By performing a change of variables involving nested exponentials / hyperbolic functions (hence the name), the derivatives at the endpoints vanish rapidly. Since the error term in the Euler-Maclaurin formula depends on the derivatives at the endpoints, a simple step sum becomes extremely accurate. In practice, this means that doubling the number of evaluation points roughly doubles the number of accurate digits. Comparison to Gauss-Legendre: * Initial computation of nodes is usually faster * Handles endpoint singularities better * Handles infinite integration intervals better * Is slower for smooth integrands once nodes have been computed The implementation of the tanh-sinh algorithm is based on the description given in Borwein, Bailey & Girgensohn, "Experimentation in Mathematics - Computational Paths to Discovery", A K Peters, 2003, pages 312-313. In the present implementation, a few improvements have been made: * A more efficient scheme is used to compute nodes (exploiting recurrence for the exponential function) * The nodes are computed successively instead of all at once Various documents describing the algorithm are available online, e.g.: * http://crd.lbl.gov/~dhbailey/dhbpapers/dhb-tanh-sinh.pdf * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf """ def sum_next(self, f, nodes, degree, prec, previous, verbose=False): """ Step sum for tanh-sinh quadrature of degree `m`. We exploit the fact that half of the abscissas at degree `m` are precisely the abscissas from degree `m-1`. Thus reusing the result from the previous level allows a 2x speedup. 
""" h = self.ctx.mpf(2)**(-degree) # Abscissas overlap, so reusing saves half of the time if previous: S = previous[-1]/(h*2) else: S = self.ctx.zero S += self.ctx.fdot((w,f(x)) for (x,w) in nodes) return h*S def calc_nodes(self, degree, prec, verbose=False): r""" The abscissas and weights for tanh-sinh quadrature of degree `m` are given by .. math:: x_k = \tanh(\pi/2 \sinh(t_k)) w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2 where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The list of nodes is actually infinite, but the weights die off so rapidly that only a few are needed. """ ctx = self.ctx nodes = [] extra = 20 ctx.prec += extra tol = ctx.ldexp(1, -prec-10) pi4 = ctx.pi/4 # For simplicity, we work in steps h = 1/2^n, with the first point # offset so that we can reuse the sum from the previous degree # We define degree 1 to include the "degree 0" steps, including # the point x = 0. (It doesn't work well otherwise; not sure why.) t0 = ctx.ldexp(1, -degree) if degree == 1: #nodes.append((mpf(0), pi4)) #nodes.append((-mpf(0), pi4)) nodes.append((ctx.zero, ctx.pi/2)) h = t0 else: h = t0*2 # Since h is fixed, we can compute the next exponential # by simply multiplying by exp(h) expt0 = ctx.exp(t0) a = pi4 * expt0 b = pi4 / expt0 udelta = ctx.exp(h) urdelta = 1/udelta for k in xrange(0, 20*2**degree+1): # Reference implementation: # t = t0 + k*h # x = tanh(pi/2 * sinh(t)) # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2 # Fast implementation. Note that c = exp(pi/2 * sinh(t)) c = ctx.exp(a-b) d = 1/c co = (c+d)/2 si = (c-d)/2 x = si / co w = (a+b) / co**2 diff = abs(x-1) if diff <= tol: break nodes.append((x, w)) nodes.append((-x, w)) a *= udelta b *= urdelta if verbose and k % 300 == 150: # Note: the number displayed is rather arbitrary. 
Should # figure out how to print something that looks more like a # percentage print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec)) ctx.prec -= extra return nodes class GaussLegendre(QuadratureRule): """ This class implements Gauss-Legendre quadrature, which is exceptionally efficient for polynomials and polynomial-like (i.e. very smooth) integrands. The abscissas and weights are given by roots and values of Legendre polynomials, which are the orthogonal polynomials on `[-1, 1]` with respect to the unit weight (see :func:`~mpmath.legendre`). In this implementation, we take the "degree" `m` of the quadrature to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following Borwein, Bailey & Girgensohn). This way we get quadratic, rather than linear, convergence as the degree is incremented. Comparison to tanh-sinh quadrature: * Is faster for smooth integrands once nodes have been computed * Initial computation of nodes is usually slower * Handles endpoint singularities worse * Handles infinite integration intervals worse """ def calc_nodes(self, degree, prec, verbose=False): """ Calculates the abscissas and weights for Gauss-Legendre quadrature of degree of given degree (actually `3 \cdot 2^m`). 
""" ctx = self.ctx # It is important that the epsilon is set lower than the # "real" epsilon epsilon = ctx.ldexp(1, -prec-8) # Fairly high precision might be required for accurate # evaluation of the roots orig = ctx.prec ctx.prec = int(prec*1.5) if degree == 1: x = ctx.sqrt(ctx.mpf(3)/5) w = ctx.mpf(5)/9 nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)] ctx.prec = orig return nodes nodes = [] n = 3*2**(degree-1) upto = n//2 + 1 for j in xrange(1, upto): # Asymptotic formula for the roots r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5))) # Newton iteration while 1: t1, t2 = 1, 0 # Evaluates the Legendre polynomial using its defining # recurrence relation for j1 in xrange(1,n+1): t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1 t4 = n*(r*t1- t2)/(r**2-1) t5 = r a = t1/t4 r = r - a if abs(a) < epsilon: break x = r w = 2/((1-r**2)*t4**2) if verbose and j % 30 == 15: print("Computing nodes (%i of %i)" % (j, upto)) nodes.append((x, w)) nodes.append((-x, w)) ctx.prec = orig return nodes class QuadratureMethods(object): def __init__(ctx, *args, **kwargs): ctx._gauss_legendre = GaussLegendre(ctx) ctx._tanh_sinh = TanhSinh(ctx) def quad(ctx, f, *points, **kwargs): r""" Computes a single, double or triple integral over a given 1D interval, 2D rectangle, or 3D cuboid. A basic example:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> quad(sin, [0, pi]) 2.0 A basic 2D integral:: >>> f = lambda x, y: cos(x+y/2) >>> quad(f, [-pi/2, pi/2], [0, pi]) 4.0 **Interval format** The integration range for each dimension may be specified using a list or tuple. Arguments are interpreted as follows: ``quad(f, [x1, x2])`` -- calculates `\int_{x_1}^{x_2} f(x) \, dx` ``quad(f, [x1, x2], [y1, y2])`` -- calculates `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx` ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z) \, dz \, dy \, dx` Endpoints may be finite or infinite. 
An interval descriptor may also contain more than two points. In this case, the integration is split into subintervals, between each pair of consecutive points. This is useful for dealing with mid-interval discontinuities, or integrating over large intervals where the function is irregular or oscillates. **Options** :func:`~mpmath.quad` recognizes the following keyword arguments: *method* Chooses integration algorithm (described below). *error* If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the integral and `e` is the estimated error. *maxdegree* Maximum degree of the quadrature rule to try before quitting. *verbose* Print details about progress. **Algorithms** Mpmath presently implements two integration algorithms: tanh-sinh quadrature and Gauss-Legendre quadrature. These can be selected using *method='tanh-sinh'* or *method='gauss-legendre'* or by passing the classes *method=TanhSinh*, *method=GaussLegendre*. The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available as shortcuts. Both algorithms have the property that doubling the number of evaluation points roughly doubles the accuracy, so both are ideal for high precision quadrature (hundreds or thousands of digits). At high precision, computing the nodes and weights for the integration can be expensive (more expensive than computing the function values). To make repeated integrations fast, nodes are automatically cached. The advantages of the tanh-sinh algorithm are that it tends to handle endpoint singularities well, and that the nodes are cheap to compute on the first run. For these reasons, it is used by :func:`~mpmath.quad` as the default algorithm. Gauss-Legendre quadrature often requires fewer function evaluations, and is therefore often faster for repeated use, but the algorithm does not handle endpoint singularities as well and the nodes are more expensive to compute. 
Gauss-Legendre quadrature can be a better choice if the integrand is smooth and repeated integrations are required (e.g. for multiple integrals). See the documentation for :class:`TanhSinh` and :class:`GaussLegendre` for additional details. **Examples of 1D integrals** Intervals may be infinite or half-infinite. The following two examples evaluate the limits of the inverse tangent function (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral `\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`:: >>> mp.dps = 15 >>> quad(lambda x: 2/(x**2+1), [0, inf]) 3.14159265358979 >>> quad(lambda x: exp(-x**2), [-inf, inf])**2 3.14159265358979 Integrals can typically be resolved to high precision. The following computes 50 digits of `\pi` by integrating the area of the half-circle defined by `x^2 + y^2 \le 1`, `-1 \le x \le 1`, `y \ge 0`:: >>> mp.dps = 50 >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) 3.1415926535897932384626433832795028841971693993751 One can just as well compute 1000 digits (output truncated):: >>> mp.dps = 1000 >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS 3.141592653589793238462643383279502884...216420198 Complex integrals are supported. 
The following computes a residue at `z = 0` by integrating counterclockwise along the diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`:: >>> mp.dps = 15 >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1])) (0.0 + 6.28318530717959j) **Examples of 2D and 3D integrals** Here are several nice examples of analytically solvable 2D integrals (taken from MathWorld [1]) that can be evaluated to high precision fairly rapidly by :func:`~mpmath.quad`:: >>> mp.dps = 30 >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y)) >>> quad(f, [0, 1], [0, 1]) 0.577215664901532860606512090082 >>> +euler 0.577215664901532860606512090082 >>> f = lambda x, y: 1/sqrt(1+x**2+y**2) >>> quad(f, [-1, 1], [-1, 1]) 3.17343648530607134219175646705 >>> 4*log(2+sqrt(3))-2*pi/3 3.17343648530607134219175646705 >>> f = lambda x, y: 1/(1-x**2 * y**2) >>> quad(f, [0, 1], [0, 1]) 1.23370055013616982735431137498 >>> pi**2 / 8 1.23370055013616982735431137498 >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1]) 1.64493406684822643647241516665 >>> pi**2 / 6 1.64493406684822643647241516665 Multiple integrals may be done over infinite ranges:: >>> mp.dps = 15 >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf])) 0.367879441171442 >>> print(1/e) 0.367879441171442 For nonrectangular areas, one can call :func:`~mpmath.quad` recursively. For example, we can replicate the earlier example of calculating `\pi` by integrating over the unit-circle, and actually use double quadrature to actually measure the area circle:: >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)]) >>> quad(f, [-1, 1]) 3.14159265358979 Here is a simple triple integral:: >>> mp.dps = 15 >>> f = lambda x,y,z: x*y/(1+z) >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre') 0.101366277027041 >>> (log(3)-log(2))/4 0.101366277027041 **Singularities** Both tanh-sinh and Gauss-Legendre quadrature are designed to integrate smooth (infinitely differentiable) functions. 
Neither algorithm copes well with mid-interval singularities (such as mid-interval discontinuities in `f(x)` or `f'(x)`). The best solution is to split the integral into parts:: >>> mp.dps = 15 >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad 3.99900894176779 >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good 4.0 The tanh-sinh rule often works well for integrands having a singularity at one or both endpoints:: >>> mp.dps = 15 >>> quad(log, [0, 1], method='tanh-sinh') # Good -1.0 >>> quad(log, [0, 1], method='gauss-legendre') # Bad -0.999932197413801 However, the result may still be inaccurate for some functions:: >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh') 1.99999999946942 This problem is not due to the quadrature rule per se, but to numerical amplification of errors in the nodes. The problem can be circumvented by temporarily increasing the precision:: >>> mp.dps = 30 >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh') >>> mp.dps = 15 >>> +a 2.0 **Highly variable functions** For functions that are smooth (in the sense of being infinitely differentiable) but contain sharp mid-interval peaks or many "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For example, with default settings, :func:`~mpmath.quad` is able to integrate `\sin(x)` accurately over an interval of length 100 but not over length 1000:: >>> quad(sin, [0, 100]); 1-cos(100) # Good 0.137681127712316 0.137681127712316 >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad -37.8587612408485 0.437620923709297 One solution is to break the integration into 10 intervals of length 100:: >>> quad(sin, linspace(0, 1000, 10)) # Good 0.437620923709297 Another is to increase the degree of the quadrature:: >>> quad(sin, [0, 1000], maxdegree=10) # Also good 0.437620923709297 Whether splitting the interval or increasing the degree is more efficient differs from case to case. 
Another example is the function `1/(1+x^2)`, which has a sharp peak centered around `x = 0`:: >>> f = lambda x: 1/(1+x**2) >>> quad(f, [-100, 100]) # Bad 3.64804647105268 >>> quad(f, [-100, 100], maxdegree=10) # Good 3.12159332021646 >>> quad(f, [-100, 0, 100]) # Also good 3.12159332021646 **References** 1. http://mathworld.wolfram.com/DoubleIntegral.html """ rule = kwargs.get('method', 'tanh-sinh') if type(rule) is str: if rule == 'tanh-sinh': rule = ctx._tanh_sinh elif rule == 'gauss-legendre': rule = ctx._gauss_legendre else: raise ValueError("unknown quadrature rule: %s" % rule) else: rule = rule(ctx) verbose = kwargs.get('verbose') dim = len(points) orig = prec = ctx.prec epsilon = ctx.eps/8 m = kwargs.get('maxdegree') or rule.guess_degree(prec) points = [ctx._as_points(p) for p in points] try: ctx.prec += 20 if dim == 1: v, err = rule.summation(f, points[0], prec, epsilon, m, verbose) elif dim == 2: v, err = rule.summation(lambda x: \ rule.summation(lambda y: f(x,y), \ points[1], prec, epsilon, m)[0], points[0], prec, epsilon, m, verbose) elif dim == 3: v, err = rule.summation(lambda x: \ rule.summation(lambda y: \ rule.summation(lambda z: f(x,y,z), \ points[2], prec, epsilon, m)[0], points[1], prec, epsilon, m)[0], points[0], prec, epsilon, m, verbose) else: raise NotImplementedError("quadrature must have dim 1, 2 or 3") finally: ctx.prec = orig if kwargs.get("error"): return +v, err return +v def quadts(ctx, *args, **kwargs): """ Performs tanh-sinh quadrature. The call quadts(func, *points, ...) is simply a shortcut for: quad(func, *points, ..., method=TanhSinh) For example, a single integral and a double integral: quadts(lambda x: exp(cos(x)), [0, 1]) quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1]) See the documentation for quad for information about how points arguments and keyword arguments are parsed. See documentation for TanhSinh for algorithmic information about tanh-sinh quadrature. 
""" kwargs['method'] = 'tanh-sinh' return ctx.quad(*args, **kwargs) def quadgl(ctx, *args, **kwargs): """ Performs Gauss-Legendre quadrature. The call quadgl(func, *points, ...) is simply a shortcut for: quad(func, *points, ..., method=GaussLegendre) For example, a single integral and a double integral: quadgl(lambda x: exp(cos(x)), [0, 1]) quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1]) See the documentation for quad for information about how points arguments and keyword arguments are parsed. See documentation for TanhSinh for algorithmic information about tanh-sinh quadrature. """ kwargs['method'] = 'gauss-legendre' return ctx.quad(*args, **kwargs) def quadosc(ctx, f, interval, omega=None, period=None, zeros=None): r""" Calculates .. math :: I = \int_a^b f(x) dx where at least one of `a` and `b` is infinite and where `f(x) = g(x) \cos(\omega x + \phi)` for some slowly decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc` can also handle oscillatory integrals where the oscillation rate is different from a pure sine or cosine wave. In the standard case when `|a| < \infty, b = \infty`, :func:`~mpmath.quadosc` works by evaluating the infinite series .. math :: I = \int_a^{x_1} f(x) dx + \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx where `x_k` are consecutive zeros (alternatively some other periodic reference point) of `f(x)`. Accordingly, :func:`~mpmath.quadosc` requires information about the zeros of `f(x)`. For a periodic function, you can specify the zeros by either providing the angular frequency `\omega` (*omega*) or the *period* `2 \pi/\omega`. In general, you can specify the `n`-th zero by providing the *zeros* arguments. 
Below is an example of each:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> f = lambda x: sin(3*x)/(x**2+1) >>> quadosc(f, [0,inf], omega=3) 0.37833007080198 >>> quadosc(f, [0,inf], period=2*pi/3) 0.37833007080198 >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3) 0.37833007080198 >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica 0.37833007080198 Note that *zeros* was specified to multiply `n` by the *half-period*, not the full period. In theory, it does not matter whether each partial integral is done over a half period or a full period. However, if done over half-periods, the infinite series passed to :func:`~mpmath.nsum` becomes an *alternating series* and this typically makes the extrapolation much more efficient. Here is an example of an integration over the entire real line, and a half-infinite integration starting at `-\infty`:: >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1) 1.15572734979092 >>> pi/e 1.15572734979092 >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi) -0.0844109505595739 >>> cos(1)+si(1)-pi/2 -0.0844109505595738 Of course, the integrand may contain a complex exponential just as well as a real sine or cosine:: >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3) (0.156410688228254 + 0.0j) >>> pi/e**3 0.156410688228254 >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3) (0.00317486988463794 - 0.0447701735209082j) >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2) (0.00317486988463794 - 0.0447701735209082j) **Non-periodic functions** If `f(x) = g(x) h(x)` for some function `h(x)` that is not strictly periodic, *omega* or *period* might not work, and it might be necessary to use *zeros*. 
A notable exception can be made for Bessel functions which, though not periodic, are "asymptotically periodic" in a sufficiently strong sense that the sum extrapolation will work out:: >>> quadosc(j0, [0, inf], period=2*pi) 1.0 >>> quadosc(j1, [0, inf], period=2*pi) 1.0 More properly, one should provide the exact Bessel function zeros:: >>> j0zero = lambda n: findroot(j0, pi*(n-0.25)) >>> quadosc(j0, [0, inf], zeros=j0zero) 1.0 For an example where *zeros* becomes necessary, consider the complete Fresnel integrals .. math :: \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx = \sqrt{\frac{\pi}{8}}. Although the integrands do not decrease in magnitude as `x \to \infty`, the integrals are convergent since the oscillation rate increases (causing consecutive periods to asymptotically cancel out). These integrals are virtually impossible to calculate to any kind of accuracy using standard quadrature rules. However, if one provides the correct asymptotic distribution of zeros (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works:: >>> mp.dps = 30 >>> f = lambda x: cos(x**2) >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n)) 0.626657068657750125603941321203 >>> f = lambda x: sin(x**2) >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n)) 0.626657068657750125603941321203 >>> sqrt(pi/8) 0.626657068657750125603941321203 (Interestingly, these integrals can still be evaluated if one places some other constant than `\pi` in the square root sign.) In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow the inverse-function distribution `h^{-1}(x)`:: >>> mp.dps = 15 >>> f = lambda x: sin(exp(x)) >>> quadosc(f, [1,inf], zeros=lambda n: log(n)) -0.25024394235267 >>> pi/2-si(e) -0.250243942352671 **Non-alternating functions** If the integrand oscillates around a positive value, without alternating signs, the extrapolation might fail. 
A simple trick that sometimes works is to multiply or divide the frequency by 2:: >>> f = lambda x: 1/x**2+sin(x)/x**4 >>> quadosc(f, [1,inf], omega=1) # Bad 1.28642190869861 >>> quadosc(f, [1,inf], omega=0.5) # Perfect 1.28652953559617 >>> 1+(cos(1)+ci(1)+sin(1))/6 1.28652953559617 **Fast decay** :func:`~mpmath.quadosc` is primarily useful for slowly decaying integrands. If the integrand decreases exponentially or faster, :func:`~mpmath.quad` will likely handle it without trouble (and generally be much faster than :func:`~mpmath.quadosc`):: >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1) 0.5 >>> quad(lambda x: cos(x)/exp(x), [0, inf]) 0.5 """ a, b = ctx._as_points(interval) a = ctx.convert(a) b = ctx.convert(b) if [omega, period, zeros].count(None) != 2: raise ValueError( \ "must specify exactly one of omega, period, zeros") if a == ctx.ninf and b == ctx.inf: s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period) s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period) return s1 + s2 if a == ctx.ninf: if zeros: return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n)) else: return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period) if b != ctx.inf: raise ValueError("quadosc requires an infinite integration interval") if not zeros: if omega: period = 2*ctx.pi/omega zeros = lambda n: n*period/2 #for n in range(1,10): # p = zeros(n) # if p > a: # break #if n >= 9: # raise ValueError("zeros do not appear to be correctly indexed") n = 1 s = ctx.quadgl(f, [a, zeros(n)]) def term(k): return ctx.quadgl(f, [zeros(k), zeros(k+1)]) s += ctx.nsum(term, [n, ctx.inf]) return s if __name__ == '__main__': import doctest doctest.testmod()
klkuhlm/mpmath
mpmath/calculus/quadrature.py
Python
bsd-3-clause
38,292
[ "Gaussian" ]
444277256b8d0b9ee22b0d05acd1352fdc1626e6448b92839fd554dc42a08bbb
from go_to_adjacent_systems import * from go_somewhere_significant import * import vsrandom import launch import faction_ships import VS import Briefing import universe import unit import Director import quest import gettext class escort_local (Director.Mission): def __init__ (self,factionname,numsystemsaway, enemyquantity, waves, distance_from_base, creds, incoming, protectivefactionname='',jumps=(),var_to_set='',dynamic_flightgroup='',dynamic_type='', dynamic_defend_fg='',dynamic_defend_type='',greetingText=[_('Escort: give up while you still can...'),_('If you let us ravage our target then we grant you passage today.')]): Director.Mission.__init__(self) self.greetingText=greetingText self.dedicatedattack=vsrandom.randrange(0,2)*vsrandom.randrange(0,2) if (VS.GetRelation(factionname,protectivefactionname)>=0.0): self.dedicatedattack=1 self.arrived=0 self.todock=VS.Unit() self.launchedfriend=0 self.protectivefaction = protectivefactionname self.var_to_set=var_to_set self.quantity=0 self.mplay="all" self.gametime=VS.GetGameTime() self.waves=waves self.incoming=incoming self.dynatkfg = dynamic_flightgroup self.dynatktype = dynamic_type self.dyndeffg = dynamic_defend_fg self.dyndeftype = dynamic_defend_type self.attackers = [] self.objective= 0 self.targetiter = 0 self.ship_check_count=0 self.faction = factionname self.jp=VS.Unit() self.cred=creds self.quantity=enemyquantity self.savedquantity=enemyquantity self.distance_from_base=distance_from_base self.defendee=VS.Unit() self.difficulty=1 self.you=VS.getPlayer() self.respawn=0 name = self.you.getName () self.successdelay=0 self.objectivezero=0 self.mplay=universe.getMessagePlayer(self.you) self.adjsys = go_to_adjacent_systems(self.you,numsystemsaway,jumps) VS.IOmessage (0,"escort mission",self.mplay,_("Your mission is as follows:")) self.adjsys.Print(_("You are in the %s system,"),_("Proceed swiftly to %s."),_("Your arrival point is %s."),"escort mission",1) def SetVarValue (self,value): if (self.var_to_set!=''): 
quest.removeQuest (self.you.isPlayerStarship(),self.var_to_set,value) def SuccessMission (self): self.defendee.setFgDirective('b') self.defendee.setFlightgroupLeader(self.defendee) if (self.incoming): import unit un=unit.getSignificant(vsrandom.randrange(0,20),1,0) if (un.getName()==self.defendee.getName()): un=unit.getSignificant(vsrandom.randrange(0,30),1,0) if (un.getName()==self.defendee.getName()): un=unit.getSignificant(vsrandom.randrange(0,40),1,0) if (un.getName()==self.defendee.getName()): un=unit.getSignificant(vsrandom.randrange(0,30),1,0) if (un.getName()==self.defendee.getName()): un=unit.getSignificant(vsrandom.randrange(0,40),1,0) if (un.getName()!=self.defendee.getName()): self.defendee.performDockingOperations(un,0) #print "docking with "+un.getName() self.todock=un VS.setObjective (self.objectivezero,"Escort To %s" % unit.getUnitFullName(un)) else: self.defendee.ActivateJumpDrive(0) self.defendee.SetTarget(self.adjsys.SignificantUnit()) self.successdelay=VS.GetGameTime()+1 def PayMission(self): VS.AdjustRelation(self.you.getFactionName(),self.faction,.03,1) self.SetVarValue(1) if (self.cred>0): self.you.addCredits (self.cred) VS.IOmessage(0,"escort mission",self.mplay,_("Excellent work pilot! Your effort has thwarted the foe!")) VS.IOmessage(0,"escort mission",self.mplay,_("You have been rewarded for your effort as agreed.")) VS.terminateMission(1) def FailMission (self): self.you.addCredits (-self.cred) VS.AdjustRelation(self.you.getFactionName(),self.faction,-.02,1) self.SetVarValue(-1) VS.IOmessage (0,"escort mission",self.mplay,_("You Allowed the base you were to protect to be destroyed.")) VS.IOmessage (0,"escort mission",self.mplay,_("You are a failure to your race!")) VS.IOmessage (1,"escort mission",self.mplay,_("We have contacted your bank and informed them of your failure to deliver on credit. They have removed a number of your credits for this inconvenience. 
Let this serve as a lesson.")) VS.terminateMission(0) def NoEnemiesInArea (self,jp): if (self.adjsys.DestinationSystem()!=VS.getSystemFile()): return 0 if (self.ship_check_count>=len(self.attackers)): VS.setCompleteness(self.objective,1.0) return 1 un= self.attackers[self.ship_check_count] self.ship_check_count+=1 if (un.isNull() or (un.GetHullPercent()<.7 and self.defendee.getDistance(un)>7000)): return 0 else: VS.setObjective(self.objective,"Destroy the %s"%unit.getUnitFullName(un)) self.ship_check_count=0 return 0 def GenerateEnemies (self,jp,you): count=0 self.objectivezero=VS.addObjective ("Protect %s from %s" % (unit.getUnitFullName(jp),self.faction)) self.objective = VS.addObjective ("Destroy All %s Hostiles" % self.faction) VS.setCompleteness(self.objective,0.0) print("quantity "+str(self.quantity)) while (count<self.quantity): L = launch.Launch() L.fg="Shadow";L.dynfg=self.dynatkfg; if (self.dynatktype==''): L.type=faction_ships.getRandomFighter(self.faction) else: L.type=self.dynatktype L.ai="default";L.num=1;L.minradius=20000.0;L.maxradius=25000.0 try: L.minradius*=faction_ships.launch_distance_factor L.maxradius*=faction_ships.launch_distance_factor except: pass L.faction=self.faction launched=L.launch(you) if (count==0): self.you.SetTarget(launched) if (1): launched.SetTarget (jp) else: launched.SetTarget (you) if (self.dedicatedattack): launched.setFgDirective('B') self.attackers += [ launched ] count+=1 if (self.respawn==0 and len(self.attackers)>0): self.respawn=1 import universe universe.greet(self.greetingText,self.attackers[0],you); else: VS.IOmessage (0,"escort mission",self.mplay,_("Eliminate all %s ships here") % self.faction) VS.IOmessage (0,"escort mission",self.mplay,_("You must protect %s.") % unit.getUnitFullName(jp)) self.quantity=0 def GenerateDefendee(self): L=launch.Launch() L.fg ="Escort" L.faction=self.protectivefaction if (self.dyndeffg=='' and self.dyndeftype==''): L.type = faction_ships.getRandomFighter(self.protectivefaction) 
else: L.type = self.dyndeftype L.dynfg = self.dyndeffg import escort_mission escort_mission.escort_num+=1 L.fgappend = str(escort_mission.escort_num) L.ai = "default" L.num=1 L.minradius = 2.0*self.you.rSize() L.maxradius = 3.0*self.you.rSize() L.forcetype=True escortee=L.launch(self.you) escortee.upgrade("jump_drive",0,0,0,1) escortee.setFlightgroupLeader(self.you) escortee.setFgDirective('F') escortee.setMissionRelevant() return escortee def Execute (self): if (self.successdelay): if (self.defendee.getUnitSystemFile()!=self.you.getUnitSystemFile() or VS.GetGameTime()-self.successdelay>120 or (self.incoming and self.todock.isNull()==False and self.you.isNull()==False and self.you.getDistance(self.todock)<330)): if (self.defendee): self.PayMission() else: self.FailMission() return #nothing more happens inside this control if (self.you.isNull() or (self.launchedfriend and self.defendee.isNull())): VS.IOmessage (0,"escort mission",self.mplay,_("#ff0000You were unable to arrive in time to help. 
Mission failed.")) self.SetVarValue(-1) VS.terminateMission(0) return if (not self.adjsys.Execute()): return if (not self.arrived): self.arrived=1 if (self.launchedfriend==0 and not self.incoming): self.defendee=self.GenerateDefendee() self.launchedfriend=1 self.adjsys=go_somewhere_significant (self.you,0,self.distance_from_base,0) self.adjsys.Print (_("You must visit the %s"),"escort mission",_("docked around the %s"), 0) self.jp=self.adjsys.SignificantUnit() else: if (self.launchedfriend==0): self.defendee=self.GenerateDefendee() self.launchedfriend=1 if (self.defendee.isNull ()): self.FailMission(you) return else: self.defendee.setFlightgroupLeader(self.you) if (VS.GetGameTime()-self.gametime>10): self.defendee.setFgDirective('F') if (self.quantity>0): self.GenerateEnemies (self.defendee,self.you) if (self.ship_check_count==0 and self.dedicatedattack): if (self.targetiter>=len(self.attackers)): self.targetiter=0 else: un = self.attackers[self.targetiter] if (not un.isNull()): un.SetTarget (self.defendee) self.targetiter=self.targetiter+1 if (self.NoEnemiesInArea (self.defendee)): if (self.waves>0): self.quantity=self.savedquantity self.waves-=1 else: self.SuccessMission() def initbriefing(self): print("ending briefing") def loopbriefing(self): print("loop briefing") Briefing.terminate(); def endbriefing(self): print("ending briefing")
costalfy/Vega-Strike
data/modules/missions/escort_local.py
Python
gpl-2.0
10,666
[ "VisIt" ]
68ce559e0675c6c7acb12d01d760d21aa1ff929db898a4b2a200a0ffd1be28b3
#pylint: disable=missing-docstring #################################################################################################### # DO NOT MODIFY THIS HEADER # # MOOSE - Multiphysics Object Oriented Simulation Environment # # # # (c) 2010 Battelle Energy Alliance, LLC # # ALL RIGHTS RESERVED # # # # Prepared by Battelle Energy Alliance, LLC # # Under Contract No. DE-AC07-05ID14517 # # With the U. S. Department of Energy # # # # See COPYRIGHT for full restrictions # #################################################################################################### #pylint: enable=missing-docstring from Extension import Extension import elements import moose_elements class MooseExtension(Extension): """ Aggregates the MOOSE specific element objects into an extension for html to latex conversion. """ def __init__(self, **kwargs): super(MooseExtension, self).__init__(**kwargs) self._configs.setdefault('hrule', False) def extend(self, translator): config = self.getConfigs() translator.elements.add('moose_cite', moose_elements.MooseCite(), '<span') translator.elements.add('admonition', moose_elements.Admonition(), '<div') translator.elements.add('moose_code_div', moose_elements.MooseCodeDiv(), '_begin') translator.elements.add('moose_pre_code', moose_elements.MoosePreCode(), '<pre_code') translator.elements.add('moose_table', moose_elements.MooseTable(), '<table') translator.elements.add('moose_figure', moose_elements.MooseFigure(), '<div') translator.elements.add('moose_img', moose_elements.MooseImage(), '<img') translator.elements.add('moose_img_caption', elements.ArgumentCommand(name='p', \ command='caption', end_suffix='\n', \ attrs={'class':'moose-image-caption'}, strip=True), '<p') translator.elements.add('moose_button', elements.Element(name='button', content=''), '_begin') if not config['hrule']: translator.elements.add('moose_hide_hr', elements.Element(name='hr'), '<hr')
liuwenf/moose
python/MooseDocs/html2latex/MooseExtension.py
Python
lgpl-2.1
2,953
[ "MOOSE" ]
1b852c4ecc5d5dd6ada0b898bf37098f8083b60901d74b22b5e8fc9c7c99e25f
#!/usr/bin/env python2 # vim:fileencoding=utf-8 # License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net> from __future__ import (unicode_literals, division, absolute_import, print_function) from collections import OrderedDict from functools import partial import textwrap from PyQt5.Qt import ( QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QListWidget, QIcon, QSize, QComboBox, QLineEdit, QListWidgetItem, QStyledItemDelegate, QStaticText, Qt, QStyle, QToolButton, QInputDialog, QMenu, pyqtSignal ) from calibre.ebooks.metadata.tag_mapper import map_tags, compile_pat from calibre.gui2 import error_dialog, elided_text, Application, question_dialog from calibre.gui2.ui import get_gui from calibre.gui2.widgets2 import Dialog from calibre.utils.config import JSONConfig from calibre.utils.localization import localize_user_manual_link tag_maps = JSONConfig('tag-map-rules') class QueryEdit(QLineEdit): def contextMenuEvent(self, ev): menu = self.createStandardContextMenu() self.parent().specialise_context_menu(menu) menu.exec_(ev.globalPos()) class RuleEdit(QWidget): ACTION_MAP = OrderedDict(( ('remove', _('Remove')), ('replace', _('Replace')), ('keep', _('Keep')), ('capitalize', _('Capitalize')), ('lower', _('Lower-case')), ('upper', _('Upper-case')), ('split', _('Split')), )) MATCH_TYPE_MAP = OrderedDict(( ('one_of', _('is one of')), ('not_one_of', _('is not one of')), ('matches', _('matches pattern')), ('not_matches', _('does not match pattern')), ('has', _('contains')), )) MSG = _('Create the rule below, the rule can be used to remove or replace tags') SUBJECT = _('the tag, if it') VALUE_ERROR = _('You must provide a value for the tag to match') REPLACE_TEXT = _('with the tag:') SPLIT_TEXT = _('on the character:') SPLIT_TOOLTIP = _( 'The character on which to split tags. Note that technically you can specify' ' a sub-string, not just a single character. 
Then splitting will happen on the sub-string.') REPLACE_TOOLTIP = _( 'What to replace the tag with. Note that if you use a pattern to match' ' tags, you can replace with parts of the matched pattern. See ' ' the User Manual on how to use regular expressions for details.') REGEXP_HELP_TEXT = _('For help with regex pattern matching, see the <a href="%s">User Manual</a>') def __init__(self, parent=None): QWidget.__init__(self, parent) self.l = l = QVBoxLayout(self) self.h = h = QHBoxLayout() self.la = la = QLabel(self.MSG) la.setWordWrap(True) l.addWidget(la) l.addLayout(h) self.action = a = QComboBox(self) h.addWidget(a) for action, text in self.ACTION_MAP.iteritems(): a.addItem(text, action) a.currentIndexChanged.connect(self.update_state) self.la1 = la = QLabel('\xa0' + self.SUBJECT + '\xa0') h.addWidget(la) self.match_type = q = QComboBox(self) h.addWidget(q) for action, text in self.MATCH_TYPE_MAP.iteritems(): q.addItem(text, action) q.currentIndexChanged.connect(self.update_state) self.la2 = la = QLabel(':\xa0') h.addWidget(la) self.query = q = QueryEdit(self) h.addWidget(q) self.tag_editor_button = b = QToolButton(self) b.setIcon(QIcon(I('chapters.png'))) b.setToolTip(_('Edit the list of tags with the tag editor')) h.addWidget(b), b.clicked.connect(self.edit_tags) b.setVisible(self.can_use_tag_editor) self.h2 = h = QHBoxLayout() l.addLayout(h) self.la3 = la = QLabel(self.REPLACE_TEXT + '\xa0') h.addWidget(la) self.replace = r = QLineEdit(self) h.addWidget(r) self.regex_help = la = QLabel('<p>' + self.REGEXP_HELP_TEXT % localize_user_manual_link( 'http://manual.calibre-ebook.com/regexp.html')) la.setOpenExternalLinks(True) la.setWordWrap(True) l.addWidget(la) la.setVisible(False) l.addStretch(10) self.la3.setVisible(False), self.replace.setVisible(False) self.update_state() def sizeHint(self): a = QWidget.sizeHint(self) a.setHeight(a.height() + 75) a.setWidth(a.width() + 100) return a @property def can_use_tag_editor(self): return self.SUBJECT is 
RuleEdit.SUBJECT and 'matches' not in self.match_type.currentData() and get_gui() is not None def update_state(self): a = self.action.currentData() replace = a == 'replace' split = a == 'split' self.la3.setVisible(replace or split), self.replace.setVisible(replace or split) tt = _('A comma separated list of tags') m = self.match_type.currentData() is_match = 'matches' in m self.tag_editor_button.setVisible(self.can_use_tag_editor) if is_match: tt = _('A regular expression') elif m == 'has': tt = _('Tags that contain this string will match') self.regex_help.setVisible(is_match) self.la3.setText((self.SPLIT_TEXT if split else self.REPLACE_TEXT) + '\xa0') self.query.setToolTip(tt) self.replace.setToolTip(textwrap.fill(self.SPLIT_TOOLTIP if split else self.REPLACE_TOOLTIP)) def specialise_context_menu(self, menu): if self.can_use_tag_editor: menu.addAction(_('Use the tag editor to edit the list of tags'), self.edit_tags) def edit_tags(self): from calibre.gui2.dialogs.tag_editor import TagEditor d = TagEditor(self, get_gui().current_db, current_tags=filter(None, [x.strip() for x in self.query.text().split(',')])) if d.exec_() == d.Accepted: self.query.setText(', '.join(d.tags)) @property def rule(self): return { 'action': self.action.currentData(), 'match_type': self.match_type.currentData(), 'query': self.query.text().strip(), 'replace': self.replace.text().strip(), } @rule.setter def rule(self, rule): def sc(name): c = getattr(self, name) idx = c.findData(unicode(rule.get(name, ''))) if idx < 0: idx = 0 c.setCurrentIndex(idx) sc('action'), sc('match_type') self.query.setText(unicode(rule.get('query', '')).strip()) self.replace.setText(unicode(rule.get('replace', '')).strip()) def validate(self): rule = self.rule if not rule['query']: error_dialog(self, _('Query required'), self.VALUE_ERROR, show=True) return False if 'matches' in rule['match_type']: try: compile_pat(rule['query']) except Exception: error_dialog(self, _('Query invalid'), _( '%s is not a valid regular 
expression') % rule['query'], show=True) return False return True class RuleEditDialog(Dialog): PREFS_NAME = 'edit-tag-mapper-rule' DIALOG_TITLE = _('Edit rule') RuleEditClass = RuleEdit def __init__(self, parent=None): Dialog.__init__(self, self.DIALOG_TITLE, self.PREFS_NAME, parent=None) def setup_ui(self): self.l = l = QVBoxLayout(self) self.edit_widget = w = self.RuleEditClass(self) l.addWidget(w) l.addWidget(self.bb) def accept(self): if self.edit_widget.validate(): Dialog.accept(self) DATA_ROLE = Qt.UserRole RENDER_ROLE = DATA_ROLE + 1 class RuleItem(QListWidgetItem): @staticmethod def text_from_rule(rule, parent): query = elided_text(rule['query'], font=parent.font(), width=200, pos='right') text = _( '<b>{action}</b> the tag, if it <i>{match_type}</i>: <b>{query}</b>').format( action=RuleEdit.ACTION_MAP[rule['action']], match_type=RuleEdit.MATCH_TYPE_MAP[rule['match_type']], query=query) if rule['action'] == 'replace': text += '<br>' + _('with the tag:') + ' <b>%s</b>' % rule['replace'] if rule['action'] == 'split': text += '<br>' + _('on the character:') + ' <b>%s</b>' % rule['replace'] return text def __init__(self, rule, parent): QListWidgetItem.__init__(self, '', parent) st = self.text_from_rule(rule, parent) self.setData(RENDER_ROLE, st) self.setData(DATA_ROLE, rule) class Delegate(QStyledItemDelegate): MARGIN = 16 def sizeHint(self, option, index): st = QStaticText(index.data(RENDER_ROLE)) st.prepare(font=self.parent().font()) width = max(option.rect.width(), self.parent().width() - 50) if width and width != st.textWidth(): st.setTextWidth(width) br = st.size() return QSize(br.width(), br.height() + self.MARGIN) def paint(self, painter, option, index): QStyledItemDelegate.paint(self, painter, option, index) pal = option.palette color = pal.color(pal.HighlightedText if option.state & QStyle.State_Selected else pal.Text).name() text = '<div style="color:%s">%s</div>' % (color, index.data(RENDER_ROLE)) st = QStaticText(text) 
st.setTextWidth(option.rect.width()) painter.drawStaticText(option.rect.left() + self.MARGIN // 2, option.rect.top() + self.MARGIN // 2, st) class Rules(QWidget): RuleItemClass = RuleItem RuleEditDialogClass = RuleEditDialog changed = pyqtSignal() MSG = _('You can specify rules to filter/transform tags here. Click the "Add Rule" button' ' below to get started. The rules will be processed in order for every tag until either a' ' "remove" or a "keep" rule matches.') def __init__(self, parent=None): QWidget.__init__(self, parent) self.l = l = QVBoxLayout(self) self.msg_label = la = QLabel( '<p>' + self.MSG + '<p>' + _( 'You can <b>change an existing rule</b> by double clicking it') ) la.setWordWrap(True) l.addWidget(la) self.h = h = QHBoxLayout() l.addLayout(h) self.add_button = b = QPushButton(QIcon(I('plus.png')), _('&Add rule'), self) b.clicked.connect(self.add_rule) h.addWidget(b) self.remove_button = b = QPushButton(QIcon(I('minus.png')), _('&Remove rule(s)'), self) b.clicked.connect(self.remove_rules) h.addWidget(b) self.h3 = h = QHBoxLayout() l.addLayout(h) self.rule_list = r = QListWidget(self) self.delegate = Delegate(self) r.setSelectionMode(r.ExtendedSelection) r.setItemDelegate(self.delegate) r.doubleClicked.connect(self.edit_rule) h.addWidget(r) r.setDragEnabled(True) r.viewport().setAcceptDrops(True) r.setDropIndicatorShown(True) r.setDragDropMode(r.InternalMove) r.setDefaultDropAction(Qt.MoveAction) self.l2 = l = QVBoxLayout() h.addLayout(l) self.up_button = b = QToolButton(self) b.setIcon(QIcon(I('arrow-up.png'))), b.setToolTip(_('Move current rule up')) b.clicked.connect(self.move_up) l.addWidget(b) self.down_button = b = QToolButton(self) b.setIcon(QIcon(I('arrow-down.png'))), b.setToolTip(_('Move current rule down')) b.clicked.connect(self.move_down) l.addStretch(10), l.addWidget(b) def sizeHint(self): return QSize(800, 600) def add_rule(self): d = self.RuleEditDialogClass(self) if d.exec_() == d.Accepted: i = self.RuleItemClass(d.edit_widget.rule, 
self.rule_list) self.rule_list.scrollToItem(i) self.changed.emit() def edit_rule(self): i = self.rule_list.currentItem() if i is not None: d = self.RuleEditDialogClass(self) d.edit_widget.rule = i.data(Qt.UserRole) if d.exec_() == d.Accepted: rule = d.edit_widget.rule i.setData(DATA_ROLE, rule) i.setData(RENDER_ROLE, self.RuleItemClass.text_from_rule(rule, self.rule_list)) self.changed.emit() def remove_rules(self): changed = False for item in self.rule_list.selectedItems(): self.rule_list.takeItem(self.rule_list.row(item)) changed = True if changed: self.changed.emit() def move_up(self): i = self.rule_list.currentItem() if i is not None: row = self.rule_list.row(i) if row > 0: self.rule_list.takeItem(row) self.rule_list.insertItem(row - 1, i) self.rule_list.setCurrentItem(i) self.changed.emit() def move_down(self): i = self.rule_list.currentItem() if i is not None: row = self.rule_list.row(i) if row < self.rule_list.count() - 1: self.rule_list.takeItem(row) self.rule_list.insertItem(row + 1, i) self.rule_list.setCurrentItem(i) self.changed.emit() @property def rules(self): ans = [] for r in xrange(self.rule_list.count()): ans.append(self.rule_list.item(r).data(DATA_ROLE)) return ans @rules.setter def rules(self, rules): self.rule_list.clear() for rule in rules: if 'action' in rule and 'match_type' in rule and 'query' in rule: self.RuleItemClass(rule, self.rule_list) class Tester(Dialog): DIALOG_TITLE = _('Test tag mapper rules') PREFS_NAME = 'test-tag-mapper-rules' LABEL = _('Enter a comma separated list of &tags to test:') PLACEHOLDER = _('Enter tags and click the Test button') EMPTY_RESULT = '<p>&nbsp;<br>&nbsp;</p>' def __init__(self, rules, parent=None): self.rules = rules Dialog.__init__(self, self.DIALOG_TITLE, self.PREFS_NAME, parent=parent) def setup_ui(self): self.l = l = QVBoxLayout(self) self.bb.setStandardButtons(self.bb.Close) self.la = la = QLabel(self.LABEL) l.addWidget(la) self.tags = t = QLineEdit(self) la.setBuddy(t) 
t.setPlaceholderText(self.PLACEHOLDER) self.h = h = QHBoxLayout() l.addLayout(h) h.addWidget(t) self.test_button = b = QPushButton(_('&Test'), self) b.clicked.connect(self.do_test) h.addWidget(b) self.result = la = QLabel(self) la.setWordWrap(True) la.setText(self.EMPTY_RESULT) l.addWidget(la) l.addWidget(self.bb) @property def value(self): return self.tags.text() def do_test(self): tags = [x.strip() for x in self.value.split(',')] tags = map_tags(tags, self.rules) self.result.setText(_('<b>Resulting tags:</b> %s') % ', '.join(tags)) def sizeHint(self): ans = Dialog.sizeHint(self) ans.setWidth(ans.width() + 150) return ans class SaveLoadMixin(object): def save_ruleset(self): if not self.rules: error_dialog(self, _('No rules'), _( 'Cannot save as no rules have been created'), show=True) return text, ok = QInputDialog.getText(self, _('Save ruleset as'), _( 'Enter a name for this ruleset:'), text=self.loaded_ruleset or '') if ok and text: if self.loaded_ruleset and text == self.loaded_ruleset: if not question_dialog(self, _('Are you sure?'), _( 'A ruleset with the name "%s" already exists, do you want to replace it?') % text): return self.loaded_ruleset = text rules = self.rules if rules: self.PREFS_OBJECT[text] = self.rules elif text in self.PREFS_OBJECT: del self.PREFS_OBJECT[text] self.build_load_menu() def build_load_menu(self): self.load_menu.clear() if len(self.PREFS_OBJECT): for name, rules in self.PREFS_OBJECT.iteritems(): self.load_menu.addAction(name).triggered.connect(partial(self.load_ruleset, name)) self.load_menu.addSeparator() m = self.load_menu.addMenu(_('Delete saved rulesets')) for name, rules in self.PREFS_OBJECT.iteritems(): m.addAction(name).triggered.connect(partial(self.delete_ruleset, name)) else: self.load_menu.addAction(_('No saved rulesets available')) def load_ruleset(self, name): self.rules = self.PREFS_OBJECT[name] self.loaded_ruleset = name def delete_ruleset(self, name): del self.PREFS_OBJECT[name] self.build_load_menu() class 
RulesDialog(Dialog, SaveLoadMixin): DIALOG_TITLE = _('Edit tag mapper rules') PREFS_NAME = 'edit-tag-mapper-rules' RulesClass = Rules TesterClass = Tester PREFS_OBJECT = tag_maps def __init__(self, parent=None): self.loaded_ruleset = None Dialog.__init__(self, self.DIALOG_TITLE, self.PREFS_NAME, parent=parent) def setup_ui(self): self.l = l = QVBoxLayout(self) self.edit_widget = w = self.RulesClass(self) l.addWidget(w) l.addWidget(self.bb) self.save_button = b = self.bb.addButton(_('&Save'), self.bb.ActionRole) b.setToolTip(_('Save this ruleset for later re-use')) b.clicked.connect(self.save_ruleset) self.load_button = b = self.bb.addButton(_('&Load'), self.bb.ActionRole) b.setToolTip(_('Load a previously saved ruleset')) self.load_menu = QMenu(self) b.setMenu(self.load_menu) self.build_load_menu() self.test_button = b = self.bb.addButton(_('&Test rules'), self.bb.ActionRole) b.clicked.connect(self.test_rules) @property def rules(self): return self.edit_widget.rules @rules.setter def rules(self, rules): self.edit_widget.rules = rules def test_rules(self): self.TesterClass(self.rules, self).exec_() if __name__ == '__main__': app = Application([]) d = RulesDialog() d.rules = [ {'action':'remove', 'query':'moose', 'match_type':'one_of', 'replace':''}, {'action':'replace', 'query':'moose', 'match_type':'one_of', 'replace':'xxxx'}, {'action':'split', 'query':'/', 'match_type':'has', 'replace':'/'}, ] d.exec_() from pprint import pprint pprint(d.rules) del d, app
timpalpant/calibre
src/calibre/gui2/tag_mapper.py
Python
gpl-3.0
18,847
[ "MOOSE" ]
18cf58b33e4840d3d7a6e3f4fb8744d92049f830469f94c0940d5c695d4f2a01
# FIXME: do we still need this test case, if test_provider_skeleton is working fine? from unittest import TestCase from openarticlegauge.plugins.nature import NaturePlugin # TUTORIAL: change this to import *your* plugin from openarticlegauge import config, models # TUTORIAL: no need to modify any of this unless you added a key to the license info keys_in_license = ['provenance', 'description', 'type', 'title', 'url', 'jurisdiction', 'open_access', 'BY', 'NC', 'SA', 'ND'] keys_in_provenance = ['date', 'agent', 'source', 'category', 'description'] class TestBasic(TestCase): def setUp(self): pass def tearDown(self): pass # TUTORIAL # Give some examples of dereferenced URL-s which you expect to work. # Dereferenced means NOT http://dx.doi.org/10.1186/1471-2164-13-425 # but the result of the redirect from hitting that). def test_01_nature_supports_success(self): test_urls = ["http://www.nature.com/ncomms/journal/v1/n1/full/ncomms1007.html"] npg = NaturePlugin() for url in test_urls: assert npg.supports({"url" : [url]}) # TUTORIAL # Now give some examples of URL-s and strings that should not be # supported. def test_02_nature_supports_fail(self): test_urls = ["http://www.plosone.org/", "askjdfsakjdhfsa"] npg = NaturePlugin() for url in test_urls: assert not npg.supports({"url" : [url]}) # TUTORIAL: Repeat success examples from above test. def test_03_npg_supports_url_success(self): test_urls = ["http://www.nature.com/srep/2013/130415/srep01657/full/srep01657.html"] npg = NaturePlugin() for url in test_urls: assert npg.supports_base_url(url) # TUTORIAL: Repeat failure examples from above test. 
def test_04_npg_supports_url_fail(self): npg = NaturePlugin() test_urls = ["http://www.biomedcentral.com/983242", "askjdfsakjdhfsa"] for url in test_urls: assert not npg.supports_base_url(url) def test_05_name_and_version(self): """ Take an example supported article and check just the handler fields """ record = {} record['bibjson'] = {} record['provider'] = {} record['provider']['url'] = ['http://www.nature.com/srep/2013/130415/srep01657/full/srep01657.html'] record = models.MessageObject(record=record) npg = NaturePlugin() npg.license_detect(record) record = record.record # just barebones checks to make sure the license and provenance objects # exist in the first place so the handler fields can be checked assert record['bibjson'].has_key('license') assert record['bibjson']['license'] assert 'provenance' in record['bibjson']['license'][-1] assert 'handler' in record['bibjson']['license'][-1]['provenance'] assert record['bibjson']['license'][-1]['provenance']['handler'] == 'nature' assert record['bibjson']['license'][-1]['provenance']['handler_version'] == '0.1' def test_06_npg_ccncsa_license(self): record = {} record['bibjson'] = {} record['provider'] = {} # TUTORIAL # Again, you must provide a dereferenced URL here - that's what your # plugin will get! record['provider']['url'] = ['http://www.nature.com/srep/2013/130415/srep01657/full/srep01657.html'] record = models.MessageObject(record=record) npg = NaturePlugin() npg.license_detect(record) # check if all the important keys were created record = record.record # TUTORIAL: You don't need to modify any of these assert record['bibjson'].has_key('license') assert record['bibjson']['license'] # NB: some examples may fail the 'url' test since the Open Definition # data we're using as the basis for our licenses dictionary does not # have 'url' for all licenses. Fix by modifying licenses.py - add the data. 
assert all (key in record['bibjson']['license'][-1] for key in keys_in_license) assert all (key in record['bibjson']['license'][-1]['provenance'] for key in keys_in_provenance) # some content checks now # TUTORIAL: this is what you need to modify to make sure your plugin is # recording the right data. Essentially this should match whatever you put # into lic_statements in the plugin code and whatever you're expecting # for this particular resource. assert record['bibjson']['license'][-1]['type'] == 'cc-nc-sa' assert record['bibjson']['license'][-1]['version'] == '3.0' assert 'id' not in record['bibjson']['license'][-1] # should not have "id" - due to bibserver assert not record['bibjson']['license'][-1]['jurisdiction'] assert not record['bibjson']['license'][-1]['open_access'] assert record['bibjson']['license'][-1]['BY'] assert record['bibjson']['license'][-1]['NC'] assert record['bibjson']['license'][-1]['SA'] assert not record['bibjson']['license'][-1]['ND'] # In this case we also expect the plugin to overwrite the ['license']['url'] # property with a more specific one from the license statement. assert record['bibjson']['license'][-1]['url'] == 'http://creativecommons.org/licenses/by-nc-sa/3.0/' # TUTORIAL: you don't need to touch the following tests, EXCEPT for # the last one - the human-readable provenance description. assert record['bibjson']['license'][-1]['provenance']['agent'] == config.agent assert record['bibjson']['license'][-1]['provenance']['source'] == record['provider']['url'][0] assert record['bibjson']['license'][-1]['provenance']['date'] assert record['bibjson']['license'][-1]['provenance']['category'] == 'page_scrape' # TUTORIAL: This is what you need to change - this is the license # statement from lic_statements that you expect to have been present # on this resource's page. lic_statement = 'This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. 
To view a copy of this license, visit <a href="http://creativecommons.org/licenses/by-nc-sa/3.0/">http://creativecommons.org/licenses/by-nc-sa/3.0/</a>' \ # TUTORIAL: this is essentially some boilerplate text that you should not change assert record['bibjson']['license'][-1]['provenance']['description'] == 'License decided by scraping the resource at ' + record['provider']['url'][0] + ' and looking for the following license statement: "' + lic_statement + '".' def test_07_npg_ccncnd_license(self): record = {} record['bibjson'] = {} record['provider'] = {} # TUTORIAL # Again, you must provide a dereferenced URL here - that's what your # plugin will get! record['provider']['url'] = ['http://www.nature.com/ncomms/journal/v4/n4/full/ncomms2674.html'] record = models.MessageObject(record=record) npg = NaturePlugin() npg.license_detect(record) # check if all the important keys were created record = record.record # TUTORIAL: You don't need to modify any of these assert record['bibjson'].has_key('license') assert record['bibjson']['license'] # NB: some examples may fail the 'url' test since the Open Definition # data we're using as the basis for our licenses dictionary does not # have 'url' for all licenses. Fix by modifying licenses.py - add the data. assert all (key in record['bibjson']['license'][-1] for key in keys_in_license) assert all (key in record['bibjson']['license'][-1]['provenance'] for key in keys_in_provenance) # some content checks now # TUTORIAL: this is what you need to modify to make sure your plugin is # recording the right data. Essentially this should match whatever you put # into lic_statements in the plugin code and whatever you're expecting # for this particular resource. 
assert record['bibjson']['license'][-1]['type'] == 'cc-nc-nd' assert record['bibjson']['license'][-1]['version'] == '3.0' assert 'id' not in record['bibjson']['license'][-1] # should not have "id" - due to bibserver assert not record['bibjson']['license'][-1]['jurisdiction'] assert not record['bibjson']['license'][-1]['open_access'] assert record['bibjson']['license'][-1]['BY'] assert record['bibjson']['license'][-1]['NC'] assert not record['bibjson']['license'][-1]['SA'] assert record['bibjson']['license'][-1]['ND'] # In this case we also expect the plugin to overwrite the ['license']['url'] # property with a more specific one from the license statement. assert record['bibjson']['license'][-1]['url'] == 'http://creativecommons.org/licenses/by-nc-nd/3.0/' # TUTORIAL: you don't need to touch the following tests, EXCEPT for # the last one - the human-readable provenance description. assert record['bibjson']['license'][-1]['provenance']['agent'] == config.agent assert record['bibjson']['license'][-1]['provenance']['source'] == record['provider']['url'][0] assert record['bibjson']['license'][-1]['provenance']['date'] assert record['bibjson']['license'][-1]['provenance']['category'] == 'page_scrape' # TUTORIAL: This is what you need to change - this is the license # statement from lic_statements that you expect to have been present # on this resource's page. lic_statement = 'This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported License. To view a copy of this license, visit <a href="http://creativecommons.org/licenses/by-nc-nd/3.0/">http://creativecommons.org/licenses/by-nc-nd/3.0/</a>' \ # TUTORIAL: this is essentially some boilerplate text that you should not change assert record['bibjson']['license'][-1]['provenance']['description'] == 'License decided by scraping the resource at ' + record['provider']['url'][0] + ' and looking for the following license statement: "' + lic_statement + '".' 
def test_08_npg_ccby_license(self): record = {} record['bibjson'] = {} record['provider'] = {} # TUTORIAL # Again, you must provide a dereferenced URL here - that's what your # plugin will get! record['provider']['url'] = ['http://www.nature.com/srep/2013/130129/srep01154/full/srep01154.html'] record = models.MessageObject(record=record) npg = NaturePlugin() npg.license_detect(record) # check if all the important keys were created record = record.record # TUTORIAL: You don't need to modify any of these assert record['bibjson'].has_key('license') assert record['bibjson']['license'] # NB: some examples may fail the 'url' test since the Open Definition # data we're using as the basis for our licenses dictionary does not # have 'url' for all licenses. Fix by modifying licenses.py - add the data. assert all (key in record['bibjson']['license'][-1] for key in keys_in_license) assert all (key in record['bibjson']['license'][-1]['provenance'] for key in keys_in_provenance) # some content checks now # TUTORIAL: this is what you need to modify to make sure your plugin is # recording the right data. Essentially this should match whatever you put # into lic_statements in the plugin code and whatever you're expecting # for this particular resource. assert record['bibjson']['license'][-1]['type'] == 'cc-by' assert record['bibjson']['license'][-1]['version'] == '3.0' assert 'id' not in record['bibjson']['license'][-1] # should not have "id" - due to bibserver assert not record['bibjson']['license'][-1]['jurisdiction'] assert record['bibjson']['license'][-1]['open_access'] assert record['bibjson']['license'][-1]['BY'] assert not record['bibjson']['license'][-1]['NC'] assert not record['bibjson']['license'][-1]['SA'] assert not record['bibjson']['license'][-1]['ND'] # In this case we also expect the plugin to overwrite the ['license']['url'] # property with a more specific one from the license statement. 
assert record['bibjson']['license'][-1]['url'] == 'http://creativecommons.org/licenses/by/3.0/' # TUTORIAL: you don't need to touch the following tests, EXCEPT for # the last one - the human-readable provenance description. assert record['bibjson']['license'][-1]['provenance']['agent'] == config.agent assert record['bibjson']['license'][-1]['provenance']['source'] == record['provider']['url'][0] assert record['bibjson']['license'][-1]['provenance']['date'] assert record['bibjson']['license'][-1]['provenance']['category'] == 'page_scrape' # TUTORIAL: This is what you need to change - this is the license # statement from lic_statements that you expect to have been present # on this resource's page. lic_statement = 'This work is licensed under a Creative Commons Attribution 3.0 Unported License. To view a copy of this license, visit <a href="http://creativecommons.org/licenses/by/3.0/">http://creativecommons.org/licenses/by/3.0/</a>' \ # TUTORIAL: this is essentially some boilerplate text that you should not change assert record['bibjson']['license'][-1]['provenance']['description'] == 'License decided by scraping the resource at ' + record['provider']['url'][0] + ' and looking for the following license statement: "' + lic_statement + '".'
CottageLabs/OpenArticleGauge
openarticlegauge/tests/test_nature.py
Python
bsd-3-clause
14,097
[ "VisIt" ]
a61a570d069dac198954319753534c70086487726a40d799673f2e5ec5453f71
from __future__ import print_function from __future__ import absolute_import import numpy as nm import sys from six.moves import range sys.path.append('.') from sfepy.base.base import output, assert_ from sfepy.base.ioutils import ensure_path from sfepy.linalg import cycle from sfepy.discrete.fem.mesh import Mesh from sfepy.mesh.mesh_tools import elems_q2t def get_tensor_product_conn(shape): """ Generate vertex connectivity for cells of a tensor-product mesh of the given shape. Parameters ---------- shape : array of 2 or 3 ints Shape (counts of nodes in x, y, z) of the mesh. Returns ------- conn : array The vertex connectivity array. desc : str The cell kind. """ shape = nm.asarray(shape) dim = len(shape) assert_(1 <= dim <= 3) n_nod = nm.prod(shape) n_el = nm.prod(shape - 1) grid = nm.arange(n_nod, dtype=nm.int32) grid.shape = shape if dim == 1: conn = nm.zeros((n_el, 2), dtype=nm.int32) conn[:, 0] = grid[:-1] conn[:, 1] = grid[1:] desc = '1_2' elif dim == 2: conn = nm.zeros((n_el, 4), dtype=nm.int32) conn[:, 0] = grid[:-1, :-1].flat conn[:, 1] = grid[1:, :-1].flat conn[:, 2] = grid[1:, 1:].flat conn[:, 3] = grid[:-1, 1:].flat desc = '2_4' else: conn = nm.zeros((n_el, 8), dtype=nm.int32) conn[:, 0] = grid[:-1, :-1, :-1].flat conn[:, 1] = grid[1:, :-1, :-1].flat conn[:, 2] = grid[1:, 1:, :-1].flat conn[:, 3] = grid[:-1, 1:, :-1].flat conn[:, 4] = grid[:-1, :-1, 1:].flat conn[:, 5] = grid[1:, :-1, 1:].flat conn[:, 6] = grid[1:, 1:, 1:].flat conn[:, 7] = grid[:-1, 1:, 1:].flat desc = '3_8' return conn, desc def gen_block_mesh(dims, shape, centre, mat_id=0, name='block', coors=None, verbose=True): """ Generate a 2D or 3D block mesh. The dimension is determined by the lenght of the shape argument. Parameters ---------- dims : array of 2 or 3 floats Dimensions of the block. shape : array of 2 or 3 ints Shape (counts of nodes in x, y, z) of the block mesh. centre : array of 2 or 3 floats Centre of the block. mat_id : int, optional The material id of all elements. 
name : string Mesh name. verbose : bool If True, show progress of the mesh generation. Returns ------- mesh : Mesh instance """ dims = nm.asarray(dims, dtype=nm.float64) shape = nm.asarray(shape, dtype=nm.int32) centre = nm.asarray(centre, dtype=nm.float64) dim = shape.shape[0] centre = centre[:dim] dims = dims[:dim] n_nod = nm.prod(shape) output('generating %d vertices...' % n_nod, verbose=verbose) x0 = centre - 0.5 * dims dd = dims / (shape - 1) ngrid = nm.mgrid[[slice(ii) for ii in shape]] ngrid.shape = (dim, n_nod) coors = x0 + ngrid.T * dd output('...done', verbose=verbose) n_el = nm.prod(shape - 1) output('generating %d cells...' % n_el, verbose=verbose) mat_ids = nm.empty((n_el,), dtype=nm.int32) mat_ids.fill(mat_id) conn, desc = get_tensor_product_conn(shape) output('...done', verbose=verbose) mesh = Mesh.from_data(name, coors, None, [conn], [mat_ids], [desc]) return mesh def gen_cylinder_mesh(dims, shape, centre, axis='x', force_hollow=False, is_open=False, open_angle=0.0, non_uniform=False, name='cylinder', verbose=True): """ Generate a cylindrical mesh along an axis. Its cross-section can be ellipsoidal. Parameters ---------- dims : array of 5 floats Dimensions of the cylinder: inner surface semi-axes a1, b1, outer surface semi-axes a2, b2, length. shape : array of 3 ints Shape (counts of nodes in radial, circumferential and longitudinal directions) of the cylinder mesh. centre : array of 3 floats Centre of the cylinder. axis: one of 'x', 'y', 'z' The axis of the cylinder. force_hollow : boolean Force hollow mesh even if inner radii a1 = b1 = 0. is_open : boolean Generate an open cylinder segment. open_angle : float Opening angle in radians. non_uniform : boolean If True, space the mesh nodes in radial direction so that the element volumes are (approximately) the same, making thus the elements towards the outer surface thinner. name : string Mesh name. verbose : bool If True, show progress of the mesh generation. 
Returns ------- mesh : Mesh instance """ dims = nm.asarray(dims, dtype=nm.float64) shape = nm.asarray(shape, dtype=nm.int32) centre = nm.asarray(centre, dtype=nm.float64) a1, b1, a2, b2, length = dims nr, nfi, nl = shape origin = centre - nm.array([0.5 * length, 0.0, 0.0]) dfi = 2.0 * (nm.pi - open_angle) / nfi if is_open: nnfi = nfi + 1 else: nnfi = nfi is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15) if is_hollow: mr = 0 else: mr = (nnfi - 1) * nl grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32) n_nod = nr * nnfi * nl - mr coors = nm.zeros((n_nod, 3), dtype=nm.float64) angles = nm.linspace(open_angle, open_angle+(nfi)*dfi, nfi+1) xs = nm.linspace(0.0, length, nl) if non_uniform: ras = nm.zeros((nr,), dtype=nm.float64) rbs = nm.zeros_like(ras) advol = (a2**2 - a1**2) / (nr - 1) bdvol = (b2**2 - b1**2) / (nr - 1) ras[0], rbs[0] = a1, b1 for ii in range(1, nr): ras[ii] = nm.sqrt(advol + ras[ii-1]**2) rbs[ii] = nm.sqrt(bdvol + rbs[ii-1]**2) else: ras = nm.linspace(a1, a2, nr) rbs = nm.linspace(b1, b2, nr) # This is 3D only... output('generating %d vertices...' % n_nod, verbose=verbose) ii = 0 for ix in range(nr): a, b = ras[ix], rbs[ix] for iy, fi in enumerate(angles[:nnfi]): for iz, x in enumerate(xs): grid[ix,iy,iz] = ii coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)] ii += 1 if not is_hollow and (ix == 0): if iy > 0: grid[ix,iy,iz] = grid[ix,0,iz] ii -= 1 assert_(ii == n_nod) output('...done', verbose=verbose) n_el = (nr - 1) * nfi * (nl - 1) conn = nm.zeros((n_el, 8), dtype=nm.int32) output('generating %d cells...' 
% n_el, verbose=verbose) ii = 0 for (ix, iy, iz) in cycle([nr-1, nnfi, nl-1]): if iy < (nnfi - 1): conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ], grid[ix+1,iy+1,iz ], grid[ix ,iy+1,iz ], grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1], grid[ix+1,iy+1,iz+1], grid[ix ,iy+1,iz+1]] ii += 1 elif not is_open: conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ], grid[ix+1,0,iz ], grid[ix ,0,iz ], grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1], grid[ix+1,0,iz+1], grid[ix ,0,iz+1]] ii += 1 mat_id = nm.zeros((n_el,), dtype = nm.int32) desc = '3_8' assert_(n_nod == (conn.max() + 1)) output('...done', verbose=verbose) if axis == 'z': coors = coors[:,[1,2,0]] elif axis == 'y': coors = coors[:,[2,0,1]] mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc]) return mesh def _spread_along_axis(axis, coors, tangents, grading_fun): """ Spread the coordinates along the given axis using the grading function, and the tangents in the other two directions. """ oo = list(set([0, 1, 2]).difference([axis])) c0, c1, c2 = coors[:, axis], coors[:, oo[0]], coors[:, oo[1]] out = nm.empty_like(coors) mi, ma = c0.min(), c0.max() nc0 = (c0 - mi) / (ma - mi) out[:, axis] = oc0 = grading_fun(nc0) * (ma - mi) + mi nc = oc0 - oc0.min() mi, ma = c1.min(), c1.max() n1 = 2 * (c1 - mi) / (ma - mi) - 1 out[:, oo[0]] = c1 + n1 * nc * tangents[oo[0]] mi, ma = c2.min(), c2.max() n2 = 2 * (c2 - mi) / (ma - mi) - 1 out[:, oo[1]] = c2 + n2 * nc * tangents[oo[1]] return out def _get_extension_side(side, grading_fun, mat_id, b_dims, b_shape, e_dims, e_shape, centre): """ Get a mesh extending the given side of a block mesh. """ # Pure extension dimensions. pe_dims = 0.5 * (e_dims - b_dims) coff = 0.5 * (b_dims + pe_dims) cc = centre + coff * nm.eye(3)[side] if side == 0: # x axis. dims = [pe_dims[0], b_dims[1], b_dims[2]] shape = [e_shape, b_shape[1], b_shape[2]] tangents = [0, pe_dims[1] / pe_dims[0], pe_dims[2] / pe_dims[0]] elif side == 1: # y axis. 
dims = [b_dims[0], pe_dims[1], b_dims[2]] shape = [b_shape[0], e_shape, b_shape[2]] tangents = [pe_dims[0] / pe_dims[1], 0, pe_dims[2] / pe_dims[1]] elif side == 2: # z axis. dims = [b_dims[0], b_dims[1], pe_dims[2]] shape = [b_shape[0], b_shape[1], e_shape] tangents = [pe_dims[0] / pe_dims[2], pe_dims[1] / pe_dims[2], 0] e_mesh = gen_block_mesh(dims, shape, cc, mat_id=mat_id, verbose=False) e_mesh.coors[:] = _spread_along_axis(side, e_mesh.coors, tangents, grading_fun) return e_mesh, shape def gen_extended_block_mesh(b_dims, b_shape, e_dims, e_shape, centre, grading_fun=None, name=None): """ Generate a 3D mesh with a central block and (coarse) extending side meshes. The resulting mesh is again a block. Each of the components has a different material id. Parameters ---------- b_dims : array of 3 floats The dimensions of the central block. b_shape : array of 3 ints The shape (counts of nodes in x, y, z) of the central block mesh. e_dims : array of 3 floats The dimensions of the complete block (central block + extensions). e_shape : int The count of nodes of extending blocks in the direction from the central block. centre : array of 3 floats The centre of the mesh. grading_fun : callable, optional A function of :math:`x \in [0, 1]` that can be used to shift nodes in the extension axis directions to allow smooth grading of element sizes from the centre. The default function is :math:`x**p` with :math:`p` determined so that the element sizes next to the central block have the size of the shortest edge of the central block. name : string, optional The mesh name. Returns ------- mesh : Mesh instance """ b_dims = nm.asarray(b_dims, dtype=nm.float64) b_shape = nm.asarray(b_shape, dtype=nm.int32) e_dims = nm.asarray(e_dims, dtype=nm.float64) centre = nm.asarray(centre, dtype=nm.float64) # Pure extension dimensions. pe_dims = 0.5 * (e_dims - b_dims) # Central block element sizes. dd = (b_dims / (b_shape - 1)) # The "first x" going to grading_fun. 
nc = 1.0 / (e_shape - 1) # Grading power and function. power = nm.log(dd.min() / pe_dims.min()) / nm.log(nc) grading_fun = (lambda x: x**power) if grading_fun is None else grading_fun # Central block mesh. b_mesh = gen_block_mesh(b_dims, b_shape, centre, mat_id=0, verbose=False) # 'x' extension. e_mesh, xs = _get_extension_side(0, grading_fun, 10, b_dims, b_shape, e_dims, e_shape, centre) mesh = b_mesh + e_mesh # Mirror by 'x'. e_mesh.coors[:, 0] = (2 * centre[0]) - e_mesh.coors[:, 0] e_mesh.cmesh.cell_groups.fill(11) mesh = mesh + e_mesh # 'y' extension. e_mesh, ys = _get_extension_side(1, grading_fun, 20, b_dims, b_shape, e_dims, e_shape, centre) mesh = mesh + e_mesh # Mirror by 'y'. e_mesh.coors[:, 1] = (2 * centre[1]) - e_mesh.coors[:, 1] e_mesh.cmesh.cell_groups.fill(21) mesh = mesh + e_mesh # 'z' extension. e_mesh, zs = _get_extension_side(2, grading_fun, 30, b_dims, b_shape, e_dims, e_shape, centre) mesh = mesh + e_mesh # Mirror by 'z'. e_mesh.coors[:, 2] = (2 * centre[2]) - e_mesh.coors[:, 2] e_mesh.cmesh.cell_groups.fill(31) mesh = mesh + e_mesh if name is not None: mesh.name = name # Verify merging by checking the number of nodes. n_nod = (nm.prod(nm.maximum(b_shape - 2, 0)) + 2 * nm.prod(xs) + 2 * (max(ys[0] - 2, 0) * ys[1] * ys[2]) + 2 * (max(zs[0] - 2, 0) * max(zs[1] - 2, 0) * zs[2])) if n_nod != mesh.n_nod: raise ValueError('Merge of meshes failed! 
(%d == %d)' % (n_nod, mesh.n_nod)) return mesh def tiled_mesh1d(conn, coors, ngrps, idim, n_rep, bb, eps=1e-6, ndmap=False): from sfepy.discrete.fem.periodic import match_grid_plane s1 = nm.nonzero(coors[:,idim] < (bb[0] + eps))[0] s2 = nm.nonzero(coors[:,idim] > (bb[1] - eps))[0] if s1.shape != s2.shape: raise ValueError('incompatible shapes: %s == %s'\ % (s1.shape, s2.shape)) (nnod0, dim) = coors.shape nnod = nnod0 * n_rep - s1.shape[0] * (n_rep - 1) (nel0, nnel) = conn.shape nel = nel0 * n_rep dd = nm.zeros((dim,), dtype=nm.float64) dd[idim] = bb[1] - bb[0] m1, m2 = match_grid_plane(coors[s1], coors[s2], idim) oconn = nm.zeros((nel, nnel), dtype=nm.int32) ocoors = nm.zeros((nnod, dim), dtype=nm.float64) ongrps = nm.zeros((nnod,), dtype=nm.int32) if type(ndmap) is bool: ret_ndmap = ndmap else: ret_ndmap= True ndmap_out = nm.zeros((nnod,), dtype=nm.int32) el_off = 0 nd_off = 0 for ii in range(n_rep): if ii == 0: oconn[0:nel0,:] = conn ocoors[0:nnod0,:] = coors ongrps[0:nnod0] = ngrps.squeeze() nd_off += nnod0 mapto = s2[m2] mask = nm.ones((nnod0,), dtype=nm.int32) mask[s1] = 0 remap0 = nm.cumsum(mask) - 1 nnod0r = nnod0 - s1.shape[0] cidx = nm.where(mask) if ret_ndmap: ndmap_out[0:nnod0] = nm.arange(nnod0) else: remap = remap0 + nd_off remap[s1[m1]] = mapto mapto = remap[s2[m2]] ocoors[nd_off:(nd_off + nnod0r),:] =\ (coors[cidx,:] + ii * dd) ongrps[nd_off:(nd_off + nnod0r)] = ngrps[cidx].squeeze() oconn[el_off:(el_off + nel0),:] = remap[conn] if ret_ndmap: ndmap_out[nd_off:(nd_off + nnod0r)] = cidx[0] nd_off += nnod0r el_off += nel0 if ret_ndmap: if ndmap is not None: max_nd_ref = nm.max(ndmap) idxs = nm.where(ndmap_out > max_nd_ref) ndmap_out[idxs] = ndmap[ndmap_out[idxs]] return oconn, ocoors, ongrps, ndmap_out else: return oconn, ocoors, ongrps def gen_tiled_mesh(mesh, grid=None, scale=1.0, eps=1e-6, ret_ndmap=False): """ Generate a new mesh by repeating a given periodic element along each axis. 
Parameters ---------- mesh : Mesh instance The input periodic FE mesh. grid : array Number of repetition along each axis. scale : float, optional Scaling factor. eps : float, optional Tolerance for boundary detection. ret_ndmap : bool, optional If True, return global node map. Returns ------- mesh_out : Mesh instance FE mesh. ndmap : array Maps: actual node id --> node id in the reference cell. """ bbox = mesh.get_bounding_box() if grid is None: iscale = max(int(1.0 / scale), 1) grid = [iscale] * mesh.dim conn = mesh.get_conn(mesh.descs[0]) mat_ids = mesh.cmesh.cell_groups coors = mesh.coors ngrps = mesh.cmesh.vertex_groups nrep = nm.prod(grid) ndmap = None output('repeating %s ...' % grid) nblk = 1 for ii, gr in enumerate(grid): if ret_ndmap: (conn, coors, ngrps, ndmap0) = tiled_mesh1d(conn, coors, ngrps, ii, gr, bbox.transpose()[ii], eps=eps, ndmap=ndmap) ndmap = ndmap0 else: conn, coors, ngrps = tiled_mesh1d(conn, coors, ngrps, ii, gr, bbox.transpose()[ii], eps=eps) nblk *= gr output('...done') mat_ids = nm.tile(mat_ids, (nrep,)) mesh_out = Mesh.from_data('tiled mesh', coors * scale, ngrps, [conn], [mat_ids], [mesh.descs[0]]) if ret_ndmap: return mesh_out, ndmap else: return mesh_out def gen_misc_mesh(mesh_dir, force_create, kind, args, suffix='.mesh', verbose=False): """ Create sphere or cube mesh according to `kind` in the given directory if it does not exist and return path to it. 
""" import os from sfepy import data_dir defdir = os.path.join(data_dir, 'meshes') if mesh_dir is None: mesh_dir = defdir def retype(args, types, defaults): args=list(args) args.extend(defaults[len(args):len(defaults)]) return tuple([type(value) for type, value in zip(types, args) ]) if kind == 'sphere': default = [5, 41, args[0]] args = retype(args, [float, int, float], default) mesh_pattern = os.path.join(mesh_dir, 'sphere-%.2f-%.2f-%i') else: assert_(kind == 'cube') args = retype(args, (int, float, int, float, int, float), (args[0], args[1], args[0], args[1], args[0], args[1])) mesh_pattern = os.path.join(mesh_dir, 'cube-%i_%.2f-%i_%.2f-%i_%.2f') if verbose: output(args) filename = mesh_pattern % args if not force_create: if os.path.exists(filename): return filename if os.path.exists(filename + '.mesh') : return filename + '.mesh' if os.path.exists(filename + '.vtk'): return filename + '.vtk' if kind == 'cube': filename = filename + suffix ensure_path(filename) output('creating new cube mesh') output('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)' % args) output('to file %s...' % filename) mesh = gen_block_mesh(args[1::2], args[0::2], (0.0, 0.0, 0.0), name=filename) mesh.write(filename, io='auto') output('...done') else: import subprocess, shutil, tempfile filename = filename + '.mesh' ensure_path(filename) output('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d' % args) output('to file %s...' 
% filename) f = open(os.path.join(defdir, 'quantum', 'sphere.geo')) tmp_dir = tempfile.mkdtemp() tmpfile = os.path.join(tmp_dir, 'sphere.geo.temp') ff = open(tmpfile, "w") ff.write(""" R = %i.0; n = %i.0; dens = %f; """ % args) ff.write(f.read()) f.close() ff.close() subprocess.call(['gmsh', '-3', tmpfile, '-format', 'mesh', '-o', filename]) shutil.rmtree(tmp_dir) output('...done') return filename def gen_mesh_from_string(mesh_name, mesh_dir): import re result = re.match('^\\s*([a-zA-Z]+)[:\\(]([^\\):]*)[:\\)](\\*)?\\s*$', mesh_name) if result is None: return mesh_name else: args = re.split(',', result.group(2)) kind = result.group(1) return gen_misc_mesh(mesh_dir, result.group(3)=='*', kind, args) def gen_mesh_from_geom(geo, a=None, verbose=False, refine=False): """ Runs mesh generator - tetgen for 3D or triangle for 2D meshes. Parameters ---------- geo : geometry geometry description a : int, optional a maximum area/volume constraint verbose : bool, optional detailed information refine : bool, optional refines mesh Returns ------- mesh : Mesh instance triangular or tetrahedral mesh """ import os.path as op import pexpect import tempfile import shutil tmp_dir = tempfile.mkdtemp() polyfilename = op.join(tmp_dir, 'meshgen.poly') # write geometry to poly file geo.to_poly_file(polyfilename) meshgen_call = {2: ('triangle', ''), 3: ('tetgen', 'BFENk')} params = "-ACp" params += "q" if refine else '' params += "V" if verbose else "Q" params += meshgen_call[geo.dim][1] if a is not None: params += "a%f" % (a) params += " %s" % (polyfilename) cmd = "%s %s" % (meshgen_call[geo.dim][0], params) if verbose: print("Generating mesh using", cmd) p=pexpect.run(cmd, timeout=None) bname, ext = op.splitext(polyfilename) if geo.dim == 2: mesh = Mesh.from_file(bname + '.1.node') if geo.dim == 3: mesh = Mesh.from_file(bname + '.1.vtk') shutil.rmtree(tmp_dir) return mesh def gen_mesh_from_voxels(voxels, dims, etype='q'): """ Generate FE mesh from voxels (volumetric data). 
Parameters ---------- voxels : array Voxel matrix, 1=material. dims : array Size of one voxel. etype : integer, optional 'q' - quadrilateral or hexahedral elements 't' - triangular or tetrahedral elements Returns ------- mesh : Mesh instance Finite element mesh. """ dims = nm.array(dims).squeeze() dim = len(dims) nddims = nm.array(voxels.shape) + 2 nodemtx = nm.zeros(nddims, dtype=nm.int32) if dim == 2: #iy, ix = nm.where(voxels.transpose()) iy, ix = nm.where(voxels) nel = ix.shape[0] if etype == 'q': nodemtx[ix,iy] += 1 nodemtx[ix + 1,iy] += 1 nodemtx[ix + 1,iy + 1] += 1 nodemtx[ix,iy + 1] += 1 elif etype == 't': nodemtx[ix,iy] += 2 nodemtx[ix + 1,iy] += 1 nodemtx[ix + 1,iy + 1] += 2 nodemtx[ix,iy + 1] += 1 nel *= 2 elif dim == 3: #iy, ix, iz = nm.where(voxels.transpose(1, 0, 2)) iy, ix, iz = nm.where(voxels) nel = ix.shape[0] if etype == 'q': nodemtx[ix,iy,iz] += 1 nodemtx[ix + 1,iy,iz] += 1 nodemtx[ix + 1,iy + 1,iz] += 1 nodemtx[ix,iy + 1,iz] += 1 nodemtx[ix,iy,iz + 1] += 1 nodemtx[ix + 1,iy,iz + 1] += 1 nodemtx[ix + 1,iy + 1,iz + 1] += 1 nodemtx[ix,iy + 1,iz + 1] += 1 elif etype == 't': nodemtx[ix,iy,iz] += 6 nodemtx[ix + 1,iy,iz] += 2 nodemtx[ix + 1,iy + 1,iz] += 2 nodemtx[ix,iy + 1,iz] += 2 nodemtx[ix,iy,iz + 1] += 2 nodemtx[ix + 1,iy,iz + 1] += 2 nodemtx[ix + 1,iy + 1,iz + 1] += 6 nodemtx[ix,iy + 1,iz + 1] += 2 nel *= 6 else: msg = 'incorrect voxel dimension! 
(%d)' % dim raise ValueError(msg) ndidx = nm.where(nodemtx) coors = nm.array(ndidx).transpose() * dims nnod = coors.shape[0] nodeid = -nm.ones(nddims, dtype=nm.int32) nodeid[ndidx] = nm.arange(nnod) # generate elements if dim == 2: elems = nm.array([nodeid[ix,iy], nodeid[ix + 1,iy], nodeid[ix + 1,iy + 1], nodeid[ix,iy + 1]]).transpose() elif dim == 3: elems = nm.array([nodeid[ix,iy,iz], nodeid[ix + 1,iy,iz], nodeid[ix + 1,iy + 1,iz], nodeid[ix,iy + 1,iz], nodeid[ix,iy,iz + 1], nodeid[ix + 1,iy,iz + 1], nodeid[ix + 1,iy + 1,iz + 1], nodeid[ix,iy + 1,iz + 1]]).transpose() if etype == 't': elems = elems_q2t(elems) eid = etype + str(dim) eltab = {'q2': 4, 'q3': 8, 't2': 3, 't3': 4} mesh = Mesh.from_data('voxel_data', coors, nm.ones((nnod,), dtype=nm.int32), [nm.ascontiguousarray(elems)], [nm.ones((nel,), dtype=nm.int32)], ['%d_%d' % (dim, eltab[eid])]) return mesh def main(): mesh = gen_block_mesh(nm.array((1.0, 2.0, 3.0)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), name='') mesh.write('0.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=False, open_angle = 0.0, name='') mesh.write('1.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.0, name='') mesh.write('2.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, name='') mesh.write('3.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=False, open_angle = 0.0, name='') mesh.write('4.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, name='') mesh.write('5.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 
2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, non_uniform=True, name='') mesh.write('6.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.5, 0.5, 1.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, non_uniform=True, name='') mesh.write('7.mesh', io = 'auto') if __name__ == '__main__': main()
lokik/sfepy
sfepy/mesh/mesh_generators.py
Python
bsd-3-clause
27,183
[ "VTK" ]
cb0bcb77fbb630bf61b2973dd52e57197b3a90194e0432e93a8214f78ea58fe8
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2021 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This file is part of Psi4. # # Psi4 is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, version 3. # # Psi4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with Psi4; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # import collections def sapt_psivars(): """Returns dictionary of PsiVariable definitions. """ pv1 = collections.OrderedDict() pv1['SAPT EXCHSCAL1'] = {'func': lambda x: 1.0 if x[0] < 1.0e-5 else x[0] / x[1], 'args': ['SAPT EXCH10 ENERGY', 'SAPT EXCH10(S^2) ENERGY']} # special treatment in pandas pv1['SAPT EXCHSCAL3'] = {'func': lambda x: x[0] ** 3, 'args': ['SAPT EXCHSCAL1']} pv1['SAPT EXCHSCAL'] = {'func': lambda x: x[0] ** x[1], 'args': ['SAPT EXCHSCAL1', 'SAPT ALPHA']} pv1['SAPT HF(2) ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]), 'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SAPT HF(2),U ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]), 'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY', 'SAPT IND20,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']} pv1['SAPT HF(2) ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,R ENERGY']} 
pv1['SAPT HF(2),U ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2),U ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,U ENERGY']} pv1['SAPT HF(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']} pv1['SAPT MP2(2) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[3] + x[4] + x[0] * (x[5] + x[6] + x[7] + x[8])), 'args': ['SAPT EXCHSCAL', 'SAPT MP2 CORRELATION ENERGY', 'SAPT ELST12,R ENERGY', # MP2 CORRELATION ENERGY renamed here from pandas since this is IE # renamed again SA --> SAPT 'SAPT IND22 ENERGY', 'SAPT DISP20 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT EXCH-DISP20 ENERGY']} pv1['SAPT MP2(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT MP2(2) ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']} pv1['SAPT MP4 DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4] + x[5], 'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY', 'SAPT DISP21 ENERGY', 'SAPT DISP22(SDQ) ENERGY', 'SAPT EST.DISP22(T) ENERGY']} pv1['SAPT CCD DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4], 'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP2(CCD) ENERGY', 'SAPT DISP22(S)(CCD) ENERGY', 'SAPT EST.DISP22(T)(CCD) ENERGY']} pv1['SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY']} pv1['SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT EXCH10 ENERGY']} pv1['SAPT0 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SAPT0 IND,U ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2),U ENERGY', 'SAPT IND20,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']} pv1['SAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2], 'args': ['SAPT 
EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} pv1['SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY', 'SAPT0 EXCH ENERGY', 'SAPT0 IND ENERGY', 'SAPT0 DISP ENERGY']} pv1['SSAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']} pv1['SSAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']} pv1['SSAPT0 IND ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2], 'args': ['SAPT EXCHSCAL3', 'SAPT0 IND ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SSAPT0 IND,U ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2], 'args': ['SAPT EXCHSCAL3', 'SAPT0 IND,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']} pv1['SSAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2], 'args': ['SAPT EXCHSCAL3', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} pv1['SSAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SSAPT0 ELST ENERGY', 'SSAPT0 EXCH ENERGY', 'SSAPT0 IND ENERGY', 'SSAPT0 DISP ENERGY']} pv1['SCS-SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']} pv1['SCS-SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']} pv1['SCS-SAPT0 IND ENERGY'] = {'func': sum, 'args': ['SAPT0 IND ENERGY']} pv1['SCS-SAPT0 IND,U ENERGY'] = {'func': sum, 'args': ['SAPT0 IND,U ENERGY']} pv1['SCS-SAPT0 DISP ENERGY'] = {'func': lambda x: (x[0] - x[3]) * (x[1] + x[2]) + x[3] * (x[4] + x[5]), 'args': [0.66, 'SAPT SAME-SPIN EXCH-DISP20 ENERGY', 'SAPT SAME-SPIN DISP20 ENERGY', 1.2, 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} # note no xs for SCS disp pv1['SCS-SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SCS-SAPT0 ELST ENERGY', 'SCS-SAPT0 EXCH ENERGY', 'SCS-SAPT0 IND ENERGY', 'SCS-SAPT0 DISP ENERGY']} pv1['SAPT2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']} pv1['SAPT2 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] 
* x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']} pv1['SAPT2 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2], 'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} pv1['SAPT2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2 ELST ENERGY', 'SAPT2 EXCH ENERGY', 'SAPT2 IND ENERGY', 'SAPT2 DISP ENERGY']} pv1['SAPT2+ ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']} pv1['SAPT2+ EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2+ IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']} pv1['SAPT2+ DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP']} pv1['SAPT2+ TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY', 'SAPT2+ EXCH ENERGY', 'SAPT2+ IND ENERGY', 'SAPT2+ DISP ENERGY']} pv1['SAPT2+(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']} pv1['SAPT2+(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']} pv1['SAPT2+(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY']} pv1['SAPT2+(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP']} pv1['SAPT2+(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) ELST ENERGY', 'SAPT2+(CCD) EXCH ENERGY', 'SAPT2+(CCD) IND ENERGY', 'SAPT2+(CCD) DISP ENERGY']} pv1['SAPT2+DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']} pv1['SAPT2+DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']} pv1['SAPT2+DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY', 'SAPT MP2(2) ENERGY']} pv1['SAPT2+DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+ DISP ENERGY']} 
pv1['SAPT2+DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 ELST ENERGY', 'SAPT2+DMP2 EXCH ENERGY', 'SAPT2+DMP2 IND ENERGY', 'SAPT2+DMP2 DISP ENERGY']} pv1['SAPT2+(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']} pv1['SAPT2+(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']} pv1['SAPT2+(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 IND ENERGY']} pv1['SAPT2+(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) DISP ENERGY']} pv1['SAPT2+(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD)DMP2 ELST ENERGY', 'SAPT2+(CCD)DMP2 EXCH ENERGY', 'SAPT2+(CCD)DMP2 IND ENERGY', 'SAPT2+(CCD)DMP2 DISP ENERGY']} pv1['SAPT2+(3) ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']} pv1['SAPT2+(3) EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2+(3) IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']} pv1['SAPT2+(3) DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP', 'SAPT DISP30 ENERGY']} pv1['SAPT2+(3) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY', 'SAPT2+(3) EXCH ENERGY', 'SAPT2+(3) IND ENERGY', 'SAPT2+(3) DISP ENERGY']} pv1['SAPT2+(3)(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']} pv1['SAPT2+(3)(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']} pv1['SAPT2+(3)(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY']} pv1['SAPT2+(3)(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP', 'SAPT DISP30 ENERGY']} pv1['SAPT2+(3)(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) ELST ENERGY', 'SAPT2+(3)(CCD) EXCH ENERGY', 'SAPT2+(3)(CCD) IND ENERGY', 
'SAPT2+(3)(CCD) DISP ENERGY']} pv1['SAPT2+(3)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']} pv1['SAPT2+(3)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']} pv1['SAPT2+(3)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY', 'SAPT MP2(2) ENERGY']} pv1['SAPT2+(3)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) DISP ENERGY']} pv1['SAPT2+(3)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 ELST ENERGY', 'SAPT2+(3)DMP2 EXCH ENERGY', 'SAPT2+(3)DMP2 IND ENERGY', 'SAPT2+(3)DMP2 DISP ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 IND ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) DISP ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD)DMP2 ELST ENERGY', 'SAPT2+(3)(CCD)DMP2 EXCH ENERGY', 'SAPT2+(3)(CCD)DMP2 IND ENERGY', 'SAPT2+(3)(CCD)DMP2 DISP ENERGY']} pv1['SAPT2+3 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']} pv1['SAPT2+3 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2+3 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5] + x[6] + x[0] * x[7], 'args': ['SAPT EXCHSCAL', 'SAPT HF(3) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']} pv1['SAPT2+3 DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT MP4 DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']} 
pv1['SAPT2+3 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY', 'SAPT2+3 EXCH ENERGY', 'SAPT2+3 IND ENERGY', 'SAPT2+3 DISP ENERGY']} pv1['SAPT2+3(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']} pv1['SAPT2+3(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']} pv1['SAPT2+3(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY']} pv1['SAPT2+3(CCD) DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT CCD DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']} pv1['SAPT2+3(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) ELST ENERGY', 'SAPT2+3(CCD) EXCH ENERGY', 'SAPT2+3(CCD) IND ENERGY', 'SAPT2+3(CCD) DISP ENERGY']} pv1['SAPT2+3DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']} pv1['SAPT2+3DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']} pv1['SAPT2+3DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY', 'SAPT MP2(3) ENERGY']} pv1['SAPT2+3DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3 DISP ENERGY']} pv1['SAPT2+3DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 ELST ENERGY', 'SAPT2+3DMP2 EXCH ENERGY', 'SAPT2+3DMP2 IND ENERGY', 'SAPT2+3DMP2 DISP ENERGY']} pv1['SAPT2+3(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']} pv1['SAPT2+3(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']} pv1['SAPT2+3(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 IND ENERGY']} pv1['SAPT2+3(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) DISP ENERGY']} pv1['SAPT2+3(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD)DMP2 ELST ENERGY', 'SAPT2+3(CCD)DMP2 EXCH ENERGY', 'SAPT2+3(CCD)DMP2 IND ENERGY', 'SAPT2+3(CCD)DMP2 DISP ENERGY']} return pv1
ashutoshvt/psi4
psi4/driver/qcdb/psivardefs.py
Python
lgpl-3.0
15,690
[ "Psi4" ]
0e55901597e7cea5da747bd8c4aba85fddad6c8b06238dbe80755aa61d466959
#!/usr/bin/env python # # $File: IdTagger.py $ # # This file is part of simuPOP, a forward-time population genetics # simulation environment. Please visit http://simupop.sourceforge.net # for details. # # Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # This script is an example in the simuPOP user's guide. Please refer to # the user's guide (http://simupop.sourceforge.net/manual) for a detailed # description of this example. # import simuPOP as sim pop = sim.Population(10, infoFields='ind_id', ancGen=1) pop.evolve( initOps=sim.IdTagger(), matingScheme=sim.RandomSelection(ops=[ sim.CloneGenoTransmitter(), sim.IdTagger(), ]), gen = 1 ) print([int(ind.ind_id) for ind in pop.individuals()]) pop.useAncestralGen(1) print([int(ind.ind_id) for ind in pop.individuals()]) sim.tagID(pop) # re-assign ID print([int(ind.ind_id) for ind in pop.individuals()])
BoPeng/simuPOP
docs/IdTagger.py
Python
gpl-2.0
1,541
[ "VisIt" ]
f29f239c21beca38717930fe5d3d79be0ce67a1937d72ceb9c36d745c63b0799
import os, copy from bashlex import yacc, tokenizer, state, ast, subst, flags, errors, heredoc def _partsspan(parts): return parts[0].pos[0], parts[-1].pos[1] tokens = [e.name for e in tokenizer.tokentype] precedence = ( ('left', 'AMPERSAND', 'SEMICOLON', 'NEWLINE', 'EOF'), ('left', 'AND_AND', 'OR_OR'), ('right', 'BAR', 'BAR_AND') ) def p_inputunit(p): '''inputunit : simple_list simple_list_terminator | NEWLINE | error NEWLINE | EOF''' # XXX if p.lexer._parserstate & flags.parser.CMDSUBST: p.lexer._parserstate.add(flags.parser.EOFTOKEN) if isinstance(p[1], ast.node): p[0] = p[1] # accept right here in case the input contains more lines that are # not part of the current command p.accept() def p_word_list(p): '''word_list : WORD | word_list WORD''' parserobj = p.context if len(p) == 2: p[0] = [_expandword(parserobj, p.slice[1])] else: p[0] = p[1] p[0].append(_expandword(parserobj, p.slice[2])) def p_redirection_heredoc(p): '''redirection : LESS_LESS WORD | NUMBER LESS_LESS WORD | REDIR_WORD LESS_LESS WORD | LESS_LESS_MINUS WORD | NUMBER LESS_LESS_MINUS WORD | REDIR_WORD LESS_LESS_MINUS WORD''' parserobj = p.context assert isinstance(parserobj, _parser) output = ast.node(kind='word', word=p[len(p)-1], parts=[], pos=p.lexspan(len(p)-1)) if len(p) == 3: p[0] = ast.node(kind='redirect', input=None, type=p[1], heredoc=None, output=output, pos=(p.lexpos(1), p.endlexpos(2))) else: p[0] = ast.node(kind='redirect', input=p[1], type=p[2], heredoc=None, output=output, pos=(p.lexpos(1), p.endlexpos(3))) if p.slice[len(p)-2].ttype == tokenizer.tokentype.LESS_LESS: parserobj.redirstack.append((p[0], False)) else: parserobj.redirstack.append((p[0], True)) def p_redirection(p): '''redirection : GREATER WORD | LESS WORD | NUMBER GREATER WORD | NUMBER LESS WORD | REDIR_WORD GREATER WORD | REDIR_WORD LESS WORD | GREATER_GREATER WORD | NUMBER GREATER_GREATER WORD | REDIR_WORD GREATER_GREATER WORD | GREATER_BAR WORD | NUMBER GREATER_BAR WORD | REDIR_WORD GREATER_BAR WORD | LESS_GREATER 
WORD | NUMBER LESS_GREATER WORD | REDIR_WORD LESS_GREATER WORD | LESS_LESS_LESS WORD | NUMBER LESS_LESS_LESS WORD | REDIR_WORD LESS_LESS_LESS WORD | LESS_AND NUMBER | NUMBER LESS_AND NUMBER | REDIR_WORD LESS_AND NUMBER | GREATER_AND NUMBER | NUMBER GREATER_AND NUMBER | REDIR_WORD GREATER_AND NUMBER | LESS_AND WORD | NUMBER LESS_AND WORD | REDIR_WORD LESS_AND WORD | GREATER_AND WORD | NUMBER GREATER_AND WORD | REDIR_WORD GREATER_AND WORD | GREATER_AND DASH | NUMBER GREATER_AND DASH | REDIR_WORD GREATER_AND DASH | LESS_AND DASH | NUMBER LESS_AND DASH | REDIR_WORD LESS_AND DASH | AND_GREATER WORD | AND_GREATER_GREATER WORD''' parserobj = p.context if len(p) == 3: output = p[2] if p.slice[2].ttype == tokenizer.tokentype.WORD: output = _expandword(parserobj, p.slice[2]) p[0] = ast.node(kind='redirect', input=None, type=p[1], heredoc=None, output=output, pos=(p.lexpos(1), p.endlexpos(2))) else: output = p[3] if p.slice[3].ttype == tokenizer.tokentype.WORD: output = _expandword(parserobj, p.slice[3]) p[0] = ast.node(kind='redirect', input=p[1], type=p[2], heredoc=None, output=output, pos=(p.lexpos(1), p.endlexpos(3))) def _expandword(parser, tokenword): if parser._expansionlimit == -1: # we enter this branch in the following conditions: # - currently parsing a substitution as a result of an expansion # - the previous expansion had limit == 0 # # this means that this node is a descendant of a substitution in an # unexpanded word and will be filtered in the limit == 0 condition below # # (the reason we even expand when limit == 0 is to get quote removal) node = ast.node(kind='word', word=tokenword, pos=(tokenword.lexpos, tokenword.endlexpos), parts=[]) return node else: quoted = bool(tokenword.flags & flags.word.QUOTED) doublequoted = quoted and tokenword.value[0] == '"' # TODO set qheredocument parts, expandedword = subst._expandwordinternal(parser, tokenword, 0, doublequoted, 0, 0) # limit reached, don't include substitutions (still expanded to get # quote removal though) 
if parser._expansionlimit == 0: parts = [node for node in parts if 'substitution' not in node.kind] node = ast.node(kind='word', word=expandedword, pos=(tokenword.lexpos, tokenword.endlexpos), parts=parts) return node def p_simple_command_element(p): '''simple_command_element : WORD | ASSIGNMENT_WORD | redirection''' if isinstance(p[1], ast.node): p[0] = [p[1]] return parserobj = p.context p[0] = [_expandword(parserobj, p.slice[1])] # change the word node to an assignment if necessary if p.slice[1].ttype == tokenizer.tokentype.ASSIGNMENT_WORD: p[0][0].kind = 'assignment' def p_redirection_list(p): '''redirection_list : redirection | redirection_list redirection''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] p[0].append(p[2]) def p_simple_command(p): '''simple_command : simple_command_element | simple_command simple_command_element''' p[0] = p[1] if len(p) == 3: p[0].extend(p[2]) def p_command(p): '''command : simple_command | shell_command | shell_command redirection_list | function_def | coproc''' if isinstance(p[1], ast.node): p[0] = p[1] if len(p) == 3: assert p[0].kind == 'compound' p[0].redirects.extend(p[2]) assert p[0].pos[0] < p[0].redirects[-1].pos[1] p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1]) else: p[0] = ast.node(kind='command', parts=p[1], pos=_partsspan(p[1])) def p_shell_command(p): '''shell_command : for_command | case_command | WHILE compound_list DO compound_list DONE | UNTIL compound_list DO compound_list DONE | select_command | if_command | subshell | group_command | arith_command | cond_command | arith_for_command''' if len(p) == 2: p[0] = p[1] else: # while or until assert p[2].kind == 'list' parts = _makeparts(p) kind = parts[0].word assert kind in ('while', 'until') p[0] = ast.node(kind='compound', redirects=[], list=[ast.node(kind=kind, parts=parts, pos=_partsspan(parts))], pos=_partsspan(parts)) assert p[0].kind == 'compound' def _makeparts(p): parts = [] for i in range(1, len(p)): if isinstance(p[i], ast.node): 
parts.append(p[i]) elif isinstance(p[i], list): parts.extend(p[i]) elif isinstance(p.slice[i], tokenizer.token): if p.slice[i].ttype == tokenizer.tokentype.WORD: parserobj = p.context parts.append(_expandword(parserobj, p.slice[i])) else: parts.append(ast.node(kind='reservedword', word=p[i], pos=p.lexspan(i))) else: pass return parts def p_for_command(p): '''for_command : FOR WORD newline_list DO compound_list DONE | FOR WORD newline_list LEFT_CURLY compound_list RIGHT_CURLY | FOR WORD SEMICOLON newline_list DO compound_list DONE | FOR WORD SEMICOLON newline_list LEFT_CURLY compound_list RIGHT_CURLY | FOR WORD newline_list IN word_list list_terminator newline_list DO compound_list DONE | FOR WORD newline_list IN word_list list_terminator newline_list LEFT_CURLY compound_list RIGHT_CURLY | FOR WORD newline_list IN list_terminator newline_list DO compound_list DONE | FOR WORD newline_list IN list_terminator newline_list LEFT_CURLY compound_list RIGHT_CURLY''' parts = _makeparts(p) # find the operatornode that we might have there due to # list_terminator/newline_list and convert it to a reservedword so its # considered as part of the for loop for i, part in enumerate(parts): if part.kind == 'operator' and part.op == ';': parts[i] = ast.node(kind='reservedword', word=';', pos=part.pos) break # there could be only one in there... 
p[0] = ast.node(kind='compound', redirects=[], list=[ast.node(kind='for', parts=parts, pos=_partsspan(parts))], pos=_partsspan(parts)) def p_arith_for_command(p): '''arith_for_command : FOR ARITH_FOR_EXPRS list_terminator newline_list DO compound_list DONE | FOR ARITH_FOR_EXPRS list_terminator newline_list LEFT_CURLY compound_list RIGHT_CURLY | FOR ARITH_FOR_EXPRS DO compound_list DONE | FOR ARITH_FOR_EXPRS LEFT_CURLY compound_list RIGHT_CURLY''' raise NotImplementedError('arithmetic for') def p_select_command(p): '''select_command : SELECT WORD newline_list DO list DONE | SELECT WORD newline_list LEFT_CURLY list RIGHT_CURLY | SELECT WORD SEMICOLON newline_list DO list DONE | SELECT WORD SEMICOLON newline_list LEFT_CURLY list RIGHT_CURLY | SELECT WORD newline_list IN word_list list_terminator newline_list DO list DONE | SELECT WORD newline_list IN word_list list_terminator newline_list LEFT_CURLY list RIGHT_CURLY''' raise NotImplementedError('select command') def p_case_command(p): '''case_command : CASE WORD newline_list IN newline_list ESAC | CASE WORD newline_list IN case_clause_sequence newline_list ESAC | CASE WORD newline_list IN case_clause ESAC''' raise NotImplementedError ('case command') def p_function_def(p): '''function_def : WORD LEFT_PAREN RIGHT_PAREN newline_list function_body | FUNCTION WORD LEFT_PAREN RIGHT_PAREN newline_list function_body | FUNCTION WORD newline_list function_body''' parts = _makeparts(p) body = parts[-1] name = parts[ast.findfirstkind(parts, 'word')] p[0] = ast.node(kind='function', name=name, body=body, parts=parts, pos=_partsspan(parts)) def p_function_body(p): '''function_body : shell_command | shell_command redirection_list''' assert p[1].kind == 'compound' p[0] = p[1] if len(p) == 3: p[0].redirects.extend(p[2]) assert p[0].pos[0] < p[0].redirects[-1].pos[1] p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1]) def p_subshell(p): '''subshell : LEFT_PAREN compound_list RIGHT_PAREN''' lparen = ast.node(kind='reservedword', 
word=p[1], pos=p.lexspan(1)) rparen = ast.node(kind='reservedword', word=p[3], pos=p.lexspan(3)) parts = [lparen, p[2], rparen] p[0] = ast.node(kind='compound', list=parts, redirects=[], pos=_partsspan(parts)) def p_coproc(p): '''coproc : COPROC shell_command | COPROC shell_command redirection_list | COPROC WORD shell_command | COPROC WORD shell_command redirection_list | COPROC simple_command''' raise NotImplementedError('coproc') def p_if_command(p): '''if_command : IF compound_list THEN compound_list FI | IF compound_list THEN compound_list ELSE compound_list FI | IF compound_list THEN compound_list elif_clause FI''' # we currently don't distinguish the various lists that make up the # command, because it's not needed later on. if there will be a need # we can always add different nodes for elif/else. parts = _makeparts(p) p[0] = ast.node(kind='compound', redirects=[], list=[ast.node(kind='if', parts=parts, pos=_partsspan(parts))], pos=_partsspan(parts)) def p_group_command(p): '''group_command : LEFT_CURLY compound_list RIGHT_CURLY''' lcurly = ast.node(kind='reservedword', word=p[1], pos=p.lexspan(1)) rcurly = ast.node(kind='reservedword', word=p[3], pos=p.lexspan(3)) parts = [lcurly, p[2], rcurly] p[0] = ast.node(kind='compound', list=parts, redirects=[], pos=_partsspan(parts)) def p_arith_command(p): '''arith_command : ARITH_CMD''' raise NotImplementedError('arithmetic command') def p_cond_command(p): '''cond_command : COND_START COND_CMD COND_END''' raise NotImplementedError('cond command') def p_elif_clause(p): '''elif_clause : ELIF compound_list THEN compound_list | ELIF compound_list THEN compound_list ELSE compound_list | ELIF compound_list THEN compound_list elif_clause''' parts = [] for i in range(1, len(p)): if isinstance(p[i], ast.node): parts.append(p[i]) else: parts.append(ast.node(kind='reservedword', word=p[i], pos=p.lexspan(i))) p[0] = parts def p_case_clause(p): '''case_clause : pattern_list | case_clause_sequence pattern_list''' raise 
NotImplementedError('case clause') def p_pattern_list(p): '''pattern_list : newline_list pattern RIGHT_PAREN compound_list | newline_list pattern RIGHT_PAREN newline_list | newline_list LEFT_PAREN pattern RIGHT_PAREN compound_list | newline_list LEFT_PAREN pattern RIGHT_PAREN newline_list''' raise NotImplementedError('pattern list') def p_case_clause_sequence(p): '''case_clause_sequence : pattern_list SEMI_SEMI | case_clause_sequence pattern_list SEMI_SEMI | pattern_list SEMI_AND | case_clause_sequence pattern_list SEMI_AND | pattern_list SEMI_SEMI_AND | case_clause_sequence pattern_list SEMI_SEMI_AND''' raise NotImplementedError('case clause') def p_pattern(p): '''pattern : WORD | pattern BAR WORD''' raise NotImplementedError('pattern') def p_list(p): '''list : newline_list list0''' p[0] = p[2] def p_compound_list(p): '''compound_list : list | newline_list list1''' if len(p) == 2: p[0] = p[1] else: parts = p[2] if len(parts) > 1: p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts)) else: p[0] = parts[0] def p_list0(p): '''list0 : list1 NEWLINE newline_list | list1 AMPERSAND newline_list | list1 SEMICOLON newline_list''' parts = p[1] if len(parts) > 1 or p.slice[2].ttype != tokenizer.tokentype.NEWLINE: parts.append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2))) p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts)) else: p[0] = parts[0] def p_list1(p): '''list1 : list1 AND_AND newline_list list1 | list1 OR_OR newline_list list1 | list1 AMPERSAND newline_list list1 | list1 SEMICOLON newline_list list1 | list1 NEWLINE newline_list list1 | pipeline_command''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] # XXX newline p[0].append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2))) p[0].extend(p[len(p) - 1]) def p_simple_list_terminator(p): '''simple_list_terminator : NEWLINE | EOF''' pass def p_list_terminator(p): '''list_terminator : NEWLINE | SEMICOLON | EOF''' if p[1] == ';': p[0] = ast.node(kind='operator', op=';', 
pos=p.lexspan(1)) def p_newline_list(p): '''newline_list : empty | newline_list NEWLINE''' pass def p_simple_list(p): '''simple_list : simple_list1 | simple_list1 AMPERSAND | simple_list1 SEMICOLON''' tok = p.lexer heredoc.gatherheredocuments(tok) if len(p) == 3 or len(p[1]) > 1: parts = p[1] if len(p) == 3: parts.append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2))) p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts)) else: assert len(p[1]) == 1 p[0] = p[1][0] if (len(p) == 2 and p.lexer._parserstate & flags.parser.CMDSUBST and p.lexer._current_token.nopos() == p.lexer._shell_eof_token): # accept the input p.accept() def p_simple_list1(p): '''simple_list1 : simple_list1 AND_AND newline_list simple_list1 | simple_list1 OR_OR newline_list simple_list1 | simple_list1 AMPERSAND simple_list1 | simple_list1 SEMICOLON simple_list1 | pipeline_command''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] p[0].append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2))) p[0].extend(p[len(p) - 1]) def p_pipeline_command(p): '''pipeline_command : pipeline | BANG pipeline_command | timespec pipeline_command | timespec list_terminator | BANG list_terminator''' if len(p) == 2: if len(p[1]) == 1: p[0] = p[1][0] else: p[0] = ast.node(kind='pipeline', parts=p[1], pos=(p[1][0].pos[0], p[1][-1].pos[1])) else: # XXX timespec node = ast.node(kind='reservedword', word='!', pos=p.lexspan(1)) if p[2].kind == 'pipeline': p[0] = p[2] p[0].parts.insert(0, node) p[0].pos = (p[0].parts[0].pos[0], p[0].parts[-1].pos[1]) else: p[0] = ast.node(kind='pipeline', parts=[node, p[2]], pos=(node.pos[0], p[2].pos[1])) def p_pipeline(p): '''pipeline : pipeline BAR newline_list pipeline | pipeline BAR_AND newline_list pipeline | command''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] p[0].append(ast.node(kind='pipe', pipe=p[2], pos=p.lexspan(2))) p[0].extend(p[len(p) - 1]) def p_timespec(p): '''timespec : TIME | TIME TIMEOPT | TIME TIMEOPT TIMEIGN''' raise NotImplementedError('time 
command') def p_empty(p): '''empty :''' pass def p_error(p): assert isinstance(p, tokenizer.token) if p.ttype == tokenizer.tokentype.EOF: raise errors.ParsingError('unexpected EOF', p.lexer.source, len(p.lexer.source)) else: raise errors.ParsingError('unexpected token %r' % p.value, p.lexer.source, p.lexpos) yaccparser = yacc.yacc(tabmodule='bashlex.parsetab', outputdir=os.path.dirname(__file__), debug=False) # some hack to fix yacc's reduction on command substitutions yaccparser.action[45]['RIGHT_PAREN'] = -155 yaccparser.action[11]['RIGHT_PAREN'] = -148 for tt in tokenizer.tokentype: yaccparser.action[62][tt.name] = -1 yaccparser.action[63][tt.name] = -141 def parsesingle(s, strictmode=True, expansionlimit=None, convertpos=False): '''like parse, but only consumes a single top level node, e.g. parsing 'a\nb' will only return a node for 'a', leaving b unparsed''' p = _parser(s, strictmode=strictmode, expansionlimit=expansionlimit) tree = p.parse() if convertpos: ast.posconverter(s).visit(tree) return tree def parse(s, strictmode=True, expansionlimit=None, convertpos=False): '''parse the input string, returning a list of nodes top level node kinds are: - command - a simple command - pipeline - a series of simple commands - list - a series of one or more pipelines - compound - contains constructs for { list; }, (list), if, for.. leafs are word nodes (which in turn can also contain any of the aforementioned nodes due to command substitutions). when strictmode is set to False, we will: - skip reading a heredoc if we're at the end of the input expansionlimit is used to limit the amount of recursive parsing done due to command substitutions found during word expansion. 
''' p = _parser(s, strictmode=strictmode, expansionlimit=expansionlimit) parts = [p.parse()] class endfinder(ast.nodevisitor): def __init__(self): self.end = -1 def visitheredoc(self, node, value): self.end = node.pos[1] # find the 'real' end incase we have a heredoc in there ef = _endfinder() ef.visit(parts[-1]) index = max(parts[-1].pos[1], ef.end) + 1 while index < len(s): part = _parser(s[index:], strictmode=strictmode).parse() if not isinstance(part, ast.node): break ast.posshifter(index).visit(part) parts.append(part) ef = _endfinder() ef.visit(parts[-1]) index = max(parts[-1].pos[1], ef.end) + 1 if convertpos: for tree in parts: ast.posconverter(s).visit(tree) return parts class _parser(object): ''' this class is mainly used to provide context to the productions when we're in the middle of parsing. as a hack, we shove it into the YaccProduction context attribute to make it accessible. ''' def __init__(self, s, strictmode=True, expansionlimit=None, tokenizerargs=None): assert expansionlimit is None or isinstance(expansionlimit, int) self.s = s self._strictmode = strictmode self._expansionlimit = expansionlimit if tokenizerargs is None: tokenizerargs = {} self.parserstate = tokenizerargs.pop('parserstate', state.parserstate()) self.tok = tokenizer.tokenizer(s, parserstate=self.parserstate, strictmode=strictmode, **tokenizerargs) self.redirstack = self.tok.redirstack def parse(self): # yacc.yacc returns a parser object that is not reentrant, it has # some mutable state. we make a shallow copy of it so no # state spills over to the next call to parse on it theparser = copy.copy(yaccparser) tree = theparser.parse(lexer=self.tok, context=self) return tree class _endfinder(ast.nodevisitor): '''helper class to find the "real" end pos of a node that contains a heredoc. 
this is a hack because heredoc aren't really part of any node since they don't always follow the end of a node and might appear on a different line''' def __init__(self): self.end = -1 def visitheredoc(self, node, value): self.end = node.pos[1]
vikasgorur/bashlex
bashlex/parser.py
Python
gpl-3.0
24,684
[ "VisIt" ]
4dec7475af35c13a79a18200090d72c376d739e1048b0771f93e952d2a7f4772
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This is the "deploy" module for the validate_bom script. It is responsible for deploying spinnaker (via Halyard) remotely. """ from multiprocessing.pool import ThreadPool import json import logging import os import shutil import stat import subprocess import sys import tempfile import time import traceback from buildtool import ( add_parser_argument, check_subprocess, check_subprocess_sequence, check_subprocesses_to_logfile, scan_logs_for_install_errors, run_subprocess, write_to_path, raise_and_log_error, ConfigError, ExecutionError, ResponseError, TimeoutError, UnexpectedError) SUPPORTED_DEPLOYMENT_TYPES = ['localdebian', 'distributed'] SUPPORTED_DISTRIBUTED_PLATFORMS = ['kubernetes', 'kubernetes_v2'] HALYARD_SERVICES = ['halyard'] SPINNAKER_SERVICES = [ 'clouddriver', 'echo', 'fiat', 'front50', 'gate', 'igor', 'orca', 'rosco', 'kayenta', 'monitoring' ] def decode_json(data): try: return json.JSONDecoder().decode(data) except (ValueError, TypeError) as err: logging.error('Error decoding JSON: %s\n%s\n', err.message, data) raise def replace_ha_services(services, options): """Replace services with their HA services. Given a list of services and options, return a new list of services where services that are enabled for HA are replaced with their HA counterparts. 
""" transform_map = {} if options.ha_clouddriver_enabled: transform_map['clouddriver'] = \ ['clouddriver-caching', 'clouddriver-rw', 'clouddriver-ro', 'clouddriver-ro-deck'] if options.ha_echo_enabled: transform_map['echo'] = \ ['echo-scheduler', 'echo-worker'] transformed_services = [] for service in services: transformed_services.extend(transform_map.get(service, [service])) return transformed_services def ensure_empty_ssh_key(path, user): """Ensure there is an ssh key at the given path. It is assumed that this key has no password associated with it so we can use it for ssh/scp. """ if os.path.exists(path): return logging.debug('Creating %s SSH key for user "%s"', path, user) check_subprocess_sequence([ 'ssh-keygen -N "" -t rsa -f {path} -C {user}'.format( path=path, user=user), 'sed "s/^ssh-rsa/{user}:ssh-rsa/" -i {path}'.format( user=user, path=path) ]) def write_data_to_secure_path(data, path=None, is_script=False): """Write data to a path with user-only access. Args: path: [string] Path to file or None to create a temporary file. is_script: [bool] True if data is a script (and should be executable). Returns: path to file written. """ # pylint: disable=invalid-name if path is None: fd, path = tempfile.mkstemp() else: fd = os.open(path, os.O_WRONLY | os.O_CREAT) maybe_executable = stat.S_IXUSR if is_script else 0 flags = stat.S_IRUSR | stat.S_IWUSR | maybe_executable os.fchmod(fd, flags) os.write(fd, data.encode('utf-8')) os.close(fd) return path def write_script_to_path(script, path=None): """Write the script to a path as a secure, user-only executable file. Args: script: [list] Sequence of bash statements to script. path: [string] Path to file to write, or None to create a temp file. 
Returns: path written """ data = ['#!/bin/bash', 'set -e', 'set -x'] data.extend(script) return write_data_to_secure_path( '\n'.join(data), path=path, is_script=True) class BaseValidateBomDeployer(object): """Base class/interface for Deployer that uses Halyard to deploy Spinnaker. This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. """ @property def options(self): """The options bound at construction.""" return self.__options @property def metrics(self): """The metrics regisry bound at construction.""" return self.__metrics @property def hal_user(self): """Returns the Halyard User within the deployment VM.""" return self.__hal_user def __init__(self, options, metrics, runtime_class=None): if runtime_class: self.__spinnaker_deployer = runtime_class(options, metrics) else: self.__spinnaker_deployer = self self.__options = options self.__metrics = metrics self.__hal_user = options.deploy_hal_user logging.info('hal_user="%s"', self.__hal_user) def make_port_forward_command(self, service, local_port, remote_port): """Return the command used to forward ports to the given service. Returns: array of commandline arguments to create a subprocess with. """ return self.__spinnaker_deployer.do_make_port_forward_command( service, local_port, remote_port) def deploy(self, init_script, config_script, files_to_upload): """Deploy and configure spinnaker. The deployment configuration is specified via the bound options. The runtime configuration is passed to the call. Args: init_script: [list] The sequence of bash commands to run in order to prepare the host before installing halyard and configuring. config_script: [list] The sequence of bash commands to run in order to configure spinnaker. file_to_upload: [set] A set of file paths to upload to the deployed instance before running the init_script. Presumably these will be referenced by the init_script or config_script. 
""" deploy_labels = {} self.__metrics.track_and_time_call( 'DeploySpinnaker', deploy_labels, self.__metrics.default_determine_outcome_labels, self.__wrapped_deploy, init_script, config_script, files_to_upload) def __wrapped_deploy(self, init_script, config_script, files_to_upload): platform = self.options.deploy_hal_platform logging.info('Deploying with hal on %s...', platform) script = list(init_script) self.add_install_hal_script_statements(script) if self.options.halyard_config_bucket_credentials: files_to_upload.add(self.options.halyard_config_bucket_credentials) self.add_inject_halyard_application_default_credentials( self.options.halyard_config_bucket_credentials, script) self.add_platform_deploy_script_statements(script) # Add the version first to avoid warnings or facilitate checks # with the configuration commands script.append('hal -q --log=info config version edit' ' --version {version}' .format(version=self.options.deploy_version)) script.extend(config_script) self.add_hal_deploy_script_statements(script) # Dump the hal config so we log it for posterity script.append('hal -q --log=info config') script.append('sudo hal -q --log=info deploy apply') self.add_post_deploy_statements(script) if not self.options.deploy_deploy: logging.warning('Skipping deployment because --deploy_deploy=false\n') return self.do_deploy(script, files_to_upload) logging.info('Finished deploying to %s', platform) def undeploy(self): undeploy_labels = {} self.__metrics.track_and_time_call( 'UndeploySpinnaker', undeploy_labels, self.__metrics.default_determine_outcome_labels, self.__wrapped_undeploy) def __wrapped_undeploy(self): """Remove the spinnaker deployment and reclaim resources.""" # Consider also undeploying from options.deploy_spinnaker_platform # with self.__runtime_deployer platform = self.options.deploy_hal_platform logging.info('Undeploying hal on %s...', platform) if not self.options.deploy_undeploy: logging.warning( 'Skipping undeploy because 
--deploy_undeploy=false\n') return self.do_undeploy() logging.info('Finished undeploying from %s', platform) def collect_logs(self): """Collect all the microservice log files.""" log_dir = os.path.join(self.options.log_dir, 'service_logs') if not os.path.exists(log_dir): os.makedirs(log_dir) def fetch_service_log(service): try: deployer = (self if service in HALYARD_SERVICES else self.__spinnaker_deployer) deployer.do_fetch_service_log_file(service, log_dir) except Exception as ex: message = 'Error fetching log for service "{service}": {ex}'.format( service=service, ex=ex) if ex.message.find('No such file') >= 0: message += '\n Perhaps the service never started.' # dont log since the error was already captured. else: logging.error(message) message += '\n{trace}'.format( trace=traceback.format_exc()) write_data_to_secure_path( message, os.path.join(log_dir, service + '.log')) logging.info('Collecting server log files into "%s"', log_dir) all_services = replace_ha_services(SPINNAKER_SERVICES, self.options) all_services.extend(HALYARD_SERVICES) thread_pool = ThreadPool(len(all_services)) thread_pool.map(fetch_service_log, all_services) thread_pool.terminate() def do_make_port_forward_command(self, service, local_port, remote_port): """Hook for concrete platforms to return the port forwarding command. Returns: array of commandline arguments to create a subprocess with. """ raise NotImplementedError(self.__class__.__name__) def do_deploy(self, script, files_to_upload): """Hook for specialized platforms to implement the concrete deploy().""" # pylint: disable=unused-argument raise NotImplementedError(self.__class__.__name__) def do_undeploy(self): """Hook for specialized platforms to implement the concrete undeploy().""" raise NotImplementedError(self.__class__.__name__) def add_inject_halyard_application_default_credentials( self, local_path, script): """Inject google application credentials into halyards startup script. 
This is only so we can install halyard against a halyard test repo. We're doing this injection because halyard does not explicitly support this use case from installation, though does support the use of application default credentials. """ script.append('first=$(head -1 /opt/halyard/bin/halyard)') script.append( 'inject="export GOOGLE_APPLICATION_CREDENTIALS={path}"' .format(path='$(pwd)/' + os.path.basename(local_path))) script.append('remaining=$(tail -n +2 /opt/halyard/bin/halyard)') script.append('cat <<EOF | sudo tee /opt/halyard/bin/halyard\n' '$first\n$inject\n$remaining\n' 'EOF') script.append('sudo chmod 755 /opt/halyard/bin/halyard') # Kill running halyard so it restarts with credentials. # This method awaiting support in halyard to terminate the job. # In the meantime, we'll kill all the java processes. Since this # is run on a newly provisioned VM, it should only be halyard. script.append('echo "Using nuclear option to stop existing halyard"') script.append('killall java || true') # hack script.append('echo "Restarting halyard..."') script.append('sudo su -c "hal -v" -s /bin/bash {user}' .format(user=self.options.deploy_hal_user)) script.append('for i in `seq 1 30`; do' ' if hal --ready &> /dev/null; then break; fi;' ' sleep 1; done') def add_install_hal_script_statements(self, script): """Adds the sequence of Bash statements to fetch and install halyard.""" options = self.options script.append('curl -s -O {url}'.format(url=options.halyard_install_script)) install_params = ['-y'] if options.halyard_config_bucket: install_params.extend(['--config-bucket', options.halyard_config_bucket]) if options.halyard_bucket_base_url: install_params.extend(['--halyard-bucket-base-url', options.halyard_bucket_base_url]) if options.halyard_version: install_params.extend(['--version', options.halyard_version]) if self.hal_user: install_params.extend(['--user', self.hal_user]) if options.spinnaker_repository: install_params.extend( ['--spinnaker-repository', 
options.spinnaker_repository]) if options.spinnaker_registry: install_params.extend( ['--spinnaker-registry', options.spinnaker_registry]) script.append('sudo bash ./InstallHalyard.sh {install_params}' .format(install_params=' '.join(install_params))) return script def add_platform_deploy_script_statements(self, script): """Hook for deployment platform to add specific hal statements.""" pass def add_hal_deploy_script_statements(self, script): """Adds the hal deploy statements prior to "apply".""" options = self.options type_args = ['--type', options.deploy_spinnaker_type] if options.deploy_spinnaker_type == 'distributed': # Kubectl required for the next hal command, so install it if needed. script.append( 'if ! `which kubectl >& /dev/null`; then' ' curl -LO https://storage.googleapis.com/kubernetes-release/release' '/$(curl -s https://storage.googleapis.com/kubernetes-release/release' '/stable.txt)/bin/linux/amd64/kubectl' '; chmod +x ./kubectl' '; sudo mv ./kubectl /usr/local/bin/kubectl' '; fi') if options.injected_deploy_spinnaker_account: type_args.extend(['--account-name', options.injected_deploy_spinnaker_account]) if options.deploy_distributed_platform == 'kubernetes': script.append('hal -q --log=info config deploy edit --location {namespace}' .format(namespace=self.options.deploy_k8s_namespace)) elif options.deploy_distributed_platform == 'kubernetes_v2': script.append('hal -q --log=info config deploy edit --location {namespace}' .format(namespace=self.options.deploy_k8s_v2_namespace)) script.append('hal -q --log=info config deploy edit {args}' .format(args=' '.join(type_args))) def add_post_deploy_statements(self, script): """Add any statements following "hal deploy apply".""" pass class KubernetesValidateBomDeployer(BaseValidateBomDeployer): """Concrete deployer used to deploy Hal onto Google Cloud Platform. This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. 
""" def __init__(self, options, metrics, **kwargs): super(KubernetesValidateBomDeployer, self).__init__( options, metrics, **kwargs) @classmethod def init_platform_argument_parser(cls, parser, defaults): """Adds custom configuration parameters to argument parser. This is a helper function for the free function init_argument_parser(). """ add_parser_argument( parser, 'deploy_k8s_namespace', defaults, 'spinnaker', help='Namespace for the account Spinnaker is deployed into.') @classmethod def validate_options_helper(cls, options): """Adds custom configuration parameters to argument parser. This is a helper function for make_deployer(). """ if options.deploy_distributed_platform != 'kubernetes': return if not options.k8s_account_name: raise_and_log_error( ConfigError('--deploy_distributed_platform="kubernetes" requires' ' a --k8s_account_name be configured.')) if hasattr(options, "injected_deploy_spinnaker_account"): raise_and_log_error( UnexpectedError('deploy_spinnaker_account was already set to "{0}"' .format(options.injected_deploy_spinnaker_account))) options.injected_deploy_spinnaker_account = options.k8s_account_name def __get_pod_name(self, k8s_namespace, service): """Determine the pod name for the deployed service.""" options = self.options flags = ' --namespace {namespace} --logtostderr=false'.format( namespace=k8s_namespace) kubectl_command = 'kubectl {context} get pods {flags}'.format( context=('--context {0}'.format(options.k8s_account_context) if options.k8s_account_context else ''), flags=flags) retcode, stdout = run_subprocess( '{command}' ' | gawk -F "[[:space:]]+" "/{service}-v/ {{print \\$1}}"' ' | tail -1'.format( command=kubectl_command, service=service), shell=True) pod = stdout.strip() if not pod: message = 'There is no pod for "{service}" in {namespace}'.format( service=service, namespace=k8s_namespace) raise_and_log_error(ConfigError(message, cause='NoPod')) if retcode != 0: message = 'Could not find pod for "{service}".: {error}'.format( 
service=service, error=stdout.strip()) raise_and_log_error(ExecutionError(message, program='kubectl')) else: logging.debug('pod "%s" -> %s', service, stdout) return stdout.strip() def do_make_port_forward_command(self, service, local_port, remote_port): """Implements interface.""" options = self.options k8s_namespace = options.deploy_k8s_namespace service_pod = self.__get_pod_name(k8s_namespace, service) return [ 'kubectl', '--namespace', k8s_namespace, 'port-forward', service_pod, '{local}:{remote}'.format(local=local_port, remote=remote_port) ] def do_deploy(self, script, files_to_upload): """Implements the BaseBomValidateDeployer interface.""" # This is not yet supported in this script. # To deploy spinnaker to kubernetes, you need to go through # a halyard VM deployment. Halyard itself can be deployed to K8s. # This script doesnt. super(KubernetesValidateBomDeployer, self).do_deploy( script, files_to_upload) def do_undeploy(self): """Implements the BaseBomValidateDeployer interface.""" super(KubernetesValidateBomDeployer, self).do_undeploy() # kubectl delete namespace spinnaker def do_fetch_service_log_file(self, service, log_dir): """Retrieve log file for the given service's pod. Args: service: [string] The service's log to get log_dir: [string] The directory name to write the logs into. 
""" if service == 'monitoring': # monitoring is in a sidecar of each service return options = self.options k8s_namespace = options.deploy_k8s_namespace service_pod = self.__get_pod_name(k8s_namespace, service) containers = ['spin-' + service] if options.monitoring_install_which: containers.append('spin-monitoring-daemon') for container in containers: if container == 'spin-monitoring-daemon': path = os.path.join(log_dir, service + '_monitoring.log') else: path = os.path.join(log_dir, service + '.log') retcode, stdout = run_subprocess( 'kubectl -n {namespace} -c {container} {context} logs {pod}' .format(namespace=k8s_namespace, container=container, context=('--context {0}'.format(options.k8s_account_context) if options.k8s_account_context else ''), pod=service_pod), shell=True) write_data_to_secure_path(stdout, path) class KubernetesV2ValidateBomDeployer(BaseValidateBomDeployer): """Concrete deployer used to deploy Hal onto Google Cloud Platform. This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. """ def __init__(self, options, metrics, **kwargs): super(KubernetesV2ValidateBomDeployer, self).__init__( options, metrics, **kwargs) @classmethod def init_platform_argument_parser(cls, parser, defaults): """Adds custom configuration parameters to argument parser. This is a helper function for the free function init_argument_parser(). """ add_parser_argument( parser, 'deploy_k8s_v2_namespace', defaults, 'spinnaker', help='Namespace for the account Spinnaker is deployed into.') @classmethod def validate_options_helper(cls, options): """Adds custom configuration parameters to argument parser. This is a helper function for make_deployer(). 
""" if options.deploy_distributed_platform != 'kubernetes_v2': return if not options.k8s_v2_account_name: raise_and_log_error( ConfigError('--deploy_distributed_platform="kubernetes_v2" requires' ' a --k8s_v2_account_name be configured.')) if hasattr(options, "injected_deploy_spinnaker_account"): raise_and_log_error( UnexpectedError('deploy_spinnaker_account was already set to "{0}"' .format(options.injected_deploy_spinnaker_account))) options.injected_deploy_spinnaker_account = options.k8s_v2_account_name def __get_pod_name(self, k8s_v2_namespace, service): """Determine the pod name for the deployed service.""" options = self.options flags = ' --namespace {namespace} --logtostderr=false'.format( namespace=k8s_v2_namespace) kubectl_command = 'kubectl {context} get pods {flags}'.format( context=('--context {0}'.format(options.k8s_v2_account_context) if options.k8s_v2_account_context else ''), flags=flags) retcode, stdout = run_subprocess( '{command}' ' | gawk -F "[[:space:]]+" "/{service}/ {{print \\$1}}"' ' | tail -1'.format( command=kubectl_command, service=service), shell=True) pod = stdout.strip() if not pod: message = 'There is no pod for "{service}" in {namespace}'.format( service=service, namespace=k8s_v2_namespace) raise_and_log_error(ConfigError(message, cause='NoPod')) if retcode != 0: message = 'Could not find pod for "{service}".: {error}'.format( service=service, error=stdout.strip()) raise_and_log_error(ExecutionError(message, program='kubectl')) else: logging.debug('pod "%s" -> %s', service, stdout) return stdout.strip() def do_make_port_forward_command(self, service, local_port, remote_port): """Implements interface.""" options = self.options k8s_v2_namespace = options.deploy_k8s_v2_namespace service_pod = self.__get_pod_name(k8s_v2_namespace, service) return [ 'kubectl', '--namespace', k8s_v2_namespace, 'port-forward', service_pod, '{local}:{remote}'.format(local=local_port, remote=remote_port) ] def do_deploy(self, script, files_to_upload): 
"""Implements the BaseBomValidateDeployer interface.""" # This is not yet supported in this script. # To deploy spinnaker to kubernetes, you need to go through # a halyard VM deployment. Halyard itself can be deployed to K8s. # This script doesnt. super(KubernetesV2ValidateBomDeployer, self).do_deploy( script, files_to_upload) def do_undeploy(self): """Implements the BaseBomValidateDeployer interface.""" super(KubernetesV2ValidateBomDeployer, self).do_undeploy() # kubectl delete namespace spinnaker def do_fetch_service_log_file(self, service, log_dir): """Retrieve log file for the given service's pod. Args: service: [string] The service's log to get log_dir: [string] The directory name to write the logs into. """ if service == 'monitoring': # monitoring is in a sidecar of each service return options = self.options k8s_v2_namespace = options.deploy_k8s_v2_namespace service_pod = self.__get_pod_name(k8s_v2_namespace, service) containers = [service] if options.monitoring_install_which: containers.append('monitoring-daemon') for container in containers: if container == 'monitoring-daemon': path = os.path.join(log_dir, service + '_monitoring.log') else: path = os.path.join(log_dir, service + '.log') retcode, stdout = run_subprocess( 'kubectl -n {namespace} -c {container} {context} logs {pod}' .format(namespace=k8s_v2_namespace, container=container, context=('--context {0}'.format(options.k8s_v2_account_context) if options.k8s_v2_account_context else ''), pod=service_pod), shell=True) write_data_to_secure_path(stdout, path) class GenericVmValidateBomDeployer(BaseValidateBomDeployer): """Concrete deployer used to deploy Hal onto Generic VM This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. 
""" @property def instance_ip(self): """The underlying IP address for the deployed instance.""" if not self.__instance_ip: self.__instance_ip = self.do_determine_instance_ip() return self.__instance_ip def set_instance_ip(self, value): """Sets the underlying IP address for the deployed instance.""" self.__instance_ip = value @property def ssh_key_path(self): """Returns the path to the ssh key for the deployment VM.""" return self.__ssh_key_path @ssh_key_path.setter def ssh_key_path(self, path): """Sets the path to the ssh key to use.""" self.__ssh_key_path = path def __init__(self, options, metrics, **kwargs): super(GenericVmValidateBomDeployer, self).__init__( options, metrics, **kwargs) self.__instance_ip = None self.__ssh_key_path = os.path.join(os.environ['HOME'], '.ssh', '{0}_empty_key'.format(self.hal_user)) def do_make_port_forward_command(self, service, local_port, remote_port): """Implements interface.""" return [ 'ssh', '-i', self.__ssh_key_path, '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '{user}@{ip}'.format(user=self.hal_user, ip=self.instance_ip), '-L', '{local_port}:localhost:{remote_port}'.format( local_port=local_port, remote_port=remote_port), '-N'] def do_determine_instance_ip(self): """Hook for determining the ip address of the hal instance.""" raise NotImplementedError(self.__class__.__name__) def do_create_vm(self, options): """Hook for concrete deployer to craete the VM.""" raise NotImplementedError(self.__class__.__name__) def __upload_files_helper(self, files_to_upload): copy_files = ( 'scp' ' -i {ssh_key_path}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {files}' ' {user}@{ip}:~' .format(ssh_key_path=self.__ssh_key_path, files=' '.join(files_to_upload), user=self.hal_user, ip=self.instance_ip)) logging.info('Copying deployment and configuration files') # pylint: disable=unused-variable for retry in range(0, 10): returncode, _ = run_subprocess(copy_files) if returncode == 0: break 
time.sleep(2) if returncode != 0: check_subprocess(copy_files) def __wait_for_ssh_helper(self): logging.info('Waiting for ssh %s@%s...', self.hal_user, self.instance_ip) end_time = time.time() + 30 while time.time() < end_time: retcode, _ = run_subprocess( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip}' ' "exit 0"' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.__ssh_key_path)) if retcode == 0: logging.info('%s is ready', self.instance_ip) break time.sleep(1) def attempt_install(self, script_path, retry): """Attempt to the install script on the remote instance. Bintray is flaky making this not uncommon to fail intermittently. Therefore, it is intended that this function may be called multiple times on the same instance. """ attempt_decorator = '+%d' % retry if retry > 0 else '' logging.info('Configuring deployment%s', ' retry=%d' % retry if retry else '') logfile = os.path.join( self.options.output_dir, 'install_spinnaker-%d%s.log' % (os.getpid(), attempt_decorator)) try: command = ( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip}' ' bash -l -c ./{script_name}' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.__ssh_key_path, script_name=os.path.basename(script_path))) check_subprocesses_to_logfile('install spinnaker', logfile, [command]) except ExecutionError as error: scan_logs_for_install_errors(logfile) return ExecutionError('Halyard deployment failed: %s' % error.message, program='install') except Exception as ex: return UnexpectedError(ex.message) return None def do_deploy(self, script, files_to_upload): """Implements the BaseBomValidateDeployer interface.""" options = self.options ensure_empty_ssh_key(self.__ssh_key_path, self.hal_user) script_parts = [] for path in files_to_upload: filename = os.path.basename(path) script_parts.append('sudo chmod 600 {file}'.format(file=filename)) script_parts.append('sudo chown 
{user}:{user} {file}' .format(user=self.hal_user, file=filename)) script_parts.extend(script) script_path = write_script_to_path(script_parts, path=None) files_to_upload.add(script_path) try: self.do_create_vm(options) self.__upload_files_helper(files_to_upload) self.__wait_for_ssh_helper() except Exception as ex: raise_and_log_error( ExecutionError('Caught "%s" provisioning vm' % ex.message, program='provisionVm')) finally: shutil.copyfile(script_path, os.path.join(options.output_dir, 'install-script.sh')) os.remove(script_path) files_to_upload.remove(script_path) # in case we need to retry error = None max_retries = 10 install_labels = {} for retry in range(0, max_retries): error = self.metrics.track_and_time_call( 'InstallSpinnaker', install_labels, self.metrics.determine_outcome_labels_from_error_result, self.attempt_install, script_path, retry) if not error: break logging.warning('Encountered an error during install: %s', error.message) if retry < (max_retries - 1): # Re-upload the files because script may have moved them around # so re-running the script wont find them anymore. 
self.__upload_files_helper(files_to_upload) logging.debug('Re-uploading install files...') # Clear halyard history clear_halyard_command = ( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip}' ' "hal deploy clean || true; echo "Y" | sudo ~/.hal/uninstall.sh || true;"' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.__ssh_key_path)) run_subprocess(clear_halyard_command) logging.debug('Waiting a minute before retrying...') time.sleep(60) if error: raise_and_log_error(error) def do_fetch_service_log_file(self, service, log_dir): """Implements the BaseBomValidateDeployer interface.""" write_data_to_secure_path('', os.path.join(log_dir, service + '.log')) retcode, stdout = run_subprocess( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip}' ' "if [[ -f /var/log/spinnaker/{service_dir}/{service_name}.log ]];' ' then cat /var/log/spinnaker/{service_dir}/{service_name}.log;' ' else command -v journalctl >/dev/null && journalctl -u {service_name}; fi"' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.ssh_key_path, service_dir=service, service_name=service)) if retcode != 0: logging.warning('Failed obtaining %s.log: %s', service, stdout) write_to_path(stdout, os.path.join(log_dir, service + '.log')) class AwsValidateBomDeployer(GenericVmValidateBomDeployer): """Concrete deployer used to deploy Hal onto Amazon EC2 This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. """ @classmethod def init_platform_argument_parser(cls, parser, defaults): """Adds custom configuration parameters to argument parser. This is a helper function for the free function init_argument_parser(). 
""" add_parser_argument( parser, 'deploy_aws_name', defaults, None, help='Value for name to tag instance with.') add_parser_argument( parser, 'deploy_aws_pem_path', defaults, None, help='Path to the EC2 PEM file.') add_parser_argument( parser, 'deploy_aws_security_group', defaults, None, help='Name of EC2 security group.') # Make this instead default to a search for the current image. # https://cloud-images.ubuntu.com/locator/ec2/ add_parser_argument( # 14.04 east-1 hvm:ebs parser, 'deploy_aws_ami', defaults, 'ami-0b542c1d', help='Image ID to run.') add_parser_argument( parser, 'deploy_aws_region', defaults, 'us-east-1', help='Region to deploy aws instance into.' ' Need an aws profile with this name') @classmethod def validate_options_helper(cls, options): """Adds custom configuration parameters to argument parser. This is a helper function for make_deployer(). """ if not options.deploy_aws_name: return if not options.deploy_aws_pem_path: raise_and_log_error(ConfigError('--deploy_aws_pem_path not specified.')) if not os.path.exists(options.deploy_aws_pem_path): raise_and_log_error( ConfigError('File "{path}" does not exist.' .format(path=options.deploy_aws_pem_path))) if not options.deploy_aws_security_group: raise_and_log_error( ConfigError('--deploy_aws_security_group not specified.')) if options.deploy_deploy: logging.debug('Looking for existing EC2 instance.') retcode, stdout = run_subprocess( 'aws ec2 describe-instances' ' --profile {region}' ' --filters "Name=tag:Name,Values={name}' ',Name=instance-state-name,Values=running"' .format(region=options.deploy_aws_region, name=options.deploy_aws_name)) if retcode != 0: raise_and_log_error( ExecutionError('Could not probe AWS: {0}'.format(stdout), program='aws')) reservations = decode_json(stdout).get('Reservations') # For some reason aws is ignoring our filter, so check again just to be # sure the reservations returned are the ones we asked for. 
for reservation in reservations or []: for tags in reservation.get('Tags', []): if (tags.get('Key') == 'Name' and tags.get('Value') == options.deploy_aws_name): raise_and_log_error( ConfigError( 'Running "{name}" already exists: {info}' .format(name=options.deploy_aws_name, info=reservation), cause='VmExists')) logging.warning('aws returned another instance - ignore: %s', reservation) def __init__(self, options, metrics, **kwargs): super(AwsValidateBomDeployer, self).__init__(options, metrics, **kwargs) self.__instance_id = None self.ssh_key_path = options.deploy_aws_pem_path def __find_instance_with_name(self, response, name): """Locate the desired instance in the response.""" if not response: logging.error('Unexpected empty response.') return {} for elem in response: for instance in elem['Instances']: for tag in instance.get('Tags', []): if tag['Key'] == 'Name' and tag['Value'] == name: return instance logging.error('No instance tagged %r found in response.', name) return {} def do_determine_instance_ip(self): """Implements GenericVmValidateBomDeployer interface.""" options = self.options logging.debug('Looking up EC2 instance IP.') retcode, stdout = run_subprocess( 'aws ec2 describe-instances' ' --profile {region}' ' --output json' ' --filters "Name=tag:Name,Values={name}' ',Name=instance-state-name,Values=running"' .format(region=options.deploy_aws_region, name=options.deploy_aws_name)) if retcode != 0: raise_and_log_error( ExecutionError('Could not determine public IP: {0}'.format(stdout), program='aws')) found = decode_json(stdout).get('Reservations') if not found: raise_and_log_error( ResponseError( '"{0}" is not running'.format(options.deploy_aws_name), server='ec2')) try: # Although we filtered, sometimes aws CLI returns others. 
instance = self.__find_instance_with_name( found, options.deploy_aws_name) public_ip = instance['PublicIpAddress'] except KeyError: logging.error('**** aws ec2 describe instances returned %r\n' 'expected "PublicIpAddress" for instance named %s', found, options.deploy_aws_name) raise logging.debug('Using public IP=%s', public_ip) return public_ip def do_create_vm(self, options): """Implements GenericVmValidateBomDeployer interface.""" pem_basename = os.path.basename(options.deploy_aws_pem_path) key_pair_name = os.path.splitext(pem_basename)[0] logging.info('Creating "%s" with key-pair "%s"', options.deploy_aws_name, key_pair_name) logging.debug('Creating new EC2 VM.') response = check_subprocess( 'aws ec2 run-instances' ' --profile {region}' ' --output json' ' --count 1' ' --image-id {ami}' ' --instance-type {type}' ' --key-name {key_pair_name}' ' --security-group-ids {sg}' .format(region=options.deploy_aws_region, ami=options.deploy_aws_ami, type='t2.xlarge', # 4 core x 16G key_pair_name=key_pair_name, sg=options.deploy_aws_security_group)) doc = decode_json(response) self.__instance_id = doc["Instances"][0]["InstanceId"] logging.info('Created instance id=%s to tag as "%s"', self.__instance_id, options.deploy_aws_name) # It's slow to start up and sometimes there is a race condition # in which describe-instances doesnt know about our id even though # create-tags did, or create-tags doesnt know abut the new id. 
time.sleep(5) end_time = time.time() + 10*60 did_tag = False while time.time() < end_time: if not did_tag: tag_retcode, _ = run_subprocess( 'aws ec2 create-tags' ' --region {region}' ' --resources {instance_id}' ' --tags "Key=Name,Value={name}"' .format(region=options.deploy_aws_region, instance_id=self.__instance_id, name=options.deploy_aws_name)) did_tag = tag_retcode == 0 if self.__is_ready(): return time.sleep(5) raise_and_log_error( TimeoutError('Giving up waiting for deployment.', cause='ec2')) def __is_ready(self): retcode, stdout = run_subprocess( 'aws ec2 describe-instances' ' --profile {region}' ' --output json' ' --instance-ids {id}' ' --query "Reservations[*].Instances[*]"' .format(region=self.options.deploy_aws_region, id=self.__instance_id)) if retcode != 0: logging.warning('Could not determine public IP: %s', stdout) return False # result is an array of reservations of ararys of instances. # but we only expect one, so fish out the first instance info info = decode_json(stdout)[0][0] state = info.get('State', {}).get('Name') if state in ['pending', 'initializing']: logging.info('Waiting for %s to finish initializing (state=%s)', self.__instance_id, state) return False if state in ['shutting-down', 'terminated']: raise_and_log_error(ResponseError('VM failed: {0}'.format(info), server='ec2')) logging.info('%s is in state %s', self.__instance_id, state) self.set_instance_ip(info.get('PublicIpAddress')) # attempt to ssh into it so we know we're accepting connections when # we return. It takes time to start logging.info('Checking if it is ready for ssh...') retcode, stdout = run_subprocess( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip}' ' "exit 0"' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.ssh_key_path)) if retcode == 0: logging.info('%s is ready', self.instance_ip) return True # Sometimes ssh accepts but authentication still fails # for a while. 
If this is the case, then try again # though the whole loop to distinguish VM going away. logging.info('%s\nNot yet ready...', stdout.strip()) return False def do_undeploy(self): """Implements the BaseBomValidateDeployer interface.""" options = self.options logging.info('Terminating "%s"', options.deploy_aws_name) if self.__instance_id: all_ids = [self.__instance_id] else: lookup_response = check_subprocess( 'aws ec2 describe-instances' ' --profile {region}' ' --filters "Name=tag:Name,Values={name}' ',Name=instance-state-name,Values=running"' .format(region=options.deploy_aws_region, name=options.deploy_aws_name)) exists = decode_json(lookup_response).get('Reservations') if not exists: logging.warning('"%s" is not running', options.deploy_aws_name) return all_ids = [] for reservation in exists: all_ids.extend([instance['InstanceId'] for instance in reservation['Instances']]) for instance_id in all_ids: logging.info('Terminating "%s" instanceId=%s', options.deploy_aws_name, instance_id) retcode, _ = run_subprocess( 'aws ec2 terminate-instances' ' --profile {region}' ' --instance-ids {id}' .format(region=options.deploy_aws_region, id=instance_id)) if retcode != 0: logging.warning('Failed to delete "%s" instanceId=%s', options.deploy_aws_name, instance_id) class AzureValidateBomDeployer(GenericVmValidateBomDeployer): """Concrete deployer used to deploy Hal onto Microsoft Azure This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. """ @classmethod def init_platform_argument_parser(cls, parser, defaults): """Adds custom configuration parameters to argument parser. This is a helper function for the free function init_argument_parser(). 
""" add_parser_argument( parser, 'deploy_azure_location', defaults, 'eastus', help='Azure region to deploy to if --deploy_hal_platform is "azure".') add_parser_argument( parser, 'deploy_azure_resource_group', defaults, None, help='Azure resource group to deploy to' ' if --deploy_hal_platform is "azure".') add_parser_argument( parser, 'deploy_azure_name', defaults, None, help='Azure VM name to deploy to if --deploy_hal_platform is "azure".') add_parser_argument( parser, 'deploy_azure_image', defaults, 'Canonical:UbuntuServer:14.04.5-LTS:latest', help='Azure image to deploy.') @classmethod def validate_options_helper(cls, options): """Adds custom configuration parameters to argument parser. This is a helper function for make_deployer(). """ if not options.deploy_azure_resource_group: raise_and_log_error( ConfigError('--deploy_azure_resource_group not specified.')) if not options.deploy_azure_name: raise_and_log_error( ConfigError('--deploy_azure_name not specified.')) if options.deploy_deploy: retcode, _ = run_subprocess( 'az vm show --resource-group {rg} --vm-name {name}' .format(rg=options.deploy_azure_resource_group, name=options.deploy_azure_name)) if retcode == 0: raise_and_log_error(UnexpectedError( '"{name}" already exists in resource-group={rg}' .format(name=options.deploy_azure_name, rg=options.deploy_azure_resource_group))) def do_create_vm(self, options): """Implements GenericVmValidateBomDeployer interface.""" logging.info('Creating "%s" in resource-group "%s"', options.deploy_azure_name, options.deploy_azure_resource_group) response = check_subprocess( 'az vm create' ' --name {name}' ' --resource-group {rg}' ' --location {location}' ' --image {image}' ' --use-unmanaged-disk' ' --storage-sku Standard_LRS' ' --size Standard_D12_v2_Promo' ' --ssh-key-value {ssh_key_path}.pub' .format(name=options.deploy_azure_name, rg=options.deploy_azure_resource_group, location=options.deploy_azure_location, image=options.deploy_azure_image, 
ssh_key_path=self.ssh_key_path)) self.set_instance_ip(decode_json(response)['publicIpAddress']) def do_undeploy(self): """Implements the BaseBomValidateDeployer interface.""" options = self.options if options.deploy_spinnaker_type == 'distributed': run_subprocess( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip} sudo hal -q --log=info deploy clean' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.ssh_key_path)) check_subprocess( 'az vm delete -y' ' --name {name}' ' --resource-group {rg}' .format(name=options.deploy_azure_name, rg=options.deploy_azure_resource_group)) def do_determine_instance_ip(self): """Implements GenericVmValidateBomDeployer interface.""" options = self.options retcode, stdout = run_subprocess( 'az vm list-ip-addresses --name {name} --resource-group {group}'.format( name=options.deploy_azure_name, group=options.deploy_azure_resource_group)) if retcode != 0: raise_and_log_error( ExecutionError('Could not determine public IP: {0}'.format(stdout), program='az')) found = decode_json(stdout)[0].get('virtualMachine') if not found: raise_and_log_error( ResponseError( '"{0}" is not running'.format(options.deploy_azure_name), server='az')) return found['network']['publicIpAddresses'][0]['ipAddress'] class GoogleValidateBomDeployer(GenericVmValidateBomDeployer): """Concrete deployer used to deploy Hal onto Google Cloud Platform. This class is not intended to be constructed directly. Instead see the free function make_deployer() in this module. """ def do_determine_instance_ip(self): """Implements GenericVmValidateBomDeployer interface.""" options = self.options # Note: this used to dup_stderr_to_stdout=False with an older API # presumably this wont return stderr anymore or it will corrupt the json. 
logging.debug('Looking up IP address for "%s"...', options.deploy_google_instance) response = check_subprocess( 'gcloud compute instances describe' ' --format json' ' --account {gcloud_account}' ' --project {project} --zone {zone} {instance}' .format(gcloud_account=options.deploy_hal_google_service_account, project=options.deploy_google_project, zone=options.deploy_google_zone, instance=options.deploy_google_instance), # Setting this to PIPE means it will get logged instead of getting # commingled with stdout into the response stderr=subprocess.PIPE) nic = decode_json(response)['networkInterfaces'][0] use_internal_ip = options.deploy_google_use_internal_ip if use_internal_ip: logging.debug('Using internal IP=%s', nic['networkIP']) return nic['networkIP'] ip = nic['accessConfigs'][0]['natIP'] logging.debug('Using natIP=%s', ip) return ip def __init__(self, options, metrics, **kwargs): super(GoogleValidateBomDeployer, self).__init__(options, metrics, **kwargs) @classmethod def init_platform_argument_parser(cls, parser, defaults): """Adds custom configuration parameters to argument parser. This is a helper function for the free function init_argument_parser(). 
""" add_parser_argument( parser, 'deploy_google_project', defaults, None, help='Google project to deploy to if --deploy_hal_platform is "gce".') add_parser_argument( parser, 'deploy_google_zone', defaults, 'us-central1-f', help='Google zone to deploy to if --deploy_hal_platform is "gce".') add_parser_argument( parser, 'deploy_google_instance', defaults, None, help='Google instance to deploy to if --deploy_hal_platform is "gce".') add_parser_argument( parser, 'deploy_google_machine_type', defaults, 'n1-standard-4', help='Google machine type if --deploy_hal_platform is "gce".') add_parser_argument( parser, 'deploy_google_image_family', defaults, 'ubuntu-1604-lts', help='Google image family to deploy if --deploy_hal_platform is "gce".') add_parser_argument( parser, 'deploy_google_image_project', defaults, 'ubuntu-os-cloud', help='Project containing image from --deploy_google_image_family.') add_parser_argument( parser, 'deploy_google_network', defaults, 'default', help='The GCP Network to deploy spinnaker into.') add_parser_argument( parser, 'deploy_google_use_internal_ip', defaults, True, type=bool, help='Force the internal IP to connect to the deployed instance.' ' This is only valid when talking within the same project.') parser.add_argument( '--deploy_google_use_external_ip', dest='deploy_google_use_internal_ip', action='store_false', help='DEPRECATED: Use --deploy_google_use_internal_ip=false') add_parser_argument( parser, 'deploy_google_tags', defaults, 'spinnaker-validation-instance', help='A comma-delimited list of GCP network tags to tag' ' the deployed instances with.') add_parser_argument( parser, 'deploy_hal_google_service_account', defaults, None, help='When deploying to gce, this is the service account to use' ' for configuring halyard.') @classmethod def validate_options_helper(cls, options): """Adds custom configuration parameters to argument parser. This is a helper function for make_deployer(). 
""" if not options.deploy_google_project: raise_and_log_error( ConfigError('--deploy_google_project not specified.')) if not options.deploy_google_instance: raise_and_log_error( ConfigError('--deploy_google_instance not specified.')) if not options.deploy_hal_google_service_account: raise_and_log_error( ConfigError('--deploy_hal_google_service_account not specified.')) if options.deploy_deploy: logging.debug('Checking if "%s" already exists...', options.deploy_google_instance) retcode, _ = run_subprocess( 'gcloud compute instances describe' ' --account {gcloud_account}' ' --project {project} --zone {zone} {instance}' .format(gcloud_account=options.deploy_hal_google_service_account, project=options.deploy_google_project, zone=options.deploy_google_zone, instance=options.deploy_google_instance)) if retcode == 0: raise_and_log_error(ConfigError( '"{instance}" already exists in project={project} zone={zone}' .format(instance=options.deploy_google_instance, project=options.deploy_google_project, zone=options.deploy_google_zone), cause='VmExists')) def do_create_vm(self, options): """Implements the BaseBomValidateDeployer interface.""" logging.info('Creating "%s" in project "%s"', options.deploy_google_instance, options.deploy_google_project) with open(self.ssh_key_path + '.pub', 'r') as f: ssh_key = f.read().strip() if ssh_key.startswith('ssh-rsa'): ssh_key = self.hal_user + ':' + ssh_key check_subprocess( 'gcloud compute instances create' ' --account {gcloud_account}' ' --machine-type {machine_type}' ' --image-family {image_family}' ' --image-project {image_project}' ' --metadata block-project-ssh-keys=TRUE,ssh-keys="{ssh_key}"' ' --project {project} --zone {zone}' ' --network {network}' ' --tags {network_tags}' ' --scopes {scopes}' ' {instance}' .format(gcloud_account=options.deploy_hal_google_service_account, machine_type=options.deploy_google_machine_type, image_family=options.deploy_google_image_family, image_project=options.deploy_google_image_project, 
project=options.deploy_google_project, zone=options.deploy_google_zone, scopes='compute-rw,storage-full,logging-write,monitoring', network=options.deploy_google_network, network_tags=options.deploy_google_tags, ssh_key=ssh_key, instance=options.deploy_google_instance), stream=sys.stdout) def do_undeploy(self): """Implements the BaseBomValidateDeployer interface.""" options = self.options if options.deploy_spinnaker_type == 'distributed': run_subprocess( 'ssh' ' -i {ssh_key}' ' -o StrictHostKeyChecking=no' ' -o UserKnownHostsFile=/dev/null' ' {user}@{ip} sudo hal -q --log=info deploy clean' .format(user=self.hal_user, ip=self.instance_ip, ssh_key=self.ssh_key_path)) check_subprocess( 'gcloud -q compute instances delete' ' --account {gcloud_account}' ' --project {project} --zone {zone} {instance}' .format(gcloud_account=options.deploy_hal_google_service_account, project=options.deploy_google_project, zone=options.deploy_google_zone, instance=options.deploy_google_instance)) def make_deployer(options, metrics): """Public interface to instantiate the desired Deployer. Args: options: [Namespace] from an argument parser given to init_argument_parser """ if options.deploy_hal_platform == 'gce': hal_klass = GoogleValidateBomDeployer elif options.deploy_hal_platform == 'ec2': hal_klass = AwsValidateBomDeployer elif options.deploy_hal_platform == 'azure': hal_klass = AzureValidateBomDeployer else: raise_and_log_error(ConfigError( 'Invalid --deploy_hal_platform=%s' % options.deploy_hal_platform)) if options.deploy_spinnaker_type not in SUPPORTED_DEPLOYMENT_TYPES: raise_and_log_error(ConfigError( 'Invalid --deploy_spinnaker_type "{0}". Must be one of {1}' .format(options.deploy_spinnaker_type, SUPPORTED_DEPLOYMENT_TYPES))) # This is the class for accessing the Spinnaker deployment if other than Hal. 
spin_klass = None if options.deploy_spinnaker_type == 'distributed': if (options.deploy_distributed_platform not in SUPPORTED_DISTRIBUTED_PLATFORMS): raise_and_log_error(ConfigError( 'A "distributed" deployment requires --deploy_distributed_platform')) if options.deploy_distributed_platform == 'kubernetes': spin_klass = KubernetesValidateBomDeployer elif options.deploy_distributed_platform == 'kubernetes_v2': spin_klass = KubernetesV2ValidateBomDeployer else: raise_and_log_error(ConfigError( 'Unknown --deploy_distributed_platform.' ' This must be the value of one of the following parameters: {0}' .format(SUPPORTED_DISTRIBUTED_PLATFORMS))) hal_klass.validate_options_helper(options) if spin_klass: spin_klass.validate_options_helper(options) return hal_klass(options, metrics, runtime_class=spin_klass) def determine_deployment_platform(options): """Helper function to determine the deployment platform being tested. This is used for instrumentation purposes. """ platform = options.deploy_hal_platform if options.deploy_spinnaker_type == 'distributed': if platform == 'gce': platform = 'gke' else: platform += '+k8s' return platform def init_argument_parser(parser, defaults): """Initialize the argument parser with deployment and configuration params. Args: parser: [ArgumentParser] The argument parser to add the parameters to. 
""" # pylint: disable=line-too-long add_parser_argument( parser, 'halyard_install_script', defaults, 'https://raw.githubusercontent.com/spinnaker/halyard/master/install/debian/InstallHalyard.sh', help='The URL to the InstallHalyard.sh script.') add_parser_argument( parser, 'halyard_version', defaults, None, help='If provided, the specific version of halyard to use.') add_parser_argument( parser, 'halyard_bucket_base_url', defaults, None, help='The base URL for the bucket containing the halyard jar files' ' to override, if any.') add_parser_argument( parser, 'halyard_config_bucket', defaults, None, help='The global halyard configuration bucket to override, if any.') add_parser_argument( parser, 'halyard_config_bucket_credentials', defaults, None, help='If specified, give these credentials to halyard' ' in order to access the global halyard GCS bucket.') add_parser_argument( parser, 'spinnaker_repository', defaults, 'https://dl.bintray.com/spinnaker-releases/debians', help='The location of the spinnaker debian repository.') add_parser_argument( parser, 'spinnaker_registry', defaults, 'gcr.io/spinnaker-marketplace', help='The location of the spinnaker container registry.') add_parser_argument( parser, 'deploy_spinnaker_type', defaults, None, choices=SUPPORTED_DEPLOYMENT_TYPES, help='The type of spinnaker deployment to create.') add_parser_argument( parser, 'deploy_hal_platform', defaults, None, choices=['gce', 'ec2', 'azure'], help='Platform to deploy Halyard onto.' ' Halyard will then deploy Spinnaker.') add_parser_argument( parser, 'deploy_hal_user', defaults, os.environ.get('LOGNAME'), help='User name on deployed hal_platform for deploying hal.' 
' This is used to scp and ssh from this machine.') add_parser_argument( parser, 'deploy_distributed_platform', defaults, 'kubernetes', choices=SUPPORTED_DISTRIBUTED_PLATFORMS, help='The platform to deploy spinnaker to when' ' --deploy_spinnaker_type=distributed') add_parser_argument( parser, 'deploy_version', defaults, 'master-latest-unvalidated', help='Spinnaker version to deploy. The default is "master-latest-unverified".') add_parser_argument( parser, 'deploy_deploy', defaults, True, type=bool, help='Actually perform the deployment.' ' This is for facilitating debugging with this script.') add_parser_argument( parser, 'deploy_undeploy', defaults, True, type=bool, help='Actually perform the undeployment.' ' This is for facilitating debugging with this script.') add_parser_argument( parser, 'deploy_always_collect_logs', defaults, False, type=bool, help='Always collect logs.' 'By default logs are only collected when deploy_undeploy is True.') AwsValidateBomDeployer.init_platform_argument_parser(parser, defaults) AzureValidateBomDeployer.init_platform_argument_parser(parser, defaults) GoogleValidateBomDeployer.init_platform_argument_parser(parser, defaults) KubernetesValidateBomDeployer.init_platform_argument_parser(parser, defaults) KubernetesV2ValidateBomDeployer.init_platform_argument_parser(parser, defaults)
duftler/spinnaker
dev/validate_bom__deploy.py
Python
apache-2.0
62,240
[ "ORCA" ]
865f6d021921105f4b8c9221a4fed1f3ebdfb559ae7c2fd4c835586fb123af3e
"""Read genome build configurations from Galaxy *.loc and bcbio-nextgen resource files. """ import ConfigParser import glob import os from xml.etree import ElementTree import toolz as tz import yaml from bcbio import utils from bcbio.ngsalign import star from bcbio.pipeline import alignment from bcbio.provenance import do # ## bcbio-nextgen genome resource files def get_resources(genome, ref_file): """Retrieve genome information from a genome-references.yaml file. """ base_dir = os.path.normpath(os.path.dirname(ref_file)) resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", "")) if not os.path.exists(resource_file): raise IOError("Did not find resource file for %s: %s\n" "To update bcbio_nextgen.py with genome resources for standard builds, run:\n" "bcbio_nextgen.py upgrade -u skip" % (genome, resource_file)) with open(resource_file) as in_handle: resources = yaml.load(in_handle) def resource_file_path(x): if isinstance(x, basestring) and os.path.exists(os.path.join(base_dir, x)): return os.path.normpath(os.path.join(base_dir, x)) return x return utils.dictapply(resources, resource_file_path) # ## Utilities def abs_file_paths(xs, base_dir=None, ignore_keys=None): """Normalize any file paths found in a subdirectory of configuration input. 
""" ignore_keys = set([]) if ignore_keys is None else set(ignore_keys) if not isinstance(xs, dict): return xs if base_dir is None: base_dir = os.getcwd() orig_dir = os.getcwd() os.chdir(base_dir) input_dir = os.path.join(base_dir, "inputs") out = {} for k, v in xs.iteritems(): if k not in ignore_keys and v and isinstance(v, basestring): if v.lower() == "none": out[k] = None elif os.path.exists(v) or v.startswith(utils.SUPPORTED_REMOTES): out[k] = os.path.normpath(os.path.join(base_dir, utils.dl_remotes(v, input_dir))) else: out[k] = v else: out[k] = v os.chdir(orig_dir) return out # ## Galaxy integration -- *.loc files def _get_galaxy_loc_file(name, galaxy_dt, ref_dir, galaxy_base): """Retrieve Galaxy *.loc file for the given reference/aligner name. First tries to find an aligner specific *.loc file. If not defined or does not exist, then we need to try and remap it from the default reference file """ if "file" in galaxy_dt and os.path.exists(os.path.join(galaxy_base, galaxy_dt["file"])): loc_file = os.path.join(galaxy_base, galaxy_dt["file"]) need_remap = False elif alignment.TOOLS[name].galaxy_loc_file is None: loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE) need_remap = True else: loc_file = os.path.join(ref_dir, alignment.TOOLS[name].galaxy_loc_file) need_remap = False if not os.path.exists(loc_file): loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE) need_remap = True return loc_file, need_remap def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False): """Iterator returning genome build and references from Galaxy *.loc file. 
""" if "column" in galaxy_dt: dbkey_i = galaxy_dt["column"].index("dbkey") path_i = galaxy_dt["column"].index("path") else: dbkey_i = None if os.path.exists(loc_file): with open(loc_file) as in_handle: for line in in_handle: if line.strip() and not line.startswith("#"): parts = line.strip().split("\t") # Detect and report spaces instead of tabs if len(parts) == 1: parts = [x.strip() for x in line.strip().split(" ") if x.strip()] if len(parts) > 1: raise IOError("Galaxy location file uses spaces instead of " "tabs to separate fields: %s" % loc_file) if dbkey_i is not None and not need_remap: dbkey = parts[dbkey_i] cur_ref = parts[path_i] else: if parts[0] == "index": parts = parts[1:] dbkey = parts[0] cur_ref = parts[-1] yield (dbkey, cur_ref) def _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap, galaxy_config, data): """Retrieve reference genome file from Galaxy *.loc file. Reads from tool_data_table_conf.xml information for the index if it exists, otherwise uses heuristics to find line based on most common setups. """ refs = [ref for dbkey, ref in _galaxy_loc_iter(loc_file, galaxy_dt, need_remap) if dbkey == genome_build] remap_fn = alignment.TOOLS[name].remap_index_fn need_remap = remap_fn is not None if len(refs) == 0: cur_ref = _download_prepped_genome(genome_build, data, name, need_remap) # allow multiple references in a file and use the most recently added else: cur_ref = refs[-1] if need_remap: assert remap_fn is not None, "%s requires remapping function from base location file" % name cur_ref = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"])) cur_ref = remap_fn(os.path.abspath(cur_ref)) return cur_ref def _get_galaxy_tool_info(galaxy_base): """Retrieve Galaxy tool-data information from defaults or galaxy config file. 
""" ini_file = os.path.join(galaxy_base, "universe_wsgi.ini") info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"), "tool_data_path": os.path.join(galaxy_base, "tool-data")} config = ConfigParser.ConfigParser() config.read(ini_file) if "app:main" in config.sections(): for option in config.options("app:main"): if option in info: info[option] = os.path.join(galaxy_base, config.get("app:main", option)) return info def _get_galaxy_data_table(name, dt_config_file): """Parse data table config file for details on tool *.loc location and columns. """ out = {} if os.path.exists(dt_config_file): tdtc = ElementTree.parse(dt_config_file) for t in tdtc.getiterator("table"): if t.attrib.get("name", "") in [name, "%s_indexes" % name]: out["column"] = [x.strip() for x in t.find("columns").text.split(",")] out["file"] = t.find("file").attrib.get("path", "") return out def get_refs(genome_build, aligner, galaxy_base, data): """Retrieve the reference genome file location from galaxy configuration. 
""" out = {} name_remap = {"samtools": "fasta"} if genome_build: galaxy_config = _get_galaxy_tool_info(galaxy_base) for name in [x for x in ("samtools", aligner) if x]: galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"]) loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"], galaxy_base) cur_ref = _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap, galaxy_config, data) base = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"])) if os.path.isdir(base): indexes = glob.glob(os.path.join(base, "*")) else: indexes = glob.glob("%s*" % utils.splitext_plus(base)[0]) out[name_remap.get(name, name)] = {"indexes": indexes} if os.path.exists(base) and os.path.isfile(base): out[name_remap.get(name, name)]["base"] = base return out def get_builds(galaxy_base): """Retrieve configured genome builds and reference files, using Galaxy configuration files. Allows multiple dbkey specifications in the same file, using the most recently added. """ name = "samtools" galaxy_config = _get_galaxy_tool_info(galaxy_base) galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"]) loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"], galaxy_base) assert not need_remap, "Should not need to remap reference files" fnames = {} for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt): fnames[dbkey] = fname out = [] for dbkey in sorted(fnames.keys()): out.append((dbkey, fnames[dbkey])) return out # ## Retrieve pre-prepared genomes REMAP_NAMES = {"tophat2": "bowtie2", "samtools": "seq"} S3_INFO = {"bucket": "biodata", "key": "prepped/{build}/{build}-{target}.tar.gz"} INPLACE_INDEX = {"star": star.index} def _download_prepped_genome(genome_build, data, name, need_remap): """Get a pre-prepared genome from S3, unpacking it locally. Supports runs on AWS where we can retrieve the resources on demand. 
""" out_dir = utils.safe_makedir(os.path.join(tz.get_in(["dirs", "work"], data), "inputs", "data", "genomes")) ref_dir = os.path.join(out_dir, genome_build, REMAP_NAMES.get(name, name)) if not os.path.exists(ref_dir): target = REMAP_NAMES.get(name, name) if target in INPLACE_INDEX: ref_file = glob.glob(os.path.normpath(os.path.join(ref_dir, os.pardir, "seq", "*.fa")))[0] INPLACE_INDEX[target](ref_file, ref_dir, data) else: with utils.chdir(out_dir): bucket = S3_INFO["bucket"] key = S3_INFO["key"].format(build=genome_build, target=REMAP_NAMES.get(name, name)) cmd = ("gof3r get --no-md5 -k {key} -b {bucket} | pigz -d -c | tar -xvp") do.run(cmd.format(**locals()), "Download pre-prepared genome data: %s" % genome_build) genome_dir = os.path.join(out_dir, genome_build) genome_build = genome_build.replace("-test", "") if need_remap or name == "samtools": return os.path.join(genome_dir, "seq", "%s.fa" % genome_build) else: ref_dir = os.path.join(genome_dir, REMAP_NAMES.get(name, name)) base_name = os.path.commonprefix(os.listdir(ref_dir)) while base_name.endswith("."): base_name = base_name[:-1] return os.path.join(ref_dir, base_name)
SciLifeLab/bcbio-nextgen
bcbio/pipeline/genome.py
Python
mit
10,747
[ "Galaxy" ]
d644cbd8a3b4c2c11bab4bd454b732a9e8c919e611c59373784502432ed6331b