text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
import Bio
import os
from Bio.Seq import Seq
from Bio import motifs
from Bio.Alphabet import IUPAC
''' Script uses BioPython to generate position weight matrices from a directory containing Fasta files for
each modules phospho-peptides. Note: Duplicate amino acid sequences should be removed from the Fasta files before running this script, if they exist, to prevent overweigting
the matrix'''
alphabet = IUPAC.protein # use protein alphabet
# NOTE(review): this module-level list is never used -- CreatePWM builds its own
instances = []
os.chdir("/Users/mmacgilvray18/Desktop/NaCl_Module_FASTA_Files_Dupes_Removed/") # user defined directory containing Fasta files
def CreatePWM():
    '''Create a position weight matrix (PWM) for each module Fasta file.

    Scans the current working directory for ``*.txt`` Fasta files, collects
    the amino-acid sequences of each file, and builds a count matrix
    normalized with a +1 pseudocount per module.

    Returns
    -------
    dict
        Maps each Fasta file name to its normalized PWM. (The original
        implementation computed each matrix and silently discarded it.)

    NOTE(review): ``Bio.Alphabet`` was removed in Biopython 1.78 -- this
    script requires an older Biopython release.
    '''
    pwms = {}
    for fname in os.listdir():  # iterate through the Fasta files in the directory
        if not fname.endswith('.txt'):
            continue
        instances = []  # sequences of the current module only
        with open(fname, "r") as f:
            for line in f:
                if line.startswith('>'):  # skip Fasta header lines
                    continue
                instances.append(Seq(line.rstrip(), IUPAC.protein))
        if instances:  # guard against files that contain no sequences
            m = motifs.create(instances)
            pwms[fname] = m.counts.normalize(pseudocounts=1)  # add a +1 pseudocount
    return pwms


CreatePWM()
| mmacgilvray18/Phospho_Network | python/Create_PWMs_From_Module_Fasta.py | Python | gpl-3.0 | 1,540 | [
"Biopython"
] | e564006e8a135fbc7da0b7ac2c0a8958a8bb827ee45eac0ade2cf6826890b21d |
import numpy as np
from scipy import ndimage
from skimage.morphology import remove_small_objects
from skimage.measure import regionprops
from skimage.io import imread
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib import cm
from Bio import Phylo
####################################################################################
# utility functions
####################################################################################
# -------------
# -------------
def xcorr2fft(image1, image2):
    '''
    Estimate the translation of image2 with respect to image1 using the
    correlation-peak method in Fourier space.
    Returns a signed shift vector with one entry per image dimension.
    '''
    assert image1.shape == image2.shape
    dims = np.asarray(image1.shape)
    # cross power spectrum: F(img1) * conj(F(img2))
    spectrum = np.fft.fftn(image1) * np.conj(np.fft.fftn(image2))
    correlation = np.fft.ifftn(spectrum)
    # position of the correlation peak
    peak = np.asarray(np.unravel_index(correlation.argmax(), correlation.shape))
    # convert the wrapped peak position into a signed shift
    use_negative = np.abs(peak - 1) < np.abs(dims - peak + 1)
    return np.where(use_negative, -peak, dims - peak)
def read_ilastik_pred(fname, channel, p_cutoff = 0.5):
    '''
    Read a segmentation from an ilastik h5 file.
    Assumes the prediction is stored in group /volume/prediction.
    channel specifies the object class to be returned.
    p_cutoff is the threshold applied to the posterior probability.
    Returns a boolean mask (posterior > p_cutoff).
    '''
    import h5py
    # use a context manager so the file handle is closed
    # (the original opened the file and never closed it)
    with h5py.File(fname, 'r') as f:
        pred = np.asarray(f['volume']['prediction']).squeeze()[:, :, channel]
    return pred > p_cutoff
def read_ilastik_data(fname):
    '''
    Read the raw intensity data from an ilastik h5 file.
    Assumes the data is stored in group /volume/data.
    Returns the squeezed numpy array.
    '''
    import h5py
    # use a context manager so the file handle is closed
    # (the original opened the file and never closed it)
    with h5py.File(fname, 'r') as f:
        data = np.asarray(f['volume']['data']).squeeze()
    return data
def match_mindist(X2, X1, dmax):
    '''
    Match points by euclidean distance with an upper limit for the distance.
    Computes the full distance matrix and takes the row/column minima.
    Returns two vectors: for each point in X1 the index of its nearest
    partner in X2 and vice versa; -1 marks "no partner within dmax".
    '''
    from scipy.spatial.distance import cdist
    # pairwise distances: rows correspond to X1, columns to X2
    dist = cdist(X1, X2, 'euclidean')
    best12 = np.argmin(dist, axis=1)  # nearest X2 point per X1 point
    best21 = np.argmin(dist, axis=0)  # nearest X1 point per X2 point
    # invalidate matches whose nearest neighbor is farther than dmax
    best12[np.min(dist, axis=1) >= dmax] = -1
    best21[np.min(dist, axis=0) >= dmax] = -1
    return best12, best21
def match_bijective(X2, X1, dmax):
    '''
    Match points by euclidean distance with upper limit for distance.
    Briefly: obtain the distance matrix, keep pairs where the row and
    column minima agree, remove the assigned points and iterate until
    no entries with distance < dmax exist or no unique assignments are
    possible anymore.
    X1 are the points in time 1 and X2 in time 2.
    dmax is an upper limit to the distance between matched points.
    Returns two vectors assigning each element in X1 a partner in X2
    and vice versa; -1 signifies no match.
    '''
    np1, np2 = X1.shape[0], X2.shape[0]
    ind1, ind2 = np.arange(np1), np.arange(np2)
    ix1 = -np.ones(np1, dtype='int32')
    ix2 = -np.ones(np2, dtype='int32')
    while len(ind1) and len(ind2):
        # match the remaining points only (submatrices of X1/X2)
        m12, m21 = match_mindist(X2[ind2], X1[ind1], dmax)
        # a pair is accepted when both points pick each other
        agree1 = (m21[m12] == np.arange(len(ind1))) * (m12 > -1)
        agree2 = (m12[m21] == np.arange(len(ind2))) * (m21 > -1)
        if agree1.sum() == 0 or agree2.sum() == 0:
            break
        # bug fix: m12/m21 index into the *submatrices*, so they must be
        # mapped back through ind2/ind1 to global point indices -- the
        # original stored the local index, which is wrong after the first
        # pruning iteration
        ix1[ind1[agree1]] = ind2[m12[agree1]]
        ix2[ind2[agree2]] = ind1[m21[agree2]]
        # bug fix: boolean negation is '~'; unary '-' on boolean arrays
        # raises a TypeError on modern numpy
        ind1 = ind1[~agree1]
        ind2 = ind2[~agree2]
    return ix1, ix2
####################################################################################
# classes
####################################################################################
class segmented_image(object):
'''
simple class providing utility functions to work with segmented images
TODO: add functionality to read images of disk
'''
####################################################################################
# constructor
####################################################################################
def __init__(self, seg_fname, image_fname=None, save_image=False, p_cutoff = 0.5, channel = 1):
self.p_cutoff = p_cutoff # ilastik posterior probability cutoff
self.channel = channel # ilastik segmentation channel
self.image_fname = image_fname
self.seg_fname = seg_fname
if save_image:
self.seg = self.get_seg()
if self.image_fname is not None: self.img = self.get_img()
else:
self.img=None
self.seg = None
tmp_seg = self.get_seg()
self.seg_shape = tmp_seg.shape
print "segmentation dimensions:", self.seg_shape
if self.image_fname is not None:
tmp_img = self.get_img()
self.img_shape = tmp_img.shape
print "image dimensions:", self.img_shape
else:
self.img_shape = None
####################################################################################
# methods
####################################################################################
def get_seg(self):
'''
return segmented image. load from file if not saved
'''
if self.seg is not None:
return self.seg
else:
# determine file format and specify loading function
if self.seg_fname.split('.')[-1].startswith('tif'):
return imread(self.seg_fname)
elif self.seg_fname.split('.')[-1].startswith('h5'):
return read_ilastik_pred(self.seg_fname, self.channel, self.p_cutoff)
else:
print 'unsupported image format'
return None
def get_img(self):
'''
return intensity image. load from file if not saved.
'''
if self.seg is not None:
return self.seg
elif self.image_fname is not None:
# determine file format and specify loading function
if self.image_fname.split('.')[-1].startswith('tif'):
return imread(self.image_fname)
elif self.image_fname.split('.')[-1].startswith('h5'):
return read_ilastik_data(self.image_fname)
else:
print 'unsupported image format'
return None
else:
return None
class labeled_image(segmented_image):
    '''
    child of segmented_image
    simple class providing utility functions to work with labeled objects

    min_size -- if given, objects smaller than min_size pixels are removed
                before labeling
    '''
    def __init__(self, seg_fname, img_fname, min_size = None, save_image=False, channel = 1, p_cutoff = 0.5):
        segmented_image.__init__(self,seg_fname, img_fname,save_image, channel=channel,p_cutoff=p_cutoff)
        self.parents = defaultdict(list)   # object label -> labels of parents in the previous frame
        self.children = defaultdict(list)  # object label -> labels of children in the next frame
        self.min_size = min_size
        labeled_img = self.get_labeled_img()
        # make a dictionary that links the label to the slice of the image
        # in which the object is found.
        self.labeled_obj = {label_i+1:o_slice for label_i, o_slice in
                            enumerate(ndimage.find_objects(labeled_img))}
        # skimage regionprops per label (labels are 1-based)
        self.region_props = {label_i+1:o_slice for label_i, o_slice in
                             enumerate(regionprops(labeled_img, intensity_image = self.get_img()))}
        # sorted list of labels currently passing the filters
        self.obj_list = self.labeled_obj.keys()
        self.obj_list.sort()

    def filter_objects(self, prop, lower_th, upper_th):
        '''
        filter the list of objects by a given property using a lower and upper threshold.
        updates self.obj_list with objects passing filter only
        '''
        self.obj_list= [label_i for label_i, obj in self.region_props.iteritems()
                        if obj[prop]>=lower_th and obj[prop]<upper_th]
        self.obj_list.sort()
        # reset the parents and children since assignments have been invalidated after
        # redefining the object set.
        self.parents = defaultdict(list)
        self.children = defaultdict(list)
        return self.obj_list

    def get_labeled_img(self):
        # label connected components, optionally dropping small objects first
        if self.min_size is not None:
            labeled_img, self.n_objects = ndimage.label(remove_small_objects(self.get_seg(), self.min_size))
        else:
            labeled_img, self.n_objects = ndimage.label(self.get_seg())
        return labeled_img

    def filter_objects_multiProp(self, criteria, gate = 'AND'):
        '''
        filter the list of objects by several properties, each with a lower and
        upper threshold. updates self.obj_list with objects passing filter only.
        criteria: dictionary mapping a property name to a (lower, upper) tuple.
        gate: 'AND' requires all criteria to pass, anything else means 'OR'.
        '''
        passing_filter = {} # instantiate dictionary
        for prop, (low,up) in criteria.iteritems(): # iteritems returns key, and tuple of values
            passing_filter[prop]= set([label_i for label_i, obj in self.region_props.iteritems()
                                       if obj[prop]>=low and obj[prop]<up])
        if gate == 'AND':
            # criteria have to be met in all properties
            self.obj_list = set(self.labeled_obj.keys())
            for prop, val in passing_filter.iteritems():
                self.obj_list.intersection_update(val)
        else:
            # criteria have to be met in one property
            self.obj_list = set()
            for prop, val in passing_filter.iteritems():
                self.obj_list.update(val)
        self.obj_list = list(self.obj_list)
        self.obj_list.sort()
        # reset the parents and children since assignments have been invalidated after
        # redefining the object set.
        self.parents = defaultdict(list)
        self.children = defaultdict(list)
        return self.obj_list
#################################################################################
class labeled_series(object):
    '''
    class holding a series of images of objects that can be tracked through the
    series
    '''
    def __init__(self):
        self.colorlookup = {} # dictionary of dictionaries assigning colors to obj in time slices
        self.series = [] # list holding the labeled images of the experiment
        self.shifts = None # two dimensional shifts of an image relative to its predecessor
        self.trees = []  # list of (time, Phylo tree) tuples built by find_trees()

    def load_from_file(self, file_mask_seg, file_mask_intensity=None, min_size=None, channel = 1, p_cutoff = 0.9):
        '''
        loads an image series from file given a search string for segmented images
        optionally takes a corresponding search string for the intensity images
        file lists need to be sortable
        '''
        from glob import glob
        # make list of segmentation files to be loaded, sort them
        self.segmentation_files = glob(file_mask_seg)
        self.segmentation_files.sort()
        # if intensity image names are provided, load and sort them too. truncate list if too many images found
        if file_mask_intensity is not None:
            self.image_files = glob(file_mask_intensity)
            self.image_files.sort()
            if len(self.image_files)>len(self.segmentation_files):
                self.image_files=self.image_files[:len(self.segmentation_files)]
        # load image and append to self.series
        if len(self.segmentation_files)>0:
            if file_mask_intensity is not None:
                for seg_name, img_name in zip(self.segmentation_files, self.image_files):
                    print "reading", seg_name, img_name
                    self.series.append(labeled_image(seg_name, img_name, min_size, channel = channel, p_cutoff=p_cutoff))
            else:
                for seg_name in self.segmentation_files:
                    print "reading", seg_name
                    self.series.append(labeled_image(seg_name, None, min_size, channel = channel, p_cutoff=p_cutoff))
            self.dim = self.series[-1].seg_shape
            # assign an initial random color to every object in every frame
            [self.color_randomly(ti) for ti in range(len(self.series))]
        else:
            print "no images found at", file_mask_seg

    def get_object_props(self, prop, ti):
        '''
        Per time point in the series, extract the properties of interest as list.
        prop: string specifying a property name according to regionprops
        ti : integer. time point
        '''
        return [p[prop] for p in self.series[ti].region_props.values()]

    def get_object_props_allTimes(self, prop):
        '''
        For all time points in the series, extract the properties of interest as list.
        prop: string specifying a property name according to regionprops
        '''
        return_list = []
        for t in range(len(self.series)):
            return_list.extend(self.get_object_props(prop,t))
        return return_list

    def filter_objects(self, prop, lower_th, upper_th):
        '''
        loops over all time points and filters the objects at this time point according to
        a given criterion of regionprops with upper and lower threshold
        TODO add AND and OR operations for multiple conditions
        '''
        for ti,labeled_img in enumerate(self.series):
            labeled_img.filter_objects(prop, lower_th, upper_th)

    def filter_objects_multiProp(self, criteria, gate = 'AND'):
        '''
        loops over all time points and filters the objects at this time point according to
        several regionprops criteria, each with upper and lower threshold,
        combined with an AND or OR gate
        '''
        for ti,labeled_img in enumerate(self.series):
            labeled_img.filter_objects_multiProp(criteria, gate)

    def calc_image_shifts(self, channel=None):
        '''
        loops over the series of images and calculates the most likely shift
        by which image ti differs from image ti+1
        '''
        self.shifts = np.zeros((len(self.series)-1, len(self.dim)))
        for ti in xrange(len(self.series)-1):
            try:
                if channel is None:
                    if len(self.series[ti].img_shape)==2:
                        # single-channel intensity images
                        img1, img2 = self.series[ti].get_img(), self.series[ti+1].get_img()
                    else:
                        # multi-channel: use the per-pixel maximum across channels
                        img1, img2 = self.series[ti].get_img().max(axis=-1), self.series[ti+1].get_img().max(axis=-1)
                else:
                    img1, img2 = self.series[ti].get_img()[:,:,channel], self.series[ti+1].get_img()[:,:,channel]
            except:
                # NOTE(review): bare except silently falls back to the binary
                # segmentation masks when no intensity image is available --
                # confirm this is the only intended failure mode
                img1, img2 = self.series[ti].get_seg()>0, self.series[ti+1].get_seg()>0
            self.shifts[ti] = xcorr2fft(img1, img2)
            print 'Shift', ti, 'to', ti+1, self.shifts[ti]

    def track_objects(self, match_func = match_bijective, dmax = 1000):
        '''
        loops over the image series and applies the match_func to each pair of images
        '''
        if self.shifts is None:
            self.calc_image_shifts()
        for ti in xrange(len(self.series)-1):
            obj1, obj2 = self.series[ti].obj_list, self.series[ti+1].obj_list
            if len(obj1) and len(obj2):
                # centroids of frame ti+1 are shift-corrected before matching
                points1 = np.array([self.series[ti].region_props[obj]['centroid'] for obj in obj1])
                points2 = np.array([self.series[ti+1].region_props[obj]['centroid']-self.shifts[ti] for obj in obj2])
                m12, m21 = match_func(points2, points1, dmax)
                # record the parent/child links in both frames
                for parent, child in zip(m21, obj2):
                    if parent!=-1:
                        self.series[ti].children[obj1[parent]].append(child)
                        self.series[ti+1].parents[child].append(obj1[parent])
            else:
                print "no objects in image at time",ti,"after filtering"
            # NOTE(review): m12/m21 are unbound here if the very first frame
            # pair had no objects -- confirm this cannot happen in practice
            print "matched", (m12>-1).sum(), 'objects.', (m12==-1).sum() , (m21==-1).sum(), 'left unmatched in time step', ti, ti+1, 'respectively'

    ####################################################################
    ### Phylogeny
    ####################################################################
    def find_trees(self):
        '''
        loops over all time points and finds objects without parents
        for each, generate a new tree
        '''
        self.trees = []
        for ti,tp in enumerate(self.series):
            for oi in tp.obj_list:
                if oi not in tp.parents: # oi does not have a parent
                    print "new tree found at time",ti, "with object id",oi, "as root"
                    self.trees.append((ti, self.build_tree(ti,oi)))

    def build_tree(self,ti,oi):
        '''
        given a root, construct a BioPython tree and call a function that recursively adds
        subtrees for all children of the root. The tree object is returned
        '''
        new_tree = Phylo.BaseTree.Tree()
        # node names encode (time, object id) as a string
        new_tree.root.name = str((ti,oi))
        self.add_subtree(new_tree.root, ti, oi)
        return new_tree

    def add_subtree(self, clade, ti, oi):
        '''
        recursively add children to the tree.
        '''
        node_children = self.series[ti].children[oi]
        # create one subclade per child, then recurse into the next frame
        clade.split(len(node_children))
        for ci,child in enumerate(node_children):
            clade.clades[ci].name = str((ti+1,child))
            self.add_subtree(clade.clades[ci], ti+1, child)

    def color_trees(self, prop):
        # color every node of every tree by a regionprops property,
        # normalized by the maximum of that property within the tree
        for _, tree in self.trees:
            props = {}
            for node in tree.get_terminals()+tree.get_nonterminals():
                # parse "(ti, oi)" back out of the node name
                ti, oi = map(int, node.name[1:-1].split(','))
                props[node.name] = self.series[ti].region_props[oi][prop]
            max_prop = max(props.values())
            for node in tree.get_terminals()+tree.get_nonterminals():
                # RGB triple from the jet colormap (alpha dropped)
                node.color = [int(x) for x in cm.jet(props[node.name]/max_prop, bytes=True)[:-1]]

    ####################################################################
    ### coloring
    ####################################################################
    def color_randomly(self,ti):
        '''
        resets the color lookup of time point ti to random choices of the jet colormap
        '''
        self.colorlookup[ti] = {oi:cm.jet(np.random.randint(256)) for oi in self.series[ti].obj_list}

    def color_as_parent(self, ti):
        '''
        redo the color assignment by assigning each object the same color
        as that of its parent
        ti -- time slice to operate on
        '''
        assert ti>0
        cur = self.series[ti]
        parent_colors = self.colorlookup[ti-1]
        temp_colors = {}
        for oi in cur.obj_list:
            if oi in cur.parents: # if node has parent, assign parent color
                temp_colors[oi] = parent_colors[cur.parents[oi][0]]
            else: # otherwise assign random color
                temp_colors[oi] = cm.jet(np.random.randint(256))
        self.colorlookup[ti]=temp_colors

    def color_lineages(self, initial_frame = 0):
        '''
        assign random colors to the initial frame, color all subsequent frames as parent
        '''
        self.color_randomly(initial_frame)
        for ti in xrange(initial_frame+1, len(self.series)):
            self.color_as_parent(ti)

    ####################################################################
    ### plotting
    ####################################################################
    def add_centroids(self, ti):
        '''
        adds a colored dot (according to the colorlookup) at the centroid of each
        object passing filter criteria of time point ti.
        uses the current axis
        '''
        for label_i in self.series[ti].obj_list:
            x,y = self.series[ti].region_props[label_i]['centroid']
            plt.plot([y],[x], 'o', c=self.colorlookup[ti][label_i])
        plt.ylim(0,self.series[ti].seg_shape[0])
        plt.xlim(0,self.series[ti].seg_shape[1])

    def add_forward_trajectory(self, ti, oi):
        '''
        starting with object oi at time point ti, looks for one
        descendant in subsequent time slices and constructs a trajectory
        of the centroids. adds this trajectory to the current axis.
        NOTE: this always uses child[0]
        '''
        traj = [self.series[ti].region_props[oi]['centroid']]
        next_oi = oi
        next_ti = ti
        while next_oi in self.series[next_ti].children:
            next_oi = self.series[next_ti].children[next_oi][0]
            next_ti += 1
            traj.append(self.series[next_ti].region_props[next_oi]['centroid'])
        traj = np.array(traj)
        plt.plot(traj[:,1], traj[:,0], ls='-', marker = 'o', c=self.colorlookup[ti][oi])

    def add_backward_trajectory(self, ti, oi):
        '''
        starting with object oi at time point ti, looks for the
        ancestor in previous time slices and constructs a trajectory
        of the centroids. adds this trajectory to the current axis.
        NOTE: this always uses parent[0]
        '''
        traj = [self.series[ti].region_props[oi]['centroid']]
        next_oi = oi
        next_ti = ti
        while next_oi in self.series[next_ti].parents:
            next_oi = self.series[next_ti].parents[next_oi][0]
            next_ti -= 1
            traj.append(self.series[next_ti].region_props[next_oi]['centroid'])
        traj = np.array(traj)
        plt.plot(traj[:,1], traj[:,0], ls='-', marker = 'o', c=self.colorlookup[ti][oi])

    def plot_image_and_centroids(self, ti, ax=None):
        '''
        plots the intensity image (if not available the labeled image) and adds
        dots at the centroids of each object passing filters. Constructs a new figure
        if no axis is given
        '''
        if ax is None: # make new figure if necessary
            fig = plt.figure()
            ax = plt.subplot(111)
        try: # plot intensity image if available
            ax.imshow(self.series[ti].get_img(), interpolation='nearest', cmap = cm.gray)
        except: # fallback to labeled image
            ax.imshow(self.series[ti].get_seg(), interpolation='nearest', cmap = cm.gray)
        self.add_centroids(ti)

    def plot_image_and_trajectories(self, ti, ax=None, bwd = False, fwd = True):
        '''
        plots the intensity image (if not available the labeled image) and adds
        dots at the trajectory of centroids of each object passing filters. Constructs a new figure
        if no axis is given. backward and forward trajectories can be specified.
        '''
        if ax is None:
            fig = plt.figure()
            ax = plt.subplot(111)
        try:
            ax.imshow(self.series[ti].get_img(), interpolation='nearest',cmap = cm.gray)
        except:
            ax.imshow(self.series[ti].get_seg(), interpolation='nearest',cmap = cm.gray)
        for oi in self.series[ti].obj_list:
            if fwd: self.add_forward_trajectory(ti,oi)
            if bwd: self.add_backward_trajectory(ti,oi)
        # NOTE(review): unlike plot_image_and_centroids these pass the full
        # shape tuple to xlim/ylim rather than seg_shape[0]/seg_shape[1] --
        # looks like a bug, confirm against matplotlib behavior
        plt.ylim(0,self.series[ti].seg_shape)
        plt.xlim(0,self.series[ti].seg_shape)

    def save_QC_series(self,save_path, additional_tps = [], img_format='png'):
        '''
        saves each time frame to file and adds the centroids of additional time slices.
        additional_tps: list of time offsets dt whose centroids are overlaid
        '''
        for ti in xrange(len(self.series)):
            plt.ioff()
            self.plot_image_and_centroids(ti)
            if len(additional_tps):
                for dt in additional_tps:
                    # NOTE(review): the guard only protects against negative dt;
                    # positive dt can index past the end of the series -- verify
                    if ti>=-dt:
                        self.add_centroids(ti+dt)
            plt.savefig(save_path+format(ti,'03d')+'.'+img_format)
            plt.close()
if __name__ == '__main__':
    # quick self-tests, run only when the module is executed as a script
    # test matching: 5 jittered copies of points1 plus 4 random points,
    # in reversed order, so most points should find a close partner
    points1 = np.random.uniform(size = (5,2))
    points2 = np.concatenate((points1+ 0.02* np.random.uniform(size = (5,2)),
                              np.random.uniform(size = (4,2))))[::-1]
    m12, m21 = match_mindist(points2, points1, 0.1)
    print 'minimal distance', m12, m21
    m12, m21 = match_bijective(points2, points1, 0.1)
    print 'minimal bijective', m12, m21
    # test shift detection: two noisy Gaussian blobs displaced by a known shift
    Y,X = np.meshgrid(np.arange(100), np.arange(100))
    center = (50,50)
    shift = (3,-7)
    test_image = np.exp(-(X-center[0])**2 - (Y-center[1])**2) + 0.01*np.random.randn(100,100)
    test_image2 = np.exp(-(X-center[0]-shift[0])**2 - (Y-center[1]-shift[1])**2)+ 0.01*np.random.randn(100,100)
    print 'True:', shift, 'Inferred:', xcorr2fft(test_image, test_image2)
| qbio2014/cellTracking | src/segmentation_tools.py | Python | gpl-2.0 | 24,884 | [
"Biopython"
] | 5e8b9dfccb61261deb070ae0802bb3197b6f8d2915910a4ebddb8ef0a24e4b12 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import os
import sys
import unittest
import mock
import nikola
import nikola.plugins.command.import_wordpress
from .base import BaseTestCase
class BasicCommandImportWordpress(BaseTestCase):
    '''Shared fixture: a fresh wordpress import command plus the path of the
    bundled wordpress export example file.'''

    def setUp(self):
        self.module = nikola.plugins.command.import_wordpress
        self.import_command = self.module.CommandImportWordpress()
        here = os.path.dirname(__file__)
        self.import_filename = os.path.abspath(
            os.path.join(here, 'wordpress_export_example.xml'))

    def tearDown(self):
        # drop per-test state so no command instance leaks between tests
        del self.import_command
        del self.import_filename
class TestQTranslateContentSeparation(BasicCommandImportWordpress):
    '''Tests for separate_qtranslate_content: splitting qtranslate-tagged
    wordpress posts (<!--:xx--> ... <!--:--> markers) into per-language texts.'''

    def test_conserves_qtranslate_less_post(self):
        # a post without any language markers maps entirely to the "" key
        content = """Si vous préférez savoir à qui vous parlez commencez par visiter l'<a title="À propos" href="http://some.blog/about/">À propos</a>.
Quoiqu'il en soit, commentaires, questions et suggestions sont les bienvenues !"""
        content_translations = self.module.separate_qtranslate_content(content)
        self.assertEqual(1, len(content_translations))
        self.assertEqual(content, content_translations[""])

    def test_split_a_two_language_post(self):
        content = """<!--:fr-->Si vous préférez savoir à qui vous parlez commencez par visiter l'<a title="À propos" href="http://some.blog/about/">À propos</a>.
Quoiqu'il en soit, commentaires, questions et suggestions sont les bienvenues !
<!--:--><!--:en-->If you'd like to know who you're talking to, please visit the <a title="À propos" href="http://some.blog/about/">about page</a>.
Comments, questions and suggestions are welcome !
<!--:-->"""
        content_translations = self.module.separate_qtranslate_content(content)
        self.assertEqual("""Si vous préférez savoir à qui vous parlez commencez par visiter l'<a title="À propos" href="http://some.blog/about/">À propos</a>.
Quoiqu'il en soit, commentaires, questions et suggestions sont les bienvenues !
""", content_translations["fr"])
        self.assertEqual("""If you'd like to know who you're talking to, please visit the <a title="À propos" href="http://some.blog/about/">about page</a>.
Comments, questions and suggestions are welcome !
""", content_translations["en"])

    def test_split_a_two_language_post_with_teaser(self):
        # the <!--more--> teaser marker must survive in both languages
        content = """<!--:fr-->Si vous préférez savoir à qui vous parlez commencez par visiter l'<a title="À propos" href="http://some.blog/about/">À propos</a>.
Quoiqu'il en soit, commentaires, questions et suggestions sont les bienvenues !
<!--:--><!--:en-->If you'd like to know who you're talking to, please visit the <a title="À propos" href="http://some.blog/about/">about page</a>.
Comments, questions and suggestions are welcome !
<!--:--><!--more--><!--:fr-->
Plus de détails ici !
<!--:--><!--:en-->
More details here !
<!--:-->"""
        content_translations = self.module.separate_qtranslate_content(content)
        self.assertEqual("""Si vous préférez savoir à qui vous parlez commencez par visiter l'<a title="À propos" href="http://some.blog/about/">À propos</a>.
Quoiqu'il en soit, commentaires, questions et suggestions sont les bienvenues !
<!--more--> \n\
Plus de détails ici !
""", content_translations["fr"])
        self.assertEqual("""If you'd like to know who you're talking to, please visit the <a title="À propos" href="http://some.blog/about/">about page</a>.
Comments, questions and suggestions are welcome !
<!--more--> \n\
More details here !
""", content_translations["en"])

    def test_split_a_two_language_post_with_intermission(self):
        # untagged text between language blocks is shared by all languages
        content = """<!--:fr-->Voila voila<!--:-->COMMON<!--:en-->BLA<!--:-->"""
        content_translations = self.module.separate_qtranslate_content(content)
        self.assertEqual("Voila voila COMMON", content_translations["fr"])
        self.assertEqual("COMMON BLA", content_translations["en"])

    def test_split_a_two_language_post_with_uneven_repartition(self):
        content = """<!--:fr-->Voila voila<!--:-->COMMON<!--:fr-->MOUF<!--:--><!--:en-->BLA<!--:-->"""
        content_translations = self.module.separate_qtranslate_content(content)
        self.assertEqual("Voila voila COMMON MOUF", content_translations["fr"])
        self.assertEqual("COMMON BLA", content_translations["en"])

    def test_split_a_two_language_post_with_uneven_repartition_bis(self):
        content = """<!--:fr-->Voila voila<!--:--><!--:en-->BLA<!--:-->COMMON<!--:fr-->MOUF<!--:-->"""
        content_translations = self.module.separate_qtranslate_content(content)
        self.assertEqual("Voila voila COMMON MOUF", content_translations["fr"])
        self.assertEqual("BLA COMMON", content_translations["en"])
class CommandImportWordpressRunTest(BasicCommandImportWordpress):
    '''Run-level tests: patch out the expensive importer steps (site
    generation, data import, url map and configuration writing) and check
    that execute() drives each of them.'''

    def setUp(self):
        # bug fix: name the class explicitly -- super(self.__class__, self)
        # recurses infinitely as soon as this class is subclassed, because
        # self.__class__ is then the subclass on every call
        super(CommandImportWordpressRunTest, self).setUp()
        self.data_import = mock.MagicMock()
        self.site_generation = mock.MagicMock()
        self.write_urlmap = mock.MagicMock()
        self.write_configuration = mock.MagicMock()
        site_generation_patch = mock.patch('os.system', self.site_generation)
        data_import_patch = mock.patch(
            'nikola.plugins.command.import_wordpress.CommandImportWordpress.import_posts', self.data_import)
        write_urlmap_patch = mock.patch(
            'nikola.plugins.command.import_wordpress.CommandImportWordpress.write_urlmap_csv', self.write_urlmap)
        write_configuration_patch = mock.patch(
            'nikola.plugins.command.import_wordpress.CommandImportWordpress.write_configuration', self.write_configuration)
        self.patches = [site_generation_patch, data_import_patch,
                        write_urlmap_patch, write_configuration_patch]
        for patch in self.patches:
            patch.start()

    def tearDown(self):
        del self.data_import
        del self.site_generation
        del self.write_urlmap
        del self.write_configuration
        for patch in self.patches:
            patch.stop()
        del self.patches
        # same fix as in setUp: explicit class reference
        super(CommandImportWordpressRunTest, self).tearDown()

    def test_create_import(self):
        valid_import_arguments = (
            dict(options={'output_folder': 'some_folder'},
                 args=[self.import_filename]),
            dict(args=[self.import_filename]),
            dict(args=[self.import_filename, 'folder_argument']),
        )
        for arguments in valid_import_arguments:
            self.import_command.execute(**arguments)
            self.assertTrue(self.site_generation.called)
            self.assertTrue(self.data_import.called)
            self.assertTrue(self.write_urlmap.called)
            self.assertTrue(self.write_configuration.called)
            self.assertFalse(self.import_command.exclude_drafts)

    def test_ignoring_drafts(self):
        valid_import_arguments = (
            dict(options={'exclude_drafts': True}, args=[
                 self.import_filename]),
            dict(
                options={'exclude_drafts': True,
                         'output_folder': 'some_folder'},
                args=[self.import_filename]),
        )
        for arguments in valid_import_arguments:
            self.import_command.execute(**arguments)
            self.assertTrue(self.import_command.exclude_drafts)
class CommandImportWordpressTest(BasicCommandImportWordpress):
def test_create_import_work_without_argument(self):
# Running this without an argument must not fail.
# It should show the proper usage of the command.
self.import_command.execute()
def test_populate_context(self):
channel = self.import_command.get_channel_from_file(
self.import_filename)
context = self.import_command.populate_context(channel)
for required_key in ('POSTS', 'PAGES', 'COMPILERS'):
self.assertTrue(required_key in context)
self.assertEqual('de', context['DEFAULT_LANG'])
self.assertEqual('Wordpress blog title', context['BLOG_TITLE'])
self.assertEqual('Nikola test blog ;) - with moré Ümläüts',
context['BLOG_DESCRIPTION'])
self.assertEqual('http://some.blog/', context['SITE_URL'])
self.assertEqual('mail@some.blog', context['BLOG_EMAIL'])
self.assertEqual('Niko', context['BLOG_AUTHOR'])
def test_importing_posts_and_attachments(self):
    """Import a channel end-to-end and verify posts, pages, attachments
    and the resulting URL map.

    All side-effecting collaborators (content/metadata writing, downloads,
    directory creation) are mocked out so only the importer's
    orchestration and call arguments are exercised.
    """
    channel = self.import_command.get_channel_from_file(
        self.import_filename)
    self.import_command.context = self.import_command.populate_context(
        channel)
    self.import_command.base_dir = ''
    self.import_command.output_folder = 'new_site'
    self.import_command.squash_newlines = True
    self.import_command.no_downloads = False
    # Ensuring clean results
    self.import_command.url_map = {}
    self.module.links = {}
    write_metadata = mock.MagicMock()
    write_content = mock.MagicMock()
    download_mock = mock.MagicMock()
    with mock.patch('nikola.plugins.command.import_wordpress.CommandImportWordpress.write_content', write_content):
        with mock.patch('nikola.plugins.command.import_wordpress.CommandImportWordpress.write_metadata', write_metadata):
            with mock.patch('nikola.plugins.command.import_wordpress.CommandImportWordpress.download_url_content_to_file', download_mock):
                with mock.patch('nikola.plugins.command.import_wordpress.os.makedirs'):
                    self.import_command.import_posts(channel)
    # Attachments must be downloaded into the local files/ tree,
    # with the path translated to the host OS separator.
    self.assertTrue(download_mock.called)
    qpath = 'new_site/files/wp-content/uploads/2008/07/arzt_und_pfusch-sick-cover.png'
    download_mock.assert_any_call(
        'http://some.blog/wp-content/uploads/2008/07/arzt_und_pfusch-sick-cover.png',
        qpath.replace('/', os.sep))
    self.assertTrue(write_metadata.called)
    write_metadata.assert_any_call(
        'new_site/stories/kontakt.meta'.replace('/', os.sep), 'Kontakt',
        'kontakt', '2009-07-16 20:20:32', None, [])
    # Posts must be written with captions and [sourcecode] blocks already
    # transformed into plain markup.
    self.assertTrue(write_content.called)
    write_content.assert_any_call('new_site/posts/2007/04/hoert.wp'.replace('/', os.sep),
                                  """An image.
<img class="size-full wp-image-16" title="caption test" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="caption test" width="739" height="517" />
Some source code.
```Python
import sys
print sys.version
```
The end.
""")
    write_content.assert_any_call(
        'new_site/posts/2008/07/arzt-und-pfusch-s-i-c-k.wp'.replace('/', os.sep),
        '''<img class="size-full wp-image-10 alignright" title="Arzt+Pfusch - S.I.C.K." src="http://some.blog/wp-content/uploads/2008/07/arzt_und_pfusch-sick-cover.png" alt="Arzt+Pfusch - S.I.C.K." width="210" height="209" />Arzt+Pfusch - S.I.C.K.Gerade bin ich \xfcber das Album <em>S.I.C.K</em> von <a title="Arzt+Pfusch" href="http://www.arztpfusch.com/" target="_blank">Arzt+Pfusch</a> gestolpert, welches Arzt+Pfusch zum Download f\xfcr lau anbieten. Das Album steht unter einer Creative Commons <a href="http://creativecommons.org/licenses/by-nc-nd/3.0/de/">BY-NC-ND</a>-Lizenz.
Die Ladung <em>noisebmstupidevildustrial</em> gibts als MP3s mit <a href="http://www.archive.org/download/dmp005/dmp005_64kb_mp3.zip">64kbps</a> und <a href="http://www.archive.org/download/dmp005/dmp005_vbr_mp3.zip">VBR</a>, als Ogg Vorbis und als FLAC (letztere <a href="http://www.archive.org/details/dmp005">hier</a>). <a href="http://www.archive.org/download/dmp005/dmp005-artwork.zip">Artwork</a> und <a href="http://www.archive.org/download/dmp005/dmp005-lyrics.txt">Lyrics</a> gibts nochmal einzeln zum Download.''')
    write_content.assert_any_call(
        'new_site/stories/kontakt.wp'.replace('/', os.sep), """<h1>Datenschutz</h1>
Ich erhebe und speichere automatisch in meine Server Log Files Informationen, die dein Browser an mich \xfcbermittelt. Dies sind:
<ul>
<li>Browsertyp und -version</li>
<li>verwendetes Betriebssystem</li>
<li>Referrer URL (die zuvor besuchte Seite)</li>
<li>IP Adresse des zugreifenden Rechners</li>
<li>Uhrzeit der Serveranfrage.</li>
</ul>
Diese Daten sind f\xfcr mich nicht bestimmten Personen zuordenbar. Eine Zusammenf\xfchrung dieser Daten mit anderen Datenquellen wird nicht vorgenommen, die Daten werden einzig zu statistischen Zwecken erhoben.""")
    # The importer must record old-URL -> new-URL mappings for redirects.
    self.assertTrue(len(self.import_command.url_map) > 0)
    self.assertEqual(
        self.import_command.url_map['http://some.blog/2007/04/hoert/'],
        'http://some.blog/posts/2007/04/hoert.html')
    self.assertEqual(
        self.import_command.url_map[
            'http://some.blog/2008/07/arzt-und-pfusch-s-i-c-k/'],
        'http://some.blog/posts/2008/07/arzt-und-pfusch-s-i-c-k.html')
    self.assertEqual(
        self.import_command.url_map['http://some.blog/kontakt/'],
        'http://some.blog/stories/kontakt.html')
    # Every known thumbnail size variant must appear in the link registry.
    image_thumbnails = [
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-64x64.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-300x175.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-36x36.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-24x24.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-96x96.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-96x96.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-48x48.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-96x96.png',
        'http://some.blog/wp-content/uploads/2012/12/2012-12-19-1355925145_1024x600_scrot-150x150.png'
    ]
    for link in image_thumbnails:
        self.assertTrue(
            link in self.module.links,
            'No link to "{0}" found in {map}.'.format(
                link,
                map=self.module.links
            )
        )
def test_transforming_content(self):
    """Applying markup conversions to content."""
    transform_code = mock.MagicMock()
    transform_caption = mock.MagicMock()
    transform_newlines = mock.MagicMock()
    with mock.patch('nikola.plugins.command.import_wordpress.CommandImportWordpress.transform_code', transform_code):
        with mock.patch('nikola.plugins.command.import_wordpress.CommandImportWordpress.transform_caption', transform_caption):
            with mock.patch('nikola.plugins.command.import_wordpress.CommandImportWordpress.transform_multiple_newlines', transform_newlines):
                self.import_command.transform_content("random content")
    # transform_content must dispatch to every individual transformation.
    self.assertTrue(transform_code.called)
    self.assertTrue(transform_caption.called)
    self.assertTrue(transform_newlines.called)

def test_transforming_source_code(self):
    """
    Tests the handling of sourcecode tags.

    WordPress [sourcecode language="..."] blocks must be rewritten as
    fenced code blocks carrying the same language.
    """
    content = """Hello World.
[sourcecode language="Python"]
import sys
print sys.version
[/sourcecode]"""
    content = self.import_command.transform_code(content)
    self.assertFalse('[/sourcecode]' in content)
    self.assertFalse('[sourcecode language=' in content)
    replaced_content = """Hello World.
```Python
import sys
print sys.version
```"""
    self.assertEqual(content, replaced_content)
def test_transform_caption(self):
    """A [caption] wrapper is stripped, leaving only the inner <img> tag."""
    caption = '[caption id="attachment_16" align="alignnone" width="739" caption="beautiful picture"]<img class="size-full wp-image-16" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="beautiful picture" width="739" height="517" />[/caption]'
    transformed_content = self.import_command.transform_caption(caption)
    expected_content = '<img class="size-full wp-image-16" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="beautiful picture" width="739" height="517" />'
    self.assertEqual(transformed_content, expected_content)

def test_transform_multiple_captions_in_a_post(self):
    """The caption transform must handle several [caption] blocks in one
    post without merging or dropping any of them (i.e. non-greedy
    matching)."""
    content = """asdasdas
[caption id="attachment_16" align="alignnone" width="739" caption="beautiful picture"]<img class="size-full wp-image-16" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="beautiful picture" width="739" height="517" />[/caption]
asdasdas
asdasdas
[caption id="attachment_16" align="alignnone" width="739" caption="beautiful picture"]<img class="size-full wp-image-16" title="pretty" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="beautiful picture" width="739" height="517" />[/caption]
asdasdas"""
    expected_content = """asdasdas
<img class="size-full wp-image-16" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="beautiful picture" width="739" height="517" />
asdasdas
asdasdas
<img class="size-full wp-image-16" title="pretty" src="http://some.blog/wp-content/uploads/2009/07/caption_test.jpg" alt="beautiful picture" width="739" height="517" />
asdasdas"""
    self.assertEqual(
        expected_content, self.import_command.transform_caption(content))
def test_transform_multiple_newlines(self):
    """Newline squashing only happens when squash_newlines is enabled.

    NOTE(review): the 'content' literal is expected to contain runs of
    consecutive blank lines that squashing collapses; verify the literal
    survived any reformatting of this file.
    """
    content = """This
has
way to many
newlines.
"""
    expected_content = """This
has
way to many
newlines.
"""
    # With squashing disabled the content passes through unchanged.
    self.import_command.squash_newlines = False
    self.assertEqual(content,
                     self.import_command.transform_multiple_newlines(content))
    self.import_command.squash_newlines = True
    self.assertEqual(expected_content,
                     self.import_command.transform_multiple_newlines(content))

def test_transform_caption_with_link_inside(self):
    """A caption whose image is wrapped in a link keeps the link intact."""
    content = """[caption caption="Fehlermeldung"]<a href="http://some.blog/openttd-missing_sound.png"><img class="size-thumbnail wp-image-551" title="openttd-missing_sound" src="http://some.blog/openttd-missing_sound-150x150.png" alt="Fehlermeldung" /></a>[/caption]"""
    transformed_content = self.import_command.transform_caption(content)
    expected_content = """<a href="http://some.blog/openttd-missing_sound.png"><img class="size-thumbnail wp-image-551" title="openttd-missing_sound" src="http://some.blog/openttd-missing_sound-150x150.png" alt="Fehlermeldung" /></a>"""
    self.assertEqual(expected_content, transformed_content)
def test_get_configuration_output_path(self):
    """Importing into an existing site must not clobber its conf.py:
    a distinct, importer-specific config path is generated instead."""
    self.import_command.output_folder = 'new_site'
    default_config_path = os.path.join('new_site', 'conf.py')
    self.import_command.import_into_existing_site = False
    self.assertEqual(default_config_path,
                     self.import_command.get_configuration_output_path())
    self.import_command.import_into_existing_site = True
    config_path_with_timestamp = self.import_command.get_configuration_output_path(
    )
    self.assertNotEqual(default_config_path, config_path_with_timestamp)
    # The alternate path should be identifiable as coming from this command.
    self.assertTrue(self.import_command.name in config_path_with_timestamp)

def test_write_content_does_not_detroy_text(self):
    """write_content must pass non-ASCII-safe HTML through untouched,
    only wrapping it in an <html><body> envelope.

    (Method name typo "detroy" is historical; renaming would break
    external test selection.)
    """
    content = b"""<h1>Installation</h1>
Follow the instructions <a title="Installing Jenkins" href="https://wiki.jenkins-ci.org/display/JENKINS/Installing+Jenkins">described here</a>.
<h1>Plugins</h1>
There are many plugins.
<h2>Violations</h2>
You can use the <a title="Jenkins Plugin: Violations" href="https://wiki.jenkins-ci.org/display/JENKINS/Violations">Violations</a> plugin."""
    open_mock = mock.mock_open()
    with mock.patch('nikola.plugins.basic_import.open', open_mock, create=True):
        self.import_command.write_content('some_file', content)
    # The file must be opened in binary mode and written exactly once.
    open_mock.assert_called_once_with('some_file', 'wb+')
    call_context = open_mock()
    call_context.write.assert_called_once_with(
        content.join([b'<html><body>', b'</body></html>']))

def test_configure_redirections(self):
    """
    Testing the configuration of the redirections.

    We need to make sure that we have valid sources and target links.
    """
    url_map = {
        '/somewhere/else': 'http://foo.bar/posts/somewhereelse.html'
    }
    redirections = self.import_command.configure_redirections(url_map)
    self.assertEqual(1, len(redirections))
    # Source is a relative index file; target is a root-relative link.
    self.assertTrue(('somewhere/else/index.html', '/posts/somewhereelse.html') in redirections)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| immanetize/nikola | tests/test_command_import_wordpress.py | Python | mit | 20,899 | [
"VisIt"
] | bbb8a8220b1323590bb19d18833aaf66660cdfc3a907d7585309fe01e7f8771c |
# Orca
# Copyright (C) 2016 UrbanSim Inc.
# See full license in LICENSE.
import os
import tempfile
import pandas as pd
import pandas.testing as pdt
import pytest
from .. import orca
from ..utils.testing import assert_frames_equal
def setup_function(func):
    # Start each test from a pristine orca registry with caching on;
    # orca keeps global state, so isolation between tests depends on this.
    orca.clear_all()
    orca.enable_cache()


def teardown_function(func):
    # Leave no tables/columns/injectables behind for the next test module.
    orca.clear_all()
    orca.enable_cache()


@pytest.fixture
def df():
    """3x2 integer frame with a string index, shared by most table tests."""
    return pd.DataFrame(
        [[1, 4],
         [2, 5],
         [3, 6]],
        columns=['a', 'b'],
        index=['x', 'y', 'z'])
def test_tables(df):
    """Register a DataFrame-backed and a function-backed table and verify
    wrapper access (columns, index, length, column getters) for both."""
    wrapped_df = orca.add_table('test_frame', df)

    @orca.table()
    def test_func(test_frame):
        return test_frame.to_frame() / 2

    assert set(orca.list_tables()) == {'test_frame', 'test_func'}

    table = orca.get_table('test_frame')
    assert table is wrapped_df
    assert table.columns == ['a', 'b']
    assert table.local_columns == ['a', 'b']
    assert len(table) == 3
    pdt.assert_index_equal(table.index, df.index)
    pdt.assert_series_equal(table.get_column('a'), df.a)
    pdt.assert_series_equal(table.a, df.a)
    pdt.assert_series_equal(table['b'], df['b'])

    table = orca._TABLES['test_func']
    # Function-backed tables are lazy: nothing materialized until to_frame().
    assert table.index is None
    assert table.columns == []
    # Fixed: was `len(table) is 0`. Identity comparison with an int literal
    # is implementation-dependent and a SyntaxWarning on CPython >= 3.8.
    assert len(table) == 0
    pdt.assert_frame_equal(table.to_frame(), df / 2)
    pdt.assert_frame_equal(table.to_frame([]), df[[]])
    pdt.assert_frame_equal(table.to_frame(columns=['a']), df[['a']] / 2)
    pdt.assert_frame_equal(table.to_frame(columns='a'), df[['a']] / 2)
    pdt.assert_index_equal(table.index, df.index)
    pdt.assert_series_equal(table.get_column('a'), df.a / 2)
    pdt.assert_series_equal(table.a, df.a / 2)
    pdt.assert_series_equal(table['b'], df['b'] / 2)
    # After evaluation the wrapper reports real length and columns.
    assert len(table) == 3
    assert table.columns == ['a', 'b']
def test_table_func_cache(df):
    """A cached table function only recomputes after its cache is cleared
    (per-table, globally, or by re-registering the table)."""
    orca.add_injectable('x', 2)

    @orca.table(cache=True)
    def table(variable='x'):
        return df * variable

    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 2)
    orca.add_injectable('x', 3)
    # Still cached: the changed injectable is not picked up yet.
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 2)
    orca.get_table('table').clear_cached()
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 3)
    orca.add_injectable('x', 4)
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 3)
    orca.clear_cache()
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 4)
    orca.add_injectable('x', 5)
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 4)
    # Re-registering the table also invalidates its cached value.
    orca.add_table('table', table)
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 5)


def test_table_func_cache_disabled(df):
    """disable_cache() makes even cache=True table functions recompute;
    enable_cache() restores the previously cached behavior."""
    orca.add_injectable('x', 2)

    @orca.table('table', cache=True)
    def asdf(x):
        return df * x

    orca.disable_cache()
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 2)
    orca.add_injectable('x', 3)
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 3)
    orca.enable_cache()
    orca.add_injectable('x', 4)
    # The value computed while caching was off is now served from cache.
    pdt.assert_frame_equal(orca.get_table('table').to_frame(), df * 3)
def test_table_copy(df):
    """Exhaustively check copy_col semantics: copy_col=True tables hand out
    copies of their columns, copy_col=False tables hand out the original
    Series objects; to_frame() always copies regardless."""
    orca.add_table('test_frame_copied', df, copy_col=True)
    orca.add_table('test_frame_uncopied', df, copy_col=False)
    orca.add_table('test_func_copied', lambda: df, copy_col=True)
    orca.add_table('test_func_uncopied', lambda: df, copy_col=False)

    @orca.table(copy_col=True)
    def test_funcd_copied():
        return df

    @orca.table(copy_col=False)
    def test_funcd_uncopied():
        return df

    @orca.table(copy_col=True)
    def test_funcd_copied2(test_frame_copied):
        # local returns original, but it is copied by copy_col.
        return test_frame_copied.local

    @orca.table(copy_col=True)
    def test_funcd_copied3(test_frame_uncopied):
        # local returns original, but it is copied by copy_col.
        return test_frame_uncopied.local

    @orca.table(copy_col=False)
    def test_funcd_uncopied2(test_frame_copied):
        # local returns original.
        return test_frame_copied.local

    @orca.table(copy_col=False)
    def test_funcd_uncopied3(test_frame_uncopied):
        # local returns original.
        return test_frame_uncopied.local

    orca.add_table('test_cache_copied', lambda: df, cache=True, copy_col=True)
    orca.add_table(
        'test_cache_uncopied', lambda: df, cache=True, copy_col=False)

    @orca.table(cache=True, copy_col=True)
    def test_cached_copied():
        return df

    @orca.table(cache=True, copy_col=False)
    def test_cached_uncopied():
        return df

    # Create tables with computed columns.
    orca.add_table(
        'test_copied_columns', pd.DataFrame(index=df.index), copy_col=True)
    orca.add_table(
        'test_uncopied_columns', pd.DataFrame(index=df.index), copy_col=False)
    for column_name in ['a', 'b']:
        label = "test_frame_uncopied.{}".format(column_name)

        # Bind the label as a default argument so each closure keeps its
        # own column expression (avoids the late-binding closure pitfall).
        def func(col=label):
            return col
        for table_name in ['test_copied_columns', 'test_uncopied_columns']:
            orca.add_column(table_name, column_name, func)

    for name in ['test_frame_uncopied', 'test_func_uncopied',
                 'test_funcd_uncopied', 'test_funcd_uncopied2',
                 'test_funcd_uncopied3', 'test_cache_uncopied',
                 'test_cached_uncopied', 'test_uncopied_columns',
                 'test_frame_copied', 'test_func_copied',
                 'test_funcd_copied', 'test_funcd_copied2',
                 'test_funcd_copied3', 'test_cache_copied',
                 'test_cached_copied', 'test_copied_columns']:
        table = orca.get_table(name)
        table2 = orca.get_table(name)

        # to_frame will always return a copy.
        if 'columns' in name:
            assert_frames_equal(table.to_frame(), df)
        else:
            pdt.assert_frame_equal(table.to_frame(), df)
        assert table.to_frame() is not df
        pdt.assert_frame_equal(table.to_frame(), table.to_frame())
        assert table.to_frame() is not table.to_frame()
        pdt.assert_series_equal(table.to_frame()['a'], df['a'])
        assert table.to_frame()['a'] is not df['a']
        pdt.assert_series_equal(table.to_frame()['a'],
                                table.to_frame()['a'])
        assert table.to_frame()['a'] is not table.to_frame()['a']

        if 'uncopied' in name:
            # Uncopied tables hand out the very same Series object.
            pdt.assert_series_equal(table['a'], df['a'])
            assert table['a'] is df['a']
            pdt.assert_series_equal(table['a'], table2['a'])
            assert table['a'] is table2['a']
        else:
            # Copied tables return equal data but distinct objects.
            pdt.assert_series_equal(table['a'], df['a'])
            assert table['a'] is not df['a']
            pdt.assert_series_equal(table['a'], table2['a'])
            assert table['a'] is not table2['a']
def test_columns_for_table():
    """Columns registered for different tables are listed per-table and
    kept separate in the internal registry."""
    orca.add_column(
        'table1', 'col10', pd.Series([1, 2, 3], index=['a', 'b', 'c']))
    orca.add_column(
        'table2', 'col20', pd.Series([10, 11, 12], index=['x', 'y', 'z']))

    @orca.column('table1')
    def col11():
        return pd.Series([4, 5, 6], index=['a', 'b', 'c'])

    @orca.column('table2', 'col21')
    def asdf():
        return pd.Series([13, 14, 15], index=['x', 'y', 'z'])

    t1_col_names = orca.list_columns_for_table('table1')
    assert set(t1_col_names) == {'col10', 'col11'}
    t2_col_names = orca.list_columns_for_table('table2')
    assert set(t2_col_names) == {'col20', 'col21'}
    t1_cols = orca._columns_for_table('table1')
    assert 'col10' in t1_cols and 'col11' in t1_cols
    t2_cols = orca._columns_for_table('table2')
    assert 'col20' in t2_cols and 'col21' in t2_cols


def test_columns_and_tables(df):
    """Extra columns attach to both frame- and function-backed tables,
    may reference other computed columns, and show up in to_frame()."""
    orca.add_table('test_frame', df)

    @orca.table()
    def test_func(test_frame):
        return test_frame.to_frame() / 2

    orca.add_column('test_frame', 'c', pd.Series([7, 8, 9], index=df.index))

    @orca.column('test_func', 'd')
    def asdf(test_func):
        return test_func.to_frame(columns=['b'])['b'] * 2

    @orca.column('test_func')
    def e(column='test_func.d'):
        # Computed column that depends on another computed column.
        return column + 1

    test_frame = orca.get_table('test_frame')
    assert set(test_frame.columns) == set(['a', 'b', 'c'])
    assert_frames_equal(
        test_frame.to_frame(),
        pd.DataFrame(
            {'a': [1, 2, 3],
             'b': [4, 5, 6],
             'c': [7, 8, 9]},
            index=['x', 'y', 'z']))
    assert_frames_equal(
        test_frame.to_frame(columns=['a', 'c']),
        pd.DataFrame(
            {'a': [1, 2, 3],
             'c': [7, 8, 9]},
            index=['x', 'y', 'z']))

    test_func_df = orca._TABLES['test_func']
    # Before evaluation only the extra (registered) columns are known.
    assert set(test_func_df.columns) == set(['d', 'e'])
    assert_frames_equal(
        test_func_df.to_frame(),
        pd.DataFrame(
            {'a': [0.5, 1, 1.5],
             'b': [2, 2.5, 3],
             'c': [3.5, 4, 4.5],
             'd': [4., 5., 6.],
             'e': [5., 6., 7.]},
            index=['x', 'y', 'z']))
    assert_frames_equal(
        test_func_df.to_frame(columns=['b', 'd']),
        pd.DataFrame(
            {'b': [2, 2.5, 3],
             'd': [4., 5., 6.]},
            index=['x', 'y', 'z']))
    # After evaluation the wrapper also reports the local columns.
    assert set(test_func_df.columns) == set(['a', 'b', 'c', 'd', 'e'])
    assert set(orca.list_columns()) == {
        ('test_frame', 'c'), ('test_func', 'd'), ('test_func', 'e')}
def test_column_cache(df):
    """A cached column recomputes only after its own, its table's, or the
    global cache is cleared, or after re-registration."""
    orca.add_injectable('x', 2)
    series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
    key = ('table', 'col')

    @orca.table()
    def table():
        return df

    @orca.column(*key, cache=True)
    def column(variable='x'):
        return series * variable

    def c():
        # Helper: fetch the registered column wrapper fresh each time.
        return orca._COLUMNS[key]

    pdt.assert_series_equal(c()(), series * 2)
    orca.add_injectable('x', 3)
    pdt.assert_series_equal(c()(), series * 2)
    c().clear_cached()
    pdt.assert_series_equal(c()(), series * 3)
    orca.add_injectable('x', 4)
    pdt.assert_series_equal(c()(), series * 3)
    orca.clear_cache()
    pdt.assert_series_equal(c()(), series * 4)
    orca.add_injectable('x', 5)
    pdt.assert_series_equal(c()(), series * 4)
    # Clearing the parent table's cache also clears its columns.
    orca.get_table('table').clear_cached()
    pdt.assert_series_equal(c()(), series * 5)
    orca.add_injectable('x', 6)
    pdt.assert_series_equal(c()(), series * 5)
    orca.add_column(*key, column=column, cache=True)
    pdt.assert_series_equal(c()(), series * 6)


def test_manual_cache_clearing(df):
    """clear_injectable/clear_column(s)/clear_table invalidate exactly the
    targeted cached values."""
    @orca.injectable(cache=True)
    def my_inj(x):
        return x * 2

    @orca.table(cache=True)
    def my_table(x):
        return df + x

    @orca.column('my_table', cache=True)
    def extra1(my_table):
        return my_table['a'] * -1

    @orca.column('my_table', cache=True)
    def extra2(my_table):
        return my_table['b'] * -1

    def run_checks(x):
        # Set 'x' and confirm every derived value reflects it, which only
        # happens if the relevant caches were actually cleared.
        orca.add_injectable('x', x)
        inj = orca.get_injectable('my_inj')
        tab = orca.get_table('my_table').to_frame()
        assert inj == x * 2
        assert (tab['a'] == df['a'] + x).all()
        assert (tab['b'] == df['b'] + x).all()
        assert (tab['extra1'] == -1 * (df['a'] + x)).all()
        assert (tab['extra2'] == -1 * (df['b'] + x)).all()

    # initial collection
    run_checks(100)

    # manually clear out and re-check
    orca.clear_injectable('my_inj')
    orca.clear_column('my_table', 'extra1')
    orca.clear_column('my_table', 'extra2')
    orca.clear_table('my_table')
    run_checks(200)

    # check clearing all columns
    orca.clear_injectable('my_inj')
    orca.clear_columns('my_table')
    orca.clear_table('my_table')
    run_checks(300)

    # check clearing subset of columns
    orca.clear_injectable('my_inj')
    orca.clear_columns('my_table', ['extra1', 'extra2'])
    orca.clear_table('my_table')
    run_checks(400)
def test_update_scope():
    """Changing an item's cache scope updates the raw wrapper and clears
    any value cached under the old scope."""
    @orca.injectable(cache=True)
    def my_inj(x):
        return x

    @orca.table()
    def my_table(x):
        df = pd.DataFrame({'a': [100, 200, 300]})
        return df + x

    @orca.column('my_table', cache=True)
    def my_col(my_table):
        return my_table['a'] * -1

    # initial collection
    orca.add_injectable('x', 10)
    orca.get_injectable('my_inj')
    orca.get_table('my_table').to_frame()

    # update injectable scope
    orca.update_injectable_scope('my_inj', 'iteration')
    inj = orca.get_raw_injectable('my_inj')
    assert inj.cache
    assert inj.cache_scope == 'iteration'

    # update table scope
    orca.update_table_scope('my_table', 'step')
    tab = orca.get_raw_table('my_table')
    assert tab.cache
    assert tab.cache_scope == 'step'

    # update column scope; passing no scope disables caching entirely
    orca.update_column_scope('my_table', 'my_col')
    col = orca.get_raw_column('my_table', 'my_col')
    assert not col.cache
    assert col.cache_scope == 'forever'

    # invalid cache scope
    with pytest.raises(ValueError):
        orca.update_table_scope('my_table', 'bogus scope')

    # make sure the cached values got cleared
    orca.add_injectable('x', 20)
    assert orca.get_injectable('my_inj') == 20
    df = orca.get_table('my_table').to_frame()
    assert (df['a'].values == [120, 220, 320]).all()
    assert (df['my_col'].values == [-120, -220, -320]).all()


def test_column_cache_disabled(df):
    """disable_cache() makes cache=True columns recompute on every access."""
    orca.add_injectable('x', 2)
    series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
    key = ('table', 'col')

    @orca.table()
    def table():
        return df

    @orca.column(*key, cache=True)
    def column(x):
        return series * x

    def c():
        return orca._COLUMNS[key]

    orca.disable_cache()
    pdt.assert_series_equal(c()(), series * 2)
    orca.add_injectable('x', 3)
    pdt.assert_series_equal(c()(), series * 3)
    orca.enable_cache()
    orca.add_injectable('x', 4)
    # The last value computed while caching was off is now cached.
    pdt.assert_series_equal(c()(), series * 3)
def test_update_col(df):
    """update_col replaces a whole column; update_col_from_series aligns on
    index and optionally casts dtypes."""
    wrapped = orca.add_table('table', df)

    wrapped.update_col('b', pd.Series([7, 8, 9], index=df.index))
    pdt.assert_series_equal(
        wrapped['b'], pd.Series([7, 8, 9], index=df.index, name='b'))

    a_dtype = wrapped['a'].dtype

    # test 1 - cast the data type before the update
    wrapped.update_col_from_series('a', pd.Series(dtype=a_dtype))
    pdt.assert_series_equal(wrapped['a'], df['a'])

    # test 2 - let the update method do the cast
    wrapped.update_col_from_series('a', pd.Series(), True)
    pdt.assert_series_equal(wrapped['a'], df['a'])

    # test 3 - don't cast, should raise an error
    with pytest.raises(ValueError):
        wrapped.update_col_from_series('a', pd.Series())

    # Partial update: only the matching index label changes.
    wrapped.update_col_from_series('a', pd.Series([99], index=['y']))
    pdt.assert_series_equal(
        wrapped['a'], pd.Series([1, 99, 3], index=df.index, name='a'))
class _FakeTable(object):
def __init__(self, name, columns):
self.name = name
self.columns = columns
@pytest.fixture
def fta():
    """Fake table 'a' with three 'a*'-prefixed columns."""
    return _FakeTable('a', ['aa', 'ab', 'ac'])


@pytest.fixture
def ftb():
    """Fake table 'b' with three 'b*'-prefixed columns."""
    return _FakeTable('b', ['bx', 'by', 'bz'])


def test_column_map_raises(fta, ftb):
    """Requesting a column no table provides is an error."""
    with pytest.raises(RuntimeError):
        orca.column_map([fta, ftb], ['aa', 'by', 'bz', 'cw'])


def test_column_map_none(fta, ftb):
    """columns=None maps every table to None (meaning 'all columns')."""
    assert orca.column_map([fta, ftb], None) == {'a': None, 'b': None}


def test_column_map(fta, ftb):
    """Requested columns are partitioned by owning table; a table that
    owns none of them maps to an empty list."""
    result = orca.column_map([fta, ftb], ['aa', 'by', 'bz'])
    assert result['a'] == ['aa']
    assert sorted(result['b']) == ['by', 'bz']

    result = orca.column_map([fta, ftb], ['by', 'bz'])
    assert result['a'] == []
    assert sorted(result['b']) == ['by', 'bz']
def test_is_step():
    """is_step reports whether a name is registered as a step."""
    @orca.step()
    def test_step():
        pass

    assert orca.is_step('test_step') is True
    assert orca.is_step('not_a_step') is False


def test_steps(df):
    """A step receives injected tables (and expression-bound columns),
    and its table mutations persist in the registry."""
    orca.add_table('test_table', df)
    df2 = df / 2
    orca.add_table('test_table2', df2)

    @orca.step()
    def test_step(test_table, test_column='test_table2.b'):
        tt = test_table.to_frame()
        test_table['a'] = tt['a'] + tt['b']
        # The default-argument expression injected the column directly.
        pdt.assert_series_equal(test_column, df2['b'])

    with pytest.raises(KeyError):
        orca.get_step('asdf')
    step = orca.get_step('test_step')
    # Dependency introspection must see both the parameter table and the
    # table referenced by the column expression.
    assert step._tables_used() == set(['test_table', 'test_table2'])
    step()

    table = orca.get_table('test_table')
    pdt.assert_frame_equal(
        table.to_frame(),
        pd.DataFrame(
            {'a': [5, 7, 9],
             'b': [4, 5, 6]},
            index=['x', 'y', 'z']))

    assert orca.list_steps() == ['test_step']
def test_step_run(df):
    """orca.run executes steps per iteration variable, with computed
    columns and the iter_var injectable available to each step."""
    orca.add_table('test_table', df)

    @orca.table()
    def table_func(test_table):
        tt = test_table.to_frame()
        tt['c'] = [7, 8, 9]
        return tt

    @orca.column('table_func')
    def new_col(test_table, table_func):
        tt = test_table.to_frame()
        tf = table_func.to_frame(columns=['c'])
        return tt['a'] + tt['b'] + tf['c']

    @orca.step()
    def test_step1(iter_var, test_table, table_func):
        tf = table_func.to_frame(columns=['new_col'])
        test_table[iter_var] = tf['new_col'] + iter_var

    @orca.step('test_step2')
    def asdf(table='test_table'):
        tt = table.to_frame()
        table['a'] = tt['a'] ** 2

    orca.run(steps=['test_step1', 'test_step2'], iter_vars=[2000, 3000])

    test_table = orca.get_table('test_table')
    # Column 3000 reflects 'a' already squared once by the 2000 iteration.
    assert_frames_equal(
        test_table.to_frame(),
        pd.DataFrame(
            {'a': [1, 16, 81],
             'b': [4, 5, 6],
             2000: [2012, 2015, 2018],
             3000: [3012, 3017, 3024]},
            index=['x', 'y', 'z']))

    m = orca.get_step('test_step1')
    assert set(m._tables_used()) == {'test_table', 'table_func'}


def test_step_func_source_data():
    """func_source_data returns the file, line number and exact source
    text of the wrapped step function.

    NOTE: do not add comments inside the nested ``test_step`` below —
    its literal source text is asserted.
    """
    @orca.step()
    def test_step():
        return 'orca'

    filename, lineno, source = orca.get_step('test_step').func_source_data()

    assert filename.endswith('test_orca.py')
    assert isinstance(lineno, int)
    assert source == (
        "    @orca.step()\n"
        "    def test_step():\n"
        "        return 'orca'\n")
def test_get_broadcast():
    """Broadcasts are directional and retrievable as Broadcast tuples of
    (cast, onto, cast_on, onto_on, cast_index, onto_index)."""
    orca.broadcast('a', 'b', cast_on='ax', onto_on='bx')
    orca.broadcast('x', 'y', cast_on='yx', onto_index=True)

    assert orca.is_broadcast('a', 'b') is True
    # Direction matters: the reverse pair is not registered.
    assert orca.is_broadcast('b', 'a') is False

    with pytest.raises(KeyError):
        orca.get_broadcast('b', 'a')

    ab = orca.get_broadcast('a', 'b')
    assert isinstance(ab, orca.Broadcast)
    assert ab == ('a', 'b', 'ax', 'bx', False, False)

    xy = orca.get_broadcast('x', 'y')
    assert isinstance(xy, orca.Broadcast)
    assert xy == ('x', 'y', 'yx', None, False, True)


def test_get_broadcasts():
    """_get_broadcasts returns only broadcasts whose both endpoints are in
    the requested table set, and rejects incomplete merge sets."""
    orca.broadcast('a', 'b')
    orca.broadcast('b', 'c')
    orca.broadcast('z', 'b')
    orca.broadcast('f', 'g')

    # 'g' has no path to the others: cannot merge.
    with pytest.raises(ValueError):
        orca._get_broadcasts(['a', 'b', 'g'])

    assert set(orca._get_broadcasts(['a', 'b', 'c', 'z']).keys()) == \
        {('a', 'b'), ('b', 'c'), ('z', 'b')}
    assert set(orca._get_broadcasts(['a', 'b', 'z']).keys()) == \
        {('a', 'b'), ('z', 'b')}
    assert set(orca._get_broadcasts(['a', 'b', 'c']).keys()) == \
        {('a', 'b'), ('b', 'c')}

    assert set(orca.list_broadcasts()) == \
        {('a', 'b'), ('b', 'c'), ('z', 'b'), ('f', 'g')}
def test_collect_variables(df):
    """_collect_variables resolves names and expressions (tables,
    injectables, 'table.column' lookups) into a single dict."""
    orca.add_table('df', df)

    @orca.table()
    def df_func():
        return df

    @orca.column('df')
    def zzz():
        return df['a'] / 2

    orca.add_injectable('answer', 42)

    @orca.injectable()
    def injected():
        return 'injected'

    @orca.table('source table', cache=True)
    def source():
        return df

    with pytest.raises(KeyError):
        orca._collect_variables(['asdf'])

    with pytest.raises(KeyError):
        orca._collect_variables(names=['df'], expressions=['asdf'])

    # Expressions are matched to the trailing names: 'source_label' gets
    # the 'source table' expression, 'df_a' gets 'df.a'.
    names = ['df', 'df_func', 'answer', 'injected', 'source_label', 'df_a']
    expressions = ['source table', 'df.a']
    things = orca._collect_variables(names, expressions)

    assert set(things.keys()) == set(names)
    assert isinstance(things['source_label'], orca.DataFrameWrapper)
    pdt.assert_frame_equal(things['source_label'].to_frame(), df)
    assert isinstance(things['df_a'], pd.Series)
    pdt.assert_series_equal(things['df_a'], df['a'])


def test_collect_variables_expression_only(df):
    """A 'table.column' expression alone yields the column as a Series."""
    @orca.table()
    def table():
        return df

    vars = orca._collect_variables(['a'], ['table.a'])
    pdt.assert_series_equal(vars['a'], df.a)
def test_injectables():
    """Injectables may be plain values, auto-called functions (whose own
    arguments are injected), or autocall=False raw callables."""
    orca.add_injectable('answer', 42)

    @orca.injectable()
    def func1(answer):
        return answer * 2

    @orca.injectable('func2', autocall=False)
    def asdf(variable='x'):
        # autocall=False: stored as-is, the default expression is NOT
        # resolved by injection.
        return variable / 2

    @orca.injectable()
    def func3(func2):
        return func2(4)

    @orca.injectable()
    def func4(func='func1'):
        return func / 2

    assert orca._INJECTABLES['answer'] == 42
    assert orca._INJECTABLES['func1']() == 42 * 2
    assert orca._INJECTABLES['func2'](4) == 2
    assert orca._INJECTABLES['func3']() == 2
    assert orca._INJECTABLES['func4']() == 42

    assert orca.get_injectable('answer') == 42
    assert orca.get_injectable('func1') == 42 * 2
    assert orca.get_injectable('func2')(4) == 2
    assert orca.get_injectable('func3') == 2
    assert orca.get_injectable('func4') == 42

    with pytest.raises(KeyError):
        orca.get_injectable('asdf')

    assert set(orca.list_injectables()) == \
        {'answer', 'func1', 'func2', 'func3', 'func4'}


def test_injectables_combined(df):
    """Steps can combine injectables and tables; re-adding a table from
    inside a step replaces it in the registry."""
    @orca.injectable()
    def column():
        return pd.Series(['a', 'b', 'c'], index=df.index)

    @orca.table()
    def table():
        return df

    @orca.step()
    def step(table, column):
        df = table.to_frame()
        df['new'] = column
        orca.add_table('table', df)

    orca.run(steps=['step'])

    table_wr = orca.get_table('table').to_frame()

    pdt.assert_frame_equal(table_wr[['a', 'b']], df)
    pdt.assert_series_equal(table_wr['new'], pd.Series(column(), name='new'))
def test_injectables_cache():
    """A cached injectable recomputes only after its own cache, the global
    cache, or its registration is refreshed."""
    x = 2

    @orca.injectable(autocall=True, cache=True)
    def inj():
        return x * x

    def i():
        # Helper: fetch the registered injectable wrapper fresh each time.
        return orca._INJECTABLES['inj']

    assert i()() == 4
    x = 3
    assert i()() == 4
    i().clear_cached()
    assert i()() == 9
    x = 4
    assert i()() == 9
    orca.clear_cache()
    assert i()() == 16
    x = 5
    assert i()() == 16
    orca.add_injectable('inj', inj, autocall=True, cache=True)
    assert i()() == 25


def test_injectables_cache_disabled():
    """disable_cache() makes cached injectables recompute every call."""
    x = 2

    @orca.injectable(autocall=True, cache=True)
    def inj():
        return x * x

    def i():
        return orca._INJECTABLES['inj']

    orca.disable_cache()
    assert i()() == 4
    x = 3
    assert i()() == 9
    orca.enable_cache()
    assert i()() == 9
    x = 4
    assert i()() == 9
    orca.disable_cache()
    assert i()() == 16


def test_memoized_injectable():
    """autocall=False + memoize=True wraps the callable with a per-args
    cache exposing .cache and .clear_cached."""
    outside = 'x'

    @orca.injectable(autocall=False, memoize=True)
    def x(s):
        return outside + s

    assert 'x' in orca._MEMOIZED

    def getx():
        return orca.get_injectable('x')

    assert hasattr(getx(), 'cache')
    assert hasattr(getx(), 'clear_cached')

    assert getx()('y') == 'xy'
    outside = 'z'
    # Same arguments: memoized result is returned despite the change.
    assert getx()('y') == 'xy'
    getx().clear_cached()
    assert getx()('y') == 'zy'


def test_memoized_injectable_cache_off():
    """disable_cache() turns memoization off until re-enabled."""
    outside = 'x'

    @orca.injectable(autocall=False, memoize=True)
    def x(s):
        return outside + s

    def getx():
        return orca.get_injectable('x')('y')

    orca.disable_cache()
    assert getx() == 'xy'
    outside = 'z'
    assert getx() == 'zy'
    orca.enable_cache()
    outside = 'a'
    # The value memoized while caching was off is now served.
    assert getx() == 'zy'
    orca.disable_cache()
    assert getx() == 'ay'
def test_clear_cache_all(df):
    """clear_cache() with no scope empties every cache: tables, columns,
    injectables and memoized callables."""
    @orca.table(cache=True)
    def table():
        return df

    @orca.column('table', cache=True)
    def z(table):
        return df.a

    @orca.injectable(cache=True)
    def x():
        return 'x'

    @orca.injectable(autocall=False, memoize=True)
    def y(s):
        return s + 'y'

    # Populate all four cache kinds.
    orca.eval_variable('table.z')
    orca.eval_variable('x')
    orca.get_injectable('y')('x')

    assert list(orca._TABLE_CACHE.keys()) == ['table']
    assert list(orca._COLUMN_CACHE.keys()) == [('table', 'z')]
    assert list(orca._INJECTABLE_CACHE.keys()) == ['x']
    assert orca._MEMOIZED['y'].value.cache == {(('x',), None): 'xy'}

    orca.clear_cache()

    assert orca._TABLE_CACHE == {}
    assert orca._COLUMN_CACHE == {}
    assert orca._INJECTABLE_CACHE == {}
    assert orca._MEMOIZED['y'].value.cache == {}


def test_clear_cache_scopes(df):
    """Scoped clearing only evicts items cached at the given scope or
    narrower: step < iteration < forever."""
    @orca.table(cache=True, cache_scope='forever')
    def table():
        return df

    @orca.column('table', cache=True, cache_scope='iteration')
    def z(table):
        return df.a

    @orca.injectable(cache=True, cache_scope='step')
    def x():
        return 'x'

    @orca.injectable(autocall=False, memoize=True, cache_scope='iteration')
    def y(s):
        return s + 'y'

    orca.eval_variable('table.z')
    orca.eval_variable('x')
    orca.get_injectable('y')('x')

    assert list(orca._TABLE_CACHE.keys()) == ['table']
    assert list(orca._COLUMN_CACHE.keys()) == [('table', 'z')]
    assert list(orca._INJECTABLE_CACHE.keys()) == ['x']
    assert orca._MEMOIZED['y'].value.cache == {(('x',), None): 'xy'}

    # step scope: only the step-scoped injectable is evicted.
    orca.clear_cache(scope='step')

    assert list(orca._TABLE_CACHE.keys()) == ['table']
    assert list(orca._COLUMN_CACHE.keys()) == [('table', 'z')]
    assert orca._INJECTABLE_CACHE == {}
    assert orca._MEMOIZED['y'].value.cache == {(('x',), None): 'xy'}

    # iteration scope also clears the iteration-scoped column and memo.
    orca.clear_cache(scope='iteration')

    assert list(orca._TABLE_CACHE.keys()) == ['table']
    assert orca._COLUMN_CACHE == {}
    assert orca._INJECTABLE_CACHE == {}
    assert orca._MEMOIZED['y'].value.cache == {}

    # forever scope clears everything remaining.
    orca.clear_cache(scope='forever')

    assert orca._TABLE_CACHE == {}
    assert orca._COLUMN_CACHE == {}
    assert orca._INJECTABLE_CACHE == {}
    assert orca._MEMOIZED['y'].value.cache == {}
def test_cache_scope(df):
    """Injectables cached with different scopes refresh at different
    times across a run: 'forever' never refreshes, 'iteration' refreshes
    once per iteration, 'step' refreshes before every step."""
    orca.add_injectable('x', 11)
    orca.add_injectable('y', 22)
    orca.add_injectable('z', 33)
    orca.add_injectable('iterations', 1)
    @orca.injectable(cache=True, cache_scope='forever')
    def a(x):
        return x
    @orca.injectable(cache=True, cache_scope='iteration')
    def b(y):
        return y
    @orca.injectable(cache=True, cache_scope='step')
    def c(z):
        return z
    @orca.step()
    def m1(iter_var, a, b, c):
        # Mutate the underlying injectables; whether a/b/c observe the
        # new values in later steps depends on their cache scope.
        orca.add_injectable('x', iter_var + a)
        orca.add_injectable('y', iter_var + b)
        orca.add_injectable('z', iter_var + c)
        assert a == 11
    @orca.step()
    def m2(iter_var, a, b, c, iterations):
        assert a == 11
        if iter_var == 1000:
            assert b == 22
            assert c == 1033
        elif iter_var == 2000:
            assert b == 1022
            assert c == 3033
        orca.add_injectable('iterations', iterations + 1)
    orca.run(['m1', 'm2'], iter_vars=[1000, 2000])
def test_table_func_local_cols(df):
    """local_columns reflects only the wrapped frame's own columns,
    not columns registered afterwards via add_column."""
    @orca.table()
    def table():
        return df
    extra = pd.Series(['a', 'b', 'c'], index=df.index)
    orca.add_column('table', 'new', extra)
    wrapper = orca.get_table('table')
    assert wrapper.local_columns == ['a', 'b']
def test_is_table(df):
    """is_table answers membership for registered table names only."""
    orca.add_table('table', df)
    for name, expected in (('table', True), ('asdf', False)):
        assert orca.is_table(name) is expected
@pytest.fixture
def store_name(request):
    """Return a temporary ``.h5`` path; any file created at that path is
    removed again at test teardown."""
    # Only the generated path string is kept; NamedTemporaryFile's handle
    # (and the file itself) disappears once the object is collected.
    fname = tempfile.NamedTemporaryFile(suffix='.h5').name
    def fin():
        if os.path.isfile(fname):
            os.remove(fname)
    request.addfinalizer(fin)
    return fname
def test_write_tables(df, store_name):
    """write_tables stores the tables used by a step in HDF5, optionally
    namespaced under a per-iteration prefix."""
    orca.add_table('table', df)
    @orca.step()
    def step(table):
        pass
    step_tables = orca.get_step_table_names(['step'])
    # No prefix: the table is written at the store root.
    orca.write_tables(store_name, step_tables, None)
    with pd.HDFStore(store_name, mode='r') as store:
        assert 'table' in store
        pdt.assert_frame_equal(store['table'], df)
    # With a prefix the key is namespaced by the iteration value.
    orca.write_tables(store_name, step_tables, 1969)
    with pd.HDFStore(store_name, mode='r') as store:
        assert '1969/table' in store
        pdt.assert_frame_equal(store['1969/table'], df)
def test_write_all_tables(df, store_name):
    """Calling write_tables with no table list dumps every registered table."""
    orca.add_table('table', df)
    orca.write_tables(store_name)
    registered = orca.list_tables()
    with pd.HDFStore(store_name, mode='r') as store:
        assert all(name in store for name in registered)
def test_run_and_write_tables(df, store_name):
    """run() writes data every ``out_interval`` iterations plus a 'base'
    snapshot and always the final iteration."""
    orca.add_table('table', df)
    def year_key(y):
        return '{}'.format(y)
    def series_year(y):
        return pd.Series([y] * 3, index=df.index, name=str(y))
    @orca.step()
    def step(iter_var, table):
        # Each iteration appends one column named after the iteration.
        table[year_key(iter_var)] = series_year(iter_var)
    orca.run(
        ['step'], iter_vars=range(11), data_out=store_name, out_interval=3)
    with pd.HDFStore(store_name, mode='r') as store:
        # Checkpoints exist at every third iteration...
        for year in range(0, 11, 3):
            key = '{}/table'.format(year)
            assert key in store
            for x in range(year):
                pdt.assert_series_equal(
                    store[key][year_key(x)], series_year(x))
        assert 'base/table' in store
        # ...and the final iteration carries every column written so far.
        for x in range(11):
            pdt.assert_series_equal(
                store['10/table'][year_key(x)], series_year(x))
def test_run_and_write_tables_out_tables_provided(df, store_name):
    """Explicit out_base_tables/out_run_tables limit which tables are
    written at the base and per-iteration checkpoints."""
    table_names = ['table', 'table2', 'table3']
    for t in table_names:
        orca.add_table(t, df)
    @orca.step()
    def step(iter_var, table, table2):
        return
    orca.run(
        ['step'],
        iter_vars=range(1),
        data_out=store_name,
        out_base_tables=table_names,
        out_run_tables=['table'])
    with pd.HDFStore(store_name, mode='r') as store:
        # All three tables appear in the base snapshot...
        for t in table_names:
            assert 'base/{}'.format(t) in store
        # ...but only 'table' at the iteration checkpoint.
        assert '0/table' in store
        assert '0/table2' not in store
        assert '0/table3' not in store
def test_get_raw_table(df):
    """get_raw_table hands back the wrapper type matching how the table
    was registered; table_type reports the same distinction."""
    orca.add_table('table1', df)
    @orca.table()
    def table2():
        return df
    raw_frame = orca.get_raw_table('table1')
    raw_func = orca.get_raw_table('table2')
    assert isinstance(raw_frame, orca.DataFrameWrapper)
    assert isinstance(raw_func, orca.TableFuncWrapper)
    assert orca.table_type('table1') == 'dataframe'
    assert orca.table_type('table2') == 'function'
def test_get_table(df):
    """get_table always returns a DataFrameWrapper regardless of how the
    table was registered, and raises KeyError for unknown names."""
    orca.add_table('frame', df)
    @orca.table()
    def table():
        return df
    @orca.table(cache=True)
    def source():
        return df
    with pytest.raises(KeyError):
        orca.get_table('asdf')
    for name in ('frame', 'table', 'source'):
        wrapper = orca.get_table(name)
        assert isinstance(wrapper, orca.DataFrameWrapper)
        pdt.assert_frame_equal(wrapper.to_frame(), df)
def test_cache_disabled_cm():
    """cache_disabled() bypasses injectable caches while still keeping
    them up to date with freshly computed values."""
    x = 3
    @orca.injectable(cache=True)
    def xi():
        return x
    assert orca.get_injectable('xi') == 3
    x = 5
    # The cached value is served even though the closure variable changed.
    assert orca.get_injectable('xi') == 3
    with orca.cache_disabled():
        assert orca.get_injectable('xi') == 5
    # cache still gets updated even when cacheing is off
    assert orca.get_injectable('xi') == 5
def test_injectables_cm():
    """The injectables() context manager overlays values for the duration
    of its block and restores the originals on exit."""
    base = {'a': 'a', 'b': 'b', 'c': 'c'}
    for name, value in base.items():
        orca.add_injectable(name, value)
    with orca.injectables():
        assert orca._INJECTABLES == base
        with orca.injectables(c='d', x='x', y='y', z='z'):
            overlaid = dict(base, c='d', x='x', y='y', z='z')
            assert orca._INJECTABLES == overlaid
    assert orca._INJECTABLES == base
def test_temporary_tables_cm():
    """temporary_tables() registers tables only for the managed block."""
    def registered():
        return sorted(orca._TABLES.keys())
    orca.add_table('a', pd.DataFrame())
    with orca.temporary_tables():
        assert registered() == ['a']
        with orca.temporary_tables(a=pd.DataFrame(), b=pd.DataFrame()):
            assert registered() == ['a', 'b']
    assert registered() == ['a']
def test_is_expression():
    """Dotted 'table.column' names are expressions; bare names are not."""
    assert orca.is_expression('table.column') is True
    assert orca.is_expression('name') is False
def test_eval_variable(df):
    """eval_variable resolves plain injectables, function injectables and
    'table.column' expressions, with keyword overrides."""
    orca.add_injectable('x', 3)
    assert orca.eval_variable('x') == 3
    @orca.injectable()
    def func(x):
        return 'xyz' * x
    assert orca.eval_variable('func') == 'xyzxyzxyz'
    # Keyword arguments override registered injectables for this call.
    assert orca.eval_variable('func', x=2) == 'xyzxyz'
    @orca.table()
    def table(x):
        return df * x
    pdt.assert_series_equal(orca.eval_variable('table.a'), df.a * 3)
def test_eval_step(df):
    """eval_step runs a step, honoring keyword overrides of injectables."""
    orca.add_injectable('x', 3)
    @orca.step()
    def step(x):
        return df * x
    for multiplier, overrides in ((3, {}), (5, {'x': 5})):
        result = orca.eval_step('step', **overrides)
        pdt.assert_frame_equal(result, df * multiplier)
def test_always_dataframewrapper(df):
    """Table functions receive upstream tables as DataFrameWrapper
    instances, never as raw DataFrames."""
    @orca.table()
    def table():
        return df / 2
    @orca.table()
    def table2(table):
        assert isinstance(table, orca.DataFrameWrapper)
        return table.to_frame() / 2
    result = orca.eval_variable('table2')
    pdt.assert_frame_equal(result.to_frame(), df / 4)
def test_table_func_source_data(df):
    """func_source_data exposes the defining file, line number and
    source text of a table function."""
    @orca.table()
    def table():
        return df * 2
    t = orca.get_raw_table('table')
    filename, lineno, source = t.func_source_data()
    assert filename.endswith('test_orca.py')
    assert isinstance(lineno, int)
    assert 'return df * 2' in source
def test_column_type(df):
    """column_type distinguishes local frame columns from registered
    Series and column functions, for both table kinds."""
    orca.add_table('test_frame', df)
    @orca.table()
    def test_func():
        return df
    s = pd.Series(range(len(df)), index=df.index)
    def col_func():
        return s
    # Register the same Series and function column on both tables.
    orca.add_column('test_frame', 'col_series', s)
    orca.add_column('test_func', 'col_series', s)
    orca.add_column('test_frame', 'col_func', col_func)
    orca.add_column('test_func', 'col_func', col_func)
    tframe = orca.get_raw_table('test_frame')
    tfunc = orca.get_raw_table('test_func')
    assert tframe.column_type('a') == 'local'
    assert tframe.column_type('col_series') == 'series'
    assert tframe.column_type('col_func') == 'function'
    assert tfunc.column_type('a') == 'local'
    assert tfunc.column_type('col_series') == 'series'
    assert tfunc.column_type('col_func') == 'function'
def test_get_raw_column(df):
    """get_raw_column returns the wrapper class matching how the column
    was registered (Series vs. function)."""
    orca.add_table('test_frame', df)
    values = pd.Series(range(len(df)), index=df.index)
    def col_func():
        return values
    orca.add_column('test_frame', 'col_series', values)
    orca.add_column('test_frame', 'col_func', col_func)
    expectations = [
        ('col_series', orca._SeriesWrapper),
        ('col_func', orca._ColumnFuncWrapper),
    ]
    for col_name, wrapper_cls in expectations:
        raw = orca.get_raw_column('test_frame', col_name)
        assert isinstance(raw, wrapper_cls)
def test_column_func_source_data(df):
    """Column functions expose file/line/source via their wrapper too."""
    orca.add_table('test_frame', df)
    @orca.column('test_frame')
    def col_func():
        return pd.Series(range(len(df)), index=df.index)
    s = orca.get_raw_column('test_frame', 'col_func')
    filename, lineno, source = s.func_source_data()
    assert filename.endswith('test_orca.py')
    assert isinstance(lineno, int)
    assert 'def col_func():' in source
def test_is_injectable():
    """is_injectable reports registration status by name."""
    orca.add_injectable('answer', 42)
    for name, expected in (('answer', True), ('nope', False)):
        assert orca.is_injectable(name) is expected
def test_injectable_type():
    """injectable_type reports 'variable' for plain values and 'function'
    for callables, whether autocalled or memoized."""
    orca.add_injectable('answer', 42)
    @orca.injectable()
    def inj1():
        return 42
    @orca.injectable(autocall=False, memoize=True)
    def power(x):
        return 42 ** x
    assert orca.injectable_type('answer') == 'variable'
    assert orca.injectable_type('inj1') == 'function'
    assert orca.injectable_type('power') == 'function'
def test_get_injectable_func_source_data():
    """Source data is available for all function-style injectables:
    decorated, memoized, and manually registered with autocall=False."""
    @orca.injectable()
    def inj1():
        return 42
    @orca.injectable(autocall=False, memoize=True)
    def power(x):
        return 42 ** x
    def inj2():
        return 'orca'
    orca.add_injectable('inj2', inj2, autocall=False)
    filename, lineno, source = orca.get_injectable_func_source_data('inj1')
    assert filename.endswith('test_orca.py')
    assert isinstance(lineno, int)
    assert '@orca.injectable()' in source
    filename, lineno, source = orca.get_injectable_func_source_data('power')
    assert filename.endswith('test_orca.py')
    assert isinstance(lineno, int)
    assert '@orca.injectable(autocall=False, memoize=True)' in source
    filename, lineno, source = orca.get_injectable_func_source_data('inj2')
    assert filename.endswith('test_orca.py')
    assert isinstance(lineno, int)
    assert 'def inj2()' in source
| UDST/orca | orca/tests/test_orca.py | Python | bsd-3-clause | 36,982 | [
"ORCA"
] | 3ff2c29c9f897b1f212679e6d53a9bef0652d8b160525c8a856ce1b895f14b74 |
""":mod:`news.reporters.url` --- URL reporters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provide a concrete URL news reporter.
"""
from bs4 import BeautifulSoup
from extraction import Extractor
from ..models.abstract import Readable
from .generics import TraversingReporter
from .mixins import (
BatchTraversingMixin,
DomainTraversingMixin
)
from ..utils.url import fillurl
class URLReporter(
        BatchTraversingMixin,
        DomainTraversingMixin,
        TraversingReporter):
    """URL Reporter for fetching news from plain html web pages.

    :param meta: Reporter meta from which to populate the reporter.
    :type meta: :class:`~news.reporters.ReporterMeta`
    :param backend: Backend to report news.
    :type backend: :class:`~news.backends.abstract.AbstractBackend`
        implementation
    :param intel: Intels (previously fetched news) to use for batch
        traversing.
    :type intel: :class:`list` of news instances
    """
    def parse(self, content):
        """Parses html content of http response body into a single
        :class:`~news.models.abstract.Readable`.

        Internally uses :class:`~extraction.Extractor` extractor to extract
        semantic tags from the plain html content.

        :param content: Http response body
        :type content: :class:`str`
        :returns: A parsed readable
        :rtype: :class:`~news.models.abstract.Readable`
        """
        extractor = Extractor()
        extracted = extractor.extract(content)
        # `self.url` is supplied by the TraversingReporter base class.
        return Readable(url=self.url, title=extracted.title, content=content,
                        summary=extracted.description, image=extracted.image)
    def make_news(self, readable):
        """Instantiate a news out of the readable parsed from :meth:`parse`.

        :param readable: A parsed readable.
        :type readable: :class:`~news.models.abstract.Readable`
        :returns: A news instance
        :rtype: :class:`~news.models.abstract.AbstractNews` implementation
        """
        # Root news has no parent; children hang off the parent reporter's
        # previously fetched news.
        parent = self.parent.fetched_news if not self.is_root else None
        stored = self.backend.get_news_by(owner=self.owner, url=self.url)
        fetched = self.fetched_news
        if not fetched and not stored:
            news = self.backend.News.create_instance(
                parent=parent, schedule=self.schedule,
                **readable.kwargs()
            )
        else:
            # Prefer the in-memory copy over the stored row, then refresh
            # its attributes from the freshly parsed readable.
            news = fetched or stored
            news.parent = parent
            for k, v in readable.kwargs().items():
                setattr(news, k, v)
        return news
    async def get_urls(self, news):
        """Retrieve urls to visit from the instantiated news.

        :param news: Instantiated news from the :meth:`make_news`
        :type news: :class:`~news.models.abstract.AbstractNews` implementation
        :returns: A set of urls retrieved from the response body.
        :rtype: :class:`set`
        """
        atags = BeautifulSoup(news.content, 'html.parser')('a')
        links = {a['href'] for a in atags if a.has_attr('href')}
        # Resolve relative links against the root reporter's url.
        return {fillurl(self.root.url, l) for l in links}
| kuc2477/news | news/reporters/url.py | Python | mit | 3,069 | [
"VisIt"
] | 502768ff20c1928b55ac2afd519540f2d72894b4abcbb3d37a34ce1fed48a841 |
#!/usr/bin/env python
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import functools
import json
import os.path
import re
import shutil
import sys
import textwrap
import time
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections
)
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
# Shorthand alias used throughout this module.
urlparse = six.moves.urllib.parse.urlparse
# (key, required) pairs recognised in each [galaxy_server.<name>] config
# section; consumed by server_config_def() inside GalaxyCLI.run().
SERVER_DEF = [
    ('url', True),
    ('username', False),
    ('password', False),
    ('token', False),
    ('auth_url', False),
    ('v3', False),
    ('validate_certs', False),
    ('client_id', False),
]
def with_collection_artifacts_manager(wrapped_method):
    """Inject an artifacts manager if not passed explicitly.

    This decorator constructs a ConcreteArtifactsManager and maintains
    the related temporary directory auto-cleanup around the target
    method invocation.

    :param wrapped_method: Callable accepting an ``artifacts_manager``
        keyword argument.
    :returns: Wrapper that supplies a default artifacts manager when the
        caller does not pass one.
    """
    # functools.wraps preserves the wrapped callable's metadata
    # (__name__, __doc__, ...) for introspection and debugging.
    @functools.wraps(wrapped_method)
    def method_wrapper(*args, **kwargs):
        # Caller supplied a manager explicitly; use it as-is.
        if 'artifacts_manager' in kwargs:
            return wrapped_method(*args, **kwargs)

        # Otherwise create one whose temporary directory is cleaned up
        # as soon as the wrapped call returns.
        with ConcreteArtifactsManager.under_tmpdir(
                C.DEFAULT_LOCAL_TMP,
                validate_certs=not context.CLIARGS['ignore_certs'],
        ) as concrete_artifact_cm:
            kwargs['artifacts_manager'] = concrete_artifact_cm
            return wrapped_method(*args, **kwargs)
    return method_wrapper
def _display_header(path, h1, h2, w1=10, w2=7):
    """Print a two-column listing header for *path* with dashed underlines."""
    # Each underline is never shorter than the header text it sits below.
    underline1 = '-' * max([len(h1), w1])
    underline2 = '-' * max([len(h2), w2])
    header = '\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
        path, h1, h2, underline1, underline2, cwidth=w1, vwidth=w2)
    display.display(header)
def _display_role(gr):
    """Print a single role as '- <name>, <version>' with a fallback version."""
    version = (gr.install_info or {}).get("version", None)
    if not version:
        # Roles without recorded install info get a placeholder version.
        version = "(unknown version)"
    display.display("- %s, %s" % (gr.name, version))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
    """Print one collection row, padding columns to at least the minimum widths."""
    # Columns are never narrower than their headers.
    fqcn_width = max(cwidth, min_cwidth)
    version_width = max(vwidth, min_vwidth)
    row = '{fqcn:{cwidth}} {version:{vwidth}}'.format(
        fqcn=to_text(collection.fqcn),
        version=collection.ver,
        cwidth=fqcn_width,
        vwidth=version_width,
    )
    display.display(row)
def _get_collection_widths(collections):
    """Return (fqcn_width, version_width): the lengths of the longest FQCN
    and version strings among *collections* (a single candidate is also
    accepted)."""
    if not is_iterable(collections):
        collections = (collections, )
    fqcns = {to_text(c.fqcn) for c in collections}
    versions = {to_text(c.ver) for c in collections}
    longest_fqcn = max(fqcns, key=len)
    longest_version = max(versions, key=len)
    return len(longest_fqcn), len(longest_version)
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
name = 'ansible-galaxy'
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
    def __init__(self, args):
        """Normalize raw argv and initialize CLI state.

        Historic invocations like ``ansible-galaxy install`` (without the
        role/collection subcommand) are rewritten to the explicit ``role``
        form before argparse ever sees them.
        """
        self._raw_args = args
        self._implicit_role = False
        if len(args) > 1:
            # Inject role into sys.argv[1] as a backwards compatibility step
            if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
                # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
                # Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
                idx = 2 if args[1].startswith('-v') else 1
                args.insert(idx, 'role')
                self._implicit_role = True
            # since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
            if args[1:3] == ['role', 'login']:
                display.error(
                    "The login command was removed in late 2020. An API key is now required to publish roles or collections "
                    "to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
                    "ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
                    "command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
                sys.exit(1)
        self.api_servers = []
        self.galaxy = None
        self._api = None
        super(GalaxyCLI, self).__init__(args)
    def init_parser(self):
        '''Create the argparse options parser for ansible-galaxy: shared
        option groups first, then the role/collection subparsers with
        their per-action subcommands.'''
        super(GalaxyCLI, self).init_parser(
            desc="Perform various Role and Collection related operations.",
        )
        # Common arguments that apply to more than 1 action
        common = opt_help.argparse.ArgumentParser(add_help=False)
        common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
        common.add_argument('--token', '--api-key', dest='api_key',
                            help='The Ansible Galaxy API key which can be found at '
                                 'https://galaxy.ansible.com/me/preferences.')
        common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
                            default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
        opt_help.add_verbosity_options(common)
        # Reusable parent parsers for options shared by several actions.
        force = opt_help.argparse.ArgumentParser(add_help=False)
        force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
                           help='Force overwriting an existing role or collection')
        github = opt_help.argparse.ArgumentParser(add_help=False)
        github.add_argument('github_user', help='GitHub username')
        github.add_argument('github_repo', help='GitHub repository')
        offline = opt_help.argparse.ArgumentParser(add_help=False)
        offline.add_argument('--offline', dest='offline', default=False, action='store_true',
                             help="Don't query the galaxy API when creating roles")
        default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
        roles_path = opt_help.argparse.ArgumentParser(add_help=False)
        roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
                                default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
                                help='The path to the directory containing your roles. The default is the first '
                                     'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
        collections_path = opt_help.argparse.ArgumentParser(add_help=False)
        collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
                                      default=AnsibleCollectionConfig.collection_paths,
                                      action=opt_help.PrependListAction,
                                      help="One or more directories to search for collections in addition "
                                           "to the default COLLECTIONS_PATHS. Separate multiple paths "
                                           "with '{0}'.".format(os.path.pathsep))
        cache_options = opt_help.argparse.ArgumentParser(add_help=False)
        cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
                                   default=False, help='Clear the existing server response cache.')
        cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
                                   help='Do not use the server response cache.')
        # Add sub parser for the Galaxy role type (role or collection)
        type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
        type_parser.required = True
        # Add sub parser for the Galaxy collection actions
        collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
        collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
        collection_parser.required = True
        self.add_download_options(collection_parser, parents=[common, cache_options])
        self.add_init_options(collection_parser, parents=[common, force])
        self.add_build_options(collection_parser, parents=[common, force])
        self.add_publish_options(collection_parser, parents=[common])
        self.add_install_options(collection_parser, parents=[common, force, cache_options])
        self.add_list_options(collection_parser, parents=[common, collections_path])
        self.add_verify_options(collection_parser, parents=[common, collections_path])
        # Add sub parser for the Galaxy role actions
        role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
        role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
        role_parser.required = True
        self.add_init_options(role_parser, parents=[common, force, offline])
        self.add_remove_options(role_parser, parents=[common, roles_path])
        self.add_delete_options(role_parser, parents=[common, github])
        self.add_list_options(role_parser, parents=[common, roles_path])
        self.add_search_options(role_parser, parents=[common])
        self.add_import_options(role_parser, parents=[common, github])
        self.add_setup_options(role_parser, parents=[common, roles_path])
        self.add_info_options(role_parser, parents=[common, roles_path, offline])
        self.add_install_options(role_parser, parents=[common, force, roles_path])
    def add_download_options(self, parser, parents=None):
        """Register the ``download`` subcommand and its options."""
        download_parser = parser.add_parser('download', parents=parents,
                                            help='Download collections and their dependencies as a tarball for an '
                                                 'offline install.')
        download_parser.set_defaults(func=self.execute_download)
        download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
        download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                     help="Don't download collection(s) listed as dependencies.")
        download_parser.add_argument('-p', '--download-path', dest='download_path',
                                     default='./collections',
                                     help='The directory to download the collections to.')
        download_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                     help='A file containing a list of collections to be downloaded.')
        download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                     help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
    def add_init_options(self, parser, parents=None):
        """Register the ``init`` subcommand for roles or collections,
        depending on which subparser this is attached to."""
        galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
        init_parser = parser.add_parser('init', parents=parents,
                                        help='Initialize new {0} with the base structure of a '
                                             '{0}.'.format(galaxy_type))
        init_parser.set_defaults(func=self.execute_init)
        init_parser.add_argument('--init-path', dest='init_path', default='./',
                                 help='The path in which the skeleton {0} will be created. The default is the '
                                      'current working directory.'.format(galaxy_type))
        init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
                                 default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
                                 help='The path to a {0} skeleton that the new {0} should be based '
                                      'upon.'.format(galaxy_type))
        obj_name_kwargs = {}
        if galaxy_type == 'collection':
            # Collection names must be valid namespace.name identifiers.
            obj_name_kwargs['type'] = validate_collection_name
        init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
                                 **obj_name_kwargs)
        if galaxy_type == 'role':
            init_parser.add_argument('--type', dest='role_type', action='store', default='default',
                                     help="Initialize using an alternate role type. Valid types include: 'container', "
                                          "'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
    def add_list_options(self, parser, parents=None):
        """Register the ``list`` subcommand for roles or collections."""
        galaxy_type = 'role'
        if parser.metavar == 'COLLECTION_ACTION':
            galaxy_type = 'collection'
        list_parser = parser.add_parser('list', parents=parents,
                                        help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
        list_parser.set_defaults(func=self.execute_list)
        list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
        if galaxy_type == 'collection':
            # Only collection listings support machine-readable formats.
            list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
                                     help="Format to display the list of collections in.")
    def add_search_options(self, parser, parents=None):
        """Register the ``search`` subcommand for querying the Galaxy API."""
        search_parser = parser.add_parser('search', parents=parents,
                                          help='Search the Galaxy database by tags, platforms, author and multiple '
                                               'keywords.')
        search_parser.set_defaults(func=self.execute_search)
        search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
        search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
        search_parser.add_argument('--author', dest='author', help='GitHub username')
        search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
    def add_import_options(self, parser, parents=None):
        """Register the ``import`` subcommand for importing a GitHub repo
        into a Galaxy server."""
        import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
        import_parser.set_defaults(func=self.execute_import)
        import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                                   help="Don't wait for import results.")
        import_parser.add_argument('--branch', dest='reference',
                                   help='The name of a branch to import. Defaults to the repository\'s default branch '
                                        '(usually master)')
        import_parser.add_argument('--role-name', dest='role_name',
                                   help='The name the role should have, if different than the repo name')
        import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
                                   help='Check the status of the most recent import request for given github_'
                                        'user/github_repo.')
    def add_setup_options(self, parser, parents=None):
        """Register the ``setup`` subcommand for managing Galaxy/source
        integrations (e.g. Travis webhooks)."""
        setup_parser = parser.add_parser('setup', parents=parents,
                                         help='Manage the integration between Galaxy and the given source.')
        setup_parser.set_defaults(func=self.execute_setup)
        setup_parser.add_argument('--remove', dest='remove_id', default=None,
                                  help='Remove the integration matching the provided ID value. Use --list to see '
                                       'ID values.')
        setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
                                  help='List all of your integrations.')
        setup_parser.add_argument('source', help='Source')
        setup_parser.add_argument('github_user', help='GitHub username')
        setup_parser.add_argument('github_repo', help='GitHub repository')
        setup_parser.add_argument('secret', help='Secret')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
    def add_verify_options(self, parser, parents=None):
        """Register the ``verify`` subcommand for checksum-verifying
        installed collections against the server copies."""
        galaxy_type = 'collection'
        verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
                                          'found on the server and the installed copy. This does not verify dependencies.')
        verify_parser.set_defaults(func=self.execute_verify)
        verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
                                   'This is mutually exclusive with --requirements-file.')
        verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                   help='Ignore errors during verification and continue with the next specified collection.')
        verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
                                   help='Validate collection integrity locally without contacting server for '
                                        'canonical manifest hash.')
        verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                   help='A file containing a list of collections to be verified.')
    def add_install_options(self, parser, parents=None):
        """Register the ``install`` subcommand; the exact options differ
        between the role and collection subparsers."""
        galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
        args_kwargs = {}
        if galaxy_type == 'collection':
            args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
                                  'mutually exclusive with --requirements-file.'
            ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
                                 'collection. This will not ignore dependency conflict errors.'
        else:
            args_kwargs['help'] = 'Role name, URL or tar file'
            ignore_errors_help = 'Ignore errors and continue with the next specified role.'
        install_parser = parser.add_parser('install', parents=parents,
                                           help='Install {0}(s) from file(s), URL(s) or Ansible '
                                                'Galaxy'.format(galaxy_type))
        install_parser.set_defaults(func=self.execute_install)
        install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
        install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                    help=ignore_errors_help)
        # --no-deps and --force-with-deps contradict each other.
        install_exclusive = install_parser.add_mutually_exclusive_group()
        install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                       help="Don't download {0}s listed as dependencies.".format(galaxy_type))
        install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                       help="Force overwriting an existing {0} and its "
                                            "dependencies.".format(galaxy_type))
        if galaxy_type == 'collection':
            install_parser.add_argument('-p', '--collections-path', dest='collections_path',
                                        default=self._get_default_collection_path(),
                                        help='The path to the directory containing your collections.')
            install_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                        help='A file containing a list of collections to be installed.')
            install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                        help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
            install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
                                        help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
        else:
            install_parser.add_argument('-r', '--role-file', dest='requirements',
                                        help='A file containing a list of roles to be installed.')
            install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                        default=False,
                                        help='Use tar instead of the scm archive option when packaging the role.')
    def add_build_options(self, parser, parents=None):
        """Register the ``build`` subcommand for packaging a collection."""
        build_parser = parser.add_parser('build', parents=parents,
                                         help='Build an Ansible collection artifact that can be published to Ansible '
                                              'Galaxy.')
        build_parser.set_defaults(func=self.execute_build)
        build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
                                  help='Path to the collection(s) directory to build. This should be the directory '
                                       'that contains the galaxy.yml file. The default is the current working '
                                       'directory.')
        build_parser.add_argument('--output-path', dest='output_path', default='./',
                                  help='The path in which the collection is built to. The default is the current '
                                       'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
    def run(self):
        # Entry point for every `ansible-galaxy` invocation: builds the ordered
        # list of Galaxy API servers from config / CLI args, then dispatches to
        # the execute_* handler chosen during argument parsing.
        super(GalaxyCLI, self).run()
        self.galaxy = Galaxy()

        def server_config_def(section, key, required):
            # Build a runtime config definition for one key of a
            # [galaxy_server.<section>] ini section and its matching
            # ANSIBLE_GALAXY_SERVER_* env var, so the value can be resolved
            # through the standard plugin-config machinery.
            return {
                'description': 'The %s of the %s Galaxy server' % (key, section),
                'ini': [
                    {
                        'section': 'galaxy_server.%s' % section,
                        'key': key,
                    }
                ],
                'env': [
                    {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
                ],
                'required': required,
            }
        # --ignore-certs is only the fallback; a per-server validate_certs wins.
        validate_certs_fallback = not context.CLIARGS['ignore_certs']
        galaxy_options = {}
        for optional_key in ['clear_response_cache', 'no_cache']:
            if optional_key in context.CLIARGS:
                galaxy_options[optional_key] = context.CLIARGS[optional_key]
        config_servers = []
        # Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
        server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
        for server_priority, server_key in enumerate(server_list, start=1):
            # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
            # section [galaxy_server.<server>] for the values url, username, password, and token.
            config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in SERVER_DEF)
            defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
            C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
            server_options = C.config.get_plugin_options('galaxy_server', server_key)
            # auth_url is used to create the token, but not directly by GalaxyAPI, so
            # it doesn't need to be passed as kwarg to GalaxyApi
            auth_url = server_options.pop('auth_url', None)
            client_id = server_options.pop('client_id', None)
            token_val = server_options['token'] or NoTokenSentinel
            username = server_options['username']
            available_api_versions = None  # NOTE(review): appears unused in this scope
            v3 = server_options.pop('v3', None)
            validate_certs = server_options['validate_certs']
            if validate_certs is None:
                validate_certs = validate_certs_fallback
            server_options['validate_certs'] = validate_certs
            if v3:
                # This allows a user to explicitly indicate the server uses the /v3 API
                # This was added for testing against pulp_ansible and I'm not sure it has
                # a practical purpose outside of this use case. As such, this option is not
                # documented as of now
                server_options['available_api_versions'] = {'v3': '/v3'}
            # default case if no auth info is provided.
            server_options['token'] = None
            if username:
                server_options['token'] = BasicAuthToken(username,
                                                         server_options['password'])
            else:
                if token_val:
                    if auth_url:
                        # Keycloak-style token exchange via the auth URL.
                        server_options['token'] = KeycloakToken(access_token=token_val,
                                                                auth_url=auth_url,
                                                                validate_certs=validate_certs,
                                                                client_id=client_id)
                    else:
                        # The galaxy v1 / github / django / 'Token'
                        server_options['token'] = GalaxyToken(token=token_val)
            server_options.update(galaxy_options)
            config_servers.append(GalaxyAPI(
                self.galaxy, server_key,
                priority=server_priority,
                **server_options
            ))
        cmd_server = context.CLIARGS['api_server']
        cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
        if cmd_server:
            # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
            # entry, otherwise create a new API entry for the server specified.
            config_server = next((s for s in config_servers if s.name == cmd_server), None)
            if config_server:
                self.api_servers.append(config_server)
            else:
                self.api_servers.append(GalaxyAPI(
                    self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
                    priority=len(config_servers) + 1,
                    validate_certs=validate_certs_fallback,
                    **galaxy_options
                ))
        else:
            self.api_servers = config_servers
        # Default to C.GALAXY_SERVER if no servers were defined
        if len(self.api_servers) == 0:
            self.api_servers.append(GalaxyAPI(
                self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
                priority=0,
                validate_certs=validate_certs_fallback,
                **galaxy_options
            ))
        # Dispatch to the sub-command handler bound by set_defaults(func=...).
        return context.CLIARGS['func']()
@property
def api(self):
if self._api:
return self._api
for server in self.api_servers:
try:
if u'v1' in server.available_api_versions:
self._api = server
break
except Exception:
continue
if not self._api:
self._api = self.api_servers[0]
return self._api
def _get_default_collection_path(self):
return C.COLLECTIONS_PATHS[0]
    def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None):
        """
        Parses an Ansible requirement.yml file and returns all the roles and/or collections defined in it. There are 2
        requirements file formats:

        # v1 (roles only)
        - src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
          name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
          version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
          include: Path to additional requirements.yml files.

        # v2 (roles and collections)
        ---
        roles:
        # Same as v1 format just under the roles key

        collections:
        - namespace.collection
        - name: namespace.collection
          version: version identifier, multiple identifiers are separated by ','
          source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
          type: git|file|url|galaxy

        :param requirements_file: The path to the requirements file.
        :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
        :param artifacts_manager: Artifacts manager.
        :return: a dict containing roles and collections found in the requirements file.
        """
        requirements = {
            'roles': [],
            'collections': [],
        }
        b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
        if not os.path.exists(b_requirements_file):
            raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
        display.vvv("Reading requirement file at '%s'" % requirements_file)
        with open(b_requirements_file, 'rb') as req_obj:
            try:
                file_requirements = yaml_load(req_obj)
            except YAMLError as err:
                raise AnsibleError(
                    "Failed to parse the requirements yml at '%s' with the following error:\n%s"
                    % (to_native(requirements_file), to_native(err)))
        if file_requirements is None:
            raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))

        def parse_role_req(requirement):
            # Parse a single v1-style role entry into GalaxyRole objects; an
            # 'include' entry recursively pulls roles from another requirements file.
            if "include" not in requirement:
                role = RoleRequirement.role_yaml_parse(requirement)
                display.vvv("found role %s in yaml file" % to_text(role))
                if "name" not in role and "src" not in role:
                    raise AnsibleError("Must specify name or src for role")
                return [GalaxyRole(self.galaxy, self.api, **role)]
            else:
                b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
                if not os.path.isfile(b_include_path):
                    raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
                                       % (to_native(b_include_path), to_native(requirements_file)))
                with open(b_include_path, 'rb') as f_include:
                    try:
                        return [GalaxyRole(self.galaxy, self.api, **r) for r in
                                (RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
                    except Exception as e:
                        raise AnsibleError("Unable to load data from include requirements file: %s %s"
                                           % (to_native(requirements_file), to_native(e)))
        if isinstance(file_requirements, list):
            # Older format that contains only roles
            if not allow_old_format:
                raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
                                   "a list of collections to install")
            for role_req in file_requirements:
                requirements['roles'] += parse_role_req(role_req)
        else:
            # Newer format with a collections and/or roles key
            extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
            if extra_keys:
                raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
                                   "file. Found: %s" % (to_native(", ".join(extra_keys))))
            for role_req in file_requirements.get('roles') or []:
                requirements['roles'] += parse_role_req(role_req)
            # Collection entries may be plain strings or dicts; normalise each
            # via _init_coll_req_dict before building Requirement objects.
            requirements['collections'] = [
                Requirement.from_requirement_dict(
                    self._init_coll_req_dict(collection_req),
                    artifacts_manager,
                )
                for collection_req in file_requirements.get('collections') or []
            ]
        return requirements
    def _init_coll_req_dict(self, coll_req):
        # Normalise a collection requirement entry from a requirements file into
        # the dict form expected by Requirement.from_requirement_dict, resolving
        # a 'source' value to a concrete GalaxyAPI instance where possible.
        if not isinstance(coll_req, dict):
            # Assume it's a string:
            return {'name': coll_req}
        if (
                'name' not in coll_req or
                not coll_req.get('source') or
                coll_req.get('type', 'galaxy') != 'galaxy'
        ):
            # Nothing to resolve: no name, no explicit source, or a non-galaxy
            # type (git/file/url) whose source is interpreted elsewhere.
            return coll_req
        # Try and match up the requirement source with our list of Galaxy API
        # servers defined in the config, otherwise create a server with that
        # URL without any auth.
        # NOTE(review): the GalaxyAPI fallback below is constructed eagerly as
        # next()'s default, even when a configured server matches.
        coll_req['source'] = next(
            iter(
                srvr for srvr in self.api_servers
                if coll_req['source'] in {srvr.name, srvr.api_server}
            ),
            GalaxyAPI(
                self.galaxy,
                'explicit_requirement_{name!s}'.format(
                    name=coll_req['name'],
                ),
                coll_req['source'],
                validate_certs=not context.CLIARGS['ignore_certs'],
            ),
        )
        return coll_req
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
# Get the top-level 'description' first, falling back to galaxy_info['galaxy_info']['description'].
galaxy_info = role_info.get('galaxy_info', {})
description = role_info.get('description', galaxy_info.get('description', ''))
text.append(u"\tdescription: %s" % description)
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
# make sure we have a trailing newline returned
text.append(u"")
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
    @staticmethod
    def _get_skeleton_galaxy_yml(template_path, inject_data):
        """Render the galaxy.yml.j2 skeleton template with commented config options.

        :param template_path: Path to the galaxy.yml.j2 template file.
        :param inject_data: Values to pre-fill into the generated galaxy.yml.
        :return: The rendered galaxy.yml contents as text.
        """
        with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
            meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
        galaxy_meta = get_collections_galaxy_meta_info()
        # Split the known galaxy.yml options into required vs optional groups
        # so the template can render them in separate sections.
        required_config = []
        optional_config = []
        for meta_entry in galaxy_meta:
            config_list = required_config if meta_entry.get('required', False) else optional_config
            value = inject_data.get(meta_entry['key'], None)
            if not value:
                # Fall back to an empty value of the declared type so the
                # template still renders a placeholder for unset keys.
                meta_type = meta_entry.get('type', 'str')
                if meta_type == 'str':
                    value = ''
                elif meta_type == 'list':
                    value = []
                elif meta_type == 'dict':
                    value = {}
            meta_entry['value'] = value
            config_list.append(meta_entry)
        # L(text, url) and C(constant) are Ansible doc markup; rewrite them into
        # plain text suitable for YAML comments.
        link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
        const_pattern = re.compile(r"C\(([^)]+)\)")

        def comment_ify(v):
            # Jinja2 filter: turn an option description into a wrapped '# ' comment block.
            if isinstance(v, list):
                v = ". ".join([l.rstrip('.') for l in v])
            v = link_pattern.sub(r"\1 <\2>", v)
            v = const_pattern.sub(r"'\1'", v)
            return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
        loader = DataLoader()
        templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
        templar.environment.filters['comment_ify'] = comment_ify
        meta_value = templar.template(meta_template)
        return meta_value
def _require_one_of_collections_requirements(
self, collections, requirements_file,
artifacts_manager=None,
):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(
requirements_file,
allow_old_format=False,
artifacts_manager=artifacts_manager,
)
else:
requirements = {
'collections': [
Requirement.from_string(coll_input, artifacts_manager)
for coll_input in collections
],
'roles': [],
}
return requirements
############################
# execute actions
############################
    def execute_role(self):
        """
        Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
        as listed below.
        """
        # To satisfy doc build; the selected sub-action's set_defaults(func=...)
        # handler performs the real work, so this body is never the dispatch target.
        pass
    def execute_collection(self):
        """
        Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
        listed below.
        """
        # To satisfy doc build; the selected sub-action's set_defaults(func=...)
        # handler performs the real work, so this body is never the dispatch target.
        pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(
to_text(collection_path, errors='surrogate_or_strict'),
to_text(output_path, errors='surrogate_or_strict'),
force,
)
@with_collection_artifacts_manager
def execute_download(self, artifacts_manager=None):
collections = context.CLIARGS['args']
no_deps = context.CLIARGS['no_deps']
download_path = context.CLIARGS['download_path']
requirements_file = context.CLIARGS['requirements']
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
artifacts_manager=artifacts_manager,
)['collections']
download_path = GalaxyCLI._resolve_path(download_path)
b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
if not os.path.exists(b_download_path):
os.makedirs(b_download_path)
download_collections(
requirements, download_path, self.api_servers, no_deps,
context.CLIARGS['allow_pre_release'],
artifacts_manager=artifacts_manager,
)
return 0
    def execute_init(self):
        """
        Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
        Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
        """
        galaxy_type = context.CLIARGS['type']
        init_path = context.CLIARGS['init_path']
        force = context.CLIARGS['force']
        # Role and collection modes share this code path; the skeleton and name
        # CLI options are keyed by the type ('role_skeleton' vs 'collection_skeleton').
        obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
        obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
        inject_data = dict(
            description='your {0} description'.format(galaxy_type),
            ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
        )
        if galaxy_type == 'role':
            inject_data.update(dict(
                author='your name',
                company='your company (optional)',
                license='license (GPL-2.0-or-later, MIT, etc)',
                role_name=obj_name,
                role_type=context.CLIARGS['role_type'],
                issue_tracker_url='http://example.com/issue/tracker',
                repository_url='http://example.com/repository',
                documentation_url='http://docs.example.com',
                homepage_url='http://example.com',
                min_ansible_version=ansible_version[:3],  # x.y
                dependencies=[],
            ))
            skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
            obj_path = os.path.join(init_path, obj_name)
        elif galaxy_type == 'collection':
            namespace, collection_name = obj_name.split('.', 1)
            inject_data.update(dict(
                namespace=namespace,
                collection_name=collection_name,
                version='1.0.0',
                readme='README.md',
                authors=['your name <example@domain.com>'],
                license=['GPL-2.0-or-later'],
                repository='http://example.com/repository',
                documentation='http://docs.example.com',
                homepage='http://example.com',
                issues='http://example.com/issue/tracker',
                build_ignore=[],
            ))
            skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
            obj_path = os.path.join(init_path, namespace, collection_name)
        b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
        if os.path.exists(b_obj_path):
            if os.path.isfile(obj_path):
                raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
            elif not force:
                raise AnsibleError("- the directory %s already exists. "
                                   "You can use --force to re-initialize this directory,\n"
                                   "however it will reset any main.yml files that may have\n"
                                   "been modified there already." % to_native(obj_path))
        # Fall back to the built-in skeleton when the user didn't supply one.
        if obj_skeleton is not None:
            own_skeleton = False
        else:
            own_skeleton = True
            obj_skeleton = self.galaxy.default_role_skeleton_path
            skeleton_ignore_expressions = ['^.*/.git_keep$']
        obj_skeleton = os.path.expanduser(obj_skeleton)
        skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
        if not os.path.exists(obj_skeleton):
            raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
                to_native(obj_skeleton), galaxy_type)
            )
        loader = DataLoader()
        templar = Templar(loader, variables=inject_data)
        # create role directory
        if not os.path.exists(b_obj_path):
            os.makedirs(b_obj_path)
        for root, dirs, files in os.walk(obj_skeleton, topdown=True):
            rel_root = os.path.relpath(root, obj_skeleton)
            rel_dirs = rel_root.split(os.sep)
            rel_root_dir = rel_dirs[0]
            if galaxy_type == 'collection':
                # A collection can contain templates in playbooks/*/templates and roles/*/templates
                in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
            else:
                in_templates_dir = rel_root_dir == 'templates'
            # Filter out ignored directory names
            # Use [:] to mutate the list os.walk uses
            dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
            for f in files:
                filename, ext = os.path.splitext(f)
                if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                    continue
                if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
                    # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
                    # dynamically which requires special options to be set.
                    # The templated data's keys must match the key name but the inject data contains collection_name
                    # instead of name. We just make a copy and change the key back to name for this file.
                    template_data = inject_data.copy()
                    template_data['name'] = template_data.pop('collection_name')
                    meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
                    b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
                    with open(b_dest_file, 'wb') as galaxy_obj:
                        galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
                elif ext == ".j2" and not in_templates_dir:
                    # Render ordinary .j2 skeleton files; real role/playbook
                    # templates are copied through untouched below.
                    src_template = os.path.join(root, f)
                    dest_file = os.path.join(obj_path, rel_root, filename)
                    template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
                    b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
                    with open(dest_file, 'wb') as df:
                        df.write(b_rendered)
                else:
                    f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
                    shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
            for d in dirs:
                b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
                if not os.path.exists(b_dir_path):
                    os.makedirs(b_dir_path)
        display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.api, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
if not context.CLIARGS['offline']:
remote_data = None
try:
remote_data = self.api.lookup_role_by_name(role, False)
except AnsibleError as e:
if e.http_code == 400 and 'Bad Request' in e.message:
# Role does not exist in Ansible Galaxy
data = u"- the role %s was not found" % role
break
raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
if remote_data:
role_info.update(remote_data)
elif context.CLIARGS['offline'] and not gr._exists:
data = u"- the role %s was not found" % role
break
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
collections = context.CLIARGS['args']
search_paths = context.CLIARGS['collections_path']
ignore_errors = context.CLIARGS['ignore_errors']
local_verify_only = context.CLIARGS['offline']
requirements_file = context.CLIARGS['requirements']
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
artifacts_manager=artifacts_manager,
)['collections']
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
results = verify_collections(
requirements, resolved_paths,
self.api_servers, ignore_errors,
local_verify_only=local_verify_only,
artifacts_manager=artifacts_manager,
)
if any(result for result in results if not result.success):
return 1
return 0
    @with_collection_artifacts_manager
    def execute_install(self, artifacts_manager=None):
        """
        Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
        You can pass in a list (roles or collections) or use the file
        option listed below (these are mutually exclusive). If you pass in a list, it
        can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.

        :param artifacts_manager: Artifacts manager.
        """
        install_items = context.CLIARGS['args']
        requirements_file = context.CLIARGS['requirements']
        collection_path = None
        if requirements_file:
            requirements_file = GalaxyCLI._resolve_path(requirements_file)
        # Shown when a requirements file mixes roles and collections but only
        # one kind can be installed in the current invocation.
        two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
                           "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
                           "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
        # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
        collection_requirements = []
        role_requirements = []
        if context.CLIARGS['type'] == 'collection':
            collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
            requirements = self._require_one_of_collections_requirements(
                install_items, requirements_file,
                artifacts_manager=artifacts_manager,
            )
            collection_requirements = requirements['collections']
            if requirements['roles']:
                display.vvv(two_type_warning.format('role'))
        else:
            if not install_items and requirements_file is None:
                raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
            if requirements_file:
                if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
                    raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
                requirements = self._parse_requirements_file(
                    requirements_file,
                    artifacts_manager=artifacts_manager,
                )
                role_requirements = requirements['roles']
                # We can only install collections and roles at the same time if the type wasn't specified and the -p
                # argument was not used. If collections are present in the requirements then at least display a msg.
                galaxy_args = self._raw_args
                if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
                                                    '--roles-path' in galaxy_args):
                    # We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
                    # was explicit about the type and shouldn't care that collections were skipped.
                    display_func = display.warning if self._implicit_role else display.vvv
                    display_func(two_type_warning.format('collection'))
                else:
                    collection_path = self._get_default_collection_path()
                    collection_requirements = requirements['collections']
            else:
                # roles were specified directly, so we'll just go out grab them
                # (and their dependencies, unless the user doesn't want us to).
                for rname in context.CLIARGS['args']:
                    role = RoleRequirement.role_yaml_parse(rname.strip())
                    role_requirements.append(GalaxyRole(self.galaxy, self.api, **role))
        if not role_requirements and not collection_requirements:
            display.display("Skipping install, no requirements found")
            return
        if role_requirements:
            display.display("Starting galaxy role install process")
            self._execute_install_role(role_requirements)
        if collection_requirements:
            display.display("Starting galaxy collection install process")
            # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
            # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
            self._execute_install_collection(
                collection_requirements, collection_path,
                artifacts_manager=artifacts_manager,
            )
def _execute_install_collection(
self, requirements, path, artifacts_manager,
):
force = context.CLIARGS['force']
ignore_errors = context.CLIARGS['ignore_errors']
no_deps = context.CLIARGS['no_deps']
force_with_deps = context.CLIARGS['force_with_deps']
# If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
upgrade = context.CLIARGS.get('upgrade', False)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(path), to_text(":".join(collections_path))))
output_path = validate_collection_path(path)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(
requirements, output_path, self.api_servers, ignore_errors,
no_deps, force, force_with_deps, upgrade,
allow_pre_release=allow_pre_release,
artifacts_manager=artifacts_manager,
)
return 0
    def _execute_install_role(self, requirements):
        # Install each role in *requirements*, optionally pulling in its
        # dependencies. Note: dependencies are appended to *requirements*
        # while it is being iterated — a deliberate worklist pattern so newly
        # discovered deps get processed by the same loop.
        role_file = context.CLIARGS['requirements']
        no_deps = context.CLIARGS['no_deps']
        force_deps = context.CLIARGS['force_with_deps']
        # --force-with-deps implies --force for the role itself.
        force = context.CLIARGS['force'] or force_deps
        for role in requirements:
            # only process roles in roles files when names matches if given
            if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
                display.vvv('Skipping role %s' % role.name)
                continue
            display.vvv('Processing role %s ' % role.name)
            # query the galaxy API for the role data
            if role.install_info is not None:
                if role.install_info['version'] != role.version or force:
                    if force:
                        display.display('- changing role %s from %s to %s' %
                                        (role.name, role.install_info['version'], role.version or "unspecified"))
                        role.remove()
                    else:
                        display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                        (role.name, role.install_info['version'], role.version or "unspecified"))
                        continue
                else:
                    if not force:
                        display.display('- %s is already installed, skipping.' % str(role))
                        continue
            try:
                installed = role.install()
            except AnsibleError as e:
                display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
                self.exit_without_ignore()
                continue
            # install dependencies, if we want them
            if not no_deps and installed:
                if not role.metadata:
                    display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
                else:
                    # Dependencies can come from meta/main.yml and meta/requirements.yml.
                    role_dependencies = (role.metadata.get('dependencies') or []) + role.requirements
                    for dep in role_dependencies:
                        display.debug('Installing dep %s' % dep)
                        dep_req = RoleRequirement()
                        dep_info = dep_req.role_yaml_parse(dep)
                        dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
                        if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                            # we know we can skip this, as it's not going to
                            # be found on galaxy.ansible.com
                            continue
                        if dep_role.install_info is None:
                            if dep_role not in requirements:
                                display.display('- adding dependency: %s' % to_text(dep_role))
                                requirements.append(dep_role)
                            else:
                                display.display('- dependency %s already pending installation.' % dep_role.name)
                        else:
                            if dep_role.install_info['version'] != dep_role.version:
                                if force_deps:
                                    display.display('- changing dependent role %s from %s to %s' %
                                                    (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
                                    dep_role.remove()
                                    requirements.append(dep_role)
                                else:
                                    display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
                                                    (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
                            else:
                                if force_deps:
                                    requirements.append(dep_role)
                                else:
                                    display.display('- dependency %s is already installed, skipping.' % dep_role.name)
            if not installed:
                display.warning("- %s was NOT installed successfully." % role.name)
                self.exit_without_ignore()
        return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
@with_collection_artifacts_manager
def execute_list_collection(self, artifacts_manager=None):
    """
    List all collections installed on the local system, or one specific
    collection when a name was given on the command line.

    Output is either a human-readable table or, for ``json``/``yaml``
    output formats, a mapping of path -> {fqcn: {'version': ...}}.

    :param artifacts_manager: Artifacts manager.
    """
    output_format = context.CLIARGS['output_format']
    collections_search_paths = set(context.CLIARGS['collections_path'])
    collection_name = context.CLIARGS['collection']
    default_collections_path = AnsibleCollectionConfig.collection_paths
    collections_in_paths = {}

    warnings = []
    path_found = False
    collection_found = False
    for path in collections_search_paths:
        collection_path = GalaxyCLI._resolve_path(path)
        if not os.path.exists(path):
            if path in default_collections_path:
                # don't warn for missing default paths
                continue
            warnings.append("- the configured path {0} does not exist.".format(collection_path))
            continue

        if not os.path.isdir(collection_path):
            warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
            continue

        path_found = True

        if collection_name:
            # list a specific collection
            validate_collection_name(collection_name)
            namespace, collection = collection_name.split('.')

            collection_path = validate_collection_path(collection_path)
            b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')

            if not os.path.exists(b_collection_path):
                warnings.append("- unable to find {0} in collection paths".format(collection_name))
                continue

            if not os.path.isdir(collection_path):
                warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
                continue

            collection_found = True

            try:
                collection = Requirement.from_dir_path_as_unknown(
                    b_collection_path,
                    artifacts_manager,
                )
            except ValueError as val_err:
                six.raise_from(AnsibleError(val_err), val_err)

            if output_format in {'yaml', 'json'}:
                collections_in_paths[collection_path] = {
                    collection.fqcn: {'version': collection.ver}
                }
                continue

            fqcn_width, version_width = _get_collection_widths([collection])

            _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
            _display_collection(collection, fqcn_width, version_width)
        else:
            # list all collections
            collection_path = validate_collection_path(path)
            if os.path.isdir(collection_path):
                display.vvv("Searching {0} for collections".format(collection_path))
                collections = list(find_existing_collections(
                    collection_path, artifacts_manager,
                ))
            else:
                # There was no 'ansible_collections/' directory in the path,
                # so there are no collections here.
                display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
                continue

            if not collections:
                display.vvv("No collections found at {0}".format(collection_path))
                continue

            if output_format in {'yaml', 'json'}:
                collections_in_paths[collection_path] = {
                    collection.fqcn: {'version': collection.ver} for collection in collections
                }
                continue

            # Display header
            fqcn_width, version_width = _get_collection_widths(collections)
            _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)

            # Sort collections by the namespace and name
            for collection in sorted(collections, key=to_text):
                _display_collection(collection, fqcn_width, version_width)

    # Do not warn if the specific collection was found in any of the search paths
    if collection_found and collection_name:
        warnings = []

    for w in warnings:
        display.warning(w)

    if not path_found:
        raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))

    if output_format == 'json':
        display.display(json.dumps(collections_in_paths))
    elif output_format == 'yaml':
        display.display(yaml_dump(collections_in_paths))

    return 0
def execute_publish(self):
    """
    Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
    """
    # Resolve the tarball path, then hand everything to the shared helper.
    tarball_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
    publish_collection(
        tarball_path,
        self.api,
        context.CLIARGS['wait'],
        context.CLIARGS['import_timeout'],
    )
def execute_search(self):
    ''' searches for roles on the Ansible Galaxy server'''
    page_size = 1000
    search = '+'.join(context.CLIARGS['args']) if context.CLIARGS['args'] else None

    if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
        raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")

    response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
                                     tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)

    if response['count'] == 0:
        display.display("No roles match your search.", color=C.COLOR_ERROR)
        return True

    # Header depends on whether results were truncated to a single page.
    if response['count'] > page_size:
        heading = u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size)
    else:
        heading = u"Found %d roles matching your search:" % response['count']

    # The name column is sized to the longest fully-qualified role name.
    name_len = max(len(role['username'] + '.' + role['name']) for role in response['results'])
    format_str = u" %%-%ds %%s" % name_len

    data = [u'', heading, u'']
    data.append(format_str % (u"Name", u"Description"))
    data.append(format_str % (u"----", u"-----------"))
    data.extend(
        format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])
        for role in response['results']
    )

    self.pager(u'\n'.join(data))

    return True
def execute_import(self):
    """ used to import a role into Ansible Galaxy """

    # Map server-side message severities to display colors.
    colors = {
        'INFO': 'normal',
        'WARNING': C.COLOR_WARN,
        'ERROR': C.COLOR_ERROR,
        'SUCCESS': C.COLOR_OK,
        'FAILED': C.COLOR_ERROR,
    }

    github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
    github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')

    if context.CLIARGS['check_status']:
        # Only query the status of an already-submitted import.
        task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
    else:
        # Submit an import request
        task = self.api.create_import_task(github_user, github_repo,
                                           reference=context.CLIARGS['reference'],
                                           role_name=context.CLIARGS['role_name'])

        if len(task) > 1:
            # found multiple roles associated with github_user/github_repo
            display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                            color='yellow')
            display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
            for t in task:
                display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
            display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                            color=C.COLOR_CHANGED)
            return 0
        # found a single role as expected
        display.display("Successfully submitted import request %d" % task[0]['id'])
        if not context.CLIARGS['wait']:
            display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
            display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

    if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
        # Get the status of the import: poll the server every 10 seconds,
        # printing each task message only once, until a terminal state.
        msg_list = []
        finished = False
        while not finished:
            task = self.api.get_import_task(task_id=task[0]['id'])
            for msg in task[0]['summary_fields']['task_messages']:
                if msg['id'] not in msg_list:
                    display.display(msg['message_text'], color=colors[msg['message_type']])
                    msg_list.append(msg['id'])
            if task[0]['state'] in ['SUCCESS', 'FAILED']:
                finished = True
            else:
                time.sleep(10)

    return 0
def execute_setup(self):
    """ Setup an integration from Github or Travis for Ansible Galaxy roles.

    Handles three mutually exclusive modes based on the CLI arguments:
    listing existing integration secrets, removing a secret by id, and the
    default mode of adding a new secret for a source/user/repo triple.
    """
    if context.CLIARGS['setup_list']:
        # List existing integration secrets
        secrets = self.api.list_secrets()
        if len(secrets) == 0:
            # None found
            display.display("No integrations found.")
            return 0
        display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
        display.display("---------- ---------- ----------", color=C.COLOR_OK)
        for secret in secrets:
            display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
                                                   secret['github_repo']), color=C.COLOR_OK)
        return 0

    if context.CLIARGS['remove_id']:
        # Remove a secret
        self.api.remove_secret(context.CLIARGS['remove_id'])
        # Fixed typo in the user-facing message ("not longer" -> "no longer").
        display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK)
        return 0

    # Default mode: register a new integration secret.
    source = context.CLIARGS['source']
    github_user = context.CLIARGS['github_user']
    github_repo = context.CLIARGS['github_repo']
    secret = context.CLIARGS['secret']

    resp = self.api.add_secret(source, github_user, github_repo, secret)
    display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))

    return 0
def execute_delete(self):
    """ Delete a role from Ansible Galaxy. """
    github_user = context.CLIARGS['github_user']
    github_repo = context.CLIARGS['github_repo']
    resp = self.api.delete_role(github_user, github_repo)

    if len(resp['deleted_roles']) > 1:
        display.display("Deleted the following roles:")
        display.display("ID User Name")
        display.display("------ --------------- ----------")
        for role in resp['deleted_roles']:
            # NOTE(review): entries are read via attribute access (role.id)
            # while the response itself is indexed by key — presumably the API
            # client wraps deleted roles in objects; confirm against the client.
            display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))

    display.display(resp['status'])

    return True
def main(args=None):
    # Entry point for both the console script and direct execution; all
    # argument parsing and dispatch happens inside the shared CLI executor.
    GalaxyCLI.cli_executor(args)


if __name__ == '__main__':
    main()
| jborean93/ansible | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 81,162 | [
"Galaxy"
] | 31969a935bf228adf0fe2b7f01b9c9b7c06ab4dc1cfab8f1a2bd48a9f768bf40 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import sys
import numpy as np
import pysam
MISSING_QUAL_CHAR = '"' # got from calling 'samtools bam2fq' on bam w/o qualities
def parse_args(argv=None):
    """Parse command-line arguments for the FASTQ extraction script.

    :param argv: optional list of argument strings; defaults to sys.argv[1:]
                 (passing a list makes the function usable/testable without
                 touching the real command line — backward compatible)
    :return: argparse.Namespace with 'input' (required) and 'output' attributes
    """
    parser = argparse.ArgumentParser("Produce FASTQ file from soft-clipped reads")
    parser.add_argument('--input', '-i', dest='input', default=None, required=True, type=str,
                        help='Alignment file')
    parser.add_argument('--output', '-o', dest='output', default=None, required=False, type=str,
                        help='Output FQ (defaults to stdout)')

    return parser.parse_args(argv)
def main():
    """Read an alignment file and emit one FASTQ record per fetched read."""
    args = parse_args()
    alignment_filename = args.input

    # get read data we care about
    samfile = None
    outfile = None
    read_count = -1
    try:
        print("Reading {}:".format(alignment_filename))
        # 'rb' for BAM (binary), plain 'r' for SAM (text).
        samfile = pysam.AlignmentFile(alignment_filename, 'rb' if alignment_filename.endswith("bam") else 'r')
        outfile = sys.stdout if args.output is None else open(args.output, 'w')
        for read in samfile.fetch():
            read_count += 1
            read_name = read.query_name
            sequence = read.query_alignment_sequence
            qualities = read.query_alignment_qualities
            # Substitute a dummy quality string when the input lacks qualities.
            if qualities is None: qualities = MISSING_QUAL_CHAR * len(sequence)
            outfile.write("@{}\n{}\n+\n{}\n".format(read_name, sequence, qualities))
            # NOTE(review): this sanity check runs after the record has been
            # written, so a malformed record is emitted before the failure —
            # confirm that is intended.
            assert len(sequence) == len(qualities), "Read {} (#{}) has sequence len {} and qual len {}"\
                .format(read_name, read_count, len(sequence), len(qualities))
    finally:
        # Close whatever was opened; stdout is deliberately left open.
        if samfile is not None: samfile.close()
        if outfile is not None and args.output is not None: outfile.close()


if __name__ == "__main__":
    main()
"pysam"
] | 1b4b01e861358be196aaa4a193286b6332318f0f4fea5bed02662c417fe3e587 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import
"""
"""
from puliclient.server.renderNodeHandler import RenderNodeHandler
from octopus.core import enums
# Example script (Python 2 print statements — not Python 3 compatible):
# queries a Puli server for render nodes, first listing all of them, then
# running a filtered query.
rnHandler = RenderNodeHandler()

# Fetch every render node known to the server, plus a summary object.
(allRnList, summary) = rnHandler.getAllRenderNodes()

print "All render nodes"
if allRnList:
    for rn in allRnList:
        print "id:%s name:%s hostname:%s ram:%s/%s" % (rn.id, rn.name, rn.host, rn.systemFreeRam, rn.ramSize)

# Other query helpers, kept commented out for reference:
# print rnHandler.getRenderNodesById([1, 2, 3, 4, 5])
# print rnHandler.getRenderNodesByName(['vfxpc.*:8000'])
# print rnHandler.getRenderNodesByStatus([enums.RN_UNKNOWN])
# print rnHandler.getRenderNodesByHost(['vfxpc64'])
# print rnHandler.getRenderNodesByVersion(['dev'])
# print rnHandler.getRenderNodesByPool(None)

# Combined query: every filter is applied together (id, name regex, host,
# version, pool membership and status).
(results, summary) = rnHandler.getRenderNodes(
    idList=[4, 5, 6],
    nameList=["vfxpc.*:8000"],
    hostList=["vfxpc64"],
    versionList=["dev"],
    poolList=["default", "renderfarm"],
    statusList=[enums.RN_IDLE],
)

print ""
print "Query render nodes:"
if results:
    for rn in results:
        print "id:%s name:%s hostname:%s puliversion:%s ram:%s/%s" % (rn.id, rn.name, rn.host, rn.puliversion, rn.systemFreeRam, rn.ramSize)
"Octopus"
] | 266c152f2b2988e857813fe5feffa84ea98dd81414e8ceb18b7eb0dfe31eb724 |
"""
Tests the forum notification views.
"""
import json
import logging
from datetime import datetime
import ddt
import pytest
import six
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import translation
from edx_django_utils.cache import RequestCache
from mock import ANY, Mock, call, patch
from six import text_type
from six.moves import range
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.discussion import views
from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from lms.djangoapps.discussion.django_comment_client.permissions import get_team
from lms.djangoapps.discussion.django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
GroupIdAssertionMixin,
NonCohortedTopicGroupIdTestMixin
)
from lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin
from lms.djangoapps.discussion.django_comment_client.tests.utils import (
CohortedTestCase,
ForumsEnableMixin,
config_course_discussions,
topic_name_to_id
)
from lms.djangoapps.discussion.django_comment_client.utils import strip_none
from lms.djangoapps.discussion.views import _get_discussion_default_topic_id, course_discussions_settings_handler
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from openedx.core.djangoapps.course_groups.tests.test_views import CohortViewsTestCase
from openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientPaginatedResult
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_STUDENT,
CourseDiscussionSettings,
ForumsConfig
)
from openedx.core.djangoapps.django_comment_common.utils import ThreadContext, seed_permissions_roles
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES
from openedx.core.lib.teams_config import TeamsConfig
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseTestConsentRequired
from student.roles import CourseStaffRole, UserBasedRole
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import EventTestMixin, UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
log = logging.getLogger(__name__)
QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
    """
    Verify that profile/followed-threads views return 404 for a nonexistent
    user, with the comments-service HTTP layer fully mocked out.
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):

        # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(ViewsExceptionTestCase, self).setUp()

        # create a course
        self.course = CourseFactory.create(org='MITx', course='999',
                                           display_name='Robot Super Course')

        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            password = 'test'

            # Create the student
            self.student = UserFactory(username=uname, password=password, email=email)

            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

            # Log the student in
            self.client = Client()
            assert self.client.login(username=uname, password=password)

        # Forums must be globally enabled for the discussion URLs to resolve.
        config = ForumsConfig.current()
        config.enabled = True
        config.save()

    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_user_profile_exception(self, mock_threads, mock_from_django_user):

        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = [], 1, 1

        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()

        url = reverse('user_profile',
                      kwargs={'course_id': text_type(self.course.id), 'user_id': '12345'})  # There is no user 12345
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.subscribed_threads')
    def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):

        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)

        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()

        url = reverse('followed_threads',
                      kwargs={'course_id': text_type(self.course.id), 'user_id': '12345'})  # There is no user 12345
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
def make_mock_thread_data(
        course,
        text,
        thread_id,
        num_children,
        group_id=None,
        group_name=None,
        commentable_id=None,
        is_commentable_divided=None,
):
    """
    Build a dict shaped like a thread payload from the comments service.

    Cohort-related keys are included only when explicitly supplied, and a
    ``num_children`` of None omits the "children" key entirely.
    """
    # Fall back to the course's "General" topic id, then to a dummy value.
    default_topic_id = course.discussion_topics.get('General', {}).get('id')
    data_commentable_id = commentable_id or default_topic_id or "dummy_commentable_id"

    # Threads attached to a team discussion use the standalone context.
    if get_team(data_commentable_id) is None:
        thread_context = ThreadContext.COURSE
    else:
        thread_context = ThreadContext.STANDALONE

    thread_data = {
        "id": thread_id,
        "type": "thread",
        "title": text,
        "body": text,
        "commentable_id": data_commentable_id,
        "resp_total": 42,
        "resp_skip": 25,
        "resp_limit": 5,
        "group_id": group_id,
        "context": thread_context,
    }
    if group_id is not None:
        thread_data['group_name'] = group_name
    if is_commentable_divided is not None:
        thread_data['is_commentable_divided'] = is_commentable_divided
    if num_children is not None:
        thread_data["children"] = [
            {
                "id": "dummy_comment_id_{}".format(child_index),
                "type": "comment",
                "body": text,
            }
            for child_index in range(num_children)
        ]
    return thread_data
def make_mock_collection_data(
        course,
        text,
        thread_id,
        num_children=None,
        group_id=None,
        commentable_id=None,
        thread_list=None
):
    """
    Return a list of mock thread dicts: one per entry of ``thread_list``
    when given, otherwise a single thread built from the scalar arguments.
    """
    if thread_list:
        return [
            make_mock_thread_data(course=course, text=text, num_children=num_children, **thread_kwargs)
            for thread_kwargs in thread_list
        ]

    single_thread = make_mock_thread_data(
        course=course,
        text=text,
        thread_id=thread_id,
        num_children=num_children,
        group_id=group_id,
        commentable_id=commentable_id,
    )
    return [single_thread]
def make_mock_perform_request_impl(
        course,
        text,
        thread_id="dummy_thread_id",
        group_id=None,
        commentable_id=None,
        num_thread_responses=1,
        thread_list=None
):
    """
    Build a side-effect function that emulates the comments-service client's
    perform_request, dispatching on the requested URL and returning mock
    payload dicts (or None for unrecognized URLs).
    """
    def mock_perform_request_impl(*args, **kwargs):
        # The request URL is the second positional argument.
        url = args[1]
        if url.endswith("threads") or url.endswith("user_profile"):
            # Thread-listing endpoints return a paginated "collection".
            return {
                "collection": make_mock_collection_data(
                    course, text, thread_id, None, group_id, commentable_id, thread_list
                )
            }
        elif thread_id and url.endswith(thread_id):
            # Single-thread endpoint returns the thread payload itself.
            return make_mock_thread_data(
                course=course,
                text=text,
                thread_id=thread_id,
                num_children=num_thread_responses,
                group_id=group_id,
                commentable_id=commentable_id
            )
        elif "/users/" in url:
            res = {
                "default_sort_key": "date",
                "upvoted_ids": [],
                "downvoted_ids": [],
                "subscribed_thread_ids": [],
            }
            # comments service adds these attributes when course_id param is present
            if kwargs.get('params', {}).get('course_id'):
                res.update({
                    "threads_count": 1,
                    "comments_count": 2
                })
            return res
        else:
            return None
    return mock_perform_request_impl
def make_mock_request_impl(
        course,
        text,
        thread_id="dummy_thread_id",
        group_id=None,
        commentable_id=None,
        num_thread_responses=1,
        thread_list=None,
):
    """
    Wrap the perform-request payload builder so each call yields an
    HTTP-response-like Mock: 200 with JSON body when a payload exists,
    otherwise a bare 404.
    """
    payload_for = make_mock_perform_request_impl(
        course,
        text,
        thread_id=thread_id,
        group_id=group_id,
        commentable_id=commentable_id,
        num_thread_responses=num_thread_responses,
        thread_list=thread_list,
    )

    def mock_request_impl(*args, **kwargs):
        payload = payload_for(*args, **kwargs)
        if payload is None:
            return Mock(status_code=404)
        return Mock(status_code=200, text=json.dumps(payload), json=Mock(return_value=payload))

    return mock_request_impl
class StringEndsWithMatcher(object):
    """
    Equality helper: compares equal to any string that ends with ``suffix``.

    Used as an argument matcher in ``assert_called_with`` checks.
    """
    def __init__(self, suffix):
        self.suffix = suffix

    def __eq__(self, other):
        return other.endswith(self.suffix)

    def __repr__(self):
        # Added for readable assertion-failure output in mock call checks.
        return "StringEndsWithMatcher(suffix={!r})".format(self.suffix)
class PartialDictMatcher(object):
    """
    Equality helper: compares equal to any dict that contains at least the
    expected key/value pairs (extra keys in the other dict are ignored).
    """
    def __init__(self, expected_values):
        self.expected_values = expected_values

    def __eq__(self, other):
        # The file is Python 3 (it uses no-arg super() elsewhere), so plain
        # dict.items() replaces the non-stdlib six.iteritems; the redundant
        # list around the generator is dropped so all() can short-circuit.
        return all(
            key in other and other[key] == value
            for key, value in self.expected_values.items()
        )
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Exercise the single_thread view (AJAX success, pagination params,
    method restrictions, 404s, and private-team access denial) with the
    comments-service HTTP layer mocked at the requests level.
    """

    CREATE_USER = False

    def setUp(self):
        super(SingleThreadTestCase, self).setUp()

        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)

    def test_ajax(self, mock_request):
        # Happy path: AJAX request returns the mocked thread payload.
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            text_type(self.course.id),
            "dummy_discussion_id",
            "test_thread_id"
        )

        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEqual(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
            headers=ANY,
            timeout=ANY
        )

    def test_skip_limit(self, mock_request):
        # resp_skip/resp_limit query params must be forwarded to the service.
        text = "dummy content"
        thread_id = "test_thread_id"
        response_skip = "45"
        response_limit = "15"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get(
            "dummy_url",
            {"resp_skip": response_skip, "resp_limit": response_limit},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            text_type(self.course.id),
            "dummy_discussion_id",
            "test_thread_id"
        )
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEqual(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({
                "mark_as_read": True,
                "user_id": 1,
                "recursive": True,
                "resp_skip": response_skip,
                "resp_limit": response_limit,
            }),
            headers=ANY,
            timeout=ANY
        )

    def test_post(self, _mock_request):
        # The view only accepts GET; POST must yield 405.
        request = RequestFactory().post("dummy_url")
        response = views.single_thread(
            request,
            text_type(self.course.id),
            "dummy_discussion_id",
            "dummy_thread_id"
        )
        self.assertEqual(response.status_code, 405)

    def test_not_found(self, mock_request):
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        # Mock request to return 404 for thread request
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
        self.assertRaises(
            Http404,
            views.single_thread,
            request,
            text_type(self.course.id),
            "test_discussion_id",
            "test_thread_id"
        )

    def test_private_team_thread_html(self, mock_request):
        # A user outside the team gets a rendered access-denied page, not the thread.
        discussion_topic_id = 'dummy_discussion_id'
        thread_id = 'test_thread_id'
        CourseTeamFactory.create(discussion_topic_id=discussion_topic_id)
        user_not_in_team = UserFactory.create()
        CourseEnrollmentFactory.create(user=user_not_in_team, course_id=self.course.id)
        self.client.login(username=user_not_in_team.username, password='test')

        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy",
            thread_id=thread_id,
            commentable_id=discussion_topic_id
        )
        with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:
            mocked.return_value = True
            response = self.client.get(
                reverse('single_thread', kwargs={
                    'course_id': six.text_type(self.course.id),
                    'discussion_id': discussion_topic_id,
                    'thread_id': thread_id,
                })
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
            html = response.content.decode('utf-8')
            # Verify that the access denied error message is in the HTML
            self.assertIn(
                'This is a private discussion. You do not have permissions to view this discussion',
                html
            )
class AllowPlusOrMinusOneInt(int):
    """
    A workaround for the fact that assertNumQueries doesn't let you
    specify a range or any tolerance. An 'int' that is 'equal to' its value,
    but also its value +/- 1
    """
    def __init__(self, value):
        super().__init__()
        self.value = value
        # Precompute the accepted values for the tolerant equality check.
        self.values = (value, value - 1, value + 1)

    def __eq__(self, other):
        return other in self.values

    # Defining __eq__ implicitly sets __hash__ to None, which would make
    # instances unhashable — unlike every other int. Restore int hashing so
    # the object still behaves like the int it subclasses.
    __hash__ = int.__hash__

    def __repr__(self):
        return "({} +/- 1)".format(self.value)
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Ensures the number of modulestore queries and number of sql queries are
    independent of the number of responses retrieved for a given discussion thread.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE

    @ddt.data(
        # params: (store type, enterprise enabled, thread responses,
        #          uncached mongo calls, cached mongo calls,
        #          uncached SQL queries, cached SQL queries)
        # Old mongo with cache. There is an additional SQL query for old mongo
        # because the first time that disabled_xblocks is queried is in call_single_thread,
        # vs. the creation of the course (CourseFactory.create). The creation of the
        # course is outside the context manager that is verifying the number of queries,
        # and with split mongo, that method ends up querying disabled_xblocks (which is then
        # cached and hence not queried as part of call_single_thread).
        (ModuleStoreEnum.Type.mongo, False, 1, 5, 2, 21, 7),
        (ModuleStoreEnum.Type.mongo, False, 50, 5, 2, 21, 7),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, False, 1, 3, 3, 21, 8),
        (ModuleStoreEnum.Type.split, False, 50, 3, 3, 21, 8),
        # Enabling Enterprise integration should have no effect on the number of mongo queries made.
        (ModuleStoreEnum.Type.mongo, True, 1, 5, 2, 21, 7),
        (ModuleStoreEnum.Type.mongo, True, 50, 5, 2, 21, 7),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, True, 1, 3, 3, 21, 8),
        (ModuleStoreEnum.Type.split, True, 50, 3, 3, 21, 8),
    )
    @ddt.unpack
    def test_number_of_mongo_queries(
            self,
            default_store,
            enterprise_enabled,
            num_thread_responses,
            num_uncached_mongo_calls,
            num_cached_mongo_calls,
            num_uncached_sql_queries,
            num_cached_sql_queries,
            mock_request
    ):
        ContentTypeGatingConfig.objects.create(enabled=True, enabled_as_of=datetime(2018, 1, 1))

        with modulestore().default_store(default_store):
            course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})

        student = UserFactory.create()
        CourseEnrollmentFactory.create(user=student, course_id=course.id)

        test_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
        )
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = student

        def call_single_thread():
            """
            Call single_thread and assert that it returns what we expect.
            """
            with patch.dict("django.conf.settings.FEATURES", dict(ENABLE_ENTERPRISE_INTEGRATION=enterprise_enabled)):
                response = views.single_thread(
                    request,
                    text_type(course.id),
                    "dummy_discussion_id",
                    test_thread_id
                )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(
                len(json.loads(response.content.decode('utf-8'))["content"]["children"]),
                num_thread_responses
            )

        # Test uncached first, then cached now that the cache is warm.
        cached_calls = [
            [num_uncached_mongo_calls, num_uncached_sql_queries],
            # Sometimes there will be one more or fewer sql call than expected, because the call to
            # CourseMode.modes_for_course sometimes does / doesn't get cached and does / doesn't hit the DB.
            # EDUCATOR-5167
            [num_cached_mongo_calls, AllowPlusOrMinusOneInt(num_cached_sql_queries)],
        ]
        for expected_mongo_calls, expected_sql_queries in cached_calls:
            with self.assertNumQueries(expected_sql_queries, table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
                with check_mongo_calls(expected_mongo_calls):
                    call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
    """
    Verify that a thread in a cohorted topic carries the cohort's group id
    and name, in both the AJAX payload and the rendered HTML.
    """

    def _create_mock_cohorted_thread(self, mock_request):
        # Install a mocked comments-service response for a thread belonging
        # to the student's cohort; returns (body text, thread id).
        mock_text = "dummy content"
        mock_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=mock_text,
            thread_id=mock_thread_id,
            group_id=self.student_cohort.id,
            commentable_id="cohorted_topic",
        )
        return mock_text, mock_thread_id

    def test_ajax(self, mock_request):
        mock_text, mock_thread_id = self._create_mock_cohorted_thread(mock_request)

        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            text_type(self.course.id),
            "cohorted_topic",
            mock_thread_id
        )

        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(
            response_data["content"],
            make_mock_thread_data(
                course=self.course,
                commentable_id="cohorted_topic",
                text=mock_text,
                thread_id=mock_thread_id,
                num_children=1,
                group_id=self.student_cohort.id,
                group_name=self.student_cohort.name,
                is_commentable_divided=True,
            )
        )

    def test_html(self, mock_request):
        _mock_text, mock_thread_id = self._create_mock_cohorted_thread(mock_request)

        self.client.login(username=self.student.username, password='test')
        response = self.client.get(
            reverse('single_thread', kwargs={
                'course_id': six.text_type(self.course.id),
                'discussion_id': "cohorted_topic",
                'thread_id': mock_thread_id,
            })
        )

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content.decode('utf-8')

        # Verify that the group name is correctly included in the HTML
        self.assertRegex(html, r'"group_name": "student_cohort"')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
    """
    Access-control tests for the ``single_thread`` view: which combinations of
    requesting user, topic, and thread group id may load a thread.
    """

    def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
        """
        Invoke ``views.single_thread`` as ``user`` for a mocked thread.

        ``thread_group_id`` is the group id attached to the mocked thread
        (``None`` models a global, non-grouped thread); ``group_id`` is sent
        in the request's query string when ``pass_group_id`` is True.
        Returns the view's response.
        """
        thread_id = "test_thread_id"
        # Prime the comments-service client mock with a thread carrying the
        # requested group id.
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
        )
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.single_thread(
            request,
            text_type(self.course.id),
            commentable_id,
            thread_id
        )

    def test_student_non_cohorted(self, mock_request):
        # Any student can read threads in a non-cohorted topic.
        resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
        self.assertEqual(resp.status_code, 200)

    def test_student_same_cohort(self, mock_request):
        # A student can read a thread belonging to their own cohort.
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(resp.status_code, 200)

    # this test ensures that a thread response from the cs with group_id: null
    # behaves the same as a thread response without a group_id (see: TNL-444)
    def test_student_global_thread_in_cohorted_topic(self, mock_request):
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=None
        )
        self.assertEqual(resp.status_code, 200)

    def test_student_different_cohort(self, mock_request):
        # A student must NOT see a thread owned by another cohort (404).
        self.assertRaises(
            Http404,
            lambda: self.call_view(
                mock_request,
                "cohorted_topic",
                self.student,
                self.student_cohort.id,
                thread_group_id=self.moderator_cohort.id
            )
        )

    def test_moderator_non_cohorted(self, mock_request):
        resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
        self.assertEqual(resp.status_code, 200)

    def test_moderator_same_cohort(self, mock_request):
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.moderator_cohort.id
        )
        self.assertEqual(resp.status_code, 200)

    def test_moderator_different_cohort(self, mock_request):
        # Moderators may read threads from any cohort.
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(resp.status_code, 200)

    def test_private_team_thread(self, mock_request):
        # A user outside the team must get a 403 for a private team discussion.
        CourseTeamFactory.create(discussion_topic_id='dummy_discussion_id')
        user_not_in_team = UserFactory.create()
        CourseEnrollmentFactory(user=user_not_in_team, course_id=self.course.id)

        with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:
            mocked.return_value = True
            response = self.call_view(
                mock_request,
                'non_cohorted_topic',
                user_not_in_team,
                ''
            )
            self.assertEqual(403, response.status_code)
            self.assertEqual(
                views.TEAM_PERMISSION_MESSAGE,
                response.content.decode('utf-8'),
            )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, GroupIdAssertionMixin):
    """
    Checks that group (cohort) information is included in both the HTML and
    AJAX responses of the ``single_thread`` view.
    """
    # Comments-service endpoint inspected by the mixin's assertions.
    cs_endpoint = "/threads/dummy_thread_id"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """
        GET the ``single_thread`` URL as ``user``, optionally passing
        ``group_id`` in the query string and/or marking the request as AJAX.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", group_id=self.student_cohort.id
        )
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"

        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse('single_thread', args=[six.text_type(self.course.id), commentable_id, "dummy_thread_id"]),
            data=request_data,
            **headers
        )

    def test_group_info_in_html_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['content']
        )
@patch('requests.request', autospec=True)
class ForumFormDiscussionContentGroupTestCase(ForumsEnableMixin, ContentGroupTestCase):
    """
    Tests that the ``forum_form_discussion`` API works with different content groups.

    Discussion modules are set up in the ContentGroupTestCase base class, i.e.
    alpha_module => alpha_group_discussion => alpha_cohort => alpha_user/community_ta
    beta_module => beta_group_discussion => beta_cohort => beta_user
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ForumFormDiscussionContentGroupTestCase, self).setUp()
        # One thread per visibility bucket: general, global-group, alpha-only
        # and beta-only.
        self.thread_list = [
            {"thread_id": "test_general_thread_id"},
            {"thread_id": "test_global_group_thread_id", "commentable_id": self.global_module.discussion_id},
            {"thread_id": "test_alpha_group_thread_id", "group_id": self.alpha_module.group_access[0][0],
             "commentable_id": self.alpha_module.discussion_id},
            {"thread_id": "test_beta_group_thread_id", "group_id": self.beta_module.group_access[0][0],
             "commentable_id": self.beta_module.discussion_id}
        ]

    def assert_has_access(self, response, expected_discussion_threads):
        """
        Verify that a users have access to the threads in their assigned
        cohorts and non-cohorted modules.
        """
        discussion_data = json.loads(response.content.decode('utf-8'))['discussion_data']
        self.assertEqual(len(discussion_data), expected_discussion_threads)

    def call_view(self, mock_request, user):
        """GET the forum_form_discussion view as ``user`` via AJAX."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy content",
            thread_list=self.thread_list
        )
        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse("forum_form_discussion", args=[six.text_type(self.course.id)]),
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )

    def test_community_ta_user(self, mock_request):
        """
        Verify that community_ta user has access to all threads regardless
        of cohort.
        """
        response = self.call_view(
            mock_request,
            self.community_ta
        )
        self.assert_has_access(response, 4)

    def test_alpha_cohort_user(self, mock_request):
        """
        Verify that alpha_user has access to alpha_cohort and non-cohorted
        threads.
        """
        response = self.call_view(
            mock_request,
            self.alpha_user
        )
        self.assert_has_access(response, 3)

    def test_beta_cohort_user(self, mock_request):
        """
        Verify that beta_user has access to beta_cohort and non-cohorted
        threads.
        """
        response = self.call_view(
            mock_request,
            self.beta_user
        )
        self.assert_has_access(response, 3)

    def test_global_staff_user(self, mock_request):
        """
        Verify that global staff user has access to all threads regardless
        of cohort.
        """
        response = self.call_view(
            mock_request,
            self.staff_user
        )
        self.assert_has_access(response, 4)
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ForumsEnableMixin, UrlResetMixin, ContentGroupTestCase):
    """
    Tests that access to a single thread respects the content group of the
    discussion module containing it.
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(SingleThreadContentGroupTestCase, self).setUp()

    def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
        """
        Verify that a user has access to a thread within a given
        discussion_id when should_have_access is True, otherwise
        verify that the user does not have access to that thread.
        """
        def call_single_thread():
            # Log in as the user under test and fetch the single-thread page.
            self.client.login(username=user.username, password='test')
            return self.client.get(
                reverse('single_thread', args=[six.text_type(self.course.id), discussion_id, thread_id])
            )

        if should_have_access:
            self.assertEqual(call_single_thread().status_code, 200)
        else:
            self.assertEqual(call_single_thread().status_code, 404)

    def test_staff_user(self, mock_request):
        """
        Verify that the staff user can access threads in the alpha,
        beta, and global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        for discussion_xblock in [self.alpha_module, self.beta_module, self.global_module]:
            self.assert_can_access(self.staff_user, discussion_xblock.discussion_id, thread_id, True)

    def test_alpha_user(self, mock_request):
        """
        Verify that the alpha user can access threads in the alpha and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        for discussion_xblock in [self.alpha_module, self.global_module]:
            self.assert_can_access(self.alpha_user, discussion_xblock.discussion_id, thread_id, True)

        self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)

    def test_beta_user(self, mock_request):
        """
        Verify that the beta user can access threads in the beta and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        for discussion_xblock in [self.beta_module, self.global_module]:
            self.assert_can_access(self.beta_user, discussion_xblock.discussion_id, thread_id, True)

        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_non_cohorted_user(self, mock_request):
        """
        Verify that the non-cohorted user can access threads in just the
        global discussion module.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
        self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
        self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)

    def test_course_context_respected(self, mock_request):
        """
        Verify that course threads go through discussion_category_id_access method.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id
        )

        # Beta user does not have access to alpha_module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_standalone_context_respected(self, mock_request):
        """
        Verify that standalone threads don't go through discussion_category_id_access method.
        """
        # For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
        # to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
        thread_id = "test_thread_id"
        CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.alpha_module.discussion_id
        )
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id,
            commentable_id=self.alpha_module.discussion_id
        )

        # If a thread returns context other than "course", the access check is not done, and the beta user
        # can see the alpha discussion module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests the thread ``context`` reported by ``inline_discussion`` for team
    (standalone) topics, and access control for private team discussions.
    """

    def setUp(self):
        super(InlineDiscussionContextTestCase, self).setUp()
        self.course = CourseFactory.create()
        CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
        self.discussion_topic_id = "dummy_topic"
        # Attach the topic to a team so its threads get the standalone context.
        self.team = CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.discussion_topic_id
        )

        self.team.add_user(self.user)
        self.user_not_in_team = UserFactory.create()

    def test_context_can_be_standalone(self, mock_request):
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )

        request = RequestFactory().get("dummy_url")
        request.user = self.user

        response = views.inline_discussion(
            request,
            six.text_type(self.course.id),
            self.discussion_topic_id,
        )

        json_response = json.loads(response.content.decode('utf-8'))
        # Threads in a team topic must be reported with the STANDALONE context.
        self.assertEqual(json_response['discussion_data'][0]['context'], ThreadContext.STANDALONE)

    def test_private_team_discussion(self, mock_request):
        # First set the team discussion to be private
        CourseEnrollmentFactory(user=self.user_not_in_team, course_id=self.course.id)
        request = RequestFactory().get("dummy_url")
        request.user = self.user_not_in_team
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )

        with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:
            mocked.return_value = True
            response = views.inline_discussion(
                request,
                six.text_type(self.course.id),
                self.discussion_topic_id,
            )
            # Non-members of the team are rejected with 403 and the standard message.
            self.assertEqual(response.status_code, 403)
            self.assertEqual(response.content.decode('utf-8'), views.TEAM_PERMISSION_MESSAGE)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
        CohortedTestCase,
        CohortedTopicGroupIdTestMixin,
        NonCohortedTopicGroupIdTestMixin
):
    """
    Group-id handling tests for the ``inline_discussion`` view, covering both
    cohorted and non-cohorted topics (assertions come from the two mixins).
    """
    # Comments-service endpoint inspected by the mixins' assertions.
    cs_endpoint = "/threads"

    def setUp(self):
        super(InlineDiscussionGroupIdTestCase, self).setUp()
        self.cohorted_commentable_id = 'cohorted_topic'

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """
        Call ``views.inline_discussion`` as ``user`` with ``commentable_id``,
        optionally passing ``group_id`` in the query string.
        """
        # NOTE(review): the mock is always primed with
        # self.cohorted_commentable_id while ``commentable_id`` is what gets
        # passed to the view — confirm this asymmetry is intentional.
        kwargs = {'commentable_id': self.cohorted_commentable_id}
        if group_id:
            # avoid causing a server error when the LMS chokes attempting
            # to find a group name for the group_id, when we're testing with
            # an invalid one.
            try:
                CourseUserGroup.objects.get(id=group_id)
                kwargs['group_id'] = group_id
            except CourseUserGroup.DoesNotExist:
                pass
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data
        )
        request.user = user
        return views.inline_discussion(
            request,
            text_type(self.course.id),
            commentable_id
        )

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            self.cohorted_commentable_id,
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the ``forum_form_discussion`` view.
    """
    # Comments-service endpoint inspected by the mixin's assertions.
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):  # pylint: disable=arguments-differ
        """
        GET the forum_form_discussion page as ``user``, optionally passing
        ``group_id`` in the query string and/or marking the request as AJAX.
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"

        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse("forum_form_discussion", args=[six.text_type(self.course.id)]),
            data=request_data,
            **headers
        )

    def test_group_info_in_html_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the ``user_profile`` view: what group
    information appears in responses, and which ``group_id`` is forwarded to
    the comments service depending on the requester's privileges.
    """
    # Comments-service endpoint inspected by the mixin's assertions.
    cs_endpoint = "/active_threads"

    def call_view_for_profiled_user(
            self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
    ):
        """
        Calls "user_profile" view method on behalf of "requesting_user" to get information about
        the user "profiled_user".
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"

        self.client.login(username=requesting_user.username, password='test')
        return self.client.get(
            reverse('user_profile', args=[six.text_type(self.course.id), profiled_user.id]),
            data=request_data,
            **headers
        )

    def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):  # pylint: disable=arguments-differ
        """Profile a user on their own behalf (requester == profiled user)."""
        return self.call_view_for_profiled_user(
            mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
        )

    def test_group_info_in_html_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )

    def _test_group_id_passed_to_user_profile(
            self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
    ):
        """
        Helper method for testing whether or not group_id was passed to the user_profile request.
        """
        def get_params_from_user_info_call(for_specific_course):
            """
            Returns the request parameters for the user info call with either course_id specified or not,
            depending on value of 'for_specific_course'.
            """
            # There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
            # tested. The other 2 calls are for user info; one of those calls is for general information about the user,
            # and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
            # have discussion moderator privileges, it should also contain a group_id.
            for r_call in mock_request.call_args_list:
                if not r_call[0][1].endswith(self.cs_endpoint):
                    params = r_call[1]["params"]
                    has_course_id = "course_id" in params
                    if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
                        return params
            # BUG FIX: ``for_specific_course`` is a bool, so concatenating it
            # to a str raised TypeError instead of producing the intended
            # failure message; format it explicitly.
            pytest.fail(
                "Did not find appropriate user_profile call for 'for_specific_course'={}".format(for_specific_course)
            )

        mock_request.reset_mock()
        self.call_view_for_profiled_user(
            mock_request,
            requesting_user,
            profiled_user,
            group_id,
            pass_group_id=pass_group_id,
            is_ajax=False
        )
        # Should never have a group_id if course_id was not included in the request.
        params_without_course_id = get_params_from_user_info_call(False)
        self.assertNotIn("group_id", params_without_course_id)

        params_with_course_id = get_params_from_user_info_call(True)
        if expect_group_id_in_request:
            self.assertIn("group_id", params_with_course_id)
            self.assertEqual(group_id, params_with_course_id["group_id"])
        else:
            self.assertNotIn("group_id", params_with_course_id)

    def test_group_id_passed_to_user_profile_student(self, mock_request):
        """
        Test that the group id is always included when requesting user profile information for a particular
        course if the requester does not have discussion moderation privileges.
        """
        def verify_group_id_always_present(profiled_user, pass_group_id):
            """
            Helper method to verify that group_id is always present for student in course
            (non-privileged user).
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
            )

        # In all these test cases, the requesting_user is the student (non-privileged user).
        # The profile returned on behalf of the student is for the profiled_user.
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)

    def test_group_id_user_profile_moderator(self, mock_request):
        """
        Test that the group id is only included when a privileged user requests user profile information for a
        particular course and user if the group_id is explicitly passed in.
        """
        def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is not present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        # In all these test cases, the requesting_user is the moderator (privileged user).
        # If the group_id is explicitly passed, it will be present in the request.
        verify_group_id_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_present(
            profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
        )

        # If the group_id is not explicitly passed, it will not be present because the requesting_user
        # has discussion moderator privileges.
        verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the ``followed_threads`` view.
    """
    # Comments-service endpoint inspected by the mixin's assertions.
    cs_endpoint = "/subscribed_threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """
        Call ``views.followed_threads`` as ``user`` for that user's own id,
        optionally passing ``group_id`` in the query string.
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.followed_threads(
            request,
            text_type(self.course.id),
            user.id
        )

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests the ``inline_discussion`` view for a discussion xblock, in
    particular the context forwarded to the comments service for team topics.
    """

    def setUp(self):
        super(InlineDiscussionTestCase, self).setUp()

        self.course = CourseFactory.create(
            org="TestX",
            number="101",
            display_name="Test Course",
            teams_configuration=TeamsConfig({
                'topics': [{
                    'id': 'topic_id',
                    'name': 'A topic',
                    'description': 'A topic',
                }]
            })
        )
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.discussion1 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion1",
            display_name='Discussion1',
            discussion_category="Chapter",
            discussion_target="Discussion1"
        )

    def send_request(self, mock_request, params=None):
        """
        Creates and returns a request with params set, and configures
        mock_request to return appropriate values.
        """
        request = RequestFactory().get("dummy_url", params if params else {})
        request.user = self.student
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
        )
        return views.inline_discussion(
            request, text_type(self.course.id), self.discussion1.discussion_id
        )

    def test_context(self, mock_request):
        # Attaching the discussion topic to a team makes its threads standalone.
        team = CourseTeamFactory(
            name='Team Name',
            topic_id='topic_id',
            course_id=self.course.id,
            discussion_topic_id=self.discussion1.discussion_id
        )

        team.add_user(self.student)

        self.send_request(mock_request)
        # The comments-service call must carry the STANDALONE context param.
        self.assertEqual(mock_request.call_args[1]['params']['context'], ThreadContext.STANDALONE)
@patch('requests.request', autospec=True)
class UserProfileTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Tests of the ``user_profile`` view: HTML and AJAX rendering, error cases,
    and the parameters forwarded to the comments service.
    """

    TEST_THREAD_TEXT = 'userprofile-test-text'
    TEST_THREAD_ID = 'userprofile-test-thread-id'

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(UserProfileTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.student = UserFactory.create()
        self.profiled_user = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        CourseEnrollmentFactory.create(user=self.profiled_user, course_id=self.course.id)

    def get_response(self, mock_request, params, **headers):
        """
        GET the profiled user's profile page as ``self.student`` and verify
        the expected ``active_threads`` call was made to the comments service.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        self.client.login(username=self.student.username, password='test')

        response = self.client.get(
            reverse('user_profile', kwargs={
                'course_id': six.text_type(self.course.id),
                'user_id': self.profiled_user.id,
            }),
            data=params,
            **headers
        )
        mock_request.assert_any_call(
            "get",
            StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
            data=None,
            params=PartialDictMatcher({
                "course_id": text_type(self.course.id),
                "page": params.get("page", 1),
                "per_page": views.THREADS_PER_PAGE
            }),
            headers=ANY,
            timeout=ANY
        )
        return response

    def check_html(self, mock_request, **params):
        """Verify the HTML rendering of the profile page."""
        response = self.get_response(mock_request, params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content.decode('utf-8')
        self.assertRegex(html, r'data-page="1"')
        self.assertRegex(html, r'data-num-pages="1"')
        self.assertRegex(html, r'<span class="discussion-count">1</span> discussion started')
        self.assertRegex(html, r'<span class="discussion-count">2</span> comments')
        # BUG FIX: these patterns were syntactically invalid (unescaped nested
        # single quotes).  The page embeds the thread data with HTML-escaped
        # quotes, so match the &#39; entities.
        self.assertRegex(html, u'&#39;id&#39;: &#39;{}&#39;'.format(self.TEST_THREAD_ID))
        self.assertRegex(html, u'&#39;title&#39;: &#39;{}&#39;'.format(self.TEST_THREAD_TEXT))
        self.assertRegex(html, u'&#39;body&#39;: &#39;{}&#39;'.format(self.TEST_THREAD_TEXT))
        if six.PY2:
            # Python 2 repr() of a unicode string carries the u prefix.
            self.assertRegex(html, u'&#39;username&#39;: u&#39;{}&#39;'.format(self.student.username))
        else:
            self.assertRegex(html, u'&#39;username&#39;: &#39;{}&#39;'.format(self.student.username))

    def check_ajax(self, mock_request, **params):
        """Verify the JSON (AJAX) rendering of the profile page."""
        response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
        response_data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(
            sorted(response_data.keys()),
            ["annotated_content_info", "discussion_data", "num_pages", "page"]
        )
        self.assertEqual(len(response_data['discussion_data']), 1)
        self.assertEqual(response_data["page"], 1)
        self.assertEqual(response_data["num_pages"], 1)
        self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
        self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
        self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)

    def test_html(self, mock_request):
        self.check_html(mock_request)

    def test_ajax(self, mock_request):
        self.check_ajax(mock_request)

    def test_404_non_enrolled_user(self, __):
        """
        Test that when student try to visit un-enrolled students' discussion profile,
        the system raises Http404.
        """
        unenrolled_user = UserFactory.create()
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                text_type(self.course.id),
                unenrolled_user.id
            )

    def test_404_profiled_user(self, _mock_request):
        # A user id matching no user must 404.
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                text_type(self.course.id),
                -999
            )

    def test_404_course(self, _mock_request):
        # A malformed/unknown course id must 404.
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                "non/existent/course",
                self.profiled_user.id
            )

    def test_post(self, mock_request):
        # The profile page is read-only: POST is rejected with 405.
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().post("dummy_url")
        request.user = self.student
        response = views.user_profile(
            request,
            text_type(self.course.id),
            self.profiled_user.id
        )
        self.assertEqual(response.status_code, 405)
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Tests that every request made to the comments service carries the
    expected HTTP headers (Accept-Language, X-Edx-Api-Key).
    """

    CREATE_USER = False

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # BUG FIX: super().setUp() (which invokes UrlResetMixin) was called
        # twice in a row; calling it once is sufficient.
        super(CommentsServiceRequestHeadersTestCase, self).setUp()

        username = "foo"
        password = "bar"
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(
            self.client.login(username=username, password=password)
        )

        self.addCleanup(translation.deactivate)

    def assert_all_calls_have_header(self, mock_request, key, value):
        """Assert every comments-service call included header ``key: value``."""
        expected = call(
            ANY,  # method
            ANY,  # url
            data=ANY,
            params=ANY,
            headers=PartialDictMatcher({key: value}),
            timeout=ANY
        )
        for actual in mock_request.call_args_list:
            self.assertEqual(expected, actual)

    def test_accept_language(self, mock_request):
        # The user's Accept-Language must be forwarded to the comments service.
        lang = "eo"
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        self.client.get(
            reverse(
                "single_thread",
                kwargs={
                    "course_id": text_type(self.course.id),
                    "discussion_id": "dummy_discussion_id",
                    "thread_id": thread_id,
                }
            ),
            HTTP_ACCEPT_LANGUAGE=lang,
        )
        self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)

    @override_settings(COMMENTS_SERVICE_KEY="test_api_key")
    def test_api_key(self, mock_request):
        # Every comments-service call must authenticate with the API key.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")

        self.client.get(
            reverse(
                "forum_form_discussion",
                kwargs={"course_id": text_type(self.course.id)}
            ),
        )
        self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Tests that ``inline_discussion`` round-trips unicode thread content
    (sample inputs are supplied by UnicodeTestMixin).
    """

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(InlineDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(InlineDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        # Invoked by UnicodeTestMixin with a variety of unicode sample strings.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student

        response = views.inline_discussion(
            request, text_type(self.course.id), self.course.discussion_topics['General']['id']
        )
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class ForumFormDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Tests that ``forum_form_discussion`` round-trips unicode thread content
    (sample inputs are supplied by UnicodeTestMixin).
    """

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumFormDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(ForumFormDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        # Invoked by UnicodeTestMixin with a variety of unicode sample strings.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True

        response = views.forum_form_discussion(request, text_type(self.course.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
@ddt.ddt
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """Verify that forum views escape script payloads supplied via query parameters."""

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ForumDiscussionXSSTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create()
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(self.client.login(username=username, password=password))

    # ddt value is the injected payload; the two @patch mocks are appended after it.
    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req):
        """
        Test that XSS attack is prevented
        """
        mock_user.return_value.to_dict.return_value = {}
        mock_req.return_value.status_code = 200
        reverse_url = "%s%s" % (reverse(
            "forum_form_discussion",
            kwargs={"course_id": six.text_type(self.course.id)}), '/forum_form_discussion')
        # Test that malicious code does not appear in html
        url = "%s?%s=%s" % (reverse_url, 'sort_key', malicious_code)
        resp = self.client.get(url)
        self.assertNotContains(resp, malicious_code)

    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
        """
        Test that XSS attack is prevented
        """
        mock_threads.return_value = [], 1, 1
        mock_from_django_user.return_value.to_dict.return_value = {
            'upvoted_ids': [],
            'downvoted_ids': [],
            'subscribed_thread_ids': []
        }
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        url = reverse('user_profile',
                      kwargs={'course_id': six.text_type(self.course.id), 'user_id': str(self.student.id)})
        # Test that malicious code does not appear in html
        url_string = "%s?%s=%s" % (url, 'page', malicious_code)
        resp = self.client.get(url_string)
        self.assertNotContains(resp, malicious_code)
class ForumDiscussionSearchUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that forum search (AJAX text query) round-trips unicode content."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumDiscussionSearchUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(ForumDiscussionSearchUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Called by UnicodeTestMixin once per unicode sample string."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        # ``text`` is also sent as the search query string.
        data = {
            "ajax": 1,
            "text": text,
        }
        request = RequestFactory().get("dummy_url", data)
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
        response = views.forum_form_discussion(request, text_type(self.course.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class SingleThreadUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the single-thread AJAX view round-trips unicode content."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(SingleThreadUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})

    @classmethod
    def setUpTestData(cls):
        super(SingleThreadUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Called by UnicodeTestMixin once per unicode sample string."""
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
        response = views.single_thread(request, text_type(self.course.id), "dummy_discussion_id", thread_id)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        # single_thread returns the thread under "content" rather than "discussion_data".
        self.assertEqual(response_data["content"]["title"], text)
        self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the user-profile AJAX view round-trips unicode thread content."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UserProfileUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(UserProfileUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Invoked by UnicodeTestMixin once per unicode sample string."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        ajax_request = RequestFactory().get("dummy_url")
        ajax_request.user = self.student
        ajax_request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        resp = views.user_profile(ajax_request, text_type(self.course.id), str(self.student.id))
        self.assertEqual(resp.status_code, 200)
        payload = json.loads(resp.content.decode('utf-8'))
        thread = payload["discussion_data"][0]
        self.assertEqual(thread["title"], text)
        self.assertEqual(thread["body"], text)
class FollowedThreadsUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the followed-threads AJAX view round-trips unicode content."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(FollowedThreadsUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(FollowedThreadsUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Called by UnicodeTestMixin once per unicode sample string."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
        response = views.followed_threads(request, text_type(self.course.id), str(self.student.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests for the behavior of views depending on if the student is enrolled
    in the course
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(EnrollmentTestCase, self).setUp()
        # Note: the student is deliberately NOT enrolled in the course here.
        self.course = CourseFactory.create()
        self.student = UserFactory.create()

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def test_unenrolled(self, mock_request):
        """An unenrolled user must be redirected away from the forum view."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        request = RequestFactory().get('dummy_url')
        request.user = self.student
        with self.assertRaises(CourseAccessRedirect):
            views.forum_form_discussion(request, course_id=text_type(self.course.id))  # pylint: disable=no-value-for-parameter, unexpected-keyword-arg
@patch('requests.request', autospec=True)
class EnterpriseConsentTestCase(EnterpriseTestConsentRequired, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Ensure that the Enterprise Data Consent redirects are in place only when consent is required.
    """
    # Each test creates/logs in its own user, so skip the default one.
    CREATE_USER = False

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Invoke UrlResetMixin setUp
        super(EnterpriseConsentTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.discussion_id = 'dummy_discussion_id'
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': self.discussion_id}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(
            self.client.login(username=username, password=password)
        )
        # Reset any translation activated during the test run.
        self.addCleanup(translation.deactivate)

    @patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')
    def test_consent_required(self, mock_enterprise_customer_for_request, mock_request):
        """
        Test that enterprise data sharing consent is required when enabled for the various discussion views.
        """
        # ENT-924: Temporary solution to replace sensitive SSO usernames.
        mock_enterprise_customer_for_request.return_value = None

        thread_id = 'dummy'
        course_id = six.text_type(self.course.id)
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy', thread_id=thread_id)

        # Exercise both the forum landing page and a single-thread page.
        for url in (
                reverse('forum_form_discussion',
                        kwargs=dict(course_id=course_id)),
                reverse('single_thread',
                        kwargs=dict(course_id=course_id, discussion_id=self.discussion_id, thread_id=thread_id)),
        ):
            self.verify_consent_required(self.client, url)  # pylint: disable=no-value-for-parameter
class DividedDiscussionsTestCase(CohortViewsTestCase):
    """Shared fixture builder for tests that exercise divided (cohorted) discussions."""

    def create_divided_discussions(self):
        """
        Set up a divided discussion in the system, complete with all the fixings
        """
        divided_inline_discussions = ['Topic A']
        divided_course_wide_discussions = ["Topic B"]
        divided_discussions = divided_inline_discussions + divided_course_wide_discussions

        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id=topic_name_to_id(self.course, "Topic A"),
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=datetime.now()
        )
        # course-wide discussion
        discussion_topics = {
            "Topic B": {"id": "Topic B"},
        }

        config_course_cohorts(
            self.course,
            is_cohorted=True,
        )

        config_course_discussions(
            self.course,
            discussion_topics=discussion_topics,
            divided_discussions=divided_discussions
        )
        # Returned so callers can compare against handler responses.
        return divided_inline_discussions, divided_course_wide_discussions
class CourseDiscussionTopicsTestCase(DividedDiscussionsTestCase):
    """
    Tests the `divide_discussion_topics` view.
    """

    def test_non_staff(self):
        """
        Verify that we cannot access divide_discussion_topics if we're a non-staff user.
        """
        self._verify_non_staff_cannot_access(views.discussion_topics, "GET", [six.text_type(self.course.id)])

    def test_get_discussion_topics(self):
        """
        Verify that discussion_topics is working for HTTP GET.
        """
        # create inline & course-wide discussion to verify the different map.
        self.create_divided_discussions()

        response = self.get_handler(self.course, handler=views.discussion_topics)
        # start_date is generated at fixture-creation time, so read it back from
        # the response instead of hard-coding it in the expectation.
        start_date = response['inline_discussions']['subcategories']['Chapter']['start_date']
        expected_response = {
            "course_wide_discussions": {
                'children': [['Topic B', TYPE_ENTRY]],
                'entries': {
                    'Topic B': {
                        'sort_key': 'A',
                        'is_divided': True,
                        'id': topic_name_to_id(self.course, "Topic B"),
                        'start_date': response['course_wide_discussions']['entries']['Topic B']['start_date']
                    }
                }
            },
            "inline_discussions": {
                'subcategories': {
                    'Chapter': {
                        'subcategories': {},
                        'children': [['Discussion', TYPE_ENTRY]],
                        'entries': {
                            'Discussion': {
                                'sort_key': None,
                                'is_divided': True,
                                'id': topic_name_to_id(self.course, "Topic A"),
                                'start_date': start_date
                            }
                        },
                        'sort_key': 'Chapter',
                        'start_date': start_date
                    }
                },
                'children': [['Chapter', TYPE_SUBCATEGORY]]
            }
        }

        self.assertEqual(response, expected_response)
class CourseDiscussionsHandlerTestCase(DividedDiscussionsTestCase):
    """
    Tests the course_discussion_settings_handler
    """

    def get_expected_response(self):
        """
        Returns the static response dict.
        """
        return {
            u'always_divide_inline_discussions': False,
            u'divided_inline_discussions': [],
            u'divided_course_wide_discussions': [],
            u'id': 1,
            u'division_scheme': u'cohort',
            u'available_division_schemes': [u'cohort']
        }

    def test_non_staff(self):
        """
        Verify that we cannot access course_discussions_settings_handler if we're a non-staff user.
        """
        # NOTE(review): this test references the handler bare while the other tests
        # use ``views.course_discussions_settings_handler`` — presumably both names
        # are imported at module level; confirm against the file's import block.
        self._verify_non_staff_cannot_access(
            course_discussions_settings_handler, "GET", [six.text_type(self.course.id)]
        )
        self._verify_non_staff_cannot_access(
            course_discussions_settings_handler, "PATCH", [six.text_type(self.course.id)]
        )

    def test_update_always_divide_inline_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for always_divide_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)

        response = self.get_handler(self.course, handler=course_discussions_settings_handler)

        expected_response = self.get_expected_response()

        self.assertEqual(response, expected_response)

        expected_response['always_divide_inline_discussions'] = True
        response = self.patch_handler(
            self.course, data=expected_response, handler=course_discussions_settings_handler
        )

        self.assertEqual(response, expected_response)

    def test_update_course_wide_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for divided_course_wide_discussions via HTTP PATCH.
        """
        # course-wide discussion
        discussion_topics = {
            "Topic B": {"id": "Topic B"},
        }

        config_course_cohorts(self.course, is_cohorted=True)
        config_course_discussions(self.course, discussion_topics=discussion_topics)

        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)

        expected_response = self.get_expected_response()

        self.assertEqual(response, expected_response)

        expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, "Topic B")]
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )

        self.assertEqual(response, expected_response)

    def test_update_inline_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for divided_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)

        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)

        expected_response = self.get_expected_response()

        self.assertEqual(response, expected_response)

        # Clear cached state so the freshly created discussion module is visible.
        RequestCache.clear_all_namespaces()
        now = datetime.now()
        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="Topic_A",
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=now
        )

        expected_response['divided_inline_discussions'] = ["Topic_A"]
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )

        self.assertEqual(response, expected_response)

    def test_get_settings(self):
        """
        Verify that course_discussions_settings_handler is working for HTTP GET.
        """
        divided_inline_discussions, divided_course_wide_discussions = self.create_divided_discussions()

        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()

        expected_response['divided_inline_discussions'] = [topic_name_to_id(self.course, name)
                                                           for name in divided_inline_discussions]
        expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, name)
                                                                for name in divided_course_wide_discussions]

        self.assertEqual(response, expected_response)

    def test_update_settings_with_invalid_field_data_type(self):
        """
        Verify that course_discussions_settings_handler return HTTP 400 if field data type is incorrect.
        """
        config_course_cohorts(self.course, is_cohorted=True)

        # ``always_divide_inline_discussions`` must be a bool; a string must be rejected.
        response = self.patch_handler(
            self.course,
            data={'always_divide_inline_discussions': ''},
            expected_response_code=400,
            handler=views.course_discussions_settings_handler
        )
        self.assertEqual(
            u"Incorrect field type for `{}`. Type must be `{}`".format('always_divide_inline_discussions',
                                                                       bool.__name__),
            response.get("error")
        )

    def test_available_schemes(self):
        """Available division schemes reflect cohort config and enrollment modes."""
        # Cohorts disabled, single enrollment mode.
        config_course_cohorts(self.course, is_cohorted=False)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        expected_response['available_division_schemes'] = []
        self.assertEqual(response, expected_response)

        # Add 2 enrollment modes
        CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
        CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response['available_division_schemes'] = [CourseDiscussionSettings.ENROLLMENT_TRACK]
        self.assertEqual(response, expected_response)

        # Enable cohorts
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response['available_division_schemes'] = [
            CourseDiscussionSettings.COHORT, CourseDiscussionSettings.ENROLLMENT_TRACK
        ]
        self.assertEqual(response, expected_response)
class DefaultTopicIdGetterTestCase(ModuleStoreTestCase):
    """
    Tests the `_get_discussion_default_topic_id` helper.
    """

    def test_no_default_topic(self):
        """When no topic is flagged as default, the helper returns None."""
        topics = {
            'dummy discussion': {
                'id': 'dummy_discussion_id',
            },
        }
        course = CourseFactory.create(discussion_topics=topics)
        self.assertEqual(None, _get_discussion_default_topic_id(course))

    def test_default_topic_id(self):
        """The topic carrying 'default': True wins over other topics."""
        topics = {
            'dummy discussion': {
                'id': 'dummy_discussion_id',
            },
            'another discussion': {
                'id': 'another_discussion_id',
                'default': True,
            },
        }
        course = CourseFactory.create(discussion_topics=topics)
        self.assertEqual('another_discussion_id', _get_discussion_default_topic_id(course))
class ThreadViewedEventTestCase(EventTestMixin, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Forum thread views are expected to launch analytics events. Test these here.
    """
    CATEGORY_ID = 'i4x-edx-discussion-id'
    CATEGORY_NAME = 'Discussion 1'
    PARENT_CATEGORY_NAME = 'Chapter 1'
    DUMMY_THREAD_ID = 'dummythreadids'
    DUMMY_TITLE = 'Dummy title'
    DUMMY_URL = 'https://example.com/dummy/url/'

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):  # pylint: disable=arguments-differ
        super(ThreadViewedEventTestCase, self).setUp('eventtracking.tracker')

        self.course = CourseFactory.create(
            teams_configuration=TeamsConfig({
                'topics': [{
                    'id': 'arbitrary-topic-id',
                    'name': 'arbitrary-topic-name',
                    'description': 'arbitrary-topic-desc'
                }]
            })
        )
        seed_permissions_roles(self.course.id)

        PASSWORD = 'test'
        self.student = UserFactory.create(password=PASSWORD)
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

        self.staff = UserFactory.create(is_staff=True)
        UserBasedRole(user=self.staff, role=CourseStaffRole.ROLE).add_course(self.course.id)

        # A discussion module and a team whose discussion points at it, so the
        # emitted event carries a team_id.
        self.category = ItemFactory.create(
            parent_location=self.course.location,
            category='discussion',
            discussion_id=self.CATEGORY_ID,
            discussion_category=self.PARENT_CATEGORY_NAME,
            discussion_target=self.CATEGORY_NAME,
        )
        self.team = CourseTeamFactory.create(
            name='Team 1',
            course_id=self.course.id,
            topic_id='arbitrary-topic-id',
            discussion_topic_id=self.category.discussion_id,
        )
        CourseTeamMembershipFactory.create(team=self.team, user=self.student)
        self.client.login(username=self.student.username, password=PASSWORD)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.perform_request')
    def test_thread_viewed_event(self, mock_perform_request):
        """
        Viewing a single thread emits exactly one 'edx.forum.thread.viewed'
        event containing (at least) the expected key/value pairs.
        """
        mock_perform_request.side_effect = make_mock_perform_request_impl(
            course=self.course,
            text=self.DUMMY_TITLE,
            thread_id=self.DUMMY_THREAD_ID,
            commentable_id=self.category.discussion_id,
        )
        url = '/courses/{0}/discussion/forum/{1}/threads/{2}'.format(
            six.text_type(self.course.id),
            self.category.discussion_id,
            self.DUMMY_THREAD_ID
        )
        self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')

        expected_event = {
            'id': self.DUMMY_THREAD_ID,
            'title': self.DUMMY_TITLE,
            'commentable_id': self.category.discussion_id,
            'category_id': self.category.discussion_id,
            'category_name': self.category.discussion_target,
            'user_forums_roles': [FORUM_ROLE_STUDENT],
            'user_course_roles': [],
            'target_username': self.student.username,
            'team_id': self.team.id,
            'url': self.DUMMY_URL,
        }

        self.assert_event_emission_count('edx.forum.thread.viewed', 1)
        _, event = self.get_latest_call_args()
        event_items = list(event.items())
        # Bug fix: the previous code passed a generator expression to
        # assertTrue(), which is always truthy and therefore asserted nothing.
        # Assert each expected key/value pair individually instead.
        for kv_pair in expected_event.items():
            self.assertIn(kv_pair, event_items)
| msegado/edx-platform | lms/djangoapps/discussion/tests/test_views.py | Python | agpl-3.0 | 91,039 | [
"VisIt"
] | fbe6c8c31e6cc820823ec97fe222e2ed74f8b86e545ee6b47a03816f72e0068f |
from resp_base import Obligation, Deadline, ResourceDelta, ResponsibilityEffect, Act, mean, BullshitAgent
from resp_base import CourseworkWorkflow, IncompetentCourseworkWorkflow, HedonisticAgent, StudiousAgent, Lecturer
from theatre_ag.theatre_ag import SynchronizingClock
import matplotlib.pyplot as plt
from random import seed
from copy import copy
import unittest
class TestBehaviourGraphs(unittest.TestCase):
    """Simulation trials comparing lecturer-advised vs. unadvised student agents."""

    def manualSetUp(self,
                    max_ticks=100,
                    number_of_agents=7):
        """Build a fresh clock plus ``number_of_agents`` students of every
        (behaviour type x competence) combination, and one lecturer.

        Called explicitly from trial() (not unittest's setUp) so each trial
        controls its own tick budget and agent count.
        """
        self.global_clock = SynchronizingClock(max_ticks=max_ticks)
        self.lecturer_count = 1
        self.student_count = number_of_agents  # (per type of student)
        self.students = []
        self.lecturers = []

        # Construct students: one of each behaviour type, with both a competent
        # and an incompetent workflow variant.
        for i in range(self.student_count):
            for type_of_student in [HedonisticAgent, StudiousAgent, BullshitAgent]:
                for competence in [CourseworkWorkflow, IncompetentCourseworkWorkflow]:
                    student_workflow = competence()
                    curr_student = type_of_student([],
                                                   "student_" + str(len(self.students)),
                                                   self.global_clock,
                                                   [student_workflow])
                    # Create acts and effects to register with the student created here
                    writing_effect = ResponsibilityEffect({'essays_written': 1})
                    programming_effect = ResponsibilityEffect({'working_programs': 1})
                    writing_act = Act(writing_effect,
                                      student_workflow.write_essay,
                                      student_workflow)
                    programming_act = Act(programming_effect,
                                          student_workflow.write_program,
                                          student_workflow)
                    # Register the acts
                    curr_student.register_act(writing_act)
                    curr_student.register_act(programming_act)
                    curr_student.start()
                    self.students.append(curr_student)

        # Construct lecturers
        for i in range(self.lecturer_count):
            curr_lecturer = Lecturer([], "lecturer_" + str(i), self.global_clock, [])
            curr_lecturer.start()
            self.lecturers.append(curr_lecturer)

    def trial(self,
              complete_ticks=True,
              max_ticks=100,
              number_of_agents=7):
        """Run one seeded trial: delegate 10 rounds of three obligations to every
        student, then (optionally) run the clock to completion.

        complete_ticks=False leaves the clock un-run so the caller can interleave
        lecturer advice with ticks.
        """
        # Fixed seed makes trials reproducible/comparable across configurations.
        seed(0)
        self.manualSetUp(max_ticks, number_of_agents)
        lecturer = self.lecturers[0]
        # Give all students a writing and programming assignment.
        for student in self.students:
            for i in range(10):
                essay_deadline = Deadline(10, self.global_clock)
                programming_deadline = Deadline(10, self.global_clock)
                visit_concert_duration = Deadline(20, self.global_clock)
                write_essay_constraint = ResourceDelta({'essays_written': 1})
                write_code_constraint = ResourceDelta({'working_programs': 1})
                visit_concert_effect = ResourceDelta({'personal_enjoyment': 1})

                # Obligations that responsibilities can be made from
                essay_writing = Obligation([essay_deadline,
                                            write_essay_constraint],
                                           name="write essay")
                programming_assignment = Obligation([programming_deadline,
                                                     write_code_constraint],
                                                    name="write program")
                visit_concert = Obligation([visit_concert_duration,
                                            visit_concert_effect],
                                           name="visit concert")

                # Importance ramps up with the round index i (0.0 .. 0.675).
                lecturer.delegate_responsibility(copy(essay_writing),
                                                 [i * 0.075
                                                  for item in essay_writing.constraint_set],
                                                 student)
                lecturer.delegate_responsibility(copy(programming_assignment),
                                                 [i * 0.075
                                                  for item in programming_assignment.constraint_set],
                                                 student)
                lecturer.delegate_responsibility(copy(visit_concert),
                                                 [i * 0.075
                                                  for item in visit_concert.constraint_set],
                                                 student)
        if complete_ticks:
            self.global_clock.tick_toc()

    def test_adopting_new_behaviour(self):
        """Compare mean responsibility judgements with and without lecturer advice
        across increasing tick budgets (15..150), then plot both series."""
        gradual_trial_results = []
        number_of_trials = 10
        number_of_ticks = 0
        while number_of_ticks < 150:
            # trial_results[0]: advised runs; trial_results[1]: unadvised runs.
            trial_results = [[], []]
            number_of_ticks += 15
            # A set of trials where lecturers advise bad students
            for i in range(number_of_trials):
                self.trial(complete_ticks=False,
                           max_ticks=number_of_ticks)
                lecturer = self.lecturers[0]
                # First half of the run: tick manually, advising under-performers.
                while self.global_clock.current_tick < self.global_clock.max_ticks / 2:
                    self.global_clock.tick()
                    for student in self.students:
                        if lecturer.general_responsibility_judgement(student) < lecturer.basic_judgement_responsible:
                            lecturer.advise(student)
                # Run the remaining ticks to completion.
                self.global_clock.tick_toc()
                trial_results[0].append(mean([lecturer.general_responsibility_judgement(student)
                                              for student in self.students]))
                self.tearDown()
                print(number_of_ticks)
            trial_results[0] = mean(trial_results[0])
            # A set of trials where lecturers do not advise
            for i in range(number_of_trials):
                self.trial(complete_ticks=True,
                           max_ticks=number_of_ticks)
                lecturer = self.lecturers[0]
                trial_results[1].append(mean([lecturer.general_responsibility_judgement(student)
                                              for student in self.students]))
            trial_results[1] = mean(trial_results[1])
            gradual_trial_results.append(trial_results)
            print(number_of_ticks)
        print(gradual_trial_results)
        # NOTE(review): plt.show() blocks until the window is closed, which stalls
        # automated test runs — consider savefig() for CI. TODO confirm intent.
        plt.plot(range(15, 151, 15), [trial[0] for trial in gradual_trial_results], 'bs',
                 range(15, 151, 15), [trial[1] for trial in gradual_trial_results], 'g^')
        plt.xlabel('Number of ticks')
        plt.ylabel('Average degree of general responsibleness over 10 ticks')
        plt.show()

        # If the experiment is successful, results from the trials involving advice should result in more responsible
        # ...agents overall, because their responsibility is directed by advice via the formalism's interpretation
        # ...factors.
        # NOTE(review): trial_results[0] and trial_results[1] were already reduced
        # to scalars above, so mean() is applied to a single number here, and only
        # the LAST while-iteration is asserted — possibly gradual_trial_results was
        # intended. TODO confirm against resp_base.mean's accepted input types.
        self.assertTrue(mean(trial_results[0]) > mean(trial_results[1]))
| probablytom/msci-model | tests/test_new_behaviour_graphs.py | Python | mit | 7,344 | [
"VisIt"
] | a553a603cbaf30bb51cd9a17f5a0f580ac81a23aebbfb70ebde7e50fd431d92e |
import random
import numpy as np
from operator import itemgetter
from ase.ga.offspring_creator import OffspringCreator
from ase.ga.utilities import get_distance_matrix, get_nndist
from ase import Atoms
class Mutation(OffspringCreator):
    """Base class for all particle mutation type operators.
    Do not call this class directly."""

    def __init__(self, num_muts=1):
        OffspringCreator.__init__(self, num_muts=num_muts)
        self.descriptor = 'Mutation'
        # A mutation operates on a single parent structure.
        self.min_inputs = 1

    @classmethod
    def interchange2(cls, atoms, i1, i2):
        """Switches identity of the atoms on index i1 and i2 in
        the supplied atoms object."""
        # Swapping the two positions (symbols stay bound to their indices) is
        # equivalent to swapping the atoms' identities.
        p1 = atoms[int(i1)].position.copy()
        atoms[int(i1)].position = atoms[int(i2)].position.copy()
        atoms[int(i2)].position = p1

    @classmethod
    def get_atomic_configuration(cls, atoms, elements=None, eps=4e-2):
        """Returns the atomic configuration of the particle as a list of
        lists. Each list contain the indices of the atoms sitting at the
        same distance from the geometrical center of the particle. Highly
        symmetrical particles will often have many atoms in each shell.

        For further elaboration see:
        J. Montejano-Carrizales and J. Moran-Lopez, Geometrical
        characteristics of compact nanoclusters, Nanostruct. Mater., 1,
        5, 397-409 (1992)

        Parameters:

        elements: Only take into account the elements specified in this
            list. Default is to take all elements into account.

        eps: The distance allowed to separate elements within each shell."""
        # Work on a copy so the caller's atoms object is left untouched.
        atoms = atoms.copy()
        if elements is None:
            e = list(set(atoms.get_chemical_symbols()))
        else:
            e = elements
        atoms.set_constraint()
        atoms.center()
        # Diagonal of cell/2 == the point the particle was just centered on.
        geo_mid = np.array([(atoms.get_cell() / 2.)[i][i] for i in range(3)])
        dists = [(np.linalg.norm(geo_mid - atoms[i].position), i)
                 for i in range(len(atoms))]
        dists.sort(key=itemgetter(0))
        atomic_conf = []
        old_dist = -10.
        # Consecutive sorted distances within eps of each other share a shell.
        for dist, i in dists:
            if abs(dist - old_dist) > eps:
                atomic_conf.append([i])
            else:
                atomic_conf[-1].append(i)
            old_dist = dist
        # If only a subset of elements was requested, prune the rest from
        # every shell (two passes to avoid mutating while iterating).
        sorted_elems = sorted(set(atoms.get_chemical_symbols()))
        if e is not None and sorted(e) != sorted_elems:
            for shell in atomic_conf:
                torem = []
                for i in shell:
                    if atoms[i].symbol not in e:
                        torem.append(i)
                for i in torem:
                    shell.remove(i)
        return atomic_conf

    @classmethod
    def get_list_of_possible_permutations(cls, atoms, l1, l2):
        """Returns a list of available permutations from the two
        lists of indices, l1 and l2. Checking that identical elements
        are not permuted."""
        possible_permutations = []
        for i in l1:
            for j in l2:
                # Only pairs of different chemical symbols are useful swaps.
                if atoms[int(i)].symbol != atoms[int(j)].symbol:
                    possible_permutations.append((i, j))
        return possible_permutations
class RandomMutation(Mutation):
    """Moves a random atom the supplied length in a random direction."""

    def __init__(self, length=2., num_muts=1):
        Mutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'RandomMutation'
        self.length = length

    def mutate(self, atoms):
        """ Does the actual mutation. """
        # Pick one atom and displace it; all others are copied unchanged.
        chosen = random.choice(range(len(atoms)))
        mutant = Atoms()
        for atom in atoms:
            if atom.index == chosen:
                atom.position += self.random_vector(self.length)
            mutant.append(atom)
        return mutant

    def get_new_individual(self, parents):
        parent = parents[0]
        offspring = self.mutate(parent)
        offspring = self.initialize_individual(parent, offspring)
        offspring.info['data']['parents'] = [parent.info['confid']]
        return (self.finalize_individual(offspring),
                self.descriptor + ': {0}'.format(parent.info['confid']))

    @classmethod
    def random_vector(cls, l):
        """return random vector of length l"""
        # Draw three components in [-1, 1), then rescale to length l.
        components = [random.random() * 2 - 1 for _ in range(3)]
        norm = np.linalg.norm(np.array(components))
        return np.array([c * l / norm for c in components])
class RandomPermutation(Mutation):
    """Permutes two random atoms.

    Parameters:

    num_muts: the number of times to perform this operation."""

    def __init__(self, elements=None, num_muts=1):
        Mutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'RandomPermutation'
        self.elements = elements

    def get_new_individual(self, parents):
        parent = parents[0].copy()
        # A permutation is meaningless on a single-species particle.
        assert len(set(parent.numbers)) > 1, \
            'Permutations with one atomic type is not valid'

        indi = self.initialize_individual(parent)
        indi.info['data']['parents'] = [parent.info['confid']]

        # Permute in place num_muts times, then copy the atoms over.
        for _ in range(self.num_muts):
            RandomPermutation.mutate(parent, self.elements)
        for atom in parent:
            indi.append(atom)

        return (self.finalize_individual(indi),
                self.descriptor + ': {0}'.format(parent.info['confid']))

    @classmethod
    def mutate(cls, atoms, elements=None):
        """Do the actual permutation."""
        if elements is None:
            candidates = range(len(atoms))
        else:
            candidates = [a.index for a in atoms if a.symbol in elements]
        first = random.choice(candidates)
        # Resample until the two picked atoms differ in species.
        second = random.choice(candidates)
        while atoms[first].symbol == atoms[second].symbol:
            second = random.choice(candidates)
        Mutation.interchange2(atoms, first, second)
class COM2surfPermutation(Mutation):
    """The Center Of Mass to surface (COM2surf) permutation operator
    described in
    S. Lysgaard et al., Top. Catal., 2014, 57 (1-4), pp 33-39
    Parameters:
    elements: which elements should be included in this permutation,
    for example: include all metals and exclude all adsorbates
    min_ratio: minimum ratio of each element in the core or surface region.
    If elements=[a, b] then ratio of a is Na / (Na + Nb) (N: Number of).
    If less than minimum ratio is present in the core, the region defining
    the core will be extended untill the minimum ratio is met, and vice
    versa for the surface region. It has the potential reach the
    recursive limit if an element has a smaller total ratio in the
    complete particle. In that case remember to decrease this min_ratio.
    num_muts: the number of times to perform this operation.
    """
    def __init__(self, elements=None, min_ratio=0.25, num_muts=1):
        Mutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'COM2surfPermutation'
        self.min_ratio = min_ratio
        self.elements = elements
    def get_new_individual(self, parents):
        """Mutate a copy of the first parent num_muts times and return
        the finalized individual plus a description string."""
        f = parents[0].copy()
        diffatoms = len(set(f.numbers))
        assert diffatoms > 1, 'Permutations with one atomic type is not valid'
        indi = self.initialize_individual(f)
        indi.info['data']['parents'] = [f.info['confid']]
        for _ in range(self.num_muts):
            elems = self.elements
            COM2surfPermutation.mutate(f, elems, self.min_ratio)
        for atom in f:
            indi.append(atom)
        return (self.finalize_individual(indi),
                self.descriptor + ': {0}'.format(f.info['confid']))
    @classmethod
    def mutate(cls, atoms, elements, min_ratio):
        """Performs the COM2surf permutation."""
        # Sanity check: every considered element must already exceed
        # min_ratio in the whole particle, otherwise the recursive
        # core/shell searches below could never terminate.
        ac = atoms.copy()
        if elements is not None:
            del ac[[a.index for a in ac if a.symbol not in elements]]
        syms = ac.get_chemical_symbols()
        for el in set(syms):
            assert syms.count(el) / float(len(syms)) > min_ratio
        # Shells of atoms grouped by distance from the particle center.
        atomic_conf = Mutation.get_atomic_configuration(atoms,
                                                        elements=elements)
        core = COM2surfPermutation.get_core_indices(atoms,
                                                    atomic_conf,
                                                    min_ratio)
        shell = COM2surfPermutation.get_shell_indices(atoms,
                                                      atomic_conf,
                                                      min_ratio)
        permuts = Mutation.get_list_of_possible_permutations(atoms,
                                                             core,
                                                             shell)
        # Swap one randomly chosen core/shell pair in place.
        swap = random.choice(permuts)
        Mutation.interchange2(atoms, *swap)
    @classmethod
    def get_core_indices(cls, atoms, atomic_conf, min_ratio, recurs=0):
        """Recursive function that returns the indices in the core subject to
        the min_ratio constraint. The indices are found from the supplied
        atomic configuration."""
        elements = list(set([atoms[i].symbol
                             for subl in atomic_conf for i in subl]))
        # Start from the innermost shell(s); grow until non-empty.
        core = [i for subl in atomic_conf[:1 + recurs] for i in subl]
        while len(core) < 1:
            recurs += 1
            core = [i for subl in atomic_conf[:1 + recurs] for i in subl]
        # If any element is under-represented in this core, recurse with
        # one more shell included.
        for elem in elements:
            ratio = len([i for i in core
                         if atoms[i].symbol == elem]) / float(len(core))
            if ratio < min_ratio:
                return COM2surfPermutation.get_core_indices(atoms,
                                                            atomic_conf,
                                                            min_ratio,
                                                            recurs + 1)
        return core
    @classmethod
    def get_shell_indices(cls, atoms, atomic_conf, min_ratio, recurs=0):
        """Recursive function that returns the indices in the surface
        subject to the min_ratio constraint. The indices are found from
        the supplied atomic configuration."""
        elements = list(set([atoms[i].symbol
                             for subl in atomic_conf for i in subl]))
        # Start from the outermost shell(s); grow inwards until non-empty.
        shell = [i for subl in atomic_conf[-1 - recurs:] for i in subl]
        while len(shell) < 1:
            recurs += 1
            shell = [i for subl in atomic_conf[-1 - recurs:] for i in subl]
        # If any element is under-represented in this shell, recurse with
        # one more shell included.
        for elem in elements:
            ratio = len([i for i in shell
                         if atoms[i].symbol == elem]) / float(len(shell))
            if ratio < min_ratio:
                return COM2surfPermutation.get_shell_indices(atoms,
                                                             atomic_conf,
                                                             min_ratio,
                                                             recurs + 1)
        return shell
class _NeighborhoodPermutation(Mutation):
    """Helper class that holds common functions to all permutations
    that look at the neighborhoods of each atoms."""
    @classmethod
    def get_possible_poor2rich_permutations(cls, atoms, inverse=False,
                                            recurs=0, distance_matrix=None):
        """Return swappable (poor, rich) index pairs of atoms ranked by
        how many nearest neighbors share each atom's chemical symbol.
        inverse=True reverses the ranking (rich2poor); recurs widens the
        tolerance bands until at least one permutation exists."""
        dm = distance_matrix
        if dm is None:
            dm = get_distance_matrix(atoms)
        # Adding a small value (0.2) to overcome slight variations
        # in the average bond length
        nndist = get_nndist(atoms, dm) + 0.2
        # Count, for every atom, how many nearest neighbors have the same
        # chemical symbol.
        same_neighbors = {}
        def f(x):
            return x[1]
        for i, atom in enumerate(atoms):
            same_neighbors[i] = 0
            neighbors = [j for j in range(len(dm[i])) if dm[i][j] < nndist]
            for n in neighbors:
                if atoms[n].symbol == atom.symbol:
                    same_neighbors[i] += 1
        sorted_same = sorted(same_neighbors.items(), key=f)
        if inverse:
            sorted_same.reverse()
        # "Poor" atoms sit within `recurs` of the low end of the ranking,
        # "rich" atoms within `recurs` of the high end.
        poor_indices = [j[0] for j in sorted_same
                        if abs(j[1] - sorted_same[0][1]) <= recurs]
        rich_indices = [j[0] for j in sorted_same
                        if abs(j[1] - sorted_same[-1][1]) <= recurs]
        permuts = Mutation.get_list_of_possible_permutations(atoms,
                                                             poor_indices,
                                                             rich_indices)
        if len(permuts) == 0:
            # No valid swap at this tolerance; retry with wider bands.
            _NP = _NeighborhoodPermutation
            return _NP.get_possible_poor2rich_permutations(atoms, inverse,
                                                           recurs + 1, dm)
        return permuts
class Poor2richPermutation(_NeighborhoodPermutation):
    """The poor to rich (Poor2rich) permutation operator described in
    S. Lysgaard et al., Top. Catal., 2014, 57 (1-4), pp 33-39
    Permutes two atoms from regions short of the same elements, to
    regions rich in the same elements.
    (Inverse of Rich2poorPermutation)
    Parameters:
    elements: Which elements to take into account in this permutation.
    None (the default) means all elements present in the parent.
    """
    def __init__(self, elements=None, num_muts=1):
        # The previous default was a shared mutable list ([]) -- a classic
        # Python pitfall -- and an empty element set made mutate() operate
        # on an empty copy.  None now mirrors Rich2poorPermutation and
        # means "use every element present in the parent".
        _NeighborhoodPermutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'Poor2richPermutation'
        self.elements = elements
    def get_new_individual(self, parents):
        """Mutate a copy of the first parent num_muts times and return
        the finalized individual plus a description string."""
        f = parents[0].copy()
        diffatoms = len(set(f.numbers))
        assert diffatoms > 1, 'Permutations with one atomic type is not valid'
        indi = self.initialize_individual(f)
        indi.info['data']['parents'] = [f.info['confid']]
        # Fall back to every element in the parent when none were given,
        # consistent with Rich2poorPermutation.
        if self.elements is None:
            elems = list(set(f.get_chemical_symbols()))
        else:
            elems = self.elements
        for _ in range(self.num_muts):
            Poor2richPermutation.mutate(f, elems)
        for atom in f:
            indi.append(atom)
        return (self.finalize_individual(indi),
                self.descriptor + ': {0}'.format(f.info['confid']))
    @classmethod
    def mutate(cls, atoms, elements):
        """Swap one atom from an element-poor neighborhood with one from
        an element-rich neighborhood (in place)."""
        _NP = _NeighborhoodPermutation
        # Analyze a copy restricted to the requested elements, but keep a
        # map from the copy's indices back to the original ones.
        # (Previously the copy's indices were applied directly to `atoms`,
        # which swaps the wrong atoms whenever any atom is filtered out.)
        indices = [a.index for a in atoms if a.symbol in elements]
        ac = atoms.copy()
        del ac[[atom.index for atom in ac
                if atom.symbol not in elements]]
        permuts = _NP.get_possible_poor2rich_permutations(ac)
        swap = [indices[i] for i in random.choice(permuts)]
        Mutation.interchange2(atoms, *swap)
class Rich2poorPermutation(_NeighborhoodPermutation):
    """
    The rich to poor (Rich2poor) permutation operator described in
    S. Lysgaard et al., Top. Catal., 2014, 57 (1-4), pp 33-39
    Permutes two atoms from regions rich in the same elements, to
    regions short of the same elements.
    (Inverse of Poor2richPermutation)
    Parameters:
    elements: Which elements to take into account in this permutation
    """
    def __init__(self, elements=None, num_muts=1):
        _NeighborhoodPermutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'Rich2poorPermutation'
        self.elements = elements
    def get_new_individual(self, parents):
        """Mutate a copy of the first parent num_muts times and return
        the finalized individual plus a description string."""
        f = parents[0].copy()
        diffatoms = len(set(f.numbers))
        assert diffatoms > 1, 'Permutations with one atomic type is not valid'
        indi = self.initialize_individual(f)
        indi.info['data']['parents'] = [f.info['confid']]
        # Default to every element present in the parent.
        if self.elements is None:
            elems = list(set(f.get_chemical_symbols()))
        else:
            elems = self.elements
        for _ in range(self.num_muts):
            Rich2poorPermutation.mutate(f, elems)
        for atom in f:
            indi.append(atom)
        return (self.finalize_individual(indi),
                self.descriptor + ': {0}'.format(f.info['confid']))
    @classmethod
    def mutate(cls, atoms, elements):
        """Swap one atom from an element-rich neighborhood with one from
        an element-poor neighborhood (in place)."""
        _NP = _NeighborhoodPermutation
        # NOTE(review): the permutation indices are computed on the
        # filtered copy `ac` but applied to the unfiltered `atoms`; if the
        # element filter removes any atoms the indices no longer line up
        # and the wrong atoms are swapped -- TODO confirm intended usage.
        ac = atoms.copy()
        del ac[[atom.index for atom in ac
                if atom.symbol not in elements]]
        permuts = _NP.get_possible_poor2rich_permutations(ac,
                                                          inverse=True)
        swap = random.choice(permuts)
        Mutation.interchange2(atoms, *swap)
class SymmetricSubstitute(Mutation):
    """Permute all atoms within a subshell of the symmetric particle.
    The atoms within a subshell all have the same distance to the center,
    these are all equivalent under the particle point group symmetry.
    """
    def __init__(self, elements=None, num_muts=1):
        # NOTE(review): substitute() calls random.choice(self.elements),
        # so the default elements=None raises TypeError; a list of
        # candidate symbols appears to be required -- TODO confirm.
        Mutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'SymmetricSubstitute'
        self.elements = elements
    def substitute(self, atoms):
        """Does the actual substitution"""
        atoms = atoms.copy()
        # Shells of symmetry-equivalent atoms (same distance to center).
        aconf = self.get_atomic_configuration(atoms,
                                              elements=self.elements)
        # Pick one shell and convert all of its atoms to one element.
        itbm = random.randint(0, len(aconf) - 1)
        to_element = random.choice(self.elements)
        for i in aconf[itbm]:
            atoms[i].symbol = to_element
        return atoms
    def get_new_individual(self, parents):
        """Return the substituted offspring of the first parent together
        with a description string."""
        f = parents[0]
        indi = self.substitute(f)
        indi = self.initialize_individual(f, indi)
        indi.info['data']['parents'] = [f.info['confid']]
        return (self.finalize_individual(indi),
                self.descriptor + ': {0}'.format(f.info['confid']))
class RandomSubstitute(Mutation):
    """Substitutes one atom with another atom type. The possible atom types
    are supplied in the parameter elements"""
    def __init__(self, elements=None, num_muts=1):
        Mutation.__init__(self, num_muts=num_muts)
        self.descriptor = 'RandomSubstitute'
        self.elements = elements
    def substitute(self, atoms):
        """Does the actual substitution"""
        atoms = atoms.copy()
        if self.elements is None:
            elems = list(set(atoms.get_chemical_symbols()))
        else:
            # Copy so that removing the current symbol below does not
            # mutate self.elements.
            elems = self.elements[:]
        possible_indices = [a.index for a in atoms
                            if a.symbol in elems]
        # Pick one eligible atom and switch it to a *different* symbol.
        itbm = random.choice(possible_indices)
        elems.remove(atoms[itbm].symbol)
        new_symbol = random.choice(elems)
        atoms[itbm].symbol = new_symbol
        return atoms
    def get_new_individual(self, parents):
        """Return the substituted offspring of the first parent together
        with a description string."""
        f = parents[0]
        indi = self.substitute(f)
        indi = self.initialize_individual(f, indi)
        indi.info['data']['parents'] = [f.info['confid']]
        return (self.finalize_individual(indi),
                self.descriptor + ': {0}'.format(f.info['confid']))
| suttond/MODOI | ase/ga/particle_mutations.py | Python | lgpl-3.0 | 18,411 | [
"ASE"
] | 3598272a38d948016f7d781f81e98e6a38759df7ac32c290ddd701fe17c1385d |
#!/usr/bin/env python
# Standard packages
import os
import sys
import argparse
# Third-party packages
from toil.job import Job
# Package methods
from ddb import configuration
from ddb_ngsflow import gatk
from ddb_ngsflow import annotation
from ddb_ngsflow import pipeline
from ddb_ngsflow.align import bwa
from ddb_ngsflow.qc import qc
from ddb_ngsflow.coverage import sambamba
from ddb_ngsflow.variation import variation
from ddb_ngsflow.variation import mutect
from ddb_ngsflow.variation import platypus
from ddb_ngsflow.variation import vardict
from ddb_ngsflow.variation import scalpel
if __name__ == "__main__":
    # -- Command-line interface -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--samples_file',
                        help="Input configuration file for samples")
    parser.add_argument('-c', '--configuration',
                        help="Configuration file for various settings")
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    args.logLevel = "INFO"
    # -- Output directory layout ------------------------------------------
    sys.stdout.write("Setting up analysis directory\n")
    # Create every expected output directory up front (replaces six
    # copy-pasted exists/makedirs stanzas).
    for directory in ("Logs", "FinalVCFs", "FinalBAMs", "Intermediates",
                      "Coverage", "Reports"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    # -- Configuration and sample sheets ----------------------------------
    sys.stdout.write("Parsing configuration data\n")
    config = configuration.configure_runtime(args.configuration)
    sys.stdout.write("Parsing sample data\n")
    samples = configuration.configure_samples(args.samples_file, config)
    # Workflow Graph definition. The following workflow definition should
    # create a valid Directed Acyclic Graph (DAG)
    root_job = Job.wrapJobFn(pipeline.spawn_batch_jobs, cores=1)
    fastqc_job = Job.wrapJobFn(qc.run_fastqc, config, samples)
    # Per sample jobs
    for sample in samples:
        # Alignment and Refinement Stages
        align_job = Job.wrapJobFn(bwa.run_bwa_mem, config, sample, samples,
                                  cores=int(config['bwa']['num_cores']),
                                  memory="{}G".format(config['bwa']['max_mem']))
        add_job = Job.wrapJobFn(gatk.add_or_replace_readgroups, config, sample,
                                align_job.rv(),
                                cores=1,
                                memory="{}G".format(config['picard-add']['max_mem']))
        creator_job = Job.wrapJobFn(gatk.realign_target_creator, config, sample,
                                    add_job.rv(),
                                    cores=int(config['gatk-realign']['num_cores']),
                                    memory="{}G".format(config['gatk-realign']['max_mem']))
        realign_job = Job.wrapJobFn(gatk.realign_indels, config, sample,
                                    add_job.rv(), creator_job.rv(),
                                    cores=1,
                                    memory="{}G".format(config['gatk-realign']['max_mem']))
        recal_job = Job.wrapJobFn(gatk.recalibrator, config, sample,
                                  realign_job.rv(),
                                  cores=int(config['gatk-recal']['num_cores']),
                                  memory="{}G".format(config['gatk-recal']['max_mem']))
        # Variant Calling.  The recalibrated BAM name was previously
        # re-formatted inline at every call site.
        recalibrated_bam = "{}.recalibrated.sorted.bam".format(sample)
        spawn_variant_job = Job.wrapJobFn(pipeline.spawn_variant_jobs)
        coverage_job = Job.wrapJobFn(sambamba.sambamba_region_coverage, config,
                                     sample, samples, recalibrated_bam,
                                     cores=int(config['gatk']['num_cores']),
                                     memory="{}G".format(config['gatk']['max_mem']))
        mutect_job = Job.wrapJobFn(mutect.mutect_single, config, sample,
                                   samples, recalibrated_bam,
                                   cores=1,
                                   memory="{}G".format(config['mutect']['max_mem']))
        vardict_job = Job.wrapJobFn(vardict.vardict_single, config, sample,
                                    samples, recalibrated_bam,
                                    cores=int(config['vardict']['num_cores']),
                                    memory="{}G".format(config['vardict']['max_mem']))
        scalpel_job = Job.wrapJobFn(scalpel.scalpel_single, config, sample,
                                    samples, recalibrated_bam,
                                    cores=int(config['scalpel']['num_cores']),
                                    memory="{}G".format(config['scalpel']['max_mem']))
        platypus_job = Job.wrapJobFn(platypus.platypus_single, config, sample,
                                     samples, recalibrated_bam,
                                     cores=int(config['platypus']['num_cores']),
                                     memory="{}G".format(config['platypus']['max_mem']))
        # Need to filter for on target only results somewhere as well
        spawn_normalization_job = Job.wrapJobFn(pipeline.spawn_variant_jobs)
        normalization_job2 = Job.wrapJobFn(variation.vt_normalization, config,
                                           sample, "mutect",
                                           "{}.mutect.vcf".format(sample),
                                           cores=1,
                                           memory="{}G".format(config['gatk']['max_mem']))
        normalization_job3 = Job.wrapJobFn(variation.vt_normalization, config,
                                           sample, "vardict",
                                           "{}.vardict.vcf".format(sample),
                                           cores=1,
                                           memory="{}G".format(config['gatk']['max_mem']))
        normalization_job4 = Job.wrapJobFn(variation.vt_normalization, config,
                                           sample, "scalpel",
                                           "{}.scalpel.vcf".format(sample),
                                           cores=1,
                                           memory="{}G".format(config['gatk']['max_mem']))
        normalization_job5 = Job.wrapJobFn(variation.vt_normalization, config,
                                           sample, "platypus",
                                           "{}.platypus.vcf".format(sample),
                                           cores=1,
                                           memory="{}G".format(config['gatk']['max_mem']))
        # Merge the normalized per-caller VCFs and annotate/filter them.
        callers = "mutect,vardict,scalpel,platypus"
        merge_job = Job.wrapJobFn(variation.merge_variant_calls, config, sample,
                                  callers, (normalization_job2.rv(),
                                            normalization_job3.rv(),
                                            normalization_job4.rv(),
                                            normalization_job5.rv()))
        gatk_annotate_job = Job.wrapJobFn(gatk.annotate_vcf, config, sample,
                                          merge_job.rv(), recalibrated_bam,
                                          cores=int(config['gatk-annotate']['num_cores']),
                                          memory="{}G".format(config['gatk-annotate']['max_mem']))
        gatk_filter_job = Job.wrapJobFn(gatk.filter_variants, config, sample,
                                        gatk_annotate_job.rv(),
                                        cores=1,
                                        memory="{}G".format(config['gatk-filter']['max_mem']))
        snpeff_job = Job.wrapJobFn(annotation.snpeff, config, sample,
                                   "{}.filtered.vcf".format(sample),
                                   cores=int(config['snpeff']['num_cores']),
                                   memory="{}G".format(config['snpeff']['max_mem']))
        vcfanno_job = Job.wrapJobFn(annotation.vcfanno, config, sample, samples,
                                    "{}.snpEff.{}.vcf".format(sample, config['snpeff']['reference']),
                                    cores=int(config['vcfanno']['num_cores']),
                                    memory="{}G".format(config['vcfanno']['max_mem']))
        # Create workflow from created jobs
        root_job.addChild(align_job)
        align_job.addChild(add_job)
        add_job.addChild(creator_job)
        creator_job.addChild(realign_job)
        realign_job.addChild(recal_job)
        recal_job.addChild(spawn_variant_job)
        spawn_variant_job.addChild(coverage_job)
        spawn_variant_job.addChild(mutect_job)
        spawn_variant_job.addChild(vardict_job)
        spawn_variant_job.addChild(scalpel_job)
        spawn_variant_job.addChild(platypus_job)
        spawn_variant_job.addFollowOn(spawn_normalization_job)
        spawn_normalization_job.addChild(normalization_job2)
        spawn_normalization_job.addChild(normalization_job3)
        spawn_normalization_job.addChild(normalization_job4)
        spawn_normalization_job.addChild(normalization_job5)
        spawn_normalization_job.addFollowOn(merge_job)
        merge_job.addChild(gatk_annotate_job)
        gatk_annotate_job.addChild(gatk_filter_job)
        gatk_filter_job.addChild(snpeff_job)
        snpeff_job.addChild(vcfanno_job)
    root_job.addFollowOn(fastqc_job)
    # Start workflow execution
    Job.Runner.startToil(root_job, args)
| dgaston/ddb-scripts | workflow-somatic_amplicon_nofreebayes_pindel.py | Python | mit | 9,674 | [
"BWA"
] | acac01ddb6e15661ca0d27bd350e62d112fae2518ccfed486ff3b21decf26cfd |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from search.views import search
BACKEND_NAME = 'Eraldo Energy backend'
# Brand the Django admin interface with the project name.
admin.site.site_header = BACKEND_NAME
admin.site.site_title = BACKEND_NAME
# URL routing; order matters -- earlier patterns win.
urlpatterns = [
    # Meta information
    url(r'^robots\.txt/$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
    url(r'^humans\.txt/$', TemplateView.as_view(template_name='humans.txt', content_type='text/plain')),
    # Django Admin, use {% url 'admin:index' %}
    url(r'^{}/'.format(settings.ADMIN_URL), include(admin.site.urls)),
    # url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    # url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # User management
    url(r'^users/', include('eraldoenergy.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^dance/', include('dance.urls', namespace='dance')),
    url(r'^contact/', include('contact.urls', namespace='contact')),
    url(r'^items/', include('inventory.urls', namespace='inventory')),
    url(r'', include('pages.urls', namespace='pages')),
    # CMS wagtail
    url(r'^cms/', include(wagtailadmin_urls)),
    url(r'^documents/', include(wagtaildocs_urls)),
    url(r'^search/$', search, name='search'), # optional
    # For anything not caught by a more specific rule above, hand over to
    # Wagtail's serving mechanism
    url(r'', include(wagtail_urls)),
# Serve user-uploaded media in development.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
| Eraldo/eraldoenergy | config/urls.py | Python | bsd-3-clause | 2,433 | [
"VisIt"
] | ac9b91b1dce86176065cbcd58c3dc4716476f1b922af91f71b59ef4398ef10a9 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from stoq.gui.pos import TemporarySaleItem
from stoqlib.domain.sellable import Sellable
from stoqlib.domain.views import ProductFullStockView
from stoqlib.gui.editors.producteditor import ProductEditor
from stoqlib.gui.search.sellablesearch import (SellableSearch,
PurchaseSellableSearch,
SaleSellableSearch)
from stoqlib.gui.wizards.productwizard import ProductCreateWizard
from stoqlib.gui.test.uitestutils import GUITest
class TestSellableSearch(GUITest):
    """Snapshot tests for the generic SellableSearch dialog."""
    def _show_search(self):
        # Build the dialog and populate its result list.
        search = SellableSearch(self.store)
        search.search.refresh()
        #search.results.select(search.results[0])
        return search
    def test_search(self):
        search = self._show_search()
        self.check_search(search, 'sellable-no-filter')
        # Filtering by a partial description should narrow the results.
        search.set_searchbar_search_string('cal')
        search.search.refresh()
        self.check_search(search, 'sellable-string-filter')
class TestSaleSellableSearch(GUITest):
    """Constructor validation and snapshot tests for SaleSellableSearch."""
    @mock.patch('stoqlib.gui.search.sellablesearch.SellableSearch.set_message')
    def test_create(self, set_message):
        sellable = self.create_sellable()
        self.create_storable(product=sellable.product)
        sale_item = TemporarySaleItem(sellable=sellable, quantity=1)
        search = SaleSellableSearch(self.store,
                                    sale_items=[sale_item], quantity=1)
        # sale_items is incompatible with multiple selection and requires
        # an explicit quantity.
        self.assertRaises(TypeError, SaleSellableSearch, self.store,
                          sale_items=[sale_item],
                          selection_mode=gtk.SELECTION_MULTIPLE)
        self.assertRaises(TypeError, SaleSellableSearch, self.store,
                          sale_items=[sale_item], quantity=None)
        search = SaleSellableSearch(self.store)
        search.search.refresh()
        self.check_search(search, 'sale-sellable-no-filter')
        # info_message should be forwarded to set_message.
        search = SaleSellableSearch(self.store, info_message='test')
        set_message.assert_called_once_with('test')
        search = SaleSellableSearch(self.store, search_str='cal')
        self.check_search(search, 'sale-sellable-string-filter')
class TestPurchaseSellableSearch(GUITest):
    """Tests that PurchaseSellableSearch opens the right editor/wizard in
    a fresh store when editing or creating a product."""
    @mock.patch('stoqlib.gui.search.searcheditor.api.new_store')
    @mock.patch('stoqlib.gui.search.searcheditor.run_dialog')
    def test_run_editor(self, run_dialog, new_store):
        run_dialog.return_value = None
        new_store.return_value = self.store
        query = Sellable.get_unblocked_sellables_query(self.store)
        dialog = PurchaseSellableSearch(store=self.store,
                                        search_spec=ProductFullStockView,
                                        search_query=query)
        dialog.search.refresh()
        dialog.results.select(dialog.results[0])
        product = dialog.results[0].product
        # Editing the selection should open ProductEditor for its product.
        with mock.patch.object(self.store, 'commit'):
            with mock.patch.object(self.store, 'close'):
                self.click(dialog._toolbar.edit_button)
                run_dialog.assert_called_once_with(ProductEditor, dialog,
                                                   self.store, product,
                                                   visual_mode=False)
    @mock.patch('stoqlib.gui.search.searcheditor.api.new_store')
    @mock.patch('stoqlib.gui.wizards.productwizard.run_dialog')
    def test_run_wizard(self, run_dialog, new_store):
        run_dialog.return_value = None
        new_store.return_value = self.store
        query = Sellable.get_unblocked_sellables_query(self.store)
        dialog = PurchaseSellableSearch(store=self.store,
                                        search_spec=ProductFullStockView,
                                        search_query=query)
        # The "new" button should launch the product creation wizard.
        with mock.patch.object(self.store, 'commit'):
            with mock.patch.object(self.store, 'close'):
                self.click(dialog._toolbar.new_button)
                run_dialog.assert_called_once_with(ProductCreateWizard, dialog,
                                                   self.store)
| tiagocardosos/stoq | stoqlib/gui/test/test_sellablesearch.py | Python | gpl-2.0 | 4,990 | [
"VisIt"
] | 2a150cc53f59930b080d0033c8baffe4e3479dec5e4c2cdf0636d8a33f1cf7ce |
import codecs
import inspect
import json
import logging
import math
import pickle
import os.path
import numpy as np
from . import objects
from . import connections
from . import simulator
logger = logging.getLogger(__name__)
class Model(object):
"""A model contains a single network and the ability to
run simulations of that network.
Model is the first part of the API that modelers
become familiar with, and it is possible to create
many of the models that one would want to create simply
by making a model and calling functions on that model.
For example, a model that implements a communication channel
between two ensembles of neurons can be created with::
import nengo
model = nengo.Model("Communication channel")
input = model.make_node("Input", values=[0])
pre = model.make_ensemble("In", neurons=100, dimensions=1)
post = model.make_ensemble("Out", neurons=100, dimensions=1)
model.connect(input, pre)
model.connect(pre, post)
Parameters
----------
name : str
Name of the model.
seed : int, optional
Random number seed that will be fed to the random number generator.
Setting this seed makes the creation of the model
a deterministic process; however, each new ensemble
in the network advances the random number generator,
so if the network creation code changes, the entire model changes.
fixed_seed : int, optional
Random number seed that will be fed to the random number generator
before each random process. Unlike setting ``seed``,
each new ensemble in the network will use ``fixed_seed``,
meaning that ensembles with the same properties will have the same
set of neurons generated.
Attributes
----------
name : str
Name of the model
seed : int
Random seed used by the model.
time : float
The amount of time that this model has been simulated.
metadata : dict
An editable dictionary that modelers can use to store
extra information about a network.
properties : read-only dict
A collection of basic information about
a network (e.g., number of neurons, number of synapses, etc.)
"""
    def __init__(self, name, seed=None, fixed_seed=None,
                 simulator=simulator.Simulator, dt=0.001):
        # Low-level build products collected while the model is assembled.
        self.dt = dt
        self.signals = set()
        self.nonlinearities = set()
        self.encoders = set()
        self.decoders = set()
        self.transforms = set()
        self.filters = set()
        self.probes = set()
        # High-level bookkeeping: named objects, aliases, probe records
        # and data gathered from simulation runs.
        self.objs = {}
        self.aliases = {}
        self.probed = {}
        self.data = {}
        self.name = name
        self.simulator = simulator
        # Seed the model-level RNG; fixed_seed (if given) is instead
        # handed out verbatim for every random process (_get_new_seed).
        self.seed = np.random.randint(2**31-1) if seed is None else seed
        self.rng = np.random.RandomState(self.seed)
        self.fixed_seed = fixed_seed
        # Built-in signals tracking simulation time and step count.
        self.simtime = self.add(objects.Signal(name='simtime'))
        self.steps = self.add(objects.Signal(name='steps'))
        self.one = self.add(objects.Constant(1, value=[1.0], name='one'))
        # Automatically probe these
        self.probe(self.simtime)
        self.probe(self.steps)
        # -- steps counts by 1.0
        self.add(objects.Filter(1.0, self.one, self.steps))
        self.add(objects.Filter(1.0, self.steps, self.steps))
        # simtime <- dt * steps
        self.add(objects.Filter(dt, self.one, self.simtime))
        self.add(objects.Filter(dt, self.steps, self.simtime))
def _get_new_seed(self):
return self.rng.randint(2**31-1) if self.fixed_seed is None \
else self.fixed_seed
    def __str__(self):
        return "Model: " + self.name
    # NOTE(review): Connection, Ensemble, Node and Network are never
    # imported in this module (only `objects`, `connections` and
    # `simulator` are), so accessing any of the four properties below
    # raises NameError.  They presumably need qualification, e.g.
    # objects.Ensemble -- TODO confirm against the package layout.
    @property
    def connections(self):
        return [o for o in self.objs.values() if isinstance(o, Connection)]
    @property
    def ensembles(self):
        return [o for o in self.objs.values() if isinstance(o, Ensemble)]
    @property
    def nodes(self):
        return [o for o in self.objs.values() if isinstance(o, Node)]
    @property
    def networks(self):
        return [o for o in self.objs.values() if isinstance(o, Network)]
### I/O
    def save(self, fname, format=None):
        """Save this model to a file.
        So far, JSON and Pickle are the possible formats.
        `format` may be given explicitly ('json' / '.json'); otherwise it
        is inferred from the file extension (splitext keeps the dot).
        """
        if format is None:
            format = os.path.splitext(fname)[1]
        if format in ('json', '.json'):
            with codecs.open(fname, 'w', encoding='utf-8') as f:
                json.dump(self.to_json(), f, sort_keys=True, indent=2)
            logger.info("Saved %s successfully.", fname)
        else:
            # Default to pickle
            with open(fname, 'wb') as f:
                pickle.dump(self, f)
            logger.info("Saved %s successfully.", fname)
    def to_json(self):
        """Serialize the model into a JSON-compatible dict.
        Relies on each contained object providing its own to_json();
        the simulator, aliases, objs, probed and data members are
        deliberately left out (see the commented lines below).
        """
        d = {
            '__class__': self.__module__ + '.' + self.__class__.__name__,
            'name': self.name,
            'dt': self.dt,
            # 'simulator': ?? We probably don't want to serialize this
        }
        d['signals'] = [sig.to_json() for sig in self.signals]
        d['nonlinearities'] = [nl.to_json() for nl in self.nonlinearities]
        d['encoders'] = [enc.to_json() for enc in self.encoders]
        d['decoders'] = [dec.to_json() for dec in self.decoders]
        d['transforms'] = [trans.to_json() for trans in self.transforms]
        d['filters'] = [filt.to_json() for filt in self.filters]
        d['probes'] = [pr.to_json() for pr in self.probes]
        # d['aliases'] = self.aliases
        # d['objs'] = {k: v.to_json() for k, v in self.objs.items()}
        # d['probed'] = ?? Deal with later!
        # d['data'] = ?? Do we want to serialize this?
        return d
@staticmethod
def load(self, fname, format=None):
"""Load this model from a file.
So far, JSON and Pickle are the possible formats.
"""
if format is None:
format = os.path.splitext(fname)[1]
if format == 'json':
with codecs.open(fname, 'r', encoding='utf-8') as f:
return Model.from_json(json.load(f))
else:
# Default to pickle
with open(fname, 'rb') as f:
return pickle.load(f)
raise IOError("Could not load {}".format(fname))
### Simulation methods
    def reset(self):
        """Reset the state of the simulation.
        Runs through all nodes, then ensembles, then connections and then
        probes in the network and calls their reset functions.
        NOTE(review): assumes run() has been called at least once;
        otherwise self.sim_obj does not exist yet (AttributeError).
        """
        logger.debug("Resetting simulator for %s", self.name)
        self.sim_obj.reset()
    def run(self, time, dt=0.001, output=None, stop_when=None):
        """Runs a simulation of the model.
        Parameters
        ----------
        time : float
            How long to run the simulation, in seconds.  Successive calls
            continue the simulation for ``time`` more seconds; call
            :func:`nengo.Model.reset()` to start over.
        dt : float, optional
            NOTE(review): currently ignored -- the step count below is
            computed from ``self.dt`` (set in ``__init__``), not from
            this argument.  TODO: use or remove it.
        output : str or None, optional
            NOTE(review): currently ignored; probed data is always
            returned as a dictionary.
        stop_when : optional
            NOTE(review): currently ignored.
        Returns
        -------
        data : dictionary
            All of the probed data.
        """
        # Lazily build the simulator on the first call, so the model can
        # still be modified up until then.
        if getattr(self, 'sim_obj', None) is None:
            logger.debug("Creating simulator for %s", self.name)
            self.sim_obj = self.simulator(self)
        steps = int(time // self.dt)
        logger.debug("Running for %f seconds; %d steps", time, steps)
        self.sim_obj.run_steps(steps)
        # Copy the latest probe readings into self.data.
        for k in self.probed:
            self.data[k] = self.sim_obj.probe_data(self.probed[k])
        return self.data
### Model manipulation
def add(self, obj):
"""Adds a Nengo object to this model.
This is generally only used for manually created nodes, not ones
created by calling :func:`nef.Model.make_ensemble()` or
:func:`nef.Model.make_node()`, as these are automatically added.
A common usage is with user created subclasses, as in the following::
node = net.add(MyNode('name'))
Parameters
----------
obj : Nengo object
The Nengo object to add.
Returns
-------
obj : Nengo object
The Nengo object that was added.
See Also
--------
Network.add : The same function for Networks
"""
if hasattr(obj, 'name') and self.objs.has_key(obj.name):
raise ValueError("Something called " + obj.name + " already exists."
" Please choose a different name.")
obj.add_to_model(self)
if hasattr(obj, 'name'):
self.objs[obj.name] = obj
return obj
def get(self, target, default=None):
"""Return the Nengo object specified.
Parameters
----------
target : string or Nengo object
The ``target`` can be specified with a string
(see `string reference <string_reference.html>`_)
or a Nengo object.
If a Nengo object is passed, ``get`` just confirms
that ``target`` is a part of the model.
default : optional
If ``target`` is not in the model, then ``get`` will
return ``default``.
Returns
-------
target : Nengo object
The Nengo object specified by ``target``.
"""
if isinstance(target, str):
if self.aliases.has_key(target):
return self.aliases[target]
elif self.objs.has_key(target):
return self.objs[target]
logger.error("Cannot find %s in this model.", target)
return default
if not target in self.objs.values():
logger.error("Cannot find %s in this model.", str(target))
return default
return target
def get_string(self, target, default=None):
"""Return the canonical string of the Nengo object specified.
Parameters
----------
target : string or Nengo object
The ``target`` can be specified with a string
(see `string reference <string_reference.html>`_)
or a Nengo object.
If a string is passed, ``get_string`` returns
the canonical version of it; i.e., if it is
an alias, the non-aliased version is returned.
default : optional
If ``target`` is not in the model, then ``get`` will
return ``default``.
Returns
-------
target : Nengo object
The Nengo object specified by ``target``.
Raises
------
ValueError
If the ``target`` does not exist and no ``default`` is specified.
"""
if isinstance(target, str):
if self.aliases.has_key(target):
obj = self.aliases[target]
elif self.objs.has_key(target):
return target
for k, v in self.objs.iteritems():
if v == target:
return k
logger.warning("Cannot find %s in this model.", str(target))
return default
def remove(self, target):
"""Removes a Nengo object from the model.
Parameters
----------
target : str, Nengo object
A string referencing the Nengo object to be removed
(see `string reference <string_reference.html>`)
or node or name of the node to be removed.
Returns
-------
target : Nengo object
The Nengo object removed.
"""
obj = self.get(target)
if obj is None:
logger.warning("%s is not in this model.", str(target))
return
obj.remove_from_model(self)
for k, v in self.objs.iteritems():
if v == obj:
del self.objs[k]
logger.info("%s removed.", k)
for k, v in self.aliases.iteritem():
if v == obj:
del self.aliases[k]
logger.info("Alias '%s' removed.", k)
return obj
def alias(self, alias, target):
"""Adds a named shortcut to an existing Nengo object
within this model.
This is designed to simplify :func:`nengo.Model.connect()`,
:func:`nengo.Model.get()`, and :func:`nengo.Model.remove()` calls.
For example, you can do::
model.make_alias('vision', 'A.B.C.D.E')
model.make_alias('motor', 'W.X.Y.Z')
model.connect('vision', 'motor')
Parameters
----------
alias : str
The alias to assign to ``target``.
target : str or Nengo object
Identifies the Nengo object to be aliased.
Raises
------
ValueError
If ``target`` can't be found in the model.
"""
obj_s = self.get_string(target)
if obj_s is None:
raise ValueError(target + " cannot be found.")
self.aliases[alias] = obj_s
logger.info("%s aliased to %s", obj_s, alias)
return self.get(obj_s)
# Model creation methods
def make_ensemble(self, name, neurons, dimensions,
max_rates=objects.Uniform(200, 300),
intercepts=objects.Uniform(-1, 1),
radius=1.0, encoders=None):
"""Create and return an ensemble of neurons.
The ensemble created by this function is automatically added to
the model.
Parameters
----------
name : str
Name of the ensemble. Must be unique within the model.
neurons : int
Number of neurons in the ensemble.
dimensions : int
Number of dimensions that this ensemble will represent.
max_rates : iterable, optional
A 2-element iterable containing the minimum and maximum
values of a uniform distribution from which the maximum
firing rates of neurons in the ensemble will be selected
(in Hz).
**Default**: (50, 100)
intercepts : iterable, optional
A 2-element iterable containing the minimum and maximum
values of a uniform distribution from which the x-intercepts
of neuron tuning curves will be selected.
**Default**: (-1, 1)
radius : float, optional
The representational range of the ensemble.
I.e., the maximum value that can be represented
in each dimension.
**Default**: 1.0
encoders : 2-D matrix of floats, optional
A matrix containing encoding vectors for each neuron.
**Default**: randomly generated vectors on the unit sphere.
neuron_model : dict, optional
Specifies the neuron model that this ensemble will
be made up of.
**Default**: A leaky integrate-and-fire (LIF) neuron
with ``tau_rc=0.02``, ``tau_ref=0.002``.
mode : {'spiking', 'direct', 'rate'}, optional
Simulation mode.
**Default**: 'spiking'
See Also
--------
Ensemble : The Ensemble object
"""
ens = objects.Ensemble(
name, neurons, dimensions,
max_rates=max_rates, intercepts=intercepts, radius=radius,
encoders=encoders, seed=self._get_new_seed())
return self.add(ens)
def make_network(self, name, seed=None):
"""Create and return a network.
Networks can contain other networks, and are useful
for organizing ensembles and connections.
This function creates a new network, which can then be used to
create ensembles and other Nengo objects that are within that
network.
Parameters
----------
name : str
Name of the network. This must be unique within the model.
Returns
-------
network : Network
The created network.
seed : int, optional
Random number seed that will be fed to the
random number generator. Setting this seed makes
the creation of the network a deterministic process;
however, each new ensemble in the network advances
the random number generator, so if the network creation code
changes, the entire network changes.
"""
net = objects.Network(name, seed, model=self)
return self.add(net)
def make_node(self, name, output):
"""Create and return a node of dimensionality ``len(output)``,
which produces the defined output.
Parameters
----------
name : str
Name of this node. Must be unique in the model.
output : function, list of floats, dict
The output that should be generated by this node.
If ``output`` is a function, it will be called on each timestep;
if it accepts a single parameter, it will be given
the current time of the simulation.
If ``output`` is a list of floats, that list will be
used as constant output.
If ``output`` is a dict, the output defines a piece-wise constant
function in which the keys define when the value changes,
and the values define what the value changes to.
Returns
-------
node : Node
The created Node
See Also
--------
Node : The Node object
"""
node = objects.Node(name, output, input=self.simtime)
return self.add(node)
    def probe(self, target, sample_every=None, filter=None):
        """Probe a piece of data contained in the model.

        When a piece of data is probed, it will be recorded through
        the course of the simulation.

        Parameters
        ----------
        target : str, Nengo object
            The piece of data being probed.
            This can specified as a string
            (see `string reference <string_reference.html>`_)
            or a Nengo object. Each Nengo object will emit
            what it considers to be the most useful piece of data
            by default; if that's not what you want,
            then specify the correct data using the string format
            ``'name.probe_type'``.
        sample_every : float, optional
            How often to sample the target data, in seconds.
            Some types of data (e.g. connection weight matrices)
            are very large, and change relatively slowly.
            Use ``sample_every`` to limit the amount of data
            being recorded. For example::

              model.probe('A>B.weights', sample_every=0.5)

            records the value of the weight matrix between
            the ``A`` and ``B`` ensembles every 0.5 simulated seconds.
            **Default**: Every timestep (i.e., ``dt``).
        filter : float, optional
            Time constant of an exponential filter applied to the probed
            signal; only applied when larger than ``dt``.
            **Default**: None (no filtering)
        """
        if sample_every is None:
            sample_every = self.dt
        # Strings of the form 'name.probe_type' select a specific output
        # of the named object; the full string remains the key in
        # ``self.probed``.
        probe_type = ''
        key = target
        if isinstance(target, str):
            s = target.split('.')
            if len(s) > 1:
                target, probe_type = s[0], s[1]
        obj = self.get(target)
        # Ensembles know how to probe themselves.
        if type(obj) == objects.Ensemble:
            obj_s = self.get_string(target)  # NOTE(review): assigned but never used
            p = obj.probe(probe_type, sample_every, filter, self)
            self.probed[key] = p.probe
            return p
        # Everything else is probed through its underlying signal.
        if type(obj) != objects.Signal:
            obj = obj.signal
        if filter is not None and filter > self.dt:
            # Route the signal through a Filter/Transform pair and probe
            # the filtered copy instead of the raw signal.
            fcoef, tcoef = _filter_coefs(pstc=filter, dt=self.dt)
            probe_sig = self.add(objects.Signal(obj.n))
            self.add(objects.Filter(fcoef, probe_sig, probe_sig))
            self.add(objects.Transform(tcoef, obj, probe_sig))
            p = self.add(objects.Probe(probe_sig, sample_every))
        else:
            p = self.add(objects.Probe(obj, sample_every))
        self.probed[key] = p
        return p
def connect(self, pre, post, **kwargs):
"""Connect ``pre`` to ``post``.
Parameters
----------
pre, post : str or Nengo object
The items to connect.
``pre`` and ``post`` can be strings that identify a Nengo object
(see `string reference <string_reference.html>`_), or they
can be the Nengo objects themselves.
function : Python function, optional
The function that this connection will compute.
This function takes as input the vector being represented by
``pre``, and returns another vector which will be
projected to ``post``.
If ``function`` is not specified, by default the
identity function will be used (i.e., the function returns
the same vector that it takes as input;
:math:`f(\mathbf{x}) = \mathbf{x}`).
The function takes a single parameter ``x``,
which is the current value of the ``pre`` ensemble,
and must return a float (for one-dimensional functions) or
a list of floats.
The following simple example connects two ensembles together
such that the second ensemble represents
the square of the first ensemble::
def square(x):
return x * x
pre = net.make_ensemble('pre', neurons=30, dimensions=1)
post = net.make_ensemble('post', neurons=30, dimensions=1)
net.connect(pre, post, function=square)
or, slightly more succinctly::
net.connect(pre, post, function=lambda x: x * x)
**Default**: the ``identity`` function
(:math:`f(x) = x`).
transform : float matrix (``function`` dims by ``post`` dims), optional
A matrix that maps the computed function onto ``post``.
Its dimensionality is ``function`` output dimensions
by ``post`` dimensions. If ``transform`` is not specified,
the identity matrix will be used. This mainly makes sense
when the dimensionality of the ``function`` output
is exactly the dimensionality of ``post``; if this isn't true,
then you should probably explicitly define ``transform``.
The following simple example passes through the values
represented by a 2-dimensional ensemble to
the first and third dimension of a 4-dimensional ensemble::
pre = model.make_ensemble('pre', neurons=40, dimensions=2)
post = model.make_ensemble('post', neurons=80, dimensions=4)
model.connect(pre, post, transform=[[1, 0], [0, 0], [0, 1], [0, 0]])
The transform matrix is quite confusing to make manually;
a helper function for making transforms is provided
(see :func:`nengo.gen_transform()`).
The following complex example computes the product of
a 2-dimensional vector, and projects that to the second dimension
of a 2-dimensional ensemble::
def product(x):
return x[0] * x[1]
pre = model.make_ensemble('pre', neurons=40, dimensions=2)
post = model.make_ensemble('post', neurons=40, dimensions=2)
model.connect(pre, post, func=product, transform=[[0], [1]])
or, slightly more succinctly::
model.connect(pre, post, func=lambda x: x[0] * x[1],
transform=[[0], [1]])
**Default**: an identity matrix.
filter : dict, optional
``filter`` contains information about the type of filter
to use across this connection.
**Default**: specifies an exponentially decaying filter
with ``tau=0.01``.
learning_rule : dict, optional
``learning_rule`` contains information about the type of
learning rule that modifies this connection.
**Default**: None
Returns
-------
connection : Connection
The Connection object created.
See Also
--------
Connection : The Connection object
"""
pre = self.get(pre)
post = self.get(post)
if type(pre) == objects.Ensemble:
logger.info("Creating DecodedConnection")
return self.add(connections.DecodedConnection(pre, post, **kwargs))
else:
logger.info("Creating SimpleConnection")
return self.add(connections.SimpleConnection(pre, post, **kwargs))
def gen_transform(pre_dims, post_dims,
weight=1.0, index_pre=None, index_post=None):
"""Helper function used to create a ``pre_dims`` by ``post_dims``
linear transformation matrix.
Parameters
----------
pre_dims, post_dims : int
The numbers of presynaptic and postsynaptic dimensions.
weight : float, optional
The weight value to use in the transform.
All values in the transform are either 0 or ``weight``.
**Default**: 1.0
index_pre, index_post : iterable of int
Determines which values are non-zero, and indicates which
dimensions of the pre-synaptic ensemble should be routed to which
dimensions of the post-synaptic ensemble.
Returns
-------
transform : 2D matrix of floats
A two-dimensional transform matrix performing the requested routing.
Examples
--------
# Sends the first two dims of pre to the first two dims of post
>>> gen_transform(pre_dims=2, post_dims=3,
index_pre=[0, 1], index_post=[0, 1])
[[1, 0], [0, 1], [0, 0]]
"""
t = [[0 for pre in xrange(pre_dims)] for post in xrange(post_dims)]
if index_pre is None:
index_pre = range(dim_pre)
elif isinstance(index_pre, int):
index_pre = [index_pre]
if index_post is None:
index_post = range(dim_post)
elif isinstance(index_post, int):
index_post = [index_post]
for i in xrange(min(len(index_pre), len(index_post))): # was max
pre = index_pre[i] # [i % len(index_pre)]
post = index_post[i] # [i % len(index_post)]
t[post][pre] = weight
return t
def gen_weights(pre_neurons, post_neurons, function):
"""Helper function used to create a ``pre_neurons`` by ``post_neurons``
connection weight matrix.
Parameters
----------
pre_neurons, post_neurons : int
The numbers of presynaptic and postsynaptic neurons.
function : function
A function that generates weights.
If it accepts no arguments, it will be called to
generate each individual weight (useful
to great random weights, for example).
If it accepts two arguments, it will be given the
``pre`` and ``post`` index in the weight matrix.
Returns
-------
weights : 2D matrix of floats
A two-dimensional connection weight matrix.
Examples
--------
>>> gen_weights(2, 2, random.random)
[[0.6281625119511959, 0.48560016153108376], [0.9639779858394248, 0.4768136917985597]]
>>> def product(pre, post):
... return pre * post
>>> gen_weights(3, 3, product)
[[0, 0, 0], [0, 1, 2], [0, 2, 4]]
"""
argspec = inspect.getargspec(func)
if len(argspec[0]) == 0:
return [[func() for pre in xrange(pre_neurons)
for post in xrange(post_neurons)]]
elif len(argspec[0]) == 2:
return [[func(pre, post) for pre in xrange(pre_neurons)
for post in xrange(post_neurons)]]
| jaberg/nengo | nengo/model.py | Python | mit | 29,418 | [
"NEURON"
] | ae5c41fb6179f14a75538171cfdd8c5a10e8fc6f54849bcb9131ce08fe738947 |
#!/usr/bin/env python
# File created on 13 Jul 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os.path import join, splitext, exists
from bfillings.blat import MinimalBlatParser9
from bfillings.blat import (assign_dna_reads_to_protein_database
as blat_assign_dna_reads_to_protein_database,
assign_dna_reads_to_dna_database
as blat_assign_dna_reads_to_dna_database)
from bfillings.usearch import (clusters_from_blast_uc_file,
assign_dna_reads_to_database
as usearch_assign_dna_reads_to_database)
from bfillings.bwa import (assign_dna_reads_to_dna_database
as bwa_assign_dna_reads_to_dna_database)
from qiime.format import format_observation_map
from qiime.parse import parse_taxonomy, MinimalSamParser
from qiime.make_otu_table import make_otu_table
from qiime.util import get_qiime_temp_dir, create_dir, write_biom_table
class DatabaseMapper(object):
    """Abstract workflow for mapping DNA reads against a reference database.

    Subclasses implement ``_assign_dna_reads_to_database`` (run the
    external aligner) and ``_process_raw_output`` (turn the aligner's raw
    output into an observation map); this base class drives the workflow
    and builds the final BIOM table.
    """

    def __call__(self,
                 query_fasta_fp,
                 database_fasta_fp,
                 output_dir,
                 observation_metadata_fp=None,
                 params=None,
                 HALT_EXEC=False):
        """Call the DatabaseMapper: run the aligner, then build the
        observation map and BIOM table under ``output_dir``.
        """
        # FIX: this docstring was previously placed *after* the params
        # default handling, making it a no-op string expression rather
        # than the function's docstring.
        if params is None:
            params = {}
        create_dir(output_dir)
        raw_output_fp = self._get_raw_output_fp(output_dir,
                                                params)
        output_observation_map_fp = '%s/observation_map.txt' % output_dir
        output_biom_fp = '%s/observation_table.biom' % output_dir
        log_fp = '%s/observation_table.log' % output_dir
        self._assign_dna_reads_to_database(
            query_fasta_fp=query_fasta_fp,
            database_fasta_fp=database_fasta_fp,
            raw_output_fp=raw_output_fp,
            temp_dir=get_qiime_temp_dir(),
            params=params,
            HALT_EXEC=HALT_EXEC)
        self._process_raw_output(raw_output_fp,
                                 log_fp,
                                 output_observation_map_fp)
        self._generate_biom_output(output_observation_map_fp,
                                   output_biom_fp,
                                   observation_metadata_fp)

    def _generate_biom_output(self,
                              observation_map_fp,
                              output_biom_fp,
                              observation_metadata_fp):
        """Write a BIOM table built from the observation map, optionally
        attaching taxonomy metadata parsed from ``observation_metadata_fp``.
        """
        if observation_metadata_fp is not None:
            observation_metadata = \
                parse_taxonomy(open(observation_metadata_fp, 'U'))
        else:
            observation_metadata = None
        biom_table = make_otu_table(open(observation_map_fp, 'U'),
                                    observation_metadata)
        write_biom_table(biom_table, output_biom_fp)

    def _assign_dna_reads_to_database(self,
                                      query_fasta_fp,
                                      database_fasta_fp,
                                      raw_output_fp,
                                      temp_dir,
                                      params,
                                      HALT_EXEC):
        """Run the external aligner. Must be overridden by subclasses."""
        # FIX: the abstract signature previously declared
        # ``observation_metadata_fp`` where ``__call__`` keyword-passes
        # ``temp_dir``, so a missing override raised TypeError instead of
        # the intended NotImplementedError.
        raise NotImplementedError(
            "DatabaseMapper subclasses must override _assign_dna_reads_to_database")

    def _get_raw_output_fp(self, output_dir, params):
        """Generate filepath for raw output.

        Subclasses will generally want to override this method.
        """
        return join(output_dir, 'raw_output.txt')

    def _process_raw_output(self,
                            raw_output_fp,
                            log_fp,
                            output_observation_map_fp):
        """Convert raw aligner output to an observation map.
        Must be overridden by subclasses."""
        raise NotImplementedError(
            "DatabaseMapper subclasses must override _process_raw_output")
class UsearchDatabaseMapper(DatabaseMapper):
    """Map reads with usearch and summarize hits from its .uc output."""

    def _assign_dna_reads_to_database(self, query_fasta_fp, database_fasta_fp,
                                      raw_output_fp, temp_dir, params,
                                      HALT_EXEC):
        """Run usearch against the reference database."""
        usearch_assign_dna_reads_to_database(query_fasta_fp=query_fasta_fp,
                                             database_fasta_fp=database_fasta_fp,
                                             output_fp=raw_output_fp,
                                             temp_dir=temp_dir,
                                             params=params,
                                             HALT_EXEC=HALT_EXEC)

    def _get_raw_output_fp(self, output_dir, params):
        """Path of the usearch .uc output file."""
        return join(output_dir, 'out.uc')

    def _process_raw_output(self, raw_output_fp, log_fp,
                            output_observation_map_fp):
        """Write an observation map built from the .uc cluster assignments.

        ``log_fp`` is accepted for interface compatibility but, as in the
        original implementation, no log is written here.
        """
        hits, failures = clusters_from_blast_uc_file(
            open(raw_output_fp, 'U'), 9)
        map_f = open(output_observation_map_fp, 'w')
        map_f.writelines(format_observation_map(hits.items(), ''))
        map_f.close()
class BlatDatabaseMapper(DatabaseMapper):
    """Map reads with BLAT against a translated (protein) database.

    Hits are filtered on e-value and percent identity using the class
    attributes below; callers may override them per instance (see
    ``blat_database_mapper``).
    """
    # Maximum e-value for a hit to be kept.
    MaxEvalue = 1e-10
    # Minimum fractional identity (0-1) for a hit to be kept.
    MinId = 0.97

    def _get_raw_output_fp(self,
                           output_dir,
                           params):
        """ Generate filepath for .bl9 (blast9) file """
        return join(output_dir, 'out.bl9')

    def _process_raw_output(self,
                            raw_output_fp,
                            log_fp,
                            output_observation_map_fp):
        """ Generate observation map and biom table from .bl9 file

        Returns {subject_id: [query_id, ...]} for hits passing the
        MaxEvalue/MinId filters, and writes it to
        ``output_observation_map_fp`` (kept hits are also logged to
        ``log_fp``).
        """
        result = {}
        # blast9 column indices: 2 = percent identity, 10 = e-value.
        pct_id_field = 2
        evalue_field = 10
        output_observation_map_f = open(output_observation_map_fp, 'w')
        log_f = open(log_fp, 'w')
        for summary, blat_results in MinimalBlatParser9(
                open(raw_output_fp, 'U'),
                include_column_names=False):
            for e in blat_results:
                if (float(e[evalue_field]) <= self.MaxEvalue and
                        float(e[pct_id_field]) / 100. >= self.MinId):
                    query_id = e[0]
                    subject_id = e[1]
                    try:
                        result[subject_id].append(query_id)
                    except KeyError:
                        result[subject_id] = [query_id]
                    log_f.write('\t'.join(e))
                    log_f.write('\n')
                    # Only the first hit passing the filters is kept from
                    # each result block (break exits the inner loop).
                    break
        log_f.close()
        for e in result.items():
            output_observation_map_f.write(
                '%s\t%s\n' %
                (e[0], '\t'.join(e[1])))
        output_observation_map_f.close()
        return result

    def _assign_dna_reads_to_database(self,
                                      query_fasta_fp,
                                      database_fasta_fp,
                                      raw_output_fp,
                                      temp_dir,
                                      params,
                                      HALT_EXEC):
        """Run BLAT against a protein reference database."""
        # NOTE(review): HALT_EXEC is accepted but not forwarded -- confirm
        # whether the blat application controller supports it.
        blat_assign_dna_reads_to_protein_database(
            query_fasta_fp=query_fasta_fp,
            database_fasta_fp=database_fasta_fp,
            output_fp=raw_output_fp,
            temp_dir=temp_dir,
            params=params)
class BlatNtDatabaseMapper(BlatDatabaseMapper):
    """BLAT mapper that searches a nucleotide (rather than protein) database."""

    def _assign_dna_reads_to_database(self, query_fasta_fp, database_fasta_fp,
                                      raw_output_fp, temp_dir, params,
                                      HALT_EXEC):
        """Run BLAT against a DNA reference database."""
        # temp_dir and HALT_EXEC are accepted for interface compatibility
        # but not forwarded, exactly as in the original implementation.
        blat_assign_dna_reads_to_dna_database(
            query_fasta_fp=query_fasta_fp,
            database_fasta_fp=database_fasta_fp,
            output_fp=raw_output_fp,
            params=params)
class BwaSwDatabaseMapper(DatabaseMapper):
    """Map reads with BWA-SW and summarize hits from the resulting SAM file."""

    def _get_raw_output_fp(self, output_dir, params):
        """Path of the raw SAM output written by bwa."""
        return join(output_dir, 'bwa_raw_out.sam')

    def _process_raw_output(self, raw_output_fp, log_fp,
                            output_observation_map_fp):
        """Build an observation map from the SAM file.

        A record whose FLAG field is exactly 4 is unmapped and skipped;
        every other record is logged and grouped by reference sequence.
        Returns {subject_id: [query_id, ...]}.
        """
        QUERY, FLAG, SUBJECT = 0, 1, 2  # SAM column indices
        hits = {}
        log_f = open(log_fp, 'w')
        for record in MinimalSamParser(open(raw_output_fp, 'U')):
            if int(record[FLAG]) == 4:
                continue  # unmapped read
            hits.setdefault(record[SUBJECT], []).append(record[QUERY])
            log_f.write('\t'.join(record))
            log_f.write('\n')
        log_f.close()
        map_f = open(output_observation_map_fp, 'w')
        for subject_id, query_ids in hits.items():
            map_f.write('%s\t%s\n' % (subject_id, '\t'.join(query_ids)))
        map_f.close()
        return hits

    def _assign_dna_reads_to_database(self, query_fasta_fp, database_fasta_fp,
                                      raw_output_fp, temp_dir, params,
                                      HALT_EXEC):
        """Run bwa against a DNA reference database.

        A copy of ``params`` is passed along so the caller's dict is
        never mutated downstream.
        """
        run_params = {}
        run_params.update(params)
        bwa_assign_dna_reads_to_dna_database(
            query_fasta_fp=query_fasta_fp,
            database_fasta_fp=database_fasta_fp,
            out_fp=raw_output_fp,
            params=run_params)
class BwaShortDatabaseMapper(BwaSwDatabaseMapper):
    """Map reads with the bwa-short algorithm (bwa aln + samse)."""

    def _assign_dna_reads_to_database(self,
                                      query_fasta_fp,
                                      database_fasta_fp,
                                      raw_output_fp,
                                      temp_dir,
                                      params,
                                      HALT_EXEC):
        """Run bwa-short, writing the intermediate .sai next to the SAM output.

        The '-f' aln parameter (the .sai path) is derived from
        ``raw_output_fp``; any caller-supplied 'aln_params' are layered
        on top of it.
        """
        # FIX: work on a copy -- the original mutated the caller's
        # ``params`` dict ('algorithm'/'aln_params' keys leaked back),
        # unlike the sibling BwaSwDatabaseMapper which copies.
        _params = dict(params)
        _aln_params = {'-f': splitext(raw_output_fp)[0] + '.sai'}
        if 'aln_params' in _params:
            _aln_params.update(_params['aln_params'])
        _params['algorithm'] = 'bwa-short'
        _params['aln_params'] = _aln_params
        bwa_assign_dna_reads_to_dna_database(
            query_fasta_fp=query_fasta_fp,
            database_fasta_fp=database_fasta_fp,
            out_fp=raw_output_fp,
            params=_params)
def usearch_database_mapper(query_fp,
                            refseqs_fp,
                            output_dir,
                            evalue,
                            min_id,
                            queryalnfract,
                            targetalnfract,
                            maxaccepts,
                            maxrejects,
                            observation_metadata_fp=None,
                            HALT_EXEC=False):
    """Run the usearch read-mapping workflow, writing results to output_dir."""
    params = {'--evalue': evalue,
              '--id': min_id,
              '--queryalnfract': queryalnfract,
              '--targetalnfract': targetalnfract,
              '--maxaccepts': maxaccepts,
              '--maxrejects': maxrejects}
    UsearchDatabaseMapper()(query_fp,
                            refseqs_fp,
                            output_dir,
                            params=params,
                            observation_metadata_fp=observation_metadata_fp,
                            HALT_EXEC=HALT_EXEC)
def blat_database_mapper(query_fp,
                         refseqs_fp,
                         output_dir,
                         evalue,
                         min_id,
                         genetic_code,
                         observation_metadata_fp=None,
                         HALT_EXEC=False):
    """Run the translated-BLAT read-mapping workflow."""
    mapper = BlatDatabaseMapper()
    # The thresholds are applied while post-processing the raw BLAT output.
    mapper.MinId = min_id
    mapper.MaxEvalue = evalue
    mapper(query_fp,
           refseqs_fp,
           output_dir,
           params={'-minIdentity': min_id, 'genetic_code': genetic_code},
           observation_metadata_fp=observation_metadata_fp,
           HALT_EXEC=HALT_EXEC)
def blat_nt_database_mapper(query_fp,
                            refseqs_fp,
                            output_dir,
                            evalue,
                            min_id,
                            observation_metadata_fp=None,
                            HALT_EXEC=False):
    """Run the nucleotide-BLAT read-mapping workflow."""
    mapper = BlatNtDatabaseMapper()
    # The thresholds are applied while post-processing the raw BLAT output.
    mapper.MinId = min_id
    mapper.MaxEvalue = evalue
    mapper(query_fp,
           refseqs_fp,
           output_dir,
           params={'-minIdentity': min_id},
           observation_metadata_fp=observation_metadata_fp,
           HALT_EXEC=HALT_EXEC)
def bwa_sw_database_mapper(query_fp,
                           refseqs_fp,
                           output_dir,
                           observation_metadata_fp=None,
                           HALT_EXEC=False):
    """Run the BWA-SW read-mapping workflow with default parameters."""
    BwaSwDatabaseMapper()(query_fp,
                          refseqs_fp,
                          output_dir,
                          params={},
                          observation_metadata_fp=observation_metadata_fp,
                          HALT_EXEC=HALT_EXEC)
def bwa_short_database_mapper(query_fp,
                              refseqs_fp,
                              output_dir,
                              max_diff,
                              observation_metadata_fp=None,
                              HALT_EXEC=False):
    """Run the bwa-short read-mapping workflow.

    max_diff (forwarded as bwa aln's -n) may be None to use bwa's default.
    """
    if max_diff is None:
        params = {}
    else:
        params = {'aln_params': {'-n': max_diff}}
    BwaShortDatabaseMapper()(query_fp,
                             refseqs_fp,
                             output_dir,
                             params=params,
                             observation_metadata_fp=observation_metadata_fp,
                             HALT_EXEC=HALT_EXEC)
| wasade/qiime | qiime/map_reads_to_reference.py | Python | gpl-2.0 | 14,828 | [
"BWA"
] | afedb6d2b51f9ec053ec44cd0a25d567e650d87a5ab5a63d03ec85bb840dc6dd |
# -*- coding: utf-8 -*-
import numpy as np
from shapely.geometry.polygon import Polygon
import datetime
import netCDF4 as nc
import itertools
import geojson
from shapely.ops import cascaded_union
#from openclimategis.util.helpers import get_temp_path
#from openclimategis.util.toshp import OpenClimateShp
from shapely.geometry.multipolygon import MultiPolygon, MultiPolygonAdapter
from shapely import prepared, wkt
from shapely.geometry.geo import asShape
import time, sys
from multiprocessing import Process, Queue, Lock
from math import sqrt
import os
from osgeo import osr, ogr
from util.helpers import get_temp_path
from util.toshp import OpenClimateShp
dtime = 0
class OcgDataset(object):
"""
Wraps and netCDF4-python Dataset object providing extraction methods by
spatial and temporal queries.
dataset -- netCDF4-python Dataset object
**kwds -- arguments for the names of spatial and temporal dimensions.
rowbnds_name
colbnds_name
time_name
time_units
calendar
"""
    def __init__(self,dataset,**kwds):
        """Open the netCDF dataset and cache its spatiotemporal coordinates.

        dataset -- path/URL of a netCDF file readable by netCDF4.Dataset
        **kwds -- recognized keys (all optional):
            multiReset   -- close the file after init so each thread can
                            reopen its own handle (default False)
            verbose      -- integer verbosity level
            rowbnds_name -- variable holding latitude cell bounds
                            (default 'bounds_latitude')
            colbnds_name -- variable holding longitude cell bounds
                            (default 'bounds_longitude')
            time_name    -- time variable name (default 'time')
            time_units   -- udunits time string
                            (default 'days since 1950-01-01 00:00:00')
            calendar     -- calendar name (default 'proleptic_gregorian')
            level_name   -- level dimension name (default 'levels')
        """
        self.url = dataset
        self.dataset = nc.Dataset(dataset,'r')
        self.multiReset = kwds.get('multiReset') or False
        self.verbose = kwds.get('verbose')
#        self.polygon = kwds.get('polygon')
#        self.temporal = kwds.get('temporal')
#        self.row_name = kwds.get('row_name') or 'latitude'
#        self.col_name = kwds.get('col_name') or 'longitude'
        ## extract the names of the spatiotemporal variables/dimensions from
        ## the keyword arguments.
        self.rowbnds_name = kwds.get('rowbnds_name') or 'bounds_latitude'
        self.colbnds_name = kwds.get('colbnds_name') or 'bounds_longitude'
        self.time_name = kwds.get('time_name') or 'time'
        self.time_units = kwds.get('time_units') or 'days since 1950-01-01 00:00:00'
        self.calendar = kwds.get('calendar') or 'proleptic_gregorian'
        self.level_name = kwds.get('level_name') or 'levels'
#        self.clip = kwds.get('clip') or False
#        self.dissolve = kwds.get('dissolve') or False
        #print self.dataset.variables[self.time_name].units
        #sys.exit()
#        self.row = self.dataset.variables[self.row_name][:]
#        self.col = self.dataset.variables[self.col_name][:]
        ## extract the row and column bounds from the dataset
        self.row_bnds = self.dataset.variables[self.rowbnds_name][:]
        self.col_bnds = self.dataset.variables[self.colbnds_name][:]
        ## convert the time vector to datetime objects
        self.timevec = nc.netcdftime.num2date(self.dataset.variables[self.time_name][:],
                                              self.time_units,
                                              self.calendar)
        ## these are base numpy arrays used by spatial operations.
        ## four numpy arrays one for each bounding coordinate of a polygon
        self.min_col,self.min_row = np.meshgrid(self.col_bnds[:,0],self.row_bnds[:,0])
        self.max_col,self.max_row = np.meshgrid(self.col_bnds[:,1],self.row_bnds[:,1])
        ## these are the original indices of the row and columns. they are
        ## referenced after the spatial subset to retrieve data from the dataset
        self.real_col,self.real_row = np.meshgrid(np.arange(0,len(self.col_bnds)),
                                                  np.arange(0,len(self.row_bnds)))
        ## data file must be closed and reopened to work properly with
        ## multiple threads (each worker reopens its own handle).
        ## NOTE: this is a Python-2 print statement.
        if self.multiReset:
            if self.verbose>1: print 'closed'
            self.dataset.close()
def _itr_array_(self,a):
"a -- 2-d ndarray"
ix = a.shape[0]
jx = a.shape[1]
for ii,jj in itertools.product(xrange(ix),xrange(jx)):
yield ii,jj
def _contains_(self,grid,lower,upper):
## small ranges on coordinates requires snapping to closest coordinate
## to ensure values are selected through logical comparison.
ugrid = np.unique(grid)
lower = ugrid[np.argmin(np.abs(ugrid-lower))]
upper = ugrid[np.argmin(np.abs(ugrid-upper))]
s1 = grid >= lower
s2 = grid <= upper
ret = s1*s2
return(ret)
    def _set_overlay_(self,polygon=None,clip=False):
        """
        Perform spatial operations, populating the cell-geometry grids
        (_igrid, _jgrid), the partial-overlap grid (_pgrid), the area
        weights (_weights) and the selection mask (_mask).

        polygon=None -- shapely polygon object (area of interest)
        clip=False -- set to True to perform an intersection
        """
        if self.verbose>1: print('overlay...')
        ## holds polygon objects
        self._igrid = np.empty(self.min_row.shape,dtype=object)
        ## hold point objects (cell centroids)
        self._jgrid = np.empty(self.min_row.shape,dtype=object)
        ## holds locations that would be partial if the data were clipped for use in dissolve
        self._pgrid = np.zeros(self.min_row.shape,dtype=bool)
        ## holds weights for area weighting in the case of a dissolve
        self._weights = np.zeros(self.min_row.shape)
        ## initial subsetting to avoid iterating over all polygons unless abso-
        ## lutely necessary: a bounding-envelope test marks candidate cells.
        if polygon is not None:
            emin_col,emin_row,emax_col,emax_row = polygon.envelope.bounds
            #print emin_col,emin_row,emax_col,emax_row
            #print self.min_col
            #print self.max_col
            #print self.min_row
            #print self.max_row
            smin_col = self._contains_(self.min_col,emin_col,emax_col)
            smax_col = self._contains_(self.max_col,emin_col,emax_col)
            smin_row = self._contains_(self.min_row,emin_row,emax_row)
            smax_row = self._contains_(self.max_row,emin_row,emax_row)
            #print smin_col
            #print smax_col
            #print smin_row
            #print smax_row
            #include = smin_col*smax_col*smin_row*smax_row
            ## a cell is a candidate when either of its column bounds AND
            ## either of its row bounds fall inside the envelope.
            include = np.any((smin_col,smax_col),axis=0)*np.any((smin_row,smax_row),axis=0)
            #print include
        else:
            include = np.empty(self.min_row.shape,dtype=bool)
            include[:,:] = True
#        print('constructing grid...')
#        ## construct the subset of polygon geometries
#        vfunc = np.vectorize(self._make_poly_array_)
#        self._igrid = vfunc(include,
#                            self.min_row,
#                            self.min_col,
#                            self.max_row,
#                            self.max_col,
#                            polygon)
#
#        ## calculate the areas for potential weighting
#        print('calculating area...')
#        def _area(x):
#            if x != None:
#                return(x.area)
#            else:
#                return(0.0)
#        vfunc_area = np.vectorize(_area,otypes=[np.float])
#        preareas = vfunc_area(self._igrid)
#
#        ## if we are clipping the data, modify the geometries and record the weights
#        if clip and polygon:
#            print('clipping...')
##            polys = []
##            for p in self._igrid.reshape(-1):
##                polys.append(self._intersection_(polygon,p))
#            vfunc = np.vectorize(self._intersection_)
#            self._igrid = vfunc(polygon,self._igrid)
#
#            ## calculate weights following intersection
#            areas = vfunc_area(self._igrid)
#            def _weight(x,y):
#                if y == 0:
#                    return(0.0)
#                else:
#                    return(x/y)
#            self._weights=np.vectorize(_weight)(areas,preareas)
#
#            ## set the mask
#            self._mask = self._weights > 0
#
#        print('overlay done.')
        ## loop for each spatial grid element
#        tr()
        ## prepared geometries make the repeated intersects() tests faster
        if polygon:
#            prepared_polygon = polygon
            prepared_polygon = prepared.prep(polygon)
        for ii,jj in self._itr_array_(include):
            if not include[ii,jj]: continue
            ## create the polygon
            g = self._make_poly_((self.min_row[ii,jj],self.max_row[ii,jj]),
                                 (self.min_col[ii,jj],self.max_col[ii,jj]))
            ## add the polygon if it intersects the aoi of if all data is being
            ## returned.
            if polygon:
                if not prepared_polygon.intersects(g): continue
                if polygon.intersection(g).area==0:
                    continue
#            if g.intersects(polygon) or polygon is None:
            ## get the area before the intersection
            prearea = g.area
            ## full intersection in the case of a clip and an aoi is passed
#            if g.overlaps(polygon) and clip is True and polygon is not None:
            if clip is True and polygon is not None:
                ng = g.intersection(polygon)
            ## otherwise, just keep the geometry
            else:
                ng = g
            ## check if the geometry partially intersects the AoI;
            ## without this multiple features covering the same location will
            ## occur when threading is enabled.
            ## NOTE(review): this line calls g.intersection(polygon) even when
            ## polygon is None (only reachable with polygon=None), which would
            ## raise -- confirm callers always pass a polygon.
            if g.intersection(polygon).area<g.area and g.intersection(polygon).area>0:
                self._pgrid[ii,jj]=True
            ## calculate the weight (fraction of the cell inside the AoI)
            w = ng.area/prearea
            ## a polygon can have a true intersects but actually not overlap
            ## i.e. shares a border.
            if w > 0:
                self._igrid[ii,jj] = ng
                self._weights[ii,jj] = w
                self._jgrid[ii,jj] = (g.centroid.x,g.centroid.y)
        ## the mask is used as a subset
        self._mask = self._weights > 0
        #print self._mask
        #print self._pgrid
#        self._weights = self._weights/self._weights.sum()
        #print self._weights
        #print self._mask.shape
def _make_poly_(self,rtup,ctup):
"""
rtup = (row min, row max)
ctup = (col min, col max)
"""
return Polygon(((ctup[0],rtup[0]),
(ctup[0],rtup[1]),
(ctup[1],rtup[1]),
(ctup[1],rtup[0])))
@staticmethod
def _make_poly_array_(include,min_row,min_col,max_row,max_col,polygon=None):
ret = None
if include:
poly = Polygon(((min_col,min_row),
(max_col,min_row),
(max_col,max_row),
(min_col,max_row),
(min_col,min_row)))
if polygon != None:
if polygon.intersects(poly):
ret = poly
else:
ret = poly
return(ret)
@staticmethod
def _intersection_(polygon,target):
ret = None
if target != None:
ppp = target.intersection(polygon)
if not ppp.is_empty:
ret = ppp
return(ret)
    def _get_numpy_data_(self,var_name,polygon=None,time_range=None,clip=False,levels = [0],lock=Lock()):
        """
        Extract the requested variable from the netCDF dataset as a numpy
        block, restricted to the spatial/temporal selection.

        var_name -- NC variable to extract from
        polygon=None -- shapely polygon object
        time_range=None -- [lower datetime, upper datetime]
        clip=False -- set to True to perform a full intersection
        levels -- list of level indices for 4-d variables
        lock -- file lock shared between worker processes

        Returns a 3-d (time,row,col) or 4-d (time,level,row,col) array, or
        None when the selection is empty.

        NOTE(review): `levels=[0]` and `lock=Lock()` are mutable defaults
        evaluated once at import time. The shared default lock is presumably
        intentional (one lock per module guards the file) -- confirm before
        changing.
        """
        if self.verbose>1: print('getting numpy data...')
        ## perform the spatial operations (populates _mask/_weights/_igrid/...)
        self._set_overlay_(polygon=polygon,clip=clip)
        def _u(arg):
            "Pulls unique values and generates an evenly spaced array."
            un = np.unique(arg)
            ret = np.arange(un.min(),un.max()+1)
            return(ret)
        def _sub(arg):
            "Subset an array."
            return arg[self._idxrow.min():self._idxrow.max()+1,
                       self._idxcol.min():self._idxcol.max()+1]
        ## get the time indices
        if time_range is not None:
            self._idxtime = np.arange(
                0,
                len(self.timevec))[(self.timevec>=time_range[0])*
                (self.timevec<=time_range[1])]
        else:
            self._idxtime = np.arange(0,len(self.timevec))
        ## reference the original (world) coordinates of the netCDF when selecting
        ## the spatial subset.
        self._idxrow = _u(self.real_row[self._mask])
        self._idxcol = _u(self.real_col[self._mask])
        ## subset our reference arrays in a similar manner
        self._mask = _sub(self._mask)
        self._weights = _sub(self._weights)
        self._igrid = _sub(self._igrid)
        self._jgrid = _sub(self._jgrid)
        self._pgrid = _sub(self._pgrid)
        ## hit the dataset and extract the block
        npd = None
        #print an error message and return if the selection doesn't include any data
        if len(self._idxrow)==0:
            if self.verbose>0: print "Invalid Selection, unable to select row"
            return
        if len(self._idxcol)==0:
            if self.verbose>0: print "Invalid Selection, unable to select column"
            return
        if len(self._idxtime)==0:
            if self.verbose>0: print "Invalid Selection, unable to select time range"
            return
        narg = time.clock()
        #attempt to aquire the file lock; spin with a short sleep so other
        #workers in this process are not starved
        while not(lock.acquire(False)):
            time.sleep(.1)
        #reopen the data file (required when several processes share one path)
        if self.multiReset:
            self.dataset = nc.Dataset(self.url,'r')
        ##check if data is 3 or 4 dimensions
        dimShape = len(self.dataset.variables[var_name].dimensions)
        #grab the data
        if dimShape == 3:
            npd = self.dataset.variables[var_name][self._idxtime,self._idxrow,self._idxcol]
            # reshape the data if the selection causes a loss of dimension(s)
            if len(npd.shape) <= 2:
                npd = npd.reshape(len(self._idxtime),len(self._idxrow),len(self._idxcol))
        elif dimShape == 4:
            #check if 1 or more levels have been selected
            if len(levels)==0:
                if self.verbose>0: print "Invalid Selection, unable to select levels"
                return
            #grab level values
            self.levels = self.dataset.variables[self.level_name][:]
            npd = self.dataset.variables[var_name][self._idxtime,levels,self._idxrow,self._idxcol]
            # reshape the data if the selection causes a loss of dimension(s)
            if len(npd.shape)<=3:
                npd = npd.reshape(len(self._idxtime),len(levels),len(self._idxrow),len(self._idxcol))
        #close the dataset before releasing the lock
        if self.multiReset:
            self.dataset.close()
        #release the file lock
        lock.release()
        if self.verbose>1: print "dtime: ", time.clock()-narg
        if self.verbose>1: print('numpy extraction done.')
        return(npd)
def _is_masked_(self,arg):
"Ensures proper formating of masked data for single-layer data."
if isinstance(arg,np.ma.MaskedArray):
return None
else:
return arg
def _is_masked2_(self,arg):
"Ensures proper formating of masked data for multi-layer data."
#print arg
if isinstance(arg[0],np.ma.MaskedArray):
return None
else:
return np.array(arg)
def extract_elements(self,*args,**kwds):
"""
Merges the geometries and extracted attributes into a GeoJson-like dictionary
list.
var_name -- NC variable to extract from
dissolve=False -- set to True to merge geometries and calculate an
area-weighted average
polygon=None -- shapely polygon object
time_range=None -- [lower datetime, upper datetime]
clip=False -- set to True to perform a full intersection
"""
if self.verbose>1: print('extracting elements...')
## dissolve argument is unique to extract_elements
if 'dissolve' in kwds:
dissolve = kwds.pop('dissolve')
else:
dissolve = False
if 'levels' in kwds:
levels = kwds.get('levels')
#get the parent polygon ID so geometry/features can be recombined later
if 'parentPoly' in kwds:
parent = kwds.pop('parentPoly')
else:
parent = None
clip = kwds.get('clip')
## extract numpy data from the nc file
q=args[0]
var = args[1]
npd = self._get_numpy_data_(*args[1:],**kwds)
#if hasattr(npd,'mask'):
#print self.url," has a mask layer"
#else:
#print self.url," does not have a mask layer"
#cancel if there is no data
if npd is None:
return
##check which flavor of climate data we are dealing with
ocgShape = len(npd.shape)
## will hold feature dictionaries
features = []
## partial pixels
recombine = {}
## the unique identified iterator
ids = self._itr_id_()
gpass = True
if dissolve:
## one feature is created for each unique time
for kk in range(len(self._idxtime)):
## check if this is the first iteration. approach assumes that
## masked values are homogenous through the time layers. this
## avoids multiple union operations on the geometries. i.e.
## time 1 = masked, time 2 = masked, time 3 = masked
## vs.
## time 1 = 0.5, time 2 = masked, time 3 = 0.46
if kk == 0:
## on the first iteration:
## 1. make the unioned geometry
## 2. weight the data according to area
## reference layer for the masked data
lyr = None
if ocgShape==3:
lyr = npd[kk,:,:]
elif ocgShape==4:
lyr = npd[kk,0,:,:]
## select values with spatial overlap and not masked
if hasattr(lyr,'mask'):
select = self._mask*np.invert(lyr.mask)
else:
select = self._mask
#cut out partial values
if not clip:
pselect = select*self._pgrid
select *= np.invert(self._pgrid)
## select those geometries
geoms = self._igrid[select]
#print geoms
if len(geoms)>0:
## union the geometries
unioned = cascaded_union([p for p in geoms])
## select the weight subset and normalize to unity
sub_weights = self._weights*select
#print sub_weights
#print unioned.area
self._weights = sub_weights/sub_weights.sum()
## apply the weighting
weighted = npd*self._weights
#print self._weights
#print weighted
#print (npd*sub_weights).sum()
#print select.sum()
#weighted = npd/sub_weights.sum()*sub_weights
else:
gpass = False
## generate the feature
#only bother with dissolve if there are one or more features that
#fully intersect the AoI
if gpass:
if ocgShape==3:
feature = dict(
id=ids.next(),
geometry=unioned,
properties=dict({var:float(weighted[kk,:,:].sum()),
'timestamp':self.timevec[self._idxtime[kk]]}))
elif ocgShape==4:
feature = dict(
id=ids.next(),
geometry=unioned,
properties=dict({var:float(list(weighted[kk,x,:,:].sum() for x in xrange(len(levels)))),
'timestamp':self.timevec[self._idxtime[kk]],
'levels':list(x for x in self.levels[levels])}))
#record the weight used so the geometry can be
#properly recombined later
if not(parent == None) and dissolve:
feature['weight']=sub_weights.sum()
features.append(feature)
#Record pieces that partially cover geometry so duplicates
#can later be filtered out the the unique values recombined
if not clip:
for ii,jj in self._itr_array_(pselect):
if self._pgrid[ii,jj]:
ctr = self._jgrid[ii,jj]
if kk==0:
recombine[ctr] = []
if ocgShape==3:
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
weight=1.0,
properties=dict({var:float(npd[kk,ii,jj]),
'timestamp':self.timevec[self._idxtime[kk]]}))
#print npd[kk,ii,jj]
if ocgShape==4:
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
weight=1.0,
properties=dict({var:float(list(npd[kk,x,ii,jj] for x in xrange(len(levels)))),
'timestamp':self.timevec[self._idxtime[kk]],
'level':list(x for x in self.levels[levels])}))
recombine[ctr].append(feature)
else:
#print self._mask
ctr = None
## loop for each feature. no dissolving.
for ii,jj in self._itr_array_(self._mask):
## if the data is included, add the feature
if self._mask[ii,jj] == True:
#if the geometry has a fraction of a pixel, the other factions could be handled by a different thread
#these must be recombined later, or if it's not clipped there will be duplicates to filter out
if self._weights[ii,jj] < 1 or not clip:
#tag the location this data value is at so it can be compared later
ctr = self._jgrid[ii,jj]
recombine[ctr] = []
## extract the data and convert any mask values
#print ocgShape
if ocgShape == 3:
#if hasattr(npd,'mask'):
#print np.invert(npd.mask)
#else:
#print 'no mask found'
data = [self._is_masked_(da) for da in npd[:,ii,jj]]
#print data
for kk in range(len(data)):
## do not add the feature if the value is a NoneType
if data[kk] == None: continue
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
properties=dict({var:float(data[kk]),
'timestamp':self.timevec[self._idxtime[kk]]}))
#if the data point covers a partial pixel or isn't clipped add it to the recombine set, otherwise leave it alone
if self._weights[ii,jj] < 1 or (self._pgrid[ii,jj] and not clip):
recombine[ctr].append(feature)
else:
features.append(feature)
elif ocgShape == 4:
#if hasattr(npd,'mask'):
#print np.invert(npd.mask)
#else:
#print 'no mask found'
if self._weights[ii,jj] < 1 or not clip:
ctr = self._jgrid[ii,jj]
recombine[ctr] = []
data = [self._is_masked2_(da) for da in npd[:,:,ii,jj]]
#print data
for kk in range(len(data)):
## do not add the feature if the value is a NoneType
if data[kk] == None: continue
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
properties=dict({var:list(float(data[kk][x]) for x in xrange(len(levels))),
'timestamp':self.timevec[self._idxtime[kk]],
'level':list(x for x in self.levels[levels])}))
#q.put(feature)
if self._weights[ii,jj] < 1 or (self._pgrid[ii,jj] and not clip):
recombine[ctr].append(feature)
else:
features.append(feature)
if self.verbose>1: print('extraction complete.')
if not(parent == None) and dissolve:
q.put((parent,features,recombine))
else:
q.put((features,recombine))
return
#sys.exit(0)
#return(features)
def _itr_id_(self,start=1):
while True:
try:
yield start
finally:
start += 1
def as_geojson(elements):
    """Serialize extracted elements to a GeoJSON FeatureCollection string.

    NOTE: mutates each element in place by stringifying its timestamp,
    since datetime objects are not JSON-serializable.
    """
    for element in elements:
        element['properties']['timestamp'] = str(element['properties']['timestamp'])
    features = [geojson.Feature(**element) for element in elements]
    collection = geojson.FeatureCollection(features)
    return geojson.dumps(collection)
def as_shp(elements, path=None):
    """Write the extracted elements to an ESRI shapefile.

    path -- target .shp path; a temporary file is created when omitted.
    Returns the path written to.
    """
    target = path if path is not None else get_temp_path(suffix='.shp')
    OpenClimateShp(target, elements).write()
    return target
def as_tabular(elements,var,wkt=False,wkb=False,path = None):
    '''Write extracted elements to a flat CSV-style text file.

    Each row holds: id, timestamp, value[, level], area[, wkb][, wkt].
    Geometry output (wkb/wkt) is optional.

    elements -- feature dictionaries produced by extract_elements
    var -- key of the variable inside element['properties']
    path -- output file path; a temporary file is created when omitted.
    Returns the path written to.
    '''
    import osgeo.ogr as ogr
    if path is None:
        path = get_temp_path(suffix='.txt')
    #define spatial references for the projection
    sr = ogr.osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    sr2 = ogr.osr.SpatialReference()
    sr2.ImportFromEPSG(3005) #Albers Equal Area is used to ensure legitimate area values
    with open(path,'w') as f:
        for ii,element in enumerate(elements):
            #convert area from degrees to m^2 by reprojecting the geometry
            geo = ogr.CreateGeometryFromWkb(element['geometry'].wkb)
            geo.AssignSpatialReference(sr)
            geo.TransformTo(sr2)
            area = geo.GetArea()
            #write id, timestamp, variable
            f.write(','.join([repr(ii+1),element['properties']['timestamp'].strftime("%Y-%m-%d %H:%M:%S"),repr(element['properties'][var])]))
            #write level if the dataset has levels (no .keys() needed)
            if 'level' in element['properties']:
                f.write(','+repr(element['properties']['level']))
            #write the area
            f.write(','+repr(area))
            #write wkb if requested
            if wkb:
                f.write(','+repr(element['geometry'].wkb))
            #write wkt if requested
            if wkt:
                f.write(','+repr(element['geometry'].wkt))
            f.write('\n')
    #the context manager closes the file; the redundant f.close() was removed
    return path
def as_keyTabular(elements,var,wkt=False,wkb=False,path = None):
    '''Write output as three tabular csv files, using foreign keys on time
    and geometry to reduce file size.

    Produces <path>_time.txt (id,time), <path>_geometry.txt
    (id,area[,wkt][,wkb]) and <path>_data.txt
    (element id, time key, geometry key, value[, level]).

    NOTE(review): `locs` is a set, so geometry key numbering depends on set
    iteration order and is not reproducible across runs -- confirm callers
    do not rely on a stable ordering.
    '''
    import osgeo.ogr as ogr
    if path is None:
        path = get_temp_path(suffix='')
    # strip an existing extension; the three suffixes are appended below
    if len(path)>4 and path[-4] == '.':
        path = path[:-4]
    patht = path+"_time.txt"
    pathg = path+"_geometry.txt"
    pathd = path+"_data.txt"
    #define spatial references for the projection (4326 -> 3005 for areas)
    sr = ogr.osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    sr2 = ogr.osr.SpatialReference()
    sr2.ImportFromEPSG(3005)
    data = {}
    #sort the data into dictionaries so common times and geometries can be identified
    for ii,element in enumerate(elements):
        #record new element ids (otherwise threads will produce copies of ids)
        element['id']=ii
        #get the time and geometry
        time = element['properties']['timestamp'].strftime("%Y-%m-%d %H:%M:%S")
        ewkt = element['geometry'].wkt
        if not (time in data):
            data[time] = {}
        #put the data into the dictionary, grouped by time then geometry
        if not (ewkt in data[time]):
            data[time][ewkt] = [element]
        else:
            data[time][ewkt].append(element)
    #get a unique set of geometry keys
    locs = []
    for key in data:
        locs.extend(data[key].keys())
    locs = set(locs)
    ft = open(patht,'w')
    fg = open(pathg,'w')
    fd = open(pathd,'w')
    #write the features to file
    for ii,time in enumerate(data.keys()):
        #write out id's and time values to the time file
        tdat = data[time]
        ft.write(repr(ii+1)+','+time+'\n')
        for jj,loc in enumerate(locs):
            #the geometry file only needs to be written once (first time step)
            if ii==0:
                #find the geometry area in the equal-area projection
                geo = ogr.CreateGeometryFromWkt(loc)
                geo.AssignSpatialReference(sr)
                geo.TransformTo(sr2)
                #write the id and area
                fg.write(repr(jj+1))
                fg.write(','+repr(geo.GetArea()))
                #write out optional geometry
                if wkt:
                    fg.write(','+loc)
                if wkb:
                    fg.write(','+repr(ogr.CreateGeometryFromWkt(loc).ExportToWkb()))
                fg.write('\n')
            if loc in tdat:
                for element in tdat[loc]:
                    #write out id, foreign keys (time then geometry) and the variable value
                    fd.write(','.join([repr(element['id']),repr(ii+1),repr(jj+1),repr(element['properties'][var])]))
                    #write out level if appropriate
                    if 'level' in element['properties']:
                        fd.write(','+repr(element['properties']['level']))
                    fd.write('\n')
    ft.close()
    fg.close()
    fd.close()
#['AddGeometry', 'AddGeometryDirectly', 'AddPoint', 'AddPoint_2D', 'AssignSpatialReference', 'Buffer',
#'Centroid', 'Clone', 'CloseRings', 'Contains', 'ConvexHull', 'Crosses', 'Destroy', 'Difference',
#'Disjoint', 'Distance', 'Empty', 'Equal', 'ExportToGML', 'ExportToJson', 'ExportToKML', 'ExportToWkb',
#'ExportToWkt', 'FlattenTo2D', 'GetArea', 'GetBoundary', 'GetCoordinateDimension', 'GetDimension',
#'GetEnvelope', 'GetGeometryCount', 'GetGeometryName', 'GetGeometryRef', 'GetGeometryType', 'GetPoint',
#'GetPointCount', 'GetPoint_2D', 'GetSpatialReference', 'GetX', 'GetY', 'GetZ', 'Intersect', 'Intersection',
#'IsEmpty', 'IsRing', 'IsSimple', 'IsValid', 'Overlaps', 'Segmentize', 'SetCoordinateDimension', 'SetPoint',
#'SetPoint_2D', 'SymmetricDifference', 'Touches', 'Transform', 'TransformTo', 'Union', 'Within', 'WkbSize',
#'__class__', '__del__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattr__', '__getattribute__',
#'__hash__', '__init__', '__iter__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
#'__setattr__', '__setstate__', '__sizeof__', '__str__', '__subclasshook__', '__swig_destroy__',
#'__swig_getmethods__', '__swig_setmethods__', '__weakref__', 'next', 'this']
def multipolygon_multicore_operation(dataset,var,polygons,time_range=None,clip=None,dissolve=None,levels = None,ocgOpts=None,subdivide=False,subres='detect',verbose=1):
elements = []
ret = []
q = Queue()
l = Lock()
pl = []
#set the file reset option if the file is local
if not('http:' in dataset or 'www.' in dataset):
if ocgOpts == None:
ocgOpts = {}
ocgOpts['multiReset'] = True
ocgOpts['verbose'] = verbose
ncp = OcgDataset(dataset,**ocgOpts)
#if no polygon was specified
#create a polygon covering the whole area so that the job can be split
if polygons == [None]:
polygons = [Polygon(((ncp.col_bnds.min(),ncp.row_bnds.min()),(ncp.col_bnds.max(),ncp.row_bnds.min()),(ncp.col_bnds.max(),ncp.row_bnds.max()),(ncp.col_bnds.min(),ncp.row_bnds.max())))]
for ii,polygon in enumerate(polygons):
if verbose>1: print(ii)
#skip invalid polygons
if not polygon.is_valid:
if verbose>0: print "Polygon "+repr(ii+1)+" is not valid. "+polygon.wkt
continue
#if polygons have been specified and subdivide is True, each polygon will be subdivided
#into a grid with resolution of subres. If subres is undefined the resolution is half the square root of the area of the polygons envelope, or approximately 4 subpolygons
if subdivide and not(polygons == None):
#figure out the resolution and subdivide
#default value uses sqrt(polygon envelop area)
#generally resulting in 4-6 threads per polygon
if subres == 'detect':
subpolys = make_shapely_grid(polygon,sqrt(polygon.envelope.area)/2.0,clip=True)
else:
subpolys = make_shapely_grid(polygon,subres,clip=True)
#generate threads for each subpolygon
# subpolys = [subpolys[1]] ## tdk
for poly in subpolys:
## during regular gridding used to create sub-polygons, a polygon
## may not intersect the actual extraction extent returning None
## in the process as opposed to a Polygon. skip the Nones.
if poly is None: continue
## continue generating threads
if clip is False:
poly2 = poly.intersection(polygon.envelope)
if poly2 is None: continue
## tdk #####################
# ncp.extract_elements(q,var,lock=l,polygon=poly,time_range=time_range,clip=clip,dissolve=dissolve,levels=levels,parentPoly=11)
############################
p = Process(target = ncp.extract_elements,
args = (q,
var,),
kwargs= {
'lock':l,
'polygon':poly,
'time_range':time_range,
'clip':clip,
'dissolve':dissolve,
'levels' : levels,
'parentPoly':ii})
p.start()
pl.append(p)
#if no polygons are specified only 1 thread will be created per polygon
else:
p = Process(target = ncp.extract_elements,
args = (
q,
var,),
kwargs= {
'lock':l,
'polygon':polygon,
'time_range':time_range,
'clip':clip,
'dissolve':dissolve,
'levels' : levels,
'parentPoly':ii})
p.start()
pl.append(p)
#for p in pl:
#p.join()
#consumer loop, the main process will grab any feature lists added by the
#processing threads and continues until those threads have terminated.
#without this the processing threads will NOT terminate
a=True
while a:
a=False
#check if any threads are still active
for p in pl:
a = a or p.is_alive()
#remove anything from the queue if present
while not q.empty():
ret.append(q.get())
#give the threads some time to process more stuff
time.sleep(.1)
#The subdivided geometry must be recombined into the original polygons
if dissolve:
groups = {}
#form groups of elements based on which polygon they belong to
for x in ret:
if not x[0] in groups:
groups[x[0]] = []
groups[x[0]].append((x[1],x[2]))
#print '>',groups.keys()
#print groups
#for each group, recombine the geometry and average the data points
for x in groups.keys():
#for y in groups[x]:
#print len(y[0])
group = [y[0] for y in groups[x] if len(y[0])>0]
#print groups[x][0][1]
recombine ={}
for y in groups[x]:
recombine.update(y[1])
for key in recombine.keys():
group.append(recombine[key])
#recombine the geometry using the first time period
total = cascaded_union([y[0]['geometry'] for y in group])
#form subgroups consisting of subpolygons that cover the same time period
subgroups = [[g[t] for g in group] for t in xrange(len(group[0]))]
ta = sum([y['weight'] for y in subgroups[0]])
#print 't',ta
#average the data values and form new features
for subgroup in subgroups:
if not(levels == None):
avg = [sum([y['properties'][var][z]*(y['weight']/ta) for y in subgroup]) for z in xrange(len(levels))]
elements.append( dict(
id=subgroup[0]['id'],
geometry=total,
properties=dict({VAR: avg,
'timestamp':subgroup[0]['properties']['timestamp'],
'level': subgroup[0]['properties']['levels']})))
#print total.area
#print avg
else:
#print (y['weight']/ta)
avg = sum([y['properties'][var]*(y['weight']/ta) for y in subgroup])
elements.append( dict(
id=subgroup[0]['id'],
geometry=total,
properties=dict({var:float(avg),
'timestamp':subgroup[0]['properties']['timestamp']})))
#handle recombining undissolved features
else:
recombine = []
#pull out unique elements and potentially repeated elements
for x in ret:
elements.extend(x[0])
recombine.append(x[1])
#get a list of all unique locations
keylist = []
for x in recombine:
keylist.extend(x.keys())
keylist = set(keylist)
#find all the locations that have duplicated features
for key in keylist:
cur = []
for x in recombine:
if key in x:
cur.append(x[key])
#print cur
#if there is only 1 feature, it is unique so toss it into the element list
if len(cur)==1:
elements.extend(cur[0])
else:
#if clip=False then all the features are identical, pick one and discard the rest
if not clip:
elements.extend(cur[0])
#if clip=True then the features have the same values but the geometry is fragmented
else:
#recombine the geometry
geo = cascaded_union([x[0]['geometry'] for x in cur])
#pick a feature, update the geometry, and discard the rest
for x in cur[0]:
x['geometry'] = geo
elements.append(x)
elements2 = []
#expand elements in the case of multi-level data
dtime = time.time()
if not (levels == None):
for x in elements:
#create a new feature for each data level
for i in xrange(len(levels)):
e = x.copy()
e['properties'] = x['properties'].copy()
e['properties'][var] = e['properties'][var][i]
e['properties']['level'] = e['properties']['level'][i]
elements2.append(e)
else:
elements2 = elements
if verbose>1: print "expansion time: ",time.time()-dtime
if verbose>1: print "points: ",repr(len(elements2))
return(elements2)
def make_shapely_grid(poly,res,as_numpy=False,clip=True):
    """
    Return a list or NumPy matrix of shapely Polygon objects.

    poly -- shapely Polygon to discretize
    res -- target grid resolution in the same units as |poly|
    as_numpy -- if True return the 2-d array of cells instead of a flat list
    clip -- if True each cell is intersected with |poly|; cells that miss
        the polygon entirely come back as None
    """
    ## ensure we have a floating point resolution
    res = float(res)
    ## check that the target polygon is a valid geometry
    assert(poly.is_valid)
    ## vectorize the polygon creation
    vfunc_poly = np.vectorize(make_poly_array)#,otypes=[np.object])
    ## prepare the geometry for faster spatial relationship checking. throws a
    ## a warning so leaving out for now.
    # prepped = prep(poly)
    ## extract bounding coordinates of the polygon
    min_x,min_y,max_x,max_y = poly.envelope.bounds
    ## convert to matrices of lower-left cell corners
    X,Y = np.meshgrid(np.arange(min_x,max_x,res),
                      np.arange(min_y,max_y,res))
    ## shift by the resolution to get the upper-right corners
    pmin_x = X
    pmax_x = X + res
    pmin_y = Y
    pmax_y = Y + res
    ## make the 2-d array of cell polygons (clipped to poly when requested)
    if clip:
        poly_array = vfunc_poly(pmin_y,pmin_x,pmax_y,pmax_x,poly)
    else:
        poly_array = vfunc_poly(pmin_y,pmin_x,pmax_y,pmax_x)
    ## format according to configuration arguments
    if as_numpy:
        ret = poly_array
    else:
        ret = list(poly_array.reshape(-1))
    return(ret)
def make_poly_array(min_row,min_col,max_row,max_col,polyint=None):
    """Build a closed rectangular cell Polygon, optionally clipped.

    min_row/max_row -- y extent; min_col/max_col -- x extent
    polyint -- optional clipping polygon; when given, the result is the
        intersection with it, or None when the cell misses it entirely.
    """
    ret = Polygon(((min_col,min_row),
                   (max_col,min_row),
                   (max_col,max_row),
                   (min_col,max_row),
                   (min_col,min_row)))
    if polyint is not None:
        # `not ...` instead of comparing a boolean to False
        if not polyint.intersects(ret):
            ret = None
        else:
            ret = polyint.intersection(ret)
    return(ret)
def shapely_to_shp(obj,outname):
    """Dump a single shapely geometry to /tmp/<outname>.shp in EPSG:4326.

    Debug helper; raises IOError when the datasource cannot be created
    (e.g. the file already exists).
    """
    path = os.path.join('/tmp',outname+'.shp')
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    # OGR geometry type code; 3 corresponds to wkbPolygon
    ogr_geom = 3
    dr = ogr.GetDriverByName('ESRI Shapefile')
    ds = dr.CreateDataSource(path)
    try:
        if ds is None:
            raise IOError('Could not create file on disk. Does it already exist?')
        layer = ds.CreateLayer('lyr',srs=srs,geom_type=ogr_geom)
        feature_def = layer.GetLayerDefn()
        feat = ogr.Feature(feature_def)
        feat.SetGeometry(ogr.CreateGeometryFromWkt(obj.wkt))
        layer.CreateFeature(feat)
    finally:
        # flush and close the datasource even on failure
        ds.Destroy()
if __name__ == '__main__':
    ## ad-hoc driver / benchmark for the extraction machinery above
    narg = time.time()
    ## example areas of interest, kept for reference -- uncomment to use:
    ## all
    # POLYINT = Polygon(((-99,39),(-94,38),(-94,40),(-100,39)))
    ## great lakes
    #POLYINT = Polygon(((-90.35,40.55),(-83,43),(-80.80,49.87),(-90.35,49.87)))
    #POLYINT = Polygon(((-90,30),(-70,30),(-70,50),(-90,50)))
    #POLYINT = Polygon(((-130,18),(-60,18),(-60,98),(-130,98)))
    ## None selects the full grid
    POLYINT = None
    ## source dataset: a local file here; remote OPeNDAP URLs also work
    NC = 'test.nc'
    ## temporal selection window
    TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,5,1)]
    DISSOLVE = False
    CLIP = False
    ## variable to extract
    VAR = 'Prcp'
    ## dataset-specific overrides forwarded to the OcgDataset constructor
    kwds = {
        #'rowbnds_name': 'lat_bnds',
        #'colbnds_name': 'lon_bnds',
        'time_units': 'days since 1950-1-1 0:0:0.0',
        #'level_name': 'lev'
    }
    ## None for single-level variables; e.g. range(0,10) for 4-d data
    LEVELS = None
    ## open the dataset for reading
    dataset = NC#nc.Dataset(NC,'r')
    ## make iterable if only a single polygon requested
    if type(POLYINT) not in (list,tuple): POLYINT = [POLYINT]
    ## convenience function for multiple polygons
    elements = multipolygon_multicore_operation(dataset,
                                        VAR,
                                        POLYINT,
                                        time_range=TEMPORAL,
                                        clip=CLIP,
                                        dissolve=DISSOLVE,
                                        levels = LEVELS,
                                        ocgOpts=kwds,
                                        subdivide=True,
                                        #subres = 90
                                        )
    ## time the tabular write separately from the extraction
    dtime = time.time()
    as_keyTabular(elements,VAR,path='./out_tabular.txt',wkt=True)
    dtime = time.time()-dtime
    blarg = time.time()
    ## total runtime, write time, extraction time
    print blarg-narg,dtime,blarg-narg-dtime
| OpenSource-/OpenClimateGIS | src/openclimategis/util/ncconv/OLD_ncconv/in_memory_oo_multi_core.py | Python | bsd-3-clause | 48,807 | [
"NetCDF"
] | def69ee51aa85b63d439d8c150a32862969e482a7eb0407a11ea7bb4238fb02c |
#!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
import os
import re
import molbiox
import molbiox.kb
import molbiox.lib
from molbiox.settings import all_templates
mbx_root = os.path.dirname(molbiox.__file__)
def locate_template(tplname, new=False):
    """Resolve a template name.

    With ``new=False`` the full path of the template file inside the
    ``molbiox/templates`` directory is returned; with ``new=True`` the
    filename of the file to be generated from that template is returned
    instead (a leading ``s.`` prefix becomes ``run-``).

    :param tplname: template name, as key in `molbiox.settings.all_templates`
    :param new: if True, return the filename for the to-be-generated file
    :return: a string
    """
    filename = all_templates.get(tplname, tplname)
    if not new:
        template_dir = os.path.join(os.path.dirname(molbiox.__file__), 'templates')
        return os.path.join(template_dir, filename)
    return re.sub(r'^s\.', 'run-', filename)
def locate_tests(relpath=''):
    """Find the project tests directory by walking up from the CWD.

    The directory is identified by a ``.mbx_tests_dir`` marker file;
    ``relpath`` is joined onto it. Raises IOError when the filesystem
    root is reached without finding the marker.
    """
    marker = '.mbx_tests_dir'
    current = os.getcwd()
    while True:
        if marker in os.listdir(current):
            return os.path.join(current, relpath)
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root
            raise IOError('cannot locate tests dir')
        current = parent
def locate_lib(name):
    """Return the absolute path of resource ``name`` inside ``molbiox.lib``.

    Raises IOError when no such file exists.
    """
    lib_dir = os.path.dirname(os.path.abspath(molbiox.lib.__file__))
    resource = os.path.join(lib_dir, name)
    if not os.path.isfile(resource):
        raise IOError('cannot find lib named {}'.format(name))
    return resource
def locate_submat(name):
    """Return the absolute path of substitution matrix ``name``.

    Matrices live in ``molbiox/kb/matrices`` and mirror the NCBI set:
    ftp://ftp.ncbi.nih.gov/blast/matrices/
    Raises IOError when the matrix file does not exist.
    """
    kb_dir = os.path.dirname(os.path.abspath(molbiox.kb.__file__))
    matrix_path = os.path.join(kb_dir, 'matrices', name)
    if not os.path.isfile(matrix_path):
        raise IOError('cannot find substitution matrix named {}'.format(name))
    return matrix_path
| frozflame/molbiox | molbiox/frame/locate.py | Python | gpl-2.0 | 1,801 | [
"BLAST"
] | d295f70865c4f794ff89eb6683fb543dc453d60d18568d2d022f2c508529141f |
#-*- coding: iso-8859-15 -*-
# SADR METEOLLSKY
# http://www.sadr.fr
# SEBASTIEN LECLERC 2017
# Inspired by :
# NACHO MAS 2013
# http://induino.wordpress.com
# Config file for the all-sky / weather-station scripts
##### INDI RELATED #####
# Hostname of the INDI server. Use 'localhost' to start a local indiserver;
# any other value connects to an already-running remote server.
#INDISERVER="localhost"
INDISERVER="allsky.sadr"
# TCP port of the INDI server (7624 is the INDI default)
INDIPORT="7624"
# INDI device name of the all-sky camera
INDIDEVICE="QHY CCD QHY5LII-C-6127d"
##### ARDUINO RELATED ####
# Serial port the sensor Arduino is attached to
DEVICEPORT="/dev/ttyACM0"
##### SITE RELATED ####
OWNERNAME="SADR"
SITENAME="HACIENDA DES ETOILES"
# Site elevation in metres
ALTITUDE=1540
# Atmospheric sounding station id.
# Visit http://weather.uwyo.edu/upperair/sounding.html and pick the
# sounding location closest to your site.
SOUNDINGSTATION="07510"
##### RRD RELATED #####
# Output directory for the generated charts
CHARTPATH="/var/www/html/CHART/"
#CHARTPATH="/media/freebox/Projets/Astronomie/SADR/Allsky/2_Travail/SADR/raspberry/allskySCRIPT/"
# EUMETSAT latest-image URL. Choose one from:
# http://oiswww.eumetsat.org/IPPS/html/latestImages.html
# Natural color is nice but only works in daylight:
#EUMETSAT_LAST="http://oiswww.eumetsat.org/IPPS/html/latestImages/EUMETSAT_MSG_RGB-naturalcolor-westernEurope.jpg"
# This one shows rain:
#EUMETSAT_LAST="http://oiswww.eumetsat.org/IPPS/html/latestImages/EUMETSAT_MSG_MPE-westernEurope.jpg"
# IR 3.9 shows cloud cover and works at night:
EUMETSAT_LAST="http://oiswww.eumetsat.org/IPPS/html/latestImages/EUMETSAT_MSG_IR039E-westernEurope.jpg"
##### ALLSKY PICTURE RELATED #####
# SADR watermark overlaid on the all-sky image
WATERMARK="image/watermark.png"
##### PUSHETTA RELATED #####
# NOTE(review): hard-coded API credential committed to version control --
# consider moving this secret out of the repo and rotating the key.
API_KEY="57a4fc6d834526367da533545287aea54468b311"
CHANNEL_NAME="SADR Meteollsky"
| broadcastyourseb/SADR | raspberry/allskySCRIPT/meteollskyconfig.py | Python | apache-2.0 | 1,549 | [
"VisIt"
] | a07912dbdcf7e40b5c95a17d69b68cab3e2d1242465f9369aba214c204a4e05b |
from sympy import (meijerg, I, S, integrate, Integral, oo, gamma, cosh, sinc,
hyperexpand, exp, simplify, sqrt, pi, erf, erfc, sin, cos,
exp_polar, polygamma, hyper, log, expand_func, Rational)
from sympy.integrals.meijerint import (_rewrite_single, _rewrite1,
meijerint_indefinite, _inflate_g, _create_lookup_table,
meijerint_definite, meijerint_inversion)
from sympy.utilities import default_sort_key
from sympy.utilities.pytest import slow
from sympy.utilities.randtest import (verify_numerically,
random_complex_number as randcplx)
from sympy.core.compatibility import range
from sympy.abc import x, y, a, b, c, d, s, t, z
def test_rewrite_single():
    """_rewrite_single should factor the G-function argument into a
    coefficient times a power of x, and return None when that is not
    possible."""
    def t(expr, c, m):
        # successful rewrite: argument factors as c*(m,)
        e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x)
        assert e is not None
        assert isinstance(e[0][0][2], meijerg)
        assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,))
    def tn(expr):
        # arguments that cannot be factored must yield None
        assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None
    t(x, 1, x)
    t(x**2, 1, x**2)
    t(x**2 + y*x**2, y + 1, x**2)
    tn(x**2 + x)
    tn(x**y)
    def u(expr, x):
        # numeric round-trip: the rewritten sum must equal the original
        from sympy import Add, exp, exp_polar
        r = _rewrite_single(expr, x)
        e = Add(*[res[0]*res[2] for res in r[0]]).replace(
            exp_polar, exp)  # XXX Hack?
        assert verify_numerically(e, expr, x)
    u(exp(-x)*sin(x), x)
    # The following has stopped working because hyperexpand changed slightly.
    # It is probably not worth fixing
    #u(exp(-x)*sin(x)*cos(x), x)
    # This one cannot be done numerically, since it comes out as a g-function
    # of argument 4*pi
    # NOTE This also tests a bug in inverse mellin transform (which used to
    # turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of
    # exp_polar).
    #u(exp(x)*sin(x), x)
    assert _rewrite_single(exp(x)*sin(x), x) == \
        ([(-sqrt(2)/(2*sqrt(pi)), 0,
           meijerg(((Rational(-1, 2), 0, Rational(1, 4), S.Half, Rational(3, 4)), (1,)),
                   ((), (Rational(-1, 2), 0)), 64*exp_polar(-4*I*pi)/x**4))], True)
def test_rewrite1():
    """_rewrite1 factors out the constant and the power of x in front of
    a G-function and collects the argument."""
    expression = x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5
    expected = (5, x**3,
                [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], True)
    assert _rewrite1(expression, x) == expected
def test_meijerint_indefinite_numerically():
    """Antiderivatives of fac*meijerg(...) verified by numeric
    differentiation at random complex points."""
    def check(coeff, argument):
        integrand = coeff*meijerg([a], [b], [c], [d], argument)
        # Keep the random values small so the argument stays on the
        # principal branch.
        replacements = {a: randcplx()/10, b: randcplx()/10 + I,
                        c: randcplx(), d: randcplx()}
        antiderivative = meijerint_indefinite(integrand, x)
        assert antiderivative is not None
        assert verify_numerically(integrand.subs(replacements),
                                  antiderivative.diff(x).subs(replacements), x)
    check(1, x)
    check(2, x)
    check(1, 2*x)
    check(1, x**2)
    check(5, x**S('3/2'))
    check(x**3, x)
    check(3*x**S('3/2'), 4*x**S('7/3'))
def test_meijerint_definite():
    """Degenerate ranges (equal endpoints) must integrate to zero with a
    True convergence condition."""
    for lower, upper in ((0, 0), (oo, oo)):
        value, condition = meijerint_definite(x, x, lower, upper)
        assert value.is_zero
        assert condition is True
def test_inflate():
    """_inflate_g rewrites a G-function of argument z as a product of
    G-functions of argument z**n; verified numerically."""
    subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(),
            d: randcplx(), y: randcplx()/10}
    def t(a, b, arg, n):
        from sympy import Mul
        m1 = meijerg(a, b, arg)
        m2 = Mul(*_inflate_g(m1, n))
        # NOTE: (the random number)**9 must still be on the principal sheet.
        # Thus make b&d small to create random numbers of small imaginary part.
        return verify_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1)
    assert t([[a], [b]], [[c], [d]], x, 3)
    assert t([[a, y], [b]], [[c], [d]], x, 3)
    assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3)
def test_recursive():
    """Products of Gaussians: integrals that exercise the recursive
    (repeated) Mellin-transform machinery."""
    from sympy import symbols
    a, b, c = symbols('a b c', positive=True)
    r = exp(-(x - a)**2)*exp(-(x - b)**2)
    e = integrate(r, (x, 0, oo), meijerg=True)
    assert simplify(e.expand()) == (
        sqrt(2)*sqrt(pi)*(
        (erf(sqrt(2)*(a + b)/2) + 1)*exp(-a**2/2 + a*b - b**2/2))/4)
    # Same product with an extra exponential factor exp(c*x).
    e = integrate(exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo), meijerg=True)
    assert simplify(e) == (
        sqrt(2)*sqrt(pi)*(erf(sqrt(2)*(2*a + 2*b + c)/4) + 1)*exp(-a**2 - b**2
        + (2*a + 2*b + c)**2/8)/4)
    # Shifted Gaussians over the half line reduce to erf.
    assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo), meijerg=True)) == \
        sqrt(pi)/2*(1 + erf(a + b + c))
    assert simplify(integrate(exp(-(x + a + b + c)**2), (x, 0, oo), meijerg=True)) == \
        sqrt(pi)/2*(1 - erf(a + b + c))
@slow
def test_meijerint():
    """Assorted regression tests for the Meijer G-function integration
    machinery: definite/indefinite integrals, limit substitutions,
    convergence conditions and several historical bugs."""
    from sympy import symbols, expand, arg
    s, t, mu = symbols('s t mu', real=True)
    assert integrate(meijerg([], [], [0], [], s*t)
                     *meijerg([], [], [mu/2], [-mu/2], t**2/4),
                     (t, 0, oo)).is_Piecewise
    s = symbols('s', positive=True)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) == \
        gamma(s + 1)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo),
                     meijerg=True) == gamma(s + 1)
    assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x),
                                (x, 0, oo), meijerg=False),
                      Integral)
    assert meijerint_indefinite(exp(x), x) == exp(x)
    # TODO what simplifications should be done automatically?
    # This tests "extra case" for antecedents_1.
    a, b = symbols('a b', positive=True)
    assert simplify(meijerint_definite(x**a, x, 0, b)[0]) == \
        b**(a + 1)/(a + 1)
    # This tests various conditions and expansions:
    # (was a bare comparison with no ``assert``, so it checked nothing)
    assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True)
    # Again, how about simplifications?
    sigma, mu = symbols('sigma mu', positive=True)
    i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo)
    assert simplify(i) == sqrt(pi)*sigma*(2 - erfc(mu/(2*sigma)))
    assert c == True
    i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo)
    # TODO it would be nice to test the condition
    assert simplify(i) == 1/(mu - sigma)
    # Test substitutions to change limits
    assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True)
    # Note: causes a NaN in _check_antecedents
    assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1
    assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \
        1 - exp(-exp(I*arg(x))*abs(x))
    # Test -oo to oo
    assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True)
    assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True)
    assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \
        (sqrt(pi)/2, True)
    assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True)
    assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2),
                              x, -oo, oo) == (1, True)
    assert meijerint_definite(sinc(x)**2, x, -oo, oo) == (pi, True)
    # Test one of the extra conditions for 2 g-functions
    assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S.Half, True)
    # Test a bug
    def res(n):
        return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n
    for n in range(6):
        assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \
            res(n)
    # This used to test trigexpand... now it is done by linear substitution
    assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True)
                    ) == sqrt(2)*sin(a + pi/4)/2
    # Test the condition 14 from prudnikov.
    # (This is besselj*besselj in disguise, to stop the product from being
    # recognised in the tables.)
    a, b, s = symbols('a b s')
    from sympy import And, re
    assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4)
                              *meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo) == \
        (4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
         /(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
           *gamma(a/2 + b/2 - s + 1)),
         And(0 < -2*re(4*s) + 8, 0 < re(a/2 + b/2 + s), re(2*s) < 1))
    # test a bug
    assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \
        Integral(sin(x**a)*sin(x**b), (x, 0, oo))
    # test better hyperexpand
    assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \
        (sqrt(pi)*polygamma(0, S.Half)/4).expand()
    # Test hyperexpand bug.
    from sympy import lowergamma
    n = symbols('n', integer=True)
    assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \
        lowergamma(n + 1, x)
    # Test a bug with argument 1/x
    alpha = symbols('alpha', positive=True)
    assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \
        (sqrt(pi)*alpha*gamma(alpha + 1)*meijerg(((), (alpha/2 + S.Half,
         alpha/2 + 1)), ((0, 0, S.Half), (Rational(-1, 2),)), alpha**2/16)/4, True)
    # test a bug related to 3016
    a, s = symbols('a s', positive=True)
    assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \
        a**(-s/2 - S.Half)*((-1)**s + 1)*gamma(s/2 + S.Half)/2
def test_bessel():
    """Integrals involving Bessel J/I functions: orthogonality relations
    and indefinite integrals from Rosenheinrich's tables."""
    from sympy import besselj, besseli
    # Orthogonality-type integral of two Bessel J functions.
    assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == \
        2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b))
    assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == 1/(2*a)
    # TODO more orthogonality integrals
    # An integral representation of besselj(y, z).
    assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S.Half)),
                              (x, 1, oo), meijerg=True, conds='none')
                    *2/((z/2)**y*sqrt(pi)*gamma(S.Half - y))) == \
        besselj(y, z)
    # Werner Rosenheinrich
    # SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS
    assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x)
    assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x)
    # TODO can do higher powers, but come out as high order ... should they be
    # reduced to order 0, 1?
    assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x)
    assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \
        -(besselj(0, x)**2 + besselj(1, x)**2)/2
    # TODO more besseli when tables are extended or recursive mellin works
    assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \
        -2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \
        + 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x
    assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        -besselj(0, x)**2/2
    assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        x**2*besselj(1, x)**2/2
    assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \
        (x*besselj(0, x)**2 + x*besselj(1, x)**2 -
         besselj(0, x)*besselj(1, x))
    # TODO how does besselj(0, a*x)*besselj(0, b*x) work?
    # TODO how does besselj(0, x)**2*besselj(1, x)**2 work?
    # TODO sin(x)*besselj(0, x) etc come out a mess
    # TODO can x*log(x)*besselj(0, x) be done?
    # TODO how does besselj(1, x)*besselj(0, x+a) work?
    # TODO more indefinite integrals when struve functions etc are implemented
    # test a substitution
    assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \
        -besselj(0, x**2)/2
def test_inversion():
    """Inverse Laplace transforms computed through Meijer G-functions."""
    from sympy import piecewise_fold, besselj, sqrt, sin, cos, Heaviside
    def laplace_inverse(transform):
        return piecewise_fold(meijerint_inversion(transform, s, t))
    known_pairs = [
        (1/(s**2 + 1), sin(t)*Heaviside(t)),
        (s/(s**2 + 1), cos(t)*Heaviside(t)),
        (exp(-s)/s, Heaviside(t - 1)),
        (1/sqrt(1 + s**2), besselj(0, t)*Heaviside(t)),
    ]
    for transform, original in known_pairs:
        assert laplace_inverse(transform) == original
    # Inputs that the antecedent checks must reject.
    assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None
    assert laplace_inverse(exp(s**2)) is None
    assert meijerint_inversion(exp(-s**2), s, t) is None
def test_inversion_conditional_output():
    """With a positive parameter the inversion is unconditional; with a
    merely real parameter it must return a Piecewise whose final branch
    is an unevaluated InverseLaplaceTransform fallback."""
    from sympy import Symbol, InverseLaplaceTransform
    a = Symbol('a', positive=True)
    F = sqrt(pi/a)*exp(-2*sqrt(a)*sqrt(s))
    f = meijerint_inversion(F, s, t)
    assert not f.is_Piecewise
    b = Symbol('b', real=True)
    F = F.subs(a, b)
    f2 = meijerint_inversion(F, s, t)
    assert f2.is_Piecewise
    # first piece is same as f
    assert f2.args[0][0] == f.subs(a, b)
    # last piece is an unevaluated transform
    assert f2.args[-1][1]
    ILT = InverseLaplaceTransform(F, s, t, None)
    assert f2.args[-1][0] == ILT or f2.args[-1][0] == ILT.as_integral
def test_inversion_exp_real_nonreal_shift():
    """Inverting exp(k*s): a real k gives an unconditional delta, a
    provably nonreal k fails, and an unknown k yields a Piecewise whose
    first branch is a DiracDelta."""
    from sympy import Symbol, DiracDelta
    real_shift = Symbol('r', real=True)
    nonreal_shift = Symbol('c', extended_real=False)
    complex_shift = 1 + 2*I
    unknown_shift = Symbol('z')
    # Provably real shift: no case distinction is necessary.
    assert not meijerint_inversion(exp(real_shift*s), s, t).is_Piecewise
    # Shifts known to be nonreal cannot be inverted.
    assert meijerint_inversion(exp(complex_shift*s), s, t) is None
    assert meijerint_inversion(exp(nonreal_shift*s), s, t) is None
    # Unknown realness: result is conditional on the shift being real.
    result = meijerint_inversion(exp(unknown_shift*s), s, t)
    assert result.is_Piecewise
    assert isinstance(result.args[0][0], DiracDelta)
@slow
def test_lookup_table():
    """Every entry of the meijerint lookup table must (a) be expandable by
    hyperexpand and (b) agree numerically with its G-function expansion
    at randomly chosen substitution points."""
    from random import uniform, randrange
    from sympy import Add
    from sympy.integrals.meijerint import z as z_dummy
    table = {}
    _create_lookup_table(table)
    for _, l in sorted(table.items()):
        for formula, terms, cond, hint in sorted(l, key=default_sort_key):
            subs = {}
            for a in list(formula.free_symbols) + [z_dummy]:
                if hasattr(a, 'properties') and a.properties:
                    # these Wilds match positive integers
                    subs[a] = randrange(1, 10)
                else:
                    subs[a] = uniform(1.5, 2.0)
            if not isinstance(terms, list):
                terms = terms(subs)
            # First test that hyperexpand can do this.
            expanded = [hyperexpand(g) for (_, g) in terms]
            assert all(x.is_Piecewise or not x.has(meijerg) for x in expanded)
            # Now test that the meijer g-function is indeed as advertised.
            expanded = Add(*[f*x for (f, x) in terms])
            a, b = formula.n(subs=subs), expanded.n(subs=subs)
            r = min(abs(a), abs(b))
            # Use an absolute tolerance for small values, relative otherwise.
            if r < 1:
                assert abs(a - b).n() <= 1e-10
            else:
                assert (abs(a - b)/r).n() <= 1e-10
def test_branch_bug():
    """Regression: branch-cut handling in the antiderivative of erf(x**3)
    (the gamma factors only cancel after polar powdenest)."""
    from sympy import powdenest, lowergamma
    # TODO gammasimp cannot prove that the factor is unity
    assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x),
           polar=True) == 2*erf(x**3)*gamma(Rational(2, 3))/3/gamma(Rational(5, 3))
    assert integrate(erf(x**3), x, meijerg=True) == \
        2*x*erf(x**3)*gamma(Rational(2, 3))/(3*gamma(Rational(5, 3))) \
        - 2*gamma(Rational(2, 3))*lowergamma(Rational(2, 3), x**6)/(3*sqrt(pi)*gamma(Rational(5, 3)))
def test_linear_subs():
    """Linear substitutions (x -> x - 1) in the integrand are handled."""
    from sympy import besselj
    cases = (
        (sin(x - 1), -cos(1 - x)),
        (besselj(1, x - 1), -besselj(0, 1 - x)),
    )
    for integrand, antiderivative in cases:
        assert integrate(integrand, x, meijerg=True) == antiderivative
@slow
def test_probability():
    """Integrals from probability theory: normalization constants and low
    moments of standard distributions (normal, exponential, beta, chi,
    chi-squared, Dagum, F, inverse Gaussian, Levy, log-logistic, Weibull,
    Rice, Laplace), all via the Meijer G machinery."""
    # various integrals from probability theory
    from sympy.abc import x, y
    from sympy import symbols, Symbol, Abs, expand_mul, gammasimp, powsimp, sin
    mu1, mu2 = symbols('mu1 mu2', nonzero=True)
    sigma1, sigma2 = symbols('sigma1 sigma2', positive=True)
    rate = Symbol('lambda', positive=True)
    def normal(x, mu, sigma):
        # Gaussian probability density with mean mu and std sigma.
        return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2)
    def exponential(x, rate):
        # Exponential probability density with the given rate.
        return rate*exp(-rate*x)
    assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1
    assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \
        mu1
    assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
        == mu1**2 + sigma1**2
    assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
        == mu1**3 + 3*mu1*sigma1**2
    assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == 1
    assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1
    assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2
    assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2
    assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2
    assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == \
        -1 + mu1 + mu2
    i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                  (x, -oo, oo), (y, -oo, oo), meijerg=True)
    assert not i.has(Abs)
    assert simplify(i) == mu1**2 + sigma1**2
    assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
                     (x, -oo, oo), (y, -oo, oo), meijerg=True) == \
        sigma2**2 + mu2**2
    assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1
    assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \
        1/rate
    assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \
        2/rate**2
    def E(expr):
        # Expectation over independent exponential x and normal y;
        # the two integration orders must agree.
        res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
                         (x, 0, oo), (y, -oo, oo), meijerg=True)
        res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
                         (y, -oo, oo), (x, 0, oo), meijerg=True)
        assert expand_mul(res1) == expand_mul(res2)
        return res1
    assert E(1) == 1
    assert E(x*y) == mu1/rate
    assert E(x*y**2) == mu1**2/rate + sigma1**2/rate
    ans = sigma1**2 + 1/rate**2
    assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans
    assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans
    assert simplify(E((x + y)**2) - E(x + y)**2) == ans
    # Beta' distribution
    alpha, beta = symbols('alpha beta', positive=True)
    betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \
        /gamma(alpha)/gamma(beta)
    assert integrate(betadist, (x, 0, oo), meijerg=True) == 1
    i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate')
    assert (gammasimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta)
    j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate')
    assert j[1] == (1 < beta - 1)
    assert gammasimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \
        /(beta - 2)/(beta - 1)**2
    # Beta distribution
    # NOTE: this is evaluated using antiderivatives. It also tests that
    # meijerint_indefinite returns the simplest possible answer.
    a, b = symbols('a b', positive=True)
    betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b))
    assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1
    assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \
        a/(a + b)
    assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \
        a*(a + 1)/(a + b)/(a + b + 1)
    assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \
        gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y)
    # Chi distribution
    k = Symbol('k', integer=True, positive=True)
    chi = 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
    assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1
    assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \
        sqrt(2)*gamma((k + 1)/2)/gamma(k/2)
    assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k
    # Chi^2 distribution
    chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2 - 1)*exp(-x/2)
    assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1
    assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k
    assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \
        k*(k + 2)
    assert gammasimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo),
                     meijerg=True)) == 2*sqrt(2)/sqrt(k)
    # Dagum distribution
    a, b, p = symbols('a b p', positive=True)
    # XXX (x/b)**a does not work
    dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p + 1)
    assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1
    # XXX conditions are a mess
    arg = x*dagum
    assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none')
                    ) == a*b*gamma(1 - 1/a)*gamma(p + 1 + 1/a)/(
                    (a*p + 1)*gamma(p))
    assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none')
                    ) == a*b**2*gamma(1 - 2/a)*gamma(p + 1 + 2/a)/(
                    (a*p + 2)*gamma(p))
    # F-distribution
    d1, d2 = symbols('d1 d2', positive=True)
    f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \
        /gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2)
    assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1
    # TODO conditions are a mess
    assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none')
                    ) == d2/(d2 - 2)
    assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none')
                    ) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2)
    # TODO gamma, rayleigh
    # inverse gaussian
    lamda, mu = symbols('lamda mu', positive=True)
    dist = sqrt(lamda/2/pi)*x**(Rational(-3, 2))*exp(-lamda*(x - mu)**2/x/2/mu**2)
    mysimp = lambda expr: simplify(expr.rewrite(exp))
    assert mysimp(integrate(dist, (x, 0, oo))) == 1
    assert mysimp(integrate(x*dist, (x, 0, oo))) == mu
    assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda
    assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2
    # Levi
    c = Symbol('c', positive=True)
    assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'),
                     (x, mu, oo)) == 1
    # higher moments oo
    # log-logistic
    alpha, beta = symbols('alpha beta', positive=True)
    distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \
        (1 + x**beta/alpha**beta)**2
    # FIXME: If alpha, beta are not declared as finite the line below hangs
    # after the changes in:
    # https://github.com/sympy/sympy/pull/16603
    assert simplify(integrate(distn, (x, 0, oo))) == 1
    # NOTE the conditions are a mess, but correctly state beta > 1
    assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \
        pi*alpha/beta/sin(pi/beta)
    # (similar comment for conditions applies)
    assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \
        pi*alpha**y*y/beta/sin(pi*y/beta)
    # weibull
    k = Symbol('k', positive=True)
    n = Symbol('n', positive=True)
    distn = k/lamda*(x/lamda)**(k - 1)*exp(-(x/lamda)**k)
    assert simplify(integrate(distn, (x, 0, oo))) == 1
    assert simplify(integrate(x**n*distn, (x, 0, oo))) == \
        lamda**n*gamma(1 + n/k)
    # rice distribution
    from sympy import besseli
    nu, sigma = symbols('nu sigma', positive=True)
    rice = x/sigma**2*exp(-(x**2 + nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2)
    assert integrate(rice, (x, 0, oo), meijerg=True) == 1
    # can someone verify higher moments?
    # Laplace distribution
    mu = Symbol('mu', real=True)
    b = Symbol('b', positive=True)
    laplace = exp(-abs(x - mu)/b)/2/b
    assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1
    assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu
    assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \
        2*b**2 + mu**2
    # TODO are there other distributions supported on (-oo, oo) that we can do?
    # misc tests
    k = Symbol('k', positive=True)
    assert gammasimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k),
                                          (x, 0, oo)))) == polygamma(0, k)
@slow
def test_expint():
    """ Test various exponential integrals: expint, Ei and the related
    Si/Ci/Shi/Chi functions, both as definite integrals over (1, oo)
    and as antiderivatives. """
    from sympy import (expint, unpolarify, Symbol, Ci, Si, Shi, Chi,
                       sin, cos, sinh, cosh, Ei)
    assert simplify(unpolarify(integrate(exp(-z*x)/x**y, (x, 1, oo),
                    meijerg=True, conds='none'
                    ).rewrite(expint).expand(func=True))) == expint(y, z)
    assert integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(1, z)
    assert integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(2, z).rewrite(Ei).rewrite(expint)
    assert integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(3, z).rewrite(Ei).rewrite(expint).expand()
    t = Symbol('t', positive=True)
    assert integrate(-cos(x)/x, (x, t, oo), meijerg=True).expand() == Ci(t)
    assert integrate(-sin(x)/x, (x, t, oo), meijerg=True).expand() == \
        Si(t) - pi/2
    assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z)
    assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z)
    assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \
        I*pi - expint(1, x)
    assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \
        == expint(1, x) - exp(-x)/x - I*pi
    # Polar variable: needed so the cos/cosh integrals stay on one branch.
    u = Symbol('u', polar=True)
    assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Ci(u)
    assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Chi(u)
    assert integrate(expint(1, x), x, meijerg=True
                     ).rewrite(expint).expand() == x*expint(1, x) - exp(-x)
    assert integrate(expint(2, x), x, meijerg=True
                     ).rewrite(expint).expand() == \
        -x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2
    assert simplify(unpolarify(integrate(expint(y, x), x,
                    meijerg=True).rewrite(expint).expand(func=True))) == \
        -expint(y + 1, x)
    assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x)
    assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u)
    assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x)
    assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u)
    assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4
    assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2
def test_messy():
    """Transforms with messy results or conditions: Laplace transforms of
    Si/Shi/Chi and besselj, a Fourier transform of besselj(1, x)/x, and
    a Piecewise antiderivative."""
    from sympy import (laplace_transform, Si, Shi, Chi, atan, Piecewise,
                       acoth, E1, besselj, acosh, asin, And, re,
                       fourier_transform, sqrt)
    assert laplace_transform(Si(x), x, s) == ((-atan(s) + pi/2)/s, 0, True)
    assert laplace_transform(Shi(x), x, s) == (acoth(s)/s, 1, True)
    # where should the logs be simplified?
    assert laplace_transform(Chi(x), x, s) == \
        ((log(s**(-2)) - log((s**2 - 1)/s**2))/(2*s), 1, True)
    # TODO maybe simplify the inequalities?
    assert laplace_transform(besselj(a, x), x, s)[1:] == \
        (0, And(re(a/2) + S.Half > S.Zero, re(a/2) + 1 > S.Zero))
    # NOTE s < 0 can be done, but argument reduction is not good enough yet
    assert fourier_transform(besselj(1, x)/x, x, s, noconds=False) == \
        (Piecewise((0, 4*abs(pi**2*s**2) > 1),
         (2*sqrt(-4*pi**2*s**2 + 1), True)), s > 0)
    # TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons)
    # - folding could be better
    assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) == \
        log(1 + sqrt(2))
    assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) == \
        log(S.Half + sqrt(2)/2)
    assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \
        Piecewise((-acosh(1/x), abs(x**(-2)) > 1), (I*asin(1/x), True))
def test_issue_6122():
    """Gaussian with a purely imaginary exponent (issue 6122)."""
    value = integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True)
    assert value == -I*sqrt(pi)*exp(I*pi/4)
def test_issue_6252():
    """The antiderivative of 1/(x*(a + b*x)**(1/3)) must contain no
    unevaluated hyper remnants (issue 6252)."""
    integrand = 1/x/(a + b*x)**Rational(1, 3)
    antiderivative = integrate(integrand, x, meijerg=True)
    # XXX the expression is a mess, but actually upon differentiation and
    # putting in numerical values seems to work...
    assert not antiderivative.has(hyper)
def test_issue_6348():
    """Classic residue example: integral of exp(I*x)/(1+x^2) over the
    real line equals pi/e (issue 6348)."""
    value = integrate(exp(I*x)/(1 + x**2), (x, -oo, oo))
    assert value.simplify().rewrite(exp) == pi*exp(-1)
def test_fresnel():
    """Antiderivatives of sin/cos(pi*x**2/2) are the Fresnel integrals."""
    from sympy import fresnels, fresnelc
    for integrand, fresnel_func in ((sin(pi*x**2/2), fresnels(x)),
                                    (cos(pi*x**2/2), fresnelc(x))):
        assert expand_func(integrate(integrand, x)) == fresnel_func
def test_issue_6860():
    """x**x**x is outside the algorithm's scope; it must give up cleanly
    instead of raising (issue 6860)."""
    result = meijerint_indefinite(x**x**x, x)
    assert result is None
def test_issue_7337():
    """Antiderivative of x*sqrt(2x+3) and its evaluation over [-1, 1]
    (issue 7337)."""
    antiderivative = meijerint_indefinite(x*sqrt(2*x + 3), x).together()
    assert antiderivative == sqrt(2*x + 3)*(2*x**2 + x - 3)/5
    interval_value = antiderivative._eval_interval(x, S.NegativeOne, S.One)
    assert interval_value == Rational(2, 5)
def test_issue_8368():
    """Indefinite integral of cosh(x)*exp(-x*t) (issue 8368)."""
    expected = ((-t - 1)*exp(x) + (-t + 1)*exp(-x))*exp(-t*x)/2/(t**2 - 1)
    assert meijerint_indefinite(cosh(x)*exp(-x*t), x) == expected
def test_issue_10211():
    """Double integral of an inverse-cube distance kernel over a square
    (issue 10211)."""
    from sympy.abc import h, w
    value = integrate(1/sqrt((y - x)**2 + h**2)**3, (x, 0, w), (y, 0, w))
    assert value == 2*sqrt(1 + w**2/h**2)/h - 2/h
def test_issue_11806():
    """Inverse-cube kernel integrated over a symmetric finite range
    (issue 11806)."""
    from sympy import symbols
    y, L = symbols('y L', positive=True)
    expected = 2*L/(y**2*sqrt(L**2 + y**2))
    assert integrate(1/sqrt(x**2 + y**2)**3, (x, -L, L)) == expected
def test_issue_10681():
    """Float exponent integrand r**2*(R**2 - r**2)**0.5: the meijerg
    result must agree numerically with the hyper closed form, including
    the exp_polar branch factor (issue 10681)."""
    from sympy import RR
    from sympy.abc import R, r
    f = integrate(r**2*(R**2-r**2)**0.5, r, meijerg=True)
    g = (1.0/3)*R**1.0*r**3*hyper((-0.5, Rational(3, 2)), (Rational(5, 2),),
                                  r**2*exp_polar(2*I*pi)/R**2)
    # Compare as floats in RR since the inputs carry float exponents.
    assert RR.almosteq((f/g).n(), 1.0, 1e-12)
def test_issue_13536():
    """Reversed limits with an infinite lower bound:
    integral from oo down to a of x**-2 is -1/a (issue 13536)."""
    from sympy import Symbol
    bound = Symbol('a', real=True, positive=True)
    result = integrate(1/x**2, (x, oo, bound))
    assert result == -1/bound
def test_issue_6462():
    """integrate(cos(x**n)/x**n) with a symbolic exponent must not raise;
    substituting n = 2 must match the direct computation (issue 6462)."""
    from sympy import Symbol
    x = Symbol('x')
    n = Symbol('n')
    # Not the actual issue, still wrong answer for n = 1, but that there is no
    # exception
    assert integrate(cos(x**n)/x**n, x, meijerg=True).subs(n, 2).equals(
        integrate(cos(x**2)/x**2, x, meijerg=True))
| kaushik94/sympy | sympy/integrals/tests/test_meijerint.py | Python | bsd-3-clause | 30,251 | [
"Gaussian"
] | 633c90c87d2dfd813230c53ceb72f5659224eafb914b539d8fde804e1a6f49b9 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import binascii
import cookielib
import glob
import inspect
import logging
import httplib
import os
import random
import re
import socket
import string
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import lib.controller.checks
import lib.core.common
import lib.core.threads
import lib.core.convert
import lib.request.connect
import lib.utils.search
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import boldifyMessage
from lib.core.common import checkFile
from lib.core.common import dataToStdout
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import extractRegexResult
from lib.core.common import filterStringValue
from lib.core.common import findLocalPort
from lib.core.common import findPageForms
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import getUnicode
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseTargetDirect
from lib.core.common import parseTargetUrl
from lib.core.common import paths
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import runningAsAdmin
from lib.core.common import safeExpandUser
from lib.core.common import saveConfig
from lib.core.common import setOptimize
from lib.core.common import setPaths
from lib.core.common import singleTimeWarnMessage
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import queries
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DUMP_FORMAT
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MOBILES
from lib.core.enums import OPTION_TYPE
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.enums import PROXY_TYPE
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import WIZARD
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import FORMATTER
from lib.core.optiondict import optDict
from lib.core.settings import BURP_REQUEST_REGEX
from lib.core.settings import BURP_XML_HISTORY_REGEX
from lib.core.settings import CODECS_LIST_PAGE
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_ALIASES
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DEFAULT_TOR_HTTP_PORTS
from lib.core.settings import DEFAULT_TOR_SOCKS_PORTS
from lib.core.settings import DUMMY_URL
from lib.core.settings import INJECT_HERE_REGEX
from lib.core.settings import IS_WIN
from lib.core.settings import KB_CHARS_BOUNDARY_CHAR
from lib.core.settings import KB_CHARS_LOW_FREQUENCY_ALPHABET
from lib.core.settings import LOCALHOST
from lib.core.settings import MAX_CONNECT_RETRIES
from lib.core.settings import MAX_NUMBER_OF_THREADS
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_SPLITTING_REGEX
from lib.core.settings import PRECONNECT_CANDIDATE_TIMEOUT
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import SITE
from lib.core.settings import SOCKET_PRE_CONNECT_QUEUE_SIZE
from lib.core.settings import SQLMAP_ENVIRONMENT_PREFIX
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import SUPPORTED_OS
from lib.core.settings import TIME_DELAY_CANDIDATES
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNION_CHAR_REGEX
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import VERSION_STRING
from lib.core.settings import WEBSCARAB_SPLITTER
from lib.core.threads import getCurrentThreadData
from lib.core.threads import setDaemon
from lib.core.update import update
from lib.parse.configfile import configFileParser
from lib.parse.payloads import loadBoundaries
from lib.parse.payloads import loadPayloads
from lib.parse.sitemap import parseSitemap
from lib.request.basic import checkCharEncoding
from lib.request.connect import Connect as Request
from lib.request.dns import DNSServer
from lib.request.basicauthhandler import SmartHTTPBasicAuthHandler
from lib.request.httpshandler import HTTPSHandler
from lib.request.pkihandler import HTTPSPKIAuthHandler
from lib.request.rangehandler import HTTPRangeHandler
from lib.request.redirecthandler import SmartRedirectHandler
from lib.request.templates import getPageTemplate
from lib.utils.har import HTTPCollectorFactory
from lib.utils.crawler import crawl
from lib.utils.deps import checkDependencies
from lib.utils.search import search
from lib.utils.purge import purge
from thirdparty.keepalive import keepalive
from thirdparty.multipart import multipartpost
from thirdparty.oset.pyoset import oset
from thirdparty.socks import socks
from xml.etree.ElementTree import ElementTree
# Module-level urllib2 handler instances. These start as inert placeholders
# and are replaced/configured during option initialization before being
# installed into the global opener chain.
authHandler = urllib2.BaseHandler()  # swapped for basic/digest/NTLM handler when --auth-* is used
httpsHandler = HTTPSHandler()  # TLS protocol/cipher handling
keepAliveHandler = keepalive.HTTPHandler()  # persistent connections (--keep-alive)
proxyHandler = urllib2.ProxyHandler()  # reconfigured when --proxy is used
redirectHandler = SmartRedirectHandler()  # sqlmap-aware 30x handling
rangeHandler = HTTPRangeHandler()  # HTTP Range header (206 partial content) support
multipartPostHandler = multipartpost.MultipartPostHandler()  # multipart/form-data POSTs
# Reference: https://mail.python.org/pipermail/python-list/2009-November/558615.html
# WindowsError is a builtin only on Windows; define it as None on other
# platforms so "except WindowsError" clauses elsewhere stay valid.
try:
    WindowsError
except NameError:
    WindowsError = None
def _feedTargetsDict(reqFile, addedTargetUrls):
    """
    Parses web scarab and burp logs and adds results to the target URL list

    :param reqFile: path of the WebScarab/Burp log file being parsed
    :param addedTargetUrls: set of URLs added so far in this run; mutated
        in place to prevent duplicate targets
    """
    def _parseWebScarabLog(content):
        """
        Parses web scarab logs (POST method not supported)
        """
        # WebScarab conversation logs are split by a fixed marker string
        reqResList = content.split(WEBSCARAB_SPLITTER)
        for request in reqResList:
            url = extractRegexResult(r"URL: (?P<result>.+?)\n", request, re.I)
            method = extractRegexResult(r"METHOD: (?P<result>.+?)\n", request, re.I)
            cookie = extractRegexResult(r"COOKIE: (?P<result>.+?)\n", request, re.I)
            if not method or not url:
                logger.debug("not a valid WebScarab log data")
                continue
            if method.upper() == HTTPMETHOD.POST:
                warnMsg = "POST requests from WebScarab logs aren't supported "
                warnMsg += "as their body content is stored in separate files. "
                warnMsg += "Nevertheless you can use -r to load them individually."
                logger.warning(warnMsg)
                continue
            # honor the --scope regular expression filter (if provided)
            if not(conf.scope and not re.search(conf.scope, url, re.I)):
                if not kb.targets or url not in addedTargetUrls:
                    kb.targets.add((url, method, None, cookie, None))
                    addedTargetUrls.add(url)
    def _parseBurpLog(content):
        """
        Parses burp logs
        """
        if not re.search(BURP_REQUEST_REGEX, content, re.I | re.S):
            if re.search(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
                # Burp XML history: requests are base64-encoded per item
                reqResList = []
                for match in re.finditer(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
                    port, request = match.groups()
                    try:
                        request = request.decode("base64")
                    except binascii.Error:
                        continue
                    # append the recorded port to the Host header when missing
                    _ = re.search(r"%s:.+" % re.escape(HTTP_HEADER.HOST), request)
                    if _:
                        host = _.group(0).strip()
                        if not re.search(r":\d+\Z", host):
                            request = request.replace(host, "%s:%d" % (host, int(port)))
                    reqResList.append(request)
            else:
                # plain single raw HTTP request file
                reqResList = [content]
        else:
            reqResList = re.finditer(BURP_REQUEST_REGEX, content, re.I | re.S)
        for match in reqResList:
            request = match if isinstance(match, basestring) else match.group(0)
            request = re.sub(r"\A[^\w]+", "", request)
            # Burp proxy logs prepend "scheme://host:port" banner lines
            schemePort = re.search(r"(http[\w]*)\:\/\/.*?\:([\d]+).+?={10,}", request, re.I | re.S)
            if schemePort:
                scheme = schemePort.group(1)
                port = schemePort.group(2)
                request = re.sub(r"\n=+\Z", "", request.split(schemePort.group(0))[-1].lstrip())
            else:
                scheme, port = None, None
            # skip anything that is not a recognized HTTP request
            if not re.search(r"^[\n]*(%s).*?\sHTTP\/" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), request, re.I | re.M):
                continue
            # skip static resources (images, stylesheets etc.)
            if re.search(r"^[\n]*%s.*?\.(%s)\sHTTP\/" % (HTTPMETHOD.GET, "|".join(CRAWL_EXCLUDE_EXTENSIONS)), request, re.I | re.M):
                continue
            getPostReq = False
            url = None
            host = None
            method = None
            data = None
            cookie = None
            params = False
            newline = None
            lines = request.split('\n')
            headers = []
            for index in xrange(len(lines)):
                line = lines[index]
                if not line.strip() and index == len(lines) - 1:
                    break
                newline = "\r\n" if line.endswith('\r') else '\n'
                line = line.strip('\r')
                # request line ("METHOD /path HTTP/x.y") - only matched once
                match = re.search(r"\A(%s) (.+) HTTP/[\d.]+\Z" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), line) if not method else None
                if len(line.strip()) == 0 and method and method != HTTPMETHOD.GET and data is None:
                    # blank line after headers marks start of the request body
                    data = ""
                    params = True
                elif match:
                    method = match.group(1)
                    url = match.group(2)
                    if any(_ in line for _ in ('?', '=', kb.customInjectionMark)):
                        params = True
                    getPostReq = True
                # POST parameters
                elif data is not None and params:
                    data += "%s%s" % (line, newline)
                # GET parameters
                elif "?" in line and "=" in line and ": " not in line:
                    params = True
                # Headers
                elif re.search(r"\A\S+:", line):
                    key, value = line.split(":", 1)
                    value = value.strip().replace("\r", "").replace("\n", "")
                    # Cookie and Host headers
                    if key.upper() == HTTP_HEADER.COOKIE.upper():
                        cookie = value
                    elif key.upper() == HTTP_HEADER.HOST.upper():
                        if '://' in value:
                            scheme, value = value.split('://')[:2]
                        splitValue = value.split(":")
                        host = splitValue[0]
                        if len(splitValue) > 1:
                            port = filterStringValue(splitValue[1], "[0-9]")
                    # Avoid to add a static content length header to
                    # headers and consider the following lines as
                    # POSTed data
                    if key.upper() == HTTP_HEADER.CONTENT_LENGTH.upper():
                        params = True
                    # Avoid proxy and connection type related headers
                    elif key not in (HTTP_HEADER.PROXY_CONNECTION, HTTP_HEADER.CONNECTION):
                        headers.append((getUnicode(key), getUnicode(value)))
                    if kb.customInjectionMark in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or ""):
                        params = True
            data = data.rstrip("\r\n") if data else data
            if getPostReq and (params or cookie):
                # infer missing scheme/port pair for HTTPS requests
                if not port and isinstance(scheme, basestring) and scheme.lower() == "https":
                    port = "443"
                elif not scheme and port == "443":
                    scheme = "https"
                if conf.forceSSL:
                    scheme = "https"
                    port = port or "443"
                if not host:
                    errMsg = "invalid format of a request file"
                    raise SqlmapSyntaxException, errMsg
                if not url.startswith("http"):
                    url = "%s://%s:%s%s" % (scheme or "http", host, port or "80", url)
                    scheme = None
                    port = None
                # honor the --scope regular expression filter (if provided)
                if not(conf.scope and not re.search(conf.scope, url, re.I)):
                    if not kb.targets or url not in addedTargetUrls:
                        kb.targets.add((url, conf.method or method, data, cookie, tuple(headers)))
                        addedTargetUrls.add(url)
    checkFile(reqFile)
    try:
        with openFile(reqFile, "rb") as f:
            content = f.read()
    except (IOError, OSError, MemoryError), ex:
        errMsg = "something went wrong while trying "
        errMsg += "to read the content of file '%s' ('%s')" % (reqFile, getSafeExString(ex))
        raise SqlmapSystemException(errMsg)
    if conf.scope:
        logger.info("using regular expression '%s' for filtering targets" % conf.scope)
    # try both parsers; each silently skips content it does not recognize
    _parseBurpLog(content)
    _parseWebScarabLog(content)
    if not addedTargetUrls:
        errMsg = "unable to find usable request(s) "
        errMsg += "in provided file ('%s')" % reqFile
        raise SqlmapGenericException(errMsg)
def _loadQueries():
    """
    Loads queries from 'xml/queries.xml' file.

    Populates the global 'queries' dictionary, keyed by DBMS name; each
    value is an attribute-accessible object tree mirroring the XML layout.
    """
    def iterate(node, retVal=None):
        # recursively converts an XML subtree into DictObject instances
        class DictObject(object):
            def __init__(self):
                self.__dict__ = {}
            def __contains__(self, name):
                return name in self.__dict__
        if retVal is None:
            retVal = DictObject()
        for child in node.findall("*"):
            instance = DictObject()
            retVal.__dict__[child.tag] = instance
            if child.attrib:
                # leaf element: expose its XML attributes as object attributes
                instance.__dict__.update(child.attrib)
            else:
                iterate(child, instance)
        return retVal
    tree = ElementTree()
    try:
        tree.parse(paths.QUERIES_XML)
    except Exception, ex:
        errMsg = "something appears to be wrong with "
        errMsg += "the file '%s' ('%s'). Please make " % (paths.QUERIES_XML, getSafeExString(ex))
        errMsg += "sure that you haven't made any changes to it"
        raise SqlmapInstallationException, errMsg
    for node in tree.findall("*"):
        queries[node.attrib['value']] = iterate(node)
def _setMultipleTargets():
    """
    Define a configuration parameter if we are running in multiple target
    mode (parses the supplied log file or directory of request files and
    registers each discovered request as a scan target).
    """
    if not conf.logFile:
        return

    countBefore = len(kb.targets)
    seenUrls = set()

    debugMsg = "parsing targets list from '%s'" % conf.logFile
    logger.debug(debugMsg)

    if not os.path.exists(conf.logFile):
        errMsg = "the specified list of targets does not exist"
        raise SqlmapFilePathException(errMsg)

    if os.path.isfile(conf.logFile):
        _feedTargetsDict(conf.logFile, seenUrls)
    elif os.path.isdir(conf.logFile):
        # only files following the Burp proxy "<number>-request" naming scheme
        for reqFile in sorted(os.listdir(conf.logFile)):
            if re.search("([\d]+)\-request", reqFile):
                _feedTargetsDict(os.path.join(conf.logFile, reqFile), seenUrls)
    else:
        errMsg = "the specified list of targets is not a file "
        errMsg += "nor a directory"
        raise SqlmapFilePathException(errMsg)

    parsedCount = len(kb.targets) - countBefore

    if parsedCount > 0:
        infoMsg = "sqlmap parsed %d " % parsedCount
        infoMsg += "(parameter unique) requests from the "
        infoMsg += "targets list ready to be tested"
        logger.info(infoMsg)
def _adjustLoggingFormatter():
    """
    Solves problem of line deletition caused by overlapping logging messages
    and retrieved data info in inference mode
    """
    # already patched (original kept as FORMATTER._format)
    if hasattr(FORMATTER, '_format'):
        return

    def _patchedFormat(record):
        rendered = boldifyMessage(FORMATTER._format(record))
        if kb.get("prependFlag"):
            rendered = "\n%s" % rendered
            kb.prependFlag = False
        return rendered

    # RHS is evaluated first, so the original formatter is preserved
    FORMATTER._format, FORMATTER.format = FORMATTER.format, _patchedFormat
def _setRequestFromFile():
    """
    This function checks if the way to make a HTTP request is through supplied
    textual file, parses it and saves the information into the knowledge base.
    """
    if not conf.requestFile:
        return

    conf.requestFile = safeExpandUser(conf.requestFile)

    if not os.path.isfile(conf.requestFile):
        errMsg = "specified HTTP request file '%s' " % conf.requestFile
        errMsg += "does not exist"
        raise SqlmapFilePathException(errMsg)

    infoMsg = "parsing HTTP request from '%s'" % conf.requestFile
    logger.info(infoMsg)

    _feedTargetsDict(conf.requestFile, set())
def _setCrawler():
if not conf.crawlDepth:
return
if not any((conf.bulkFile, conf.sitemapUrl)):
crawl(conf.url)
else:
if conf.bulkFile:
targets = getFileItems(conf.bulkFile)
else:
targets = parseSitemap(conf.sitemapUrl)
for i in xrange(len(targets)):
try:
target = targets[i]
crawl(target)
if conf.verbose in (1, 2):
status = "%d/%d links visited (%d%%)" % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
except Exception, ex:
errMsg = "problem occurred while crawling at '%s' ('%s')" % (target, getSafeExString(ex))
logger.error(errMsg)
def _doSearch():
    """
    This function performs search dorking, parses results
    and saves the testable hosts into the knowledge base.
    """
    if not conf.googleDork:
        return
    kb.data.onlyGETs = None
    def retrieve():
        # runs the dork search and filters results into kb.targets;
        # returns the raw list of result links
        links = search(conf.googleDork)
        if not links:
            errMsg = "unable to find results for your "
            errMsg += "search dork expression"
            raise SqlmapGenericException(errMsg)
        for link in links:
            link = urldecode(link)
            if re.search(r"(.*?)\?(.+)", link):
                # link already carries GET parameters - directly testable
                kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
            elif re.search(URI_INJECTABLE_REGEX, link, re.I):
                # NOTE(review): 'not conf.googleDork' can never hold here (the
                # function returned early without it), so the prompt below is
                # unreachable and the link is always added - verify intent
                if kb.data.onlyGETs is None and conf.data is None and not conf.googleDork:
                    message = "do you want to scan only results containing GET parameters? [Y/n] "
                    kb.data.onlyGETs = readInput(message, default='Y', boolean=True)
                if not kb.data.onlyGETs or conf.googleDork:
                    kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
        return links
    while True:
        links = retrieve()
        if kb.targets:
            infoMsg = "sqlmap got %d results for your " % len(links)
            infoMsg += "search dork expression, "
            if len(links) == len(kb.targets):
                infoMsg += "all "
            else:
                infoMsg += "%d " % len(kb.targets)
            infoMsg += "of them are testable targets"
            logger.info(infoMsg)
            break
        else:
            message = "sqlmap got %d results " % len(links)
            message += "for your search dork expression, but none of them "
            message += "have GET parameters to test for SQL injection. "
            message += "Do you want to skip to the next result page? [Y/n]"
            if not readInput(message, default='Y', boolean=True):
                raise SqlmapSilentQuitException
            else:
                # advance to the next search result page and retry
                conf.googlePage += 1
def _setBulkMultipleTargets():
    """
    Loads scan targets from the bulk file (-m), one URL per line.
    """
    if not conf.bulkFile:
        return

    conf.bulkFile = safeExpandUser(conf.bulkFile)

    infoMsg = "parsing multiple targets list from '%s'" % conf.bulkFile
    logger.info(infoMsg)

    if not os.path.isfile(conf.bulkFile):
        errMsg = "the specified bulk file "
        errMsg += "does not exist"
        raise SqlmapFilePathException(errMsg)

    usable = False

    for entry in getFileItems(conf.bulkFile):
        # accept entries with GET parameters or a custom injection mark
        if re.match(r"[^ ]+\?(.+)", entry, re.I) or kb.customInjectionMark in entry:
            usable = True
            kb.targets.add((entry.strip(), conf.method, conf.data, conf.cookie, None))

    if not usable and not conf.forms and not conf.crawlDepth:
        warnMsg = "no usable links found (with GET parameters)"
        logger.warn(warnMsg)
def _setSitemapTargets():
    """
    Loads scan targets by parsing the sitemap at conf.sitemapUrl.
    """
    if not conf.sitemapUrl:
        return

    infoMsg = "parsing sitemap '%s'" % conf.sitemapUrl
    logger.info(infoMsg)

    usable = False

    for entry in parseSitemap(conf.sitemapUrl):
        # only entries with GET parameters are directly testable
        if re.match(r"[^ ]+\?(.+)", entry, re.I):
            usable = True
            kb.targets.add((entry.strip(), None, None, None, None))

    if not usable and not conf.forms and not conf.crawlDepth:
        warnMsg = "no usable links found (with GET parameters)"
        logger.warn(warnMsg)
def _findPageForms():
if not conf.forms or conf.crawlDepth:
return
if conf.url and not checkConnection():
return
infoMsg = "searching for forms"
logger.info(infoMsg)
if not any((conf.bulkFile, conf.googleDork, conf.sitemapUrl)):
page, _, _ = Request.queryPage(content=True)
findPageForms(page, conf.url, True, True)
else:
if conf.bulkFile:
targets = getFileItems(conf.bulkFile)
elif conf.sitemapUrl:
targets = parseSitemap(conf.sitemapUrl)
elif conf.googleDork:
targets = [_[0] for _ in kb.targets]
kb.targets.clear()
for i in xrange(len(targets)):
try:
target = targets[i]
page, _, _ = Request.getPage(url=target.strip(), crawling=True, raise404=False)
findPageForms(page, target, False, True)
if conf.verbose in (1, 2):
status = '%d/%d links visited (%d%%)' % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
except KeyboardInterrupt:
break
except Exception, ex:
errMsg = "problem occurred while searching for forms at '%s' ('%s')" % (target, getSafeExString(ex))
logger.error(errMsg)
def _setDBMSAuthentication():
    """
    Check and set the DBMS authentication credentials to run statements as
    another user, not the session user
    """
    if not conf.dbmsCred:
        return

    debugMsg = "setting the DBMS authentication credentials"
    logger.debug(debugMsg)

    credMatch = re.search("^(.+?):(.*?)$", conf.dbmsCred)

    if not credMatch:
        errMsg = "DBMS authentication credentials value must be in format "
        errMsg += "username:password"
        raise SqlmapSyntaxException(errMsg)

    conf.dbmsUsername, conf.dbmsPassword = credMatch.groups()
def _setMetasploit():
    """
    Locates and validates the local Metasploit Framework installation
    needed by the takeover (--os-pwn/--os-smbrelay/--os-bof) features.
    Sets conf.msfPath and kb.oldMsf (True when only the legacy
    msfencode/msfpayload tools are present instead of msfvenom).
    """
    if not conf.osPwn and not conf.osSmb and not conf.osBof:
        return
    debugMsg = "setting the takeover out-of-band functionality"
    logger.debug(debugMsg)
    msfEnvPathExists = False
    if IS_WIN:
        # pywin32 is a hard requirement for Metasploit usage on Windows
        try:
            import win32file
        except ImportError:
            errMsg = "sqlmap requires third-party module 'pywin32' "
            errMsg += "in order to use Metasploit functionalities on "
            errMsg += "Windows. You can download it from "
            errMsg += "'http://sourceforge.net/projects/pywin32/files/pywin32/'"
            raise SqlmapMissingDependence(errMsg)
        if not conf.msfPath:
            # attempt to auto-detect the installation path via the registry
            def _(key, value):
                retVal = None
                try:
                    from _winreg import ConnectRegistry, OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE
                    _ = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
                    _ = OpenKey(_, key)
                    retVal = QueryValueEx(_, value)[0]
                except:
                    logger.debug("unable to identify Metasploit installation path via registry key")
                return retVal
            conf.msfPath = _(r"SOFTWARE\Rapid7\Metasploit", "Location")
            if conf.msfPath:
                conf.msfPath = os.path.join(conf.msfPath, "msf3")
    if conf.osSmb:
        # SMB relay requires binding a privileged listener port
        isAdmin = runningAsAdmin()
        if not isAdmin:
            errMsg = "you need to run sqlmap as an administrator "
            errMsg += "if you want to perform a SMB relay attack because "
            errMsg += "it will need to listen on a user-specified SMB "
            errMsg += "TCP port for incoming connection attempts"
            raise SqlmapMissingPrivileges(errMsg)
    if conf.msfPath:
        # check both the supplied path and its "bin" subdirectory
        for path in (conf.msfPath, os.path.join(conf.msfPath, "bin")):
            if any(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True
                if all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False
                conf.msfPath = path
                break
        if msfEnvPathExists:
            debugMsg = "provided Metasploit Framework path "
            debugMsg += "'%s' is valid" % conf.msfPath
            logger.debug(debugMsg)
        else:
            warnMsg = "the provided Metasploit Framework path "
            warnMsg += "'%s' is not valid. The cause could " % conf.msfPath
            warnMsg += "be that the path does not exists or that one "
            warnMsg += "or more of the needed Metasploit executables "
            warnMsg += "within msfcli, msfconsole, msfencode and "
            warnMsg += "msfpayload do not exist"
            logger.warn(warnMsg)
    else:
        warnMsg = "you did not provide the local path where Metasploit "
        warnMsg += "Framework is installed"
        logger.warn(warnMsg)
    if not msfEnvPathExists:
        # fall back to scanning the PATH environment variable
        warnMsg = "sqlmap is going to look for Metasploit Framework "
        warnMsg += "installation inside the environment path(s)"
        logger.warn(warnMsg)
        envPaths = os.environ.get("PATH", "").split(";" if IS_WIN else ":")
        for envPath in envPaths:
            envPath = envPath.replace(";", "")
            if any(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True
                if all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False
                if msfEnvPathExists:
                    infoMsg = "Metasploit Framework has been found "
                    infoMsg += "installed in the '%s' path" % envPath
                    logger.info(infoMsg)
                    conf.msfPath = envPath
                    break
    if not msfEnvPathExists:
        errMsg = "unable to locate Metasploit Framework installation. "
        errMsg += "You can get it at 'http://www.metasploit.com/download/'"
        raise SqlmapFilePathException(errMsg)
def _setWriteFile():
    """
    Validates the file-write options and records the type of the local
    file that should be written onto the back-end DBMS file system.
    """
    if not conf.wFile:
        return

    debugMsg = "setting the write file functionality"
    logger.debug(debugMsg)

    if not os.path.exists(conf.wFile):
        raise SqlmapFilePathException("the provided local file '%s' does not exist" % conf.wFile)

    if not conf.dFile:
        errMsg = "you did not provide the back-end DBMS absolute path "
        errMsg += "where you want to write the local file '%s'" % conf.wFile
        raise SqlmapMissingMandatoryOptionException(errMsg)

    conf.wFileType = getFileType(conf.wFile)
def _setOS():
    """
    Force the back-end DBMS operating system option.
    """
    if not conf.os:
        return

    if conf.os.lower() in SUPPORTED_OS:
        debugMsg = "forcing back-end DBMS operating system to user defined "
        debugMsg += "value '%s'" % conf.os
        logger.debug(debugMsg)

        Backend.setOs(conf.os)
    else:
        supported = ', '.join([o.capitalize() for o in SUPPORTED_OS])
        errMsg = "you provided an unsupported back-end DBMS operating "
        errMsg += "system. The supported DBMS operating systems for OS "
        errMsg += "and file system access are %s. " % supported
        errMsg += "If you do not know the back-end DBMS underlying OS, "
        errMsg += "do not provide it and sqlmap will fingerprint it for "
        errMsg += "you."
        raise SqlmapUnsupportedDBMSException(errMsg)
def _setTechnique():
    """
    Translates the user supplied --technique letter string (e.g. "BEU")
    into the corresponding list of internal technique identifiers.
    """
    techniques = sorted(getPublicTypeMembers(PAYLOAD.TECHNIQUE), key=lambda x: x[1])
    validLetters = [name[0][0].upper() for name in techniques]

    if conf.tech and isinstance(conf.tech, basestring):
        selected = []

        for letter in conf.tech.upper():
            if letter not in validLetters:
                errMsg = "value for --technique must be a string composed "
                errMsg += "by the letters %s. Refer to the " % ", ".join(validLetters)
                errMsg += "user's manual for details"
                raise SqlmapSyntaxException(errMsg)

            # map the letter back to its numeric technique identifier
            for techName, techValue in techniques:
                if letter == techName[0]:
                    selected.append(techValue)
                    break

        conf.tech = selected
def _setDBMS():
    """
    Force the back-end DBMS option.
    """
    if not conf.dbms:
        return

    debugMsg = "forcing back-end DBMS to user defined value"
    logger.debug(debugMsg)

    conf.dbms = conf.dbms.lower()

    # split off an optional trailing version (e.g. "mysql 5.0")
    aliasPattern = "(%s)" % "|".join([alias for alias in SUPPORTED_DBMS])
    versionMatch = re.search("%s ([\d\.]+)" % aliasPattern, conf.dbms, re.I)

    if versionMatch:
        conf.dbms = versionMatch.group(1)
        Backend.setVersion(versionMatch.group(2))

    if conf.dbms not in SUPPORTED_DBMS:
        errMsg = "you provided an unsupported back-end database management "
        errMsg += "system. Supported DBMSes are as follows: %s. " % ', '.join(sorted(_ for _ in DBMS_DICT))
        errMsg += "If you do not know the back-end DBMS, do not provide "
        errMsg += "it and sqlmap will fingerprint it for you."
        raise SqlmapUnsupportedDBMSException(errMsg)

    # normalize any alias to its canonical DBMS name
    for canonicalName, aliases in DBMS_ALIASES:
        if conf.dbms in aliases:
            conf.dbms = canonicalName
            break
def _setTamperingFunctions():
    """
    Loads tampering functions from given script(s)

    Each script must define tamper(payload, **kwargs); an optional
    __priority__ attribute controls ordering, and an optional
    dependencies() function is invoked at load time.
    """
    if conf.tamper:
        last_priority = PRIORITY.HIGHEST
        check_priority = True
        resolve_priorities = False
        priorities = []
        for script in re.split(PARAMETER_SPLITTING_REGEX, conf.tamper):
            found = False
            path = paths.SQLMAP_TAMPER_PATH.encode(sys.getfilesystemencoding() or UNICODE_ENCODING)
            script = script.strip().encode(sys.getfilesystemencoding() or UNICODE_ENCODING)
            try:
                # resolve the script either inside the bundled tamper
                # directory or as a user supplied path
                if not script:
                    continue
                elif os.path.exists(os.path.join(path, script if script.endswith(".py") else "%s.py" % script)):
                    script = os.path.join(path, script if script.endswith(".py") else "%s.py" % script)
                elif not os.path.exists(script):
                    errMsg = "tamper script '%s' does not exist" % script
                    raise SqlmapFilePathException(errMsg)
                elif not script.endswith(".py"):
                    errMsg = "tamper script '%s' should have an extension '.py'" % script
                    raise SqlmapSyntaxException(errMsg)
            except UnicodeDecodeError:
                errMsg = "invalid character provided in option '--tamper'"
                raise SqlmapSyntaxException(errMsg)
            dirname, filename = os.path.split(script)
            dirname = os.path.abspath(dirname)
            infoMsg = "loading tamper script '%s'" % filename[:-3]
            logger.info(infoMsg)
            if not os.path.exists(os.path.join(dirname, "__init__.py")):
                errMsg = "make sure that there is an empty file '__init__.py' "
                errMsg += "inside of tamper scripts directory '%s'" % dirname
                raise SqlmapGenericException(errMsg)
            if dirname not in sys.path:
                sys.path.insert(0, dirname)
            try:
                module = __import__(filename[:-3].encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
            except (ImportError, SyntaxError), ex:
                raise SqlmapSyntaxException("cannot import tamper script '%s' (%s)" % (filename[:-3], getSafeExString(ex)))
            priority = PRIORITY.NORMAL if not hasattr(module, "__priority__") else module.__priority__
            for name, function in inspect.getmembers(module, inspect.isfunction):
                # accept only the canonical tamper(payload, **kwargs) signature
                if name == "tamper" and inspect.getargspec(function).args and inspect.getargspec(function).keywords == "kwargs":
                    found = True
                    kb.tamperFunctions.append(function)
                    function.func_name = module.__name__
                    # scripts given out of (descending) priority order may be
                    # auto-reordered on user confirmation
                    if check_priority and priority > last_priority:
                        message = "it appears that you might have mixed "
                        message += "the order of tamper scripts. "
                        message += "Do you want to auto resolve this? [Y/n/q] "
                        choice = readInput(message, default='Y').upper()
                        if choice == 'N':
                            resolve_priorities = False
                        elif choice == 'Q':
                            raise SqlmapUserQuitException
                        else:
                            resolve_priorities = True
                        check_priority = False
                    priorities.append((priority, function))
                    last_priority = priority
                    break
                elif name == "dependencies":
                    # lets the script verify/report its own requirements
                    function()
            if not found:
                errMsg = "missing function 'tamper(payload, **kwargs)' "
                errMsg += "in tamper script '%s'" % script
                raise SqlmapGenericException(errMsg)
        if kb.tamperFunctions and len(kb.tamperFunctions) > 3:
            warnMsg = "using too many tamper scripts is usually not "
            warnMsg += "a good idea"
            logger.warning(warnMsg)
        if resolve_priorities and priorities:
            # re-run the chain in descending priority order
            priorities.sort(reverse=True)
            kb.tamperFunctions = []
            for _, function in priorities:
                kb.tamperFunctions.append(function)
def _setWafFunctions():
    """
    Loads WAF/IPS/IDS detecting functions from script(s)

    Each script under the waf/ directory must define detect(get_page);
    detected functions are collected into kb.wafFunctions together with
    the product name (__product__ or the script file name).
    """
    if conf.identifyWaf:
        for found in glob.glob(os.path.join(paths.SQLMAP_WAF_PATH, "*.py")):
            dirname, filename = os.path.split(found)
            dirname = os.path.abspath(dirname)
            if filename == "__init__.py":
                continue
            debugMsg = "loading WAF script '%s'" % filename[:-3]
            logger.debug(debugMsg)
            if dirname not in sys.path:
                sys.path.insert(0, dirname)
            try:
                # drop stale module entry so a fresh import is performed
                if filename[:-3] in sys.modules:
                    del sys.modules[filename[:-3]]
                module = __import__(filename[:-3].encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
            except ImportError, msg:
                raise SqlmapSyntaxException("cannot import WAF script '%s' (%s)" % (filename[:-3], msg))
            _ = dict(inspect.getmembers(module))
            if "detect" not in _:
                errMsg = "missing function 'detect(get_page)' "
                errMsg += "in WAF script '%s'" % found
                raise SqlmapGenericException(errMsg)
            else:
                kb.wafFunctions.append((_["detect"], _.get("__product__", filename[:-3])))
        # run generic detectors last
        kb.wafFunctions = sorted(kb.wafFunctions, key=lambda _: "generic" in _[1].lower())
def _setThreads():
    # Normalize the configured thread count: anything non-integer or
    # non-positive falls back to single-threaded operation
    if not (isinstance(conf.threads, int) and conf.threads > 0):
        conf.threads = 1
def _setDNSCache():
    """
    Makes a cached version of socket._getaddrinfo to avoid subsequent DNS requests.

    Resolved results are memoized in kb.cache.addrinfo keyed by the call
    arguments; the original resolver is preserved as socket._getaddrinfo.
    """
    def _getaddrinfo(*args, **kwargs):
        if args in kb.cache.addrinfo:
            return kb.cache.addrinfo[args]
        else:
            kb.cache.addrinfo[args] = socket._getaddrinfo(*args, **kwargs)
            return kb.cache.addrinfo[args]
    # patch only once per process (presence of the backup marks it done)
    if not hasattr(socket, "_getaddrinfo"):
        socket._getaddrinfo = socket.getaddrinfo
        socket.getaddrinfo = _getaddrinfo
def _setSocketPreConnect():
    """
    Makes a pre-connect version of socket.connect

    A daemon thread keeps a small pool of already-connected sockets per
    (family, type, proto, address) key; the patched socket.connect pops a
    fresh one from the pool instead of performing a new TCP handshake.
    """
    if conf.disablePrecon:
        return
    def _():
        # background filler: keeps each pool topped up while scan runs
        while kb.get("threadContinue") and not conf.get("disablePrecon"):
            try:
                for key in socket._ready:
                    if len(socket._ready[key]) < SOCKET_PRE_CONNECT_QUEUE_SIZE:
                        family, type, proto, address = key
                        s = socket.socket(family, type, proto)
                        s._connect(address)
                        with kb.locks.socket:
                            socket._ready[key].append((s._sock, time.time()))
            except KeyboardInterrupt:
                break
            except:
                # best effort: connection failures here are silently retried
                pass
            finally:
                time.sleep(0.01)
    def connect(self, address):
        # replacement for socket.socket.connect: reuse a pooled socket
        # when a fresh enough one is available
        found = False
        key = (self.family, self.type, self.proto, address)
        with kb.locks.socket:
            if key not in socket._ready:
                socket._ready[key] = []
            while len(socket._ready[key]) > 0:
                candidate, created = socket._ready[key].pop(0)
                if (time.time() - created) < PRECONNECT_CANDIDATE_TIMEOUT:
                    self._sock = candidate
                    found = True
                    break
                else:
                    # stale candidate - discard it
                    try:
                        candidate.close()
                    except socket.error:
                        pass
        if not found:
            self._connect(address)
    # patch only once per process (original kept as socket.socket._connect)
    if not hasattr(socket.socket, "_connect"):
        socket._ready = {}
        socket.socket._connect = socket.socket.connect
        socket.socket.connect = connect
    thread = threading.Thread(target=_)
    setDaemon(thread)
    thread.start()
def _setHTTPHandlers():
    """
    Check and set the HTTP/SOCKS proxy for all HTTP requests.

    Also (re)builds and installs the global urllib2 opener from the
    module-level handler instances.
    """
    global proxyHandler
    # reset any handlers left over from a previous target/proxy
    for _ in ("http", "https"):
        if hasattr(proxyHandler, "%s_open" % _):
            delattr(proxyHandler, "%s_open" % _)
    if conf.proxyList is not None:
        # rotate through the user supplied proxy list, consuming one entry
        if not conf.proxyList:
            errMsg = "list of usable proxies is exhausted"
            raise SqlmapNoneDataException(errMsg)
        conf.proxy = conf.proxyList[0]
        conf.proxyList = conf.proxyList[1:]
        infoMsg = "loading proxy '%s' from a supplied proxy list file" % conf.proxy
        logger.info(infoMsg)
    elif not conf.proxy:
        if conf.hostname in ("localhost", "127.0.0.1") or conf.ignoreProxy:
            # explicitly disable any environment-provided proxy
            proxyHandler.proxies = {}
    if conf.proxy:
        debugMsg = "setting the HTTP/SOCKS proxy for all HTTP requests"
        logger.debug(debugMsg)
        try:
            _ = urlparse.urlsplit(conf.proxy)
        except Exception, ex:
            errMsg = "invalid proxy address '%s' ('%s')" % (conf.proxy, getSafeExString(ex))
            raise SqlmapSyntaxException, errMsg
        hostnamePort = _.netloc.split(":")
        scheme = _.scheme.upper()
        hostname = hostnamePort[0]
        port = None
        username = None
        password = None
        if len(hostnamePort) == 2:
            try:
                port = int(hostnamePort[1])
            except:
                pass # drops into the next check block
        if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
            errMsg = "proxy value must be in format '(%s)://address:port'" % "|".join(_[0].lower() for _ in getPublicTypeMembers(PROXY_TYPE))
            raise SqlmapSyntaxException(errMsg)
        if conf.proxyCred:
            _ = re.search("^(.*?):(.*?)$", conf.proxyCred)
            if not _:
                errMsg = "proxy authentication credentials "
                errMsg += "value must be in format username:password"
                raise SqlmapSyntaxException(errMsg)
            else:
                username = _.group(1)
                password = _.group(2)
        if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5):
            # SOCKS proxying is done by wrapping urllib2 at socket level
            proxyHandler.proxies = {}
            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if scheme == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, hostname, port, username=username, password=password)
            socks.wrapmodule(urllib2)
        else:
            socks.unwrapmodule(urllib2)
            if conf.proxyCred:
                # Reference: http://stackoverflow.com/questions/34079/how-to-specify-an-authenticated-proxy-for-a-python-http-connection
                proxyString = "%s@" % conf.proxyCred
            else:
                proxyString = ""
            proxyString += "%s:%d" % (hostname, port)
            proxyHandler.proxies = {"http": proxyString, "https": proxyString}
        proxyHandler.__init__(proxyHandler.proxies)
    debugMsg = "creating HTTP requests opener object"
    logger.debug(debugMsg)
    handlers = filter(None, [multipartPostHandler, proxyHandler if proxyHandler.proxies else None, authHandler, redirectHandler, rangeHandler, httpsHandler])
    if not conf.dropSetCookie:
        if not conf.loadCookies:
            conf.cj = cookielib.CookieJar()
        else:
            conf.cj = cookielib.MozillaCookieJar()
            resetCookieJar(conf.cj)
        handlers.append(urllib2.HTTPCookieProcessor(conf.cj))
    # Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
    if conf.keepAlive:
        warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
        warnMsg += "been disabled because of its incompatibility "
        if conf.proxy:
            warnMsg += "with HTTP(s) proxy"
            logger.warn(warnMsg)
        elif conf.authType:
            warnMsg += "with authentication methods"
            logger.warn(warnMsg)
        else:
            handlers.append(keepAliveHandler)
    opener = urllib2.build_opener(*handlers)
    urllib2.install_opener(opener)
def _setSafeVisit():
    """
    Check and set the safe visit options.

    Populates kb.safeReq (method, url, headers, post) from a raw request
    file, or normalizes conf.safeUrl, and validates conf.safeFreq.
    """
    if not any((conf.safeUrl, conf.safeReqFile)):
        return
    if conf.safeReqFile:
        checkFile(conf.safeReqFile)
        raw = readCachedFileContent(conf.safeReqFile)
        # first line must be a request line, e.g. "GET /index HTTP/1.1"
        match = re.search(r"\A([A-Z]+) ([^ ]+) HTTP/[0-9.]+\Z", raw[:raw.find('\n')])
        if match:
            kb.safeReq.method = match.group(1)
            kb.safeReq.url = match.group(2)
            kb.safeReq.headers = {}
            for line in raw[raw.find('\n') + 1:].split('\n'):
                line = line.strip()
                if line and ':' in line:
                    key, value = line.split(':', 1)
                    value = value.strip()
                    kb.safeReq.headers[key] = value
                    if key == HTTP_HEADER.HOST:
                        # derive the absolute URL from the Host header
                        if not value.startswith("http"):
                            scheme = "http"
                            if value.endswith(":443"):
                                scheme = "https"
                            value = "%s://%s" % (scheme, value)
                        kb.safeReq.url = urlparse.urljoin(value, kb.safeReq.url)
                else:
                    # first non-header line ends the header section
                    break
            # anything past the blank separator line is the POST body
            post = None
            if '\r\n\r\n' in raw:
                post = raw[raw.find('\r\n\r\n') + 4:]
            elif '\n\n' in raw:
                post = raw[raw.find('\n\n') + 2:]
            if post and post.strip():
                kb.safeReq.post = post
            else:
                kb.safeReq.post = None
        else:
            errMsg = "invalid format of a safe request file"
            raise SqlmapSyntaxException, errMsg
    else:
        # ensure the safe URL carries a scheme
        if not re.search("^http[s]*://", conf.safeUrl):
            if ":443/" in conf.safeUrl:
                conf.safeUrl = "https://" + conf.safeUrl
            else:
                conf.safeUrl = "http://" + conf.safeUrl
    if conf.safeFreq <= 0:
        errMsg = "please provide a valid value (>0) for safe frequency (--safe-freq) while using safe visit features"
        raise SqlmapSyntaxException(errMsg)
def _setPrefixSuffix():
    """
    Builds a single custom payload boundary from user supplied
    --prefix/--suffix values, replacing the default boundary set.
    """
    if conf.prefix is None or conf.suffix is None:
        return

    boundary = AttribDict()
    boundary.level = 1
    boundary.clause = [0]
    boundary.where = [1, 2, 3]
    boundary.prefix = conf.prefix
    boundary.suffix = conf.suffix

    # infer the parameter type from the quoting style of the suffix
    loweredSuffix = boundary.suffix.lower()

    if " like" in loweredSuffix:
        if "'" in loweredSuffix:
            boundary.ptype = 3
        elif '"' in loweredSuffix:
            boundary.ptype = 5
    elif "'" in boundary.suffix:
        boundary.ptype = 2
    elif '"' in boundary.suffix:
        boundary.ptype = 4
    else:
        boundary.ptype = 1

    # user who provides --prefix/--suffix does not want other boundaries
    # to be tested for
    conf.boundaries = [boundary]
def _setAuthCred():
    """
    Adds authentication credentials (if any) for current target to the password manager
    (used by connection handler)
    """
    required = (conf.scheme, conf.hostname, conf.port, conf.authUsername, conf.authPassword)

    if kb.passwordMgr and all(value is not None for value in required):
        targetUri = "%s://%s:%d" % (conf.scheme, conf.hostname, conf.port)
        kb.passwordMgr.add_password(None, targetUri, conf.authUsername, conf.authPassword)
def _setHTTPAuthentication():
    """
    Check and set the HTTP(s) authentication method (Basic, Digest, NTLM or PKI),
    username and password for first three methods, or PEM private key file for
    PKI authentication

    Replaces the module-level authHandler with the appropriate urllib2
    handler instance.
    """
    global authHandler
    if not conf.authType and not conf.authCred and not conf.authFile:
        return
    # a key file alone implies PKI authentication
    if conf.authFile and not conf.authType:
        conf.authType = AUTH_TYPE.PKI
    elif conf.authType and not conf.authCred and not conf.authFile:
        errMsg = "you specified the HTTP authentication type, but "
        errMsg += "did not provide the credentials"
        raise SqlmapSyntaxException(errMsg)
    elif not conf.authType and conf.authCred:
        errMsg = "you specified the HTTP authentication credentials, "
        errMsg += "but did not provide the type"
        raise SqlmapSyntaxException(errMsg)
    elif (conf.authType or "").lower() not in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST, AUTH_TYPE.NTLM, AUTH_TYPE.PKI):
        errMsg = "HTTP authentication type value must be "
        errMsg += "Basic, Digest, NTLM or PKI"
        raise SqlmapSyntaxException(errMsg)
    if not conf.authFile:
        debugMsg = "setting the HTTP authentication type and credentials"
        logger.debug(debugMsg)
        authType = conf.authType.lower()
        if authType in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST):
            regExp = "^(.*?):(.*?)$"
            errMsg = "HTTP %s authentication credentials " % authType
            errMsg += "value must be in format 'username:password'"
        elif authType == AUTH_TYPE.NTLM:
            # NTLM requires the Windows domain in the username part
            regExp = "^(.*\\\\.*):(.*?)$"
            errMsg = "HTTP NTLM authentication credentials value must "
            errMsg += "be in format 'DOMAIN\username:password'"
        elif authType == AUTH_TYPE.PKI:
            errMsg = "HTTP PKI authentication require "
            errMsg += "usage of option `--auth-pki`"
            raise SqlmapSyntaxException(errMsg)
        aCredRegExp = re.search(regExp, conf.authCred)
        if not aCredRegExp:
            raise SqlmapSyntaxException(errMsg)
        conf.authUsername = aCredRegExp.group(1)
        conf.authPassword = aCredRegExp.group(2)
        kb.passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        _setAuthCred()
        if authType == AUTH_TYPE.BASIC:
            authHandler = SmartHTTPBasicAuthHandler(kb.passwordMgr)
        elif authType == AUTH_TYPE.DIGEST:
            authHandler = urllib2.HTTPDigestAuthHandler(kb.passwordMgr)
        elif authType == AUTH_TYPE.NTLM:
            try:
                from ntlm import HTTPNtlmAuthHandler
            except ImportError:
                errMsg = "sqlmap requires Python NTLM third-party library "
                errMsg += "in order to authenticate via NTLM, "
                errMsg += "http://code.google.com/p/python-ntlm/"
                raise SqlmapMissingDependence(errMsg)
            authHandler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(kb.passwordMgr)
    else:
        debugMsg = "setting the HTTP(s) authentication PEM private key"
        logger.debug(debugMsg)
        _ = safeExpandUser(conf.authFile)
        checkFile(_)
        authHandler = HTTPSPKIAuthHandler(_)
def _setHTTPExtraHeaders():
    """
    Parse user supplied extra HTTP headers (option --headers) into
    conf.httpHeaders, or append sensible defaults when none were given.

    Raises SqlmapSyntaxException on a header entry without a colon.
    """
    if conf.headers:
        debugMsg = "setting extra HTTP headers"
        logger.debug(debugMsg)
        # Entries can be delimited either by real newlines or by the literal
        # two-character sequence "\n" (e.g. as typed on the command line)
        conf.headers = conf.headers.split("\n") if "\n" in conf.headers else conf.headers.split("\\n")
        for headerValue in conf.headers:
            if not headerValue.strip():
                continue
            if headerValue.count(':') >= 1:
                # Split only on the first colon so the value itself may contain colons
                header, value = (_.lstrip() for _ in headerValue.split(":", 1))
                if header and value:
                    conf.httpHeaders.append((header, value))
            else:
                errMsg = "invalid header value: %s. Valid header format is 'name:value'" % repr(headerValue).lstrip('u')
                raise SqlmapSyntaxException(errMsg)
    elif not conf.requestFile and len(conf.httpHeaders or []) < 2:
        if conf.charset:
            conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "%s;q=0.7,*;q=0.1" % conf.charset))
        # Invalidating any caching mechanism in between
        # Reference: http://stackoverflow.com/a/1383359
        conf.httpHeaders.append((HTTP_HEADER.CACHE_CONTROL, "no-cache"))
def _defaultHTTPUserAgent():
    """
    Build the default sqlmap HTTP User-Agent header value.

    @return: default sqlmap HTTP User-Agent header
    @rtype: C{str}
    """

    return "{0} ({1})".format(VERSION_STRING, SITE)
def _setHTTPUserAgent():
    """
    Set the HTTP User-Agent header.
    Depending on the user options it can be:

        * The default sqlmap string
        * A default value read as user option
        * A random value read from a list of User-Agent headers from a
          file choosed as user option
    """
    if conf.mobile:
        # Interactive prompt: let the user pick which smartphone to imitate
        message = "which smartphone do you want sqlmap to imitate "
        message += "through HTTP User-Agent header?\n"
        items = sorted(getPublicTypeMembers(MOBILES, True))
        for count in xrange(len(items)):
            item = items[count]
            message += "[%d] %s%s\n" % (count + 1, item[0], " (default)" if item == MOBILES.IPHONE else "")
        test = readInput(message.rstrip('\n'), default=items.index(MOBILES.IPHONE) + 1)
        try:
            item = items[int(test) - 1]
        except (TypeError, ValueError, IndexError):
            # Non-numeric or out-of-range choice falls back to the default
            # (narrowed from a bare "except" which would also swallow
            # KeyboardInterrupt/SystemExit)
            item = MOBILES.IPHONE
        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, item[1]))
    elif conf.agent:
        # User supplied a fixed User-Agent value (option --user-agent)
        debugMsg = "setting the HTTP User-Agent header"
        logger.debug(debugMsg)
        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, conf.agent))
    elif not conf.randomAgent:
        # Only add the default User-Agent when no User-Agent header was
        # already provided (e.g. through extra headers)
        _ = True
        for header, _ in conf.httpHeaders:
            if header == HTTP_HEADER.USER_AGENT:
                _ = False
                break
        if _:
            conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
    else:
        # Random User-Agent: lazily load the candidate list once into kb
        if not kb.userAgents:
            debugMsg = "loading random HTTP User-Agent header(s) from "
            debugMsg += "file '%s'" % paths.USER_AGENTS
            logger.debug(debugMsg)
            try:
                kb.userAgents = getFileItems(paths.USER_AGENTS)
            except IOError:
                warnMsg = "unable to read HTTP User-Agent header "
                warnMsg += "file '%s'" % paths.USER_AGENTS
                logger.warn(warnMsg)
                conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
                return
        userAgent = random.sample(kb.userAgents or [_defaultHTTPUserAgent()], 1)[0]
        infoMsg = "fetched random HTTP User-Agent header from "
        infoMsg += "file '%s': '%s'" % (paths.USER_AGENTS, userAgent)
        logger.info(infoMsg)
        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, userAgent))
def _setHTTPReferer():
    """
    Set the HTTP Referer
    """

    if not conf.referer:
        return

    logger.debug("setting the HTTP Referer header")
    conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.referer))
def _setHTTPHost():
    """
    Set the HTTP Host
    """

    if not conf.host:
        return

    logger.debug("setting the HTTP Host header")
    conf.httpHeaders.append((HTTP_HEADER.HOST, conf.host))
def _setHTTPCookies():
    """
    Set the HTTP Cookie header
    """

    if not conf.cookie:
        return

    logger.debug("setting the HTTP Cookie header")
    conf.httpHeaders.append((HTTP_HEADER.COOKIE, conf.cookie))
def _setHTTPTimeout():
    """
    Set the HTTP timeout
    """

    if not conf.timeout:
        # No explicit value given - use the 30 seconds default
        conf.timeout = 30.0
    else:
        logger.debug("setting the HTTP timeout")

        conf.timeout = float(conf.timeout)

        # Enforce a lower bound of 3 seconds
        if conf.timeout < 3.0:
            logger.warn("the minimum HTTP timeout is 3 seconds, sqlmap will going to reset it")
            conf.timeout = 3.0

    socket.setdefaulttimeout(conf.timeout)
def _checkDependencies():
    """
    Checks for missing dependencies.
    """

    if not conf.dependencies:
        return

    checkDependencies()
def _createTemporaryDirectory():
    """
    Creates temporary directory for this run.

    Honors a user supplied location (option --tmp-dir) after verifying it
    is writable, otherwise falls back to the system default, and finally
    creates a dedicated per-run "sqlmap<random><pid>" subdirectory.
    """
    if conf.tmpDir:
        try:
            if not os.path.isdir(conf.tmpDir):
                os.makedirs(conf.tmpDir)
            # Probe writability by creating (and immediately removing) a
            # randomly named file in the user supplied location
            _ = os.path.join(conf.tmpDir, randomStr())
            open(_, "w+b").close()
            os.remove(_)
            tempfile.tempdir = conf.tmpDir
            warnMsg = "using '%s' as the temporary directory" % conf.tmpDir
            logger.warn(warnMsg)
        except (OSError, IOError), ex:
            errMsg = "there has been a problem while accessing "
            errMsg += "temporary directory location(s) ('%s')" % getSafeExString(ex)
            raise SqlmapSystemException, errMsg
    else:
        try:
            if not os.path.isdir(tempfile.gettempdir()):
                os.makedirs(tempfile.gettempdir())
        except (OSError, IOError, WindowsError), ex:
            # Non-fatal here: the per-run directory creation below has its
            # own fallback under the sqlmap home path
            warnMsg = "there has been a problem while accessing "
            warnMsg += "system's temporary directory location(s) ('%s'). Please " % getSafeExString(ex)
            warnMsg += "make sure that there is enough disk space left. If problem persists, "
            warnMsg += "try to set environment variable 'TEMP' to a location "
            warnMsg += "writeable by the current user"
            logger.warn(warnMsg)
    # Create the per-run subdirectory unless the user explicitly pinned
    # tempfile.tempdir to his own --tmp-dir location
    if "sqlmap" not in (tempfile.tempdir or "") or conf.tmpDir and tempfile.tempdir == conf.tmpDir:
        try:
            tempfile.tempdir = tempfile.mkdtemp(prefix="sqlmap", suffix=str(os.getpid()))
        except (OSError, IOError, WindowsError):
            # Last resort: directory under the sqlmap home path
            tempfile.tempdir = os.path.join(paths.SQLMAP_HOME_PATH, "tmp", "sqlmap%s%d" % (randomStr(6), os.getpid()))
    kb.tempDir = tempfile.tempdir
    if not os.path.isdir(tempfile.tempdir):
        try:
            os.makedirs(tempfile.tempdir)
        except (OSError, IOError, WindowsError), ex:
            errMsg = "there has been a problem while setting "
            errMsg += "temporary directory location ('%s')" % getSafeExString(ex)
            raise SqlmapSystemException, errMsg
def _cleanupOptions():
    """
    Cleanup configuration attributes.

    Normalizes raw user supplied option values in-place on the conf
    singleton: type coercion, whitespace stripping, list splitting, path
    normalization and derived flags. Order of the individual steps matters
    as later code relies on the normalized forms.
    """
    debugMsg = "cleaning up configuration parameters"
    logger.debug(debugMsg)
    width = getConsoleWidth()
    # Progress line width depends on whether the ETA display is active
    if conf.eta:
        conf.progressWidth = width - 26
    else:
        conf.progressWidth = width - 46
    # Expand '~' in every path-like option value
    for key, value in conf.items():
        if value and any(key.endswith(_) for _ in ("Path", "File", "Dir")):
            conf[key] = safeExpandUser(value)
    if conf.testParameter:
        conf.testParameter = urldecode(conf.testParameter)
        conf.testParameter = conf.testParameter.replace(" ", "")
        conf.testParameter = re.split(PARAMETER_SPLITTING_REGEX, conf.testParameter)
    else:
        conf.testParameter = []
    if conf.agent:
        conf.agent = re.sub(r"[\r\n]", "", conf.agent)
    if conf.user:
        conf.user = conf.user.replace(" ", "")
    if conf.rParam:
        conf.rParam = conf.rParam.replace(" ", "")
        conf.rParam = re.split(PARAMETER_SPLITTING_REGEX, conf.rParam)
    else:
        conf.rParam = []
    # Unescape e.g. '\\n' given on the command line as parameter delimiter
    if conf.paramDel and '\\' in conf.paramDel:
        conf.paramDel = conf.paramDel.decode("string_escape")
    if conf.skip:
        conf.skip = conf.skip.replace(" ", "")
        conf.skip = re.split(PARAMETER_SPLITTING_REGEX, conf.skip)
    else:
        conf.skip = []
    if conf.cookie:
        conf.cookie = re.sub(r"[\r\n]", "", conf.cookie)
    if conf.delay:
        conf.delay = float(conf.delay)
    if conf.rFile:
        conf.rFile = ntToPosixSlashes(normalizePath(conf.rFile))
    if conf.wFile:
        conf.wFile = ntToPosixSlashes(normalizePath(conf.wFile))
    if conf.dFile:
        conf.dFile = ntToPosixSlashes(normalizePath(conf.dFile))
    if conf.sitemapUrl and not conf.sitemapUrl.lower().startswith("http"):
        conf.sitemapUrl = "http%s://%s" % ('s' if conf.forceSSL else '', conf.sitemapUrl)
    if conf.msfPath:
        conf.msfPath = ntToPosixSlashes(normalizePath(conf.msfPath))
    if conf.tmpPath:
        conf.tmpPath = ntToPosixSlashes(normalizePath(conf.tmpPath))
    # Any of these options implies scanning more than one target
    if any((conf.googleDork, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.forms, conf.crawlDepth)):
        conf.multipleTargets = True
    if conf.optimize:
        setOptimize()
    # A custom injection marker found in POST data or URL takes precedence
    # over the default marker character
    match = re.search(INJECT_HERE_REGEX, conf.data or "")
    if match:
        kb.customInjectionMark = match.group(0)
    match = re.search(INJECT_HERE_REGEX, conf.url or "")
    if match:
        kb.customInjectionMark = match.group(0)
    if conf.os:
        conf.os = conf.os.capitalize()
    if conf.dbms:
        conf.dbms = conf.dbms.capitalize()
    # Turn glob-like '*'/'+' in test filters into valid regex wildcards,
    # falling back to a literal match when the result does not compile
    if conf.testFilter:
        conf.testFilter = conf.testFilter.strip('*+')
        conf.testFilter = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testFilter)
        try:
            re.compile(conf.testFilter)
        except re.error:
            conf.testFilter = re.escape(conf.testFilter)
    if conf.testSkip:
        conf.testSkip = conf.testSkip.strip('*+')
        conf.testSkip = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testSkip)
        try:
            re.compile(conf.testSkip)
        except re.error:
            conf.testSkip = re.escape(conf.testSkip)
    # Double the default time-based delay when going through Tor (unless
    # the user explicitly set '--time-sec' himself)
    if "timeSec" not in kb.explicitSettings:
        if conf.tor:
            conf.timeSec = 2 * conf.timeSec
            kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
            warnMsg = "increasing default value for "
            warnMsg += "option '--time-sec' to %d because " % conf.timeSec
            warnMsg += "switch '--tor' was provided"
            logger.warn(warnMsg)
    else:
        kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
    if conf.retries:
        conf.retries = min(conf.retries, MAX_CONNECT_RETRIES)
    if conf.code:
        conf.code = int(conf.code)
    if conf.csvDel:
        conf.csvDel = conf.csvDel.decode("string_escape")  # e.g. '\\t' -> '\t'
    if conf.torPort and isinstance(conf.torPort, basestring) and conf.torPort.isdigit():
        conf.torPort = int(conf.torPort)
    if conf.torType:
        conf.torType = conf.torType.upper()
    if conf.outputDir:
        paths.SQLMAP_OUTPUT_PATH = os.path.realpath(os.path.expanduser(conf.outputDir))
        setPaths(paths.SQLMAP_ROOT_PATH)
    # Unescape escape sequences in the --string value; on failure replace
    # escaped whitespace representations one by one
    if conf.string:
        try:
            conf.string = conf.string.decode("unicode_escape")
        except:
            charset = string.whitespace.replace(" ", "")
            for _ in charset:
                conf.string = conf.string.replace(_.encode("string_escape"), _)
    if conf.getAll:
        map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
    if conf.noCast:
        for _ in DUMP_REPLACEMENTS.keys():
            del DUMP_REPLACEMENTS[_]
    if conf.dumpFormat:
        conf.dumpFormat = conf.dumpFormat.upper()
    if conf.torType:
        conf.torType = conf.torType.upper()
    # Normalize comma separated column lists (strip surrounding whitespace)
    if conf.col:
        conf.col = re.sub(r"\s*,\s*", ',', conf.col)
    if conf.excludeCol:
        conf.excludeCol = re.sub(r"\s*,\s*", ',', conf.excludeCol)
    if conf.binaryFields:
        conf.binaryFields = re.sub(r"\s*,\s*", ',', conf.binaryFields)
    # Socket pre-connect does not play along with proxied connections
    if any((conf.proxy, conf.proxyFile, conf.tor)):
        conf.disablePrecon = True
    threadData = getCurrentThreadData()
    threadData.reset()
def _cleanupEnvironment():
    """
    Cleanup environment (e.g. from leftovers after --sqlmap-shell).
    """
    # Undo eventual SOCKS proxy monkey-patching of urllib2's socket usage
    if issubclass(urllib2.socket.socket, socks.socksocket):
        socks.unwrapmodule(urllib2)
    # Clear leftover pre-connected socket bookkeeping, if present
    # (presumably populated by the socket pre-connect machinery - confirm)
    if hasattr(socket, "_ready"):
        socket._ready.clear()
def _dirtyPatches():
    """
    Place for "dirty" Python related patches
    """
    httplib._MAXLINE = 1 * 1024 * 1024  # accept overly long result lines (e.g. SQLi results in HTTP header responses)
    if IS_WIN:
        # Importing the module has the side effect of registering
        # inet_pton() support on Windows OS
        from thirdparty.wininetpton import win_inet_pton  # add support for inet_pton() on Windows OS
def _purgeOutput():
    """
    Safely removes (purges) output directory.
    """

    if not conf.purgeOutput:
        return

    purge(paths.SQLMAP_OUTPUT_PATH)
def _setConfAttributes():
    """
    This function set some needed attributes into the configuration
    singleton.

    All values start as empty/None placeholders and are filled in later
    during option parsing and target setup.
    """
    debugMsg = "initializing the configuration"
    logger.debug(debugMsg)
    # HTTP authentication credentials (parsed from --auth-cred)
    conf.authUsername = None
    conf.authPassword = None
    conf.boundaries = []
    conf.cj = None
    conf.dbmsConnector = None
    conf.dbmsHandler = None
    conf.dnsServer = None
    conf.dumpPath = None
    conf.hashDB = None
    conf.hashDBFile = None
    conf.httpCollector = None
    conf.httpHeaders = []
    # Target connection details (filled by parseTargetUrl())
    conf.hostname = None
    conf.ipv6 = False
    conf.multipleTargets = False
    conf.outputPath = None
    conf.paramDict = {}
    conf.parameters = {}
    conf.path = None
    conf.port = None
    conf.proxyList = None
    conf.resultsFilename = None
    conf.resultsFP = None
    conf.scheme = None
    conf.tests = []
    conf.trafficFP = None
    conf.HARCollectorFactory = None
    conf.wFileType = None
def _setKnowledgeBaseAttributes(flushAll=True):
    """
    This function set some needed attributes into the knowledge base
    singleton.

    @param flushAll: if True also reset attributes that should survive
        between consecutive runs against multiple targets (e.g. loaded
        keyword/user-agent lists, tamper functions, visited targets)
    """
    debugMsg = "initializing the knowledge base"
    logger.debug(debugMsg)
    kb.absFilePaths = set()
    kb.adjustTimeDelay = None
    kb.alerted = False
    kb.alwaysRefresh = None
    kb.arch = None
    kb.authHeader = None
    kb.bannerFp = AttribDict()
    kb.binaryField = False
    kb.browserVerification = None
    kb.brute = AttribDict({"tables": [], "columns": []})
    kb.bruteMode = False
    # Per-run caches (page content, encodings, parsed fingerprints, ...)
    kb.cache = AttribDict()
    kb.cache.addrinfo = {}
    kb.cache.content = {}
    kb.cache.encoding = {}
    kb.cache.intBoundaries = None
    kb.cache.parsedDbms = {}
    kb.cache.regex = {}
    kb.cache.stdev = {}
    kb.captchaDetected = None
    # Randomized marker characters used to delimit values inside responses
    kb.chars = AttribDict()
    kb.chars.delimiter = randomStr(length=6, lowercase=True)
    kb.chars.start = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.stop = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.at, kb.chars.space, kb.chars.dollar, kb.chars.hash_ = ("%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, _, KB_CHARS_BOUNDARY_CHAR) for _ in randomStr(length=4, lowercase=True))
    kb.columnExistsChoice = None
    kb.commonOutputs = None
    kb.connErrorChoice = None
    kb.connErrorCounter = 0
    kb.cookieEncodeChoice = None
    kb.counters = {}
    kb.customInjectionMark = CUSTOM_INJECTION_MARK_CHAR
    kb.data = AttribDict()
    kb.dataOutputFlag = False
    # Active back-end DBMS fingerprint
    kb.dbms = None
    kb.dbmsVersion = [UNKNOWN_DBMS_VERSION]
    kb.delayCandidates = TIME_DELAY_CANDIDATES * [0]
    kb.dep = None
    kb.dnsMode = False
    kb.dnsTest = None
    kb.docRoot = None
    kb.droppingRequests = False
    kb.dumpColumns = None
    kb.dumpTable = None
    kb.dumpKeyboardInterrupt = False
    kb.dynamicMarkings = []
    kb.dynamicParameter = False
    kb.endDetection = False
    kb.explicitSettings = set()
    kb.extendTests = None
    kb.errorChunkLength = None
    kb.errorIsNone = True
    kb.falsePositives = []
    kb.fileReadMode = False
    kb.followSitemapRecursion = None
    kb.forcedDbms = None
    kb.forcePartialUnion = False
    kb.forceWhere = None
    kb.futileUnion = None
    kb.headersFp = {}
    kb.heuristicDbms = None
    kb.heuristicExtendedDbms = None
    kb.heuristicMode = False
    kb.heuristicPage = False
    kb.heuristicTest = None
    kb.hintValue = None
    kb.htmlFp = []
    kb.httpErrorCodes = {}
    kb.inferenceMode = False
    kb.ignoreCasted = None
    kb.ignoreNotFound = False
    kb.ignoreTimeout = False
    kb.injection = InjectionDict()
    kb.injections = []
    kb.laggingChecked = False
    kb.lastParserStatus = None
    # One lock per shared resource used by worker threads
    kb.locks = AttribDict()
    for _ in ("cache", "connError", "count", "index", "io", "limit", "log", "socket", "redirect", "request", "value"):
        kb.locks[_] = threading.Lock()
    kb.matchRatio = None
    kb.maxConnectionsFlag = False
    kb.mergeCookies = None
    kb.multiThreadMode = False
    kb.negativeLogic = False
    kb.nullConnection = None
    kb.oldMsf = None
    kb.orderByColumns = None
    kb.originalCode = None
    kb.originalPage = None
    kb.originalPageTime = None
    kb.originalTimeDelay = None
    kb.originalUrls = dict()
    # Back-end DBMS underlying operating system fingerprint via banner (-b)
    # parsing
    kb.os = None
    kb.osVersion = None
    kb.osSP = None
    kb.pageCompress = True
    kb.pageTemplate = None
    kb.pageTemplates = dict()
    kb.pageEncoding = DEFAULT_PAGE_ENCODING
    kb.pageStable = None
    kb.partRun = None
    kb.permissionFlag = False
    kb.postHint = None
    kb.postSpaceToPlus = False
    kb.postUrlEncode = True
    kb.prependFlag = False
    kb.processResponseCounter = 0
    kb.previousMethod = None
    kb.processUserMarks = None
    kb.proxyAuthHeader = None
    kb.queryCounter = 0
    kb.redirectChoice = None
    kb.reflectiveMechanism = True
    kb.reflectiveCounters = {REFLECTIVE_COUNTER.MISS: 0, REFLECTIVE_COUNTER.HIT: 0}
    kb.requestCounter = 0
    kb.resendPostOnRedirect = None
    kb.resolutionDbms = None
    kb.responseTimes = {}
    kb.responseTimeMode = None
    kb.responseTimePayload = None
    kb.resumeValues = True
    kb.rowXmlMode = False
    kb.safeCharEncode = False
    kb.safeReq = AttribDict()
    kb.singleLogFlags = set()
    kb.skipSeqMatcher = False
    kb.reduceTests = None
    kb.tlsSNI = {}
    kb.stickyDBMS = False
    kb.stickyLevel = None
    kb.storeCrawlingChoice = None
    kb.storeHashesChoice = None
    kb.suppressResumeInfo = False
    kb.tableFrom = None
    kb.technique = None
    kb.tempDir = None
    kb.testMode = False
    kb.testOnlyCustom = False
    kb.testQueryCount = 0
    kb.testType = None
    kb.threadContinue = True
    kb.threadException = False
    kb.tableExistsChoice = None
    kb.uChar = NULL
    kb.unionDuplicates = False
    kb.wafSpecificResponse = None
    kb.xpCmdshellAvailable = False
    # Attributes below persist across targets unless a full flush is asked
    if flushAll:
        kb.headerPaths = {}
        kb.keywords = set(getFileItems(paths.SQL_KEYWORDS))
        kb.passwordMgr = None
        kb.skipVulnHost = None
        kb.tamperFunctions = []
        kb.targets = oset()
        kb.testedParams = set()
        kb.userAgents = None
        kb.vainRun = True
        kb.vulnHosts = set()
        kb.wafFunctions = []
        kb.wordlists = None
def _useWizardInterface():
    """
    Presents simple wizard interface for beginner users

    Asks for the target URL, optional body data, difficulty and
    enumeration depth, then mutes output and enables batch mode.
    """
    if not conf.wizard:
        return
    logger.info("starting wizard interface")
    # Keep asking until a non-empty target URL is provided
    while not conf.url:
        message = "Please enter full target URL (-u): "
        conf.url = readInput(message, default=None)
    message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
    conf.data = readInput(message, default=None)
    # If neither URL nor body carries a testable parameter (or a custom
    # '*' injection mark), fall back to form searching
    if not (filter(lambda _: '=' in unicode(_), (conf.url, conf.data)) or '*' in conf.url):
        warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
        warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
        if not conf.crawlDepth and not conf.forms:
            warnMsg += "Will search for forms"
            conf.forms = True
        logger.warn(warnMsg)
    # Difficulty choice maps onto --risk/--level values
    choice = None
    while choice is None or choice not in ("", "1", "2", "3"):
        message = "Injection difficulty (--level/--risk). Please choose:\n"
        message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
        choice = readInput(message, default='1')
    if choice == '2':
        conf.risk = 2
        conf.level = 3
    elif choice == '3':
        conf.risk = 3
        conf.level = 5
    else:
        conf.risk = 1
        conf.level = 1
    # Enumeration depth enables predefined groups of enumeration switches
    if not conf.getAll:
        choice = None
        while choice is None or choice not in ("", "1", "2", "3"):
            message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
            message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
            choice = readInput(message, default='1')
        if choice == '2':
            map(lambda x: conf.__setitem__(x, True), WIZARD.INTERMEDIATE)
        elif choice == '3':
            map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
        else:
            map(lambda x: conf.__setitem__(x, True), WIZARD.BASIC)
    logger.debug("muting sqlmap.. it will do the magic for you")
    conf.verbose = 0
    conf.batch = True
    conf.threads = 4
    dataToStdout("\nsqlmap is running, please wait..\n\n")
def _saveConfig():
    """
    Saves the command line options to a sqlmap configuration INI file
    Format.
    """

    if not conf.saveConfig:
        return

    logger.debug("saving command line options to a sqlmap configuration INI file")

    saveConfig(conf, conf.saveConfig)

    logger.info("saved command line options to the configuration file '%s'" % conf.saveConfig)
def setVerbosity():
    """
    This function set the verbosity of sqlmap output messages.
    """

    if conf.verbose is None:
        conf.verbose = 1

    conf.verbose = int(conf.verbose)

    # ETA display is incompatible with verbosity levels above 2
    if conf.verbose > 2 and conf.eta:
        conf.verbose = 2

    if conf.verbose >= 5:
        logger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)
    else:
        # Direct mapping of remaining verbosity levels to logging levels
        levels = {
            0: logging.ERROR,
            1: logging.INFO,
            2: logging.DEBUG,
            3: CUSTOM_LOGGING.PAYLOAD,
            4: CUSTOM_LOGGING.TRAFFIC_OUT,
        }
        if conf.verbose in levels:
            logger.setLevel(levels[conf.verbose])
def _normalizeOptions(inputOptions):
    """
    Sets proper option types

    Coerces each known option value in *inputOptions* (in-place) to the
    type declared in optDict, substituting a neutral fallback value on
    conversion failure.
    """

    # Cast function and fallback value per declared option type
    casts = {
        OPTION_TYPE.BOOLEAN: (bool, False),
        OPTION_TYPE.INTEGER: (int, 0),
        OPTION_TYPE.FLOAT: (float, 0.0),
    }

    types_ = {}
    for group in optDict.keys():
        types_.update(optDict[group])

    for key in inputOptions:
        if key not in types_:
            continue

        value = inputOptions[key]
        if value is None:
            continue

        type_ = types_[key]
        if type_ and isinstance(type_, tuple):
            type_ = type_[0]

        if type_ in casts:
            func, fallback = casts[type_]
            try:
                value = func(value)
            except (TypeError, ValueError):
                value = fallback

        inputOptions[key] = value
def _mergeOptions(inputOptions, overrideOptions):
    """
    Merge command line options with configuration file and default options.

    Precedence: explicit input options, then configuration file values,
    then defaults, finally SQLMAP_* environment variable overrides.

    @param inputOptions: optparse object with command line options.
    @type inputOptions: C{instance}
    """
    if inputOptions.configFile:
        configFileParser(inputOptions.configFile)
    # Support both dict-like objects and plain optparse namespaces
    if hasattr(inputOptions, "items"):
        inputOptionsItems = inputOptions.items()
    else:
        inputOptionsItems = inputOptions.__dict__.items()
    for key, value in inputOptionsItems:
        if key not in conf or value not in (None, False) or overrideOptions:
            conf[key] = value
    # Remember which settings were set explicitly (used e.g. for the
    # automatic '--time-sec' adjustment)
    if not conf.api:
        for key, value in conf.items():
            if value is not None:
                kb.explicitSettings.add(key)
    # Fill remaining unset options with their defaults
    for key, value in defaults.items():
        if hasattr(conf, key) and conf[key] is None:
            conf[key] = value
    # Lookup table mapping upper-cased option names to their real names
    lut = {}
    for group in optDict.keys():
        lut.update((_.upper(), _) for _ in optDict[group])
    # Apply SQLMAP_<OPTION> environment variable overrides
    envOptions = {}
    for key, value in os.environ.items():
        if key.upper().startswith(SQLMAP_ENVIRONMENT_PREFIX):
            _ = key[len(SQLMAP_ENVIRONMENT_PREFIX):].upper()
            if _ in lut:
                envOptions[lut[_]] = value
    if envOptions:
        _normalizeOptions(envOptions)
        for key, value in envOptions.items():
            conf[key] = value
    mergedOptions.update(conf)
def _setTrafficOutputFP():
    """
    Open the file used for logging all HTTP traffic (option -t)
    """

    if not conf.trafficFile:
        return

    logger.info("setting file for logging HTTP traffic")

    conf.trafficFP = openFile(conf.trafficFile, "w+")
def _setupHTTPCollector():
    """
    Create the HAR traffic collector when option --har is used
    """

    if conf.harFile:
        conf.httpCollector = HTTPCollectorFactory(conf.harFile).create()
def _setDNSServer():
    """
    Start the local DNS server instance used for DNS data exfiltration
    (option --dns-domain). Requires administrative privileges since the
    server listens on UDP port 53.
    """
    if not conf.dnsDomain:
        return
    infoMsg = "setting up DNS server instance"
    logger.info(infoMsg)
    isAdmin = runningAsAdmin()
    if isAdmin:
        try:
            conf.dnsServer = DNSServer()
            conf.dnsServer.run()
        except socket.error, msg:
            errMsg = "there was an error while setting up "
            errMsg += "DNS server instance ('%s')" % msg
            raise SqlmapGenericException(errMsg)
    else:
        errMsg = "you need to run sqlmap as an administrator "
        errMsg += "if you want to perform a DNS data exfiltration attack "
        errMsg += "as it will need to listen on privileged UDP port 53 "
        errMsg += "for incoming address resolution attempts"
        raise SqlmapMissingPrivileges(errMsg)
def _setProxyList():
    """
    Load proxy addresses from the user supplied file into conf.proxyList
    """

    if not conf.proxyFile:
        return

    conf.proxyList = []
    content = readCachedFileContent(conf.proxyFile)

    for match in re.finditer(r"(?i)((http[^:]*|socks[^:]*)://)?([\w\-.]+):(\d+)", content):
        type_ = match.group(2)
        address = match.group(3)
        port = match.group(4)
        # Scheme defaults to plain HTTP when not given
        conf.proxyList.append("%s://%s:%s" % (type_ or "http", address, port))
def _setTorProxySettings():
    """
    Dispatch Tor proxy setup to the HTTP or SOCKS variant depending on
    the chosen --tor-type
    """

    if not conf.tor:
        return

    setup = _setTorHttpProxySettings if conf.torType == PROXY_TYPE.HTTP else _setTorSocksProxySettings
    setup()
def _setTorHttpProxySettings():
    """
    Point conf.proxy at a locally listening Tor HTTP proxy port
    """

    logger.info("setting Tor HTTP proxy settings")

    port = findLocalPort(DEFAULT_TOR_HTTP_PORTS if not conf.torPort else (conf.torPort,))

    if not port:
        errMsg = "can't establish connection with the Tor HTTP proxy. "
        errMsg += "Please make sure that you have Tor (bundle) installed and setup "
        errMsg += "so you could be able to successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)

    conf.proxy = "http://%s:%d" % (LOCALHOST, port)

    if not conf.checkTor:
        warnMsg = "use switch '--check-tor' at "
        warnMsg += "your own convenience when accessing "
        warnMsg += "Tor anonymizing network because of "
        warnMsg += "known issues with default settings of various 'bundles' "
        warnMsg += "(e.g. Vidalia)"
        logger.warn(warnMsg)
def _setTorSocksProxySettings():
    """
    Wrap urllib2's sockets with a locally listening Tor SOCKS proxy
    """

    logger.info("setting Tor SOCKS proxy settings")

    port = findLocalPort(DEFAULT_TOR_SOCKS_PORTS if not conf.torPort else (conf.torPort,))

    if not port:
        errMsg = "can't establish connection with the Tor SOCKS proxy. "
        errMsg += "Please make sure that you have Tor service installed and setup "
        errMsg += "so you could be able to successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)

    # SOCKS5 to prevent DNS leaks (http://en.wikipedia.org/wiki/Tor_%28anonymity_network%29)
    proxyType = socks.PROXY_TYPE_SOCKS5 if conf.torType == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4
    socks.setdefaultproxy(proxyType, LOCALHOST, port)
    socks.wrapmodule(urllib2)
def _checkWebSocket():
    """
    Make sure the third-party 'websocket-client' module is importable
    whenever the target URL uses the ws:// or wss:// scheme.

    Raises SqlmapMissingDependence when the module is not installed.
    """
    if conf.url and (conf.url.startswith("ws:/") or conf.url.startswith("wss:/")):
        try:
            from websocket import ABNF
        except ImportError:
            # Fixed typo in user-facing message ("funcionality")
            errMsg = "sqlmap requires third-party module 'websocket-client' "
            errMsg += "in order to use WebSocket functionality"
            raise SqlmapMissingDependence(errMsg)
def _checkTor():
    """
    Verify (via check.torproject.org) that traffic really goes through Tor
    """

    if not conf.checkTor:
        return

    logger.info("checking Tor connection")

    try:
        page, _, _ = Request.getPage(url="https://check.torproject.org/", raise404=False)
    except SqlmapConnectionException:
        page = None

    # The check page congratulates the visitor when Tor is in use
    if page and 'Congratulations' in page:
        logger.info("Tor is properly being used")
    else:
        errMsg = "it appears that Tor is not properly set. Please try using options '--tor-type' and/or '--tor-port'"
        raise SqlmapConnectionException(errMsg)
def _basicOptionValidation():
if conf.limitStart is not None and not (isinstance(conf.limitStart, int) and conf.limitStart > 0):
errMsg = "value for option '--start' (limitStart) must be an integer value greater than zero (>0)"
raise SqlmapSyntaxException(errMsg)
if conf.limitStop is not None and not (isinstance(conf.limitStop, int) and conf.limitStop > 0):
errMsg = "value for option '--stop' (limitStop) must be an integer value greater than zero (>0)"
raise SqlmapSyntaxException(errMsg)
if conf.level is not None and not (isinstance(conf.level, int) and conf.level >= 1 and conf.level <= 5):
errMsg = "value for option '--level' must be an integer value from range [1, 5]"
raise SqlmapSyntaxException(errMsg)
if conf.risk is not None and not (isinstance(conf.risk, int) and conf.risk >= 1 and conf.risk <= 3):
errMsg = "value for option '--risk' must be an integer value from range [1, 3]"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.limitStart, int) and conf.limitStart > 0 and \
isinstance(conf.limitStop, int) and conf.limitStop < conf.limitStart:
errMsg = "value for option '--start' (limitStart) must be smaller or equal than value for --stop (limitStop) option"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.firstChar, int) and conf.firstChar > 0 and \
isinstance(conf.lastChar, int) and conf.lastChar < conf.firstChar:
errMsg = "value for option '--first' (firstChar) must be smaller than or equal to value for --last (lastChar) option"
raise SqlmapSyntaxException(errMsg)
if conf.textOnly and conf.nullConnection:
errMsg = "switch '--text-only' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.eta and conf.verbose > defaults.verbose:
errMsg = "switch '--eta' is incompatible with option '-v'"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.url:
errMsg = "option '-d' is incompatible with option '-u' ('--url')"
raise SqlmapSyntaxException(errMsg)
if conf.identifyWaf and conf.skipWaf:
errMsg = "switch '--identify-waf' is incompatible with switch '--skip-waf'"
raise SqlmapSyntaxException(errMsg)
if conf.titles and conf.nullConnection:
errMsg = "switch '--titles' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpTable and conf.search:
errMsg = "switch '--dump' is incompatible with switch '--search'"
raise SqlmapSyntaxException(errMsg)
if conf.api and not conf.configFile:
errMsg = "switch '--api' requires usage of option '-c'"
raise SqlmapSyntaxException(errMsg)
if conf.data and conf.nullConnection:
errMsg = "option '--data' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.string and conf.nullConnection:
errMsg = "option '--string' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.notString and conf.nullConnection:
errMsg = "option '--not-string' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.noCast and conf.hexConvert:
errMsg = "switch '--no-cast' is incompatible with switch '--hex'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpAll and conf.search:
errMsg = "switch '--dump-all' is incompatible with switch '--search'"
raise SqlmapSyntaxException(errMsg)
if conf.string and conf.notString:
errMsg = "option '--string' is incompatible with switch '--not-string'"
raise SqlmapSyntaxException(errMsg)
if conf.regexp and conf.nullConnection:
errMsg = "option '--regexp' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.regexp:
try:
re.compile(conf.regexp)
except Exception, ex:
errMsg = "invalid regular expression '%s' ('%s')" % (conf.regexp, getSafeExString(ex))
raise SqlmapSyntaxException(errMsg)
if conf.crawlExclude:
try:
re.compile(conf.crawlExclude)
except Exception, ex:
errMsg = "invalid regular expression '%s' ('%s')" % (conf.crawlExclude, getSafeExString(ex))
raise SqlmapSyntaxException(errMsg)
if conf.dumpTable and conf.dumpAll:
errMsg = "switch '--dump' is incompatible with switch '--dump-all'"
raise SqlmapSyntaxException(errMsg)
if conf.predictOutput and (conf.threads > 1 or conf.optimize):
errMsg = "switch '--predict-output' is incompatible with option '--threads' and switch '-o'"
raise SqlmapSyntaxException(errMsg)
if conf.threads > MAX_NUMBER_OF_THREADS and not conf.get("skipThreadCheck"):
errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
raise SqlmapSyntaxException(errMsg)
if conf.forms and not any((conf.url, conf.googleDork, conf.bulkFile, conf.sitemapUrl)):
errMsg = "switch '--forms' requires usage of option '-u' ('--url'), '-g', '-m' or '-x'"
raise SqlmapSyntaxException(errMsg)
if conf.crawlExclude and not conf.crawlDepth:
errMsg = "option '--crawl-exclude' requires usage of switch '--crawl'"
raise SqlmapSyntaxException(errMsg)
if conf.safePost and not conf.safeUrl:
errMsg = "option '--safe-post' requires usage of option '--safe-url'"
raise SqlmapSyntaxException(errMsg)
if conf.safeFreq and not any((conf.safeUrl, conf.safeReqFile)):
errMsg = "option '--safe-freq' requires usage of option '--safe-url' or '--safe-req'"
raise SqlmapSyntaxException(errMsg)
if conf.safeReqFile and any((conf.safeUrl, conf.safePost)):
errMsg = "option '--safe-req' is incompatible with option '--safe-url' and option '--safe-post'"
raise SqlmapSyntaxException(errMsg)
if conf.csrfUrl and not conf.csrfToken:
errMsg = "option '--csrf-url' requires usage of option '--csrf-token'"
raise SqlmapSyntaxException(errMsg)
if conf.csrfToken and conf.threads > 1:
errMsg = "option '--csrf-url' is incompatible with option '--threads'"
raise SqlmapSyntaxException(errMsg)
if conf.requestFile and conf.url and conf.url != DUMMY_URL:
errMsg = "option '-r' is incompatible with option '-u' ('--url')"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.proxy:
errMsg = "option '-d' is incompatible with option '--proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.tor:
errMsg = "option '-d' is incompatible with switch '--tor'"
raise SqlmapSyntaxException(errMsg)
if not conf.tech:
errMsg = "option '--technique' can't be empty"
raise SqlmapSyntaxException(errMsg)
if conf.tor and conf.ignoreProxy:
errMsg = "switch '--tor' is incompatible with switch '--ignore-proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.tor and conf.proxy:
errMsg = "switch '--tor' is incompatible with option '--proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.proxy and conf.proxyFile:
errMsg = "switch '--proxy' is incompatible with option '--proxy-file'"
raise SqlmapSyntaxException(errMsg)
if conf.checkTor and not any((conf.tor, conf.proxy)):
errMsg = "switch '--check-tor' requires usage of switch '--tor' (or option '--proxy' with HTTP proxy address using Tor)"
raise SqlmapSyntaxException(errMsg)
if conf.torPort is not None and not (isinstance(conf.torPort, int) and conf.torPort >= 0 and conf.torPort <= 65535):
errMsg = "value for option '--tor-port' must be in range 0-65535"
raise SqlmapSyntaxException(errMsg)
if conf.torType not in getPublicTypeMembers(PROXY_TYPE, True):
errMsg = "option '--tor-type' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(PROXY_TYPE, True))
raise SqlmapSyntaxException(errMsg)
if conf.dumpFormat not in getPublicTypeMembers(DUMP_FORMAT, True):
errMsg = "option '--dump-format' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(DUMP_FORMAT, True))
raise SqlmapSyntaxException(errMsg)
if conf.skip and conf.testParameter:
errMsg = "option '--skip' is incompatible with option '-p'"
raise SqlmapSyntaxException(errMsg)
if conf.mobile and conf.agent:
errMsg = "switch '--mobile' is incompatible with option '--user-agent'"
raise SqlmapSyntaxException(errMsg)
if conf.proxy and conf.ignoreProxy:
errMsg = "option '--proxy' is incompatible with switch '--ignore-proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.timeSec < 1:
errMsg = "value for option '--time-sec' must be a positive integer"
raise SqlmapSyntaxException(errMsg)
if conf.uChar and not re.match(UNION_CHAR_REGEX, conf.uChar):
errMsg = "value for option '--union-char' must be an alpha-numeric value (e.g. 1)"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.uCols, basestring):
if not conf.uCols.isdigit() and ("-" not in conf.uCols or len(conf.uCols.split("-")) != 2):
errMsg = "value for option '--union-cols' must be a range with hyphon "
errMsg += "(e.g. 1-10) or integer value (e.g. 5)"
raise SqlmapSyntaxException(errMsg)
if conf.dbmsCred and ':' not in conf.dbmsCred:
errMsg = "value for option '--dbms-cred' must be in "
errMsg += "format <username>:<password> (e.g. \"root:pass\")"
raise SqlmapSyntaxException(errMsg)
if conf.charset:
_ = checkCharEncoding(conf.charset, False)
if _ is None:
errMsg = "unknown charset '%s'. Please visit " % conf.charset
errMsg += "'%s' to get the full list of " % CODECS_LIST_PAGE
errMsg += "supported charsets"
raise SqlmapSyntaxException(errMsg)
else:
conf.charset = _
if conf.loadCookies:
if not os.path.exists(conf.loadCookies):
errMsg = "cookies file '%s' does not exist" % conf.loadCookies
raise SqlmapFilePathException(errMsg)
def _resolveCrossReferences():
    """Wire local functions into sibling modules.

    These modules cannot import the functions directly at load time
    because that would create circular imports, so the references are
    patched in here after everything has been loaded.
    """
    lib.core.threads.readInput = readInput
    lib.core.common.getPageTemplate = getPageTemplate
    lib.core.convert.singleTimeWarnMessage = singleTimeWarnMessage
    # both the plain request layer and the search utility share one handler setup
    lib.request.connect.setHTTPHandlers = _setHTTPHandlers
    lib.utils.search.setHTTPHandlers = _setHTTPHandlers
    lib.controller.checks.setVerbosity = setVerbosity
    lib.controller.checks.setWafFunctions = _setWafFunctions
def initOptions(inputOptions=AttribDict(), overrideOptions=False):
    """Initialize configuration and knowledge-base singletons, then merge
    the given input options into the configuration.

    `overrideOptions` makes values from `inputOptions` win over already
    set configuration values.
    """
    # NOTE(review): mutable default argument (AttribDict()) is shared across
    # calls -- assumed to be treated as read-only by _mergeOptions; confirm.
    _setConfAttributes()
    _setKnowledgeBaseAttributes()
    _mergeOptions(inputOptions, overrideOptions)
def init():
    """
    Set attributes into both configuration and knowledge base singletons
    based upon command line and configuration file options.
    """
    # General setup that does not require any target/connection.
    _useWizardInterface()
    setVerbosity()
    _saveConfig()
    _setRequestFromFile()
    _cleanupOptions()
    _cleanupEnvironment()
    _dirtyPatches()
    _purgeOutput()
    _checkDependencies()
    _createTemporaryDirectory()
    _basicOptionValidation()
    _setProxyList()
    _setTorProxySettings()
    _setDNSServer()
    _adjustLoggingFormatter()
    _setMultipleTargets()
    _setTamperingFunctions()
    _setWafFunctions()
    _setTrafficOutputFP()
    _setupHTTPCollector()
    _resolveCrossReferences()
    _checkWebSocket()

    parseTargetUrl()
    parseTargetDirect()

    # HTTP/network related setup is only needed when there is something
    # to connect to (URL, logged requests, dork results, bulk file, ...).
    if any((conf.url, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.requestFile, conf.googleDork, conf.liveTest)):
        _setHTTPTimeout()
        _setHTTPExtraHeaders()
        _setHTTPCookies()
        _setHTTPReferer()
        _setHTTPHost()
        _setHTTPUserAgent()
        _setHTTPAuthentication()
        _setHTTPHandlers()
        _setDNSCache()
        _setSocketPreConnect()
        _setSafeVisit()
        _doSearch()
        _setBulkMultipleTargets()
        _setSitemapTargets()
        _checkTor()
        _setCrawler()
        _findPageForms()
        _setDBMS()
        _setTechnique()

    # Remaining setup applies regardless of whether a target was provided.
    _setThreads()
    _setOS()
    _setWriteFile()
    _setMetasploit()
    _setDBMSAuthentication()
    loadBoundaries()
    loadPayloads()
    _setPrefixSuffix()
    update()
    _loadQueries()
| zhinaonet/sqlmap-z | lib/core/option.py | Python | gpl-3.0 | 93,578 | [
"VisIt"
] | 5fc9d8ac2ad24331ec712e04884de0b684c2ea2ae81b586cef2d9466f64cccd9 |
"""
Inference model with Gaussian process for sampling isoform proportions.
"""
import math

import numpy as np
def erf(x):
    """Error function.

    Parameters
    ----------
    x : float
        Point at which to evaluate erf.

    Returns
    -------
    float
        erf(x); odd symmetric, i.e. erf(-x) == -erf(x).
    """
    # The previous hand-rolled Abramowitz & Stegun 7.1.26 polynomial
    # approximation (documented max error 1.5e-7) is replaced by the exact
    # C implementation from the standard library; results agree within
    # that documented tolerance for all callers.
    return math.erf(x)
def gamma_pdf(x, k, theta, log=True):
    """
    Calculate the probability density of Gamma distribution.

    Parameters
    ----------
    x : float
        The variable for calculating the probability density.
    k : float
        The shape of the Gamma distribution.
    theta : float
        The scale of the Gamma distribution.
    log : bool
        If true, the return value is at log scale.

    Returns
    -------
    pdf : numpy float
        The probability density of x, or None if k == 0.
    """
    if k == 0:
        print("The shape of Gamma distribution cannot be 0, please check!")
        return None
    # log density: -lgamma(k) - k*log(theta) + (k-1)*log(x) - x/theta.
    # math.lgamma replaces np.math.lgamma (the np.math alias was removed
    # in NumPy 2.0).
    pdf = -math.lgamma(k) - k * np.log(theta)
    pdf += (k - 1) * np.log(x) - x / theta
    if not log:
        pdf = np.exp(pdf)
    return pdf
def normal_pdf(x, mu, cov, log=True):
    """
    Calculate the probability density of Gaussian (Normal) distribution.

    Parameters
    ----------
    x : float, 1-D array_like (K, ), or 2-D array_like (K, N)
        The variable for calculating the probability density.
    mu : float or 1-D array_like, (K, )
        The mean of the Gaussian distribution.
    cov : float or 2-D array_like, (K, K)
        The variance or the covariance matrix of the Gaussian distribution.
    log : bool
        If true, the return value is at log scale.

    Returns
    -------
    pdf : numpy float
        The probability density of x.
        if N==1, return a float
        elif N>1, return an array
    """
    if len(np.array(mu).shape) == 0:
        # scalar mean: treat x as N independent samples of a 1-D variable
        x = np.array(x).reshape(-1,1)
    elif len(np.array(x).shape) <= 1:
        # a single K-dimensional sample -> shape (1, K)
        x = np.array(x).reshape(1, -1)
    x = x - np.array(mu)  # center the samples
    N, K = x.shape
    if len(np.array(cov).shape) < 2:
        # scalar variance -> 1x1 covariance matrix
        # NOTE(review): a 1-D cov of length K would be reshaped to (K, 1)
        # here, which is not a valid covariance -- assumed scalar only.
        cov = np.array(cov).reshape(-1,1)
    cov_inv = np.linalg.inv(cov)
    cov_det = np.linalg.det(cov)
    if cov_det <= 0:
        print("Warning: the det of covariance is not positive!")
        return None
    pdf_all = np.zeros(N)
    # shared normalization constant: -0.5 * (K*log(2*pi) + log|cov|)
    pdf_part1 = -(K*np.log(2*np.pi) + np.log(cov_det)) / 2.0
    for i in range(N):
        # per-sample quadratic form: -0.5 * (x-mu)^T cov^-1 (x-mu)
        pdf_all[i] = pdf_part1 - np.dot(np.dot(x[i,:], cov_inv), x[i,:]) / 2.0
    if log == False: pdf_all = np.exp(pdf_all)
    if N == 1: pdf_all = pdf_all[0]
    return pdf_all
def trun_normal_pdf(x, mu, sigma, a, b, log=True):
    """
    Calculate the probability density of Truncated Normal distribution.

    Parameters
    ----------
    x : float
        The variable for calculating the probability density.
    mu : float
        The mean of the Gaussian distribution.
    sigma : float
        The standard variance of the Gaussian distribution.
    a : float
        The lower bounder of the Truncated Normal distribution
    b : float
        The upper bounder of the Truncated Normal distribution
    log : bool
        If true, the return value is at log scale.

    Returns
    -------
    pdf : float
        The probability density of x.
    """
    # Shift everything so the distribution is centered at zero.
    x = x - mu
    a = a - mu
    b = b - mu
    # Untruncated Gaussian density at x.
    pdf = np.exp(-0.5 * (x/sigma)**2) / (sigma * np.sqrt(2 * np.pi))
    # Renormalize by the probability mass inside [a, b]. math.erf is exact,
    # replacing the module-level polynomial approximation (agreement within
    # its documented 1.5e-7 error bound) and making this function
    # self-contained.
    cdf_a = (1 + math.erf(a / sigma / np.sqrt(2))) / 2.0
    cdf_b = (1 + math.erf(b / sigma / np.sqrt(2))) / 2.0
    pdf = pdf / abs(cdf_b - cdf_a)
    if log: pdf = np.log(pdf)
    return pdf
def GP_K(X, theta):
    """
    Covariance of Gaussian process generator.

    It is based on a common squared-exponential kernel, with two parameters.

    Parameters
    ----------
    X : 1-D array_like, (N, )
        The x-axis of the Gaussian process, e.g., time points.
    theta : 1-D array_like, (2,)
        The array of the two parameters of the squared-exponential kernel.

    Returns
    -------
    K : 2-D array_like, (N, N)
        The covariance matrix of the N points at x-axis.
    """
    X = np.asarray(X, dtype=float)
    # Vectorized squared-exponential kernel (replaces the O(N^2) Python
    # double loop): K[i, j] = theta[0] * exp(-0.5 * (X[i]-X[j])**2 / theta[1])
    diff = X[:, None] - X[None, :]
    return theta[0] * np.exp(-0.5 * diff**2 / theta[1])
def Geweke_Z(X, first=0.1, last=0.5):
    """Geweke convergence diagnostic of a single MCMC chain.

    Compares the mean of the first `first` fraction of the chain against
    the mean of the final `1 - last` fraction, scaled by the pooled
    standard deviation of the two segments.

    See Geweke J. Evaluating the accuracy of sampling-based approaches to
    the calculation of posterior moments[M]. Minneapolis, MN, USA: Federal
    Reserve Bank of Minneapolis, Research Department, 1991.
    and https://pymc-devs.github.io/pymc/modelchecking.html#formal-methods

    Parameters
    ----------
    X : 1-D array_like, (N, )
        The uni-variate MCMC sampled chain for convergence diagnostic.
    first : float
        The proportion of first part in Geweke diagnostics.
    last : float
        The proportion of last part in Geweke diagnostics.

    Returns
    -------
    Z : float or None
        The Z score of Geweke diagnostics, or None when both segments
        have zero variance.
    """
    n = X.shape[0]
    head = X[:int(first * n)]
    tail = X[int(last * n):]
    pooled_sd = np.sqrt(np.var(head) + np.var(tail))
    if pooled_sd == 0:
        return None
    return abs(head.mean() - tail.mean()) / pooled_sd
def Psi_GP_MH(R_mat, len_isos, prob_isos, X=None, Ymean=None, var=None,
              theta1=3.0, theta2=None, M=20000, initial=1000, gap=500,
              randomS=None, theta2_std=1.0, theta2_low=0.00001, theta2_up=100):
    """
    Estimate the proportion of C isoforms at T time points with all reads
    by MCMC samplling (MH algorithm) combined with a GP prior.

    Parameters
    ----------
    R_mat : list of 2-D array_like, of length T
        A set of reads identities of belonging to C isoforms
    len_isos : list of 2-D array_like, of length T
        A set of effective length of C isoforms
    prob_isos : list of 2-D array_like, of length T
        A set of probablity for isoform specific reads
    X : 1-D array_like, (T, )
        An array of time points.
    Ymean : 2-D array_like, (C, T)
        The means for Y.
    var : 1-D array_like, (C-1, )
        An array of variation of each y.
    theta1 : float
        The fixed hyper-parameter theta1
    theta2 : float
        The fixed hyper-parameter theta2. If it is None, then sample it.
    theta2_std : float
        The jump std of hyper-parameter theta2 for each dimension, default=1.0
    theta2_low : float
        The lower bound of Truncated Normal distribution for sampling theta2.
    theta2_up : float
        The upper bound of Truncated Normal distribution for sampling theta2.
    randomS : float
        The fixed seeds for random number generator. None means ignoring it.
    M : int
        the maximum iterations of in MCMC sampler, default=100000
    initial : int
        the minmum iterations of in MCMC sampler, default=3000
    gap : int
        the gap iterations of in MCMC sampler, default=1000

    Returns
    -------
    Psi_all : 3-D array_like, (m, C, T)
        The the proposed m proportion of C isoforms of T time points
    Y_all : 3-D array_like, (m, C, T)
        The the proposed m latent y for C isoforms of T time points
    theta2_all : 2-D array_like, (m, C-1)
        The the proposed m hyper-parameter theta2 for C-1 isoforms
    Pro_all : 1-D array_like, (m,)
        The the probability for accepted proposals
    Lik_all : 1-D array_like, (m,)
        The the probability for accepted proposals
    cnt : int
        The number of acceptances
    m : int
        The number of iterations
    """
    T = len(len_isos)
    C = len(len_isos[0])
    if X is None: X = np.arange(T)
    if Ymean is None: Ymean = np.zeros((C,T))
    if randomS is not None: np.random.seed(randomS)

    # Sanitize inputs per time point: zero out NaN lengths/probabilities
    # (NaN != NaN is the NaN test) and drop reads that cannot be assigned
    # to any isoform.
    for t in range(T):
        idx = (len_isos[t] != len_isos[t])
        len_isos[t][idx] = 0.0
        prob_isos[t][:,idx] = 0.0
        R_mat[t][:,idx] = False

        idx = np.where(R_mat[t] != R_mat[t])
        R_mat[t][idx] = False

        idx = np.where(prob_isos[t] != prob_isos[t])
        prob_isos[t][idx] = 0.0

        idx = (R_mat[t].sum(axis=1) > 0) * (prob_isos[t].sum(axis=1) > 0)
        R_mat[t] = R_mat[t][idx,:]
        prob_isos[t] = prob_isos[t][idx,:]

    # step 0: MCMC fixed initializations
    if var is None:
        var = 0.05 * np.ones(C-1)
    theta_now = np.zeros((C-1, 2))
    theta_now[:,0] = theta1
    if theta2 is not None:
        theta_now[:,1] = theta2
    else:
        # NOTE(review): T is a scalar here, so np.max(T)-np.min(T) == 0 and
        # this always initializes theta2 to 0.1 * 0.001**2; presumably the
        # range of X was intended -- confirm with the authors.
        theta_now[:,1] = 0.1 * (np.max(T)-np.min(T)+0.001)**2 #0.75
    Y_now = Ymean + 0.0
    Ymean = np.zeros((C,T))
    psi_now = np.zeros((C, T))
    fsi_now = np.zeros((C, T))
    for t in range(T):
        # psi: softmax of latent Y; fsi: length-weighted proportions
        psi_now[:,t] = np.exp(Y_now[:,t]) / np.sum(np.exp(Y_now[:,t]))
        fsi_now[:,t] = len_isos[t]*psi_now[:,t]/np.sum(len_isos[t]*psi_now[:,t])

    # Posterior (P) and likelihood (L) of the initial state.
    P_now, L_now = 0, 0
    cov_now = np.zeros((T, T, C-1))
    for c in range(C-1):
        cov_now[:,:,c] = GP_K(X, theta_now[c,:])
        P_now += normal_pdf(Y_now[c,:], Ymean[c,:], cov_now[:,:,c])
    for t in range(T):
        P_now += np.log(np.dot(R_mat[t]*prob_isos[t], fsi_now[:, t])).sum()
        L_now += np.log(np.dot(R_mat[t]*prob_isos[t], fsi_now[:, t])).sum()

    # MCMC running
    Y_try = np.zeros((C, T))
    Y_all = np.zeros((M, C, T))
    psi_try = np.zeros((C, T))
    fsi_try = np.zeros((C, T))
    Psi_all = np.zeros((M, C, T))
    cov_try = np.zeros((T, T, C-1))
    theta_try = np.zeros((C-1, 2))
    theta2_all = np.zeros((M, C-1))
    theta_try[:, 0] = theta1
    if theta2 is not None:
        # theta2 fixed: the GP covariance never changes, precompute it once.
        theta_try[:,1] = theta2
        cov_try[:,:,:] = GP_K(X, theta_try[0,:]).reshape(T,T,1)
    cnt = 0
    Pro_all = np.zeros(M)
    Lik_all = np.zeros(M)
    for m in range(M):
        P_try, L_try, Q_now, Q_try = 0, 0, 0, 0

        # step 1: propose a value
        for c in range(C-1):
            # sample single theta2 for all isoforms
            if theta2 is None and c==0:
                theta_try[:,1] = np.random.normal(theta_now[c,1], theta2_std)
                # reject-and-resample until theta2 falls inside its bounds
                while theta_try[c,1]<theta2_low or theta_try[c,1]>theta2_up:
                    theta_try[:,1] = np.random.normal(theta_now[c,1],theta2_std)
                cov_try[:,:,c] = GP_K(X, theta_try[c,:])
                # proposal densities (Q) for the Hastings correction
                Q_now += trun_normal_pdf(theta_now[c,1], theta_try[c,1],
                                         theta2_std, theta2_low, theta2_up)
                Q_try += trun_normal_pdf(theta_try[c,1], theta_now[c,1],
                                         theta2_std, theta2_low, theta2_up)

            # propose latent Y via a scaled GP-covariance random walk
            cov_jmp = cov_try[:,:,c] * var[c] * 5 / (T * C * theta1)
            Y_try[c,:] = np.random.multivariate_normal(Y_now[c,:], cov_jmp)
            Q_now += normal_pdf(Y_now[c,:], Y_try[c,:], cov_jmp)
            Q_try += normal_pdf(Y_try[c,:], Y_now[c,:], cov_jmp)
            P_try += normal_pdf(Y_try[c,:], Ymean[c,:], cov_try[:,:,c])
        for t in range(T):
            psi_try[:,t] = np.exp(Y_try[:,t]) / np.sum(np.exp(Y_try[:,t]))
            fsi_try[:,t] = (len_isos[t]*psi_try[:,t] /
                            np.sum(len_isos[t]*psi_try[:,t]))
            _lik_list = np.dot(R_mat[t]*prob_isos[t], fsi_try[:,t])
            # if min(_lik_list) <= 0:
            #     P_try, L_try = -np.inf, -np.inf
            # else:
            #     P_try += np.log(_lik_list).sum()
            #     L_try += np.log(_lik_list).sum()
            P_try += np.log(_lik_list).sum()
            L_try += np.log(_lik_list).sum()

        # step 2: calculate the MH ratio; accept or reject the proposal
        alpha = np.exp(min(P_try+Q_now-P_now-Q_try, 0))
        if alpha is None:
            print("alpha is none!")
        elif np.random.rand(1) < alpha:
            #print alpha
            # accept: the proposed state becomes the current state
            # (`+ 0.0` forces copies so later proposals don't alias)
            cnt += 1
            P_now = P_try + 0.0
            L_now = L_try + 0.0
            Y_now = Y_try + 0.0
            cov_now = cov_try + 0.0
            psi_now = psi_try + 0.0
            fsi_now = fsi_try + 0.0
            theta_now = theta_try + 0.0

        Pro_all[m] = P_now
        Lik_all[m] = L_now
        Y_all[m,:,:] = Y_now
        Psi_all[m,:,:] = psi_now
        theta2_all[m,:] = theta_now[:,1]

        #step 3. convergence diagnostics
        # Every `gap` iterations (after `initial`) run the Geweke test on
        # each chain; stop early and truncate the outputs once converged.
        if m >= initial and m % gap == 0:
            conv = 1
            for c in range(C-1):
                for t in range(T):
                    # Z = Geweke_Z(Y_all[:m,c,t])
                    Z = Geweke_Z(Psi_all[:m,c,t])
                    if Z is None or Z > 2:
                        conv = 0
                        break
                #print("psi converged!")
                if theta2 is None:
                    Z = Geweke_Z(theta2_all[:m, c])
                    if Z is None or Z > 2:
                        conv = 0
                        break
                if conv == 0: break
            if conv == 1:
                Pro_all = Pro_all[:m,]
                Lik_all = Lik_all[:m,]
                Y_all = Y_all[:m,:,:]
                Psi_all = Psi_all[:m,:,:]
                theta2_all = theta2_all[:m,:]
                break
    # if m >= initial and conv == 0:
    #     print("Warning: Not converged. Need a longer MCMC chain.")
    return Psi_all, Y_all, theta2_all, Pro_all, Lik_all, cnt, m
| huangyh09/diceseq | diceseq/models/model_GP.py | Python | apache-2.0 | 13,423 | [
"Gaussian"
] | 60faa053924527989091a0f52971df4b6487274708c9f7557b9e4f8662f97b53 |
"""
Augmenters that apply affine transformations or other similar augmentations.
Do not import directly from this file, as the categorization is not final.
Use instead
`from imgaug import augmenters as iaa`
and then e.g. ::
seq = iaa.Sequential([
iaa.Affine(...),
iaa.PerspectiveTransform(...)
])
List of augmenters:
* Affine
* PiecewiseAffine
* PerspectiveTransform
* ElasticTransformation
"""
from __future__ import print_function, division, absolute_import
from .. import imgaug as ia
# TODO replace these imports with iap.XYZ
from ..parameters import StochasticParameter, Deterministic, Binomial, Choice, DiscreteUniform, Normal, Uniform, FromLowerResolution
from .. import parameters as iap
from abc import ABCMeta, abstractmethod
import random
import numpy as np
import copy as copy_module
import re
import math
from scipy import misc, ndimage
from skimage import transform as tf, segmentation, measure
import itertools
import cv2
import six
import six.moves as sm
import types
import warnings
from .meta import Augmenter
class Affine(Augmenter):
"""
Augmenter to apply affine transformations to images.
This is mostly a wrapper around skimage's AffineTransform class and
warp function.
Affine transformations
involve:
- Translation ("move" image on the x-/y-axis)
- Rotation
- Scaling ("zoom" in/out)
- Shear (move one side of the image, turning a square into a trapezoid)
All such transformations can create "new" pixels in the image without a
defined content, e.g. if the image is translated to the left, pixels
are created on the right.
A method has to be defined to deal with these pixel values. The
parameters `cval` and `mode` of this class deal with this.
Some transformations involve interpolations between several pixels
of the input image to generate output pixel values. The parameter `order`
deals with the method of interpolation used for this.
Parameters
----------
scale : float or tuple of two floats or StochasticParameter or dict {"x": float/tuple/StochasticParameter, "y": float/tuple/StochasticParameter}, optional(default=1.0)
Scaling factor to use, where 1.0 represents no change and 0.5 is
zoomed out to 50 percent of the original size.
* If a single float, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled from the range
a <= x <= b per image. That value will be used identically for
both x- and y-axis.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`scale`). Using a dictionary allows to
set different values for the axis. If they are set to the same
ranges, different values may still be sampled per axis.
translate_percent : float or tuple of two floats or StochasticParameter or dict {"x": float/tuple/StochasticParameter, "y": float/tuple/StochasticParameter}, optional(default=1.0)
Translation in percent relative to the image
height/width (x-translation, y-translation) to use,
where 0 represents no change and 0.5 is half of the image
height/width.
* If a single float, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled from the range
a <= x <= b per image. That percent value will be used identically
for both x- and y-axis.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`translate_percent`).
Using a dictionary allows to set different values for the axis.
If they are set to the same ranges, different values may still
be sampled per axis.
translate_px : int or tuple of two ints or StochasticParameter or dict {"x": int/tuple/StochasticParameter, "y": int/tuple/StochasticParameter}, optional(default=1.0)
Translation in
pixels.
* If a single int, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled from the discrete
range [a .. b] per image. That number will be used identically
for both x- and y-axis.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`translate_px`).
Using a dictionary allows to set different values for the axis.
If they are set to the same ranges, different values may still
be sampled per axis.
rotate : float or int or tuple of two floats/ints or StochasticParameter, optional(default=0)
Rotation in degrees (NOT radians), i.e. expected value range is
0 to 360 for positive rotations (may also be negative).
* If a float/int, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled per image from the
range a <= x <= b and be used as the rotation value.
* If a StochasticParameter, then this parameter will be used to
sample the rotation value per image.
shear : float or int or tuple of two floats/ints or StochasticParameter, optional(default=0)
Shear in degrees (NOT radians), i.e. expected value range is
0 to 360 for positive shear (may also be negative).
* If a float/int, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled per image from the
range a <= x <= b and be used as the rotation value.
* If a StochasticParameter, then this parameter will be used to
sample the shear value per image.
order : int or iterable of int or ia.ALL or StochasticParameter, optional(default=1)
Interpolation order to use. Same meaning as in
skimage:
* 0: Nearest-neighbor
* 1: Bi-linear (default)
* 2: Bi-quadratic (not recommended by skimage)
* 3: Bi-cubic
* 4: Bi-quartic
* 5: Bi-quintic
Method 0 and 1 are fast, 3 is a bit slower, 4 and 5 are very
slow.
* If a single int, then that order will be used for all images.
* If an iterable, then for each image a random value will be sampled
from that iterable (i.e. list of allowed order values).
* If ia.ALL, then equivalant to list [0, 1, 3, 4, 5].
* If StochasticParameter, then that parameter is queried per image
to sample the order value to use.
cval : int or float or tuple of two floats or ia.ALL or StochasticParameter, optional(default=0)
The constant value used for skimage's transform function.
This is the value used to fill up pixels in the result image that
didn't exist in the input image (e.g. when translating to the left,
some new pixels are created at the right). Such a fill-up with a
constant value only happens, when `mode` is "constant".
For standard uint8 images (value range 0-255), this value may also
come from the range 0-255. It may be a float value, even for
integer image dtypes.
* If this is a single int or float, then that value will be used
(e.g. 0 results in black pixels).
* If a tuple (a, b), then a random value from the range a <= x <= b
is picked per image.
* If ia.ALL, a value from the discrete range [0 .. 255] will be
sampled per image.
* If a StochasticParameter, a new value will be sampled from the
parameter per image.
mode : string or list of string or ia.ALL or StochasticParameter, optional(default="constant")
Parameter that defines the handling of newly created pixels.
Same meaning as in skimage (and numpy.pad):
* "constant": Pads with a constant value
* "edge": Pads with the edge values of array
* "symmetric": Pads with the reflection of the vector mirrored
along the edge of the array.
* "reflect": Pads with the reflection of the vector mirrored on
the first and last values of the vector along each axis.
* "wrap": Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end values
are used to pad the beginning.
The datatype of the parameter may
be:
* If a single string, then that mode will be used for all images.
* If a list of strings, then per image a random mode will be picked
from that list.
* If ia.ALL, then a random mode from all possible modes will be
picked.
* If StochasticParameter, then the mode will be sampled from that
parameter per image, i.e. it must return only the above mentioned
strings.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Affine(scale=2.0)
zooms all images by a factor of 2.
>>> aug = iaa.Affine(translate_px=16)
translates all images on the x- and y-axis by 16 pixels (to the
right/top), fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(translate_percent=0.1)
translates all images on the x- and y-axis by 10 percent of their
width/height (to the right/top), fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(rotate=35)
rotates all images by 35 degrees, fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(shear=15)
rotates all images by 15 degrees, fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(translate_px=(-16, 16))
translates all images on the x- and y-axis by a random value
between -16 and 16 pixels (to the right/top) (same for both axis, i.e.
sampled once per image), fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(translate_px={"x": (-16, 16), "y": (-4, 4)})
translates all images on the x-axis by a random value
between -16 and 16 pixels (to the right) and on the y-axis by a
random value between -4 and 4 pixels to the top. Even if both ranges
were the same, both axis could use different samples.
Fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(scale=2.0, order=[0, 1])
same as previously, but uses (randomly) either nearest neighbour
interpolation or linear interpolation.
>>> aug = iaa.Affine(translate_px=16, cval=(0, 255))
same as previously, but fills up any new pixels with a random
brightness (same for the whole image).
>>> aug = iaa.Affine(translate_px=16, mode=["constant", "edge"])
same as previously, but fills up the new pixels in only 50 percent
of all images with black values. In the other 50 percent of all cases,
the value of the nearest edge is used.
"""
def __init__(self, scale=1.0, translate_percent=None, translate_px=None,
rotate=0.0, shear=0.0, order=1, cval=0, mode="constant",
name=None, deterministic=False, random_state=None):
"""Create a new Affine instance.
"""
super(Affine, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
# Peformance:
# 1.0x order 0
# 1.5x order 1
# 3.0x order 3
# 30.0x order 4
# 60.0x order 5
# measurement based on 256x256x3 batches, difference is smaller
# on smaller images (seems to grow more like exponentially with image
# size)
if order == ia.ALL:
# self.order = DiscreteUniform(0, 5)
self.order = Choice([0, 1, 3, 4, 5]) # dont use order=2 (bi-quadratic) because that is apparently currently not recommended (and throws a warning)
elif ia.is_single_integer(order):
assert 0 <= order <= 5, "Expected order's integer value to be in range 0 <= x <= 5, got %d." % (order,)
self.order = Deterministic(order)
elif isinstance(order, list):
assert all([ia.is_single_integer(val) for val in order]), "Expected order list to only contain integers, got types %s." % (str([type(val) for val in order]),)
assert all([0 <= val <= 5 for val in order]), "Expected all of order's integer values to be in range 0 <= x <= 5, got %s." % (str(order),)
self.order = Choice(order)
elif isinstance(order, StochasticParameter):
self.order = order
else:
raise Exception("Expected order to be imgaug.ALL, int or StochasticParameter, got %s." % (type(order),))
if cval == ia.ALL:
self.cval = DiscreteUniform(0, 255)
elif ia.is_single_number(cval):
self.cval = Deterministic(cval)
elif ia.is_iterable(cval):
assert len(cval) == 2
assert 0 <= cval[0] <= 255
assert 0 <= cval[1] <= 255
self.cval = Uniform(cval[0], cval[1])
elif isinstance(cval, StochasticParameter):
self.cval = cval
else:
raise Exception("Expected cval to be imgaug.ALL, int, float or StochasticParameter, got %s." % (type(cval),))
# constant, edge, symmetric, reflect, wrap
if mode == ia.ALL:
self.mode = Choice(["constant", "edge", "symmetric", "reflect", "wrap"])
elif ia.is_string(mode):
self.mode = Deterministic(mode)
elif isinstance(mode, list):
assert all([ia.is_string(val) for val in mode])
self.mode = Choice(mode)
elif isinstance(mode, StochasticParameter):
self.mode = mode
else:
raise Exception("Expected mode to be imgaug.ALL, a string, a list of strings or StochasticParameter, got %s." % (type(mode),))
# scale
# float | (float, float) | [float, float] | StochasticParameter
def scale_handle_param(param, allow_dict):
if isinstance(param, StochasticParameter):
return param
elif ia.is_single_number(param):
assert param > 0.0, "Expected scale to have range (0, inf), got value %.4f. Note: The value to _not_ change the scale of images is 1.0, not 0.0." % (param,)
return Deterministic(param)
elif ia.is_iterable(param) and not isinstance(param, dict):
assert len(param) == 2, "Expected scale tuple/list with 2 entries, got %d entries." % (len(param),)
assert param[0] > 0.0 and param[1] > 0.0, "Expected scale tuple/list to have values in range (0, inf), got values %.4f and %.4f. Note: The value to _not_ change the scale of images is 1.0, not 0.0." % (param[0], param[1])
return Uniform(param[0], param[1])
elif allow_dict and isinstance(param, dict):
assert "x" in param or "y" in param
x = param.get("x")
y = param.get("y")
x = x if x is not None else 1.0
y = y if y is not None else 1.0
return (scale_handle_param(x, False), scale_handle_param(y, False))
else:
raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(param),))
self.scale = scale_handle_param(scale, True)
# translate
if translate_percent is None and translate_px is None:
translate_px = 0
assert translate_percent is None or translate_px is None
if translate_percent is not None:
# translate by percent
def translate_handle_param(param, allow_dict):
if ia.is_single_number(param):
return Deterministic(float(param))
elif ia.is_iterable(param) and not isinstance(param, dict):
assert len(param) == 2, "Expected translate_percent tuple/list with 2 entries, got %d entries." % (len(param),)
all_numbers = all([ia.is_single_number(p) for p in param])
assert all_numbers, "Expected translate_percent tuple/list to contain only numbers, got types %s." % (str([type(p) for p in param]),)
#assert param[0] > 0.0 and param[1] > 0.0, "Expected translate_percent tuple/list to have values in range (0, inf), got values %.4f and %.4f." % (param[0], param[1])
return Uniform(param[0], param[1])
elif allow_dict and isinstance(param, dict):
assert "x" in param or "y" in param
x = param.get("x")
y = param.get("y")
x = x if x is not None else 0
y = y if y is not None else 0
return (translate_handle_param(x, False), translate_handle_param(y, False))
elif isinstance(param, StochasticParameter):
return param
else:
raise Exception("Expected float, int or tuple/list with 2 entries of both floats or ints or StochasticParameter. Got %s." % (type(param),))
self.translate = translate_handle_param(translate_percent, True)
else:
# translate by pixels
def translate_handle_param(param, allow_dict):
if ia.is_single_integer(param):
return Deterministic(param)
elif ia.is_iterable(param) and not isinstance(param, dict):
assert len(param) == 2, "Expected translate_px tuple/list with 2 entries, got %d entries." % (len(param),)
all_integer = all([ia.is_single_integer(p) for p in param])
assert all_integer, "Expected translate_px tuple/list to contain only integers, got types %s." % (str([type(p) for p in param]),)
return DiscreteUniform(param[0], param[1])
elif allow_dict and isinstance(param, dict):
assert "x" in param or "y" in param
x = param.get("x")
y = param.get("y")
x = x if x is not None else 0
y = y if y is not None else 0
return (translate_handle_param(x, False), translate_handle_param(y, False))
elif isinstance(param, StochasticParameter):
return param
else:
raise Exception("Expected int or tuple/list with 2 ints or StochasticParameter. Got %s." % (type(param),))
self.translate = translate_handle_param(translate_px, True)
# rotate
# StochasticParameter | float | int | (float or int, float or int) | [float or int, float or int]
if isinstance(rotate, StochasticParameter):
self.rotate = rotate
elif ia.is_single_number(rotate):
self.rotate = Deterministic(rotate)
elif ia.is_iterable(rotate):
assert len(rotate) == 2, "Expected rotate tuple/list with 2 entries, got %d entries." % (len(rotate),)
assert all([ia.is_single_number(val) for val in rotate]), "Expected floats/ints in rotate tuple/list"
self.rotate = Uniform(rotate[0], rotate[1])
else:
raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(rotate),))
# shear
# StochasticParameter | float | int | (float or int, float or int) | [float or int, float or int]
if isinstance(shear, StochasticParameter):
self.shear = shear
elif ia.is_single_number(shear):
self.shear = Deterministic(shear)
elif ia.is_iterable(shear):
assert len(shear) == 2, "Expected rotate tuple/list with 2 entries, got %d entries." % (len(shear),)
assert all([ia.is_single_number(val) for val in shear]), "Expected floats/ints in shear tuple/list."
self.shear = Uniform(shear[0], shear[1])
else:
raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(shear),))
    def _augment_images(self, images, random_state, parents, hooks):
        """Apply one randomly sampled affine transformation per image."""
        #images = images if isinstance(images, list) else [images]
        nb_images = len(images)
        #result = [None] * nb_images
        result = images
        # Draw one sample per image for every transformation parameter.
        scale_samples, translate_samples, rotate_samples, shear_samples, cval_samples, mode_samples, order_samples = self._draw_samples(nb_images, random_state)
        for i in sm.xrange(nb_images):
            height, width = images[i].shape[0], images[i].shape[1]
            # Image center; rotation/scale/shear are applied around it.
            shift_x = width / 2.0 - 0.5
            shift_y = height / 2.0 - 0.5
            scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
            translate_x, translate_y = translate_samples[0][i], translate_samples[1][i]
            #assert isinstance(translate_x, (float, int))
            #assert isinstance(translate_y, (float, int))
            # Float translation samples are fractions of the image size;
            # integer samples are absolute pixel offsets.
            if ia.is_single_float(translate_y):
                translate_y_px = int(round(translate_y * images[i].shape[0]))
            else:
                translate_y_px = translate_y
            if ia.is_single_float(translate_x):
                translate_x_px = int(round(translate_x * images[i].shape[1]))
            else:
                translate_x_px = translate_x
            rotate = rotate_samples[i]
            shear = shear_samples[i]
            cval = cval_samples[i]
            mode = mode_samples[i]
            order = order_samples[i]
            if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 or shear != 0:
                # Compose: shift center to origin -> affine transform ->
                # shift back, so the transform acts around the image center.
                matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
                matrix_transforms = tf.AffineTransform(
                    scale=(scale_x, scale_y),
                    translation=(translate_x_px, translate_y_px),
                    rotation=math.radians(rotate),
                    shear=math.radians(shear)
                )
                matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])
                matrix = (matrix_to_topleft + matrix_transforms + matrix_to_center)
                image_warped = tf.warp(
                    images[i],
                    matrix.inverse,
                    order=order,
                    mode=mode,
                    cval=cval,
                    preserve_range=True
                )
                # warp changes uint8 to float64, making this necessary
                if image_warped.dtype != images[i].dtype:
                    image_warped = image_warped.astype(images[i].dtype, copy=False)
                result[i] = image_warped
            else:
                # Identity transformation: keep the input image as-is.
                result[i] = images[i]
        return result
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
result = []
nb_images = len(keypoints_on_images)
scale_samples, translate_samples, rotate_samples, shear_samples, cval_samples, mode_samples, order_samples = self._draw_samples(nb_images, random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
height, width = keypoints_on_image.height, keypoints_on_image.width
shift_x = width / 2.0 - 0.5
shift_y = height / 2.0 - 0.5
scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
translate_x, translate_y = translate_samples[0][i], translate_samples[1][i]
#assert isinstance(translate_x, (float, int))
#assert isinstance(translate_y, (float, int))
if ia.is_single_float(translate_y):
translate_y_px = int(round(translate_y * keypoints_on_image.shape[0]))
else:
translate_y_px = translate_y
if ia.is_single_float(translate_x):
translate_x_px = int(round(translate_x * keypoints_on_image.shape[1]))
else:
translate_x_px = translate_x
rotate = rotate_samples[i]
shear = shear_samples[i]
#cval = cval_samples[i]
#mode = mode_samples[i]
#order = order_samples[i]
if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 or shear != 0:
matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
matrix_transforms = tf.AffineTransform(
scale=(scale_x, scale_y),
translation=(translate_x_px, translate_y_px),
rotation=math.radians(rotate),
shear=math.radians(shear)
)
matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])
matrix = (matrix_to_topleft + matrix_transforms + matrix_to_center)
coords = keypoints_on_image.get_coords_array()
#print("coords", coords)
#print("matrix", matrix.params)
coords_aug = tf.matrix_transform(coords, matrix.params)
#print("coords before", coords)
#print("coordsa ftre", coords_aug, np.around(coords_aug).astype(np.int32))
result.append(ia.KeypointsOnImage.from_coords_array(np.around(coords_aug).astype(np.int32), shape=keypoints_on_image.shape))
else:
result.append(keypoints_on_image)
return result
def get_parameters(self):
return [self.scale, self.translate, self.rotate, self.shear]
    def _draw_samples(self, nb_samples, random_state):
        """Sample all transformation parameters for ``nb_samples`` images.

        Returns a 7-tuple
        ``(scale, translate, rotate, shear, cval, mode, order)`` where
        ``scale`` and ``translate`` are pairs of per-axis (x, y) sample
        arrays and the remaining entries are one sample array each.
        """
        # Derive a single base seed so that all sub-samplings below are
        # reproducible for a given input random_state. The distinct fixed
        # offsets (+10, +20, ...) keep the individual streams uncorrelated;
        # do not change them, results would no longer be reproducible.
        seed = random_state.randint(0, 10**6, 1)[0]
        if isinstance(self.scale, tuple):
            # Per-axis scale: sample x and y independently.
            scale_samples = (
                self.scale[0].draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 10)),
                self.scale[1].draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 20)),
            )
        else:
            # Single scale parameter: reuse the same samples for both axes.
            scale_samples = self.scale.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 30))
            scale_samples = (scale_samples, scale_samples)
        if isinstance(self.translate, tuple):
            # Per-axis translation: sample x and y independently.
            translate_samples = (
                self.translate[0].draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 40)),
                self.translate[1].draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 50)),
            )
        else:
            # Single translate parameter: reuse the same samples for both axes.
            translate_samples = self.translate.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 60))
            translate_samples = (translate_samples, translate_samples)
        # Integer samples are later interpreted as absolute pixel offsets,
        # float samples as fractions of the image size.
        assert translate_samples[0].dtype in [np.int32, np.int64, np.float32, np.float64]
        assert translate_samples[1].dtype in [np.int32, np.int64, np.float32, np.float64]
        rotate_samples = self.rotate.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 70))
        shear_samples = self.shear.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 80))
        cval_samples = self.cval.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 90))
        mode_samples = self.mode.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 100))
        order_samples = self.order.draw_samples((nb_samples,), random_state=ia.new_random_state(seed + 110))
        return scale_samples, translate_samples, rotate_samples, shear_samples, cval_samples, mode_samples, order_samples
class PiecewiseAffine(Augmenter):
    """
    Augmenter that places a regular grid of points on an image and randomly
    moves the neighbourhood of these points around via affine transformations.
    This leads to local distortions.

    This is mostly a wrapper around scikit-image's PiecewiseAffine.
    See also the Affine augmenter for a similar technique.

    Parameters
    ----------
    scale : float or tuple of two floats or StochasticParameter, optional(default=0)
        Each point on the regular grid is moved around via a normal
        distribution. This scale factor is equivalent to the normal
        distribution's sigma. Note that the jitter (how far each point is
        moved in which direction) is multiplied by the height/width of the
        image, so this scale can be the same for different sized images.
        Recommended values are in the range 0.01 to 0.05 (weak to strong
        augmentations).
            * If a single float, then that value will always be used as the
              scale.
            * If a tuple (a, b) of floats, then a random value will be picked
              from the interval (a, b) (per image).
            * If a StochasticParameter, then that parameter will be queried to
              draw one value per image.

    nb_rows : int or tuple of ints or StochasticParameter, optional(default=4)
        Number of rows of points that the regular grid should have.
        Must be at least 2. For large images, you might want to pick a
        higher value than 4. You might have to then adjust scale to lower
        values.
            * If a single int, then that value will always be used as the
              number of rows.
            * If a tuple (a, b), then a value from the discrete interval [a..b]
              will be sampled per image.
            * If a StochasticParameter, then that parameter will be queried to
              draw one value per image.

    nb_cols : int or tuple of ints or StochasticParameter, optional(default=4)
        Number of columns. See `nb_rows`.

    order : int or iterable of int or ia.ALL or StochasticParameter, optional(default=1)
        See Affine.__init__().

    cval : int or float or tuple of two floats or ia.ALL or StochasticParameter, optional(default=0)
        See Affine.__init__().

    mode : string or list of string or ia.ALL or StochasticParameter, optional(default="constant")
        See Affine.__init__().

    name : string, optional(default=None)
        See `Augmenter.__init__()`

    deterministic : bool, optional(default=False)
        See `Augmenter.__init__()`

    random_state : int or np.random.RandomState or None, optional(default=None)
        See `Augmenter.__init__()`

    Examples
    --------
    >>> aug = iaa.PiecewiseAffine(scale=(0.01, 0.05))

    Puts a grid of points on each image and then randomly moves each point
    around by 1 to 5 percent (with respect to the image height/width). Pixels
    between these points will be moved accordingly.

    >>> aug = iaa.PiecewiseAffine(scale=(0.01, 0.05), nb_rows=8, nb_cols=8)

    Same as the previous example, but uses a denser grid of 8x8 points (default
    is 4x4). This can be useful for large images.
    """

    def __init__(self, scale=0, nb_rows=4, nb_cols=4, order=1, cval=0, mode="constant",
                 name=None, deterministic=False, random_state=None):
        super(PiecewiseAffine, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # Normalize 'scale' to a StochasticParameter.
        if ia.is_single_number(scale):
            self.scale = Deterministic(scale)
        elif ia.is_iterable(scale):
            assert len(scale) == 2, "Expected tuple/list with 2 entries for argument 'scale', got %d entries." % (len(scale),)
            self.scale = Uniform(scale[0], scale[1])
        elif isinstance(scale, StochasticParameter):
            self.scale = scale
        else:
            raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter for argument 'scale'. Got %s." % (type(scale),))

        # Per-point jitter; sampled values are later multiplied by the image
        # height/width in _get_transformer().
        self.jitter = Normal(loc=0, scale=self.scale)

        # Normalize 'nb_rows' to a StochasticParameter (minimum of 2 rows).
        if ia.is_single_number(nb_rows):
            assert nb_rows >= 2
            self.nb_rows = Deterministic(int(nb_rows))
        elif ia.is_iterable(nb_rows):
            assert len(nb_rows) == 2, "Expected tuple/list with 2 entries for argument 'nb_rows', got %d entries." % (len(nb_rows),)
            assert nb_rows[0] >= 2
            assert nb_rows[1] >= 2
            self.nb_rows = DiscreteUniform(nb_rows[0], nb_rows[1])
        elif isinstance(nb_rows, StochasticParameter):
            self.nb_rows = nb_rows
        else:
            raise Exception("Expected int, tuple of two ints or StochasticParameter as nb_rows, got %s." % (type(nb_rows),))

        # Normalize 'nb_cols' to a StochasticParameter (minimum of 2 columns).
        if ia.is_single_number(nb_cols):
            assert nb_cols >= 2
            self.nb_cols = Deterministic(int(nb_cols))
        elif ia.is_iterable(nb_cols):
            assert len(nb_cols) == 2, "Expected tuple/list with 2 entries for argument 'nb_cols', got %d entries." % (len(nb_cols),)
            assert nb_cols[0] >= 2
            assert nb_cols[1] >= 2
            self.nb_cols = DiscreteUniform(nb_cols[0], nb_cols[1])
        elif isinstance(nb_cols, StochasticParameter):
            self.nb_cols = nb_cols
        else:
            raise Exception("Expected int, tuple of two ints or StochasticParameter as nb_cols, got %s." % (type(nb_cols),))

        # --------------
        # order, mode, cval
        # TODO these are the same as in class Affine, make DRY
        # --------------

        # Performance:
        #  1.0x order 0
        #  1.5x order 1
        #  3.0x order 3
        # 30.0x order 4
        # 60.0x order 5
        # measurement based on 256x256x3 batches, difference is smaller
        # on smaller images (seems to grow more like exponentially with image
        # size)
        if order == ia.ALL:
            # self.order = DiscreteUniform(0, 5)
            # don't use order=2 (bi-quadratic) because that is apparently
            # currently not recommended (and throws a warning)
            self.order = Choice([0, 1, 3, 4, 5])
        elif ia.is_single_integer(order):
            assert 0 <= order <= 5, "Expected order's integer value to be in range 0 <= x <= 5, got %d." % (order,)
            self.order = Deterministic(order)
        elif isinstance(order, list):
            assert all([ia.is_single_integer(val) for val in order]), "Expected order list to only contain integers, got types %s." % (str([type(val) for val in order]),)
            assert all([0 <= val <= 5 for val in order]), "Expected all of order's integer values to be in range 0 <= x <= 5, got %s." % (str(order),)
            self.order = Choice(order)
        elif isinstance(order, StochasticParameter):
            self.order = order
        else:
            raise Exception("Expected order to be imgaug.ALL, int or StochasticParameter, got %s." % (type(order),))

        # Fill value for pixels outside the input image; restricted to the
        # uint8 value range [0, 255].
        if cval == ia.ALL:
            self.cval = DiscreteUniform(0, 255)
        elif ia.is_single_number(cval):
            self.cval = Deterministic(cval)
        elif ia.is_iterable(cval):
            assert len(cval) == 2
            assert 0 <= cval[0] <= 255
            assert 0 <= cval[1] <= 255
            self.cval = Uniform(cval[0], cval[1])
        elif isinstance(cval, StochasticParameter):
            self.cval = cval
        else:
            raise Exception("Expected cval to be imgaug.ALL, int, float or StochasticParameter, got %s." % (type(cval),))

        # constant, edge, symmetric, reflect, wrap
        if mode == ia.ALL:
            self.mode = Choice(["constant", "edge", "symmetric", "reflect", "wrap"])
        elif ia.is_string(mode):
            self.mode = Deterministic(mode)
        elif isinstance(mode, list):
            assert all([ia.is_string(val) for val in mode])
            self.mode = Choice(mode)
        elif isinstance(mode, StochasticParameter):
            self.mode = mode
        else:
            raise Exception("Expected mode to be imgaug.ALL, a string, a list of strings or StochasticParameter, got %s." % (type(mode),))

    def _augment_images(self, images, random_state, parents, hooks):
        # Warp each image with a per-image piecewise affine transformer.
        result = images
        nb_images = len(images)

        # One seed per image plus one extra seed for the shared parameter
        # samplings below; copying random_state keeps the caller's state
        # consistent with _augment_keypoints().
        seeds = ia.copy_random_state(random_state).randint(0, 10**6, (nb_images+1,))
        seed = seeds[-1]
        nb_rows_samples = self.nb_rows.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 1))
        nb_cols_samples = self.nb_cols.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 2))
        cval_samples = self.cval.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 3))
        mode_samples = self.mode.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 4))
        order_samples = self.order.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 5))

        for i in sm.xrange(nb_images):
            rs_image = ia.new_random_state(seeds[i])
            h, w = images[i].shape[0:2]
            # transformer is None when the sampled jitter is all-zero
            # (identity transformation).
            transformer = self._get_transformer(h, w, nb_rows_samples[i], nb_cols_samples[i], rs_image)

            if transformer is not None:
                image_warped = tf.warp(
                    images[i],
                    transformer,
                    order=order_samples[i],
                    mode=mode_samples[i],
                    cval=cval_samples[i],
                    preserve_range=True,
                    output_shape=images[i].shape
                )

                # warp changes uint8 to float64, making this necessary
                if image_warped.dtype != images[i].dtype:
                    image_warped = image_warped.astype(images[i].dtype, copy=False)

                result[i] = image_warped

        return result

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Move keypoints consistently with _augment_images(); the same
        # seeding scheme is used so image and keypoint warps match.
        result = []
        nb_images = len(keypoints_on_images)

        seeds = ia.copy_random_state(random_state).randint(0, 10**6, (nb_images+1,))
        seed = seeds[-1]
        nb_rows_samples = self.nb_rows.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 1))
        nb_cols_samples = self.nb_cols.draw_samples((nb_images,), random_state=ia.new_random_state(seed + 2))

        for i in sm.xrange(nb_images):
            rs_image = ia.new_random_state(seeds[i])
            kpsoi = keypoints_on_images[i]
            h, w = kpsoi.shape[0:2]
            transformer = self._get_transformer(h, w, nb_rows_samples[i], nb_cols_samples[i], rs_image)

            if transformer is None or len(kpsoi.keypoints) == 0:
                # Identity transformation or nothing to transform.
                result.append(kpsoi)
            else:
                # Augmentation routine that only modifies keypoint coordinates
                # This is efficient (coordinates of all other locations in the
                # image are ignored). The code below should usually work, but
                # for some reason augmented coordinates are often wildly off
                # for large scale parameters (lots of jitter/distortion).
                # The reason for that is unknown.
                """
                coords = keypoints_on_images[i].get_coords_array()
                coords_aug = transformer.inverse(coords)
                result.append(
                    ia.KeypointsOnImage.from_coords_array(
                        np.around(coords_aug).astype(np.int32),
                        shape=keypoints_on_images[i].shape
                    )
                )
                """

                # Image based augmentation routine. Draws the keypoints on
                # the image plane (black and white, only keypoint marked),
                # then augments these images, then searches for the new
                # (visual) location of the keypoints.
                # Much slower than directly augmenting the coordinates, but
                # here the only method that reliably works.
                kp_image = kpsoi.to_keypoint_image(size=3)  # size=1 sometimes leads to dropped/lost keypoints
                kp_image_warped = tf.warp(
                    kp_image,
                    transformer,
                    order=1,
                    preserve_range=True,
                    output_shape=(kpsoi.shape[0], kpsoi.shape[1], len(kpsoi.keypoints))
                )

                kps_aug = ia.KeypointsOnImage.from_keypoint_image(
                    kp_image_warped,
                    if_not_found_coords={"x": -1, "y": -1}
                )

                # Restore the channel count of the original image shape.
                if len(kpsoi.shape) > 2:
                    kps_aug.shape = (
                        kps_aug.shape[0],
                        kps_aug.shape[1],
                        kpsoi.shape[2]
                    )

                # Keypoints that were outside of the image plane before the
                # augmentation will be replaced with (-1, -1) by default (as
                # they can't be drawn on the keypoint images). They are now
                # replaced by their old coordinates values.
                ooi = [not 0 <= kp.x < w or not 0 <= kp.y < h for kp in kpsoi.keypoints]
                for kp_idx in sm.xrange(len(kps_aug.keypoints)):
                    if ooi[kp_idx]:
                        kp_unaug = kpsoi.keypoints[kp_idx]
                        kps_aug.keypoints[kp_idx] = kp_unaug

                result.append(kps_aug)

        return result

    def _get_transformer(self, h, w, nb_rows, nb_cols, random_state):
        """Build a PiecewiseAffineTransform for an image of size (h, w).

        Places a regular nb_rows x nb_cols grid of source points spanning
        the full image (from (0, 0) to (h, w)), jitters a copy of them via
        ``self.jitter`` (scaled by image height/width) and estimates the
        piecewise affine transform between the two point sets.
        Returns None when the sampled jitter is all-zero, i.e. the
        transformation would be the identity.
        """
        nb_rows = max(nb_rows, 2)
        nb_cols = max(nb_cols, 2)

        y = np.linspace(0, h, nb_rows)
        x = np.linspace(0, w, nb_cols)

        xx_src, yy_src = np.meshgrid(x, y)  # (H, W) and (H, W) for H=rows, W=cols
        # (1, HW, 2) => (HW, 2) for H=rows, W=cols; column 0 is y, column 1 is x
        points_src = np.dstack([yy_src.flat, xx_src.flat])[0]

        jitter_img = self.jitter.draw_samples(points_src.shape, random_state=random_state)

        nb_nonzero = len(jitter_img.flatten().nonzero()[0])
        if nb_nonzero == 0:
            # All-zero jitter -> identity transformation, skip the warp.
            return None
        else:
            # Jitter samples are fractions of the image size; convert to pixels.
            jitter_img[:, 0] = jitter_img[:, 0] * h
            jitter_img[:, 1] = jitter_img[:, 1] * w
            points_dest = np.copy(points_src)
            points_dest[:, 0] = points_dest[:, 0] + jitter_img[:, 0]
            points_dest[:, 1] = points_dest[:, 1] + jitter_img[:, 1]

            # Restrict all destination points to be inside the image plane.
            # This is necessary, as otherwise keypoints could be augmented
            # outside of the image plane and these would be replaced by
            # (-1, -1), which would not conform with the behaviour of the
            # other augmenters.
            points_dest[:, 0] = np.clip(points_dest[:, 0], 0, h-1)
            points_dest[:, 1] = np.clip(points_dest[:, 1], 0, w-1)

            matrix = tf.PiecewiseAffineTransform()
            # estimate() expects (x, y) point order, hence the column flip.
            matrix.estimate(points_src[:, ::-1], points_dest[:, ::-1])
            return matrix

    def get_parameters(self):
        """Return the stochastic parameters controlling this augmenter."""
        return [self.scale]
class PerspectiveTransform(Augmenter):
    """
    Augmenter that performs a random four point perspective transform.

    Each of the four points is placed on the image using a random distance from
    its respective corner. The distance is sampled from a normal distribution.
    As a result, most transformations don't change very much, while some
    "focus" on polygons far inside the image.

    The results of this augmenter have some similarity with Crop.

    Code partially from http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/ .

    Parameters
    ----------
    scale : float or tuple of two floats or StochasticParameter, optional(default=0)
        Standard deviation of the normal distributions. These are used to sample
        the random distances of the subimage's corners from the full image's
        corners. The sampled values reflect percentage values (with respect
        to image height/width). Recommended values are in the range 0.0 to 0.1.
            * If a single float, then that value will always be used as the
              scale.
            * If a tuple (a, b) of floats, then a random value will be picked
              from the interval (a, b) (per image).
            * If a StochasticParameter, then that parameter will be queried to
              draw one value per image.

    keep_size : bool, optional(default=True)
        Whether to resize image's back to their original size after applying
        the perspective transform. If set to False, the resulting images
        may end up having different shapes and will always be a list, never
        an array.

    name : string, optional(default=None)
        See `Augmenter.__init__()`

    deterministic : bool, optional(default=False)
        See `Augmenter.__init__()`

    random_state : int or np.random.RandomState or None, optional(default=None)
        See `Augmenter.__init__()`

    Examples
    --------
    >>> aug = iaa.PerspectiveTransform(scale=(0.01, 0.10))

    Applies perspective transformations using a random scale between 0.01 and
    0.1 per image, where the scale is roughly a measure of how far the
    perspective transform's corner points may be distanced from the original
    image's corner points.
    """

    def __init__(self, scale=0, keep_size=True, name=None, deterministic=False, random_state=None):
        super(PerspectiveTransform, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # Normalize 'scale' to a StochasticParameter.
        if ia.is_single_number(scale):
            self.scale = Deterministic(scale)
        elif ia.is_iterable(scale):
            assert len(scale) == 2, "Expected tuple/list with 2 entries for argument 'scale', got %d entries." % (len(scale),)
            self.scale = Uniform(scale[0], scale[1])
        elif isinstance(scale, StochasticParameter):
            self.scale = scale
        else:
            raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter for argument 'scale'. Got %s." % (type(scale),))

        # Corner jitter; sampled values are fractions of image height/width.
        self.jitter = Normal(loc=0, scale=self.scale)
        self.keep_size = keep_size

    def _augment_images(self, images, random_state, parents, hooks):
        result = images
        if not self.keep_size:
            # Output shapes may differ per image -> a list is required.
            result = list(result)

        matrices, max_heights, max_widths = self._create_matrices(
            [image.shape for image in images],
            random_state
        )

        for i, (M, max_height, max_width) in enumerate(zip(matrices, max_heights, max_widths)):
            # cv2.warpPerspective only supports <=4 channels
            assert images[i].shape[2] <= 4, "PerspectiveTransform is currently limited to images with 4 or less channels."
            warped = cv2.warpPerspective(images[i], M, (max_width, max_height))
            # cv2 drops a size-1 channel axis; restore it.
            if warped.ndim == 2 and images[i].ndim == 3:
                warped = np.expand_dims(warped, 2)
            if self.keep_size:
                h, w = images[i].shape[0:2]
                warped = ia.imresize_single_image(warped, (h, w), interpolation="cubic")

            result[i] = warped

        return result

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Apply the same per-image perspective matrices to keypoint coords.
        result = keypoints_on_images
        matrices, max_heights, max_widths = self._create_matrices(
            [kps.shape for kps in keypoints_on_images],
            random_state
        )

        for i, (M, max_height, max_width) in enumerate(zip(matrices, max_heights, max_widths)):
            keypoints_on_image = keypoints_on_images[i]
            kps_arr = keypoints_on_image.get_coords_array()

            warped = cv2.perspectiveTransform(np.array([kps_arr], dtype=np.float32), M)
            warped = warped[0]
            warped_kps = ia.KeypointsOnImage.from_coords_array(
                np.around(warped, decimals=0).astype(np.int32),
                shape=(max_height, max_width)
            )
            if self.keep_size:
                # Project back onto the original image size.
                warped_kps = warped_kps.on(keypoints_on_image.shape)
            result[i] = warped_kps

        return result

    def _create_matrices(self, shapes, random_state):
        """Sample one perspective matrix per image shape.

        Returns (matrices, max_heights, max_widths), where the heights/widths
        describe the output size of each warped image before any resizing.
        """
        matrices = []
        max_heights = []
        max_widths = []
        nb_images = len(shapes)
        seeds = ia.copy_random_state(random_state).randint(0, 10**6, (nb_images,))

        for i in sm.xrange(nb_images):
            h, w = shapes[i][0:2]

            # Four (x, y) jitter samples in [0, 1), interpreted as relative
            # corner coordinates.
            points = self.jitter.draw_samples((4, 2), random_state=ia.new_random_state(seeds[i]))
            points = np.mod(np.abs(points), 1)

            # Mirror three of the eight coordinates so the four points spread
            # towards different corners.
            # NOTE(review): the corner labels below don't match the mirrored
            # indices exactly (e.g. only one coordinate of point 2 is
            # mirrored); _order_points() re-sorts the points afterwards, so
            # the labels are approximate -- verify before relying on them.
            # top left
            points[0, 1] = 1.0 - points[0, 1]  # h = 1.0 - jitter
            # top right
            points[1, 0] = 1.0 - points[1, 0]  # w = 1.0 - jitter
            points[1, 1] = 1.0 - points[1, 1]  # h = 1.0 - jitter
            # bottom right
            points[2, 0] = 1.0 - points[2, 0]  # w = 1.0 - jitter
            # bottom left
            # nothing

            # Convert relative coordinates to pixels.
            points[:, 0] = points[:, 0] * w
            points[:, 1] = points[:, 1] * h

            # obtain a consistent order of the points and unpack them
            # individually
            points = self._order_points(points)
            (tl, tr, br, bl) = points

            # compute the width of the new image, which will be the
            # maximum distance between bottom-right and bottom-left
            # x-coordiates or the top-right and top-left x-coordinates
            widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
            widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
            maxWidth = max(int(widthA), int(widthB))

            # compute the height of the new image, which will be the
            # maximum distance between the top-right and bottom-right
            # y-coordinates or the top-left and bottom-left y-coordinates
            heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
            heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
            maxHeight = max(int(heightA), int(heightB))

            # now that we have the dimensions of the new image, construct
            # the set of destination points to obtain a "birds eye view",
            # (i.e. top-down view) of the image, again specifying points
            # in the top-left, top-right, bottom-right, and bottom-left
            # order
            dst = np.array([
                [0, 0],
                [maxWidth - 1, 0],
                [maxWidth - 1, maxHeight - 1],
                [0, maxHeight - 1]
            ], dtype="float32")

            # compute the perspective transform matrix and then apply it
            M = cv2.getPerspectiveTransform(points, dst)
            matrices.append(M)
            max_heights.append(maxHeight)
            max_widths.append(maxWidth)

        return matrices, max_heights, max_widths

    def _order_points(self, pts):
        """Sort four (x, y) points into TL, TR, BR, BL order."""
        # initialize a list of coordinates that will be ordered
        # such that the first entry in the list is the top-left,
        # the second entry is the top-right, the third is the
        # bottom-right, and the fourth is the bottom-left
        pts_ordered = np.zeros((4, 2), dtype="float32")

        # the top-left point will have the smallest sum, whereas
        # the bottom-right point will have the largest sum
        s = pts.sum(axis=1)
        pts_ordered[0] = pts[np.argmin(s)]
        pts_ordered[2] = pts[np.argmax(s)]

        # now, compute the difference between the points, the
        # top-right point will have the smallest difference,
        # whereas the bottom-left will have the largest difference
        diff = np.diff(pts, axis=1)
        pts_ordered[1] = pts[np.argmin(diff)]
        pts_ordered[3] = pts[np.argmax(diff)]

        # return the ordered coordinates
        return pts_ordered

    def get_parameters(self):
        """Return the stochastic parameters controlling this augmenter."""
        return [self.scale]
# code partially from
# https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
# code partially from
# https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
class ElasticTransformation(Augmenter):
    """
    Augmenter to transform images by moving pixels locally around using
    displacement fields.

    See
        Simard, Steinkraus and Platt
        Best Practices for Convolutional Neural Networks applied to Visual
        Document Analysis
        in Proc. of the International Conference on Document Analysis and
        Recognition, 2003
    for a detailed explanation.

    Parameters
    ----------
    alpha : float or tuple of two floats or StochasticParameter, optional(default=0)
        Strength of the distortion field. Higher values mean more "movement" of
        pixels.
            * If float, then that value will be used for all images.
            * If tuple (a, b), then a random value from range a <= x <= b will be
              sampled per image.
            * If StochasticParameter, then that parameter will be used to sample
              a value per image.

    sigma : float or tuple of two floats or StochasticParameter, optional(default=0)
        Standard deviation of the gaussian kernel used to smooth the distortion
        fields.
            * If float, then that value will be used for all images.
            * If tuple (a, b), then a random value from range a <= x <= b will be
              sampled per image.
            * If StochasticParameter, then that parameter will be used to sample
              a value per image.

    name : string, optional(default=None)
        See `Augmenter.__init__()`

    deterministic : bool, optional(default=False)
        See `Augmenter.__init__()`

    random_state : int or np.random.RandomState or None, optional(default=None)
        See `Augmenter.__init__()`

    Examples
    --------
    >>> aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)

    apply elastic transformations with a strength/alpha of 0.5 and
    smoothness of 0.25 to all images.

    >>> aug = iaa.ElasticTransformation(alpha=(0.25, 3.0), sigma=0.25)

    apply elastic transformations with a strength/alpha that comes
    from the range 0.25 <= x <= 3.0 (randomly picked per image) and
    smoothness of 0.25.
    """

    def __init__(self, alpha=0, sigma=0, name=None, deterministic=False,
                 random_state=None):
        super(ElasticTransformation, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # Normalize 'alpha' (distortion strength) to a StochasticParameter.
        if ia.is_single_number(alpha):
            assert alpha >= 0.0, "Expected alpha to have range [0, inf), got value %.4f." % (alpha,)
            self.alpha = Deterministic(alpha)
        elif ia.is_iterable(alpha):
            assert len(alpha) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(alpha),)
            self.alpha = Uniform(alpha[0], alpha[1])
        elif isinstance(alpha, StochasticParameter):
            self.alpha = alpha
        else:
            raise Exception("Expected float or int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(alpha),))

        # Normalize 'sigma' (field smoothness) to a StochasticParameter.
        if ia.is_single_number(sigma):
            assert sigma >= 0.0, "Expected sigma to have range [0, inf), got value %.4f." % (sigma,)
            self.sigma = Deterministic(sigma)
        elif ia.is_iterable(sigma):
            assert len(sigma) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(sigma),)
            self.sigma = Uniform(sigma[0], sigma[1])
        elif isinstance(sigma, StochasticParameter):
            self.sigma = sigma
        else:
            raise Exception("Expected float or int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(sigma),))

    def _augment_images(self, images, random_state, parents, hooks):
        result = images
        nb_images = len(images)
        seeds = ia.copy_random_state(random_state).randint(0, 10**6, (nb_images,))
        # NOTE(review): alphas and sigmas are both drawn from identical
        # copies of random_state, so their sample streams coincide -- if
        # alpha and sigma use the same distribution type, their per-image
        # values will be correlated. Possibly intentional; verify.
        alphas = self.alpha.draw_samples((nb_images,), random_state=ia.copy_random_state(random_state))
        sigmas = self.sigma.draw_samples((nb_images,), random_state=ia.copy_random_state(random_state))
        for i in sm.xrange(nb_images):
            image = images[i]
            # The displacement field is generated from the 2D shape of the
            # first channel and then applied to every channel.
            image_first_channel = np.squeeze(image[..., 0])
            indices_x, indices_y = ElasticTransformation.generate_indices(image_first_channel.shape, alpha=alphas[i], sigma=sigmas[i], random_state=ia.new_random_state(seeds[i]))
            result[i] = ElasticTransformation.map_coordinates(images[i], indices_x, indices_y)
        return result

    """
    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # TODO do keypoints even have to be augmented for elastic transformations?
        # TODO this transforms keypoints to images, augments the images, then transforms
        # back to keypoints - inefficient and keypoints that get outside of the images
        # cannot be recovered
        result = []
        nb_images = len(keypoints_on_images)
        seeds = ia.copy_random_state(random_state).randint(0, 10**6, (nb_images,))
        alphas = self.alpha.draw_samples((nb_images,), random_state=ia.copy_random_state(random_state))
        sigmas = self.sigma.draw_samples((nb_images,), random_state=ia.copy_random_state(random_state))
        for i, keypoints_on_image in enumerate(keypoints_on_images):
            indices_x, indices_y = ElasticTransformation.generate_indices(keypoints_on_image.shape[0:2], alpha=alphas[i], sigma=sigmas[i], random_state=ia.new_random_state(seeds[i]))
            keypoint_image = keypoints_on_image.to_keypoint_image()
            keypoint_image_aug = ElasticTransformation.map_coordinates(keypoint_image, indices_x, indices_y)
            keypoints_aug = ia.KeypointsOnImage.from_keypoint_image(keypoint_image_aug)
            result.append(keypoints_aug)
        return result
    """

    # no transformation of keypoints for this currently,
    # it seems like this is the more appropriate choice overall for this
    # augmentation technique
    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Intentional no-op: keypoints are returned unchanged.
        return keypoints_on_images

    def get_parameters(self):
        """Return the stochastic parameters controlling this augmenter."""
        return [self.alpha, self.sigma]

    @staticmethod
    def generate_indices(shape, alpha, sigma, random_state):
        """Generate a smoothed random displacement field for a 2D shape.

        Draws uniform noise in [-1, 1], smooths it with a gaussian filter of
        standard deviation ``sigma`` and scales it by ``alpha``. Returns two
        column vectors of shape (H*W, 1) containing the displaced row and
        column coordinates ('ij' indexing), ready for map_coordinates().
        """
        assert len(shape) == 2

        dx = ndimage.gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
        dy = ndimage.gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha

        x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
        return np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))

    @staticmethod
    def map_coordinates(image, indices_x, indices_y):
        """Resample an (H, W, C) image at the given displaced coordinates.

        Applies the same displacement field to every channel with bilinear
        interpolation (order=1) and returns a new image of the input's
        shape and dtype.
        """
        assert len(image.shape) == 3
        result = np.copy(image)
        height, width = image.shape[0:2]
        for c in sm.xrange(image.shape[2]):
            # Interpolate the channel at the displaced coordinates, then
            # reshape the flat result back to the image plane.
            remapped_flat = ndimage.interpolation.map_coordinates(image[..., c], (indices_x, indices_y), order=1)
            remapped = remapped_flat.reshape((height, width))
            result[..., c] = remapped
        return result
| nektor211/imgaug | imgaug/augmenters/geometric.py | Python | mit | 60,930 | [
"Gaussian"
] | 52b4106ccf356e7ba001d4d2892b707e8f8c514905fecaf824ba6e0cc7bf074a |
'''
__author__ = 'Brian Chow'
__date__ = '2013-07-29'
4th-degree and rare-event feature engineering, fit to a logistic regression model
'''
from numpy import array, hstack
from sklearn import linear_model
from scipy import sparse, stats
from kaggle import *
import numpy as np
import pandas as pd
SEED = 42
def main():
    """Full Kaggle Amazon-access pipeline: build up-to-4th-degree grouped
    features, one-hot-encode per-feature cutoffs, greedily select good
    feature/cutoff pairs, then train logistic regression and write the
    submission file. Helper functions (group_data, OneHotEncoder,
    create_cutoffs, greedy_selection_cutoff, ...) come from the local
    ``kaggle`` module. Python 2 script.
    """
    # Options
    load_data_from_file = False
    processed_train_data_file = 'train_all.csv'
    processed_test_data_file = 'test_all.csv'
    train_data = 'train.csv'
    test_data = 'test.csv'
    preds_file = 'cutoffs_preds.csv'
    if not load_data_from_file:
        ### Load and Process Data ###
        # NOTE(review): .ix indexing was removed in modern pandas -- this
        # script is pinned to an old pandas version.
        train_data = pd.read_csv(train_data)
        test_data = pd.read_csv(test_data)
        all_data = np.vstack((train_data.ix[:,1:-1], test_data.ix[:,1:-1]))
        num_train = np.shape(train_data)[0]
        # reencode data using LabelEncoder to "normalize" to 0
        all_data = label_reencoder(all_data)
        # Transform data: grouped (interaction) features of degree 2, 3 and 4.
        print "Transforming data..."
        dp = group_data(all_data, degree=2)
        dt = group_data(all_data, degree=3)
        d4 = group_data(all_data, degree=4)
        y = array(train_data.ACTION)
        X = all_data[:num_train]
        X_2 = dp[:num_train]
        X_3 = dt[:num_train]
        X_4 = d4[:num_train]
        X_test = all_data[num_train:]
        X_test_2 = dp[num_train:]
        X_test_3 = dt[num_train:]
        X_test_4 = d4[num_train:]
        # Compile data
        X_train_all = np.hstack((X, X_2, X_3, X_4))
        X_test_all = np.hstack((X_test, X_test_2, X_test_3, X_test_4))
        num_features = X_train_all.shape[1]
        num_test = np.shape(X_test_all)[0]
        # Save processed data (so the next run can set load_data_from_file)
        np.savetxt("train_all.csv", np.asarray(X_train_all), delimiter = ",")
        np.savetxt("test_all.csv", np.asarray(X_test_all), delimiter = ",")
    else:
        ### Load already processed data from previous runs ###
        X_train_all = array(pd.read_csv(processed_train_data_file, header = None))
        X_test_all = array(pd.read_csv(processed_test_data_file, header = None))
        train_data = pd.read_csv(train_data)
        y = array(train_data.ACTION)
        num_train = np.shape(train_data)[0]
        num_test = np.shape(X_test_all)[0]
        num_features = X_train_all.shape[1]
    ### Create index of cutoff data for use in feature selection ###
    cutoffs = []
    for i in range(num_features):
        print 'Creating Index of Cutoffs for Feature ', i
        cutoffs.append(create_cutoffs(X_train_all[:,i]))
    # # debug
    # sum_size = 0
    # for i in cutoffs:
    #     sum_size += np.shape(i)[0]
    #     print np.shape(i)[0]
    # sum_size # = 1058
    ### One hot encode cutoffs ###
    cos = [[] for i in range(num_features)]
    # cutoffs are ordered as cos[feature #][cutoff #] and encoded in CSR
    for i in range(num_features):
        print 'One Hot Encoding Feature', i
        cos[i].append([OneHotEncoder(array([cutoffs[i][j]]).T)[0] for j in range(len(cutoffs[i]))])
    # gotta remove extraneous lists
    for i in range(num_features):
        cos[i] = cos[i][0]
    ### Declaring model ###
    model = linear_model.LogisticRegression()
    ### Calculate good features and cuts ###
    good_features, score_hist = greedy_selection_cutoff(cos, y, model)
    ### get ordered lists of good features and respective cuts ###
    ordered_good_cuts = []
    ordered_features = []
    for i in sorted(score_hist):
        ordered_good_cuts.append(i[2])
        ordered_features.append(i[1])
    # NOTE(review): this overwrites the good_features returned above; the
    # sorted copy is not used afterwards.
    good_features = sorted(ordered_features)
    ### Create sparse matrix of only good features and cuts for hyperparameter selection ###
    cos_gf = []
    for i in range(len(ordered_features)):
        cos_gf.append([OneHotEncoder(array([cutoffs[ordered_features[i]][ordered_good_cuts[i]]]).T)[0]])
    for i in range(len(cos_gf)):
        cos_gf[i] = cos_gf[i][0]
    cos_gfs = sparse.hstack(cos_gf).tocsr()
    ### Hyperparameter selection for good features and cuts ###
    # NOTE(review): bestC is computed but never applied to the model below --
    # presumably hyperparameter_selection_2 only reports it; confirm.
    bestC = hyperparameter_selection_2(cos_gfs, y, model)
    ### Create test matrix with good features and cutoffs ###
    train_cutoffs = []
    test_cutoffs = []
    for i in range(len(ordered_features)):
        print 'Feature', i
        train_cutoffs.append(create_cutoffs(X_train_all[:,ordered_features[i]], cutoffs = [ordered_good_cuts[i]]))
        test_cutoffs.append(create_cutoffs(X_test_all[:,ordered_features[i]], cutoffs = [ordered_good_cuts[i]]))
    for i in range(len(train_cutoffs)):
        train_cutoffs[i] = train_cutoffs[i][0]
        test_cutoffs[i] = test_cutoffs[i][0]
    # Encode train+test together so both share one keymap, then split back.
    Xt = np.hstack((train_cutoffs, test_cutoffs)).T
    Xt, keymap = OneHotEncoder(Xt)
    X_train = Xt[:num_train]
    X_test = Xt[num_train:]
    print "Training full model..."
    model.fit(X_train, y)
    print "Making prediction and saving results..."
    preds = model.predict_proba(X_test)[:,1]
    create_test_submission(preds_file, preds)
# scored 0.89912 on Kaggle
if __name__ == "__main__":
    # Script entry point: run the full feature-engineering + training pipeline.
    main()
| bachow/kaggle-amazon-contest | cutoffs.py | Python | mit | 5,055 | [
"Brian"
] | acfca9c0ab8e3b8b54b07874db80ad508b665a3064612332522ceb9d1a983850 |
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphicalLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphicalLasso setting the sparsity of the model is
set by internal cross-validation in the GraphicalLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphicalLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
# #############################################################################
# Generate the data
n_samples = 60
n_features = 20
# Draw a random sparse symmetric-positive-definite *precision* matrix, invert
# it to get the covariance, and rescale both so the covariance has unit
# diagonal (i.e. it is a correlation matrix -- easier to recover from few
# samples, as explained in the module docstring).
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
    n_features, alpha=0.98, smallest_coef=0.4, largest_coef=0.7, random_state=prng
)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
# Apply the inverse scaling to the precision so cov and prec stay inverses.
prec *= d
prec *= d[:, np.newaxis]
# Sample observations from the model, then standardize each feature.
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
# #############################################################################
# Estimate the covariance
# Maximum-likelihood (empirical) covariance of the standardized data.
emp_cov = np.dot(X.T, X) / n_samples
# l1-penalized estimate with the penalty chosen by internal cross-validation.
model = GraphicalLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
# l2-shrunk (Ledoit-Wolf) estimate for comparison; its precision is dense.
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
# #############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)

# plot the covariances (top row): one subplot per estimator plus ground truth
covs = [
    ("Empirical", emp_cov),
    ("Ledoit-Wolf", lw_cov_),
    ("GraphicalLassoCV", cov_),
    ("True", cov),
]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
    plt.subplot(2, 4, i + 1)
    plt.imshow(
        this_cov, interpolation="nearest", vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r
    )
    plt.xticks(())
    plt.yticks(())
    plt.title("%s covariance" % name)

# plot the precisions (bottom row); exact zeros are masked so the sparsity
# pattern shows up against the grey background
precs = [
    ("Empirical", linalg.inv(emp_cov)),
    ("Ledoit-Wolf", lw_prec_),
    ("GraphicalLasso", prec_),
    ("True", prec),
]
# color range deliberately clipped (see module docstring) for readability
vmax = 0.9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
    ax = plt.subplot(2, 4, i + 5)
    plt.imshow(
        np.ma.masked_equal(this_prec, 0),
        interpolation="nearest",
        vmin=-vmax,
        vmax=vmax,
        cmap=plt.cm.RdBu_r,
    )
    plt.xticks(())
    plt.yticks(())
    plt.title("%s precision" % name)
    # set_axis_bgcolor is the pre-2.0 matplotlib spelling of set_facecolor
    if hasattr(ax, "set_facecolor"):
        ax.set_facecolor(".7")
    else:
        ax.set_axis_bgcolor(".7")

# plot the model selection metric (CV score vs alpha, refined grid)
# NOTE(review): newer scikit-learn renamed this key to "mean_test_score" --
# confirm against the pinned sklearn version.
plt.figure(figsize=(4, 3))
plt.axes([0.2, 0.15, 0.75, 0.7])
plt.plot(model.cv_results_["alphas"], model.cv_results_["mean_score"], "o-")
plt.axvline(model.alpha_, color=".5")
plt.title("Model selection")
plt.ylabel("Cross-validation score")
plt.xlabel("alpha")
plt.show()
| manhhomienbienthuy/scikit-learn | examples/covariance/plot_sparse_cov.py | Python | bsd-3-clause | 5,149 | [
"Gaussian"
] | 4c2f7c00691ad663733a591fb48cbff40b1a9c0f9e6b32473bf6b459cfdc9fcf |
# Copyright 2017 Andrey Sobolev, Tilde Materials Informatics (Berlin)
# This file is a part of quantum_esperanto project. The project is licensed under the MIT license.
# See the LICENSE file in the project root for license terms.
import io

from setuptools import setup, Extension

from quantum_esperanto import __version__
# check if we have Cython available
try:
    # Prefer building the extension from the .pyx source when Cython is
    # installed; otherwise fall back to the pre-generated C file below.
    from Cython.Build import cythonize
    use_cython = True
except ImportError:
    use_cython = False
# vasp extension
# Shared C helpers plus the wrapper module: built from the Cython source when
# Cython is available, otherwise from the checked-in pre-generated C file.
sources = ["src/fast_atoi.c", "src/fast_atof.c"]
sources.append("src/vasp.pyx" if use_cython else "src/vasp.c")

vasp_ext = Extension(
    "quantum_esperanto.vasp",
    include_dirs=['include'],
    sources=sources,
)

# cythonize() translates the .pyx into C and returns the final Extension list.
exts = cythonize([vasp_ext]) if use_cython else [vasp_ext]
# Read the long description for PyPI. An explicit encoding matters here:
# without it the platform default (e.g. cp1252 on Windows) is used and a
# README containing non-ASCII characters makes the build fail. io.open is
# used so the same code works on both Python 2.7 and 3 (see classifiers).
with io.open("README.rst", "r", encoding="utf-8") as f:
    long_description = f.read()
# Package metadata; the compiled parser goes in via ext_modules (see above).
setup(
    name='quantum_esperanto',
    version=__version__,
    author='Andrey Sobolev',
    author_email="andrey.n.sobolev@gmail.com",
    url="https://github.com/tilde-lab/quantum_esperanto",
    license='MIT',
    description="A fast parser of XML files output by VASP DFT code written in Cython.",
    long_description=long_description,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Scientific/Engineering :: Physics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ],
    ext_modules=exts,
    packages=['quantum_esperanto'],
    install_requires=['numpy>=1.10', 'lxml'],
    # Cython is only needed when regenerating the C sources; nose for tests.
    extras_require={'dev': ['Cython', 'nose']},
    tests_require=['nose'],
    test_suite='nose.collector'
)
| tilde-lab/DFTXMLParser | setup.py | Python | mit | 2,067 | [
"VASP"
] | 47948053db39853cb61bbf0afc022cc85286b576e6cc3dd223a1e6ec220478d9 |
###########################################################
# Demo WPS service for testing and debugging.
#
# See the werkzeug documentation on how to use the debugger:
# http://werkzeug.pocoo.org/docs/0.12/debug/
###########################################################
import os
import psutil
import click
from jinja2 import Environment, PackageLoader
from pywps import configuration
from . import wsgi
from urllib.parse import urlparse
PID_FILE = os.path.abspath(os.path.join(os.path.curdir, "pywps.pid"))
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
template_env = Environment(
loader=PackageLoader('flyingpigeon', 'templates'),
autoescape=True
)
def write_user_config(**kwargs):
    """Render the ``pywps.cfg`` template with *kwargs* and write the result
    to ``.custom.cfg`` in the current working directory.

    Returns the absolute path of the written configuration file.
    """
    target = os.path.abspath(os.path.join(os.path.curdir, ".custom.cfg"))
    template = template_env.get_template('pywps.cfg')
    with open(target, 'w') as fp:
        fp.write(template.render(**kwargs))
    return target
def get_host():
    """Return ``(host, port)`` parsed from the configured WPS service URL.

    Falls back to ``http://localhost:8093/wps`` when no URL is configured,
    and to port 80 when the URL carries no explicit port.
    """
    url = configuration.get_config_value('server', 'url')
    url = url or 'http://localhost:8093/wps'
    click.echo("starting WPS service on {}".format(url))
    parsed_url = urlparse(url)
    # Use urlparse's own accessors instead of splitting netloc on ':' --
    # they correctly handle IPv6 literals ("[::1]:8093") and URLs embedding
    # credentials ("user:pass@host"), which break a naive split.
    host = parsed_url.hostname or 'localhost'
    port = parsed_url.port if parsed_url.port is not None else 80
    return host, port
def run_process_action(action=None):
    """Run an action ('status' or 'stop') on the current PyWPS process.

    The process id is read from PID_FILE. A human-readable message is echoed
    in every case, including "no pid file" and "no such process".
    """
    from datetime import datetime
    action = action or 'status'
    try:
        with open(PID_FILE, 'r') as fp:
            pid = int(fp.read())
        p = psutil.Process(pid)
        if action == 'stop':
            p.terminate()
            msg = "pid={}, status=terminated".format(p.pid)
            # Remove the stale pid file now that the process is gone.
            os.remove(PID_FILE)
        else:
            # Format the creation timestamp ourselves instead of importing
            # psutil's private _pprint_secs helper, which is not a stable API
            # and has moved/changed across psutil versions.
            created = datetime.fromtimestamp(p.create_time()).strftime('%Y-%m-%d %H:%M:%S')
            msg = "pid={}, status={}, created={}".format(
                p.pid, p.status(), created)
    except IOError:
        msg = 'No PID file found. Service not running? Try "netstat -nlp | grep :5000".'
    except psutil.NoSuchProcess as e:
        msg = e.msg
    click.echo(msg)
def _run(application, bind_host=None, daemon=False):
    """Serve *application* with werkzeug's development server.

    The listen host/port default to the values from the PyWPS configuration;
    WPS output files are served statically under ``/outputs``. Interactive
    debugging (evalex) is disabled in daemon mode. Not for production use.
    """
    from werkzeug.serving import run_simple
    # call this *after* app is initialized ... needs pywps config.
    host, port = get_host()
    bind_host = bind_host or host
    # need to serve the wps outputs
    static_files = {
        '/outputs': configuration.get_config_value('server', 'outputpath')
    }
    run_simple(
        hostname=bind_host,
        port=port,
        application=application,
        use_debugger=False,
        use_reloader=False,
        threaded=True,
        # processes=2,
        use_evalex=not daemon,
        static_files=static_files)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option()
def cli():
    # click renders this docstring as the CLI help text; keep it user-facing.
    """Command line to start/stop a PyWPS service.
    Do not use this service in a production environment.
    It's intended to be running in a test environment only!
    For more documentation, visit http://pywps.org/doc
    """
    pass
@cli.command()
def status():
    """Show status of PyWPS service"""
    # Delegates to run_process_action, which reads the pid file and echoes.
    run_process_action(action='status')
@cli.command()
def stop():
    """Stop PyWPS service"""
    # Delegates to run_process_action: terminates the pid and removes the file.
    run_process_action(action='stop')
@cli.command()
@click.option('--config', '-c', metavar='PATH', help='path to pywps configuration file.')
@click.option('--bind-host', '-b', metavar='IP-ADDRESS', default='127.0.0.1',
              help='IP address used to bind service.')
@click.option('--daemon', '-d', is_flag=True, help='run in daemon mode.')
@click.option('--hostname', metavar='HOSTNAME', default='localhost', help='hostname in PyWPS configuration.')
@click.option('--port', metavar='PORT', default='8093', help='port in PyWPS configuration.')
@click.option('--maxsingleinputsize', default='200mb', help='maxsingleinputsize in PyWPS configuration.')
@click.option('--maxprocesses', metavar='INT', default='10', help='maxprocesses in PyWPS configuration.')
@click.option('--parallelprocesses', metavar='INT', default='2', help='parallelprocesses in PyWPS configuration.')
@click.option('--log-level', metavar='LEVEL', default='INFO', help='log level in PyWPS configuration.')
@click.option('--log-file', metavar='PATH', default='pywps.log', help='log file in PyWPS configuration.')
@click.option('--database', default='sqlite:///pywps-logs.sqlite', help='database in PyWPS configuration')
def start(config, bind_host, daemon, hostname, port,
          maxsingleinputsize, maxprocesses, parallelprocesses,
          log_level, log_file, database):
    """Start PyWPS service.
    This service is by default available at http://localhost:8093/wps
    """
    # Refuse to start twice: a pid file means a service may still be running.
    if os.path.exists(PID_FILE):
        click.echo('PID file exists: "{}". Service still running?'.format(PID_FILE))
        os._exit(0)
    # Render the CLI options into a generated pywps config file; an explicit
    # --config file (if given) is appended last so it takes precedence.
    cfgfiles = []
    cfgfiles.append(write_user_config(
        wps_hostname=hostname,
        wps_port=port,
        wps_maxsingleinputsize=maxsingleinputsize,
        wps_maxprocesses=maxprocesses,
        wps_parallelprocesses=parallelprocesses,
        wps_log_level=log_level,
        wps_log_file=log_file,
        wps_database=database,
    ))
    if config:
        cfgfiles.append(config)
    app = wsgi.create_app(cfgfiles)
    # let's start the service ...
    # See:
    # * https://github.com/geopython/pywps-flask/blob/master/demo.py
    # * http://werkzeug.pocoo.org/docs/0.14/serving/
    if daemon:
        # daemon (fork) mode: the parent records the child's pid and exits;
        # the child detaches into its own session and runs the server.
        pid = None
        try:
            pid = os.fork()
            if pid:
                click.echo('forked process id: {}'.format(pid))
                with open(PID_FILE, 'w') as fp:
                    fp.write("{}".format(pid))
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))
        if pid == 0:
            # child: become session leader so it survives the parent's exit
            os.setsid()
            _run(app, bind_host=bind_host, daemon=True)
        else:
            os._exit(0)
    else:
        # no daemon: serve in the foreground
        _run(app, bind_host=bind_host)
| bird-house/flyingpigeon | flyingpigeon/cli.py | Python | apache-2.0 | 6,165 | [
"VisIt"
] | 7404a0899c464a638f8984a70cd1c0b31290ad7da2f7841f169de2e787d35618 |
#!/usr/bin/python
import computefarm as cf
from computefarm.farm import depth_first, breadth_first
import random
import logging
import numpy as np
HOUR = 60 * 60  # seconds per hour; job lengths below are in seconds
# Per-queue job mix used when submitting synthetic jobs (see Simulation.add_jobs):
# num = target number of idle jobs kept in the queue, avg/std = mean and spread
# of the gaussian job-length draw, cpu/mem = per-job resource request.
default_queue_properties = {
    'grid': { 'num': 0, 'mem': 750, 'avg': HOUR, 'std': 0.6 * HOUR},
    'prod': { 'num': 0, 'avg': 8 * HOUR, 'std': 3 * HOUR},
    'short': { 'num': 500, 'avg': 1.2 * HOUR, 'std': 600},
    'long': { 'num': 500, 'avg': 5 * HOUR, 'std': 2 * HOUR},
    'test': { 'num': 0, 'avg': 8 * HOUR, 'cpu': 3},
    'mp8': { 'num': 0, 'avg': 6 * HOUR, 'std': 4 * HOUR, 'cpu': 8, 'mem': 6000}
}
def sort_like(array, like):
    """Yield the elements of *array* ordered by the template *like*.

    Every entry of *like* that occurs in *array* is produced first, in
    template order; whatever remains follows in plain sorted order.
    """
    matched = [item for item in like if item in array]
    for item in matched:
        yield item
    leftovers = set(array) - set(like)
    for item in sorted(leftovers):
        yield item
log = logging.getLogger('sim')
class Simulation(object):
    """Drive a computefarm simulation: builds a farm of nodes, attaches the
    ATLAS-style group tree and a job queue, periodically submits synthetic
    jobs, negotiates them onto slots, and records per-group usage history
    for plotting (see make_plotdata).
    """
    def __init__(self, nodes, negotiate_interval=150, stat_freq=10, submit_interval=200):
        """ Initialize the farm simulation, attach groups and queues to it
        and provide a method of submitting jobs of a predetermined size into
        the queues.

        nodes -- farm size passed to generate_from_dist (required)
        negotiate_interval / stat_freq / submit_interval -- simulated seconds
        between negotiation cycles, usage snapshots and job submissions.
        """
        self.farm = cf.Farm()
        # Distribution of farm nodes, e.g. 331/90 is ratio of 24/32 core machines
        dist = (
            (24, 331),
            (32, 90),
            (8, 238),
        )
        self.farm.generate_from_dist(dist, size=nodes)
        root = self.setup_groups(cf.Group('<root>'))
        self.farm.attach_groups(root)
        # Keep 100 snapshots per stat interval in the rolling history.
        self._init_stat(stat_freq * 100)
        # Default ranking
        self.farm.set_negotiatior_rank(depth_first)
        self.queue = cf.JobQueue()
        self.farm.attach_queue(self.queue)
        # How many seconds per negotiation/stat gathering cycle
        self.int_stat = stat_freq
        self.int_negotiate = negotiate_interval
        self.int_submit = submit_interval
        self.next_stat = 0
        self.next_negotiate = 0
        self.next_submit = 0
        # How many seconds to simulate each step
        self.sec_per_step = 5
    # these two _set* knobs are used in callbacks by the GUI
    def _set_neg_df(self):
        # Switch the negotiator to depth-first slot ranking.
        self.farm.set_negotiatior_rank(depth_first)
    def _set_neg_bf(self):
        # Switch the negotiator to breadth-first slot ranking.
        self.farm.set_negotiatior_rank(breadth_first)
    def _init_stat(self, hist_size):
        """ Statistics are kept in a constant-size numpy array (one per
        active group) that is updated periodically by _update_stat.
        """
        self._stat = {}
        self._stat_size = hist_size
        for x in self.farm.groups.active_groups():
            self._stat[x.name] = np.zeros((hist_size), int)
    def _update_stat(self):
        # Record the current per-group usage at the end of each rolling array.
        self.farm.update_usage()
        for g in self.farm.groups.active_groups():
            # Left-shift entire array back by one, so element n -> element n - 1
            self._stat[g.name] = np.roll(self._stat[g.name], -1)
            # New last element is current update
            self._stat[g.name][-1] = g.usage
    def setup_groups(self, root):
        """Build the accounting-group tree (numbers are quotas passed to
        add_child). Reflects the current ATLAS group structure::

            <root> +- atlas +- production +- prod
                   |        |             +- mp8
                   |        |             +- test
                   |        +- analysis   +- short
                   |                      +- long
                   +- grid
        """
        root.add_child('atlas')
        root.add_child('grid', 3)
        root['atlas'].add_child('production')
        root['atlas'].add_child('analysis')
        root['atlas']['production'].add_child('prod', 40)
        root['atlas']['production'].add_child('mp8', 5)
        root['atlas']['production'].add_child('test', 7)
        root['atlas']['analysis'].add_child('short', 10)
        root['atlas']['analysis'].add_child('long', 10)
        # Populate with default properties from top of this module
        for x in root.walk():
            if x.name in default_queue_properties:
                x.set_character(**default_queue_properties[x.name])
        return root
    def add_jobs(self):
        """ Submit more jobs into the queue, keeping the total idle jobs
        where they should be according to the sliders in the GUI.
        """
        for group in self.farm.groups.active_groups():
            # Top the queue back up to group.num idle jobs.
            num_submit = group.num - self.farm.queue.get_group_idle(group.name)
            if num_submit <= 0:
                continue
            log.info("Submitting %d more %s jobs", num_submit, group.name)
            for n in xrange(num_submit):
                # Job length is random within a Gaussian distribution
                length = abs(random.gauss(group.avg, group.std))
                # Create job object and add it to queue
                job = cf.BatchJob(group=group.name, cpus=group.cpu, memory=group.mem,
                                  length=length)
                self.queue.add_job(job)
    def step(self, dt):
        """ Advance time of the simulation by dt steps at a time, making the
        next submission/negotiation/statistics-gathering as appropriate.
        """
        for i in xrange(dt):
            self.farm.advance_time(self.sec_per_step)
            if self.farm.time > self.next_submit:
                self.add_jobs()
                self.next_submit = self.farm.time + self.int_submit
            if self.farm.time > self.next_negotiate:
                self.farm.negotiate_jobs()
                self.next_negotiate = self.farm.time + self.int_negotiate
            if self.farm.time > self.next_stat:
                self._update_stat()
                self.next_stat = self.farm.time + self.int_stat
    def display_order(self):
        # Group names in a fixed, human-friendly plotting order (remaining
        # groups follow alphabetically -- see sort_like).
        sort_order = ('short', 'long', 'test', 'prod', 'mp8')
        return list(sort_like(self._stat.keys(), sort_order))
    def make_plotdata(self, groups='all'):
        # Return (x, y) arrays for a stacked plot of the usage history.
        # NOTE(review): passing a generator to np.vstack is deprecated in
        # newer numpy -- wrap in a list when upgrading.
        x = np.arange(self._stat_size)
        if groups == 'all':
            y = np.vstack((self._stat[x] for x in self.display_order()))
        else:
            y = np.vstack((self._stat[x] for x in self.display_order() if x in groups))
        return x, y
if __name__ == '__main__':
    # Simulation.__init__ requires the farm size; the original call passed no
    # argument and raised a TypeError. 659 is the sum of the node counts in
    # the default distribution (331 + 90 + 238).
    s = Simulation(nodes=659)
| fubarwrangler/atlassim | simulation.py | Python | gpl-2.0 | 6,264 | [
"Gaussian"
] | 4945da02f5956d923e33e9203a93a635eea72da8aa4adc950a506e4e64214aa4 |
from __future__ import print_function, division
# Caffe layer-type whitelists used by the data-driven initializer below.
INPUT_LAYERS = ['Data', 'ImageData', 'Input']
# Layers that only support elwise (random gaussian) initialization
ELWISE_LAYERS = ['Deconvolution']
# Layers that support parameters
PARAMETER_LAYERS = ['Convolution', 'InnerProduct']+ELWISE_LAYERS
# All supported layers
SUPPORTED_LAYERS = ['ReLU', 'Sigmoid', 'LRN', 'Pooling', 'Eltwise'] + PARAMETER_LAYERS + INPUT_LAYERS
# Loss-like layers stripped from the forward pass during initialization
STRIP_LAYER = ['Softmax', 'SoftmaxWithLoss', 'SigmoidCrossEntropyLoss']
# Use 'Dropout' at your own risk
# Unless Jon merges #2865 , 'Split' cannot be supported
UNSUPPORTED_LAYERS = ['Split', 'BatchNorm', 'Reshape']
def forward(net, i, NIT, data, output_names):
    """Run layer *i* of *net* forward NIT times.

    For each iteration the blobs named in *data* are filled with that
    iteration's input (``data[b][it]``), the single layer is executed, and a
    copy of every blob listed in *output_names* is recorded.

    Returns a dict mapping each output name to a list of NIT arrays.
    """
    # (Removed an unused local that fetched the layer name.)
    output = {t: [None]*NIT for t in output_names}
    for it in range(NIT):
        for b in data:
            net.blobs[b].data[...] = data[b][it]
        net._forward(i, i)
        for t in output_names:
            # 1*array forces a copy; the blob buffer is reused next iteration.
            output[t][it] = 1*net.blobs[t].data
    return output
def flattenData(data):
    """Collapse a list of (N, C, ...) blobs into one 2-D sample matrix.

    Each blob is reordered so the channel axis comes first, flattened to
    (C, N*spatial), and the blobs are joined along the sample axis; the final
    transpose puts samples in rows and channels in columns.
    """
    import numpy as np
    per_blob = [blob.swapaxes(0, 1).reshape((blob.shape[1], -1)) for blob in data]
    return np.concatenate(per_blob, axis=1).T
def gatherInputData(net, layer_id, bottom_data, top_name, fast=False, max_data=None):
    """Collect the effective input data of a parameter layer.

    Returns ``(W, D)`` where W are the random gaussian weights temporarily
    written into the layer and D is the layer's output under those weights;
    the input data is then I = D * W^-1 (with some abuse of tensor notation),
    so an initialization computed on D can be mapped back through W.
    """
    # This function gathers all input data.
    # In order to not replicate all the internal functionality of convolutions (eg. padding ...)
    # we gather the data in the output space and use random gaussian weights. The output of this
    # function is W and D, where the input data I = D * W^-1 [with some abuse of tensor notation]
    # If we now compute an initialization A for D, we then simply multiply A by W to obtain the
    # proper initialization in the input space
    import numpy as np
    l = net.layers[layer_id]
    NIT = len(list(bottom_data.values())[0])
    # How many times do we need to over-sample to get a full basis (out of random projections)
    OS = int(np.ceil( np.prod(l.blobs[0].data.shape[1:]) / l.blobs[0].data.shape[0] ))
    if fast: OS = 1
    # If we are over sampling we might run out of memory at some point, especially for filters higher up
    # To avoid any issues we never return more than max_data number of elements
    subsample = None
    # Note this could cause some memory issues in the FC layers
    W, D = [], []
    for i in range(OS):
        # Overwrite the layer weights with fresh gaussian noise and remember them.
        d = l.blobs[0].data
        d[...] = np.random.normal(0, 1, d.shape)
        W.append(1*d)
        # Collect the data and flatten out the convs
        data = np.concatenate([i.swapaxes(0, 1).reshape((i.shape[1],-1)).T for i in forward(net, layer_id, NIT, bottom_data, [top_name])[top_name]], axis=0)
        # Do we need to subsample the data to save memory?
        if subsample is None and max_data is not None:
            # Randomly select n data representative samples (same row subset
            # is reused for every over-sampling pass).
            N = int(max_data / (data.shape[1]*OS))
            subsample = np.arange(data.shape[0])
            if N < data.shape[0]:
                np.random.shuffle(subsample)
                subsample = subsample[:N]
        if subsample is not None:
            data = data[subsample]
        D.append(data)
    # In order to handle any sort of groups we want to have the samples packed in the following order:
    # a1 a2 a3 a4 b1 b2 b3 b4 c1 ... (where the original data was a b c and OS=4)
    W, D = np.concatenate([w[:,None] for w in W], axis=1), np.concatenate([d[:,:,None] for d in D], axis=2)
    return W.reshape((-1,)+W.shape[2:]), D.reshape((D.shape[0], -1)+D.shape[3:])
def initializeWeight(D, type, N_OUT):
    """Derive N_OUT initial filter weights from the data matrix *D*.

    The rows of D are centred and an eigendecomposition of the covariance
    keeps the N_OUT leading directions. The resulting whitening transform is
    returned directly ('pca' / 'zca') or used as the basis for k-means or
    random-sample initialization ('kmeans' / 'rand'). Falls back to gaussian
    noise when D has fewer rows than N_OUT.
    """
    import numpy as np
    if D.shape[0] < N_OUT:
        print( " Not enough data for '%s' estimation, using elwise"%type )
        return np.random.normal(0, 1, (N_OUT,D.shape[1]))
    assert type in ['pca', 'zca', 'kmeans', 'rand'], "Unknown initialization type '%s'"%type
    centered = D - np.mean(D, axis=0, keepdims=True)
    # Eigendecomposition of the (unnormalized) covariance.
    evals, evecs = np.linalg.eigh(centered.T.dot(centered))
    # Keep only the N_OUT largest eigenvalues/eigenvectors.
    keep = np.argsort(evals)[-N_OUT:]
    evals = evals[keep]
    evecs = evecs[:, keep]
    # Inverse-sqrt scaling with regularization; tiny eigenvalues are zeroed.
    evals[evals < 1e-6] = 0
    evals[evals >= 1e-6] = 1. / np.sqrt(evals[evals >= 1e-6] + 1e-3)
    scale = np.diag(evals)
    if type == 'pca':
        return scale.dot(evecs.T)
    if type == 'zca':
        return evecs.dot(scale.dot(evecs.T))
    # Whiten and row-normalize the data for the sampling-based variants.
    white = centered.dot(evecs.dot(scale))
    white /= np.linalg.norm(white, axis=1)[:,None]
    if type == 'kmeans':
        # Cluster the whitened samples; centers become the filters.
        from sklearn.cluster import MiniBatchKMeans
        centers = MiniBatchKMeans(n_clusters = white.shape[1], batch_size=10*white.shape[1]).fit(white).cluster_centers_
    elif type == 'rand':
        centers = white[np.random.choice(white.shape[0], white.shape[1], False)]
    # Map the centers back through the whitening transform and normalize the
    # per-filter response variance on the training data.
    weights = centers.dot(scale.dot(evecs.T))
    weights /= np.std(D.dot(weights.T), axis=0, keepdims=True).T
    return weights
def initializeLayer(net, layer_id, bottom_data, top_name, bias=0, type='elwise', max_data=None):
    """Initialize the parameters of a single layer in place.

    Weights are either pure gaussian noise ('elwise') or data-driven
    (pca/zca/kmeans/rand via gatherInputData + initializeWeight; a 'fast_'
    prefix disables over-sampling). Afterwards the weights are rescaled so
    the layer's output has unit std, and the bias is set to -mean/std + bias.
    """
    import numpy as np
    l = net.layers[layer_id]
    NIT = len(list(bottom_data.values())[0])
    if type!='elwise' and l.type in ELWISE_LAYERS:
        print( "Only 'elwise' supported for layer '%s'. Falling back."%net._layer_names[layer_id] )
        type = 'elwise'
    for p in l.blobs: p.data[...] = 0
    fast = 'fast_' in type
    if fast:
        type = type.replace('fast_', '')
    # Initialize the weights [k-means, ...]
    if type == 'elwise':
        d = l.blobs[0].data
        d[...] = np.random.normal(0, 1, d.shape)
    else: # Use the input data
        # Are there any groups? (inferred from the channel ratio of the
        # bottom blob vs. the weight blob)
        G = 1
        bottom_names = net.bottom_names[net._layer_names[layer_id]]
        if len(bottom_names) == 1:
            N1 = net.blobs[bottom_names[0]].shape[1]
            N2 = l.blobs[0].shape[1]
            G = N1 // N2
        # Gather the input data
        T, D = gatherInputData(net, layer_id, bottom_data, top_name, fast, max_data=max_data)
        # Figure out the output dimensionality of d
        d = l.blobs[0].data
        # Loop over groups
        for g in range(G):
            dg, Dg = d[g*(d.shape[0]//G):(g+1)*(d.shape[0]//G)], D[:,g*(D.shape[1]//G):(g+1)*(D.shape[1]//G):]
            Tg = T[g*(T.shape[0]//G):(g+1)*(T.shape[0]//G)]
            # Compute the weights
            W = initializeWeight(Dg, type, N_OUT=dg.shape[0])
            # Multiply the weights by the random basis
            # NOTE: This matrix multiplication is a bit large, if it's too slow,
            #       reduce the oversampling in gatherInputData
            dg[...] = np.dot(W, Tg.reshape((Tg.shape[0],-1))).reshape(dg.shape)
    # Scale the mean and initialize the bias so the output is standardized.
    top_data = forward(net, layer_id, NIT, bottom_data, [top_name])[top_name]
    flat_data = flattenData(top_data)
    mu = flat_data.mean(axis=0)
    std = flat_data.std(axis=0)
    # Deconvolution stores channels on axis 1; other layers on axis 0.
    if l.type == 'Deconvolution':
        l.blobs[0].data[...] /= std.reshape((1,-1,)+(1,)*(len(l.blobs[0].data.shape)-2))
    else:
        l.blobs[0].data[...] /= std.reshape((-1,)+(1,)*(len(l.blobs[0].data.shape)-1))
    for b in l.blobs[1:]:
        b.data[...] = -mu / std + bias
def magicInitialize(net, bias=0, NIT=10, type='elwise', max_data=None):
    """Data-driven initialization of every uninitialized parameter layer.

    Walks the network in order, initializes each parameter layer whose
    weights are still (near) zero via initializeLayer, and propagates data
    forward layer by layer, discarding blobs that are no longer needed to
    bound memory use.
    """
    import numpy as np
    # When was a blob last used
    last_used = {}
    # Make sure all layers are supported, and compute the last time each blob is used
    for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
        if l.type in UNSUPPORTED_LAYERS:
            print( "WARNING: Layer type '%s' not supported! Things might go very wrong..."%l.type )
        elif l.type not in SUPPORTED_LAYERS+STRIP_LAYER:
            print( "Unknown layer type '%s'. double check if it is supported"%l.type )
        for b in net.bottom_names[n]:
            last_used[b] = i
    active_data = {}
    # Read all the input data
    for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
        # Initialize the layer (only if its weights are still ~zero, so a
        # partially pre-trained net is left untouched)
        if len(l.blobs) > 0:
            if np.sum(np.abs(l.blobs[0].data)) <= 1e-10:
                print( "Initializing layer '%s'"%n )
                assert l.type in PARAMETER_LAYERS, "Unsupported parameter layer"
                assert len(net.top_names[n]) == 1, "Exactly one output supported"
                # Fill the parameters
                initializeLayer(net, i, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n][0], bias, type, max_data=max_data)
            else:
                print( "Skipping layer '%s'"%n )
            # TODO: Estimate and rescale the values [TODO: Record and undo this scaling above]
        # Run the network forward
        new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
        active_data.update(new_data)
        # Delete all unused data
        for k in list(active_data):
            if k not in last_used or last_used[k] == i:
                del active_data[k]
def load(net, blobs):
    """Copy saved parameters back into *net*.

    *blobs* maps layer names to lists of arrays (as produced by ``save``);
    layers without an entry are left untouched.
    """
    for layer, layer_name in zip(net.layers, net._layer_names):
        if layer_name not in blobs:
            continue
        for target, source in zip(layer.blobs, blobs[layer_name]):
            target.data[...] = source
def save(net):
    """Snapshot all learnable parameters of *net*.

    Returns a dict mapping each layer name to copies of its blob arrays;
    parameter-free layers are omitted. The copies are independent of the
    network, so later updates do not mutate the snapshot.
    """
    import numpy as np
    snapshot = {}
    for layer, layer_name in zip(net.layers, net._layer_names):
        if len(layer.blobs) > 0:
            snapshot[layer_name] = [np.copy(blob.data) for blob in layer.blobs]
    return snapshot
def estimateHomogenety(net):
    """Estimate, per layer, whether its output scales homogeneously with its
    input (name sic -- kept for API compatibility).

    Returns a dict mapping each layer name to the degree k such that scaling
    the input by alpha scales the output by alpha^k, or to None when the
    layer is not homogeneous (or has more than one output).
    """
    # Estimate if a certain layer is homogeneous and if yes return the degree k
    # by which the output is scaled (if input is scaled by alpha then the output
    # is scaled by alpha^k). Return None if the layer is not homogeneous.
    import numpy as np
    # When was a blob last used
    last_used = {}
    # Make sure all layers are supported, and compute the range each blob is used in
    for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
        for b in net.bottom_names[n]:
            last_used[b] = i
    active_data = {}
    homogenety = {}
    # Read all the input data
    for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
        # Run the network forward twice: once on the data as-is, once with
        # the inputs doubled, and compare the output magnitudes.
        new_data1 = forward(net, i, 1, {b: [1*d for d in active_data[b]] for b in net.bottom_names[n]}, net.top_names[n])
        new_data2 = forward(net, i, 1, {b: [2*d for d in active_data[b]] for b in net.bottom_names[n]}, net.top_names[n])
        active_data.update(new_data1)
        if len(new_data1) == 1:
            m = list(new_data1.keys())[0]
            d1, d2 = flattenData(new_data1[m]), flattenData(new_data2[m])
            f = np.mean(np.abs(d1), axis=0) / np.mean(np.abs(d2), axis=0)
            if 1e-3*np.mean(f) < np.std(f):
                # Not homogeneous: the per-channel ratios are inconsistent
                homogenety[n] = None
            else:
                # Compute the degree of the homogeneous transformation
                homogenety[n] = (np.log(np.mean(np.abs(d2))) - np.log(np.mean(np.abs(d1)))) / np.log(2)
        else:
            homogenety[n] = None
        # Delete all unused data
        for k in list(active_data):
            if k not in last_used or last_used[k] == i:
                del active_data[k]
    return homogenety
def calibrateGradientRatio(net, NIT=1):
    # Rescale the parameters of every layer so that all parameter layers get
    # the same gradient-to-weight magnitude ratio, while trying to keep the
    # network output standard deviation stable. Iterates at most 10 times or
    # until every layer's ratio is within 1% of the common target ratio.
    # Relies on module-level ``forward``/``flattenData`` and the layer-type
    # sets ``STRIP_LAYER``/``INPUT_LAYERS``/``PARAMETER_LAYERS``.
    import numpy as np
    # When was a blob last used
    last_used = {}
    # Find the last layer to use
    last_layer = 0
    for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
        if l.type not in STRIP_LAYER:
            last_layer = i
        for b in net.bottom_names[n]:
            last_used[b] = i
    # Figure out which tops are involved
    last_tops = net.top_names[net._layer_names[last_layer]]
    for t in last_tops:
        last_used[t] = len(net.layers)
    # Call forward and store the data of all data layers
    active_data, input_data, bottom_scale = {}, {}, {}
    # Read all the input data
    for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
        if i > last_layer: break
        # Compute the input scale for parameter layers
        if len(l.blobs) > 0:
            bottom_scale[n] = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
        # Run the network forward
        new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
        if l.type in INPUT_LAYERS:
            # Remember data-layer outputs so later passes can reuse them.
            input_data.update(new_data)
        active_data.update(new_data)
        # Delete all unused data
        for k in list(active_data):
            if k not in last_used or last_used[k] == i:
                del active_data[k]
    output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
    for it in range(10):
        # Reset the diffs
        for l in net.layers:
            for b in l.blobs:
                b.diff[...] = 0
        # Set the top diffs
        # (unit Gaussian noise stands in for an arbitrary training gradient)
        for t in last_tops:
            net.blobs[t].diff[...] = np.random.normal(0, 1, net.blobs[t].shape)
        # Compute all gradients
        net._backward(last_layer, 0)
        # Compute the gradient ratio
        ratio = {}
        for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
            if len(l.blobs) > 0:
                assert l.type in PARAMETER_LAYERS, "Parameter layer '%s' currently not supported"%l.type
                b = l.blobs[0]
                # RMS(gradient) / RMS(weight) of the main parameter blob
                ratio[n] = np.sqrt(np.mean(b.diff**2) / np.mean(b.data**2))
        # If all layers are homogeneous, then the target ratio is the geometric mean of all ratios
        # (assuming we want the same output)
        # To deal with non-homogeneous layers we scale by output_std in the hope to undo correct the
        # estimation over time.
        # NOTE: for non feed-forward networks the geometric mean might not be the right scaling factor
        target_ratio = np.exp(np.mean(np.log(np.array(list(ratio.values()))))) * (output_std)**(1. / len(ratio))
        # Terminate if the relative change is less than 1% for all values
        log_ratio = np.log( np.array(list(ratio.values())) )
        if np.all( np.abs(log_ratio/np.log(target_ratio) - 1) < 0.01 ):
            break
        # Update all the weights and biases
        active_data = {}
        # Read all the input data
        for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
            if i > last_layer: break
            # Use the stored input
            if l.type in INPUT_LAYERS:
                active_data.update({b: input_data[b] for b in net.top_names[n]})
            else:
                if len(l.blobs) > 0:
                    # Add the scaling from the bottom to the biases
                    # (biases must track how much the layer's input changed)
                    current_scale = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
                    adj = current_scale / bottom_scale[n]
                    for b in list(l.blobs)[1:]:
                        b.data[...] *= adj
                    bottom_scale[n] = current_scale
                    # Scale to obtain the target ratio
                    scale = np.sqrt(ratio[n] / target_ratio)
                    for b in l.blobs:
                        b.data[...] *= scale
                active_data.update(forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n]))
                # Delete all unused data
                for k in list(active_data):
                    if k not in last_used or last_used[k] == i:
                        del active_data[k]
        new_output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
        if np.abs(np.log(output_std) - np.log(new_output_std)) > 0.25:
            # If we diverge by a factor of exp(0.25) = ~1.3, then we should check if the network is really
            # homogeneous
            print( "WARNING: It looks like one or more layers are not homogeneous! Trying to correct for this..." )
            print( " Output std = %f" % new_output_std )
        output_std = new_output_std
def netFromString(s, t=None):
    """Build a ``caffe.Net`` from the prototxt text *s* in phase *t*
    (defaults to ``caffe.TEST``)."""
    import caffe
    from tempfile import NamedTemporaryFile
    if t is None:
        t = caffe.TEST
    # caffe.Net only accepts a file path, so spill the string to a temp file.
    tmp = NamedTemporaryFile('w')
    tmp.write(s)
    tmp.flush()
    net = caffe.Net(tmp.name, t)
    tmp.close()
    return net
def getFileList(f):
    """Expand the glob pattern *f* and return only matches that are regular files.

    Directories (and anything else that is not a plain file) matched by the
    pattern are filtered out.
    """
    from glob import glob
    from os import path
    # Use a distinct loop variable; the original shadowed the parameter ``f``
    # inside the comprehension, which obscured intent.
    return [match for match in glob(f) if path.isfile(match)]
def main():
    """Command-line entry point: magically initialize (and optionally
    gradient-calibrate) a network's weights and save a caffemodel."""
    from argparse import ArgumentParser
    from os import path
    import numpy as np
    parser = ArgumentParser()
    parser.add_argument('prototxt')
    parser.add_argument('output_caffemodel')
    parser.add_argument('-l', '--load', help='Load a pretrained model and rescale it [bias and type are not supported]')
    parser.add_argument('-d', '--data', default=None, help='Image list to use [default prototxt data]')
    parser.add_argument('-b', '--bias', type=float, default=0.1, help='Bias')
    parser.add_argument('-t', '--type', default='elwise', help='Type: elwise, pca, zca, kmeans, rand (random input patches). Add fast_ to speed up the initialization, but you might lose in precision.')
    parser.add_argument('-z', action='store_true', help='Zero all weights and reinitialize')
    parser.add_argument('-cs', action='store_true', help='Correct for scaling')
    parser.add_argument('-q', action='store_true', help='Quiet execution')
    parser.add_argument('-s', type=float, default=1.0, help='Scale the input [only custom data "-d"]')
    parser.add_argument('-bs', type=int, default=16, help='Batch size [only custom data "-d"]')
    parser.add_argument('-nit', type=int, default=10, help='Number of iterations')
    parser.add_argument('--mem-limit', type=int, default=500, help='How much memory should we use for the data buffer (MB)?')
    parser.add_argument('--gpu', type=int, default=0, help='What gpu to run it on?')
    args = parser.parse_args()
    if args.q:
        # Silence glog before caffe is imported (it reads the env at import time).
        from os import environ
        environ['GLOG_minloglevel'] = '2'
    import caffe, load
    from caffe import NetSpec, layers as L
    caffe.set_mode_gpu()
    if args.gpu is not None:
        caffe.set_device(args.gpu)
    if args.data is not None:
        # Replace the prototxt's data source with an ImageData layer fed from
        # the user-supplied file list.
        model = load.ProtoDesc(args.prototxt)
        net = NetSpec()
        fl = getFileList(args.data)
        if len(fl) == 0:
            print("Unknown data type for '%s'"%args.data)
            exit(1)
        from tempfile import NamedTemporaryFile
        f = NamedTemporaryFile('w')
        # ImageData expects "<path> <label>" lines; the label is unused here.
        f.write('\n'.join([path.abspath(i)+' 0' for i in fl]))
        f.flush()
        net.data, net.label = L.ImageData(source=f.name, batch_size=args.bs, new_width=model.input_dim[-1], new_height=model.input_dim[-1], transform_param=dict(mean_value=[104,117,123], scale=args.s),ntop=2)
        net.out = model(data=net.data, label=net.label)
        n = netFromString('force_backward:true\n'+str(net.to_proto()), caffe.TRAIN )
    else:
        n = caffe.Net(args.prototxt, caffe.TRAIN)
    if args.load is not None:
        n.copy_from(args.load)
        # Rescale existing layers?
        #if args.fix:
            #magicFix(n, args.nit)
    if args.z:
        # Zero out all layers
        for l in n.layers:
            for b in l.blobs:
                b.data[...] = 0
    # Only (re)initialize when at least one parametrized layer is all-zero.
    if any([np.abs(l.blobs[0].data).sum() < 1e-10 for l in n.layers if len(l.blobs) > 0]):
        print( [m for l,m in zip(n.layers, n._layer_names) if len(l.blobs) > 0 and np.abs(l.blobs[0].data).sum() < 1e-10] )
        magicInitialize(n, args.bias, NIT=args.nit, type=args.type, max_data=args.mem_limit*1024*1024/4)
    else:
        print( "Network already initialized, skipping magic init" )
    if args.cs:
        # A simply helper function that lets you figure out which layers are not
        # homogeneous
        #print( estimateHomogenety(n) )
        calibrateGradientRatio(n)
    n.save(args.output_caffemodel)
# Script entry point: initialize (and optionally calibrate) network weights.
if __name__ == "__main__":
    main()
| philkr/magic_init | magic_init.py | Python | bsd-2-clause | 17,395 | [
"Gaussian"
] | e675c6ca91ec19690804270e2c5c7d5272747719c62b66004fbca1d46b32c569 |
# Merge active and previous version's generated next major version candidate
# shadow. This involve simultaneously traversing both FileDescriptorProtos and:
# 1. Recovering hidden_envoy_depreacted_* fields and enum values in active proto.
# 2. Recovering deprecated (sub)message types.
# 3. Misc. fixups for oneof metadata and reserved ranges/names.
from collections import defaultdict
import copy
import pathlib
import sys
from tools.api_proto_plugin import type_context as api_type_context
from google.protobuf import descriptor_pb2
from google.protobuf import text_format
# Note: we have to include those proto definitions for text_format sanity.
from google.api import annotations_pb2 as _
from validate import validate_pb2 as _
from envoy.annotations import deprecation_pb2 as _
from envoy.annotations import resource_pb2 as _
from udpa.annotations import migrate_pb2 as _
from udpa.annotations import security_pb2 as _
from udpa.annotations import sensitive_pb2 as _
from udpa.annotations import status_pb2 as _
from udpa.annotations import versioning_pb2 as _
# Set reserved_range in target_proto to reflect previous_reserved_range skipping
# skip_reserved_numbers.
def AdjustReservedRange(target_proto, previous_reserved_range, skip_reserved_numbers):
    """Rewrite target_proto.reserved_range from previous_reserved_range,
    dropping every singleton range whose number is in skip_reserved_numbers."""
    del target_proto.reserved_range[:]
    for prev in previous_reserved_range:
        # We can only handle singleton ranges today.
        assert prev.end in (prev.start, prev.start + 1)
        if prev.start not in skip_reserved_numbers:
            target_proto.reserved_range.add().MergeFrom(prev)
# Merge active/shadow EnumDescriptorProtos to a fresh target EnumDescriptorProto.
def MergeActiveShadowEnum(active_proto, shadow_proto, target_proto):
    """Merge an active EnumDescriptorProto with its shadow into target_proto,
    resurrecting each hidden_envoy_deprecated_* value whose plain name is
    reserved in the active proto."""
    target_proto.MergeFrom(active_proto)
    if not shadow_proto:
        return
    shadow_values = {v.name: v for v in shadow_proto.value}
    skip_reserved_numbers = []
    # For every reserved name, check to see if it's in the shadow, and if so,
    # reintroduce in target_proto.
    del target_proto.reserved_name[:]
    for n in active_proto.reserved_name:
        hidden_n = 'hidden_envoy_deprecated_' + n
        if hidden_n in shadow_values:
            v = shadow_values[hidden_n]
            # The value comes back, so its number must not stay reserved.
            skip_reserved_numbers.append(v.number)
            target_proto.value.add().MergeFrom(v)
        else:
            target_proto.reserved_name.append(n)
    AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers)
    # Special fixup for deprecation of default enum values.
    # The shadow's value 0 replaces the active placeholder wholesale.
    for tv in target_proto.value:
        if tv.name == 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE':
            for sv in shadow_proto.value:
                if sv.number == tv.number:
                    assert (sv.number == 0)
                    tv.CopyFrom(sv)
# Adjust source code info comments path to reflect insertions of oneof fields
# inside the middle of an existing collection of fields.
def AdjustSourceCodeInfo(type_context, field_index, field_adjustment):
    """Shift source-code-info comment paths for fields at or after
    *field_index* by *field_adjustment*, to account for fields inserted in
    the middle of the message described by *type_context*."""
    field_path_prefix = type_context.path + [2]

    def is_prefix(prefix, seq):
        return len(prefix) <= len(seq) and all(a == b for a, b in zip(prefix, seq))

    for location in type_context.source_code_info.proto.location:
        if not is_prefix(field_path_prefix, location.path):
            continue
        idx = len(type_context.path) + 1
        if idx < len(location.path) and location.path[idx] >= field_index:
            location.path[idx] += field_adjustment
# Merge active/shadow DescriptorProtos to a fresh target DescriptorProto.
def MergeActiveShadowMessage(type_context, active_proto, shadow_proto, target_proto):
    """Merge an active DescriptorProto with its shadow into target_proto,
    restoring hidden_envoy_deprecated_* fields (keeping oneof members
    consecutive), deprecated nested types, and fixed-up reserved ranges."""
    target_proto.MergeFrom(active_proto)
    if not shadow_proto:
        return
    shadow_fields = {f.name: f for f in shadow_proto.field}
    skip_reserved_numbers = []
    # For every reserved name, check to see if it's in the shadow, and if so,
    # reintroduce in target_proto. We track both the normal fields we need to add
    # back in (extra_simple_fields) and those that belong to oneofs
    # (extra_oneof_fields). The latter require special treatment, as we can't just
    # append them to the end of the message, they need to be reordered.
    extra_simple_fields = []
    extra_oneof_fields = defaultdict(list)  # oneof index -> list of fields
    del target_proto.reserved_name[:]
    for n in active_proto.reserved_name:
        hidden_n = 'hidden_envoy_deprecated_' + n
        if hidden_n in shadow_fields:
            f = shadow_fields[hidden_n]
            skip_reserved_numbers.append(f.number)
            missing_field = copy.deepcopy(f)
            # oneof fields from the shadow need to have their index set to the
            # corresponding index in active/target_proto.
            if missing_field.HasField('oneof_index'):
                oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name
                missing_oneof_index = None
                for oneof_index, oneof_decl in enumerate(target_proto.oneof_decl):
                    if oneof_decl.name == oneof_name:
                        missing_oneof_index = oneof_index
                if missing_oneof_index is None:
                    # The oneof only exists in the shadow: re-create it.
                    missing_oneof_index = len(target_proto.oneof_decl)
                    target_proto.oneof_decl.add().MergeFrom(
                        shadow_proto.oneof_decl[missing_field.oneof_index])
                missing_field.oneof_index = missing_oneof_index
                extra_oneof_fields[missing_oneof_index].append(missing_field)
            else:
                extra_simple_fields.append(missing_field)
        else:
            target_proto.reserved_name.append(n)
    # Copy existing fields, as we need to nuke them.
    existing_fields = copy.deepcopy(target_proto.field)
    del target_proto.field[:]
    # Rebuild fields, taking into account extra_oneof_fields. protoprint.py
    # expects that oneof fields are consecutive, so need to sort for this.
    current_oneof_index = None

    def AppendExtraOneofFields(current_oneof_index, last_oneof_field_index):
        # Add fields from extra_oneof_fields for current_oneof_index.
        for oneof_f in extra_oneof_fields[current_oneof_index]:
            target_proto.field.add().MergeFrom(oneof_f)
        field_adjustment = len(extra_oneof_fields[current_oneof_index])
        # Fixup the comments in source code info. Note that this is really
        # inefficient, O(N^2) in the worst case, but since we have relatively few
        # deprecated fields, is the easiest to implement method.
        if last_oneof_field_index is not None:
            AdjustSourceCodeInfo(type_context, last_oneof_field_index, field_adjustment)
        del extra_oneof_fields[current_oneof_index]
        return field_adjustment

    field_index = 0
    for f in existing_fields:
        if current_oneof_index is not None:
            field_oneof_index = f.oneof_index if f.HasField('oneof_index') else None
            # Are we exiting the oneof? If so, add the respective extra_one_fields.
            if field_oneof_index != current_oneof_index:
                field_index += AppendExtraOneofFields(current_oneof_index, field_index)
            current_oneof_index = field_oneof_index
        elif f.HasField('oneof_index'):
            current_oneof_index = f.oneof_index
        target_proto.field.add().MergeFrom(f)
        field_index += 1
    if current_oneof_index is not None:
        # No need to adjust source code info here, since there are no comments for
        # trailing deprecated fields, so just set field index to None.
        AppendExtraOneofFields(current_oneof_index, None)
    # Non-oneof fields are easy to treat, we just append them to the existing
    # fields. They don't get any comments, but that's fine in the generated
    # shadows.
    for f in extra_simple_fields:
        target_proto.field.add().MergeFrom(f)
    for oneof_index in sorted(extra_oneof_fields.keys()):
        for f in extra_oneof_fields[oneof_index]:
            target_proto.field.add().MergeFrom(f)
    # Same is true for oneofs that are exclusively from the shadow.
    AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers)
    # Visit nested message types
    del target_proto.nested_type[:]
    shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type}
    for index, msg in enumerate(active_proto.nested_type):
        MergeActiveShadowMessage(
            type_context.ExtendNestedMessage(index, msg.name, msg.options.deprecated), msg,
            shadow_msgs.get(msg.name), target_proto.nested_type.add())
    # Visit nested enum types
    del target_proto.enum_type[:]
    shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type}
    for enum in active_proto.enum_type:
        MergeActiveShadowEnum(enum, shadow_enums.get(enum.name), target_proto.enum_type.add())
    # Ensure target has any deprecated sub-message types in case they are needed.
    active_msg_names = set([msg.name for msg in active_proto.nested_type])
    for msg in shadow_proto.nested_type:
        if msg.name not in active_msg_names:
            target_proto.nested_type.add().MergeFrom(msg)
# Merge active/shadow FileDescriptorProtos, returning a the resulting FileDescriptorProto.
def MergeActiveShadowFile(active_file_proto, shadow_file_proto):
    """Merge active/shadow FileDescriptorProtos and return the resulting
    FileDescriptorProto (active wins; shadow supplies deprecated content)."""
    target_file_proto = copy.deepcopy(active_file_proto)
    # Wrap source code info so nested merges can fix up comment locations.
    source_code_info = api_type_context.SourceCodeInfo(target_file_proto.name,
                                                       target_file_proto.source_code_info)
    package_type_context = api_type_context.TypeContext(source_code_info, target_file_proto.package)
    # Visit message types
    del target_file_proto.message_type[:]
    shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type}
    for index, msg in enumerate(active_file_proto.message_type):
        MergeActiveShadowMessage(
            package_type_context.ExtendMessage(index, msg.name, msg.options.deprecated), msg,
            shadow_msgs.get(msg.name), target_file_proto.message_type.add())
    # Visit enum types
    del target_file_proto.enum_type[:]
    shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type}
    for enum in active_file_proto.enum_type:
        MergeActiveShadowEnum(enum, shadow_enums.get(enum.name), target_file_proto.enum_type.add())
    # Ensure target has any deprecated message types in case they are needed.
    active_msg_names = set([msg.name for msg in active_file_proto.message_type])
    for msg in shadow_file_proto.message_type:
        if msg.name not in active_msg_names:
            target_file_proto.message_type.add().MergeFrom(msg)
    return target_file_proto
# Usage: merge_active_shadow.py <active.pb_text> <shadow.pb_text> <output>
# Both inputs are FileDescriptorProtos in protobuf text format.
if __name__ == '__main__':
    active_src, shadow_src, dst = sys.argv[1:]
    active_proto = descriptor_pb2.FileDescriptorProto()
    text_format.Merge(pathlib.Path(active_src).read_text(), active_proto)
    shadow_proto = descriptor_pb2.FileDescriptorProto()
    text_format.Merge(pathlib.Path(shadow_src).read_text(), shadow_proto)
    pathlib.Path(dst).write_text(str(MergeActiveShadowFile(active_proto, shadow_proto)))
| lizan/envoy | tools/protoxform/merge_active_shadow.py | Python | apache-2.0 | 10,444 | [
"VisIt"
] | b711c4f88628dd4286844fd0b1b84b4f07034ee81aaa74828eb49798110f4667 |
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
# Django settings used by the python-tldap test suite.
# Debug output is enabled because this module is test-only.
DEBUG = True
# NOTE(review): hard-coded secret key is acceptable only because these are
# test settings; never reuse this value in a real deployment.
SECRET_KEY = '5hvhpe6gv2t5x4$3dtq(w2v#vg@)sx4p3r_@wv%l41g!stslc*'
# Only the tldap Django integration app is required for the tests.
INSTALLED_APPS = [
    'tldap.django',
]
| brianmay/python-tldap | docs/settings.py | Python | gpl-3.0 | 828 | [
"Brian"
] | 513d8142f2d22ab3a6fc0df484710b0b0d96b28c64406391d0ec4a0e59fe11e6 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that all source files contain the necessary copyright boilerplate
# snippet.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
# Command-line interface. NOTE(review): arguments are parsed at import time,
# so this module is effectively a script, not an importable library.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
# Default root is the repository root (one level above this script's directory).
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "verify/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
args = parser.parse_args()
def get_refs():
    """Load the reference boilerplate headers, keyed by file extension.

    Reads every ``boilerplate.<ext>.txt`` under ``args.boilerplate_dir`` and
    returns a dict mapping ``<ext>`` to the header as a list of lines.
    """
    refs = {}
    pattern = os.path.join(args.boilerplate_dir, "boilerplate.*.txt")
    for ref_path in glob.glob(pattern):
        extension = os.path.basename(ref_path).split(".")[1]
        with open(ref_path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def file_passes(filename, refs, regexs):
    """Return True if *filename* starts with the expected boilerplate header.

    *refs* maps file extensions (or basenames, for extension-less files) to
    the reference header as a list of lines; *regexs* is the dict produced
    by ``get_regexs``.
    """
    try:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) to real I/O failures, and the file
        # is now closed even when reading raises.
        with open(filename, 'r') as f:
            data = f.read()
    except (IOError, OSError):
        return False

    basename = os.path.basename(filename)
    extension = file_extension(filename)
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove build tags from the top of Go files
    if extension == "go":
        data = regexs["go_build_constraints"].sub("", data, 1)

    # remove shebang from the top of shell/python files
    if extension in ("sh", "py"):
        data = regexs["shebang"].sub("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    # the header must not still contain the literal placeholder "YEAR"
    p = regexs["year"]
    for d in data:
        if p.search(d):
            return False

    # replace the first concrete year with "YEAR" so the header can be
    # compared against the reference verbatim
    p = regexs["date"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('YEAR', d)
        if found != 0:
            break

    # pass only if the normalized header matches the reference exactly
    return ref == data


def file_extension(filename):
    """Return the lower-cased final extension of *filename*, without the dot."""
    return os.path.splitext(filename)[1].split(".")[-1].lower()
# Directories never scanned: vendored/generated code and VCS metadata.
# NOTE(review): '__init__.py' is a file name, not a directory; it works here
# because callers do substring matching on whole paths.
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'vendor', '__init__.py']
def normalize_files(files):
    """Drop paths containing any skipped-dir component and absolutize the
    remaining paths relative to ``args.rootdir``."""
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(args.rootdir, p)
            for p in kept]
def get_files(extensions):
    """Return the files to check: those named on the command line, or every
    file under ``args.rootdir`` whose extension or basename is in
    *extensions*."""
    if args.filenames:
        candidates = args.filenames
    else:
        candidates = []
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            dirs[:] = [d for d in dirs if d not in skipped_dirs]
            for name in walkfiles:
                candidates.append(os.path.join(root, name))
    candidates = normalize_files(candidates)
    return [p for p in candidates
            if file_extension(p) in extensions or os.path.basename(p) in extensions]
def get_regexs():
    """Compile the regexes used to normalize headers before comparison."""
    return {
        # "YEAR" must only appear in the reference, never in a checked file
        "year": re.compile('YEAR'),
        # concrete years that get normalized to the "YEAR" placeholder
        "date": re.compile('(2014|2015|2016)'),
        # Go build-constraint comment block at the top of .go files
        "go_build_constraints": re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE),
        # shebang line at the top of shell/python scripts
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
    }
# Check every known file and report all nonconforming ones at once.
if __name__ == "__main__":
    # NOTE(review): exit_code is assigned but never used; success simply
    # falls through with status 0.
    exit_code = 0
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())
    nonconforming_files = []
    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            nonconforming_files.append(filename)
    if nonconforming_files:
        print('%d files have incorrect boilerplate headers:' %
              len(nonconforming_files))
        for filename in sorted(nonconforming_files):
            print(filename)
        sys.exit(1)
| girishkalele/test-infra | verify/verify-boilerplate.py | Python | apache-2.0 | 5,549 | [
"VisIt"
] | 63423875b8eafdec0d4061304dc5a5ef3e9e28e8cfef26afc70652d19f087a12 |
import numpy as np
from ase.atoms import Atom, Atoms
from ase.calculators.singlepoint import SinglePointDFTCalculator
from ase.calculators.singlepoint import SinglePointKPoint
def read_gpaw_text(fileobj, index=-1):
    """Parse a GPAW text log and return Atoms object(s).

    fileobj: file name or open file with GPAW text output.
    index: image selector applied to the list of parsed images
    (default: the last image).

    Each image gets a SinglePointDFTCalculator carrying whatever
    energy/forces/dipole/magmoms/Fermi level/eigenvalue data could be found
    in the log. Raises IOError if no image could be parsed.
    """
    if isinstance(fileobj, str):
        # 'r' instead of the original 'rU': universal newlines are the
        # default in Python 3 and the 'U' flag was removed in 3.11.
        fileobj = open(fileobj, 'r')

    notfound = []

    def index_startswith(lines, string):
        # Find the first line starting with ``string``; remember misses so
        # repeated lookups for the same absent header fail fast.
        if string in notfound:
            raise ValueError
        for i, line in enumerate(lines):
            if line.startswith(string):
                return i
        notfound.append(string)
        raise ValueError

    lines = fileobj.readlines()
    images = []
    while True:
        try:
            i = lines.index('Unit Cell:\n')
        except ValueError:
            pass
        else:
            cell = []
            pbc = []
            for line in lines[i + 3:i + 6]:
                words = line.split()
                if len(words) == 5:  # old format
                    cell.append(float(words[2]))
                    pbc.append(words[1] == 'yes')
                else:  # new format with GUC
                    cell.append([float(word) for word in words[3:6]])
                    pbc.append(words[2] == 'yes')
        try:
            i = lines.index('Positions:\n')
        except ValueError:
            # No more images in the log.
            break
        symbols = []
        positions = []
        for line in lines[i + 1:]:
            words = line.split()
            if len(words) != 5:
                break
            n, symbol, x, y, z = words
            symbols.append(symbol.split('.')[0])
            positions.append([float(x), float(y), float(z)])
        if len(symbols):
            atoms = Atoms(symbols=symbols, positions=positions,
                          cell=cell, pbc=pbc)
        else:
            atoms = Atoms(cell=cell, pbc=pbc)
        lines = lines[i + 5:]
        ene = {
            # key          position relative to the separator line
            'Kinetic:': 1,
            'Potential:': 2,
            'XC:': 4,
        }
        try:
            i = lines.index('-------------------------\n')
        except ValueError:
            e = None
        else:
            for key in ene:
                pos = ene[key]
                ene[key] = None
                line = lines[i + pos]
                try:
                    assert line.startswith(key)
                    ene[key] = float(line.split()[-1])
                except ValueError:
                    pass
            line = lines[i + 9]
            assert line.startswith('Zero Kelvin:')
            e = float(line.split()[-1])
        try:
            ii = index_startswith(lines, 'Fermi Level')
        except ValueError:
            eFermi = None
        else:
            try:
                eFermi = float(lines[ii].split()[2])
            except ValueError:  # we have two Fermi levels
                fields = lines[ii].split()

                def strip(string):
                    for rubbish in '[],':
                        string = string.replace(rubbish, '')
                    return string
                eFermi = [float(strip(fields[2])),
                          float(strip(fields[3]))]
        # read Eigenvalues and occupations
        # NOTE(review): the two header strings below appear identical here;
        # upstream they likely differ only in internal whitespace (spin-paired
        # vs spin-polarized output) -- confirm against the original source.
        ii1 = ii2 = 1e32
        try:
            ii1 = index_startswith(lines, ' Band Eigenvalues Occupancy')
        except ValueError:
            pass
        try:
            ii2 = index_startswith(lines, ' Band Eigenvalues Occupancy')
        except ValueError:
            pass
        ii = min(ii1, ii2)
        if ii == 1e32:
            kpts = None
        else:
            ii += 1
            words = lines[ii].split()
            vals = []
            while len(words) > 2:
                vals.append([float(word) for word in words])
                ii += 1
                words = lines[ii].split()
            vals = np.array(vals).transpose()
            kpts = [SinglePointKPoint(1, 0, 0)]
            kpts[0].eps_n = vals[1]
            kpts[0].f_n = vals[2]
            if vals.shape[0] > 3:
                # second spin channel present
                kpts.append(SinglePointKPoint(1, 0, 1))
                kpts[1].eps_n = vals[3]
                kpts[1].f_n = vals[4]
        # read charge
        try:
            ii = index_startswith(lines, 'Total Charge:')
        except ValueError:
            q = None
        else:
            q = float(lines[ii].split()[2])
        # read dipole moment
        try:
            ii = index_startswith(lines, 'Dipole Moment:')
        except ValueError:
            dipole = None
        else:
            line = lines[ii].replace(']', '').replace('[', '')
            dipole = np.array([float(c) for c in line.split()[-3:]])
        try:
            ii = index_startswith(lines, 'Local Magnetic Moments')
        except ValueError:
            magmoms = None
        else:
            magmoms = []
            for i in range(ii + 1, ii + 1 + len(atoms)):
                iii, magmom = lines[i].split()[:2]
                magmoms.append(float(magmom))
        try:
            ii = lines.index('Forces in eV/Ang:\n')
        except ValueError:
            f = None
        else:
            f = []
            for i in range(ii + 1, ii + 1 + len(atoms)):
                try:
                    x, y, z = lines[i].split()[-3:]
                    f.append((float(x), float(y), float(z)))
                # Python 3 compatible syntax; the original used the
                # Python-2-only "except (...), m:" form.
                except (ValueError, IndexError) as m:
                    raise IOError('Malformed GPAW log file: %s' % m)
        if len(images) > 0 and e is None:
            break
        if e is not None or f is not None:
            calc = SinglePointDFTCalculator(atoms, energy=e, forces=f,
                                            dipole=dipole, magmoms=magmoms,
                                            eFermi=eFermi)
            if kpts is not None:
                calc.kpts = kpts
            atoms.set_calculator(calc)
        if q is not None and len(atoms) > 0:
            n = len(atoms)
            atoms.set_initial_charges([q / n] * n)
        images.append(atoms)
        # NOTE(review): ``i`` here is whatever loop variable was assigned
        # last above (preserved from the original implementation).
        lines = lines[i:]
    if len(images) == 0:
        raise IOError('Corrupted GPAW-text file!')
    return images[index]
| grhawk/ASE | tools/ase/io/gpawtext.py | Python | gpl-2.0 | 6,114 | [
"ASE",
"GPAW"
] | 6c8454043c3e2574faa6ec097aaf2456f51d02d0db1337f6eff625f7d3d8cbaa |
# -*- coding: utf-8 -*-
"""This module tests various ways how to set up the provisioning using the provisioning dialog."""
from datetime import datetime, timedelta
import fauxfactory
import pytest
import re
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.base.login import BaseLoggedInPage
from cfme.common.provider import cleanup_vm
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.infrastructure.virtual_machines import Vm
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import wait_for, TimedOutError
# Module-wide marks: all tests here are long-running provisioning tests that
# need the automate server role and an infra provider whose provisioning yaml
# defines a template, host and datastore.
pytestmark = [
    pytest.mark.meta(server_roles="+automate"),
    pytest.mark.usefixtures('uses_infra_providers'),
    pytest.mark.long_running,
    test_requirements.provision,
    pytest.mark.tier(3),
    pytest.mark.provider([InfraProvider],
                         required_fields=[['provisioning', 'template'],
                                          ['provisioning', 'host'],
                                          ['provisioning', 'datastore']],
                         scope="module"),
]
@pytest.fixture(scope="function")
def vm_name():
    """A fresh random VM name for each test."""
    return random_vm_name('provd')
@pytest.fixture(scope="function")
def prov_data(provisioning, provider):
    """Baseline provisioning-dialog data; tests fill in vm_name/hardware."""
    requester = {
        'email': "{}@{}.test".format(fauxfactory.gen_alphanumeric(),
                                     fauxfactory.gen_alphanumeric()),
        'first_name': fauxfactory.gen_alphanumeric(),
        'last_name': fauxfactory.gen_alphanumeric(),
        'manager_name': '{} {}'.format(fauxfactory.gen_alphanumeric(),
                                       fauxfactory.gen_alphanumeric()),
    }
    data = {
        'request': requester,
        'network': {'vlan': partial_match(provisioning.get('vlan'))},
        'environment': {'datastore_name': {'name': provisioning['datastore']},
                        'host_name': {'name': provisioning['host']}},
        'catalog': {},
        'hardware': {},
        'schedule': {},
        'purpose': {},
    }
    # Clone type depends on the provider family; other providers keep the
    # dialog's default.
    if provider.one_of(RHEVMProvider):
        data['catalog']['provision_type'] = 'Native Clone'
    elif provider.one_of(VMwareProvider):
        data['catalog']['provision_type'] = 'VMware'
    return data
@pytest.fixture(scope="function")
def provisioner(appliance, request, setup_provider, provider, vm_name):
    """Yield a helper that submits the provision dialog and waits for the VM.

    The returned callable takes the template name, the filled dialog data and
    an optional ``delayed`` datetime; when ``delayed`` is given it first
    verifies that the request does NOT finish before that time.
    """
    def _provisioner(template, provisioning_data, delayed=None):
        vm = Vm(name=vm_name, provider=provider, template_name=template)
        view = navigate_to(vm, 'Provision')
        view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
        base_view = vm.appliance.browser.create_view(BaseLoggedInPage)
        base_view.flash.assert_no_error()
        # Make sure the VM is removed from the provider after the test.
        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        request_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        provision_request = appliance.collections.requests.instantiate(
            description=request_description)
        if delayed is not None:
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            try:
                # The request must still be pending when the delay expires;
                # finishing early means the schedule was ignored.
                wait_for(provision_request.is_finished,
                         fail_func=provision_request.update, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(
            provider.mgmt.does_vm_exist, [vm_name],
            fail_func=provider.refresh_provider_relationships,
            handle_exception=True, num_sec=600
        )
        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        provision_request.wait_for_request()
        assert provision_request.is_succeeded(method='ui')
        return vm
    return _provisioner
def test_change_cpu_ram(provisioner, soft_assert, provider, prov_data, vm_name):
    """ Tests change RAM and CPU in provisioning dialog.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set number of CPUs and amount of RAM.
        * Submit the provisioning request and wait for it to finish.
        * Visit the page of the provisioned VM. The summary should state correct values for CPU&RAM.

    Metadata:
        test_flag: provision
    """
    prov_data['catalog']["vm_name"] = vm_name
    prov_data['hardware']["num_sockets"] = "4"
    # SCVMM's dialog has no cores-per-socket field.
    prov_data['hardware']["cores_per_socket"] = "1" if not provider.one_of(SCVMMProvider) else None
    prov_data['hardware']["memory"] = "2048"
    template_name = provider.data['provisioning']['template']
    vm = provisioner(template_name, prov_data)
    # Go to the VM info
    data = vm.get_detail(properties=("Properties", "Container")).strip()
    # No longer possible to use version pick because of cherrypicking?
    # Try each known "Container" summary format in turn; the first regex
    # that matches yields (cpu count, memory) groups.
    regexes = map(re.compile, [
        r"^[^(]*(\d+) CPUs?.*, ([^)]+)[^)]*$",
        r"^[^(]*\((\d+) CPUs?, ([^)]+)\)[^)]*$",
        r"^.*?(\d+) CPUs? .*?(\d+ MB)$"])
    for regex in regexes:
        match = regex.match(data)
        if match is not None:
            num_cpus, memory = match.groups()
            break
    else:
        raise ValueError("Could not parse string {}".format(repr(data)))
    soft_assert(num_cpus == "4", "num_cpus should be {}, is {}".format("4", num_cpus))
    soft_assert(memory == "2048 MB", "memory should be {}, is {}".format("2048 MB", memory))
# Special parametrization in testgen above
@pytest.mark.meta(blockers=[1209847, 1380782])
@pytest.mark.parametrize("disk_format", ["Thin", "Thick", "Preallocated"])
@pytest.mark.uncollectif(lambda provider, disk_format:
    (provider.one_of(RHEVMProvider) and disk_format == "Thick") or
    (not provider.one_of(RHEVMProvider) and disk_format == "Preallocated") or
    # Temporarily, our storage domain cannot handle Preallocated disks
    (provider.one_of(RHEVMProvider) and disk_format == "Preallocated") or
    (provider.one_of(SCVMMProvider)) or
    (provider.key == "vsphere55" and disk_format == "Thick"))
def test_disk_format_select(provisioner, disk_format, provider, prov_data, vm_name):
    """ Tests disk format selection in provisioning dialog.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set the disk format to be thick or thin.
        * Submit the provisioning request and wait for it to finish.
        * Visit the page of the provisioned VM.
        * The ``Thin Provisioning Used`` field should state true of false according to the selection

    Metadata:
        test_flag: provision
    """
    prov_data['catalog']['vm_name'] = vm_name
    prov_data['hardware']["disk_format"] = disk_format
    template_name = provider.data['provisioning']['template']
    vm = provisioner(template_name, prov_data)
    # Go to the VM info and refresh the details *before* reading the value.
    # Previously the value was read first and the page refreshed afterwards,
    # which made the refresh useless and could assert against stale data.
    vm.load_details(refresh=True)
    view = navigate_to(vm, 'Details')
    thin = view.entities.datastore_allocation.get_text_of('Thin Provisioning Used').strip().lower()
    if disk_format == "Thin":
        assert thin == 'true', "The disk format should be Thin"
    else:
        assert thin != 'true', "The disk format should not be Thin"
@pytest.mark.parametrize("started", [True, False])
def test_power_on_or_off_after_provision(provisioner, prov_data, provider, started, vm_name):
    """ Tests setting the desired power state after provisioning.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set whether you want or not the VM to be
            powered on after provisioning.
        * Submit the provisioning request and wait for it to finish.
        * The VM should become steady in the desired VM power state.

    Metadata:
        test_flag: provision
    """
    prov_data['catalog']['vm_name'] = vm_name
    prov_data['schedule']["power_on"] = started
    template_name = provider.data['provisioning']['template']
    provisioner(template_name, prov_data)

    def _vm_in_requested_state():
        # the VM must exist first, then settle into the requested power state
        if not provider.mgmt.does_vm_exist(vm_name):
            return False
        state_check = provider.mgmt.is_vm_running if started else provider.mgmt.is_vm_stopped
        return state_check(vm_name)

    wait_for(_vm_in_requested_state, num_sec=240, delay=5)
def test_tag(provisioner, prov_data, provider, vm_name):
    """ Tests tagging VMs using provisioning dialogs.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, pick a tag.
        * Submit the provisioning request and wait for it to finish.
        * Visit th page of VM, it should display the selected tags

    Metadata:
        test_flag: provision
    """
    prov_data['catalog']['vm_name'] = vm_name
    prov_data['purpose']["apply_tags"] = ("Service Level *", "Gold")
    template_name = provider.data['provisioning']['template']
    vm = provisioner(template_name, prov_data)
    tags = vm.get_tags()
    # look for the tag that was applied through the dialog
    tagged = False
    for tag in tags:
        if tag.category.display_name == "Service Level" and tag.display_name == "Gold":
            tagged = True
            break
    assert tagged, "Service Level: Gold not in tags ({})".format(tags)
@pytest.mark.meta(blockers=[1204115])
def test_provisioning_schedule(provisioner, provider, prov_data, vm_name):
    """ Tests provision scheduling.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set a scheduled provision and pick a time.
        * Submit the provisioning request, it should not start before the scheduled time.

    Metadata:
        test_flag: provision
    """
    now = datetime.utcnow()
    prov_data['catalog']['vm_name'] = vm_name
    prov_data['schedule']["schedule_type"] = "Schedule"
    prov_data['schedule']["provision_date"] = now.strftime("%m/%d/%Y")
    # schedule on the next 5-minute boundary...
    slot_minutes = 5
    minutes_ahead = slot_minutes - now.minute % slot_minutes
    # ...but leave some gap for the automation to submit the request
    if minutes_ahead <= 3:
        minutes_ahead += 5
    provision_time = now + timedelta(minutes=minutes_ahead)
    prov_data['schedule']["provision_start_hour"] = str(provision_time.hour)
    prov_data['schedule']["provision_start_min"] = str(provision_time.minute)
    template_name = provider.data['provisioning']['template']
    provisioner(template_name, prov_data, delayed=provision_time)
| quarckster/cfme_tests | cfme/tests/infrastructure/test_provisioning_dialog.py | Python | gpl-2.0 | 11,178 | [
"VisIt"
] | d7ae754cd3577758b25ef3f4887795284e40f671da4d470bf601e7b247ada866 |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for BLAST+ tab output format, with or without comments."""
import re
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio._py3k import basestring
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
__all__ = ['BlastTabIndexer', 'BlastTabParser', 'BlastTabWriter']
__docformat__ = "restructuredtext en"
# longname-shortname map
# maps the column names shown in a commented output to its short name
# (the one used in the command line)
# NOTE: BlastTabWriter._build_comments inverts this mapping to reproduce the
# long names in the '# Fields:' comment line, so entries must stay one-to-one
_LONG_SHORT_MAP = {
    'query id': 'qseqid',
    'query acc.': 'qacc',
    'query acc.ver': 'qaccver',
    'query length': 'qlen',
    'subject id': 'sseqid',
    'subject acc.': 'sacc',
    'subject acc.ver': 'saccver',
    'subject length': 'slen',
    'alignment length': 'length',
    'bit score': 'bitscore',
    'score': 'score',
    'evalue': 'evalue',
    'identical': 'nident',
    '% identity': 'pident',
    'positives': 'positive',
    '% positives': 'ppos',
    'mismatches': 'mismatch',
    'gaps': 'gaps',
    'q. start': 'qstart',
    'q. end': 'qend',
    's. start': 'sstart',
    's. end': 'send',
    'query frame': 'qframe',
    'sbjct frame': 'sframe',
    'query/sbjct frames': 'frames',
    'query seq': 'qseq',
    'subject seq': 'sseq',
    'gap opens': 'gapopen',
    'query gi': 'qgi',
    'subject ids': 'sallseqid',
    'subject gi': 'sgi',
    'subject gis': 'sallgi',
    'BTOP': 'btop',
    'subject accs.': 'sallacc',
    'subject tax ids': 'staxids',
    'subject sci names': 'sscinames',
    'subject com names': 'scomnames',
    'subject blast names': 'sblastnames',
    'subject super kingdoms': 'sskingdoms',
    'subject title': 'stitle',
    'subject titles': 'salltitles',
    'subject strand': 'sstrand',
    '% subject coverage': 'qcovs',
    '% hsp coverage': 'qcovhsp',
}
# function to create a list from semicolon-delimited string
# used in BlastTabParser._parse_result_row
_list_semicol = lambda x: x.split(';')
_list_diamond = lambda x: x.split('<>')
# column to class attribute map
_COLUMN_QRESULT = {
'qseqid': ('id', str),
'qacc': ('accession', str),
'qaccver': ('accession_version', str),
'qlen': ('seq_len', int),
'qgi': ('gi', str),
}
_COLUMN_HIT = {
'sseqid': ('id', str),
'sallseqid': ('id_all', _list_semicol),
'sacc': ('accession', str),
'saccver': ('accession_version', str),
'sallacc': ('accession_all', _list_semicol),
'sgi': ('gi', str),
'sallgi': ('gi_all', str),
'slen': ('seq_len', int),
'staxids': ('tax_ids', _list_semicol),
'sscinames': ('sci_names', _list_semicol),
'scomnames': ('com_names', _list_semicol),
'sblastnames': ('blast_names', _list_semicol),
'sskingdoms': ('super_kingdoms', _list_semicol),
'stitle': ('title', str),
'salltitles': ('title_all', _list_diamond),
# set strand as HSP property?
'sstrand': ('strand', str),
'qcovs': ('query_coverage', float),
}
_COLUMN_HSP = {
'bitscore': ('bitscore', float),
'score': ('bitscore_raw', int),
'evalue': ('evalue', float),
'nident': ('ident_num', int),
'pident': ('ident_pct', float),
'positive': ('pos_num', int),
'ppos': ('pos_pct', float),
'mismatch': ('mismatch_num', int),
'gaps': ('gap_num', int),
'gapopen': ('gapopen_num', int),
'btop': ('btop', str),
'qcovhsp': ('query_coverage', float),
}
_COLUMN_FRAG = {
'length': ('aln_span', int),
'qstart': ('query_start', int),
'qend': ('query_end', int),
'sstart': ('hit_start', int),
'send': ('hit_end', int),
'qframe': ('query_frame', int),
'sframe': ('hit_frame', int),
'frames': ('frames', str),
'qseq': ('query', str),
'sseq': ('hit', str),
}
_SUPPORTED_FIELDS = set(list(_COLUMN_QRESULT) + list(_COLUMN_HIT) +
list(_COLUMN_HSP) + list(_COLUMN_FRAG))
# column order in the non-commented tabular output variant
# values must be keys inside the column-attribute maps above
_DEFAULT_FIELDS = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
# one field from each of the following sets must exist in order for the
# parser to work
_MIN_QUERY_FIELDS = set(['qseqid', 'qacc', 'qaccver'])
_MIN_HIT_FIELDS = set(['sseqid', 'sacc', 'saccver', 'sallseqid'])
# simple function to create BLAST HSP attributes that may be computed if
# other certain attributes are present
# This was previously implemented in the HSP objects in the old model
_RE_GAPOPEN = re.compile(r'\w-')
def _compute_gapopen_num(hsp):
"""Returns the number of gap openings in the given HSP."""
gapopen = 0
for seq_type in ('query', 'hit'):
seq = str(getattr(hsp, seq_type).seq)
gapopen += len(re.findall(_RE_GAPOPEN, seq))
return gapopen
def _augment_blast_hsp(hsp, attr):
"""Calculates the given HSP attribute, for writing."""
if attr == 'aln_span':
# aln_span is number of identical matches + mismatches + gaps
func = lambda hsp: hsp.ident_num + hsp.mismatch_num + hsp.gap_num
# ident and gap will require the num values to be computed first
elif attr.startswith('ident'):
func = lambda hsp: hsp.aln_span - hsp.mismatch_num - hsp.gap_num
elif attr.startswith('gap'):
func = lambda hsp: hsp.aln_span - hsp.ident_num - hsp.mismatch_num
elif attr == 'mismatch_num':
func = lambda hsp: hsp.aln_span - hsp.ident_num - hsp.gap_num
elif attr == 'gapopen_num':
if not hasattr(hsp, 'query') or not hasattr(hsp, 'hit'):
# mock function so that the except clause below is triggered
# as both the query and hit are required to compute gapopen
def mock(hsp):
raise AttributeError
func = mock
else:
func = _compute_gapopen_num
# set the num values
# requires the endswith check, since we only want to set 'num' or 'span'
# attributes here
if not hasattr(hsp, attr) and not attr.endswith('_pct'):
value = func(hsp)
setattr(hsp, attr, value)
# if the attr is a percent value, calculate it
if attr == 'ident_pct':
func2 = lambda hsp: hsp.ident_num / float(hsp.aln_span) * 100
elif attr == 'pos_pct':
func = lambda hsp: hsp.pos_num / float(hsp.aln_span) * 100
elif attr == 'gap_pct':
func2 = lambda hsp: hsp.gap_num / float(hsp.aln_span) * 100
else:
func2 = None
# set the pct values
if func2 is not None:
value = func2(hsp)
setattr(hsp, attr, value)
class BlastTabParser(object):
    """Parser for the BLAST tabular format.

    Handles both the plain tabular output and the commented variant
    (pass ``comments=True`` for the latter).
    """

    def __init__(self, handle, comments=False, fields=_DEFAULT_FIELDS):
        """Initializes the parser.

        - handle: open handle to the BLAST tabular output
        - comments: whether the output contains '#'-prefixed comment lines
        - fields: column short names, as given on the BLAST+ command line
        """
        self.handle = handle
        self.has_comments = comments
        self.fields = self._prep_fields(fields)
        # prime the parser with the first line of the file
        self.line = self.handle.readline().strip()

    def __iter__(self):
        """Yields QueryResult objects parsed from the handle."""
        # stop iteration if file has no lines
        if not self.line:
            # fixed: this was ``raise StopIteration``, which inside a
            # generator becomes RuntimeError under PEP 479 (Python 3.7+);
            # a bare return ends the generator the same way on all versions
            return
        # determine which iterator to use
        elif self.has_comments:
            iterfunc = self._parse_commented_qresult
        else:
            iterfunc = self._parse_qresult
        for qresult in iterfunc():
            yield qresult

    def _prep_fields(self, fields):
        """Validates and formats the given fields for use by the parser."""
        # cast into list if fields is a space-separated string
        if isinstance(fields, basestring):
            fields = fields.strip().split(' ')
        # blast allows 'std' as a proxy for the standard default lists
        # we want to transform 'std' to its proper column names
        if 'std' in fields:
            idx = fields.index('std')
            fields = fields[:idx] + _DEFAULT_FIELDS + fields[idx+1:]
        # if set(fields) has a null intersection with minimum required
        # fields for hit and query, raise an exception
        if not set(fields).intersection(_MIN_QUERY_FIELDS) or \
                not set(fields).intersection(_MIN_HIT_FIELDS):
            raise ValueError("Required query and/or hit ID field not found.")
        return fields

    def _parse_commented_qresult(self):
        """Iterator returning `QueryResult` objects from a commented file."""
        while True:
            comments = self._parse_comments()
            if comments:
                try:
                    self.fields = comments['fields']
                    # iterator for the query results
                    qres_iter = self._parse_qresult()
                except KeyError:
                    # no fields means the query has no results
                    assert 'fields' not in comments
                    # create an iterator returning one empty qresult
                    # if the query has no results
                    qres_iter = iter([QueryResult()])
                for qresult in qres_iter:
                    for key, value in comments.items():
                        setattr(qresult, key, value)
                    yield qresult
            else:
                break

    def _parse_comments(self):
        """Returns a dictionary containing tab file comments."""
        comments = {}
        while True:
            # parse program and version
            # example: # BLASTX 2.2.26+
            if 'BLAST' in self.line and 'processed' not in self.line:
                program_line = self.line[len(' #'):].split(' ')
                comments['program'] = program_line[0].lower()
                comments['version'] = program_line[1]
            # parse query id and description (if available)
            # example: # Query: gi|356995852 Mus musculus POU domain
            elif 'Query' in self.line:
                query_line = self.line[len('# Query: '):].split(' ', 1)
                comments['id'] = query_line[0]
                if len(query_line) == 2:
                    comments['description'] = query_line[1]
            # parse target database
            # example: # Database: db/minirefseq_protein
            elif 'Database' in self.line:
                comments['target'] = self.line[len('# Database: '):]
            # parse RID (from remote searches)
            elif 'RID' in self.line:
                comments['rid'] = self.line[len('# RID: '):]
            # parse column order, required for parsing the result lines
            # example: # Fields: query id, query gi, query acc., query length
            elif 'Fields' in self.line:
                comments['fields'] = self._parse_fields_line()
            # if the line has these strings, it's either the end of a comment
            # or the end of a file, so we return all the comments we've parsed
            elif ' hits found' in self.line or 'processed' in self.line:
                self.line = self.handle.readline().strip()
                return comments
            self.line = self.handle.readline()
            if not self.line:
                return comments
            else:
                self.line = self.line.strip()

    def _parse_fields_line(self):
        """Returns a list of column short names from the 'Fields'
        comment line."""
        raw_field_str = self.line[len('# Fields: '):]
        long_fields = raw_field_str.split(', ')
        fields = [_LONG_SHORT_MAP[long_name] for long_name in long_fields]
        return self._prep_fields(fields)

    def _parse_result_row(self):
        """Returns a dictionary of parsed row values."""
        fields = self.fields
        columns = self.line.strip().split('\t')
        assert len(fields) == len(columns), "Expected %i columns, found: " \
            "%i" % (len(fields), len(columns))
        qresult, hit, hsp, frag = {}, {}, {}, {}
        for idx, value in enumerate(columns):
            sname = fields[idx]
            # flag to check if any of the _COLUMNs contain sname
            in_mapping = False
            # iterate over each dict, mapping pair to determine
            # attribute name and value of each column
            for parsed_dict, mapping in (
                    (qresult, _COLUMN_QRESULT),
                    (hit, _COLUMN_HIT),
                    (hsp, _COLUMN_HSP),
                    (frag, _COLUMN_FRAG)):
                # process parsed value according to mapping
                if sname in mapping:
                    attr_name, caster = mapping[sname]
                    if caster is not str:
                        value = caster(value)
                    parsed_dict[attr_name] = value
                    in_mapping = True
            # make sure that any unhandled field is not supported
            if not in_mapping:
                assert sname not in _SUPPORTED_FIELDS
        return {'qresult': qresult, 'hit': hit, 'hsp': hsp, 'frag': frag}

    def _get_id(self, parsed):
        """Returns the value used for a QueryResult or Hit ID from a parsed row."""
        # use 'id', with 'id_all', 'accession' and 'accession_version'
        # fallbacks one of these must have a value since we've checked whether
        # they exist or not when parsing the comments
        id_cache = parsed.get('id')
        if id_cache is None and 'id_all' in parsed:
            id_cache = parsed.get('id_all')[0]
        if id_cache is None:
            id_cache = parsed.get('accession')
        if id_cache is None:
            id_cache = parsed.get('accession_version')
        return id_cache

    def _parse_qresult(self):
        """Generator function that returns QueryResult objects.

        Implemented as a one-line-lookahead state machine: each parsed row is
        held until the next row reveals whether it closes the current hit
        and/or query result.
        """
        # state values, used to determine what to do with each line
        state_EOF = 0
        state_QRES_NEW = 1
        state_QRES_SAME = 3
        state_HIT_NEW = 2
        state_HIT_SAME = 4
        # dummies for initial states
        qres_state = None
        hit_state = None
        file_state = None
        # dummies for initial id caches
        prev_qid = None
        prev_hid = None
        # dummies for initial parsed value containers
        cur, prev = None, None
        hit_list, hsp_list = [], []
        while True:
            # store previous line's parsed values if we've past the first line
            if cur is not None:
                prev = cur
                prev_qid = cur_qid
                prev_hid = cur_hid
            # only parse the line if it's not EOF or not a comment line
            if self.line and not self.line.startswith('#'):
                cur = self._parse_result_row()
                cur_qid = self._get_id(cur['qresult'])
                cur_hid = self._get_id(cur['hit'])
            else:
                file_state = state_EOF
                # mock values for cur_qid and cur_hid since the line is empty
                cur_qid, cur_hid = None, None
            # get the state of hit and qresult
            if prev_qid != cur_qid:
                qres_state = state_QRES_NEW
            else:
                qres_state = state_QRES_SAME
            # new hits are hits with different id or hits in a new qresult
            if prev_hid != cur_hid or qres_state == state_QRES_NEW:
                hit_state = state_HIT_NEW
            else:
                hit_state = state_HIT_SAME
            # we're creating objects for the previously parsed line(s),
            # so nothing is done in the first parsed line (prev == None)
            if prev is not None:
                # every line is essentially an HSP with one fragment, so we
                # create both of these for every line
                frag = HSPFragment(prev_hid, prev_qid)
                for attr, value in prev['frag'].items():
                    # adjust coordinates to Python range
                    # NOTE: this requires both start and end coords to be
                    # present, otherwise a KeyError will be raised.
                    # Without this limitation, we might misleadingly set the
                    # start / end coords
                    for seq_type in ('query', 'hit'):
                        if attr == seq_type + '_start':
                            value = min(value,
                                        prev['frag'][seq_type + '_end']) - 1
                        elif attr == seq_type + '_end':
                            value = max(value,
                                        prev['frag'][seq_type + '_start'])
                    setattr(frag, attr, value)
                # strand and frame setattr require the full parsed values
                # to be set first
                for seq_type in ('hit', 'query'):
                    # try to set hit and query frame
                    frame = self._get_frag_frame(frag, seq_type,
                                                 prev['frag'])
                    setattr(frag, '%s_frame' % seq_type, frame)
                    # try to set hit and query strand
                    strand = self._get_frag_strand(frag, seq_type,
                                                   prev['frag'])
                    setattr(frag, '%s_strand' % seq_type, strand)
                hsp = HSP([frag])
                for attr, value in prev['hsp'].items():
                    setattr(hsp, attr, value)
                hsp_list.append(hsp)
                # create hit and append to temp hit container if hit_state
                # says we're not at the same hit or at a new query
                if hit_state == state_HIT_NEW:
                    hit = Hit(hsp_list)
                    for attr, value in prev['hit'].items():
                        if attr != 'id_all':
                            setattr(hit, attr, value)
                        else:
                            # not setting hit ID since it's already set from the
                            # prev_hid above
                            setattr(hit, '_id_alt', value[1:])
                    hit_list.append(hit)
                    hsp_list = []
                # create qresult and yield if we're at a new qresult or EOF
                if qres_state == state_QRES_NEW or file_state == state_EOF:
                    qresult = QueryResult(hit_list, prev_qid)
                    for attr, value in prev['qresult'].items():
                        setattr(qresult, attr, value)
                    yield qresult
                    # if current line is EOF, break
                    if file_state == state_EOF:
                        break
                    hit_list = []
            self.line = self.handle.readline().strip()

    def _get_frag_frame(self, frag, seq_type, parsedict):
        """Returns `HSPFragment` frame given the object, its sequence type,
        and its parsed dictionary values."""
        assert seq_type in ('query', 'hit')
        frame = getattr(frag, '%s_frame' % seq_type, None)
        if frame is not None:
            return frame
        else:
            if 'frames' in parsedict:
                # frames is 'x1/x2' string, x1 is query frame, x2 is hit frame
                idx = 0 if seq_type == 'query' else 1
                return int(parsedict['frames'].split('/')[idx])
            # else implicit None return

    def _get_frag_strand(self, frag, seq_type, parsedict):
        """Returns `HSPFragment` strand given the object, its sequence type,
        and its parsed dictionary values."""
        # NOTE: this will never set the strands as 0 for protein
        # queries / hits, since we can't detect the blast flavors
        # from the columns alone.
        assert seq_type in ('query', 'hit')
        strand = getattr(frag, '%s_strand' % seq_type, None)
        if strand is not None:
            return strand
        else:
            # using parsedict instead of the fragment object since
            # we need the unadjusted coordinated values
            start = parsedict.get('%s_start' % seq_type)
            end = parsedict.get('%s_end' % seq_type)
            if start is not None and end is not None:
                return 1 if start <= end else -1
            # else implicit None return
class BlastTabIndexer(SearchIndexer):
    """Indexer class for BLAST+ tab output."""

    # parser class used when materializing an indexed QueryResult
    _parser = BlastTabParser

    def __init__(self, filename, comments=False, fields=_DEFAULT_FIELDS):
        """Initializes the indexer; arguments mirror BlastTabParser."""
        SearchIndexer.__init__(self, filename, comments=comments, fields=fields)
        # if the file doesn't have comments,
        # get index of column used as the key (qseqid / qacc / qaccver)
        if not self._kwargs['comments']:
            if 'qseqid' in fields:
                self._key_idx = fields.index('qseqid')
            elif 'qacc' in fields:
                self._key_idx = fields.index('qacc')
            elif 'qaccver' in fields:
                self._key_idx = fields.index('qaccver')
            else:
                raise ValueError("Custom fields is missing an ID column. "
                                 "One of these must be present: 'qseqid', 'qacc', or 'qaccver'.")

    def __iter__(self):
        """Iterates over the file handle; yields key, start offset, and length."""
        handle = self._handle
        handle.seek(0)
        # commented and plain outputs need different offset-scanning logic
        if not self._kwargs['comments']:
            iterfunc = self._qresult_index
        else:
            iterfunc = self._qresult_index_commented
        for key, offset, length in iterfunc():
            yield _bytes_to_string(key), offset, length

    def _qresult_index_commented(self):
        """Indexer for commented BLAST tabular files."""
        handle = self._handle
        handle.seek(0)
        start_offset = 0
        # mark of a new query: the first line of the file (program banner),
        # which repeats at the start of every query block
        query_mark = None
        # mark of the query's ID
        qid_mark = _as_bytes('# Query: ')
        # mark of the last line
        end_mark = _as_bytes('# BLAST processed')
        while True:
            end_offset = handle.tell()
            line = handle.readline()
            if query_mark is None:
                query_mark = line
                start_offset = end_offset
            elif line.startswith(qid_mark):
                qresult_key = line[len(qid_mark):].split()[0]
            elif line == query_mark or line.startswith(end_mark):
                yield qresult_key, start_offset, end_offset - start_offset
                start_offset = end_offset
            elif not line:
                break

    def _qresult_index(self):
        """Indexer for noncommented BLAST tabular files."""
        handle = self._handle
        handle.seek(0)
        start_offset = 0
        qresult_key = None
        key_idx = self._key_idx
        tab_char = _as_bytes('\t')
        while True:
            # get end offset here since we only know a qresult ends after
            # encountering the next one
            end_offset = handle.tell()
            line = handle.readline()
            if qresult_key is None:
                qresult_key = line.split(tab_char)[key_idx]
            else:
                try:
                    curr_key = line.split(tab_char)[key_idx]
                except IndexError:
                    # EOF / short line: use an empty key to force a flush
                    curr_key = _as_bytes('')
                if curr_key != qresult_key:
                    yield qresult_key, start_offset, end_offset - start_offset
                    qresult_key = curr_key
                    start_offset = end_offset
            # break if we've reached EOF
            if not line:
                break

    def get_raw(self, offset):
        """Returns the raw string of a QueryResult object from the given offset."""
        if self._kwargs['comments']:
            getfunc = self._get_raw_qresult_commented
        else:
            getfunc = self._get_raw_qresult
        return getfunc(offset)

    def _get_raw_qresult(self, offset):
        """Returns the raw string of a single QueryResult from a noncommented file."""
        handle = self._handle
        handle.seek(offset)
        qresult_raw = _as_bytes('')
        tab_char = _as_bytes('\t')
        key_idx = self._key_idx
        qresult_key = None
        while True:
            line = handle.readline()
            # get the key if the first line (qresult key)
            if qresult_key is None:
                qresult_key = line.split(tab_char)[key_idx]
            else:
                try:
                    curr_key = line.split(tab_char)[key_idx]
                except IndexError:
                    curr_key = _as_bytes('')
                # only break when qresult is finished (key is different)
                if curr_key != qresult_key:
                    break
            # append to the raw string as long as qresult is the same
            qresult_raw += line
        return qresult_raw

    def _get_raw_qresult_commented(self, offset):
        """Returns the raw string of a single QueryResult from a commented file."""
        handle = self._handle
        handle.seek(offset)
        qresult_raw = _as_bytes('')
        end_mark = _as_bytes('# BLAST processed')
        # query mark is the line marking a new query
        # something like '# TBLASTN 2.2.25+'
        query_mark = None
        line = handle.readline()
        while line:
            # since query_mark depends on the BLAST search, we need to obtain it
            # first
            if query_mark is None:
                query_mark = line
            # break when we've reached the next qresult or the search ends
            elif line == query_mark or line.startswith(end_mark):
                break
            qresult_raw += line
            line = handle.readline()
        return qresult_raw
class BlastTabWriter(object):
    """Writer for blast-tab output format."""

    def __init__(self, handle, comments=False, fields=_DEFAULT_FIELDS):
        # handle: writable file-like object
        # comments: whether to emit the '#'-prefixed comment blocks
        # fields: column short names controlling output columns and order
        self.handle = handle
        self.has_comments = comments
        self.fields = fields

    def write_file(self, qresults):
        """Writes to the handle, returns how many QueryResult objects are written.

        Returns a 4-tuple of (qresult, hit, hsp, fragment) counts.
        """
        handle = self.handle
        qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
        for qresult in qresults:
            if self.has_comments:
                handle.write(self._build_comments(qresult))
            if qresult:
                handle.write(self._build_rows(qresult))
                if not self.has_comments:
                    qresult_counter += 1
                hit_counter += len(qresult)
                hsp_counter += sum(len(hit) for hit in qresult)
                frag_counter += sum(len(hit.fragments) for hit in qresult)
            # if it's commented and there are no hits in the qresult, we still
            # increment the counter
            if self.has_comments:
                qresult_counter += 1
        # commented files have a line saying how many queries were processed
        if self.has_comments:
            handle.write('# BLAST processed %i queries' % qresult_counter)
        return qresult_counter, hit_counter, hsp_counter, frag_counter

    def _build_rows(self, qresult):
        """Returns a string containing tabular rows of the QueryResult object."""
        # these columns need strand-aware coordinate adjustment below
        coordinates = set(['qstart', 'qend', 'sstart', 'send'])
        qresult_lines = ''
        for hit in qresult:
            for hsp in hit:
                line = []
                for field in self.fields:
                    # get the column value ~ could either be an attribute
                    # of qresult, hit, or hsp
                    if field in _COLUMN_QRESULT:
                        value = getattr(qresult, _COLUMN_QRESULT[field][0])
                    elif field in _COLUMN_HIT:
                        if field == 'sallseqid':
                            value = getattr(hit, 'id_all')
                        else:
                            value = getattr(hit, _COLUMN_HIT[field][0])
                    # special case, since 'frames' can be determined from
                    # query frame and hit frame
                    elif field == 'frames':
                        value = '%i/%i' % (hsp.query_frame, hsp.hit_frame)
                    elif field in _COLUMN_HSP:
                        try:
                            value = getattr(hsp, _COLUMN_HSP[field][0])
                        except AttributeError:
                            # attribute missing: derive it from the others
                            attr = _COLUMN_HSP[field][0]
                            _augment_blast_hsp(hsp, attr)
                            value = getattr(hsp, attr)
                    elif field in _COLUMN_FRAG:
                        value = getattr(hsp, _COLUMN_FRAG[field][0])
                    else:
                        assert field not in _SUPPORTED_FIELDS
                        continue
                    # adjust from and to according to strand, if from and to
                    # is included in the output field
                    if field in coordinates:
                        value = self._adjust_coords(field, value, hsp)
                    # adjust output formatting
                    value = self._adjust_output(field, value)
                    line.append(value)
                hsp_line = '\t'.join(line)
                qresult_lines += hsp_line + '\n'
        return qresult_lines

    def _adjust_coords(self, field, value, hsp):
        """Adjusts start and end coordinates according to strand."""
        assert field in ('qstart', 'qend', 'sstart', 'send')
        # determine sequence type to operate on based on field's first letter
        seq_type = 'query' if field.startswith('q') else 'hit'
        strand = getattr(hsp, '%s_strand' % seq_type, None)
        if strand is None:
            raise ValueError("Required attribute %r not found." %
                             ('%s_strand' % (seq_type)))
        # switch start <--> end coordinates if strand is -1
        if strand < 0:
            if field.endswith('start'):
                value = getattr(hsp, '%s_end' % seq_type)
            elif field.endswith('end'):
                value = getattr(hsp, '%s_start' % seq_type) + 1
        elif field.endswith('start'):
            # adjust start coordinate for positive strand
            value += 1
        return value

    def _adjust_output(self, field, value):
        """Adjusts formatting of the given field and value to mimic native tab output."""
        # qseq and sseq are stored as SeqRecord, but here we only need the str
        if field in ('qseq', 'sseq'):
            value = str(value.seq)
        # evalue formatting, adapted from BLAST+ source:
        # src/objtools/align_format/align_format_util.cpp#L668
        elif field == 'evalue':
            if value < 1.0e-180:
                value = '0.0'
            elif value < 1.0e-99:
                value = '%2.0e' % value
            elif value < 0.0009:
                value = '%3.0e' % value
            elif value < 0.1:
                value = '%4.3f' % value
            elif value < 1.0:
                value = '%3.2f' % value
            elif value < 10.0:
                value = '%2.1f' % value
            else:
                value = '%5.0f' % value
        # pident and ppos formatting
        elif field in ('pident', 'ppos'):
            value = '%.2f' % value
        # evalue formatting, adapted from BLAST+ source:
        # src/objtools/align_format/align_format_util.cpp#L723
        elif field == 'bitscore':
            if value > 9999:
                value = '%4.3e' % value
            elif value > 99.9:
                value = '%4.0d' % value
            else:
                value = '%4.1f' % value
        # coverages have no comma (using floats still ~ a more proper
        # representation)
        elif field in ('qcovhsp', 'qcovs'):
            value = '%.0f' % value
        # list into '<>'-delimited string
        elif field == 'salltitles':
            value = '<>'.join(value)
        # list into ';'-delimited string
        elif field in ('sallseqid', 'sallacc', 'staxids', 'sscinames',
                       'scomnames', 'sblastnames', 'sskingdoms'):
            value = ';'.join(value)
        # everything else
        else:
            value = str(value)
        return value

    def _build_comments(self, qres):
        """Returns a string of a QueryResult tabular comment."""
        comments = []
        # inverse mapping of the long-short name map, required
        # for writing comments
        inv_field_map = dict((v, k) for k, v in _LONG_SHORT_MAP.items())
        # try to anticipate qress without version
        if not hasattr(qres, 'version'):
            program_line = '# %s' % qres.program.upper()
        else:
            program_line = '# %s %s' % (qres.program.upper(), qres.version)
        comments.append(program_line)
        # description may or may not be None
        if qres.description is None:
            comments.append('# Query: %s' % qres.id)
        else:
            comments.append('# Query: %s %s' % (qres.id, qres.description))
        # try appending RID line, if present
        try:
            comments.append('# RID: %s' % qres.rid)
        except AttributeError:
            pass
        comments.append('# Database: %s' % qres.target)
        # qresults without hits don't show the Fields comment
        if qres:
            comments.append('# Fields: %s' %
                            ', '.join(inv_field_map[field] for field in self.fields))
        comments.append('# %i hits found' % len(qres))
        return '\n'.join(comments) + '\n'
# if not used as a module, run the doctest
if __name__ == "__main__":
    # Bio._utils.run_doctest executes this module's doctests when run directly
    from Bio._utils import run_doctest
    run_doctest()
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SearchIO/BlastIO/blast_tab.py | Python | gpl-2.0 | 33,684 | [
"BLAST",
"Biopython"
] | ee0a236d39182344f2bf5280d5b38bc1ec5134abd23e2f7928bf86be1e282871 |
from itertools import chain, islice
from visitor import Visitor
from expr import ExpresionOut
from stmt import StatementOut
from op import OperatorOut
from slice import SliceOut
class ExcepthandlerOut(Visitor):
    """Renders an ``excepthandler`` AST node back into source lines."""
    @classmethod
    def visit_ExceptHandler(self, ast, *a, **k):
        # gather the optional exception type and "as" name clause parts
        clause_parts = []
        if ast.type:
            clause_parts.append(ExpresionOut.visit(ast.type, *a, **k))
        if ast.name:
            clause_parts.append(ExpresionOut.visit(ast.name, *a, **k))
        header = "except %s:" % ", ".join(clause_parts)
        # the handler is its header line followed by the rendered body lines
        return [header] + StatementOut.handle_body(ast.body, *a, **k)
class Out(Visitor):
    """Entry point: renders a whole module AST into source text."""
    @classmethod
    def visit_Module(self, ast, *a, **k):
        # flatten the per-statement line lists and join into a single string
        lines = []
        for node in ast.body:
            lines.extend(StatementOut.visit(node, *a, **k))
        return "\n".join(lines)
if __name__ == "__main__":
    from ast import parse
    # tiny smoke test: emit source for a one-statement module
    code = "global a"
    # use the function-call form of print so this runs under both Python 2
    # (prints the parenthesized expression) and Python 3; the bare
    # ``print x`` statement form is a SyntaxError on Python 3
    print(Out.visit(parse(code)))
| Neppord/py2py | py2py_lib/out/__init__.py | Python | mit | 858 | [
"VisIt"
] | ef2771f4b23416be29f2ae1c4595a9c170a1a640b5b7fa0b050b0b07c220e52d |
# -*- coding: utf-8 -*-
"""
Acceptance tests for studio related to the outline page.
"""
import itertools
import json
from datetime import datetime, timedelta
from unittest import skip
from pytz import UTC
from base_studio_test import StudioCourseTest
from common.test.acceptance.fixtures.config import ConfigModelFixture
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.studio.overview import ContainerPage, CourseOutlinePage, ExpandCollapseLinkState
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.checklists import CourseChecklistsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.utils import add_discussion, drag, verify_ordering
from common.test.acceptance.tests.helpers import disable_animations, load_data_str
from openedx.core.lib.tests import attr
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
    """
    Base class for all course outline tests
    """

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CourseOutlineTest, self).setUp()
        # Page objects shared by all outline tests; course identity comes
        # from the fixture installed by StudioCourseTest.
        self.course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        self.advanced_settings = AdvancedSettingsPage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )

    def populate_course_fixture(self, course_fixture):
        """ Install a course with sections/problems, tabs, updates, and handouts """
        # One chapter -> one sequential -> one vertical containing a problem,
        # an HTML component, and a discussion component.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
                        XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                        XBlockFixtureDesc('html', 'Test HTML Component'),
                        XBlockFixtureDesc('discussion', 'Test Discussion Component')
                    )
                )
            )
        )

    def do_action_and_verify(self, outline_page, action, expected_ordering):
        """
        Perform the supplied action and then verify the resulting ordering.

        Arguments:
            outline_page: an already-visited outline page, or None to visit it here.
            action: callable taking the outline page; performs the change under test.
            expected_ordering: ordering spec understood by ``verify_ordering``.
        """
        if outline_page is None:
            outline_page = self.course_outline_page.visit()
        action(outline_page)
        verify_ordering(self, outline_page, expected_ordering)
        # Reload the page and expand all subsections to see that the change was persisted.
        outline_page = self.course_outline_page.visit()
        outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
        verify_ordering(self, outline_page, expected_ordering)
@attr(shard=3)
class CourseOutlineDragAndDropTest(CourseOutlineTest):
    """
    Tests of drag and drop within the outline page.
    """
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Create a course with one section, two subsections, and four units
        """
        # Positional indices of the draggable handles on the outline page;
        # they depend on which subsections are expanded when dragging starts.
        # with collapsed outline
        self.chap_1_handle = 0
        self.chap_1_seq_1_handle = 1
        # with first sequential expanded
        self.seq_1_vert_1_handle = 2
        self.seq_1_vert_2_handle = 3
        self.chap_1_seq_2_handle = 4
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', "1").add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )

    def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
        # Drag the handle at index ``source`` onto the handle at index
        # ``target``, then verify (and re-verify after reload) the ordering.
        self.do_action_and_verify(
            outline_page,
            lambda outline: drag(outline, source, target),
            expected_ordering
        )

    @skip("Fails in Firefox 45 but passes in Chrome")
    def test_drop_unit_in_collapsed_subsection(self):
        """
        Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already
        have its own verticals.
        """
        course_outline_page = self.course_outline_page.visit()
        # expand first subsection
        course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()
        expected_ordering = [{"1": ["1.1", "1.2"]},
                             {"1.1": ["1.1.1"]},
                             {"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
        self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr(shard=3)
class WarningMessagesTest(CourseOutlineTest):
    """
    Feature: Warning messages on sections, subsections, and units
    """
    __test__ = True

    # Exact warning strings rendered by the outline page.
    STAFF_ONLY_WARNING = 'Contains staff only content'
    LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
    FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
    NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'

    class PublishState(object):
        """
        Default values for representing the published state of a unit
        """
        NEVER_PUBLISHED = 1
        UNPUBLISHED_CHANGES = 2
        PUBLISHED = 3
        VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]

    class UnitState(object):
        """ Represents the state of a unit """

        def __init__(self, is_released, publish_state, is_locked):
            """ Creates a new UnitState with the given properties """
            self.is_released = is_released
            self.publish_state = publish_state
            self.is_locked = is_locked

        @property
        def name(self):
            """ Returns an appropriate name based on the properties of the unit """
            # The name doubles as the section/subsection/unit display name in
            # the fixture, so each state combination gets a unique outline item.
            result = "Released " if self.is_released else "Unreleased "
            if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
                result += "Never Published "
            elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
                result += "Unpublished Changes "
            else:
                result += "Published "
            result += "Locked" if self.is_locked else "Unlocked"
            return result

    def populate_course_fixture(self, course_fixture):
        """ Install a course with various configurations that could produce warning messages """
        # Define the dimensions that map to the UnitState constructor
        features = [
            [True, False],  # Possible values for is_released
            self.PublishState.VALUES,  # Possible values for publish_state
            [True, False]  # Possible values for is_locked
        ]
        # Add a fixture for every state in the product of features
        course_fixture.add_children(*[
            self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
        ])

    def _build_fixture(self, unit_state):
        """ Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
        name = unit_state.name
        # Past date for released items, tomorrow for unreleased ones.
        start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
        subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
        # Children of never published subsections will be added on demand via _ensure_unit_present
        return XBlockFixtureDesc('chapter', name).add_children(
            subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
            else subsection.add_children(
                XBlockFixtureDesc('vertical', name, metadata={
                    'visible_to_staff_only': True if unit_state.is_locked else None
                })
            )
        )

    def test_released_never_published_locked(self):
        """ Tests that released never published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_released_never_published_unlocked(self):
        """ Tests that released never published unlocked units display 'Unpublished units will not be released' """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
            self.NEVER_PUBLISHED_WARNING
        )

    def test_released_unpublished_changes_locked(self):
        """ Tests that released unpublished changes locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_released_unpublished_changes_unlocked(self):
        """ Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
            self.LIVE_UNPUBLISHED_WARNING
        )

    def test_released_published_locked(self):
        """ Tests that released published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_released_published_unlocked(self):
        """ Tests that released published unlocked units display no warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
            None
        )

    def test_unreleased_never_published_locked(self):
        """ Tests that unreleased never published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_unreleased_never_published_unlocked(self):
        """ Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
            self.NEVER_PUBLISHED_WARNING
        )

    def test_unreleased_unpublished_changes_locked(self):
        """ Tests that unreleased unpublished changes locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_unreleased_unpublished_changes_unlocked(self):
        """
        Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
        release in the future'
        """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
            self.FUTURE_UNPUBLISHED_WARNING
        )

    def test_unreleased_published_locked(self):
        """ Tests that unreleased published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_unreleased_published_unlocked(self):
        """ Tests that unreleased published unlocked units display no warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
            None
        )

    def _verify_unit_warning(self, unit_state, expected_status_message):
        """
        Verifies that the given unit's messages match the expected messages.
        If expected_status_message is None, then the unit status message is expected to not be present.
        """
        self._ensure_unit_present(unit_state)
        self.course_outline_page.visit()
        section = self.course_outline_page.section(unit_state.name)
        subsection = section.subsection_at(0)
        subsection.expand_subsection()
        unit = subsection.unit_at(0)
        if expected_status_message == self.STAFF_ONLY_WARNING:
            # The staff-only warning propagates up to the subsection and section.
            self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
            self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
            self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
        else:
            # Other warnings appear only on the unit itself.
            self.assertFalse(section.has_status_message)
            self.assertFalse(subsection.has_status_message)
            if expected_status_message:
                self.assertEqual(unit.status_message, expected_status_message)
            else:
                self.assertFalse(unit.has_status_message)

    def _ensure_unit_present(self, unit_state):
        """ Ensures that a unit with the given state is present on the course outline """
        if unit_state.publish_state == self.PublishState.PUBLISHED:
            return

        name = unit_state.name
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(name).subsection(name)
        subsection.expand_subsection()

        if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
            # Create a draft on top of the published fixture unit.
            unit = subsection.unit(name).go_to()
            add_discussion(unit)
        elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
            # The fixture deliberately omitted the unit; add it now, unpublished.
            subsection.add_unit()
            unit = ContainerPage(self.browser, None)
            unit.wait_for_page()

        if unit.is_staff_locked != unit_state.is_locked:
            unit.toggle_staff_lock()
@attr(shard=3)
class EditingSectionsTest(CourseOutlineTest):
    """
    Feature: Editing Release date, Due date and grading type.
    """
    __test__ = True

    def test_can_edit_subsection(self):
        """
        Scenario: I can edit settings of subsection.

            Given that I have created a subsection
            Then I see release date, due date and grading policy of subsection in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date, due date and grading policy fields present
            And they have correct initial values
            Then I set new values for these fields
            And I click save button on the modal
            Then I see release date, due date and grading policy of subsection in course outline
        """
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)

        # Verify that Release date visible by default
        self.assertTrue(subsection.release_date)
        # Verify that Due date and Policy hidden by default
        self.assertFalse(subsection.due_date)
        self.assertFalse(subsection.policy)

        modal = subsection.edit()

        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertTrue(modal.has_release_time())
        self.assertTrue(modal.has_due_date())
        self.assertTrue(modal.has_due_time())
        self.assertTrue(modal.has_policy())

        # Verify initial values
        self.assertEqual(modal.release_date, u'1/1/1970')
        self.assertEqual(modal.release_time, u'00:00')
        self.assertEqual(modal.due_date, u'')
        self.assertEqual(modal.due_time, u'')
        self.assertEqual(modal.policy, u'Not Graded')

        # Set new values
        modal.release_date = '3/12/1972'
        modal.release_time = '04:01'
        modal.due_date = '7/21/2014'
        modal.due_time = '23:39'
        modal.policy = 'Lab'
        modal.save()

        self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
        self.assertIn(u'04:01', subsection.release_date)
        self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
        self.assertIn(u'23:39', subsection.due_date)
        self.assertIn(u'Lab', subsection.policy)

    def test_can_edit_section(self):
        """
        Scenario: I can edit settings of section.

            Given that I have created a section
            Then I see release date of section in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date field present
            And it has correct initial value
            Then I set new value for this field
            And I click save button on the modal
            Then I see release date of section in course outline
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)

        # Verify that Release date visible by default
        self.assertTrue(section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)

        modal = section.edit()
        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        self.assertFalse(modal.has_policy())

        # Verify initial value
        self.assertEqual(modal.release_date, u'1/1/1970')

        # Set new value
        modal.release_date = '5/14/1969'
        modal.save()

        self.assertIn(u'Released: May 14, 1969', section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)

    def test_subsection_is_graded_in_lms(self):
        """
        Scenario: I can grade subsection from course outline page.

            Given I visit progress page
            And I see that problem in subsection has grading type "Practice"
            Then I visit course outline page
            And I click on the configuration icon of subsection
            And I set grading policy to "Lab"
            And I click save button on the modal
            Then I visit progress page
            And I see that problem in subsection has grading type "Problem"
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        progress_page.visit()
        progress_page.wait_for_page()
        self.assertEqual(u'Practice', progress_page.grading_formats[0])
        self.course_outline_page.visit()

        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        # Set new values
        modal.policy = 'Lab'
        modal.save()

        progress_page.visit()
        self.assertEqual(u'Problem', progress_page.grading_formats[0])

    def test_unchanged_release_date_is_not_saved(self):
        """
        Scenario: Saving a subsection without changing the release date will not override the release date

            Given that I have created a section with a subsection
            When I open the settings modal for the subsection
            And I press save
            And I open the settings modal for the section
            And I change the release date to 07/20/1969
            And I press save
            Then the subsection and the section have the release date 07/20/1969
        """
        self.course_outline_page.visit()
        # Save the subsection's modal without touching its (inherited) release date.
        modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
        modal.save()
        modal = self.course_outline_page.section_at(0).edit()
        modal.release_date = '7/20/1969'
        modal.save()
        release_text = 'Released: Jul 20, 1969'
        self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
        # The subsection still inherits the section date — the earlier save did not pin it.
        self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr(shard=3)
class UnitAccessTest(CourseOutlineTest):
    """
    Feature: Units can be restricted and unrestricted to certain groups from the course outline.
    """
    __test__ = True

    def setUp(self):
        # Create two named content groups up front so tests can restrict
        # units to them; their generated ids are saved for later use.
        super(UnitAccessTest, self).setUp()
        self.group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.content_group_a = "Test Group A"
        self.content_group_b = "Test Group B"
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config_a = self.group_configurations_page.content_groups[0]
        config_a.name = self.content_group_a
        config_a.save()
        self.content_group_a_id = config_a.id
        self.group_configurations_page.add_content_group()
        config_b = self.group_configurations_page.content_groups[1]
        config_b.name = self.content_group_b
        config_b.save()
        self.content_group_b_id = config_b.id

    def populate_course_fixture(self, course_fixture):
        """
        Create a course with one section, one subsection, and two units
        """
        # NOTE(review): these handle indices look copied from
        # CourseOutlineDragAndDropTest; the fixture below has only ONE
        # sequential, so chap_1_seq_2_handle (and the "first sequential
        # expanded" grouping) appear stale — confirm before relying on them.
        # with collapsed outline
        self.chap_1_handle = 0
        self.chap_1_seq_1_handle = 1
        # with first sequential expanded
        self.seq_1_vert_1_handle = 2
        self.seq_1_vert_2_handle = 3
        self.chap_1_seq_2_handle = 4
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', "1").add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                )
            )
        )
@attr(shard=14)
class StaffLockTest(CourseOutlineTest):
    """
    Feature: Sections, subsections, and units can be locked and unlocked from the course outline.
    """
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Create a course with one section, two subsections, and four units """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', '1').add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )

    def _verify_descendants_are_staff_only(self, item):
        """Verifies that all the descendants of item are staff only"""
        self.assertTrue(item.is_staff_only)
        if hasattr(item, 'children'):
            for child in item.children():
                self._verify_descendants_are_staff_only(child)

    def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
        """Removes staff lock from a course outline item and checks whether or not a warning appears."""
        modal = outline_item.edit()
        modal.is_explicitly_locked = False
        if expect_warning:
            self.assertTrue(modal.shows_staff_lock_warning())
        else:
            self.assertFalse(modal.shows_staff_lock_warning())
        modal.save()

    def _toggle_lock_on_unlocked_item(self, outline_item):
        """Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
        self.assertFalse(outline_item.has_staff_lock_warning)
        outline_item.set_staff_lock(True)
        self.assertTrue(outline_item.has_staff_lock_warning)
        # Locking an item should lock everything beneath it.
        self._verify_descendants_are_staff_only(outline_item)
        outline_item.set_staff_lock(False)
        self.assertFalse(outline_item.has_staff_lock_warning)

    def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
        """Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
        child_item.set_staff_lock(True)
        parent_item.set_staff_lock(True)
        self.assertTrue(parent_item.has_staff_lock_warning)
        self.assertTrue(child_item.has_staff_lock_warning)
        parent_item.set_staff_lock(False)
        self.assertFalse(parent_item.has_staff_lock_warning)
        # The child's own (explicit) lock must survive the parent's unlock.
        self.assertTrue(child_item.has_staff_lock_warning)

    def test_units_can_be_locked(self):
        """
        Scenario: Units can be locked and unlocked from the course outline page

            Given I have a course with a unit
            When I click on the configuration icon
            And I enable explicit staff locking
            And I click save
            Then the unit shows a staff lock warning
            And when I click on the configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        self._toggle_lock_on_unlocked_item(unit)

    def test_subsections_can_be_locked(self):
        """
        Scenario: Subsections can be locked and unlocked from the course outline page

            Given I have a course with a subsection
            When I click on the subsection's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the subsection shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        self._toggle_lock_on_unlocked_item(subsection)

    def test_sections_can_be_locked(self):
        """
        Scenario: Sections can be locked and unlocked from the course outline page

            Given I have a course with a section
            When I click on the section's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the section shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        self._toggle_lock_on_unlocked_item(section)

    def test_explicit_staff_lock_remains_after_unlocking_section(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section

            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a section and one of its units
            When I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        unit = section.subsection_at(0).unit_at(0)
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)

    def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection

            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a subsection and one of its units
            When I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)

    def test_section_displays_lock_when_all_subsections_locked(self):
        """
        Scenario: All subsections in section are explicitly locked, section should display staff only warning

            Given I have a course one section and two subsections
            When I enable explicit staff lock on all the subsections
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        section.subsection_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)

    def test_section_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in a section are explicitly locked, section should display staff only warning

            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on all the units
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(0).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)

    def test_subsection_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in subsection are explicitly locked, subsection should display staff only warning

            Given I have a course with one subsection and two units
            When I enable explicit staff lock on all the units
            Then the subsection shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        subsection.unit_at(1).set_staff_lock(True)
        self.assertTrue(subsection.has_staff_lock_warning)

    def test_section_does_not_display_lock_when_some_subsections_locked(self):
        """
        Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning

            Given I have a course with one section and two subsections
            When I enable explicit staff lock on one subsection
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)

    def test_section_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning

            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on three units
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)

    def test_subsection_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning

            Given I have a course with one subsection and two units
            When I enable explicit staff lock on one unit
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        self.assertFalse(subsection.has_staff_lock_warning)

    def test_locked_sections_do_not_appear_in_lms(self):
        """
        Scenario: A locked section is not visible to students in the LMS

            Given I have a course with two sections
            When I enable explicit staff lock on one section
            And I click the View Live button to switch to staff view
            And I visit the course home with the outline
            Then I see two sections in the outline
            And when I switch the view mode to student view
            Then I see one section in the outline
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.course_outline_page.section_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        course_home_page = CourseHomePage(self.browser, self.course_id)
        course_home_page.visit()
        course_home_page.wait_for_page()
        self.assertEqual(course_home_page.outline.num_sections, 2)
        course_home_page.preview.set_staff_view_mode('Learner')
        course_home_page.wait_for(lambda: course_home_page.outline.num_sections == 1,
                                  'Only 1 section is visible in the outline')

    def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
        """
        Scenario: Locking and unlocking a section will not publish its draft units

            Given I have a course with a section and unit
            And the unit has a draft and published version
            When I enable explicit staff lock on the section
            And I disable explicit staff lock on the section
            And I click the View Live button to switch to staff view
            Then I see the published version of the unit
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        # Create a draft by adding a component to the published unit.
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        add_discussion(unit)
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.set_staff_lock(True)
        section.set_staff_lock(False)
        unit = section.subsection_at(0).unit_at(0).go_to()
        unit.view_published_version()
        # The published version must still be empty — the toggle did not publish the draft.
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        self.assertEqual(courseware.num_xblock_components, 0)

    def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
        """
        Scenario: Locking and unlocking a subsection will not publish its draft units

            Given I have a course with a subsection and unit
            And the unit has a draft and published version
            When I enable explicit staff lock on the subsection
            And I disable explicit staff lock on the subsection
            And I click the View Live button to switch to staff view
            Then I see the published version of the unit
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        # Create a draft by adding a component to the published unit.
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        add_discussion(unit)
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.set_staff_lock(True)
        subsection.set_staff_lock(False)
        unit = subsection.unit_at(0).go_to()
        unit.view_published_version()
        # The published version must still be empty — the toggle did not publish the draft.
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        self.assertEqual(courseware.num_xblock_components, 0)

    def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
        """
        Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning.

            Given I have a course with a subsection and unit
            When I enable explicit staff lock on the unit
            And I disable explicit staff lock on the unit
            Then I see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        unit.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(unit, True)

    def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
        """
        Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning.

            Given I have a course with a section and subsection
            When I enable explicit staff lock on the subsection
            And I disable explicit staff lock on the subsection
            Then I see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(subsection, True)

    def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
        """
        Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning.

            Given I have a course with a subsection and unit
            When I enable explicit staff lock on the subsection
            And I enable explicit staff lock on the unit
            When I disable explicit staff lock on the unit
            Then I do not see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        subsection.set_staff_lock(True)
        unit.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(unit, False)

    def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
        """
        Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning.

            Given I have a course with a section and subsection
            When I enable explicit staff lock on the section
            And I enable explicit staff lock on the subsection
            When I disable explicit staff lock on the subsection
            Then I do not see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        subsection = section.subsection_at(0)
        section.set_staff_lock(True)
        subsection.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(subsection, False)
@attr(shard=14)
class EditNamesTest(CourseOutlineTest):
    """
    Feature: Click-to-edit section/subsection names
    """

    __test__ = True

    def set_name_and_verify(self, item, old_name, new_name, expected_name):
        """
        Changes the display name of item from old_name to new_name,
        then verifies that its value is expected_name.
        """
        self.assertEqual(item.name, old_name)
        item.change_name(new_name)
        # After the edit, the name field must be back in its read-only state.
        self.assertFalse(item.in_editable_form())
        self.assertEqual(item.name, expected_name)

    def test_edit_section_name(self):
        """
        Scenario: Click-to-edit section name
            Given that I have created a section
            When I click on the name of section
            Then the section name becomes editable
            And given that I have edited the section name
            When I click outside of the edited section name
            Then the section name saves
            And becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        self.set_name_and_verify(outline.section_at(0), 'Test Section', 'Changed', 'Changed')

    def test_edit_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name
            Given that I have created a subsection
            When I click on the name of subsection
            Then the subsection name becomes editable
            And given that I have edited the subsection name
            When I click outside of the edited subsection name
            Then the subsection name saves
            And becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        self.set_name_and_verify(outline.section_at(0).subsection_at(0), 'Test Subsection', 'Changed', 'Changed')

    def test_edit_empty_section_name(self):
        """
        Scenario: Click-to-edit section name, enter empty name
            Given that I have created a section
            And I have clicked to edit the name of the section
            And I have entered an empty section name
            When I click outside of the edited section name
            Then the section name does not change
            And becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty name is rejected: the original name must survive.
        self.set_name_and_verify(outline.section_at(0), 'Test Section', '', 'Test Section')

    def test_edit_empty_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name, enter empty name
            Given that I have created a subsection
            And I have clicked to edit the name of the subsection
            And I have entered an empty subsection name
            When I click outside of the edited subsection name
            Then the subsection name does not change
            And becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty name is rejected: the original name must survive.
        self.set_name_and_verify(outline.section_at(0).subsection_at(0), 'Test Subsection', '', 'Test Subsection')

    def test_editing_names_does_not_expand_collapse(self):
        """
        Scenario: A section stays in the same expand/collapse state while its name is edited
            Given that I have created a section
            And the section is collapsed
            When I click on the name of the section
            Then the section is collapsed
            And given that I have entered a new name
            Then the section is collapsed
            And given that I press ENTER to finalize the name
            Then the section is collapsed
        """
        self.course_outline_page.visit()

        def section():
            # Re-fetch the section wrapper each time to avoid stale element references.
            return self.course_outline_page.section_at(0)

        section().expand_subsection()
        self.assertFalse(section().in_editable_form())
        self.assertTrue(section().is_collapsed)
        section().edit_name()
        self.assertTrue(section().in_editable_form())
        self.assertTrue(section().is_collapsed)
        section().enter_name('Changed')
        self.assertTrue(section().is_collapsed)
        section().finalize_name()
        self.assertTrue(section().is_collapsed)
@attr(shard=14)
class CreateSectionsTest(CourseOutlineTest):
    """
    Feature: Create new sections/subsections/units
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a completely empty course to easily test adding things to it """
        pass

    def test_create_new_section_from_top_button(self):
        """
        Scenario: Create new section from button at top of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the top of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_top_button()
        self.assertEqual(len(outline.sections()), 1)
        self.assertTrue(outline.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button(self):
        """
        Scenario: Create new section from button at bottom of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_bottom_button()
        self.assertEqual(len(outline.sections()), 1)
        self.assertTrue(outline.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button_plus_icon(self):
        """
        Scenario: Create new section from button plus icon at bottom of page
            Given that I am on the course outline
            When I click the plus icon in "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_bottom_button(click_child_icon=True)
        self.assertEqual(len(outline.sections()), 1)
        self.assertTrue(outline.section_at(0).in_editable_form())

    def test_create_new_subsection(self):
        """
        Scenario: Create new subsection
            Given that I have created a section
            When I click the "+ Add subsection" button in that section
            Then I see a new subsection added to the bottom of the section
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_top_button()
        self.assertEqual(len(outline.sections()), 1)
        outline.section_at(0).add_subsection()
        new_subsections = outline.section_at(0).subsections()
        self.assertEqual(len(new_subsections), 1)
        self.assertTrue(new_subsections[0].in_editable_form())

    def test_create_new_unit(self):
        """
        Scenario: Create new unit
            Given that I have created a section
            And that I have created a subsection within that section
            When I click the "+ Add unit" button in that subsection
            Then I am redirected to a New Unit page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_top_button()
        self.assertEqual(len(outline.sections()), 1)
        outline.section_at(0).add_subsection()
        self.assertEqual(len(outline.section_at(0).subsections()), 1)
        outline.section_at(0).subsection_at(0).add_unit()
        # Adding a unit redirects to the unit's container page.
        unit_page = ContainerPage(self.browser, None)
        unit_page.wait_for_page()
        self.assertTrue(unit_page.is_inline_editing_display_name())
@attr(shard=14)
class DeleteContentTest(CourseOutlineTest):
    """
    Feature: Deleting sections/subsections/units
    """

    __test__ = True

    def test_delete_section(self):
        """
        Scenario: Delete section
            Given that I am on the course outline
            When I click the delete button for a section on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the section
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the section should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).delete()
        self.assertEqual(len(self.course_outline_page.sections()), 0)

    def test_cancel_delete_section(self):
        """
        Scenario: Cancel delete of section
            Given that I clicked the delete button for a section on the course outline
            And I received a confirmation message, asking me if I really want to delete the component
            When I click "Cancel"
            Then the confirmation message should close
            And the section should remain in the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        # cancel=True dismisses the confirmation dialog instead of confirming it.
        self.course_outline_page.section_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.sections()), 1)

    def test_delete_subsection(self):
        """
        Scenario: Delete subsection
            Given that I am on the course outline
            When I click the delete button for a subsection on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the subsection
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the subsection should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).delete()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)

    def test_cancel_delete_subsection(self):
        """
        Scenario: Cancel delete of subsection
            Given that I clicked the delete button for a subsection on the course outline
            And I received a confirmation message, asking me if I really want to delete the subsection
            When I click "cancel"
            Then the confirmation message should close
            And the subsection should remain in the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)

    def test_delete_unit(self):
        """
        Scenario: Delete unit
            Given that I am on the course outline
            When I click the delete button for a unit on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the unit
            When I click "Yes, I want to delete this unit"
            Then the confirmation message should close
            And the unit should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        # Units are only visible once their subsection is expanded.
        self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)

    def test_cancel_delete_unit(self):
        """
        Scenario: Cancel delete of unit
            Given that I clicked the delete button for a unit on the course outline
            And I received a confirmation message, asking me if I really want to delete the unit
            When I click "Cancel"
            Then the confirmation message should close
            And the unit should remain in the course outline
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)

    def test_delete_all_no_content_message(self):
        """
        Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
            Given that I delete all sections, subsections, and units in a course
            When I visit the course outline
            Then I will see a message that says, "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        self.course_outline_page.visit()
        self.assertFalse(self.course_outline_page.has_no_content_message)
        self.course_outline_page.section_at(0).delete()
        self.assertEqual(len(self.course_outline_page.sections()), 0)
        self.assertTrue(self.course_outline_page.has_no_content_message)
@attr(shard=14)
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
    """
    Feature: Courses with multiple sections can expand and collapse all sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a course with two sections """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 2')
                )
            )
        )

    def verify_all_sections(self, collapsed):
        """
        Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.
        """
        for section in self.course_outline_page.sections():
            self.assertEqual(collapsed, section.is_collapsed)

    def toggle_all_sections(self):
        """
        Expands every section on the page.

        NOTE(review): despite the name this only calls expand_subsection() on
        each section -- it never collapses anything -- and it appears unused
        within this class. Kept as-is for backward compatibility.
        """
        for section in self.course_outline_page.sections():
            section.expand_subsection()

    def test_expanded_by_default(self):
        """
        Scenario: The default layout for the outline page is to show sections in expanded view
            Given I have a course with sections
            When I navigate to the course outline page
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with multiple sections
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        for section in self.course_outline_page.sections():
            section.delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_collapse_all_when_all_expanded(self):
        """
        Scenario: Collapse all sections when all sections are expanded
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_collapse_all_when_some_expanded(self):
        """
        Scenario: Collapsing all sections when 1 or more sections are already collapsed
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I collapse the first section
            And I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_expand_all_when_all_collapsed(self):
        """
        Scenario: Expanding all sections when all sections are collapsed
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_expand_all_when_some_collapsed(self):
        """
        Scenario: Expanding all sections when 1 or more sections are already expanded
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I expand the first section
            And I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        # We have seen unexplainable sporadic failures in this test. Try disabling animations to see
        # if that helps.
        disable_animations(self.course_outline_page)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)
@attr(shard=14)
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
    """
    Feature: Courses with a single section can expand and collapse all sections.
    """

    __test__ = True

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with one section
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).delete()
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_old_subsection_stays_collapsed_after_creation(self):
        """
        Scenario: Collapsed subsection stays collapsed after creating a new subsection
            Given I have a course with one section and subsection
            And I navigate to the course outline page
            Then the subsection is collapsed
            And when I create a new subsection
            Then the first subsection is collapsed
            And the second subsection is expanded
        """
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.course_outline_page.section_at(0).add_subsection()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr(shard=14)
class ExpandCollapseEmptyTest(CourseOutlineTest):
    """
    Feature: Courses with no sections initially can expand and collapse all sections after addition.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Expand/collapse for a course with no sections
            Given I have a course with no sections
            When I navigate to the course outline page
            Then I do not see the "Collapse All Sections" link
        """
        self.course_outline_page.visit()
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)

    def test_link_appears_after_section_creation(self):
        """
        Scenario: Collapse link appears after creating first section of a course
            Given I have a course with no sections
            When I navigate to the course outline page
            And I add a section
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr(shard=14)
class DefaultStatesEmptyTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with an empty course
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_empty_course_message(self):
        """
        Scenario: Empty course state
            Given that I am in a course with no sections, subsections, nor units
            When I visit the course outline
            Then I will see a message that says "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty course shows the "no content" prompt and an Add Section button.
        self.assertTrue(outline.has_no_content_message)
        self.assertTrue(outline.bottom_add_section_button.is_present())
@attr(shard=14)
class DefaultStatesContentTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with a course with content
    """

    __test__ = True

    def test_view_live(self):
        """
        Scenario: View Live version from course outline
            Given that I am on the course outline
            When I click the "View Live" button
            Then a new tab will open to the course on the LMS
        """
        self.course_outline_page.visit()
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The default fixture content is one problem, one html, one discussion block.
        expected_types = ['problem', 'html', 'discussion']
        self.assertEqual(courseware.num_xblock_components, len(expected_types))
        for position, block_type in enumerate(expected_types):
            self.assertEqual(courseware.xblock_component_type(position), block_type)
@attr(shard=7)
class UnitNavigationTest(CourseOutlineTest):
    """
    Feature: Navigate to units
    """

    __test__ = True

    def test_navigate_to_unit(self):
        """
        Scenario: Click unit name to navigate to unit page
            Given that I have expanded a section/subsection so I can see unit names
            When I click on a unit name
            Then I will be taken to the appropriate unit page
        """
        outline = self.course_outline_page
        outline.visit()
        # The unit is only clickable once its subsection has been expanded.
        outline.section_at(0).subsection_at(0).expand_subsection()
        unit_page = outline.section_at(0).subsection_at(0).unit_at(0).go_to()
        unit_page.wait_for_page()
@attr(shard=7)
class PublishSectionTest(CourseOutlineTest):
    """
    Feature: Publish sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with 2 subsections inside a single section.
        The first subsection has 2 units, and the second subsection has one unit.
        """
        # LMS pages used by the tests to verify what content was published.
        self.courseware = CoursewarePage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME),
                    XBlockFixtureDesc('vertical', 'Test Unit 2'),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 3'),
                ),
            ),
        )

    def test_unit_publishing(self):
        """
        Scenario: Can publish a unit and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the first unit, subsection, section
            When I publish the first unit
            Then I see that publish button for the first unit disappears
            And I see publish buttons for subsection, section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        unit.publish()
        # Publishing the unit clears its own publish button only; the
        # subsection and section still have other unpublished children.
        self.assertFalse(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_subsection_publishing(self):
        """
        Scenario: Can publish a subsection and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the first subsection
            Then I see that publish button for the first subsection disappears
            And I see that publish buttons disappear for the child units of the subsection
            And I see publish button for section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
        # Publishing the subsection clears it and its child units; the section
        # still has the second (unpublished) subsection.
        self.assertFalse(unit.publish_action)
        self.assertFalse(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.courseware.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_section_publishing(self):
        """
        Scenario: Can publish a section and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the section
            Then I see that publish buttons disappears
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.assertTrue(unit.publish_action)
        self.course_outline_page.section(SECTION_NAME).publish()
        # Publishing the section publishes everything beneath it.
        self.assertFalse(subsection.publish_action)
        self.assertFalse(section.publish_action)
        self.assertFalse(unit.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.courseware.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section(SECTION_NAME, 'Test Subsection 2')
        self.assertEqual(1, self.courseware.num_xblock_components)

    def _add_unpublished_content(self):
        """
        Adds unpublished HTML content to first three units in the course.
        """
        # xrange: this module runs under Python 2.
        for index in xrange(3):
            self.course_fixture.create_xblock(
                self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
                XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
            )

    def _get_items(self):
        """
        Returns first section, subsection, and unit on the page.
        """
        section = self.course_outline_page.section(SECTION_NAME)
        subsection = section.subsection(SUBSECTION_NAME)
        # Expanding the subsection is required before its units can be located.
        unit = subsection.expand_subsection().unit(UNIT_NAME)
        return (section, subsection, unit)
@attr(shard=7)
class DeprecationWarningMessageTest(CourseOutlineTest):
"""
Feature: Verify deprecation warning message.
"""
HEADING_TEXT = 'This course uses features that are no longer supported.'
COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
ADVANCE_MODULES_REMOVE_TEXT = (
u'To avoid errors, édX strongly recommends that you remove unsupported features '
u'from the course advanced settings. To do this, go to the Advanced Settings '
u'page, locate the "Advanced Module List" setting, and then delete the following '
u'modules from the list.'
)
DEFAULT_DISPLAYNAME = "Deprecated Component"
def _add_deprecated_advance_modules(self, block_types):
"""
Add `block_types` into `Advanced Module List`
Arguments:
block_types (list): list of block types
"""
self.advanced_settings.visit()
self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})
def _create_deprecated_components(self):
"""
Create deprecated components.
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc('poll', "Poll", data=load_data_str('poll_markdown.xml'))
)
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('survey', 'Survey'))
    def _verify_deprecation_warning_info(
        self,
        deprecated_blocks_present,
        components_present,
        components_display_name_list=None,
        deprecated_modules_list=None
    ):
        """
        Verify the deprecation warning banner on the course outline page.

        Arguments:
            deprecated_blocks_present (bool): if True, the advanced-modules
                remove text and the deprecated module list must be visible
            components_present (bool): if True, the components list must be shown
            components_display_name_list (list): expected component display names
            deprecated_modules_list (list): expected deprecated advanced modules
        """
        self.assertTrue(self.course_outline_page.deprecated_warning_visible)
        self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
        # The advanced-modules portion of the banner is optional.
        self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
        if deprecated_blocks_present:
            self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
            self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
        # The component-list portion of the banner is optional.
        self.assertEqual(self.course_outline_page.components_visible, components_present)
        if components_present:
            self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
            # assertItemsEqual: order-insensitive comparison (Python 2 unittest).
            self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)
def test_no_deprecation_warning_message_present(self):
"""
Scenario: Verify that deprecation warning message is not shown if no deprecated
advance modules are not present and also no deprecated component exist in
course outline.
When I goto course outline
Then I don't see any deprecation warning
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.deprecated_warning_visible)
def test_deprecation_warning_message_present(self):
"""
Scenario: Verify deprecation warning message if deprecated modules
and components are present.
Given I have "poll" advance modules present in `Advanced Module List`
And I have created 2 poll components
When I go to course outline
Then I see poll deprecated warning
And I see correct poll deprecated warning heading text
And I see correct poll deprecated warning advance modules remove text
And I see list of poll components with correct display names
"""
self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=True,
components_display_name_list=['Poll', 'Survey'],
deprecated_modules_list=['poll', 'survey']
)
def test_deprecation_warning_with_no_displayname(self):
    """
    Scenario: Verify deprecation warning message if poll components are present.

    Given I have created 1 poll deprecated component
    When I go to course outline
    Then I see poll deprecated warning
    And I see correct poll deprecated warning heading text
    And I see list of poll components with correct message
    """
    parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
    # Create a deprecated component whose display_name is empty, and make
    # sure the deprecation warning falls back to the default display name.
    self.course_fixture.create_xblock(
        parent_vertical.locator,
        XBlockFixtureDesc(category='poll', display_name="", data=load_data_str('poll_markdown.xml'))
    )
    self.course_outline_page.visit()
    self._verify_deprecation_warning_info(
        deprecated_blocks_present=False,
        components_present=True,
        components_display_name_list=[self.DEFAULT_DISPLAYNAME],
    )
def test_warning_with_poll_advance_modules_only(self):
    """
    Scenario: Verify that deprecation warning message is shown if only
    poll advance modules are present and no poll component exists.

    Given I have poll advance modules present in `Advanced Module List`
    When I go to course outline
    Then I see poll deprecated warning
    And I see correct poll deprecated warning heading text
    And I see correct poll deprecated warning advance modules remove text
    And I don't see list of poll components
    """
    # Only the advanced-module settings are deprecated; no components are
    # created, so the components list must not render.
    self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
    self.course_outline_page.visit()
    self._verify_deprecation_warning_info(
        deprecated_blocks_present=True,
        components_present=False,
        deprecated_modules_list=['poll', 'survey']
    )
def test_warning_with_poll_components_only(self):
    """
    Scenario: Verify that deprecation warning message is shown if only
    a poll component exists and no poll advance modules are present.

    Given I have created two poll components
    When I go to course outline
    Then I see poll deprecated warning
    And I see correct poll deprecated warning heading text
    And I don't see poll deprecated warning advance modules remove text
    And I see list of poll components with correct display names
    """
    # Components exist but the advanced-module settings are clean, so
    # only the component part of the banner should render.
    self._create_deprecated_components()
    self.course_outline_page.visit()
    self._verify_deprecation_warning_info(
        deprecated_blocks_present=False,
        components_present=True,
        components_display_name_list=['Poll', 'Survey']
    )
@attr(shard=4)
class SelfPacedOutlineTest(CourseOutlineTest):
    """Test the course outline for a self-paced course."""

    def populate_course_fixture(self, course_fixture):
        """
        Build a minimal chapter/sequential/vertical tree and mark the
        course as self-paced with a start date in the future.
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME)
                )
            ),
        )
        # Consistency fix: use the passed-in fixture throughout (it is the
        # same object the base class assigns to ``self.course_fixture``)
        # instead of mixing the two spellings within one method.
        course_fixture.add_course_details({
            'self_paced': True,
            'start_date': datetime.now() + timedelta(days=1)
        })
        ConfigModelFixture('/config/self_paced', {'enabled': True}).install()

    def test_release_dates_not_shown(self):
        """
        Scenario: Ensure that block release dates are not shown on the
            course outline page of a self-paced course.

        Given I am the author of a self-paced course
        When I go to the course outline
        Then I should not see release dates for course content
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        self.assertEqual(section.release_date, '')
        subsection = section.subsection(SUBSECTION_NAME)
        self.assertEqual(subsection.release_date, '')

    def test_edit_section_and_subsection(self):
        """
        Scenario: Ensure that block release/due dates are not shown
            in their settings modals.

        Given I am the author of a self-paced course
        When I go to the course outline
        And I click on settings for a section or subsection
        Then I should not see release or due date settings
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        modal = section.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        modal.cancel()
        subsection = section.subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
class CourseStatusOutlineTest(CourseOutlineTest):
    """Tests for the course status panel shown on the course outline page."""
    shard = 6

    def setUp(self):
        super(CourseStatusOutlineTest, self).setUp()
        # Both target pages are addressed by the same course triple, so
        # unpack it once instead of repeating the lookups.
        org, number, run = (
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
        )
        self.schedule_and_details_settings = SettingsPage(self.browser, org, number, run)
        self.checklists = CourseChecklistsPage(self.browser, org, number, run)

    def test_course_status_section(self):
        """The course status section is rendered in the course outline."""
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.has_course_status_section)

    def test_course_status_section_start_date_link(self):
        """
        The course start date link inside the course status section leads
        to the "Schedule and Details" page.
        """
        self.course_outline_page.visit()
        self.course_outline_page.click_course_status_section_start_date_link()
        self.schedule_and_details_settings.wait_for_page()

    def test_course_status_section_checklists_link(self):
        """
        The course checklists link inside the course status section leads
        to the "Checklists" page.
        """
        self.course_outline_page.visit()
        self.course_outline_page.click_course_status_section_checklists_link()
        self.checklists.wait_for_page()
class InstructorPacedToSelfPacedOutlineTest(CourseOutlineTest):
    """
    Test the course outline when pacing is changed from
    instructor to self paced.
    """

    def populate_course_fixture(self, course_fixture):
        # Build a minimal chapter/sequential/vertical tree, start the course
        # in the future, and allow timed exams.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME)
                )
            ),
        )
        # NOTE(review): this method mixes the ``course_fixture`` parameter
        # with ``self.course_fixture`` — presumably the same object; confirm
        # against the base class before unifying the spellings.
        self.course_fixture.add_course_details({
            'start_date': datetime.now() + timedelta(days=1),
        })
        self.course_fixture.add_advanced_settings({
            'enable_timed_exams': {
                'value': True
            }
        })

    def test_due_dates_not_shown(self):
        """
        Scenario: Ensure that due dates for timed exams
        are not displayed on the course outline page when switched to
        self-paced mode from instructor-paced.

        Given an instructor paced course, add a due date for a subsection.
        Change the course's pacing to self-paced.
        Make the subsection a timed exam.
        Make sure adding the timed exam doesn't display the due date.
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        subsection = section.subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        modal.due_date = '5/14/2016'
        modal.policy = 'Homework'
        modal.save()
        # Checking if the added due date saved
        self.assertIn('May 14', subsection.due_date)
        # Checking if grading policy added
        self.assertEqual('Homework', subsection.policy)
        # Updating the course mode to self-paced
        self.course_fixture.add_course_details({
            'self_paced': True
        })
        # Making the subsection a timed exam
        self.course_outline_page.open_subsection_settings_dialog()
        self.course_outline_page.select_advanced_tab()
        self.course_outline_page.make_exam_timed()
        # configure call to actually update course with new settings
        self.course_fixture.configure_course()
        # Reloading page after the changes
        self.course_outline_page.visit()
        # A self-paced timed exam must not expose the earlier due date.
        self.assertIsNone(subsection.due_date)
| jolyonb/edx-platform | common/test/acceptance/tests/studio/test_studio_outline.py | Python | agpl-3.0 | 87,380 | [
"VisIt"
] | ead79505df725d3f9686e36b62e254bf0939b00c72971d57c37ef7a8193044db |
import os
import numpy as np
from ase import Atom, Atoms
from ase.structure import bulk
from ase.units import Hartree, Bohr
from gpaw import GPAW, FermiDirac
from gpaw.response.bse import BSE
from ase.dft import monkhorst_pack
from gpaw.mpi import rank
# Stage switches: ground state, BSE spectrum calculation, and result check.
GS = 1
bse = 1
check = 1

if GS:
    # Ground-state DFT calculation for bulk silicon on a 4x4x4 k-point grid.
    kpts = (4,4,4)
    a = 5.431 # From PRB 73,045112 (2006)
    atoms = bulk('Si', 'diamond', a=a)
    calc = GPAW(h=0.2,
                kpts=kpts,
                occupations=FermiDirac(0.001),
                nbands=8,
                convergence={'bands':'all'})
    atoms.set_calculator(calc)
    atoms.get_potential_energy()
    # Write wave functions too ('all') so the BSE step can restart from them.
    calc.write('Si.gpw','all')

if bse:
    # Bethe-Salpeter calculation of the dielectric function with a rigid
    # scissors shift of 0.8 eV applied to the conduction bands.
    eshift = 0.8
    bse = BSE('Si.gpw',w=np.linspace(0,10,201),
              q=np.array([0.0001,0,0.0]),optical_limit=True,ecut=50.,
              nc=np.array([4,6]), nv=np.array([2,4]), eshift=eshift,
              nbands=8,positive_w=True,use_W=True,qsymm=True)
    bse.get_dielectric_function('Si_bse.dat')

# Clean up the pair-density scratch file on the master rank only.
if rank == 0 and os.path.isfile('phi_qaGp'):
    os.remove('phi_qaGp')

if check:
    d = np.loadtxt('Si_bse.dat')
    # Frequency-grid indices where the two absorption peaks are expected.
    Nw1 = 67
    Nw2 = 80
    # Both indices must be local maxima of the imaginary part (column 2).
    if d[Nw1, 2] > d[Nw1-1, 2] and d[Nw1, 2] > d[Nw1+1, 2] \
       and d[Nw2, 2] > d[Nw2-1, 2] and d[Nw2, 2] > d[Nw2+1, 2]:
        pass
    else:
        raise ValueError('Absorption peak not correct ! ')

    # Peak heights must match the reference values within tolerance.
    if np.abs(d[Nw1, 2] - 53.3382894891) > 0.6 \
       or np.abs(d[Nw2, 2] - 62.7667801949 ) > 2.:
        print d[Nw1, 2], d[Nw2, 2]
        raise ValueError('Please check spectrum strength ! ')
| ajylee/gpaw-rtxs | gpaw/test/bse_silicon.py | Python | gpl-3.0 | 1,554 | [
"ASE",
"GPAW"
] | 48a6e78bd14bfbeb56e2b9f5449715166b7696c655ab6aefa234ca2b56d5593c |
#
#
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom, and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""The convolution module contains plugins for linear filtering"""
from gamera.plugin import PluginFunction, PluginModule
from gamera.args import Choice, Int, ImageType, ImageList, Args, Float
from gamera.enums import RGB, FLOAT, GREYSCALE, GREY16, COMPLEX
from gamera.plugins import image_utilities
import _arithmetic
import _convolution
# Pixel types on which all convolution plugins in this module operate.
CONVOLUTION_TYPES = [GREYSCALE, GREY16, FLOAT, RGB, COMPLEX]

# Note: The convolution exposed here does not allow for the case where the
# logical center of the kernel is different from the physical center.
# Saving that for another day...  MGD

########################################
# Convolution methods
class convolve(PluginFunction):
    u"""
    Convolves an image with a given kernel.

    Uses code from the Vigra library (Copyright 1998-2007 by Ullrich
    K\u00f6the).

    *kernel*
      A kernel for the convolution.  The kernel may either be a FloatImage
      or a nested Python list of floats.

    *border_treatment*
      Specifies how to treat the borders of the image.  Must be one of
      the following:

      - BORDER_TREATMENT_AVOID (0)
        do not operate on a pixel where the kernel does not fit in the image

      - BORDER_TREATMENT_CLIP (1)
        clip kernel at image border. The kernel entries are renormalized
        so that the total kernel sum is not changed (this is only useful
        if the kernel is >= 0 everywhere).

      - BORDER_TREATMENT_REPEAT (2)
        repeat the nearest valid pixel

      - BORDER_TREATMENT_REFLECT (3)
        reflect image at last row/column

      - BORDER_TREATMENT_WRAP (4)
        wrap image around (periodic boundary conditions)

    Example usage:

    .. code:: Python

      # Using a custom kernel
      img2 = image.convolve([[0.125, 0.0, -0.125],
                             [0.25 , 0.0, -0.25 ],
                             [0.125, 0.0, -0.125]])

      # Using one of the included kernel generators
      img2 = image.convolve(GaussianKernel(3.0))
    """
    category = "Filter/Convolution"
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([ImageType([FLOAT], 'kernel'),
                 Choice('border_treatment',
                        ['avoid', 'clip', 'repeat', 'reflect', 'wrap'],
                        default=1)])
    return_type = ImageType(CONVOLUTION_TYPES)

    def __call__(self, kernel, border_treatment=3):
        # NOTE(review): the Python default here is 3 (reflect) while the
        # GUI ``Args`` default above is 1 (clip); the sibling plugins
        # (convolve_x/convolve_y/convolve_xy) use 1 in both places.
        # Confirm which default is intended before unifying.
        from gamera.gameracore import FLOAT
        # Accept a nested Python list as a convenience and convert it
        # to a FLOAT image kernel.
        if type(kernel) == list:
            kernel = image_utilities.nested_list_to_image(kernel, FLOAT)
        return _convolution.convolve(self, kernel, border_treatment)
    __call__ = staticmethod(__call__)
class convolve_xy(PluginFunction):
    u"""
    Convolves an image with two 1D kernels, one applied along X and one
    along Y — what the Vigra library calls "Separable Convolution".

    Uses code from the Vigra library (Copyright 1998-2007 by Ullrich
    K\u00f6the).

    *kernel_x*
      The 1D kernel used in the *x* direction; either a FloatImage or a
      nested Python list of floats.

    *kernel_y*
      The 1D kernel used in the *y* direction; either a FloatImage or a
      nested Python list of floats.  When omitted, *kernel_x* is reused
      for the *y* direction.

    *border_treatment*
      How to treat the image borders; see ``convolve`` for the accepted
      values.
    """
    category = "Filter/Convolution"
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([ImageType([FLOAT], 'kernel_x'),
                 ImageType([FLOAT], 'kernel_y'),
                 Choice('border_treatment',
                        ['avoid', 'clip', 'repeat', 'reflect', 'wrap'],
                        default=1)])
    return_type = ImageType(CONVOLUTION_TYPES)
    pure_python = True

    def __call__(self, kernel_x, kernel_y=None, border_treatment=1):
        from gamera.gameracore import FLOAT

        def as_kernel_image(k):
            # Convert a plain nested list to a FLOAT image; pass images through.
            if type(k) is list:
                return image_utilities.nested_list_to_image(k, FLOAT)
            return k

        if kernel_y is None or kernel_y == kernel_x:
            # Same kernel for both axes: convert once, share the image.
            kernel_x = kernel_y = as_kernel_image(kernel_x)
        else:
            kernel_x = as_kernel_image(kernel_x)
            kernel_y = as_kernel_image(kernel_y)
        horizontal = _convolution.convolve_x(self, kernel_x, border_treatment)
        return _convolution.convolve_y(horizontal, kernel_y, border_treatment)
    __call__ = staticmethod(__call__)
class convolve_x(PluginFunction):
    u"""
    Convolves an image in the X direction with a 1D kernel.  This is
    equivalent to what the Vigra library calls "Separable
    Convolution".

    Uses code from the Vigra library (Copyright 1998-2007 by Ullrich
    K\u00f6the).

    *kernel_x*
      A kernel for the convolution in the *x* direction.  The kernel
      may either be a FloatImage or a nested Python list of floats.
      It must consist of only a single row.

    *border_treatment*
      Specifies how to treat the borders of the image.  See
      ``convolve`` for information about *border_treatment* values.
    """
    category = "Filter/Convolution"
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([ImageType([FLOAT], 'kernel_x'),
                 Choice('border_treatment',
                        ['avoid', 'clip', 'repeat', 'reflect', 'wrap'],
                        default=1)])
    return_type = ImageType(CONVOLUTION_TYPES)

    def __call__(self, kernel, border_treatment=1):
        from gamera.gameracore import FLOAT
        # Accept a nested Python list and convert it to a FLOAT image kernel.
        if type(kernel) == list:
            kernel = image_utilities.nested_list_to_image(kernel, FLOAT)
        return _convolution.convolve_x(self, kernel, border_treatment)
    __call__ = staticmethod(__call__)
class convolve_y(PluginFunction):
    u"""
    Convolves an image in the Y direction with a 1D kernel.  This is
    equivalent to what the Vigra library calls "Separable Convolution".

    Uses code from the Vigra library (Copyright 1998-2007 by Ullrich
    K\u00f6the).

    *kernel_y*
      A kernel for the convolution in the *y* direction.  The kernel
      may either be a FloatImage or a nested Python list of floats.
      It must consist of only a single row.

    *border_treatment*
      Specifies how to treat the borders of the image.  See
      ``convolve`` for information about *border_treatment* values.
    """
    category = "Filter/Convolution"
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([ImageType([FLOAT], 'kernel_y'),
                 Choice('border_treatment',
                        ['avoid', 'clip', 'repeat', 'reflect', 'wrap'],
                        default=1)])
    return_type = ImageType(CONVOLUTION_TYPES)

    def __call__(self, kernel, border_treatment=1):
        from gamera.gameracore import FLOAT
        # Accept a nested Python list and convert it to a FLOAT image kernel.
        if type(kernel) == list:
            kernel = image_utilities.nested_list_to_image(kernel, FLOAT)
        return _convolution.convolve_y(self, kernel, border_treatment)
    __call__ = staticmethod(__call__)
########################################
# Convolution kernels

class ConvolutionKernel(PluginFunction):
    # Abstract base for the kernel-generator plugins below: each generator
    # is a free function (no self image) returning a FLOAT kernel image.
    self_type = None
    return_type = ImageType([FLOAT])
    category = "Filter/ConvolutionKernels"

class GaussianKernel(ConvolutionKernel):
    """
    Init as a Gaussian function. The radius of the kernel is always
    3*standard_deviation.

    *standard_deviation*
      The standard deviation of the Gaussian kernel.
    """
    args = Args([Float("standard_deviation", default=1.0)])

class GaussianDerivativeKernel(ConvolutionKernel):
    """
    Init as a Gaussian derivative of order 'order'.  The radius of the
    kernel is always 3*std_dev.

    *standard_deviation*
      The standard deviation of the Gaussian kernel.

    *order*
      The order of the Gaussian kernel.
    """
    args = Args([Float("standard_deviation", default=1.0),
                 Int("order", default=1)])

class BinomialKernel(ConvolutionKernel):
    """
    Creates a binomial filter kernel for use with separable
    convolution of a given radius.

    *radius*
      The radius of the kernel.
    """
    args = Args([Int("radius", default=3)])

class AveragingKernel(ConvolutionKernel):
    """
    Creates an Averaging filter kernel for use with separable
    convolution.  The window size is (2*radius+1) * (2*radius+1).

    *radius*
      The radius of the kernel.
    """
    args = Args([Int("radius", default=3)])

class SymmetricGradientKernel(ConvolutionKernel):
    """
    Init as a symmetric gradient filter of the form [ 0.5, 0.0, -0.5]
    """
    args = Args([])

class SimpleSharpeningKernel(ConvolutionKernel):
    """
    Creates a kernel for simple sharpening.
    """
    args = Args([Float('sharpening_factor', default=0.5)])
########################################
# Convolution applications
#
# The following are some applications of convolution built from the above
# parts. This could have been implemented by calling the corresponding
# Vigra functions directly, but that would have increased the compiled
# binary size of an already large module emmensely. This approach has
# slightly more overhead, being in Python, but it should hopefully
# not have a significant impact. MGD
class gaussian_smoothing(PluginFunction):
    """
    Smooths an image by separable convolution with a Gaussian kernel.

    *standard_deviation*
      The standard deviation of the Gaussian kernel.
    """
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([Float("standard_deviation", default=1.0)])
    return_type = ImageType(CONVOLUTION_TYPES)
    pure_python = True
    doc_examples = [(GREYSCALE, 1.0), (RGB, 3.0), (COMPLEX, 1.0)]

    def __call__(self, std_dev=1.0):
        # One Gaussian kernel applied along both axes; reflect at borders.
        blur_kernel = _convolution.GaussianKernel(std_dev)
        return self.convolve_xy(blur_kernel,
                                border_treatment=BORDER_TREATMENT_REFLECT)
    __call__ = staticmethod(__call__)
class simple_sharpen(PluginFunction):
    """
    Sharpens an image by convolving with a simple sharpening kernel.

    *sharpening_factor*
      The amount of sharpening to perform.
    """
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([Float("sharpening_factor", default=0.5)])
    return_type = ImageType(CONVOLUTION_TYPES)
    pure_python = True
    doc_examples = [(GREYSCALE, 1.0), (RGB, 3.0)]

    def __call__(self, sharpening_factor=0.5):
        # Build the 2D sharpening kernel, then convolve with reflection
        # at the image borders.
        sharpen_kernel = _convolution.SimpleSharpeningKernel(sharpening_factor)
        return self.convolve(sharpen_kernel,
                             border_treatment=BORDER_TREATMENT_REFLECT)
    __call__ = staticmethod(__call__)
class gaussian_gradient(PluginFunction):
    """
    Calculate the gradient vector by means of a 1st derivative of
    Gaussian filter.

    *scale*

    Returns a tuple of (*x_gradient*, *y_gradient*).
    """
    self_type = ImageType(CONVOLUTION_TYPES)
    args = Args([Float("scale", default=0.5)])
    return_type = ImageList("gradients")
    pure_python = True
    doc_examples = [(GREYSCALE, 1.0), (RGB, 1.0), (COMPLEX, 1.0)]

    def __call__(self, scale=1.0):
        smooth = _convolution.GaussianKernel(scale)
        grad = _convolution.GaussianDerivativeKernel(scale, 1)
        # d/dx: derivative along x, Gaussian smoothing along y.
        result_x = self.convolve_x(grad).convolve_y(smooth)
        # d/dy: Gaussian smoothing along x, derivative along y.
        result_y = self.convolve_x(smooth).convolve_y(grad)
        return result_x, result_y
    __call__ = staticmethod(__call__)
class laplacian_of_gaussian(PluginFunction):
    """
    Filter image with the Laplacian of Gaussian operator at the given
    scale.

    *scale*
    """
    self_type = ImageType([GREYSCALE, GREY16, FLOAT])
    args = Args([Float("scale", default=0.5)])
    return_type = ImageType([GREYSCALE, GREY16, FLOAT])
    pure_python = True
    doc_examples = [(GREYSCALE, 1.0)]

    def __call__(self, scale=1.0):
        smooth = _convolution.GaussianKernel(scale)
        deriv = _convolution.GaussianDerivativeKernel(scale, 2)
        fp = self.to_float()
        # Laplacian = d2/dx2 + d2/dy2, each computed separably and
        # smoothed along the orthogonal axis.
        d_xx = fp.convolve_x(deriv).convolve_y(smooth)
        d_yy = fp.convolve_x(smooth).convolve_y(deriv)
        result = _arithmetic.add_images(d_xx, d_yy, False)
        # Convert back to the caller's pixel type.
        if self.data.pixel_type == GREYSCALE:
            return result.to_greyscale()
        if self.data.pixel_type == GREY16:
            return result.to_grey16()
        return result
    __call__ = staticmethod(__call__)
class hessian_matrix_of_gaussian(PluginFunction):
    """
    Filter image with the 2nd derivatives of the Gaussian at the given
    scale to get the Hessian matrix.

    *scale*

    Returns the tuple (*I_xx*, *I_yy*, *I_xy*).
    """
    self_type = ImageType([GREYSCALE, GREY16, FLOAT])
    args = Args([Float("scale", default=0.5)])
    return_type = ImageList("hessian_matrix")
    pure_python = True
    doc_examples = [(GREYSCALE, 1.0)]

    def __call__(self, scale=1.0):
        smooth = _convolution.GaussianKernel(scale)
        deriv1 = _convolution.GaussianDerivativeKernel(scale, 1)
        deriv2 = _convolution.GaussianDerivativeKernel(scale, 2)
        fp = self.to_float()
        # I_xx: 2nd x-derivative, Gaussian-smoothed along y.
        tmp_x = fp.convolve_x(deriv2).convolve_y(smooth)
        # I_yy: Gaussian-smoothed along x, 2nd y-derivative.
        tmp_y = fp.convolve_x(smooth).convolve_y(deriv2)
        # I_xy: 1st derivative in x followed by 1st derivative in y.
        # BUG FIX: the original computed ``tmp = fp.convolve_x(deriv1)``
        # and then discarded it, deriving I_xy from the unfiltered image;
        # the mixed partial needs both 1st-derivative convolutions.
        tmp_xy = fp.convolve_x(deriv1).convolve_y(deriv1)
        if self.data.pixel_type == GREYSCALE:
            return tmp_x.to_greyscale(), tmp_y.to_greyscale(), tmp_xy.to_greyscale()
        if self.data.pixel_type == GREY16:
            return tmp_x.to_grey16(), tmp_y.to_grey16(), tmp_xy.to_grey16()
        # FLOAT input: return the float images directly.  The original
        # returned an undefined ``result`` (later patched to None), which
        # crashed callers expecting three images -- see the 2013 review note.
        return tmp_x, tmp_y, tmp_xy
    __call__ = staticmethod(__call__)
class sobel_edge_detection(PluginFunction):
    """
    Performs simple Sobel edge detection on the image.
    """
    self_type = ImageType(CONVOLUTION_TYPES)
    return_type = ImageType(CONVOLUTION_TYPES)
    pure_python = True
    doc_examples = [(GREYSCALE, 1.0), (RGB, 3.0)]

    def __call__(self, scale=1.0):
        # NOTE(review): ``scale`` is accepted but never used, and only the
        # normalized horizontal (x-direction) Sobel kernel is applied —
        # confirm whether a combined x/y gradient was intended.
        return self.convolve([[.125, 0.0, -.125],
                              [.25, 0.0, -.25],
                              [.125, 0.0, -.125]])
    __call__ = staticmethod(__call__)
class ConvolutionModule(PluginModule):
    # Registers every convolution plugin and kernel generator with Gamera.
    cpp_headers = ["convolution.hpp"]
    category = "Filter"
    functions = [convolve, convolve_xy, convolve_x, convolve_y,
                 GaussianKernel, GaussianDerivativeKernel,
                 BinomialKernel, AveragingKernel,
                 SymmetricGradientKernel, SimpleSharpeningKernel,
                 gaussian_smoothing, simple_sharpen,
                 gaussian_gradient, laplacian_of_gaussian,
                 hessian_matrix_of_gaussian, sobel_edge_detection]
    author = u"Michael Droettboom (With code from VIGRA by Ullrich K\u00f6the)"
    url = "http://gamera.sourceforge.net/"
# Instantiate the module so the plugin system registers its functions.
module = ConvolutionModule()

# Border-treatment constants accepted by the convolution plugins above.
BORDER_TREATMENT_AVOID = 0
BORDER_TREATMENT_CLIP = 1
BORDER_TREATMENT_REPEAT = 2
BORDER_TREATMENT_REFLECT = 3
BORDER_TREATMENT_WRAP = 4

# Replace the kernel-generator classes with callable plugin instances.
GaussianKernel = GaussianKernel()
GaussianDerivativeKernel = GaussianDerivativeKernel()
BinomialKernel = BinomialKernel()
AveragingKernel = AveragingKernel()
SymmetricGradientKernel = SymmetricGradientKernel()
SimpleSharpeningKernel = SimpleSharpeningKernel()

# Remove names that are implementation details of this module.
del CONVOLUTION_TYPES
del ConvolutionKernel
| DDMAL/Gamera | gamera/plugins/convolution.py | Python | gpl-2.0 | 16,256 | [
"Gaussian"
] | d3b41b37a1210a9bf553941b146265f6a9935115854bffa640d828f9dcebe08e |
#pylint: disable=C0111
#pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
#pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
#pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
#pylint: disable=W0613
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=E0611
from logging import getLogger
logger = getLogger(__name__)

# ----------------------------------------------------------------------
# Generic browser navigation and assertion steps.  Each step body is a
# thin wrapper around a ``world`` helper from course_helpers/ui_helpers.
# ----------------------------------------------------------------------


@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
    # Blocking sleep; prefer condition-based waits where possible.
    world.wait(seconds)


@step('I reload the page$')
def reload_the_page(step):
    # Let pending AJAX settle before reloading, then wait for JS again.
    world.wait_for_ajax_complete()
    world.browser.reload()
    world.wait_for_js_to_load()


@step('I press the browser back button$')
def browser_back(step):
    world.browser.driver.back()


@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
    world.visit('/')
    assert world.is_css_present('header.global')


@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
    world.visit('/dashboard')
    assert world.is_css_present('section.container.dashboard')


@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
    assert world.is_css_present('section.container.dashboard')
    assert world.browser.title == 'Dashboard'


@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
    world.visit('/courses')
    assert world.is_css_present('section.courses')


@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
    # Buttons are located by their ``value`` attribute.
    button_css = 'input[value="%s"]' % value
    world.css_click(button_css)


@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
    world.click_link(linktext)


@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
    assert world.url_equals(path)


@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
    assert_equals(world.browser.title, title)


@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
    assert(title in world.browser.title)
# ----------------------------------------------------------------------
# Authentication and link-assertion steps.
# ----------------------------------------------------------------------


@step('I log in$')
def i_log_in(step):
    # Assumes the 'robot' test user already exists (see "I am a logged in user").
    world.log_in(username='robot', password='test')


@step('I am a logged in user$')
def i_am_logged_in_user(step):
    world.create_user('robot', 'test')
    world.log_in(username='robot', password='test')


@step('I am not logged in$')
def i_am_not_logged_in(step):
    world.visit('logout')


@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
    world.register_by_course_id(course_id, True)


@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
    world.click_link(text)


@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
    assert_equals(world.browser.url, url)


@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
    assert len(world.browser.find_link_by_text(text)) > 0


@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
    link = world.browser.find_by_id(link_id)
    assert len(link) > 0
    assert_equals(link.text, text)


@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
    link = world.browser.find_link_by_text(text)
    assert len(link) > 0
    # Compare against the fully-qualified URL built from the test server root.
    assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
    # SauceLabs sessions run remotely and are slower, so double the wait.
    if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
        multiplier = 2
    else:
        multiplier = 1
    if doesnt_appear:
        assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
    else:
        assert world.browser.is_text_present(text, wait_time=5 * multiplier)


@step('I am logged in$')
def i_am_logged_in(step):
    world.create_user('robot', 'test')
    world.log_in(username='robot', password='test')
    world.browser.visit(lettuce.django.django_url('/'))
    # Landing on the dashboard confirms the login actually succeeded.
    dash_css = 'section.container.dashboard'
    assert world.is_css_present(dash_css)


@step(u'I am an edX user$')
def i_am_an_edx_user(step):
    world.create_user('robot', 'test')


@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
    world.create_user(uname, 'test')


@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
    assert world.dialogs_closed()


@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
    world.browser.visit(lettuce.django.django_url(url))


@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
    # Delegates to the helper imported via the ui_helpers wildcard import.
    wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert
    Window variables are page local and thus all changes are removed upon navigating to a new page
    In addition, this method changes the functionality of ONLY future alerts
    """
    # Override both confirm() and alert() so no native dialog ever blocks
    # the webdriver; confirm() always accepts.
    world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')


@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert
    Window variables are page local and thus all changes are removed upon navigating to a new page
    In addition, this method changes the functionality of ONLY future alerts
    """
    # Same override as above, but confirm() always rejects.
    world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert
    Window variables are page local and thus all changes are removed upon navigating to a new page
    In addition, this method changes the functionality of ONLY future alerts
    """
    # BUG FIX: the original applied ``% prompt`` to the *return value* of
    # execute_script (always None -> TypeError), and the substituted text
    # was unquoted, so it would have been parsed as a JavaScript identifier
    # rather than a string.  Format the script first and quote the answer.
    world.browser.execute_script(
        'window.prompt = function(){return "%s";}' % prompt
    )
@step('I run ipdb')
def run_ipdb(_step):
    """Run ipdb as step for easy debugging"""
    # Drops into an interactive debugger; only useful when the suite is
    # attached to a terminal.  Import locally so ipdb is not a hard
    # dependency of normal test runs.
    import ipdb
    ipdb.set_trace()
    assert True
| pelikanchik/edx-platform | common/djangoapps/terrain/steps.py | Python | agpl-3.0 | 6,704 | [
"VisIt"
] | 6b6b8a30716592b3bf6f3f3a860d9bd5907127049bb645f0e98eb4a6fd464861 |
"""
CloudEndpoint is a base class for the clients used to connect to different
cloud providers
"""
import os
import ssl
import time
from libcloud import security
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.exceptions import BaseHTTPError
# DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Resources.Cloud.Endpoint import Endpoint
from DIRAC.Resources.Cloud.Utilities import STATE_MAP
class CloudEndpoint(Endpoint):
"""CloudEndpoint base class"""
def __init__(self, parameters=None):
    """Construct the endpoint and immediately validate the connection.

    :param dict parameters: endpoint configuration (credentials, provider,
                            libcloud ``ex_*`` connection options, ...)
    """
    super(CloudEndpoint, self).__init__(parameters=parameters)
    # logger
    self.log = gLogger.getSubLogger("CloudEndpoint")
    # ``valid`` is only set to True after initialize() succeeds, i.e.
    # after the driver was created and a test call to the service worked.
    self.valid = False
    result = self.initialize()
    if result["OK"]:
        self.log.debug("CloudEndpoint created and validated")
        self.valid = True
def initialize(self):
    """Create the libcloud driver from the endpoint parameters and
    verify the connection.

    :return: S_OK() | S_ERROR
    """
    # Relax security
    # WARNING: these are *process-wide* libcloud settings; disabling
    # certificate verification affects every endpoint in this process.
    security.SSL_VERSION = ssl.PROTOCOL_SSLv23
    security.VERIFY_SSL_CERT = False

    # Variables needed to contact the service
    # Forward only the libcloud ``ex_*`` options that were configured.
    connDict = {}
    for var in [
        "ex_domain_name",
        "ex_force_auth_url",
        "ex_force_service_region",
        "ex_force_auth_version",
        "ex_tenant_name",
        "ex_keyname",
        "ex_voms_proxy",
    ]:
        if var in self.parameters:
            connDict[var] = self.parameters[var]

    username = self.parameters.get("User")
    password = self.parameters.get("Password")

    for key in connDict:
        self.log.info("%s: %s" % (key, connDict[key]))

    # get cloud driver
    # Resolve the libcloud provider class by name (default: OpenStack).
    providerName = self.parameters.get("Provider", "OPENSTACK").upper()
    providerCode = getattr(Provider, providerName)
    self.driverClass = get_driver(providerCode)

    self.__driver = self.driverClass(username, password, **connDict)

    return self.__checkConnection()
def __checkConnection(self):
    """
    Checks connection status by trying to list the images.

    :return: S_OK | S_ERROR
    """
    try:
        # The returned image list is irrelevant; only the round trip matters.
        self.__driver.list_images()
    except Exception as excp:
        return S_ERROR(excp)
    return S_OK()
def __getImageByName(self, imageName):
    """
    Given the imageName, returns the current image object from the server.

    :Parameters:
      **imageName** - `string`
        imageName as stored on the OpenStack image repository ( glance )

    :return: S_OK( image ) | S_ERROR
    """
    try:
        images = self.__driver.list_images()
    except Exception as errmsg:
        return S_ERROR(errmsg)

    # Pick the first image whose name matches, or None if there is none.
    match = next((img for img in images if img.name == imageName), None)
    if match is None:
        return S_ERROR("Image %s not found" % imageName)
    return S_OK(match)
def __getFlavorByName(self, flavorName):
"""
Given the flavorName, returns the current flavor object from the server.
:Parameters:
**flavorName** - `string`
flavorName as stored on the OpenStack service
:return: S_OK( flavor ) | S_ERROR
"""
try:
flavors = self.__driver.list_sizes()
except Exception as errmsg:
return S_ERROR(errmsg)
flavor = None
for fl in flavors:
if fl.name == flavorName:
flavor = fl
if flavor is None:
return S_ERROR("Flavor %s not found" % flavorName)
return S_OK(flavor)
def __getSecurityGroups(self, securityGroupNames=None):
"""
Given the securityGroupName, returns the current security group object from the server.
:Parameters:
**securityGroupName** - `string`
securityGroupName as stored on the OpenStack service
:return: S_OK( securityGroup ) | S_ERROR
"""
if not securityGroupNames:
securityGroupNames = []
elif not isinstance(securityGroupNames, list):
securityGroupNames = [securityGroupNames]
if "default" not in securityGroupNames:
securityGroupNames.append("default")
try:
secGroups = self.__driver.ex_list_security_groups()
except Exception as errmsg:
return S_ERROR(errmsg)
return S_OK([secGroup for secGroup in secGroups if secGroup.name in securityGroupNames])
def createInstances(self, vmsToSubmit):
outputDict = {}
for nvm in range(vmsToSubmit):
instanceID = makeGuid()[:8]
createPublicIP = "ipPool" in self.parameters
result = self.createInstance(instanceID, createPublicIP)
if result["OK"]:
node, publicIP = result["Value"]
self.log.debug("Created VM instance %s/%s with publicIP %s" % (node.id, instanceID, publicIP))
nodeDict = {}
nodeDict["PublicIP"] = publicIP
nodeDict["InstanceID"] = instanceID
nodeDict["NumberOfProcessors"] = self.flavor.vcpus
nodeDict["RAM"] = self.flavor.ram
nodeDict["DiskSize"] = self.flavor.disk
nodeDict["Price"] = self.flavor.price
outputDict[node.id] = nodeDict
else:
break
if not outputDict:
# Submission failed
return result
return S_OK(outputDict)
def createInstance(self, instanceID="", createPublicIP=True):
"""
This creates a VM instance for the given boot image
and creates a context script, taken the given parameters.
Successful creation returns instance VM
Boots a new node on the OpenStack server defined by self.endpointConfig. The
'personality' of the node is done by self.imageConfig. Both variables are
defined on initialization phase.
The node name has the following format:
<bootImageName><contextMethod><time>
It boots the node. If IPpool is defined on the imageConfiguration, a floating
IP is created and assigned to the node.
:return: S_OK( ( nodeID, publicIP ) ) | S_ERROR
"""
if not instanceID:
instanceID = makeGuid()[:8]
self.parameters["VMUUID"] = instanceID
self.parameters["VMType"] = self.parameters.get("CEType", "OpenStack")
createNodeDict = {}
# Get the image object
if "ImageID" in self.parameters:
try:
image = self.__driver.get_image(self.parameters["ImageID"])
except BaseHTTPError as err:
if err.code == 404:
# Image not found
return S_ERROR("Image with ID %s not found" % self.parameters["ImageID"])
return S_ERROR("Failed to get image for ID %s (%s)" % (self.parameters["ImageID"], str(err)))
elif "ImageName" in self.parameters:
result = self.__getImageByName(self.parameters["ImageName"])
if not result["OK"]:
return result
image = result["Value"]
else:
return S_ERROR("No image specified")
createNodeDict["image"] = image
# Get the flavor object
if "FlavorID" in self.parameters and "FlavorName" not in self.parameters:
result = self.__getFlavorByName(self.parameters["FlavorName"])
if not result["OK"]:
return result
flavor = result["Value"]
elif "FlavorID" in self.parameters:
flavor = self.__driver.ex_get_size(self.parameters["FlavorID"])
else:
return S_ERROR("No flavor specified")
self.flavor = flavor
createNodeDict["size"] = flavor
# Get security groups
# if 'ex_security_groups' in self.parameters:
# result = self.__getSecurityGroups( self.parameters['ex_security_groups'] )
# if not result[ 'OK' ]:
# self.log.error( result[ 'Message' ] )
# return result
# self.parameters['ex_security_groups'] = result[ 'Value' ]
result = self._createUserDataScript()
if not result["OK"]:
return result
createNodeDict["ex_userdata"] = result["Value"]
# Optional node contextualization parameters
for param in ["ex_metadata", "ex_pubkey_path", "ex_keyname", "ex_config_drive"]:
if param in self.parameters:
createNodeDict[param] = self.parameters[param]
createNodeDict["name"] = "DIRAC_%s" % instanceID
# createNodeDict['ex_config_drive'] = True
self.log.verbose("Creating node:")
for key, value in createNodeDict.items():
self.log.verbose("%s: %s" % (key, value))
if "networks" in self.parameters:
result = self.getVMNetwork()
if not result["OK"]:
return result
createNodeDict["networks"] = result["Value"]
if "keyname" in self.parameters:
createNodeDict["ex_keyname"] = self.parameters["keyname"]
if "availability_zone" in self.parameters:
createNodeDict["ex_availability_zone"] = self.parameters["availability_zone"]
# Create the VM instance now
try:
vmNode = self.__driver.create_node(**createNodeDict)
except Exception as errmsg:
self.log.error("Exception in driver.create_node", errmsg)
return S_ERROR(errmsg)
publicIP = None
if createPublicIP:
# Wait until the node is running, otherwise getting public IP fails
try:
self.__driver.wait_until_running([vmNode], timeout=600)
result = self.assignFloatingIP(vmNode)
if result["OK"]:
publicIP = result["Value"]
else:
vmNode.destroy()
return result
except Exception as exc:
self.log.debug("Failed to wait node running %s" % str(exc))
vmNode.destroy()
return S_ERROR("Failed to wait until the node is Running")
return S_OK((vmNode, publicIP))
def getVMNodes(self):
"""Get all the nodes on the endpoint
:return: S_OK(list of Node) / S_ERROR
"""
try:
nodes = self.__driver.list_nodes()
except Exception as errmsg:
return S_ERROR(errmsg)
return S_OK(nodes)
def getVMNode(self, nodeID):
"""
Given a Node ID, returns all its configuration details on a
libcloud.compute.base.Node object.
:Parameters:
**nodeID** - `string`
openstack node id ( not uuid ! )
:return: S_OK( Node ) | S_ERROR
"""
try:
node = self.__driver.ex_get_node_details(nodeID)
except Exception as errmsg:
# Let's if the node is in the list of available nodes
result = self.getVMNodes()
if not result["OK"]:
return S_ERROR("Failed to get nodes")
nodeList = result["Value"]
for nd in nodeList:
if nd.id == nodeID:
# Let's try again
try:
node = self.__driver.ex_get_node_details(nodeID)
break
except Exception as exc:
return S_ERROR("Failed to get node details %s" % str(exc))
node = None
return S_OK(node)
def getVMStatus(self, nodeID):
"""
Get the status for a given node ID. libcloud translates the status into a digit
from 0 to 4 using a many-to-one relation ( ACTIVE and RUNNING -> 0 ), which
means we cannot undo that translation. It uses an intermediate states mapping
dictionary, SITEMAP, which we use here inverted to return the status as a
meaningful string. The five possible states are ( ordered from 0 to 4 ):
RUNNING, REBOOTING, TERMINATED, PENDING & UNKNOWN.
:Parameters:
**uniqueId** - `string`
openstack node id ( not uuid ! )
:return: S_OK( status ) | S_ERROR
"""
result = self.getVMNode(nodeID)
if not result["OK"]:
return result
state = result["Value"].state
if state not in STATE_MAP:
return S_ERROR("State %s not in STATEMAP" % state)
return S_OK(STATE_MAP[state])
def getVMNetwork(self, networkNames=None):
"""Get a network object corresponding to the networkName
:param str networkName: network name
:return: S_OK|S_ERROR network object in case of S_OK
"""
if not networkNames:
nameList = []
else:
nameList = list(networkNames)
resultList = []
if not nameList:
nameList = self.parameters.get("networks")
if not nameList:
return S_ERROR("Network names are not specified")
else:
nameList = nameList.split(",")
result = self.__driver.ex_list_networks()
for oNetwork in result:
if oNetwork.name in nameList:
resultList.append(oNetwork)
return S_OK(resultList)
def stopVM(self, nodeID, publicIP=""):
"""
Given the node ID it gets the node details, which are used to destroy the
node making use of the libcloud.openstack driver. If three is any public IP
( floating IP ) assigned, frees it as well.
:Parameters:
**uniqueId** - `string`
openstack node id ( not uuid ! )
**public_ip** - `string`
public IP assigned to the node if any
:return: S_OK | S_ERROR
"""
# Get Node object with node details
result = self.getVMNode(nodeID)
if not result["OK"]:
return result
node = result["Value"]
if node is None:
# Node does not exist
return S_OK()
nodeIP = node.public_ips[0] if node.public_ips else None
if not publicIP and nodeIP is not None:
publicIP = nodeIP
# Delete floating IP if any
if publicIP:
result = self.deleteFloatingIP(publicIP, node)
if not result["OK"]:
self.log.error("Failed in deleteFloatingIP:", result["Message"])
# Destroy the VM instance
if node is not None:
try:
result = self.__driver.destroy_node(node)
if not result:
return S_ERROR("Failed to destroy node: %s" % node.id)
except Exception as errmsg:
return S_ERROR(errmsg)
return S_OK()
def getVMPool(self, poolName):
try:
poolList = self.__driver.ex_list_floating_ip_pools()
for pool in poolList:
if pool.name == poolName:
return S_OK(pool)
except Exception as errmsg:
return S_ERROR(errmsg)
return S_ERROR("IP Pool with the name %s not found" % poolName)
def assignFloatingIP(self, node):
"""
Given a node, assign a floating IP from the ipPool defined on the imageConfiguration
on the CS.
:Parameters:
**node** - `libcloud.compute.base.Node`
node object with the vm details
:return: S_OK( public_ip ) | S_ERROR
"""
ipPool = self.parameters.get("ipPool")
if ipPool:
result = self.getVMPool(ipPool)
if not result["OK"]:
return result
pool = result["Value"]
try:
floatingIP = pool.create_floating_ip()
# Add sleep between creation and assignment
time.sleep(60)
self.__driver.ex_attach_floating_ip_to_node(node, floatingIP)
publicIP = floatingIP.ip_address
return S_OK(publicIP)
except Exception as errmsg:
return S_ERROR(errmsg)
else:
return S_ERROR("No IP pool specified")
def getVMFloatingIP(self, publicIP):
# We are still with IPv4
publicIP = publicIP.replace("::ffff:", "")
ipPool = self.parameters.get("ipPool")
if ipPool:
try:
floatingIP = None
poolList = self.__driver.ex_list_floating_ip_pools()
for pool in poolList:
if pool.name == ipPool:
ipList = pool.list_floating_ips()
for ip in ipList:
if ip.ip_address == publicIP:
floatingIP = ip
break
break
return S_OK(floatingIP)
except Exception as errmsg:
return S_ERROR(errmsg)
else:
return S_ERROR("No IP pool specified")
def deleteFloatingIP(self, publicIP, node):
"""
Deletes a floating IP <public_ip> from the server.
:param str publicIP: public IP to be deleted
:param object node: node to which IP is attached
:return: S_OK | S_ERROR
"""
# We are still with IPv4
publicIP = publicIP.replace("::ffff:", "")
result = self.getVMFloatingIP(publicIP)
if not result["OK"]:
return result
floatingIP = result["Value"]
if floatingIP is None:
return S_OK()
try:
if node is not None:
self.__driver.ex_detach_floating_ip_from_node(node, floatingIP)
floatingIP.delete()
return S_OK()
except Exception as errmsg:
return S_ERROR(errmsg)
| DIRACGrid/DIRAC | src/DIRAC/Resources/Cloud/CloudEndpoint.py | Python | gpl-3.0 | 18,088 | [
"DIRAC"
] | 7bcdb0d4e7f270a7be03b329caf78063347e0d275296e1f962d4cb2801050d68 |
__author__ = 'Daan Wierstra and Tom Schaul'
from itertools import chain
from scipy import zeros
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.structure.connections import FullConnection
# CHECKME: allow modules that do not inherit from NeuronLayer? and treat them as single neurons?
class NeuronDecomposableNetwork(object):
    """A Network mixin that allows accessing parameters decomposed by their
    corresponding individual neuron.

    A "neuron" here is a (module, index) tuple; each network parameter is
    attributed to either its input or output neuron.
    """

    # ESP style treatment: weights into an output module are attributed to
    # their *input* neuron instead of the output one.
    espStyleDecomposition = True

    def addModule(self, m):
        """Add a module, enforcing that it is a NeuronLayer."""
        assert isinstance(m, NeuronLayer)
        super(NeuronDecomposableNetwork, self).addModule(m)

    def sortModules(self):
        """Sort modules, then build the per-neuron parameter index mapping."""
        super(NeuronDecomposableNetwork, self).sortModules()
        self._constructParameterInfo()
        # contains a list of lists of indices
        self.decompositionIndices = {}
        for neuron in self._neuronIterator():
            self.decompositionIndices[neuron] = []
        for w in range(self.paramdim):
            inneuron, outneuron = self.paramInfo[w]
            if self.espStyleDecomposition and outneuron[0] in self.outmodules:
                self.decompositionIndices[inneuron].append(w)
            else:
                self.decompositionIndices[outneuron].append(w)

    def _neuronIterator(self):
        """Yield every neuron as a (module, index-within-module) tuple."""
        for m in self.modules:
            for n in range(m.dim):
                yield (m, n)

    def _constructParameterInfo(self):
        """construct a dictionnary with information about each parameter:
        The key is the index in self.params, and the value is a tuple containing
        (inneuron, outneuron), where a neuron is a tuple of it's module and an index.
        """
        self.paramInfo = {}
        index = 0
        for x in self._containerIterator():
            if isinstance(x, FullConnection):
                for w in range(x.paramdim):
                    inbuf, outbuf = x.whichBuffers(w)
                    self.paramInfo[index + w] = ((x.inmod, x.inmod.whichNeuron(outputIndex=inbuf)),
                                                 (x.outmod, x.outmod.whichNeuron(inputIndex=outbuf)))
            elif isinstance(x, NeuronLayer):
                for n in range(x.paramdim):
                    self.paramInfo[index + n] = ((x, n), (x, n))
            else:
                # A bare `raise` here would produce a confusing
                # "No active exception to re-raise" RuntimeError.
                raise TypeError("Unsupported parameter container: %r" % (x,))
            index += x.paramdim

    def getDecomposition(self):
        """ return a list of arrays, each corresponding to one neuron's relevant parameters """
        res = []
        for neuron in self._neuronIterator():
            nIndices = self.decompositionIndices[neuron]
            if len(nIndices) > 0:
                tmp = zeros(len(nIndices))
                for i, ni in enumerate(nIndices):
                    tmp[i] = self.params[ni]
                res.append(tmp)
        return res

    def setDecomposition(self, decomposedParams):
        """ set parameters by neuron decomposition,
        each corresponding to one neuron's relevant parameters """
        nindex = 0
        for neuron in self._neuronIterator():
            nIndices = self.decompositionIndices[neuron]
            if len(nIndices) > 0:
                for i, ni in enumerate(nIndices):
                    self.params[ni] = decomposedParams[nindex][i]
                nindex += 1

    @staticmethod
    def convertNormalNetwork(n):
        """ convert a normal network into a decomposable one """
        if isinstance(n, RecurrentNetwork):
            res = RecurrentDecomposableNetwork()
            for c in n.recurrentConns:
                res.addRecurrentConnection(c)
        else:
            res = FeedForwardDecomposableNetwork()
        for m in n.inmodules:
            res.addInputModule(m)
        for m in n.outmodules:
            res.addOutputModule(m)
        for m in n.modules:
            res.addModule(m)
        for c in chain(*list(n.connections.values())):
            res.addConnection(c)
        res.name = n.name
        res.sortModules()
        return res
class FeedForwardDecomposableNetwork(NeuronDecomposableNetwork, FeedForwardNetwork):
    """Feed-forward network supporting neuron-wise parameter decomposition."""
    pass
class RecurrentDecomposableNetwork(NeuronDecomposableNetwork, RecurrentNetwork):
    """Recurrent network supporting neuron-wise parameter decomposition."""
    pass
| Ryanglambert/pybrain | pybrain/structure/networks/neurondecomposable.py | Python | bsd-3-clause | 4,341 | [
"NEURON"
] | adbd4a70f33da7a9753355fa0c9018ec0ba6a73c18af1e3a4f831c14b9b9e809 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from typing import Dict, List
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
def least_squares_fit_polynomial(xvals, fvals, localization_point, no_factorials=True, weighted=True, polynomial_order=4):
    """Fit a polynomial of the given order to f(x) samples, centered at a point.

    Performs a (optionally weighted) least squares fit of a polynomial, with
    specified order, to function values ``fvals`` evaluated at ``xvals``.  The
    weighting follows https://doi.org/10.1063/1.4862157, eqn (7), so points
    near ``localization_point`` dominate the fit.  With ``no_factorials`` the
    coefficients are rescaled by n! so they are directly the derivatives.
    """
    shifted = np.asarray(xvals) - localization_point
    wts = None
    if weighted:
        # Gaussian-damped inverse-power weights (R, nu, epsilon as in the paper)
        r_scale = 1.0
        nu = 1
        eps = 1e-3
        z = np.square(shifted / r_scale)
        wts = np.exp(-z) / (z**nu + eps**nu)
    coeffs = np.polynomial.polynomial.polyfit(shifted, fvals, polynomial_order, w=wts)
    if no_factorials:
        # Undo the implicit 1/n! so coeffs[n] is the n-th derivative at the point
        factorial = 1.0
        for order in range(2, polynomial_order + 1):
            factorial *= order
            coeffs[order] *= factorial
    return coeffs
def anharmonicity(rvals: List, energies: List, plot_fit: str = '', mol = None) -> Dict:
    """Generates spectroscopic constants for a diatomic molecules.
       Fits a diatomic potential energy curve using a weighted least squares approach
       (c.f. https://doi.org/10.1063/1.4862157, particularly eqn. 7), locates the minimum
       energy point, and then applies second order vibrational perturbation theory to obtain spectroscopic
       constants.  Any number of points greater than 4 may be provided, and they should bracket the minimum.
       The data need not be evenly spaced, and can be provided in any order.  The data are weighted such that
       those closest to the minimum have highest impact.

       A dictionary with the following keys, which correspond to spectroscopic constants, is returned:

       :param rvals: The bond lengths (in Angstrom) for which energies are
           provided, of length at least 5 and equal to the length of the energies array

       :param energies: The energies (Eh) computed at the bond lengths in the rvals list

       :param plot_fit: A string describing where to save a plot of the harmonic and anharmonic fits, the
           inputted data points, re, r0 and the first few energy levels, if matplotlib
           is available.  Set to 'screen' to generate an interactive plot on the screen instead.  If a filename is
           provided, the image type is determined by the extension; see matplotlib for supported file types.

       :returns: (*dict*) Keys: "re", "r0", "we", "wexe", "nu", "ZPVE(harmonic)", "ZPVE(anharmonic)", "Be", "B0", "ae", "De"
                 corresponding to the spectroscopic constants in cm-1
    """
    # NOTE(review): 10e-10 is *not* 1 Angstrom in meters (that would be 1e-10),
    # but combined with constants.c being in SI (m/s) in the B = h/(8 pi^2 c I)
    # expression below, the extra factor of 100 in I makes B come out directly
    # in cm-1, and r0/recheck back out in Angstrom. Do not "fix" it in isolation.
    angstrom_to_meter = 10e-10

    # Make sure the input is valid
    if len(rvals) != len(energies):
        raise ValidationError("The number of energies must match the number of distances")
    npoints = len(rvals)
    if npoints < 5:
        raise ValidationError("At least 5 data points must be provided to compute anharmonicity")
    core.print_out("\n\nPerforming a fit to %d data points\n" % npoints)

    # Sort radii and values first from lowest to highest radius
    indices = np.argsort(rvals)
    rvals = np.array(rvals)[indices]
    energies = np.array(energies)[indices]

    # Make sure the molecule the user provided is the active one
    molecule = mol or core.get_active_molecule()
    molecule.update_geometry()
    natoms = molecule.natom()
    if natoms != 2:
        raise Exception("The current molecule must be a diatomic for this code to work!")
    m1 = molecule.mass(0)
    m2 = molecule.mass(1)

    # Find rval of the minimum of energies, check number of points left and right
    min_index = np.argmin(energies)
    if min_index < 3:
        core.print_out("\nWarning: fewer than 3 points provided with a r < r(min(E))!\n")
    if min_index >= len(energies) - 3:
        core.print_out("\nWarning: fewer than 3 points provided with a r > r(min(E))!\n")

    # Optimize the geometry with Newton steps, refitting the surface around each new geometry
    core.print_out("\nOptimizing geometry based on current surface:\n\n")
    re = rvals[min_index]
    maxit = 30
    thres = 1.0e-9
    for i in range(maxit):
        derivs = least_squares_fit_polynomial(rvals, energies, localization_point=re)
        e, g, H = derivs[0:3]
        core.print_out("  E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
        if abs(g) < thres:
            break
        re -= g / H
    else:
        # for/else: only raised when the loop never hit the convergence break
        raise ConvergenceError("diatomic geometry optimization", maxit)
    core.print_out(" Final E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))

    if re < min(rvals):
        raise Exception("Minimum energy point is outside range of points provided. Use a lower range of r values.")
    if re > max(rvals):
        raise Exception("Minimum energy point is outside range of points provided. Use a higher range of r values.")

    # Convert to convenient units, and compute spectroscopic constants
    d0, d1, d2, d3, d4 = derivs * constants.hartree2aJ
    core.print_out("\nEquilibrium Energy %20.14f Hartrees\n" % e)
    core.print_out("Gradient           %20.14f\n" % g)
    core.print_out("Quadratic Force Constant %14.7f MDYNE/A\n" % d2)
    core.print_out("Cubic Force Constant     %14.7f MDYNE/A**2\n" % d3)
    core.print_out("Quartic Force Constant   %14.7f MDYNE/A**3\n" % d4)

    # Reduced mass (kg) and harmonic/anharmonic frequencies (cm-1)
    mu = ((m1 * m2) / (m1 + m2)) * constants.amu2kg
    we = 5.3088375e-11 * np.sqrt(d2 / mu)
    wexe = (1.2415491e-6) * (we / d2) ** 2 * ((5.0 * d3 * d3) / (3.0 * d2) - d4)

    # Rotational constant: Be
    I = mu * (re * angstrom_to_meter) ** 2
    B = constants.h / (8.0 * np.pi ** 2 * constants.c * I)

    # alpha_e and quartic centrifugal distortion constant
    ae = -(6.0 * B ** 2 / we) * ((1.05052209e-3 * we * d3) / (np.sqrt(B * d2 ** 3)) + 1.0)
    de = 4.0 * B ** 3 / we ** 2

    # B0 and r0 (plus re check using Be)
    B0 = B - ae / 2.0
    r0 = np.sqrt(constants.h / (8.0 * np.pi ** 2 * mu * constants.c * B0))
    recheck = np.sqrt(constants.h / (8.0 * np.pi ** 2 * mu * constants.c * B))
    r0 /= angstrom_to_meter
    recheck /= angstrom_to_meter

    # Fundamental frequency nu
    nu = we - 2.0 * wexe
    zpve_nu = 0.5 * we - 0.25 * wexe
    zpve_we = 0.5 * we

    # Generate pretty pictures, if requested
    if plot_fit:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            msg = "\n\tPlot not generated; matplotlib is not installed on this machine.\n\n"
            print(msg)
            core.print_out(msg)
            # Without matplotlib the plotting code below would raise NameError
            # on `plt`, so disable plotting entirely.
            plot_fit = ''

    if plot_fit:
        # Correct the derivatives for the missing factorial prefactors
        dvals = np.zeros(5)
        dvals[0:5] = derivs[0:5]
        dvals[2] /= 2
        dvals[3] /= 6
        dvals[4] /= 24

        # Default plot range, before considering energy levels
        minE = np.min(energies)
        maxE = np.max(energies)
        minR = np.min(rvals)
        maxR = np.max(rvals)

        # Plot the first few vibrational energy levels (harmonic and VPT2)
        we_au = we / constants.hartree2wavenumbers
        wexe_au = wexe / constants.hartree2wavenumbers
        coefs2 = [dvals[2], dvals[1], dvals[0]]
        coefs4 = [dvals[4], dvals[3], dvals[2], dvals[1], dvals[0]]
        for n in range(3):
            Eharm = we_au * (n + 0.5)
            Evpt2 = Eharm - wexe_au * (n + 0.5) ** 2
            # Solve for the classical turning points at each energy level
            coefs2[-1] = -Eharm
            coefs4[-1] = -Evpt2
            roots2 = np.roots(coefs2)
            roots4 = np.roots(coefs4)
            xvals2 = roots2 + re
            xvals4 = np.choose(np.where(np.isreal(roots4)), roots4)[0].real + re
            Eharm += dvals[0]
            Evpt2 += dvals[0]
            plt.plot(xvals2, [Eharm, Eharm], 'b', linewidth=1)
            plt.plot(xvals4, [Evpt2, Evpt2], 'g', linewidth=1)
        maxE = Eharm
        maxR = np.max([xvals2, xvals4])
        minR = np.min([xvals2, xvals4])

        # Find ranges for the plot
        dE = maxE - minE
        minE -= 0.2 * dE
        maxE += 0.4 * dE
        dR = maxR - minR
        minR -= 0.2 * dR
        maxR += 0.2 * dR

        # Generate the fitted PES
        xpts = np.linspace(minR, maxR, 1000)
        xrel = xpts - re
        xpows = xrel[:, None] ** range(5)
        fit2 = np.einsum('xd,d', xpows[:, 0:3], dvals[0:3])
        fit4 = np.einsum('xd,d', xpows, dvals)

        # Make / display the plot
        plt.plot(xpts, fit2, 'b', linewidth=2.5, label='Harmonic (quadratic) fit')
        plt.plot(xpts, fit4, 'g', linewidth=2.5, label='Anharmonic (quartic) fit')
        plt.plot([re, re], [minE, maxE], 'b--', linewidth=0.5)
        plt.plot([r0, r0], [minE, maxE], 'g--', linewidth=0.5)
        plt.scatter(rvals, energies, c='Black', linewidth=3, label='Input Data')
        plt.legend()
        plt.xlabel('Bond length (Angstroms)')
        plt.ylabel('Energy (Eh)')
        plt.xlim(minR, maxR)
        plt.ylim(minE, maxE)
        if plot_fit == 'screen':
            plt.show()
        else:
            plt.savefig(plot_fit)
            core.print_out("\n\tPES fit saved to %s.\n\n" % plot_fit)

    core.print_out("\nre       = %10.6f A  check: %10.6f\n" % (re, recheck))
    core.print_out("r0       = %10.6f A\n" % r0)
    core.print_out("E at re  = %17.10f Eh\n" % e)
    core.print_out("we       = %10.4f cm-1\n" % we)
    core.print_out("wexe     = %10.4f cm-1\n" % wexe)
    core.print_out("nu       = %10.4f cm-1\n" % nu)
    core.print_out("ZPVE(we) = %10.4f cm-1\n" % zpve_we)
    core.print_out("ZPVE(nu) = %10.4f cm-1\n" % zpve_nu)
    core.print_out("Be       = %10.4f cm-1\n" % B)
    core.print_out("B0       = %10.4f cm-1\n" % B0)
    core.print_out("ae       = %10.4f cm-1\n" % ae)
    core.print_out("De       = %10.7f cm-1\n" % de)
    results = {
        "re": re,
        "r0": r0,
        "we": we,
        "wexe": wexe,
        "nu": nu,
        "E(re)": e,
        "ZPVE(harmonic)": zpve_we,
        "ZPVE(anharmonic)": zpve_nu,
        "Be": B,
        "B0": B0,
        "ae": ae,
        "De": de
    }
    return results
| psi4/psi4 | psi4/driver/diatomic.py | Python | lgpl-3.0 | 11,346 | [
"Psi4"
] | fdf6072018ea616ab13276b9feff55310bd684c2a7016a82fb6bc2edeebc1f28 |
#! /usr/bin/env python
# ==========================================================================
# This scripts performs unit tests for the csmodelsois tool.
#
# Copyright (C) 2017-2020 Josh Cardenzana
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import os
import gammalib
import cscripts
from testing import test
# =============================== #
# Test class for csmodelsois tool #
# =============================== #
class Test(test):
    """
    Test class for csmodelsois tool

    This test class makes unit tests for the csmodelsois tool by using it from
    the command line and from Python.
    """

    # Constructor
    def __init__(self):
        """
        Constructor
        """
        # Call base class constructor
        test.__init__(self)

        # Return
        return

    # Set test functions
    def set(self):
        """
        Set all test functions
        """
        # Set test name
        self.name('csmodelsois')

        # Append tests
        self.append(self._test_cmd, 'Test csmodelsois on command line')
        self.append(self._test_python, 'Test csmodelsois from Python')

        # Return
        return

    # Test csmodelsois on command line
    def _test_cmd(self):
        """
        Test csmodelsois on the command line
        """
        # Set tool name
        csmodelsois = self._script('csmodelsois')

        # Setup csmodelsois command
        cmd = csmodelsois+' inmodel="'+self._model+'"'+ \
                          ' outcube="csmodelsois_cmd1.fits"'+\
                          ' emin=0.1 emax=100.0 enumbins=10 ebinalg=LOG'+ \
                          ' nxpix=100 nypix=100 binsz=0.04 coordsys=CEL'+ \
                          ' soilist="" outmodel=NONE'+ \
                          ' ra=83.63 dec=22.01 proj=CAR'+ \
                          ' logfile="csmodelsois_cmd1.log" chatter=1'

        # Check if execution was successful
        self.test_assert(self._execute(cmd) == 0,
                         'Check successful execution from command line')

        # Check map cube
        self._check_result_file('csmodelsois_cmd1.fits')

        # Setup csmodelsois command with an invalid input model; the output
        # cube should never be produced (filename typo "ccsmodelsois" fixed
        # to match the log file naming scheme)
        cmd = csmodelsois+' inmodel="events_that_do_not_exist.fits"'+ \
                          ' outcube="csmodelsois_cmd2.fits"'+\
                          ' emin=0.1 emax=100.0 enumbins=10 ebinalg=LOG'+ \
                          ' nxpix=100 nypix=100 binsz=0.04 coordsys=CEL'+ \
                          ' soilist="" outmodel=NONE'+ \
                          ' ra=83.63 dec=22.01 proj=CAR'+ \
                          ' logfile="csmodelsois_cmd2.log" debug=yes'

        # Check if execution failed
        self.test_assert(self._execute(cmd, success=False) != 0,
                         'Check invalid input file when executed from command line')

        # Check csmodelsois --help
        self._check_help(csmodelsois)

        # Return
        return

    # Test csmodelsois from Python
    def _test_python(self):
        """
        Test csmodelsois from Python
        """
        # Allocate csmodelsois
        modelsois = cscripts.csmodelsois()

        # Check that empty csmodelsois tool holds an map cube that has no
        # energy bins
        self._check_cube(modelsois.mapcube(), nmaps=0, npixels=0)

        # Check that saving does nothing
        modelsois['outcube']  = 'csmodelsois_py0.fits'
        modelsois['outmodel'] = 'NONE'
        modelsois['logfile']  = 'csmodelsois_py0.log'
        modelsois.logFileOpen()
        modelsois.save()
        self.test_assert(not os.path.isfile('csmodelsois_py0.fits'),
                         'Check that no map cube has been created')

        # Check that clearing does not lead to an exception or segfault
        modelsois.clear()

        # Now set csmodelsois parameters
        modelsois['inmodel']  = self._model
        modelsois['ebinalg']  = 'LOG'
        modelsois['emin']     = 0.1
        modelsois['emax']     = 100.0
        modelsois['enumbins'] = 10
        modelsois['nxpix']    = 100
        modelsois['nypix']    = 100
        modelsois['binsz']    = 0.04
        modelsois['coordsys'] = 'CEL'
        modelsois['proj']     = 'CAR'
        modelsois['ra']       = 83.63
        modelsois['dec']      = 22.01
        modelsois['ptsrcsig'] = 1.0
        modelsois['outcube']  = 'csmodelsois_py1.fits'
        modelsois['logfile']  = 'csmodelsois_py1.log'
        modelsois['chatter']  = 2
        modelsois['soilist']  = ''
        modelsois['outmodel'] = 'NONE'

        # Run csmodelsois tool
        modelsois.logFileOpen()   # Make sure we get a log file
        modelsois.run()

        # Save map cube
        modelsois.save()

        # Check map cube
        self._check_result_file('csmodelsois_py1.fits')

        # Allocate csmodelsois scripts, set models and do now a linear
        # binning; also do not use Gaussian point sources
        modelsois = cscripts.csmodelsois()
        modelsois['inmodel']  = self._model
        modelsois['ebinalg']  = 'LIN'
        modelsois['emin']     = 0.1
        modelsois['emax']     = 100.0
        modelsois['enumbins'] = 10
        modelsois['nxpix']    = 100
        modelsois['nypix']    = 100
        modelsois['binsz']    = 0.04
        modelsois['coordsys'] = 'CEL'
        modelsois['proj']     = 'CAR'
        modelsois['ra']       = 83.63
        modelsois['dec']      = 22.01
        modelsois['ptsrcsig'] = 0.0
        modelsois['outcube']  = 'csmodelsois_py2.fits'
        modelsois['logfile']  = 'csmodelsois_py2.log'
        modelsois['chatter']  = 4
        modelsois['soilist']  = ''
        modelsois['outmodel'] = 'NONE'

        # Execute mapcube
        modelsois.logFileOpen()  # Needed to get a new log file
        modelsois.execute()

        # Check result file
        self._check_result_file('csmodelsois_py2.fits')

        # Update output filenames and output a new model file
        modelsois['outcube']  = 'csmodelsois_py3.fits'
        modelsois['logfile']  = 'csmodelsois_py3.log'
        modelsois['outmodel'] = 'csmodelsois_py3.xml'

        # Execute the file
        modelsois.logFileOpen()
        modelsois.execute()

        # Now check that the output model file doesnt contain a Crab Sources
        models3 = gammalib.GModels('csmodelsois_py3.xml')
        self.test_assert(not models3.contains('Crab'),
                         'Check Crab model is not present')
        self.test_assert(models3.contains(modelsois.cubemodelname()),
                         'Check cube model is present')
        self._check_result_file('csmodelsois_py3.fits')

        # Return
        return

    # Check result file
    def _check_result_file(self, filename):
        """
        Check content of map cube

        Parameters
        ----------
        filename : str
            Map cube file name
        """
        # Load map cube
        cube = gammalib.GModelSpatialDiffuseCube(filename)

        # Check map cube
        self._check_cube(cube)

        # Return
        return

    # Check map cube
    def _check_cube(self, cube, nmaps=11, npixels=10000):
        """
        Check map cube

        Parameters
        ----------
        cube : `~gammalib.GModelSpatialDiffuseCube`
            Map cube
        nmaps : int, optional
            Number of maps
        npixels : int, optional
            Number of pixels
        """
        # Get energies (this forces loading in case the map cube is not
        # loaded)
        self.test_value(len(cube.energies()), nmaps, 'Check number of energies')

        # Check dimensions
        self.test_value(cube.cube().nmaps(), nmaps, 'Check number of maps')
        self.test_value(cube.cube().npix(), npixels, 'Check number of pixels')

        # Return
        return
| ctools/ctools | test/test_csmodelsois.py | Python | gpl-3.0 | 8,410 | [
"Gaussian"
] | d110ccf521516698e70a5c4a8731b5db84adfd941014a50495b6b0214d11de8d |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms
Ported from the astroML project: http://astroml.org/
"""
import numpy as np
from . import bayesian_blocks
__all__ = ['histogram', 'scott_bin_width', 'freedman_bin_width',
'knuth_bin_width', 'calculate_bin_edges']
def calculate_bin_edges(a, bins=10, range=None, weights=None):
    """
    Calculate histogram bin edges like `numpy.histogram_bin_edges`.

    Parameters
    ----------
    a : array_like
        Input data. The bin edges are calculated over the flattened array.
    bins : int or list or str (optional)
        If ``bins`` is an int, it is the number of bins. If it is a list
        it is taken to be the bin edges. If it is a string, it must be one
        of 'blocks', 'knuth', 'scott' or 'freedman'. See
        `~astropy.stats.histogram` for a description of each method.
    range : tuple or None (optional)
        The minimum and maximum range for the histogram. If not specified,
        it will be (a.min(), a.max()). However, if bins is a list it is
        returned unmodified regardless of the range argument.
    weights : array_like, optional
        An array the same shape as ``a``. If given, the histogram accumulates
        the value of the weight corresponding to ``a`` instead of returning the
        count of values. This argument does not affect determination of bin
        edges, though they may be used in the future as new methods are added.

    Returns
    -------
    bin_edges : ndarray or sequence
        The computed bin edges. If ``bins`` was already a sequence of edges
        it is returned unmodified.

    Raises
    ------
    NotImplementedError
        If ``bins`` is a string and ``weights`` is given.
    ValueError
        If ``bins`` is an unrecognized string.
    """
    # If range is specified, we need to truncate the data for
    # the bin-finding routines.
    if range is not None:
        a = a[(a >= range[0]) & (a <= range[1])]

    # If bins is a string, first compute bin edges with the desired heuristic.
    if isinstance(bins, str):
        a = np.asarray(a).ravel()

        # TODO: if weights is specified, we need to modify things.
        #       e.g. we could use point measures fitness for Bayesian blocks
        if weights is not None:
            raise NotImplementedError("weights are not yet supported "
                                      "for the enhanced histogram")

        if bins == 'blocks':
            bins = bayesian_blocks(a)
        elif bins == 'knuth':
            da, bins = knuth_bin_width(a, True)
        elif bins == 'scott':
            da, bins = scott_bin_width(a, True)
        elif bins == 'freedman':
            da, bins = freedman_bin_width(a, True)
        else:
            raise ValueError(f"unrecognized bin code: '{bins}'")

        # Consistency fix: test explicitly against None (as done above),
        # instead of relying on the truthiness of the tuple.
        if range is not None:
            # Check that the upper and lower edges are what was requested.
            # The current implementation of the bin width estimators does not
            # guarantee this, it only ensures that data outside the range is
            # excluded from calculation of the bin widths.
            if bins[0] != range[0]:
                bins[0] = range[0]
            if bins[-1] != range[1]:
                bins[-1] = range[1]

    elif np.ndim(bins) == 0:
        # Number of bins was given; delegate the edge computation to numpy.
        bins = np.histogram_bin_edges(a, bins, range=range, weights=weights)

    return bins
def histogram(a, bins=10, range=None, weights=None, **kwargs):
    """Enhanced histogram function, providing adaptive binnings.

    Behaves like ``numpy.histogram()`` except that ``bins`` may also be a
    string selecting a more sophisticated bin-determination algorithm:

    - 'blocks' : use bayesian blocks for dynamic bin widths
    - 'knuth' : use Knuth's rule to determine bins
    - 'scott' : use Scott's rule to determine bins
    - 'freedman' : use the Freedman-Diaconis rule to determine bins

    Parameters
    ----------
    a : array_like
        array of data to be histogrammed
    bins : int or list or str (optional)
        number of bins, explicit bin edges, or one of the strings above
    range : tuple or None (optional)
        the minimum and maximum range for the histogram. If not specified,
        it will be (x.min(), x.max())
    weights : array_like, optional
        An array the same shape as ``a``. If given, the histogram accumulates
        the value of the weight corresponding to ``a`` instead of returning
        the count of values. This argument does not affect determination of
        bin edges.
    **kwargs
        other keyword arguments are forwarded to ``numpy.histogram()``.

    Returns
    -------
    hist : array
        The values of the histogram. See ``density`` and ``weights`` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    numpy.histogram
    """
    edges = calculate_bin_edges(a, bins=bins, range=range, weights=weights)
    # Delegate the actual counting to numpy using the resolved edges.
    return np.histogram(a, bins=edges, range=range, weights=weights, **kwargs)
def scott_bin_width(data, return_bins=False):
    r"""Return the optimal histogram bin width using Scott's rule.

    Scott's rule is a normal reference rule: assuming approximately
    Gaussian data, it minimizes the integrated mean squared error of the
    bin approximation. The optimal width is

    .. math::
        \Delta_b = \frac{3.5\sigma}{n^{1/3}}

    where :math:`\sigma` is the standard deviation of the data and
    :math:`n` is the number of data points [1]_.

    Parameters
    ----------
    data : array-like, ndim=1
        observed (one-dimensional) data
    return_bins : bool (optional)
        if True, also return the bin edges

    Returns
    -------
    width : float
        optimal bin width using Scott's rule
    bins : ndarray
        bin edges: returned only if ``return_bins`` is True

    References
    ----------
    .. [1] Scott, David W. (1979). "On optimal and data-based histograms".
       Biometricka 66 (3): 605-610

    See Also
    --------
    knuth_bin_width
    freedman_bin_width
    bayesian_blocks
    histogram
    """
    data = np.asarray(data)
    if data.ndim != 1:
        raise ValueError("data should be one-dimensional")

    n = data.size
    sigma = np.std(data)
    width = 3.5 * sigma / (n ** (1 / 3))

    if not return_bins:
        return width

    # Bins are anchored at the data minimum and cover the full data range.
    n_bins = max(1, np.ceil((data.max() - data.min()) / width))
    edges = data.min() + width * np.arange(n_bins + 1)
    return width, edges
def freedman_bin_width(data, return_bins=False):
    r"""Return the optimal histogram bin width using the Freedman-Diaconis rule.

    The Freedman-Diaconis rule is a normal reference rule like Scott's rule,
    but uses rank-based statistics, making the result more robust to
    deviations from normality. The optimal width is

    .. math::
        \Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}

    where :math:`q_{N}` is the :math:`N` percent quartile of the data and
    :math:`n` is the number of data points [1]_.

    Parameters
    ----------
    data : array-like, ndim=1
        observed (one-dimensional) data
    return_bins : bool (optional)
        if True, also return the bin edges

    Returns
    -------
    width : float
        optimal bin width using the Freedman-Diaconis rule
    bins : ndarray
        bin edges: returned only if ``return_bins`` is True

    References
    ----------
    .. [1] D. Freedman & P. Diaconis (1981)
       "On the histogram as a density estimator: L2 theory".
       Probability Theory and Related Fields 57 (4): 453-476

    See Also
    --------
    knuth_bin_width
    scott_bin_width
    bayesian_blocks
    histogram
    """
    data = np.asarray(data)
    if data.ndim != 1:
        raise ValueError("data should be one-dimensional")

    n = data.size
    if n < 4:
        raise ValueError("data should have more than three entries")

    q1, q3 = np.percentile(data, [25, 75])
    width = 2 * (q3 - q1) / (n ** (1 / 3))

    if not return_bins:
        return width

    dmin = data.min()
    n_bins = max(1, np.ceil((data.max() - dmin) / width))
    try:
        edges = dmin + width * np.arange(n_bins + 1)
    except ValueError as err:
        # A vanishing inter-quartile range produces an absurd bin count;
        # translate numpy's size error into an actionable message.
        if 'Maximum allowed size exceeded' in str(err):
            raise ValueError(
                'The inter-quartile range of the data is too small: '
                'failed to construct histogram with {} bins. '
                'Please use another bin method, such as '
                'bins="scott"'.format(n_bins + 1))
        else:  # Something else  # pragma: no cover
            raise
    return width, edges
def knuth_bin_width(data, return_bins=False, quiet=True):
    r"""Return the optimal histogram bin width using Knuth's rule.

    Knuth's rule is a fixed-width, Bayesian approach to determining the
    optimal bin width of a histogram: the optimal number of bins maximizes
    the function

    .. math::
        F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
        - M\log\Gamma(\frac{1}{2})
        - \log\Gamma(\frac{2n+M}{2})
        + \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})

    where :math:`\Gamma` is the Gamma function, :math:`n` the number of
    data points and :math:`n_k` the number of measurements in bin
    :math:`k` [1]_.

    Parameters
    ----------
    data : array-like, ndim=1
        observed (one-dimensional) data
    return_bins : bool (optional)
        if True, also return the bin edges
    quiet : bool (optional)
        if True (default) then suppress stdout output from scipy.optimize

    Returns
    -------
    dx : float
        optimal bin width. Bins are measured starting at the first data point.
    bins : ndarray
        bin edges: returned only if ``return_bins`` is True

    References
    ----------
    .. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
       arXiv:0605197, 2006

    See Also
    --------
    freedman_bin_width
    scott_bin_width
    bayesian_blocks
    histogram
    """
    # import here because of optional scipy dependency
    from scipy import optimize

    objective = _KnuthF(data)
    # Seed the optimization with the Freedman-Diaconis bin count.
    _, initial_bins = freedman_bin_width(data, True)
    best_m = optimize.fmin(objective, len(initial_bins), disp=not quiet)[0]

    edges = objective.bins(best_m)
    width = edges[1] - edges[0]
    return (width, edges) if return_bins else width
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
dx : float
Width of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M) +
self.gammaln(0.5 * M) -
M * self.gammaln(0.5) -
self.gammaln(self.n + 0.5 * M) +
np.sum(self.gammaln(nk + 0.5)))
| MSeifert04/astropy | astropy/stats/histogram.py | Python | bsd-3-clause | 12,617 | [
"Gaussian"
] | efd01eb34d6bcab87872359642cd345141795ea76b6d037aad8885f1e86d4a60 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.base.networks import PreSynapticTypes
class StdTagFunctors(object):
    """Accessors for the default tag-functor lists used when recording."""

    @classmethod
    def get_record_functors_synapse(cls):
        # Default tag functors applied to synapse recordings.
        return [SynapseInPopulationRecordTags.get_tags]

    @classmethod
    def get_record_functors_neuron(cls):
        # Default tag functors applied to neuron recordings.
        return [NeuronInPopulationRecordTags.get_tags]
class UserTagFunctorCellLocation(object):
    # Empty placeholder with no behavior.
    # NOTE(review): presumably a stub/marker for user-defined cell-location
    # tag functors -- confirm intended use before extending.
    pass
class NeuronInPopulationRecordTags(object):
    """Builds the standard list of record tags for a neuron recording."""

    @classmethod
    def get_tags(cls, neuron, neuron_population, cell_location):
        """Return tags: optional 'SECTION:<idtag>', then the neuron name and
        the population name."""
        tags = []
        section_idtag = cell_location.section.idtag
        if section_idtag:
            tags.append('SECTION:%s' % section_idtag)
        tags.append(neuron.name)
        tags.append(neuron_population.pop_name)
        return tags
class SynapseInPopulationRecordTags(object):
    """Builds the standard list of record tags for a synapse recording."""

    @classmethod
    def get_tags(cls, synapse, synapse_population):
        """Return tags describing the pre- and post-synaptic sides, plus the
        synapse name and the synapse population name."""
        tags = []

        # Presynaptic side: either a cell or a fixed-time trigger.
        if synapse.get_trigger().get_type() == PreSynapticTypes.Cell:
            pre_cell = synapse.get_presynaptic_cell()
            tags.append('PRECELL:%s' % pre_cell.name)
            if pre_cell.population is not None:
                tags.append('PREPOP:%s' % pre_cell.population.pop_name)
        else:
            tags.append('FIXEDTIMETRIGGER')

        # Postsynaptic side.
        post_cell = synapse.get_postsynaptic_cell()
        tags.append('POSTCELL:%s' % post_cell.name)
        if post_cell.population:
            tags.append('POSTPOP:%s' % post_cell.population.pop_name)

        tags.append(synapse.name)
        tags.append(synapse_population.synapse_pop_name)
        return tags
| mikehulluk/morphforge | src/morphforgecontrib/tags/__init__.py | Python | bsd-2-clause | 3,265 | [
"NEURON"
] | fade114507d2f44f96c84918bd102ec984782fb1e64a11e12a7ddfad06c0a359 |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SuiteVisitor(object):
    """Base class for visitors walking through a test suite structure.

    Each ``visit_x`` method first calls the corresponding ``start_x`` hook.
    If the hook returns ``False``, the element's children are skipped and
    ``end_x`` is not called; otherwise the children are visited and
    ``end_x`` is called afterwards. Subclasses override only the hooks
    they need.
    """

    def visit_suite(self, suite):
        if self.start_suite(suite) is False:
            return
        suite.keywords.visit(self)
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)

    def start_suite(self, suite):
        """Hook called before a suite's children are visited."""
        pass

    def end_suite(self, suite):
        """Hook called after a suite's children have been visited."""
        pass

    def visit_test(self, test):
        if self.start_test(test) is False:
            return
        test.keywords.visit(self)
        self.end_test(test)

    def start_test(self, test):
        """Hook called before a test's keywords are visited."""
        pass

    def end_test(self, test):
        """Hook called after a test's keywords have been visited."""
        pass

    def visit_keyword(self, kw):
        if self.start_keyword(kw) is False:
            return
        kw.keywords.visit(self)
        kw.messages.visit(self)
        self.end_keyword(kw)

    def start_keyword(self, keyword):
        """Hook called before a keyword's children are visited."""
        pass

    def end_keyword(self, keyword):
        """Hook called after a keyword's children have been visited."""
        pass

    def visit_message(self, msg):
        if self.start_message(msg) is False:
            return
        self.end_message(msg)

    def start_message(self, msg):
        """Hook called when a message is visited."""
        pass

    def end_message(self, msg):
        """Hook called after a message has been visited."""
        pass
class SkipAllVisitor(SuiteVisitor):
    """Travels suite and its sub-suites without doing anything."""

    # Every visit_x entry point is overridden as a no-op, so neither the
    # start/end hooks nor any child elements are processed.

    def visit_suite(self, suite):
        pass

    def visit_keyword(self, kw):
        pass

    def visit_test(self, test):
        pass

    def visit_message(self, msg):
        pass
| eric-stanley/robotframework | src/robot/model/visitor.py | Python | apache-2.0 | 1,995 | [
"VisIt"
] | 793a28e3cf8a3de4fb67499d3a63c4f3a0e8e0bb8c731b9933aafd40d3f3f1da |
def load_parameters():
    """
    Loads the defined hyperparameters.

    NOTE(review): the returned dictionary is built from ``locals().copy()``
    at the end of this function, so every local variable name defined in
    this body becomes a configuration key. Renaming any local variable
    therefore changes the public configuration key it produces -- do not
    rename them casually.

    :return parameters: Dictionary of loaded parameters
    """

    # Input data params
    TASK_NAME = 'infreq' #out-domain' # Task name
    DATASET_NAME = '10' # Dataset name
    SRC_LAN = 'en' # Language of the source text
    TRG_LAN = 'es' # Language of the target text
    DATA_ROOT_PATH = '/mnt/data/zparcheta/nmt-keras-forked/examples/medical_corpus/infreq-selection/10/joint_bpe/' # Path where data is stored

    # SRC_LAN or TRG_LAN will be added to the file names
    TEXT_FILES = {'train': TASK_NAME + DATASET_NAME +'.clean.lowercased.', # Data files
                  'val': 'dev-test/dev.clean.lowercased.',
                  'test': 'dev-test/test.clean.lowercased.'}

    # Dataset class parameters
    INPUTS_IDS_DATASET = ['source_text', 'state_below'] # Corresponding inputs of the dataset
    OUTPUTS_IDS_DATASET = ['target_text'] # Corresponding outputs of the dataset
    INPUTS_IDS_MODEL = ['source_text', 'state_below'] # Corresponding inputs of the built model
    OUTPUTS_IDS_MODEL = ['target_text'] # Corresponding outputs of the built model

    # Evaluation params
    METRICS = ['coco'] # Metric used for evaluating the model
    EVAL_ON_SETS = ['val', 'test'] # Possible values: 'train', 'val' and 'test' (external evaluator)
    EVAL_ON_SETS_KERAS = [] # Possible values: 'train', 'val' and 'test' (Keras' evaluator). Untested.
    START_EVAL_ON_EPOCH = 4 # First epoch to start the model evaluation
    EVAL_EACH_EPOCHS = False # Select whether evaluate between N epochs or N updates
    EVAL_EACH = 2000 # Sets the evaluation frequency (epochs or updates)

    # Search parameters
    SAMPLING = 'max_likelihood' # Possible values: multinomial or max_likelihood (recommended)
    TEMPERATURE = 1 # Multinomial sampling parameter
    BEAM_SEARCH = True # Switches on-off the beam search procedure
    BEAM_SIZE = 6 # Beam size (in case of BEAM_SEARCH == True)
    OPTIMIZED_SEARCH = True # Compute annotations only a single time per sample
    SEARCH_PRUNING = False # Apply pruning strategies to the beam search method.
    # It will likely increase decoding speed, but decrease quality.
    MAXLEN_GIVEN_X = True # Generate translations of similar length to the source sentences
    MAXLEN_GIVEN_X_FACTOR = 1.7 # The hypotheses will have (as maximum) the number of words of the
    # source sentence * LENGTH_Y_GIVEN_X_FACTOR
    MINLEN_GIVEN_X = True # Generate translations of similar length to the source sentences
    MINLEN_GIVEN_X_FACTOR = 2 # The hypotheses will have (as minimum) the number of words of the
    # source sentence / LENGTH_Y_GIVEN_X_FACTOR

    # Apply length and coverage decoding normalizations.
    # See Section 7 from Wu et al. (2016) (https://arxiv.org/abs/1609.08144)
    LENGTH_PENALTY = False # Apply length penalty
    LENGTH_NORM_FACTOR = 0.2 # Length penalty factor
    COVERAGE_PENALTY = False # Apply source coverage penalty
    COVERAGE_NORM_FACTOR = 0.2 # Coverage penalty factor

    # Alternative (simple) length normalization.
    NORMALIZE_SAMPLING = False # Normalize hypotheses scores according to their length:
    ALPHA_FACTOR = .6 # Normalization according to |h|**ALPHA_FACTOR

    # Sampling params: Show some samples during training
    SAMPLE_ON_SETS = ['train', 'val'] # Possible values: 'train', 'val' and 'test'
    N_SAMPLES = 5 # Number of samples generated
    START_SAMPLING_ON_EPOCH = 2 # First epoch where to start the sampling counter
    SAMPLE_EACH_UPDATES = 10000 # Sampling frequency (always in #updates)

    # Unknown words treatment
    POS_UNK = True # Enable POS_UNK strategy for unknown words
    HEURISTIC = 0 # Heuristic to follow:
    # 0: Replace the UNK by the correspondingly aligned source
    # 1: Replace the UNK by the translation (given by an external
    # dictionary) of the correspondingly aligned source
    # 2: Replace the UNK by the translation (given by an external
    # dictionary) of the correspondingly aligned source only if it
    # starts with a lowercase. Otherwise, copies the source word.
    ALIGN_FROM_RAW = True # Align using the full vocabulary or the short_list
    MAPPING = DATA_ROOT_PATH + '/mapping.%s_%s.pkl' % (SRC_LAN, TRG_LAN) # Source -- Target pkl mapping (used for heuristics 1--2)

    # Word representation params
    TOKENIZATION_METHOD = 'tokenize_none' # Select which tokenization we'll apply.
    # See Dataset class (from stager_keras_wrapper) for more info.
    BPE_CODES_PATH = DATA_ROOT_PATH + '/training_codes.joint' # If TOKENIZATION_METHOD = 'tokenize_bpe',
    # sets the path to the learned BPE codes.
    DETOKENIZATION_METHOD = 'detokenize_bpe' # Select which de-tokenization method we'll apply
    APPLY_DETOKENIZATION = True # Wheter we apply a detokenization method
    TOKENIZE_HYPOTHESES = True # Whether we tokenize the hypotheses using the
    # previously defined tokenization method
    TOKENIZE_REFERENCES = True # Whether we tokenize the references using the
    # previously defined tokenization method

    # Input image parameters
    DATA_AUGMENTATION = False # Apply data augmentation on input data (still unimplemented for text inputs)

    # Text parameters
    FILL = 'end' # Whether we pad the 'end' or the 'start' of the sentence with 0s
    PAD_ON_BATCH = True # Whether we take as many timesteps as the longest sequence of
    # the batch or a fixed size (MAX_OUTPUT_TEXT_LEN)

    # Input text parameters
    INPUT_VOCABULARY_SIZE = 0 # Size of the input vocabulary. Set to 0 for using all,
    # otherwise it will be truncated to these most frequent words.
    MIN_OCCURRENCES_INPUT_VOCAB = 0 # Minimum number of occurrences allowed for the words in the input vocabulary.
    # Set to 0 for using them all.
    MAX_INPUT_TEXT_LEN = 70 # Maximum length of the input sequence

    # Output text parameters
    OUTPUT_VOCABULARY_SIZE = 0 # Size of the input vocabulary. Set to 0 for using all,
    # otherwise it will be truncated to these most frequent words.
    MIN_OCCURRENCES_OUTPUT_VOCAB = 0 # Minimum number of occurrences allowed for the words in the output vocabulary.
    MAX_OUTPUT_TEXT_LEN = 70 # Maximum length of the output sequence
    # set to 0 if we want to use the whole answer as a single class
    MAX_OUTPUT_TEXT_LEN_TEST = MAX_OUTPUT_TEXT_LEN * 3 # Maximum length of the output sequence during test time

    # Optimizer parameters (see model.compile() function)
    LOSS = 'categorical_crossentropy'
    CLASSIFIER_ACTIVATION = 'softmax'

    OPTIMIZER = 'Adam' # Optimizer
    LR = 0.0002 # Learning rate. Recommended values - Adam 0.001 - Adadelta 1.0
    CLIP_C = 5. # During training, clip L2 norm of gradients to this value (0. means deactivated)
    CLIP_V = 0. # During training, clip absolute value of gradients to this value (0. means deactivated)
    SAMPLE_WEIGHTS = True # Select whether we use a weights matrix (mask) for the data outputs

    # Learning rate annealing
    LR_DECAY = None # Frequency (number of epochs or updates) between LR annealings. Set to None for not decay the learning rate
    LR_GAMMA = 0.8 # Multiplier used for decreasing the LR
    LR_REDUCE_EACH_EPOCHS = False # Reduce each LR_DECAY number of epochs or updates
    LR_START_REDUCTION_ON_EPOCH = 0 # Epoch to start the reduction
    LR_REDUCER_TYPE = 'exponential' # Function to reduce. 'linear' and 'exponential' implemented.
    LR_REDUCER_EXP_BASE = 0.5 # Base for the exponential decay
    LR_HALF_LIFE = 5000 # Factor for exponenital decay

    # Training parameters
    MAX_EPOCH = 500 # Stop when computed this number of epochs
    BATCH_SIZE = 20 # Size of each minibatch
    HOMOGENEOUS_BATCHES = False # Use batches with homogeneous output lengths (Dangerous!!)
    JOINT_BATCHES = 4 # When using homogeneous batches, get this number of batches to sort
    PARALLEL_LOADERS = 1 # Parallel data batch loaders
    EPOCHS_FOR_SAVE = 1 # Number of epochs between model saves
    WRITE_VALID_SAMPLES = True # Write valid samples in file
    SAVE_EACH_EVALUATION = True # Save each time we evaluate the model

    # Early stop parameters
    EARLY_STOP = True # Turns on/off the early stop protocol
    PATIENCE = 20 # We'll stop if the val STOP_METRIC does not improve after this
    # number of evaluations
    STOP_METRIC = 'Bleu_4' # Metric for the stop

    # Model parameters
    MODEL_TYPE = 'GroundHogModel' # Model to train. See model_zoo() for the supported architectures
    ENCODER_RNN_TYPE = 'LSTM' # Encoder's RNN unit type ('LSTM' and 'GRU' supported)
    DECODER_RNN_TYPE = 'ConditionalLSTM' # Decoder's RNN unit type
    # ('LSTM', 'GRU', 'ConditionalLSTM' and 'ConditionalGRU' supported)

    # Initializers (see keras/initializations.py).
    INIT_FUNCTION = 'glorot_uniform' # General initialization function for matrices.
    INNER_INIT = 'orthogonal' # Initialization function for inner RNN matrices.
    INIT_ATT = 'glorot_uniform' # Initialization function for attention mechism matrices

    SOURCE_TEXT_EMBEDDING_SIZE = 512 # Source language word embedding size.
    SRC_PRETRAINED_VECTORS = None # Path to pretrained vectors (e.g.: DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % SRC_LAN)
    # Set to None if you don't want to use pretrained vectors.
    # When using pretrained word embeddings. this parameter must match with the word embeddings size
    SRC_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors.

    TARGET_TEXT_EMBEDDING_SIZE = 512 # Source language word embedding size.
    TRG_PRETRAINED_VECTORS = None # Path to pretrained vectors. (e.g. DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % TRG_LAN)
    # Set to None if you don't want to use pretrained vectors.
    # When using pretrained word embeddings, the size of the pretrained word embeddings must match with the word embeddings size.
    TRG_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors.

    # Encoder configuration
    ENCODER_HIDDEN_SIZE = 512 # For models with RNN encoder
    BIDIRECTIONAL_ENCODER = True # Use bidirectional encoder
    N_LAYERS_ENCODER = 1 # Stack this number of encoding layers
    BIDIRECTIONAL_DEEP_ENCODER = True # Use bidirectional encoder in all encoding layers

    # Decoder configuration
    DECODER_HIDDEN_SIZE = 512 # For models with RNN decoder
    N_LAYERS_DECODER = 1 # Stack this number of decoding layers.
    ADDITIONAL_OUTPUT_MERGE_MODE = 'Add' # Merge mode for the skip-connections (see keras.layers.merge.py)
    ATTENTION_SIZE = DECODER_HIDDEN_SIZE
    # Skip connections size
    SKIP_VECTORS_HIDDEN_SIZE = TARGET_TEXT_EMBEDDING_SIZE

    # Fully-Connected layers for initializing the first RNN state
    # Here we should only specify the activation function of each layer
    # (as they have a potentially fixed size)
    # (e.g INIT_LAYERS = ['tanh', 'relu'])
    INIT_LAYERS = ['tanh']

    # Additional Fully-Connected layers applied before softmax.
    # Here we should specify the activation function and the output dimension
    # (e.g DEEP_OUTPUT_LAYERS = [('tanh', 600), ('relu', 400), ('relu', 200)])
    DEEP_OUTPUT_LAYERS = [('linear', TARGET_TEXT_EMBEDDING_SIZE)]

    # Regularizers
    WEIGHT_DECAY = 1e-4 # L2 regularization
    RECURRENT_WEIGHT_DECAY = 0. # L2 regularization in recurrent layers

    DROPOUT_P = 0 # Percentage of units to drop (0 means no dropout)
    RECURRENT_INPUT_DROPOUT_P = 0 # Percentage of units to drop in input cells of recurrent layers
    RECURRENT_DROPOUT_P = 0 # Percentage of units to drop in recurrent layers

    USE_NOISE = True # Use gaussian noise during training
    NOISE_AMOUNT = 0.01 # Amount of noise

    USE_BATCH_NORMALIZATION = True # If True it is recommended to deactivate Dropout
    BATCH_NORMALIZATION_MODE = 1 # See documentation in Keras' BN

    USE_PRELU = False # use PReLU activations as regularizer
    USE_L2 = False # L2 normalization on the features
    DOUBLE_STOCHASTIC_ATTENTION_REG = 0.0 # Doubly stochastic attention (Eq. 14 from arXiv:1502.03044)

    # Results plot and models storing parameters
    EXTRA_NAME = '' # This will be appended to the end of the model name
    MODEL_NAME = TASK_NAME + '_' + DATASET_NAME + '_' + SRC_LAN + TRG_LAN
    MODEL_NAME += EXTRA_NAME

    STORE_PATH = 'trained_models/' + MODEL_NAME + '/' # Models and evaluation results will be stored here
    DATASET_STORE_PATH = STORE_PATH # Dataset instance will be stored here

    SAMPLING_SAVE_MODE = 'list' # 'list': Store in a text file, one sentence per line.
    VERBOSE = 1 # Verbosity level
    RELOAD = 0 # If 0 start training from scratch, otherwise the model
    # Saved on epoch 'RELOAD' will be used
    RELOAD_EPOCH = False # Select whether we reload epoch or update number
    REBUILD_DATASET = True # Build again or use stored instance
    MODE = 'training' # 'training' or 'sampling' (if 'sampling' then RELOAD must
    # be greater than 0 and EVAL_ON_SETS will be used)

    # Extra parameters for special trainings
    TRAIN_ON_TRAINVAL = False # train the model on both training and validation sets combined
    FORCE_RELOAD_VOCABULARY = False # force building a new vocabulary from the training samples
    # applicable if RELOAD > 1

    # ================================================ #
    # Every local defined above becomes a key of the returned configuration.
    parameters = locals().copy()
    return parameters
| Sasanita/nmt-keras | config.py | Python | mit | 17,209 | [
"Gaussian"
] | 50945715e80ee3b44879b3d1295267085f1cdd3deeea44f3bae8a235ef11b615 |
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw
class KnowValues(unittest.TestCase):

    def test_gw_h2_ae_spin_rf0_speed(self):
        """GW: the optimized non-interacting response rf0 must agree with the
        complex-valued reference implementation on an 8-atom hydrogen chain."""
        mol = gto.M(
            verbose=1,
            atom='''H 0 0 0; H 0 0 0.5; H 0 0 1.0; H 0 0 1.5; H 0 0 2.0; H 0 0 2.5; H 0 0 3.0; H 0 0 3.5;''',
            basis='cc-pvdz', spin=0)
        mean_field = scf.RHF(mol)
        mean_field.kernel()
        gw_calc = gw(mf=mean_field, gto=mol, verbosity=0, nvrt=4)
        # Complex frequency grid slightly above the real axis.
        omegas = np.arange(0.0, 1.0, 0.1) + 1j * 0.2
        rf0_fast = gw_calc.rf0(omegas)
        rf0_ref = gw_calc.rf0_cmplx_ref(omegas)
        # Mean absolute deviation per element must be negligible.
        self.assertTrue(abs(rf0_ref - rf0_fast).sum() / rf0_fast.size < 1e-11)
# Allow running this test module directly from the command line.
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0064_gw_h_chain.py | Python | apache-2.0 | 972 | [
"PySCF"
] | e1cfc749c3fa4f32a9eeefb9b4241ca4b12270e3421b254e3064dfb6ec4a171b |
"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio.course_page import CoursePage
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.utils import set_input_value_and_save, set_input_value
class CourseOutlineItem(object):
"""
A mixin class for any :class:`PageObject` shown in a course outline.
"""
# Note there are a few pylint disable=no-member occurances in this class, because
# it was written assuming it is going to be a mixin to a PageObject and will have functions
# such as self.wait_for_ajax, which doesn't exist on a generic `object`.
BODY_SELECTOR = None
EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
NAME_SELECTOR = '.item-title'
NAME_INPUT_SELECTOR = '.xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'
def __repr__(self):
    # CourseOutlineItem doubles as a mixin for CourseOutlinePage, which has
    # no locator attribute. Fall back to a locator-free repr so that errors
    # while navigating to the course outline page don't get masked by repr
    # failures.
    try:
        locator = self.locator  # pylint: disable=no-member
    except AttributeError:
        return "{}(<browser>)".format(self.__class__.__name__)
    return "{}(<browser>, {!r})".format(self.__class__.__name__, locator)
def _bounded_selector(self, selector):
    """
    Return ``selector`` scoped to this particular `CourseOutlineItem`.

    If the item has no body selector or no locator (as happens when this
    mixin is used by the CourseOutlinePage), the selector cannot be
    bounded and is returned unchanged.
    """
    # pylint: disable=no-member
    if not self.BODY_SELECTOR or not hasattr(self, 'locator'):
        return selector
    return '{}[data-locator="{}"] {}'.format(
        self.BODY_SELECTOR,
        self.locator,
        selector
    )
@property
def name(self):
    """
    The display name of this object, or ``None`` if no name element is found.
    """
    title = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first  # pylint: disable=no-member
    return title.text[0] if title else None
@property
def has_status_message(self):
    """
    True if the item currently shows a status message, False otherwise.
    """
    status_css = self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)
    return self.q(css=status_css).first.visible  # pylint: disable=no-member
@property
def status_message(self):
    """
    The text of this item's status message.
    """
    status_css = self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)
    return self.q(css=status_css).text[0]  # pylint: disable=no-member
@property
def has_staff_lock_warning(self):
    """ True if the 'Contains staff only content' message is visible. """
    if not self.has_status_message:
        return False
    return self.status_message == 'Contains staff only content'
@property
def is_staff_only(self):
    """ True if this item's visibility is staff only (has a black sidebar). """
    css_classes = self.q(css=self._bounded_selector(''))[0].get_attribute("class")  # pylint: disable=no-member
    return "is-staff-only" in css_classes
def edit_name(self):
    """
    Puts the item's display name into its inline editable form.
    """
    self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click()  # pylint: disable=no-member
def enter_name(self, new_name):
    """
    Enters new_name as the item's display name (without saving it).
    """
    set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
def change_name(self, new_name):
    """
    Changes the container's display name to `new_name` and saves it.
    """
    self.edit_name()
    set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
    # The save triggers an AJAX round-trip; wait for it so callers see the new name.
    self.wait_for_ajax()  # pylint: disable=no-member
def finalize_name(self):
    """
    Presses ENTER, saving the value of the display name for this item.
    """
    # pylint: disable=no-member
    self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
    self.wait_for_ajax()
def set_staff_lock(self, is_locked):
    """
    Set the explicit staff lock of this item to `is_locked` via its
    settings modal, then save the modal.
    """
    editor = self.edit()
    editor.is_explicitly_locked = is_locked
    editor.save()
def in_editable_form(self):
    """
    Return whether this outline item's display name is in its editable form.
    """
    # The inline editor adds an 'is-editing' class to the name field wrapper.
    # pylint: disable=no-member
    return "is-editing" in self.q(
        css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
    )[0].get_attribute("class")
def edit(self):
    """
    Open this item's settings modal and return a page object for it.

    Returns a :class:`SubsectionOutlineModal` for subsections, otherwise a
    :class:`CourseOutlineModal`, after waiting for the modal to be shown.
    """
    self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click()  # pylint: disable=no-member
    if 'subsection' in self.BODY_SELECTOR:
        modal = SubsectionOutlineModal(self)
    else:
        modal = CourseOutlineModal(self)
    # Bug fix: the promise must be fulfilled, otherwise the wait never runs
    # and callers can race with the modal's appearance.
    EmptyPromise(modal.is_shown, 'Modal is shown.').fulfill()
    return modal
@property
def release_date(self):
    """
    Returns the release date from the page as an "mm/dd/yyyy" string,
    or None when no release date is displayed.
    """
    element = self.q(css=self._bounded_selector(".status-release-value"))  # pylint: disable=no-member
    return element.first.text[0] if element.present else None
@property
def due_date(self):
    """
    Returns the due date from the page as an "mm/dd/yyyy" string,
    or None when no due date is displayed.
    """
    element = self.q(css=self._bounded_selector(".status-grading-date"))  # pylint: disable=no-member
    return element.first.text[0] if element.present else None
@property
def policy(self):
    """
    Returns the grading format (policy) currently displayed for this item,
    or None when no grading value is shown.
    """
    element = self.q(css=self._bounded_selector(".status-grading-value"))  # pylint: disable=no-member
    return element.first.text[0] if element.present else None
def publish(self):
    """
    Publish the unit via its publish modal.
    """
    click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
    modal = CourseOutlineModal(self)
    # Bug fix: the promise must be fulfilled, otherwise the wait never runs
    # and modal.publish() can be invoked before the modal is shown.
    EmptyPromise(modal.is_shown, 'Modal is shown.').fulfill()
    modal.publish()
@property
def publish_action(self):
    """
    Returns the query for the link used to publish a unit.
    """
    return self.q(css=self._bounded_selector('.action-publish')).first  # pylint: disable=no-member
class CourseOutlineContainer(CourseOutlineItem):
    """
    A mixin to a CourseOutline page object that adds the ability to load
    a child page object by title or by index.

    CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
    """
    CHILD_CLASS = None
    # The leading '>' restricts the match to this container's direct
    # "add item" button, not those of nested containers.
    ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'

    def child(self, title, child_class=None):
        """
        Return the child page object whose display name is `title`.

        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # pylint: disable=no-member
        return child_class(
            self.browser,
            self.q(css=child_class.BODY_SELECTOR).filter(
                lambda el: title in [inner.text for inner in
                                     el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
            ).attrs('data-locator')[0]
        )

    def children(self, child_class=None):
        """
        Returns all the children page objects of class child_class.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # pylint: disable=no-member
        return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
            lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results

    def child_at(self, index, child_class=None):
        """
        Returns the child at the specified index.

        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.children(child_class)[index]

    def add_child(self, require_notification=True):
        """
        Adds a child to this xblock, waiting for notifications.
        """
        click_css(
            self,
            self._bounded_selector(self.ADD_BUTTON_SELECTOR),
            require_notification=require_notification,
        )

    def expand_subsection(self):
        """
        Toggle the expansion of this subsection.
        """
        # pylint: disable=no-member
        # Disable jQuery animations so the expand/collapse completes immediately.
        self.browser.execute_script("jQuery.fx.off = true;")

        def subsection_expanded():
            """
            Returns whether or not this subsection is expanded.

            Expansion is inferred from the "add" button being displayed,
            since it is only visible when the container is open.
            """
            self.wait_for_element_presence(
                self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Toggle control is present'
            )
            add_button = self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).first.results
            return add_button and add_button[0].is_displayed()

        currently_expanded = subsection_expanded()
        # Need to click slightly off-center in order for the click to be recognized.
        ele = self.browser.find_element_by_css_selector(self._bounded_selector('.ui-toggle-expansion .fa'))
        ActionChains(self.browser).move_to_element_with_offset(ele, 4, 4).click().perform()
        self.wait_for_element_presence(self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Subsection is expanded')
        EmptyPromise(
            lambda: subsection_expanded() != currently_expanded,
            "Check that the container {} has been toggled".format(self.locator)
        ).fulfill()
        # Re-enable animations so later interactions behave normally.
        self.browser.execute_script("jQuery.fx.off = false;")
        return self

    @property
    def is_collapsed(self):
        """
        Return whether this outline item is currently collapsed.
        """
        return "is-collapsed" in self.q(css=self._bounded_selector('')).first.attrs("class")[0]  # pylint: disable=no-member
class CourseOutlineChild(PageObject, CourseOutlineItem):
    """
    A page object that will be used as a child of :class:`CourseOutlineContainer`.
    """
    url = None
    BODY_SELECTOR = '.outline-item'

    def __init__(self, browser, locator):
        super(CourseOutlineChild, self).__init__(browser)
        # The xblock locator identifying this item on the outline page.
        self.locator = locator

    def is_browser_on_page(self):
        return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present

    def delete(self, cancel=False):
        """
        Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
        """
        click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
        confirm_prompt(self, cancel)

    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular `CourseOutlineChild` context
        """
        # Unlike CourseOutlineItem._bounded_selector, a locator always exists here.
        return '{}[data-locator="{}"] {}'.format(
            self.BODY_SELECTOR,
            self.locator,
            selector
        )

    @property
    def name(self):
        # The display name of this child, or None if not found.
        titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
        if titles:
            return titles[0]
        else:
            return None

    @property
    def children(self):
        """
        Will return any first-generation descendant items of this item.
        """
        descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results

        # Now remove any non-direct descendants: anything that is a child of one
        # of our children is a grandchild and must be filtered out.
        grandkids = []
        for descendant in descendants:
            grandkids.extend(descendant.children)

        grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
    """
    PageObject that wraps a unit link on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-unit'
    NAME_SELECTOR = '.unit-title a'

    def go_to(self):
        """
        Open the container page linked to by this unit link, and return
        an initialized :class:`.ContainerPage` for that unit.
        """
        return ContainerPage(self.browser, self.locator).visit()

    def is_browser_on_page(self):
        # NOTE(review): unlike CourseOutlineChild this checks only the unbounded
        # body selector (any unit present), not this specific locator — confirm
        # whether that is intentional.
        return self.q(css=self.BODY_SELECTOR).present

    def children(self):
        # Units have no nested units; this returns direct descendants only.
        return self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` that wraps a subsection block on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-subsection'
    NAME_SELECTOR = '.subsection-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineUnit

    def unit(self, title):
        """
        Return the :class:`.CourseOutlineUnit` with the title `title`.
        """
        return self.child(title)

    def units(self):
        """
        Returns the units in this subsection.
        """
        return self.children()

    def unit_at(self, index):
        """
        Returns the CourseOutlineUnit at the specified index.
        """
        return self.child_at(index)

    def add_unit(self):
        """
        Adds a unit to this subsection.
        """
        # Uses a plain click (no notification wait), unlike add_child().
        self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` wrapping a section block on the Studio Course Outline
    page.  All methods are thin wrappers over the generic child accessors,
    typed to :class:`.CourseOutlineSubsection`.
    """
    url = None
    BODY_SELECTOR = '.outline-section'
    NAME_SELECTOR = '.section-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineSubsection

    def subsection(self, title):
        """Look up the :class:`.CourseOutlineSubsection` named `title`."""
        return self.child(title)

    def subsections(self):
        """All :class:`.CourseOutlineSubsection` children of this section."""
        return self.children()

    def subsection_at(self, index):
        """The :class:`.CourseOutlineSubsection` at position `index`."""
        return self.child_at(index)

    def add_subsection(self):
        """Create a new subsection inside this section."""
        self.add_child()
class ExpandCollapseLinkState(object):
    """
    Represents the three states that the expand/collapse link can be in
    """
    MISSING = 0   # link is not displayed at all
    COLLAPSE = 1  # link offers to collapse all sections
    EXPAND = 2    # link offers to expand all sections
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
    """
    Course Outline page in Studio.
    """
    url_path = "course"
    CHILD_CLASS = CourseOutlineSection
    EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
    BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'

    def is_browser_on_page(self):
        # The page is ready only once the outline body is present and the
        # loading spinner has been hidden.
        return all([
            self.q(css='body.view-outline').present,
            self.q(css='.content-primary').present,
            self.q(css='div.ui-loading.is-hidden').present
        ])

    def view_live(self):
        """
        Clicks the "View Live" link and switches to the new tab
        """
        click_css(self, '.view-live-button', require_notification=False)
        self.browser.switch_to_window(self.browser.window_handles[-1])

    def section(self, title):
        """
        Return the :class:`.CourseOutlineSection` with the title `title`.
        """
        return self.child(title)

    def section_at(self, index):
        """
        Returns the :class:`.CourseOutlineSection` at the specified index.
        """
        return self.child_at(index)

    def click_section_name(self, parent_css=''):
        """
        Find and click on first section name in course outline
        """
        self.q(css='{} .section-name'.format(parent_css)).first.click()

    def get_section_name(self, parent_css='', page_refresh=False):
        """
        Get the list of names of all sections present
        """
        if page_refresh:
            self.browser.refresh()
        return self.q(css='{} .section-name'.format(parent_css)).text

    def section_name_edit_form_present(self, parent_css=''):
        """
        Check that section name edit form present
        """
        return self.q(css='{} .section-name input'.format(parent_css)).present

    def change_section_name(self, new_name, parent_css=''):
        """
        Change section name of first section present in course outline
        """
        self.click_section_name(parent_css)
        self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
        self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
        self.wait_for_ajax()

    def sections(self):
        """
        Returns the sections of this course outline page.
        """
        return self.children()

    def add_section_from_top_button(self):
        """
        Clicks the button for adding a section which resides at the top of the screen.
        """
        click_css(self, '.wrapper-mast nav.nav-actions .button-new')

    def add_section_from_bottom_button(self, click_child_icon=False):
        """
        Clicks the button for adding a section which resides at the bottom of the screen.

        If `click_child_icon` is True, the click targets the '+' icon inside
        the button rather than the button itself.
        """
        element_css = self.BOTTOM_ADD_SECTION_BUTTON
        if click_child_icon:
            element_css += " .fa-plus"
        click_css(self, element_css)

    def toggle_expand_collapse(self):
        """
        Toggles whether all sections are expanded or collapsed
        """
        self.q(css=self.EXPAND_COLLAPSE_CSS).click()

    def start_reindex(self):
        """
        Starts course reindex by clicking reindex button
        """
        self.reindex_button.click()

    def open_subsection_settings_dialog(self, index=0):
        """
        Clicks on the settings button of the subsection at `index`.
        """
        self.q(css=".subsection-header-actions .configure-button").nth(index).click()
        self.wait_for_element_presence('.course-outline-modal', 'Subsection settings modal is present.')

    def change_problem_release_date(self):
        """
        Sets a new start date (hard-coded to 01/01/2030) on the first subsection.
        """
        self.q(css=".subsection-header-actions .configure-button").first.click()
        self.q(css="#start_date").fill("01/01/2030")
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def change_problem_due_date(self, date):
        """
        Sets a new due date.

        Expects date to be a string that will be accepted by the input (for example, '01/01/1970')
        """
        self.q(css=".subsection-header-actions .configure-button").first.click()
        self.q(css="#due_date").fill(date)
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def select_advanced_tab(self, desired_item='special_exam'):
        """
        Select the advanced settings tab, then wait for the fields that
        correspond to `desired_item` to appear.
        """
        self.q(css=".settings-tab-button[data-tab='advanced']").first.click()
        if desired_item == 'special_exam':
            self.wait_for_element_presence('input.no_special_exam', 'Special exam settings fields not present.')
        if desired_item == 'gated_content':
            self.wait_for_element_visibility('#is_prereq', 'Gating settings fields are present.')
        if desired_item == 'hide_after_due_date':
            self.wait_for_element_presence('input[value=hide_after_due]', 'Visibility fields not present.')

    def make_exam_proctored(self):
        """
        Makes a Proctored exam.
        """
        self.q(css="input.proctored_exam").first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def make_exam_timed(self, hide_after_due=False):
        """
        Makes a timed exam, optionally hiding its content after the due date.
        """
        self.q(css="input.timed_exam").first.click()
        if hide_after_due:
            self.q(css='input[name=content-visibility][value=hide_after_due]').first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def make_subsection_hidden_after_due_date(self):
        """
        Sets a subsection to be hidden after due date.
        """
        self.q(css='input[value=hide_after_due]').first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def select_none_exam(self):
        """
        Choose "none" exam but do not press enter
        """
        self.q(css="input.no_special_exam").first.click()

    def select_timed_exam(self):
        """
        Choose a timed exam but do not press enter
        """
        self.q(css="input.timed_exam").first.click()

    def select_proctored_exam(self):
        """
        Choose a proctored exam but do not press enter
        """
        self.q(css="input.proctored_exam").first.click()

    def select_practice_exam(self):
        """
        Choose a practice exam but do not press enter
        """
        self.q(css="input.practice_exam").first.click()

    def time_allotted_field_visible(self):
        """
        Returns whether the time allotted field is visible
        """
        return self.q(css=".field-time-limit").visible

    def exam_review_rules_field_visible(self):
        """
        Returns whether the review rules field is visible
        """
        return self.q(css=".field-exam-review-rules").visible

    def proctoring_items_are_displayed(self):
        """
        Returns True if all four exam-type radio buttons are found.
        """
        # The None radio button
        if not self.q(css="input.no_special_exam").present:
            return False

        # The Timed exam radio button
        if not self.q(css="input.timed_exam").present:
            return False

        # The Proctored exam radio button
        if not self.q(css="input.proctored_exam").present:
            return False

        # The Practice exam radio button
        if not self.q(css="input.practice_exam").present:
            return False

        return True

    def make_gating_prerequisite(self):
        """
        Makes a subsection a gating prerequisite.
        """
        if not self.q(css="#is_prereq")[0].is_selected():
            # Click the label rather than the checkbox itself (the input may be
            # styled/overlaid and not directly clickable).
            self.q(css='label[for="is_prereq"]').click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def add_prerequisite_to_subsection(self, min_score):
        """
        Adds a prerequisite to a subsection, selecting the first available
        prerequisite in the dropdown and the given minimum score.
        """
        Select(self.q(css="#prereq")[0]).select_by_index(1)
        self.q(css="#prereq_min_score").fill(min_score)
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def gating_prerequisite_checkbox_is_visible(self):
        """
        Returns True if the gating prerequisite checkbox is visible.
        """
        return self.q(css="#is_prereq").visible

    def gating_prerequisite_checkbox_is_checked(self):
        """
        Returns True if the gating prerequisite checkbox is checked.
        """
        return self.q(css="#is_prereq:checked").present

    def gating_prerequisites_dropdown_is_visible(self):
        """
        Returns True if the gating prerequisites dropdown is visible.
        """
        return self.q(css="#prereq").visible

    def gating_prerequisite_min_score_is_visible(self):
        """
        Returns True if the gating prerequisite minimum score input is visible.
        """
        return self.q(css="#prereq_min_score").visible

    @property
    def bottom_add_section_button(self):
        """
        Returns the query representing the bottom add section button.
        """
        return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first

    @property
    def has_no_content_message(self):
        """
        Returns true if a message informing the user that the course has no content is visible
        """
        return self.q(css='.outline .no-content').is_present()

    @property
    def has_rerun_notification(self):
        """
        Returns true iff the rerun notification is present on the page.
        """
        return self.q(css='.wrapper-alert.is-shown').is_present()

    def dismiss_rerun_notification(self):
        """
        Clicks the dismiss button in the rerun notification.
        """
        self.q(css='.dismiss-button').click()

    @property
    def expand_collapse_link_state(self):
        """
        Returns the current state of the expand/collapse link
        as an :class:`ExpandCollapseLinkState` value.
        """
        link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
        if not link.is_displayed():
            return ExpandCollapseLinkState.MISSING
        elif "collapse-all" in link.get_attribute("class"):
            return ExpandCollapseLinkState.COLLAPSE
        else:
            return ExpandCollapseLinkState.EXPAND

    @property
    def reindex_button(self):
        """
        Returns reindex button.
        """
        return self.q(css=".button.button-reindex")[0]

    def expand_all_subsections(self):
        """
        Expands all the subsections in this course.
        """
        for section in self.sections():
            if section.is_collapsed:
                section.expand_subsection()
            for subsection in section.subsections():
                if subsection.is_collapsed:
                    subsection.expand_subsection()

    @property
    def xblocks(self):
        """
        Return a list of xblocks loaded on the outline page.
        """
        return self.children(CourseOutlineChild)

    @property
    def license(self):
        """
        Returns the course license text, if present. Else returns None.
        """
        return self.q(css=".license-value").first.text[0]

    @property
    def deprecated_warning_visible(self):
        """
        Returns true if the deprecated warning is visible.
        """
        return self.q(css='.wrapper-alert-error.is-shown').is_present()

    @property
    def warning_heading_text(self):
        """
        Returns deprecated warning heading text.
        """
        return self.q(css='.warning-heading-text').text[0]

    @property
    def components_list_heading(self):
        """
        Returns deprecated warning component list heading text.
        """
        return self.q(css='.components-list-heading-text').text[0]

    @property
    def modules_remove_text_shown(self):
        """
        Returns True if deprecated warning advance modules remove text is visible.
        """
        return self.q(css='.advance-modules-remove-text').visible

    @property
    def modules_remove_text(self):
        """
        Returns deprecated warning advance modules remove text.
        """
        return self.q(css='.advance-modules-remove-text').text[0]

    @property
    def components_visible(self):
        """
        Returns True if components list visible.
        """
        return self.q(css='.components-list').visible

    @property
    def components_display_names(self):
        """
        Returns deprecated warning components display name list.
        """
        return self.q(css='.components-list li>a').text

    @property
    def deprecated_advance_modules(self):
        """
        Returns deprecated advance modules list.
        """
        return self.q(css='.advance-modules-list li').text
class CourseOutlineModal(object):
    """
    Page object specifically for a modal window on the course outline page.

    Subsections are handled slightly differently in some regards, and should
    use SubsectionOutlineModal.
    """
    MODAL_SELECTOR = ".wrapper-modal-window"

    def __init__(self, page):
        # The page object that owns this modal; all queries go through it.
        self.page = page

    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineModal` context.
        """
        return " ".join([self.MODAL_SELECTOR, selector])

    def is_shown(self):
        """
        Return whether or not the modal defined by self.MODAL_SELECTOR is shown.
        """
        return self.page.q(css=self.MODAL_SELECTOR).present

    def find_css(self, selector):
        """
        Find the given css selector on the page, bounded to this modal.
        """
        return self.page.q(css=self._bounded_selector(selector))

    def click(self, selector, index=0):
        """
        Perform a Click action on the given selector.
        """
        self.find_css(selector).nth(index).click()

    def save(self):
        """
        Click the save action button, and wait for the ajax call to return.
        """
        self.click(".action-save")
        self.page.wait_for_ajax()

    def publish(self):
        """
        Click the publish action button, and wait for the ajax call to return.
        """
        self.click(".action-publish")
        self.page.wait_for_ajax()

    def cancel(self):
        """
        Click the cancel action button.
        """
        self.click(".action-cancel")

    def has_release_date(self):
        """
        Check if the input box for the release date exists in the subsection's settings window
        """
        return self.find_css("#start_date").present

    def has_release_time(self):
        """
        Check if the input box for the release time exists in the subsection's settings window
        """
        return self.find_css("#start_time").present

    def has_due_date(self):
        """
        Check if the input box for the due date exists in the subsection's settings window
        """
        return self.find_css("#due_date").present

    def has_due_time(self):
        """
        Check if the input box for the due time exists in the subsection's settings window
        """
        return self.find_css("#due_time").present

    def has_policy(self):
        """
        Check if the input for the grading policy is present.
        """
        return self.find_css("#grading_type").present

    def set_date(self, property_name, input_selector, date):
        """
        Set `date` value to input pointed by `input_selector` and `property_name`.
        `date` is an "mm/dd/yyyy" string.
        """
        month, day, year = map(int, date.split('/'))
        self.click(input_selector)
        if getattr(self, property_name):
            # Bug fix: the current value is "mm/dd/yyyy"; month and year are at
            # indices 0 and 2 ([::2]), not [1:] which would yield (day, year).
            current_month, current_year = map(int, getattr(self, property_name).split('/')[::2])
        else:  # Use default timepicker values, which are current month and year.
            current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
        # Number of month-steps to navigate in the datepicker widget.
        date_diff = 12 * (year - current_year) + month - current_month
        selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
        for __ in xrange(abs(date_diff)):
            self.page.q(css=selector).click()
        self.page.q(css="a.ui-state-default").nth(day - 1).click()  # set day
        self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
        EmptyPromise(
            lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
            "{} is updated in modal.".format(property_name)
        ).fulfill()

    def set_time(self, input_selector, time):
        """
        Set `time` value to input pointed by `input_selector`

        Not using the time picker to make sure it's not being rounded up
        """
        self.page.q(css=input_selector).fill(time)
        self.page.q(css=input_selector).results[0].send_keys(Keys.ENTER)

    @property
    def release_date(self):
        """
        Returns the unit's release date. Date is "mm/dd/yyyy" string.
        """
        return self.find_css("#start_date").first.attrs('value')[0]

    @release_date.setter
    def release_date(self, date):
        """
        Sets the unit's release date to `date`. Date is "mm/dd/yyyy" string.
        """
        self.set_date('release_date', "#start_date", date)

    @property
    def release_time(self):
        """
        Returns the current value of the release time. Default is u'00:00'
        """
        return self.find_css("#start_time").first.attrs('value')[0]

    @release_time.setter
    def release_time(self, time):
        """
        Time is "HH:MM" string.
        """
        self.set_time("#start_time", time)

    @property
    def due_date(self):
        """
        Returns the due date from the page. Date is "mm/dd/yyyy" string.
        """
        return self.find_css("#due_date").first.attrs('value')[0]

    @due_date.setter
    def due_date(self, date):
        """
        Sets the due date for the unit. Date is "mm/dd/yyyy" string.
        """
        self.set_date('due_date', "#due_date", date)

    @property
    def due_time(self):
        """
        Returns the current value of the due time. Default is u''
        """
        return self.find_css("#due_time").first.attrs('value')[0]

    @due_time.setter
    def due_time(self, time):
        """
        Time is "HH:MM" string.
        """
        self.set_time("#due_time", time)

    @property
    def policy(self):
        """
        Returns the currently selected grading format.
        """
        element = self.find_css('#grading_type')[0]
        return self.get_selected_option_text(element)

    @policy.setter
    def policy(self, grading_label):
        """
        Select the grading format with `grading_label` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        select = Select(element)
        select.select_by_visible_text(grading_label)

        EmptyPromise(
            lambda: self.policy == grading_label,
            "Grading label is updated.",
        ).fulfill()

    @property
    def is_staff_lock_visible(self):
        """
        Returns True if the staff lock option is visible.
        """
        return self.find_css('#staff_lock').visible

    def ensure_staff_lock_visible(self):
        """
        Ensures the staff lock option is visible, clicking on the advanced tab
        if needed.
        """
        if not self.is_staff_lock_visible:
            self.find_css(".settings-tab-button[data-tab=advanced]").click()
        EmptyPromise(
            lambda: self.is_staff_lock_visible,
            "Staff lock option is visible",
        ).fulfill()

    @property
    def is_explicitly_locked(self):
        """
        Returns true if the explict staff lock checkbox is checked, false otherwise.
        """
        self.ensure_staff_lock_visible()
        return self.find_css('#staff_lock')[0].is_selected()

    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value):
        """
        Checks the explicit staff lock box if value is true, otherwise selects "visible".
        """
        self.ensure_staff_lock_visible()
        if value != self.is_explicitly_locked:
            # Click the label (the checkbox itself may be styled/overlaid).
            self.find_css('label[for="staff_lock"]').click()
        EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()

    def shows_staff_lock_warning(self):
        """
        Returns true iff the staff lock warning is visible.
        """
        return self.find_css('.staff-lock .tip-warning').visible

    def get_selected_option_text(self, element):
        """
        Returns the text of the first selected option for the element,
        or None when `element` is falsy.
        """
        if element:
            select = Select(element)
            return select.first_selected_option.text
        else:
            return None
class SubsectionOutlineModal(CourseOutlineModal):
    """
    Subclass to handle a few special cases with subsection modals.

    Subsection visibility is a three-way radio group ('visible',
    'hide_after_due', 'staff_only') rather than a single staff-lock checkbox,
    so the staff-lock properties are overridden in terms of it.
    """
    # NOTE: the previous no-op __init__ override (which only called super())
    # has been removed; the base class constructor is inherited unchanged.

    @property
    def is_explicitly_locked(self):
        """
        Override - returns True if staff_only is set.
        """
        return self.subsection_visibility == 'staff_only'

    @property
    def subsection_visibility(self):
        """
        Returns the current visibility setting for a subsection:
        'visible', 'hide_after_due', or 'staff_only'.
        """
        self.ensure_staff_lock_visible()
        return self.find_css('input[name=content-visibility]:checked').first.attrs('value')[0]

    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value):  # pylint: disable=arguments-differ
        """
        Override - sets visibility to 'staff_only' if True, else 'visible'.

        For 'hide_after_due', use the `subsection_visibility` setter directly.
        """
        self.subsection_visibility = 'staff_only' if value else 'visible'

    @subsection_visibility.setter
    def subsection_visibility(self, value):
        """
        Sets the subsection visibility to the given value.
        """
        self.ensure_staff_lock_visible()
        self.find_css('input[name=content-visibility][value=' + value + ']').click()
        EmptyPromise(lambda: value == self.subsection_visibility, "Subsection visibility is updated").fulfill()

    @property
    def is_staff_lock_visible(self):
        """
        Override - Returns true if the staff lock option is visible.
        """
        return self.find_css('input[name=content-visibility]').visible
| longmen21/edx-platform | common/test/acceptance/pages/studio/overview.py | Python | agpl-3.0 | 38,374 | [
"VisIt"
] | 79c99b93a86024f8b2631464b7101baf1461486cba09c315cdb923234bf39658 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .superfuncs import *
| jH0ward/psi4 | psi4/driver/procrouting/dft_funcs/__init__.py | Python | lgpl-3.0 | 940 | [
"Psi4"
] | 84908789daef5de80d3e1424dc09a07e6a1ea005a0ab7fb87bc5aa995d5a4dcd |
''' ------------------------------------- '''
''' Plot DTW for joint angles '''
# NOTE(review): this script block relies on names not visible here
# (skels_subactivity_angles, DynamicTimeWarping, CAD_JOINTS, and the pylab
# plotting namespace: figure/subplot/plot/title/show, plus resample,
# splrep/splev) — presumably brought in by a star import; confirm.
# Indentation was reconstructed from syntax; verify loop nesting.
for i_key,key in enumerate(skels_subactivity_angles):
    figure(key)
    print "{} iterations of {}".format(len(skels_subactivity_angles[key]), key)
    for i_iter in range(len(skels_subactivity_angles[key])):
        print 'iter', i_iter
        # Each column of the transposed array is one joint-angle trajectory.
        for i,ang in enumerate(skels_subactivity_angles[key][i_iter].T):
            # Align every iteration against iteration 0 of this class.
            x = skels_subactivity_angles[key][0][:,i]
            y = ang
            y = resample(y, len(x))
            # error, dtw_mat, y_ind = mlpy.dtw.dtw_std(x, y, dist_only=False)
            error, dtw_mat, y_ind = DynamicTimeWarping(x, y)
            subplot(3,4,i+1)
            # Warp y onto x's timeline using the DTW correspondence indices.
            y_new = y[y_ind[0]]
            x_new = np.linspace(0, 1, len(y_new))
            # poly = polyfit(x_new, y_new, 5)
            # y_spline_ev = poly1d(poly)(x_new)
            # Smooth the warped trajectory with a 4-knot cubic spline.
            nknots = 4
            idx_knots = (np.arange(1,len(x_new)-1,(len(x_new)-2)/np.double(nknots))).astype('int')
            knots = x_new[idx_knots]
            y_spline = splrep(x_new, y_new, t=knots)
            y_spline_ev = splev(np.linspace(0, 1, len(y_new)), y_spline)
            # plot(y_new)
            plot(y_spline_ev)
            # Write the smoothed trajectory back into the data in-place.
            y_spline_ev = resample(y_spline_ev, len(ang))
            skels_subactivity_angles[key][i_iter].T[i] = y_spline_ev
            # show()
            # plot(y[y_ind[0]])
            # subplot(3,10,i+1 + i_iter*10)
            # plot(x[y_ind[1]])
            # plot(y[y_ind[0]])
            print i,":", len(ang), len(x), len(y[y_ind[0]])
            title(CAD_JOINTS[i])
            # Only the first 11 joints fit the 3x4 subplot grid.
            if i == 10:
                break
show()
''' Plot relative object positions wrt to afforance '''
# NOTE(review): relies on object_affordances and the pylab plotting namespace
# from outside this block; indentation reconstructed from syntax.
for i_key,key in enumerate(object_affordances):
    print "{} iterations of {}".format(len(object_affordances[key]), key)
    for i_iter in range(len(object_affordances[key])):
        print 'iter', i_iter
        for i,hand in enumerate(object_affordances[key][i_iter].T):
            y = hand
            # Zero out implausible distances (presumably mm; >2000 treated as noise).
            y[y>2000] = 0
            subplot(2,8,i_key+1 + 8*i)
            plot(y)
            title(key)
show()
''' Plot relative object positions wrt to subaction '''
# NOTE(review): relies on object_subactions and the pylab plotting namespace
# from outside this block; indentation reconstructed from syntax.
n_subactions = len(object_subactions.keys())
for i_key,key in enumerate(object_subactions):
    print "{} iterations of {}".format(len(object_subactions[key]), key)
    for i_iter in range(len(object_subactions[key])):
        print 'iter', i_iter
        for i,hand in enumerate(object_subactions[key][i_iter].T):
            y = hand[0]
            # Zero out implausible distances (>2000 treated as noise).
            y[y>2000] = 0
            # Skip trajectories that are entirely zero after filtering.
            if np.all(y==0):
                continue
            subplot(2,n_subactions,i_key+1 + n_subactions*i- n_subactions)
            plot(y.T)
            title(key)
show()
''' Come up with prototypical motion for each subaction using DTW '''
def get_prototype_motions(skels_subactivity_train, smooth=False, nknots=10):
    '''
    Input: a set of skeleton trajectories (dict: class -> list of
           (frames x 15 joints x 3) arrays)
    Output: a motif/prototype skeleton trajectory per class

    This algorithm takes every instance of a class and compares it to every
    other instance in that class using DTW, optionally smooths each warped
    trajectory with a spline, and averages the aligned instances to output
    a motif.

    Todo: currently this is done independently per-joint. Should be per skeleton!
    '''
    skels_subactivity_train = apply_user_frame(skels_subactivity_train)

    proto_motion = {}
    for key in skels_subactivity_train:
        n_instances = len(skels_subactivity_train[key])
        # Motif length = mean sequence length of this class.
        n_frames = int(np.mean([len(x) for x in skels_subactivity_train[key]]))
        proto_motion[key] = np.zeros([n_frames, 15, 3])
        for i_joint in range(15):
            warped_set = []
            for i in xrange(n_instances):
                for j in xrange(n_instances):
                    if i >= j:
                        continue  # visit each unordered pair once; skip self-pairs
                    x = skels_subactivity_train[key][i][:, i_joint]
                    y = skels_subactivity_train[key][j][:, i_joint]
                    error, dtw_mat, y_ind = DynamicTimeWarping(x, y)
                    # Warp y onto x's timeline via the DTW correspondence.
                    y_new = y[y_ind[1]]
                    x_new = np.linspace(0, 1, len(y_new))
                    if smooth:
                        # Bug fix: clamp into a local so the `nknots` parameter is not
                        # permanently ratcheted down by one short sequence.
                        n_knots = np.minimum(nknots, len(x_new) / 2)
                        idx_knots = (np.arange(1, len(x_new) - 1, (len(x_new) - 2) / np.double(n_knots))).astype('int')
                        knots = x_new[idx_knots]
                        y_spline = splrep(x_new, y_new, t=knots)
                        y_new = splev(np.linspace(0, 1, len(y_new)), y_spline)
                    # Resample every warped (optionally smoothed) instance to the
                    # common motif length before averaging.
                    warped_set += [resample(y_new, n_frames)]
            proto_motion[key][:, i_joint] = np.mean(warped_set, 0)
    return proto_motion
''' Gaussian mixture model prototype '''
# NOTE(review): interactive scratch code. It reads y_spline_set, proto_motion
# and skels_subactivity_train left over from earlier runs (y_spline_set is a
# local inside get_prototype_motions), so it only works in a live session.
y_spline_set = np.vstack(y_spline_set)
from sklearn import gaussian_process
gp = gaussian_process.GaussianProcess(theta0=1e+1, normalize=False)
x = np.arange(y_spline_set.shape[1])[:,None].repeat(y_spline_set.shape[0], 1).T.astype(np.float)
# Jitter the inputs slightly: GaussianProcess rejects duplicate training Xs.
x += np.random.random(x.shape)/10000
x_test = np.arange(y_spline_set.shape[1])[:,None]
# Offset every trace so it starts at zero before fitting.
y = y_spline_set - y_spline_set[:,0][:,None]
# gp.fit(y.ravel()[:,None], x.ravel()[:,None])
gp.fit(x.ravel()[:,None], y.ravel()[:,None])
y_pred, MSE = gp.predict(x_test, eval_MSE=True)
sigma = np.sqrt(MSE)
plot(x_test, y_pred, 'b', label=u'Prediction')
# Shade the 95% confidence band (1.96 sigma) around the GP mean.
fill(np.concatenate([x_test, x_test[::-1]]),
     np.concatenate([y_pred - 1.9600 * sigma,
                     (y_pred + 1.9600 * sigma)[::-1]]),
     alpha=.5, fc='b', ec='None', label='95% confidence interval')
# plot(x_test, y[0], 'r', label=u'Obs')
for i in range(15):
    plot(x_test, y[i], 'r', label=u'Obs_DTW')
for i in range(6):
    plot(np.arange(len(skels_subactivity_train['opening'][i][:,-1,2])), skels_subactivity_train['opening'][i][:,-1,2], 'y', label=u'Obs')
plot(proto_motion['opening'][:,-1,2], 'g', label=u'Prototype')
legend()
show()
''' Test similarity of samples to prototypical (eval on test data) '''
# Cross-subject evaluation: for each split, build prototypes from the training
# subject(s) and classify every test instance by nearest prototype under DTW
# error (argmin) and LCS similarity (argmax).
accuracy_dtw = []
accuracy_lcs = []
# Subject IDs {1,3,4}: train on each single subject, test on the other two.
training_sets = list(it.combinations([1,3,4], 1))
testing_sets = [tuple([x for x in [1,3,4] if x not in y]) for y in training_sets]
for train_set, test_set in zip(training_sets, testing_sets):
    # Get training/test sets + calculate prototype motions
    skels_subactivity_train, skels_subactivity_test = split_skeleton_data(skels_subactivity, train_set, test_set)
    proto_motion = get_prototype_motions(skels_subactivity_train, nknots=5)
    skels_subactivity_test = apply_user_frame(skels_subactivity_test)
    print "Prototypes generated"
    # Generate precision/recall
    n_subactions = len(skels_subactivity_test.keys())
    max_iterations = max([len(skels_subactivity_test[x]) for x in skels_subactivity_test])
    # errors_*[true_class, prototype_class, instance]; errors_mask marks which
    # (class, instance) cells actually hold a sample.
    errors_dtw = np.zeros([n_subactions, n_subactions,max_iterations], np.float)
    errors_lcs = np.zeros([n_subactions, n_subactions,max_iterations], np.float)
    errors_mask = np.zeros([n_subactions, max_iterations], dtype=np.int)
    # Evaluate each test instance
    for i_key,key in enumerate(skels_subactivity_test):
        n_instances = len(skels_subactivity_test[key])
        for i in xrange(n_instances):
            # Evaluate for each prototype
            for i_key2,key2 in enumerate(skels_subactivity_test):
                err_dtw = 0
                err_lcs = 0
                x_skel = proto_motion[key2]
                y_skel = skels_subactivity_test[key][i]
                # Accumulate per-joint, per-axis alignment scores.
                for i_joint in CAD_ENABLED:
                    for i_dim in range(3):
                        x = x_skel[:,i_joint,i_dim]
                        y = y_skel[:,i_joint,i_dim]
                        error_dtw, _, y_ind = mlpy.dtw_std(x, y, dist_only=False, squared=True)
                        error_lcs,_ = mlpy.lcs_real(x,y[y_ind[1]], np.std(x), len(x)/2)
                        err_dtw += error_dtw
                        err_lcs += error_lcs/len(x)
                errors_dtw[i_key, i_key2, i] = err_dtw
                errors_lcs[i_key, i_key2, i] = err_lcs
            errors_mask[i_key,i] = 1
    iterations_per_subaction = np.sum(errors_mask,1).astype(np.float)
    print "Train:{}, Test:{}".format(train_set, test_set)
    # An instance is correct when its minimum-DTW-error prototype is its class.
    solution = np.arange(n_subactions)[:,None].repeat(errors_dtw.shape[2], 1)
    true_positives = np.sum((errors_dtw.argmin(1) == solution)*errors_mask, 1)
    false_negatives = np.sum((errors_dtw.argmin(1) != solution)*errors_mask, 1)
    accuracy_dtw += [np.mean(true_positives / iterations_per_subaction)]
    print "DTW Precision:", accuracy_dtw[-1]
    # print "DTW Recall:", np.mean(true_positives / (true_positives+false_negatives).astype(np.float))
    # precision = True Positives / (True Positives + False Positives) = TP/TP+FP
    # recall = True Positives / (True Positives + False Negatives) = TP/TP+0
    # LCS is a similarity measure, hence argmax instead of argmin.
    accuracy_lcs += [np.mean(np.sum((errors_lcs.argmax(1) == solution)*errors_mask, 1) / iterations_per_subaction)]
    # print accuracy_lcs
    print "LCS Precision:", accuracy_lcs[-1]
    print ""
print '---- N-fold accuracy ---'
print "DTW: {:.4}%".format(np.mean(accuracy_dtw)*100)
print "LCS: {:.4}%".format(np.mean(accuracy_lcs)*100)
# Recompute user-frame data and prototypes for the visualization code below.
skels_subactivity_test = apply_user_frame(skels_subactivity_test)
skels_subactivity_train = apply_user_frame(skels_subactivity_train)
proto_motion = get_prototype_motions(skels_subactivity_train, smooth=False)
''' Put together and visualize a new sequence '''
# Stitch every prototype motion into one continuous sequence anchored at a
# fixed world-space object position, then render it (top) alongside randomly
# drawn training samples (middle) and test samples (bottom) via OpenCV.
object_position = np.array([0, 1050,3500])
obj_position_uv = cam.camera_model.world2im(np.array([object_position]), [480,640])
# actions = ['null', 'moving', 'cleaning', 'moving', 'null', 'placing']
actions = proto_motion.keys()
a = actions[0]
# NOTE(review): the first action is seeded here and appended again by the loop
# below, so it appears twice -- confirm whether that is intentional.
new_action = proto_motion[a] + object_position
new_action_labels = [a]*len(new_action)
for a in actions:
    new_action = np.vstack([new_action, proto_motion[a] + object_position])
    new_action_labels += [a]*len(proto_motion[a])
from time import time
t0 = time()
ii = 0
# NOTE(review): n_samples is only assigned inside the loop below; this initial
# draw relies on a value left over in the interactive session.
sequence_samples = np.random.choice(n_samples, 5, replace=False)
for i,f in enumerate(new_action):
    # On every action-label change: reset the per-action frame counter and
    # resample which training instances are shown.
    if i>0 and new_action_labels[i] != new_action_labels[i-1]:
        ii = 0
        im = np.ones([480,640])*255
        n_samples = len(skels_subactivity_train[new_action_labels[i]])
        sequence_samples = np.random.choice(n_samples-1, 5, replace=False)
        cv2.imshow("New action", im)
        cv2.waitKey(1)
    bg_im = np.ones([480,640])
    # cv2.rectangle(bg_im, tuple(obj_position_uv[0][[1,0]]-[30,30]), tuple(obj_position_uv[0][[1,0]]+[30,30]), 2000)
    # Project the prototype skeleton into the image and flip vertically.
    f_uv = cam.camera_model.world2im(f, [480,640])
    f_uv[:,0] = 480 - f_uv[:,0]
    im = display_skeletons(bg_im, f_uv, skel_type='CAD_Upper', color=2000)
    cv2.putText(im, "Action: "+new_action_labels[i], (20,60), cv2.FONT_HERSHEY_DUPLEX, 1, (2000,0,0), thickness=2)
    cv2.putText(im, "Prototype", (240,160), cv2.FONT_HERSHEY_DUPLEX, 1, (2000,0,0), thickness=2)
    # Plot training samples below the protype action
    for i_iter,i_sample in enumerate(sequence_samples):
        try:
            # Clamp the frame index to the sample's length, center the
            # skeleton on its torso, then spread the samples horizontally.
            ii_frame = min(ii, len(skels_subactivity_train[new_action_labels[i]][i_sample])-1)
            skel = skels_subactivity_train[new_action_labels[i]][i_sample][ii_frame] - skels_subactivity_train[new_action_labels[i]][i_sample][ii_frame][2]
            # ii_frame = min(ii, len(skels_subactivity_test[new_action_labels[i]][i_iter])-1)
            # skel = skels_subactivity_test[new_action_labels[i]][i_iter][ii_frame] - skels_subactivity_test[new_action_labels[i]][i_iter][ii_frame][2]
            # skel = normalize_basis(skel)
            skel += [-1400+i_iter*700, 0,3500]
            f_uv = cam.camera_model.world2im(skel, [480,640])
            f_uv[:,0] = 480 - f_uv[:,0]
            im = display_skeletons(bg_im, f_uv, skel_type='CAD_Upper', color=2000)
        except: pass
    cv2.putText(im, "Training Samples: "+str(list(train_set)), (140,320), cv2.FONT_HERSHEY_DUPLEX, 1, (2000,0,0), thickness=2)
    # Plot test samples below the protype action
    for i_iter in range(5):
        try:
            ii_frame = min(ii, len(skels_subactivity_test[new_action_labels[i]][i_iter])-1)
            skel = skels_subactivity_test[new_action_labels[i]][i_iter][ii_frame] - skels_subactivity_test[new_action_labels[i]][i_iter][ii_frame][2]
            # skel = normalize_basis(skel)
            skel += [-1400+i_iter*700, -1000,3500]
            f_uv = cam.camera_model.world2im(skel, [480,650])
            f_uv[:,0] = 480 - f_uv[:,0]
            im = display_skeletons(bg_im, f_uv, skel_type='CAD_Upper', color=2000)
        except: pass
    cv2.putText(im, "Testing Samples: "+str(list(test_set)), (150,470), cv2.FONT_HERSHEY_DUPLEX, 1, (2000,0,0), thickness=2)
    # Normalize the 16-bit-ish canvas into a displayable range.
    cv2.imshow("New action", (im-1000.)/(im.max()-1000))
    cv2.waitKey(30)
    ii += 1
print "{} fps".format(i/(time()-t0))
''' Inverse kinematics from hand to torso? '''
''' Add physical/collision constraints '''
''' add symmetries '''
''' break into left hand, right hand, torso, legs '''
# NOTE(review): scratch playback loop -- y_skel and new_action_labels are
# whatever the evaluation/visualization code above last left bound.
for i,f in enumerate(y_skel):
    bg_im = np.ones([480,640])
    # f_uv = cam.camera_model.world2im(f, [480,640])
    # Push the skeleton 3 m into the scene before projecting, then flip rows.
    f_uv = cam.camera_model.world2im(f+[0,0,3000], [480,640])
    f_uv[:,0] = 480 - f_uv[:,0]
    im = display_skeletons(bg_im, f_uv, skel_type='CAD_Upper', color=2000)
    cv2.putText(im, "Action: "+new_action_labels[i], (20,60), cv2.FONT_HERSHEY_DUPLEX, 1, (2000,0,0), thickness=2)
    cv2.imshow("New action", (im-1000.)/(im.max()-1000))
    cv2.waitKey(1)
| colincsl/pyKinectTools | pyKinectTools/dataset_readers/CAD_Repr.py | Python | bsd-2-clause | 12,563 | [
"Gaussian"
] | 8de62bb299e318af731b92343367436ca805acaadb1e9e065ac3baa35a0653cc |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Editors definitions for sellable"""
import collections
import gtk
from kiwi.datatypes import ValidationError
from kiwi.ui.forms import PercentageField, TextField
from stoqdrivers.enum import TaxType, UnitType
from stoqlib.api import api
from stoqlib.database.exceptions import IntegrityError
from stoqlib.domain.fiscal import CfopData
from stoqlib.domain.sellable import (SellableCategory, Sellable,
SellableUnit,
SellableTaxConstant,
ClientCategoryPrice)
from stoqlib.domain.product import Product
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.dialogs.labeldialog import PrintLabelEditor
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.editors.categoryeditor import SellableCategoryEditor
from stoqlib.gui.slaves.commissionslave import CommissionSlave
from stoqlib.gui.utils.databaseform import DatabaseForm
from stoqlib.gui.utils.printing import print_labels
from stoqlib.lib.decorators import cached_property
from stoqlib.lib.defaults import MAX_INT
from stoqlib.lib.formatters import get_price_format_str
from stoqlib.lib.message import yesno, warning
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.stringutils import next_value_for
from stoqlib.lib.translation import stoqlib_gettext
# Shorthand for the translation catalog lookup used throughout this module.
_ = stoqlib_gettext
# Barcodes that are allowed while running in demonstration mode.
# NOTE(review): '1598756984265' appears twice in this list -- likely a typo
# for a distinct demo barcode.
_DEMO_BAR_CODES = ['2368694135945', '6234564656756', '6985413595971',
                   '2692149835416', '1595843695465', '8596458216412',
                   '9586249534513', '7826592136954', '5892458629421',
                   '1598756984265', '1598756984265', '']
# Maximum number of sellables that may exist in demonstration mode.
_DEMO_PRODUCT_LIMIT = 30
#
# Editors
#
class SellableTaxConstantEditor(BaseEditor):
    """Editor for creating or editing a custom sellable tax constant."""

    model_type = SellableTaxConstant
    model_name = _('Taxes and Tax rates')

    @cached_property()
    def fields(self):
        """Return the form fields, in display order.

        The fields are built from an explicit (name, field) sequence:
        keyword arguments do not preserve their order on Python 2
        (PEP 468 only guarantees it from Python 3.6), so constructing
        the OrderedDict from **kwargs would show the fields in an
        arbitrary order in the form.
        """
        return collections.OrderedDict([
            ('description', TextField(_('Name'), proxy=True, mandatory=True)),
            ('tax_value', PercentageField(_('Value'), proxy=True,
                                          mandatory=True)),
        ])

    #
    #  BaseEditor
    #

    def create_model(self, store):
        """Create a new custom tax constant with no value set yet."""
        return SellableTaxConstant(tax_type=int(TaxType.CUSTOM),
                                   tax_value=None,
                                   description=u'',
                                   store=store)
class BasePriceEditor(BaseEditor):
    """Base editor for a sellable's price, keeping the price and the markup
    fields mutually consistent without update loops."""

    gladefile = 'SellablePriceEditor'
    proxy_widgets = ['markup', 'cost', 'max_discount', 'price']

    def set_widget_formats(self):
        """Apply the currency display format to the percentage widgets."""
        widgets = (self.markup, self.max_discount)
        for widget in widgets:
            widget.set_data_format(get_price_format_str())

    #
    #  BaseEditor hooks
    #

    def get_title(self, *args):
        return _('Price settings')

    def setup_proxies(self):
        # Guard flag: while True, markup<->price synchronization is suppressed
        # so the initial markup.update() below does not touch the price.
        self._editing_price = True
        self.markup.update(self.model.markup)
        # These are used to avoid circular updates when changing price or markup
        self._editing_price = False
        self._editing_markup = False
        self.set_widget_formats()
        self.main_proxy = self.add_proxy(self.model, self.proxy_widgets)
        if self.model.markup is not None:
            return
        # No markup stored yet: seed it from the sellable's suggestion.
        sellable = self.model.sellable
        self.model.markup = sellable.get_suggested_markup()
        self.main_proxy.update('markup')

    #
    #  Kiwi handlers
    #

    def on_price__validate(self, entry, value):
        if value <= 0:
            return ValidationError(_("Price cannot be zero or negative"))

    def after_price__content_changed(self, entry_box):
        # If markup is being edited, dont update the price, or the markup may be
        # programatically changed (if there was any rounding involved)
        if self._editing_markup:
            return
        self._editing_price = True
        self.markup.update(self.model.markup)
        self._editing_price = False

    def after_markup__content_changed(self, spin_button):
        # Like above, if the price is being edited, dont update the markup, or
        # the price may change again.
        if self._editing_price:
            return
        self._editing_markup = True
        self.main_proxy.update("price")
        self._editing_markup = False
class SellablePriceEditor(BasePriceEditor):
    """Price editor for a product, with on-sale info and commission slaves."""

    model_name = _(u'Product Price')
    model_type = Sellable

    def setup_slaves(self):
        """Attach the on-sale information and commission slaves."""
        from stoqlib.gui.slaves.sellableslave import OnSaleInfoSlave
        slave = OnSaleInfoSlave(self.store, self.model)
        self.attach_slave('on_sale_holder', slave)
        commission_slave = CommissionSlave(self.store, self.model)
        self.attach_slave('on_commission_data_holder', commission_slave)
        if self.model.category:
            # Make it explicit that the commission comes from the category.
            desc = self.model.category.description
            label = _('Calculate Commission From: %s') % desc
            commission_slave.change_label(label)
class CategoryPriceEditor(BasePriceEditor):
    """Price editor for a per-client-category price of a sellable."""

    model_name = _(u'Category Price')
    model_type = ClientCategoryPrice
    # The cost lives on the sellable, not on the category price model, so it
    # gets its own proxy below.
    sellable_widgets = ('cost', )
    proxy_widgets = ('markup', 'max_discount', 'price')

    def setup_proxies(self):
        self.sellable_proxy = self.add_proxy(self.model.sellable,
                                             self.sellable_widgets)
        BasePriceEditor.setup_proxies(self)
#
# Editors
#
class SellableEditor(BaseEditor):
    """This is a base class for ProductEditor and ServiceEditor and should
    be used when editing sellable objects. Note that sellable objects
    are instances inherited by Sellable."""

    # This must be be properly defined in the child classes
    model_name = None
    model_type = None

    gladefile = 'SellableEditor'
    confirm_widgets = ['description', 'cost', 'price']
    # Optional name of a UI form used to customize field labels/visibility.
    ui_form_name = None
    sellable_tax_widgets = ['tax_constant', 'tax_value']
    sellable_widgets = ['code',
                        'barcode',
                        'description',
                        'category_combo',
                        'cost',
                        'price',
                        'status_str',
                        'default_sale_cfop',
                        'unit_combo']
    proxy_widgets = (sellable_tax_widgets + sellable_widgets)

    def __init__(self, store, model=None, visual_mode=False):
        """
        :param store: a database store
        :param model: the object holding the sellable to edit, or None to
            create a new one
        :param visual_mode: if True the editor is read-only
        """
        from stoqlib.gui.slaves.sellableslave import CategoryPriceSlave
        is_new = not model
        self._sellable = None
        self._demo_mode = sysparam.get_bool('DEMO_MODE')
        self._requires_weighing_text = (
            "<b>%s</b>" % api.escape(_("This unit type requires weighing")))
        if self.ui_form_name:
            self.db_form = DatabaseForm(self.ui_form_name)
        else:
            self.db_form = None
        BaseEditor.__init__(self, store, model, visual_mode)
        self.enable_window_controls()
        if self._demo_mode:
            self._add_demo_warning()
        # Code suggestion. We need to do this before disabling sensitivity,
        # otherwise, the sellable will not be updated.
        if not self.code.read():
            self._update_default_sellable_code()
        edit_code_product = sysparam.get_bool('EDIT_CODE_PRODUCT')
        self.code.set_sensitive(not edit_code_product and not self.visual_mode)
        self.description.grab_focus()
        self.table.set_focus_chain([self.code,
                                    self.barcode,
                                    self.default_sale_cfop,
                                    self.description,
                                    self.cost_hbox,
                                    self.price_hbox,
                                    self.category_combo,
                                    self.tax_hbox,
                                    self.unit_combo,
                                    ])
        # The stock print button only shows an icon; dig into its children to
        # give it a proper label.
        self._print_labels_btn = self.add_button('print_labels', gtk.STOCK_PRINT)
        self._print_labels_btn.connect('clicked', self.on_print_labels_clicked,
                                       'print_labels')
        label = self._print_labels_btn.get_children()[0]
        label = label.get_children()[0].get_children()[1]
        label.set_label(_(u'Print labels'))
        self.setup_widgets()
        if not is_new and not self.visual_mode:
            # Although a sellable can be both removed/closed, we show only one,
            # to avoid having *lots* of buttons. If it's closed, provide a way
            # to reopen it, else, show a delete button if it can be removed
            # or a close button if it can be closed
            if self._sellable.is_closed():
                self._add_reopen_button()
            elif self._sellable.can_remove():
                self._add_delete_button()
            elif self._sellable.can_close():
                self._add_close_button()
        self.set_main_tab_label(self.model_name)
        price_slave = CategoryPriceSlave(self.store, self.model.sellable,
                                         self.visual_mode)
        self.add_extra_tab(_(u'Category Prices'), price_slave)
        self._setup_ui_forms()
        self._update_print_labels()

    def _add_demo_warning(self):
        """Warn about the demo-mode product limit; block Ok when exceeded."""
        fmt = _("This is a demostration mode of Stoq, you cannot "
                "create more than %d products.\n"
                "To avoid this limitation, enable production mode.")
        self.set_message(fmt % (_DEMO_PRODUCT_LIMIT))
        if self.store.find(Sellable).count() > _DEMO_PRODUCT_LIMIT:
            self.disable_ok()

    def _add_extra_button(self, label, stock=None,
                          callback_func=None, connect_on='clicked'):
        """Add a dialog button, optionally wiring *callback_func* to it."""
        button = self.add_button(label, stock)
        if callback_func:
            button.connect(connect_on, callback_func, label)

    def _add_delete_button(self):
        self._add_extra_button(_('Remove'), gtk.STOCK_DELETE,
                               self._on_delete_button__clicked)

    def _add_close_button(self):
        # The label depends on whether we hold a product or a service.
        if self._sellable.product:
            label = _('Close Product')
        else:
            label = _('Close Service')
        self._add_extra_button(label, None,
                               self._on_close_sellable_button__clicked)

    def _add_reopen_button(self):
        if self._sellable.product:
            label = _('Reopen Product')
        else:
            label = _('Reopen Service')
        self._add_extra_button(label, None,
                               self._on_reopen_sellable_button__clicked)

    def _update_default_sellable_code(self):
        """Suggest the next sellable code after the current maximum."""
        code = Sellable.get_max_value(self.store, Sellable.code)
        self.code.update(next_value_for(code))

    def _update_print_labels(self):
        # Printing labels only makes sense once all label data is filled in.
        sellable = self.model.sellable
        self._print_labels_btn.set_sensitive(
            all([sellable.code, sellable.barcode,
                 sellable.description, sellable.price]))

    def _setup_ui_forms(self):
        """Apply the optional database-defined UI form customizations."""
        if not self.db_form:
            return
        self.db_form.update_widget(self.code, other=self.code_lbl)
        self.db_form.update_widget(self.barcode, other=self.barcode_lbl)
        self.db_form.update_widget(self.category_combo,
                                   other=self.category_lbl)

    #
    #  Public API
    #

    def set_main_tab_label(self, tabname):
        """Rename the main (sellable) notebook tab."""
        self.sellable_notebook.set_tab_label(self.sellable_tab,
                                             gtk.Label(tabname))

    def add_extra_tab(self, tabname, tabslave):
        """Append *tabslave* as a new notebook tab labelled *tabname*."""
        self.sellable_notebook.set_show_tabs(True)
        self.sellable_notebook.set_show_border(True)
        event_box = gtk.EventBox()
        event_box.show()
        self.sellable_notebook.append_page(event_box, gtk.Label(tabname))
        self.attach_slave(tabname, tabslave, event_box)

    def set_widget_formats(self):
        """Configure spin ranges for cost/price and the weighing hint label."""
        for widget in (self.cost, self.price):
            widget.set_adjustment(gtk.Adjustment(lower=0, upper=MAX_INT,
                                                 step_incr=1))
        self.requires_weighing_label.set_size("small")
        self.requires_weighing_label.set_text("")

    def edit_sale_price(self):
        """Open the price editor; roll back if the dialog is cancelled."""
        sellable = self.model.sellable
        self.store.savepoint('before_run_editor_sellable_price')
        result = run_dialog(SellablePriceEditor,
                            self.get_toplevel().get_toplevel(),
                            self.store, sellable)
        if result:
            self.sellable_proxy.update('price')
        else:
            self.store.rollback_to_savepoint('before_run_editor_sellable_price')

    def setup_widgets(self):
        # Subclasses must build their specific widgets here.
        raise NotImplementedError

    def update_requires_weighing_label(self):
        """Show the weighing hint when the selected unit is a weight unit."""
        unit = self._sellable.unit
        if unit and unit.unit_index == UnitType.WEIGHT:
            self.requires_weighing_label.set_text(self._requires_weighing_text)
        else:
            self.requires_weighing_label.set_text("")

    def _update_tax_value(self):
        # The proxy only exists after setup_proxies() ran.
        if not hasattr(self, 'tax_proxy'):
            return
        self.tax_proxy.update('tax_constant.tax_value')

    def get_taxes(self):
        """Subclasses may override this method to provide a custom
        tax selection.

        :returns: a list of tuples containing the tax description and a
            :class:`stoqlib.domain.sellable.SellableTaxConstant` object.
        """
        return []

    def _fill_categories(self):
        categories = self.store.find(SellableCategory)
        self.category_combo.set_sensitive(any(categories) and not self.visual_mode)
        self.category_combo.prefill(api.for_combo(categories,
                                                  attr='full_description'))

    #
    #  BaseEditor hooks
    #

    def update_visual_mode(self):
        self.add_category.set_sensitive(False)
        self.sale_price_button.set_sensitive(False)

    def setup_sellable_combos(self):
        """Populate the category, CFOP and unit combos."""
        self._fill_categories()
        self.edit_category.set_sensitive(False)
        cfops = CfopData.get_for_sale(self.store)
        self.default_sale_cfop.prefill(api.for_combo(cfops, empty=''))
        self.setup_unit_combo()

    def setup_unit_combo(self):
        units = self.store.find(SellableUnit)
        self.unit_combo.prefill(api.for_combo(units, empty=_('No units')))

    def setup_tax_constants(self):
        taxes = self.get_taxes()
        self.tax_constant.prefill(taxes)

    def setup_proxies(self):
        self.set_widget_formats()
        self._sellable = self.model.sellable
        self.add_category.set_tooltip_text(_("Add a new category"))
        self.edit_category.set_tooltip_text(_("Edit the selected category"))
        self.setup_sellable_combos()
        self.setup_tax_constants()
        self.tax_proxy = self.add_proxy(self._sellable,
                                        SellableEditor.sellable_tax_widgets)
        self.sellable_proxy = self.add_proxy(self._sellable,
                                             SellableEditor.sellable_widgets)
        self.update_requires_weighing_label()

    def setup_slaves(self):
        from stoqlib.gui.slaves.sellableslave import SellableDetailsSlave
        details_slave = SellableDetailsSlave(self.store, self.model.sellable,
                                             visual_mode=self.visual_mode)
        self.attach_slave('slave_holder', details_slave)
        # Child products inherit notes from the parent, so block editing.
        if isinstance(self.model, Product) and self.model.parent is not None:
            details_slave.notes.set_property('sensitive', False)
        # Make everything aligned by pytting notes_lbl on the same size group
        self.left_labels_group.add_widget(details_slave.notes_lbl)

    def _run_category_editor(self, category=None):
        """Open the category editor; roll back if the dialog is cancelled."""
        self.store.savepoint('before_run_editor_sellable_category')
        model = run_dialog(SellableCategoryEditor, self, self.store, category)
        if model:
            self._fill_categories()
            self.category_combo.select(model)
        else:
            self.store.rollback_to_savepoint('before_run_editor_sellable_category')

    #
    #  Kiwi handlers
    #

    def _on_delete_button__clicked(self, button, parent_button_label=None):
        sellable_description = self._sellable.get_description()
        msg = (_("This will delete '%s' from the database. Are you sure?")
               % sellable_description)
        if not yesno(msg, gtk.RESPONSE_NO, _("Delete"), _("Keep")):
            return
        try:
            self._sellable.remove()
        except IntegrityError as details:
            warning(_("It was not possible to remove '%s'")
                    % sellable_description, str(details))
            return
        # We are doing this by hand instead of calling confirm/cancel because,
        # if we call self.cancel(), the transaction will not be committed. If
        # we call self.confirm(), it will, but some on_confirm hooks (like
        # ProductComponentSlave's one) will try to create other objects and
        # relate them with this product that doesn't exist anymore (we removed
        # them above), resulting in an IntegrityError.
        self.retval = self.model
        self.store.retval = self.retval
        self.main_dialog.close()

    def _on_close_sellable_button__clicked(self, button,
                                           parent_button_label=None):
        msg = (_("Do you really want to close '%s'?\n"
                 "Please note that when it's closed, you won't be able to "
                 "commercialize it anymore.")
               % self._sellable.get_description())
        if not yesno(msg, gtk.RESPONSE_NO,
                     parent_button_label, _("Don't close")):
            return
        self._sellable.close()
        self.confirm()

    def _on_reopen_sellable_button__clicked(self, button,
                                            parent_button_label=None):
        msg = (_("Do you really want to reopen '%s'?\n"
                 "Note that when it's opened, you will be able to "
                 "commercialize it again.") % self._sellable.get_description())
        if not yesno(msg, gtk.RESPONSE_NO,
                     parent_button_label, _("Keep closed")):
            return
        self._sellable.set_available()
        self.confirm()

    def on_category_combo__content_changed(self, category):
        self.edit_category.set_sensitive(bool(category.get_selected()))

    def on_tax_constant__changed(self, combo):
        self._update_tax_value()

    def on_unit_combo__changed(self, combo):
        self.update_requires_weighing_label()

    def on_sale_price_button__clicked(self, button):
        self.edit_sale_price()

    def on_add_category__clicked(self, widget):
        self._run_category_editor()

    def on_edit_category__clicked(self, widget):
        self._run_category_editor(self.category_combo.get_selected())

    def on_code__validate(self, widget, value):
        if not value:
            return ValidationError(_(u'The code can not be empty.'))
        if self.model.sellable.check_code_exists(value):
            return ValidationError(_(u'The code %s already exists.') % value)

    def on_barcode__validate(self, widget, value):
        if not value:
            return
        if value and len(value) > 14:
            return ValidationError(_(u'Barcode must have 14 digits or less.'))
        if self.model.sellable.check_barcode_exists(value):
            return ValidationError(_('The barcode %s already exists') % value)
        # Demo mode only accepts the fixed list of demo barcodes.
        if self._demo_mode and value not in _DEMO_BAR_CODES:
            return ValidationError(_("Cannot create new barcodes in "
                                     "demonstration mode"))

    def on_price__validate(self, entry, value):
        if value <= 0:
            return ValidationError(_("Price cannot be zero or negative"))

    def on_cost__validate(self, entry, value):
        if value <= 0:
            return ValidationError(_("Cost cannot be zero or negative"))

    def after_description__changed(self, widget):
        self._update_print_labels()

    def after_code__changed(self, widget):
        self._update_print_labels()

    def after_barcode__changed(self, widget):
        self._update_print_labels()

    def after_price__changed(self, widget):
        self._update_print_labels()

    def on_print_labels_clicked(self, button, parent_label_button=None):
        label_data = run_dialog(PrintLabelEditor, None, self.store,
                                self.model.sellable)
        if label_data:
            print_labels(label_data, self.store)
def test_sellable_tax_constant():  # pragma nocover
    """Manual smoke test: edit the default product tax constant."""
    ec = api.prepare_test()
    tax_constant = api.sysparam.get_object(ec.store, 'DEFAULT_PRODUCT_TAX_CONSTANT')
    run_dialog(SellableTaxConstantEditor,
               parent=None, store=ec.store, model=tax_constant)
    print(tax_constant)
def test_price_editor():  # pragma nocover
    """Manual smoke test: open the price editor on a throwaway sellable."""
    from decimal import Decimal
    ec = api.prepare_test()
    sellable = ec.create_sellable()
    sellable.cost = Decimal('15.55')
    sellable.price = Decimal('21.50')
    run_dialog(SellablePriceEditor,
               parent=None, store=ec.store, model=sellable)

if __name__ == '__main__':  # pragma nocover
    test_price_editor()
| andrebellafronte/stoq | stoqlib/gui/editors/sellableeditor.py | Python | gpl-2.0 | 22,084 | [
"VisIt"
] | c16912036f160f264603ca234ed5d80814e279404284046a97b58cfe02098dac |
########################################################################
# File: File.py
# Date: 2012/08/03 15:02:53
########################################################################
"""
:mod: File
.. module: File
:synopsis: RMS operation file
operation file
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Disable invalid names warning
# pylint: disable=invalid-name
__RCSID__ = "$Id$"
# # imports
import datetime
import os
import json
import six
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.File import checkGuid
from DIRAC.RequestManagementSystem.private.JSONUtils import RMSEncoder
########################################################################
class File(object):
  """
  A bag object holding Operation file attributes.

  :param Operation.Operation _parent: reference to parent Operation
  :param dict __data__: attrs dict

  It is managed by SQLAlchemy, so the OperationID, FileID should never be set by hand
  (except when constructed from JSON of course...)
  In principle, the _parent attribute could be totally managed by SQLAlchemy. However, it is
  set only when inserted into the DB, this is why I manually set it in the Operation

  .. warning::

    You cannot add a File object to multiple Operations. They are different entry in the DB, so they must be different
    objects
  """

  # Format used to (de)serialize datetime attributes in JSON.
  _datetimeFormat = '%Y-%m-%d %H:%M:%S'

  def __init__(self, fromDict=None):
    """c'tor

    :param self: self reference
    :param dict fromDict: property dict (or its JSON string representation)
    """
    self._parent = None
    self._Status = 'Waiting'
    self._LFN = None
    self.PFN = None
    self._ChecksumType = None
    self.Checksum = None
    self._GUID = None
    self.Attempt = 0
    self.Size = 0
    self.Error = None
    self._duration = 0

    # This variable is used in the setter to know whether they are called
    # because of the json initialization or not
    self.initialLoading = True

    fromDict = fromDict if isinstance(fromDict, dict)\
        else json.loads(fromDict) if isinstance(fromDict, six.string_types)\
        else {}

    for attrName, attrValue in fromDict.items():
      # The JSON module forces the use of UTF-8, which is not properly
      # taken into account in DIRAC.
      # One would need to replace all the '== str' with 'in six.string_types'
      # This is converting `unicode` to `str` and doesn't make sense in Python 3
      if six.PY2 and isinstance(attrValue, six.string_types):
        attrValue = attrValue.encode()

      if attrValue:
        setattr(self, attrName, attrValue)
    self.initialLoading = False

  @property
  def LFN(self):
    """ LFN prop """
    return self._LFN

  @LFN.setter
  def LFN(self, value):
    """ lfn setter; rejects non-string and relative paths """
    if not isinstance(value, six.string_types):
      raise TypeError("LFN has to be a string!")
    if not os.path.isabs(value):
      raise ValueError("LFN should be an absolute path!")
    self._LFN = value

  @property
  def GUID(self):
    """ GUID prop """
    return self._GUID

  @GUID.setter
  def GUID(self, value):
    """ GUID setter; validates the GUID format when a value is given """
    if value:
      if not isinstance(value, six.string_types):
        raise TypeError("GUID should be a string!")
      if not checkGuid(value):
        raise ValueError("'%s' is not a valid GUID!" % str(value))
    self._GUID = value

  @property
  def ChecksumType(self):
    """ checksum type prop """
    return self._ChecksumType

  @ChecksumType.setter
  def ChecksumType(self, value):
    """ checksum type setter; normalizes to upper case, maps 'AD' to ADLER32 """
    if not value:
      self._ChecksumType = ""
    elif value and str(value).strip().upper() not in ("ADLER32", "MD5", "SHA1"):
      if str(value).strip().upper() == 'AD':
        self._ChecksumType = 'ADLER32'
      else:
        raise ValueError("unknown checksum type: %s" % value)
    else:
      self._ChecksumType = str(value).strip().upper()

  @property
  def Status(self):
    """ status prop """
    if not self._Status:
      self._Status = 'Waiting'
    return self._Status

  @Status.setter
  def Status(self, value):
    """ status setter; also clears Error on 'Done', touches the parent
    Operation's LastUpdate on a real transition and notifies it """
    if value not in ("Waiting", "Failed", "Done", "Scheduled"):
      raise ValueError("Unknown Status: %s!" % str(value))

    if value == 'Done':
      self.Error = ''
    updateTime = (self._Status != value)
    if updateTime and self._parent:
      self._parent.LastUpdate = datetime.datetime.utcnow().replace(microsecond=0)
    self._Status = value

    if self._parent:
      self._parent._notify()

  def __str__(self):
    """ str operator """
    return self.toJSON()['Value']

  def toJSON(self):
    """ Returns the json formated string that describes the File """
    try:
      jsonStr = json.dumps(self, cls=RMSEncoder)
      return S_OK(jsonStr)
    except Exception as e:
      return S_ERROR(str(e))

  def _getJSONData(self):
    """ Returns the data that have to be serialized by JSON """
    attrNames = ['FileID', 'OperationID', "Status", "LFN",
                 "PFN", "ChecksumType", "Checksum", "GUID", "Attempt",
                 "Size", "Error"]
    jsonData = {}

    for attrName in attrNames:

      # FileID and OperationID might not be set since they are managed by SQLAlchemy
      if not hasattr(self, attrName):
        continue

      # Fetch the attribute once; datetimes are rendered as formatted strings,
      # everything else is serialized as-is.
      value = getattr(self, attrName)
      if isinstance(value, datetime.datetime):
        # We convert date time to a string
        jsonData[attrName] = value.strftime(self._datetimeFormat)  # pylint: disable=no-member
      else:
        jsonData[attrName] = value

    return jsonData
| yujikato/DIRAC | src/DIRAC/RequestManagementSystem/Client/File.py | Python | gpl-3.0 | 5,634 | [
"DIRAC"
] | aea55f9ac843c2ec4e2955e0cfd267f985276e2a6bc0c6ebb1e63db0fd23f0ca |
import tensorflow as tf
import matplotlib.pyplot as plt
from random import uniform
import numpy as np
## Placeholders for x and y variables
# NOTE(review): tf.placeholder is TF1.x-only API; under TF2 this needs
# tf.compat.v1 and disabled eager execution -- confirm the target version.
x = tf.placeholder("float", None)
y = tf.placeholder("float", None)

## Output = (x^3)*(y^2) - 7(x^2)*y + x - 12
# Symbolic polynomial evaluated later inside a session.
output = (x ** 3) * (y ** 2) - 7 * (x ** 2) * y + x - 12

## Number of points to plot
n = 1000
def feedDictionary(x, y, distribution="Random"):
    """Build a feed dict for the graph: *x* gets one random draw, *y* gets 0.2.

    ``distribution`` selects the sampler for x: "Gamma", "Cauchy" or
    "Gaussian"; any other value falls back to uniform(-1, 1).
    """
    # Lazy callables so exactly one RNG draw happens per call
    samplers = {
        "Gamma": lambda: np.random.gamma(1),
        "Cauchy": lambda: np.random.standard_cauchy(),
        "Gaussian": lambda: np.random.normal(0, 1),
    }
    draw = samplers.get(distribution, lambda: uniform(-1, 1))
    return {x: draw(), y: 0.2}
def run():
    """Sample the graph n times with Gaussian-distributed x and scatter-plot
    every result whose x draw landed strictly inside (-1, 1)."""
    with tf.Session() as session:
        for _ in range(n):
            fd = feedDictionary(x, y, "Gaussian")
            result = session.run(output, feed_dict=fd)
            xVal = fd.get(x)
            ## We only want to plot for -1 < x < 1
            if -1 < xVal < 1:
                plt.plot(xVal, result, "bo")
    plt.show()


if __name__ == "__main__":
    run()
| mewturn/Python | 50.021 Artificial Intelligence/tensorFDPractice.py | Python | mit | 1,380 | [
"Gaussian"
] | 80c5c0d5a51a1e7505585e415d642ca250f30084a0250b378e1723bed798ae90 |
# coding: utf-8
from __future__ import unicode_literals, division, print_function
import unittest
from pymatgen.io.abinitio.launcher import ScriptEditor
class ScriptEditorTest(unittest.TestCase):

    def test_base(self):
        """Smoke-test the ScriptEditor builder API end to end."""
        editor = ScriptEditor()
        editor.shebang()
        editor.declare_var("FOO", "BAR")
        editor.add_emptyline()
        editor.add_comment("This is a comment")
        editor.declare_vars({"FOO1": "BAR1"})
        editor.load_modules(["module1", "module2"])
        script = editor.get_script_str()
        print(script)


if __name__ == '__main__':
    unittest.main()
| Dioptas/pymatgen | pymatgen/io/abinitio/tests/test_launcher.py | Python | mit | 578 | [
"pymatgen"
] | 703bd6c6c173b2c740a0de87c636cceac71893cfc40a3821e1bdf6c12d3aeea3 |
import typing
import symtab
def signature(ret_type, param_types):
    """Decorator factory tagging a function with its type signature.

    Stores ``ret_type`` and ``param_types`` on the function object as
    ``_ret_type`` / ``_param_types`` and returns the function unchanged
    (no wrapping takes place).
    """
    def attach(function):
        function._ret_type = ret_type
        function._param_types = param_types
        return function
    return attach
@signature(typing.Pointer(typing.Int8), [typing.Int32, typing.Int32])
def PlacedInt8Array(compiler, node, builder):
    """Builtin generator: reinterpret an integer address as an int8 pointer.

    NOTE(review): ``typing`` here is the project's own type module (it
    provides Pointer/Int8/Int32), not the stdlib ``typing`` -- confirm.
    """
    # Visit the call arguments first so their llvm_value fields are populated
    node.args = [compiler.visit(arg) for arg in node.args]
    # assign the address to the new variable
    node.llvm_value = builder.inttoptr(node.args[1].llvm_value,
                                       typing.Pointer(typing.Int8))
    return node
def create_builtin_scope():
    """Create the root symbol table pre-populated with builtin functions."""
    scope = symtab.SymbolTable(None)
    # PlacedInt8Array: (Int32, Int32) -> Pointer(Int8)
    funcType = typing.Function(typing.Pointer(typing.Int8),
                               [typing.Int32, typing.Int32])
    symbol = symtab.Symbol("PlacedInt8Array", funcType)
    symbol.generator = PlacedInt8Array
    scope.add_symbol(symbol)
    return scope
| Jokymon/hpcs | hpcs_builtins.py | Python | gpl-3.0 | 968 | [
"VisIt"
] | 93c913a4905615097509edca10eebea80aeaf80379ad7efca85d786351606dd7 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
# Capacity figures reported by FakeVirtDriver and asserted on by the
# tracker tests below.
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
    FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)

# Two-cell NUMA host topology exposed by the fake driver.
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
    cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
                            cpu_usage=0, memory_usage=0, mempages=[],
                            siblings=[], pinned_cpus=set([])),
           objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
                            cpu_usage=0, memory_usage=0, mempages=[],
                            siblings=[], pinned_cpus=set([]))])
# Oversubscription limits layered on top of the fake topology.
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
    cpu_allocation_ratio=2, ram_allocation_ratio=2)

ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
    """Pretend version of a lame virt driver."""

    def __init__(self):
        super(UnsupportedVirtDriver, self).__init__(None)

    def get_host_ip_addr(self):
        return '127.0.0.1'

    def get_available_resource(self, nodename):
        # no support for getting resource usage info: the empty dict makes
        # the resource tracker disable itself (see UnsupportedDriverTestCase)
        return {}
class FakeVirtDriver(driver.ComputeDriver):
    """Fake virt driver reporting a fixed, known resource inventory.

    Capacity figures come from the FAKE_VIRT_* module constants; PCI
    devices/pools are only reported when pci_support=True, and a custom
    stats blob can be injected via the ``stats`` kwarg.
    """

    def __init__(self, pci_support=False, stats=None,
                 numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
        super(FakeVirtDriver, self).__init__(None)
        self.memory_mb = FAKE_VIRT_MEMORY_MB
        self.local_gb = FAKE_VIRT_LOCAL_GB
        self.vcpus = FAKE_VIRT_VCPUS
        self.numa_topology = numa_topology

        self.memory_mb_used = 0
        self.local_gb_used = 0
        self.pci_support = pci_support
        # Raw device list mirroring what a real hypervisor would expose;
        # two VFs + one PF of product 0443, one plain PCI device, and a VF
        # with no NUMA affinity.
        self.pci_devices = [
            {
                'label': 'label_8086_0443',
                'dev_type': 'type-VF',
                'compute_node_id': 1,
                'address': '0000:00:01.1',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_0443',
                'dev_type': 'type-VF',
                'compute_node_id': 1,
                'address': '0000:00:01.2',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_0443',
                'dev_type': 'type-PF',
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_0123',
                'dev_type': 'type-PCI',
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '0123',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_7891',
                'dev_type': 'type-VF',
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '7891',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': None
            },
        ] if self.pci_support else []
        # Aggregated pool view matching the whitelisted devices above.
        self.pci_stats = [
            {
                'count': 2,
                'vendor_id': '8086',
                'product_id': '0443',
                'numa_node': 1
            },
            {
                'count': 1,
                'vendor_id': '8086',
                'product_id': '7891',
                'numa_node': None
            },
        ] if self.pci_support else []
        if stats is not None:
            self.stats = stats

    def get_host_ip_addr(self):
        return '127.0.0.1'

    def get_available_resource(self, nodename):
        """Report the fixed inventory in the dict shape the tracker expects."""
        d = {
            'vcpus': self.vcpus,
            'memory_mb': self.memory_mb,
            'local_gb': self.local_gb,
            'vcpus_used': 0,
            'memory_mb_used': self.memory_mb_used,
            'local_gb_used': self.local_gb_used,
            'hypervisor_type': 'fake',
            'hypervisor_version': 0,
            'hypervisor_hostname': 'fakehost',
            'cpu_info': '',
            'numa_topology': (
                self.numa_topology._to_json() if self.numa_topology else None),
        }
        if self.pci_support:
            d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
        if hasattr(self, 'stats'):
            d['stats'] = self.stats
        return d

    def estimate_instance_overhead(self, instance_info):
        """Return a constant per-instance memory overhead for testing."""
        instance_info['memory_mb']  # make sure memory value is present
        overhead = {
            'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
        }
        return overhead  # just return a constant value for testing
class BaseTestCase(test.TestCase):
    """Shared plumbing for all resource-tracker tests.

    Provides in-memory fakes for instances, flavors and NUMA extras, and
    stubs the conductor DB calls the tracker makes so no real database is
    touched.
    """

    def setUp(self):
        super(BaseTestCase, self).setUp()

        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)

        self.context = context.get_admin_context()

        self.flags(pci_passthrough_whitelist=[
            '{"vendor_id": "8086", "product_id": "0443"}',
            '{"vendor_id": "8086", "product_id": "7891"}'])
        self.flags(use_local=True, group='conductor')
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)

        # in-memory stores backing the fake DB calls below
        self._instances = {}
        self._numa_topologies = {}
        self._instance_types = {}

        self.stubs.Set(self.conductor.db,
                       'instance_get_all_by_host_and_node',
                       self._fake_instance_get_all_by_host_and_node)
        self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
                       self._fake_instance_extra_get_by_instance_uuid)
        self.stubs.Set(self.conductor.db,
                       'flavor_get', self._fake_flavor_get)

        self.host = 'fakehost'
        self.compute = self._create_compute_node()
        self.updated = False
        self.deleted = False
        self.update_call_count = 0

    def _create_compute_node(self, values=None):
        """Return a compute-node dict, optionally overridden by *values*."""
        compute = {
            "id": 1,
            "service_id": 1,
            "host": "fakehost",
            "vcpus": 1,
            "memory_mb": 1,
            "local_gb": 1,
            "vcpus_used": 1,
            "memory_mb_used": 1,
            "local_gb_used": 1,
            "free_ram_mb": 1,
            "free_disk_gb": 1,
            "current_workload": 1,
            "running_vms": 0,
            "cpu_info": None,
            "numa_topology": None,
            "stats": '{"num_instances": "1"}',
            "hypervisor_hostname": "fakenode",
            'hypervisor_version': 1,
            'hypervisor_type': 'fake-hyp',
            'disk_available_least': None,
            'host_ip': None,
            'metrics': None,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
        }
        if values:
            compute.update(values)
        return compute

    def _create_service(self, host="fakehost", compute=None):
        """Return a nova-compute service dict, optionally joined to *compute*."""
        if compute:
            compute = [compute]

        service = {
            "id": 1,
            "host": host,
            "binary": "nova-compute",
            "topic": "compute",
            "compute_node": compute,
            "report_count": 0,
            'disabled': False,
            'disabled_reason': None,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'last_seen_up': None,
        }
        return service

    def _fake_instance_system_metadata(self, instance_type, prefix=''):
        """Build the flavor-derived system metadata rows for an instance."""
        sys_meta = []
        for key in flavors.system_metadata_flavor_props.keys():
            sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
                             'value': instance_type[key]})
        return sys_meta

    def _fake_instance(self, stash=True, flavor=None, **kwargs):
        """Create and register a plain-dict instance fixture.

        With stash=True the flavor is also stored under new_/old_ prefixes,
        mimicking an instance mid-resize.
        """
        # NOTE(danms): Remove this when all the compute_node stuff is
        # converted to objects
        # Default to an instance ready to resize to or from the same
        # instance_type
        flavor = flavor or self._fake_flavor_create()
        sys_meta = self._fake_instance_system_metadata(flavor)

        if stash:
            # stash instance types in system metadata.
            sys_meta = (sys_meta +
                        self._fake_instance_system_metadata(flavor, 'new_') +
                        self._fake_instance_system_metadata(flavor, 'old_'))

        instance_uuid = str(uuid.uuid1())
        instance = {
            'uuid': instance_uuid,
            'vm_state': vm_states.RESIZED,
            'task_state': None,
            'ephemeral_key_uuid': None,
            'os_type': 'Linux',
            'project_id': '123456',
            'host': None,
            'node': None,
            'instance_type_id': flavor['id'],
            'memory_mb': flavor['memory_mb'],
            'vcpus': flavor['vcpus'],
            'root_gb': flavor['root_gb'],
            'ephemeral_gb': flavor['ephemeral_gb'],
            'launched_on': None,
            'system_metadata': sys_meta,
            'availability_zone': None,
            'vm_mode': None,
            'reservation_id': None,
            'display_name': None,
            'default_swap_device': None,
            'power_state': None,
            'scheduled_at': None,
            'access_ip_v6': None,
            'access_ip_v4': None,
            'key_name': None,
            'updated_at': None,
            'cell_name': None,
            'locked': None,
            'locked_by': None,
            'launch_index': None,
            'architecture': None,
            'auto_disk_config': None,
            'terminated_at': None,
            'ramdisk_id': None,
            'user_data': None,
            'cleaned': None,
            'deleted_at': None,
            'id': 333,
            'disable_terminate': None,
            'hostname': None,
            'display_description': None,
            'key_data': None,
            'deleted': None,
            'default_ephemeral_device': None,
            'progress': None,
            'launched_at': None,
            'config_drive': None,
            'kernel_id': None,
            'user_id': None,
            'shutdown_terminate': None,
            'created_at': None,
            'image_ref': None,
            'root_device_name': None,
        }

        extra = {
            'id': 1, 'created_at': None, 'updated_at': None,
            'deleted_at': None, 'deleted': None,
            'instance_uuid': instance['uuid'],
            'numa_topology': None,
            'pci_requests': None,
        }

        numa_topology = kwargs.pop('numa_topology', None)
        if numa_topology:
            extra['numa_topology'] = numa_topology._to_json()

        instance.update(kwargs)
        instance['extra'] = extra

        self._instances[instance_uuid] = instance
        self._numa_topologies[instance_uuid] = extra
        return instance

    def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
        """Create and register an objects.Instance fixture (object variant)."""
        # Default to an instance ready to resize to or from the same
        # instance_type
        flavor = flavor or self._fake_flavor_create()
        if not isinstance(flavor, objects.Flavor):
            flavor = objects.Flavor(**flavor)

        instance_uuid = str(uuid.uuid1())
        instance = objects.Instance(context=self.context, uuid=instance_uuid,
                                    flavor=flavor)
        instance.update({
            'vm_state': vm_states.RESIZED,
            'task_state': None,
            'ephemeral_key_uuid': None,
            'os_type': 'Linux',
            'project_id': '123456',
            'host': None,
            'node': None,
            'instance_type_id': flavor['id'],
            'memory_mb': flavor['memory_mb'],
            'vcpus': flavor['vcpus'],
            'root_gb': flavor['root_gb'],
            'ephemeral_gb': flavor['ephemeral_gb'],
            'launched_on': None,
            'system_metadata': {},
            'availability_zone': None,
            'vm_mode': None,
            'reservation_id': None,
            'display_name': None,
            'default_swap_device': None,
            'power_state': None,
            'scheduled_at': None,
            'access_ip_v6': None,
            'access_ip_v4': None,
            'key_name': None,
            'updated_at': None,
            'cell_name': None,
            'locked': None,
            'locked_by': None,
            'launch_index': None,
            'architecture': None,
            'auto_disk_config': None,
            'terminated_at': None,
            'ramdisk_id': None,
            'user_data': None,
            'cleaned': None,
            'deleted_at': None,
            'id': 333,
            'disable_terminate': None,
            'hostname': None,
            'display_description': None,
            'key_data': None,
            'deleted': None,
            'default_ephemeral_device': None,
            'progress': None,
            'launched_at': None,
            'config_drive': None,
            'kernel_id': None,
            'user_id': None,
            'shutdown_terminate': None,
            'created_at': None,
            'image_ref': None,
            'root_device_name': None,
        })

        if stash:
            instance.old_flavor = flavor
            instance.new_flavor = flavor

        instance.numa_topology = kwargs.pop('numa_topology', None)
        instance.update(kwargs)

        self._instances[instance_uuid] = instance
        return instance

    def _fake_flavor_create(self, **kwargs):
        """Create and register a flavor dict keyed by its id."""
        instance_type = {
            'id': 1,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'disabled': False,
            'is_public': True,
            'name': 'fakeitype',
            'memory_mb': FAKE_VIRT_MEMORY_MB,
            'vcpus': FAKE_VIRT_VCPUS,
            'root_gb': ROOT_GB,
            'ephemeral_gb': EPHEMERAL_GB,
            'swap': 0,
            'rxtx_factor': 1.0,
            'vcpu_weight': 1,
            'flavorid': 'fakeflavor',
            'extra_specs': {},
        }
        instance_type.update(**kwargs)

        id_ = instance_type['id']
        self._instance_types[id_] = instance_type
        return instance_type

    def _fake_instance_get_all_by_host_and_node(self, context, host, nodename,
                                                columns_to_join=None):
        # filter the registered fixtures by host, like the real DB query
        return [i for i in self._instances.values() if i['host'] == host]

    def _fake_instance_extra_get_by_instance_uuid(self, context,
                                                  instance_uuid, columns=None):
        return self._numa_topologies.get(instance_uuid)

    def _fake_flavor_get(self, ctxt, id_):
        return self._instance_types[id_]

    def _fake_compute_node_update(self, ctx, compute_node_id, values,
                                  prune_stats=False):
        """Record tracker updates on self.compute instead of the DB."""
        self.update_call_count += 1
        self.updated = True
        self.compute.update(values)
        return self.compute

    def _driver(self):
        return FakeVirtDriver()

    def _tracker(self, host=None):
        """Build a ResourceTracker wired to this test's fake driver/node."""
        if host is None:
            host = self.host

        node = "fakenode"

        driver = self._driver()

        tracker = resource_tracker.ResourceTracker(host, driver, node)
        tracker.compute_node = self._create_compute_node()
        tracker.ext_resources_handler = \
            resources.ResourceHandler(RESOURCE_NAMES, True)
        return tracker
class UnsupportedDriverTestCase(BaseTestCase):
    """Resource tracking should be disabled when the virt driver doesn't
    support it.
    """
    def setUp(self):
        super(UnsupportedDriverTestCase, self).setUp()
        self.tracker = self._tracker()
        # seed tracker with data:
        self.tracker.update_available_resource(self.context)

    def _driver(self):
        # UnsupportedVirtDriver reports no resources, disabling the tracker
        return UnsupportedVirtDriver()

    def test_disabled(self):
        # disabled = no compute node stats
        self.assertTrue(self.tracker.disabled)
        self.assertIsNone(self.tracker.compute_node)

    def test_disabled_claim(self):
        # basic claim: a disabled tracker hands out no-op (zero) claims
        instance = self._fake_instance_obj()
        with mock.patch.object(instance, 'save'):
            claim = self.tracker.instance_claim(self.context, instance)
        self.assertEqual(0, claim.memory_mb)

    def test_disabled_instance_claim(self):
        # instance variation:
        instance = self._fake_instance_obj()
        with mock.patch.object(instance, 'save'):
            claim = self.tracker.instance_claim(self.context, instance)
        self.assertEqual(0, claim.memory_mb)

    @mock.patch('nova.objects.Instance.save')
    def test_disabled_instance_context_claim(self, mock_save):
        # instance context manager variation:
        instance = self._fake_instance_obj()
        self.tracker.instance_claim(self.context, instance)
        with self.tracker.instance_claim(self.context, instance) as claim:
            self.assertEqual(0, claim.memory_mb)

    def test_disabled_updated_usage(self):
        # must not blow up even though the tracker is disabled
        instance = self._fake_instance(host='fakehost', memory_mb=5,
                                       root_gb=10)
        self.tracker.update_usage(self.context, instance)

    def test_disabled_resize_claim(self):
        # resize claims are also no-ops but still carry migration metadata
        instance = self._fake_instance_obj()
        instance_type = self._fake_flavor_create()
        claim = self.tracker.resize_claim(self.context, instance,
                                          instance_type)
        self.assertEqual(0, claim.memory_mb)
        self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
        self.assertEqual(instance_type['id'],
                         claim.migration['new_instance_type_id'])

    def test_disabled_resize_context_claim(self):
        instance = self._fake_instance_obj()
        instance_type = self._fake_flavor_create()
        with self.tracker.resize_claim(self.context, instance, instance_type) \
                as claim:
            self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
    """The tracker must disable itself when no service record exists."""

    def setUp(self):
        super(MissingServiceTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.tracker = self._tracker()

    def test_missing_service(self):
        self.tracker.compute_node = None
        # simulate the DB having no service row for this host
        self.tracker._get_service = mock.Mock(return_value=None)
        self.tracker.update_available_resource(self.context)
        self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
    """When the service exists but has no compute node record, the tracker
    should create one and stay enabled."""

    def setUp(self):
        super(MissingComputeNodeTestCase, self).setUp()
        self.tracker = self._tracker()

        self.stubs.Set(db, 'service_get_by_compute_host',
                       self._fake_service_get_by_compute_host)
        self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
                       self._fake_compute_node_get_by_host_and_nodename)
        self.stubs.Set(db, 'compute_node_create',
                       self._fake_create_compute_node)
        self.tracker.scheduler_client.update_resource_stats = mock.Mock()

    def _fake_create_compute_node(self, context, values):
        # record that the tracker asked to create the missing node
        self.created = True
        return self._create_compute_node(values)

    def _fake_service_get_by_compute_host(self, ctx, host):
        # return a service with no joined compute
        service = self._create_service()
        return service

    def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
        # return no compute node
        raise exception.ComputeHostNotFound(host=host)

    def test_create_compute_node(self):
        self.tracker.compute_node = None
        self.tracker.update_available_resource(self.context)
        self.assertTrue(self.created)

    def test_enabled(self):
        self.tracker.update_available_resource(self.context)
        self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
    """Plumbing for tests that need a fully working (enabled) tracker.

    Stubs every DB call the tracker makes onto in-memory fakes and seeds
    the tracker via update_available_resource().
    """

    def setUp(self):
        # setup plumbing for a working resource tracker with required
        # database models and a compatible compute driver:
        super(BaseTrackerTestCase, self).setUp()

        self.tracker = self._tracker()
        self._migrations = {}

        self.stubs.Set(db, 'service_get_by_compute_host',
                       self._fake_service_get_by_compute_host)
        self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
                       self._fake_compute_node_get_by_host_and_nodename)
        self.stubs.Set(db, 'compute_node_update',
                       self._fake_compute_node_update)
        self.stubs.Set(db, 'compute_node_delete',
                       self._fake_compute_node_delete)
        self.stubs.Set(db, 'migration_update',
                       self._fake_migration_update)
        self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
                       self._fake_migration_get_in_progress_by_host_and_node)

        # Note that this must be called before the call to _init_tracker()
        patcher = pci_fakes.fake_pci_whitelist()
        self.addCleanup(patcher.stop)

        self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats',
                       self._fake_compute_node_update)

        self._init_tracker()
        self.limits = self._limits()

    def _fake_service_get_by_compute_host(self, ctx, host):
        self.service = self._create_service(host, compute=self.compute)
        return self.service

    def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
        self.compute = self._create_compute_node()
        return self.compute

    def _fake_compute_node_update(self, ctx, compute_node_id, values,
                                  prune_stats=False):
        """Record tracker updates on self.compute instead of the DB."""
        self.update_call_count += 1
        self.updated = True
        self.compute.update(values)
        return self.compute

    def _fake_compute_node_delete(self, ctx, compute_node_id):
        self.deleted = True
        self.compute.update({'deleted': 1})
        return self.compute

    def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
                                                         node):
        # only migrations NOT in a terminal state count as in-progress
        status = ['confirmed', 'reverted', 'error']
        migrations = []

        for migration in self._migrations.values():
            migration = obj_base.obj_to_primitive(migration)
            if migration['status'] in status:
                continue

            uuid = migration['instance_uuid']
            migration['instance'] = self._instances[uuid]
            migrations.append(migration)

        return migrations

    def _fake_migration_update(self, ctxt, migration_id, values):
        # cheat and assume there's only 1 migration present
        # NOTE: dict views are not indexable on Python 3, so take the first
        # element via an iterator instead of values()[0]
        migration = next(iter(self._migrations.values()))
        migration.update(values)
        return migration

    def _init_tracker(self):
        self.tracker.update_available_resource(self.context)

    def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                disk_gb=FAKE_VIRT_LOCAL_GB,
                vcpus=FAKE_VIRT_VCPUS,
                numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
        """Create limits dictionary used for oversubscribing resources."""

        return {
            'memory_mb': memory_mb,
            'disk_gb': disk_gb,
            'vcpu': vcpus,
            'numa_topology': numa_topology,
        }

    def assertEqualNUMAHostTopology(self, expected, got):
        """Compare two NUMA host topologies cell-by-cell on key attributes."""
        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
        if None in (expected, got):
            if expected != got:
                raise AssertionError("Topologies don't match. Expected: "
                                     "%(expected)s, but got: %(got)s" %
                                     {'expected': expected, 'got': got})
            else:
                return

        if len(expected) != len(got):
            raise AssertionError("Topologies don't match due to different "
                                 "number of cells. Expected: "
                                 "%(expected)s, but got: %(got)s" %
                                 {'expected': expected, 'got': got})
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            for attr in attrs:
                if getattr(exp_cell, attr) != getattr(got_cell, attr):
                    raise AssertionError("Topologies don't match. Expected: "
                                         "%(expected)s, but got: %(got)s" %
                                         {'expected': expected, 'got': got})

    def _assert(self, value, field, tracker=None):
        """Assert a single compute-node field on the (default) tracker."""
        if tracker is None:
            tracker = self.tracker

        if field not in tracker.compute_node:
            raise test.TestingException(
                "'%(field)s' not in compute node." % {'field': field})
        x = tracker.compute_node[field]

        if field == 'numa_topology':
            self.assertEqualNUMAHostTopology(
                value, objects.NUMATopology.obj_from_db_obj(x))
        else:
            self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
    """Sanity checks of the tracker's initial resource accounting."""

    def test_free_ram_resource_value(self):
        driver = FakeVirtDriver()
        mem_free = driver.memory_mb - driver.memory_mb_used
        self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])

    def test_free_disk_resource_value(self):
        driver = FakeVirtDriver()
        # fix: the local was misleadingly named mem_free here
        disk_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(disk_free, self.tracker.compute_node['free_disk_gb'])

    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)

    def test_init(self):
        """Tracker should start with the driver's full, unused inventory."""
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node['current_workload'])
        self.assertEqual(driver.pci_stats,
                         self.tracker.compute_node['pci_device_pools'])
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
    """The tracker must push stats to the scheduler only on real changes."""

    def setUp(self):
        super(SchedulerClientTrackerTestCase, self).setUp()
        self.tracker.scheduler_client.update_resource_stats = mock.Mock(
            side_effect=self._fake_compute_node_update)

    def test_update_resource(self):
        # change a compute node value to simulate a change
        self.tracker.compute_node['local_gb_used'] += 1
        expected = copy.deepcopy(self.tracker.compute_node)
        self.tracker._update(self.context)
        self.tracker.scheduler_client.update_resource_stats.\
            assert_called_once_with(self.context,
                                    ("fakehost", "fakenode"),
                                    expected)

    def test_no_update_resource(self):
        self.tracker._update(self.context)
        update = self.tracker.scheduler_client.update_resource_stats
        self.assertFalse(update.called, "update_resource_stats should not be "
                         "called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
    """Same init sanity checks as TrackerTestCase, but with PCI support on."""

    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)

    def test_init(self):
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node['current_workload'])
        # NOTE(danms): PciDeviceStats only supports iteration, so we have to
        # listify it before we can examine the contents by index.
        pools = list(self.tracker.compute_node['pci_device_pools'])
        self.assertEqual(driver.pci_stats[0]['product_id'],
                         pools[0]['product_id'])

    def _driver(self):
        # enable the fake PCI inventory defined on FakeVirtDriver
        return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
    """The external-resources handler must be able to inject 'stats'."""

    def setUp(self):
        super(TrackerExtraResourcesTestCase, self).setUp()
        self.driver = self._driver()

    def _driver(self):
        return FakeVirtDriver()

    def test_set_empty_ext_resources(self):
        # _write_ext_resources must create the 'stats' key when absent
        resources = self.driver.get_available_resource(self.tracker.nodename)
        self.assertNotIn('stats', resources)
        self.tracker._write_ext_resources(resources)
        self.assertIn('stats', resources)

    def test_set_extra_resources(self):
        def fake_write_resources(resources):
            resources['stats']['resA'] = '123'
            resources['stats']['resB'] = 12

        self.stubs.Set(self.tracker.ext_resources_handler,
                       'write_resources',
                       fake_write_resources)

        resources = self.driver.get_available_resource(self.tracker.nodename)
        self.tracker._write_ext_resources(resources)

        expected = {"resA": "123", "resB": 12}
        self.assertEqual(sorted(expected),
                         sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
    """Build a two-cell instance NUMA topology with *mem* GB per cell."""
    mem = mem * 1024
    return objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            id=0, cpuset=set([1]), memory=mem),
               objects.InstanceNUMACell(
                   id=1, cpuset=set([3]), memory=mem)])

def _claim_topology(self, mem, cpus=1):
    """Build the expected host topology after claiming *mem* GB per cell.

    Returns None when the driver itself reports no NUMA topology.
    """
    if self.tracker.driver.numa_topology is None:
        return None
    mem = mem * 1024
    return objects.NUMATopology(
        cells=[objects.NUMACell(
            id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
            memory_usage=mem, mempages=[], siblings=[],
            pinned_cpus=set([])),
               objects.NUMACell(
                   id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
                   memory_usage=mem, mempages=[], siblings=[],
                   pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
            return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
    """update_usage is a no-op until the instance is claimed/tracked."""
    flavor = self._fake_flavor_create()
    claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
    claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
    # NOTE(review): written for py2 -- on py3 '/' yields a float here;
    # confirm intended semantics if ever ported (claim_mem / 2).
    claim_topology = self._claim_topology(claim_mem / 2)

    instance_topology = self._instance_topology(claim_mem / 2)

    instance = self._fake_instance_obj(
        flavor=flavor, task_state=None,
        numa_topology=instance_topology)
    self.tracker.update_usage(self.context, instance)

    # untracked instance must not change any accounting
    self._assert(0, 'memory_mb_used')
    self._assert(0, 'local_gb_used')
    self._assert(0, 'current_workload')
    self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')

    with mock.patch.object(instance, 'save'):
        claim = self.tracker.instance_claim(self.context, instance,
                                            self.limits)
    self.assertNotEqual(0, claim.memory_mb)
    self._assert(claim_mem, 'memory_mb_used')
    self._assert(claim_gb, 'local_gb_used')
    self._assert(claim_topology, 'numa_topology')

    # now update should actually take effect
    instance['task_state'] = task_states.SCHEDULING
    self.tracker.update_usage(self.context, instance)

    self._assert(claim_mem, 'memory_mb_used')
    self._assert(claim_gb, 'local_gb_used')
    self._assert(claim_topology, 'numa_topology')
    self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
            return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
    """Aborting a claim must return every resource to the free pool."""
    claim_mem = 3
    claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
    claim_disk = 2
    claim_topology = self._claim_topology(claim_mem_total / 2)

    instance_topology = self._instance_topology(claim_mem_total / 2)
    instance = self._fake_instance_obj(memory_mb=claim_mem,
                                       root_gb=claim_disk, ephemeral_gb=0,
                                       numa_topology=instance_topology)

    with mock.patch.object(instance, 'save'):
        claim = self.tracker.instance_claim(self.context, instance,
                                            self.limits)
    self.assertIsNotNone(claim)

    # resources are consumed while the claim is held
    self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
    self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
                     self.compute["free_ram_mb"])
    self.assertEqualNUMAHostTopology(
        claim_topology, objects.NUMATopology.obj_from_db_obj(
            self.compute['numa_topology']))

    self.assertEqual(claim_disk, self.compute["local_gb_used"])
    self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
                     self.compute["free_disk_gb"])

    claim.abort()

    # ...and fully released after the abort
    self.assertEqual(0, self.compute["memory_mb_used"])
    self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
    self.assertEqualNUMAHostTopology(
        FAKE_VIRT_NUMA_TOPOLOGY,
        objects.NUMATopology.obj_from_db_obj(
            self.compute['numa_topology']))

    self.assertEqual(0, self.compute["local_gb_used"])
    self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
            return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
    """Claims beyond physical capacity succeed when limits allow it."""
    memory_mb = FAKE_VIRT_MEMORY_MB * 2
    root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
    vcpus = FAKE_VIRT_VCPUS * 2
    claim_topology = self._claim_topology(3)
    instance_topology = self._instance_topology(3)

    # limits explicitly permit 2x oversubscription
    limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
              'disk_gb': root_gb * 2,
              'vcpu': vcpus,
              'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}

    instance = self._fake_instance_obj(memory_mb=memory_mb,
                                       root_gb=root_gb, ephemeral_gb=ephemeral_gb,
                                       numa_topology=instance_topology)

    with mock.patch.object(instance, 'save'):
        self.tracker.instance_claim(self.context, instance, limits)
    self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                     self.tracker.compute_node['memory_mb_used'])
    self.assertEqualNUMAHostTopology(
        claim_topology,
        objects.NUMATopology.obj_from_db_obj(
            self.compute['numa_topology']))
    self.assertEqual(root_gb * 2,
                     self.tracker.compute_node['local_gb_used'])
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_additive_claims(self, mock_save, mock_get):
        # Two successive claims for identical flavors: tracker usage must be
        # the sum of both (memory includes per-instance virt overhead).
        self.limits['vcpu'] = 2
        claim_topology = self._claim_topology(2, cpus=2)
        flavor = self._fake_flavor_create(
                memory_mb=1, root_gb=1, ephemeral_gb=0)
        instance_topology = self._instance_topology(1)
        instance = self._fake_instance_obj(
                flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance, self.limits):
            pass
        instance = self._fake_instance_obj(
                flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance, self.limits):
            pass
        self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
                self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
                self.tracker.compute_node['local_gb_used'])
        self.assertEqual(2 * flavor['vcpus'],
                self.tracker.compute_node['vcpus_used'])
        self.assertEqualNUMAHostTopology(
                claim_topology,
                objects.NUMATopology.obj_from_db_obj(
                    self.compute['numa_topology']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_context_claim_with_exception(self, mock_save, mock_get):
        # An exception raised inside the claim context manager must abort the
        # claim and roll every usage counter back to zero.
        instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
                ephemeral_gb=1)
        try:
            with self.tracker.instance_claim(self.context, instance):
                # <insert exciting things that utilize resources>
                raise test.TestingException()
        except test.TestingException:
            pass
        self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
        self.assertEqual(0, self.compute['memory_mb_used'])
        self.assertEqual(0, self.compute['local_gb_used'])
        # Host NUMA topology must be restored to its unclaimed state too.
        self.assertEqualNUMAHostTopology(
                FAKE_VIRT_NUMA_TOPOLOGY,
                objects.NUMATopology.obj_from_db_obj(
                    self.compute['numa_topology']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
        # Usage seen inside the claim context and usage recomputed by a later
        # resource audit (update_available_resource) must be identical.
        flavor = self._fake_flavor_create(
                memory_mb=1, root_gb=2, ephemeral_gb=3)
        claim_topology = self._claim_topology(1)
        instance_topology = self._instance_topology(1)
        instance = self._fake_instance_obj(
                flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance):
            # <insert exciting things that utilize resources>
            self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                    self.tracker.compute_node['memory_mb_used'])
            self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                    self.tracker.compute_node['local_gb_used'])
            self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                    self.compute['memory_mb_used'])
            self.assertEqualNUMAHostTopology(
                    claim_topology,
                    objects.NUMATopology.obj_from_db_obj(
                        self.compute['numa_topology']))
            self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                    self.compute['local_gb_used'])
        # after exiting claim context, build is marked as finished. usage
        # totals should be same:
        mock_get_all.return_value = [instance]
        self.tracker.update_available_resource(self.context)
        self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                self.tracker.compute_node['local_gb_used'])
        self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                self.compute['memory_mb_used'])
        self.assertEqualNUMAHostTopology(
                claim_topology,
                objects.NUMATopology.obj_from_db_obj(
                    self.compute['numa_topology']))
        self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                self.compute['local_gb_used'])
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_update_load_stats_for_instance(self, mock_get):
        # An instance in SCHEDULING counts toward current_workload; once it
        # becomes ACTIVE with no task state it no longer does.
        instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
        with mock.patch.object(instance, 'save'):
            with self.tracker.instance_claim(self.context, instance):
                pass
        self.assertEqual(1, self.tracker.compute_node['current_workload'])
        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None
        instance['host'] = 'fakehost'
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node['current_workload'])
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_cpu_stats(self, mock_save, mock_get):
        # vcpus_used only changes on claim and on DELETED, never on mere
        # task-state transitions.
        limits = {'disk_gb': 100, 'memory_mb': 100}
        self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
        vcpus = 1
        instance = self._fake_instance_obj(vcpus=vcpus)
        # should not do anything until a claim is made:
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
        with self.tracker.instance_claim(self.context, instance, limits):
            pass
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
        # instance state can change without modifying vcpus in use:
        instance['task_state'] = task_states.SCHEDULING
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
        # a second claim is additive:
        add_vcpus = 10
        vcpus += add_vcpus
        instance = self._fake_instance_obj(vcpus=add_vcpus)
        with self.tracker.instance_claim(self.context, instance, limits):
            pass
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
        # deleting the second instance releases its vcpus:
        instance['vm_state'] = vm_states.DELETED
        self.tracker.update_usage(self.context, instance)
        vcpus -= add_vcpus
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
    def test_skip_deleted_instances(self):
        # ensure that the audit process skips instances that have vm_state
        # DELETED, but the DB record is not yet deleted.
        self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
        self.tracker.update_available_resource(self.context)
        self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_deleted_instances_with_migrations(self, mock_migration_list):
        # A migration whose instance no longer exists ('invalid' uuid) must
        # not contribute any usage during the audit.
        migration = objects.Migration(context=self.context,
                                      instance_uuid='invalid')
        mock_migration_list.return_value = [migration]
        self.tracker.update_available_resource(self.context)
        self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
        mock_migration_list.assert_called_once_with(self.context,
                                                    "fakehost",
                                                    "fakenode")
    @mock.patch('nova.compute.claims.Claim')
    @mock.patch('nova.objects.Instance.save')
    def test_claim_saves_numa_topology(self, mock_save, mock_claim):
        # instance_claim must persist exactly the fields it sets: the claimed
        # NUMA topology plus host/node/launched_on.
        def fake_save():
            self.assertEqual(set(['numa_topology', 'host', 'node',
                                  'launched_on']),
                             inst.obj_what_changed())
        mock_save.side_effect = fake_save
        inst = objects.Instance(host=None, node=None, memory_mb=1024)
        inst.obj_reset_changes()
        numa = objects.InstanceNUMATopology()
        claim = mock.MagicMock()
        claim.claimed_numa_topology = numa
        mock_claim.return_value = claim
        # Bypass usage accounting; only the save path is under test here.
        with mock.patch.object(self.tracker, '_update_usage_from_instance'):
            self.tracker.instance_claim(self.context, inst)
        mock_save.assert_called_once_with()
    def test_set_instance_host_and_node(self):
        # _set_instance_host_and_node stamps the tracker's host/node onto the
        # instance and saves it exactly once.
        inst = objects.Instance()
        with mock.patch.object(inst, 'save') as mock_save:
            self.tracker._set_instance_host_and_node(self.context, inst)
            mock_save.assert_called_once_with()
        self.assertEqual(self.tracker.host, inst.host)
        self.assertEqual(self.tracker.nodename, inst.node)
        self.assertEqual(self.tracker.host, inst.launched_on)
class ResizeClaimTestCase(BaseTrackerTestCase):
    """Exercise resize_claim/drop_resize_claim accounting on the tracker."""

    def setUp(self):
        super(ResizeClaimTestCase, self).setUp()
        self.instance = self._fake_instance_obj()
        self.instance_type = self._fake_flavor_create()

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_claim(self, mock_get):
        # A resize claim consumes resources and records a tracked migration.
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, self.limits)
        self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
        self.assertEqual(1, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_abort(self, mock_get):
        # An exception inside the claim context aborts it: no usage, no
        # tracked migration.
        try:
            with self.tracker.resize_claim(self.context, self.instance,
                    self.instance_type, self.limits):
                raise test.TestingException("abort")
        except test.TestingException:
            pass
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self.assertEqual(0, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_additive_claims(self, mock_get):
        # Two resize claims against doubled limits stack their usage.
        limits = self._limits(
              2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
              2 * FAKE_VIRT_LOCAL_GB,
              2 * FAKE_VIRT_VCPUS)
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, limits)
        instance2 = self._fake_instance_obj()
        self.tracker.resize_claim(self.context, instance2, self.instance_type,
                limits)
        self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_revert(self, mock_get):
        # drop_resize_claim reverts all usage and forgets the migration.
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, {}, self.limits)
        self.tracker.drop_resize_claim(self.context, self.instance)
        self.assertEqual(0, len(self.tracker.tracked_instances))
        self.assertEqual(0, len(self.tracker.tracked_migrations))
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')

    def test_resize_filter(self):
        # _instance_in_resize_state keys off vm_state RESIZED or any of the
        # RESIZE_* task states on ACTIVE/STOPPED instances.
        instance = self._fake_instance(vm_state=vm_states.ACTIVE,
                task_state=task_states.SUSPENDING)
        self.assertFalse(self.tracker._instance_in_resize_state(instance))
        instance = self._fake_instance(vm_state=vm_states.RESIZED,
                task_state=task_states.SUSPENDING)
        self.assertTrue(self.tracker._instance_in_resize_state(instance))
        states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
                  task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
        for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
            for task_state in states:
                instance = self._fake_instance(vm_state=vm_state,
                                               task_state=task_state)
                result = self.tracker._instance_in_resize_state(instance)
                self.assertTrue(result)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_set_instance_host_and_node(self, mock_get):
        # instance_claim fills in host/launched_on/node on the instance.
        instance = self._fake_instance_obj()
        self.assertIsNone(instance['host'])
        self.assertIsNone(instance['launched_on'])
        self.assertIsNone(instance['node'])
        with mock.patch.object(instance, 'save'):
            claim = self.tracker.instance_claim(self.context, instance)
        self.assertNotEqual(0, claim.memory_mb)
        self.assertEqual('fakehost', instance['host'])
        self.assertEqual('fakehost', instance['launched_on'])
        self.assertEqual('fakenode', instance['node'])
class OrphanTestCase(BaseTrackerTestCase):
    """Verify accounting of 'orphan' instances: usage the hypervisor reports
    for uuids the resource tracker does not know about.
    """

    def _driver(self):
        class OrphanVirtDriver(FakeVirtDriver):
            def get_per_instance_usage(self):
                # Two uuids unknown to the tracker, each using the full
                # fake memory amount.
                return {
                    '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '1-2-3-4-5'},
                    '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '2-3-4-5-6'},
                }
        return OrphanVirtDriver()

    def test_usage(self):
        # Both orphans' memory (plus overhead) is counted as used.
        self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                self.tracker.compute_node['memory_mb_used'])

    def test_find(self):
        # create one legit instance and verify the 2 orphans remain
        self._fake_instance()
        orphans = self.tracker._find_orphaned_instances()
        self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
    """Tests for the tracker's compute-monitor metric collection
    (_get_host_metrics) and the notification payload it emits.
    """

    def setUp(self):
        super(ComputeMonitorTestCase, self).setUp()
        fake_monitors = [
            'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
            'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
        self.flags(compute_available_monitors=fake_monitors)
        self.tracker = self._tracker()
        self.node_name = 'nodename'
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.info = {}
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)

    def test_get_host_metrics_none(self):
        # No monitors loaded -> no metrics. (NOTE(review): 'FakeMontorClass1'
        # is misspelled in the flag, but the flag value is irrelevant here
        # since self.tracker.monitors is emptied directly — confirm intent.)
        self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
        self.tracker.monitors = []
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        self.assertEqual(len(metrics), 0)

    def test_get_host_metrics_one_failed(self):
        # One working monitor plus one failing monitor still yields metrics.
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
        class1 = test_monitors.FakeMonitorClass1(self.tracker)
        class4 = test_monitors.FakeMonitorClass4(self.tracker)
        self.tracker.monitors = [class1, class4]
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        self.assertTrue(len(metrics) > 0)

    @mock.patch.object(resource_tracker.LOG, 'warning')
    def test_get_host_metrics_exception(self, mock_LOG_warning):
        # A monitor raising inside get_metrics is logged as a warning and
        # contributes no metrics.
        self.flags(compute_monitors=['FakeMontorClass1'])
        class1 = test_monitors.FakeMonitorClass1(self.tracker)
        self.tracker.monitors = [class1]
        with mock.patch.object(class1, 'get_metrics',
                               side_effect=test.TestingException()):
            metrics = self.tracker._get_host_metrics(self.context,
                                                     self.node_name)
            mock_LOG_warning.assert_called_once_with(
                u'Cannot get the metrics from %s.', class1)
            self.assertEqual(0, len(metrics))

    def test_get_host_metrics(self):
        # Metrics from all monitors are collected and published via the
        # 'compute.metrics.update' notification.
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
        class1 = test_monitors.FakeMonitorClass1(self.tracker)
        class2 = test_monitors.FakeMonitorClass2(self.tracker)
        self.tracker.monitors = [class1, class2]
        mock_notifier = mock.Mock()
        with mock.patch.object(rpc, 'get_notifier',
                               return_value=mock_notifier) as mock_get:
            metrics = self.tracker._get_host_metrics(self.context,
                                                     self.node_name)
            mock_get.assert_called_once_with(service='compute',
                                             host=self.node_name)
        expected_metrics = [{
            'timestamp': 1232,
            'name': 'key1',
            'value': 2600,
            'source': 'libvirt'
        }, {
            'name': 'key2',
            'source': 'libvirt',
            'timestamp': 123,
            'value': 1600
        }]
        payload = {
            'metrics': expected_metrics,
            'host': self.tracker.host,
            'host_ip': CONF.my_ip,
            'nodename': self.node_name
        }
        mock_notifier.info.assert_called_once_with(
            self.context, 'compute.metrics.update', payload)
        self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
    """Periodic-task behaviour: the tracker only persists the compute node
    record when resources actually changed.
    """

    def test_periodic_status_update(self):
        # verify update called on instantiation
        self.assertEqual(1, self.update_call_count)
        # verify update not called if no change to resources
        self.tracker.update_available_resource(self.context)
        self.assertEqual(1, self.update_call_count)
        # verify update is called when resources change
        driver = self.tracker.driver
        driver.memory_mb += 1
        self.tracker.update_available_resource(self.context)
        self.assertEqual(2, self.update_call_count)

    def test_update_available_resource_calls_locked_inner(self):
        # update_available_resource must delegate to the (locked)
        # _update_available_resource with the driver's reported resources.
        @mock.patch.object(self.tracker, 'driver')
        @mock.patch.object(self.tracker,
                           '_update_available_resource')
        @mock.patch.object(self.tracker, '_verify_resources')
        @mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
        def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
            resources = {'there is someone in my head': 'but it\'s not me'}
            mock_driver.get_available_resource.return_value = resources
            self.tracker.update_available_resource(self.context)
            mock_uar.assert_called_once_with(self.context, resources)
        _test()
class StatsDictTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    stats as a dictionary.
    """

    def _driver(self):
        return FakeVirtDriver(stats=FAKE_VIRT_STATS)

    def _get_stats(self):
        # Stats are stored JSON-encoded on the compute node record.
        return jsonutils.loads(self.tracker.compute_node['stats'])

    def test_virt_stats(self):
        # start with virt driver stats
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)
        # adding an instance should keep virt driver stats
        self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
        self.tracker.update_available_resource(self.context)
        stats = self._get_stats()
        expected_stats = {}
        expected_stats.update(FAKE_VIRT_STATS)
        expected_stats.update(self.tracker.stats)
        self.assertEqual(expected_stats, stats)
        # removing the instances should keep only virt driver stats
        self._instances = {}
        self.tracker.update_available_resource(self.context)
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    stats as a json string.
    """

    def _driver(self):
        return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)

    def _get_stats(self):
        # Stats are stored JSON-encoded on the compute node record.
        return jsonutils.loads(self.tracker.compute_node['stats'])

    def test_virt_stats(self):
        # start with virt driver stats
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)
        # adding an instance should keep virt driver stats
        # and add rt stats
        self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
        self.tracker.update_available_resource(self.context)
        stats = self._get_stats()
        expected_stats = {}
        expected_stats.update(FAKE_VIRT_STATS)
        expected_stats.update(self.tracker.stats)
        self.assertEqual(expected_stats, stats)
        # removing the instances should keep only virt driver stats
        self._instances = {}
        self.tracker.update_available_resource(self.context)
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    an invalid type for stats.
    """

    def _driver(self):
        # A string that is not valid JSON must be rejected by the tracker.
        return FakeVirtDriver(stats='this is not json')

    def _init_tracker(self):
        # do not do initial update in setup
        pass

    def test_virt_stats(self):
        # should throw exception for string that does not parse as json
        self.assertRaises(ValueError,
                          self.tracker.update_available_resource,
                          context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    an invalid type for stats.
    """

    def _driver(self):
        # stats must be a dict or a JSON string; an int is invalid.
        return FakeVirtDriver(stats=10)

    def _init_tracker(self):
        # do not do initial update in setup
        pass

    def test_virt_stats(self):
        # should throw exception for incorrect stats value type
        self.assertRaises(ValueError,
                          self.tracker.update_available_resource,
                          context=self.context)
| yatinkumbhare/openstack-nova | nova/tests/unit/compute/test_resource_tracker.py | Python | apache-2.0 | 61,016 | [
"exciting"
] | dc6a22b3fdbf9fc6204cb93e163e24123358165f0955d37806d58d984ec26ebb |
# -*- coding: utf-8 -*-
from gettext import gettext as _
# Map data for the North America region.
NAME = _('North America')
# Each entry: (translated name, numeric id, x, y, rotation).
# NOTE(review): x/y are presumably pixel label positions on the map image and
# rotation a label angle in degrees — confirm against the rendering code.
STATES = [
    (_('United States'), 254, 388, 540, 0),
    (_('Canada'), 253, 320, 283, 0),
    (_('Alaska'), 252, 133, 144, 0),
    (_('México'), 251, 398, 791, 0),
    (_('Cuba'), 250, 681, 728, 0),
    (_('Guatemala'), 249, 554, 845, 80),
    (_('Honduras'), 248, 623, 853, 0),
    (_('Nicaragua'), 247, 651, 878, 0),
    (_('Jamaica'), 246, 714, 773, 0),
    (_('Haiti'), 245, 770, 737, 90),
    (_('Russia'), 242, 56, 41, 0),
    (_('Greenland'), 241, 591, 29, 0),
    (_('The Bahamas'), 244, 716, 675, -30),
    (_('Revillagigedo Islands'), 243, 242, 808, 0),
    (_('Baffin Island'), 240, 534, 146, -45),
    (_('Victoria Island'), 239, 357, 150, 0),
    (_('Banks Island'), 238, 312, 105, 0),
    (_('Ellesmere Island'), 237, 452, 70, 90),
    (_('Elizabeth Island'), 236, 370, 68, 0)
]
# Each entry: (translated name, x, y, marker style, label x-offset,
# label y-offset). NOTE(review): field meanings inferred from layout —
# confirm against the code that consumes these tuples.
CAPITALS = [
    (_('México'), 430, 808, 1, -10, -14),
    (_('Ottawa'), 617, 428, 1, 0, 14),
    (_('Washington'), 633, 508, 1, 60, 0)
]
# Each entry: (translated name, x, y, marker style, label x-offset,
# label y-offset) — same layout as CAPITALS but marker style 2.
CITIES = [
    (_('Acapulco'), 422, 844, 2, -30, 14),
    (_('Albuquerque'), 343, 597, 2, 10, 14),
    (_('Anchorage'), 125, 178, 2, 0, -14),
    (_('Atlanta'), 579, 593, 2, 0, -14),
    (_('Barrow'), 177, 60, 2, 0, -14),
    (_('Bethel'), 65, 133, 2, 0, 14),
    (_('Boston'), 669, 447, 2, 40, 0),
    (_('Calgary'), 299, 391, 2, 0, 14),
    (_('Cambridge Bay'), 377, 168, 2, 0, -14),
    (_('Cancún'), 585, 768, 2, 0, -14),
    (_('Charleston'), 631, 587, 2, 55, 0),
    (_('Charlottetown'), 710, 369, 2, -60, -14),
    (_('Chicago'), 524, 500, 2, -20, -14),
    (_('Chihuahua'), 344, 682, 2, 0, 14),
    (_('Churchill'), 442, 303, 2, 0, -14),
    (_('Cleveland'), 582, 491, 2, -10, 14),
    (_('Columbus'), 574, 513, 2, 15, 14),
    (_('Dallas'), 448, 626, 2, 0, 14),
    (_('Dawson'), 194, 178, 2, 0, 14),
    (_('Denver'), 363, 539, 2, 0, 14),
    (_('Detroit'), 562, 483, 2, 0, -14),
    (_('Echo Bay'), 313, 200, 2, 0, 14),
    (_('Edmonton'), 308, 361, 2, 0, -14),
    (_('El Paso'), 342, 641, 2, 0, 14),
    (_('Fairbanks'), 162, 151, 2, 0, -14),
    (_('Fort George'), 559, 335, 2, 0, -14),
    (_('Fredericton'), 685, 389, 2, -40, -14),
    (_('Frobisher Bay'), 570, 198, 2, 0, -14),
    (_('Goose Bay'), 682, 287, 2, 0, -14),
    (_('Guadalajara'), 374, 792, 2, -30, 14),
    (_('Halifax'), 714, 390, 2, 35, 0),
    (_('Hay River'), 310, 271, 2, 20, 14),
    (_('Hermosillo'), 288, 671, 2, -35, 14),
    (_('Houston'), 468, 665, 2, 0, -14),
    (_('Indianapolis'), 541, 522, 2, -70, 0),
    (_('Inuvik'), 243, 149, 2, 0, 14),
    (_('Ivujivik'), 532, 235, 2, 0, 14),
    (_('Jacksonville'), 619, 629, 2, 0, 14),
    (_('Kansas City'), 464, 544, 2, -20, -14),
    (_('Kaujuitoq'), 417, 91, 2, 0, 14),
    (_('Kodiak'), 78, 201, 2, 0, 14),
    (_('La Paz'), 286, 737, 2, 0, 14),
    (_('Las Vegas'), 257, 572, 2, 25, 14),
    (_('Los Angeles'), 220, 592, 2, -40, -14),
    (_('Mérida'), 553, 774, 2, 10, 14),
    (_('Matamoros'), 446, 720, 2, 25, -14),
    (_('Mazatlán'), 339, 754, 2, 0, 14),
    (_('Memphis'), 516, 584, 2, 0, 14),
    (_('Miami'), 652, 683, 2, 0, 14),
    (_('Minneapolis'), 468, 469, 2, 0, -14),
    (_('Monterrey'), 402, 722, 2, 0, 14),
    (_('Montréal'), 633, 420, 2, 45, 0),
    (_('Moosonee'), 551, 371, 2, -35, 14),
    (_('New Orleans'), 527, 655, 2, 20, 14),
    (_('New York'), 653, 474, 2, 40, 0),
    (_('Nome'), 90, 87, 2, 0, 14),
    (_('Norfolk'), 648, 532, 2, 0, 14),
    (_('Oaxaca'), 464, 841, 2, 20, 14),
    (_('Oklahoma City'), 438, 593, 2, -10, -14),
    (_('Philadelphia'), 647, 487, 2, 50, 0),
    (_('Phoenix'), 283, 612, 2, 0, 14),
    (_('Portland'), 214, 441, 2, 0, 14),
    (_('Prince George'), 242, 342, 2, -30, 14),
    (_('Prince Rupert'), 196, 320, 2, 0, -14),
    (_('Prudhoe Bay'), 197, 92, 2, 0, 14),
    (_('Puebla'), 443, 814, 2, 0, 14),
    (_('Québec'), 645, 397, 2, -40, 0),
    (_('Regina'), 371, 405, 2, 0, 14),
    (_('Repulse Bay'), 472, 193, 2, 0, 14),
    (_('Sacramento'), 201, 528, 2, 0, -14),
    (_("Saint John's"), 773, 310, 2, -35, -14),
    (_('Saint John'), 693, 395, 2, 30, 14),
    (_('Salt Lake City'), 298, 521, 2, 20, -14),
    (_('San Antonio'), 432, 672, 2, 20, 14),
    (_('San Diego'), 230, 612, 2, -30, 14),
    (_('San Francisco'), 191, 537, 2, 0, 14),
    (_('Saskatoon'), 357, 384, 2, 20, -14),
    (_('Schefferville'), 632, 292, 2, 0, 14),
    (_('Seattle'), 227, 417, 2, 0, 14),
    (_('St. Louis'), 507, 543, 2, 0, 14),
    (_('Sydney'), 729, 361, 2, 0, -14),
    (_('Tampico'), 443, 769, 2, 0, -14),
    (_('Thunder Bay'), 495, 421, 2, 0, 14),
    (_('Toronto'), 592, 458, 2, 50, 0),
    # NOTE(review): y=325 places Torreón far north of every other Mexican
    # city (neighbours are ~700-800) — verify this coordinate.
    (_('Torreón'), 374, 325, 2, 0, -14),
    (_('Valdez'), 139, 194, 2, 0, 14),
    (_('Vancouver'), 226, 395, 2, -20, -14),
    (_('Veracruz'), 468, 811, 2, 20, -14),
    (_('Watson Lake'), 231, 258, 2, 0, 14),
    (_('Whitehorse'), 197, 232, 2, 0, 14),
    (_('Winnipeg'), 461, 411, 2, 0, -14),
    (_('Yellowknife'), 320, 249, 2, 0, -14)
]
# Water features (rivers, bays, gulfs, lakes, seas).
# Each entry: (translated name, numeric id, x, y, rotation) — same layout
# as STATES.
RIVERS = [
    (_('Grande River'), 254, 382, 692, -30),
    (_('Colorado River'), 253, 303, 548, 30),
    (_('Arkansas River'), 252, 429, 582, -20),
    (_('Missouri River'), 251, 418, 510, -45),
    (_('Mississippi River'), 250, 521, 627, 90),
    (_('Ohio River'), 249, 581, 541, 30),
    (_('Snake River'), 248, 240, 474, -45),
    (_('Columbia River'), 247, 247, 392, 80),
    (_('Saskatchewan River'), 246, 342, 352, -5),
    (_('Nelson River'), 245, 435, 322, 40),
    (_('Yukon River'), 244, 206, 189, -80),
    (_('Mackenzie River'), 243, 249, 201, -70),
    (_('Peace River'), 242, 277, 299, 10),
    (_('Saint Lawrence River'), 241, 634, 425, 60),
    (_('Hudson Bay'), 240, 495, 281, 0),
    (_('Baffin Bay'), 239, 527, 85, 0),
    (_('Strait of Davis'), 238, 637, 179, 0),
    (_('Labrador Sea'), 237, 711, 241, -25),
    (_('Beaufort Sea'), 236, 255, 102, 0),
    (_('Chukci Sea'), 235, 123, 25, 0),
    (_('Arctic Ocean'), 234, 270, 31, 0),
    (_('Bering Sea'), 233, 29, 102, 90),
    (_('Gulf of Alaska'), 232, 102, 231, 0),
    (_('Gulf of California'), 231, 278, 690, -60),
    (_('Campeche Bay'), 230, 495, 793, 0),
    (_('Gulf of Mexico'), 229, 538, 713, 0),
    (_('Caribbean Sea'), 228, 707, 812, 0),
    (_('Atlantic Ocean'), 227, 725, 514, 90),
    (_('St. Lawrence Gulf'), 226, 696, 336, 0),
    (_('Pacific Ocean'), 225, 85, 521, 90),
    (_('Lake Winnipeg'), 224, 431, 388, 0),
    (_('Lake Superior'), 223, 505, 428, 30),
    (_('Lake Michigan'), 222, 524, 487, 90),
    (_('Lake Huron'), 221, 561, 450, 70),
    (_('Norwegian Sea'), 220, 738, 90, 70)
]
| AlanJAS/iknowAmerica | recursos/0adelnorte/datos/0adelnorte.py | Python | gpl-3.0 | 6,543 | [
"COLUMBUS"
] | e39c5c94396e5f38c53f96e38c087d6141b549b435ba45a8cfbe6eb125d06d46 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CueProc setup.
Copyright (c) 2006-2008 by Nyaochi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
http://www.gnu.org/copyleft/gpl.html .
"""
from distutils.core import setup
import py2exe
# py2exe packaging configuration for CueProc: builds a Windows console
# executable from cueproc.py bundling the core modules and all the
# per-encoder backend modules (ce_*).
setup(
    name='cueproc',
    version='1.10.2',
    description='Cuesheet Processor (CueProc)',
    author='nyaochi',
    author_email='nyaochi2008@nyaochi.sakura.ne.jp',
    url='http://nyaochi.sakura.ne.jp/',
    py_modules=[
        # core
        "cueproc",
        "celib",
        "cuesheet",
        # encoder backends, one module per external tool
        "ce_ctmp4",
        "ce_extpipe",
        "ce_fiismp3",
        "ce_flac",
        "ce_getaudio",
        "ce_hmp3",
        "ce_lame",
        "ce_lame_eyed3",
        "ce_mpc",
        "ce_nero",
        "ce_nero_ap",
        "ce_nero_mpeg4ip",
        "ce_oggenc",
        "ce_wave",
        "ce_wavpack",
        "ce_wma",
        ],
    console=['cueproc.py'],
    # force-include the encodings package so py2exe bundles all codecs
    options={"py2exe": {"packages": ["encodings"]}},
    )
| rinrinne/cueproc-alternative | src/setup.py | Python | gpl-2.0 | 1,590 | [
"VisIt"
] | 216a3212c3546f6b49b19337b642a5775c4ce0e085eb863e421592e4ce2bca64 |
#!/usr/bin/env python
import MySQLdb as mysql
import velvet_assembler_notrimmer
import velvet_optimize
import abyss_assembler
import clc_assembler
import wgs_assembler
import supersembler
import spades_assembler
#import masurca_assembler
import mira_assembler
import worst_assembler
#import idba_assembler
import mix_assembler
#import sga_assembler
import basic_trimmer
import katz_trimmer
import quality_trimmer
import fasta_statter
import ten_each_end_trimmer
import kmergenie_trimmer
import datetime
from time import sleep
import subprocess
import shutil
import os
import sys
import signal
"""
Assembly Dispatch Daemon
May 1, 2013
Justin Payne
Assembly job dispatch daemon, single-threaded version. Interacts with Pipeline Job DB.
Assembly bindings are written as submodules and encapsulate the shell commands necessary
to operate the assemblers and capture statistics on an automated basis.
This module handles the database interface and dispatches assembly jobs according to
the "assembler_dict", below.
"""
def sigterm_handler(signum, frame):
    """Convert SIGTERM into KeyboardInterrupt.

    This routes an external termination request through the same exception
    path as Ctrl-C, so cleanup code that resets active jobs runs either way.
    """
    raise KeyboardInterrupt()


# Install the handler so a SIGTERM behaves like an interactive interrupt.
signal.signal(signal.SIGTERM, sigterm_handler)
def null_trimmer(assembler, **kwargs):
    """Pass-through 'trimmer' used when no read trimming is requested.

    Forwards all keyword arguments untouched to the given assembler
    module's ``assemble`` entry point and returns whatever it returns.
    """
    run_assembly = assembler.assemble
    return run_assembly(**kwargs)
# Dispatch table mapping a job's 'job_type' to the assembler binding module.
assembler_dict = { #comment in or out to enable or disable assemblers.
    'Velvet':velvet_assembler_notrimmer,
    'Supersembler':supersembler,
    'Celera':wgs_assembler,
    'Velvet_optimize':velvet_optimize,
    'ABySS':abyss_assembler,
    'SPAdes':spades_assembler,
    'CLC':clc_assembler,
    #'MaSuRCA':masurca_assembler,
    #'SGA':sga_assembler,
    'Mira':mira_assembler,
    #'IDBA-UD':idba_assembler,
    'Mix':mix_assembler,
    'WORST':worst_assembler, #Misassembly tool, negative control for assembly comparison tools like QUAST or GAGE.
    #'Convey_velvet':convey_velvet_assembler,
    #'Convey_velver_optimize':convey_velvet_optimize,
    }

# Dispatch table mapping a job's 'trimmer' field to a trim function; each
# trim function wraps an assembler module's assemble() call.
trimmer_dict = {'no_trimmer':null_trimmer, #trimmers can be in this module or others
                'basic_trimmer':basic_trimmer.trim,
                'katz_trimmer':katz_trimmer.trim, #currently unsupported
                'quality_trimmer':quality_trimmer.trim,
                'ten_each_end_trimmer':ten_each_end_trimmer.trim,
                'kmergenie_trimmer':kmergenie_trimmer.trim
                }

# Filesystem roots for read data; paths in job records are relative to
# default_root.
default_root = '/shared'
filepaths = {'cfsan_genomes':'/shared/gn2/CFSANgenomes',
             'genome3':'/shared/gn3'}
def query(s, commit_function=lambda l: True):
    """Run a SQL statement against the job DB and return the fetched rows.

    :param s: the SQL string to execute. NOTE: callers build these with
        str.format, so any interpolated values must be escaped by the
        caller — this function cannot parameterize without an interface
        change (see update_status_callback).
    :param commit_function: called with the fetched rows; commit if it
        returns truthy, otherwise roll back.
    :returns: list/tuple of row dicts (empty on database error).
    """
    items = []
    job_db = None
    try:
        job_db = mysql.connect(host='xserve15.fda.gov', user='job_user', passwd='job_user', db='Jobs')
        jobc = job_db.cursor(mysql.cursors.DictCursor)
        jobc.execute(s)
        items = jobc.fetchall()
        if commit_function(items):
            job_db.commit()
        else:
            job_db.rollback()
    except mysql.DatabaseError as e:
        # Log to the real stdout in case an assembler has redirected it.
        sys.__stdout__.write("[{}] {}:{}\n".format(datetime.datetime.today().ctime(), type(e), e))
    finally:
        # Previously the connection was only closed on the success path,
        # leaking a connection whenever connect/execute/fetch raised.
        if job_db is not None:
            try:
                job_db.close()
            except mysql.Error:
                pass
    return items
def update_status_callback(id, message, status='Running',):
    """Log a progress message and mirror it into the assemblies table.

    Short messages (<= 80 chars) go into the status column directly; longer
    ones keep status (default 'Running') and store the text in
    exception_note.

    WARNING(review): the SQL is built by string interpolation — only
    'string_escape' escaping protects it, which is not safe against
    SQL injection and is Python-2-only. Parameterizing would require
    changing query()'s interface.
    """
    sys.__stdout__.write("[{}] {}\n".format(datetime.datetime.today().ctime(), message)) #in case some assembler redirects print/stdout
    sys.__stdout__.flush()
    if len(message) > 80:
        query("UPDATE assemblies SET status='{status}', exception_note='{message}' WHERE id='{id}';".format(status=status.encode('string_escape'), message=message.encode('string_escape'), id=id))
    else:
        query("UPDATE assemblies SET status='{message}', exception_note='' WHERE id='{id}';".format(message=message.encode('string_escape'), id=id))
def assemble(job, debug=True):
"Worker function."
tempdir = None
try:
try:
import os
this_worker = os.environ.get('HOSTNAME', os.environ.get('CDC_LOCALHOST', "Generic PIPELINE WORKER"))
pid = os.getpid()
except:
import traceback
traceback.print_exc()
this_worker = 'Generic PIPELINE worker.'
pid = 'pid unknown'
query("UPDATE assemblies SET status='running', exception_note='', dateStarted='{}', worker='{} ({})' WHERE id = '{}'".format(datetime.datetime.now().isoformat(), this_worker, pid, job['id']))
try:
import assembly_logging
print "Assembly logging supported."
except Exception as e:
print "Assembly logging not supported."
if debug:
import traceback
traceback.print_exc(sys.stdout)
path = os.path.abspath(os.path.join(default_root, job['path_1'], '../asm'))
if not os.path.exists(path):
os.mkdir(path)
try:
exp_coverage = int(job['average_coverage'].replace('X', '').replace('x', ''))
except ValueError:
exp_coverage = 20
assembler = assembler_dict.get(job['job_type'], velvet_assembler_notrimmer)
trimmer = trimmer_dict.get(job['trimmer'], null_trimmer) #default - no trimming
statter = getattr(fasta_statter, job['statter'])
def update_callback(d):
for (k, v) in d.items():
print "[{}]\tmodule override: {}='{}'".format(datetime.datetime.today().ctime(), k, v)
query("UPDATE assemblies SET {k}='{v}' WHERE id='{i}';".format(i=job['id'], k=k, v=str(v).encode('string_escape')))
if "gz" in job['file_1']:
#unzip file to tempdir
import assembly_resources as tempfile
import gzip
tempdir = tempfile.mkdtemp()
update_status_callback(job['id'], 'Unzipping reads in {}...'.format(tempdir))
with gzip.open(os.path.join(default_root, job['path_1'], job['file_1'])) as r_in, open(os.path.join(tempdir, job['file_1'].replace(".gz", "")), 'w') as r_out:
for l in r_in:
r_out.write(l)
if job['file_2']:
with gzip.open(os.path.join(default_root, job['path_2'], job['file_2'])) as r_in, open(os.path.join(tempdir, job['file_2'].replace(".gz", "")), 'w') as r_out:
for l in r_in:
r_out.write(l)
job['file_1'] = job['file_1'].replace(".gz", "")
job['file_2'] = job['file_2'].replace(".gz", "")
job['path_1'] = job['path_2'] = tempdir
if 'sff' in job['file_1']:
from Bio import SeqIO
import tempfile
tempdir = tempfile.mkdtemp()
update_status_callback(job['id'], 'Converting reads in {}...'.format(tempdir))
with open(os.path.join(default_root, job['path_1'], job['file_1']), 'rb') as r_in, open(os.path.join(tempdir, job['file_1'].replace(".sff", ".fastq")), 'w') as r_out:
for c in SeqIO.parse(r_in, 'sff'):
r_out.write(c.format('fastq'))
job['file_1'] = job['file_1'].replace('.sff', '.fastq')
job['path_1'] = tempdir
reads1=os.path.join(default_root, job['path_1'], job['file_1'])
reads2=None
if job['file_2']:
reads2=os.path.join(default_root, job['path_2'], job['file_2'])
def logging_callback(message, **kw):
update_status_callback(job['id'], message, **kw)
try:
with assembly_logging.open(path) as logfile:
logfile.write("[{}] {}\n".format(datetime.datetime.today().ctime(), message))
except Exception as e:
if debug:
import traceback
traceback.print_exc(sys.stdout)
results = trimmer(assembler=assembler,
accession=job['accession'], path=path,
reads1=reads1,
reads2=reads2,
insert_size=job['insert_size'] or 500,
minimum_contig_length=job['min_contig_length'] or 200,
k_value=job['k_value'] or 177,
exp_coverage=exp_coverage or 'auto',
callback=logging_callback,
assembler_dict=assembler_dict,
update_callback=update_callback,
statter=statter,
ref_file=job['ref_file'],
ref_url=job['ref_url'],
data_type=job['data_type'],
debug=debug,
trimmer_args=job['trimmer_args'],
fasta_file_name = job['fasta_file'] or job['accession'] + '.fasta'
)
try:
job['job_type'] = 'Bowtie read mapping'
results['lib_insert_length'] = fasta_statter.read_map(os.path.join(path, results['fasta_file']),
os.path.join(default_root, job['path_1'], job['file_1']),
os.path.join(default_root, job['path_2'], job['file_2']),
callback=logging_callback)
except ZeroDivisionError:
update_callback({"exception_note":"Bowtie returned no reads mapped to assembly; insert size not calculated.", 'lib_insert_length':'Unknown'})
except subprocess.CalledProcessError as e:
print job['job_type'], ": ", type(e), e, e.output
query("UPDATE assemblies SET status = 'exception', exception_note='{}:{}({})' WHERE id = '{}';".format(str(type(e)).encode('string_escape'), str(e).encode('string_escape'), str(e.output).encode('string_escape'), job['id']))
except ValueError as e:
#assembly exception
print job['job_type'], ": ", type(e), e
query("UPDATE assemblies SET status = 'exception', exception_note='{}:{}' WHERE id = '{}';".format(str(type(e)).encode('string_escape'), str(e).encode('string_escape'), job['id']))
except KeyboardInterrupt:
query("UPDATE assemblies SET status='ready', exception_note='canceled by keyboard interrupt' WHERE id = '{}'".format(job['id']))
print "Terminated by keyboard interrupt."
quit()
except IOError as e:
import errno
if e.errno == errno.ENOSPC:
query("UPDATE assemblies SET status='ready', exception_note='stopped on out-of-space error' WHERE id = '{}'".format(job['id']))
print "Out of scratch space; terminating."
quit()
else:
print job['job_type'], ": ", type(e), e
if debug:
import traceback
traceback.print_exc(sys.stdout)
quit()
query("UPDATE assemblies SET status = 'exception', exception_note='{}:{}' WHERE id = '{}';".format(str(type(e)).encode('string_escape'), str(e).encode('string_escape'), job['id']))
except Exception as e:
print job['job_type'], ": ", type(e), e
if debug:
import traceback
traceback.print_exc(sys.stdout)
quit()
query("UPDATE assemblies SET status = 'exception', exception_note='{}:{}' WHERE id = '{}';".format(str(type(e)).encode('string_escape'), str(e).encode('string_escape'), job['id']))
else:
update_status_callback(job['id'], "Finished.")
query("""
UPDATE assemblies SET status='finished',
average_coverage='{average_coverage}',
num_contigs='{num_contigs}',
n50='{n50}',
num_bases='{num_bases}',
assembly_version='{assembly_version}',
lib_insert_length='{lib_insert_length}',
dateCompleted='{date}',
fasta_file='{fasta_file}'
WHERE id = '{id}';""".format(date=datetime.datetime.now().isoformat(),
id = job['id'],
**results))
finally:
try:
print "removing {}...".format(tempdir)
shutil.rmtree(tempdir)
#update_status_callback(job['id'], "finished")
except Exception as e:
print e
def main(loop=False):
    """Poll the assembly job queue and dispatch jobs this worker can run.

    Args:
        loop: if True, keep polling forever (daemon mode); if False,
            drain the queue once and return.
    """
    import random
    # Stagger startup so multiple workers don't race for the same job.
    sleep(10 * random.random())
    # Build the SQL IN-list of job types this instance supports, e.g. ('Velvet', 'CLC', ...)
    jobs_available = ", ".join(["'{}'".format(j) for j in assembler_dict.keys()])
    job_queue_query = "SELECT * FROM assemblies WHERE (status='ready' OR status='priority') AND job_type IN ({}) ORDER BY status ASC LIMIT 1;".format(jobs_available)
    while True:
        # Re-query at the top of every outer pass. Previously the queue was
        # queried only once before the loop, so a daemon that found (or
        # drained to) an empty queue would sleep forever without ever
        # noticing newly-added jobs.
        results = query(job_queue_query)
        while len(results) > 0:
            job = results[0]
            job['path_1'] = job['path_1'].format(**filepaths)
            job['path_2'] = job['path_2'].format(**filepaths)
            assemble(job, debug=('-debug' in sys.argv))
            results = query(job_queue_query)
        if not loop:
            break
        # Randomized backoff between polls to keep workers out of lockstep.
        sleep(10 * random.random())
if __name__ == "__main__":
    # Self-test mode: stub out the database layer and run one canned job.
    if '-test' in sys.argv:
        print "Starting test..."
        def query(s, f=lambda l: True):
            # Replaces the real DB query; just echoes the SQL that would run.
            print "[SQL Cmd] {} ({})".format(s, f(0))
        job1 = {'id':'TEST', #fake job,
            'data_type':'MiSeq',
            'job_type':'WORST',
            'path_1':'/shared/gn2/CFSANgenomes/CFSAN001656/CFSAN001656_01/',
            'file_1':'CFSAN001656_S8_L001_R1_001.fastq',
            'path_2':'/shared/gn2/CFSANgenomes/CFSAN001656/CFSAN001656_01/',
            'file_2':'CFSAN001656_S8_L001_R2_001.fastq',
            'ref_file':'',
            'ref_url':'',
            'trimmer':'kmergenie_trimmer',
            'trimmer_args':'',
            'insert_size':500,
            'min_contig_length':200,
            'k_value':177,
            'status':'ready',
            'exception_note':'',
            # NOTE(review): "%h" is not a standard strftime directive -- "%H" was
            # probably intended; harmless here since this is test fixture data.
            'dateAdded':datetime.datetime.today().strftime("%Y-%m-%d %h-%M-%S"),
            'dateCompleted':'',
            'accession':'CFSAN001656_01',
            'run_id':'',
            'fasta_file':'test.fasta',
            'statter':'stat_fasta',
            'average_coverage':'20X',
            'num_contigs':'',
            'n50':'',
            'num_bases':'',
            'assembly_version':'',
            'lib_insert_length':''}
        assemble(job1, debug=True)
        quit()
    # Register any assembler/trimmer options not yet present in the DB so the
    # front end can offer them.
    #update options
    for (assembler_name, assembler_module) in assembler_dict.items():
        if not query("SELECT option_value FROM assembly_options WHERE option_value = '{}';".format(assembler_name)):
            query("INSERT INTO assembly_options (option_type, option_value, option_description) VALUES ('assembler', '{}', '{} (Supports {})');".format(assembler_name, assembler_module.description.encode('string_escape'), ', '.join(assembler_module.supports)))
    for (trimmer, trim_func) in trimmer_dict.items():
        if not query("SELECT option_value FROM assembly_options WHERE option_value = '{}';".format(trimmer)):
            query("INSERT INTO assembly_options (option_type, option_value, option_description, option_supports) VALUES ('trimmer', '{}', '{}', 'All');".format(trimmer, str(trim_func.__doc__).encode('string_escape')))
    #run assemblies
    try:
        import prctl
        prctl.set_proctitle('Assem_dispatch')
        if '-nodaemon' in sys.argv:
            #raise ValueError
            main()
            quit()
        # NOTE(review): this unconditional raise makes the daemon branch below
        # unreachable; daemon mode appears to be intentionally disabled and the
        # ValueError is caught below to fall back to normal (looping) mode.
        raise ValueError()
        import daemon
        print "Daemon module successfully imported; running in daemon mode."
        with daemon.DaemonContext(working_directory='/', detach_process=True, stdout=open('/data/Pipeline_jobs/assembly.log', 'a')):
            main(True)
    except (ImportError, ValueError) as e:
        print "Daemon module not found; running in normal mode."
        print e
        main(False)
| crashfrog/Dispatch | assembly_dispatch.py | Python | unlicense | 13,877 | [
"Bowtie"
] | 4814b3c0c23ff553a63013b96c24f7bc0754fdb1c43e1241434b87550787f086 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import warnings
from operator import itemgetter
from six import string_types
from tabulate import tabulate
from monty.io import zopen
from monty.json import MSONable
from pymatgen import Structure, Lattice, Element, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string_utils import str_delimited
"""
This module defines classes for reading/manipulating/writing the main sections
of FEFF input file(feff.inp), namely HEADER, ATOMS, POTENTIAL and the program
control tags.
XANES and EXAFS input files, are available, for non-spin case at this time.
"""
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags, used by Tags.__setitem__ to
# warn about unrecognised tags.  The original listing accidentally fused
# "FPRIME" "MDFF" into the single string "FPRIMEMDFF" via implicit string
# concatenation (a missing comma), which made the valid MDFF card fail
# validation; it also listed PRINT, DANES and FPRIME twice.  Each tag now
# appears exactly once.
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
                   "REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
                   "SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
                   "CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
                   "LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
                   "RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
                   "XMCD", "XNCDCONTROL", "END", "KMESH", "EGRID",
                   "DIMS", "AFOLP", "EDGE", "COMPTON", "MDFF",
                   "HOLE", "COREHOLE", "S02", "CHBROAD",
                   "EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
                   "UNFREEZEF", "CHSHIFT", "DEBYE",
                   "INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
                   "ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
                   "PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
                   "DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
                   "STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
                   "SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
                   "CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
                   "SELF", "SFSE", "MAGIC", "TARGET", "STRFAC")
class Header(MSONable):
    """
    Creates Header for the FEFF input file.
    Has the following format::
        * This feff.inp file generated by pymatgen, www.materialsproject.org
        TITLE comment:
        TITLE Source: CoO19128.cif
        TITLE Structure Summary: (Co2 O2)
        TITLE Reduced formula: CoO
        TITLE space group: P1, space number: 1
        TITLE abc: 3.297078 3.297078 5.254213
        TITLE angles: 90.0 90.0 120.0
        TITLE sites: 4
        * 1 Co 0.666666 0.333332 0.496324
        * 2 Co 0.333333 0.666667 0.996324
        * 3 O 0.666666 0.333332 0.878676
        * 4 O 0.333333 0.666667 0.378675
    Args:
        struct: Structure object, See pymatgen.core.structure.Structure.
        source: User supplied identifier, i.e. for Materials Project this
            would be the material ID number
        comment: Comment for first header line
    """
    def __init__(self, struct, source='', comment=''):
        if struct.is_ordered:
            self.struct = struct
            self.source = source
            sym = SpacegroupAnalyzer(struct)
            data = sym.get_symmetry_dataset()
            self.space_number = data["number"]
            self.space_group = data["international"]
            self.comment = comment or "None given"
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
    @staticmethod
    def from_cif_file(cif_file, source='', comment=''):
        """
        Static method to create Header object from cif_file
        Args:
            cif_file: cif_file path and name
            source: User supplied identifier, i.e. for Materials Project this
                would be the material ID number
            comment: User comment that goes in header
        Returns:
            Header Object
        """
        r = CifParser(cif_file)
        # Only the first structure in the cif is used.
        structure = r.get_structures()[0]
        return Header(structure, source, comment)
    @property
    def structure_symmetry(self):
        """
        Returns space number and space group
        Returns:
            Space number and space group list
        """
        return self.space_group, self.space_number
    @property
    def formula(self):
        """
        Formula of structure
        """
        return self.struct.composition.formula
    @staticmethod
    def from_file(filename):
        """
        Returns Header object from file
        """
        hs = Header.header_string_from_file(filename)
        return Header.from_string(hs)
    @staticmethod
    def header_string_from_file(filename='feff.inp'):
        """
        Reads Header string from either a HEADER file or feff.inp file
        Will also read a header from a non-pymatgen generated feff.inp file
        Args:
            filename: File name containing the Header data.
        Returns:
            Reads header string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            feff_header_str = []
            ln = 0
            # Checks to see if generated by pymatgen.  A membership test is
            # used instead of str.find(): find() returns -1 (truthy) when the
            # marker is absent and 0 (falsy) when it is a prefix, which
            # inverted this check.  The IndexError guard handles empty files.
            try:
                feffpmg = "pymatgen" in f[0]
            except IndexError:
                feffpmg = False
            # Reads pymatgen generated header or feff.inp file
            if feffpmg:
                # Line 8 ("TITLE sites: N") gives the number of site comment
                # lines that follow the 9-line preamble.
                nsites = int(f[8].split()[2])
                for line in f:
                    ln += 1
                    if ln <= nsites + 9:
                        feff_header_str.append(line)
            else:
                # Reads header from header from feff.inp file from unknown
                # source: keep the leading block of '*' comment and TITLE
                # lines, stop at the first line that is neither.
                end = 0
                for line in f:
                    if (line[0] == "*" or line[0] == "T") and end == 0:
                        feff_header_str.append(line.replace("\r", ""))
                    else:
                        end = 1
        return ''.join(feff_header_str)
    @staticmethod
    def from_string(header_str):
        """
        Reads Header string and returns Header object if header was
        generated by pymatgen.
        Note: Checks to see if generated by pymatgen, if not it is impossible
        to generate structure object so it is not possible to generate
        header object and routine ends
        Args:
            header_str: pymatgen generated feff.inp header
        Returns:
            Structure object.
        """
        lines = tuple(clean_lines(header_str.split("\n"), False))
        comment1 = lines[0]
        # Membership test rather than str.find(); see
        # header_string_from_file for why find() was incorrect here.
        if "pymatgen" in comment1:
            comment2 = ' '.join(lines[1].split()[2:])
            source = ' '.join(lines[2].split()[2:])
            basis_vec = lines[6].split(":")[-1].split()
            # a, b, c
            a = float(basis_vec[0])
            b = float(basis_vec[1])
            c = float(basis_vec[2])
            lengths = [a, b, c]
            # alpha, beta, gamma
            basis_ang = lines[7].split(":")[-1].split()
            alpha = float(basis_ang[0])
            beta = float(basis_ang[1])
            gamma = float(basis_ang[2])
            angles = [alpha, beta, gamma]
            lattice = Lattice.from_lengths_and_angles(lengths, angles)
            natoms = int(lines[8].split(":")[-1].split()[0])
            atomic_symbols = []
            for i in range(9, 9 + natoms):
                atomic_symbols.append(lines[i].split()[2])
            # read the atomic coordinates
            coords = []
            for i in range(natoms):
                toks = lines[i + 9].split()
                coords.append([float(s) for s in toks[3:]])
            struct = Structure(lattice, atomic_symbols, coords, False,
                               False, False)
            h = Header(struct, source, comment2)
            return h
        else:
            return "Header not generated by pymatgen, cannot return header object"
    def __str__(self):
        """
        String representation of Header.
        """
        to_s = lambda x: "%0.6f" % x
        output = ["* This FEFF.inp file generated by pymatgen",
                  ''.join(["TITLE comment: ", self.comment]),
                  ''.join(["TITLE Source: ", self.source]),
                  "TITLE Structure Summary: {}"
                  .format(self.struct.composition.formula),
                  "TITLE Reduced formula: {}"
                  .format(self.struct.composition.reduced_formula),
                  "TITLE space group: ({}), space number: ({})"
                  .format(self.space_group, self.space_number),
                  "TITLE abc:{}".format(" ".join(
                      [to_s(i).rjust(10) for i in self.struct.lattice.abc])),
                  "TITLE angles:{}".format(" ".join(
                      [to_s(i).rjust(10) for i in self.struct.lattice.angles])),
                  "TITLE sites: {}".format(self.struct.num_sites)]
        for i, site in enumerate(self.struct):
            output.append(" ".join(["*", str(i + 1), site.species_string,
                                    " ".join([to_s(j).rjust(12)
                                              for j in site.frac_coords])]))
        return "\n".join(output)
    def write_file(self, filename='HEADER'):
        """
        Writes Header into filename on disk.
        Args:
            filename: Filename and path for file to be written to disk
        """
        with open(filename, "w") as f:
            f.write(str(self) + "\n")
class Atoms(MSONable):
    """
    Atomic cluster centered around the absorbing atom.
    """
    def __init__(self, struct, absorbing_atom, radius):
        """
        Args:
            struct (Structure): input structure
            absorbing_atom (str/int): Symbol for absorbing atom or site index
            radius (float): radius of the atom cluster in Angstroms.
        """
        if struct.is_ordered:
            self.struct = struct
            self.pot_dict = get_atom_map(struct)
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
        self.absorbing_atom, self.center_index = \
            get_absorbing_atom_symbol_index(absorbing_atom, struct)
        self.radius = radius
        self._cluster = self._set_cluster()
    def _set_cluster(self):
        """
        Compute and set the cluster of atoms as a Molecule object. The site
        coordinates are translated such that the absorbing atom(aka central
        atom) is at the origin.
        Returns:
            Molecule
        """
        center = self.struct[self.center_index].coords
        sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
        symbols = [self.absorbing_atom]
        coords = [[0, 0, 0]]
        for i, site_dist in enumerate(sphere):
            # Strip charge/oxidation decorations (digits, +/-) from e.g.
            # "Fe2+" -> "Fe".  The original class "[^aA-zZ]" was a malformed
            # range; "[^A-Za-z]" is the intended "non-letter" class.
            site_symbol = re.sub(r"[^A-Za-z]+", "", site_dist[0].species_string)
            symbols.append(site_symbol)
            coords.append(site_dist[0].coords - center)
        return Molecule(symbols, coords)
    @property
    def cluster(self):
        """
        Returns the atomic cluster as a Molecule object.
        """
        return self._cluster
    @staticmethod
    def atoms_string_from_file(filename):
        """
        Reads atomic shells from file such as feff.inp or ATOMS file
        The lines are arranged as follows:
        x y z ipot Atom Symbol Distance Number
        with distance being the shell radius and ipot an integer identifying
        the potential used.
        Args:
            filename: File name containing atomic coord data.
        Returns:
            Atoms string.
        """
        with zopen(filename, "rt") as fobject:
            f = fobject.readlines()
            coords = 0
            atoms_str = []
            for line in f:
                # Start collecting at the "ATOMS" marker; skip any line
                # containing "END".
                if coords == 0:
                    find_atoms = line.find("ATOMS")
                    if find_atoms >= 0:
                        coords = 1
                if coords == 1 and not ("END" in line):
                    atoms_str.append(line.replace("\r", ""))
        return ''.join(atoms_str)
    @staticmethod
    def cluster_from_file(filename):
        """
        Parse the feff input file and return the atomic cluster as a Molecule
        object.
        Args:
            filename (str): path the feff input file
        Returns:
            Molecule: the atomic cluster as Molecule object. The absorbing atom
                is the one at the origin.
        """
        atoms_string = Atoms.atoms_string_from_file(filename)
        # Skip the "ATOMS" marker plus the two table-header lines.
        line_list = [l.split() for l in atoms_string.splitlines()[3:]]
        coords = []
        symbols = []
        for l in line_list:
            if l:
                coords.append([float(i) for i in l[:3]])
                symbols.append(l[4])
        return Molecule(symbols, coords)
    def get_lines(self):
        """
        Returns a list of string representations of the atomic configuration
        information(x, y, z, ipot, atom_symbol, distance, id).
        Returns:
            list: list of strings, sorted by the distance from the absorbing
                atom.
        """
        lines = [["{:f}".format(self._cluster[0].x),
                  "{:f}".format(self._cluster[0].y),
                  "{:f}".format(self._cluster[0].z),
                  0, self.absorbing_atom, "0.0", 0]]
        for i, site in enumerate(self._cluster[1:]):
            site_symbol = re.sub(r"[^A-Za-z]+", "", site.species_string)
            ipot = self.pot_dict[site_symbol]
            lines.append(["{:f}".format(site.x), "{:f}".format(site.y),
                          "{:f}".format(site.z), ipot, site_symbol,
                          "{:f}".format(self._cluster.get_distance(0, i+1)), i+1])
        # The distances are pre-formatted strings, so they must be compared
        # numerically: sorting with itemgetter(5) ordered them
        # lexicographically (e.g. "10.000000" before "2.000000").
        return sorted(lines, key=lambda line: float(line[5]))
    def __str__(self):
        """
        String representation of Atoms file.
        """
        lines_sorted = self.get_lines()
        # TODO: remove the formatting and update the unittests
        lines_formatted = str(tabulate(lines_sorted,
                                       headers=["*       x", "y", "z", "ipot",
                                                "Atom", "Distance", "Number"]))
        atom_list = lines_formatted.replace("--", "**")
        return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
    def write_file(self, filename='ATOMS'):
        """
        Write Atoms list to file.
        Args:
            filename: path for file to be written
        """
        with zopen(filename, "wt") as f:
            f.write(str(self) + "\n")
class Tags(dict):
    """
    FEFF control parameters.  Behaves as a dict of TAG -> value; assignment
    validates the tag name against VALID_FEFF_TAGS and coerces string values
    to typed values via proc_val.
    """
    def __init__(self, params=None):
        """
        Args:
            params: A set of input parameters as a dictionary.
        """
        super(Tags, self).__init__()
        if params:
            self.update(params)
    def __setitem__(self, key, val):
        """
        Add parameter-val pair. Warns if parameter is not in list of valid
        Feff tags. Also cleans the parameter and val by stripping leading and
        trailing white spaces.
        Arg:
            key: dict key value
            value: value associated with key in dictionary
        """
        if key.strip().upper() not in VALID_FEFF_TAGS:
            warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
        # String values are coerced via proc_val; non-strings stored as-is.
        super(Tags, self).__setitem__(key.strip(),
                                      Tags.proc_val(key.strip(), val.strip())
                                      if isinstance(val, string_types) else val)
    def as_dict(self):
        """
        Dict representation.
        Returns:
            Dictionary of parameters from fefftags object
        """
        tags_dict = dict(self)
        tags_dict['@module'] = self.__class__.__module__
        tags_dict['@class'] = self.__class__.__name__
        return tags_dict
    @staticmethod
    def from_dict(d):
        """
        Creates Tags object from a dictionary.
        Args:
            d: Dict of feff parameters and values.
        Returns:
            Tags object
        """
        i = Tags()
        for k, v in d.items():
            if k not in ("@module", "@class"):
                i[k] = v
        return i
    def get_string(self, sort_keys=False, pretty=False):
        """
        Returns a string representation of the Tags.  The reason why this
        method is different from the __str__ method is to provide options
        for pretty printing.
        Args:
            sort_keys: Set to True to sort the Feff parameters alphabetically.
                Defaults to False.
            pretty: Set to True for pretty aligned output. Defaults to False.
        Returns:
            String representation of Tags.
        """
        keys = self.keys()
        if sort_keys:
            keys = sorted(keys)
        lines = []
        for k in keys:
            if isinstance(self[k], dict):
                # ELNES/EXELFS values are nested dicts that expand to a
                # multi-line card (energy, beam, angles, mesh, position).
                if k in ["ELNES", "EXELFS"]:
                    lines.append([k, self._stringify_val(self[k]["ENERGY"])])
                    beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
                    beam_energy_list = beam_energy.split()
                    if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction
                        lines.append([beam_energy])
                        lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
                    else:
                        # no cross terms for orientation averaged spectrum
                        beam_energy_list[2] = str(0)
                        lines.append([self._stringify_val(beam_energy_list)])
                    lines.append([self._stringify_val(self[k]["ANGLES"])])
                    lines.append([self._stringify_val(self[k]["MESH"])])
                    lines.append([self._stringify_val(self[k]["POSITION"])])
            else:
                lines.append([k, self._stringify_val(self[k])])
        if pretty:
            return tabulate(lines)
        else:
            return str_delimited(lines, None, " ")
    @staticmethod
    def _stringify_val(val):
        """
        Convert the given value to string.
        """
        if isinstance(val, list):
            return " ".join([str(i) for i in val])
        else:
            return str(val)
    def __str__(self):
        return self.get_string()
    def write_file(self, filename='PARAMETERS'):
        """
        Write Tags to a Feff parameter tag file.
        Args:
            filename: filename and path to write to.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__() + "\n")
    @staticmethod
    def from_file(filename="feff.inp"):
        """
        Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
        Args:
            filename: Filename for either PARAMETER or feff.inp file
        Returns:
            Feff_tag object
        """
        with zopen(filename, "rt") as f:
            lines = list(clean_lines(f.readlines()))
        params = {}
        eels_params = []
        # ieels marks the line index of an ELNES/EXELFS card; the following
        # (up to) 5 lines belong to that card and are collected separately.
        ieels = -1
        ieels_max = -1
        for i, line in enumerate(lines):
            m = re.match("([A-Z]+\d*\d*)\s*(.*)", line)
            if m:
                key = m.group(1).strip()
                val = m.group(2).strip()
                val = Tags.proc_val(key, val)
                if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
                    if key in ["ELNES", "EXELFS"]:
                        ieels = i
                        ieels_max = ieels + 5
                    else:
                        params[key] = val
            if ieels >= 0:
                if i >= ieels and i <= ieels_max:
                    if i == ieels+1:
                        # aver flag == 1: orientation-averaged spectrum has
                        # one line fewer (no explicit beam direction).
                        if int(line.split()[1]) == 1:
                            ieels_max -= 1
                    eels_params.append(line)
        if eels_params:
            if len(eels_params) == 6:
                eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
            else:
                eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
            eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
            for k, v in zip(eels_keys, eels_params[1:]):
                eels_dict[k] = str(v)
            params[str(eels_params[0].split()[0])] = eels_dict
        return Tags(params)
    @staticmethod
    def proc_val(key, val):
        """
        Static helper method to convert Feff parameters to proper types, e.g.
        integers, floats, lists, etc.
        Args:
            key: Feff parameter key
            val: Actual value of Feff parameter.
        """
        # Everything except ELNES/EXELFS is treated as a (possibly
        # single-element) whitespace-separated list of numbers; values that
        # fail numeric conversion fall back to a capitalized string.
        list_type_keys = list(VALID_FEFF_TAGS)
        del list_type_keys[list_type_keys.index("ELNES")]
        del list_type_keys[list_type_keys.index("EXELFS")]
        boolean_type_keys = ()
        float_type_keys = ("S02", "EXAFS", "RPATH")
        def smart_int_or_float(numstr):
            if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
                return float(numstr)
            else:
                return int(numstr)
        try:
            if key in list_type_keys:
                output = list()
                toks = re.split("\s+", val)
                for tok in toks:
                    # "N*x" expands to N repetitions of x.
                    m = re.match("(\d+)\*([\d\.\-\+]+)", tok)
                    if m:
                        output.extend([smart_int_or_float(m.group(2))] *
                                      int(m.group(1)))
                    else:
                        output.append(smart_int_or_float(tok))
                return output
            if key in boolean_type_keys:
                m = re.search("^\W+([TtFf])", val)
                if m:
                    if m.group(1) == "T" or m.group(1) == "t":
                        return True
                    else:
                        return False
                raise ValueError(key + " should be a boolean type!")
            if key in float_type_keys:
                return float(val)
        except ValueError:
            return val.capitalize()
        return val.capitalize()
    def diff(self, other):
        """
        Diff function. Compares two PARAMETER files and indicates which
        parameters are the same and which are not. Useful for checking whether
        two runs were done using the same parameters.
        Args:
            other: The other PARAMETER dictionary to compare to.
        Returns:
            Dict of the format {"Same" : parameters_that_are_the_same,
            "Different": parameters_that_are_different} Note that the
            parameters are return as full dictionaries of values.
        """
        similar_param = {}
        different_param = {}
        for k1, v1 in self.items():
            if k1 not in other:
                different_param[k1] = {"FEFF_TAGS1": v1,
                                       "FEFF_TAGS2": "Default"}
            elif v1 != other[k1]:
                different_param[k1] = {"FEFF_TAGS1": v1,
                                       "FEFF_TAGS2": other[k1]}
            else:
                similar_param[k1] = v1
        for k2, v2 in other.items():
            if k2 not in similar_param and k2 not in different_param:
                if k2 not in self:
                    different_param[k2] = {"FEFF_TAGS1": "Default",
                                           "FEFF_TAGS2": v2}
        return {"Same": similar_param, "Different": different_param}
    def __add__(self, other):
        """
        Add all the values of another Tags object to this object
        Facilitates the use of "standard" Tags
        """
        params = dict(self)
        for k, v in other.items():
            if k in self and v != self[k]:
                raise ValueError("Tags have conflicting values!")
            else:
                params[k] = v
        return Tags(params)
class Potential(MSONable):
    """
    FEFF atomic potential.
    """
    def __init__(self, struct, absorbing_atom):
        """
        Args:
            struct (Structure): Structure object.
            absorbing_atom (str/int): Absorbing atom symbol or site index
        """
        if struct.is_ordered:
            self.struct = struct
            self.pot_dict = get_atom_map(struct)
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
        self.absorbing_atom, _ = \
            get_absorbing_atom_symbol_index(absorbing_atom, struct)
    @staticmethod
    def pot_string_from_file(filename='feff.inp'):
        """
        Reads Potential parameters from a feff.inp or FEFFPOT file.
        The lines are arranged as follows:
        ipot Z element lmax1 lmax2 stoichiometry spinph
        Args:
            filename: file name containing potential data.
        Returns:
            FEFFPOT string.
        """
        with zopen(filename, "rt") as f_object:
            f = f_object.readlines()
            ln = -1
            pot_str = ["POTENTIALS\n"]
            pot_tag = -1
            pot_data = 0
            pot_data_over = 1
            # State machine: scan to the "POTENTIALS" marker, then collect
            # consecutive data rows whose ipot indices run 0, 1, 2, ...;
            # the first non-conforming line after data starts terminates
            # collection (pot_data_over = 0).
            for line in f:
                if pot_data_over == 1:
                    ln += 1
                    if pot_tag == -1:
                        pot_tag = line.find("POTENTIALS")
                        ln = 0
                    if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
                        try:
                            if int(line.split()[0]) == pot_data:
                                pot_data += 1
                                pot_str.append(line.replace("\r", ""))
                        except (ValueError, IndexError):
                            if pot_data > 0:
                                pot_data_over = 0
        return ''.join(pot_str)
    @staticmethod
    def pot_dict_from_string(pot_data):
        """
        Creates atomic symbol/potential number dictionary
        forward and reverse
        Arg:
            pot_data: potential data in string format
        Returns:
            forward and reverse atom symbol and potential number dictionaries.
        """
        pot_dict = {}
        pot_dict_reverse = {}
        begin = 0
        ln = -1
        for line in pot_data.split("\n"):
            try:
                # Data rows start at the first line whose ipot column is "0"
                # (the absorbing atom); everything before it is header text.
                if begin == 0 and line.split()[0] == "0":
                    begin += 1
                    ln = 0
                if begin == 1:
                    ln += 1
                if ln > 0:
                    atom = line.split()[2]
                    index = int(line.split()[0])
                    pot_dict[atom] = index
                    pot_dict_reverse[index] = atom
            except (ValueError, IndexError):
                # Malformed/blank lines are simply skipped.
                pass
        return pot_dict, pot_dict_reverse
    def __str__(self):
        """
        Returns a string representation of potential parameters to be used in
        the feff.inp file,
        determined from structure object.
        The lines are arranged as follows:
        ipot   Z   element   lmax1   lmax2   stoichiometry   spinph
        Returns:
            String representation of Atomic Coordinate Shells.
        """
        central_element = Element(self.absorbing_atom)
        # ipot 0 is always the absorbing atom; its stoichiometry is a tiny
        # nonzero placeholder as required by FEFF.
        ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]
        for el, amt in self.struct.composition.items():
            ipot = self.pot_dict[el.symbol]
            ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
        ipot_sorted = sorted(ipotrow, key=itemgetter(0))
        ipotrow = str(tabulate(ipot_sorted,
                               headers=["*ipot", "Z", "tag", "lmax1",
                                        "lmax2", "xnatph(stoichometry)",
                                        "spinph"]))
        ipotlist = ipotrow.replace("--", "**")
        ipotlist = ''.join(["POTENTIALS\n", ipotlist])
        return ipotlist
    def write_file(self, filename='POTENTIALS'):
        """
        Write to file.
        Args:
            filename: filename and path to write potential file to.
        """
        with zopen(filename, "wt") as f:
            f.write(str(self) + "\n")
class FeffParserError(Exception):
    """Raised when a FEFF structure cannot be parsed.

    The offending condition (e.g. atoms that are unphysically close) is
    described by the ``msg`` attribute.
    """
    def __init__(self, msg):
        # Human-readable description of the parse failure.
        self.msg = msg
    def __str__(self):
        return "FeffParserError : {}".format(self.msg)
def get_atom_map(structure):
    """
    Returns a dict that maps each atomic symbol to a unique integer starting
    from 1.
    Args:
        structure (Structure)
    Returns:
        dict: symbol -> 1-based index, numbered in order of first appearance
            in the structure.
    """
    # Single order-preserving pass; the previous version abused a list
    # comprehension for its side effect and was O(n^2) due to list.count.
    atom_map = {}
    for site in structure:
        symbol = site.specie.symbol
        if symbol not in atom_map:
            atom_map[symbol] = len(atom_map) + 1
    return atom_map
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
    """
    Return the absorbing atom symbol and site index in the given structure.
    Args:
        absorbing_atom (str/int): symbol or site index
        structure (Structure)
    Returns:
        str, int: symbol and site index
    Raises:
        ValueError: if absorbing_atom is neither a species symbol string nor
            an integer site index.
    """
    if isinstance(absorbing_atom, string_types):
        # Symbol given: the first site of that species becomes the absorber.
        return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]
    elif isinstance(absorbing_atom, int):
        return str(structure[absorbing_atom].specie), absorbing_atom
    else:
        raise ValueError("absorbing_atom must be either specie symbol or site index")
| aykol/pymatgen | pymatgen/io/feff/inputs.py | Python | mit | 30,411 | [
"FEFF",
"pymatgen"
] | 58bc906aba8d2087f13feb54df54c9bd4a381e3e343d1127df2fa493bc4eea22 |
""" Storage Factory Class - creates instances of various Storage plugins from the Core DIRAC or extensions
This Class has three public methods:
getStorageName(): Resolves links in the CS to the target SE name.
getStorage(): This creates a single storage stub based on the parameters passed in a dictionary.
This dictionary must have the following keys: 'StorageName','PluginName','Protocol'
Other optional keys are 'Port','Host','Path','SpaceToken'
getStorages() This takes a DIRAC SE definition and creates storage stubs for the protocols found in the CS.
By providing an optional list of protocols it is possible to limit the created stubs.
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
class StorageFactory( object ):
def __init__( self, useProxy = False, vo = None ):
self.rootConfigPath = '/Resources/StorageElements'
self.proxy = False
self.proxy = useProxy
self.resourceStatus = ResourceStatus()
self.vo = vo
if self.vo is None:
result = getVOfromProxyGroup()
if result['OK']:
self.vo = result['Value']
else:
RuntimeError( "Can not get the current VO context" )
self.remotePlugins = []
self.localPlugins = []
self.name = ''
self.options = {}
self.protocolDetails = []
self.storages = []
###########################################################################################
#
# Below are public methods for obtaining storage objects
#
  def getStorageName( self, initialName ):
    """ Resolve 'Alias' links in the CS and return the target SE name.

        :param initialName: DIRAC SE name as given (possibly an alias)
        :return: S_OK( resolvedName ) / S_ERROR structure
    """
    return self._getConfigStorageName( initialName, 'Alias' )
def getStorage( self, parameterDict, hideExceptions = False ):
""" This instantiates a single storage for the details provided and doesn't check the CS.
"""
# The storage name must be supplied.
if parameterDict.has_key( 'StorageName' ):
storageName = parameterDict['StorageName']
else:
errStr = "StorageFactory.getStorage: StorageName must be supplied"
gLogger.error( errStr )
return S_ERROR( errStr )
# PluginName must be supplied otherwise nothing with work.
if parameterDict.has_key( 'PluginName' ):
pluginName = parameterDict['PluginName']
# Temporary fix for backward compatibility
elif parameterDict.has_key( 'ProtocolName' ):
pluginName = parameterDict['ProtocolName']
else:
errStr = "StorageFactory.getStorage: PluginName must be supplied"
gLogger.error( errStr )
return S_ERROR( errStr )
return self.__generateStorageObject( storageName, pluginName, parameterDict, hideExceptions = hideExceptions )
def getStorages( self, storageName, pluginList = None, hideExceptions = False ):
""" Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS
'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
'pluginList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
"""
self.remotePlugins = []
self.localPlugins = []
self.name = ''
self.options = {}
self.protocolDetails = []
self.storages = []
if pluginList is None:
pluginList = []
elif isinstance( pluginList, basestring ):
pluginList = [pluginList]
if not self.vo:
gLogger.warn( 'No VO information available' )
# Get the name of the storage provided
res = self._getConfigStorageName( storageName, 'Alias' )
if not res['OK']:
return res
storageName = res['Value']
self.name = storageName
# In case the storage is made from a base SE, get this information
res = self._getConfigStorageName( storageName, 'BaseSE' )
if not res['OK']:
return res
# If the storage is derived frmo another one, keep the information
if res['Value'] != storageName:
derivedStorageName = storageName
storageName = res['Value']
else:
derivedStorageName = None
# Get the options defined in the CS for this storage
res = self._getConfigStorageOptions( storageName, derivedStorageName = derivedStorageName )
if not res['OK']:
return res
self.options = res['Value']
# Get the protocol specific details
res = self._getConfigStorageProtocols( storageName, derivedStorageName = derivedStorageName )
if not res['OK']:
return res
self.protocolDetails = res['Value']
requestedLocalPlugins = []
requestedRemotePlugins = []
requestedProtocolDetails = []
turlProtocols = []
# Generate the protocol specific plug-ins
for protocolDict in self.protocolDetails:
pluginName = protocolDict.get( 'PluginName' )
if pluginList and pluginName not in pluginList:
continue
protocol = protocolDict['Protocol']
result = self.__generateStorageObject( storageName, pluginName, protocolDict, hideExceptions = hideExceptions )
if result['OK']:
self.storages.append( result['Value'] )
if pluginName in self.localPlugins:
turlProtocols.append( protocol )
requestedLocalPlugins.append( pluginName )
if pluginName in self.remotePlugins:
requestedRemotePlugins.append( pluginName )
requestedProtocolDetails.append( protocolDict )
else:
gLogger.info( result['Message'] )
if len( self.storages ) > 0:
resDict = {}
resDict['StorageName'] = self.name
resDict['StorageOptions'] = self.options
resDict['StorageObjects'] = self.storages
resDict['LocalPlugins'] = requestedLocalPlugins
resDict['RemotePlugins'] = requestedRemotePlugins
resDict['ProtocolOptions'] = requestedProtocolDetails
resDict['TurlProtocols'] = turlProtocols
return S_OK( resDict )
else:
errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
gLogger.error( errStr, self.name )
return S_ERROR( errStr )
###########################################################################################
#
# Below are internal methods for obtaining section/option/value configuration
#
def _getConfigStorageName( self, storageName, referenceType ):
"""
This gets the name of the storage the configuration service.
If the storage is a reference to another SE the resolution is performed.
'storageName' is the storage section to check in the CS
"""
configPath = '%s/%s' % ( self.rootConfigPath, storageName )
res = gConfig.getOptions( configPath )
if not res['OK']:
errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
if not res['Value']:
errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
gLogger.error( errStr, configPath )
return S_ERROR( errStr )
if referenceType in res['Value']:
configPath = cfgPath( self.rootConfigPath, storageName, referenceType )
referenceName = gConfig.getValue( configPath )
result = self._getConfigStorageName( referenceName, 'Alias' )
if not result['OK']:
return result
resolvedName = result['Value']
else:
resolvedName = storageName
return S_OK( resolvedName )
def _getConfigStorageOptions( self, storageName, derivedStorageName = None ):
""" Get the options associated to the StorageElement as defined in the CS
"""
optionsDict = {}
# We first get the options of the baseSE, and then overwrite with the derivedSE
for seName in ( storageName, derivedStorageName ) if derivedStorageName else ( storageName, ):
storageConfigPath = cfgPath( self.rootConfigPath, seName )
res = gConfig.getOptions( storageConfigPath )
if not res['OK']:
errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
gLogger.error( errStr, "%s: %s" % ( seName, res['Message'] ) )
return S_ERROR( errStr )
for option in set( res['Value'] ) - set( ( 'ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess' ) ):
optionConfigPath = cfgPath( storageConfigPath, option )
default = [] if option in [ 'VO' ] else ''
optionsDict[option] = gConfig.getValue( optionConfigPath, default )
# The status is that of the derived SE only
seName = derivedStorageName if derivedStorageName else storageName
res = self.resourceStatus.getStorageElementStatus( seName )
if not res[ 'OK' ]:
errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
gLogger.error( errStr, "%s: %s" % ( seName, res['Message'] ) )
return S_ERROR( errStr )
# For safety, we did not add the ${statusType}Access keys
# this requires modifications in the StorageElement class
# We add the dictionary with the statusTypes and values
# { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
optionsDict.update( res[ 'Value' ][ seName ] )
return S_OK( optionsDict )
def __getProtocolsSections( self, storageName ):
storageConfigPath = cfgPath( self.rootConfigPath, storageName )
res = gConfig.getSections( storageConfigPath )
if not res['OK']:
errStr = "StorageFactory._getConfigStorageProtocols: Failed to get storage sections"
gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
return S_ERROR( errStr )
protocolSections = res['Value']
return S_OK( protocolSections )
def _getConfigStorageProtocols( self, storageName, derivedStorageName = None ):
""" Protocol specific information is present as sections in the Storage configuration
"""
res = self.__getProtocolsSections( storageName )
if not res['OK']:
return res
protocolSections = res['Value']
sortedProtocolSections = sorted( protocolSections )
protocolDetails = []
for protocolSection in sortedProtocolSections:
res = self._getConfigStorageProtocolDetails( storageName, protocolSection )
if not res['OK']:
return res
protocolDetails.append( res['Value'] )
if derivedStorageName:
# We may have parameters overwriting the baseSE protocols
res = self.__getProtocolsSections( derivedStorageName )
if not res['OK']:
return res
for protocolSection in res['Value']:
res = self._getConfigStorageProtocolDetails( derivedStorageName, protocolSection, checkAccess = False )
if not res['OK']:
return res
detail = res['Value']
pluginName = detail.get( 'PluginName' )
if pluginName:
for protocolDetail in protocolDetails:
if protocolDetail.get( 'PluginName' ) == pluginName:
protocolDetail.update( detail )
break
return S_OK( protocolDetails )
def _getConfigStorageProtocolDetails( self, storageName, protocolSection, checkAccess = True ):
"""
Parse the contents of the protocol block
"""
# First obtain the options that are available
protocolConfigPath = cfgPath( self.rootConfigPath, storageName, protocolSection )
res = gConfig.getOptions( protocolConfigPath )
if not res['OK']:
errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) )
return S_ERROR( errStr )
options = res['Value']
# We must have certain values internally even if not supplied in CS
protocolDict = {'Access':'', 'Host':'', 'Path':'', 'Port':'', 'Protocol':'', 'SpaceToken':'', 'WSUrl':''}
for option in options:
configPath = cfgPath( protocolConfigPath, option )
optionValue = gConfig.getValue( configPath, '' )
protocolDict[option] = optionValue
# This is a temporary for backward compatibility: move ProtocolName to PluginName
protocolDict.setdefault( 'PluginName', protocolDict.pop( 'ProtocolName', None ) )
# Evaluate the base path taking into account possible VO specific setting
if self.vo:
result = gConfig.getOptionsDict( cfgPath( protocolConfigPath, 'VOPath' ) )
voPath = ''
if result['OK']:
voPath = result['Value'].get( self.vo, '' )
if voPath:
protocolDict['Path'] = voPath
# Now update the local and remote protocol lists.
# A warning will be given if the Access option is not set.
if checkAccess:
if protocolDict['Access'].lower() == 'remote':
self.remotePlugins.append( protocolDict['PluginName'] )
elif protocolDict['Access'].lower() == 'local':
self.localPlugins.append( protocolDict['PluginName'] )
else:
errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % ( storageName, protocolSection )
gLogger.warn( errStr )
# The PluginName option must be defined
if not protocolDict['PluginName']:
errStr = "StorageFactory.__getProtocolDetails: 'PluginName' option is not defined."
gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) )
return S_ERROR( errStr )
return S_OK( protocolDict )
###########################################################################################
#
# Below is the method for obtaining the object instantiated for a provided storage configuration
#
def __generateStorageObject( self, storageName, pluginName, parameters, hideExceptions = False ):
storageType = pluginName
if self.proxy:
storageType = 'Proxy'
objectLoader = ObjectLoader()
result = objectLoader.loadObject( 'Resources.Storage.%sStorage' % storageType, storageType + 'Storage',
hideExceptions = hideExceptions )
if not result['OK']:
gLogger.error( 'Failed to load storage object: %s' % result['Message'] )
return result
storageClass = result['Value']
try:
storage = storageClass( storageName, parameters )
except Exception, x:
errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s: %s" % ( storageName, x )
gLogger.exception( errStr )
return S_ERROR( errStr )
return S_OK( storage )
| vmendez/DIRAC | Resources/Storage/StorageFactory.py | Python | gpl-3.0 | 14,518 | [
"DIRAC"
] | 3718f4b80c077d2b71bcd49e7d4069cece10f57fe6404fb2e3e6515610d8a05a |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from model_utils import FieldTracker
from karaage.common import is_admin, log
from karaage.institutes.managers import ActiveInstituteManager
from karaage.machines.models import Account
from karaage.people.models import Group, Person
@python_2_unicode_compatible
class Institute(models.Model):
    """A research institute; its membership is carried by ``group``.

    Saving/deleting an institute is mirrored to the external datastores
    (see karaage.datastores) and logged via karaage.common.log.
    """

    name = models.CharField(max_length=255, unique=True)
    # People who may administer this institute; the through model carries
    # per-delegate flags such as send_email.
    delegates = models.ManyToManyField(
        Person, related_name='delegate_for',
        blank=True, through='InstituteDelegate')
    group = models.ForeignKey(Group, on_delete=models.PROTECT)
    # SAML identifiers used to match the institute during single sign-on.
    saml_scoped_affiliation = models.CharField(
        max_length=200,
        null=True, blank=True, unique=True)
    saml_entityid = models.CharField(
        max_length=200,
        null=True, blank=True, unique=True)
    is_active = models.BooleanField(default=True)

    objects = models.Manager()
    active = ActiveInstituteManager()

    # Tracks field changes so save()/delete() can log them and keep the
    # datastores in sync (model_utils.FieldTracker).
    _tracker = FieldTracker()

    class Meta:
        ordering = ['name']
        db_table = 'institute'
        app_label = 'karaage'

    def save(self, *args, **kwargs):
        """Save the institute, log what changed and sync the datastores.

        If ``group`` changed, accounts of the old group are detached from
        this institute in the datastores and accounts of the new group are
        attached.
        """
        created = self.pk is None

        # save the object
        super(Institute, self).save(*args, **kwargs)

        if created:
            log.add(self, 'Created')
        for field in self._tracker.changed():
            log.change(self, 'Changed %s to %s'
                       % (field, getattr(self, field)))

        # update the datastore
        from karaage.datastores import save_institute
        save_institute(self)

        # has group changed?
        if self._tracker.has_changed("group_id"):
            old_group_pk = self._tracker.previous("group_id")
            new_group = self.group
            if old_group_pk is not None:
                old_group = Group.objects.get(pk=old_group_pk)
                from karaage.datastores import remove_accounts_from_institute
                query = Account.objects.filter(person__groups=old_group)
                remove_accounts_from_institute(query, self)
            if new_group is not None:
                from karaage.datastores import add_accounts_to_institute
                query = Account.objects.filter(person__groups=new_group)
                add_accounts_to_institute(query, self)
    save.alters_data = True

    def delete(self, *args, **kwargs):
        """Delete the institute and detach its accounts in the datastores."""
        # Get list of accounts.
        # This must happen before we call the super method,
        # as this will delete accounts that use this institute.
        old_group_pk = self._tracker.previous("group_id")
        if old_group_pk is not None:
            old_group = Group.objects.get(pk=old_group_pk)
            query = Account.objects.filter(person__groups=old_group)
            query = query.filter(date_deleted__isnull=True)
            accounts = list(query)
        else:
            accounts = []

        # delete the object
        log.delete(self, 'Deleted')
        super(Institute, self).delete(*args, **kwargs)

        # update datastore associations
        for account in accounts:
            from karaage.datastores import remove_account_from_institute
            remove_account_from_institute(account, self)

        # update the datastore
        from karaage.datastores import delete_institute
        delete_institute(self)
    delete.alters_data = True

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('kg_institute_detail', args=[self.id])

    def can_view(self, request):
        """Return True if the requesting user may view this institute.

        Admins see everything; otherwise the institute must be active, the
        person active and unlocked, and the person must be a delegate.
        """
        person = request.user

        if not person.is_authenticated:
            return False

        # staff members can view everything
        if is_admin(request):
            return True

        if not self.is_active:
            return False

        if not person.is_active:
            return False

        if person.is_locked():
            return False

        # Institute delegates==person can view institute
        if person in self.delegates.all():
            return True

        return False
class InstituteDelegate(models.Model):
    """Through model linking a delegate Person to an Institute.

    Changes and deletions are logged against the institute.
    """

    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    institute = models.ForeignKey(Institute, on_delete=models.CASCADE)
    # Whether this delegate should receive notification emails.
    send_email = models.BooleanField()

    # Tracks field changes so save() can log them.
    _tracker = FieldTracker()

    class Meta:
        db_table = 'institutedelegate'
        app_label = 'karaage'

    def save(self, *args, **kwargs):
        """Save the delegate and log each changed field."""
        super(InstituteDelegate, self).save(*args, **kwargs)
        for field in self._tracker.changed():
            log.change(
                self.institute,
                'Delegate %s: Changed %s to %s' %
                (self.person, field, getattr(self, field)))

    def delete(self, *args, **kwargs):
        """Delete the delegate and log the removal."""
        super(InstituteDelegate, self).delete(*args, **kwargs)
        log.delete(
            self.institute,
            'Delegate %s: Deleted' % self.person)
| brianmay/karaage | karaage/institutes/models.py | Python | gpl-3.0 | 5,691 | [
"Brian"
] | 2b5cdef5e2e584e7876c78543763e8341bf859df3435164bd74e357bb57a2636 |
import deepchem as dc
import numpy as np
import tensorflow as tf
import unittest
from deepchem.models.tensorgraph import layers
from flaky import flaky
def generate_batch(batch_size):
  """Sample one training batch.

  Per-sample means are drawn uniformly from [0, 10); values are drawn
  from a Gaussian centered on those means with standard deviation 2.0.
  Returns a (means, values) pair, each of shape (batch_size, 1).
  """
  shape = [batch_size, 1]
  means = np.random.random(shape) * 10
  values = np.random.normal(loc=means, scale=2.0)
  return means, values
def generate_data(gan, batches, batch_size):
  """Yield ``batches`` feed dictionaries for fit_gan().

  Each dictionary maps the GAN's first data input to sampled values and
  its first conditional input to the corresponding means.
  """
  data_key = gan.data_inputs[0]
  cond_key = gan.conditional_inputs[0]
  for _ in range(batches):
    means, values = generate_batch(batch_size)
    yield {data_key: values, cond_key: means}
class ExampleGAN(dc.models.GAN):
  """Minimal conditional GAN used by the tests.

  Uses 2-dim noise, one 1-dim data input (the sampled value) and one
  1-dim conditional input (the Gaussian mean it was drawn around).
  """

  def get_noise_input_shape(self):
    return (None, 2)

  def get_data_input_shapes(self):
    return [(None, 1)]

  def get_conditional_input_shapes(self):
    return [(None, 1)]

  def create_generator(self, noise_input, conditional_inputs):
    # The generator sees the noise concatenated with the conditional mean
    # and emits a single value.
    gen_in = layers.Concat([noise_input] + conditional_inputs)
    return [layers.Dense(1, in_layers=gen_in)]

  def create_discriminator(self, data_inputs, conditional_inputs):
    # The discriminator scores (value, mean) pairs with a sigmoid output.
    discrim_in = layers.Concat(data_inputs + conditional_inputs)
    dense = layers.Dense(10, in_layers=discrim_in, activation_fn=tf.nn.relu)
    return layers.Dense(1, in_layers=dense, activation_fn=tf.sigmoid)
class TestGAN(unittest.TestCase):
  """End-to-end training tests for the GAN/WGAN TensorGraph models.

  The tests are marked @flaky because GAN training is stochastic.
  """

  @flaky
  def test_cgan(self):
    """Test fitting a conditional GAN."""
    gan = ExampleGAN(learning_rate=0.003)
    gan.fit_gan(
        generate_data(gan, 5000, 100),
        generator_steps=0.5,
        checkpoint_interval=0)

    # See if it has done a plausible job of learning the distribution:
    # generated values should scatter around the conditional means.
    means = 10 * np.random.random([1000, 1])
    values = gan.predict_gan_generator(conditional_inputs=[means])
    deltas = values - means
    assert abs(np.mean(deltas)) < 1.0
    assert np.std(deltas) > 1.0

  @flaky
  def test_mix_gan(self):
    """Test a GAN with multiple generators and discriminators."""
    gan = ExampleGAN(n_generators=2, n_discriminators=2, learning_rate=0.003)
    gan.fit_gan(
        generate_data(gan, 5000, 100),
        generator_steps=0.5,
        checkpoint_interval=0)

    # See if it has done a plausible job of learning the distribution.
    # Each generator is checked independently.
    means = 10 * np.random.random([1000, 1])
    for i in range(2):
      values = gan.predict_gan_generator(
          conditional_inputs=[means], generator_index=i)
      deltas = values - means
      assert abs(np.mean(deltas)) < 1.0
      assert np.std(deltas) > 1.0

  @flaky
  def test_wgan(self):
    """Test fitting a conditional WGAN."""

    class ExampleWGAN(dc.models.WGAN):
      # Same toy architecture as ExampleGAN, but the discriminator
      # (critic) output is unbounded as required by WGAN.

      def get_noise_input_shape(self):
        return (None, 2)

      def get_data_input_shapes(self):
        return [(None, 1)]

      def get_conditional_input_shapes(self):
        return [(None, 1)]

      def create_generator(self, noise_input, conditional_inputs):
        gen_in = layers.Concat([noise_input] + conditional_inputs)
        return [layers.Dense(1, in_layers=gen_in)]

      def create_discriminator(self, data_inputs, conditional_inputs):
        discrim_in = layers.Concat(data_inputs + conditional_inputs)
        dense = layers.Dense(10, in_layers=discrim_in, activation_fn=tf.nn.relu)
        return layers.Dense(1, in_layers=dense)

    # We have to set the gradient penalty very small because the generator's
    # output is only a single number, so the default penalty would constrain
    # it far too much.
    gan = ExampleWGAN(learning_rate=0.003, gradient_penalty=0.1)
    gan.fit_gan(
        generate_data(gan, 10000, 100),
        generator_steps=0.1,
        checkpoint_interval=0)

    # See if it has done a plausible job of learning the distribution.
    means = 10 * np.random.random([1000, 1])
    values = gan.predict_gan_generator(conditional_inputs=[means])
    deltas = values - means
    assert abs(np.mean(deltas)) < 1.0
    assert np.std(deltas) > 1.0
| ktaneishi/deepchem | deepchem/models/tensorgraph/tests/test_gan.py | Python | mit | 3,900 | [
"Gaussian"
] | 16cf5084cb866a887e4384604d0c08afd3866dffbe4cb0ac2212d286cfc89567 |
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author : Anthony Geay
from MEDLoader import *

""" This test is a non regression test. ExtractCellType then ExtractGroup.
"""

fname="testMEDReader8.med"
outImgName="testMEDReader8.png"

#########
# Build a 4x4 grid of points and a 2D mesh of 4 triangles + 7 quads on it.
arr=DataArrayDouble([(0,0,0),(1,0,0),(2,0,0),(3,0,0),(0,1,0),(1,1,0),(2,1,0),(3,1,0),(0,2,0),(1,2,0),(2,2,0),(3,2,0),(0,3,0),(1,3,0),(2,3,0),(3,3,0)])
m0=MEDCouplingUMesh("mesh",2) ; m0.setCoords(arr) ; m0.allocateCells()
for elt in [[2,3,6],[3,7,6],[6,9,5],[6,10,9]]:
    m0.insertNextCell(NORM_TRI3,elt)
    pass
for elt in [[0,4,5,1],[5,6,2,1],[4,8,9,5],[6,10,11,7],[8,12,13,9],[9,13,14,10],[10,14,15,11]]:
    m0.insertNextCell(NORM_QUAD4,elt)
    pass
mm=MEDFileUMesh()
mm.setMeshAtLevel(0,m0)
# Define a cell group "grp0" covering cells 0,1,2,5 (used by ExtractGroup below).
grp0=DataArrayInt([0,1,2,5]) ; grp0.setName("grp0")
mm.setGroupsAtLevel(0,[grp0])
fmts=MEDFileFieldMultiTS()
#
# Nodal field "fNode" written at four time steps; the array is reversed
# between steps so successive steps are distinguishable in the rendering.
fNode=MEDCouplingFieldDouble(ON_NODES) ; fNode.setName("fNode")
fNode.setMesh(m0)
fNode.setArray(DataArrayDouble([3,2,1,0,3.16,2.23,1.41,1,3.6,2.82,2.23,2,4.24,3.6,3.16,3]))
fNode.getArray().setInfoOnComponent(0,"C0")
fNode.setTime(0.5,1,1)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
fNode.getArray().reverse()
fNode.setTime(0.5,1,2)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
fNode.getArray().reverse()
fNode.setTime(0.5,2,1)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
fNode.getArray().reverse()
fNode.setTime(0.5,2,2)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
# Write mesh and field time series to the MED file.
mm.write(fname,2)
fmts.write(fname,0)
################### MED write is done -> Go to MEDReader
from paraview.simple import *

# Open the MED file just written and select the nodal field on P1 support.
myMedReader=MEDReader(FileName=fname)
myMedReader.AllArrays = ['TS0/mesh/ComSup0/fNode@@][@@P1']
assert(list(myMedReader.TimestepValues)==[0.,1.,2.,3.])
myMedReader.UpdatePipeline()

# Keep only the TRI3 cells (mesh also contains QUAD4).
extractCT=ExtractCellType()
extractCT.Input=myMedReader
extractCT.UpdatePipelineInformation()
assert(list(extractCT.GetProperty("GeoTypesInfo"))==['TRI3','0','QUAD4','0'])
extractCT.AllGeoTypes=['TRI3']

# Then keep only cells belonging to group "grp0".
extGrp=ExtractGroup()
extGrp.Input=extractCT
extGrp.UpdatePipelineInformation()
assert(filter(lambda x:x[:4]=="GRP_",list(extGrp.GetProperty("GroupsFlagsInfo")[::2]))==['GRP_grp0'])
extGrp.AllGroups="GRP_grp0"

# Camera and colour-map setup, then dump a screenshot for visual regression.
RenderView1 = GetRenderView()
RenderView1.CameraFocalPoint = [1.5, 1.5, 0.0]
RenderView1.CameraPosition = [1.5, 1.5, 10000.0]
RenderView1.InteractionMode = '3D'
RenderView1.CameraPosition = [1.5, 1.5, 8.196152422706632]
RenderView1.CameraClippingRange = [7.825640906782493, 8.682319698595558]
RenderView1.CameraParallelScale = 2.1213203435596424
RenderView1.CenterOfRotation = [1.5, 1.5, 0.0]

DataRepresentation4 = Show()
DataRepresentation4.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation4.SelectionPointFieldDataArrayName = 'fNode'
DataRepresentation4.ScaleFactor = 0.3182729169726372
a1_fGauss_PVLookupTable = GetLookupTableForArray( "fNode", 1, RGBPoints=[0.22, 0.23, 0.299, 0.754, 2.95, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_fGauss_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )
DataRepresentation4.ColorArrayName = 'fNode'
DataRepresentation4.LookupTable = a1_fGauss_PVLookupTable
a1_fGauss_PVLookupTable.ScalarOpacityFunction = a1_fGauss_PiecewiseFunction

RenderView1.ViewTime = 1.0 #### Important # red is in right bottom
RenderView1.CacheKey = 1.0
RenderView1.UseCache = 1
RenderView1.ViewSize=[300,300]
WriteImage(outImgName)
| FedoraScientific/salome-paravis | src/Plugins/MEDReader/Test/testMEDReader8.py | Python | lgpl-2.1 | 4,484 | [
"ParaView"
] | e8e8681cec027aae843ec54cd816527eff9853c4b5b24490bc537eb7f6ba941c |
from pygraph.classes.digraph import digraph
from pygraph.algorithms.searching import depth_first_search
import sys
import re
# Rhetorical (discourse) relation labels: edges whose type appears in this
# list are classified as "discourse" structure by DRGParser.parse_tup_line.
RHETORICAL_RELATIONS = [
    "continuation",
    "narration",
    "result",
    "contrast",
    "parallel",
    "precondition",
    "consequence",
    "conditional",
    "alternation",
    "background",
    "elaboration",
    "explanation",
    "source",
    "attribution",
    "presupposition",
    "because",
    "since",
    "until"
]
def is_event(node):
    """Return True when *node* names an event referent: the letter 'e'
    followed by one or more digits (e.g. "e1", "e42").

    Robustness fix: an empty string now returns False instead of raising
    IndexError on node[0].
    """
    return node[:1] == "e" and node[1:].isdigit()
class DRGTuple:
    """A single edge record of a Discourse Representation Graph.

    All fields start empty and are filled in by DRGParser.parse_tup_line.
    """

    def __init__(self):
        # Endpoints and labels of the edge.
        self.from_node = ""
        self.to_node = ""
        self.edge_type = ""
        # Coarse classification: discourse / structure / surface / argument.
        self.structure = ""
        # Position of the edge in the token stream, plus its surface tokens.
        self.token_index = ""
        self.tokens = []
class DRG:
    """A Discourse Representation Graph: a list of labelled edge records
    (DRGTuple-like objects) plus node and parent bookkeeping."""

    def __init__(self):
        self.tuples = []       # all edge records, in insertion order
        self.nodes = set()     # every node seen as an endpoint
        self.parent = dict()   # child node -> parent node (last edge wins)

    def add_tuple(self, tup):
        """Register an edge record and update the node/parent indexes."""
        self.tuples.append(tup)
        self.nodes.add(tup.from_node)
        self.nodes.add(tup.to_node)
        self.parent[tup.to_node] = tup.from_node

    def root(self):
        """Return a node that never appears as an edge target.

        NOTE(review): if several parentless nodes exist, which one is
        returned depends on set iteration order.
        """
        for node in self.nodes:
            if node not in self.parent:
                return node

    # returns an ordered list of discourse units to generate from
    def discourse_units(self):
        """Build the sub-graph of discourse-structure edges and return its
        nodes in depth-first pre-order starting from box "k0"."""
        discourse_units = digraph()
        for tup in self.tuples:
            if tup.structure == "discourse" or tup.edge_type == "dominates" or "subordinates" in tup.edge_type:
                if tup.from_node not in discourse_units.nodes():
                    discourse_units.add_node(tup.from_node)
                if tup.to_node not in discourse_units.nodes():
                    discourse_units.add_node(tup.to_node)
                discourse_units.add_edge((tup.from_node, tup.to_node))
        st, order_pre, order_post = depth_first_search(discourse_units, root="k0")
        return order_pre

    def in_edges(self, node, edge_type="", structure=""):
        """Edges ending at *node*, optionally filtered by type/structure."""
        edges = []
        for tup in self.tuples:
            if tup.to_node == node and (edge_type == "" or edge_type == tup.edge_type) and (structure == "" or structure == tup.structure):
                edges.append(tup)
        return edges

    def out_edges(self, node, edge_type="", structure=""):
        """Edges starting at *node*, optionally filtered by type/structure."""
        edges = []
        for tup in self.tuples:
            if tup.from_node == node and (edge_type == "" or edge_type == tup.edge_type) and (structure == "" or structure == tup.structure):
                edges.append(tup)
        return edges

    def neighbors(self, node):
        """All direct successors of *node*, in edge insertion order."""
        neighbors = []
        for tup in self.tuples:
            if tup.from_node == node:
                neighbors.append(tup.to_node)
        return neighbors

    # returns the list of neighbors to visit, ordered by token index
    def visit_neighbors(self, node):
        """Return (token_index, node, tokens) triples for the successors of
        *node*, ordered by token index.  Referent edges out of the top box
        "k0" are skipped."""
        neighbors = []
        for tup in self.tuples:
            if tup.from_node == node:
                if not (tup.edge_type == "referent" and tup.from_node == "k0"):
                    neighbors.append((tup.token_index, tup.to_node, tup.tokens))
        # Bug fix: the sort key used to be the constant neighbors[0]
        # (key=lambda token_index: neighbors[0]), which made the sort a
        # no-op; sort by each entry's own token index instead.
        neighbors.sort(key=lambda entry: entry[0])
        return neighbors

    def make_reification_mapping(self):
        """Build the bidirectional mapping between reificated referent names
        (e.g. "k1:x1") and their bare variable names (e.g. "x1")."""
        # de-reificate variables (build a mapping)
        self.reificated = dict()
        self.dereificated = dict()
        for t in self.tuples:
            if t.edge_type == "referent":
                dereificated_var = re.sub(".*:", "", t.to_node)
                if dereificated_var not in self.reificated:
                    self.reificated[dereificated_var] = set()
                self.reificated[dereificated_var].add(t.to_node)
                self.dereificated[t.to_node] = dereificated_var
class DRGParser:
    """Parses Boxer-style .tup files into DRG objects."""

    def __init__(self):
        pass

    def parse_tup_file(self, tup_file):
        """Parse all tuples in the file at *tup_file* and return a DRG."""
        drg = DRG()
        # Fix: use a context manager so the file is closed even on error.
        with open(tup_file) as fd_tup:
            for line in fd_tup:
                # skip comment lines (starting with '%') and blank lines
                if line[0] != "%" and line != "\n":
                    tup = self.parse_tup_line(line)
                    drg.add_tuple(tup)
        drg.make_reification_mapping()
        return drg

    def parse_tup_lines(self, lines):
        """Parse an iterable of tuple lines and return a DRG."""
        drg = DRG()
        for line in lines:
            if line[0] != "%" and line != "\n":
                tup = self.parse_tup_line(line)
                drg.add_tuple(tup)
        drg.make_reification_mapping()
        return drg

    def parse_tup_line(self, line):
        """Parse one whitespace-separated tuple line into a DRGTuple.

        Expected fields: from_node edge_type to_node token_index ... tokens ...
        """
        tup = DRGTuple()
        raw = line[:-1]
        # Fix: the old unconditional .decode("utf-8") only worked on Python 2
        # byte strings; accept both bytes and already-decoded text.
        if isinstance(raw, bytes):
            raw = raw.decode("utf-8")
        fields = raw.split()
        # Edge types may carry a "-N" suffix; keep only the base name.
        tup.edge_type = fields[1].split("-")[0]
        tup.from_node = fields[0]
        tup.to_node = fields[2]
        if tup.edge_type in RHETORICAL_RELATIONS:
            tup.structure = "discourse"
        elif tup.edge_type in ["referent", "dominates"] or ("subordinates" in tup.edge_type):
            tup.structure = "structure"
        elif tup.edge_type in ["surface", "punctuation"]:
            tup.structure = "surface"
        else:
            tup.structure = "argument"
        try:
            # NOTE(review): eval on a file field is unsafe for untrusted
            # input; kept for compatibility with the existing tuple format.
            tup.token_index = eval(fields[3])
        except Exception:
            # Fix: bare 'except:' narrowed, and the Python-2-only
            # 'print line' statement replaced with the print() function.
            print(line)
            sys.exit(1)
        tup.tokens = fields[5:-1]
        return tup
| Remper/learningbyreading | src/unboxer/drg.py | Python | gpl-2.0 | 5,123 | [
"VisIt"
] | 33534cb04dfbdbc616c1cd2c641aadd7ddf66fddac4c8fb6675587b22e64bc8e |
#! /usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Data literal storing emoji names and unicode code points
Copy of https://github.com/carpedm20/emoji/blob/master/emoji/unicode_codes.py
Attributes:
EMOJI_ALIAS_UNICODE (dict): Emoji alias to unicode mapping
EMOJI_UNICODE (dict): Emoji name to unicode mapping
EMOJI_UNICODE_SET (set): Set of all emoji unicode code points
UNICODE_EMOJI (dict): Unicode to emoji name mapping
UNICODE_EMOJI_ALIAS (dict): Unicode to emoji alias mapping
"""
# Public names re-exported by ``from ... import *``.
__all__ = [
    "EMOJI_UNICODE",
    "UNICODE_EMOJI",
    "EMOJI_ALIAS_UNICODE",
    "UNICODE_EMOJI_ALIAS",
    "EMOJI_UNICODE_SET",
]
EMOJI_UNICODE = {
u':1st_place_medal:': u'\U0001F947',
u':2nd_place_medal:': u'\U0001F948',
u':3rd_place_medal:': u'\U0001F949',
u':AB_button_(blood_type):': u'\U0001F18E',
u':ATM_sign:': u'\U0001F3E7',
u':A_button_(blood_type):': u'\U0001F170',
u':A_button_(blood_type)_selector:': u'\U0001F170\U0000FE0F',
u':Afghanistan:': u'\U0001F1E6\U0001F1EB',
u':Aland_Islands:': u'\U0001F1E6\U0001F1FD',
u':Albania:': u'\U0001F1E6\U0001F1F1',
u':Algeria:': u'\U0001F1E9\U0001F1FF',
u':American_Samoa:': u'\U0001F1E6\U0001F1F8',
u':Andorra:': u'\U0001F1E6\U0001F1E9',
u':Angola:': u'\U0001F1E6\U0001F1F4',
u':Anguilla:': u'\U0001F1E6\U0001F1EE',
u':Antarctica:': u'\U0001F1E6\U0001F1F6',
u':Antigua_&_Barbuda:': u'\U0001F1E6\U0001F1EC',
u':Aquarius:': u'\U00002652',
u':Argentina:': u'\U0001F1E6\U0001F1F7',
u':Aries:': u'\U00002648',
u':Armenia:': u'\U0001F1E6\U0001F1F2',
u':Aruba:': u'\U0001F1E6\U0001F1FC',
u':Ascension_Island:': u'\U0001F1E6\U0001F1E8',
u':Australia:': u'\U0001F1E6\U0001F1FA',
u':Austria:': u'\U0001F1E6\U0001F1F9',
u':Azerbaijan:': u'\U0001F1E6\U0001F1FF',
u':BACK_arrow:': u'\U0001F519',
u':B_button_(blood_type):': u'\U0001F171',
u':B_button_(blood_type)_selector:': u'\U0001F171\U0000FE0F',
u':Bahamas:': u'\U0001F1E7\U0001F1F8',
u':Bahrain:': u'\U0001F1E7\U0001F1ED',
u':Bangladesh:': u'\U0001F1E7\U0001F1E9',
u':Barbados:': u'\U0001F1E7\U0001F1E7',
u':Belarus:': u'\U0001F1E7\U0001F1FE',
u':Belgium:': u'\U0001F1E7\U0001F1EA',
u':Belize:': u'\U0001F1E7\U0001F1FF',
u':Benin:': u'\U0001F1E7\U0001F1EF',
u':Bermuda:': u'\U0001F1E7\U0001F1F2',
u':Bhutan:': u'\U0001F1E7\U0001F1F9',
u':Bolivia:': u'\U0001F1E7\U0001F1F4',
u':Bosnia_&_Herzegovina:': u'\U0001F1E7\U0001F1E6',
u':Botswana:': u'\U0001F1E7\U0001F1FC',
u':Bouvet_Island:': u'\U0001F1E7\U0001F1FB',
u':Brazil:': u'\U0001F1E7\U0001F1F7',
u':British_Indian_Ocean_Territory:': u'\U0001F1EE\U0001F1F4',
u':British_Virgin_Islands:': u'\U0001F1FB\U0001F1EC',
u':Brunei:': u'\U0001F1E7\U0001F1F3',
u':Bulgaria:': u'\U0001F1E7\U0001F1EC',
u':Burkina_Faso:': u'\U0001F1E7\U0001F1EB',
u':Burundi:': u'\U0001F1E7\U0001F1EE',
u':CL_button:': u'\U0001F191',
u':COOL_button:': u'\U0001F192',
u':Cambodia:': u'\U0001F1F0\U0001F1ED',
u':Cameroon:': u'\U0001F1E8\U0001F1F2',
u':Canada:': u'\U0001F1E8\U0001F1E6',
u':Canary_Islands:': u'\U0001F1EE\U0001F1E8',
u':Cancer:': u'\U0000264B',
u':Cape_Verde:': u'\U0001F1E8\U0001F1FB',
u':Capricorn:': u'\U00002651',
u':Caribbean_Netherlands:': u'\U0001F1E7\U0001F1F6',
u':Cayman_Islands:': u'\U0001F1F0\U0001F1FE',
u':Central_African_Republic:': u'\U0001F1E8\U0001F1EB',
u':Ceuta_&_Melilla:': u'\U0001F1EA\U0001F1E6',
u':Chad:': u'\U0001F1F9\U0001F1E9',
u':Chile:': u'\U0001F1E8\U0001F1F1',
u':China:': u'\U0001F1E8\U0001F1F3',
u':Christmas_Island:': u'\U0001F1E8\U0001F1FD',
u':Christmas_tree:': u'\U0001F384',
u':Clipperton_Island:': u'\U0001F1E8\U0001F1F5',
u':Cocos_(Keeling)_Islands:': u'\U0001F1E8\U0001F1E8',
u':Colombia:': u'\U0001F1E8\U0001F1F4',
u':Comoros:': u'\U0001F1F0\U0001F1F2',
u':Congo_-_Brazzaville:': u'\U0001F1E8\U0001F1EC',
u':Congo_-_Kinshasa:': u'\U0001F1E8\U0001F1E9',
u':Cook_Islands:': u'\U0001F1E8\U0001F1F0',
u':Costa_Rica:': u'\U0001F1E8\U0001F1F7',
u':Croatia:': u'\U0001F1ED\U0001F1F7',
u':Cuba:': u'\U0001F1E8\U0001F1FA',
u':Cura\xe7ao:': u'\U0001F1E8\U0001F1FC',
u':Cyprus:': u'\U0001F1E8\U0001F1FE',
u':Czechia:': u'\U0001F1E8\U0001F1FF',
u':C\xf4te_d\u2019Ivoire:': u'\U0001F1E8\U0001F1EE',
u':Denmark:': u'\U0001F1E9\U0001F1F0',
u':Diego_Garcia:': u'\U0001F1E9\U0001F1EC',
u':Djibouti:': u'\U0001F1E9\U0001F1EF',
u':Dominica:': u'\U0001F1E9\U0001F1F2',
u':Dominican_Republic:': u'\U0001F1E9\U0001F1F4',
u':END_arrow:': u'\U0001F51A',
u':Ecuador:': u'\U0001F1EA\U0001F1E8',
u':Egypt:': u'\U0001F1EA\U0001F1EC',
u':El_Salvador:': u'\U0001F1F8\U0001F1FB',
u':England:': u'\U0001F3F4\U000E0067\U000E0062\U000E0065\U000E006E\U000E0067\U000E007F',
u':Equatorial_Guinea:': u'\U0001F1EC\U0001F1F6',
u':Eritrea:': u'\U0001F1EA\U0001F1F7',
u':Estonia:': u'\U0001F1EA\U0001F1EA',
u':Ethiopia:': u'\U0001F1EA\U0001F1F9',
u':European_Union:': u'\U0001F1EA\U0001F1FA',
u':FREE_button:': u'\U0001F193',
u':Falkland_Islands:': u'\U0001F1EB\U0001F1F0',
u':Faroe_Islands:': u'\U0001F1EB\U0001F1F4',
u':Fiji:': u'\U0001F1EB\U0001F1EF',
u':Finland:': u'\U0001F1EB\U0001F1EE',
u':France:': u'\U0001F1EB\U0001F1F7',
u':French_Guiana:': u'\U0001F1EC\U0001F1EB',
u':French_Polynesia:': u'\U0001F1F5\U0001F1EB',
u':French_Southern_Territories:': u'\U0001F1F9\U0001F1EB',
u':Gabon:': u'\U0001F1EC\U0001F1E6',
u':Gambia:': u'\U0001F1EC\U0001F1F2',
u':Gemini:': u'\U0000264A',
u':Georgia:': u'\U0001F1EC\U0001F1EA',
u':Germany:': u'\U0001F1E9\U0001F1EA',
u':Ghana:': u'\U0001F1EC\U0001F1ED',
u':Gibraltar:': u'\U0001F1EC\U0001F1EE',
u':Greece:': u'\U0001F1EC\U0001F1F7',
u':Greenland:': u'\U0001F1EC\U0001F1F1',
u':Grenada:': u'\U0001F1EC\U0001F1E9',
u':Guadeloupe:': u'\U0001F1EC\U0001F1F5',
u':Guam:': u'\U0001F1EC\U0001F1FA',
u':Guatemala:': u'\U0001F1EC\U0001F1F9',
u':Guernsey:': u'\U0001F1EC\U0001F1EC',
u':Guinea-Bissau:': u'\U0001F1EC\U0001F1FC',
u':Guinea:': u'\U0001F1EC\U0001F1F3',
u':Guyana:': u'\U0001F1EC\U0001F1FE',
u':Haiti:': u'\U0001F1ED\U0001F1F9',
u':Heard_&_McDonald_Islands:': u'\U0001F1ED\U0001F1F2',
u':Honduras:': u'\U0001F1ED\U0001F1F3',
u':Hong_Kong_SAR_China:': u'\U0001F1ED\U0001F1F0',
u':Hungary:': u'\U0001F1ED\U0001F1FA',
u':ID_button:': u'\U0001F194',
u':Iceland:': u'\U0001F1EE\U0001F1F8',
u':India:': u'\U0001F1EE\U0001F1F3',
u':Indonesia:': u'\U0001F1EE\U0001F1E9',
u':Iran:': u'\U0001F1EE\U0001F1F7',
u':Iraq:': u'\U0001F1EE\U0001F1F6',
u':Ireland:': u'\U0001F1EE\U0001F1EA',
u':Isle_of_Man:': u'\U0001F1EE\U0001F1F2',
u':Israel:': u'\U0001F1EE\U0001F1F1',
u':Italy:': u'\U0001F1EE\U0001F1F9',
u':Jamaica:': u'\U0001F1EF\U0001F1F2',
u':Japan:': u'\U0001F1EF\U0001F1F5',
u':Japanese_acceptable_button:': u'\U0001F251',
u':Japanese_application_button:': u'\U0001F238',
u':Japanese_bargain_button:': u'\U0001F250',
u':Japanese_castle:': u'\U0001F3EF',
u':Japanese_congratulations_button:': u'\U00003297',
u':Japanese_discount_button:': u'\U0001F239',
u':Japanese_dolls:': u'\U0001F38E',
u':Japanese_free_of_charge_button:': u'\U0001F21A',
u':Japanese_here_button:': u'\U0001F201',
u':Japanese_monthly_amount_button:': u'\U0001F237',
u':Japanese_no_vacancy_button:': u'\U0001F235',
u':Japanese_not_free_of_charge_button:': u'\U0001F236',
u':Japanese_open_for_business_button:': u'\U0001F23A',
u':Japanese_passing_grade_button:': u'\U0001F234',
u':Japanese_post_office:': u'\U0001F3E3',
u':Japanese_prohibited_button:': u'\U0001F232',
u':Japanese_reserved_button:': u'\U0001F22F',
u':Japanese_secret_button:': u'\U00003299',
u':Japanese_service_charge_button:': u'\U0001F202',
u':Japanese_symbol_for_beginner:': u'\U0001F530',
u':Japanese_vacancy_button:': u'\U0001F233',
u':Japanese_congratulations_button_selector:': u'\U00003297\U0000FE0F',
u':Japanese_monthly_amount_button_selector:': u'\U0001F237\U0000FE0F',
u':Japanese_secret_button_selector:': u'\U00003299\U0000FE0F',
u':Japanese_service_charge_button_selector:': u'\U0001F202\U0000FE0F',
u':Jersey:': u'\U0001F1EF\U0001F1EA',
u':Jordan:': u'\U0001F1EF\U0001F1F4',
u':Kazakhstan:': u'\U0001F1F0\U0001F1FF',
u':Kenya:': u'\U0001F1F0\U0001F1EA',
u':Kiribati:': u'\U0001F1F0\U0001F1EE',
u':Kosovo:': u'\U0001F1FD\U0001F1F0',
u':Kuwait:': u'\U0001F1F0\U0001F1FC',
u':Kyrgyzstan:': u'\U0001F1F0\U0001F1EC',
u':Laos:': u'\U0001F1F1\U0001F1E6',
u':Latvia:': u'\U0001F1F1\U0001F1FB',
u':Lebanon:': u'\U0001F1F1\U0001F1E7',
u':Leo:': u'\U0000264C',
u':Lesotho:': u'\U0001F1F1\U0001F1F8',
u':Liberia:': u'\U0001F1F1\U0001F1F7',
u':Libra:': u'\U0000264E',
u':Libya:': u'\U0001F1F1\U0001F1FE',
u':Liechtenstein:': u'\U0001F1F1\U0001F1EE',
u':Lithuania:': u'\U0001F1F1\U0001F1F9',
u':Luxembourg:': u'\U0001F1F1\U0001F1FA',
u':Macau_SAR_China:': u'\U0001F1F2\U0001F1F4',
u':Macedonia:': u'\U0001F1F2\U0001F1F0',
u':Madagascar:': u'\U0001F1F2\U0001F1EC',
u':Malawi:': u'\U0001F1F2\U0001F1FC',
u':Malaysia:': u'\U0001F1F2\U0001F1FE',
u':Maldives:': u'\U0001F1F2\U0001F1FB',
u':Mali:': u'\U0001F1F2\U0001F1F1',
u':Malta:': u'\U0001F1F2\U0001F1F9',
u':Marshall_Islands:': u'\U0001F1F2\U0001F1ED',
u':Martinique:': u'\U0001F1F2\U0001F1F6',
u':Mauritania:': u'\U0001F1F2\U0001F1F7',
u':Mauritius:': u'\U0001F1F2\U0001F1FA',
u':Mayotte:': u'\U0001F1FE\U0001F1F9',
u':Mexico:': u'\U0001F1F2\U0001F1FD',
u':Micronesia:': u'\U0001F1EB\U0001F1F2',
u':Moldova:': u'\U0001F1F2\U0001F1E9',
u':Monaco:': u'\U0001F1F2\U0001F1E8',
u':Mongolia:': u'\U0001F1F2\U0001F1F3',
u':Montenegro:': u'\U0001F1F2\U0001F1EA',
u':Montserrat:': u'\U0001F1F2\U0001F1F8',
u':Morocco:': u'\U0001F1F2\U0001F1E6',
u':Mozambique:': u'\U0001F1F2\U0001F1FF',
u':Mrs._Claus:': u'\U0001F936',
u':Mrs._Claus_dark_skin_tone:': u'\U0001F936\U0001F3FF',
u':Mrs._Claus_light_skin_tone:': u'\U0001F936\U0001F3FB',
u':Mrs._Claus_medium-dark_skin_tone:': u'\U0001F936\U0001F3FE',
u':Mrs._Claus_medium-light_skin_tone:': u'\U0001F936\U0001F3FC',
u':Mrs._Claus_medium_skin_tone:': u'\U0001F936\U0001F3FD',
u':Myanmar_(Burma):': u'\U0001F1F2\U0001F1F2',
u':NEW_button:': u'\U0001F195',
u':NG_button:': u'\U0001F196',
u':Namibia:': u'\U0001F1F3\U0001F1E6',
u':Nauru:': u'\U0001F1F3\U0001F1F7',
u':Nepal:': u'\U0001F1F3\U0001F1F5',
u':Netherlands:': u'\U0001F1F3\U0001F1F1',
u':New_Caledonia:': u'\U0001F1F3\U0001F1E8',
u':New_Zealand:': u'\U0001F1F3\U0001F1FF',
u':Nicaragua:': u'\U0001F1F3\U0001F1EE',
u':Niger:': u'\U0001F1F3\U0001F1EA',
u':Nigeria:': u'\U0001F1F3\U0001F1EC',
u':Niue:': u'\U0001F1F3\U0001F1FA',
u':Norfolk_Island:': u'\U0001F1F3\U0001F1EB',
u':North_Korea:': u'\U0001F1F0\U0001F1F5',
u':Northern_Mariana_Islands:': u'\U0001F1F2\U0001F1F5',
u':Norway:': u'\U0001F1F3\U0001F1F4',
u':OK_button:': u'\U0001F197',
u':OK_hand:': u'\U0001F44C',
u':OK_hand_dark_skin_tone:': u'\U0001F44C\U0001F3FF',
u':OK_hand_light_skin_tone:': u'\U0001F44C\U0001F3FB',
u':OK_hand_medium-dark_skin_tone:': u'\U0001F44C\U0001F3FE',
u':OK_hand_medium-light_skin_tone:': u'\U0001F44C\U0001F3FC',
u':OK_hand_medium_skin_tone:': u'\U0001F44C\U0001F3FD',
u':ON!_arrow:': u'\U0001F51B',
u':O_button_(blood_type):': u'\U0001F17E',
u':O_button_(blood_type)_selector:': u'\U0001F17E\U0000FE0F',
u':Oman:': u'\U0001F1F4\U0001F1F2',
u':Ophiuchus:': u'\U000026CE',
u':P_button:': u'\U0001F17F',
u':P_button_selector:': u'\U0001F17F\U0000FE0F',
u':Pakistan:': u'\U0001F1F5\U0001F1F0',
u':Palau:': u'\U0001F1F5\U0001F1FC',
u':Palestinian_Territories:': u'\U0001F1F5\U0001F1F8',
u':Panama:': u'\U0001F1F5\U0001F1E6',
u':Papua_New_Guinea:': u'\U0001F1F5\U0001F1EC',
u':Paraguay:': u'\U0001F1F5\U0001F1FE',
u':Peru:': u'\U0001F1F5\U0001F1EA',
u':Philippines:': u'\U0001F1F5\U0001F1ED',
u':Pisces:': u'\U00002653',
u':Pitcairn_Islands:': u'\U0001F1F5\U0001F1F3',
u':Poland:': u'\U0001F1F5\U0001F1F1',
u':Portugal:': u'\U0001F1F5\U0001F1F9',
u':Puerto_Rico:': u'\U0001F1F5\U0001F1F7',
u':Qatar:': u'\U0001F1F6\U0001F1E6',
u':Romania:': u'\U0001F1F7\U0001F1F4',
u':Russia:': u'\U0001F1F7\U0001F1FA',
u':Rwanda:': u'\U0001F1F7\U0001F1FC',
u':R\xe9union:': u'\U0001F1F7\U0001F1EA',
u':SOON_arrow:': u'\U0001F51C',
u':SOS_button:': u'\U0001F198',
u':Sagittarius:': u'\U00002650',
u':Samoa:': u'\U0001F1FC\U0001F1F8',
u':San_Marino:': u'\U0001F1F8\U0001F1F2',
u':Santa_Claus:': u'\U0001F385',
u':Santa_Claus_dark_skin_tone:': u'\U0001F385\U0001F3FF',
u':Santa_Claus_light_skin_tone:': u'\U0001F385\U0001F3FB',
u':Santa_Claus_medium-dark_skin_tone:': u'\U0001F385\U0001F3FE',
u':Santa_Claus_medium-light_skin_tone:': u'\U0001F385\U0001F3FC',
u':Santa_Claus_medium_skin_tone:': u'\U0001F385\U0001F3FD',
u':Saudi_Arabia:': u'\U0001F1F8\U0001F1E6',
u':Scorpio:': u'\U0000264F',
u':Scotland:': u'\U0001F3F4\U000E0067\U000E0062\U000E0073\U000E0063\U000E0074\U000E007F',
u':Senegal:': u'\U0001F1F8\U0001F1F3',
u':Serbia:': u'\U0001F1F7\U0001F1F8',
u':Seychelles:': u'\U0001F1F8\U0001F1E8',
u':Sierra_Leone:': u'\U0001F1F8\U0001F1F1',
u':Singapore:': u'\U0001F1F8\U0001F1EC',
u':Sint_Maarten:': u'\U0001F1F8\U0001F1FD',
u':Slovakia:': u'\U0001F1F8\U0001F1F0',
u':Slovenia:': u'\U0001F1F8\U0001F1EE',
u':Solomon_Islands:': u'\U0001F1F8\U0001F1E7',
u':Somalia:': u'\U0001F1F8\U0001F1F4',
u':South_Africa:': u'\U0001F1FF\U0001F1E6',
u':South_Georgia_&_South_Sandwich_Islands:': u'\U0001F1EC\U0001F1F8',
u':South_Korea:': u'\U0001F1F0\U0001F1F7',
u':South_Sudan:': u'\U0001F1F8\U0001F1F8',
u':Spain:': u'\U0001F1EA\U0001F1F8',
u':Sri_Lanka:': u'\U0001F1F1\U0001F1F0',
u':St._Barth\xe9lemy:': u'\U0001F1E7\U0001F1F1',
u':St._Helena:': u'\U0001F1F8\U0001F1ED',
u':St._Kitts_&_Nevis:': u'\U0001F1F0\U0001F1F3',
u':St._Lucia:': u'\U0001F1F1\U0001F1E8',
u':St._Martin:': u'\U0001F1F2\U0001F1EB',
u':St._Pierre_&_Miquelon:': u'\U0001F1F5\U0001F1F2',
u':St._Vincent_&_Grenadines:': u'\U0001F1FB\U0001F1E8',
u':Statue_of_Liberty:': u'\U0001F5FD',
u':Sudan:': u'\U0001F1F8\U0001F1E9',
u':Suriname:': u'\U0001F1F8\U0001F1F7',
u':Svalbard_&_Jan_Mayen:': u'\U0001F1F8\U0001F1EF',
u':Swaziland:': u'\U0001F1F8\U0001F1FF',
u':Sweden:': u'\U0001F1F8\U0001F1EA',
u':Switzerland:': u'\U0001F1E8\U0001F1ED',
u':Syria:': u'\U0001F1F8\U0001F1FE',
u':S\xe3o_Tom\xe9_&_Pr\xedncipe:': u'\U0001F1F8\U0001F1F9',
u':T-Rex:': u'\U0001F996',
u':TOP_arrow:': u'\U0001F51D',
u':Taiwan:': u'\U0001F1F9\U0001F1FC',
u':Tajikistan:': u'\U0001F1F9\U0001F1EF',
u':Tanzania:': u'\U0001F1F9\U0001F1FF',
u':Taurus:': u'\U00002649',
u':Thailand:': u'\U0001F1F9\U0001F1ED',
u':Timor-Leste:': u'\U0001F1F9\U0001F1F1',
u':Togo:': u'\U0001F1F9\U0001F1EC',
u':Tokelau:': u'\U0001F1F9\U0001F1F0',
u':Tokyo_tower:': u'\U0001F5FC',
u':Tonga:': u'\U0001F1F9\U0001F1F4',
u':Trinidad_&_Tobago:': u'\U0001F1F9\U0001F1F9',
u':Tristan_da_Cunha:': u'\U0001F1F9\U0001F1E6',
u':Tunisia:': u'\U0001F1F9\U0001F1F3',
u':Turkey:': u'\U0001F1F9\U0001F1F7',
u':Turkmenistan:': u'\U0001F1F9\U0001F1F2',
u':Turks_&_Caicos_Islands:': u'\U0001F1F9\U0001F1E8',
u':Tuvalu:': u'\U0001F1F9\U0001F1FB',
u':U.S._Outlying_Islands:': u'\U0001F1FA\U0001F1F2',
u':U.S._Virgin_Islands:': u'\U0001F1FB\U0001F1EE',
u':UP!_button:': u'\U0001F199',
u':Uganda:': u'\U0001F1FA\U0001F1EC',
u':Ukraine:': u'\U0001F1FA\U0001F1E6',
u':United_Arab_Emirates:': u'\U0001F1E6\U0001F1EA',
u':United_Kingdom:': u'\U0001F1EC\U0001F1E7',
u':United_Nations:': u'\U0001F1FA\U0001F1F3',
u':United_States:': u'\U0001F1FA\U0001F1F8',
u':Uruguay:': u'\U0001F1FA\U0001F1FE',
u':Uzbekistan:': u'\U0001F1FA\U0001F1FF',
u':VS_button:': u'\U0001F19A',
u':Vanuatu:': u'\U0001F1FB\U0001F1FA',
u':Vatican_City:': u'\U0001F1FB\U0001F1E6',
u':Venezuela:': u'\U0001F1FB\U0001F1EA',
u':Vietnam:': u'\U0001F1FB\U0001F1F3',
u':Virgo:': u'\U0000264D',
u':Wales:': u'\U0001F3F4\U000E0067\U000E0062\U000E0077\U000E006C\U000E0073\U000E007F',
u':Wallis_&_Futuna:': u'\U0001F1FC\U0001F1EB',
u':Western_Sahara:': u'\U0001F1EA\U0001F1ED',
u':Yemen:': u'\U0001F1FE\U0001F1EA',
u':Zambia:': u'\U0001F1FF\U0001F1F2',
u':Zimbabwe:': u'\U0001F1FF\U0001F1FC',
u':abacus:': u'\U0001F9EE',
u':adhesive_bandage:': u'\U0001FA79',
u':admission_tickets:': u'\U0001F39F',
u':admission_tickets_selector:': u'\U0001F39F\U0000FE0F',
u':adult:': u'\U0001F9D1',
u':adult_dark_skin_tone:': u'\U0001F9D1\U0001F3FF',
u':adult_light_skin_tone:': u'\U0001F9D1\U0001F3FB',
u':adult_medium-dark_skin_tone:': u'\U0001F9D1\U0001F3FE',
u':adult_medium-light_skin_tone:': u'\U0001F9D1\U0001F3FC',
u':adult_medium_skin_tone:': u'\U0001F9D1\U0001F3FD',
u':aerial_tramway:': u'\U0001F6A1',
u':airplane:': u'\U00002708',
u':airplane_arrival:': u'\U0001F6EC',
u':airplane_departure:': u'\U0001F6EB',
u':airplane_selector:': u'\U00002708\U0000FE0F',
u':alarm_clock:': u'\U000023F0',
u':alembic:': u'\U00002697',
u':alembic_selector:': u'\U00002697\U0000FE0F',
u':alien:': u'\U0001F47D',
u':alien_monster:': u'\U0001F47E',
u':ambulance:': u'\U0001F691',
u':american_football:': u'\U0001F3C8',
u':amphora:': u'\U0001F3FA',
u':anchor:': u'\U00002693',
u':anger_symbol:': u'\U0001F4A2',
u':angry_face:': u'\U0001F620',
u':angry_face_with_horns:': u'\U0001F47F',
u':anguished_face:': u'\U0001F627',
u':ant:': u'\U0001F41C',
u':antenna_bars:': u'\U0001F4F6',
u':anxious_face_with_sweat:': u'\U0001F630',
u':articulated_lorry:': u'\U0001F69B',
u':artist_palette:': u'\U0001F3A8',
u':astonished_face:': u'\U0001F632',
u':atom_symbol:': u'\U0000269B',
u':atom_symbol_selector:': u'\U0000269B\U0000FE0F',
u':auto_rickshaw:': u'\U0001F6FA',
u':automobile:': u'\U0001F697',
u':avocado:': u'\U0001F951',
u':axe:': u'\U0001FA93',
u':baby:': u'\U0001F476',
u':baby_angel:': u'\U0001F47C',
u':baby_angel_dark_skin_tone:': u'\U0001F47C\U0001F3FF',
u':baby_angel_light_skin_tone:': u'\U0001F47C\U0001F3FB',
u':baby_angel_medium-dark_skin_tone:': u'\U0001F47C\U0001F3FE',
u':baby_angel_medium-light_skin_tone:': u'\U0001F47C\U0001F3FC',
u':baby_angel_medium_skin_tone:': u'\U0001F47C\U0001F3FD',
u':baby_bottle:': u'\U0001F37C',
u':baby_chick:': u'\U0001F424',
u':baby_dark_skin_tone:': u'\U0001F476\U0001F3FF',
u':baby_light_skin_tone:': u'\U0001F476\U0001F3FB',
u':baby_medium-dark_skin_tone:': u'\U0001F476\U0001F3FE',
u':baby_medium-light_skin_tone:': u'\U0001F476\U0001F3FC',
u':baby_medium_skin_tone:': u'\U0001F476\U0001F3FD',
u':baby_symbol:': u'\U0001F6BC',
u':backhand_index_pointing_down:': u'\U0001F447',
u':backhand_index_pointing_down_dark_skin_tone:': u'\U0001F447\U0001F3FF',
u':backhand_index_pointing_down_light_skin_tone:': u'\U0001F447\U0001F3FB',
u':backhand_index_pointing_down_medium-dark_skin_tone:': u'\U0001F447\U0001F3FE',
u':backhand_index_pointing_down_medium-light_skin_tone:': u'\U0001F447\U0001F3FC',
u':backhand_index_pointing_down_medium_skin_tone:': u'\U0001F447\U0001F3FD',
u':backhand_index_pointing_left:': u'\U0001F448',
u':backhand_index_pointing_left_dark_skin_tone:': u'\U0001F448\U0001F3FF',
u':backhand_index_pointing_left_light_skin_tone:': u'\U0001F448\U0001F3FB',
u':backhand_index_pointing_left_medium-dark_skin_tone:': u'\U0001F448\U0001F3FE',
u':backhand_index_pointing_left_medium-light_skin_tone:': u'\U0001F448\U0001F3FC',
u':backhand_index_pointing_left_medium_skin_tone:': u'\U0001F448\U0001F3FD',
u':backhand_index_pointing_right:': u'\U0001F449',
u':backhand_index_pointing_right_dark_skin_tone:': u'\U0001F449\U0001F3FF',
u':backhand_index_pointing_right_light_skin_tone:': u'\U0001F449\U0001F3FB',
u':backhand_index_pointing_right_medium-dark_skin_tone:': u'\U0001F449\U0001F3FE',
u':backhand_index_pointing_right_medium-light_skin_tone:': u'\U0001F449\U0001F3FC',
u':backhand_index_pointing_right_medium_skin_tone:': u'\U0001F449\U0001F3FD',
u':backhand_index_pointing_up:': u'\U0001F446',
u':backhand_index_pointing_up_dark_skin_tone:': u'\U0001F446\U0001F3FF',
u':backhand_index_pointing_up_light_skin_tone:': u'\U0001F446\U0001F3FB',
u':backhand_index_pointing_up_medium-dark_skin_tone:': u'\U0001F446\U0001F3FE',
u':backhand_index_pointing_up_medium-light_skin_tone:': u'\U0001F446\U0001F3FC',
u':backhand_index_pointing_up_medium_skin_tone:': u'\U0001F446\U0001F3FD',
u':bacon:': u'\U0001F953',
u':badger:': u'\U0001F9A1',
u':badminton:': u'\U0001F3F8',
u':bagel:': u'\U0001F96F',
u':baggage_claim:': u'\U0001F6C4',
u':baguette_bread:': u'\U0001F956',
u':balance_scale:': u'\U00002696',
u':balance_scale_selector:': u'\U00002696\U0000FE0F',
u':bald:': u'\U0001F9B2',
u':bald_man:': u'\U0001F468\U0000200D\U0001F9B2',
u':bald_woman:': u'\U0001F469\U0000200D\U0001F9B2',
u':ballet_shoes:': u'\U0001FA70',
u':balloon:': u'\U0001F388',
u':ballot_box_with_ballot:': u'\U0001F5F3',
u':ballot_box_with_ballot_selector:': u'\U0001F5F3\U0000FE0F',
u':ballot_box_with_check:': u'\U00002611',
u':banana:': u'\U0001F34C',
u':banjo:': u'\U0001FA95',
u':bank:': u'\U0001F3E6',
u':bar_chart:': u'\U0001F4CA',
u':barber_pole:': u'\U0001F488',
u':baseball:': u'\U000026BE',
u':basket:': u'\U0001F9FA',
u':basketball:': u'\U0001F3C0',
u':bat:': u'\U0001F987',
u':bathtub:': u'\U0001F6C1',
u':battery:': u'\U0001F50B',
u':beach_with_umbrella:': u'\U0001F3D6',
u':beach_with_umbrella_selector:': u'\U0001F3D6\U0000FE0F',
u':beaming_face_with_smiling_eyes:': u'\U0001F601',
u':bear_face:': u'\U0001F43B',
u':bearded_person:': u'\U0001F9D4',
u':bearded_person_dark_skin_tone:': u'\U0001F9D4\U0001F3FF',
u':bearded_person_light_skin_tone:': u'\U0001F9D4\U0001F3FB',
u':bearded_person_medium-dark_skin_tone:': u'\U0001F9D4\U0001F3FE',
u':bearded_person_medium-light_skin_tone:': u'\U0001F9D4\U0001F3FC',
u':bearded_person_medium_skin_tone:': u'\U0001F9D4\U0001F3FD',
u':beating_heart:': u'\U0001F493',
u':bed:': u'\U0001F6CF',
u':bed_selector:': u'\U0001F6CF\U0000FE0F',
u':beer_mug:': u'\U0001F37A',
u':bell:': u'\U0001F514',
u':bell_with_slash:': u'\U0001F515',
u':bellhop_bell:': u'\U0001F6CE',
u':bellhop_bell_selector:': u'\U0001F6CE\U0000FE0F',
u':bento_box:': u'\U0001F371',
u':beverage_box:': u'\U0001F9C3',
u':bicycle:': u'\U0001F6B2',
u':bikini:': u'\U0001F459',
u':billed_cap:': u'\U0001F9E2',
u':biohazard:': u'\U00002623',
u':biohazard_selector:': u'\U00002623\U0000FE0F',
u':bird:': u'\U0001F426',
u':birthday_cake:': u'\U0001F382',
u':black_circle:': u'\U000026AB',
u':black_flag:': u'\U0001F3F4',
u':black_heart:': u'\U0001F5A4',
u':black_large_square:': u'\U00002B1B',
u':black_medium-small_square:': u'\U000025FE',
u':black_medium_square:': u'\U000025FC',
u':black_medium_square_selector:': u'\U000025FC\U0000FE0F',
u':black_nib:': u'\U00002712',
u':black_nib_selector:': u'\U00002712\U0000FE0F',
u':black_small_square:': u'\U000025AA',
u':black_small_square_selector:': u'\U000025AA\U0000FE0F',
u':black_square_button:': u'\U0001F532',
u':blond-haired_man:': u'\U0001F471\U0000200D\U00002642\U0000FE0F',
u':blond-haired_man_dark_skin_tone:': u'\U0001F471\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':blond-haired_man_light_skin_tone:': u'\U0001F471\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':blond-haired_man_medium-dark_skin_tone:': u'\U0001F471\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':blond-haired_man_medium-light_skin_tone:': u'\U0001F471\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':blond-haired_man_medium_skin_tone:': u'\U0001F471\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':blond-haired_person:': u'\U0001F471',
u':blond-haired_person_dark_skin_tone:': u'\U0001F471\U0001F3FF',
u':blond-haired_person_light_skin_tone:': u'\U0001F471\U0001F3FB',
u':blond-haired_person_medium-dark_skin_tone:': u'\U0001F471\U0001F3FE',
u':blond-haired_person_medium-light_skin_tone:': u'\U0001F471\U0001F3FC',
u':blond-haired_person_medium_skin_tone:': u'\U0001F471\U0001F3FD',
u':blond-haired_woman:': u'\U0001F471\U0000200D\U00002640\U0000FE0F',
u':blond-haired_woman_dark_skin_tone:': u'\U0001F471\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':blond-haired_woman_light_skin_tone:': u'\U0001F471\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':blond-haired_woman_medium-dark_skin_tone:': u'\U0001F471\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':blond-haired_woman_medium-light_skin_tone:': u'\U0001F471\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':blond-haired_woman_medium_skin_tone:': u'\U0001F471\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':blossom:': u'\U0001F33C',
u':blowfish:': u'\U0001F421',
u':blue_book:': u'\U0001F4D8',
u':blue_circle:': u'\U0001F535',
u':blue_heart:': u'\U0001F499',
u':blue_square:': u'\U0001F7E6',
u':boar:': u'\U0001F417',
u':bomb:': u'\U0001F4A3',
u':bone:': u'\U0001F9B4',
u':bookmark:': u'\U0001F516',
u':bookmark_tabs:': u'\U0001F4D1',
u':books:': u'\U0001F4DA',
u':bottle_with_popping_cork:': u'\U0001F37E',
u':bouquet:': u'\U0001F490',
u':bow_and_arrow:': u'\U0001F3F9',
u':bowl_with_spoon:': u'\U0001F963',
u':bowling:': u'\U0001F3B3',
u':boxing_glove:': u'\U0001F94A',
u':boy:': u'\U0001F466',
u':boy_dark_skin_tone:': u'\U0001F466\U0001F3FF',
u':boy_light_skin_tone:': u'\U0001F466\U0001F3FB',
u':boy_medium-dark_skin_tone:': u'\U0001F466\U0001F3FE',
u':boy_medium-light_skin_tone:': u'\U0001F466\U0001F3FC',
u':boy_medium_skin_tone:': u'\U0001F466\U0001F3FD',
u':brain:': u'\U0001F9E0',
u':bread:': u'\U0001F35E',
u':breast-feeding:': u'\U0001F931',
u':breast-feeding_dark_skin_tone:': u'\U0001F931\U0001F3FF',
u':breast-feeding_light_skin_tone:': u'\U0001F931\U0001F3FB',
u':breast-feeding_medium-dark_skin_tone:': u'\U0001F931\U0001F3FE',
u':breast-feeding_medium-light_skin_tone:': u'\U0001F931\U0001F3FC',
u':breast-feeding_medium_skin_tone:': u'\U0001F931\U0001F3FD',
u':brick:': u'\U0001F9F1',
u':bride_with_veil:': u'\U0001F470',
u':bride_with_veil_dark_skin_tone:': u'\U0001F470\U0001F3FF',
u':bride_with_veil_light_skin_tone:': u'\U0001F470\U0001F3FB',
u':bride_with_veil_medium-dark_skin_tone:': u'\U0001F470\U0001F3FE',
u':bride_with_veil_medium-light_skin_tone:': u'\U0001F470\U0001F3FC',
u':bride_with_veil_medium_skin_tone:': u'\U0001F470\U0001F3FD',
u':bridge_at_night:': u'\U0001F309',
u':briefcase:': u'\U0001F4BC',
u':briefs:': u'\U0001FA72',
u':bright_button:': u'\U0001F506',
u':broccoli:': u'\U0001F966',
u':broken_heart:': u'\U0001F494',
u':broom:': u'\U0001F9F9',
u':brown_circle:': u'\U0001F7E4',
u':brown_heart:': u'\U0001F90E',
u':brown_square:': u'\U0001F7EB',
u':bug:': u'\U0001F41B',
u':building_construction:': u'\U0001F3D7',
u':building_construction_selector:': u'\U0001F3D7\U0000FE0F',
u':bullet_train:': u'\U0001F685',
u':burrito:': u'\U0001F32F',
u':bus:': u'\U0001F68C',
u':bus_stop:': u'\U0001F68F',
u':bust_in_silhouette:': u'\U0001F464',
u':busts_in_silhouette:': u'\U0001F465',
u':butter:': u'\U0001F9C8',
u':butterfly:': u'\U0001F98B',
u':cactus:': u'\U0001F335',
u':calendar:': u'\U0001F4C5',
u':call_me_hand:': u'\U0001F919',
u':call_me_hand_dark_skin_tone:': u'\U0001F919\U0001F3FF',
u':call_me_hand_light_skin_tone:': u'\U0001F919\U0001F3FB',
u':call_me_hand_medium-dark_skin_tone:': u'\U0001F919\U0001F3FE',
u':call_me_hand_medium-light_skin_tone:': u'\U0001F919\U0001F3FC',
u':call_me_hand_medium_skin_tone:': u'\U0001F919\U0001F3FD',
u':camel:': u'\U0001F42A',
u':camera:': u'\U0001F4F7',
u':camera_with_flash:': u'\U0001F4F8',
u':camping:': u'\U0001F3D5',
u':camping_selector:': u'\U0001F3D5\U0000FE0F',
u':candle:': u'\U0001F56F',
u':candle_selector:': u'\U0001F56F\U0000FE0F',
u':candy:': u'\U0001F36C',
u':canned_food:': u'\U0001F96B',
u':canoe:': u'\U0001F6F6',
u':card_file_box:': u'\U0001F5C3',
u':card_file_box_selector:': u'\U0001F5C3\U0000FE0F',
u':card_index:': u'\U0001F4C7',
u':card_index_dividers:': u'\U0001F5C2',
u':card_index_dividers_selector:': u'\U0001F5C2\U0000FE0F',
u':carousel_horse:': u'\U0001F3A0',
u':carp_streamer:': u'\U0001F38F',
u':carrot:': u'\U0001F955',
u':castle:': u'\U0001F3F0',
u':cat:': u'\U0001F408',
u':cat_face:': u'\U0001F431',
u':cat_face_with_tears_of_joy:': u'\U0001F639',
u':cat_face_with_wry_smile:': u'\U0001F63C',
u':chains:': u'\U000026D3',
u':chains_selector:': u'\U000026D3\U0000FE0F',
u':chair:': u'\U0001FA91',
u':chart_decreasing:': u'\U0001F4C9',
u':chart_increasing:': u'\U0001F4C8',
u':chart_increasing_with_yen:': u'\U0001F4B9',
u':check_box_with_check:': u'\U00002611\U0000FE0F',
u':check_mark:': u'\U00002714\U0000FE0F',
u':cheese_wedge:': u'\U0001F9C0',
u':chequered_flag:': u'\U0001F3C1',
u':cherries:': u'\U0001F352',
u':cherry_blossom:': u'\U0001F338',
u':chess_pawn:': u'\U0000265F',
u':chess_pawn_selector:': u'\U0000265F\U0000FE0F',
u':chestnut:': u'\U0001F330',
u':chicken:': u'\U0001F414',
u':child:': u'\U0001F9D2',
u':child_dark_skin_tone:': u'\U0001F9D2\U0001F3FF',
u':child_light_skin_tone:': u'\U0001F9D2\U0001F3FB',
u':child_medium-dark_skin_tone:': u'\U0001F9D2\U0001F3FE',
u':child_medium-light_skin_tone:': u'\U0001F9D2\U0001F3FC',
u':child_medium_skin_tone:': u'\U0001F9D2\U0001F3FD',
u':children_crossing:': u'\U0001F6B8',
u':chipmunk:': u'\U0001F43F',
u':chipmunk_selector:': u'\U0001F43F\U0000FE0F',
u':chocolate_bar:': u'\U0001F36B',
u':chopsticks:': u'\U0001F962',
u':church:': u'\U000026EA',
u':cigarette:': u'\U0001F6AC',
u':cinema:': u'\U0001F3A6',
u':circled_M:': u'\U000024C2',
u':circled_M_selector:': u'\U000024C2\U0000FE0F',
u':circus_tent:': u'\U0001F3AA',
u':cityscape:': u'\U0001F3D9',
u':cityscape_at_dusk:': u'\U0001F306',
u':cityscape_selector:': u'\U0001F3D9\U0000FE0F',
u':clamp:': u'\U0001F5DC',
u':clamp_selector:': u'\U0001F5DC\U0000FE0F',
u':clapper_board:': u'\U0001F3AC',
u':clapping_hands:': u'\U0001F44F',
u':clapping_hands_dark_skin_tone:': u'\U0001F44F\U0001F3FF',
u':clapping_hands_light_skin_tone:': u'\U0001F44F\U0001F3FB',
u':clapping_hands_medium-dark_skin_tone:': u'\U0001F44F\U0001F3FE',
u':clapping_hands_medium-light_skin_tone:': u'\U0001F44F\U0001F3FC',
u':clapping_hands_medium_skin_tone:': u'\U0001F44F\U0001F3FD',
u':classical_building:': u'\U0001F3DB',
u':classical_building_selector:': u'\U0001F3DB\U0000FE0F',
u':clinking_beer_mugs:': u'\U0001F37B',
u':clinking_glasses:': u'\U0001F942',
u':clipboard:': u'\U0001F4CB',
u':clockwise_vertical_arrows:': u'\U0001F503',
u':closed_book:': u'\U0001F4D5',
u':closed_mailbox_with_lowered_flag:': u'\U0001F4EA',
u':closed_mailbox_with_raised_flag:': u'\U0001F4EB',
u':closed_umbrella:': u'\U0001F302',
u':cloud:': u'\U00002601',
u':cloud_selector:': u'\U00002601\U0000FE0F',
u':cloud_with_lightning:': u'\U0001F329',
u':cloud_with_lightning_and_rain:': u'\U000026C8',
u':cloud_with_lightning_and_rain_selector:': u'\U000026C8\U0000FE0F',
u':cloud_with_lightning_selector:': u'\U0001F329\U0000FE0F',
u':cloud_with_rain:': u'\U0001F327',
u':cloud_with_rain_selector:': u'\U0001F327\U0000FE0F',
u':cloud_with_snow:': u'\U0001F328',
u':cloud_with_snow_selector:': u'\U0001F328\U0000FE0F',
u':clown_face:': u'\U0001F921',
u':club_suit:': u'\U00002663',
u':club_suit_selector:': u'\U00002663\U0000FE0F',
u':clutch_bag:': u'\U0001F45D',
u':coat:': u'\U0001F9E5',
u':cocktail_glass:': u'\U0001F378',
u':coconut:': u'\U0001F965',
u':coffin:': u'\U000026B0',
u':coffin_selector:': u'\U000026B0\U0000FE0F',
u':cold_face:': u'\U0001F976',
u':collision:': u'\U0001F4A5',
u':comet:': u'\U00002604',
u':comet_selector:': u'\U00002604\U0000FE0F',
u':compass:': u'\U0001F9ED',
u':computer_disk:': u'\U0001F4BD',
u':computer_mouse:': u'\U0001F5B1',
u':computer_mouse_selector:': u'\U0001F5B1\U0000FE0F',
u':confetti_ball:': u'\U0001F38A',
u':confounded_face:': u'\U0001F616',
u':confused_face:': u'\U0001F615',
u':construction:': u'\U0001F6A7',
u':construction_worker:': u'\U0001F477',
u':construction_worker_dark_skin_tone:': u'\U0001F477\U0001F3FF',
u':construction_worker_light_skin_tone:': u'\U0001F477\U0001F3FB',
u':construction_worker_medium-dark_skin_tone:': u'\U0001F477\U0001F3FE',
u':construction_worker_medium-light_skin_tone:': u'\U0001F477\U0001F3FC',
u':construction_worker_medium_skin_tone:': u'\U0001F477\U0001F3FD',
u':control_knobs:': u'\U0001F39B',
u':control_knobs_selector:': u'\U0001F39B\U0000FE0F',
u':convenience_store:': u'\U0001F3EA',
u':cooked_rice:': u'\U0001F35A',
u':cookie:': u'\U0001F36A',
u':cooking:': u'\U0001F373',
u':copyright:': u'\U000000A9',
u':copyright_selector:': u'\U000000A9\U0000FE0F',
u':couch_and_lamp:': u'\U0001F6CB',
u':couch_and_lamp_selector:': u'\U0001F6CB\U0000FE0F',
u':counterclockwise_arrows_button:': u'\U0001F504',
u':couple_with_heart-man-man:': u'\U0001F468\U0000200D\U00002764\U0000200D\U0001F468',
u':couple_with_heart-woman-man:': u'\U0001F469\U0000200D\U00002764\U0000200D\U0001F468',
u':couple_with_heart-woman-woman:': u'\U0001F469\U0000200D\U00002764\U0000200D\U0001F469',
u':couple_with_heart:': u'\U0001F491',
u':couple_with_heart_man_man:': u'\U0001F468\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F468',
u':couple_with_heart_woman_man:': u'\U0001F469\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F468',
u':couple_with_heart_woman_woman:': u'\U0001F469\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F469',
u':cow:': u'\U0001F404',
u':cow_face:': u'\U0001F42E',
u':cowboy_hat_face:': u'\U0001F920',
u':crab:': u'\U0001F980',
u':crayon:': u'\U0001F58D',
u':crayon_selector:': u'\U0001F58D\U0000FE0F',
u':credit_card:': u'\U0001F4B3',
u':crescent_moon:': u'\U0001F319',
u':cricket:': u'\U0001F997',
u':cricket_game:': u'\U0001F3CF',
u':crocodile:': u'\U0001F40A',
u':croissant:': u'\U0001F950',
u':cross_mark:': u'\U0000274C',
u':cross_mark_button:': u'\U0000274E',
u':crossed_fingers:': u'\U0001F91E',
u':crossed_fingers_dark_skin_tone:': u'\U0001F91E\U0001F3FF',
u':crossed_fingers_light_skin_tone:': u'\U0001F91E\U0001F3FB',
u':crossed_fingers_medium-dark_skin_tone:': u'\U0001F91E\U0001F3FE',
u':crossed_fingers_medium-light_skin_tone:': u'\U0001F91E\U0001F3FC',
u':crossed_fingers_medium_skin_tone:': u'\U0001F91E\U0001F3FD',
u':crossed_flags:': u'\U0001F38C',
u':crossed_swords:': u'\U00002694',
u':crossed_swords_selector:': u'\U00002694\U0000FE0F',
u':crown:': u'\U0001F451',
u':crying_cat_face:': u'\U0001F63F',
u':crying_face:': u'\U0001F622',
u':crystal_ball:': u'\U0001F52E',
u':cucumber:': u'\U0001F952',
u':cup_with_straw:': u'\U0001F964',
u':cupcake:': u'\U0001F9C1',
u':curling_stone:': u'\U0001F94C',
u':curly-haired_man:': u'\U0001F468\U0000200D\U0001F9B1',
u':curly-haired_woman:': u'\U0001F469\U0000200D\U0001F9B1',
u':curly_hair:': u'\U0001F9B1',
u':curly_loop:': u'\U000027B0',
u':currency_exchange:': u'\U0001F4B1',
u':curry_rice:': u'\U0001F35B',
u':custard:': u'\U0001F36E',
u':customs:': u'\U0001F6C3',
u':cut_of_meat:': u'\U0001F969',
u':cyclone:': u'\U0001F300',
u':dagger:': u'\U0001F5E1',
u':dagger_selector:': u'\U0001F5E1\U0000FE0F',
u':dango:': u'\U0001F361',
u':dark_skin_tone:': u'\U0001F3FF',
u':dashing_away:': u'\U0001F4A8',
u':deaf_man-dark_skin_tone:': u'\U0001F9CF\U0001F3FF\U0000200D\U00002642',
u':deaf_man-dark_skin_tone_selector:': u'\U0001F9CF\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':deaf_man-light_skin_tone:': u'\U0001F9CF\U0001F3FB\U0000200D\U00002642',
u':deaf_man-light_skin_tone_selector:': u'\U0001F9CF\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':deaf_man-medium-dark_skin_tone:': u'\U0001F9CF\U0001F3FE\U0000200D\U00002642',
u':deaf_man-medium-dark_skin_tone_selector:': u'\U0001F9CF\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':deaf_man-medium-light_skin_tone:': u'\U0001F9CF\U0001F3FC\U0000200D\U00002642',
u':deaf_man-medium-light_skin_tone_selector:': u'\U0001F9CF\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':deaf_man-medium_skin_tone:': u'\U0001F9CF\U0001F3FD\U0000200D\U00002642',
u':deaf_man-medium_skin_tone_selector:': u'\U0001F9CF\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':deaf_man:': u'\U0001F9CF\U0000200D\U00002642',
u':deaf_man_selector:': u'\U0001F9CF\U0000200D\U00002642\U0000FE0F',
u':deaf_person-dark_skin_tone:': u'\U0001F9CF\U0001F3FF',
u':deaf_person-light_skin_tone:': u'\U0001F9CF\U0001F3FB',
u':deaf_person-medium-dark_skin_tone:': u'\U0001F9CF\U0001F3FE',
u':deaf_person-medium-light_skin_tone:': u'\U0001F9CF\U0001F3FC',
u':deaf_person-medium_skin_tone:': u'\U0001F9CF\U0001F3FD',
u':deaf_person:': u'\U0001F9CF',
u':deaf_woman-dark_skin_tone:': u'\U0001F9CF\U0001F3FF\U0000200D\U00002640',
u':deaf_woman-dark_skin_tone_selector:': u'\U0001F9CF\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':deaf_woman-light_skin_tone:': u'\U0001F9CF\U0001F3FB\U0000200D\U00002640',
u':deaf_woman-light_skin_tone_selector:': u'\U0001F9CF\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':deaf_woman-medium-dark_skin_tone:': u'\U0001F9CF\U0001F3FE\U0000200D\U00002640',
u':deaf_woman-medium-dark_skin_tone_selector:': u'\U0001F9CF\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':deaf_woman-medium-light_skin_tone:': u'\U0001F9CF\U0001F3FC\U0000200D\U00002640',
u':deaf_woman-medium-light_skin_tone_selector:': u'\U0001F9CF\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':deaf_woman-medium_skin_tone:': u'\U0001F9CF\U0001F3FD\U0000200D\U00002640',
u':deaf_woman-medium_skin_tone_selector:': u'\U0001F9CF\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':deaf_woman:': u'\U0001F9CF\U0000200D\U00002640',
u':deaf_woman_selector:': u'\U0001F9CF\U0000200D\U00002640\U0000FE0F',
u':deciduous_tree:': u'\U0001F333',
u':deer:': u'\U0001F98C',
u':delivery_truck:': u'\U0001F69A',
u':department_store:': u'\U0001F3EC',
u':derelict_house:': u'\U0001F3DA',
u':derelict_house_selector:': u'\U0001F3DA\U0000FE0F',
u':desert:': u'\U0001F3DC',
u':desert_island:': u'\U0001F3DD',
u':desert_island_selector:': u'\U0001F3DD\U0000FE0F',
u':desert_selector:': u'\U0001F3DC\U0000FE0F',
u':desktop_computer:': u'\U0001F5A5',
u':desktop_computer_selector:': u'\U0001F5A5\U0000FE0F',
u':detective:': u'\U0001F575',
u':detective_dark_skin_tone:': u'\U0001F575\U0001F3FF',
u':detective_light_skin_tone:': u'\U0001F575\U0001F3FB',
u':detective_medium-dark_skin_tone:': u'\U0001F575\U0001F3FE',
u':detective_medium-light_skin_tone:': u'\U0001F575\U0001F3FC',
u':detective_medium_skin_tone:': u'\U0001F575\U0001F3FD',
u':detective_selector:': u'\U0001F575\U0000FE0F',
u':diamond_suit:': u'\U00002666',
u':diamond_suit_selector:': u'\U00002666\U0000FE0F',
u':diamond_with_a_dot:': u'\U0001F4A0',
u':dim_button:': u'\U0001F505',
u':direct_hit:': u'\U0001F3AF',
u':disappointed_face:': u'\U0001F61E',
u':diving_mask:': u'\U0001F93F',
u':diya_lamp:': u'\U0001FA94',
u':dizzy:': u'\U0001F4AB',
u':dizzy_face:': u'\U0001F635',
u':dna:': u'\U0001F9EC',
u':dog:': u'\U0001F415',
u':dog_face:': u'\U0001F436',
u':dollar_banknote:': u'\U0001F4B5',
u':dolphin:': u'\U0001F42C',
u':door:': u'\U0001F6AA',
u':dotted_six-pointed_star:': u'\U0001F52F',
u':double_curly_loop:': u'\U000027BF',
u':double_exclamation_mark:': u'\U0000203C',
u':double_exclamation_mark_selector:': u'\U0000203C\U0000FE0F',
u':doughnut:': u'\U0001F369',
u':dove:': u'\U0001F54A',
u':dove_selector:': u'\U0001F54A\U0000FE0F',
u':down-left_arrow:': u'\U00002199',
u':down-left_arrow_selector:': u'\U00002199\U0000FE0F',
u':down-right_arrow:': u'\U00002198',
u':down-right_arrow_selector:': u'\U00002198\U0000FE0F',
u':down_arrow:': u'\U00002B07',
u':down_arrow_selector:': u'\U00002B07\U0000FE0F',
u':downcast_face_with_sweat:': u'\U0001F613',
u':downwards_button:': u'\U0001F53D',
u':dragon:': u'\U0001F409',
u':dragon_face:': u'\U0001F432',
u':dress:': u'\U0001F457',
u':drooling_face:': u'\U0001F924',
u':drop_of_blood:': u'\U0001FA78',
u':droplet:': u'\U0001F4A7',
u':drum:': u'\U0001F941',
u':duck:': u'\U0001F986',
u':dumpling:': u'\U0001F95F',
u':dvd:': u'\U0001F4C0',
u':e-mail:': u'\U0001F4E7',
u':eagle:': u'\U0001F985',
u':ear:': u'\U0001F442',
u':ear_dark_skin_tone:': u'\U0001F442\U0001F3FF',
u':ear_light_skin_tone:': u'\U0001F442\U0001F3FB',
u':ear_medium-dark_skin_tone:': u'\U0001F442\U0001F3FE',
u':ear_medium-light_skin_tone:': u'\U0001F442\U0001F3FC',
u':ear_medium_skin_tone:': u'\U0001F442\U0001F3FD',
u':ear_of_corn:': u'\U0001F33D',
u':ear_with_hearing_aid-dark_skin_tone:': u'\U0001F9BB\U0001F3FF',
u':ear_with_hearing_aid-light_skin_tone:': u'\U0001F9BB\U0001F3FB',
u':ear_with_hearing_aid-medium-dark_skin_tone:': u'\U0001F9BB\U0001F3FE',
u':ear_with_hearing_aid-medium-light_skin_tone:': u'\U0001F9BB\U0001F3FC',
u':ear_with_hearing_aid-medium_skin_tone:': u'\U0001F9BB\U0001F3FD',
u':ear_with_hearing_aid:': u'\U0001F9BB',
u':egg:': u'\U0001F95A',
u':eggplant:': u'\U0001F346',
u':eight-pointed_star:': u'\U00002734',
u':eight-pointed_star_selector:': u'\U00002734\U0000FE0F',
u':eight-spoked_asterisk:': u'\U00002733',
u':eight-spoked_asterisk_selector:': u'\U00002733\U0000FE0F',
u':eight-thirty:': u'\U0001F563',
u':eight_o\u2019clock:': u'\U0001F557',
u':eject_button:': u'\U000023CF',
u':eject_button_selector:': u'\U000023CF\U0000FE0F',
u':electric_plug:': u'\U0001F50C',
u':elephant:': u'\U0001F418',
u':eleven-thirty:': u'\U0001F566',
u':eleven_o\u2019clock:': u'\U0001F55A',
u':elf:': u'\U0001F9DD',
u':elf_dark_skin_tone:': u'\U0001F9DD\U0001F3FF',
u':elf_light_skin_tone:': u'\U0001F9DD\U0001F3FB',
u':elf_medium-dark_skin_tone:': u'\U0001F9DD\U0001F3FE',
u':elf_medium-light_skin_tone:': u'\U0001F9DD\U0001F3FC',
u':elf_medium_skin_tone:': u'\U0001F9DD\U0001F3FD',
u':envelope:': u'\U00002709',
u':envelope_selector:': u'\U00002709\U0000FE0F',
u':envelope_with_arrow:': u'\U0001F4E9',
u':euro_banknote:': u'\U0001F4B6',
u':evergreen_tree:': u'\U0001F332',
u':ewe:': u'\U0001F411',
u':exclamation_mark:': u'\U00002757',
u':exclamation_question_mark:': u'\U00002049',
u':exclamation_question_mark_selector:': u'\U00002049\U0000FE0F',
u':exploding_head:': u'\U0001F92F',
u':expressionless_face:': u'\U0001F611',
u':eye:': u'\U0001F441',
u':eye_in_speech_bubble:': u'\U0001F441\U0000200D\U0001F5E8',
u':eye_in_speech_bubble_2:': u'\U0001F441\U0000200D\U0001F5E8\U0000FE0F',
u':eye_in_speech_bubble_3:': u'\U0001F441\U0000FE0F\U0000200D\U0001F5E8\U0000FE0F',
u':eye_in_speech_bubble_selector:': u'\U0001F441\U0000FE0F\U0000200D\U0001F5E8',
u':eye_selector:': u'\U0001F441\U0000FE0F',
u':eyes:': u'\U0001F440',
u':face_blowing_a_kiss:': u'\U0001F618',
u':face_savoring_food:': u'\U0001F60B',
u':face_screaming_in_fear:': u'\U0001F631',
u':face_vomiting:': u'\U0001F92E',
u':face_with_hand_over_mouth:': u'\U0001F92D',
u':face_with_head-bandage:': u'\U0001F915',
u':face_with_medical_mask:': u'\U0001F637',
u':face_with_monocle:': u'\U0001F9D0',
u':face_with_open_mouth:': u'\U0001F62E',
u':face_with_raised_eyebrow:': u'\U0001F928',
u':face_with_rolling_eyes:': u'\U0001F644',
u':face_with_steam_from_nose:': u'\U0001F624',
u':face_with_symbols_on_mouth:': u'\U0001F92C',
u':face_with_tears_of_joy:': u'\U0001F602',
u':face_with_thermometer:': u'\U0001F912',
u':face_with_tongue:': u'\U0001F61B',
u':face_without_mouth:': u'\U0001F636',
u':factory:': u'\U0001F3ED',
u':fairy:': u'\U0001F9DA',
u':fairy_dark_skin_tone:': u'\U0001F9DA\U0001F3FF',
u':fairy_light_skin_tone:': u'\U0001F9DA\U0001F3FB',
u':fairy_medium-dark_skin_tone:': u'\U0001F9DA\U0001F3FE',
u':fairy_medium-light_skin_tone:': u'\U0001F9DA\U0001F3FC',
u':fairy_medium_skin_tone:': u'\U0001F9DA\U0001F3FD',
u':falafel:': u'\U0001F9C6',
u':fallen_leaf:': u'\U0001F342',
u':family:': u'\U0001F46A',
u':family_man_boy:': u'\U0001F468\U0000200D\U0001F466',
u':family_man_boy_boy:': u'\U0001F468\U0000200D\U0001F466\U0000200D\U0001F466',
u':family_man_girl:': u'\U0001F468\U0000200D\U0001F467',
u':family_man_girl_boy:': u'\U0001F468\U0000200D\U0001F467\U0000200D\U0001F466',
u':family_man_girl_girl:': u'\U0001F468\U0000200D\U0001F467\U0000200D\U0001F467',
u':family_man_man_boy:': u'\U0001F468\U0000200D\U0001F468\U0000200D\U0001F466',
u':family_man_man_boy_boy:': u'\U0001F468\U0000200D\U0001F468\U0000200D\U0001F466\U0000200D\U0001F466',
u':family_man_man_girl:': u'\U0001F468\U0000200D\U0001F468\U0000200D\U0001F467',
u':family_man_man_girl_boy:': u'\U0001F468\U0000200D\U0001F468\U0000200D\U0001F467\U0000200D\U0001F466',
u':family_man_man_girl_girl:': u'\U0001F468\U0000200D\U0001F468\U0000200D\U0001F467\U0000200D\U0001F467',
u':family_man_woman_boy:': u'\U0001F468\U0000200D\U0001F469\U0000200D\U0001F466',
u':family_man_woman_boy_boy:': u'\U0001F468\U0000200D\U0001F469\U0000200D\U0001F466\U0000200D\U0001F466',
u':family_man_woman_girl:': u'\U0001F468\U0000200D\U0001F469\U0000200D\U0001F467',
u':family_man_woman_girl_boy:': u'\U0001F468\U0000200D\U0001F469\U0000200D\U0001F467\U0000200D\U0001F466',
u':family_man_woman_girl_girl:': u'\U0001F468\U0000200D\U0001F469\U0000200D\U0001F467\U0000200D\U0001F467',
u':family_woman_boy:': u'\U0001F469\U0000200D\U0001F466',
u':family_woman_boy_boy:': u'\U0001F469\U0000200D\U0001F466\U0000200D\U0001F466',
u':family_woman_girl:': u'\U0001F469\U0000200D\U0001F467',
u':family_woman_girl_boy:': u'\U0001F469\U0000200D\U0001F467\U0000200D\U0001F466',
u':family_woman_girl_girl:': u'\U0001F469\U0000200D\U0001F467\U0000200D\U0001F467',
u':family_woman_woman_boy:': u'\U0001F469\U0000200D\U0001F469\U0000200D\U0001F466',
u':family_woman_woman_boy_boy:': u'\U0001F469\U0000200D\U0001F469\U0000200D\U0001F466\U0000200D\U0001F466',
u':family_woman_woman_girl:': u'\U0001F469\U0000200D\U0001F469\U0000200D\U0001F467',
u':family_woman_woman_girl_boy:': u'\U0001F469\U0000200D\U0001F469\U0000200D\U0001F467\U0000200D\U0001F466',
u':family_woman_woman_girl_girl:': u'\U0001F469\U0000200D\U0001F469\U0000200D\U0001F467\U0000200D\U0001F467',
u':fast-forward_button:': u'\U000023E9',
u':fast_down_button:': u'\U000023EC',
u':fast_reverse_button:': u'\U000023EA',
u':fast_up_button:': u'\U000023EB',
u':fax_machine:': u'\U0001F4E0',
u':fearful_face:': u'\U0001F628',
u':female_sign:': u'\U00002640',
u':female_sign_selector:': u'\U00002640\U0000FE0F',
u':ferris_wheel:': u'\U0001F3A1',
u':ferry:': u'\U000026F4',
u':ferry_selector:': u'\U000026F4\U0000FE0F',
u':field_hockey:': u'\U0001F3D1',
u':file_cabinet:': u'\U0001F5C4',
u':file_cabinet_selector:': u'\U0001F5C4\U0000FE0F',
u':file_folder:': u'\U0001F4C1',
u':film_frames:': u'\U0001F39E',
u':film_frames_selector:': u'\U0001F39E\U0000FE0F',
u':film_projector:': u'\U0001F4FD',
u':film_projector_selector:': u'\U0001F4FD\U0000FE0F',
u':fire:': u'\U0001F525',
u':fire_engine:': u'\U0001F692',
u':fire_extinguisher:': u'\U0001F9EF',
u':firecracker:': u'\U0001F9E8',
u':fireworks:': u'\U0001F386',
u':first_quarter_moon:': u'\U0001F313',
u':first_quarter_moon_face:': u'\U0001F31B',
u':fish:': u'\U0001F41F',
u':fish_cake_with_swirl:': u'\U0001F365',
u':fishing_pole:': u'\U0001F3A3',
u':five-thirty:': u'\U0001F560',
u':five_o\u2019clock:': u'\U0001F554',
u':flag_in_hole:': u'\U000026F3',
u':flamingo:': u'\U0001F9A9',
u':flashlight:': u'\U0001F526',
u':flat_shoe:': u'\U0001F97F',
u':fleur-de-lis:': u'\U0000269C',
u':fleur-de-lis_selector:': u'\U0000269C\U0000FE0F',
u':flexed_biceps:': u'\U0001F4AA',
u':flexed_biceps_dark_skin_tone:': u'\U0001F4AA\U0001F3FF',
u':flexed_biceps_light_skin_tone:': u'\U0001F4AA\U0001F3FB',
u':flexed_biceps_medium-dark_skin_tone:': u'\U0001F4AA\U0001F3FE',
u':flexed_biceps_medium-light_skin_tone:': u'\U0001F4AA\U0001F3FC',
u':flexed_biceps_medium_skin_tone:': u'\U0001F4AA\U0001F3FD',
u':floppy_disk:': u'\U0001F4BE',
u':flower_playing_cards:': u'\U0001F3B4',
u':flushed_face:': u'\U0001F633',
u':flying_disc:': u'\U0001F94F',
u':flying_saucer:': u'\U0001F6F8',
u':fog:': u'\U0001F32B',
u':fog_selector:': u'\U0001F32B\U0000FE0F',
u':foggy:': u'\U0001F301',
u':folded_hands:': u'\U0001F64F',
u':folded_hands_dark_skin_tone:': u'\U0001F64F\U0001F3FF',
u':folded_hands_light_skin_tone:': u'\U0001F64F\U0001F3FB',
u':folded_hands_medium-dark_skin_tone:': u'\U0001F64F\U0001F3FE',
u':folded_hands_medium-light_skin_tone:': u'\U0001F64F\U0001F3FC',
u':folded_hands_medium_skin_tone:': u'\U0001F64F\U0001F3FD',
u':foot-dark_skin_tone:': u'\U0001F9B6\U0001F3FF',
u':foot-light_skin_tone:': u'\U0001F9B6\U0001F3FB',
u':foot-medium-dark_skin_tone:': u'\U0001F9B6\U0001F3FE',
u':foot-medium-light_skin_tone:': u'\U0001F9B6\U0001F3FC',
u':foot-medium_skin_tone:': u'\U0001F9B6\U0001F3FD',
u':foot:': u'\U0001F9B6',
u':footprints:': u'\U0001F463',
u':fork_and_knife:': u'\U0001F374',
u':fork_and_knife_with_plate:': u'\U0001F37D',
u':fork_and_knife_with_plate_selector:': u'\U0001F37D\U0000FE0F',
u':fortune_cookie:': u'\U0001F960',
u':fountain:': u'\U000026F2',
u':fountain_pen:': u'\U0001F58B',
u':fountain_pen_selector:': u'\U0001F58B\U0000FE0F',
u':four-thirty:': u'\U0001F55F',
u':four_leaf_clover:': u'\U0001F340',
u':four_o\u2019clock:': u'\U0001F553',
u':fox_face:': u'\U0001F98A',
u':framed_picture:': u'\U0001F5BC',
u':framed_picture_selector:': u'\U0001F5BC\U0000FE0F',
u':french_fries:': u'\U0001F35F',
u':fried_shrimp:': u'\U0001F364',
u':frog_face:': u'\U0001F438',
u':front-facing_baby_chick:': u'\U0001F425',
u':frowning_face:': u'\U00002639',
u':frowning_face_selector:': u'\U00002639\U0000FE0F',
u':frowning_face_with_open_mouth:': u'\U0001F626',
u':fuel_pump:': u'\U000026FD',
u':full_moon:': u'\U0001F315',
u':full_moon_face:': u'\U0001F31D',
u':funeral_urn:': u'\U000026B1',
u':funeral_urn_selector:': u'\U000026B1\U0000FE0F',
u':game_die:': u'\U0001F3B2',
u':garlic:': u'\U0001F9C4',
u':gear:': u'\U00002699',
u':gear_selector:': u'\U00002699\U0000FE0F',
u':gem_stone:': u'\U0001F48E',
u':genie:': u'\U0001F9DE',
u':ghost:': u'\U0001F47B',
u':giraffe:': u'\U0001F992',
u':girl:': u'\U0001F467',
u':girl_dark_skin_tone:': u'\U0001F467\U0001F3FF',
u':girl_light_skin_tone:': u'\U0001F467\U0001F3FB',
u':girl_medium-dark_skin_tone:': u'\U0001F467\U0001F3FE',
u':girl_medium-light_skin_tone:': u'\U0001F467\U0001F3FC',
u':girl_medium_skin_tone:': u'\U0001F467\U0001F3FD',
u':glass_of_milk:': u'\U0001F95B',
u':glasses:': u'\U0001F453',
u':globe_showing_Americas:': u'\U0001F30E',
u':globe_showing_Asia-Australia:': u'\U0001F30F',
u':globe_showing_Europe-Africa:': u'\U0001F30D',
u':globe_with_meridians:': u'\U0001F310',
u':gloves:': u'\U0001F9E4',
u':glowing_star:': u'\U0001F31F',
u':goal_net:': u'\U0001F945',
u':goat:': u'\U0001F410',
u':goblin:': u'\U0001F47A',
u':goggles:': u'\U0001F97D',
u':gorilla:': u'\U0001F98D',
u':graduation_cap:': u'\U0001F393',
u':grapes:': u'\U0001F347',
u':green_apple:': u'\U0001F34F',
u':green_book:': u'\U0001F4D7',
u':green_circle:': u'\U0001F7E2',
u':green_heart:': u'\U0001F49A',
u':green_salad:': u'\U0001F957',
u':green_square:': u'\U0001F7E9',
u':grimacing_face:': u'\U0001F62C',
u':grinning_cat_face:': u'\U0001F63A',
u':grinning_cat_face_with_smiling_eyes:': u'\U0001F638',
u':grinning_face:': u'\U0001F600',
u':grinning_face_with_big_eyes:': u'\U0001F603',
u':grinning_face_with_smiling_eyes:': u'\U0001F604',
u':grinning_face_with_sweat:': u'\U0001F605',
u':grinning_squinting_face:': u'\U0001F606',
u':growing_heart:': u'\U0001F497',
u':guard:': u'\U0001F482',
u':guard_dark_skin_tone:': u'\U0001F482\U0001F3FF',
u':guard_light_skin_tone:': u'\U0001F482\U0001F3FB',
u':guard_medium-dark_skin_tone:': u'\U0001F482\U0001F3FE',
u':guard_medium-light_skin_tone:': u'\U0001F482\U0001F3FC',
u':guard_medium_skin_tone:': u'\U0001F482\U0001F3FD',
u':guide_dog:': u'\U0001F9AE',
u':guitar:': u'\U0001F3B8',
u':hamburger:': u'\U0001F354',
u':hammer:': u'\U0001F528',
u':hammer_and_pick:': u'\U00002692',
u':hammer_and_pick_selector:': u'\U00002692\U0000FE0F',
u':hammer_and_wrench:': u'\U0001F6E0',
u':hammer_and_wrench_selector:': u'\U0001F6E0\U0000FE0F',
u':hamster_face:': u'\U0001F439',
u':hand_with_fingers_splayed:': u'\U0001F590',
u':hand_with_fingers_splayed_dark_skin_tone:': u'\U0001F590\U0001F3FF',
u':hand_with_fingers_splayed_light_skin_tone:': u'\U0001F590\U0001F3FB',
u':hand_with_fingers_splayed_medium-dark_skin_tone:': u'\U0001F590\U0001F3FE',
u':hand_with_fingers_splayed_medium-light_skin_tone:': u'\U0001F590\U0001F3FC',
u':hand_with_fingers_splayed_medium_skin_tone:': u'\U0001F590\U0001F3FD',
u':hand_with_fingers_splayed_selector:': u'\U0001F590\U0000FE0F',
u':handbag:': u'\U0001F45C',
u':handshake:': u'\U0001F91D',
u':hatching_chick:': u'\U0001F423',
u':headphone:': u'\U0001F3A7',
u':hear-no-evil_monkey:': u'\U0001F649',
u':heart_decoration:': u'\U0001F49F',
u':heart_exclamation:': u'\U00002763\U0000FE0F',
u':heart_suit:': u'\U00002665',
u':heart_suit_selector:': u'\U00002665\U0000FE0F',
u':heart_with_arrow:': u'\U0001F498',
u':heart_with_ribbon:': u'\U0001F49D',
u':heavy_check_mark:': u'\U00002714',
u':heavy_division_sign:': u'\U00002797',
u':heavy_dollar_sign:': u'\U0001F4B2',
u':heavy_heart_exclamation:': u'\U00002763',
u':heavy_large_circle:': u'\U00002B55',
u':heavy_minus_sign:': u'\U00002796',
u':heavy_multiplication_x:': u'\U00002716',
u':heavy_plus_sign:': u'\U00002795',
u':hedgehog:': u'\U0001F994',
u':helicopter:': u'\U0001F681',
u':herb:': u'\U0001F33F',
u':hibiscus:': u'\U0001F33A',
u':high-heeled_shoe:': u'\U0001F460',
u':high-speed_train:': u'\U0001F684',
u':high_voltage:': u'\U000026A1',
u':hiking_boot:': u'\U0001F97E',
u':hindu_temple:': u'\U0001F6D5',
u':hippopotamus:': u'\U0001F99B',
u':hole:': u'\U0001F573',
u':hole_selector:': u'\U0001F573\U0000FE0F',
u':honey_pot:': u'\U0001F36F',
u':honeybee:': u'\U0001F41D',
u':horizontal_traffic_light:': u'\U0001F6A5',
u':horse:': u'\U0001F40E',
u':horse_face:': u'\U0001F434',
u':horse_racing:': u'\U0001F3C7',
u':horse_racing_dark_skin_tone:': u'\U0001F3C7\U0001F3FF',
u':horse_racing_light_skin_tone:': u'\U0001F3C7\U0001F3FB',
u':horse_racing_medium-dark_skin_tone:': u'\U0001F3C7\U0001F3FE',
u':horse_racing_medium-light_skin_tone:': u'\U0001F3C7\U0001F3FC',
u':horse_racing_medium_skin_tone:': u'\U0001F3C7\U0001F3FD',
u':hospital:': u'\U0001F3E5',
u':hot_beverage:': u'\U00002615',
u':hot_dog:': u'\U0001F32D',
u':hot_face:': u'\U0001F975',
u':hot_pepper:': u'\U0001F336',
u':hot_pepper_selector:': u'\U0001F336\U0000FE0F',
u':hot_springs:': u'\U00002668',
u':hot_springs_selector:': u'\U00002668\U0000FE0F',
u':hotel:': u'\U0001F3E8',
u':hourglass_done:': u'\U0000231B',
u':hourglass_not_done:': u'\U000023F3',
u':house:': u'\U0001F3E0',
u':house_with_garden:': u'\U0001F3E1',
u':houses:': u'\U0001F3D8',
u':houses_selector:': u'\U0001F3D8\U0000FE0F',
u':hugging_face:': u'\U0001F917',
u':hundred_points:': u'\U0001F4AF',
u':hushed_face:': u'\U0001F62F',
u':ice:': u'\U0001F9CA',
u':ice_cream:': u'\U0001F368',
u':ice_hockey:': u'\U0001F3D2',
u':ice_skate:': u'\U000026F8',
u':ice_skate_selector:': u'\U000026F8\U0000FE0F',
u':inbox_tray:': u'\U0001F4E5',
u':incoming_envelope:': u'\U0001F4E8',
u':index_pointing_up:': u'\U0000261D',
u':index_pointing_up_dark_skin_tone:': u'\U0000261D\U0001F3FF',
u':index_pointing_up_light_skin_tone:': u'\U0000261D\U0001F3FB',
u':index_pointing_up_medium-dark_skin_tone:': u'\U0000261D\U0001F3FE',
u':index_pointing_up_medium-light_skin_tone:': u'\U0000261D\U0001F3FC',
u':index_pointing_up_medium_skin_tone:': u'\U0000261D\U0001F3FD',
u':index_pointing_up_selector:': u'\U0000261D\U0000FE0F',
u':infinity:': u'\U0000267E',
u':infinity_selector:': u'\U0000267E\U0000FE0F',
u':information:': u'\U00002139',
u':information_selector:': u'\U00002139\U0000FE0F',
u':input_latin_letters:': u'\U0001F524',
u':input_latin_lowercase:': u'\U0001F521',
u':input_latin_uppercase:': u'\U0001F520',
u':input_numbers:': u'\U0001F522',
u':input_symbols:': u'\U0001F523',
u':jack-o-lantern:': u'\U0001F383',
u':jeans:': u'\U0001F456',
u':jigsaw:': u'\U0001F9E9',
u':joker:': u'\U0001F0CF',
u':joystick:': u'\U0001F579',
u':joystick_selector:': u'\U0001F579\U0000FE0F',
u':kaaba:': u'\U0001F54B',
u':kangaroo:': u'\U0001F998',
u':key:': u'\U0001F511',
u':keyboard:': u'\U00002328',
u':keyboard_selector:': u'\U00002328\U0000FE0F',
u':keycap:': u'\U00000023\U000020E3',
u':keycap_#:': u'\U00000023\U0000FE0F\U000020E3',
u':keycap_*:': u'\U0000002A\U0000FE0F\U000020E3',
u':keycap_0:': u'\U00000030\U0000FE0F\U000020E3',
u':keycap_10:': u'\U0001F51F',
u':keycap_1:': u'\U00000031\U0000FE0F\U000020E3',
u':keycap_2:': u'\U00000032\U0000FE0F\U000020E3',
u':keycap_3:': u'\U00000033\U0000FE0F\U000020E3',
u':keycap_4:': u'\U00000034\U0000FE0F\U000020E3',
u':keycap_5:': u'\U00000035\U0000FE0F\U000020E3',
u':keycap_6:': u'\U00000036\U0000FE0F\U000020E3',
u':keycap_7:': u'\U00000037\U0000FE0F\U000020E3',
u':keycap_8:': u'\U00000038\U0000FE0F\U000020E3',
u':keycap_9:': u'\U00000039\U0000FE0F\U000020E3',
u':keycap_asterisk:': u'\U0000002A\U000020E3',
u':keycap_digit_eight:': u'\U00000038\U000020E3',
u':keycap_digit_five:': u'\U00000035\U000020E3',
u':keycap_digit_four:': u'\U00000034\U000020E3',
u':keycap_digit_nine:': u'\U00000039\U000020E3',
u':keycap_digit_one:': u'\U00000031\U000020E3',
u':keycap_digit_seven:': u'\U00000037\U000020E3',
u':keycap_digit_six:': u'\U00000036\U000020E3',
u':keycap_digit_three:': u'\U00000033\U000020E3',
u':keycap_digit_two:': u'\U00000032\U000020E3',
u':keycap_digit_zero:': u'\U00000030\U000020E3',
u':kick_scooter:': u'\U0001F6F4',
u':kimono:': u'\U0001F458',
u':kiss-man-man:': u'\U0001F468\U0000200D\U00002764\U0000200D\U0001F48B\U0000200D\U0001F468',
u':kiss-woman-man:': u'\U0001F469\U0000200D\U00002764\U0000200D\U0001F48B\U0000200D\U0001F468',
u':kiss-woman-woman:': u'\U0001F469\U0000200D\U00002764\U0000200D\U0001F48B\U0000200D\U0001F469',
u':kiss:': u'\U0001F48F',
u':kiss_man_man:': u'\U0001F468\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F48B\U0000200D\U0001F468',
u':kiss_mark:': u'\U0001F48B',
u':kiss_woman_man:': u'\U0001F469\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F48B\U0000200D\U0001F468',
u':kiss_woman_woman:': u'\U0001F469\U0000200D\U00002764\U0000FE0F\U0000200D\U0001F48B\U0000200D\U0001F469',
u':kissing_cat_face:': u'\U0001F63D',
u':kissing_face:': u'\U0001F617',
u':kissing_face_with_closed_eyes:': u'\U0001F61A',
u':kissing_face_with_smiling_eyes:': u'\U0001F619',
u':kitchen_knife:': u'\U0001F52A',
u':kite:': u'\U0001FA81',
u':kiwi_fruit:': u'\U0001F95D',
u':koala:': u'\U0001F428',
u':lab_coat:': u'\U0001F97C',
u':label:': u'\U0001F3F7',
u':label_selector:': u'\U0001F3F7\U0000FE0F',
u':lacrosse:': u'\U0001F94D',
u':lady_beetle:': u'\U0001F41E',
u':laptop_computer:': u'\U0001F4BB',
u':large_blue_diamond:': u'\U0001F537',
u':large_orange_diamond:': u'\U0001F536',
u':last_quarter_moon:': u'\U0001F317',
u':last_quarter_moon_face:': u'\U0001F31C',
u':last_track_button:': u'\U000023EE',
u':last_track_button_selector:': u'\U000023EE\U0000FE0F',
u':latin_cross:': u'\U0000271D',
u':latin_cross_selector:': u'\U0000271D\U0000FE0F',
u':leaf_fluttering_in_wind:': u'\U0001F343',
u':leafy_green:': u'\U0001F96C',
u':ledger:': u'\U0001F4D2',
u':left-facing_fist:': u'\U0001F91B',
u':left-facing_fist_dark_skin_tone:': u'\U0001F91B\U0001F3FF',
u':left-facing_fist_light_skin_tone:': u'\U0001F91B\U0001F3FB',
u':left-facing_fist_medium-dark_skin_tone:': u'\U0001F91B\U0001F3FE',
u':left-facing_fist_medium-light_skin_tone:': u'\U0001F91B\U0001F3FC',
u':left-facing_fist_medium_skin_tone:': u'\U0001F91B\U0001F3FD',
u':left-right_arrow:': u'\U00002194',
u':left-right_arrow_selector:': u'\U00002194\U0000FE0F',
u':left_arrow:': u'\U00002B05',
u':left_arrow_curving_right:': u'\U000021AA',
u':left_arrow_curving_right_selector:': u'\U000021AA\U0000FE0F',
u':left_arrow_selector:': u'\U00002B05\U0000FE0F',
u':left_luggage:': u'\U0001F6C5',
u':left_speech_bubble:': u'\U0001F5E8',
u':left_speech_bubble_selector:': u'\U0001F5E8\U0000FE0F',
u':leg-dark_skin_tone:': u'\U0001F9B5\U0001F3FF',
u':leg-light_skin_tone:': u'\U0001F9B5\U0001F3FB',
u':leg-medium-dark_skin_tone:': u'\U0001F9B5\U0001F3FE',
u':leg-medium-light_skin_tone:': u'\U0001F9B5\U0001F3FC',
u':leg-medium_skin_tone:': u'\U0001F9B5\U0001F3FD',
u':leg:': u'\U0001F9B5',
u':lemon:': u'\U0001F34B',
u':leopard:': u'\U0001F406',
u':level_slider:': u'\U0001F39A',
u':level_slider_selector:': u'\U0001F39A\U0000FE0F',
u':light_bulb:': u'\U0001F4A1',
u':light_rail:': u'\U0001F688',
u':light_skin_tone:': u'\U0001F3FB',
u':link:': u'\U0001F517',
u':linked_paperclips:': u'\U0001F587',
u':linked_paperclips_selector:': u'\U0001F587\U0000FE0F',
u':lion_face:': u'\U0001F981',
u':lipstick:': u'\U0001F484',
u':litter_in_bin_sign:': u'\U0001F6AE',
u':lizard:': u'\U0001F98E',
u':llama:': u'\U0001F999',
u':lobster:': u'\U0001F99E',
u':locked:': u'\U0001F512',
u':locked_with_key:': u'\U0001F510',
u':locked_with_pen:': u'\U0001F50F',
u':locomotive:': u'\U0001F682',
u':lollipop:': u'\U0001F36D',
u':lotion_bottle:': u'\U0001F9F4',
u':loudly_crying_face:': u'\U0001F62D',
u':loudspeaker:': u'\U0001F4E2',
u':love-you_gesture:': u'\U0001F91F',
u':love-you_gesture_dark_skin_tone:': u'\U0001F91F\U0001F3FF',
u':love-you_gesture_light_skin_tone:': u'\U0001F91F\U0001F3FB',
u':love-you_gesture_medium-dark_skin_tone:': u'\U0001F91F\U0001F3FE',
u':love-you_gesture_medium-light_skin_tone:': u'\U0001F91F\U0001F3FC',
u':love-you_gesture_medium_skin_tone:': u'\U0001F91F\U0001F3FD',
u':love_hotel:': u'\U0001F3E9',
u':love_letter:': u'\U0001F48C',
u':luggage:': u'\U0001F9F3',
u':lying_face:': u'\U0001F925',
u':mage:': u'\U0001F9D9',
u':mage_dark_skin_tone:': u'\U0001F9D9\U0001F3FF',
u':mage_light_skin_tone:': u'\U0001F9D9\U0001F3FB',
u':mage_medium-dark_skin_tone:': u'\U0001F9D9\U0001F3FE',
u':mage_medium-light_skin_tone:': u'\U0001F9D9\U0001F3FC',
u':mage_medium_skin_tone:': u'\U0001F9D9\U0001F3FD',
u':magnet:': u'\U0001F9F2',
u':magnifying_glass_tilted_left:': u'\U0001F50D',
u':magnifying_glass_tilted_right:': u'\U0001F50E',
u':mahjong_red_dragon:': u'\U0001F004',
u':male_sign:': u'\U00002642',
u':male_sign_selector:': u'\U00002642\U0000FE0F',
u':man-blond_hair:': u'\U0001F471\U0000200D\U00002642',
u':man-dark_skin_tone-bald:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9B2',
u':man-dark_skin_tone-blond_hair:': u'\U0001F471\U0001F3FF\U0000200D\U00002642',
u':man-dark_skin_tone-curly_hair:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9B1',
u':man-dark_skin_tone-red_hair:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9B0',
u':man-dark_skin_tone-white_hair:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9B3',
u':man-light_skin_tone-bald:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9B2',
u':man-light_skin_tone-blond_hair:': u'\U0001F471\U0001F3FB\U0000200D\U00002642',
u':man-light_skin_tone-curly_hair:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9B1',
u':man-light_skin_tone-red_hair:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9B0',
u':man-light_skin_tone-white_hair:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9B3',
u':man-medium-dark_skin_tone-bald:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9B2',
u':man-medium-dark_skin_tone-blond_hair:': u'\U0001F471\U0001F3FE\U0000200D\U00002642',
u':man-medium-dark_skin_tone-curly_hair:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9B1',
u':man-medium-dark_skin_tone-red_hair:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9B0',
u':man-medium-dark_skin_tone-white_hair:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9B3',
u':man-medium-light_skin_tone-bald:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9B2',
u':man-medium-light_skin_tone-blond_hair:': u'\U0001F471\U0001F3FC\U0000200D\U00002642',
u':man-medium-light_skin_tone-curly_hair:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9B1',
u':man-medium-light_skin_tone-red_hair:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9B0',
u':man-medium-light_skin_tone-white_hair:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9B3',
u':man-medium_skin_tone-bald:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9B2',
u':man-medium_skin_tone-blond_hair:': u'\U0001F471\U0001F3FD\U0000200D\U00002642',
u':man-medium_skin_tone-curly_hair:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9B1',
u':man-medium_skin_tone-red_hair:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9B0',
u':man-medium_skin_tone-white_hair:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9B3',
u':man:': u'\U0001F468',
u':man_and_woman_holding_hands:': u'\U0001F46B',
u':man_artist:': u'\U0001F468\U0000200D\U0001F3A8',
u':man_artist_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F3A8',
u':man_artist_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F3A8',
u':man_artist_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F3A8',
u':man_artist_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F3A8',
u':man_artist_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F3A8',
u':man_astronaut:': u'\U0001F468\U0000200D\U0001F680',
u':man_astronaut_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F680',
u':man_astronaut_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F680',
u':man_astronaut_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F680',
u':man_astronaut_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F680',
u':man_astronaut_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F680',
u':man_biking-dark_skin_tone:': u'\U0001F6B4\U0001F3FF\U0000200D\U00002642',
u':man_biking-light_skin_tone:': u'\U0001F6B4\U0001F3FB\U0000200D\U00002642',
u':man_biking-medium-dark_skin_tone:': u'\U0001F6B4\U0001F3FE\U0000200D\U00002642',
u':man_biking-medium-light_skin_tone:': u'\U0001F6B4\U0001F3FC\U0000200D\U00002642',
u':man_biking-medium_skin_tone:': u'\U0001F6B4\U0001F3FD\U0000200D\U00002642',
u':man_biking:': u'\U0001F6B4\U0000200D\U00002642',
u':man_biking_dark_skin_tone:': u'\U0001F6B4\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_biking_light_skin_tone:': u'\U0001F6B4\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_biking_medium-dark_skin_tone:': u'\U0001F6B4\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_biking_medium-light_skin_tone:': u'\U0001F6B4\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_biking_medium_skin_tone:': u'\U0001F6B4\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_biking_selector:': u'\U0001F6B4\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball-dark_skin_tone:': u'\U000026F9\U0001F3FF\U0000200D\U00002642',
u':man_bouncing_ball-light_skin_tone:': u'\U000026F9\U0001F3FB\U0000200D\U00002642',
u':man_bouncing_ball-medium-dark_skin_tone:': u'\U000026F9\U0001F3FE\U0000200D\U00002642',
u':man_bouncing_ball-medium-light_skin_tone:': u'\U000026F9\U0001F3FC\U0000200D\U00002642',
u':man_bouncing_ball-medium_skin_tone:': u'\U000026F9\U0001F3FD\U0000200D\U00002642',
u':man_bouncing_ball:': u'\U000026F9\U0000200D\U00002642',
u':man_bouncing_ball_2:': u'\U000026F9\U0000FE0F\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_3:': u'\U000026F9\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_dark_skin_tone:': u'\U000026F9\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_light_skin_tone:': u'\U000026F9\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_medium-dark_skin_tone:': u'\U000026F9\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_medium-light_skin_tone:': u'\U000026F9\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_medium_skin_tone:': u'\U000026F9\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_bouncing_ball_selector:': u'\U000026F9\U0000FE0F\U0000200D\U00002642',
u':man_bowing-dark_skin_tone:': u'\U0001F647\U0001F3FF\U0000200D\U00002642',
u':man_bowing-light_skin_tone:': u'\U0001F647\U0001F3FB\U0000200D\U00002642',
u':man_bowing-medium-dark_skin_tone:': u'\U0001F647\U0001F3FE\U0000200D\U00002642',
u':man_bowing-medium-light_skin_tone:': u'\U0001F647\U0001F3FC\U0000200D\U00002642',
u':man_bowing-medium_skin_tone:': u'\U0001F647\U0001F3FD\U0000200D\U00002642',
u':man_bowing:': u'\U0001F647\U0000200D\U00002642',
u':man_bowing_dark_skin_tone:': u'\U0001F647\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_bowing_light_skin_tone:': u'\U0001F647\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_bowing_medium-dark_skin_tone:': u'\U0001F647\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_bowing_medium-light_skin_tone:': u'\U0001F647\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_bowing_medium_skin_tone:': u'\U0001F647\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_bowing_selector:': u'\U0001F647\U0000200D\U00002642\U0000FE0F',
u':man_cartwheeling-dark_skin_tone:': u'\U0001F938\U0001F3FF\U0000200D\U00002642',
u':man_cartwheeling-light_skin_tone:': u'\U0001F938\U0001F3FB\U0000200D\U00002642',
u':man_cartwheeling-medium-dark_skin_tone:': u'\U0001F938\U0001F3FE\U0000200D\U00002642',
u':man_cartwheeling-medium-light_skin_tone:': u'\U0001F938\U0001F3FC\U0000200D\U00002642',
u':man_cartwheeling-medium_skin_tone:': u'\U0001F938\U0001F3FD\U0000200D\U00002642',
u':man_cartwheeling:': u'\U0001F938\U0000200D\U00002642',
u':man_cartwheeling_dark_skin_tone:': u'\U0001F938\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_cartwheeling_light_skin_tone:': u'\U0001F938\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_cartwheeling_medium-dark_skin_tone:': u'\U0001F938\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_cartwheeling_medium-light_skin_tone:': u'\U0001F938\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_cartwheeling_medium_skin_tone:': u'\U0001F938\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_cartwheeling_selector:': u'\U0001F938\U0000200D\U00002642\U0000FE0F',
u':man_climbing-dark_skin_tone:': u'\U0001F9D7\U0001F3FF\U0000200D\U00002642',
u':man_climbing-light_skin_tone:': u'\U0001F9D7\U0001F3FB\U0000200D\U00002642',
u':man_climbing-medium-dark_skin_tone:': u'\U0001F9D7\U0001F3FE\U0000200D\U00002642',
u':man_climbing-medium-light_skin_tone:': u'\U0001F9D7\U0001F3FC\U0000200D\U00002642',
u':man_climbing-medium_skin_tone:': u'\U0001F9D7\U0001F3FD\U0000200D\U00002642',
u':man_climbing:': u'\U0001F9D7\U0000200D\U00002642',
u':man_climbing_dark_skin_tone:': u'\U0001F9D7\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_climbing_light_skin_tone:': u'\U0001F9D7\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_climbing_medium-dark_skin_tone:': u'\U0001F9D7\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_climbing_medium-light_skin_tone:': u'\U0001F9D7\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_climbing_medium_skin_tone:': u'\U0001F9D7\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_climbing_selector:': u'\U0001F9D7\U0000200D\U00002642\U0000FE0F',
u':man_construction_worker-dark_skin_tone:': u'\U0001F477\U0001F3FF\U0000200D\U00002642',
u':man_construction_worker-light_skin_tone:': u'\U0001F477\U0001F3FB\U0000200D\U00002642',
u':man_construction_worker-medium-dark_skin_tone:': u'\U0001F477\U0001F3FE\U0000200D\U00002642',
u':man_construction_worker-medium-light_skin_tone:': u'\U0001F477\U0001F3FC\U0000200D\U00002642',
u':man_construction_worker-medium_skin_tone:': u'\U0001F477\U0001F3FD\U0000200D\U00002642',
u':man_construction_worker:': u'\U0001F477\U0000200D\U00002642',
u':man_construction_worker_dark_skin_tone:': u'\U0001F477\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_construction_worker_light_skin_tone:': u'\U0001F477\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_construction_worker_medium-dark_skin_tone:': u'\U0001F477\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_construction_worker_medium-light_skin_tone:': u'\U0001F477\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_construction_worker_medium_skin_tone:': u'\U0001F477\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_construction_worker_selector:': u'\U0001F477\U0000200D\U00002642\U0000FE0F',
u':man_cook:': u'\U0001F468\U0000200D\U0001F373',
u':man_cook_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F373',
u':man_cook_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F373',
u':man_cook_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F373',
u':man_cook_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F373',
u':man_cook_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F373',
u':man_dancing:': u'\U0001F57A',
u':man_dancing_dark_skin_tone:': u'\U0001F57A\U0001F3FF',
u':man_dancing_light_skin_tone:': u'\U0001F57A\U0001F3FB',
u':man_dancing_medium-dark_skin_tone:': u'\U0001F57A\U0001F3FE',
u':man_dancing_medium-light_skin_tone:': u'\U0001F57A\U0001F3FC',
u':man_dancing_medium_skin_tone:': u'\U0001F57A\U0001F3FD',
u':man_dark_skin_tone:': u'\U0001F468\U0001F3FF',
u':man_detective-dark_skin_tone:': u'\U0001F575\U0001F3FF\U0000200D\U00002642',
u':man_detective-light_skin_tone:': u'\U0001F575\U0001F3FB\U0000200D\U00002642',
u':man_detective-medium-dark_skin_tone:': u'\U0001F575\U0001F3FE\U0000200D\U00002642',
u':man_detective-medium-light_skin_tone:': u'\U0001F575\U0001F3FC\U0000200D\U00002642',
u':man_detective-medium_skin_tone:': u'\U0001F575\U0001F3FD\U0000200D\U00002642',
u':man_detective:': u'\U0001F575\U0000200D\U00002642',
u':man_detective_2:': u'\U0001F575\U0000FE0F\U0000200D\U00002642\U0000FE0F',
u':man_detective_selector_2:': u'\U0001F575\U0000200D\U00002642\U0000FE0F',
u':man_detective_dark_skin_tone:': u'\U0001F575\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_detective_light_skin_tone:': u'\U0001F575\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_detective_medium-dark_skin_tone:': u'\U0001F575\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_detective_medium-light_skin_tone:': u'\U0001F575\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_detective_medium_skin_tone:': u'\U0001F575\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_detective_selector:': u'\U0001F575\U0000FE0F\U0000200D\U00002642',
u':man_elf-dark_skin_tone:': u'\U0001F9DD\U0001F3FF\U0000200D\U00002642',
u':man_elf-light_skin_tone:': u'\U0001F9DD\U0001F3FB\U0000200D\U00002642',
u':man_elf-medium-dark_skin_tone:': u'\U0001F9DD\U0001F3FE\U0000200D\U00002642',
u':man_elf-medium-light_skin_tone:': u'\U0001F9DD\U0001F3FC\U0000200D\U00002642',
u':man_elf-medium_skin_tone:': u'\U0001F9DD\U0001F3FD\U0000200D\U00002642',
u':man_elf:': u'\U0001F9DD\U0000200D\U00002642',
u':man_elf_dark_skin_tone:': u'\U0001F9DD\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_elf_light_skin_tone:': u'\U0001F9DD\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_elf_medium-dark_skin_tone:': u'\U0001F9DD\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_elf_medium-light_skin_tone:': u'\U0001F9DD\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_elf_medium_skin_tone:': u'\U0001F9DD\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_elf_selector:': u'\U0001F9DD\U0000200D\U00002642\U0000FE0F',
u':man_facepalming-dark_skin_tone:': u'\U0001F926\U0001F3FF\U0000200D\U00002642',
u':man_facepalming-light_skin_tone:': u'\U0001F926\U0001F3FB\U0000200D\U00002642',
u':man_facepalming-medium-dark_skin_tone:': u'\U0001F926\U0001F3FE\U0000200D\U00002642',
u':man_facepalming-medium-light_skin_tone:': u'\U0001F926\U0001F3FC\U0000200D\U00002642',
u':man_facepalming-medium_skin_tone:': u'\U0001F926\U0001F3FD\U0000200D\U00002642',
u':man_facepalming:': u'\U0001F926\U0000200D\U00002642',
u':man_facepalming_dark_skin_tone:': u'\U0001F926\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_facepalming_light_skin_tone:': u'\U0001F926\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_facepalming_medium-dark_skin_tone:': u'\U0001F926\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_facepalming_medium-light_skin_tone:': u'\U0001F926\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_facepalming_medium_skin_tone:': u'\U0001F926\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_facepalming_selector:': u'\U0001F926\U0000200D\U00002642\U0000FE0F',
u':man_factory_worker:': u'\U0001F468\U0000200D\U0001F3ED',
u':man_factory_worker_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F3ED',
u':man_factory_worker_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F3ED',
u':man_factory_worker_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F3ED',
u':man_factory_worker_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F3ED',
u':man_factory_worker_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F3ED',
u':man_fairy-dark_skin_tone:': u'\U0001F9DA\U0001F3FF\U0000200D\U00002642',
u':man_fairy-light_skin_tone:': u'\U0001F9DA\U0001F3FB\U0000200D\U00002642',
u':man_fairy-medium-dark_skin_tone:': u'\U0001F9DA\U0001F3FE\U0000200D\U00002642',
u':man_fairy-medium-light_skin_tone:': u'\U0001F9DA\U0001F3FC\U0000200D\U00002642',
u':man_fairy-medium_skin_tone:': u'\U0001F9DA\U0001F3FD\U0000200D\U00002642',
u':man_fairy:': u'\U0001F9DA\U0000200D\U00002642',
u':man_fairy_dark_skin_tone:': u'\U0001F9DA\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_fairy_light_skin_tone:': u'\U0001F9DA\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_fairy_medium-dark_skin_tone:': u'\U0001F9DA\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_fairy_medium-light_skin_tone:': u'\U0001F9DA\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_fairy_medium_skin_tone:': u'\U0001F9DA\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_fairy_selector:': u'\U0001F9DA\U0000200D\U00002642\U0000FE0F',
u':man_farmer:': u'\U0001F468\U0000200D\U0001F33E',
u':man_farmer_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F33E',
u':man_farmer_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F33E',
u':man_farmer_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F33E',
u':man_farmer_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F33E',
u':man_farmer_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F33E',
u':man_firefighter:': u'\U0001F468\U0000200D\U0001F692',
u':man_firefighter_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F692',
u':man_firefighter_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F692',
u':man_firefighter_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F692',
u':man_firefighter_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F692',
u':man_firefighter_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F692',
u':man_frowning-dark_skin_tone:': u'\U0001F64D\U0001F3FF\U0000200D\U00002642',
u':man_frowning-light_skin_tone:': u'\U0001F64D\U0001F3FB\U0000200D\U00002642',
u':man_frowning-medium-dark_skin_tone:': u'\U0001F64D\U0001F3FE\U0000200D\U00002642',
u':man_frowning-medium-light_skin_tone:': u'\U0001F64D\U0001F3FC\U0000200D\U00002642',
u':man_frowning-medium_skin_tone:': u'\U0001F64D\U0001F3FD\U0000200D\U00002642',
u':man_frowning:': u'\U0001F64D\U0000200D\U00002642',
u':man_frowning_dark_skin_tone:': u'\U0001F64D\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_frowning_light_skin_tone:': u'\U0001F64D\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_frowning_medium-dark_skin_tone:': u'\U0001F64D\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_frowning_medium-light_skin_tone:': u'\U0001F64D\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_frowning_medium_skin_tone:': u'\U0001F64D\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_frowning_selector:': u'\U0001F64D\U0000200D\U00002642\U0000FE0F',
u':man_genie:': u'\U0001F9DE\U0000200D\U00002642',
u':man_genie_selector:': u'\U0001F9DE\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_NO-dark_skin_tone:': u'\U0001F645\U0001F3FF\U0000200D\U00002642',
u':man_gesturing_NO-light_skin_tone:': u'\U0001F645\U0001F3FB\U0000200D\U00002642',
u':man_gesturing_NO-medium-dark_skin_tone:': u'\U0001F645\U0001F3FE\U0000200D\U00002642',
u':man_gesturing_NO-medium-light_skin_tone:': u'\U0001F645\U0001F3FC\U0000200D\U00002642',
u':man_gesturing_NO-medium_skin_tone:': u'\U0001F645\U0001F3FD\U0000200D\U00002642',
u':man_gesturing_NO:': u'\U0001F645\U0000200D\U00002642',
u':man_gesturing_NO_dark_skin_tone:': u'\U0001F645\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_NO_light_skin_tone:': u'\U0001F645\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_NO_medium-dark_skin_tone:': u'\U0001F645\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_NO_medium-light_skin_tone:': u'\U0001F645\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_NO_medium_skin_tone:': u'\U0001F645\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_NO_selector:': u'\U0001F645\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_OK-dark_skin_tone:': u'\U0001F646\U0001F3FF\U0000200D\U00002642',
u':man_gesturing_OK-light_skin_tone:': u'\U0001F646\U0001F3FB\U0000200D\U00002642',
u':man_gesturing_OK-medium-dark_skin_tone:': u'\U0001F646\U0001F3FE\U0000200D\U00002642',
u':man_gesturing_OK-medium-light_skin_tone:': u'\U0001F646\U0001F3FC\U0000200D\U00002642',
u':man_gesturing_OK-medium_skin_tone:': u'\U0001F646\U0001F3FD\U0000200D\U00002642',
u':man_gesturing_OK:': u'\U0001F646\U0000200D\U00002642',
u':man_gesturing_OK_dark_skin_tone:': u'\U0001F646\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_OK_light_skin_tone:': u'\U0001F646\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_OK_medium-dark_skin_tone:': u'\U0001F646\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_OK_medium-light_skin_tone:': u'\U0001F646\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_OK_medium_skin_tone:': u'\U0001F646\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_gesturing_OK_selector:': u'\U0001F646\U0000200D\U00002642\U0000FE0F',
u':man_getting_haircut-dark_skin_tone:': u'\U0001F487\U0001F3FF\U0000200D\U00002642',
u':man_getting_haircut-light_skin_tone:': u'\U0001F487\U0001F3FB\U0000200D\U00002642',
u':man_getting_haircut-medium-dark_skin_tone:': u'\U0001F487\U0001F3FE\U0000200D\U00002642',
u':man_getting_haircut-medium-light_skin_tone:': u'\U0001F487\U0001F3FC\U0000200D\U00002642',
u':man_getting_haircut-medium_skin_tone:': u'\U0001F487\U0001F3FD\U0000200D\U00002642',
u':man_getting_haircut:': u'\U0001F487\U0000200D\U00002642',
u':man_getting_haircut_dark_skin_tone:': u'\U0001F487\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_getting_haircut_light_skin_tone:': u'\U0001F487\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_getting_haircut_medium-dark_skin_tone:': u'\U0001F487\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_getting_haircut_medium-light_skin_tone:': u'\U0001F487\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_getting_haircut_medium_skin_tone:': u'\U0001F487\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_getting_haircut_selector:': u'\U0001F487\U0000200D\U00002642\U0000FE0F',
u':man_getting_massage-dark_skin_tone:': u'\U0001F486\U0001F3FF\U0000200D\U00002642',
u':man_getting_massage-light_skin_tone:': u'\U0001F486\U0001F3FB\U0000200D\U00002642',
u':man_getting_massage-medium-dark_skin_tone:': u'\U0001F486\U0001F3FE\U0000200D\U00002642',
u':man_getting_massage-medium-light_skin_tone:': u'\U0001F486\U0001F3FC\U0000200D\U00002642',
u':man_getting_massage-medium_skin_tone:': u'\U0001F486\U0001F3FD\U0000200D\U00002642',
u':man_getting_massage:': u'\U0001F486\U0000200D\U00002642',
u':man_getting_massage_dark_skin_tone:': u'\U0001F486\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_getting_massage_light_skin_tone:': u'\U0001F486\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_getting_massage_medium-dark_skin_tone:': u'\U0001F486\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_getting_massage_medium-light_skin_tone:': u'\U0001F486\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_getting_massage_medium_skin_tone:': u'\U0001F486\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_getting_massage_selector:': u'\U0001F486\U0000200D\U00002642\U0000FE0F',
u':man_golfing-dark_skin_tone:': u'\U0001F3CC\U0001F3FF\U0000200D\U00002642',
u':man_golfing-light_skin_tone:': u'\U0001F3CC\U0001F3FB\U0000200D\U00002642',
u':man_golfing-medium-dark_skin_tone:': u'\U0001F3CC\U0001F3FE\U0000200D\U00002642',
u':man_golfing-medium-light_skin_tone:': u'\U0001F3CC\U0001F3FC\U0000200D\U00002642',
u':man_golfing-medium_skin_tone:': u'\U0001F3CC\U0001F3FD\U0000200D\U00002642',
u':man_golfing:': u'\U0001F3CC\U0000200D\U00002642',
u':man_golfing_2:': u'\U0001F3CC\U0000FE0F\U0000200D\U00002642\U0000FE0F',
u':man_golfing_3:': u'\U0001F3CC\U0000200D\U00002642\U0000FE0F',
u':man_golfing_dark_skin_tone:': u'\U0001F3CC\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_golfing_light_skin_tone:': u'\U0001F3CC\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_golfing_medium-dark_skin_tone:': u'\U0001F3CC\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_golfing_medium-light_skin_tone:': u'\U0001F3CC\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_golfing_medium_skin_tone:': u'\U0001F3CC\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_golfing_selector:': u'\U0001F3CC\U0000FE0F\U0000200D\U00002642',
u':man_guard-dark_skin_tone:': u'\U0001F482\U0001F3FF\U0000200D\U00002642',
u':man_guard-light_skin_tone:': u'\U0001F482\U0001F3FB\U0000200D\U00002642',
u':man_guard-medium-dark_skin_tone:': u'\U0001F482\U0001F3FE\U0000200D\U00002642',
u':man_guard-medium-light_skin_tone:': u'\U0001F482\U0001F3FC\U0000200D\U00002642',
u':man_guard-medium_skin_tone:': u'\U0001F482\U0001F3FD\U0000200D\U00002642',
u':man_guard:': u'\U0001F482\U0000200D\U00002642',
u':man_guard_dark_skin_tone:': u'\U0001F482\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_guard_light_skin_tone:': u'\U0001F482\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_guard_medium-dark_skin_tone:': u'\U0001F482\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_guard_medium-light_skin_tone:': u'\U0001F482\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_guard_medium_skin_tone:': u'\U0001F482\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_guard_selector:': u'\U0001F482\U0000200D\U00002642\U0000FE0F',
u':man_health_worker-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U00002695',
u':man_health_worker-light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U00002695',
u':man_health_worker-medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U00002695',
u':man_health_worker-medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U00002695',
u':man_health_worker-medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U00002695',
u':man_health_worker:': u'\U0001F468\U0000200D\U00002695',
u':man_health_worker_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U00002695\U0000FE0F',
u':man_health_worker_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U00002695\U0000FE0F',
u':man_health_worker_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U00002695\U0000FE0F',
u':man_health_worker_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U00002695\U0000FE0F',
u':man_health_worker_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U00002695\U0000FE0F',
u':man_health_worker_selector:': u'\U0001F468\U0000200D\U00002695\U0000FE0F',
u':man_in_lotus_position-dark_skin_tone:': u'\U0001F9D8\U0001F3FF\U0000200D\U00002642',
u':man_in_lotus_position-light_skin_tone:': u'\U0001F9D8\U0001F3FB\U0000200D\U00002642',
u':man_in_lotus_position-medium-dark_skin_tone:': u'\U0001F9D8\U0001F3FE\U0000200D\U00002642',
u':man_in_lotus_position-medium-light_skin_tone:': u'\U0001F9D8\U0001F3FC\U0000200D\U00002642',
u':man_in_lotus_position-medium_skin_tone:': u'\U0001F9D8\U0001F3FD\U0000200D\U00002642',
u':man_in_lotus_position:': u'\U0001F9D8\U0000200D\U00002642',
u':man_in_lotus_position_dark_skin_tone:': u'\U0001F9D8\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_in_lotus_position_light_skin_tone:': u'\U0001F9D8\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_in_lotus_position_medium-dark_skin_tone:': u'\U0001F9D8\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_in_lotus_position_medium-light_skin_tone:': u'\U0001F9D8\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_in_lotus_position_medium_skin_tone:': u'\U0001F9D8\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_in_lotus_position_selector:': u'\U0001F9D8\U0000200D\U00002642\U0000FE0F',
u':man_in_manual_wheelchair-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9BD',
u':man_in_manual_wheelchair-light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9BD',
u':man_in_manual_wheelchair-medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9BD',
u':man_in_manual_wheelchair-medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9BD',
u':man_in_manual_wheelchair-medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9BD',
u':man_in_manual_wheelchair:': u'\U0001F468\U0000200D\U0001F9BD',
u':man_in_motorized_wheelchair-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9BC',
u':man_in_motorized_wheelchair-light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9BC',
u':man_in_motorized_wheelchair-medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9BC',
u':man_in_motorized_wheelchair-medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9BC',
u':man_in_motorized_wheelchair-medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9BC',
u':man_in_motorized_wheelchair:': u'\U0001F468\U0000200D\U0001F9BC',
u':man_in_steamy_room-dark_skin_tone:': u'\U0001F9D6\U0001F3FF\U0000200D\U00002642',
u':man_in_steamy_room-light_skin_tone:': u'\U0001F9D6\U0001F3FB\U0000200D\U00002642',
u':man_in_steamy_room-medium-dark_skin_tone:': u'\U0001F9D6\U0001F3FE\U0000200D\U00002642',
u':man_in_steamy_room-medium-light_skin_tone:': u'\U0001F9D6\U0001F3FC\U0000200D\U00002642',
u':man_in_steamy_room-medium_skin_tone:': u'\U0001F9D6\U0001F3FD\U0000200D\U00002642',
u':man_in_steamy_room:': u'\U0001F9D6\U0000200D\U00002642',
u':man_in_steamy_room_dark_skin_tone:': u'\U0001F9D6\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_in_steamy_room_light_skin_tone:': u'\U0001F9D6\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_in_steamy_room_medium-dark_skin_tone:': u'\U0001F9D6\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_in_steamy_room_medium-light_skin_tone:': u'\U0001F9D6\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_in_steamy_room_medium_skin_tone:': u'\U0001F9D6\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_in_steamy_room_selector:': u'\U0001F9D6\U0000200D\U00002642\U0000FE0F',
u':man_in_suit_levitating:': u'\U0001F574',
u':man_in_suit_levitating_dark_skin_tone:': u'\U0001F574\U0001F3FF',
u':man_in_suit_levitating_light_skin_tone:': u'\U0001F574\U0001F3FB',
u':man_in_suit_levitating_medium-dark_skin_tone:': u'\U0001F574\U0001F3FE',
u':man_in_suit_levitating_medium-light_skin_tone:': u'\U0001F574\U0001F3FC',
u':man_in_suit_levitating_medium_skin_tone:': u'\U0001F574\U0001F3FD',
u':man_in_suit_levitating_selector:': u'\U0001F574\U0000FE0F',
u':man_in_tuxedo:': u'\U0001F935',
u':man_in_tuxedo_dark_skin_tone:': u'\U0001F935\U0001F3FF',
u':man_in_tuxedo_light_skin_tone:': u'\U0001F935\U0001F3FB',
u':man_in_tuxedo_medium-dark_skin_tone:': u'\U0001F935\U0001F3FE',
u':man_in_tuxedo_medium-light_skin_tone:': u'\U0001F935\U0001F3FC',
u':man_in_tuxedo_medium_skin_tone:': u'\U0001F935\U0001F3FD',
u':man_judge-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U00002696',
u':man_judge-light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U00002696',
u':man_judge-medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U00002696',
u':man_judge-medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U00002696',
u':man_judge-medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U00002696',
u':man_judge:': u'\U0001F468\U0000200D\U00002696',
u':man_judge_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U00002696\U0000FE0F',
u':man_judge_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U00002696\U0000FE0F',
u':man_judge_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U00002696\U0000FE0F',
u':man_judge_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U00002696\U0000FE0F',
u':man_judge_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U00002696\U0000FE0F',
u':man_judge_selector:': u'\U0001F468\U0000200D\U00002696\U0000FE0F',
u':man_juggling-dark_skin_tone:': u'\U0001F939\U0001F3FF\U0000200D\U00002642',
u':man_juggling-light_skin_tone:': u'\U0001F939\U0001F3FB\U0000200D\U00002642',
u':man_juggling-medium-dark_skin_tone:': u'\U0001F939\U0001F3FE\U0000200D\U00002642',
u':man_juggling-medium-light_skin_tone:': u'\U0001F939\U0001F3FC\U0000200D\U00002642',
u':man_juggling-medium_skin_tone:': u'\U0001F939\U0001F3FD\U0000200D\U00002642',
u':man_juggling:': u'\U0001F939\U0000200D\U00002642',
u':man_juggling_dark_skin_tone:': u'\U0001F939\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_juggling_light_skin_tone:': u'\U0001F939\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_juggling_medium-dark_skin_tone:': u'\U0001F939\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_juggling_medium-light_skin_tone:': u'\U0001F939\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_juggling_medium_skin_tone:': u'\U0001F939\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_juggling_selector:': u'\U0001F939\U0000200D\U00002642\U0000FE0F',
u':man_kneeling-dark_skin_tone:': u'\U0001F9CE\U0001F3FF\U0000200D\U00002642',
u':man_kneeling-dark_skin_tone_selector:': u'\U0001F9CE\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_kneeling-light_skin_tone:': u'\U0001F9CE\U0001F3FB\U0000200D\U00002642',
u':man_kneeling-light_skin_tone_selector:': u'\U0001F9CE\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_kneeling-medium-dark_skin_tone:': u'\U0001F9CE\U0001F3FE\U0000200D\U00002642',
u':man_kneeling-medium-dark_skin_tone_selector:': u'\U0001F9CE\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_kneeling-medium-light_skin_tone:': u'\U0001F9CE\U0001F3FC\U0000200D\U00002642',
u':man_kneeling-medium-light_skin_tone_selector:': u'\U0001F9CE\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_kneeling-medium_skin_tone:': u'\U0001F9CE\U0001F3FD\U0000200D\U00002642',
u':man_kneeling-medium_skin_tone_selector:': u'\U0001F9CE\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_kneeling:': u'\U0001F9CE\U0000200D\U00002642',
u':man_kneeling_selector:': u'\U0001F9CE\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights-dark_skin_tone:': u'\U0001F3CB\U0001F3FF\U0000200D\U00002642',
u':man_lifting_weights-light_skin_tone:': u'\U0001F3CB\U0001F3FB\U0000200D\U00002642',
u':man_lifting_weights-medium-dark_skin_tone:': u'\U0001F3CB\U0001F3FE\U0000200D\U00002642',
u':man_lifting_weights-medium-light_skin_tone:': u'\U0001F3CB\U0001F3FC\U0000200D\U00002642',
u':man_lifting_weights-medium_skin_tone:': u'\U0001F3CB\U0001F3FD\U0000200D\U00002642',
u':man_lifting_weights:': u'\U0001F3CB\U0000200D\U00002642',
u':man_lifting_weights_2:': u'\U0001F3CB\U0000FE0F\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_3:': u'\U0001F3CB\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_dark_skin_tone:': u'\U0001F3CB\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_light_skin_tone:': u'\U0001F3CB\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_medium-dark_skin_tone:': u'\U0001F3CB\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_medium-light_skin_tone:': u'\U0001F3CB\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_medium_skin_tone:': u'\U0001F3CB\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_lifting_weights_selector:': u'\U0001F3CB\U0000FE0F\U0000200D\U00002642',
u':man_light_skin_tone:': u'\U0001F468\U0001F3FB',
u':man_mage-dark_skin_tone:': u'\U0001F9D9\U0001F3FF\U0000200D\U00002642',
u':man_mage-light_skin_tone:': u'\U0001F9D9\U0001F3FB\U0000200D\U00002642',
u':man_mage-medium-dark_skin_tone:': u'\U0001F9D9\U0001F3FE\U0000200D\U00002642',
u':man_mage-medium-light_skin_tone:': u'\U0001F9D9\U0001F3FC\U0000200D\U00002642',
u':man_mage-medium_skin_tone:': u'\U0001F9D9\U0001F3FD\U0000200D\U00002642',
u':man_mage:': u'\U0001F9D9\U0000200D\U00002642',
u':man_mage_dark_skin_tone:': u'\U0001F9D9\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_mage_light_skin_tone:': u'\U0001F9D9\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_mage_medium-dark_skin_tone:': u'\U0001F9D9\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_mage_medium-light_skin_tone:': u'\U0001F9D9\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_mage_medium_skin_tone:': u'\U0001F9D9\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_mage_selector:': u'\U0001F9D9\U0000200D\U00002642\U0000FE0F',
u':man_mechanic:': u'\U0001F468\U0000200D\U0001F527',
u':man_mechanic_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F527',
u':man_mechanic_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F527',
u':man_mechanic_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F527',
u':man_mechanic_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F527',
u':man_mechanic_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F527',
u':man_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE',
u':man_medium-light_skin_tone:': u'\U0001F468\U0001F3FC',
u':man_medium_skin_tone:': u'\U0001F468\U0001F3FD',
u':man_mountain_biking-dark_skin_tone:': u'\U0001F6B5\U0001F3FF\U0000200D\U00002642',
u':man_mountain_biking-light_skin_tone:': u'\U0001F6B5\U0001F3FB\U0000200D\U00002642',
u':man_mountain_biking-medium-dark_skin_tone:': u'\U0001F6B5\U0001F3FE\U0000200D\U00002642',
u':man_mountain_biking-medium-light_skin_tone:': u'\U0001F6B5\U0001F3FC\U0000200D\U00002642',
u':man_mountain_biking-medium_skin_tone:': u'\U0001F6B5\U0001F3FD\U0000200D\U00002642',
u':man_mountain_biking:': u'\U0001F6B5\U0000200D\U00002642',
u':man_mountain_biking_dark_skin_tone:': u'\U0001F6B5\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_mountain_biking_light_skin_tone:': u'\U0001F6B5\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_mountain_biking_medium-dark_skin_tone:': u'\U0001F6B5\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_mountain_biking_medium-light_skin_tone:': u'\U0001F6B5\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_mountain_biking_medium_skin_tone:': u'\U0001F6B5\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_mountain_biking_selector:': u'\U0001F6B5\U0000200D\U00002642\U0000FE0F',
u':man_office_worker:': u'\U0001F468\U0000200D\U0001F4BC',
u':man_office_worker_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F4BC',
u':man_office_worker_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F4BC',
u':man_office_worker_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F4BC',
u':man_office_worker_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F4BC',
u':man_office_worker_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F4BC',
u':man_pilot-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U00002708',
u':man_pilot-light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U00002708',
u':man_pilot-medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U00002708',
u':man_pilot-medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U00002708',
u':man_pilot-medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U00002708',
u':man_pilot:': u'\U0001F468\U0000200D\U00002708',
u':man_pilot_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U00002708\U0000FE0F',
u':man_pilot_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U00002708\U0000FE0F',
u':man_pilot_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U00002708\U0000FE0F',
u':man_pilot_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U00002708\U0000FE0F',
u':man_pilot_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U00002708\U0000FE0F',
u':man_pilot_selector:': u'\U0001F468\U0000200D\U00002708\U0000FE0F',
u':man_playing_handball-dark_skin_tone:': u'\U0001F93E\U0001F3FF\U0000200D\U00002642',
u':man_playing_handball-light_skin_tone:': u'\U0001F93E\U0001F3FB\U0000200D\U00002642',
u':man_playing_handball-medium-dark_skin_tone:': u'\U0001F93E\U0001F3FE\U0000200D\U00002642',
u':man_playing_handball-medium-light_skin_tone:': u'\U0001F93E\U0001F3FC\U0000200D\U00002642',
u':man_playing_handball-medium_skin_tone:': u'\U0001F93E\U0001F3FD\U0000200D\U00002642',
u':man_playing_handball:': u'\U0001F93E\U0000200D\U00002642',
u':man_playing_handball_dark_skin_tone:': u'\U0001F93E\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_playing_handball_light_skin_tone:': u'\U0001F93E\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_playing_handball_medium-dark_skin_tone:': u'\U0001F93E\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_playing_handball_medium-light_skin_tone:': u'\U0001F93E\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_playing_handball_medium_skin_tone:': u'\U0001F93E\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_playing_handball_selector:': u'\U0001F93E\U0000200D\U00002642\U0000FE0F',
u':man_playing_water_polo-dark_skin_tone:': u'\U0001F93D\U0001F3FF\U0000200D\U00002642',
u':man_playing_water_polo-light_skin_tone:': u'\U0001F93D\U0001F3FB\U0000200D\U00002642',
u':man_playing_water_polo-medium-dark_skin_tone:': u'\U0001F93D\U0001F3FE\U0000200D\U00002642',
u':man_playing_water_polo-medium-light_skin_tone:': u'\U0001F93D\U0001F3FC\U0000200D\U00002642',
u':man_playing_water_polo-medium_skin_tone:': u'\U0001F93D\U0001F3FD\U0000200D\U00002642',
u':man_playing_water_polo:': u'\U0001F93D\U0000200D\U00002642',
u':man_playing_water_polo_dark_skin_tone:': u'\U0001F93D\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_playing_water_polo_light_skin_tone:': u'\U0001F93D\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_playing_water_polo_medium-dark_skin_tone:': u'\U0001F93D\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_playing_water_polo_medium-light_skin_tone:': u'\U0001F93D\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_playing_water_polo_medium_skin_tone:': u'\U0001F93D\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_playing_water_polo_selector:': u'\U0001F93D\U0000200D\U00002642\U0000FE0F',
u':man_police_officer-dark_skin_tone:': u'\U0001F46E\U0001F3FF\U0000200D\U00002642',
u':man_police_officer-light_skin_tone:': u'\U0001F46E\U0001F3FB\U0000200D\U00002642',
u':man_police_officer-medium-dark_skin_tone:': u'\U0001F46E\U0001F3FE\U0000200D\U00002642',
u':man_police_officer-medium-light_skin_tone:': u'\U0001F46E\U0001F3FC\U0000200D\U00002642',
u':man_police_officer-medium_skin_tone:': u'\U0001F46E\U0001F3FD\U0000200D\U00002642',
u':man_police_officer:': u'\U0001F46E\U0000200D\U00002642',
u':man_police_officer_dark_skin_tone:': u'\U0001F46E\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_police_officer_light_skin_tone:': u'\U0001F46E\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_police_officer_medium-dark_skin_tone:': u'\U0001F46E\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_police_officer_medium-light_skin_tone:': u'\U0001F46E\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_police_officer_medium_skin_tone:': u'\U0001F46E\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_police_officer_selector:': u'\U0001F46E\U0000200D\U00002642\U0000FE0F',
u':man_pouting-dark_skin_tone:': u'\U0001F64E\U0001F3FF\U0000200D\U00002642',
u':man_pouting-light_skin_tone:': u'\U0001F64E\U0001F3FB\U0000200D\U00002642',
u':man_pouting-medium-dark_skin_tone:': u'\U0001F64E\U0001F3FE\U0000200D\U00002642',
u':man_pouting-medium-light_skin_tone:': u'\U0001F64E\U0001F3FC\U0000200D\U00002642',
u':man_pouting-medium_skin_tone:': u'\U0001F64E\U0001F3FD\U0000200D\U00002642',
u':man_pouting:': u'\U0001F64E\U0000200D\U00002642',
u':man_pouting_dark_skin_tone:': u'\U0001F64E\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_pouting_light_skin_tone:': u'\U0001F64E\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_pouting_medium-dark_skin_tone:': u'\U0001F64E\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_pouting_medium-light_skin_tone:': u'\U0001F64E\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_pouting_medium_skin_tone:': u'\U0001F64E\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_pouting_selector:': u'\U0001F64E\U0000200D\U00002642\U0000FE0F',
u':man_raising_hand-dark_skin_tone:': u'\U0001F64B\U0001F3FF\U0000200D\U00002642',
u':man_raising_hand-light_skin_tone:': u'\U0001F64B\U0001F3FB\U0000200D\U00002642',
u':man_raising_hand-medium-dark_skin_tone:': u'\U0001F64B\U0001F3FE\U0000200D\U00002642',
u':man_raising_hand-medium-light_skin_tone:': u'\U0001F64B\U0001F3FC\U0000200D\U00002642',
u':man_raising_hand-medium_skin_tone:': u'\U0001F64B\U0001F3FD\U0000200D\U00002642',
u':man_raising_hand:': u'\U0001F64B\U0000200D\U00002642',
u':man_raising_hand_dark_skin_tone:': u'\U0001F64B\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_raising_hand_light_skin_tone:': u'\U0001F64B\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_raising_hand_medium-dark_skin_tone:': u'\U0001F64B\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_raising_hand_medium-light_skin_tone:': u'\U0001F64B\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_raising_hand_medium_skin_tone:': u'\U0001F64B\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_raising_hand_selector:': u'\U0001F64B\U0000200D\U00002642\U0000FE0F',
u':man_rowing_boat-dark_skin_tone:': u'\U0001F6A3\U0001F3FF\U0000200D\U00002642',
u':man_rowing_boat-light_skin_tone:': u'\U0001F6A3\U0001F3FB\U0000200D\U00002642',
u':man_rowing_boat-medium-dark_skin_tone:': u'\U0001F6A3\U0001F3FE\U0000200D\U00002642',
u':man_rowing_boat-medium-light_skin_tone:': u'\U0001F6A3\U0001F3FC\U0000200D\U00002642',
u':man_rowing_boat-medium_skin_tone:': u'\U0001F6A3\U0001F3FD\U0000200D\U00002642',
u':man_rowing_boat:': u'\U0001F6A3\U0000200D\U00002642',
u':man_rowing_boat_dark_skin_tone:': u'\U0001F6A3\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_rowing_boat_light_skin_tone:': u'\U0001F6A3\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_rowing_boat_medium-dark_skin_tone:': u'\U0001F6A3\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_rowing_boat_medium-light_skin_tone:': u'\U0001F6A3\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_rowing_boat_medium_skin_tone:': u'\U0001F6A3\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_rowing_boat_selector:': u'\U0001F6A3\U0000200D\U00002642\U0000FE0F',
u':man_running-dark_skin_tone:': u'\U0001F3C3\U0001F3FF\U0000200D\U00002642',
u':man_running-light_skin_tone:': u'\U0001F3C3\U0001F3FB\U0000200D\U00002642',
u':man_running-medium-dark_skin_tone:': u'\U0001F3C3\U0001F3FE\U0000200D\U00002642',
u':man_running-medium-light_skin_tone:': u'\U0001F3C3\U0001F3FC\U0000200D\U00002642',
u':man_running-medium_skin_tone:': u'\U0001F3C3\U0001F3FD\U0000200D\U00002642',
u':man_running:': u'\U0001F3C3\U0000200D\U00002642',
u':man_running_dark_skin_tone:': u'\U0001F3C3\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_running_light_skin_tone:': u'\U0001F3C3\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_running_medium-dark_skin_tone:': u'\U0001F3C3\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_running_medium-light_skin_tone:': u'\U0001F3C3\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_running_medium_skin_tone:': u'\U0001F3C3\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_running_selector:': u'\U0001F3C3\U0000200D\U00002642\U0000FE0F',
u':man_scientist:': u'\U0001F468\U0000200D\U0001F52C',
u':man_scientist_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F52C',
u':man_scientist_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F52C',
u':man_scientist_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F52C',
u':man_scientist_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F52C',
u':man_scientist_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F52C',
u':man_shrugging-dark_skin_tone:': u'\U0001F937\U0001F3FF\U0000200D\U00002642',
u':man_shrugging-light_skin_tone:': u'\U0001F937\U0001F3FB\U0000200D\U00002642',
u':man_shrugging-medium-dark_skin_tone:': u'\U0001F937\U0001F3FE\U0000200D\U00002642',
u':man_shrugging-medium-light_skin_tone:': u'\U0001F937\U0001F3FC\U0000200D\U00002642',
u':man_shrugging-medium_skin_tone:': u'\U0001F937\U0001F3FD\U0000200D\U00002642',
u':man_shrugging:': u'\U0001F937\U0000200D\U00002642',
u':man_shrugging_dark_skin_tone:': u'\U0001F937\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_shrugging_light_skin_tone:': u'\U0001F937\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_shrugging_medium-dark_skin_tone:': u'\U0001F937\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_shrugging_medium-light_skin_tone:': u'\U0001F937\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_shrugging_medium_skin_tone:': u'\U0001F937\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_shrugging_selector:': u'\U0001F937\U0000200D\U00002642\U0000FE0F',
u':man_singer:': u'\U0001F468\U0000200D\U0001F3A4',
u':man_singer_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F3A4',
u':man_singer_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F3A4',
u':man_singer_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F3A4',
u':man_singer_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F3A4',
u':man_singer_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F3A4',
u':man_standing-dark_skin_tone:': u'\U0001F9CD\U0001F3FF\U0000200D\U00002642',
u':man_standing-dark_skin_tone_selector:': u'\U0001F9CD\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_standing-light_skin_tone:': u'\U0001F9CD\U0001F3FB\U0000200D\U00002642',
u':man_standing-light_skin_tone_selector:': u'\U0001F9CD\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_standing-medium-dark_skin_tone:': u'\U0001F9CD\U0001F3FE\U0000200D\U00002642',
u':man_standing-medium-dark_skin_tone_selector:': u'\U0001F9CD\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_standing-medium-light_skin_tone:': u'\U0001F9CD\U0001F3FC\U0000200D\U00002642',
u':man_standing-medium-light_skin_tone_selector:': u'\U0001F9CD\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_standing-medium_skin_tone:': u'\U0001F9CD\U0001F3FD\U0000200D\U00002642',
u':man_standing-medium_skin_tone_selector:': u'\U0001F9CD\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_standing:': u'\U0001F9CD\U0000200D\U00002642',
u':man_standing_selector:': u'\U0001F9CD\U0000200D\U00002642\U0000FE0F',
u':man_student:': u'\U0001F468\U0000200D\U0001F393',
u':man_student_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F393',
u':man_student_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F393',
u':man_student_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F393',
u':man_student_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F393',
u':man_student_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F393',
u':man_superhero-dark_skin_tone:': u'\U0001F9B8\U0001F3FF\U0000200D\U00002642',
u':man_superhero-dark_skin_tone_selector:': u'\U0001F9B8\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_superhero-light_skin_tone:': u'\U0001F9B8\U0001F3FB\U0000200D\U00002642',
u':man_superhero-light_skin_tone_selector:': u'\U0001F9B8\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_superhero-medium-dark_skin_tone:': u'\U0001F9B8\U0001F3FE\U0000200D\U00002642',
u':man_superhero-medium-dark_skin_tone_selector:': u'\U0001F9B8\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_superhero-medium-light_skin_tone:': u'\U0001F9B8\U0001F3FC\U0000200D\U00002642',
u':man_superhero-medium-light_skin_tone_selector:': u'\U0001F9B8\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_superhero-medium_skin_tone:': u'\U0001F9B8\U0001F3FD\U0000200D\U00002642',
u':man_superhero-medium_skin_tone_selector:': u'\U0001F9B8\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_superhero:': u'\U0001F9B8\U0000200D\U00002642',
u':man_superhero_selector:': u'\U0001F9B8\U0000200D\U00002642\U0000FE0F',
u':man_supervillain-dark_skin_tone:': u'\U0001F9B9\U0001F3FF\U0000200D\U00002642',
u':man_supervillain-dark_skin_tone_selector:': u'\U0001F9B9\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_supervillain-light_skin_tone:': u'\U0001F9B9\U0001F3FB\U0000200D\U00002642',
u':man_supervillain-light_skin_tone_selector:': u'\U0001F9B9\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_supervillain-medium-dark_skin_tone:': u'\U0001F9B9\U0001F3FE\U0000200D\U00002642',
u':man_supervillain-medium-dark_skin_tone_selector:': u'\U0001F9B9\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_supervillain-medium-light_skin_tone:': u'\U0001F9B9\U0001F3FC\U0000200D\U00002642',
u':man_supervillain-medium-light_skin_tone_selector:': u'\U0001F9B9\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_supervillain-medium_skin_tone:': u'\U0001F9B9\U0001F3FD\U0000200D\U00002642',
u':man_supervillain-medium_skin_tone_selector:': u'\U0001F9B9\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_supervillain:': u'\U0001F9B9\U0000200D\U00002642',
u':man_supervillain_selector:': u'\U0001F9B9\U0000200D\U00002642\U0000FE0F',
u':man_surfing-dark_skin_tone:': u'\U0001F3C4\U0001F3FF\U0000200D\U00002642',
u':man_surfing-light_skin_tone:': u'\U0001F3C4\U0001F3FB\U0000200D\U00002642',
u':man_surfing-medium-dark_skin_tone:': u'\U0001F3C4\U0001F3FE\U0000200D\U00002642',
u':man_surfing-medium-light_skin_tone:': u'\U0001F3C4\U0001F3FC\U0000200D\U00002642',
u':man_surfing-medium_skin_tone:': u'\U0001F3C4\U0001F3FD\U0000200D\U00002642',
u':man_surfing:': u'\U0001F3C4\U0000200D\U00002642',
u':man_surfing_dark_skin_tone:': u'\U0001F3C4\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_surfing_light_skin_tone:': u'\U0001F3C4\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_surfing_medium-dark_skin_tone:': u'\U0001F3C4\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_surfing_medium-light_skin_tone:': u'\U0001F3C4\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_surfing_medium_skin_tone:': u'\U0001F3C4\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_surfing_selector:': u'\U0001F3C4\U0000200D\U00002642\U0000FE0F',
u':man_swimming-dark_skin_tone:': u'\U0001F3CA\U0001F3FF\U0000200D\U00002642',
u':man_swimming-light_skin_tone:': u'\U0001F3CA\U0001F3FB\U0000200D\U00002642',
u':man_swimming-medium-dark_skin_tone:': u'\U0001F3CA\U0001F3FE\U0000200D\U00002642',
u':man_swimming-medium-light_skin_tone:': u'\U0001F3CA\U0001F3FC\U0000200D\U00002642',
u':man_swimming-medium_skin_tone:': u'\U0001F3CA\U0001F3FD\U0000200D\U00002642',
u':man_swimming:': u'\U0001F3CA\U0000200D\U00002642',
u':man_swimming_dark_skin_tone:': u'\U0001F3CA\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_swimming_light_skin_tone:': u'\U0001F3CA\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_swimming_medium-dark_skin_tone:': u'\U0001F3CA\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_swimming_medium-light_skin_tone:': u'\U0001F3CA\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_swimming_medium_skin_tone:': u'\U0001F3CA\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_swimming_selector:': u'\U0001F3CA\U0000200D\U00002642\U0000FE0F',
u':man_teacher:': u'\U0001F468\U0000200D\U0001F3EB',
u':man_teacher_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F3EB',
u':man_teacher_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F3EB',
u':man_teacher_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F3EB',
u':man_teacher_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F3EB',
u':man_teacher_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F3EB',
u':man_technologist:': u'\U0001F468\U0000200D\U0001F4BB',
u':man_technologist_dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F4BB',
u':man_technologist_light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F4BB',
u':man_technologist_medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F4BB',
u':man_technologist_medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F4BB',
u':man_technologist_medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F4BB',
u':man_tipping_hand-dark_skin_tone:': u'\U0001F481\U0001F3FF\U0000200D\U00002642',
u':man_tipping_hand-light_skin_tone:': u'\U0001F481\U0001F3FB\U0000200D\U00002642',
u':man_tipping_hand-medium-dark_skin_tone:': u'\U0001F481\U0001F3FE\U0000200D\U00002642',
u':man_tipping_hand-medium-light_skin_tone:': u'\U0001F481\U0001F3FC\U0000200D\U00002642',
u':man_tipping_hand-medium_skin_tone:': u'\U0001F481\U0001F3FD\U0000200D\U00002642',
u':man_tipping_hand:': u'\U0001F481\U0000200D\U00002642',
u':man_tipping_hand_dark_skin_tone:': u'\U0001F481\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_tipping_hand_light_skin_tone:': u'\U0001F481\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_tipping_hand_medium-dark_skin_tone:': u'\U0001F481\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_tipping_hand_medium-light_skin_tone:': u'\U0001F481\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_tipping_hand_medium_skin_tone:': u'\U0001F481\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_tipping_hand_selector:': u'\U0001F481\U0000200D\U00002642\U0000FE0F',
u':man_vampire-dark_skin_tone:': u'\U0001F9DB\U0001F3FF\U0000200D\U00002642',
u':man_vampire-light_skin_tone:': u'\U0001F9DB\U0001F3FB\U0000200D\U00002642',
u':man_vampire-medium-dark_skin_tone:': u'\U0001F9DB\U0001F3FE\U0000200D\U00002642',
u':man_vampire-medium-light_skin_tone:': u'\U0001F9DB\U0001F3FC\U0000200D\U00002642',
u':man_vampire-medium_skin_tone:': u'\U0001F9DB\U0001F3FD\U0000200D\U00002642',
u':man_vampire:': u'\U0001F9DB\U0000200D\U00002642',
u':man_vampire_dark_skin_tone:': u'\U0001F9DB\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_vampire_light_skin_tone:': u'\U0001F9DB\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_vampire_medium-dark_skin_tone:': u'\U0001F9DB\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_vampire_medium-light_skin_tone:': u'\U0001F9DB\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_vampire_medium_skin_tone:': u'\U0001F9DB\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_vampire_selector:': u'\U0001F9DB\U0000200D\U00002642\U0000FE0F',
u':man_walking-dark_skin_tone:': u'\U0001F6B6\U0001F3FF\U0000200D\U00002642',
u':man_walking-light_skin_tone:': u'\U0001F6B6\U0001F3FB\U0000200D\U00002642',
u':man_walking-medium-dark_skin_tone:': u'\U0001F6B6\U0001F3FE\U0000200D\U00002642',
u':man_walking-medium-light_skin_tone:': u'\U0001F6B6\U0001F3FC\U0000200D\U00002642',
u':man_walking-medium_skin_tone:': u'\U0001F6B6\U0001F3FD\U0000200D\U00002642',
u':man_walking:': u'\U0001F6B6\U0000200D\U00002642',
u':man_walking_dark_skin_tone:': u'\U0001F6B6\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_walking_light_skin_tone:': u'\U0001F6B6\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_walking_medium-dark_skin_tone:': u'\U0001F6B6\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_walking_medium-light_skin_tone:': u'\U0001F6B6\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_walking_medium_skin_tone:': u'\U0001F6B6\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_walking_selector:': u'\U0001F6B6\U0000200D\U00002642\U0000FE0F',
u':man_wearing_turban-dark_skin_tone:': u'\U0001F473\U0001F3FF\U0000200D\U00002642',
u':man_wearing_turban-light_skin_tone:': u'\U0001F473\U0001F3FB\U0000200D\U00002642',
u':man_wearing_turban-medium-dark_skin_tone:': u'\U0001F473\U0001F3FE\U0000200D\U00002642',
u':man_wearing_turban-medium-light_skin_tone:': u'\U0001F473\U0001F3FC\U0000200D\U00002642',
u':man_wearing_turban-medium_skin_tone:': u'\U0001F473\U0001F3FD\U0000200D\U00002642',
u':man_wearing_turban:': u'\U0001F473\U0000200D\U00002642',
u':man_wearing_turban_dark_skin_tone:': u'\U0001F473\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':man_wearing_turban_light_skin_tone:': u'\U0001F473\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':man_wearing_turban_medium-dark_skin_tone:': u'\U0001F473\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':man_wearing_turban_medium-light_skin_tone:': u'\U0001F473\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':man_wearing_turban_medium_skin_tone:': u'\U0001F473\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':man_wearing_turban_selector:': u'\U0001F473\U0000200D\U00002642\U0000FE0F',
u':man_with_Chinese_cap:': u'\U0001F472',
u':man_with_Chinese_cap_dark_skin_tone:': u'\U0001F472\U0001F3FF',
u':man_with_Chinese_cap_light_skin_tone:': u'\U0001F472\U0001F3FB',
u':man_with_Chinese_cap_medium-dark_skin_tone:': u'\U0001F472\U0001F3FE',
u':man_with_Chinese_cap_medium-light_skin_tone:': u'\U0001F472\U0001F3FC',
u':man_with_Chinese_cap_medium_skin_tone:': u'\U0001F472\U0001F3FD',
u':man_with_probing_cane-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F9AF',
u':man_with_probing_cane-light_skin_tone:': u'\U0001F468\U0001F3FB\U0000200D\U0001F9AF',
u':man_with_probing_cane-medium-dark_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F9AF',
u':man_with_probing_cane-medium-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F9AF',
u':man_with_probing_cane-medium_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F9AF',
u':man_with_probing_cane:': u'\U0001F468\U0000200D\U0001F9AF',
u':man_zombie:': u'\U0001F9DF\U0000200D\U00002642',
u':man_zombie_selector:': u'\U0001F9DF\U0000200D\U00002642\U0000FE0F',
u':mango:': u'\U0001F96D',
u':mantelpiece_clock:': u'\U0001F570',
u':mantelpiece_clock_selector:': u'\U0001F570\U0000FE0F',
u':manual_wheelchair:': u'\U0001F9BD',
u':man\u2019s_shoe:': u'\U0001F45E',
u':map_of_Japan:': u'\U0001F5FE',
u':maple_leaf:': u'\U0001F341',
u':martial_arts_uniform:': u'\U0001F94B',
u':mate:': u'\U0001F9C9',
u':meat_on_bone:': u'\U0001F356',
u':mechanical_arm:': u'\U0001F9BE',
u':mechanical_leg:': u'\U0001F9BF',
u':medical_symbol:': u'\U00002695',
u':medical_symbol_selector:': u'\U00002695\U0000FE0F',
u':medium_dark_skin_tone:': u'\U0001F3FE',
u':medium_light_skin_tone:': u'\U0001F3FC',
u':medium_skin_tone:': u'\U0001F3FD',
u':megaphone:': u'\U0001F4E3',
u':melon:': u'\U0001F348',
u':memo:': u'\U0001F4DD',
u':men_holding_hands-dark_skin_tone-light_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':men_holding_hands-dark_skin_tone-medium-dark_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FE',
u':men_holding_hands-dark_skin_tone-medium-light_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':men_holding_hands-dark_skin_tone-medium_skin_tone:': u'\U0001F468\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FD',
u':men_holding_hands-dark_skin_tone:': u'\U0001F46C\U0001F3FF',
u':men_holding_hands-light_skin_tone:': u'\U0001F46C\U0001F3FB',
u':men_holding_hands-medium-dark_skin_tone-light_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':men_holding_hands-medium-dark_skin_tone-medium-light_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':men_holding_hands-medium-dark_skin_tone-medium_skin_tone:': u'\U0001F468\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FD',
u':men_holding_hands-medium-dark_skin_tone:': u'\U0001F46C\U0001F3FE',
u':men_holding_hands-medium-light_skin_tone-light_skin_tone:': u'\U0001F468\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':men_holding_hands-medium-light_skin_tone:': u'\U0001F46C\U0001F3FC',
u':men_holding_hands-medium_skin_tone-light_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':men_holding_hands-medium_skin_tone-medium-light_skin_tone:': u'\U0001F468\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':men_holding_hands-medium_skin_tone:': u'\U0001F46C\U0001F3FD',
u':men_with_bunny_ears:': u'\U0001F46F\U0000200D\U00002642',
u':men_with_bunny_ears_selector:': u'\U0001F46F\U0000200D\U00002642\U0000FE0F',
u':men_wrestling:': u'\U0001F93C\U0000200D\U00002642',
u':men_wrestling_selector:': u'\U0001F93C\U0000200D\U00002642\U0000FE0F',
u':menorah:': u'\U0001F54E',
u':men\u2019s_room:': u'\U0001F6B9',
u':mermaid-dark_skin_tone:': u'\U0001F9DC\U0001F3FF\U0000200D\U00002640',
u':mermaid-light_skin_tone:': u'\U0001F9DC\U0001F3FB\U0000200D\U00002640',
u':mermaid-medium-dark_skin_tone:': u'\U0001F9DC\U0001F3FE\U0000200D\U00002640',
u':mermaid-medium-light_skin_tone:': u'\U0001F9DC\U0001F3FC\U0000200D\U00002640',
u':mermaid-medium_skin_tone:': u'\U0001F9DC\U0001F3FD\U0000200D\U00002640',
u':mermaid:': u'\U0001F9DC\U0000200D\U00002640',
u':mermaid_dark_skin_tone:': u'\U0001F9DC\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':mermaid_light_skin_tone:': u'\U0001F9DC\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':mermaid_medium-dark_skin_tone:': u'\U0001F9DC\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':mermaid_medium-light_skin_tone:': u'\U0001F9DC\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':mermaid_medium_skin_tone:': u'\U0001F9DC\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':mermaid_selector:': u'\U0001F9DC\U0000200D\U00002640\U0000FE0F',
u':merman-dark_skin_tone:': u'\U0001F9DC\U0001F3FF\U0000200D\U00002642',
u':merman-light_skin_tone:': u'\U0001F9DC\U0001F3FB\U0000200D\U00002642',
u':merman-medium-dark_skin_tone:': u'\U0001F9DC\U0001F3FE\U0000200D\U00002642',
u':merman-medium-light_skin_tone:': u'\U0001F9DC\U0001F3FC\U0000200D\U00002642',
u':merman-medium_skin_tone:': u'\U0001F9DC\U0001F3FD\U0000200D\U00002642',
u':merman:': u'\U0001F9DC\U0000200D\U00002642',
u':merman_dark_skin_tone:': u'\U0001F9DC\U0001F3FF\U0000200D\U00002642\U0000FE0F',
u':merman_light_skin_tone:': u'\U0001F9DC\U0001F3FB\U0000200D\U00002642\U0000FE0F',
u':merman_medium-dark_skin_tone:': u'\U0001F9DC\U0001F3FE\U0000200D\U00002642\U0000FE0F',
u':merman_medium-light_skin_tone:': u'\U0001F9DC\U0001F3FC\U0000200D\U00002642\U0000FE0F',
u':merman_medium_skin_tone:': u'\U0001F9DC\U0001F3FD\U0000200D\U00002642\U0000FE0F',
u':merman_selector:': u'\U0001F9DC\U0000200D\U00002642\U0000FE0F',
u':merperson:': u'\U0001F9DC',
u':merperson_dark_skin_tone:': u'\U0001F9DC\U0001F3FF',
u':merperson_light_skin_tone:': u'\U0001F9DC\U0001F3FB',
u':merperson_medium-dark_skin_tone:': u'\U0001F9DC\U0001F3FE',
u':merperson_medium-light_skin_tone:': u'\U0001F9DC\U0001F3FC',
u':merperson_medium_skin_tone:': u'\U0001F9DC\U0001F3FD',
u':metro:': u'\U0001F687',
u':microbe:': u'\U0001F9A0',
u':microphone:': u'\U0001F3A4',
u':microscope:': u'\U0001F52C',
u':middle_finger:': u'\U0001F595',
u':middle_finger_dark_skin_tone:': u'\U0001F595\U0001F3FF',
u':middle_finger_light_skin_tone:': u'\U0001F595\U0001F3FB',
u':middle_finger_medium-dark_skin_tone:': u'\U0001F595\U0001F3FE',
u':middle_finger_medium-light_skin_tone:': u'\U0001F595\U0001F3FC',
u':middle_finger_medium_skin_tone:': u'\U0001F595\U0001F3FD',
u':military_medal:': u'\U0001F396',
u':military_medal_selector:': u'\U0001F396\U0000FE0F',
u':milky_way:': u'\U0001F30C',
u':minibus:': u'\U0001F690',
u':moai:': u'\U0001F5FF',
u':mobile_phone:': u'\U0001F4F1',
u':mobile_phone_off:': u'\U0001F4F4',
u':mobile_phone_with_arrow:': u'\U0001F4F2',
u':money-mouth_face:': u'\U0001F911',
u':money_bag:': u'\U0001F4B0',
u':money_with_wings:': u'\U0001F4B8',
u':monkey:': u'\U0001F412',
u':monkey_face:': u'\U0001F435',
u':monorail:': u'\U0001F69D',
u':moon_cake:': u'\U0001F96E',
u':moon_viewing_ceremony:': u'\U0001F391',
u':mosque:': u'\U0001F54C',
u':mosquito:': u'\U0001F99F',
u':motor_boat:': u'\U0001F6E5',
u':motor_boat_selector:': u'\U0001F6E5\U0000FE0F',
u':motor_scooter:': u'\U0001F6F5',
u':motorcycle:': u'\U0001F3CD',
u':motorcycle_selector:': u'\U0001F3CD\U0000FE0F',
u':motorized_wheelchair:': u'\U0001F9BC',
u':motorway:': u'\U0001F6E3',
u':motorway_selector:': u'\U0001F6E3\U0000FE0F',
u':mount_fuji:': u'\U0001F5FB',
u':mountain:': u'\U000026F0',
u':mountain_cableway:': u'\U0001F6A0',
u':mountain_railway:': u'\U0001F69E',
u':mountain_selector:': u'\U000026F0\U0000FE0F',
u':mouse:': u'\U0001F401',
u':mouse_face:': u'\U0001F42D',
u':mouth:': u'\U0001F444',
u':movie_camera:': u'\U0001F3A5',
u':multiplication_sign:': u'\U00002716\U0000FE0F',
u':mushroom:': u'\U0001F344',
u':musical_keyboard:': u'\U0001F3B9',
u':musical_note:': u'\U0001F3B5',
u':musical_notes:': u'\U0001F3B6',
u':musical_score:': u'\U0001F3BC',
u':muted_speaker:': u'\U0001F507',
u':nail_polish:': u'\U0001F485',
u':nail_polish_dark_skin_tone:': u'\U0001F485\U0001F3FF',
u':nail_polish_light_skin_tone:': u'\U0001F485\U0001F3FB',
u':nail_polish_medium-dark_skin_tone:': u'\U0001F485\U0001F3FE',
u':nail_polish_medium-light_skin_tone:': u'\U0001F485\U0001F3FC',
u':nail_polish_medium_skin_tone:': u'\U0001F485\U0001F3FD',
u':name_badge:': u'\U0001F4DB',
u':national_park:': u'\U0001F3DE',
u':national_park_selector:': u'\U0001F3DE\U0000FE0F',
u':nauseated_face:': u'\U0001F922',
u':nazar_amulet:': u'\U0001F9FF',
u':necktie:': u'\U0001F454',
u':nerd_face:': u'\U0001F913',
u':neutral_face:': u'\U0001F610',
u':new_moon:': u'\U0001F311',
u':new_moon_face:': u'\U0001F31A',
u':newspaper:': u'\U0001F4F0',
u':next_track_button:': u'\U000023ED',
u':next_track_button_selector:': u'\U000023ED\U0000FE0F',
u':night_with_stars:': u'\U0001F303',
u':nine-thirty:': u'\U0001F564',
u':nine_o\u2019clock:': u'\U0001F558',
u':no_bicycles:': u'\U0001F6B3',
u':no_entry:': u'\U000026D4',
u':no_littering:': u'\U0001F6AF',
u':no_mobile_phones:': u'\U0001F4F5',
u':no_one_under_eighteen:': u'\U0001F51E',
u':no_pedestrians:': u'\U0001F6B7',
u':no_smoking:': u'\U0001F6AD',
u':non-potable_water:': u'\U0001F6B1',
u':nose:': u'\U0001F443',
u':nose_dark_skin_tone:': u'\U0001F443\U0001F3FF',
u':nose_light_skin_tone:': u'\U0001F443\U0001F3FB',
u':nose_medium-dark_skin_tone:': u'\U0001F443\U0001F3FE',
u':nose_medium-light_skin_tone:': u'\U0001F443\U0001F3FC',
u':nose_medium_skin_tone:': u'\U0001F443\U0001F3FD',
u':notebook:': u'\U0001F4D3',
u':notebook_with_decorative_cover:': u'\U0001F4D4',
u':nut_and_bolt:': u'\U0001F529',
u':octopus:': u'\U0001F419',
u':oden:': u'\U0001F362',
u':office_building:': u'\U0001F3E2',
u':ogre:': u'\U0001F479',
u':oil_drum:': u'\U0001F6E2',
u':oil_drum_selector:': u'\U0001F6E2\U0000FE0F',
u':old_key:': u'\U0001F5DD',
u':old_key_selector:': u'\U0001F5DD\U0000FE0F',
u':old_man:': u'\U0001F474',
u':old_man_dark_skin_tone:': u'\U0001F474\U0001F3FF',
u':old_man_light_skin_tone:': u'\U0001F474\U0001F3FB',
u':old_man_medium-dark_skin_tone:': u'\U0001F474\U0001F3FE',
u':old_man_medium-light_skin_tone:': u'\U0001F474\U0001F3FC',
u':old_man_medium_skin_tone:': u'\U0001F474\U0001F3FD',
u':old_woman:': u'\U0001F475',
u':old_woman_dark_skin_tone:': u'\U0001F475\U0001F3FF',
u':old_woman_light_skin_tone:': u'\U0001F475\U0001F3FB',
u':old_woman_medium-dark_skin_tone:': u'\U0001F475\U0001F3FE',
u':old_woman_medium-light_skin_tone:': u'\U0001F475\U0001F3FC',
u':old_woman_medium_skin_tone:': u'\U0001F475\U0001F3FD',
u':older_adult:': u'\U0001F9D3',
u':older_adult_dark_skin_tone:': u'\U0001F9D3\U0001F3FF',
u':older_adult_light_skin_tone:': u'\U0001F9D3\U0001F3FB',
u':older_adult_medium-dark_skin_tone:': u'\U0001F9D3\U0001F3FE',
u':older_adult_medium-light_skin_tone:': u'\U0001F9D3\U0001F3FC',
u':older_adult_medium_skin_tone:': u'\U0001F9D3\U0001F3FD',
u':om:': u'\U0001F549',
u':om_selector:': u'\U0001F549\U0000FE0F',
u':oncoming_automobile:': u'\U0001F698',
u':oncoming_bus:': u'\U0001F68D',
u':oncoming_fist:': u'\U0001F44A',
u':oncoming_fist_dark_skin_tone:': u'\U0001F44A\U0001F3FF',
u':oncoming_fist_light_skin_tone:': u'\U0001F44A\U0001F3FB',
u':oncoming_fist_medium-dark_skin_tone:': u'\U0001F44A\U0001F3FE',
u':oncoming_fist_medium-light_skin_tone:': u'\U0001F44A\U0001F3FC',
u':oncoming_fist_medium_skin_tone:': u'\U0001F44A\U0001F3FD',
u':oncoming_police_car:': u'\U0001F694',
u':oncoming_taxi:': u'\U0001F696',
u':one-piece_swimsuit:': u'\U0001FA71',
u':one-thirty:': u'\U0001F55C',
u':one_o\u2019clock:': u'\U0001F550',
u':onion:': u'\U0001F9C5',
u':open_book:': u'\U0001F4D6',
u':open_file_folder:': u'\U0001F4C2',
u':open_hands:': u'\U0001F450',
u':open_hands_dark_skin_tone:': u'\U0001F450\U0001F3FF',
u':open_hands_light_skin_tone:': u'\U0001F450\U0001F3FB',
u':open_hands_medium-dark_skin_tone:': u'\U0001F450\U0001F3FE',
u':open_hands_medium-light_skin_tone:': u'\U0001F450\U0001F3FC',
u':open_hands_medium_skin_tone:': u'\U0001F450\U0001F3FD',
u':open_mailbox_with_lowered_flag:': u'\U0001F4ED',
u':open_mailbox_with_raised_flag:': u'\U0001F4EC',
u':optical_disk:': u'\U0001F4BF',
u':orange_book:': u'\U0001F4D9',
u':orange_circle:': u'\U0001F7E0',
u':orange_heart:': u'\U0001F9E1',
u':orange_square:': u'\U0001F7E7',
u':orangutan:': u'\U0001F9A7',
u':orthodox_cross:': u'\U00002626',
u':orthodox_cross_selector:': u'\U00002626\U0000FE0F',
u':otter:': u'\U0001F9A6',
u':outbox_tray:': u'\U0001F4E4',
u':owl:': u'\U0001F989',
u':ox:': u'\U0001F402',
u':oyster:': u'\U0001F9AA',
u':package:': u'\U0001F4E6',
u':page_facing_up:': u'\U0001F4C4',
u':page_with_curl:': u'\U0001F4C3',
u':pager:': u'\U0001F4DF',
u':paintbrush:': u'\U0001F58C',
u':paintbrush_selector:': u'\U0001F58C\U0000FE0F',
u':palm_tree:': u'\U0001F334',
u':palms_up_together:': u'\U0001F932',
u':palms_up_together_dark_skin_tone:': u'\U0001F932\U0001F3FF',
u':palms_up_together_light_skin_tone:': u'\U0001F932\U0001F3FB',
u':palms_up_together_medium-dark_skin_tone:': u'\U0001F932\U0001F3FE',
u':palms_up_together_medium-light_skin_tone:': u'\U0001F932\U0001F3FC',
u':palms_up_together_medium_skin_tone:': u'\U0001F932\U0001F3FD',
u':pancakes:': u'\U0001F95E',
u':panda_face:': u'\U0001F43C',
u':paperclip:': u'\U0001F4CE',
u':parachute:': u'\U0001FA82',
u':parrot:': u'\U0001F99C',
u':part_alternation_mark:': u'\U0000303D',
u':part_alternation_mark_selector:': u'\U0000303D\U0000FE0F',
u':party_popper:': u'\U0001F389',
u':partying_face:': u'\U0001F973',
u':passenger_ship:': u'\U0001F6F3',
u':passenger_ship_selector:': u'\U0001F6F3\U0000FE0F',
u':passport_control:': u'\U0001F6C2',
u':pause_button:': u'\U000023F8',
u':pause_button_selector:': u'\U000023F8\U0000FE0F',
u':paw_prints:': u'\U0001F43E',
u':peace_symbol:': u'\U0000262E',
u':peace_symbol_selector:': u'\U0000262E\U0000FE0F',
u':peach:': u'\U0001F351',
u':peacock:': u'\U0001F99A',
u':peanuts:': u'\U0001F95C',
u':pear:': u'\U0001F350',
u':pen:': u'\U0001F58A',
u':pen_selector:': u'\U0001F58A\U0000FE0F',
u':pencil:': u'\U0000270F',
u':pencil_selector:': u'\U0000270F\U0000FE0F',
u':penguin:': u'\U0001F427',
u':pensive_face:': u'\U0001F614',
u':people_holding_hands-dark_skin_tone-light_skin_tone:': u'\U0001F9D1\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FB',
u':people_holding_hands-dark_skin_tone-medium-dark_skin_tone:': u'\U0001F9D1\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FE',
u':people_holding_hands-dark_skin_tone-medium-light_skin_tone:': u'\U0001F9D1\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FC',
u':people_holding_hands-dark_skin_tone-medium_skin_tone:': u'\U0001F9D1\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FD',
u':people_holding_hands-dark_skin_tone:': u'\U0001F9D1\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FF',
u':people_holding_hands-light_skin_tone:': u'\U0001F9D1\U0001F3FB\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FB',
u':people_holding_hands-medium-dark_skin_tone-light_skin_tone:': u'\U0001F9D1\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FB',
u':people_holding_hands-medium-dark_skin_tone-medium-light_skin_tone:': u'\U0001F9D1\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FC',
u':people_holding_hands-medium-dark_skin_tone-medium_skin_tone:': u'\U0001F9D1\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FD',
u':people_holding_hands-medium-dark_skin_tone:': u'\U0001F9D1\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FE',
u':people_holding_hands-medium-light_skin_tone-light_skin_tone:': u'\U0001F9D1\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FB',
u':people_holding_hands-medium-light_skin_tone:': u'\U0001F9D1\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FC',
u':people_holding_hands-medium_skin_tone-light_skin_tone:': u'\U0001F9D1\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FB',
u':people_holding_hands-medium_skin_tone-medium-light_skin_tone:': u'\U0001F9D1\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FC',
u':people_holding_hands-medium_skin_tone:': u'\U0001F9D1\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F9D1\U0001F3FD',
u':people_holding_hands:': u'\U0001F9D1\U0000200D\U0001F91D\U0000200D\U0001F9D1',
u':people_with_bunny_ears:': u'\U0001F46F',
u':people_wrestling:': u'\U0001F93C',
u':performing_arts:': u'\U0001F3AD',
u':persevering_face:': u'\U0001F623',
u':person_biking:': u'\U0001F6B4',
u':person_biking_dark_skin_tone:': u'\U0001F6B4\U0001F3FF',
u':person_biking_light_skin_tone:': u'\U0001F6B4\U0001F3FB',
u':person_biking_medium-dark_skin_tone:': u'\U0001F6B4\U0001F3FE',
u':person_biking_medium-light_skin_tone:': u'\U0001F6B4\U0001F3FC',
u':person_biking_medium_skin_tone:': u'\U0001F6B4\U0001F3FD',
u':person_bouncing_ball:': u'\U000026F9',
u':person_bouncing_ball_dark_skin_tone:': u'\U000026F9\U0001F3FF',
u':person_bouncing_ball_light_skin_tone:': u'\U000026F9\U0001F3FB',
u':person_bouncing_ball_medium-dark_skin_tone:': u'\U000026F9\U0001F3FE',
u':person_bouncing_ball_medium-light_skin_tone:': u'\U000026F9\U0001F3FC',
u':person_bouncing_ball_medium_skin_tone:': u'\U000026F9\U0001F3FD',
u':person_bouncing_ball_selector:': u'\U000026F9\U0000FE0F',
u':person_bowing:': u'\U0001F647',
u':person_bowing_dark_skin_tone:': u'\U0001F647\U0001F3FF',
u':person_bowing_light_skin_tone:': u'\U0001F647\U0001F3FB',
u':person_bowing_medium-dark_skin_tone:': u'\U0001F647\U0001F3FE',
u':person_bowing_medium-light_skin_tone:': u'\U0001F647\U0001F3FC',
u':person_bowing_medium_skin_tone:': u'\U0001F647\U0001F3FD',
u':person_cartwheeling:': u'\U0001F938',
u':person_cartwheeling_dark_skin_tone:': u'\U0001F938\U0001F3FF',
u':person_cartwheeling_light_skin_tone:': u'\U0001F938\U0001F3FB',
u':person_cartwheeling_medium-dark_skin_tone:': u'\U0001F938\U0001F3FE',
u':person_cartwheeling_medium-light_skin_tone:': u'\U0001F938\U0001F3FC',
u':person_cartwheeling_medium_skin_tone:': u'\U0001F938\U0001F3FD',
u':person_climbing:': u'\U0001F9D7',
u':person_climbing_dark_skin_tone:': u'\U0001F9D7\U0001F3FF',
u':person_climbing_light_skin_tone:': u'\U0001F9D7\U0001F3FB',
u':person_climbing_medium-dark_skin_tone:': u'\U0001F9D7\U0001F3FE',
u':person_climbing_medium-light_skin_tone:': u'\U0001F9D7\U0001F3FC',
u':person_climbing_medium_skin_tone:': u'\U0001F9D7\U0001F3FD',
u':person_facepalming:': u'\U0001F926',
u':person_facepalming_dark_skin_tone:': u'\U0001F926\U0001F3FF',
u':person_facepalming_light_skin_tone:': u'\U0001F926\U0001F3FB',
u':person_facepalming_medium-dark_skin_tone:': u'\U0001F926\U0001F3FE',
u':person_facepalming_medium-light_skin_tone:': u'\U0001F926\U0001F3FC',
u':person_facepalming_medium_skin_tone:': u'\U0001F926\U0001F3FD',
u':person_fencing:': u'\U0001F93A',
u':person_frowning:': u'\U0001F64D',
u':person_frowning_dark_skin_tone:': u'\U0001F64D\U0001F3FF',
u':person_frowning_light_skin_tone:': u'\U0001F64D\U0001F3FB',
u':person_frowning_medium-dark_skin_tone:': u'\U0001F64D\U0001F3FE',
u':person_frowning_medium-light_skin_tone:': u'\U0001F64D\U0001F3FC',
u':person_frowning_medium_skin_tone:': u'\U0001F64D\U0001F3FD',
u':person_gesturing_NO:': u'\U0001F645',
u':person_gesturing_NO_dark_skin_tone:': u'\U0001F645\U0001F3FF',
u':person_gesturing_NO_light_skin_tone:': u'\U0001F645\U0001F3FB',
u':person_gesturing_NO_medium-dark_skin_tone:': u'\U0001F645\U0001F3FE',
u':person_gesturing_NO_medium-light_skin_tone:': u'\U0001F645\U0001F3FC',
u':person_gesturing_NO_medium_skin_tone:': u'\U0001F645\U0001F3FD',
u':person_gesturing_OK:': u'\U0001F646',
u':person_gesturing_OK_dark_skin_tone:': u'\U0001F646\U0001F3FF',
u':person_gesturing_OK_light_skin_tone:': u'\U0001F646\U0001F3FB',
u':person_gesturing_OK_medium-dark_skin_tone:': u'\U0001F646\U0001F3FE',
u':person_gesturing_OK_medium-light_skin_tone:': u'\U0001F646\U0001F3FC',
u':person_gesturing_OK_medium_skin_tone:': u'\U0001F646\U0001F3FD',
u':person_getting_haircut:': u'\U0001F487',
u':person_getting_haircut_dark_skin_tone:': u'\U0001F487\U0001F3FF',
u':person_getting_haircut_light_skin_tone:': u'\U0001F487\U0001F3FB',
u':person_getting_haircut_medium-dark_skin_tone:': u'\U0001F487\U0001F3FE',
u':person_getting_haircut_medium-light_skin_tone:': u'\U0001F487\U0001F3FC',
u':person_getting_haircut_medium_skin_tone:': u'\U0001F487\U0001F3FD',
u':person_getting_massage:': u'\U0001F486',
u':person_getting_massage_dark_skin_tone:': u'\U0001F486\U0001F3FF',
u':person_getting_massage_light_skin_tone:': u'\U0001F486\U0001F3FB',
u':person_getting_massage_medium-dark_skin_tone:': u'\U0001F486\U0001F3FE',
u':person_getting_massage_medium-light_skin_tone:': u'\U0001F486\U0001F3FC',
u':person_getting_massage_medium_skin_tone:': u'\U0001F486\U0001F3FD',
u':person_golfing:': u'\U0001F3CC',
u':person_golfing_dark_skin_tone:': u'\U0001F3CC\U0001F3FF',
u':person_golfing_light_skin_tone:': u'\U0001F3CC\U0001F3FB',
u':person_golfing_medium-dark_skin_tone:': u'\U0001F3CC\U0001F3FE',
u':person_golfing_medium-light_skin_tone:': u'\U0001F3CC\U0001F3FC',
u':person_golfing_medium_skin_tone:': u'\U0001F3CC\U0001F3FD',
u':person_golfing_selector:': u'\U0001F3CC\U0000FE0F',
u':person_in_bed:': u'\U0001F6CC',
u':person_in_bed_dark_skin_tone:': u'\U0001F6CC\U0001F3FF',
u':person_in_bed_light_skin_tone:': u'\U0001F6CC\U0001F3FB',
u':person_in_bed_medium-dark_skin_tone:': u'\U0001F6CC\U0001F3FE',
u':person_in_bed_medium-light_skin_tone:': u'\U0001F6CC\U0001F3FC',
u':person_in_bed_medium_skin_tone:': u'\U0001F6CC\U0001F3FD',
u':person_in_lotus_position:': u'\U0001F9D8',
u':person_in_lotus_position_dark_skin_tone:': u'\U0001F9D8\U0001F3FF',
u':person_in_lotus_position_light_skin_tone:': u'\U0001F9D8\U0001F3FB',
u':person_in_lotus_position_medium-dark_skin_tone:': u'\U0001F9D8\U0001F3FE',
u':person_in_lotus_position_medium-light_skin_tone:': u'\U0001F9D8\U0001F3FC',
u':person_in_lotus_position_medium_skin_tone:': u'\U0001F9D8\U0001F3FD',
u':person_in_steamy_room:': u'\U0001F9D6',
u':person_in_steamy_room_dark_skin_tone:': u'\U0001F9D6\U0001F3FF',
u':person_in_steamy_room_light_skin_tone:': u'\U0001F9D6\U0001F3FB',
u':person_in_steamy_room_medium-dark_skin_tone:': u'\U0001F9D6\U0001F3FE',
u':person_in_steamy_room_medium-light_skin_tone:': u'\U0001F9D6\U0001F3FC',
u':person_in_steamy_room_medium_skin_tone:': u'\U0001F9D6\U0001F3FD',
u':person_juggling:': u'\U0001F939',
u':person_juggling_dark_skin_tone:': u'\U0001F939\U0001F3FF',
u':person_juggling_light_skin_tone:': u'\U0001F939\U0001F3FB',
u':person_juggling_medium-dark_skin_tone:': u'\U0001F939\U0001F3FE',
u':person_juggling_medium-light_skin_tone:': u'\U0001F939\U0001F3FC',
u':person_juggling_medium_skin_tone:': u'\U0001F939\U0001F3FD',
u':person_kneeling-dark_skin_tone:': u'\U0001F9CE\U0001F3FF',
u':person_kneeling-light_skin_tone:': u'\U0001F9CE\U0001F3FB',
u':person_kneeling-medium-dark_skin_tone:': u'\U0001F9CE\U0001F3FE',
u':person_kneeling-medium-light_skin_tone:': u'\U0001F9CE\U0001F3FC',
u':person_kneeling-medium_skin_tone:': u'\U0001F9CE\U0001F3FD',
u':person_kneeling:': u'\U0001F9CE',
u':person_lifting_weights:': u'\U0001F3CB',
u':person_lifting_weights_dark_skin_tone:': u'\U0001F3CB\U0001F3FF',
u':person_lifting_weights_light_skin_tone:': u'\U0001F3CB\U0001F3FB',
u':person_lifting_weights_medium-dark_skin_tone:': u'\U0001F3CB\U0001F3FE',
u':person_lifting_weights_medium-light_skin_tone:': u'\U0001F3CB\U0001F3FC',
u':person_lifting_weights_medium_skin_tone:': u'\U0001F3CB\U0001F3FD',
u':person_lifting_weights_selector:': u'\U0001F3CB\U0000FE0F',
u':person_mountain_biking:': u'\U0001F6B5',
u':person_mountain_biking_dark_skin_tone:': u'\U0001F6B5\U0001F3FF',
u':person_mountain_biking_light_skin_tone:': u'\U0001F6B5\U0001F3FB',
u':person_mountain_biking_medium-dark_skin_tone:': u'\U0001F6B5\U0001F3FE',
u':person_mountain_biking_medium-light_skin_tone:': u'\U0001F6B5\U0001F3FC',
u':person_mountain_biking_medium_skin_tone:': u'\U0001F6B5\U0001F3FD',
u':person_playing_handball:': u'\U0001F93E',
u':person_playing_handball_dark_skin_tone:': u'\U0001F93E\U0001F3FF',
u':person_playing_handball_light_skin_tone:': u'\U0001F93E\U0001F3FB',
u':person_playing_handball_medium-dark_skin_tone:': u'\U0001F93E\U0001F3FE',
u':person_playing_handball_medium-light_skin_tone:': u'\U0001F93E\U0001F3FC',
u':person_playing_handball_medium_skin_tone:': u'\U0001F93E\U0001F3FD',
u':person_playing_water_polo:': u'\U0001F93D',
u':person_playing_water_polo_dark_skin_tone:': u'\U0001F93D\U0001F3FF',
u':person_playing_water_polo_light_skin_tone:': u'\U0001F93D\U0001F3FB',
u':person_playing_water_polo_medium-dark_skin_tone:': u'\U0001F93D\U0001F3FE',
u':person_playing_water_polo_medium-light_skin_tone:': u'\U0001F93D\U0001F3FC',
u':person_playing_water_polo_medium_skin_tone:': u'\U0001F93D\U0001F3FD',
u':person_pouting:': u'\U0001F64E',
u':person_pouting_dark_skin_tone:': u'\U0001F64E\U0001F3FF',
u':person_pouting_light_skin_tone:': u'\U0001F64E\U0001F3FB',
u':person_pouting_medium-dark_skin_tone:': u'\U0001F64E\U0001F3FE',
u':person_pouting_medium-light_skin_tone:': u'\U0001F64E\U0001F3FC',
u':person_pouting_medium_skin_tone:': u'\U0001F64E\U0001F3FD',
u':person_raising_hand:': u'\U0001F64B',
u':person_raising_hand_dark_skin_tone:': u'\U0001F64B\U0001F3FF',
u':person_raising_hand_light_skin_tone:': u'\U0001F64B\U0001F3FB',
u':person_raising_hand_medium-dark_skin_tone:': u'\U0001F64B\U0001F3FE',
u':person_raising_hand_medium-light_skin_tone:': u'\U0001F64B\U0001F3FC',
u':person_raising_hand_medium_skin_tone:': u'\U0001F64B\U0001F3FD',
u':person_rowing_boat:': u'\U0001F6A3',
u':person_rowing_boat_dark_skin_tone:': u'\U0001F6A3\U0001F3FF',
u':person_rowing_boat_light_skin_tone:': u'\U0001F6A3\U0001F3FB',
u':person_rowing_boat_medium-dark_skin_tone:': u'\U0001F6A3\U0001F3FE',
u':person_rowing_boat_medium-light_skin_tone:': u'\U0001F6A3\U0001F3FC',
u':person_rowing_boat_medium_skin_tone:': u'\U0001F6A3\U0001F3FD',
u':person_running:': u'\U0001F3C3',
u':person_running_dark_skin_tone:': u'\U0001F3C3\U0001F3FF',
u':person_running_light_skin_tone:': u'\U0001F3C3\U0001F3FB',
u':person_running_medium-dark_skin_tone:': u'\U0001F3C3\U0001F3FE',
u':person_running_medium-light_skin_tone:': u'\U0001F3C3\U0001F3FC',
u':person_running_medium_skin_tone:': u'\U0001F3C3\U0001F3FD',
u':person_shrugging:': u'\U0001F937',
u':person_shrugging_dark_skin_tone:': u'\U0001F937\U0001F3FF',
u':person_shrugging_light_skin_tone:': u'\U0001F937\U0001F3FB',
u':person_shrugging_medium-dark_skin_tone:': u'\U0001F937\U0001F3FE',
u':person_shrugging_medium-light_skin_tone:': u'\U0001F937\U0001F3FC',
u':person_shrugging_medium_skin_tone:': u'\U0001F937\U0001F3FD',
u':person_standing-dark_skin_tone:': u'\U0001F9CD\U0001F3FF',
u':person_standing-light_skin_tone:': u'\U0001F9CD\U0001F3FB',
u':person_standing-medium-dark_skin_tone:': u'\U0001F9CD\U0001F3FE',
u':person_standing-medium-light_skin_tone:': u'\U0001F9CD\U0001F3FC',
u':person_standing-medium_skin_tone:': u'\U0001F9CD\U0001F3FD',
u':person_standing:': u'\U0001F9CD',
u':person_surfing:': u'\U0001F3C4',
u':person_surfing_dark_skin_tone:': u'\U0001F3C4\U0001F3FF',
u':person_surfing_light_skin_tone:': u'\U0001F3C4\U0001F3FB',
u':person_surfing_medium-dark_skin_tone:': u'\U0001F3C4\U0001F3FE',
u':person_surfing_medium-light_skin_tone:': u'\U0001F3C4\U0001F3FC',
u':person_surfing_medium_skin_tone:': u'\U0001F3C4\U0001F3FD',
u':person_swimming:': u'\U0001F3CA',
u':person_swimming_dark_skin_tone:': u'\U0001F3CA\U0001F3FF',
u':person_swimming_light_skin_tone:': u'\U0001F3CA\U0001F3FB',
u':person_swimming_medium-dark_skin_tone:': u'\U0001F3CA\U0001F3FE',
u':person_swimming_medium-light_skin_tone:': u'\U0001F3CA\U0001F3FC',
u':person_swimming_medium_skin_tone:': u'\U0001F3CA\U0001F3FD',
u':person_taking_bath:': u'\U0001F6C0',
u':person_taking_bath_dark_skin_tone:': u'\U0001F6C0\U0001F3FF',
u':person_taking_bath_light_skin_tone:': u'\U0001F6C0\U0001F3FB',
u':person_taking_bath_medium-dark_skin_tone:': u'\U0001F6C0\U0001F3FE',
u':person_taking_bath_medium-light_skin_tone:': u'\U0001F6C0\U0001F3FC',
u':person_taking_bath_medium_skin_tone:': u'\U0001F6C0\U0001F3FD',
u':person_tipping_hand:': u'\U0001F481',
u':person_tipping_hand_dark_skin_tone:': u'\U0001F481\U0001F3FF',
u':person_tipping_hand_light_skin_tone:': u'\U0001F481\U0001F3FB',
u':person_tipping_hand_medium-dark_skin_tone:': u'\U0001F481\U0001F3FE',
u':person_tipping_hand_medium-light_skin_tone:': u'\U0001F481\U0001F3FC',
u':person_tipping_hand_medium_skin_tone:': u'\U0001F481\U0001F3FD',
u':person_walking:': u'\U0001F6B6',
u':person_walking_dark_skin_tone:': u'\U0001F6B6\U0001F3FF',
u':person_walking_light_skin_tone:': u'\U0001F6B6\U0001F3FB',
u':person_walking_medium-dark_skin_tone:': u'\U0001F6B6\U0001F3FE',
u':person_walking_medium-light_skin_tone:': u'\U0001F6B6\U0001F3FC',
u':person_walking_medium_skin_tone:': u'\U0001F6B6\U0001F3FD',
u':person_wearing_turban:': u'\U0001F473',
u':person_wearing_turban_dark_skin_tone:': u'\U0001F473\U0001F3FF',
u':person_wearing_turban_light_skin_tone:': u'\U0001F473\U0001F3FB',
u':person_wearing_turban_medium-dark_skin_tone:': u'\U0001F473\U0001F3FE',
u':person_wearing_turban_medium-light_skin_tone:': u'\U0001F473\U0001F3FC',
u':person_wearing_turban_medium_skin_tone:': u'\U0001F473\U0001F3FD',
u':petri_dish:': u'\U0001F9EB',
u':pick:': u'\U000026CF',
u':pick_selector:': u'\U000026CF\U0000FE0F',
u':pie:': u'\U0001F967',
u':pig:': u'\U0001F416',
u':pig_face:': u'\U0001F437',
u':pig_nose:': u'\U0001F43D',
u':pile_of_poo:': u'\U0001F4A9',
u':pill:': u'\U0001F48A',
u':pinching_hand-dark_skin_tone:': u'\U0001F90F\U0001F3FF',
u':pinching_hand-light_skin_tone:': u'\U0001F90F\U0001F3FB',
u':pinching_hand-medium-dark_skin_tone:': u'\U0001F90F\U0001F3FE',
u':pinching_hand-medium-light_skin_tone:': u'\U0001F90F\U0001F3FC',
u':pinching_hand-medium_skin_tone:': u'\U0001F90F\U0001F3FD',
u':pinching_hand:': u'\U0001F90F',
u':pine_decoration:': u'\U0001F38D',
u':pineapple:': u'\U0001F34D',
u':ping_pong:': u'\U0001F3D3',
u':pirate_flag:': u'\U0001F3F4\U0000200D\U00002620',
u':pirate_flag_selector:': u'\U0001F3F4\U0000200D\U00002620\U0000FE0F',
u':pistol:': u'\U0001F52B',
u':pizza:': u'\U0001F355',
u':place_of_worship:': u'\U0001F6D0',
u':play_button:': u'\U000025B6',
u':play_button_selector:': u'\U000025B6\U0000FE0F',
u':play_or_pause_button:': u'\U000023EF',
u':play_or_pause_button_selector:': u'\U000023EF\U0000FE0F',
u':pleading_face:': u'\U0001F97A',
u':police_car:': u'\U0001F693',
u':police_car_light:': u'\U0001F6A8',
u':police_officer:': u'\U0001F46E',
u':police_officer_dark_skin_tone:': u'\U0001F46E\U0001F3FF',
u':police_officer_light_skin_tone:': u'\U0001F46E\U0001F3FB',
u':police_officer_medium-dark_skin_tone:': u'\U0001F46E\U0001F3FE',
u':police_officer_medium-light_skin_tone:': u'\U0001F46E\U0001F3FC',
u':police_officer_medium_skin_tone:': u'\U0001F46E\U0001F3FD',
u':poodle:': u'\U0001F429',
u':pool_8_ball:': u'\U0001F3B1',
u':popcorn:': u'\U0001F37F',
u':post_office:': u'\U0001F3E4',
u':postal_horn:': u'\U0001F4EF',
u':postbox:': u'\U0001F4EE',
u':pot_of_food:': u'\U0001F372',
u':potable_water:': u'\U0001F6B0',
u':potato:': u'\U0001F954',
u':poultry_leg:': u'\U0001F357',
u':pound_banknote:': u'\U0001F4B7',
u':pouting_cat_face:': u'\U0001F63E',
u':pouting_face:': u'\U0001F621',
u':prayer_beads:': u'\U0001F4FF',
u':pregnant_woman:': u'\U0001F930',
u':pregnant_woman_dark_skin_tone:': u'\U0001F930\U0001F3FF',
u':pregnant_woman_light_skin_tone:': u'\U0001F930\U0001F3FB',
u':pregnant_woman_medium-dark_skin_tone:': u'\U0001F930\U0001F3FE',
u':pregnant_woman_medium-light_skin_tone:': u'\U0001F930\U0001F3FC',
u':pregnant_woman_medium_skin_tone:': u'\U0001F930\U0001F3FD',
u':pretzel:': u'\U0001F968',
u':prince:': u'\U0001F934',
u':prince_dark_skin_tone:': u'\U0001F934\U0001F3FF',
u':prince_light_skin_tone:': u'\U0001F934\U0001F3FB',
u':prince_medium-dark_skin_tone:': u'\U0001F934\U0001F3FE',
u':prince_medium-light_skin_tone:': u'\U0001F934\U0001F3FC',
u':prince_medium_skin_tone:': u'\U0001F934\U0001F3FD',
u':princess:': u'\U0001F478',
u':princess_dark_skin_tone:': u'\U0001F478\U0001F3FF',
u':princess_light_skin_tone:': u'\U0001F478\U0001F3FB',
u':princess_medium-dark_skin_tone:': u'\U0001F478\U0001F3FE',
u':princess_medium-light_skin_tone:': u'\U0001F478\U0001F3FC',
u':princess_medium_skin_tone:': u'\U0001F478\U0001F3FD',
u':printer:': u'\U0001F5A8',
u':printer_selector:': u'\U0001F5A8\U0000FE0F',
u':probing_cane:': u'\U0001F9AF',
u':prohibited:': u'\U0001F6AB',
u':purple_circle:': u'\U0001F7E3',
u':purple_heart:': u'\U0001F49C',
u':purple_square:': u'\U0001F7EA',
u':purse:': u'\U0001F45B',
u':pushpin:': u'\U0001F4CC',
u':question_mark:': u'\U00002753',
u':rabbit:': u'\U0001F407',
u':rabbit_face:': u'\U0001F430',
u':raccoon:': u'\U0001F99D',
u':racing_car:': u'\U0001F3CE',
u':racing_car_selector:': u'\U0001F3CE\U0000FE0F',
u':radio:': u'\U0001F4FB',
u':radio_button:': u'\U0001F518',
u':radioactive:': u'\U00002622',
u':radioactive_selector:': u'\U00002622\U0000FE0F',
u':railway_car:': u'\U0001F683',
u':railway_track:': u'\U0001F6E4',
u':railway_track_selector:': u'\U0001F6E4\U0000FE0F',
u':rainbow:': u'\U0001F308',
u':rainbow_flag:': u'\U0001F3F3\U0000200D\U0001F308',
u':rainbow_flag_selector:': u'\U0001F3F3\U0000FE0F\U0000200D\U0001F308',
u':raised_back_of_hand:': u'\U0001F91A',
u':raised_back_of_hand_dark_skin_tone:': u'\U0001F91A\U0001F3FF',
u':raised_back_of_hand_light_skin_tone:': u'\U0001F91A\U0001F3FB',
u':raised_back_of_hand_medium-dark_skin_tone:': u'\U0001F91A\U0001F3FE',
u':raised_back_of_hand_medium-light_skin_tone:': u'\U0001F91A\U0001F3FC',
u':raised_back_of_hand_medium_skin_tone:': u'\U0001F91A\U0001F3FD',
u':raised_fist:': u'\U0000270A',
u':raised_fist_dark_skin_tone:': u'\U0000270A\U0001F3FF',
u':raised_fist_light_skin_tone:': u'\U0000270A\U0001F3FB',
u':raised_fist_medium-dark_skin_tone:': u'\U0000270A\U0001F3FE',
u':raised_fist_medium-light_skin_tone:': u'\U0000270A\U0001F3FC',
u':raised_fist_medium_skin_tone:': u'\U0000270A\U0001F3FD',
u':raised_hand:': u'\U0000270B',
u':raised_hand_dark_skin_tone:': u'\U0000270B\U0001F3FF',
u':raised_hand_light_skin_tone:': u'\U0000270B\U0001F3FB',
u':raised_hand_medium-dark_skin_tone:': u'\U0000270B\U0001F3FE',
u':raised_hand_medium-light_skin_tone:': u'\U0000270B\U0001F3FC',
u':raised_hand_medium_skin_tone:': u'\U0000270B\U0001F3FD',
u':raising_hands:': u'\U0001F64C',
u':raising_hands_dark_skin_tone:': u'\U0001F64C\U0001F3FF',
u':raising_hands_light_skin_tone:': u'\U0001F64C\U0001F3FB',
u':raising_hands_medium-dark_skin_tone:': u'\U0001F64C\U0001F3FE',
u':raising_hands_medium-light_skin_tone:': u'\U0001F64C\U0001F3FC',
u':raising_hands_medium_skin_tone:': u'\U0001F64C\U0001F3FD',
u':ram:': u'\U0001F40F',
u':rat:': u'\U0001F400',
u':razor:': u'\U0001FA92',
u':receipt:': u'\U0001F9FE',
u':record_button:': u'\U000023FA',
u':record_button_selector:': u'\U000023FA\U0000FE0F',
u':recycling_symbol:': u'\U0000267B',
u':recycling_symbol_selector:': u'\U0000267B\U0000FE0F',
u':red-haired_man:': u'\U0001F468\U0000200D\U0001F9B0',
u':red-haired_woman:': u'\U0001F469\U0000200D\U0001F9B0',
u':red_apple:': u'\U0001F34E',
u':red_circle:': u'\U0001F534',
u':red_envelope:': u'\U0001F9E7',
u':red_hair:': u'\U0001F9B0',
u':red_heart:': u'\U00002764',
u':red_heart_selector:': u'\U00002764\U0000FE0F',
u':red_paper_lantern:': u'\U0001F3EE',
u':red_square:': u'\U0001F7E5',
u':red_triangle_pointed_down:': u'\U0001F53B',
u':red_triangle_pointed_up:': u'\U0001F53A',
u':regional_indicator_symbol_letter_a:': u'\U0001F1E6',
u':regional_indicator_symbol_letter_b:': u'\U0001F1E7',
u':regional_indicator_symbol_letter_c:': u'\U0001F1E8',
u':regional_indicator_symbol_letter_d:': u'\U0001F1E9',
u':regional_indicator_symbol_letter_e:': u'\U0001F1EA',
u':regional_indicator_symbol_letter_f:': u'\U0001F1EB',
u':regional_indicator_symbol_letter_g:': u'\U0001F1EC',
u':regional_indicator_symbol_letter_h:': u'\U0001F1ED',
u':regional_indicator_symbol_letter_i:': u'\U0001F1EE',
u':regional_indicator_symbol_letter_j:': u'\U0001F1EF',
u':regional_indicator_symbol_letter_k:': u'\U0001F1F0',
u':regional_indicator_symbol_letter_l:': u'\U0001F1F1',
u':regional_indicator_symbol_letter_m:': u'\U0001F1F2',
u':regional_indicator_symbol_letter_n:': u'\U0001F1F3',
u':regional_indicator_symbol_letter_o:': u'\U0001F1F4',
u':regional_indicator_symbol_letter_p:': u'\U0001F1F5',
u':regional_indicator_symbol_letter_q:': u'\U0001F1F6',
u':regional_indicator_symbol_letter_r:': u'\U0001F1F7',
u':regional_indicator_symbol_letter_s:': u'\U0001F1F8',
u':regional_indicator_symbol_letter_t:': u'\U0001F1F9',
u':regional_indicator_symbol_letter_u:': u'\U0001F1FA',
u':regional_indicator_symbol_letter_v:': u'\U0001F1FB',
u':regional_indicator_symbol_letter_w:': u'\U0001F1FC',
u':regional_indicator_symbol_letter_x:': u'\U0001F1FD',
u':regional_indicator_symbol_letter_y:': u'\U0001F1FE',
u':regional_indicator_symbol_letter_z:': u'\U0001F1FF',
u':registered:': u'\U000000AE',
u':registered_selector:': u'\U000000AE\U0000FE0F',
u':relieved_face:': u'\U0001F60C',
u':reminder_ribbon:': u'\U0001F397',
u':reminder_ribbon_selector:': u'\U0001F397\U0000FE0F',
u':repeat_button:': u'\U0001F501',
u':repeat_single_button:': u'\U0001F502',
u':rescue_worker\u2019s_helmet:': u'\U000026D1',
u':rescue_worker\u2019s_helmet_selector:': u'\U000026D1\U0000FE0F',
u':restroom:': u'\U0001F6BB',
u':reverse_button:': u'\U000025C0',
u':reverse_button_selector:': u'\U000025C0\U0000FE0F',
u':revolving_hearts:': u'\U0001F49E',
u':rhinoceros:': u'\U0001F98F',
u':ribbon:': u'\U0001F380',
u':rice_ball:': u'\U0001F359',
u':rice_cracker:': u'\U0001F358',
u':right-facing_fist:': u'\U0001F91C',
u':right-facing_fist_dark_skin_tone:': u'\U0001F91C\U0001F3FF',
u':right-facing_fist_light_skin_tone:': u'\U0001F91C\U0001F3FB',
u':right-facing_fist_medium-dark_skin_tone:': u'\U0001F91C\U0001F3FE',
u':right-facing_fist_medium-light_skin_tone:': u'\U0001F91C\U0001F3FC',
u':right-facing_fist_medium_skin_tone:': u'\U0001F91C\U0001F3FD',
u':right_anger_bubble:': u'\U0001F5EF',
u':right_anger_bubble_selector:': u'\U0001F5EF\U0000FE0F',
u':right_arrow:': u'\U000027A1',
u':right_arrow_curving_down:': u'\U00002935',
u':right_arrow_curving_down_selector:': u'\U00002935\U0000FE0F',
u':right_arrow_curving_left:': u'\U000021A9',
u':right_arrow_curving_left_selector:': u'\U000021A9\U0000FE0F',
u':right_arrow_curving_up:': u'\U00002934',
u':right_arrow_curving_up_selector:': u'\U00002934\U0000FE0F',
u':right_arrow_selector:': u'\U000027A1\U0000FE0F',
u':ring:': u'\U0001F48D',
u':ringed_planet:': u'\U0001FA90',
u':roasted_sweet_potato:': u'\U0001F360',
u':robot_face:': u'\U0001F916',
u':rocket:': u'\U0001F680',
u':roll_of_paper:': u'\U0001F9FB',
u':rolled-up_newspaper:': u'\U0001F5DE',
u':rolled-up_newspaper_selector:': u'\U0001F5DE\U0000FE0F',
u':roller_coaster:': u'\U0001F3A2',
u':rolling_on_the_floor_laughing:': u'\U0001F923',
u':rooster:': u'\U0001F413',
u':rose:': u'\U0001F339',
u':rosette:': u'\U0001F3F5',
u':rosette_selector:': u'\U0001F3F5\U0000FE0F',
u':round_pushpin:': u'\U0001F4CD',
u':rugby_football:': u'\U0001F3C9',
u':running_shirt:': u'\U0001F3BD',
u':running_shoe:': u'\U0001F45F',
u':sad_but_relieved_face:': u'\U0001F625',
u':safety_pin:': u'\U0001F9F7',
u':safety_vest:': u'\U0001F9BA',
u':sailboat:': u'\U000026F5',
u':sake:': u'\U0001F376',
u':salt:': u'\U0001F9C2',
u':sandwich:': u'\U0001F96A',
u':sari:': u'\U0001F97B',
u':satellite:': u'\U0001F6F0',
u':satellite_antenna:': u'\U0001F4E1',
u':satellite_selector:': u'\U0001F6F0\U0000FE0F',
u':sauropod:': u'\U0001F995',
u':saxophone:': u'\U0001F3B7',
u':scarf:': u'\U0001F9E3',
u':school:': u'\U0001F3EB',
u':school_backpack:': u'\U0001F392',
u':scissors:': u'\U00002702',
u':scissors_selector:': u'\U00002702\U0000FE0F',
u':scorpion:': u'\U0001F982',
u':scroll:': u'\U0001F4DC',
u':seat:': u'\U0001F4BA',
u':see-no-evil_monkey:': u'\U0001F648',
u':seedling:': u'\U0001F331',
u':selfie:': u'\U0001F933',
u':selfie_dark_skin_tone:': u'\U0001F933\U0001F3FF',
u':selfie_light_skin_tone:': u'\U0001F933\U0001F3FB',
u':selfie_medium-dark_skin_tone:': u'\U0001F933\U0001F3FE',
u':selfie_medium-light_skin_tone:': u'\U0001F933\U0001F3FC',
u':selfie_medium_skin_tone:': u'\U0001F933\U0001F3FD',
u':service_dog:': u'\U0001F415\U0000200D\U0001F9BA',
u':seven-thirty:': u'\U0001F562',
u':seven_o\u2019clock:': u'\U0001F556',
u':shallow_pan_of_food:': u'\U0001F958',
u':shamrock:': u'\U00002618',
u':shamrock_selector:': u'\U00002618\U0000FE0F',
u':shark:': u'\U0001F988',
u':shaved_ice:': u'\U0001F367',
u':sheaf_of_rice:': u'\U0001F33E',
u':shield:': u'\U0001F6E1',
u':shield_selector:': u'\U0001F6E1\U0000FE0F',
u':shinto_shrine:': u'\U000026E9\U0000FE0F',
u':ship:': u'\U0001F6A2',
u':shooting_star:': u'\U0001F320',
u':shopping_bags:': u'\U0001F6CD',
u':shopping_bags_selector:': u'\U0001F6CD\U0000FE0F',
u':shopping_cart:': u'\U0001F6D2',
u':shortcake:': u'\U0001F370',
u':shorts:': u'\U0001FA73',
u':shower:': u'\U0001F6BF',
u':shrimp:': u'\U0001F990',
u':shuffle_tracks_button:': u'\U0001F500',
u':shushing_face:': u'\U0001F92B',
u':sign_of_the_horns:': u'\U0001F918',
u':sign_of_the_horns_dark_skin_tone:': u'\U0001F918\U0001F3FF',
u':sign_of_the_horns_light_skin_tone:': u'\U0001F918\U0001F3FB',
u':sign_of_the_horns_medium-dark_skin_tone:': u'\U0001F918\U0001F3FE',
u':sign_of_the_horns_medium-light_skin_tone:': u'\U0001F918\U0001F3FC',
u':sign_of_the_horns_medium_skin_tone:': u'\U0001F918\U0001F3FD',
u':six-thirty:': u'\U0001F561',
u':six_o\u2019clock:': u'\U0001F555',
u':skateboard:': u'\U0001F6F9',
u':skier:': u'\U000026F7',
u':skier_selector:': u'\U000026F7\U0000FE0F',
u':skis:': u'\U0001F3BF',
u':skull:': u'\U0001F480',
u':skull_and_crossbones:': u'\U00002620',
u':skull_and_crossbones_selector:': u'\U00002620\U0000FE0F',
u':skunk:': u'\U0001F9A8',
u':sled:': u'\U0001F6F7',
u':sleeping_face:': u'\U0001F634',
u':sleepy_face:': u'\U0001F62A',
u':slightly_frowning_face:': u'\U0001F641',
u':slightly_smiling_face:': u'\U0001F642',
u':slot_machine:': u'\U0001F3B0',
u':sloth:': u'\U0001F9A5',
u':small_airplane:': u'\U0001F6E9',
u':small_airplane_selector:': u'\U0001F6E9\U0000FE0F',
u':small_blue_diamond:': u'\U0001F539',
u':small_orange_diamond:': u'\U0001F538',
u':smiling_cat_face_with_heart-eyes:': u'\U0001F63B',
u':smiling_face:': u'\U0000263A',
u':smiling_face_selector:': u'\U0000263A\U0000FE0F',
u':smiling_face_with_3_hearts:': u'\U0001F970',
u':smiling_face_with_halo:': u'\U0001F607',
u':smiling_face_with_heart-eyes:': u'\U0001F60D',
u':smiling_face_with_horns:': u'\U0001F608',
u':smiling_face_with_smiling_eyes:': u'\U0001F60A',
u':smiling_face_with_sunglasses:': u'\U0001F60E',
u':smirking_face:': u'\U0001F60F',
u':snail:': u'\U0001F40C',
u':snake:': u'\U0001F40D',
u':sneezing_face:': u'\U0001F927',
u':snow-capped_mountain:': u'\U0001F3D4',
u':snow-capped_mountain_selector:': u'\U0001F3D4\U0000FE0F',
u':snowboarder:': u'\U0001F3C2',
u':snowboarder_dark_skin_tone:': u'\U0001F3C2\U0001F3FF',
u':snowboarder_light_skin_tone:': u'\U0001F3C2\U0001F3FB',
u':snowboarder_medium-dark_skin_tone:': u'\U0001F3C2\U0001F3FE',
u':snowboarder_medium-light_skin_tone:': u'\U0001F3C2\U0001F3FC',
u':snowboarder_medium_skin_tone:': u'\U0001F3C2\U0001F3FD',
u':snowflake:': u'\U00002744',
u':snowflake_selector:': u'\U00002744\U0000FE0F',
u':snowman:': u'\U00002603',
u':snowman_selector:': u'\U00002603\U0000FE0F',
u':snowman_without_snow:': u'\U000026C4',
u':soap:': u'\U0001F9FC',
u':soccer_ball:': u'\U000026BD',
u':socks:': u'\U0001F9E6',
u':soft_ice_cream:': u'\U0001F366',
u':softball:': u'\U0001F94E',
u':spade_suit:': u'\U00002660',
u':spade_suit_selector:': u'\U00002660\U0000FE0F',
u':spaghetti:': u'\U0001F35D',
u':sparkle:': u'\U00002747',
u':sparkle_selector:': u'\U00002747\U0000FE0F',
u':sparkler:': u'\U0001F387',
u':sparkles:': u'\U00002728',
u':sparkling_heart:': u'\U0001F496',
u':speak-no-evil_monkey:': u'\U0001F64A',
u':speaker_high_volume:': u'\U0001F50A',
u':speaker_low_volume:': u'\U0001F508',
u':speaker_medium_volume:': u'\U0001F509',
u':speaking_head:': u'\U0001F5E3',
u':speaking_head_selector:': u'\U0001F5E3\U0000FE0F',
u':speech_balloon:': u'\U0001F4AC',
u':speedboat:': u'\U0001F6A4',
u':spider:': u'\U0001F577',
u':spider_selector:': u'\U0001F577\U0000FE0F',
u':spider_web:': u'\U0001F578',
u':spider_web_selector:': u'\U0001F578\U0000FE0F',
u':spiral_calendar:': u'\U0001F5D3',
u':spiral_calendar_selector:': u'\U0001F5D3\U0000FE0F',
u':spiral_notepad:': u'\U0001F5D2',
u':spiral_notepad_selector:': u'\U0001F5D2\U0000FE0F',
u':spiral_shell:': u'\U0001F41A',
u':sponge:': u'\U0001F9FD',
u':spoon:': u'\U0001F944',
u':sport_utility_vehicle:': u'\U0001F699',
u':sports_medal:': u'\U0001F3C5',
u':spouting_whale:': u'\U0001F433',
u':squid:': u'\U0001F991',
u':squinting_face_with_tongue:': u'\U0001F61D',
u':stadium:': u'\U0001F3DF',
u':stadium_selector:': u'\U0001F3DF\U0000FE0F',
u':star-struck:': u'\U0001F929',
u':star_and_crescent:': u'\U0000262A',
u':star_and_crescent_selector:': u'\U0000262A\U0000FE0F',
u':star_of_David:': u'\U00002721',
u':star_of_David_selector:': u'\U00002721\U0000FE0F',
u':station:': u'\U0001F689',
u':steaming_bowl:': u'\U0001F35C',
u':stethoscope:': u'\U0001FA7A',
u':stop_button:': u'\U000023F9',
u':stop_button_selector:': u'\U000023F9\U0000FE0F',
u':stop_sign:': u'\U0001F6D1',
u':stopwatch:': u'\U000023F1',
u':stopwatch_selector:': u'\U000023F1\U0000FE0F',
u':straight_ruler:': u'\U0001F4CF',
u':strawberry:': u'\U0001F353',
u':studio_microphone:': u'\U0001F399',
u':studio_microphone_selector:': u'\U0001F399\U0000FE0F',
u':stuffed_flatbread:': u'\U0001F959',
u':sun:': u'\U00002600',
u':sun_behind_cloud:': u'\U000026C5',
u':sun_behind_large_cloud:': u'\U0001F325',
u':sun_behind_large_cloud_selector:': u'\U0001F325\U0000FE0F',
u':sun_behind_rain_cloud:': u'\U0001F326',
u':sun_behind_rain_cloud_selector:': u'\U0001F326\U0000FE0F',
u':sun_behind_small_cloud:': u'\U0001F324',
u':sun_behind_small_cloud_selector:': u'\U0001F324\U0000FE0F',
u':sun_selector:': u'\U00002600\U0000FE0F',
u':sun_with_face:': u'\U0001F31E',
u':sunflower:': u'\U0001F33B',
u':sunglasses:': u'\U0001F576',
u':sunglasses_selector:': u'\U0001F576\U0000FE0F',
u':sunrise:': u'\U0001F305',
u':sunrise_over_mountains:': u'\U0001F304',
u':sunset:': u'\U0001F307',
u':superhero-dark_skin_tone:': u'\U0001F9B8\U0001F3FF',
u':superhero-light_skin_tone:': u'\U0001F9B8\U0001F3FB',
u':superhero-medium-dark_skin_tone:': u'\U0001F9B8\U0001F3FE',
u':superhero-medium-light_skin_tone:': u'\U0001F9B8\U0001F3FC',
u':superhero-medium_skin_tone:': u'\U0001F9B8\U0001F3FD',
u':superhero:': u'\U0001F9B8',
u':supervillain-dark_skin_tone:': u'\U0001F9B9\U0001F3FF',
u':supervillain-light_skin_tone:': u'\U0001F9B9\U0001F3FB',
u':supervillain-medium-dark_skin_tone:': u'\U0001F9B9\U0001F3FE',
u':supervillain-medium-light_skin_tone:': u'\U0001F9B9\U0001F3FC',
u':supervillain-medium_skin_tone:': u'\U0001F9B9\U0001F3FD',
u':supervillain:': u'\U0001F9B9',
u':sushi:': u'\U0001F363',
u':suspension_railway:': u'\U0001F69F',
u':swan:': u'\U0001F9A2',
u':sweat_droplets:': u'\U0001F4A6',
u':synagogue:': u'\U0001F54D',
u':syringe:': u'\U0001F489',
u':t-shirt:': u'\U0001F455',
u':taco:': u'\U0001F32E',
u':takeout_box:': u'\U0001F961',
u':tanabata_tree:': u'\U0001F38B',
u':tangerine:': u'\U0001F34A',
u':taxi:': u'\U0001F695',
u':teacup_without_handle:': u'\U0001F375',
u':tear-off_calendar:': u'\U0001F4C6',
u':teddy_bear:': u'\U0001F9F8',
u':telephone:': u'\U0000260E',
u':telephone_receiver:': u'\U0001F4DE',
u':telephone_selector:': u'\U0000260E\U0000FE0F',
u':telescope:': u'\U0001F52D',
u':television:': u'\U0001F4FA',
u':ten-thirty:': u'\U0001F565',
u':ten_o\u2019clock:': u'\U0001F559',
u':tennis:': u'\U0001F3BE',
u':tent:': u'\U000026FA',
u':test_tube:': u'\U0001F9EA',
u':thermometer:': u'\U0001F321',
u':thermometer_selector:': u'\U0001F321\U0000FE0F',
u':thinking_face:': u'\U0001F914',
u':thought_balloon:': u'\U0001F4AD',
u':thread:': u'\U0001F9F5',
u':three-thirty:': u'\U0001F55E',
u':three_o\u2019clock:': u'\U0001F552',
u':thumbs_down:': u'\U0001F44E',
u':thumbs_down_dark_skin_tone:': u'\U0001F44E\U0001F3FF',
u':thumbs_down_light_skin_tone:': u'\U0001F44E\U0001F3FB',
u':thumbs_down_medium-dark_skin_tone:': u'\U0001F44E\U0001F3FE',
u':thumbs_down_medium-light_skin_tone:': u'\U0001F44E\U0001F3FC',
u':thumbs_down_medium_skin_tone:': u'\U0001F44E\U0001F3FD',
u':thumbs_up:': u'\U0001F44D',
u':thumbs_up_dark_skin_tone:': u'\U0001F44D\U0001F3FF',
u':thumbs_up_light_skin_tone:': u'\U0001F44D\U0001F3FB',
u':thumbs_up_medium-dark_skin_tone:': u'\U0001F44D\U0001F3FE',
u':thumbs_up_medium-light_skin_tone:': u'\U0001F44D\U0001F3FC',
u':thumbs_up_medium_skin_tone:': u'\U0001F44D\U0001F3FD',
u':ticket:': u'\U0001F3AB',
u':tiger:': u'\U0001F405',
u':tiger_face:': u'\U0001F42F',
u':timer_clock:': u'\U000023F2',
u':timer_clock_selector:': u'\U000023F2\U0000FE0F',
u':tired_face:': u'\U0001F62B',
u':toilet:': u'\U0001F6BD',
u':tomato:': u'\U0001F345',
u':tongue:': u'\U0001F445',
u':toolbox:': u'\U0001F9F0',
u':tooth:': u'\U0001F9B7',
u':top_hat:': u'\U0001F3A9',
u':tornado:': u'\U0001F32A',
u':tornado_selector:': u'\U0001F32A\U0000FE0F',
u':trackball:': u'\U0001F5B2',
u':trackball_selector:': u'\U0001F5B2\U0000FE0F',
u':tractor:': u'\U0001F69C',
u':trade_mark:': u'\U00002122',
u':trade_mark_selector:': u'\U00002122\U0000FE0F',
u':train:': u'\U0001F686',
u':tram:': u'\U0001F68A',
u':tram_car:': u'\U0001F68B',
u':triangular_flag:': u'\U0001F6A9',
u':triangular_ruler:': u'\U0001F4D0',
u':trident_emblem:': u'\U0001F531',
u':trolleybus:': u'\U0001F68E',
u':trophy:': u'\U0001F3C6',
u':tropical_drink:': u'\U0001F379',
u':tropical_fish:': u'\U0001F420',
u':trumpet:': u'\U0001F3BA',
u':tulip:': u'\U0001F337',
u':tumbler_glass:': u'\U0001F943',
u':turkey:': u'\U0001F983',
u':turtle:': u'\U0001F422',
u':twelve-thirty:': u'\U0001F567',
u':twelve_o\u2019clock:': u'\U0001F55B',
u':two-hump_camel:': u'\U0001F42B',
u':two-thirty:': u'\U0001F55D',
u':two_hearts:': u'\U0001F495',
u':two_men_holding_hands:': u'\U0001F46C',
u':two_o\u2019clock:': u'\U0001F551',
u':two_women_holding_hands:': u'\U0001F46D',
u':umbrella:': u'\U00002602',
u':umbrella_on_ground:': u'\U000026F1',
u':umbrella_on_ground_selector:': u'\U000026F1\U0000FE0F',
u':umbrella_selector:': u'\U00002602\U0000FE0F',
u':umbrella_with_rain_drops:': u'\U00002614',
u':unamused_face:': u'\U0001F612',
u':unicorn_face:': u'\U0001F984',
u':unlocked:': u'\U0001F513',
u':up-down_arrow:': u'\U00002195',
u':up-down_arrow_selector:': u'\U00002195\U0000FE0F',
u':up-left_arrow:': u'\U00002196',
u':up-left_arrow_selector:': u'\U00002196\U0000FE0F',
u':up-right_arrow:': u'\U00002197',
u':up-right_arrow_selector:': u'\U00002197\U0000FE0F',
u':up_arrow:': u'\U00002B06',
u':up_arrow_selector:': u'\U00002B06\U0000FE0F',
u':upside-down_face:': u'\U0001F643',
u':upwards_button:': u'\U0001F53C',
u':vampire:': u'\U0001F9DB',
u':vampire_dark_skin_tone:': u'\U0001F9DB\U0001F3FF',
u':vampire_light_skin_tone:': u'\U0001F9DB\U0001F3FB',
u':vampire_medium-dark_skin_tone:': u'\U0001F9DB\U0001F3FE',
u':vampire_medium-light_skin_tone:': u'\U0001F9DB\U0001F3FC',
u':vampire_medium_skin_tone:': u'\U0001F9DB\U0001F3FD',
u':vertical_traffic_light:': u'\U0001F6A6',
u':vibration_mode:': u'\U0001F4F3',
u':victory_hand:': u'\U0000270C',
u':victory_hand_dark_skin_tone:': u'\U0000270C\U0001F3FF',
u':victory_hand_light_skin_tone:': u'\U0000270C\U0001F3FB',
u':victory_hand_medium-dark_skin_tone:': u'\U0000270C\U0001F3FE',
u':victory_hand_medium-light_skin_tone:': u'\U0000270C\U0001F3FC',
u':victory_hand_medium_skin_tone:': u'\U0000270C\U0001F3FD',
u':victory_hand_selector:': u'\U0000270C\U0000FE0F',
u':video_camera:': u'\U0001F4F9',
u':video_game:': u'\U0001F3AE',
u':videocassette:': u'\U0001F4FC',
u':violin:': u'\U0001F3BB',
u':volcano:': u'\U0001F30B',
u':volleyball:': u'\U0001F3D0',
u':vulcan_salute:': u'\U0001F596',
u':vulcan_salute_dark_skin_tone:': u'\U0001F596\U0001F3FF',
u':vulcan_salute_light_skin_tone:': u'\U0001F596\U0001F3FB',
u':vulcan_salute_medium-dark_skin_tone:': u'\U0001F596\U0001F3FE',
u':vulcan_salute_medium-light_skin_tone:': u'\U0001F596\U0001F3FC',
u':vulcan_salute_medium_skin_tone:': u'\U0001F596\U0001F3FD',
u':waffle:': u'\U0001F9C7',
u':waning_crescent_moon:': u'\U0001F318',
u':waning_gibbous_moon:': u'\U0001F316',
u':warning:': u'\U000026A0',
u':warning_selector:': u'\U000026A0\U0000FE0F',
u':wastebasket:': u'\U0001F5D1',
u':wastebasket_selector:': u'\U0001F5D1\U0000FE0F',
u':watch:': u'\U0000231A',
u':water_buffalo:': u'\U0001F403',
u':water_closet:': u'\U0001F6BE',
u':water_wave:': u'\U0001F30A',
u':watermelon:': u'\U0001F349',
u':waving_hand:': u'\U0001F44B',
u':waving_hand_dark_skin_tone:': u'\U0001F44B\U0001F3FF',
u':waving_hand_light_skin_tone:': u'\U0001F44B\U0001F3FB',
u':waving_hand_medium-dark_skin_tone:': u'\U0001F44B\U0001F3FE',
u':waving_hand_medium-light_skin_tone:': u'\U0001F44B\U0001F3FC',
u':waving_hand_medium_skin_tone:': u'\U0001F44B\U0001F3FD',
u':wavy_dash:': u'\U00003030',
u':wavy_dash_selector:': u'\U00003030\U0000FE0F',
u':waxing_crescent_moon:': u'\U0001F312',
u':waxing_gibbous_moon:': u'\U0001F314',
u':weary_cat_face:': u'\U0001F640',
u':weary_face:': u'\U0001F629',
u':wedding:': u'\U0001F492',
u':whale:': u'\U0001F40B',
u':wheel_of_dharma:': u'\U00002638',
u':wheel_of_dharma_selector:': u'\U00002638\U0000FE0F',
u':wheelchair_symbol:': u'\U0000267F',
u':white-haired_man:': u'\U0001F468\U0000200D\U0001F9B3',
u':white-haired_woman:': u'\U0001F469\U0000200D\U0001F9B3',
u':white_circle:': u'\U000026AA',
u':white_exclamation_mark:': u'\U00002755',
u':white_flag:': u'\U0001F3F3',
u':white_flag_selector:': u'\U0001F3F3\U0000FE0F',
u':white_flower:': u'\U0001F4AE',
u':white_hair:': u'\U0001F9B3',
u':white_heart:': u'\U0001F90D',
u':white_heavy_check_mark:': u'\U00002705',
u':white_large_square:': u'\U00002B1C',
u':white_medium-small_square:': u'\U000025FD',
u':white_medium_square:': u'\U000025FB',
u':white_medium_square_selector:': u'\U000025FB\U0000FE0F',
u':white_medium_star:': u'\U00002B50',
u':white_question_mark:': u'\U00002754',
u':white_small_square:': u'\U000025AB',
u':white_small_square_selector:': u'\U000025AB\U0000FE0F',
u':white_square_button:': u'\U0001F533',
u':wilted_flower:': u'\U0001F940',
u':wind_chime:': u'\U0001F390',
u':wind_face:': u'\U0001F32C',
u':wind_face_selector:': u'\U0001F32C\U0000FE0F',
u':wine_glass:': u'\U0001F377',
u':winking_face:': u'\U0001F609',
u':winking_face_with_tongue:': u'\U0001F61C',
u':wolf_face:': u'\U0001F43A',
u':woman-blond_hair:': u'\U0001F471\U0000200D\U00002640',
u':woman-dark_skin_tone-bald:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9B2',
u':woman-dark_skin_tone-blond_hair:': u'\U0001F471\U0001F3FF\U0000200D\U00002640',
u':woman-dark_skin_tone-curly_hair:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9B1',
u':woman-dark_skin_tone-red_hair:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9B0',
u':woman-dark_skin_tone-white_hair:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9B3',
u':woman-light_skin_tone-bald:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9B2',
u':woman-light_skin_tone-blond_hair:': u'\U0001F471\U0001F3FB\U0000200D\U00002640',
u':woman-light_skin_tone-curly_hair:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9B1',
u':woman-light_skin_tone-red_hair:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9B0',
u':woman-light_skin_tone-white_hair:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9B3',
u':woman-medium-dark_skin_tone-bald:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9B2',
u':woman-medium-dark_skin_tone-blond_hair:': u'\U0001F471\U0001F3FE\U0000200D\U00002640',
u':woman-medium-dark_skin_tone-curly_hair:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9B1',
u':woman-medium-dark_skin_tone-red_hair:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9B0',
u':woman-medium-dark_skin_tone-white_hair:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9B3',
u':woman-medium-light_skin_tone-bald:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9B2',
u':woman-medium-light_skin_tone-blond_hair:': u'\U0001F471\U0001F3FC\U0000200D\U00002640',
u':woman-medium-light_skin_tone-curly_hair:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9B1',
u':woman-medium-light_skin_tone-red_hair:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9B0',
u':woman-medium-light_skin_tone-white_hair:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9B3',
u':woman-medium_skin_tone-bald:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9B2',
u':woman-medium_skin_tone-blond_hair:': u'\U0001F471\U0001F3FD\U0000200D\U00002640',
u':woman-medium_skin_tone-curly_hair:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9B1',
u':woman-medium_skin_tone-red_hair:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9B0',
u':woman-medium_skin_tone-white_hair:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9B3',
u':woman:': u'\U0001F469',
u':woman_and_man_holding_hands-dark_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':woman_and_man_holding_hands-dark_skin_tone-medium-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FE',
u':woman_and_man_holding_hands-dark_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':woman_and_man_holding_hands-dark_skin_tone-medium_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FD',
u':woman_and_man_holding_hands-dark_skin_tone:': u'\U0001F46B\U0001F3FF',
u':woman_and_man_holding_hands-light_skin_tone-dark_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FF',
u':woman_and_man_holding_hands-light_skin_tone-medium-dark_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FE',
u':woman_and_man_holding_hands-light_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':woman_and_man_holding_hands-light_skin_tone-medium_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FD',
u':woman_and_man_holding_hands-light_skin_tone:': u'\U0001F46B\U0001F3FB',
u':woman_and_man_holding_hands-medium-dark_skin_tone-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FF',
u':woman_and_man_holding_hands-medium-dark_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':woman_and_man_holding_hands-medium-dark_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':woman_and_man_holding_hands-medium-dark_skin_tone-medium_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FD',
u':woman_and_man_holding_hands-medium-dark_skin_tone:': u'\U0001F46B\U0001F3FE',
u':woman_and_man_holding_hands-medium-light_skin_tone-dark_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FF',
u':woman_and_man_holding_hands-medium-light_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':woman_and_man_holding_hands-medium-light_skin_tone-medium-dark_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FE',
u':woman_and_man_holding_hands-medium-light_skin_tone-medium_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FD',
u':woman_and_man_holding_hands-medium-light_skin_tone:': u'\U0001F46B\U0001F3FC',
u':woman_and_man_holding_hands-medium_skin_tone-dark_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FF',
u':woman_and_man_holding_hands-medium_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FB',
u':woman_and_man_holding_hands-medium_skin_tone-medium-dark_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FE',
u':woman_and_man_holding_hands-medium_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F468\U0001F3FC',
u':woman_and_man_holding_hands-medium_skin_tone:': u'\U0001F46B\U0001F3FD',
u':woman_artist:': u'\U0001F469\U0000200D\U0001F3A8',
u':woman_artist_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F3A8',
u':woman_artist_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F3A8',
u':woman_artist_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F3A8',
u':woman_artist_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F3A8',
u':woman_artist_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F3A8',
u':woman_astronaut:': u'\U0001F469\U0000200D\U0001F680',
u':woman_astronaut_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F680',
u':woman_astronaut_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F680',
u':woman_astronaut_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F680',
u':woman_astronaut_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F680',
u':woman_astronaut_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F680',
u':woman_biking-dark_skin_tone:': u'\U0001F6B4\U0001F3FF\U0000200D\U00002640',
u':woman_biking-light_skin_tone:': u'\U0001F6B4\U0001F3FB\U0000200D\U00002640',
u':woman_biking-medium-dark_skin_tone:': u'\U0001F6B4\U0001F3FE\U0000200D\U00002640',
u':woman_biking-medium-light_skin_tone:': u'\U0001F6B4\U0001F3FC\U0000200D\U00002640',
u':woman_biking-medium_skin_tone:': u'\U0001F6B4\U0001F3FD\U0000200D\U00002640',
u':woman_biking:': u'\U0001F6B4\U0000200D\U00002640',
u':woman_biking_dark_skin_tone:': u'\U0001F6B4\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_biking_light_skin_tone:': u'\U0001F6B4\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_biking_medium-dark_skin_tone:': u'\U0001F6B4\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_biking_medium-light_skin_tone:': u'\U0001F6B4\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_biking_medium_skin_tone:': u'\U0001F6B4\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_biking_selector:': u'\U0001F6B4\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball-dark_skin_tone:': u'\U000026F9\U0001F3FF\U0000200D\U00002640',
u':woman_bouncing_ball-light_skin_tone:': u'\U000026F9\U0001F3FB\U0000200D\U00002640',
u':woman_bouncing_ball-medium-dark_skin_tone:': u'\U000026F9\U0001F3FE\U0000200D\U00002640',
u':woman_bouncing_ball-medium-light_skin_tone:': u'\U000026F9\U0001F3FC\U0000200D\U00002640',
u':woman_bouncing_ball-medium_skin_tone:': u'\U000026F9\U0001F3FD\U0000200D\U00002640',
u':woman_bouncing_ball:': u'\U000026F9\U0000200D\U00002640',
u':woman_bouncing_ball_2:': u'\U000026F9\U0000FE0F\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_3:': u'\U000026F9\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_dark_skin_tone:': u'\U000026F9\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_light_skin_tone:': u'\U000026F9\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_medium-dark_skin_tone:': u'\U000026F9\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_medium-light_skin_tone:': u'\U000026F9\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_medium_skin_tone:': u'\U000026F9\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_bouncing_ball_selector:': u'\U000026F9\U0000FE0F\U0000200D\U00002640',
u':woman_bowing-dark_skin_tone:': u'\U0001F647\U0001F3FF\U0000200D\U00002640',
u':woman_bowing-light_skin_tone:': u'\U0001F647\U0001F3FB\U0000200D\U00002640',
u':woman_bowing-medium-dark_skin_tone:': u'\U0001F647\U0001F3FE\U0000200D\U00002640',
u':woman_bowing-medium-light_skin_tone:': u'\U0001F647\U0001F3FC\U0000200D\U00002640',
u':woman_bowing-medium_skin_tone:': u'\U0001F647\U0001F3FD\U0000200D\U00002640',
u':woman_bowing:': u'\U0001F647\U0000200D\U00002640',
u':woman_bowing_dark_skin_tone:': u'\U0001F647\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_bowing_light_skin_tone:': u'\U0001F647\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_bowing_medium-dark_skin_tone:': u'\U0001F647\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_bowing_medium-light_skin_tone:': u'\U0001F647\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_bowing_medium_skin_tone:': u'\U0001F647\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_bowing_selector:': u'\U0001F647\U0000200D\U00002640\U0000FE0F',
u':woman_cartwheeling-dark_skin_tone:': u'\U0001F938\U0001F3FF\U0000200D\U00002640',
u':woman_cartwheeling-light_skin_tone:': u'\U0001F938\U0001F3FB\U0000200D\U00002640',
u':woman_cartwheeling-medium-dark_skin_tone:': u'\U0001F938\U0001F3FE\U0000200D\U00002640',
u':woman_cartwheeling-medium-light_skin_tone:': u'\U0001F938\U0001F3FC\U0000200D\U00002640',
u':woman_cartwheeling-medium_skin_tone:': u'\U0001F938\U0001F3FD\U0000200D\U00002640',
u':woman_cartwheeling:': u'\U0001F938\U0000200D\U00002640',
u':woman_cartwheeling_dark_skin_tone:': u'\U0001F938\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_cartwheeling_light_skin_tone:': u'\U0001F938\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_cartwheeling_medium-dark_skin_tone:': u'\U0001F938\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_cartwheeling_medium-light_skin_tone:': u'\U0001F938\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_cartwheeling_medium_skin_tone:': u'\U0001F938\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_cartwheeling_selector:': u'\U0001F938\U0000200D\U00002640\U0000FE0F',
u':woman_climbing-dark_skin_tone:': u'\U0001F9D7\U0001F3FF\U0000200D\U00002640',
u':woman_climbing-light_skin_tone:': u'\U0001F9D7\U0001F3FB\U0000200D\U00002640',
u':woman_climbing-medium-dark_skin_tone:': u'\U0001F9D7\U0001F3FE\U0000200D\U00002640',
u':woman_climbing-medium-light_skin_tone:': u'\U0001F9D7\U0001F3FC\U0000200D\U00002640',
u':woman_climbing-medium_skin_tone:': u'\U0001F9D7\U0001F3FD\U0000200D\U00002640',
u':woman_climbing:': u'\U0001F9D7\U0000200D\U00002640',
u':woman_climbing_dark_skin_tone:': u'\U0001F9D7\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_climbing_light_skin_tone:': u'\U0001F9D7\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_climbing_medium-dark_skin_tone:': u'\U0001F9D7\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_climbing_medium-light_skin_tone:': u'\U0001F9D7\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_climbing_medium_skin_tone:': u'\U0001F9D7\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_climbing_selector:': u'\U0001F9D7\U0000200D\U00002640\U0000FE0F',
u':woman_construction_worker-dark_skin_tone:': u'\U0001F477\U0001F3FF\U0000200D\U00002640',
u':woman_construction_worker-light_skin_tone:': u'\U0001F477\U0001F3FB\U0000200D\U00002640',
u':woman_construction_worker-medium-dark_skin_tone:': u'\U0001F477\U0001F3FE\U0000200D\U00002640',
u':woman_construction_worker-medium-light_skin_tone:': u'\U0001F477\U0001F3FC\U0000200D\U00002640',
u':woman_construction_worker-medium_skin_tone:': u'\U0001F477\U0001F3FD\U0000200D\U00002640',
u':woman_construction_worker:': u'\U0001F477\U0000200D\U00002640',
u':woman_construction_worker_dark_skin_tone:': u'\U0001F477\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_construction_worker_light_skin_tone:': u'\U0001F477\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_construction_worker_medium-dark_skin_tone:': u'\U0001F477\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_construction_worker_medium-light_skin_tone:': u'\U0001F477\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_construction_worker_medium_skin_tone:': u'\U0001F477\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_construction_worker_selector:': u'\U0001F477\U0000200D\U00002640\U0000FE0F',
u':woman_cook:': u'\U0001F469\U0000200D\U0001F373',
u':woman_cook_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F373',
u':woman_cook_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F373',
u':woman_cook_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F373',
u':woman_cook_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F373',
u':woman_cook_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F373',
u':woman_dancing:': u'\U0001F483',
u':woman_dancing_dark_skin_tone:': u'\U0001F483\U0001F3FF',
u':woman_dancing_light_skin_tone:': u'\U0001F483\U0001F3FB',
u':woman_dancing_medium-dark_skin_tone:': u'\U0001F483\U0001F3FE',
u':woman_dancing_medium-light_skin_tone:': u'\U0001F483\U0001F3FC',
u':woman_dancing_medium_skin_tone:': u'\U0001F483\U0001F3FD',
u':woman_dark_skin_tone:': u'\U0001F469\U0001F3FF',
u':woman_detective-dark_skin_tone:': u'\U0001F575\U0001F3FF\U0000200D\U00002640',
u':woman_detective-light_skin_tone:': u'\U0001F575\U0001F3FB\U0000200D\U00002640',
u':woman_detective-medium-dark_skin_tone:': u'\U0001F575\U0001F3FE\U0000200D\U00002640',
u':woman_detective-medium-light_skin_tone:': u'\U0001F575\U0001F3FC\U0000200D\U00002640',
u':woman_detective-medium_skin_tone:': u'\U0001F575\U0001F3FD\U0000200D\U00002640',
u':woman_detective:': u'\U0001F575\U0000200D\U00002640',
u':woman_detective_2:': u'\U0001F575\U0000200D\U00002640\U0000FE0F',
u':woman_detective_selector_2:': u'\U0001F575\U0000FE0F\U0000200D\U00002640\U0000FE0F',
u':woman_detective_dark_skin_tone:': u'\U0001F575\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_detective_light_skin_tone:': u'\U0001F575\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_detective_medium-dark_skin_tone:': u'\U0001F575\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_detective_medium-light_skin_tone:': u'\U0001F575\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_detective_medium_skin_tone:': u'\U0001F575\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_detective_selector:': u'\U0001F575\U0000FE0F\U0000200D\U00002640',
u':woman_elf-dark_skin_tone:': u'\U0001F9DD\U0001F3FF\U0000200D\U00002640',
u':woman_elf-light_skin_tone:': u'\U0001F9DD\U0001F3FB\U0000200D\U00002640',
u':woman_elf-medium-dark_skin_tone:': u'\U0001F9DD\U0001F3FE\U0000200D\U00002640',
u':woman_elf-medium-light_skin_tone:': u'\U0001F9DD\U0001F3FC\U0000200D\U00002640',
u':woman_elf-medium_skin_tone:': u'\U0001F9DD\U0001F3FD\U0000200D\U00002640',
u':woman_elf:': u'\U0001F9DD\U0000200D\U00002640',
u':woman_elf_dark_skin_tone:': u'\U0001F9DD\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_elf_light_skin_tone:': u'\U0001F9DD\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_elf_medium-dark_skin_tone:': u'\U0001F9DD\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_elf_medium-light_skin_tone:': u'\U0001F9DD\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_elf_medium_skin_tone:': u'\U0001F9DD\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_elf_selector:': u'\U0001F9DD\U0000200D\U00002640\U0000FE0F',
u':woman_facepalming-dark_skin_tone:': u'\U0001F926\U0001F3FF\U0000200D\U00002640',
u':woman_facepalming-light_skin_tone:': u'\U0001F926\U0001F3FB\U0000200D\U00002640',
u':woman_facepalming-medium-dark_skin_tone:': u'\U0001F926\U0001F3FE\U0000200D\U00002640',
u':woman_facepalming-medium-light_skin_tone:': u'\U0001F926\U0001F3FC\U0000200D\U00002640',
u':woman_facepalming-medium_skin_tone:': u'\U0001F926\U0001F3FD\U0000200D\U00002640',
u':woman_facepalming:': u'\U0001F926\U0000200D\U00002640',
u':woman_facepalming_dark_skin_tone:': u'\U0001F926\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_facepalming_light_skin_tone:': u'\U0001F926\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_facepalming_medium-dark_skin_tone:': u'\U0001F926\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_facepalming_medium-light_skin_tone:': u'\U0001F926\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_facepalming_medium_skin_tone:': u'\U0001F926\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_facepalming_selector:': u'\U0001F926\U0000200D\U00002640\U0000FE0F',
u':woman_factory_worker:': u'\U0001F469\U0000200D\U0001F3ED',
u':woman_factory_worker_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F3ED',
u':woman_factory_worker_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F3ED',
u':woman_factory_worker_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F3ED',
u':woman_factory_worker_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F3ED',
u':woman_factory_worker_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F3ED',
u':woman_fairy-dark_skin_tone:': u'\U0001F9DA\U0001F3FF\U0000200D\U00002640',
u':woman_fairy-light_skin_tone:': u'\U0001F9DA\U0001F3FB\U0000200D\U00002640',
u':woman_fairy-medium-dark_skin_tone:': u'\U0001F9DA\U0001F3FE\U0000200D\U00002640',
u':woman_fairy-medium-light_skin_tone:': u'\U0001F9DA\U0001F3FC\U0000200D\U00002640',
u':woman_fairy-medium_skin_tone:': u'\U0001F9DA\U0001F3FD\U0000200D\U00002640',
u':woman_fairy:': u'\U0001F9DA\U0000200D\U00002640',
u':woman_fairy_dark_skin_tone:': u'\U0001F9DA\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_fairy_light_skin_tone:': u'\U0001F9DA\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_fairy_medium-dark_skin_tone:': u'\U0001F9DA\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_fairy_medium-light_skin_tone:': u'\U0001F9DA\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_fairy_medium_skin_tone:': u'\U0001F9DA\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_fairy_selector:': u'\U0001F9DA\U0000200D\U00002640\U0000FE0F',
u':woman_farmer:': u'\U0001F469\U0000200D\U0001F33E',
u':woman_farmer_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F33E',
u':woman_farmer_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F33E',
u':woman_farmer_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F33E',
u':woman_farmer_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F33E',
u':woman_farmer_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F33E',
u':woman_firefighter:': u'\U0001F469\U0000200D\U0001F692',
u':woman_firefighter_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F692',
u':woman_firefighter_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F692',
u':woman_firefighter_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F692',
u':woman_firefighter_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F692',
u':woman_firefighter_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F692',
u':woman_frowning-dark_skin_tone:': u'\U0001F64D\U0001F3FF\U0000200D\U00002640',
u':woman_frowning-light_skin_tone:': u'\U0001F64D\U0001F3FB\U0000200D\U00002640',
u':woman_frowning-medium-dark_skin_tone:': u'\U0001F64D\U0001F3FE\U0000200D\U00002640',
u':woman_frowning-medium-light_skin_tone:': u'\U0001F64D\U0001F3FC\U0000200D\U00002640',
u':woman_frowning-medium_skin_tone:': u'\U0001F64D\U0001F3FD\U0000200D\U00002640',
u':woman_frowning:': u'\U0001F64D\U0000200D\U00002640',
u':woman_frowning_dark_skin_tone:': u'\U0001F64D\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_frowning_light_skin_tone:': u'\U0001F64D\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_frowning_medium-dark_skin_tone:': u'\U0001F64D\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_frowning_medium-light_skin_tone:': u'\U0001F64D\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_frowning_medium_skin_tone:': u'\U0001F64D\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_frowning_selector:': u'\U0001F64D\U0000200D\U00002640\U0000FE0F',
u':woman_genie:': u'\U0001F9DE\U0000200D\U00002640',
u':woman_genie_selector:': u'\U0001F9DE\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_NO-dark_skin_tone:': u'\U0001F645\U0001F3FF\U0000200D\U00002640',
u':woman_gesturing_NO-light_skin_tone:': u'\U0001F645\U0001F3FB\U0000200D\U00002640',
u':woman_gesturing_NO-medium-dark_skin_tone:': u'\U0001F645\U0001F3FE\U0000200D\U00002640',
u':woman_gesturing_NO-medium-light_skin_tone:': u'\U0001F645\U0001F3FC\U0000200D\U00002640',
u':woman_gesturing_NO-medium_skin_tone:': u'\U0001F645\U0001F3FD\U0000200D\U00002640',
u':woman_gesturing_NO:': u'\U0001F645\U0000200D\U00002640',
u':woman_gesturing_NO_dark_skin_tone:': u'\U0001F645\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_NO_light_skin_tone:': u'\U0001F645\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_NO_medium-dark_skin_tone:': u'\U0001F645\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_NO_medium-light_skin_tone:': u'\U0001F645\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_NO_medium_skin_tone:': u'\U0001F645\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_NO_selector:': u'\U0001F645\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_OK-dark_skin_tone:': u'\U0001F646\U0001F3FF\U0000200D\U00002640',
u':woman_gesturing_OK-light_skin_tone:': u'\U0001F646\U0001F3FB\U0000200D\U00002640',
u':woman_gesturing_OK-medium-dark_skin_tone:': u'\U0001F646\U0001F3FE\U0000200D\U00002640',
u':woman_gesturing_OK-medium-light_skin_tone:': u'\U0001F646\U0001F3FC\U0000200D\U00002640',
u':woman_gesturing_OK-medium_skin_tone:': u'\U0001F646\U0001F3FD\U0000200D\U00002640',
u':woman_gesturing_OK:': u'\U0001F646\U0000200D\U00002640',
u':woman_gesturing_OK_dark_skin_tone:': u'\U0001F646\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_OK_light_skin_tone:': u'\U0001F646\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_OK_medium-dark_skin_tone:': u'\U0001F646\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_OK_medium-light_skin_tone:': u'\U0001F646\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_OK_medium_skin_tone:': u'\U0001F646\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_gesturing_OK_selector:': u'\U0001F646\U0000200D\U00002640\U0000FE0F',
u':woman_getting_haircut-dark_skin_tone:': u'\U0001F487\U0001F3FF\U0000200D\U00002640',
u':woman_getting_haircut-light_skin_tone:': u'\U0001F487\U0001F3FB\U0000200D\U00002640',
u':woman_getting_haircut-medium-dark_skin_tone:': u'\U0001F487\U0001F3FE\U0000200D\U00002640',
u':woman_getting_haircut-medium-light_skin_tone:': u'\U0001F487\U0001F3FC\U0000200D\U00002640',
u':woman_getting_haircut-medium_skin_tone:': u'\U0001F487\U0001F3FD\U0000200D\U00002640',
u':woman_getting_haircut:': u'\U0001F487\U0000200D\U00002640',
u':woman_getting_haircut_dark_skin_tone:': u'\U0001F487\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_getting_haircut_light_skin_tone:': u'\U0001F487\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_getting_haircut_medium-dark_skin_tone:': u'\U0001F487\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_getting_haircut_medium-light_skin_tone:': u'\U0001F487\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_getting_haircut_medium_skin_tone:': u'\U0001F487\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_getting_haircut_selector:': u'\U0001F487\U0000200D\U00002640\U0000FE0F',
u':woman_getting_massage-dark_skin_tone:': u'\U0001F486\U0001F3FF\U0000200D\U00002640',
u':woman_getting_massage-light_skin_tone:': u'\U0001F486\U0001F3FB\U0000200D\U00002640',
u':woman_getting_massage-medium-dark_skin_tone:': u'\U0001F486\U0001F3FE\U0000200D\U00002640',
u':woman_getting_massage-medium-light_skin_tone:': u'\U0001F486\U0001F3FC\U0000200D\U00002640',
u':woman_getting_massage-medium_skin_tone:': u'\U0001F486\U0001F3FD\U0000200D\U00002640',
u':woman_getting_massage:': u'\U0001F486\U0000200D\U00002640',
u':woman_getting_massage_dark_skin_tone:': u'\U0001F486\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_getting_massage_light_skin_tone:': u'\U0001F486\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_getting_massage_medium-dark_skin_tone:': u'\U0001F486\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_getting_massage_medium-light_skin_tone:': u'\U0001F486\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_getting_massage_medium_skin_tone:': u'\U0001F486\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_getting_massage_selector:': u'\U0001F486\U0000200D\U00002640\U0000FE0F',
u':woman_golfing-dark_skin_tone:': u'\U0001F3CC\U0001F3FF\U0000200D\U00002640',
u':woman_golfing-light_skin_tone:': u'\U0001F3CC\U0001F3FB\U0000200D\U00002640',
u':woman_golfing-medium-dark_skin_tone:': u'\U0001F3CC\U0001F3FE\U0000200D\U00002640',
u':woman_golfing-medium-light_skin_tone:': u'\U0001F3CC\U0001F3FC\U0000200D\U00002640',
u':woman_golfing-medium_skin_tone:': u'\U0001F3CC\U0001F3FD\U0000200D\U00002640',
u':woman_golfing:': u'\U0001F3CC\U0000200D\U00002640',
u':woman_golfing_2:': u'\U0001F3CC\U0000FE0F\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_3:': u'\U0001F3CC\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_dark_skin_tone:': u'\U0001F3CC\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_light_skin_tone:': u'\U0001F3CC\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_medium-dark_skin_tone:': u'\U0001F3CC\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_medium-light_skin_tone:': u'\U0001F3CC\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_medium_skin_tone:': u'\U0001F3CC\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_golfing_selector:': u'\U0001F3CC\U0000FE0F\U0000200D\U00002640',
u':woman_guard-dark_skin_tone:': u'\U0001F482\U0001F3FF\U0000200D\U00002640',
u':woman_guard-light_skin_tone:': u'\U0001F482\U0001F3FB\U0000200D\U00002640',
u':woman_guard-medium-dark_skin_tone:': u'\U0001F482\U0001F3FE\U0000200D\U00002640',
u':woman_guard-medium-light_skin_tone:': u'\U0001F482\U0001F3FC\U0000200D\U00002640',
u':woman_guard-medium_skin_tone:': u'\U0001F482\U0001F3FD\U0000200D\U00002640',
u':woman_guard:': u'\U0001F482\U0000200D\U00002640',
u':woman_guard_dark_skin_tone:': u'\U0001F482\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_guard_light_skin_tone:': u'\U0001F482\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_guard_medium-dark_skin_tone:': u'\U0001F482\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_guard_medium-light_skin_tone:': u'\U0001F482\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_guard_medium_skin_tone:': u'\U0001F482\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_guard_selector:': u'\U0001F482\U0000200D\U00002640\U0000FE0F',
u':woman_health_worker-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U00002695',
u':woman_health_worker-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U00002695',
u':woman_health_worker-medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U00002695',
u':woman_health_worker-medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U00002695',
u':woman_health_worker-medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U00002695',
u':woman_health_worker:': u'\U0001F469\U0000200D\U00002695',
u':woman_health_worker_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U00002695\U0000FE0F',
u':woman_health_worker_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U00002695\U0000FE0F',
u':woman_health_worker_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U00002695\U0000FE0F',
u':woman_health_worker_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U00002695\U0000FE0F',
u':woman_health_worker_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U00002695\U0000FE0F',
u':woman_health_worker_selector:': u'\U0001F469\U0000200D\U00002695\U0000FE0F',
u':woman_in_lotus_position-dark_skin_tone:': u'\U0001F9D8\U0001F3FF\U0000200D\U00002640',
u':woman_in_lotus_position-light_skin_tone:': u'\U0001F9D8\U0001F3FB\U0000200D\U00002640',
u':woman_in_lotus_position-medium-dark_skin_tone:': u'\U0001F9D8\U0001F3FE\U0000200D\U00002640',
u':woman_in_lotus_position-medium-light_skin_tone:': u'\U0001F9D8\U0001F3FC\U0000200D\U00002640',
u':woman_in_lotus_position-medium_skin_tone:': u'\U0001F9D8\U0001F3FD\U0000200D\U00002640',
u':woman_in_lotus_position:': u'\U0001F9D8\U0000200D\U00002640',
u':woman_in_lotus_position_dark_skin_tone:': u'\U0001F9D8\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_in_lotus_position_light_skin_tone:': u'\U0001F9D8\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_in_lotus_position_medium-dark_skin_tone:': u'\U0001F9D8\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_in_lotus_position_medium-light_skin_tone:': u'\U0001F9D8\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_in_lotus_position_medium_skin_tone:': u'\U0001F9D8\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_in_lotus_position_selector:': u'\U0001F9D8\U0000200D\U00002640\U0000FE0F',
u':woman_in_manual_wheelchair-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9BD',
u':woman_in_manual_wheelchair-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9BD',
u':woman_in_manual_wheelchair-medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9BD',
u':woman_in_manual_wheelchair-medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9BD',
u':woman_in_manual_wheelchair-medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9BD',
u':woman_in_manual_wheelchair:': u'\U0001F469\U0000200D\U0001F9BD',
u':woman_in_motorized_wheelchair-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9BC',
u':woman_in_motorized_wheelchair-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9BC',
u':woman_in_motorized_wheelchair-medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9BC',
u':woman_in_motorized_wheelchair-medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9BC',
u':woman_in_motorized_wheelchair-medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9BC',
u':woman_in_motorized_wheelchair:': u'\U0001F469\U0000200D\U0001F9BC',
u':woman_in_steamy_room-dark_skin_tone:': u'\U0001F9D6\U0001F3FF\U0000200D\U00002640',
u':woman_in_steamy_room-light_skin_tone:': u'\U0001F9D6\U0001F3FB\U0000200D\U00002640',
u':woman_in_steamy_room-medium-dark_skin_tone:': u'\U0001F9D6\U0001F3FE\U0000200D\U00002640',
u':woman_in_steamy_room-medium-light_skin_tone:': u'\U0001F9D6\U0001F3FC\U0000200D\U00002640',
u':woman_in_steamy_room-medium_skin_tone:': u'\U0001F9D6\U0001F3FD\U0000200D\U00002640',
u':woman_in_steamy_room:': u'\U0001F9D6\U0000200D\U00002640',
u':woman_in_steamy_room_dark_skin_tone:': u'\U0001F9D6\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_in_steamy_room_light_skin_tone:': u'\U0001F9D6\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_in_steamy_room_medium-dark_skin_tone:': u'\U0001F9D6\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_in_steamy_room_medium-light_skin_tone:': u'\U0001F9D6\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_in_steamy_room_medium_skin_tone:': u'\U0001F9D6\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_in_steamy_room_selector:': u'\U0001F9D6\U0000200D\U00002640\U0000FE0F',
u':woman_judge-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U00002696',
u':woman_judge-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U00002696',
u':woman_judge-medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U00002696',
u':woman_judge-medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U00002696',
u':woman_judge-medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U00002696',
u':woman_judge:': u'\U0001F469\U0000200D\U00002696',
u':woman_judge_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U00002696\U0000FE0F',
u':woman_judge_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U00002696\U0000FE0F',
u':woman_judge_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U00002696\U0000FE0F',
u':woman_judge_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U00002696\U0000FE0F',
u':woman_judge_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U00002696\U0000FE0F',
u':woman_judge_selector:': u'\U0001F469\U0000200D\U00002696\U0000FE0F',
u':woman_juggling-dark_skin_tone:': u'\U0001F939\U0001F3FF\U0000200D\U00002640',
u':woman_juggling-light_skin_tone:': u'\U0001F939\U0001F3FB\U0000200D\U00002640',
u':woman_juggling-medium-dark_skin_tone:': u'\U0001F939\U0001F3FE\U0000200D\U00002640',
u':woman_juggling-medium-light_skin_tone:': u'\U0001F939\U0001F3FC\U0000200D\U00002640',
u':woman_juggling-medium_skin_tone:': u'\U0001F939\U0001F3FD\U0000200D\U00002640',
u':woman_juggling:': u'\U0001F939\U0000200D\U00002640',
u':woman_juggling_dark_skin_tone:': u'\U0001F939\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_juggling_light_skin_tone:': u'\U0001F939\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_juggling_medium-dark_skin_tone:': u'\U0001F939\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_juggling_medium-light_skin_tone:': u'\U0001F939\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_juggling_medium_skin_tone:': u'\U0001F939\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_juggling_selector:': u'\U0001F939\U0000200D\U00002640\U0000FE0F',
u':woman_kneeling-dark_skin_tone:': u'\U0001F9CE\U0001F3FF\U0000200D\U00002640',
u':woman_kneeling-dark_skin_tone_selector:': u'\U0001F9CE\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_kneeling-light_skin_tone:': u'\U0001F9CE\U0001F3FB\U0000200D\U00002640',
u':woman_kneeling-light_skin_tone_selector:': u'\U0001F9CE\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_kneeling-medium-dark_skin_tone:': u'\U0001F9CE\U0001F3FE\U0000200D\U00002640',
u':woman_kneeling-medium-dark_skin_tone_selector:': u'\U0001F9CE\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_kneeling-medium-light_skin_tone:': u'\U0001F9CE\U0001F3FC\U0000200D\U00002640',
u':woman_kneeling-medium-light_skin_tone_selector:': u'\U0001F9CE\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_kneeling-medium_skin_tone:': u'\U0001F9CE\U0001F3FD\U0000200D\U00002640',
u':woman_kneeling-medium_skin_tone_selector:': u'\U0001F9CE\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_kneeling:': u'\U0001F9CE\U0000200D\U00002640',
u':woman_kneeling_selector:': u'\U0001F9CE\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights-dark_skin_tone:': u'\U0001F3CB\U0001F3FF\U0000200D\U00002640',
u':woman_lifting_weights-light_skin_tone:': u'\U0001F3CB\U0001F3FB\U0000200D\U00002640',
u':woman_lifting_weights-medium-dark_skin_tone:': u'\U0001F3CB\U0001F3FE\U0000200D\U00002640',
u':woman_lifting_weights-medium-light_skin_tone:': u'\U0001F3CB\U0001F3FC\U0000200D\U00002640',
u':woman_lifting_weights-medium_skin_tone:': u'\U0001F3CB\U0001F3FD\U0000200D\U00002640',
u':woman_lifting_weights:': u'\U0001F3CB\U0000200D\U00002640',
u':woman_lifting_weights_2:': u'\U0001F3CB\U0000FE0F\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_3:': u'\U0001F3CB\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_dark_skin_tone:': u'\U0001F3CB\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_light_skin_tone:': u'\U0001F3CB\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_medium-dark_skin_tone:': u'\U0001F3CB\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_medium-light_skin_tone:': u'\U0001F3CB\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_medium_skin_tone:': u'\U0001F3CB\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_lifting_weights_selector:': u'\U0001F3CB\U0000FE0F\U0000200D\U00002640',
u':woman_light_skin_tone:': u'\U0001F469\U0001F3FB',
u':woman_mage-dark_skin_tone:': u'\U0001F9D9\U0001F3FF\U0000200D\U00002640',
u':woman_mage-light_skin_tone:': u'\U0001F9D9\U0001F3FB\U0000200D\U00002640',
u':woman_mage-medium-dark_skin_tone:': u'\U0001F9D9\U0001F3FE\U0000200D\U00002640',
u':woman_mage-medium-light_skin_tone:': u'\U0001F9D9\U0001F3FC\U0000200D\U00002640',
u':woman_mage-medium_skin_tone:': u'\U0001F9D9\U0001F3FD\U0000200D\U00002640',
u':woman_mage:': u'\U0001F9D9\U0000200D\U00002640',
u':woman_mage_dark_skin_tone:': u'\U0001F9D9\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_mage_light_skin_tone:': u'\U0001F9D9\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_mage_medium-dark_skin_tone:': u'\U0001F9D9\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_mage_medium-light_skin_tone:': u'\U0001F9D9\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_mage_medium_skin_tone:': u'\U0001F9D9\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_mage_selector:': u'\U0001F9D9\U0000200D\U00002640\U0000FE0F',
u':woman_mechanic:': u'\U0001F469\U0000200D\U0001F527',
u':woman_mechanic_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F527',
u':woman_mechanic_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F527',
u':woman_mechanic_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F527',
u':woman_mechanic_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F527',
u':woman_mechanic_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F527',
u':woman_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE',
u':woman_medium-light_skin_tone:': u'\U0001F469\U0001F3FC',
u':woman_medium_skin_tone:': u'\U0001F469\U0001F3FD',
u':woman_mountain_biking-dark_skin_tone:': u'\U0001F6B5\U0001F3FF\U0000200D\U00002640',
u':woman_mountain_biking-light_skin_tone:': u'\U0001F6B5\U0001F3FB\U0000200D\U00002640',
u':woman_mountain_biking-medium-dark_skin_tone:': u'\U0001F6B5\U0001F3FE\U0000200D\U00002640',
u':woman_mountain_biking-medium-light_skin_tone:': u'\U0001F6B5\U0001F3FC\U0000200D\U00002640',
u':woman_mountain_biking-medium_skin_tone:': u'\U0001F6B5\U0001F3FD\U0000200D\U00002640',
u':woman_mountain_biking:': u'\U0001F6B5\U0000200D\U00002640',
u':woman_mountain_biking_dark_skin_tone:': u'\U0001F6B5\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_mountain_biking_light_skin_tone:': u'\U0001F6B5\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_mountain_biking_medium-dark_skin_tone:': u'\U0001F6B5\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_mountain_biking_medium-light_skin_tone:': u'\U0001F6B5\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_mountain_biking_medium_skin_tone:': u'\U0001F6B5\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_mountain_biking_selector:': u'\U0001F6B5\U0000200D\U00002640\U0000FE0F',
u':woman_office_worker:': u'\U0001F469\U0000200D\U0001F4BC',
u':woman_office_worker_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F4BC',
u':woman_office_worker_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F4BC',
u':woman_office_worker_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F4BC',
u':woman_office_worker_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F4BC',
u':woman_office_worker_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F4BC',
u':woman_pilot-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U00002708',
u':woman_pilot-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U00002708',
u':woman_pilot-medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U00002708',
u':woman_pilot-medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U00002708',
u':woman_pilot-medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U00002708',
u':woman_pilot:': u'\U0001F469\U0000200D\U00002708',
u':woman_pilot_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U00002708\U0000FE0F',
u':woman_pilot_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U00002708\U0000FE0F',
u':woman_pilot_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U00002708\U0000FE0F',
u':woman_pilot_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U00002708\U0000FE0F',
u':woman_pilot_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U00002708\U0000FE0F',
u':woman_pilot_selector:': u'\U0001F469\U0000200D\U00002708\U0000FE0F',
u':woman_playing_handball-dark_skin_tone:': u'\U0001F93E\U0001F3FF\U0000200D\U00002640',
u':woman_playing_handball-light_skin_tone:': u'\U0001F93E\U0001F3FB\U0000200D\U00002640',
u':woman_playing_handball-medium-dark_skin_tone:': u'\U0001F93E\U0001F3FE\U0000200D\U00002640',
u':woman_playing_handball-medium-light_skin_tone:': u'\U0001F93E\U0001F3FC\U0000200D\U00002640',
u':woman_playing_handball-medium_skin_tone:': u'\U0001F93E\U0001F3FD\U0000200D\U00002640',
u':woman_playing_handball:': u'\U0001F93E\U0000200D\U00002640',
u':woman_playing_handball_dark_skin_tone:': u'\U0001F93E\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_playing_handball_light_skin_tone:': u'\U0001F93E\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_playing_handball_medium-dark_skin_tone:': u'\U0001F93E\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_playing_handball_medium-light_skin_tone:': u'\U0001F93E\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_playing_handball_medium_skin_tone:': u'\U0001F93E\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_playing_handball_selector:': u'\U0001F93E\U0000200D\U00002640\U0000FE0F',
u':woman_playing_water_polo-dark_skin_tone:': u'\U0001F93D\U0001F3FF\U0000200D\U00002640',
u':woman_playing_water_polo-light_skin_tone:': u'\U0001F93D\U0001F3FB\U0000200D\U00002640',
u':woman_playing_water_polo-medium-dark_skin_tone:': u'\U0001F93D\U0001F3FE\U0000200D\U00002640',
u':woman_playing_water_polo-medium-light_skin_tone:': u'\U0001F93D\U0001F3FC\U0000200D\U00002640',
u':woman_playing_water_polo-medium_skin_tone:': u'\U0001F93D\U0001F3FD\U0000200D\U00002640',
u':woman_playing_water_polo:': u'\U0001F93D\U0000200D\U00002640',
u':woman_playing_water_polo_dark_skin_tone:': u'\U0001F93D\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_playing_water_polo_light_skin_tone:': u'\U0001F93D\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_playing_water_polo_medium-dark_skin_tone:': u'\U0001F93D\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_playing_water_polo_medium-light_skin_tone:': u'\U0001F93D\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_playing_water_polo_medium_skin_tone:': u'\U0001F93D\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_playing_water_polo_selector:': u'\U0001F93D\U0000200D\U00002640\U0000FE0F',
u':woman_police_officer-dark_skin_tone:': u'\U0001F46E\U0001F3FF\U0000200D\U00002640',
u':woman_police_officer-light_skin_tone:': u'\U0001F46E\U0001F3FB\U0000200D\U00002640',
u':woman_police_officer-medium-dark_skin_tone:': u'\U0001F46E\U0001F3FE\U0000200D\U00002640',
u':woman_police_officer-medium-light_skin_tone:': u'\U0001F46E\U0001F3FC\U0000200D\U00002640',
u':woman_police_officer-medium_skin_tone:': u'\U0001F46E\U0001F3FD\U0000200D\U00002640',
u':woman_police_officer:': u'\U0001F46E\U0000200D\U00002640',
u':woman_police_officer_dark_skin_tone:': u'\U0001F46E\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_police_officer_light_skin_tone:': u'\U0001F46E\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_police_officer_medium-dark_skin_tone:': u'\U0001F46E\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_police_officer_medium-light_skin_tone:': u'\U0001F46E\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_police_officer_medium_skin_tone:': u'\U0001F46E\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_police_officer_selector:': u'\U0001F46E\U0000200D\U00002640\U0000FE0F',
u':woman_pouting-dark_skin_tone:': u'\U0001F64E\U0001F3FF\U0000200D\U00002640',
u':woman_pouting-light_skin_tone:': u'\U0001F64E\U0001F3FB\U0000200D\U00002640',
u':woman_pouting-medium-dark_skin_tone:': u'\U0001F64E\U0001F3FE\U0000200D\U00002640',
u':woman_pouting-medium-light_skin_tone:': u'\U0001F64E\U0001F3FC\U0000200D\U00002640',
u':woman_pouting-medium_skin_tone:': u'\U0001F64E\U0001F3FD\U0000200D\U00002640',
u':woman_pouting:': u'\U0001F64E\U0000200D\U00002640',
u':woman_pouting_dark_skin_tone:': u'\U0001F64E\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_pouting_light_skin_tone:': u'\U0001F64E\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_pouting_medium-dark_skin_tone:': u'\U0001F64E\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_pouting_medium-light_skin_tone:': u'\U0001F64E\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_pouting_medium_skin_tone:': u'\U0001F64E\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_pouting_selector:': u'\U0001F64E\U0000200D\U00002640\U0000FE0F',
u':woman_raising_hand-dark_skin_tone:': u'\U0001F64B\U0001F3FF\U0000200D\U00002640',
u':woman_raising_hand-light_skin_tone:': u'\U0001F64B\U0001F3FB\U0000200D\U00002640',
u':woman_raising_hand-medium-dark_skin_tone:': u'\U0001F64B\U0001F3FE\U0000200D\U00002640',
u':woman_raising_hand-medium-light_skin_tone:': u'\U0001F64B\U0001F3FC\U0000200D\U00002640',
u':woman_raising_hand-medium_skin_tone:': u'\U0001F64B\U0001F3FD\U0000200D\U00002640',
u':woman_raising_hand:': u'\U0001F64B\U0000200D\U00002640',
u':woman_raising_hand_dark_skin_tone:': u'\U0001F64B\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_raising_hand_light_skin_tone:': u'\U0001F64B\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_raising_hand_medium-dark_skin_tone:': u'\U0001F64B\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_raising_hand_medium-light_skin_tone:': u'\U0001F64B\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_raising_hand_medium_skin_tone:': u'\U0001F64B\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_raising_hand_selector:': u'\U0001F64B\U0000200D\U00002640\U0000FE0F',
u':woman_rowing_boat-dark_skin_tone:': u'\U0001F6A3\U0001F3FF\U0000200D\U00002640',
u':woman_rowing_boat-light_skin_tone:': u'\U0001F6A3\U0001F3FB\U0000200D\U00002640',
u':woman_rowing_boat-medium-dark_skin_tone:': u'\U0001F6A3\U0001F3FE\U0000200D\U00002640',
u':woman_rowing_boat-medium-light_skin_tone:': u'\U0001F6A3\U0001F3FC\U0000200D\U00002640',
u':woman_rowing_boat-medium_skin_tone:': u'\U0001F6A3\U0001F3FD\U0000200D\U00002640',
u':woman_rowing_boat:': u'\U0001F6A3\U0000200D\U00002640',
u':woman_rowing_boat_dark_skin_tone:': u'\U0001F6A3\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_rowing_boat_light_skin_tone:': u'\U0001F6A3\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_rowing_boat_medium-dark_skin_tone:': u'\U0001F6A3\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_rowing_boat_medium-light_skin_tone:': u'\U0001F6A3\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_rowing_boat_medium_skin_tone:': u'\U0001F6A3\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_rowing_boat_selector:': u'\U0001F6A3\U0000200D\U00002640\U0000FE0F',
u':woman_running-dark_skin_tone:': u'\U0001F3C3\U0001F3FF\U0000200D\U00002640',
u':woman_running-light_skin_tone:': u'\U0001F3C3\U0001F3FB\U0000200D\U00002640',
u':woman_running-medium-dark_skin_tone:': u'\U0001F3C3\U0001F3FE\U0000200D\U00002640',
u':woman_running-medium-light_skin_tone:': u'\U0001F3C3\U0001F3FC\U0000200D\U00002640',
u':woman_running-medium_skin_tone:': u'\U0001F3C3\U0001F3FD\U0000200D\U00002640',
u':woman_running:': u'\U0001F3C3\U0000200D\U00002640',
u':woman_running_dark_skin_tone:': u'\U0001F3C3\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_running_light_skin_tone:': u'\U0001F3C3\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_running_medium-dark_skin_tone:': u'\U0001F3C3\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_running_medium-light_skin_tone:': u'\U0001F3C3\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_running_medium_skin_tone:': u'\U0001F3C3\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_running_selector:': u'\U0001F3C3\U0000200D\U00002640\U0000FE0F',
u':woman_scientist:': u'\U0001F469\U0000200D\U0001F52C',
u':woman_scientist_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F52C',
u':woman_scientist_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F52C',
u':woman_scientist_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F52C',
u':woman_scientist_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F52C',
u':woman_scientist_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F52C',
u':woman_shrugging-dark_skin_tone:': u'\U0001F937\U0001F3FF\U0000200D\U00002640',
u':woman_shrugging-light_skin_tone:': u'\U0001F937\U0001F3FB\U0000200D\U00002640',
u':woman_shrugging-medium-dark_skin_tone:': u'\U0001F937\U0001F3FE\U0000200D\U00002640',
u':woman_shrugging-medium-light_skin_tone:': u'\U0001F937\U0001F3FC\U0000200D\U00002640',
u':woman_shrugging-medium_skin_tone:': u'\U0001F937\U0001F3FD\U0000200D\U00002640',
u':woman_shrugging:': u'\U0001F937\U0000200D\U00002640',
u':woman_shrugging_dark_skin_tone:': u'\U0001F937\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_shrugging_light_skin_tone:': u'\U0001F937\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_shrugging_medium-dark_skin_tone:': u'\U0001F937\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_shrugging_medium-light_skin_tone:': u'\U0001F937\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_shrugging_medium_skin_tone:': u'\U0001F937\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_shrugging_selector:': u'\U0001F937\U0000200D\U00002640\U0000FE0F',
u':woman_singer:': u'\U0001F469\U0000200D\U0001F3A4',
u':woman_singer_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F3A4',
u':woman_singer_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F3A4',
u':woman_singer_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F3A4',
u':woman_singer_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F3A4',
u':woman_singer_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F3A4',
u':woman_standing-dark_skin_tone:': u'\U0001F9CD\U0001F3FF\U0000200D\U00002640',
u':woman_standing-dark_skin_tone_selector:': u'\U0001F9CD\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_standing-light_skin_tone:': u'\U0001F9CD\U0001F3FB\U0000200D\U00002640',
u':woman_standing-light_skin_tone_selector:': u'\U0001F9CD\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_standing-medium-dark_skin_tone:': u'\U0001F9CD\U0001F3FE\U0000200D\U00002640',
u':woman_standing-medium-dark_skin_tone_selector:': u'\U0001F9CD\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_standing-medium-light_skin_tone:': u'\U0001F9CD\U0001F3FC\U0000200D\U00002640',
u':woman_standing-medium-light_skin_tone_selector:': u'\U0001F9CD\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_standing-medium_skin_tone:': u'\U0001F9CD\U0001F3FD\U0000200D\U00002640',
u':woman_standing-medium_skin_tone_selector:': u'\U0001F9CD\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_standing:': u'\U0001F9CD\U0000200D\U00002640',
u':woman_standing_selector:': u'\U0001F9CD\U0000200D\U00002640\U0000FE0F',
u':woman_student:': u'\U0001F469\U0000200D\U0001F393',
u':woman_student_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F393',
u':woman_student_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F393',
u':woman_student_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F393',
u':woman_student_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F393',
u':woman_student_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F393',
u':woman_superhero-dark_skin_tone:': u'\U0001F9B8\U0001F3FF\U0000200D\U00002640',
u':woman_superhero-dark_skin_tone_selector:': u'\U0001F9B8\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_superhero-light_skin_tone:': u'\U0001F9B8\U0001F3FB\U0000200D\U00002640',
u':woman_superhero-light_skin_tone_selector:': u'\U0001F9B8\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_superhero-medium-dark_skin_tone:': u'\U0001F9B8\U0001F3FE\U0000200D\U00002640',
u':woman_superhero-medium-dark_skin_tone_selector:': u'\U0001F9B8\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_superhero-medium-light_skin_tone:': u'\U0001F9B8\U0001F3FC\U0000200D\U00002640',
u':woman_superhero-medium-light_skin_tone_selector:': u'\U0001F9B8\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_superhero-medium_skin_tone:': u'\U0001F9B8\U0001F3FD\U0000200D\U00002640',
u':woman_superhero-medium_skin_tone_selector:': u'\U0001F9B8\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_superhero:': u'\U0001F9B8\U0000200D\U00002640',
u':woman_superhero_selector:': u'\U0001F9B8\U0000200D\U00002640\U0000FE0F',
u':woman_supervillain-dark_skin_tone:': u'\U0001F9B9\U0001F3FF\U0000200D\U00002640',
u':woman_supervillain-dark_skin_tone_selector:': u'\U0001F9B9\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_supervillain-light_skin_tone:': u'\U0001F9B9\U0001F3FB\U0000200D\U00002640',
u':woman_supervillain-light_skin_tone_selector:': u'\U0001F9B9\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_supervillain-medium-dark_skin_tone:': u'\U0001F9B9\U0001F3FE\U0000200D\U00002640',
u':woman_supervillain-medium-dark_skin_tone_selector:': u'\U0001F9B9\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_supervillain-medium-light_skin_tone:': u'\U0001F9B9\U0001F3FC\U0000200D\U00002640',
u':woman_supervillain-medium-light_skin_tone_selector:': u'\U0001F9B9\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_supervillain-medium_skin_tone:': u'\U0001F9B9\U0001F3FD\U0000200D\U00002640',
u':woman_supervillain-medium_skin_tone_selector:': u'\U0001F9B9\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_supervillain:': u'\U0001F9B9\U0000200D\U00002640',
u':woman_supervillain_selector:': u'\U0001F9B9\U0000200D\U00002640\U0000FE0F',
u':woman_surfing-dark_skin_tone:': u'\U0001F3C4\U0001F3FF\U0000200D\U00002640',
u':woman_surfing-light_skin_tone:': u'\U0001F3C4\U0001F3FB\U0000200D\U00002640',
u':woman_surfing-medium-dark_skin_tone:': u'\U0001F3C4\U0001F3FE\U0000200D\U00002640',
u':woman_surfing-medium-light_skin_tone:': u'\U0001F3C4\U0001F3FC\U0000200D\U00002640',
u':woman_surfing-medium_skin_tone:': u'\U0001F3C4\U0001F3FD\U0000200D\U00002640',
u':woman_surfing:': u'\U0001F3C4\U0000200D\U00002640',
u':woman_surfing_dark_skin_tone:': u'\U0001F3C4\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_surfing_light_skin_tone:': u'\U0001F3C4\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_surfing_medium-dark_skin_tone:': u'\U0001F3C4\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_surfing_medium-light_skin_tone:': u'\U0001F3C4\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_surfing_medium_skin_tone:': u'\U0001F3C4\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_surfing_selector:': u'\U0001F3C4\U0000200D\U00002640\U0000FE0F',
u':woman_swimming-dark_skin_tone:': u'\U0001F3CA\U0001F3FF\U0000200D\U00002640',
u':woman_swimming-light_skin_tone:': u'\U0001F3CA\U0001F3FB\U0000200D\U00002640',
u':woman_swimming-medium-dark_skin_tone:': u'\U0001F3CA\U0001F3FE\U0000200D\U00002640',
u':woman_swimming-medium-light_skin_tone:': u'\U0001F3CA\U0001F3FC\U0000200D\U00002640',
u':woman_swimming-medium_skin_tone:': u'\U0001F3CA\U0001F3FD\U0000200D\U00002640',
u':woman_swimming:': u'\U0001F3CA\U0000200D\U00002640',
u':woman_swimming_dark_skin_tone:': u'\U0001F3CA\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_swimming_light_skin_tone:': u'\U0001F3CA\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_swimming_medium-dark_skin_tone:': u'\U0001F3CA\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_swimming_medium-light_skin_tone:': u'\U0001F3CA\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_swimming_medium_skin_tone:': u'\U0001F3CA\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_swimming_selector:': u'\U0001F3CA\U0000200D\U00002640\U0000FE0F',
u':woman_teacher:': u'\U0001F469\U0000200D\U0001F3EB',
u':woman_teacher_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F3EB',
u':woman_teacher_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F3EB',
u':woman_teacher_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F3EB',
u':woman_teacher_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F3EB',
u':woman_teacher_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F3EB',
u':woman_technologist:': u'\U0001F469\U0000200D\U0001F4BB',
u':woman_technologist_dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F4BB',
u':woman_technologist_light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F4BB',
u':woman_technologist_medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F4BB',
u':woman_technologist_medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F4BB',
u':woman_technologist_medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F4BB',
u':woman_tipping_hand-dark_skin_tone:': u'\U0001F481\U0001F3FF\U0000200D\U00002640',
u':woman_tipping_hand-light_skin_tone:': u'\U0001F481\U0001F3FB\U0000200D\U00002640',
u':woman_tipping_hand-medium-dark_skin_tone:': u'\U0001F481\U0001F3FE\U0000200D\U00002640',
u':woman_tipping_hand-medium-light_skin_tone:': u'\U0001F481\U0001F3FC\U0000200D\U00002640',
u':woman_tipping_hand-medium_skin_tone:': u'\U0001F481\U0001F3FD\U0000200D\U00002640',
u':woman_tipping_hand:': u'\U0001F481\U0000200D\U00002640',
u':woman_tipping_hand_dark_skin_tone:': u'\U0001F481\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_tipping_hand_light_skin_tone:': u'\U0001F481\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_tipping_hand_medium-dark_skin_tone:': u'\U0001F481\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_tipping_hand_medium-light_skin_tone:': u'\U0001F481\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_tipping_hand_medium_skin_tone:': u'\U0001F481\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_tipping_hand_selector:': u'\U0001F481\U0000200D\U00002640\U0000FE0F',
u':woman_vampire-dark_skin_tone:': u'\U0001F9DB\U0001F3FF\U0000200D\U00002640',
u':woman_vampire-light_skin_tone:': u'\U0001F9DB\U0001F3FB\U0000200D\U00002640',
u':woman_vampire-medium-dark_skin_tone:': u'\U0001F9DB\U0001F3FE\U0000200D\U00002640',
u':woman_vampire-medium-light_skin_tone:': u'\U0001F9DB\U0001F3FC\U0000200D\U00002640',
u':woman_vampire-medium_skin_tone:': u'\U0001F9DB\U0001F3FD\U0000200D\U00002640',
u':woman_vampire:': u'\U0001F9DB\U0000200D\U00002640',
u':woman_vampire_dark_skin_tone:': u'\U0001F9DB\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_vampire_light_skin_tone:': u'\U0001F9DB\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_vampire_medium-dark_skin_tone:': u'\U0001F9DB\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_vampire_medium-light_skin_tone:': u'\U0001F9DB\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_vampire_medium_skin_tone:': u'\U0001F9DB\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_vampire_selector:': u'\U0001F9DB\U0000200D\U00002640\U0000FE0F',
u':woman_walking-dark_skin_tone:': u'\U0001F6B6\U0001F3FF\U0000200D\U00002640',
u':woman_walking-light_skin_tone:': u'\U0001F6B6\U0001F3FB\U0000200D\U00002640',
u':woman_walking-medium-dark_skin_tone:': u'\U0001F6B6\U0001F3FE\U0000200D\U00002640',
u':woman_walking-medium-light_skin_tone:': u'\U0001F6B6\U0001F3FC\U0000200D\U00002640',
u':woman_walking-medium_skin_tone:': u'\U0001F6B6\U0001F3FD\U0000200D\U00002640',
u':woman_walking:': u'\U0001F6B6\U0000200D\U00002640',
u':woman_walking_dark_skin_tone:': u'\U0001F6B6\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_walking_light_skin_tone:': u'\U0001F6B6\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_walking_medium-dark_skin_tone:': u'\U0001F6B6\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_walking_medium-light_skin_tone:': u'\U0001F6B6\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_walking_medium_skin_tone:': u'\U0001F6B6\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_walking_selector:': u'\U0001F6B6\U0000200D\U00002640\U0000FE0F',
u':woman_wearing_turban-dark_skin_tone:': u'\U0001F473\U0001F3FF\U0000200D\U00002640',
u':woman_wearing_turban-light_skin_tone:': u'\U0001F473\U0001F3FB\U0000200D\U00002640',
u':woman_wearing_turban-medium-dark_skin_tone:': u'\U0001F473\U0001F3FE\U0000200D\U00002640',
u':woman_wearing_turban-medium-light_skin_tone:': u'\U0001F473\U0001F3FC\U0000200D\U00002640',
u':woman_wearing_turban-medium_skin_tone:': u'\U0001F473\U0001F3FD\U0000200D\U00002640',
u':woman_wearing_turban:': u'\U0001F473\U0000200D\U00002640',
u':woman_wearing_turban_dark_skin_tone:': u'\U0001F473\U0001F3FF\U0000200D\U00002640\U0000FE0F',
u':woman_wearing_turban_light_skin_tone:': u'\U0001F473\U0001F3FB\U0000200D\U00002640\U0000FE0F',
u':woman_wearing_turban_medium-dark_skin_tone:': u'\U0001F473\U0001F3FE\U0000200D\U00002640\U0000FE0F',
u':woman_wearing_turban_medium-light_skin_tone:': u'\U0001F473\U0001F3FC\U0000200D\U00002640\U0000FE0F',
u':woman_wearing_turban_medium_skin_tone:': u'\U0001F473\U0001F3FD\U0000200D\U00002640\U0000FE0F',
u':woman_wearing_turban_selector:': u'\U0001F473\U0000200D\U00002640\U0000FE0F',
u':woman_with_headscarf:': u'\U0001F9D5',
u':woman_with_headscarf_dark_skin_tone:': u'\U0001F9D5\U0001F3FF',
u':woman_with_headscarf_light_skin_tone:': u'\U0001F9D5\U0001F3FB',
u':woman_with_headscarf_medium-dark_skin_tone:': u'\U0001F9D5\U0001F3FE',
u':woman_with_headscarf_medium-light_skin_tone:': u'\U0001F9D5\U0001F3FC',
u':woman_with_headscarf_medium_skin_tone:': u'\U0001F9D5\U0001F3FD',
u':woman_with_probing_cane-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F9AF',
u':woman_with_probing_cane-light_skin_tone:': u'\U0001F469\U0001F3FB\U0000200D\U0001F9AF',
u':woman_with_probing_cane-medium-dark_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F9AF',
u':woman_with_probing_cane-medium-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F9AF',
u':woman_with_probing_cane-medium_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F9AF',
u':woman_with_probing_cane:': u'\U0001F469\U0000200D\U0001F9AF',
u':woman_zombie:': u'\U0001F9DF\U0000200D\U00002640',
u':woman_zombie_selector:': u'\U0001F9DF\U0000200D\U00002640\U0000FE0F',
u':woman\u2019s_boot:': u'\U0001F462',
u':woman\u2019s_clothes:': u'\U0001F45A',
u':woman\u2019s_hat:': u'\U0001F452',
u':woman\u2019s_sandal:': u'\U0001F461',
u':women_holding_hands-dark_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FB',
u':women_holding_hands-dark_skin_tone-medium-dark_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FE',
u':women_holding_hands-dark_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FC',
u':women_holding_hands-dark_skin_tone-medium_skin_tone:': u'\U0001F469\U0001F3FF\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FD',
u':women_holding_hands-dark_skin_tone:': u'\U0001F46D\U0001F3FF',
u':women_holding_hands-light_skin_tone:': u'\U0001F46D\U0001F3FB',
u':women_holding_hands-medium-dark_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FB',
u':women_holding_hands-medium-dark_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FC',
u':women_holding_hands-medium-dark_skin_tone-medium_skin_tone:': u'\U0001F469\U0001F3FE\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FD',
u':women_holding_hands-medium-dark_skin_tone:': u'\U0001F46D\U0001F3FE',
u':women_holding_hands-medium-light_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FC\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FB',
u':women_holding_hands-medium-light_skin_tone:': u'\U0001F46D\U0001F3FC',
u':women_holding_hands-medium_skin_tone-light_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FB',
u':women_holding_hands-medium_skin_tone-medium-light_skin_tone:': u'\U0001F469\U0001F3FD\U0000200D\U0001F91D\U0000200D\U0001F469\U0001F3FC',
u':women_holding_hands-medium_skin_tone:': u'\U0001F46D\U0001F3FD',
u':women_with_bunny_ears:': u'\U0001F46F\U0000200D\U00002640',
u':women_with_bunny_ears_selector:': u'\U0001F46F\U0000200D\U00002640\U0000FE0F',
u':women_wrestling:': u'\U0001F93C\U0000200D\U00002640',
u':women_wrestling_selector:': u'\U0001F93C\U0000200D\U00002640\U0000FE0F',
u':women\u2019s_room:': u'\U0001F6BA',
u':woozy_face:': u'\U0001F974',
u':world_map:': u'\U0001F5FA',
u':world_map_selector:': u'\U0001F5FA\U0000FE0F',
u':worried_face:': u'\U0001F61F',
u':wrapped_gift:': u'\U0001F381',
u':wrench:': u'\U0001F527',
u':writing_hand:': u'\U0000270D',
u':writing_hand_dark_skin_tone:': u'\U0000270D\U0001F3FF',
u':writing_hand_light_skin_tone:': u'\U0000270D\U0001F3FB',
u':writing_hand_medium-dark_skin_tone:': u'\U0000270D\U0001F3FE',
u':writing_hand_medium-light_skin_tone:': u'\U0000270D\U0001F3FC',
u':writing_hand_medium_skin_tone:': u'\U0000270D\U0001F3FD',
u':writing_hand_selector:': u'\U0000270D\U0000FE0F',
u':yarn:': u'\U0001F9F6',
u':yawning_face:': u'\U0001F971',
u':yellow_circle:': u'\U0001F7E1',
u':yellow_heart:': u'\U0001F49B',
u':yellow_square:': u'\U0001F7E8',
u':yen_banknote:': u'\U0001F4B4',
u':yin_yang:': u'\U0000262F',
u':yin_yang_selector:': u'\U0000262F\U0000FE0F',
u':yo-yo:': u'\U0001FA80',
u':zany_face:': u'\U0001F92A',
u':zebra:': u'\U0001F993',
u':zipper-mouth_face:': u'\U0001F910',
u':zombie:': u'\U0001F9DF',
u':zzz:': u'\U0001F4A4',
}
EMOJI_ALIAS_UNICODE = dict(EMOJI_UNICODE.items(), **{
u':admission_tickets:': u'\U0001F39F',
u':aerial_tramway:': u'\U0001F6A1',
u':airplane:': u'\U00002708',
u':airplane_arriving:': u'\U0001F6EC',
u':airplane_departure:': u'\U0001F6EB',
u':alarm_clock:': u'\U000023F0',
u':alembic:': u'\U00002697',
u':space_invader:': u'\U0001F47E',
u':ambulance:': u'\U0001F691',
u':football:': u'\U0001F3C8',
u':amphora:': u'\U0001F3FA',
u':anchor:': u'\U00002693',
u':anger:': u'\U0001F4A2',
u':angry:': u'\U0001F620',
u':anguished:': u'\U0001F627',
u':ant:': u'\U0001F41C',
u':signal_strength:': u'\U0001F4F6',
u':arrows_counterclockwise:': u'\U0001F504',
u':aquarius:': u'\U00002652',
u':aries:': u'\U00002648',
u':arrow_heading_down:': u'\U00002935',
u':arrow_heading_up:': u'\U00002934',
u':articulated_lorry:': u'\U0001F69B',
u':art:': u'\U0001F3A8',
u':astonished:': u'\U0001F632',
u':athletic_shoe:': u'\U0001F45F',
u':atom_symbol:': u'\U0000269B',
u':eggplant:': u'\U0001F346',
u':atm:': u'\U0001F3E7',
u':car:': u'\U0001F697',
u':red_car:': u'\U0001F697',
u':baby:': u'\U0001F476',
u':angel:': u'\U0001F47C',
u':baby_bottle:': u'\U0001F37C',
u':baby_chick:': u'\U0001F424',
u':baby_symbol:': u'\U0001F6BC',
u':back:': u'\U0001F519',
u':camel:': u'\U0001F42B',
u':badminton_racquet_and_shuttlecock:': u'\U0001F3F8',
u':baggage_claim:': u'\U0001F6C4',
u':balloon:': u'\U0001F388',
u':ballot_box_with_ballot:': u'\U0001F5F3',
u':ballot_box_with_check:': u'\U00002611',
u':banana:': u'\U0001F34C',
u':bank:': u'\U0001F3E6',
u':dollar:': u'\U0001F4B5',
u':euro:': u'\U0001F4B6',
u':pound:': u'\U0001F4B7',
u':yen:': u'\U0001F4B4',
u':bar_chart:': u'\U0001F4CA',
u':barber:': u'\U0001F488',
u':baseball:': u'\U000026BE',
u':basketball:': u'\U0001F3C0',
u':bath:': u'\U0001F6C0',
u':bathtub:': u'\U0001F6C1',
u':battery:': u'\U0001F50B',
u':beach_with_umbrella:': u'\U0001F3D6',
u':bear:': u'\U0001F43B',
u':heartbeat:': u'\U0001F493',
u':bed:': u'\U0001F6CF',
u':beer:': u'\U0001F37A',
u':bell:': u'\U0001F514',
u':no_bell:': u'\U0001F515',
u':bellhop_bell:': u'\U0001F6CE',
u':bento:': u'\U0001F371',
u':bike:': u'\U0001F6B2',
u':bicyclist:': u'\U0001F6B4',
u':bikini:': u'\U0001F459',
u':8ball:': u'\U0001F3B1',
u':biohazard_sign:': u'\U00002623',
u':bird:': u'\U0001F426',
u':birthday:': u'\U0001F382',
u':black_circle_for_record:': u'\U000023FA',
u':clubs:': u'\U00002663',
u':diamonds:': u'\U00002666',
u':arrow_double_down:': u'\U000023EC',
u':hearts:': u'\U00002665',
u':black_large_square:': u'\U00002B1B',
u':rewind:': u'\U000023EA',
u':black_left__pointing_double_triangle_with_vertical_bar:': u'\U000023EE',
u':arrow_backward:': u'\U000025C0',
u':black_medium_small_square:': u'\U000025FE',
u':black_medium_square:': u'\U000025FC',
u':black_nib:': u'\U00002712',
u':question:': u'\U00002753',
u':fast_forward:': u'\U000023E9',
u':black_right__pointing_double_triangle_with_vertical_bar:': u'\U000023ED',
u':arrow_forward:': u'\U000025B6',
u':black_right__pointing_triangle_with_double_vertical_bar:': u'\U000023EF',
u':arrow_right:': u'\U000027A1',
u':scissors:': u'\U00002702',
u':black_small_square:': u'\U000025AA',
u':spades:': u'\U00002660',
u':black_square_button:': u'\U0001F532',
u':black_square_for_stop:': u'\U000023F9',
u':sunny:': u'\U00002600',
u':phone:': u'\U0000260E',
u':telephone:': u'\U0000260E',
u':recycle:': u'\U0000267B',
u':arrow_double_up:': u'\U000023EB',
u':blossom:': u'\U0001F33C',
u':blowfish:': u'\U0001F421',
u':blue_book:': u'\U0001F4D8',
u':blue_heart:': u'\U0001F499',
u':boar:': u'\U0001F417',
u':bomb:': u'\U0001F4A3',
u':bookmark:': u'\U0001F516',
u':bookmark_tabs:': u'\U0001F4D1',
u':books:': u'\U0001F4DA',
u':bottle_with_popping_cork:': u'\U0001F37E',
u':bouquet:': u'\U0001F490',
u':bow_and_arrow:': u'\U0001F3F9',
u':bowling:': u'\U0001F3B3',
u':boy:': u'\U0001F466',
u':bread:': u'\U0001F35E',
u':bride_with_veil:': u'\U0001F470',
u':bridge_at_night:': u'\U0001F309',
u':briefcase:': u'\U0001F4BC',
u':broken_heart:': u'\U0001F494',
u':bug:': u'\U0001F41B',
u':building_construction:': u'\U0001F3D7',
u':burrito:': u'\U0001F32F',
u':bus:': u'\U0001F68C',
u':busstop:': u'\U0001F68F',
u':bust_in_silhouette:': u'\U0001F464',
u':busts_in_silhouette:': u'\U0001F465',
u':cactus:': u'\U0001F335',
u':date:': u'\U0001F4C5',
u':camera:': u'\U0001F4F7',
u':camera_with_flash:': u'\U0001F4F8',
u':camping:': u'\U0001F3D5',
u':cancer:': u'\U0000264B',
u':candle:': u'\U0001F56F',
u':candy:': u'\U0001F36C',
u':capricorn:': u'\U00002651',
u':card_file_box:': u'\U0001F5C3',
u':card_index:': u'\U0001F4C7',
u':card_index_dividers:': u'\U0001F5C2',
u':carousel_horse:': u'\U0001F3A0',
u':flags:': u'\U0001F38F',
u':cat2:': u'\U0001F408',
u':cat:': u'\U0001F431',
u':joy_cat:': u'\U0001F639',
u':smirk_cat:': u'\U0001F63C',
u':chains:': u'\U000026D3',
u':chart_with_downwards_trend:': u'\U0001F4C9',
u':chart_with_upwards_trend:': u'\U0001F4C8',
u':chart:': u'\U0001F4B9',
u':mega:': u'\U0001F4E3',
u':cheese_wedge:': u'\U0001F9C0',
u':checkered_flag:': u'\U0001F3C1',
u':cherries:': u'\U0001F352',
u':cherry_blossom:': u'\U0001F338',
u':chestnut:': u'\U0001F330',
u':chicken:': u'\U0001F414',
u':children_crossing:': u'\U0001F6B8',
u':chipmunk:': u'\U0001F43F',
u':chocolate_bar:': u'\U0001F36B',
u':christmas_tree:': u'\U0001F384',
u':church:': u'\U000026EA',
u':cinema:': u'\U0001F3A6',
u':accept:': u'\U0001F251',
u':ideograph_advantage:': u'\U0001F250',
u':congratulations:': u'\U00003297',
u':secret:': u'\U00003299',
u':m:': u'\U000024C2',
u':circus_tent:': u'\U0001F3AA',
u':cityscape:': u'\U0001F3D9',
u':city_sunset:': u'\U0001F306',
u':clapper:': u'\U0001F3AC',
u':clap:': u'\U0001F44F',
u':classical_building:': u'\U0001F3DB',
u':beers:': u'\U0001F37B',
u':clipboard:': u'\U0001F4CB',
u':clock830:': u'\U0001F563',
u':clock8:': u'\U0001F557',
u':clock1130:': u'\U0001F566',
u':clock11:': u'\U0001F55A',
u':clock530:': u'\U0001F560',
u':clock5:': u'\U0001F554',
u':clock430:': u'\U0001F55F',
u':clock4:': u'\U0001F553',
u':clock930:': u'\U0001F564',
u':clock9:': u'\U0001F558',
u':clock130:': u'\U0001F55C',
u':clock1:': u'\U0001F550',
u':clock730:': u'\U0001F562',
u':clock7:': u'\U0001F556',
u':clock630:': u'\U0001F561',
u':clock6:': u'\U0001F555',
u':clock1030:': u'\U0001F565',
u':clock10:': u'\U0001F559',
u':clock330:': u'\U0001F55E',
u':clock3:': u'\U0001F552',
u':clock1230:': u'\U0001F567',
u':clock12:': u'\U0001F55B',
u':clock230:': u'\U0001F55D',
u':clock2:': u'\U0001F551',
u':arrows_clockwise:': u'\U0001F503',
u':repeat:': u'\U0001F501',
u':repeat_one:': u'\U0001F502',
u':closed_book:': u'\U0001F4D5',
u':closed_lock_with_key:': u'\U0001F510',
u':mailbox_closed:': u'\U0001F4EA',
u':mailbox:': u'\U0001F4EB',
u':closed_umbrella:': u'\U0001F302',
u':cloud:': u'\U00002601',
u':cloud_with_lightning:': u'\U0001F329',
u':cloud_with_rain:': u'\U0001F327',
u':cloud_with_snow:': u'\U0001F328',
u':cloud_with_tornado:': u'\U0001F32A',
u':cocktail:': u'\U0001F378',
u':coffin:': u'\U000026B0',
u':boom:': u'\U0001F4A5',
u':collision:': u'\U0001F4A5',
u':comet:': u'\U00002604',
u':compression:': u'\U0001F5DC',
u':confetti_ball:': u'\U0001F38A',
u':confounded:': u'\U0001F616',
u':confused:': u'\U0001F615',
u':construction:': u'\U0001F6A7',
u':construction_worker:': u'\U0001F477',
u':control_knobs:': u'\U0001F39B',
u':convenience_store:': u'\U0001F3EA',
u':rice:': u'\U0001F35A',
u':cookie:': u'\U0001F36A',
u':egg:': u'\U0001F373',
u':egg2:': u'\U0001F95A',
u':copyright:': u'\U000000A9',
u':couch_and_lamp:': u'\U0001F6CB',
u':couple_with_heart:': u'\U0001F491',
u':cow2:': u'\U0001F404',
u':cow:': u'\U0001F42E',
u':crab:': u'\U0001F980',
u':credit_card:': u'\U0001F4B3',
u':crescent_moon:': u'\U0001F319',
u':cricket_bat_and_ball:': u'\U0001F3CF',
u':crocodile:': u'\U0001F40A',
u':x:': u'\U0000274C',
u':crossed_flags:': u'\U0001F38C',
u':crossed_swords:': u'\U00002694',
u':crown:': u'\U0001F451',
u':crying_cat_face:': u'\U0001F63F',
u':cry:': u'\U0001F622',
u':crystal_ball:': u'\U0001F52E',
u':curly_loop:': u'\U000027B0',
u':currency_exchange:': u'\U0001F4B1',
u':curry:': u'\U0001F35B',
u':custard:': u'\U0001F36E',
u':customs:': u'\U0001F6C3',
u':cyclone:': u'\U0001F300',
u':dagger_knife:': u'\U0001F5E1',
u':dancer:': u'\U0001F483',
u':dango:': u'\U0001F361',
u':dark_sunglasses:': u'\U0001F576',
u':dash:': u'\U0001F4A8',
u':deciduous_tree:': u'\U0001F333',
u':truck:': u'\U0001F69A',
u':department_store:': u'\U0001F3EC',
u':derelict_house_building:': u'\U0001F3DA',
u':desert:': u'\U0001F3DC',
u':desert_island:': u'\U0001F3DD',
u':desktop_computer:': u'\U0001F5A5',
u':diamond_shape_with_a_dot_inside:': u'\U0001F4A0',
u':dart:': u'\U0001F3AF',
u':disappointed_relieved:': u'\U0001F625',
u':disappointed:': u'\U0001F61E',
u':dizzy_face:': u'\U0001F635',
u':dizzy:': u'\U0001F4AB',
u':do_not_litter:': u'\U0001F6AF',
u':dog2:': u'\U0001F415',
u':dog:': u'\U0001F436',
u':dolphin:': u'\U0001F42C',
u':flipper:': u'\U0001F42C',
u':door:': u'\U0001F6AA',
u':loop:': u'\U000027BF',
u':bangbang:': u'\U0000203C',
u':double_vertical_bar:': u'\U000023F8',
u':doughnut:': u'\U0001F369',
u':dove_of_peace:': u'\U0001F54A',
u':small_red_triangle_down:': u'\U0001F53B',
u':arrow_down_small:': u'\U0001F53D',
u':arrow_down:': u'\U00002B07',
u':dragon:': u'\U0001F409',
u':dragon_face:': u'\U0001F432',
u':dress:': u'\U0001F457',
u':dromedary_camel:': u'\U0001F42A',
u':droplet:': u'\U0001F4A7',
u':dvd:': u'\U0001F4C0',
u':e__mail:': u'\U0001F4E7',
u':ear:': u'\U0001F442',
u':corn:': u'\U0001F33D',
u':ear_of_rice:': u'\U0001F33E',
u':earth_americas:': u'\U0001F30E',
u':earth_asia:': u'\U0001F30F',
u':earth_africa:': u'\U0001F30D',
u':eight_pointed_black_star:': u'\U00002734',
u':eight_spoked_asterisk:': u'\U00002733',
u':eject_symbol:': u'\U000023CF',
u':bulb:': u'\U0001F4A1',
u':electric_plug:': u'\U0001F50C',
u':flashlight:': u'\U0001F526',
u':elephant:': u'\U0001F418',
u':emoji_modifier_fitzpatrick_type__1__2:': u'\U0001F3FB',
u':emoji_modifier_fitzpatrick_type__3:': u'\U0001F3FC',
u':emoji_modifier_fitzpatrick_type__4:': u'\U0001F3FD',
u':emoji_modifier_fitzpatrick_type__5:': u'\U0001F3FE',
u':emoji_modifier_fitzpatrick_type__6:': u'\U0001F3FF',
u':end:': u'\U0001F51A',
u':email:': u'\U00002709',
u':envelope:': u'\U00002709',
u':envelope_with_arrow:': u'\U0001F4E9',
u':european_castle:': u'\U0001F3F0',
u':european_post_office:': u'\U0001F3E4',
u':evergreen_tree:': u'\U0001F332',
u':interrobang:': u'\U00002049',
u':expressionless:': u'\U0001F611',
u':alien:': u'\U0001F47D',
u':eye:': u'\U0001F441',
u':eyeglasses:': u'\U0001F453',
u':eyes:': u'\U0001F440',
u':massage:': u'\U0001F486',
u':yum:': u'\U0001F60B',
u':scream:': u'\U0001F631',
u':kissing_heart:': u'\U0001F618',
u':sweat:': u'\U0001F613',
u':face_with_head__bandage:': u'\U0001F915',
u':triumph:': u'\U0001F624',
u':mask:': u'\U0001F637',
u':no_good:': u'\U0001F645',
u':ok_woman:': u'\U0001F646',
u':open_mouth:': u'\U0001F62E',
u':cold_sweat:': u'\U0001F630',
u':face_with_rolling_eyes:': u'\U0001F644',
u':stuck_out_tongue:': u'\U0001F61B',
u':stuck_out_tongue_closed_eyes:': u'\U0001F61D',
u':stuck_out_tongue_winking_eye:': u'\U0001F61C',
u':joy:': u'\U0001F602',
u':face_with_thermometer:': u'\U0001F912',
u':no_mouth:': u'\U0001F636',
u':factory:': u'\U0001F3ED',
u':fallen_leaf:': u'\U0001F342',
u':family:': u'\U0001F46A',
u':santa:': u'\U0001F385',
u':fax:': u'\U0001F4E0',
u':fearful:': u'\U0001F628',
u':ferris_wheel:': u'\U0001F3A1',
u':ferry:': u'\U000026F4',
u':field_hockey_stick_and_ball:': u'\U0001F3D1',
u':file_cabinet:': u'\U0001F5C4',
u':file_folder:': u'\U0001F4C1',
u':film_frames:': u'\U0001F39E',
u':film_projector:': u'\U0001F4FD',
u':fire:': u'\U0001F525',
u':fire_engine:': u'\U0001F692',
u':sparkler:': u'\U0001F387',
u':fireworks:': u'\U0001F386',
u':first_quarter_moon:': u'\U0001F313',
u':first_quarter_moon_with_face:': u'\U0001F31B',
u':fish:': u'\U0001F41F',
u':fish_cake:': u'\U0001F365',
u':fishing_pole_and_fish:': u'\U0001F3A3',
u':facepunch:': u'\U0001F44A',
u':punch:': u'\U0001F44A',
u':flag_for_Afghanistan:': u'\U0001F1E6\U0001F1EB',
u':flag_for_Albania:': u'\U0001F1E6\U0001F1F1',
u':flag_for_Algeria:': u'\U0001F1E9\U0001F1FF',
u':flag_for_American_Samoa:': u'\U0001F1E6\U0001F1F8',
u':flag_for_Andorra:': u'\U0001F1E6\U0001F1E9',
u':flag_for_Angola:': u'\U0001F1E6\U0001F1F4',
u':flag_for_Anguilla:': u'\U0001F1E6\U0001F1EE',
u':flag_for_Antarctica:': u'\U0001F1E6\U0001F1F6',
u':flag_for_Antigua_&_Barbuda:': u'\U0001F1E6\U0001F1EC',
u':flag_for_Argentina:': u'\U0001F1E6\U0001F1F7',
u':flag_for_Armenia:': u'\U0001F1E6\U0001F1F2',
u':flag_for_Aruba:': u'\U0001F1E6\U0001F1FC',
u':flag_for_Ascension_Island:': u'\U0001F1E6\U0001F1E8',
u':flag_for_Australia:': u'\U0001F1E6\U0001F1FA',
u':flag_for_Austria:': u'\U0001F1E6\U0001F1F9',
u':flag_for_Azerbaijan:': u'\U0001F1E6\U0001F1FF',
u':flag_for_Bahamas:': u'\U0001F1E7\U0001F1F8',
u':flag_for_Bahrain:': u'\U0001F1E7\U0001F1ED',
u':flag_for_Bangladesh:': u'\U0001F1E7\U0001F1E9',
u':flag_for_Barbados:': u'\U0001F1E7\U0001F1E7',
u':flag_for_Belarus:': u'\U0001F1E7\U0001F1FE',
u':flag_for_Belgium:': u'\U0001F1E7\U0001F1EA',
u':flag_for_Belize:': u'\U0001F1E7\U0001F1FF',
u':flag_for_Benin:': u'\U0001F1E7\U0001F1EF',
u':flag_for_Bermuda:': u'\U0001F1E7\U0001F1F2',
u':flag_for_Bhutan:': u'\U0001F1E7\U0001F1F9',
u':flag_for_Bolivia:': u'\U0001F1E7\U0001F1F4',
u':flag_for_Bosnia_&_Herzegovina:': u'\U0001F1E7\U0001F1E6',
u':flag_for_Botswana:': u'\U0001F1E7\U0001F1FC',
u':flag_for_Bouvet_Island:': u'\U0001F1E7\U0001F1FB',
u':flag_for_Brazil:': u'\U0001F1E7\U0001F1F7',
u':flag_for_British_Indian_Ocean_Territory:': u'\U0001F1EE\U0001F1F4',
u':flag_for_British_Virgin_Islands:': u'\U0001F1FB\U0001F1EC',
u':flag_for_Brunei:': u'\U0001F1E7\U0001F1F3',
u':flag_for_Bulgaria:': u'\U0001F1E7\U0001F1EC',
u':flag_for_Burkina_Faso:': u'\U0001F1E7\U0001F1EB',
u':flag_for_Burundi:': u'\U0001F1E7\U0001F1EE',
u':flag_for_Cambodia:': u'\U0001F1F0\U0001F1ED',
u':flag_for_Cameroon:': u'\U0001F1E8\U0001F1F2',
u':flag_for_Canada:': u'\U0001F1E8\U0001F1E6',
u':flag_for_Canary_Islands:': u'\U0001F1EE\U0001F1E8',
u':flag_for_Cape_Verde:': u'\U0001F1E8\U0001F1FB',
u':flag_for_Caribbean_Netherlands:': u'\U0001F1E7\U0001F1F6',
u':flag_for_Cayman_Islands:': u'\U0001F1F0\U0001F1FE',
u':flag_for_Central_African_Republic:': u'\U0001F1E8\U0001F1EB',
u':flag_for_Ceuta_&_Melilla:': u'\U0001F1EA\U0001F1E6',
u':flag_for_Chad:': u'\U0001F1F9\U0001F1E9',
u':flag_for_Chile:': u'\U0001F1E8\U0001F1F1',
u':flag_for_China:': u'\U0001F1E8\U0001F1F3',
u':flag_for_Christmas_Island:': u'\U0001F1E8\U0001F1FD',
u':flag_for_Clipperton_Island:': u'\U0001F1E8\U0001F1F5',
u':flag_for_Cocos__Islands:': u'\U0001F1E8\U0001F1E8',
u':flag_for_Colombia:': u'\U0001F1E8\U0001F1F4',
u':flag_for_Comoros:': u'\U0001F1F0\U0001F1F2',
u':flag_for_Congo____Brazzaville:': u'\U0001F1E8\U0001F1EC',
u':flag_for_Congo____Kinshasa:': u'\U0001F1E8\U0001F1E9',
u':flag_for_Cook_Islands:': u'\U0001F1E8\U0001F1F0',
u':flag_for_Costa_Rica:': u'\U0001F1E8\U0001F1F7',
u':flag_for_Croatia:': u'\U0001F1ED\U0001F1F7',
u':flag_for_Cuba:': u'\U0001F1E8\U0001F1FA',
u':flag_for_Curaçao:': u'\U0001F1E8\U0001F1FC',
u':flag_for_Cyprus:': u'\U0001F1E8\U0001F1FE',
u':flag_for_Czech_Republic:': u'\U0001F1E8\U0001F1FF',
u':flag_for_Côte_d’Ivoire:': u'\U0001F1E8\U0001F1EE',
u':flag_for_Denmark:': u'\U0001F1E9\U0001F1F0',
u':flag_for_Diego_Garcia:': u'\U0001F1E9\U0001F1EC',
u':flag_for_Djibouti:': u'\U0001F1E9\U0001F1EF',
u':flag_for_Dominica:': u'\U0001F1E9\U0001F1F2',
u':flag_for_Dominican_Republic:': u'\U0001F1E9\U0001F1F4',
u':flag_for_Ecuador:': u'\U0001F1EA\U0001F1E8',
u':flag_for_Egypt:': u'\U0001F1EA\U0001F1EC',
u':flag_for_El_Salvador:': u'\U0001F1F8\U0001F1FB',
u':flag_for_Equatorial_Guinea:': u'\U0001F1EC\U0001F1F6',
u':flag_for_Eritrea:': u'\U0001F1EA\U0001F1F7',
u':flag_for_Estonia:': u'\U0001F1EA\U0001F1EA',
u':flag_for_Ethiopia:': u'\U0001F1EA\U0001F1F9',
u':flag_for_European_Union:': u'\U0001F1EA\U0001F1FA',
u':flag_for_Falkland_Islands:': u'\U0001F1EB\U0001F1F0',
u':flag_for_Faroe_Islands:': u'\U0001F1EB\U0001F1F4',
u':flag_for_Fiji:': u'\U0001F1EB\U0001F1EF',
u':flag_for_Finland:': u'\U0001F1EB\U0001F1EE',
u':flag_for_France:': u'\U0001F1EB\U0001F1F7',
u':flag_for_French_Guiana:': u'\U0001F1EC\U0001F1EB',
u':flag_for_French_Polynesia:': u'\U0001F1F5\U0001F1EB',
u':flag_for_French_Southern_Territories:': u'\U0001F1F9\U0001F1EB',
u':flag_for_Gabon:': u'\U0001F1EC\U0001F1E6',
u':flag_for_Gambia:': u'\U0001F1EC\U0001F1F2',
u':flag_for_Georgia:': u'\U0001F1EC\U0001F1EA',
u':flag_for_Germany:': u'\U0001F1E9\U0001F1EA',
u':flag_for_Ghana:': u'\U0001F1EC\U0001F1ED',
u':flag_for_Gibraltar:': u'\U0001F1EC\U0001F1EE',
u':flag_for_Greece:': u'\U0001F1EC\U0001F1F7',
u':flag_for_Greenland:': u'\U0001F1EC\U0001F1F1',
u':flag_for_Grenada:': u'\U0001F1EC\U0001F1E9',
u':flag_for_Guadeloupe:': u'\U0001F1EC\U0001F1F5',
u':flag_for_Guam:': u'\U0001F1EC\U0001F1FA',
u':flag_for_Guatemala:': u'\U0001F1EC\U0001F1F9',
u':flag_for_Guernsey:': u'\U0001F1EC\U0001F1EC',
u':flag_for_Guinea:': u'\U0001F1EC\U0001F1F3',
u':flag_for_Guinea__Bissau:': u'\U0001F1EC\U0001F1FC',
u':flag_for_Guyana:': u'\U0001F1EC\U0001F1FE',
u':flag_for_Haiti:': u'\U0001F1ED\U0001F1F9',
u':flag_for_Heard_&_McDonald_Islands:': u'\U0001F1ED\U0001F1F2',
u':flag_for_Honduras:': u'\U0001F1ED\U0001F1F3',
u':flag_for_Hong_Kong:': u'\U0001F1ED\U0001F1F0',
u':flag_for_Hungary:': u'\U0001F1ED\U0001F1FA',
u':flag_for_Iceland:': u'\U0001F1EE\U0001F1F8',
u':flag_for_India:': u'\U0001F1EE\U0001F1F3',
u':flag_for_Indonesia:': u'\U0001F1EE\U0001F1E9',
u':flag_for_Iran:': u'\U0001F1EE\U0001F1F7',
u':flag_for_Iraq:': u'\U0001F1EE\U0001F1F6',
u':flag_for_Ireland:': u'\U0001F1EE\U0001F1EA',
u':flag_for_Isle_of_Man:': u'\U0001F1EE\U0001F1F2',
u':flag_for_Israel:': u'\U0001F1EE\U0001F1F1',
u':flag_for_Italy:': u'\U0001F1EE\U0001F1F9',
u':flag_for_Jamaica:': u'\U0001F1EF\U0001F1F2',
u':flag_for_Japan:': u'\U0001F1EF\U0001F1F5',
u':flag_for_Jersey:': u'\U0001F1EF\U0001F1EA',
u':flag_for_Jordan:': u'\U0001F1EF\U0001F1F4',
u':flag_for_Kazakhstan:': u'\U0001F1F0\U0001F1FF',
u':flag_for_Kenya:': u'\U0001F1F0\U0001F1EA',
u':flag_for_Kiribati:': u'\U0001F1F0\U0001F1EE',
u':flag_for_Kosovo:': u'\U0001F1FD\U0001F1F0',
u':flag_for_Kuwait:': u'\U0001F1F0\U0001F1FC',
u':flag_for_Kyrgyzstan:': u'\U0001F1F0\U0001F1EC',
u':flag_for_Laos:': u'\U0001F1F1\U0001F1E6',
u':flag_for_Latvia:': u'\U0001F1F1\U0001F1FB',
u':flag_for_Lebanon:': u'\U0001F1F1\U0001F1E7',
u':flag_for_Lesotho:': u'\U0001F1F1\U0001F1F8',
u':flag_for_Liberia:': u'\U0001F1F1\U0001F1F7',
u':flag_for_Libya:': u'\U0001F1F1\U0001F1FE',
u':flag_for_Liechtenstein:': u'\U0001F1F1\U0001F1EE',
u':flag_for_Lithuania:': u'\U0001F1F1\U0001F1F9',
u':flag_for_Luxembourg:': u'\U0001F1F1\U0001F1FA',
u':flag_for_Macau:': u'\U0001F1F2\U0001F1F4',
u':flag_for_Macedonia:': u'\U0001F1F2\U0001F1F0',
u':flag_for_Madagascar:': u'\U0001F1F2\U0001F1EC',
u':flag_for_Malawi:': u'\U0001F1F2\U0001F1FC',
u':flag_for_Malaysia:': u'\U0001F1F2\U0001F1FE',
u':flag_for_Maldives:': u'\U0001F1F2\U0001F1FB',
u':flag_for_Mali:': u'\U0001F1F2\U0001F1F1',
u':flag_for_Malta:': u'\U0001F1F2\U0001F1F9',
u':flag_for_Marshall_Islands:': u'\U0001F1F2\U0001F1ED',
u':flag_for_Martinique:': u'\U0001F1F2\U0001F1F6',
u':flag_for_Mauritania:': u'\U0001F1F2\U0001F1F7',
u':flag_for_Mauritius:': u'\U0001F1F2\U0001F1FA',
u':flag_for_Mayotte:': u'\U0001F1FE\U0001F1F9',
u':flag_for_Mexico:': u'\U0001F1F2\U0001F1FD',
u':flag_for_Micronesia:': u'\U0001F1EB\U0001F1F2',
u':flag_for_Moldova:': u'\U0001F1F2\U0001F1E9',
u':flag_for_Monaco:': u'\U0001F1F2\U0001F1E8',
u':flag_for_Mongolia:': u'\U0001F1F2\U0001F1F3',
u':flag_for_Montenegro:': u'\U0001F1F2\U0001F1EA',
u':flag_for_Montserrat:': u'\U0001F1F2\U0001F1F8',
u':flag_for_Morocco:': u'\U0001F1F2\U0001F1E6',
u':flag_for_Mozambique:': u'\U0001F1F2\U0001F1FF',
u':flag_for_Myanmar:': u'\U0001F1F2\U0001F1F2',
u':flag_for_Namibia:': u'\U0001F1F3\U0001F1E6',
u':flag_for_Nauru:': u'\U0001F1F3\U0001F1F7',
u':flag_for_Nepal:': u'\U0001F1F3\U0001F1F5',
u':flag_for_Netherlands:': u'\U0001F1F3\U0001F1F1',
u':flag_for_New_Caledonia:': u'\U0001F1F3\U0001F1E8',
u':flag_for_New_Zealand:': u'\U0001F1F3\U0001F1FF',
u':flag_for_Nicaragua:': u'\U0001F1F3\U0001F1EE',
u':flag_for_Niger:': u'\U0001F1F3\U0001F1EA',
u':flag_for_Nigeria:': u'\U0001F1F3\U0001F1EC',
u':flag_for_Niue:': u'\U0001F1F3\U0001F1FA',
u':flag_for_Norfolk_Island:': u'\U0001F1F3\U0001F1EB',
u':flag_for_North_Korea:': u'\U0001F1F0\U0001F1F5',
u':flag_for_Northern_Mariana_Islands:': u'\U0001F1F2\U0001F1F5',
u':flag_for_Norway:': u'\U0001F1F3\U0001F1F4',
u':flag_for_Oman:': u'\U0001F1F4\U0001F1F2',
u':flag_for_Pakistan:': u'\U0001F1F5\U0001F1F0',
u':flag_for_Palau:': u'\U0001F1F5\U0001F1FC',
u':flag_for_Palestinian_Territories:': u'\U0001F1F5\U0001F1F8',
u':flag_for_Panama:': u'\U0001F1F5\U0001F1E6',
u':flag_for_Papua_New_Guinea:': u'\U0001F1F5\U0001F1EC',
u':flag_for_Paraguay:': u'\U0001F1F5\U0001F1FE',
u':flag_for_Peru:': u'\U0001F1F5\U0001F1EA',
u':flag_for_Philippines:': u'\U0001F1F5\U0001F1ED',
u':flag_for_Pitcairn_Islands:': u'\U0001F1F5\U0001F1F3',
u':flag_for_Poland:': u'\U0001F1F5\U0001F1F1',
u':flag_for_Portugal:': u'\U0001F1F5\U0001F1F9',
u':flag_for_Puerto_Rico:': u'\U0001F1F5\U0001F1F7',
u':flag_for_Qatar:': u'\U0001F1F6\U0001F1E6',
u':flag_for_Romania:': u'\U0001F1F7\U0001F1F4',
u':flag_for_Russia:': u'\U0001F1F7\U0001F1FA',
u':flag_for_Rwanda:': u'\U0001F1F7\U0001F1FC',
u':flag_for_Réunion:': u'\U0001F1F7\U0001F1EA',
u':flag_for_Samoa:': u'\U0001F1FC\U0001F1F8',
u':flag_for_San_Marino:': u'\U0001F1F8\U0001F1F2',
u':flag_for_Saudi_Arabia:': u'\U0001F1F8\U0001F1E6',
u':flag_for_Senegal:': u'\U0001F1F8\U0001F1F3',
u':flag_for_Serbia:': u'\U0001F1F7\U0001F1F8',
u':flag_for_Seychelles:': u'\U0001F1F8\U0001F1E8',
u':flag_for_Sierra_Leone:': u'\U0001F1F8\U0001F1F1',
u':flag_for_Singapore:': u'\U0001F1F8\U0001F1EC',
u':flag_for_Sint_Maarten:': u'\U0001F1F8\U0001F1FD',
u':flag_for_Slovakia:': u'\U0001F1F8\U0001F1F0',
u':flag_for_Slovenia:': u'\U0001F1F8\U0001F1EE',
u':flag_for_Solomon_Islands:': u'\U0001F1F8\U0001F1E7',
u':flag_for_Somalia:': u'\U0001F1F8\U0001F1F4',
u':flag_for_South_Africa:': u'\U0001F1FF\U0001F1E6',
u':flag_for_South_Georgia_&_South_Sandwich_Islands:': u'\U0001F1EC\U0001F1F8',
u':flag_for_South_Korea:': u'\U0001F1F0\U0001F1F7',
u':flag_for_South_Sudan:': u'\U0001F1F8\U0001F1F8',
u':flag_for_Spain:': u'\U0001F1EA\U0001F1F8',
u':flag_for_Sri_Lanka:': u'\U0001F1F1\U0001F1F0',
u':flag_for_St._Barthélemy:': u'\U0001F1E7\U0001F1F1',
u':flag_for_St._Helena:': u'\U0001F1F8\U0001F1ED',
u':flag_for_St._Kitts_&_Nevis:': u'\U0001F1F0\U0001F1F3',
u':flag_for_St._Lucia:': u'\U0001F1F1\U0001F1E8',
u':flag_for_St._Martin:': u'\U0001F1F2\U0001F1EB',
u':flag_for_St._Pierre_&_Miquelon:': u'\U0001F1F5\U0001F1F2',
u':flag_for_St._Vincent_&_Grenadines:': u'\U0001F1FB\U0001F1E8',
u':flag_for_Sudan:': u'\U0001F1F8\U0001F1E9',
u':flag_for_Suriname:': u'\U0001F1F8\U0001F1F7',
u':flag_for_Svalbard_&_Jan_Mayen:': u'\U0001F1F8\U0001F1EF',
u':flag_for_Swaziland:': u'\U0001F1F8\U0001F1FF',
u':flag_for_Sweden:': u'\U0001F1F8\U0001F1EA',
u':flag_for_Switzerland:': u'\U0001F1E8\U0001F1ED',
u':flag_for_Syria:': u'\U0001F1F8\U0001F1FE',
u':flag_for_São_Tomé_&_Príncipe:': u'\U0001F1F8\U0001F1F9',
u':flag_for_Taiwan:': u'\U0001F1F9\U0001F1FC',
u':flag_for_Tajikistan:': u'\U0001F1F9\U0001F1EF',
u':flag_for_Tanzania:': u'\U0001F1F9\U0001F1FF',
u':flag_for_Thailand:': u'\U0001F1F9\U0001F1ED',
u':flag_for_Timor__Leste:': u'\U0001F1F9\U0001F1F1',
u':flag_for_Togo:': u'\U0001F1F9\U0001F1EC',
u':flag_for_Tokelau:': u'\U0001F1F9\U0001F1F0',
u':flag_for_Tonga:': u'\U0001F1F9\U0001F1F4',
u':flag_for_Trinidad_&_Tobago:': u'\U0001F1F9\U0001F1F9',
u':flag_for_Tristan_da_Cunha:': u'\U0001F1F9\U0001F1E6',
u':flag_for_Tunisia:': u'\U0001F1F9\U0001F1F3',
u':flag_for_Turkey:': u'\U0001F1F9\U0001F1F7',
u':flag_for_Turkmenistan:': u'\U0001F1F9\U0001F1F2',
u':flag_for_Turks_&_Caicos_Islands:': u'\U0001F1F9\U0001F1E8',
u':flag_for_Tuvalu:': u'\U0001F1F9\U0001F1FB',
u':flag_for_U.S._Outlying_Islands:': u'\U0001F1FA\U0001F1F2',
u':flag_for_U.S._Virgin_Islands:': u'\U0001F1FB\U0001F1EE',
u':flag_for_Uganda:': u'\U0001F1FA\U0001F1EC',
u':flag_for_Ukraine:': u'\U0001F1FA\U0001F1E6',
u':flag_for_United_Arab_Emirates:': u'\U0001F1E6\U0001F1EA',
u':flag_for_United_Kingdom:': u'\U0001F1EC\U0001F1E7',
u':flag_for_United_States:': u'\U0001F1FA\U0001F1F8',
u':flag_for_Uruguay:': u'\U0001F1FA\U0001F1FE',
u':flag_for_Uzbekistan:': u'\U0001F1FA\U0001F1FF',
u':flag_for_Vanuatu:': u'\U0001F1FB\U0001F1FA',
u':flag_for_Vatican_City:': u'\U0001F1FB\U0001F1E6',
u':flag_for_Venezuela:': u'\U0001F1FB\U0001F1EA',
u':flag_for_Vietnam:': u'\U0001F1FB\U0001F1F3',
u':flag_for_Wallis_&_Futuna:': u'\U0001F1FC\U0001F1EB',
u':flag_for_Western_Sahara:': u'\U0001F1EA\U0001F1ED',
u':flag_for_Yemen:': u'\U0001F1FE\U0001F1EA',
u':flag_for_Zambia:': u'\U0001F1FF\U0001F1F2',
u':flag_for_Zimbabwe:': u'\U0001F1FF\U0001F1FC',
u':flag_for_Åland_Islands:': u'\U0001F1E6\U0001F1FD',
u':golf:': u'\U000026F3',
u':fleur__de__lis:': u'\U0000269C',
u':muscle:': u'\U0001F4AA',
u':floppy_disk:': u'\U0001F4BE',
u':flower_playing_cards:': u'\U0001F3B4',
u':flushed:': u'\U0001F633',
u':fog:': u'\U0001F32B',
u':foggy:': u'\U0001F301',
u':footprints:': u'\U0001F463',
u':fork_and_knife:': u'\U0001F374',
u':fork_and_knife_with_plate:': u'\U0001F37D',
u':fountain:': u'\U000026F2',
u':four_leaf_clover:': u'\U0001F340',
u':frame_with_picture:': u'\U0001F5BC',
u':fries:': u'\U0001F35F',
u':fried_shrimp:': u'\U0001F364',
u':frog:': u'\U0001F438',
u':hatched_chick:': u'\U0001F425',
u':frowning:': u'\U0001F626',
u':fuelpump:': u'\U000026FD',
u':full_moon:': u'\U0001F315',
u':full_moon_with_face:': u'\U0001F31D',
u':funeral_urn:': u'\U000026B1',
u':game_die:': u'\U0001F3B2',
u':gear:': u'\U00002699',
u':gem:': u'\U0001F48E',
u':gemini:': u'\U0000264A',
u':ghost:': u'\U0001F47B',
u':girl:': u'\U0001F467',
u':globe_with_meridians:': u'\U0001F310',
u':star2:': u'\U0001F31F',
u':goat:': u'\U0001F410',
u':golfer:': u'\U0001F3CC',
u':mortar_board:': u'\U0001F393',
u':grapes:': u'\U0001F347',
u':green_apple:': u'\U0001F34F',
u':green_book:': u'\U0001F4D7',
u':green_heart:': u'\U0001F49A',
u':grimacing:': u'\U0001F62C',
u':smile_cat:': u'\U0001F638',
u':grinning:': u'\U0001F600',
u':grin:': u'\U0001F601',
u':heartpulse:': u'\U0001F497',
u':guardsman:': u'\U0001F482',
u':guitar:': u'\U0001F3B8',
u':haircut:': u'\U0001F487',
u':hamburger:': u'\U0001F354',
u':hammer:': u'\U0001F528',
u':hammer_and_pick:': u'\U00002692',
u':hammer_and_wrench:': u'\U0001F6E0',
u':hamster:': u'\U0001F439',
u':handbag:': u'\U0001F45C',
u':raising_hand:': u'\U0001F64B',
u':hatching_chick:': u'\U0001F423',
u':headphones:': u'\U0001F3A7',
u':hear_no_evil:': u'\U0001F649',
u':heart_decoration:': u'\U0001F49F',
u':cupid:': u'\U0001F498',
u':gift_heart:': u'\U0001F49D',
u':heart:': u'\U00002764',
u':heavy_check_mark:': u'\U00002714',
u':heavy_division_sign:': u'\U00002797',
u':heavy_dollar_sign:': u'\U0001F4B2',
u':exclamation:': u'\U00002757',
u':heavy_exclamation_mark:': u'\U00002757',
u':heavy_heart_exclamation_mark_ornament:': u'\U00002763',
u':o:': u'\U00002B55',
u':heavy_minus_sign:': u'\U00002796',
u':heavy_multiplication_x:': u'\U00002716',
u':heavy_plus_sign:': u'\U00002795',
u':helicopter:': u'\U0001F681',
u':helm_symbol:': u'\U00002388',
u':helmet_with_white_cross:': u'\U000026D1',
u':herb:': u'\U0001F33F',
u':hibiscus:': u'\U0001F33A',
u':high_heel:': u'\U0001F460',
u':bullettrain_side:': u'\U0001F684',
u':bullettrain_front:': u'\U0001F685',
u':high_brightness:': u'\U0001F506',
u':zap:': u'\U000026A1',
u':hocho:': u'\U0001F52A',
u':knife:': u'\U0001F52A',
u':hole:': u'\U0001F573',
u':honey_pot:': u'\U0001F36F',
u':bee:': u'\U0001F41D',
u':traffic_light:': u'\U0001F6A5',
u':racehorse:': u'\U0001F40E',
u':horse:': u'\U0001F434',
u':horse_racing:': u'\U0001F3C7',
u':hospital:': u'\U0001F3E5',
u':coffee:': u'\U00002615',
u':hot_dog:': u'\U0001F32D',
u':hot_pepper:': u'\U0001F336',
u':hotsprings:': u'\U00002668',
u':hotel:': u'\U0001F3E8',
u':hourglass:': u'\U0000231B',
u':hourglass_flowing_sand:': u'\U000023F3',
u':house:': u'\U0001F3E0',
u':house_buildings:': u'\U0001F3D8',
u':house_with_garden:': u'\U0001F3E1',
u':hugging_face:': u'\U0001F917',
u':100:': u'\U0001F4AF',
u':hushed:': u'\U0001F62F',
u':ice_cream:': u'\U0001F368',
u':ice_hockey_stick_and_puck:': u'\U0001F3D2',
u':ice_skate:': u'\U000026F8',
u':imp:': u'\U0001F47F',
u':inbox_tray:': u'\U0001F4E5',
u':incoming_envelope:': u'\U0001F4E8',
u':information_desk_person:': u'\U0001F481',
u':information_source:': u'\U00002139',
u':capital_abcd:': u'\U0001F520',
u':abc:': u'\U0001F524',
u':abcd:': u'\U0001F521',
u':1234:': u'\U0001F522',
u':symbols:': u'\U0001F523',
u':izakaya_lantern:': u'\U0001F3EE',
u':lantern:': u'\U0001F3EE',
u':jack_o_lantern:': u'\U0001F383',
u':japanese_castle:': u'\U0001F3EF',
u':dolls:': u'\U0001F38E',
u':japanese_goblin:': u'\U0001F47A',
u':japanese_ogre:': u'\U0001F479',
u':post_office:': u'\U0001F3E3',
u':beginner:': u'\U0001F530',
u':jeans:': u'\U0001F456',
u':joystick:': u'\U0001F579',
u':kaaba:': u'\U0001F54B',
u':key:': u'\U0001F511',
u':keyboard:': u'\U00002328',
u':zero:': u'\U00000030\U0000FE0F\U000020E3',
u':one:': u'\U00000031\U0000FE0F\U000020E3',
u':ten:': u'\U0001F51F',
u':two:': u'\U00000032\U0000FE0F\U000020E3',
u':three:': u'\U00000033\U0000FE0F\U000020E3',
u':four:': u'\U00000034\U0000FE0F\U000020E3',
u':five:': u'\U00000035\U0000FE0F\U000020E3',
u':six:': u'\U00000036\U0000FE0F\U000020E3',
u':seven:': u'\U00000037\U0000FE0F\U000020E3',
u':eight:': u'\U00000038\U0000FE0F\U000020E3',
u':nine:': u'\U00000039\U0000FE0F\U000020E3',
u':kimono:': u'\U0001F458',
u':couplekiss:': u'\U0001F48F',
u':kiss:': u'\U0001F48B',
u':kissing_cat:': u'\U0001F63D',
u':kissing:': u'\U0001F617',
u':kissing_closed_eyes:': u'\U0001F61A',
u':kissing_smiling_eyes:': u'\U0001F619',
u':koala:': u'\U0001F428',
u':label:': u'\U0001F3F7',
u':beetle:': u'\U0001F41E',
u':large_blue_circle:': u'\U0001F535',
u':large_blue_diamond:': u'\U0001F537',
u':large_orange_diamond:': u'\U0001F536',
u':red_circle:': u'\U0001F534',
u':last_quarter_moon:': u'\U0001F317',
u':last_quarter_moon_with_face:': u'\U0001F31C',
u':latin_cross:': u'\U0000271D',
u':leaves:': u'\U0001F343',
u':ledger:': u'\U0001F4D2',
u':mag:': u'\U0001F50D',
u':left_luggage:': u'\U0001F6C5',
u':left_right_arrow:': u'\U00002194',
u':leftwards_arrow_with_hook:': u'\U000021A9',
u':arrow_left:': u'\U00002B05',
u':lemon:': u'\U0001F34B',
u':leo:': u'\U0000264C',
u':leopard:': u'\U0001F406',
u':level_slider:': u'\U0001F39A',
u':libra:': u'\U0000264E',
u':light_rail:': u'\U0001F688',
u':link:': u'\U0001F517',
u':linked_paperclips:': u'\U0001F587',
u':lion_face:': u'\U0001F981',
u':lipstick:': u'\U0001F484',
u':lock:': u'\U0001F512',
u':lock_with_ink_pen:': u'\U0001F50F',
u':lollipop:': u'\U0001F36D',
u':sob:': u'\U0001F62D',
u':love_hotel:': u'\U0001F3E9',
u':love_letter:': u'\U0001F48C',
u':low_brightness:': u'\U0001F505',
u':lower_left_ballpoint_pen:': u'\U0001F58A',
u':lower_left_crayon:': u'\U0001F58D',
u':lower_left_fountain_pen:': u'\U0001F58B',
u':lower_left_paintbrush:': u'\U0001F58C',
u':mahjong:': u'\U0001F004',
u':man:': u'\U0001F468',
u':couple:': u'\U0001F46B',
u':man_in_business_suit_levitating:': u'\U0001F574',
u':man_with_gua_pi_mao:': u'\U0001F472',
u':man_with_turban:': u'\U0001F473',
u':mans_shoe:': u'\U0001F45E',
u':shoe:': u'\U0001F45E',
u':mantelpiece_clock:': u'\U0001F570',
u':maple_leaf:': u'\U0001F341',
u':meat_on_bone:': u'\U0001F356',
u':black_circle:': u'\U000026AB',
u':white_circle:': u'\U000026AA',
u':melon:': u'\U0001F348',
u':memo:': u'\U0001F4DD',
u':pencil:': u'\U0001F4DD',
u':menorah_with_nine_branches:': u'\U0001F54E',
u':mens:': u'\U0001F6B9',
u':metro:': u'\U0001F687',
u':microphone:': u'\U0001F3A4',
u':microscope:': u'\U0001F52C',
u':military_medal:': u'\U0001F396',
u':milky_way:': u'\U0001F30C',
u':minibus:': u'\U0001F690',
u':minidisc:': u'\U0001F4BD',
u':iphone:': u'\U0001F4F1',
u':mobile_phone_off:': u'\U0001F4F4',
u':calling:': u'\U0001F4F2',
u':money__mouth_face:': u'\U0001F911',
u':moneybag:': u'\U0001F4B0',
u':money_with_wings:': u'\U0001F4B8',
u':monkey:': u'\U0001F412',
u':monkey_face:': u'\U0001F435',
u':monorail:': u'\U0001F69D',
u':rice_scene:': u'\U0001F391',
u':mosque:': u'\U0001F54C',
u':motor_boat:': u'\U0001F6E5',
u':motorway:': u'\U0001F6E3',
u':mount_fuji:': u'\U0001F5FB',
u':mountain:': u'\U000026F0',
u':mountain_bicyclist:': u'\U0001F6B5',
u':mountain_cableway:': u'\U0001F6A0',
u':mountain_railway:': u'\U0001F69E',
u':mouse2:': u'\U0001F401',
u':mouse:': u'\U0001F42D',
u':lips:': u'\U0001F444',
u':movie_camera:': u'\U0001F3A5',
u':moyai:': u'\U0001F5FF',
u':notes:': u'\U0001F3B6',
u':mushroom:': u'\U0001F344',
u':musical_keyboard:': u'\U0001F3B9',
u':musical_note:': u'\U0001F3B5',
u':musical_score:': u'\U0001F3BC',
u':nail_care:': u'\U0001F485',
u':name_badge:': u'\U0001F4DB',
u':national_park:': u'\U0001F3DE',
u':necktie:': u'\U0001F454',
u':ab:': u'\U0001F18E',
u':negative_squared_cross_mark:': u'\U0000274E',
u':a:': u'\U0001F170',
u':b:': u'\U0001F171',
u':o2:': u'\U0001F17E',
u':parking:': u'\U0001F17F',
u':nerd_face:': u'\U0001F913',
u':neutral_face:': u'\U0001F610',
u':new_moon:': u'\U0001F311',
u':honeybee:': u'\U0001F41D',
u':new_moon_with_face:': u'\U0001F31A',
u':newspaper:': u'\U0001F4F0',
u':night_with_stars:': u'\U0001F303',
u':no_bicycles:': u'\U0001F6B3',
u':no_entry:': u'\U000026D4',
u':no_entry_sign:': u'\U0001F6AB',
u':no_mobile_phones:': u'\U0001F4F5',
u':underage:': u'\U0001F51E',
u':no_pedestrians:': u'\U0001F6B7',
u':no_smoking:': u'\U0001F6AD',
u':non__potable_water:': u'\U0001F6B1',
u':arrow_upper_right:': u'\U00002197',
u':arrow_upper_left:': u'\U00002196',
u':nose:': u'\U0001F443',
u':notebook:': u'\U0001F4D3',
u':notebook_with_decorative_cover:': u'\U0001F4D4',
u':nut_and_bolt:': u'\U0001F529',
u':octopus:': u'\U0001F419',
u':oden:': u'\U0001F362',
u':office:': u'\U0001F3E2',
u':oil_drum:': u'\U0001F6E2',
u':ok_hand:': u'\U0001F44C',
u':old_key:': u'\U0001F5DD',
u':older_man:': u'\U0001F474',
u':older_woman:': u'\U0001F475',
u':om_symbol:': u'\U0001F549',
u':on:': u'\U0001F51B',
u':oncoming_automobile:': u'\U0001F698',
u':oncoming_bus:': u'\U0001F68D',
u':oncoming_police_car:': u'\U0001F694',
u':oncoming_taxi:': u'\U0001F696',
u':book:': u'\U0001F4D6',
u':open_book:': u'\U0001F4D6',
u':open_file_folder:': u'\U0001F4C2',
u':open_hands:': u'\U0001F450',
u':unlock:': u'\U0001F513',
u':mailbox_with_no_mail:': u'\U0001F4ED',
u':mailbox_with_mail:': u'\U0001F4EC',
u':ophiuchus:': u'\U000026CE',
u':cd:': u'\U0001F4BF',
u':orange_book:': u'\U0001F4D9',
u':orthodox_cross:': u'\U00002626',
u':outbox_tray:': u'\U0001F4E4',
u':ox:': u'\U0001F402',
u':package:': u'\U0001F4E6',
u':page_facing_up:': u'\U0001F4C4',
u':page_with_curl:': u'\U0001F4C3',
u':pager:': u'\U0001F4DF',
u':palm_tree:': u'\U0001F334',
u':panda_face:': u'\U0001F43C',
u':paperclip:': u'\U0001F4CE',
u':part_alternation_mark:': u'\U0000303D',
u':tada:': u'\U0001F389',
u':passenger_ship:': u'\U0001F6F3',
u':passport_control:': u'\U0001F6C2',
u':feet:': u'\U0001F43E',
u':paw_prints:': u'\U0001F43E',
u':peace_symbol:': u'\U0000262E',
u':peach:': u'\U0001F351',
u':pear:': u'\U0001F350',
u':walking:': u'\U0001F6B6',
u':pencil2:': u'\U0000270F',
u':penguin:': u'\U0001F427',
u':pensive:': u'\U0001F614',
u':performing_arts:': u'\U0001F3AD',
u':persevere:': u'\U0001F623',
u':bow:': u'\U0001F647',
u':person_frowning:': u'\U0001F64D',
u':raised_hands:': u'\U0001F64C',
u':person_with_ball:': u'\U000026F9',
u':person_with_blond_hair:': u'\U0001F471',
u':pray:': u'\U0001F64F',
u':person_with_pouting_face:': u'\U0001F64E',
u':computer:': u'\U0001F4BB',
u':pick:': u'\U000026CF',
u':pig2:': u'\U0001F416',
u':pig:': u'\U0001F437',
u':pig_nose:': u'\U0001F43D',
u':hankey:': u'\U0001F4A9',
u':poop:': u'\U0001F4A9',
u':shit:': u'\U0001F4A9',
u':pill:': u'\U0001F48A',
u':bamboo:': u'\U0001F38D',
u':pineapple:': u'\U0001F34D',
u':pisces:': u'\U00002653',
u':gun:': u'\U0001F52B',
u':place_of_worship:': u'\U0001F6D0',
u':black_joker:': u'\U0001F0CF',
u':police_car:': u'\U0001F693',
u':rotating_light:': u'\U0001F6A8',
u':cop:': u'\U0001F46E',
u':poodle:': u'\U0001F429',
u':popcorn:': u'\U0001F37F',
u':postal_horn:': u'\U0001F4EF',
u':postbox:': u'\U0001F4EE',
u':stew:': u'\U0001F372',
u':potable_water:': u'\U0001F6B0',
u':pouch:': u'\U0001F45D',
u':poultry_leg:': u'\U0001F357',
u':pouting_cat:': u'\U0001F63E',
u':rage:': u'\U0001F621',
u':prayer_beads:': u'\U0001F4FF',
u':princess:': u'\U0001F478',
u':printer:': u'\U0001F5A8',
u':loudspeaker:': u'\U0001F4E2',
u':purple_heart:': u'\U0001F49C',
u':purse:': u'\U0001F45B',
u':pushpin:': u'\U0001F4CC',
u':put_litter_in_its_place:': u'\U0001F6AE',
u':rabbit2:': u'\U0001F407',
u':rabbit:': u'\U0001F430',
u':racing_car:': u'\U0001F3CE',
u':racing_motorcycle:': u'\U0001F3CD',
u':radio:': u'\U0001F4FB',
u':radio_button:': u'\U0001F518',
u':radioactive_sign:': u'\U00002622',
u':railway_car:': u'\U0001F683',
u':railway_track:': u'\U0001F6E4',
u':rainbow:': u'\U0001F308',
u':fist:': u'\U0000270A',
u':hand:': u'\U0000270B',
u':raised_hand:': u'\U0000270B',
u':raised_hand_with_fingers_splayed:': u'\U0001F590',
u':raised_hand_with_part_between_middle_and_ring_fingers:': u'\U0001F596',
u':ram:': u'\U0001F40F',
u':rat:': u'\U0001F400',
u':blue_car:': u'\U0001F699',
u':apple:': u'\U0001F34E',
u':registered:': u'\U000000AE',
u':relieved:': u'\U0001F60C',
u':reminder_ribbon:': u'\U0001F397',
u':restroom:': u'\U0001F6BB',
u':reversed_hand_with_middle_finger_extended:': u'\U0001F595',
u':revolving_hearts:': u'\U0001F49E',
u':ribbon:': u'\U0001F380',
u':rice_ball:': u'\U0001F359',
u':rice_cracker:': u'\U0001F358',
u':mag_right:': u'\U0001F50E',
u':right_anger_bubble:': u'\U0001F5EF',
u':arrow_right_hook:': u'\U000021AA',
u':ring:': u'\U0001F48D',
u':sweet_potato:': u'\U0001F360',
u':robot_face:': u'\U0001F916',
u':robot:': u'\U0001F916',
u':rocket:': u'\U0001F680',
u':rolled__up_newspaper:': u'\U0001F5DE',
u':roller_coaster:': u'\U0001F3A2',
u':rooster:': u'\U0001F413',
u':rose:': u'\U0001F339',
u':rosette:': u'\U0001F3F5',
u':round_pushpin:': u'\U0001F4CD',
u':rowboat:': u'\U0001F6A3',
u':rugby_football:': u'\U0001F3C9',
u':runner:': u'\U0001F3C3',
u':running:': u'\U0001F3C3',
u':running_shirt_with_sash:': u'\U0001F3BD',
u':sagittarius:': u'\U00002650',
u':boat:': u'\U000026F5',
u':sailboat:': u'\U000026F5',
u':sake:': u'\U0001F376',
u':satellite:': u'\U0001F4E1',
u':saxophone:': u'\U0001F3B7',
u':scales:': u'\U00002696',
u':school:': u'\U0001F3EB',
u':school_satchel:': u'\U0001F392',
u':scorpion:': u'\U0001F982',
u':scorpius:': u'\U0000264F',
u':scroll:': u'\U0001F4DC',
u':seat:': u'\U0001F4BA',
u':see_no_evil:': u'\U0001F648',
u':seedling:': u'\U0001F331',
u':shamrock:': u'\U00002618',
u':shaved_ice:': u'\U0001F367',
u':sheep:': u'\U0001F411',
u':shield:': u'\U0001F6E1',
u':shinto_shrine:': u'\U000026E9',
u':ship:': u'\U0001F6A2',
u':stars:': u'\U0001F320',
u':shopping_bags:': u'\U0001F6CD',
u':cake:': u'\U0001F370',
u':shower:': u'\U0001F6BF',
u':sign_of_the_horns:': u'\U0001F918',
u':japan:': u'\U0001F5FE',
u':six_pointed_star:': u'\U0001F52F',
u':ski:': u'\U0001F3BF',
u':skier:': u'\U000026F7',
u':skull:': u'\U0001F480',
u':skull_and_crossbones:': u'\U00002620',
u':sleeping_accommodation:': u'\U0001F6CC',
u':sleeping:': u'\U0001F634',
u':zzz:': u'\U0001F4A4',
u':sleepy:': u'\U0001F62A',
u':sleuth_or_spy:': u'\U0001F575',
u':pizza:': u'\U0001F355',
u':slightly_frowning_face:': u'\U0001F641',
u':slightly_smiling_face:': u'\U0001F642',
u':slot_machine:': u'\U0001F3B0',
u':small_airplane:': u'\U0001F6E9',
u':small_blue_diamond:': u'\U0001F539',
u':small_orange_diamond:': u'\U0001F538',
u':heart_eyes_cat:': u'\U0001F63B',
u':smiley_cat:': u'\U0001F63A',
u':innocent:': u'\U0001F607',
u':heart_eyes:': u'\U0001F60D',
u':smiling_imp:': u'\U0001F608',
u':smiley:': u'\U0001F603',
u':sweat_smile:': u'\U0001F605',
u':smile:': u'\U0001F604',
u':laughing:': u'\U0001F606',
u':satisfied:': u'\U0001F606',
u':blush:': u'\U0001F60A',
u':sunglasses:': u'\U0001F60E',
u':smirk:': u'\U0001F60F',
u':smoking:': u'\U0001F6AC',
u':snail:': u'\U0001F40C',
u':snake:': u'\U0001F40D',
u':snow_capped_mountain:': u'\U0001F3D4',
u':snowboarder:': u'\U0001F3C2',
u':snowflake:': u'\U00002744',
u':snowman:': u'\U00002603',
u':soccer:': u'\U000026BD',
u':icecream:': u'\U0001F366',
u':soon:': u'\U0001F51C',
u':arrow_lower_right:': u'\U00002198',
u':arrow_lower_left:': u'\U00002199',
u':spaghetti:': u'\U0001F35D',
u':sparkle:': u'\U00002747',
u':sparkles:': u'\U00002728',
u':sparkling_heart:': u'\U0001F496',
u':speak_no_evil:': u'\U0001F64A',
u':speaker:': u'\U0001F508',
u':mute:': u'\U0001F507',
u':sound:': u'\U0001F509',
u':loud_sound:': u'\U0001F50A',
u':speaking_head_in_silhouette:': u'\U0001F5E3',
u':speech_balloon:': u'\U0001F4AC',
u':speedboat:': u'\U0001F6A4',
u':spider:': u'\U0001F577',
u':spider_web:': u'\U0001F578',
u':spiral_calendar_pad:': u'\U0001F5D3',
u':spiral_note_pad:': u'\U0001F5D2',
u':shell:': u'\U0001F41A',
u':sweat_drops:': u'\U0001F4A6',
u':sports_medal:': u'\U0001F3C5',
u':whale:': u'\U0001F433',
u':u5272:': u'\U0001F239',
u':u5408:': u'\U0001F234',
u':u55b6:': u'\U0001F23A',
u':u6307:': u'\U0001F22F',
u':u6708:': u'\U0001F237',
u':u6709:': u'\U0001F236',
u':u6e80:': u'\U0001F235',
u':u7121:': u'\U0001F21A',
u':u7533:': u'\U0001F238',
u':u7981:': u'\U0001F232',
u':u7a7a:': u'\U0001F233',
u':cl:': u'\U0001F191',
u':cool:': u'\U0001F192',
u':free:': u'\U0001F193',
u':id:': u'\U0001F194',
u':koko:': u'\U0001F201',
u':sa:': u'\U0001F202',
u':new:': u'\U0001F195',
u':ng:': u'\U0001F196',
u':ok:': u'\U0001F197',
u':sos:': u'\U0001F198',
u':up:': u'\U0001F199',
u':vs:': u'\U0001F19A',
u':stadium:': u'\U0001F3DF',
u':star_and_crescent:': u'\U0000262A',
u':star_of_david:': u'\U00002721',
u':station:': u'\U0001F689',
u':statue_of_liberty:': u'\U0001F5FD',
u':steam_locomotive:': u'\U0001F682',
u':ramen:': u'\U0001F35C',
u':stopwatch:': u'\U000023F1',
u':straight_ruler:': u'\U0001F4CF',
u':strawberry:': u'\U0001F353',
u':studio_microphone:': u'\U0001F399',
u':partly_sunny:': u'\U000026C5',
u':sun_with_face:': u'\U0001F31E',
u':sunflower:': u'\U0001F33B',
u':sunrise:': u'\U0001F305',
u':sunrise_over_mountains:': u'\U0001F304',
u':city_sunrise:': u'\U0001F307',
u':surfer:': u'\U0001F3C4',
u':sushi:': u'\U0001F363',
u':suspension_railway:': u'\U0001F69F',
u':swimmer:': u'\U0001F3CA',
u':synagogue:': u'\U0001F54D',
u':syringe:': u'\U0001F489',
u':shirt:': u'\U0001F455',
u':tshirt:': u'\U0001F455',
u':table_tennis_paddle_and_ball:': u'\U0001F3D3',
u':taco:': u'\U0001F32E',
u':tanabata_tree:': u'\U0001F38B',
u':tangerine:': u'\U0001F34A',
u':taurus:': u'\U00002649',
u':taxi:': u'\U0001F695',
u':tea:': u'\U0001F375',
u':calendar:': u'\U0001F4C6',
u':telephone_receiver:': u'\U0001F4DE',
u':telescope:': u'\U0001F52D',
u':tv:': u'\U0001F4FA',
u':tennis:': u'\U0001F3BE',
u':tent:': u'\U000026FA',
u':thermometer:': u'\U0001F321',
u':thinking_face:': u'\U0001F914',
u':thought_balloon:': u'\U0001F4AD',
u':three_button_mouse:': u'\U0001F5B1',
u':+1:': u'\U0001F44D',
u':thumbsup:': u'\U0001F44D',
u':__1:': u'\U0001F44E',
u':-1:': u'\U0001F44E',
u':thumbsdown:': u'\U0001F44E',
u':thunder_cloud_and_rain:': u'\U000026C8',
u':ticket:': u'\U0001F3AB',
u':tiger2:': u'\U0001F405',
u':tiger:': u'\U0001F42F',
u':timer_clock:': u'\U000023F2',
u':tired_face:': u'\U0001F62B',
u':toilet:': u'\U0001F6BD',
u':tokyo_tower:': u'\U0001F5FC',
u':tomato:': u'\U0001F345',
u':tongue:': u'\U0001F445',
u':tophat:': u'\U0001F3A9',
u':top:': u'\U0001F51D',
u':trackball:': u'\U0001F5B2',
u':tractor:': u'\U0001F69C',
u':tm:': u'\U00002122',
u':train2:': u'\U0001F686',
u':tram:': u'\U0001F68A',
u':train:': u'\U0001F68B',
u':triangular_flag_on_post:': u'\U0001F6A9',
u':triangular_ruler:': u'\U0001F4D0',
u':trident:': u'\U0001F531',
u':trolleybus:': u'\U0001F68E',
u':trophy:': u'\U0001F3C6',
u':tropical_drink:': u'\U0001F379',
u':tropical_fish:': u'\U0001F420',
u':trumpet:': u'\U0001F3BA',
u':tulip:': u'\U0001F337',
u':turkey:': u'\U0001F983',
u':turtle:': u'\U0001F422',
u':twisted_rightwards_arrows:': u'\U0001F500',
u':two_hearts:': u'\U0001F495',
u':two_men_holding_hands:': u'\U0001F46C',
u':two_women_holding_hands:': u'\U0001F46D',
u':umbrella:': u'\U00002602',
u':umbrella_on_ground:': u'\U000026F1',
u':unamused:': u'\U0001F612',
u':unicorn_face:': u'\U0001F984',
u':small_red_triangle:': u'\U0001F53A',
u':arrow_up_small:': u'\U0001F53C',
u':arrow_up_down:': u'\U00002195',
u':upside__down_face:': u'\U0001F643',
u':arrow_up:': u'\U00002B06',
u':vertical_traffic_light:': u'\U0001F6A6',
u':vibration_mode:': u'\U0001F4F3',
u':v:': u'\U0000270C',
u':video_camera:': u'\U0001F4F9',
u':video_game:': u'\U0001F3AE',
u':vhs:': u'\U0001F4FC',
u':violin:': u'\U0001F3BB',
u':virgo:': u'\U0000264D',
u':volcano:': u'\U0001F30B',
u':volleyball:': u'\U0001F3D0',
u':waning_crescent_moon:': u'\U0001F318',
u':waning_gibbous_moon:': u'\U0001F316',
u':warning:': u'\U000026A0',
u':wastebasket:': u'\U0001F5D1',
u':watch:': u'\U0000231A',
u':water_buffalo:': u'\U0001F403',
u':wc:': u'\U0001F6BE',
u':ocean:': u'\U0001F30A',
u':watermelon:': u'\U0001F349',
u':waving_black_flag:': u'\U0001F3F4',
u':wave:': u'\U0001F44B',
u':waving_white_flag:': u'\U0001F3F3',
u':wavy_dash:': u'\U00003030',
u':waxing_crescent_moon:': u'\U0001F312',
u':moon:': u'\U0001F314',
u':waxing_gibbous_moon:': u'\U0001F314',
u':scream_cat:': u'\U0001F640',
u':weary:': u'\U0001F629',
u':wedding:': u'\U0001F492',
u':weight_lifter:': u'\U0001F3CB',
u':whale2:': u'\U0001F40B',
u':wheel_of_dharma:': u'\U00002638',
u':wheelchair:': u'\U0000267F',
u':point_down:': u'\U0001F447',
u':grey_exclamation:': u'\U00002755',
u':white_flower:': u'\U0001F4AE',
u':white_frowning_face:': u'\U00002639',
u':white_check_mark:': u'\U00002705',
u':white_large_square:': u'\U00002B1C',
u':point_left:': u'\U0001F448',
u':white_medium_small_square:': u'\U000025FD',
u':white_medium_square:': u'\U000025FB',
u':star:': u'\U00002B50',
u':grey_question:': u'\U00002754',
u':point_right:': u'\U0001F449',
u':white_small_square:': u'\U000025AB',
u':relaxed:': u'\U0000263A',
u':white_square_button:': u'\U0001F533',
u':white_sun_behind_cloud:': u'\U0001F325',
u':white_sun_behind_cloud_with_rain:': u'\U0001F326',
u':white_sun_with_small_cloud:': u'\U0001F324',
u':point_up_2:': u'\U0001F446',
u':point_up:': u'\U0000261D',
u':wind_blowing_face:': u'\U0001F32C',
u':wind_chime:': u'\U0001F390',
u':wine_glass:': u'\U0001F377',
u':wink:': u'\U0001F609',
u':wolf:': u'\U0001F43A',
u':woman:': u'\U0001F469',
u':dancers:': u'\U0001F46F',
u':boot:': u'\U0001F462',
u':womans_clothes:': u'\U0001F45A',
u':womans_hat:': u'\U0001F452',
u':sandal:': u'\U0001F461',
u':womens:': u'\U0001F6BA',
u':world_map:': u'\U0001F5FA',
u':worried:': u'\U0001F61F',
u':gift:': u'\U0001F381',
u':wrench:': u'\U0001F527',
u':writing_hand:': u'\U0000270D',
u':yellow_heart:': u'\U0001F49B',
u':yin_yang:': u'\U0000262F',
u':zipper__mouth_face:': u'\U0001F910',
u':regional_indicator_a:': u'\U0001F1E6',
u':regional_indicator_b:': u'\U0001F1E7',
u':regional_indicator_c:': u'\U0001F1E8',
u':regional_indicator_d:': u'\U0001F1E9',
u':regional_indicator_e:': u'\U0001F1EA',
u':regional_indicator_f:': u'\U0001F1EB',
u':regional_indicator_g:': u'\U0001F1EC',
u':regional_indicator_h:': u'\U0001F1ED',
u':regional_indicator_i:': u'\U0001F1EE',
u':regional_indicator_j:': u'\U0001F1EF',
u':regional_indicator_k:': u'\U0001F1F0',
u':regional_indicator_l:': u'\U0001F1F1',
u':regional_indicator_m:': u'\U0001F1F2',
u':regional_indicator_n:': u'\U0001F1F3',
u':regional_indicator_o:': u'\U0001F1F4',
u':regional_indicator_p:': u'\U0001F1F5',
u':regional_indicator_q:': u'\U0001F1F6',
u':regional_indicator_r:': u'\U0001F1F7',
u':regional_indicator_s:': u'\U0001F1F8',
u':regional_indicator_t:': u'\U0001F1F9',
u':regional_indicator_u:': u'\U0001F1FA',
u':regional_indicator_v:': u'\U0001F1FB',
u':regional_indicator_w:': u'\U0001F1FC',
u':regional_indicator_x:': u'\U0001F1FD',
u':regional_indicator_y:': u'\U0001F1FE',
u':regional_indicator_z:': u'\U0001F1FF',
})
# Reverse lookup tables mapping each emoji code-point sequence back to one
# ``:name:`` key.  Where several names share the same emoji (e.g. ``:+1:``
# and ``:thumbsup:`` both map to U+1F44D), the name iterated last wins.
UNICODE_EMOJI = {emoji: name for name, emoji in EMOJI_UNICODE.items()}
UNICODE_EMOJI_ALIAS = {emoji: name for name, emoji in EMOJI_ALIAS_UNICODE.items()}
# All emoji values from the canonical table, for fast membership tests.
EMOJI_UNICODE_SET = set(EMOJI_UNICODE.values())
| janesolomon/twitter_search | twitter_search/unicode_codes.py | Python | mit | 306,254 | [
"FLEUR",
"Octopus"
] | 7ed3d005c905f8e4258ffe4269f5809848ab12170254c6a32a0c019dbaee47ea |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Complete Chrome user-agent strings keyed by device class ('desktop',
# 'mobile', 'tablet').  Consumed by GetChromeUserAgentArgumentFromType,
# which passes the chosen value verbatim inside a --user-agent flag.
# NOTE(review): these identify Chrome 18-era browsers; presumably kept
# frozen on purpose for reproducible telemetry runs — confirm before
# updating the version strings.
UA_TYPE_MAPPING = {
  'desktop':
      'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) '
      'AppleWebKit/535.19 (KHTML, like Gecko) '
      'Chrome/18.0.1025.151 Safari/535.19',
  'mobile':
      'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) '
      'AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile '
      'Safari/535.19',
  'tablet':
      'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 7 Build/IMM76B) '
      'AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 '
      'Safari/535.19',
}
def GetChromeUserAgentArgumentFromType(user_agent_type):
  """Returns a chrome user agent based on a user agent type.
  This is derived from:
  https://developers.google.com/chrome/mobile/docs/user-agent

  Returns a single-element list holding the --user-agent command-line
  argument for the given device class, or an empty list when no type
  (None/empty) is requested.  Raises KeyError for an unknown type.
  """
  if not user_agent_type:
    return []
  return ['--user-agent="%s"' % UA_TYPE_MAPPING[user_agent_type]]
| leighpauls/k2cro4 | tools/telemetry/telemetry/user_agent.py | Python | bsd-3-clause | 1,023 | [
"Galaxy"
] | f171f32ce641e46be86834fa418b882510c0d0584139c5123ce77cb1400e51e1 |
# -*- coding: utf-8 -*-
#
# test_connect_array_fixed_outdegree.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests of connection with rule fixed_outdegree
and parameter arrays in syn_spec
"""
import unittest
import nest
import numpy
@nest.ll_api.check_stack
class ConnectArrayFixedOutdegreeTestCase(unittest.TestCase):
    """Tests of connections with fixed outdegree and parameter arrays"""

    def test_Connect_Array_Fixed_Outdegree(self):
        """Tests of connections with fixed outdegree and parameter arrays"""

        n_cells = 20  # number of neurons in each subnet
        out_deg = 5   # number of connections per neuron

        ############################################
        # test with connection rule fixed_outdegree
        ############################################
        nest.ResetKernel()

        sources = nest.Create('iaf_psc_alpha', n_cells)  # source subnet
        targets = nest.Create('iaf_psc_alpha', n_cells)  # target subnet

        # Weight matrix: one row of out_deg unique values per source neuron.
        # Delays are defined as the matching weight plus one, which lets the
        # checks below pair each retrieved delay with its weight.
        weight_mat = [[row * out_deg + col for col in range(out_deg)]
                      for row in range(n_cells)]
        delay_mat = [[w + 1 for w in row] for row in weight_mat]

        # Connect source to target subnet with per-connection parameters.
        nest.Connect(sources, targets,
                     conn_spec={'rule': 'fixed_outdegree',
                                'outdegree': out_deg},
                     syn_spec={'model': 'static_synapse',
                               'weight': weight_mat,
                               'delay': delay_mat})

        for row in range(n_cells):  # loop on all neurons of source subnet
            # All connections leaving this source neuron.
            conns = nest.GetConnections(source=sources[row:row + 1])

            found_weights = []
            for idx in range(len(conns)):
                conn = conns[idx:idx + 1]
                w = nest.GetStatus(conn, 'weight')[0]  # synaptic weight
                d = nest.GetStatus(conn, 'delay')[0]   # synaptic delay
                self.assertEqual(d - w, 1)  # delay must equal weight + 1
                found_weights.append(w)

            # Exactly out_deg connections must have been created.
            self.assertEqual(len(found_weights), out_deg)

            # The sorted retrieved weights must match the row we specified.
            expected = sorted(weight_mat[row])
            for got, want in zip(sorted(found_weights), expected):
                self.assertEqual(got - want, 0.0)
def suite():
    # Collect every method of the test case whose name starts with 'test'.
    return unittest.makeSuite(ConnectArrayFixedOutdegreeTestCase, 'test')
def run():
    # Verbose runner so individual test names are printed as they execute.
    unittest.TextTestRunner(verbosity=2).run(suite())
# Allow the test module to be executed directly as a script.
if __name__ == "__main__":
    run()
| hakonsbm/nest-simulator | pynest/nest/tests/test_connect_array_fixed_outdegree.py | Python | gpl-2.0 | 3,336 | [
"NEURON"
] | f97d311440df882d3d68030b886946530606fcb49fe686505076d40d1b096e57 |
# -*- coding: utf-8 -*-
import moose
print( 'Using moose from %s. VERSION=%s' % (moose.__file__, moose.__version__) )
import numpy as np
import chan_proto
import param_chan
# --- Simulation and dendrite morphology parameters ---
t_stop = 10                      # total simulated time
dend_diameter = 2.2627398e-6     # dendrite compartment diameter
dend_length = 1.131369936e-6     # dendrite compartment length
Cm = 4.021231698e-12             # membrane capacitance
Rm = 1865100032                  # membrane resistance
Em = -0.07100000232              # leak reversal potential
Vm_0 = -0.0705                   # initial membrane potential
dt = 50e-6                       # integration time step
spines_no = 0                    # number of spines (unused here)
# --- Ca diffusion shell parameters ---
difshell_no = 2                  # number of concentric diffusion shells
difshell_name = "Ca_shell"       # path name prefix for DifShell objects
Ca_basal = 50e-6                 # resting Ca concentration
Ca_initial = Ca_basal*200        # elevated initial Ca (used when gbar == 0)
dca = 200.0e-12                  # Ca diffusion coefficient
# --- Ca buffer parameters ---
difbuff_no = 1                   # number of diffusion buffers per shell
difbuff_name = "Buff"            # path name prefix for DifBuffer objects
btotal = 80.0e-3                 # total buffer concentration
kf = 0.028e6                     # forward (binding) rate
kb = 19.6                        # backward (unbinding) rate
d = 66e-12                       # buffer diffusion coefficient
inject = 0.1e-9                  # injected current amplitude
gbar = 1                         # channel conductance density scale
# MMpump (Michaelis-Menten membrane Ca pump)
km = 0.3e-3                      # pump Michaelis constant
kcat = 85e-22                    # pump maximum rate
pumps = 1                        # whether to include the pump
def linoid(x, param):
    """Evaluate a linoid (linear-over-exponential) rate function at ``x``.

    ``param`` is a 5-element sequence ``[A, B, C, D, F]`` and the result is
    ``(A + B*x) / (C + exp((x + D) / F))``.

    BUG FIX: the original body referenced an undefined global ``V`` instead
    of the ``x`` argument, so every call raised NameError (or silently read
    a stale global if one happened to exist).
    """
    den = param[2] + np.exp((x + param[3]) / param[4])
    nom = param[0] + param[1] * x
    return nom / den
def add_difshell(comp, i, shell_thickness, shell_radius):
    """Create diffusion shell ``i`` under compartment ``comp``.

    The shell is a cylindrical annulus (shapeMode 0) with the given
    thickness and outer radius, spanning the compartment length. Its
    equilibrium Ca concentration is Ca_basal.
    """
    shell_path = (comp.path.split('[')[0] + '/' + comp.name + '/'
                  + difshell_name + str(i))
    shell = moose.DifShell(shell_path)
    # NOTE: initial concentration is left at the object default here;
    # the __main__ script sets shell.C explicitly when gbar == 0.
    shell.Ceq = Ca_basal
    shell.D = dca
    shell.valence = 2
    shell.leak = 0
    shell.shapeMode = 0
    shell.length = comp.length
    shell.diameter = 2 * shell_radius
    shell.thickness = shell_thickness
    return shell
def add_difbuffer_to_dishell(comp, i, j, shell_thickness, shell_radius):
    """Create diffusion buffer ``j`` attached to diffusion shell ``i``.

    The buffer shares the geometry of its shell and carries the module
    level kinetic parameters (btotal, kf, kb, d).
    """
    buff_path = (comp.path.split('[')[0] + '/' + comp.name + '/'
                 + difshell_name + str(i) + '_' + difbuff_name + str(j))
    buff = moose.DifBuffer(buff_path)
    buff.bTot = btotal
    buff.shapeMode = 0
    buff.length = comp.length
    buff.diameter = 2 * shell_radius
    buff.thickness = shell_thickness
    buff.kf = kf
    buff.kb = kb
    buff.D = d
    return buff
def add_difshells_and_buffers(comp,difshell_no,difbuff_no):
    """Build `difshell_no` concentric diffusion shells inside `comp`, each
    with `difbuff_no` attached diffusion buffers, and wire the radial flux
    messages between neighbouring shells (and between matching buffers).

    Returns the pair (difshell, difbuffer): the list of shells (outermost
    first) and a parallel list of per-shell buffer lists (empty lists when
    difbuff_no == 0).
    """
    if difshell_no < 1:
        # Nothing to build -> empty shell and buffer lists.
        return [], []
    difshell = []
    # Shells partition the compartment radius into equal-thickness annuli.
    shell_thickness = comp.diameter / difshell_no / 2.
    difbuffer = []
    for i in range(difshell_no):
        # Outer radius shrinks by one thickness per shell (outermost first).
        shell_radius = comp.diameter / 2 - i * shell_thickness
        dif = add_difshell(comp, i, shell_thickness, shell_radius)
        difshell.append(dif)
        if i > 0:
            # Radial Ca flux between this shell and the one just outside it.
            moose.connect(
                difshell[i - 1], "outerDifSourceOut", difshell[i], "fluxFromOut")
            moose.connect(difshell[i], "innerDifSourceOut",
                          difshell[i - 1], "fluxFromIn")
        if difbuff_no > 0:
            difbuffer.append([])
            for j in range(difbuff_no):
                buf = add_difbuffer_to_dishell(
                    comp, i, j, shell_thickness, shell_radius)
                difbuffer[i].append(buf)
                # Buffer exchanges concentration/reaction with its own shell.
                moose.connect(
                    difshell[i], "concentrationOut", buf, "concentration")
                moose.connect(buf, "reactionOut", difshell[i], "reaction")
                if i > 0:
                    # Bound buffer also diffuses radially between shells.
                    moose.connect(
                        difbuffer[i - 1][j], "outerDifSourceOut", difbuffer[i][j], "fluxFromOut")
                    moose.connect(difbuffer[i][j], "innerDifSourceOut", difbuffer[
                        i - 1][j], "fluxFromIn")
    return difshell, difbuffer
def addOneChan(chanpath, gbar, comp):
    """Copy channel prototype ``chanpath`` from /library into ``comp``.

    The channel's maximum conductance is the density ``gbar`` scaled by the
    compartment's cylindrical membrane area.
    """
    membrane_area = np.pi * comp.length * comp.diameter
    prototype = moose.element('/library/' + chanpath)
    chan = moose.copy(prototype, comp, chanpath)
    chan.Gbar = gbar * membrane_area
    # If we are using GHK AND it is a calcium channel, connect it to GHK
    moose.connect(comp, 'VmOut', chan, 'Vm')
    moose.connect(chan, "channelOut", comp, "handleChannel")
    return chan
if __name__ == '__main__':
    # Build a single-compartment dendrite with an L-type Ca channel (CaL12),
    # Ca diffusion shells/buffers and an MM pump, run the simulation and
    # check summary statistics of the recorded traces.
    lib = moose.Neutral('/library')
    for tick in range(0, 7):
        moose.setClock(tick, dt)
    moose.setClock(8, 0.005)  # set output clock
    model = moose.Neutral('/model')
    dend = moose.Compartment('/model/dend')
    pulse = moose.PulseGen('/model/pulse')
    data = moose.Neutral('/data')
    # Recording tables: membrane potential, channel conductance and current.
    vmtab = moose.Table('/data/dend_Vm')
    gktab = moose.Table('/data/CaT_Gk')
    iktab = moose.Table('/data/CaT_Ik')
    # Passive electrical properties of the compartment.
    dend.Cm = Cm
    dend.Rm = Rm
    dend.Em = Em
    dend.initVm = Vm_0
    dend.diameter = dend_diameter
    dend.length = dend_length
    # Single current pulse; second delay pushed out so it never fires.
    pulse.delay[0] = 8.
    pulse.width[0] = 500e-3
    pulse.level[0] = inject
    pulse.delay[1] = 1e9
    # Build the CaL12 prototype in /library and instantiate it in the dendrite.
    chan = chan_proto.chan_proto('/library/CaL12',param_chan.Cal)
    m = moose.connect(pulse, 'output', dend, 'injectMsg')
    moose.connect(vmtab, 'requestOut', dend, 'getVm')
    chan = addOneChan('CaL12', gbar, dend)
    moose.connect(gktab, 'requestOut', chan, 'getGk')
    moose.connect(iktab, 'requestOut', chan, 'getIk')
    diftab = []
    buftab = []
    difs, difb = add_difshells_and_buffers(dend,difshell_no,difbuff_no)
    if pumps:
        # Michaelis-Menten pump extrudes Ca from the outermost shell.
        pump = moose.MMPump('/model/dend/pump')
        pump.Vmax = kcat
        pump.Kd = km
        moose.connect(pump, "PumpOut", difs[0], "mmPump")
    if difs:
        # Channel Ca current feeds the outermost shell; the shell's
        # concentration in turn feeds back into the channel.
        moose.connect(chan, "IkOut", difs[0], "influx")
        moose.connect(difs[0], 'concentrationOut', chan, 'concen')
        for i, dif in enumerate(difs):
            res_dif = moose.Table('/data/' + difshell_name + str(i))
            diftab.append(res_dif)
            moose.connect(diftab[i], 'requestOut', dif, 'getC')
            if (difbuff_no):
                buftab.append([])
                for j, buf in enumerate(difb[i]):
                    res_buf = moose.Table(
                        '/data/' + difshell_name + str(i) + '_' + difbuff_name + str(j))
                    buftab[i].append(res_buf)
                    moose.connect(buftab[i][j], 'requestOut', buf, 'getBBound')
    moose.reinit()
    if not gbar:
        # No channel conductance: start from an imposed Ca transient in the
        # outermost shell, all buffer initially free.
        for i, dif in enumerate(difs):
            if i == 0:
                dif.C = Ca_initial
            else:
                dif.C = 0
            for j, dbuf in enumerate(difb[i]):
                dbuf.bFree = dbuf.bTot
    moose.start(t_stop)
    t = np.linspace(0, t_stop, len(vmtab.vector))
    fname = 'moose_results_difshell_no_' + str(difshell_no) + '_difbuffer_no_' + str(
        difbuff_no) + '_pump_' + str(pumps) + '_gbar_' + str(gbar) + '.txt'
    header = 'time Vm Ik Gk'
    # Assemble one column per recorded quantity: t, Vm, Ik, Gk, then one
    # column per shell followed by one per buffer of that shell.
    number = 4 + difshell_no * (difbuff_no + 1)
    res = np.zeros((len(t), number))
    res[:, 0] = t
    res[:, 1] = vmtab.vector
    res[:, 2] = iktab.vector
    res[:, 3] = gktab.vector
    for i in range(difshell_no):
        header += ' difshell_' + str(i)
        res[:, 4 + i * (difbuff_no + 1)] = diftab[i].vector
        if (difbuff_no):
            for j, buf in enumerate(buftab[i]):
                res[:, 4 + i * (difbuff_no + 1) + j + 1] = buf.vector
                header += ' difshell_' + str(i) + '_difbuff_' + str(j)
    # Regression check against previously recorded reference statistics.
    assert np.isclose(res.mean(), 0.60599, atol=1e-5), \
        'Expected 0.60599, got %g' % np.mean(res)
    assert np.isclose(np.std(res), 1.9505, atol=1e-3), \
        'Expected 1.9505 got %g' % np.std(res)
    print( 'All done' )
| BhallaLab/moose | moose-core/tests/python/test_difshells.py | Python | gpl-3.0 | 6,706 | [
"MOOSE"
] | a24d7fbfb7280e339a233e0396e00ec31a04d72e6c1d5894cf86f2f178b0803c |
#!/usr/bin/env python
import os
import sys
# Prefer setuptools; fall back to plain distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: `python setup.py publish` builds a source dist and uploads it.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# Long description shown on PyPI: README + docs pointer + changelog.
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://beammaster.rtfd.org."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='beammaster',
    version='0.1.0',
    description='Measures the beam waist of a gaussian laserbeam with a webcam',
    long_description=readme + '\n\n' + doclink + '\n\n' + history,
    author='Nicolas Zuber',
    author_email='nico.zuber@web.de',
    url='https://github.com/tripiti/beammaster',
    packages=[
        'beammaster',
    ],
    package_dir={'beammaster': 'beammaster'},
    include_package_data=True,
    install_requires=[
    ],
    license='MIT',
    zip_safe=False,
    keywords='beammaster',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
| tripiti/beammaster | setup.py | Python | mit | 1,486 | [
"Gaussian"
] | be8b7172c4529d3b8ad8d110cc8c3beb18d9b2f0b0957f3930658d1fd2c86343 |
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
from menpo.base import MenpoMissingDependencyError
class Menpo3dMissingError(MenpoMissingDependencyError):
    r"""
    Raised when a 3D visualisation method is imported but the optional
    'menpo3d' package is not installed.
    """

    def __init__(self, actual_missing_import_name):
        super(Menpo3dMissingError, self).__init__(actual_missing_import_name)
        extra = (
            "\nThis import is required in order to use the 'menpo3d' package"
        )
        self.message += extra
class Renderer(object):
    r"""
    Abstract base for framework-specific rendering backends.

    Renderers follow a stateful figure pattern: a rendering call either
    targets an existing figure (via ``figure_id``), requests a brand new
    figure (``new_figure=True``), or relies on the backend's current
    default figure (neither given). Supplying both a ``figure_id`` and
    ``new_figure=True`` is contradictory and therefore rejected.

    Parameters
    ----------
    figure_id : `object`
        Identifier of an existing figure in the backend (`str`, `int`,
        `float`, ...), or ``None``.
    new_figure : `bool`
        If ``True``, ask the backend for a fresh figure.

    Raises
    ------
    ValueError
        It is not valid to provide a figure id AND request a new figure to
        be rendered on.
    """

    def __init__(self, figure_id, new_figure):
        if new_figure and figure_id is not None:
            raise ValueError(
                "Conflicting arguments. figure_id cannot be "
                "specified if the new_figure flag is True"
            )
        self.figure_id = figure_id
        self.new_figure = new_figure
        self.figure = self.get_figure()

    def render(self, **kwargs):
        r"""
        Hook implementing the actual drawing for a concrete object type.

        Parameters
        ----------
        kwargs : `dict`
            Forwarded to the concrete rendering backend.

        Returns
        -------
        viewer : :map:`Renderer`
            Pointer to `self`.
        """
        pass

    def get_figure(self):
        r"""
        Hook returning the figure to draw on. Implementations should also
        set the correct ``figure_id`` for the figure.

        Returns
        -------
        figure : `object`
            The backend figure object.
        """
        pass

    def save_figure(self, **kwargs):
        r"""
        Hook persisting the figure of the current ``figure_id`` to file.

        Parameters
        ----------
        kwargs : `dict`
            Saving options for the backend.
        """
        pass

    def clear_figure(self):
        r"""
        Hook wiping the contents of the current figure.
        """
        pass

    def force_draw(self):
        r"""
        Hook forcing the backend to repaint the current figure.
        """
        pass
class viewwrapper(object):
    r"""
    Descriptor that dispatches ``view``-style methods on dimensionality.

    Rather than maintaining separate 2D/3D subclasses purely for viewing,
    this descriptor inspects ``instance.n_dims`` at attribute-access time
    and hands back the matching ``_<name>_2d`` / ``_<name>_3d`` bound
    method. Any other dimensionality yields a callable that raises when
    invoked. This keeps a single ``view`` method per class while still
    routing to the correct implementation.
    """

    def __init__(self, wrapped_func):
        base_name = wrapped_func.__name__
        self._2d_fname = "_{}_2d".format(base_name)
        self._3d_fname = "_{}_3d".format(base_name)

    def __get__(self, instance, instancetype):
        # Route on the instance's dimensionality.
        dispatch = {2: self._2d_fname, 3: self._3d_fname}
        target = dispatch.get(instance.n_dims)
        if target is not None:
            return getattr(instance, target)

        def raise_not_supported(*args, **kwargs):
            r"""
            Viewing of objects with greater than 3 dimensions is not
            currently possible.
            """
            raise ValueError(
                "Viewing of objects with greater than 3 "
                "dimensions is not currently possible."
            )

        return raise_not_supported
class Viewable(object):
    r"""
    Mixin for objects able to visualise themselves. The class is expected
    to expose ``n_dims`` so that :map:`viewwrapper` can route ``view`` to
    the appropriate dimensional implementation.
    """

    @viewwrapper
    def view(self):
        r"""
        Dimension-dispatched viewing entry point; see the
        :map:`viewwrapper` documentation for how the concrete method is
        selected.
        """
        pass

    def _view_3d(self, **kwargs):
        raise NotImplementedError("3D Viewing is not supported.")

    def _view_2d(self, **kwargs):
        raise NotImplementedError("2D Viewing is not supported.")
class LandmarkableViewable(object):
    r"""
    Mixin combining :map:`Landmarkable` and :map:`Viewable` behaviour,
    offering a single helper to draw landmarks and `self` on one figure.
    """

    @viewwrapper
    def view_landmarks(self, **kwargs):
        pass

    def _view_landmarks_3d(self, **kwargs):
        raise NotImplementedError("3D Landmark Viewing is not supported.")

    def _view_landmarks_2d(self, **kwargs):
        raise NotImplementedError("2D Landmark Viewing is not supported.")
from menpo.visualize.viewmatplotlib import (
MatplotlibImageViewer2d,
MatplotlibImageSubplotsViewer2d,
MatplotlibLandmarkViewer2d,
MatplotlibAlignmentViewer2d,
MatplotlibGraphPlotter,
MatplotlibMultiImageViewer2d,
MatplotlibMultiImageSubplotsViewer2d,
MatplotlibPointGraphViewer2d,
)
# Default renderer implementations — currently the matplotlib backends.
# Swapping the rendering backend only requires rebinding these aliases.
PointGraphViewer2d = MatplotlibPointGraphViewer2d
LandmarkViewer2d = MatplotlibLandmarkViewer2d
ImageViewer2d = MatplotlibImageViewer2d
ImageSubplotsViewer2d = MatplotlibImageSubplotsViewer2d
AlignmentViewer2d = MatplotlibAlignmentViewer2d
GraphPlotter = MatplotlibGraphPlotter
MultiImageViewer2d = MatplotlibMultiImageViewer2d
MultiImageSubplotsViewer2d = MatplotlibMultiImageSubplotsViewer2d
class ImageViewer(object):
    r"""
    Base :map:`Image` viewer that abstracts away dimensionality. It can
    visualize multiple channels of an image in subplots.

    Parameters
    ----------
    figure_id : `object`
        A figure id. Could be any valid object that identifies a figure in
        a given framework (`str`, `int`, `float`, etc.).
    new_figure : `bool`
        Whether the rendering engine should create a new figure.
    dimensions : {``2``, ``3``} `int`
        The number of dimensions in the image.
    pixels : ``(N, D)`` `ndarray`
        The pixels to render, channels-first.
    channels : `int` or `list` or ``'all'`` or `None`
        A specific selection of channels to render (see
        :meth:`_parse_channels`).
    mask : ``(N, D)`` `ndarray`
        A `bool` mask applied to the image; points outside the mask are
        pushed above the pixel maximum so they render white.
    """

    def __init__(
        self, figure_id, new_figure, dimensions, pixels, channels=None, mask=None
    ):
        if pixels.ndim == 3 and pixels.shape[0] == 3:
            # Probably an RGB image, so make sure the pixels are clipped to
            # a renderable range before viewing.
            from menpo.image import Image

            pixels = Image(pixels, copy=False).clip_pixels().pixels
        else:
            pixels = pixels.copy()
        self.figure_id = figure_id
        self.new_figure = new_figure
        self.dimensions = dimensions
        pixels, self.use_subplots = self._parse_channels(channels, pixels)
        self.pixels = self._masked_pixels(pixels, mask)
        self._flip_image_channels()

    def _flip_image_channels(self):
        # Move channels from the front to the back for rendering backends.
        if self.pixels.ndim != 3:
            return
        from menpo.image.base import channels_to_back

        self.pixels = channels_to_back(self.pixels)

    def _parse_channels(self, channels, pixels):
        r"""
        Resolve the ``channels`` argument against the given pixels.

        ``channels`` may be an `int`, a `list` of ints, ``'all'`` or
        ``None``. ``None`` keeps greyscale/RGB images whole (no subplots)
        and shows everything else channel-per-subplot; ``'all'`` always
        uses subplots; a single selected channel is shown alone; multiple
        selected channels are shown in subplots.

        Returns
        -------
        pixels : ``(N, D)`` `ndarray`
            The (possibly channel-sliced) pixels to visualize.
        use_subplots : `bool`
            Whether to visualize using subplots.
        """
        n_channels = pixels.shape[0]
        if channels is None:
            if n_channels == 1:
                return pixels[0, ...], False
            if n_channels == 3:
                return pixels, False
            return pixels, True
        if channels == "all":
            return pixels, True
        if isinstance(channels, Iterable):
            if len(channels) == 1:
                return pixels[channels[0], ...], False
            return pixels[channels, ...], True
        return pixels[channels, ...], False

    def _masked_pixels(self, pixels, mask):
        r"""
        Apply a `bool` mask to the pixels in place.

        Out-of-mask values are set just above the pixel maximum so that
        they are rendered white. With ``mask=None`` the pixels are
        returned untouched.
        """
        if mask is None:
            return pixels
        nanmax = np.nanmax(pixels)
        pixels[..., ~mask] = nanmax + (0.01 * nanmax)
        return pixels

    def render(self, **kwargs):
        r"""
        Select the correct type of image viewer for the given image
        dimensionality and render with it.

        Parameters
        ----------
        kwargs : `dict`
            Passed through to the image viewer.

        Returns
        -------
        viewer : :map:`Renderer`
            The rendering object.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        """
        if self.dimensions != 2:
            raise ValueError("Only 2D images are currently supported")
        viewer_cls = ImageSubplotsViewer2d if self.use_subplots else ImageViewer2d
        return viewer_cls(self.figure_id, self.new_figure, self.pixels).render(
            **kwargs
        )
def view_image_landmarks(
    image,
    channels,
    masked,
    group,
    with_labels,
    without_labels,
    figure_id,
    new_figure,
    interpolation,
    cmap_name,
    alpha,
    render_lines,
    line_colour,
    line_style,
    line_width,
    render_markers,
    marker_style,
    marker_size,
    marker_face_colour,
    marker_edge_colour,
    marker_edge_width,
    render_numbering,
    numbers_horizontal_align,
    numbers_vertical_align,
    numbers_font_name,
    numbers_font_size,
    numbers_font_style,
    numbers_font_weight,
    numbers_font_colour,
    render_legend,
    legend_title,
    legend_font_name,
    legend_font_style,
    legend_font_size,
    legend_font_weight,
    legend_marker_scale,
    legend_location,
    legend_bbox_to_anchor,
    legend_border_axes_pad,
    legend_n_columns,
    legend_horizontal_spacing,
    legend_vertical_spacing,
    legend_border,
    legend_border_padding,
    legend_shadow,
    legend_rounded_corners,
    render_axes,
    axes_font_name,
    axes_font_size,
    axes_font_style,
    axes_font_weight,
    axes_x_limits,
    axes_y_limits,
    axes_x_ticks,
    axes_y_ticks,
    figure_size,
):
    r"""
    Shared helper for viewing an image together with its landmarks.

    Viewing images and masked images is identical apart from the mask, so
    the class check happens here and the rest of the logic is common.

    See the documentation for _view_2d on Image or _view_2d on MaskedImage
    for information about the parameters.
    """
    import matplotlib.pyplot as plt

    if not image.has_landmarks:
        raise ValueError(
            "Image does not have landmarks attached, unable " "to view landmarks."
        )

    # When no explicit limits are requested, clamp both the image and the
    # landmark axes to the image extent.
    if axes_x_limits is None:
        image_x_limits = lm_x_limits = [0, image.width - 1]
    else:
        image_x_limits, lm_x_limits = None, axes_x_limits
    if axes_y_limits is None:
        image_y_limits = lm_y_limits = [0, image.height - 1]
    else:
        image_y_limits, lm_y_limits = None, axes_y_limits

    # Render the image itself — only MaskedImage understands `masked`.
    from menpo.image import MaskedImage

    image_kwargs = dict(
        figure_id=figure_id,
        new_figure=new_figure,
        channels=channels,
        interpolation=interpolation,
        cmap_name=cmap_name,
        alpha=alpha,
        render_axes=render_axes,
        axes_x_limits=image_x_limits,
        axes_y_limits=image_y_limits,
    )
    if isinstance(image, MaskedImage):
        image_kwargs["masked"] = masked
    self_view = image.view(**image_kwargs)

    # Resolve the group label so that the legend reads correctly.
    if group is None and image.landmarks.n_groups == 1:
        group = image.landmarks.group_labels[0]

    lm_kwargs = dict(
        with_labels=with_labels,
        without_labels=without_labels,
        group=group,
        figure_id=self_view.figure_id,
        new_figure=False,
        image_view=True,
        render_lines=render_lines,
        line_colour=line_colour,
        line_style=line_style,
        line_width=line_width,
        render_markers=render_markers,
        marker_style=marker_style,
        marker_size=marker_size,
        marker_face_colour=marker_face_colour,
        marker_edge_colour=marker_edge_colour,
        marker_edge_width=marker_edge_width,
        render_numbering=render_numbering,
        numbers_horizontal_align=numbers_horizontal_align,
        numbers_vertical_align=numbers_vertical_align,
        numbers_font_name=numbers_font_name,
        numbers_font_size=numbers_font_size,
        numbers_font_style=numbers_font_style,
        numbers_font_weight=numbers_font_weight,
        numbers_font_colour=numbers_font_colour,
        legend_title=legend_title,
        legend_font_name=legend_font_name,
        legend_font_style=legend_font_style,
        legend_font_size=legend_font_size,
        legend_font_weight=legend_font_weight,
        legend_marker_scale=legend_marker_scale,
        legend_location=legend_location,
        legend_bbox_to_anchor=legend_bbox_to_anchor,
        legend_border_axes_pad=legend_border_axes_pad,
        legend_n_columns=legend_n_columns,
        legend_horizontal_spacing=legend_horizontal_spacing,
        legend_vertical_spacing=legend_vertical_spacing,
        legend_border=legend_border,
        legend_border_padding=legend_border_padding,
        legend_shadow=legend_shadow,
        legend_rounded_corners=legend_rounded_corners,
        render_axes=render_axes,
        axes_font_name=axes_font_name,
        axes_font_size=axes_font_size,
        axes_font_style=axes_font_style,
        axes_font_weight=axes_font_weight,
        axes_x_limits=lm_x_limits,
        axes_y_limits=lm_y_limits,
        axes_x_ticks=axes_x_ticks,
        axes_y_ticks=axes_y_ticks,
        figure_size=figure_size,
    )

    # Draw the landmarks on every axis; the legend only on the last one.
    landmark_view = None
    last_axis = len(self_view.axes_list) - 1
    for i, ax in enumerate(self_view.axes_list):
        plt.sca(ax)
        landmark_view = image.landmarks[group].view(
            render_legend=(render_legend if i == last_axis else False), **lm_kwargs
        )

    return landmark_view
class MultipleImageViewer(ImageViewer):
    r"""
    Viewer for a sequence of images sharing the same figure state.

    Parameters mirror :map:`ImageViewer`, except that ``pixels_list`` is a
    `list` of pixel arrays; the first entry seeds the base-class state and
    every entry is channel-parsed and masked identically.
    """

    def __init__(
        self, figure_id, new_figure, dimensions, pixels_list, channels=None, mask=None
    ):
        super(MultipleImageViewer, self).__init__(
            figure_id,
            new_figure,
            dimensions,
            pixels_list[0],
            channels=channels,
            mask=mask,
        )
        pixels_list = [self._parse_channels(channels, p)[0] for p in pixels_list]
        self.pixels_list = [self._masked_pixels(p, mask) for p in pixels_list]

    def render(self, **kwargs):
        r"""
        Render the image list with the appropriate 2D multi-image viewer.

        Returns
        -------
        viewer : :map:`Renderer`
            The rendering object.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        """
        if self.dimensions == 2:
            if self.use_subplots:
                # BUG FIX: this branch previously failed to return the
                # renderer, unlike the non-subplot branch below and the
                # parent class' render(), so callers silently received None.
                return MultiImageSubplotsViewer2d(
                    self.figure_id, self.new_figure, self.pixels_list
                ).render(**kwargs)
            else:
                return MultiImageViewer2d(
                    self.figure_id, self.new_figure, self.pixels_list
                ).render(**kwargs)
        else:
            raise ValueError("Only 2D images are currently supported")
def plot_curve(
    x_axis,
    y_axis,
    figure_id=None,
    new_figure=True,
    legend_entries=None,
    title="",
    x_label="",
    y_label="",
    axes_x_limits=0.0,
    axes_y_limits=None,
    axes_x_ticks=None,
    axes_y_ticks=None,
    render_lines=True,
    line_colour=None,
    line_style="-",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=5,
    marker_face_colour=None,
    marker_edge_colour="k",
    marker_edge_width=1.0,
    render_legend=True,
    legend_title="",
    legend_font_name="sans-serif",
    legend_font_style="normal",
    legend_font_size=10,
    legend_font_weight="normal",
    legend_marker_scale=None,
    legend_location=2,
    legend_bbox_to_anchor=(1.05, 1.0),
    legend_border_axes_pad=None,
    legend_n_columns=1,
    legend_horizontal_spacing=None,
    legend_vertical_spacing=None,
    legend_border=True,
    legend_border_padding=None,
    legend_shadow=False,
    legend_rounded_corners=False,
    render_axes=True,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    figure_size=(7, 7),
    render_grid=True,
    grid_line_style="--",
    grid_line_width=1,
):
    r"""
    Plot one or more curves against a common horizontal axis on a single
    figure.

    Parameters
    ----------
    x_axis : `list` or `array`
        The values of the horizontal axis, shared by all curves.
    y_axis : `list` of `lists` or `arrays`
        The vertical-axis values of each curve. A single curve may be
        passed directly; it is wrapped in a list.
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    legend_entries : `list` of `str` or ``None``, optional
        One name per curve; must match the length of ``y_axis``. If
        ``None``, curves are named automatically.
    title, x_label, y_label : `str`, optional
        Figure title and axis labels.
    axes_x_limits, axes_y_limits : `float` or (`float`, `float`) or ``None``
        A `float` adds proportional padding around the curves; a pair sets
        explicit limits; ``None`` leaves the limits automatic.
    axes_x_ticks, axes_y_ticks : `list` or `tuple` or ``None``, optional
        Explicit axis tick positions.
    render_lines, line_colour, line_style, line_width
        Line styling — each may be a single value applied to all curves or
        a per-curve `list` the same length as ``y_axis``. A ``None`` colour
        is sampled from the jet colormap.
    render_markers, marker_style, marker_size, marker_face_colour,
    marker_edge_colour, marker_edge_width
        Marker styling, with the same scalar-or-per-curve convention.
    render_legend, legend_* : optional
        Legend visibility, title, font, marker scale, placement (location
        code / bbox anchor / padding), column layout, spacing and frame
        (border, padding, shadow, rounded corners).
    render_axes, axes_font_name, axes_font_size, axes_font_style,
    axes_font_weight : optional
        Axes visibility and font styling.
    figure_size : (`float`, `float`) or ``None``, optional
        The size of the figure in inches.
    render_grid, grid_line_style, grid_line_width : optional
        Grid visibility and line styling.

    Returns
    -------
    viewer : :map:`GraphPlotter`
        The viewer object.

    Raises
    ------
    ValueError
        legend_entries list has different length than y_axis list
    """
    from menpo.visualize import GraphPlotter

    # Normalise a single curve into the list-of-curves form.
    if not isinstance(y_axis, list):
        y_axis = [y_axis]

    if legend_entries is not None and len(legend_entries) != len(y_axis):
        raise ValueError("legend_entries list has different length than y_axis " "list")

    plotter = GraphPlotter(
        figure_id=figure_id,
        new_figure=new_figure,
        x_axis=x_axis,
        y_axis=y_axis,
        title=title,
        legend_entries=legend_entries,
        x_label=x_label,
        y_label=y_label,
        x_axis_limits=axes_x_limits,
        y_axis_limits=axes_y_limits,
        x_axis_ticks=axes_x_ticks,
        y_axis_ticks=axes_y_ticks,
    )
    return plotter.render(
        render_lines=render_lines,
        line_colour=line_colour,
        line_style=line_style,
        line_width=line_width,
        render_markers=render_markers,
        marker_style=marker_style,
        marker_size=marker_size,
        marker_face_colour=marker_face_colour,
        marker_edge_colour=marker_edge_colour,
        marker_edge_width=marker_edge_width,
        render_legend=render_legend,
        legend_title=legend_title,
        legend_font_name=legend_font_name,
        legend_font_style=legend_font_style,
        legend_font_size=legend_font_size,
        legend_font_weight=legend_font_weight,
        legend_marker_scale=legend_marker_scale,
        legend_location=legend_location,
        legend_bbox_to_anchor=legend_bbox_to_anchor,
        legend_border_axes_pad=legend_border_axes_pad,
        legend_n_columns=legend_n_columns,
        legend_horizontal_spacing=legend_horizontal_spacing,
        legend_vertical_spacing=legend_vertical_spacing,
        legend_border=legend_border,
        legend_border_padding=legend_border_padding,
        legend_shadow=legend_shadow,
        legend_rounded_corners=legend_rounded_corners,
        render_axes=render_axes,
        axes_font_name=axes_font_name,
        axes_font_size=axes_font_size,
        axes_font_style=axes_font_style,
        axes_font_weight=axes_font_weight,
        figure_size=figure_size,
        render_grid=render_grid,
        grid_line_style=grid_line_style,
        grid_line_width=grid_line_width,
    )
def render_rectangles_around_patches(
    centers,
    patch_shape,
    axes=None,
    image_view=True,
    line_colour="r",
    line_style="-",
    line_width=1,
    interpolation="none",
):
    r"""
    Draw one rectangle of size `patch_shape` centred on every point of
    `centers`, on the given (or current) matplotlib axes.

    Parameters
    ----------
    centers : :map:`PointCloud`
        The centers around which to draw the rectangles.
    patch_shape : `tuple` or `ndarray`, optional
        The size of the rectangle to render.
    axes : `matplotlib.pyplot.axes` object or ``None``, optional
        The axes object on which to render. If ``None``, the current axes
        are used.
    image_view : `bool`, optional
        If ``True`` the rectangles will be viewed as if they are in the image
        coordinate system (row/column swapped relative to x/y).
    line_colour : See Below, optional
        The colour of the lines.
        Example options::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    line_style : ``{-, --, -., :}``, optional
        The style of the lines.
    line_width : `float`, optional
        The width of the lines.
    interpolation : See Below, optional
        Must match the interpolation used when the patches image was
        rendered, otherwise the rectangles will not sit exactly on the
        patch borders. Example options ::

            {none, nearest, bilinear, bicubic, spline16, spline36,
             hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
             bessel, mitchell, sinc, lanczos}
    """
    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle

    # Map the abbreviated line-style tokens to matplotlib's long names.
    style_names = {"-": "solid", "--": "dashed", "-.": "dashdot", ":": "dotted"}
    if axes is None:
        axes = plt.gca()
    # Half extents, needed to locate each rectangle's lower-left corner.
    half_shape = [patch_shape[0] / 2, patch_shape[1] / 2]
    # In image view the first point coordinate is the row (y) and the
    # second the column (x); otherwise they map directly to x and y.
    x_dim, y_dim = (1, 0) if image_view else (0, 1)
    # Offsets that keep the rectangle tight against the rendered patch for
    # the given imshow interpolation mode.
    if interpolation == "none":
        start_offset, end_offset = 0.5, 0.0
    else:
        start_offset, end_offset = 1.0, 0.5
    for centre in centers:
        left = np.intp(centre[x_dim] - half_shape[x_dim]) - start_offset
        bottom = np.intp(centre[y_dim] - half_shape[y_dim]) - start_offset
        axes.add_patch(
            Rectangle(
                (left, bottom),
                patch_shape[x_dim] + end_offset,
                patch_shape[y_dim] + end_offset,
                fill=False,
                edgecolor=line_colour,
                linewidth=line_width,
                linestyle=style_names[line_style],
            )
        )
def view_patches(
    patches,
    patch_centers,
    patches_indices=None,
    offset_index=None,
    figure_id=None,
    new_figure=False,
    background="white",
    render_patches=True,
    channels=None,
    interpolation="none",
    cmap_name=None,
    alpha=1.0,
    render_patches_bboxes=True,
    bboxes_line_colour="r",
    bboxes_line_style="-",
    bboxes_line_width=1,
    render_centers=True,
    render_lines=True,
    line_colour=None,
    line_style="-",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=5,
    marker_face_colour=None,
    marker_edge_colour=None,
    marker_edge_width=1.0,
    render_numbering=False,
    numbers_horizontal_align="center",
    numbers_vertical_align="bottom",
    numbers_font_name="sans-serif",
    numbers_font_size=10,
    numbers_font_style="normal",
    numbers_font_weight="normal",
    numbers_font_colour="k",
    render_axes=False,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    axes_x_limits=None,
    axes_y_limits=None,
    axes_x_ticks=None,
    axes_y_ticks=None,
    figure_size=(7, 7),
):
    r"""
    Method that renders the provided `patches` on a canvas. The user can
    choose whether to render the patch centers (`render_centers`) as well as
    rectangle boundaries around the patches (`render_patches_bboxes`).

    The patches argument can have any of the two formats that are returned
    from the `extract_patches()` and `extract_patches_around_landmarks()`
    methods of the :map:`Image` class. Specifically it can be:

        1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
        2. `list` of ``n_center * n_offset`` :map:`Image` objects

    Parameters
    ----------
    patches : `ndarray` or `list`
        The values of the patches. It can have any of the two formats that are
        returned from the `extract_patches()` and
        `extract_patches_around_landmarks()` methods. Specifically, it can
        either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
        `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
    patch_centers : :map:`PointCloud`
        The centers around which to visualize the patches.
    patches_indices : `int` or `list` of `int` or ``None``, optional
        Defines the patches that will be visualized. If ``None``, then all the
        patches are selected.
    offset_index : `int` or ``None``, optional
        The offset index within the provided `patches` argument, thus the index
        of the second dimension from which to sample. If ``None``, then ``0`` is
        used.
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    background : ``{'black', 'white'}``, optional
        If ``'black'``, then the background is set equal to the minimum value
        of `patches`. If ``'white'``, then the background is set equal to the
        maximum value of `patches`.
    render_patches : `bool`, optional
        Flag that determines whether to render the patch values.
    channels : `int` or `list` of `int` or ``all`` or ``None``, optional
        If `int` or `list` of `int`, the specified channel(s) will be
        rendered. If ``all``, all the channels will be rendered in subplots.
        If ``None`` and the image is RGB, it will be rendered in RGB mode.
        If ``None`` and the image is not RGB, it is equivalent to ``all``.
    interpolation : See Below, optional
        The interpolation used to render the image. For example, if
        ``bilinear``, the image will be smooth and if ``nearest``, the
        image will be pixelated. Example options ::

            {none, nearest, bilinear, bicubic, spline16, spline36, hanning,
             hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
             mitchell, sinc, lanczos}

    cmap_name: `str`, optional,
        If ``None``, single channel and three channel images default
        to greyscale and rgb colormaps respectively.
    alpha : `float`, optional
        The alpha blending value, between 0 (transparent) and 1 (opaque).
    render_patches_bboxes : `bool`, optional
        Flag that determines whether to render the bounding box lines around the
        patches.
    bboxes_line_colour : See Below, optional
        The colour of the lines.
        Example options::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    bboxes_line_style : ``{-, --, -., :}``, optional
        The style of the lines.
    bboxes_line_width : `float`, optional
        The width of the lines.
    render_centers : `bool`, optional
        Flag that determines whether to render the patch centers.
    render_lines : `bool`, optional
        If ``True``, the edges will be rendered.
    line_colour : See Below, optional
        The colour of the lines.
        Example options::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    line_style : ``{-, --, -., :}``, optional
        The style of the lines.
    line_width : `float`, optional
        The width of the lines.
    render_markers : `bool`, optional
        If ``True``, the markers will be rendered.
    marker_style : See Below, optional
        The style of the markers. Example options ::

            {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}

    marker_size : `int`, optional
        The size of the markers in points.
    marker_face_colour : See Below, optional
        The face (filling) colour of the markers.
        Example options ::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    marker_edge_colour : See Below, optional
        The edge colour of the markers.
        Example options ::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    marker_edge_width : `float`, optional
        The width of the markers' edge.
    render_numbering : `bool`, optional
        If ``True``, the landmarks will be numbered.
    numbers_horizontal_align : ``{center, right, left}``, optional
        The horizontal alignment of the numbers' texts.
    numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
        The vertical alignment of the numbers' texts.
    numbers_font_name : See Below, optional
        The font of the numbers. Example options ::

            {serif, sans-serif, cursive, fantasy, monospace}

    numbers_font_size : `int`, optional
        The font size of the numbers.
    numbers_font_style : ``{normal, italic, oblique}``, optional
        The font style of the numbers.
    numbers_font_weight : See Below, optional
        The font weight of the numbers.
        Example options ::

            {ultralight, light, normal, regular, book, medium, roman,
             semibold, demibold, demi, bold, heavy, extra bold, black}

    numbers_font_colour : See Below, optional
        The font colour of the numbers.
        Example options ::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    render_axes : `bool`, optional
        If ``True``, the axes will be rendered.
    axes_font_name : See Below, optional
        The font of the axes. Example options ::

            {serif, sans-serif, cursive, fantasy, monospace}

    axes_font_size : `int`, optional
        The font size of the axes.
    axes_font_style : ``{normal, italic, oblique}``, optional
        The font style of the axes.
    axes_font_weight : See Below, optional
        The font weight of the axes.
        Example options ::

            {ultralight, light, normal, regular, book, medium, roman,
             semibold,demibold, demi, bold, heavy, extra bold, black}

    axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
        The limits of the x axis. If `float`, then it sets padding on the
        right and left of the shape as a percentage of the shape's width. If
        `tuple` or `list`, then it defines the axis limits. If ``None``, then the
        limits are set automatically.
    axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
        The limits of the y axis. If `float`, then it sets padding on the
        top and bottom of the shape as a percentage of the shape's height. If
        `tuple` or `list`, then it defines the axis limits. If ``None``, then the
        limits are set automatically.
    axes_x_ticks : `list` or `tuple` or ``None``, optional
        The ticks of the x axis.
    axes_y_ticks : `list` or `tuple` or ``None``, optional
        The ticks of the y axis.
    figure_size : (`float`, `float`) `tuple` or ``None`` optional
        The size of the figure in inches.

    Raises
    ------
    ValueError
        If `background` is neither ``'black'`` nor ``'white'``.

    Returns
    -------
    viewer : `ImageViewer`
        The image viewing object.
    """
    from menpo.image.base import (
        _convert_patches_list_to_single_array,
        _create_patches_image,
    )

    # If patches is a list, convert it to an array
    if isinstance(patches, list):
        patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points)
    # Create patches image
    if render_patches:
        patches_image = _create_patches_image(
            patches,
            patch_centers,
            patches_indices=patches_indices,
            offset_index=offset_index,
            background=background,
        )
    else:
        # The patch values are not rendered; build a flat 3-channel canvas of
        # the requested background colour with the same spatial layout.
        if background == "black":
            tmp_patches = np.zeros(
                (
                    patches.shape[0],
                    patches.shape[1],
                    3,
                    patches.shape[3],
                    patches.shape[4],
                )
            )
        elif background == "white":
            tmp_patches = np.ones(
                (
                    patches.shape[0],
                    patches.shape[1],
                    3,
                    patches.shape[3],
                    patches.shape[4],
                )
            )
        else:
            # Previously an unrecognised value crashed later with a NameError
            # on tmp_patches; fail early with a clear message instead.
            raise ValueError("background must be either 'black' or 'white'")
        patches_image = _create_patches_image(
            tmp_patches,
            patch_centers,
            patches_indices=patches_indices,
            offset_index=offset_index,
            background=background,
        )
        channels = None
    # Render patches image, with landmarks (centers) if requested
    if render_centers:
        patch_view = patches_image.view_landmarks(
            channels=channels,
            group="patch_centers",
            figure_id=figure_id,
            new_figure=new_figure,
            interpolation=interpolation,
            cmap_name=cmap_name,
            alpha=alpha,
            render_lines=render_lines,
            line_colour=line_colour,
            line_style=line_style,
            line_width=line_width,
            render_markers=render_markers,
            marker_style=marker_style,
            marker_size=marker_size,
            marker_face_colour=marker_face_colour,
            marker_edge_colour=marker_edge_colour,
            marker_edge_width=marker_edge_width,
            render_numbering=render_numbering,
            numbers_horizontal_align=numbers_horizontal_align,
            numbers_vertical_align=numbers_vertical_align,
            numbers_font_name=numbers_font_name,
            numbers_font_size=numbers_font_size,
            numbers_font_style=numbers_font_style,
            numbers_font_weight=numbers_font_weight,
            numbers_font_colour=numbers_font_colour,
            render_legend=False,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            axes_x_ticks=axes_x_ticks,
            axes_y_ticks=axes_y_ticks,
            figure_size=figure_size,
        )
    else:
        patch_view = patches_image.view(
            figure_id=figure_id,
            new_figure=new_figure,
            channels=channels,
            interpolation=interpolation,
            cmap_name=cmap_name,
            alpha=alpha,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            axes_x_ticks=axes_x_ticks,
            axes_y_ticks=axes_y_ticks,
            figure_size=figure_size,
        )
    # Render rectangles around patches
    if render_patches_bboxes:
        patch_shape = [patches.shape[3], patches.shape[4]]
        render_rectangles_around_patches(
            patches_image.landmarks["patch_centers"].points,
            patch_shape,
            image_view=True,
            line_colour=bboxes_line_colour,
            line_style=bboxes_line_style,
            line_width=bboxes_line_width,
            interpolation=interpolation,
        )
    return patch_view
def plot_gaussian_ellipses(
    covariances,
    means,
    n_std=2,
    render_colour_bar=True,
    colour_bar_label="Normalized Standard Deviation",
    colour_map="jet",
    figure_id=None,
    new_figure=False,
    image_view=True,
    line_colour="r",
    line_style="-",
    line_width=1.0,
    render_markers=True,
    marker_edge_colour="k",
    marker_face_colour="k",
    marker_edge_width=1.0,
    marker_size=5,
    marker_style="o",
    render_axes=False,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    crop_proportion=0.1,
    figure_size=(7, 7),
):
    r"""
    Method that renders the Gaussian ellipses that correspond to a set of
    covariance matrices and mean vectors. Naturally, this only works for
    2-dimensional random variables.

    Parameters
    ----------
    covariances : `list` of ``(2, 2)`` `ndarray`
        The covariance matrices that correspond to each ellipse.
    means : `list` of ``(2, )`` `ndarray`
        The mean vectors that correspond to each ellipse.
    n_std : `float`, optional
        This defines the size of the ellipses in terms of number of standard
        deviations.
    render_colour_bar : `bool`, optional
        If ``True``, then the ellipses will be coloured based on their
        normalized standard deviations and a colour bar will also appear on
        the side. If ``False``, then all the ellipses will have the same colour.
    colour_bar_label : `str`, optional
        The title of the colour bar. It only applies if `render_colour_bar`
        is ``True``.
    colour_map : `str`, optional
        A valid Matplotlib colour map. For more info, please refer to
        `matplotlib.cm`.
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    image_view : `bool`, optional
        If ``True`` the ellipses will be rendered in the image coordinates
        system.
    line_colour : See Below, optional
        The colour of the lines of the ellipses.
        Example options::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    line_style : ``{-, --, -., :}``, optional
        The style of the lines of the ellipses.
    line_width : `float`, optional
        The width of the lines of the ellipses.
    render_markers : `bool`, optional
        If ``True``, the centers of the ellipses will be rendered.
    marker_style : See Below, optional
        The style of the centers of the ellipses. Example options ::

            {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}

    marker_size : `int`, optional
        The size of the centers of the ellipses in points.
    marker_face_colour : See Below, optional
        The face (filling) colour of the centers of the ellipses.
        Example options ::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    marker_edge_colour : See Below, optional
        The edge colour of the centers of the ellipses.
        Example options ::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    marker_edge_width : `float`, optional
        The edge width of the centers of the ellipses.
    render_axes : `bool`, optional
        If ``True``, the axes will be rendered.
    axes_font_name : See Below, optional
        The font of the axes. Example options ::

            {serif, sans-serif, cursive, fantasy, monospace}

    axes_font_size : `int`, optional
        The font size of the axes.
    axes_font_style : ``{normal, italic, oblique}``, optional
        The font style of the axes.
    axes_font_weight : See Below, optional
        The font weight of the axes.
        Example options ::

            {ultralight, light, normal, regular, book, medium, roman,
             semibold,demibold, demi, bold, heavy, extra bold, black}

    crop_proportion : `float`, optional
        The proportion to be left around the centers' pointcloud.
    figure_size : (`float`, `float`) `tuple` or ``None`` optional
        The size of the figure in inches.
    """
    import matplotlib.pyplot as plt
    from matplotlib.patches import Ellipse
    import matplotlib.colors as colors
    import matplotlib.cm as cmx
    from matplotlib.font_manager import FontProperties
    from menpo.shape import PointCloud

    # Eigen-decomposition of a covariance with eigenvalues sorted in
    # descending order, so the first eigenvector is the major axis.
    def eigh_sorted(cov):
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    # get correct line style (Ellipse expects the long style names)
    if line_style == "-":
        line_style = "solid"
    elif line_style == "--":
        line_style = "dashed"
    elif line_style == "-.":
        line_style = "dashdot"
    elif line_style == ":":
        line_style = "dotted"
    else:
        raise ValueError("line_style must be selected from " "['-', '--', '-.', ':'].")
    # create pointcloud from the ellipse centres
    pc = PointCloud(np.array(means))
    # compute axes limits, padding the centres' bounding box by
    # crop_proportion of the range in each dimension
    bounds = pc.bounds()
    r = pc.range()
    x_rr = r[0] * crop_proportion
    y_rr = r[1] * crop_proportion
    axes_x_limits = [bounds[0][1] - x_rr, bounds[1][1] + x_rr]
    axes_y_limits = [bounds[0][0] - y_rr, bounds[1][0] + y_rr]
    # scale used to normalize the per-ellipse standard deviations
    normalizer = np.sum(r) / 2.0
    # compute height, width, theta and std per covariance matrix
    stds = []
    heights = []
    widths = []
    thetas = []
    for cov in covariances:
        vals, vecs = eigh_sorted(cov)
        # sqrt of eigenvalues = std devs along the principal axes
        width, height = np.sqrt(vals)
        # rotation of the major axis w.r.t. the first coordinate axis
        theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
        stds.append(np.mean([height, width]) / normalizer)
        heights.append(height)
        widths.append(width)
        thetas.append(theta)
    if render_colour_bar:
        # set colormap values, normalized over the observed std range
        cmap = plt.get_cmap(colour_map)
        cNorm = colors.Normalize(vmin=np.min(stds), vmax=np.max(stds))
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
    # visualize pointcloud; markers are suppressed in colour-bar mode
    # because each centre is re-plotted below in its mapped colour
    if render_colour_bar:
        renderer = pc.view(
            figure_id=figure_id,
            new_figure=new_figure,
            image_view=image_view,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            figure_size=figure_size,
            render_markers=False,
        )
    else:
        renderer = pc.view(
            figure_id=figure_id,
            new_figure=new_figure,
            image_view=image_view,
            marker_edge_colour=marker_edge_colour,
            marker_face_colour=marker_face_colour,
            marker_edge_width=marker_edge_width,
            marker_size=marker_size,
            marker_style=marker_style,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            figure_size=figure_size,
            render_markers=render_markers,
        )
    # plot ellipses
    ax = plt.gca()
    for i in range(len(covariances)):
        # Width and height are "full" widths, not radius
        width = 2 * n_std * widths[i]
        height = 2 * n_std * heights[i]
        if image_view:
            # image view: swap x/y of the mean and swap width/height so the
            # ellipse matches the transposed coordinate system
            colour = line_colour
            if render_colour_bar:
                colour = scalarMap.to_rgba(stds[i])
            if render_markers:
                # NOTE(review): plt.plot forwards kwargs to Line2D, which
                # has no 'facecolor'/'edgecolor' properties — confirm this
                # call works on the targeted matplotlib version.
                plt.plot(
                    means[i][1],
                    means[i][0],
                    facecolor=colour,
                    edgecolor=colour,
                    linewidth=0,
                )
            ellip = Ellipse(
                xy=means[i][-1::-1],
                width=height,
                height=width,
                angle=thetas[i],
                linestyle=line_style,
                linewidth=line_width,
                edgecolor=colour,
                facecolor="none",
            )
        else:
            colour = line_colour
            if render_colour_bar:
                colour = scalarMap.to_rgba(stds[i])
            if render_markers:
                # NOTE(review): see the Line2D kwargs note above.
                plt.plot(
                    means[i][0],
                    means[i][1],
                    facecolor=colour,
                    edgecolor=colour,
                    linewidth=0,
                )
            ellip = Ellipse(
                xy=means[i],
                width=width,
                height=height,
                angle=thetas[i],
                linestyle=line_style,
                linewidth=line_width,
                edgecolor=colour,
                facecolor="none",
            )
        ax.add_artist(ellip)
    # show colour bar
    if render_colour_bar:
        scalarMap.set_array(stds)
        cb = plt.colorbar(scalarMap, label=colour_bar_label)
        # change colour bar's font properties to match the axes fonts
        ax = cb.ax
        text = ax.yaxis.label
        font = FontProperties(
            size=axes_font_size,
            weight=axes_font_weight,
            style=axes_font_style,
            family=axes_font_name,
        )
        text.set_font_properties(font)
    return renderer
| patricksnape/menpo | menpo/visualize/base.py | Python | bsd-3-clause | 57,597 | [
"Gaussian"
] | 442ad4f1c04e661dd209dc74aa78fbdc49b0e9047c42fcabca51658a77856c3f |
from pylab import *
from math import exp, sqrt
from utils import *
# This module contains implementations of gaussian and bilateral kernels,
# as well as generic filtering functions
# The implementations work for d-dimensional grayscale images.
def gaussian_filter(im, sigma, kernel_radius=None):
    """Apply a Gaussian filter with standard deviation `sigma` to image `im`.

    `kernel_radius` sets the shape of the kernel matrix; if omitted it is
    derived from `sigma` via square_kernel_shape_for_sigma (covers +/- 2 sigma).
    Works for d-dimensional grayscale images. Returns a new filtered image;
    border pixels (within kernel_radius of an edge) keep their original values.
    """
    # 'is None' rather than '== None': identity test is the correct (and
    # numpy-safe) way to detect a missing argument.
    if kernel_radius is None:
        kernel_radius = square_kernel_shape_for_sigma(sigma, im)
    gaussian_kernel_matrix = gaussian_kernel(kernel_radius, sigma)
    return filter_image_kernel(im, gaussian_kernel_matrix)
def bilateral_filter(im, sigmaD, sigmaR, kernel_radius=None):
    """Apply a bilateral filter to image `im`.

    sigmaD is the domain (spatial) standard deviation, sigmaR the range
    (intensity) standard deviation. `kernel_radius` sets the shape of the
    kernel; if omitted it is derived from sigmaD via
    square_kernel_shape_for_sigma. Returns a new filtered image; border
    pixels keep their original values.
    """
    # 'is None' rather than '== None': identity test is the correct (and
    # numpy-safe) way to detect a missing argument.
    if kernel_radius is None:
        kernel_radius = square_kernel_shape_for_sigma(sigmaD, im)
    kernel_function = bilateral_kernel(kernel_radius, sigmaD, sigmaR)
    return filter_image_function(im, kernel_function, kernel_radius)
def function_kernel(kernel_radius, fn):
    """Build a normalised kernel by evaluating fn(kernel_radius, position)
    at every cell of a (kernel_radius * 2 + 1)-shaped array."""
    kernel = zeros(kernel_radius * 2 + 1)
    for position in array_iterator(kernel):
        kernel[position] = fn(kernel_radius, position)
    # Normalise so the kernel weights sum to one.
    return kernel / kernel.sum()
# Returns a kernel function to use with filter_image_function.
# The returned callable produces a filtered value for an image at a given
# position according to the bilateral filtering formulation.
def bilateral_kernel(kernel_radius, sigmaD, sigmaR):
    # The spatial (domain) Gaussian kernel is the same for every pixel, so
    # compute it once up front and close over it.
    domain_kernel = gaussian_kernel(kernel_radius, sigmaD)

    def kernel(im, pos):
        return bilateral_kernel_function(kernel_radius, domain_kernel, sigmaR, im, pos)

    return kernel
def bilateral_kernel_function(kernel_radius, domain_kernel, sigmaR, im, p):
    """Bilateral-filtered value of image `im` at position `p`.

    `domain_kernel` is the precomputed spatial kernel of radius
    `kernel_radius`; `sigmaR` is the range (intensity) standard deviation.
    Returns the weighted average of the neighbourhood, where the weight of
    each neighbour combines its spatial kernel weight with a Gaussian of its
    intensity difference from the centre pixel.
    """
    two_sigma_r_sq = 2.0 * sigmaR * sigmaR
    centre_value = im[p]
    # Tuple of slices (list indexing of ndarrays is deprecated); built with
    # the builtin zip instead of Python-2-only itertools.izip.
    window = tuple(slice(i - r, i + r + 1) for i, r in zip(p, kernel_radius))
    neighbourhood = im[window]
    # Range kernel: Gaussian of the squared intensity difference from the
    # centre value (direct broadcast; no temporary zeros array needed).
    range_kernel = np.exp(-np.square(neighbourhood - centre_value) / two_sigma_r_sq)
    # Combine range and domain weights and normalise to sum to one.
    # np.sum is used explicitly (the original relied on pylab's star import
    # shadowing the builtin sum).
    kernel = np.multiply(range_kernel, domain_kernel)
    kernel = kernel / np.sum(kernel)
    return np.sum(np.multiply(neighbourhood, kernel))
# Filters image I with kernel function f.
# f receives two parameters:
#   1) the image to filter
#   2) the position for which a new filtered value must be calculated,
# and returns this new value.
def filter_image_function(I, f, kernel_radius):
    # Start from a copy so that border pixels (never visited by the
    # edge-avoiding iterator) keep their original values.
    filtered = np.copy(I)
    for position in array_iterator_avoiding_edges(filtered, kernel_radius):
        filtered[position] = f(I, position)
    return filtered
# filters image I with kernel matrix f
def filter_image_kernel(I, f):
    """Restrictions: sum(f)=1, f.shape[0]=f.shape[1], odd(f.shape[0])

    Correlates image I with kernel f. Border pixels (within kernel_radius
    of an edge) keep their original values.
    """
    # Floor division keeps the radii integral on Python 3 ('/' would yield
    # floats and break the slice construction below); identical result to
    # the Python-2 integer division the original relied on.
    kernel_radius = (np.asarray(f.shape) - 1) // 2
    # copy the original, else the borders would be black in the result
    J = np.copy(I)
    for p in array_iterator_avoiding_edges(J, kernel_radius):
        # Tuple of slices (list indexing of ndarrays is deprecated); builtin
        # zip instead of Python-2-only itertools.izip.
        window = tuple(slice(i - r, i + r + 1) for i, r in zip(p, kernel_radius))
        J[p] = np.sum(np.multiply(I[window], f))
    return J
def gaussian_kernel(kernel_radius, sigma):
    """Return a normalised d-dimensional Gaussian kernel matrix.

    `kernel_radius` is a length-d integer array of per-dimension radii; the
    returned kernel has shape kernel_radius * 2 + 1, is centred on
    `kernel_radius`, and its weights sum to one. To be used with
    filter_image_kernel.

    Vectorised with numpy instead of the original per-cell Python loop
    (same values, no dependency on the array_iterator helper).
    """
    radii = np.asarray(kernel_radius)
    shape = tuple(radii * 2 + 1)
    # Squared Euclidean distance of every cell from the kernel centre,
    # accumulated axis by axis over the full index grid.
    squared_distance = np.zeros(shape)
    for grid, centre in zip(np.indices(shape), radii):
        squared_distance += (grid - centre) ** 2
    f = np.exp(-squared_distance / (2.0 * sigma * sigma))
    return f / f.sum()
# Generate a square kernel shape for the given image's shape, suitable for a
# Gaussian kernel with standard deviation sigma.
def square_kernel_shape_for_sigma(sigma, image):
    radius = kernel_radius_for_sigma(sigma)
    return square_kernel_shape_for_image(radius, image)
# Return the kernel radius for a given sigma.
# Gaussian weights more than 2 sigma from the centre are effectively zero,
# so a radius of ceil(2 * sigma) captures virtually all of the mass.
def kernel_radius_for_sigma(sigma):
    two_sigma = 2 * sigma
    return int(math.ceil(two_sigma))
# Return the kernel shape of a given radius for the given image.
# The image is only used for its dimensionality (2D, 3D, ...): the same
# radius is repeated once per image dimension.
def square_kernel_shape_for_image(radius, image):
    return array([radius for _ in image.shape])
"Gaussian"
] | 1e0487104d19995e1c16c41c616209c709b772ccfa5b86c7fb7e563009db3701 |
from Classes import *
from Buttons import *
from Fuctions import *
# Registry of every device button image registered via DEVICE.Image().
ALL_DEVICES_LIST = []
# Names of every zone controller registered via ZONECONTROLLER.Name().
ZONECONTROLLER_LIST = []
class DEVICE:
    """Base class for an automation device driven through the GUI helpers.

    NOTE(review): Text() and Name() deliberately rebind the instance
    attributes ``self.Text`` / ``self.Name`` over the identically named
    bound methods, so each can be called at most once per instance —
    confirm this shadowing is intended before refactoring.
    """

    def Image(self, image):
        # Remember the button image and register it in the global device list.
        self.image = image
        ALL_DEVICES_LIST.append(image)

    def GetImage(self):
        return self.image

    def ItsButton(self):
        # The button identifier is the image filename minus its .png suffix.
        itsbutton = self.GetImage()
        itsbutton = itsbutton.replace('.png', '')
        return itsbutton

    def Communication(self, comtyp, hardtyp, comimage):
        # Record how this device communicates (e.g. hardware type 'Ethernet').
        self.CommunicationType = comtyp
        self.HardwareType = hardtyp
        self.CommunicationImage = comimage

    def Text(self, text):
        self.Text = text
        return self.Text

    def PrintName(self):
        # print() call form runs on both Python 2 and Python 3; the original
        # "print self.Name" statement is a syntax error under Python 3.
        print(self.Name)

    def Name(self, name):
        self.Name = name
class ZONECONTROLLER(DEVICE):
    """A zone-controller device, driven through the GUI-automation helpers
    (media, zonecontrollers, Type, Press, ... imported from Buttons/Fuctions).

    All print statements use the function-call form so the module also
    parses under Python 3.
    """

    def Name(self, name):
        # Record the name and register this controller in the global list.
        self.Name = name
        ZONECONTROLLER_LIST.append(self.Name)

    def AddCommunication(self):
        print("\t" * 8 + "Adding Communication For -> " + str(self.Name))
        media.c()
        communciationdevices.r()
        Type('a')
        CommunicationType(self.CommunicationType)
        HardwareType(self.HardwareType)
        # Ethernet devices need an extra confirmation keystroke.
        if self.HardwareType == 'Ethernet':
            Press("enter")
        MinAll()
        ####################################################################

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        # Add the communication channel first, then the controller itself.
        self.AddCommunication()
        self.Add()

    def DeleteFull(self):
        pass

    def Delete(self):
        print("Deleting - >" + str(self.Name))
        media.c()
        MinAll()
        video.c()
        media.c()
        sleep(2)
        zonecontrollers.Expand()
        sleep(1)
        # NOTE(review): eval() resolves the button object from the image
        # name (see DEVICE.ItsButton) — only safe because the names come
        # from this module's own data, never from external input.
        eval(self.ItsButton()).r()
        Type('d')
        Press('enter')
        Press('enter')
        Press('enter')
        eval(self.ItsButton()).wv()

    def Add(self):
        print("\t" * 8 + "Adding -> " + str(self.Name))
        media.c()
        zonecontrollers.r()
        Type('a')
        DeviceType(self.Text)
        Press('enter')
        version.wv()
        MinAll()
# Placeholder device categories. Every operation is an unimplemented stub.
# NOTE: the original source defined UPSPOWERSUPPLIES twice back to back with
# identical bodies; the redundant second definition has been removed.
class SECURITYPANELS(DEVICE):
    """Security-panel device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class DOORLOCKS(DEVICE):
    """Door-lock device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class THERMOSTATS(DEVICE):
    """Thermostat device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class HEATINGCOOLINGUNITS(DEVICE):
    """Heating/cooling-unit device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class LIGHTINGINTERFACES(DEVICE):
    """Lighting-interface device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class VIDEOCAMERASSOURCES(DEVICE):
    """Video-camera/source device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class IRRIGATIONCONTROLLERS(DEVICE):
    """Irrigation-controller device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class POOLCONTROLLERS(DEVICE):
    """Pool-controller device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass


class UPSPOWERSUPPLIES(DEVICE):
    """UPS/power-supply device category (stub)."""

    def AddCommunication(self):
        pass

    def DeleteCommunication(self):
        pass

    def AddFull(self):
        pass

    def DeleteFull(self):
        pass

    def Add(self):
        pass

    def Delete(self):
        pass
####################################################### Zone Controllers
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table. In every original entry the image filename is exactly the
# variable name lower-cased plus ".png", so only (name, display text) needs
# to be stored. Creation order and per-device call order (Image, Text, Name)
# match the original; module-level names are preserved via globals().
for _zc_name, _zc_text in [
    ("ADASuite1616videozones", "ADA Suite 16 (16 video zones)"),
    ("ADASuite1616zone", "ADA Suite 16 (16 zone)"),
    ("ADASuite1632videozones", "ADA Suite 16 (32 video zones)"),
    ("ADASuite1632zone", "ADA Suite 16 (32 zone)"),
    ("ADASuite1648zone", "ADA Suite 16 (48 zone)"),
    ("ADASuite1664zone", "ADA Suite 16 (64 zone)"),
    ("ADASuite1696zone", "ADA Suite 16 (96 zone)"),
    ("ADASuite3232zone", "ADA Suite 32 (32 zone)"),
    ("ADASuite3264zone", "ADA Suite 32 (64 zone)"),
    ("ADASuite3296zone", "ADA Suite 32 (96 zone)"),
    ("ADASuite71", "ADA Suite 7.1"),
    ("ADASuite8100", "ADA Suite 8100"),
    ("ADASuite8200", "ADA Suite 8200"),
    ("AH66TSingleChassis", "AH66T Single Chassis"),
    ("AtlonaATH2H44M4x4HDMI", "Atlona AT-H2H-44M (4x4 HDMI)"),
    ("AtlonaATH2H88M8x8HDMI", "Atlona AT-H2H-88M (8x8 HDMI)"),
    ("AtlonaATHDV1616M16x16HDMI", "Atlona AT-HD-V1616M (16x16 HDMI)"),
    ("AtlonaATHDV44M4x4HDMI", "Atlona AT-HD-V44M (4x4 HDMI)"),
    ("AtlonaATPRO2HD1616M16x16HDBaseT", "Atlona AT-PRO2HD1616M (16x16 HD-Base T)"),
    ("AtlonaATPRO2HD44M4x4HDBaseT", "Atlona AT-PRO2HD44M (4x4 HD-Base T)"),
    ("AtlonaATPRO2HD88M8x8HDBaseT", "Atlona AT-PRO2HD88M (8x8 HD-Base T)"),
    ("AtlonaATPRO3HD44M4x4HDBaseT", "Atlona AT-PRO3HD44M (4x4 HD-Base T)"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")  # filename always name.lower() + ".png"
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc  # preserve the original module-level binding
del _zc, _zc_name, _zc_text
####################################################################################
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table (image filename is always name.lower() + ".png" in the
# original). Creation order, per-device call order, and module-level names
# are preserved.
for _zc_name, _zc_text in [
    ("AtlonaATPRO3HD66M6x6HDBaseT", "Atlona AT-PRO3HD66M (6x6 HD-Base T)"),
    ("Denon28053805", "Denon 2805,3805"),
    ("DenonAVPA1HDCI", "Denon AVP-A1HDCI"),
    ("DenonAVR1613", "Denon AVR-1613"),
    ("DenonAVR1713", "Denon AVR-1713"),
    ("DenonAVR1912CI", "Denon AVR-1912CI"),
    ("DenonAVR1913", "Denon AVR-1913"),
    ("DenonAVR2112CI", "Denon AVR-2112CI"),
    ("DenonAVR2113CI", "Denon AVR-2113CI"),
    ("DenonAVR2310CI", "Denon AVR-2310CI"),
    ("DenonAVR2311CI", "Denon AVR-2311CI"),
    ("DenonAVR2312CI", "Denon AVR-2312CI"),
    ("DenonAVR2313CI", "Denon AVR-2313CI"),
    ("DenonAVR2808CI", "Denon AVR-2808CI"),
    ("DenonAVR3310CI", "Denon AVR-3310CI"),
    ("DenonAVR3311CI", "Denon AVR-3311CI"),
    ("DenonAVR3312CI", "Denon AVR-3312CI"),
    ("DenonAVR3313CI", "Denon AVR-3313CI"),
    ("DenonAVR3806", "Denon AVR-3806"),
    ("DenonAVR3808CI", "Denon AVR-3808CI"),
    ("DenonAVR4308CI", "Denon AVR-4308CI"),
    ("DenonAVR4310CI", "Denon AVR-4310CI"),
    ("DenonAVR4311CI", "Denon AVR-4311CI"),
    ("DenonAVR45204520CI", "Denon AVR-4520/4520CI"),
    ("DenonAVR4806", "Denon AVR-4806"),
    ("DenonAVR4810CI", "Denon AVR-4810CI"),
    ("DenonAVR5308CI", "Denon AVR-5308CI"),
    ("DenonAVR5805", "Denon AVR-5805"),
    # Disabled (commented out) in the original source:
    # ("DenonAVRS700", "Denon AVR-S700"),
    # ("DenonAVRS900", "Denon AVR-S900"),
    ("DenonAVRX1000International", "Denon AVR-X1000 (International)"),
    ("DenonAVRX1000US", "Denon AVR-X1000 (US)"),
    ("DenonAVRX1100", "Denon AVR-X1100"),
    ("DenonAVRX2000", "Denon AVR-X2000"),
    ("DenonAVRX2100", "Denon AVR-X2100"),
    ("DenonAVRX3000", "Denon AVR-X3000"),
    ("DenonAVRX3100", "Denon AVR-X3100"),
    ("DenonAVRX4000", "Denon AVR-X4000"),
    ("DenonAVRX4100", "Denon AVR-X4100"),
    ("DenonAVRX5200US", "Denon AVR-X5200 (US)"),
    ("ELANgMV64Ethernet", "ELAN gMV64 (Ethernet)"),
    ("ELANgMV64RS232", "ELAN gMV64 (RS-232)"),
    ("ELANM86A12Zones", "ELAN M86A (12 Zones)"),
    ("ELANM86A18Zones", "ELAN M86A (18 Zones)"),
    ("ELANM86A24Zones", "ELAN M86A (24 Zones)"),
    ("ELANM86A6Zone", "ELAN M86A (6 Zone)"),
    ("ELANS1616ADualChassisMode", "ELAN S1616A (Dual Chassis Mode)"),
    ("ELANS1616ASingleChassisMode", "ELAN S1616A (Single Chassis Mode)"),
    ("ELANS86AP", "ELAN S86A/P"),
    ("ELANS86AP12Zones", "ELAN S86A/P (12 Zones)"),
    ("ELANS86AP18Zones", "ELAN S86A/P (18 Zones)"),
    ("ELANS86AP24Zones", "ELAN S86A/P (24 Zones)"),
    ("ELANSystem1208Zones", "ELAN System12 (08 Zones)"),
    ("ELANSystem1216Zones", "ELAN System12 (16 Zones)"),
    ("ELANSystem1224Zones", "ELAN System12 (24 Zones)"),
    ("ELANSystem1232Zones", "ELAN System12 (32 Zones)"),
    ("ElanV8", "Elan V8"),
    ("ElanV85", "Elan V85"),
    ("ElanV883", "Elan V883"),
    ("GefenHDFST4444ELR", "Gefen HD-FST444-4ELR"),
    ("GefenHDFST848", "Gefen HD-FST848"),
    ("GenericSingleZoneController", "Generic Single Zone Controller"),
    ("IntegraDHC806", "Integra DHC-80.6"),
    ("IntegraDTR46DTR56DTR66DTR76", "Integra DTR 4.6 / DTR 5.6 / DTR 6.6 / DTR 7.6"),
    ("IntegraDTR74DTR54", "Integra DTR 7.4 / DTR 5.4"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc
del _zc, _zc_name, _zc_text
#########################################################################
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table (image filename is always name.lower() + ".png" in the
# original). Creation order, per-device call order, and module-level names
# are preserved.
for _zc_name, _zc_text in [
    ("IntegraDTR203", "Integra DTR-20.3"),
    ("IntegraDTR303", "Integra DTR-30.3"),
    ("IntegraDTR306", "Integra DTR-30.6"),
    ("IntegraDTR49", "Integra DTR-4.9"),
    ("IntegraDTR401OnkyoTXNR1007", "Integra DTR-40.1, Onkyo TX-NR1007"),
    ("IntegraDTR403", "Integra DTR-40.3"),
    ("IntegraDTR404", "Integra DTR-40.4"),
    ("IntegraDTR405", "Integra DTR-40.5"),
    ("IntegraDTR406", "Integra DTR-40.6"),
    ("IntegraDTR59", "Integra DTR-5.9"),
    ("IntegraDTR501", "Integra DTR-50.1"),
    ("IntegraDTR503", "Integra DTR-50.3"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc
del _zc, _zc_name, _zc_text
#################################################################
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table (image filename is always name.lower() + ".png" in the
# original). Creation order, per-device call order, and module-level names
# are preserved.
for _zc_name, _zc_text in [
    ("IntegraDTR504", "Integra DTR-50.4"),
    ("IntegraDTR505", "Integra DTR-50.5"),
    ("IntegraDTR506", "Integra DTR-50.6"),
    ("IntegraDTR605", "Integra DTR-60.5"),
    ("IntegraDTR606", "Integra DTR-60.6"),
    ("IntegraDTR79DTR69OnkyoTXSR806TXSR706", "Integra DTR-7.9 / DTR-6.9, Onkyo TX-SR806 / TX-SR706"),
    ("IntegraDTR701OnkyoTXNR3007", "Integra DTR-70.1, Onkyo TX-NR3007"),
    ("IntegraDTR703DTR803", "Integra DTR-70.3, DTR-80.3"),
    ("IntegraDTR704", "Integra DTR-70.4"),
    ("IntegraDTR706", "Integra DTR-70.6"),
    ("IntegraDTR89OnkyoTXSR876PRSC886", "Integra DTR-8.9, Onkyo TX-SR876 / PR-SC886"),
    ("IntegraDTR801OnkyoTXNR5007", "Integra DTR-80.1, Onkyo TX-NR5007"),
    ("IntegraDTR99OnkyoTXNR906", "Integra DTR-9.9, Onkyo TX-NR906"),
    ("JAPHDoverIPSwitch", "JAP HD over IP Switch"),
    ("MarantzAV7701DType", "Marantz AV7701 (D-Type)"),
    ("MarantzAV7702DType", "Marantz AV7702 (D-Type)"),
    ("MarantzAV8801DType", "Marantz AV8801 (D-Type)"),
    ("MarantzNR1504DType", "Marantz NR1504 (D-Type)"),
    ("MarantzNR1602DType", "Marantz NR1602 (D-Type)"),
    ("MarantzNR1603DType", "Marantz NR1603 (D-Type)"),
    ("MarantzNR1604DType", "Marantz NR1604 (D-Type)"),
    ("MarantzNR1605DType", "Marantz NR1605 (D-Type)"),
    ("MarantzSR5004", "Marantz SR5004"),
    ("MarantzSR5005", "Marantz SR5005"),
    ("MarantzSR5006DType", "Marantz SR5006 (D-Type)"),
    ("MarantzSR5007DType", "Marantz SR5007 (D-Type)"),
    ("MarantzSR5008DType", "Marantz SR5008 (D-Type)"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc
del _zc, _zc_name, _zc_text
###########################################################################continue
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table (image filename is always name.lower() + ".png" in the
# original). Creation order, per-device call order, and module-level names
# are preserved.
for _zc_name, _zc_text in [
    ("MarantzSR5009DType", "Marantz SR5009 (D-Type)"),
    ("MarantzSR5500560075008500", "Marantz SR5500,5600,7500,8500"),
    ("MarantzSR6004", "Marantz SR6004"),
    ("MarantzSR6005DType", "Marantz SR6005 (D-Type)"),
    ("MarantzSR6006DType", "Marantz SR6006 (D-Type)"),
    ("MarantzSR6007DType", "Marantz SR6007 (D-Type)"),
    ("MarantzSR6008DType", "Marantz SR6008 (D-Type)"),
    ("MarantzSR6009DType", "Marantz SR6009 (D-Type)"),
    ("MarantzSR7002", "Marantz SR7002"),
    ("MarantzSR7005AV7005DType", "Marantz SR7005,AV7005 (D-Type)"),
    ("MarantzSR7007DType", "Marantz SR7007 (D-Type)"),
    ("MarantzSR7008DType", "Marantz SR7008 (D-Type)"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc
del _zc, _zc_name, _zc_text
##############################################################
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table (image filename is always name.lower() + ".png" in the
# original). Creation order, per-device call order, and module-level names
# are preserved.
for _zc_name, _zc_text in [
    ("MarantzSR7009DType", "Marantz SR7009 (D-Type)"),
    ("MarantzSR8001", "Marantz SR8001"),
    ("MarantzSR8002", "Marantz SR8002"),
    ("MarantzSR9600", "Marantz SR9600"),
    ("NilesGXR2Ethernet", "Niles GXR2 Ethernet"),
    ("NilesMRC6430", "Niles MRC6430"),
    ("NuvoConcerto", "Nuvo Concerto"),
    ("NuvoEssentia", "Nuvo Essentia"),
    ("NuvoEssentiaNVE6G12zone", "Nuvo Essentia NV_E6G (12 zone)"),
    ("NuvoEssentiaNVE6G6zone", "Nuvo Essentia NV_E6G (6 zone)"),
    ("NuvoGrandConcerto16zone", "Nuvo Grand Concerto (16 zone)"),
    ("NuvoGrandConcerto8zone", "Nuvo Grand Concerto (8 zone)"),
    ("OnkyoPRSC5530", "Onkyo PR-SC5530"),
    ("OnkyoTXNR1009TXNR3009TXNR5009", "Onkyo TX-NR1009, TX-NR3009, TX-NR5009"),
    ("OnkyoTXNR1010", "Onkyo TX-NR1010"),
    ("OnkyoTXNR1030", "Onkyo TX-NR1030"),
    ("OnkyoTXNR3010", "Onkyo TX-NR3010"),
    ("OnkyoTXNR3030", "Onkyo TX-NR3030"),
    ("OnkyoTXNR5010", "Onkyo TX-NR5010"),
    ("OnkyoTXNR515", "Onkyo TX-NR515"),
    ("OnkyoTXNR525", "Onkyo TX-NR525"),
    ("OnkyoTXNR535", "Onkyo TX-NR535"),
    ("OnkyoTXNR609", "Onkyo TX-NR609"),
    ("OnkyoTXNR616", "Onkyo TX-NR616"),
    ("OnkyoTXNR626", "Onkyo TX-NR626"),
    ("OnkyoTXNR636HTRC660", "Onkyo TX-NR636/HT-RC660"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc
del _zc, _zc_name, _zc_text
####################################################################
# NOTE(review): collapsed the generated 4-lines-per-device boilerplate into
# a data table (image filename is always name.lower() + ".png" in the
# original). Creation order, per-device call order, and module-level names
# are preserved.
for _zc_name, _zc_text in [
    ("OnkyoTXNR709", "Onkyo TX-NR709"),
    ("OnkyoTXNR717", "Onkyo TX-NR717"),
    ("OnkyoTXNR727", "Onkyo TX-NR727"),
    ("OnkyoTXNR737", "Onkyo TX-NR737"),
    ("OnkyoTXNR809", "Onkyo TX-NR809"),
    ("OnkyoTXNR818", "Onkyo TX-NR818"),
    ("OnkyoTXNR828", "Onkyo TX-NR828"),
    ("OnkyoTXNR838", "Onkyo TX-NR838"),
    ("OnkyoTXNR929", "Onkyo TX-NR929"),
    ("PioneerSC1223K", "Pioneer SC-1223-K"),
    ("PioneerSC1323K", "Pioneer SC-1323-K"),
    ("PioneerSC1523K", "Pioneer SC-1523-K"),
    ("PioneerSC2023K", "Pioneer SC-2023-K"),
    ("PioneerSC55", "Pioneer SC-55"),
    ("PioneerSC57", "Pioneer SC-57"),
    ("PioneerSC61", "Pioneer SC-61"),
    ("PioneerSC63", "Pioneer SC-63"),
    ("PioneerSC65", "Pioneer SC-65"),
    ("PioneerSC67", "Pioneer SC-67"),
    ("PioneerSC68", "Pioneer SC-68"),
    ("PioneerSC71", "Pioneer SC-71"),
    ("PioneerSC72", "Pioneer SC-72"),
    ("PioneerSC75", "Pioneer SC-75"),
]:
    _zc = ZONECONTROLLER()
    _zc.Image(_zc_name.lower() + ".png")
    _zc.Text(_zc_text)
    _zc.Name(_zc_name)
    globals()[_zc_name] = _zc
del _zc, _zc_name, _zc_text
####################################################################
# --- Zone controllers: Pioneer SC-77 through VSX-923-K ---------------------
# Table-driven registration; entry = (variable name, display text).  The
# icon image is the lowercased variable name + ".png" and the internal Name
# equals the variable name.  Published via globals() for existing callers.
for _name, _text in [
    ("PioneerSC77", "Pioneer SC-77"),
    ("PioneerSC79", "Pioneer SC-79"),
    ("PioneerSCLX57K", "Pioneer SC-LX57-K"),
    ("PioneerSCLX77K", "Pioneer SC-LX77-K"),
    ("PioneerSCLX87K", "Pioneer SC-LX87-K"),
    ("PioneerVSX1123K", "Pioneer VSX-1123-K"),
    ("PioneerVSX50", "Pioneer VSX-50"),
    ("PioneerVSX51", "Pioneer VSX-51"),
    ("PioneerVSX52", "Pioneer VSX-52"),
    ("PioneerVSX53", "Pioneer VSX-53"),
    ("PioneerVSX60", "Pioneer VSX-60"),
    ("PioneerVSX70K", "Pioneer VSX-70-K"),
    ("PioneerVSX923K", "Pioneer VSX-923-K"),
]:
    _dev = ZONECONTROLLER()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
# --- Zone controllers: SnapAV, SpeakerCraft, Sunfire, WyreStorm ------------
# Table-driven registration; entry = (variable name, display text).  The
# icon image is the lowercased variable name + ".png" and the internal Name
# equals the variable name.  Published via globals() for existing callers.
for _name, _text in [
    ("SnapAVB100B3004x4or8x8", "SnapAV B100/B300 (4x4 or 8x8)"),
    ("SpeakerCraftMRA664", "SpeakerCraft MRA664"),
    ("SpeakerCraftMZC64", "SpeakerCraft MZC-64"),
    ("SpeakerCraftMZC648zone", "SpeakerCraft MZC-64 (8 zone)"),
    ("SpeakerCraftMZC66", "SpeakerCraft MZC-66"),
    ("SpeakerCraftMZC6612zone", "SpeakerCraft MZC-66 (12 zone)"),
    ("SpeakerCraftMZC6618zone", "SpeakerCraft MZC-66 (18 zone)"),
    ("SpeakerCraftMZC6624zone", "SpeakerCraft MZC-66 (24 zone)"),
    ("SpeakerCraftMZC88", "SpeakerCraft MZC-88"),
    ("SpeakerCraftMZC8816zone", "SpeakerCraft MZC-88 (16 zone)"),
    ("SpeakerCraftMZC8824zone", "SpeakerCraft MZC-88 (24 zone)"),
    ("SpeakerCraftMZC8832zone", "SpeakerCraft MZC-88 (32 zone)"),
    ("SunfireTGR3TGP5", "Sunfire TGR-3, TGP-5"),
    ("SunfireTGR401TGP401", "Sunfire TGR-401, TGP-401"),
    ("WyreStormMX0404", "WyreStorm MX0404"),
    ("WyreStormMX0606", "WyreStorm MX0606"),
    ("WyreStormMX0804", "WyreStorm MX0804"),
    ("WyreStormMX0808", "WyreStorm MX0808"),
    ("WyreStormMX0808310", "WyreStorm MX0808_310"),
    ("WyreStormMX0816310", "WyreStorm MX0816_310"),
    ("WyreStormMX1616310", "WyreStorm MX1616_310"),
]:
    _dev = ZONECONTROLLER()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
# --- Zone controllers: Xantech and Yamaha RX-A1000..RX-A2040 (YNCA) --------
# Table-driven registration; entry = (variable name, display text).  The
# icon image is the lowercased variable name + ".png" and the internal Name
# equals the variable name.  Published via globals() for existing callers.
for _name, _text in [
    ("XantechHD44CC514Units", "Xantech HD44CC5 (1-4 Units)"),
    ("XantechHD88CC514Units", "Xantech HD88CC5 (1-4 Units)"),
    ("YamahaRXA1000YNCA", "Yamaha RX-A1000 (YNCA)"),
    ("YamahaRXA1010YNCA", "Yamaha RX-A1010 (YNCA)"),
    ("YamahaRXA1020YNCA", "Yamaha RX-A1020 (YNCA)"),
    ("YamahaRXA1030YNCA", "Yamaha RX-A1030 (YNCA)"),
    ("YamahaRXA1040YNCA", "Yamaha RX-A1040 (YNCA)"),
    ("YamahaRXA2000YNCA", "Yamaha RX-A2000 (YNCA)"),
    ("YamahaRXA2010YNCA", "Yamaha RX-A2010 (YNCA)"),
    ("YamahaRXA2020YNCA", "Yamaha RX-A2020 (YNCA)"),
    ("YamahaRXA2030YNCA", "Yamaha RX-A2030 (YNCA)"),
    ("YamahaRXA2040YNCA", "Yamaha RX-A2040 (YNCA)"),
]:
    _dev = ZONECONTROLLER()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
# One-off carried over from the original: only the RX-A2040 entry also
# declares its communication method (Yamaha YNCA over Ethernet).
YamahaRXA2040YNCA.Communication("Yamaha YNCA (Ethernet)", "Ethernet", "yamahayncaethernet.png")
# --- Zone controllers: Yamaha RX-A3000s/7xx/8xx (YNCA), RX-V and RX-Z ------
# Table-driven registration; entry = (variable name, display text).  The
# icon image is the lowercased variable name + ".png" and the internal Name
# equals the variable name.  Published via globals() for existing callers.
for _name, _text in [
    ("YamahaRXA3000YNCA", "Yamaha RX-A3000 (YNCA)"),
    ("YamahaRXA3010YNCA", "Yamaha RX-A3010 (YNCA)"),
    ("YamahaRXA3020YNCA", "Yamaha RX-A3020 (YNCA)"),
    ("YamahaRXA3030YNCA", "Yamaha RX-A3030 (YNCA)"),
    ("YamahaRXA3040YNCA", "Yamaha RX-A3040 (YNCA)"),
    ("YamahaRXA710YNCA", "Yamaha RX-A710 (YNCA)"),
    ("YamahaRXA720YNCA", "Yamaha RX-A720 (YNCA)"),
    ("YamahaRXA730YNCA", "Yamaha RX-A730 (YNCA)"),
    ("YamahaRXA740YNCA", "Yamaha RX-A740 (YNCA)"),
    ("YamahaRXA800YNCA", "Yamaha RX-A800 (YNCA)"),
    ("YamahaRXA810YNCA", "Yamaha RX-A810 (YNCA)"),
    ("YamahaRXA820YNCA", "Yamaha RX-A820 (YNCA)"),
    ("YamahaRXA830YNCA", "Yamaha RX-A830 (YNCA)"),
    ("YamahaRXA840YNCA", "Yamaha RX-A840 (YNCA)"),
    ("YamahaRXV1600V2600", "Yamaha RX-V1600,V2600"),
    ("YamahaRXV1700V2700", "Yamaha RX-V1700,V2700"),
    ("YamahaRXV2065Ethernet", "Yamaha RX-V2065 (Ethernet)"),
    ("YamahaRXV2065RS232", "Yamaha RX-V2065 (RS-232)"),
    ("YamahaRXV3900Ethernet", "Yamaha RX-V3900 (Ethernet)"),
    ("YamahaRXZ7Ethernet", "Yamaha RX-Z7 (Ethernet)"),
    ("YamahaRXZ9", "Yamaha RX-Z9"),
]:
    _dev = ZONECONTROLLER()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
############################################################Security Panels
# Table-driven registration of security-panel devices; entry = (variable
# name, display text).  The icon image is the lowercased variable name +
# ".png" and the internal Name equals the variable name.  Published via
# globals() so existing code referencing e.g. ``ELKM1`` keeps working.
for _name, _text in [
    ("AdemcoVISTA128BP250BPFA1660C", "Ademco VISTA-128BP,250BP,FA1660C"),
    ("AdemcoVISTA128BPT250BPT", "Ademco VISTA-128BPT,250BPT"),
    ("AdemcoVISTA128FBP250FBP", "Ademco VISTA-128FBP,250FBP"),
    ("BoschRadionicsD7412GD9412G", "Bosch/Radionics D7412G,D9412G"),
    ("DSCMAXSYS", "DSC MAXSYS"),
    ("DSCPowerSeries5401", "DSC Power Series / 5401"),
    ("DSCPowerSeriesIT100", "DSC Power Series / IT-100"),
    ("ELKM1", "ELK-M1"),
    ("GEConcord", "GE Concord"),
    ("GENetworXNX4688E", "GE NetworX NX-4,6,8,8E"),
    ("HAIOmniSeries", "HAI Omni Series"),
    ("NapcoGeminiGEMX255P9600", "Napco Gemini GEM-X255, P9600"),
    ("ParadoxDigiplex", "Paradox Digiplex"),
    ("TexecomPremierElite", "Texecom Premier Elite"),
    ("VirtualSecurityController", "Virtual Security Controller"),
]:
    _dev = SECURITYPANELS()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
#######################################################DOORLOCKS
# Door-lock device registrations.  Each device is built from the DOORLOCKS
# factory and given its icon image, display text, and internal name; the
# internal name always matches the module-level variable name.
VirtualDoorLock = DOORLOCKS()
VirtualDoorLock.Image("virtualdoorlock.png")
VirtualDoorLock.Text("Virtual Door Lock")
VirtualDoorLock.Name("VirtualDoorLock")
ZWaveDoorLock = DOORLOCKS()
ZWaveDoorLock.Image("zwavedoorlock.png")
ZWaveDoorLock.Text("Z-Wave Door Lock")
ZWaveDoorLock.Name("ZWaveDoorLock")
#######################################################thermostats
# Table-driven registration of thermostat devices; entry = (variable name,
# display text).  The icon image is the lowercased variable name + ".png"
# and the internal Name equals the variable name.  Published via globals()
# so existing code referencing e.g. ``KNXThermostat`` keeps working.
for _name, _text in [
    ("Aprilaire8800Thermostat", "Aprilaire 8800 Thermostat"),
    ("Aprilaire8870Thermostat", "Aprilaire 8870 Thermostat"),
    ("CarrierCZIIThermostat", "Carrier CZII Thermostat"),
    ("CarrierInfinityThermostat", "Carrier Infinity Thermostat"),
    ("HAIOmniSeriesThermostat", "HAI Omni Series Thermostat"),
    ("HeatmiserThermostat", "Heatmiser Thermostat"),
    ("KNXThermostat", "KNX Thermostat"),
    ("LifeSenseLT1002Thermostat", "LifeSense LT1002 Thermostat"),
    ("LutronQSThermostat", "Lutron QS Thermostat"),
    ("RCSTR16Thermostat", "RCS TR16 Thermostat"),
    ("RCSTR40TR60Thermostat", "RCS TR40,TR60 Thermostat"),
    ("TekmarThermostat", "Tekmar Thermostat"),
    ("VantageThermostatInFusion", "Vantage Thermostat (InFusion)"),
    ("VantageThermostatQLink", "Vantage Thermostat (QLink)"),
    ("VirtualHVACThermostat", "Virtual HVAC Thermostat"),
    ("ZWaveThermostat2GIGZStat", "Z-Wave Thermostat 2GIG Z-Stat"),
    ("ZWaveThermostatHoneywellYTH8320ZW1007U", "Z-Wave Thermostat Honeywell YTH8320ZW1007/U"),
    ("ZWaveThermostatRCSTZ45", "Z-Wave Thermostat RCS TZ45"),
    ("ZWaveThermostatTraneTZEMT400BB3", "Z-Wave Thermostat Trane TZEMT400BB3"),
]:
    _dev = THERMOSTATS()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
#############################################HEATING AND COOLING UNITS
# Single generic heating/cooling unit registration, built from the
# HEATINGCOOLINGUNITS factory; internal name matches the variable name.
GenericHVACUnit = HEATINGCOOLINGUNITS()
GenericHVACUnit.Image("generichvacunit.png")
GenericHVACUnit.Text("Generic HVAC Unit")
GenericHVACUnit.Name("GenericHVACUnit")
#####################################################LIGHTING INTERFACES
# Table-driven registration of lighting-interface devices; entry =
# (variable name, display text).  The icon image is the lowercased variable
# name + ".png" and the internal Name equals the variable name.  Published
# via globals() so existing code referencing the devices keeps working.
for _name, _text in [
    ("CentraLiteElegancesingleMCP", "CentraLite Elegance (single MCP)"),
    ("CentraLiteJetStream", "CentraLite JetStream"),
    ("ClipsalLighting", "Clipsal Lighting"),
    ("ElectronicSolutionsR2D7", "Electronic Solutions R2D7"),
    ("HAILightingController", "HAI Lighting Controller"),
    ("iLightSourceController", "iLight Source Controller"),
    ("LevitonZWaveRS232Controller", "Leviton Z-Wave RS232 Controller"),
    ("LutronHomeWorksGraphikEye", "Lutron HomeWorks GraphikEye"),
    ("LutronHomeWorksIlluminationEthernet", "Lutron HomeWorks Illumination (Ethernet)"),
    ("LutronHomeWorksIlluminationRS232", "Lutron HomeWorks Illumination (RS-232)"),
    ("LutronHomeWorksInteractive", "Lutron HomeWorks Interactive"),
    ("LutronHomeWorksQS", "Lutron HomeWorks QS"),
    ("LutronRadioRA", "Lutron RadioRA"),
    ("LutronRadioRA2QS", "Lutron RadioRA2 QS"),
    ("LuxomLightingController", "Luxom Lighting Controller"),
    ("PCSPIMSerial", "PCS PIM Serial"),
    ("PCSPIMIP", "PCS PIM-IP"),
    ("RakoBridgeRARTCWAWTCBridge", "Rako Bridge (RA/RTC/WA/WTC-Bridge)"),
    ("SomfyRS232UniversalRTSInterfaceII", "Somfy RS232 Universal RTS Interface II"),
    ("SomfyRS485UniversalRTSInterfaceII", "Somfy RS485 Universal RTS Interface II"),
    ("TapkoSIMKNX", "Tapko SIM-KNX"),
    ("VantageInFusionController", "Vantage InFusion Controller"),
    ("VantageQSeriesController", "Vantage Q-Series Controller"),
    ("VirtualLightingDeviceController", "Virtual Lighting Device Controller"),
]:
    _dev = LIGHTINGINTERFACES()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
#######################################################VIDEO CAMERA SOURCES
# Table-driven registration of video-camera sources; entry = (variable
# name, display text).  The icon image is the lowercased variable name +
# ".png" and the internal Name equals the variable name.  Published via
# globals() so existing code referencing the devices keeps working.
for _name, _text in [
    ("AvioSys9060I", "AvioSys 9060I"),
    ("AvioSys9060SL", "AvioSys 9060SL"),
    ("AvioSysVideoPort", "AvioSys Video Port"),
    ("AXIS214PTZ", "AXIS 214PTZ"),
    ("AXIS216FD", "AXIS 216FD"),
    ("AXIS240Q241QPort", "AXIS 240Q/241Q Port"),
    ("AXISM7001", "AXIS M7001"),
    ("AXISM7001VE", "AXIS M7001VE"),
    ("AXISM7014Port", "AXIS M7014 Port"),
    ("AXISM7014VE", "AXIS M7014VE"),
    ("AXISP3301", "AXIS P3301"),
    ("AXISP5512PTZ", "AXIS P5512PTZ"),
    ("PanasonicProSeries", "Panasonic Pro Series"),
    ("PanasonicStandardSeries", "Panasonic Standard Series"),
    ("VivotekNetworkCamera7xxx8xxx", "Vivotek Network Camera 7xxx/8xxx"),
    ("VivotekVideoPortonVS2403", "Vivotek Video Port on VS2403"),
    ("VivotekVideoPortonVS8x01", "Vivotek Video Port on VS8x01"),
    ("WirepathCamera", "Wirepath Camera"),
]:
    _dev = VIDEOCAMERASSOURCES()
    _dev.Image(_name.lower() + ".png")  # icon filename mirrors the device name
    _dev.Text(_text)
    _dev.Name(_name)
    globals()[_name] = _dev
del _name, _text, _dev
########################################################IRRIGATIONCONTROLLERS
# Irrigation-controller registrations, built from the IRRIGATIONCONTROLLERS
# factory; internal name matches the module-level variable name.
HunterSRCProCICCXCORE = IRRIGATIONCONTROLLERS()
HunterSRCProCICCXCORE.Image("huntersrcprociccxcore.png")
HunterSRCProCICCXCORE.Text("Hunter SRC, Pro-C, ICC, XCORE")
HunterSRCProCICCXCORE.Name("HunterSRCProCICCXCORE")
VirtualIrrigationController = IRRIGATIONCONTROLLERS()
VirtualIrrigationController.Image("virtualirrigationcontroller.png")
VirtualIrrigationController.Text("Virtual Irrigation Controller")
VirtualIrrigationController.Name("VirtualIrrigationController")
#####################################################################Pool
# Pool-controller registrations, built from the POOLCONTROLLERS factory;
# internal name matches the module-level variable name.
JandyAquaLinkRS = POOLCONTROLLERS()
JandyAquaLinkRS.Image("jandyaqualinkrs.png")
JandyAquaLinkRS.Text("Jandy AquaLink RS")
JandyAquaLinkRS.Name("JandyAquaLinkRS")
PentairIntelliTouchEasyTouch = POOLCONTROLLERS()
PentairIntelliTouchEasyTouch.Image("pentairintellitoucheasytouch.png")
PentairIntelliTouchEasyTouch.Text("Pentair IntelliTouch, EasyTouch")
PentairIntelliTouchEasyTouch.Name("PentairIntelliTouchEasyTouch")
VirtualPoolController = POOLCONTROLLERS()
VirtualPoolController.Image("virtualpoolcontroller.png")
VirtualPoolController.Text("Virtual Pool Controller")
VirtualPoolController.Name("VirtualPoolController")
##################################################################UPS
# UPS / power-supply registrations, built from the UPSPOWERSUPPLIES
# factory; internal name matches the module-level variable name.
FurmanF1000 = UPSPOWERSUPPLIES()
FurmanF1000.Image("furmanf1000.png")
FurmanF1000.Text("Furman F1000")
FurmanF1000.Name("FurmanF1000")
FurmanF1500 = UPSPOWERSUPPLIES()
FurmanF1500.Image("furmanf1500.png")
FurmanF1500.Text("Furman F1500")
FurmanF1500.Name("FurmanF1500")
PanamaxM4320 = UPSPOWERSUPPLIES()
PanamaxM4320.Image("panamaxm4320.png")
PanamaxM4320.Text("Panamax M4320")
PanamaxM4320.Name("PanamaxM4320")
PanamaxMB1000 = UPSPOWERSUPPLIES()
PanamaxMB1000.Image("panamaxmb1000.png")
PanamaxMB1000.Text("Panamax MB1000")
PanamaxMB1000.Name("PanamaxMB1000")
PanamaxMB1500 = UPSPOWERSUPPLIES()
PanamaxMB1500.Image("panamaxmb1500.png")
PanamaxMB1500.Text("Panamax MB1500")
PanamaxMB1500.Name("PanamaxMB1500")
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/elan/__OLD_SCRIPTS/Devices.py | Python | gpl-3.0 | 67,734 | [
"Elk"
] | 8b22c8b8be3697d73602877e955a1749d736229c3964aae35716c663b6ccb12e |
""" LineGraph represents line graphs both simple and stacked. It includes
also cumulative graph functionality.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, PrettyDateLocator, \
PrettyDateFormatter, PrettyScalarFormatter
from matplotlib.patches import Polygon
from matplotlib.dates import date2num
import datetime
class LineGraph(PlotBase):
"""
The LineGraph class is a straightforward line graph; given a dictionary
of values, it takes the keys as the independent variable and the values
as the dependent variable.
"""
  def __init__(self, data, ax, prefs, *args, **kw):
    """Initialize the line graph.

    :param data: graph data object to plot (queried later via self.gdata)
    :param ax: matplotlib Axes instance the graph is drawn onto
    :param prefs: dict of plot preferences (e.g. 'starttime', 'log_yaxis')

    Extra positional and keyword arguments are forwarded unchanged to
    PlotBase.__init__.
    """
    PlotBase.__init__(self, data, ax, prefs, *args, **kw)
def draw(self):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
tmp_x = []
tmp_y = []
labels = self.gdata.getLabels()
nKeys = self.gdata.getNumberOfKeys()
tmp_b = []
for n in range(nKeys):
if 'log_yaxis' in self.prefs:
tmp_b.append(0.001)
else:
tmp_b.append(0.)
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num(datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num(datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
self.polygons = []
seq_b = [(self.gdata.max_num_key, 0.0), (self.gdata.min_num_key, 0.0)]
zorder = 0.0
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot', 0.)]
color = self.prefs.get('plot_color', 'Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot', color)
else:
labels = [(color, 0.)]
for label, num in labels:
color = self.palette.getColor(label)
ind = 0
tmp_x = []
tmp_y = []
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
value = 0.
tmp_x.append(key)
tmp_y.append(float(value) + tmp_b[ind])
ind += 1
seq_t = list(zip(tmp_x, tmp_y))
seq = seq_t + seq_b
poly = Polygon(seq, facecolor=color, fill=True, linewidth=.2, zorder=zorder)
self.ax.add_patch(poly)
self.polygons.append(poly)
tmp_b = list(tmp_y)
zorder -= 0.1
ymax = max(tmp_b)
ymax *= 1.1
ymin = min(min(tmp_b), 0.)
ymin *= 1.1
if 'log_yaxis' in self.prefs:
ymin = 0.001
xmax = max(tmp_x)
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get('ymin', ymin)
ymax = self.prefs.get('ymax', ymax)
xmin = self.prefs.get('xmin', xmin)
xmax = self.prefs.get('xmax', xmax)
self.ax.set_xlim(xmin=xmin, xmax=xmax)
self.ax.set_ylim(ymin=ymin, ymax=ymax)
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim(xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim(xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb(self, ax):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks([i + .5 for i in ticks])
ax.set_xticklabels([reverse_smap[i] for i in ticks])
ax.grid(False)
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim(xmin=xmin, xmax=len(ticks))
elif self.gdata.key_type == "time":
dl = PrettyDateLocator()
df = PrettyDateFormatter(dl)
ax.xaxis.set_major_locator(dl)
ax.xaxis.set_major_formatter(df)
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter()
ax.yaxis.set_major_formatter(sf)
else:
return None
| yujikato/DIRAC | src/DIRAC/Core/Utilities/Graphs/LineGraph.py | Python | gpl-3.0 | 4,298 | [
"DIRAC"
] | 5b31a2f8d250341db9828c543ac181131986650fb853fe768b9022da1c219dba |
# -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; tab-width: 4; -*-
# vim: set tabstop=8 shiftwidth=4 softtabstop=4 expandtab:
"""Models used by ncharts django web app.
2014 Copyright University Corporation for Atmospheric Research
This file is part of the "django-ncharts" package.
The license and distribution terms for this file may be found in the
file LICENSE in this package.
"""
import os, pytz, logging
from collections import OrderedDict
from django.db import models
from ncharts import netcdf, fileset, raf_database
from django.core import exceptions as dj_exc
from django.utils.translation import ugettext_lazy
import datetime
from timezone_field import TimeZoneField
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TimeZone(models.Model):
    """A timezone.
    Uses TimeZoneField from django-timezone-field app.
    """
    # If you add "default=pytz.utc" to TimeZoneField, then
    # makemigrations fails, reporting it can't serialize "<UTC>".
    # Haven't found a solution, so leave it off. Probably not an issue.
    # pylint thinks this class member name is too short
    # pylint: disable=invalid-name
    # The timezone itself is the primary key of the table.
    tz = TimeZoneField(primary_key=True)
class Project(models.Model):
    """A field project, with a unique name.
    To get all projects:
        Project.objects.all()
    To find all platforms of a project:
        Platform.objects.filter(projects__name__exact='METCRAXII')
    So we don't need this:
        platforms = models.ManyToManyField('ncharts.Platform')
    To find all datasets of a project:
        Dataset.objects.filter(project__name__exact='METCRAXII')
    So, you don't need this:
        datasets = models.ManyToManyField('ncharts.Dataset',
            related_name='datasets')
    """
    name = models.CharField(max_length=64, primary_key=True)
    location = models.CharField(max_length=256, blank=True)
    long_name = models.CharField(
        blank=True,
        max_length=256,
        help_text=ugettext_lazy('More detailed description of the project'))
    timezones = models.ManyToManyField(
        TimeZone,
        blank=True,
        related_name='+',
        help_text=ugettext_lazy('Supported timezones for plotting data of this project'))
    start_year = models.IntegerField()
    # null end_year means the project is still running
    end_year = models.IntegerField(null=True)
    @classmethod
    def make_tabs(cls, projects):
        """Build an ordered dictionary of projects keyed by year.

        Each key is a year; its value is the alphabetically sorted list of
        projects active in that year. Years are sorted numerically.

        Args:
            projects: iterable of Project instances.
        Returns:
            OrderedDict mapping year -> [Project, ...].
        """
        res = {}
        now = datetime.datetime.now()
        for project in projects:
            # An open-ended project (no end_year) is treated as running
            # through the current year. As in the original code, this
            # mutates the instance so later users see the filled-in year.
            if project.end_year is None:
                project.end_year = now.year
            for year in range(project.start_year, project.end_year + 1):
                if year not in res:
                    res[year] = []
                res[year].append(project)
        # Sort each year's project list by name. Use a distinct loop
        # variable so the "projects" argument is not shadowed.
        for year_projects in res.values():
            year_projects.sort(key=lambda x: x.name)
        res = OrderedDict(sorted(res.items(), key=lambda x: x[0]))
        return res
    def __str__(self):
        return self.name
class Platform(models.Model):
    """An observing platform with a unique name, deployed on one or more
    projects.
    To get all platforms:
        Platform.objects.all()
    """
    # Platform name, e.g. "ISFS", is the primary key.
    name = models.CharField(max_length=64, primary_key=True)
    long_name = models.CharField(
        blank=True,
        max_length=256,
        help_text=ugettext_lazy('More detailed description of the platform'))
    # This adds a platform_set attribute to Project.
    projects = models.ManyToManyField(Project)
    def __str__(self):
        # return 'Platform: %s' % self.name
        return self.name
class Variable(models.Model):
    """A variable in a dataset, used if the dataset does not have
    sufficient meta-data for its variables.
    """
    # Variable name as used in the dataset.
    name = models.CharField(max_length=64)
    # Units string; may be empty.
    units = models.CharField(max_length=64, blank=True)
    # Human-readable description; may be empty.
    long_name = models.CharField(max_length=256, blank=True)
class Dataset(models.Model):
    """A dataset, whose name should be unique within a project.
    Tried making this an abstract base class in django.
    From the django doc on abstract base classes of models:
        This model will then not be used to create any database table.
        Instead, when it is used as a base class for other models,
        its fields will be added to those of the child class. It is
        an error to have fields in the abstract base class with the
        same name as those in the child (and Django will raise an exception).
    However, a Dataset is a ForeignKey of a ClientState, and
    it appears an abstract model cannot be a ForeignKey. So we
    use the Multi-table inheritance in django.
    Then, to determine if a Dataset is a FileDataset, do
        try:
            x = dataset.filedataset
        except FileDataset.DoesNotExist as exc:
            pass
    To find all datasets of a project:
        Dataset.objects.filter(project__name__exact='METCRAXII')
    To find all datasets of a platform:
        Dataset.objects.filter(platforms__name__exact="ISFS")
    To find all datasets of a project and platform:
        Dataset.objects.filter(
            platforms__name__exact=platform_name).filter(
                project__name__exact=project_name)
    Don't add __init__ method, instead add @classmethod create() or a
    custom Manager.
    See https://docs.djangoproject.com/en/dev/ref/models/instances/
    For other instance variables, just set them in instance methods.
    """
    # class Meta:
    #     abstract = False
    name = models.CharField(
        max_length=128,
        help_text=ugettext_lazy('The name of a dataset should be unique within a project'))
    long_name = models.CharField(
        blank=True,
        max_length=256,
        help_text=ugettext_lazy('More detailed description of a dataset'))
    url = models.URLField(
        blank=True,
        max_length=200,
        help_text=ugettext_lazy('The URL that specifies the complete project dataset'))
    status = models.CharField(
        blank=True,
        max_length=256,
        help_text=ugettext_lazy('Current status of the project dataset'))
    # This adds a dataset_set attribute to Project
    project = models.ForeignKey(
        Project,
        help_text=ugettext_lazy('A dataset is associated with one project'))
    # This adds a dataset_set attribute to Platform
    platforms = models.ManyToManyField(
        Platform,
        help_text=ugettext_lazy('A dataset is associated with one or more platforms'))
    timezones = models.ManyToManyField(
        TimeZone,
        help_text=ugettext_lazy('Overrides the timezones of the project'))
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()
    location = models.CharField(
        max_length=256, blank=True,
        help_text=ugettext_lazy("Location for dataset if different than for project"))
    dset_type = models.CharField(
        blank=True,
        max_length=16,
        help_text=ugettext_lazy('Type of dataset: time-series, sounding'))
    # '+' tells django not to create a backwards relation from
    # Variable to Dataset
    variables = models.ManyToManyField(
        Variable, related_name='+')
    # netcdf_time_series, raf_postgres
    # dstype = models.CharField(max_length=64, blank=True)
    def __str__(self):
        # return 'Dataset: %s' % (self.name,)
        return self.name
    def add_platform(self, platform):
        """When one does a dataset.platforms.add(isfs), also do
        project.platforms.add(isfs).
        """
        self.platforms.add(platform)
        platform.projects.add(self.project)
    def get_start_time(self):
        '''
        Return self.start_time, localized to UTC if it was naive.
        A datetime object d is aware if d.tzinfo is not None and
        d.tzinfo.utcoffset(d) does not return None. If d.tzinfo is
        None, or if d.tzinfo is not None but d.tzinfo.utcoffset(d)
        returns None, d is naive.
        '''
        # _logger.debug("Dataset get_start_time, start_time=%s",
        #       self.start_time.isoformat())
        if self.start_time.tzinfo == None or \
                self.start_time.tzinfo.utcoffset(self.start_time) == None:
            self.start_time = pytz.utc.localize(self.start_time)
            _logger.debug(
                "Dataset localized start_time: %s",
                self.start_time.isoformat())
        return self.start_time
    def get_end_time(self):
        """
        Return self.end_time, localized to UTC if it was naive.
        A datetime object d is aware if d.tzinfo is not None and
        d.tzinfo.utcoffset(d) does not return None. If d.tzinfo is None,
        or if d.tzinfo is not None but d.tzinfo.utcoffset(d) returns None,
        d is naive.
        """
        # _logger.debug("Dataset get_end_time, end_time=%s",
        #       self.end_time.isoformat())
        if self.end_time.tzinfo == None or \
                self.end_time.tzinfo.utcoffset(self.end_time) == None:
            self.end_time = pytz.utc.localize(self.end_time)
            _logger.debug(
                "Dataset localized end_time: %s",
                self.end_time.isoformat())
        return self.end_time
    def alphabetic_tabs(self, variables):
        """Create a dictionary of tabs for the elements in variables.
        This is so that a large number of checkbox widgets for the
        selection of data variables to be plotted can be split into
        tabbed panes.
        The tab names can be created from the first character of the
        variable names, or in a platform-dependent way, by a
        category determined from the variable name.
        Args:
            variables: a django.forms.forms.BoundField, such as
                from form['variables'], where form is an instance
                of ncharts.forms.DataSelectionForm, which has a
                class member named variables of type
                forms.MultipleChoiceField. The variables have been
                alphabetically sorted prior to this call.
                Each element returned by iterating over variables is
                a django.forms.widgets.CheckboxChoiceInput.
                An instance of CheckboxChoiceInput has a choice_label
                attribute containing the label part of the choice tuple,
                (the variable name) and a tab attribute, which when
                rendered in a template, creates the checkbox html.
                References to these widgets are copied into lists
                under each tab.
        """
        nvars = len(variables)
        tabs = OrderedDict()
        # First pass: one tab per initial (upper-cased) letter.
        for var in iter(variables):
            vname = var.choice_label
            char1 = vname[0].upper()
            if not char1 in tabs:
                tabs[char1] = {"variables":[]}
            tabs[char1]["variables"].append(var)
        # Combine neighboring tabs if they each contain
        # fewer than tab_limit elements
        tab_limit = 10
        comb_tabs = OrderedDict()
        for tab, vals in tabs.items():
            # Sort by first letter
            # vals.sort(key=lambda x: x.choice_label.lower())
            # pylint thinks ctab could be used before assignment
            # pylint: disable=used-before-assignment
            # ctab is the current (possibly merged) tab name; safe because
            # the first iteration always takes the len(comb_tabs) == 0 branch.
            if len(comb_tabs) == 0 or \
                len(comb_tabs[ctab]["variables"]) > tab_limit or \
                len(vals["variables"]) > tab_limit:
                ctab = tab
                comb_tabs[ctab] = vals
            else:
                # Merge into the current tab; rename it "A-C" style.
                nctab = ctab[0] + "-" + tab
                if not nctab in comb_tabs:
                    comb_tabs[nctab] = {"variables":[]}
                comb_tabs[nctab]["variables"] = comb_tabs[ctab]["variables"] + vals["variables"]
                del comb_tabs[ctab]
                ctab = nctab
        # Double check that we didn't lose any variables
        nres = 0
        for tab, vals in comb_tabs.items():
            nres += len(vals["variables"])
        if nres != nvars:
            _logger.error("%d variables unaccounted for in building tabs", (nvars - nres))
        return comb_tabs
    def isfs_tabs(self, variables):
        """Create a tabs dictionary for ISFS variables
        Variables are grouped by category lists below; names containing
        apostrophes are treated as moments (e.g. two quotes -> 2nd moment).
        Args:
            variables: a django.forms.forms.BoundField, such as
                from form['variables'], where form is an instance
                of ncharts.forms.DataSelectionForm, which has a
                class member named variables of type
                forms.MultipleChoiceField. The variables have been
                alphabetically sorted prior to this call.
        """
        tabs = {}
        tabs["Met"] = {"tooltip":"Meteorological Variables", "variables":[]}
        tabs["Power"] = {"tooltip":"Battery and Solar Power", "variables":[]}
        tabs["Rad"] = {"tooltip":"Radiation Variables", "variables":[]}
        tabs["Soil"] = {"tooltip":"Soil Variables", "variables":[]}
        tabs["3dWind"] = {"tooltip":"3D Wind Variables", "variables":[]}
        tabs["Scalars"] = {"tooltip":"Fast Scalars Variables", "variables":[]}
        tabs["Others"] = {"tooltip":"Other Variables", "variables":[]}
        tabs["2ndMoments"] = {"tooltip":"2nd Moments Variables", "variables":[]}
        tabs["3rdMoments"] = {"tooltip":"3rd Moments Variables", "variables":[]}
        tabs["4thMoments"] = {"tooltip":"4th Moments Variables", "variables":[]}
        met_list = ["T", "RH", "P", "Spd", "Spd_max", "Dir", "U", "V", "Ifan"]
        pow_list = ["Vbatt", "Tbatt", "Iload", "Icharge", "Vmote"]
        rad_list = ["Rnet", "Rsw", "Rlw", "Rpile", "Rpar", "Tcase", "Tdome", "Wetness"]
        soil_list = ["Tsoil", "dTsoil_dt", "Qsoil", "Gsoil", "Vheat", "Vpile", \
            "Tau63", "Lambdasoil", "asoil", "Cvsoil", "Gsfc"]
        wind_list = ["u", "v", "w", "ldiag", "diagbits", "spd", "spd_max", "dir"]
        scalars_list = ["tc", "t", "h2o", "co2", "kh2o", "o3", "q", "mr", "irgadiag", "p"]
        for var in iter(variables):
            # Category is determined from the name up to the first '.'
            start_field = var.choice_label.split(".", 1)[0]
            quote_num = start_field.count("'")
            if quote_num == 0:
                if start_field in met_list:
                    tabs["Met"]["variables"].append(var)
                elif start_field in pow_list:
                    tabs["Power"]["variables"].append(var)
                elif start_field in rad_list:
                    tabs["Rad"]["variables"].append(var)
                elif start_field in soil_list:
                    tabs["Soil"]["variables"].append(var)
                elif start_field in wind_list:
                    tabs["3dWind"]["variables"].append(var)
                elif start_field in scalars_list:
                    tabs["Scalars"]["variables"].append(var)
                else:
                    tabs["Others"]["variables"].append(var)
            elif quote_num == 2:
                tabs["2ndMoments"]["variables"].append(var)
            elif quote_num == 3:
                tabs["3rdMoments"]["variables"].append(var)
            elif quote_num == 4:
                tabs["4thMoments"]["variables"].append(var)
            else:
                tabs["Others"]["variables"].append(var)
        # Drop empty tabs, then order the remaining ones by name.
        tabs = {key: value for key, value in tabs.items() if value["variables"]}
        tabs = OrderedDict(sorted(tabs.items(), key=lambda x: x[0]))
        return tabs
    def make_tabs(self, variables):
        """Select the correct tabbing method for the corresponding platform.
        If the dataset if of ISFS platform, the isfs_tabs method is used.
        Else, the alphabetic_tabs method is used.
        """
        is_isfs = False
        for plat in self.platforms.all():
            if plat.name == "ISFS":
                is_isfs = True
        if is_isfs:
            return self.isfs_tabs(variables)
        else:
            return self.alphabetic_tabs(variables)
class FileDataset(Dataset):
    """A Dataset consisting of a set of similarly named files.
    """
    directory = models.CharField(
        max_length=256,
        help_text=ugettext_lazy('Path to the directory containing the files for this dataset'))
    # format of file names, often containing timedate descriptors: %Y etc
    filenames = models.CharField(
        max_length=256,
        help_text=ugettext_lazy('Format of file names, often containing ' \
            'timedate descriptors such as %Y'))
    def get_fileset(self):
        """Return a fileset.Fileset corresponding to this
        FileDataset.
        """
        return fileset.Fileset(
            os.path.join(self.directory, self.filenames))
    def get_netcdf_dataset(self):
        """Return the netcdf.NetCDFDataset corresponding to this
        FileDataset.
        """
        return netcdf.NetCDFDataset(
            os.path.join(self.directory, self.filenames),
            self.get_start_time(), self.get_end_time())
    def get_variables(self):
        """Return the time series variable names of this dataset.
        If variables were configured on the model they take precedence;
        otherwise the NetCDF files are scanned.
        Raises:
            exception.NoDataFoundException
        """
        if len(self.variables.values()) > 0:
            res = {}
            for var in self.variables.all():
                res[var.name] = \
                    {"units": var.units, "long_name": var.long_name}
            return res
        ncdset = self.get_netcdf_dataset()
        return ncdset.get_variables()
    def get_series_tuples(
            self,
            series_name_fmt="",
            start_time=pytz.utc.localize(datetime.datetime.min),
            end_time=pytz.utc.localize(datetime.datetime.max)):
        """Get the names of the series between the start and end times.
        Returns (name, timestamp) tuples; only meaningful for
        sounding-type datasets. NOTE: the datetime defaults are
        evaluated once at import time, which is safe since they are
        immutable constants.
        """
        if not self.dset_type == "sounding":
            return []
        files = self.get_fileset().scan(start_time, end_time)
        # series names, formatted from the time of the file.
        # The scan function returns the file previous to start_time.
        # Remove that.
        return [(f.time.strftime(series_name_fmt), f.time.timestamp()) for f in files \
            if f.time >= start_time]
    def get_series_names(
            self,
            series_name_fmt="",
            start_time=pytz.utc.localize(datetime.datetime.min),
            end_time=pytz.utc.localize(datetime.datetime.max)):
        """Get the names of the series between the start and end times.
        Same as get_series_tuples but returns the formatted names only.
        """
        if not self.dset_type == "sounding":
            return []
        files = self.get_fileset().scan(start_time, end_time)
        # series names, formatted from the time of the file.
        # The scan function returns the file previous to start_time.
        # Remove that.
        return [f.time.strftime(series_name_fmt) for f in files \
            if f.time >= start_time]
class DBDataset(Dataset):
    """A Dataset whose contents are in a database.
    """
    dbname = models.CharField(
        max_length=128,
        help_text=ugettext_lazy('Database name'))
    host = models.CharField(
        max_length=128,
        help_text=ugettext_lazy('Database host'))
    user = models.CharField(
        max_length=128,
        help_text=ugettext_lazy('Database user'))
    password = models.CharField(
        max_length=128,
        help_text=ugettext_lazy('Database password'))
    port = models.IntegerField(
        default=5432,
        help_text=ugettext_lazy('Database port number, defaults to 5432'))
    table = models.CharField(
        max_length=128,
        help_text=ugettext_lazy('Database table name'))
    def get_connection(self):
        """Return a database connection for this DBDataset.
        Raises:
            exception.NoDataFoundException
        """
        return raf_database.RAFDatabase(
            database=self.dbname,
            host=self.host,
            port=self.port,
            user=self.user,
            password=self.password)
    def get_variables(self):
        """Return the time series variables in this DBDataset.
        Raises:
            exception.NoDataFoundException
        """
        return self.get_connection().get_variables()
    def get_start_time(self):
        """Return the earliest time available in the database table.
        Raises:
            exception.NoDataFoundException
        """
        return self.get_connection().get_start_time()
def validate_positive(value):
    """Django field validator: require a strictly positive value.

    Args:
        value: the numeric value to check.
    Raises:
        dj_exc.ValidationError: if ``value`` is zero or negative.
    """
    if value <= 0:
        message = '%s is not greater than zero' % value
        raise dj_exc.ValidationError(message)
class VariableTimes(models.Model):
    """Times of data sent to a client.
    """
    # blank=False means it is required
    # Variable name these times refer to.
    name = models.CharField(max_length=64, blank=False)
    # Time of the last non-missing data sent (presumably a unix
    # timestamp in seconds — verify against ClientState.save_data_times callers).
    last_ok = models.IntegerField(blank=False)
    # Time of the last data value sent, whether or not it was missing.
    last = models.IntegerField(blank=False)
class ClientState(models.Model):
    """Current state of an nchart client.
    The automatic primary key 'id' of an instance of this model
    is stored in the user's session by project and dataset name,
    and so when a user returns to view this dataset, their
    previous state is provided.
    """
    variables = models.TextField(blank=True) # list of variables, stringified by json
    # Variable on sounding Y axis
    yvariable = models.TextField(blank=True)
    # The selected Dataset. Dataset is a base class for several
    # types of Datasets. Since it is used here as a ForeignKey,
    # it cannot be abstract.
    # related_name='+' tells django not to create a backwards relation
    # from Dataset to ClientState, which we don't need.
    dataset = models.ForeignKey(Dataset, related_name='+')
    timezone = TimeZoneField(blank=False)
    start_time = models.DateTimeField()
    # Plot window length in seconds; must be positive (see validate_positive).
    time_length = models.FloatField(
        blank=False, validators=[validate_positive],
        default=datetime.timedelta(days=1).total_seconds())
    track_real_time = models.BooleanField(default=False)
    data_times = models.ManyToManyField(
        VariableTimes,
        blank=True,
        related_name='+')
    # list of sounding series, stringified by json
    soundings = models.TextField(blank=True)
    def __str__(self):
        return 'ClientState for dataset: %s' % (self.dataset.name)
    def clean(self):
        # Model-level validation: the selected window must start within
        # the dataset and have a positive length.
        if self.start_time < self.dataset.get_start_time():
            raise dj_exc.ValidationError(
                "start_time is earlier than dataset.start_time")
        # if self.end_time > self.dataset.end_time:
        #     raise dj_exc.ValidationError(
        #         "end_time is earlier than dataset.end_time")
        # if self.start_time >= self.end_time:
        #     raise dj_exc.ValidationError(
        #         "start_time is not earlier than end_time")
        if self.time_length <= 0:
            raise dj_exc.ValidationError("time_length is not positive")
    def save_data_times(self, vname, time_last_ok, time_last):
        """Save the times associated with the last chunk of data sent to this client.
        Updates the existing VariableTimes row for vname, or creates one.
        """
        try:
            vart = self.data_times.get(name=vname)
            vart.last_ok = time_last_ok
            vart.last = time_last
            vart.save()
        except VariableTimes.DoesNotExist:
            vart = VariableTimes.objects.create(
                name=vname, last_ok=time_last_ok, last=time_last)
            self.data_times.add(vart)
    def get_data_times(self, vname):
        """Fetch the times associated with the last chunk of data sent to this client.
        Returns [last_ok, last], or [None, None] if nothing was recorded.
        """
        try:
            vart = self.data_times.get(name=vname)
            return [vart.last_ok, vart.last]
        except VariableTimes.DoesNotExist:
            return [None, None]
| nguyenduchien1994/django-ncharts | ncharts/models.py | Python | bsd-2-clause | 23,771 | [
"NetCDF"
] | f2c32db1ad51fbae0c0266e02ee15c04f7ecbce7a139c5787fa3e963fe2cb441 |
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import espressomd
import unittest as ut
import numpy as np
from espressomd.electrostatics import EwaldGpu
class ewald_GPU_test(ut.TestCase):
    """Round-trip check for the EwaldGpu electrostatics actor.

    The actor is constructed with a known set of tuning parameters and,
    after being added to the system, the parameters read back from the
    Espresso core must match what was passed in.
    """
    es = espressomd.System()
    test_params = {}
    test_params["bjerrum_length"] = 2
    test_params["num_kx"] = 2
    test_params["num_ky"] = 2
    test_params["num_kz"] = 2
    test_params["K_max"] = 10
    test_params["time_calc_steps"] = 100
    test_params["rcut"] = 0.9
    test_params["accuracy"] = 1e-1
    test_params["precision"] = 1e-2
    test_params["alpha"] = 3.5

    def runTest(self):
        """Add the actor and verify every parameter survives the round trip."""
        # The keys of test_params match the EwaldGpu keyword arguments,
        # so the actor can be constructed directly from the dict.
        ewald = EwaldGpu(**self.test_params)
        self.es.actors.add(ewald)
        set_params = ewald._getParamsFromEsCore()
        for key in self.test_params:
            # BUGFIX: the original merely *returned* False on a mismatch,
            # which unittest ignores, so this test could never fail.
            # Use a real assertion with a descriptive message instead.
            self.assertEqual(
                set_params[key], self.test_params[key],
                msg="Parameter mismatch: %s: %s != %s" % (
                    key, set_params[key], self.test_params[key]))
if __name__ == "__main__":
    # Report the compiled-in feature list before running the test suite.
    print("Features: ",espressomd.features())
    ut.main()
| jdegraaf/espresso | testsuite/python/ewald_gpu.py | Python | gpl-3.0 | 2,129 | [
"ESPResSo"
] | 4bee92ad729d9778411a549acd51fb76f2e5d2f38bdd451966db932cd9a11d77 |
# -*- coding: utf-8 -*-
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2014 and later, Alexander J G Pitchford
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# @author: Alexander Pitchford
# @email1: agp1@aber.ac.uk
# @email2: alex.pitchford@gmail.com
# @organization: Aberystwyth University
# @supervisor: Daniel Burgarth
# @date: Sep 2015
"""
Tests for main control.pulseoptim methods
Some associated objects also tested.
"""
from __future__ import division
import os
import uuid
import shutil
import numpy as np
from numpy.testing import (
assert_, assert_almost_equal, run_module_suite, assert_equal)
from scipy.optimize import check_grad
from qutip import Qobj, identity, sigmax, sigmay, sigmaz, tensor
from qutip.qip import hadamard_transform
from qutip.qip.algorithms import qft
import qutip.control.optimconfig as optimconfig
import qutip.control.dynamics as dynamics
import qutip.control.termcond as termcond
import qutip.control.optimizer as optimizer
import qutip.control.stats as stats
import qutip.control.pulsegen as pulsegen
import qutip.control.errors as errors
import qutip.control.loadparams as loadparams
import qutip.control.pulseoptim as cpo
import qutip.control.symplectic as sympl
class TestPulseOptim:
"""
A test class for the QuTiP functions for generating quantum gates
"""
def setUp(self):
# list of file paths to be removed after test
self.tmp_files = []
# list of folder paths to be removed after test
self.tmp_dirs = []
def tearDown(self):
for f in self.tmp_files:
try:
os.remove(f)
except:
pass
for d in self.tmp_dirs:
shutil.rmtree(d, ignore_errors=True)
    def test_01_1_unitary_hadamard(self):
        """
        control.pulseoptim: Hadamard gate with linear initial pulses
        assert that goal is achieved and fidelity error is below threshold
        """
        # Hadamard: sigma_z drift, single sigma_x control
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)
        n_ts = 10
        evo_time = 10
        # Run the optimisation
        result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        init_pulse_type='LIN',
                        gen_stats=True)
        assert_(result.goal_achieved, msg="Hadamard goal not achieved. "
                    "Terminated due to: {}, with infidelity: {}".format(
                    result.termination_reason, result.fid_err))
        assert_almost_equal(result.fid_err, 0.0, decimal=10,
                            err_msg="Hadamard infidelity too high")
    def test_01_2_unitary_hadamard_no_stats(self):
        """
        control.pulseoptim: Hadamard gate with linear initial pulses (no stats)
        assert that goal is achieved
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)
        n_ts = 10
        evo_time = 10
        # Run the optimisation
        #Try without stats
        result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        init_pulse_type='LIN',
                        gen_stats=False)
        assert_(result.goal_achieved, msg="Hadamard goal not achieved "
                    "(no stats). "
                    "Terminated due to: {}, with infidelity: {}".format(
                    result.termination_reason, result.fid_err))
    def test_01_3_unitary_hadamard_tau(self):
        """
        control.pulseoptim: Hadamard gate with linear initial pulses (tau)
        assert that goal is achieved when timeslot durations are given
        explicitly as a tau array instead of n_ts/evo_time
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)
        # Run the optimisation
        #Try setting timeslots with tau array
        tau = np.arange(1.0, 10.0, 1.0)
        result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
                        tau=tau,
                        fid_err_targ=1e-10,
                        init_pulse_type='LIN',
                        gen_stats=False)
        assert_(result.goal_achieved, msg="Hadamard goal not achieved "
                    "(tau as timeslots). "
                    "Terminated due to: {}, with infidelity: {}".format(
                    result.termination_reason, result.fid_err))
    def test_01_4_unitary_hadamard_qobj(self):
        """
        control.pulseoptim: Hadamard gate with linear initial pulses (Qobj)
        assert that goal is achieved when internal operators are kept
        as Qobj (oper_dtype) rather than dense arrays
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)
        n_ts = 10
        evo_time = 10
        # Run the optimisation
        #Try with Qobj propagation
        result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        init_pulse_type='LIN',
                        dyn_params={'oper_dtype':Qobj},
                        gen_stats=True)
        assert_(result.goal_achieved, msg="Hadamard goal not achieved "
                    "(Qobj propagation). "
                    "Terminated due to: {}, with infidelity: {}".format(
                    result.termination_reason, result.fid_err))
    def test_01_5_unitary_hadamard_oo(self):
        """
        control.pulseoptim: Hadamard gate with linear initial pulses (OO)
        assert that goal is achieved and pulseoptim method achieves
        same result as OO method
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)
        n_ts = 10
        evo_time = 10
        # Run the optimisation via the object-oriented interface
        optim = cpo.create_pulse_optimizer(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        dyn_type='UNIT',
                        init_pulse_type='LIN',
                        gen_stats=True)
        dyn = optim.dynamics
        init_amps = optim.pulse_generator.gen_pulse().reshape([-1, 1])
        dyn.initialize_controls(init_amps)
        result_oo = optim.run_optimization()
        # Run the pulseoptim func
        result_po = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        init_pulse_type='LIN',
                        gen_stats=True)
        assert_almost_equal(result_oo.fid_err, result_po.fid_err, decimal=10,
                            err_msg="OO and pulseoptim methods produce "
                                    "different results for Hadamard")
    def test_01_6_unitary_hadamard_grad(self):
        """
        control.pulseoptim: Hadamard gate gradient check
        assert that gradient approx and exact gradient match in tolerance
        (scipy.optimize.check_grad compares the analytic gradient against
        a finite-difference approximation)
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)
        n_ts = 10
        evo_time = 10
        # Create the optim objects
        optim = cpo.create_pulse_optimizer(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        dyn_type='UNIT',
                        init_pulse_type='LIN',
                        gen_stats=True)
        dyn = optim.dynamics
        init_amps = optim.pulse_generator.gen_pulse().reshape([-1, 1])
        dyn.initialize_controls(init_amps)
        # Check the exact gradient
        func = optim.fid_err_func_wrapper
        grad = optim.fid_err_grad_wrapper
        x0 = dyn.ctrl_amps.flatten()
        grad_diff = check_grad(func, grad, x0)
        assert_almost_equal(grad_diff, 0.0, decimal=6,
                            err_msg="Unitary gradient outside tolerance")
def test_02_1_qft(self):
"""
control.pulseoptim: QFT gate with linear initial pulses
assert that goal is achieved and fidelity error is below threshold
"""
Sx = sigmax()
Sy = sigmay()
Sz = sigmaz()
Si = 0.5*identity(2)
H_d = 0.5*(tensor(Sx, Sx) + tensor(Sy, Sy) + tensor(Sz, Sz))
H_c = [tensor(Sx, Si), tensor(Sy, Si), tensor(Si, Sx), tensor(Si, Sy)]
U_0 = identity(4)
# Target for the gate evolution - Quantum Fourier Transform gate
U_targ = qft.qft(2)
n_ts = 10
evo_time = 10
result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-9,
init_pulse_type='LIN',
gen_stats=True)
assert_(result.goal_achieved, msg="QFT goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result.termination_reason, result.fid_err))
assert_almost_equal(result.fid_err, 0.0, decimal=7,
err_msg="QFT infidelity too high")
# check bounds
result2 = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-9,
amp_lbound=-1.0, amp_ubound=1.0,
init_pulse_type='LIN',
gen_stats=True)
assert_((result2.final_amps >= -1.0).all() and
(result2.final_amps <= 1.0).all(),
msg="Amplitude bounds exceeded for QFT")
def test_02_2_qft_bounds(self):
"""
control.pulseoptim: QFT gate with linear initial pulses (bounds)
assert that amplitudes remain in bounds
"""
Sx = sigmax()
Sy = sigmay()
Sz = sigmaz()
Si = 0.5*identity(2)
H_d = 0.5*(tensor(Sx, Sx) + tensor(Sy, Sy) + tensor(Sz, Sz))
H_c = [tensor(Sx, Si), tensor(Sy, Si), tensor(Si, Sx), tensor(Si, Sy)]
U_0 = identity(4)
# Target for the gate evolution - Quantum Fourier Transform gate
U_targ = qft.qft(2)
n_ts = 10
evo_time = 10
result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-9,
amp_lbound=-1.0, amp_ubound=1.0,
init_pulse_type='LIN',
gen_stats=True)
assert_((result.final_amps >= -1.0).all() and
(result.final_amps <= 1.0).all(),
msg="Amplitude bounds exceeded for QFT")
def test_03_dumping(self):
"""
control: data dumping
Dump out processing data, check file counts
"""
N_EXP_OPTIMDUMP_FILES = 10
N_EXP_DYNDUMP_FILES = 49
# Hadamard
H_d = sigmaz()
H_c = [sigmax()]
U_0 = identity(2)
U_targ = hadamard_transform(1)
n_ts = 1000
evo_time = 4
dump_folder = str(uuid.uuid4())
qtrl_dump_dir = os.path.expanduser(os.path.join('~', dump_folder))
self.tmp_dirs.append(qtrl_dump_dir)
optim_dump_dir = os.path.join(qtrl_dump_dir, 'optim')
dyn_dump_dir = os.path.join(qtrl_dump_dir, 'dyn')
result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-9,
init_pulse_type='LIN',
optim_params={'dumping':'FULL', 'dump_to_file':True,
'dump_dir':optim_dump_dir},
dyn_params={'dumping':'FULL', 'dump_to_file':True,
'dump_dir':dyn_dump_dir},
gen_stats=True)
# check dumps were generated
optim = result.optimizer
dyn = optim.dynamics
assert_(optim.dump is not None, msg='optimizer dump not created')
assert_(dyn.dump is not None, msg='dynamics dump not created')
# Count files that were output
nfiles = len(os.listdir(optim.dump.dump_dir))
assert_(nfiles == N_EXP_OPTIMDUMP_FILES,
msg="{} optimizer dump files generated, {} expected".format(
nfiles, N_EXP_OPTIMDUMP_FILES))
nfiles = len(os.listdir(dyn.dump.dump_dir))
assert_(nfiles == N_EXP_DYNDUMP_FILES,
msg="{} dynamics dump files generated, {} expected".format(
nfiles, N_EXP_DYNDUMP_FILES))
# dump all to specific file stream
fpath = os.path.expanduser(os.path.join('~', str(uuid.uuid4())))
self.tmp_files.append(fpath)
with open(fpath, 'wb') as f:
optim.dump.writeout(f)
assert_(os.stat(fpath).st_size > 0,
msg="Nothing written to optimizer dump file")
fpath = os.path.expanduser(os.path.join('~', str(uuid.uuid4())))
self.tmp_files.append(fpath)
with open(fpath, 'wb') as f:
dyn.dump.writeout(f)
assert_(os.stat(fpath).st_size > 0,
msg="Nothing written to dynamics dump file")
def test_04_unitarity(self):
"""
control: unitarity checking (via dump)
Dump out processing data and use to check unitary evolution
"""
# Hadamard
H_d = sigmaz()
H_c = [sigmax()]
U_0 = identity(2)
U_targ = hadamard_transform(1)
n_ts = 1000
evo_time = 4
result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-9,
init_pulse_type='LIN',
dyn_params={'dumping':'FULL'},
gen_stats=True)
# check dumps were generated
optim = result.optimizer
dyn = optim.dynamics
assert_(dyn.dump is not None, msg='dynamics dump not created')
# Use the dump to check unitarity of all propagators and evo_ops
dyn.unitarity_tol = 1e-14
nu_prop = 0
nu_fwd_evo = 0
nu_onto_evo = 0
for d in dyn.dump.evo_dumps:
for k in range(dyn.num_tslots):
if not dyn._is_unitary(d.prop[k]): nu_prop += 1
if not dyn._is_unitary(d.fwd_evo[k]): nu_fwd_evo += 1
if not dyn._is_unitary(d.onto_evo[k]): nu_onto_evo += 1
assert_(nu_prop==0,
msg="{} propagators found to be non-unitary".format(nu_prop))
assert_(nu_fwd_evo==0,
msg="{} fwd evo ops found to be non-unitary".format(
nu_fwd_evo))
assert_(nu_onto_evo==0,
msg="{} onto evo ops found to be non-unitary".format(
nu_onto_evo))
def test_05_1_state_to_state(self):
"""
control.pulseoptim: state-to-state transfer
linear initial pulse used
assert that goal is achieved
"""
# 2 qubits with Ising interaction
# some arbitary coupling constants
alpha = [0.9, 0.7]
beta = [0.8, 0.9]
Sx = sigmax()
Sz = sigmaz()
H_d = (alpha[0]*tensor(Sx,identity(2)) +
alpha[1]*tensor(identity(2),Sx) +
beta[0]*tensor(Sz,identity(2)) +
beta[1]*tensor(identity(2),Sz))
H_c = [tensor(Sz,Sz)]
q1_0 = q2_0 = Qobj([[1], [0]])
q1_T = q2_T = Qobj([[0], [1]])
psi_0 = tensor(q1_0, q2_0)
psi_T = tensor(q1_T, q2_T)
n_ts = 10
evo_time = 18
# Run the optimisation
result = cpo.optimize_pulse_unitary(H_d, H_c, psi_0, psi_T,
n_ts, evo_time,
fid_err_targ=1e-10,
init_pulse_type='LIN',
gen_stats=True)
assert_(result.goal_achieved, msg="State-to-state goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result.termination_reason, result.fid_err))
assert_almost_equal(result.fid_err, 0.0, decimal=10,
err_msg="Hadamard infidelity too high")
def test_05_2_state_to_state_qobj(self):
"""
control.pulseoptim: state-to-state transfer (Qobj)
linear initial pulse used
assert that goal is achieved
"""
# 2 qubits with Ising interaction
# some arbitary coupling constants
alpha = [0.9, 0.7]
beta = [0.8, 0.9]
Sx = sigmax()
Sz = sigmaz()
H_d = (alpha[0]*tensor(Sx,identity(2)) +
alpha[1]*tensor(identity(2),Sx) +
beta[0]*tensor(Sz,identity(2)) +
beta[1]*tensor(identity(2),Sz))
H_c = [tensor(Sz,Sz)]
q1_0 = q2_0 = Qobj([[1], [0]])
q1_T = q2_T = Qobj([[0], [1]])
psi_0 = tensor(q1_0, q2_0)
psi_T = tensor(q1_T, q2_T)
n_ts = 10
evo_time = 18
#Try with Qobj propagation
result = cpo.optimize_pulse_unitary(H_d, H_c, psi_0, psi_T,
n_ts, evo_time,
fid_err_targ=1e-10,
init_pulse_type='LIN',
dyn_params={'oper_dtype':Qobj},
gen_stats=True)
assert_(result.goal_achieved, msg="State-to-state goal not achieved "
"(Qobj propagation)"
"Terminated due to: {}, with infidelity: {}".format(
result.termination_reason, result.fid_err))
def test_06_lindbladian(self):
"""
control.pulseoptim: amplitude damping channel
Lindbladian dynamics
assert that fidelity error is below threshold
"""
Sx = sigmax()
Sz = sigmaz()
Si = identity(2)
Sd = Qobj(np.array([[0, 1], [0, 0]]))
Sm = Qobj(np.array([[0, 0], [1, 0]]))
Sd_m = Qobj(np.array([[1, 0], [0, 0]]))
gamma = 0.1
L0_Ad = gamma*(2*tensor(Sm, Sd.trans()) -
(tensor(Sd_m, Si) + tensor(Si, Sd_m.trans())))
LC_x = -1j*(tensor(Sx, Si) - tensor(Si, Sx))
LC_z = -1j*(tensor(Sz, Si) - tensor(Si, Sz))
drift = L0_Ad
ctrls = [LC_z, LC_x]
n_ctrls = len(ctrls)
initial = tensor(Si, Si)
had_gate = hadamard_transform(1)
target_DP = tensor(had_gate, had_gate)
n_ts = 10
evo_time = 5
result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
n_ts, evo_time,
fid_err_targ=1e-3,
max_iter=200,
init_pulse_type='LIN',
gen_stats=True)
assert_(result.fid_err < 0.1,
msg="Fidelity higher than expected")
# Repeat with Qobj propagation
result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
n_ts, evo_time,
fid_err_targ=1e-3,
max_iter=200,
init_pulse_type='LIN',
dyn_params={'oper_dtype':Qobj},
gen_stats=True)
assert_(result.fid_err < 0.1,
msg="Fidelity higher than expected (Qobj propagation)")
# Check same result is achieved using the create objects method
optim = cpo.create_pulse_optimizer(drift, ctrls,
initial, target_DP,
n_ts, evo_time,
fid_err_targ=1e-3,
init_pulse_type='LIN',
gen_stats=True)
dyn = optim.dynamics
p_gen = optim.pulse_generator
init_amps = np.zeros([n_ts, n_ctrls])
for j in range(n_ctrls):
init_amps[:, j] = p_gen.gen_pulse()
dyn.initialize_controls(init_amps)
# Check the exact gradient
func = optim.fid_err_func_wrapper
grad = optim.fid_err_grad_wrapper
x0 = dyn.ctrl_amps.flatten()
grad_diff = check_grad(func, grad, x0)
assert_almost_equal(grad_diff, 0.0, decimal=7,
err_msg="Frechet gradient outside tolerance")
result2 = optim.run_optimization()
assert_almost_equal(result.fid_err, result2.fid_err, decimal=3,
err_msg="Direct and indirect methods produce "
"different results for ADC")
def test_07_symplectic(self):
"""
control.pulseoptim: coupled oscillators (symplectic dynamics)
assert that fidelity error is below threshold
"""
g1 = 1.0
g2 = 0.2
A0 = Qobj(np.array([[1, 0, g1, 0],
[0, 1, 0, g2],
[g1, 0, 1, 0],
[0, g2, 0, 1]]))
A_rot = Qobj(np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
]))
A_sqz = Qobj(0.4*np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
]))
A_c = [A_rot, A_sqz]
n_ctrls = len(A_c)
initial = identity(4)
A_targ = Qobj(np.array([
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0]
]))
Omg = Qobj(sympl.calc_omega(2))
S_targ = (-A_targ*Omg*np.pi/2.0).expm()
n_ts = 20
evo_time = 10
result = cpo.optimize_pulse(A0, A_c, initial, S_targ,
n_ts, evo_time,
fid_err_targ=1e-3,
max_iter=200,
dyn_type='SYMPL',
init_pulse_type='ZERO',
gen_stats=True)
assert_(result.goal_achieved, msg="Symplectic goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result.termination_reason, result.fid_err))
assert_almost_equal(result.fid_err, 0.0, decimal=2,
err_msg="Symplectic infidelity too high")
# Repeat with Qobj integration
resultq = cpo.optimize_pulse(A0, A_c, initial, S_targ,
n_ts, evo_time,
fid_err_targ=1e-3,
max_iter=200,
dyn_type='SYMPL',
init_pulse_type='ZERO',
dyn_params={'oper_dtype':Qobj},
gen_stats=True)
assert_(resultq.goal_achieved, msg="Symplectic goal not achieved "
"(Qobj integration). "
"Terminated due to: {}, with infidelity: {}".format(
resultq.termination_reason, result.fid_err))
# Check same result is achieved using the create objects method
optim = cpo.create_pulse_optimizer(A0, list(A_c),
initial, S_targ,
n_ts, evo_time,
fid_err_targ=1e-3,
dyn_type='SYMPL',
init_pulse_type='ZERO',
gen_stats=True)
dyn = optim.dynamics
p_gen = optim.pulse_generator
init_amps = np.zeros([n_ts, n_ctrls])
for j in range(n_ctrls):
init_amps[:, j] = p_gen.gen_pulse()
dyn.initialize_controls(init_amps)
# Check the exact gradient
func = optim.fid_err_func_wrapper
grad = optim.fid_err_grad_wrapper
x0 = dyn.ctrl_amps.flatten()
grad_diff = check_grad(func, grad, x0)
assert_almost_equal(grad_diff, 0.0, decimal=5,
err_msg="Frechet gradient outside tolerance "
"(SYMPL)")
result2 = optim.run_optimization()
assert_almost_equal(result.fid_err, result2.fid_err, decimal=6,
err_msg="Direct and indirect methods produce "
"different results for Symplectic")
def test_08_crab(self):
"""
control.pulseoptim: Hadamard gate using CRAB algorithm
Apply guess and ramping pulse
assert that goal is achieved and fidelity error is below threshold
assert that starting amplitude is zero
"""
# Hadamard
H_d = sigmaz()
H_c = [sigmax()]
U_0 = identity(2)
U_targ = hadamard_transform(1)
n_ts = 12
evo_time = 10
# Run the optimisation
result = cpo.opt_pulse_crab_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-5,
alg_params={'crab_pulse_params':{'randomize_coeffs':False,
'randomize_freqs':False}},
init_coeff_scaling=0.5,
guess_pulse_type='GAUSSIAN',
guess_pulse_params={'variance':0.1*evo_time},
guess_pulse_scaling=1.0, guess_pulse_offset=1.0,
amp_lbound=None, amp_ubound=None,
ramping_pulse_type='GAUSSIAN_EDGE',
ramping_pulse_params={'decay_time':evo_time/100.0},
gen_stats=True)
assert_(result.goal_achieved, msg="Hadamard goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result.termination_reason, result.fid_err))
assert_almost_equal(result.fid_err, 0.0, decimal=3,
err_msg="Hadamard infidelity too high")
assert_almost_equal(result.final_amps[0, 0], 0.0, decimal=3,
err_msg="lead in amplitude not zero")
# Repeat with Qobj integration
result = cpo.opt_pulse_crab_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-5,
alg_params={'crab_pulse_params':{'randomize_coeffs':False,
'randomize_freqs':False}},
dyn_params={'oper_dtype':Qobj},
init_coeff_scaling=0.5,
guess_pulse_type='GAUSSIAN',
guess_pulse_params={'variance':0.1*evo_time},
guess_pulse_scaling=1.0, guess_pulse_offset=1.0,
amp_lbound=None, amp_ubound=None,
ramping_pulse_type='GAUSSIAN_EDGE',
ramping_pulse_params={'decay_time':evo_time/100.0},
gen_stats=True)
assert_(result.goal_achieved, msg="Hadamard goal not achieved"
"(Qobj integration). "
"Terminated due to: {}, with infidelity: {}".format(
result.termination_reason, result.fid_err))
def test_09_load_params(self):
"""
control.pulseoptim: Hadamard gate (loading config from file)
compare with result produced by pulseoptim method
"""
H_d = sigmaz()
H_c = sigmax()
U_0 = identity(2)
U_targ = hadamard_transform(1)
cfg = optimconfig.OptimConfig()
cfg.param_fname = "Hadamard_params.ini"
cfg.param_fpath = os.path.join(os.path.dirname(__file__),
cfg.param_fname)
cfg.pulse_type = "ZERO"
loadparams.load_parameters(cfg.param_fpath, config=cfg)
dyn = dynamics.DynamicsUnitary(cfg)
dyn.target = U_targ
dyn.initial = U_0
dyn.drift_dyn_gen = H_d
dyn.ctrl_dyn_gen = [H_c]
loadparams.load_parameters(cfg.param_fpath, dynamics=dyn)
dyn.init_timeslots()
n_ts = dyn.num_tslots
n_ctrls = dyn.num_ctrls
pgen = pulsegen.create_pulse_gen(pulse_type=cfg.pulse_type, dyn=dyn)
loadparams.load_parameters(cfg.param_fpath, pulsegen=pgen)
tc = termcond.TerminationConditions()
loadparams.load_parameters(cfg.param_fpath, term_conds=tc)
if cfg.optim_method == 'BFGS':
optim = optimizer.OptimizerBFGS(cfg, dyn)
elif cfg.optim_method == 'FMIN_L_BFGS_B':
optim = optimizer.OptimizerLBFGSB(cfg, dyn)
elif cfg.optim_method is None:
raise errors.UsageError("Optimisation algorithm must be specified "
"via 'optim_method' parameter")
else:
optim = optimizer.Optimizer(cfg, dyn)
optim.method = cfg.optim_method
loadparams.load_parameters(cfg.param_fpath, optim=optim)
sts = stats.Stats()
dyn.stats = sts
optim.stats = sts
optim.config = cfg
optim.dynamics = dyn
optim.pulse_generator = pgen
optim.termination_conditions = tc
init_amps = np.zeros([n_ts, n_ctrls])
for j in range(n_ctrls):
init_amps[:, j] = pgen.gen_pulse()
dyn.initialize_controls(init_amps)
result = optim.run_optimization()
result2 = cpo.optimize_pulse_unitary(H_d, list([H_c]), U_0, U_targ,
6, 6, fid_err_targ=1e-10,
init_pulse_type='LIN',
amp_lbound=-1.0, amp_ubound=1.0,
gen_stats=True)
assert_almost_equal(result.final_amps, result2.final_amps, decimal=5,
err_msg="Pulses do not match")
def test_10_init_pulse_params(self):
"""
control.pulsegen: Check periodic control functions
"""
def count_waves(n_ts, evo_time, ptype, freq=None, num_waves=None):
# Any dyn config will do
#Hadamard
H_d = sigmaz()
H_c = [sigmax()]
U_0 = identity(2)
U_targ = hadamard_transform(1)
pulse_params = {}
if freq is not None:
pulse_params['freq'] = freq
if num_waves is not None:
pulse_params['num_waves'] = num_waves
optim = cpo.create_pulse_optimizer(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
dyn_type='UNIT',
init_pulse_type=ptype,
init_pulse_params=pulse_params,
gen_stats=False)
pgen = optim.pulse_generator
pulse = pgen.gen_pulse()
# count number of waves
zero_cross = pulse[0:-2]*pulse[1:-1] < 0
return (sum(zero_cross) + 1) / 2
n_ts = 1000
evo_time = 10
ptypes = ['SINE', 'SQUARE', 'TRIANGLE', 'SAW']
numws = [1, 5, 10, 100]
freqs = [0.1, 1, 10, 20]
for ptype in ptypes:
for freq in freqs:
exp_num_waves = evo_time*freq
fnd_num_waves = count_waves(n_ts, evo_time, ptype, freq=freq)
# print("Found {} waves for pulse type '{}', "
# "freq {}".format(fnd_num_waves, ptype, freq))
assert_equal(exp_num_waves, fnd_num_waves, err_msg=
"Number of waves incorrect for pulse type '{}', "
"freq {}".format(ptype, freq))
for num_waves in numws:
exp_num_waves = num_waves
fnd_num_waves = count_waves(n_ts, evo_time, ptype,
num_waves=num_waves)
# print("Found {} waves for pulse type '{}', "
# "num_waves {}".format(fnd_num_waves, ptype, num_waves))
assert_equal(exp_num_waves, fnd_num_waves, err_msg=
"Number of waves incorrect for pulse type '{}', "
"num_waves {}".format(ptype, num_waves))
def test_11_time_dependent_drift(self):
"""
control.pulseoptim: Hadamard gate with fixed and time varying drift
assert that goal is achieved for both and that different control
pulses are produced (only) when they should be
"""
# Hadamard
H_0 = sigmaz()
H_c = [sigmax()]
U_0 = identity(2)
U_targ = hadamard_transform(1)
n_ts = 20
evo_time = 10
drift_amps_flat = np.ones([n_ts], dtype=float)
dript_amps_step = [np.round(float(k)/n_ts) for k in range(n_ts)]
# Run the optimisations
result_fixed = cpo.optimize_pulse_unitary(H_0, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-10,
init_pulse_type='LIN',
gen_stats=True)
assert_(result_fixed.goal_achieved,
msg="Fixed drift goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result_fixed.termination_reason, result_fixed.fid_err))
H_d = [drift_amps_flat[k]*H_0 for k in range(n_ts)]
result_flat = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-10,
init_pulse_type='LIN',
gen_stats=True)
assert_(result_flat.goal_achieved, msg="Flat drift goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result_flat.termination_reason, result_flat.fid_err))
# Check fixed and flat produced the same pulse
assert_almost_equal(result_fixed.final_amps, result_flat.final_amps,
decimal=9,
err_msg="Flat and fixed drift result in "
"different control pules")
H_d = [dript_amps_step[k]*H_0 for k in range(n_ts)]
result_step = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ,
n_ts, evo_time,
fid_err_targ=1e-10,
init_pulse_type='LIN',
gen_stats=True)
assert_(result_step.goal_achieved, msg="Step drift goal not achieved. "
"Terminated due to: {}, with infidelity: {}".format(
result_step.termination_reason, result_step.fid_err))
# Check step and flat produced different results
assert_(np.any(
np.abs(result_flat.final_amps - result_step.final_amps) > 1e-3),
msg="Flat and step drift result in "
"the same control pules")
if __name__ == "__main__":
run_module_suite()
| anubhavvardhan/qutip | qutip/tests/test_control_pulseoptim.py | Python | bsd-3-clause | 38,320 | [
"Gaussian"
] | f1466548d1f1e3777af90d148ee003b449657302ae9c3cfb504e8e5dcfb1e2ca |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import numbers
import os
def is_integral(x):
"""Any integer value"""
try:
return isinstance(int(x), numbers.Integral) and \
not isinstance(x, bool) and int(x) > 0
except ValueError:
return False
class Nek5000(Package):
"""A fast and scalable high-order solver for computational fluid
dynamics"""
homepage = "https://nek5000.mcs.anl.gov/"
url = "https://github.com/Nek5000/Nek5000/releases/download/v17.0/Nek5000-v17.0.tar.gz"
git = "https://github.com/Nek5000/Nek5000.git"
tags = ['cfd', 'flow', 'hpc', 'solver', 'navier-stokes',
'spectral-elements', 'fluid', 'ecp', 'ecp-apps']
version('develop', branch='master')
version('17.0', '6a13bfad2ce023897010dd88f54a0a87')
# MPI, Profiling and Visit variants
variant('mpi', default=True, description='Build with MPI.')
variant('profiling', default=True, description='Build with profiling data.')
variant('visit', default=False, description='Build with Visit.')
# TODO: add a variant 'blas' or 'external-blas' to enable the usage of
# Spack installed/configured blas.
# Variant for MAXNEL, we need to read this from user
variant(
'MAXNEL',
default=150000,
description='Maximum number of elements for Nek5000 tools.',
values=is_integral
)
# Variants for Nek tools
variant('genbox', default=True, description='Build genbox tool.')
variant('int_tp', default=True, description='Build int_tp tool.')
variant('n2to3', default=True, description='Build n2to3 tool.')
variant('postnek', default=True, description='Build postnek tool.')
variant('reatore2', default=True, description='Build reatore2 tool.')
variant('genmap', default=True, description='Build genmap tool.')
variant('nekmerge', default=True, description='Build nekmerge tool.')
variant('prenek', default=True, description='Build prenek tool.')
# Dependencies
depends_on('mpi', when="+mpi")
depends_on('libx11', when="+prenek")
depends_on('libx11', when="+postnek")
# libxt is needed for X11/Intrinsic.h but not for linking
depends_on('libxt', when="+prenek")
depends_on('xproto', when="+prenek")
depends_on('libxt', when="+postnek")
depends_on('visit', when="+visit")
@run_before('install')
def fortran_check(self):
if not self.compiler.f77:
msg = 'Cannot build Nek5000 without a Fortran 77 compiler.'
raise RuntimeError(msg)
@run_after('install')
def test_install(self):
with working_dir('short_tests/eddy'):
os.system(join_path(self.prefix.bin, 'makenek') + ' eddy_uv')
if not os.path.isfile(join_path(os.getcwd(), 'nek5000')):
msg = 'Cannot build example: short_tests/eddy.'
raise RuntimeError(msg)
def install(self, spec, prefix):
tools_dir = 'tools'
bin_dir = 'bin'
# Do not use the Spack compiler wrappers.
# Use directly the compilers:
fc = self.compiler.f77
cc = self.compiler.cc
fflags = spec.compiler_flags['fflags']
cflags = spec.compiler_flags['cflags']
if ('+prenek' in spec) or ('+postnek' in spec):
libx11_h = find_headers('Xlib', spec['libx11'].prefix.include,
recursive=True)
if not libx11_h:
raise RuntimeError('Xlib.h not found in %s' %
spec['libx11'].prefix.include)
cflags += ['-I%s' % os.path.dirname(libx11_h.directories[0])]
xproto_h = find_headers('X', spec['xproto'].prefix.include,
recursive=True)
if not xproto_h:
raise RuntimeError('X.h not found in %s' %
spec['xproto'].prefix.include)
cflags += ['-I%s' % os.path.dirname(xproto_h.directories[0])]
libxt_h = find_headers('Intrinsic', spec['libxt'].prefix.include,
recursive=True)
if not libxt_h:
raise RuntimeError('X11/Intrinsic.h not found in %s' %
spec['libxt'].prefix.include)
cflags += ['-I%s' % os.path.dirname(libxt_h.directories[0])]
if self.compiler.name in ['xl', 'xl_r']:
# Use '-qextname' to add underscores.
# Use '-WF,-qnotrigraph' to fix an error about a string: '... ??'
fflags += ['-qextname', '-WF,-qnotrigraph']
fflags = ' '.join(fflags)
cflags = ' '.join(cflags)
# Build the tools, maketools copy them to Nek5000/bin by default.
# We will then install Nek5000/bin under prefix after that.
with working_dir(tools_dir):
# Update the maketools script to use correct compilers
filter_file(r'^#FC\s*=.*', 'FC="{0}"'.format(fc), 'maketools')
filter_file(r'^#CC\s*=.*', 'CC="{0}"'.format(cc), 'maketools')
if fflags:
filter_file(r'^#FFLAGS=.*', 'FFLAGS="{0}"'.format(fflags),
'maketools')
if cflags:
filter_file(r'^#CFLAGS=.*', 'CFLAGS="{0}"'.format(cflags),
'maketools')
if self.compiler.name in ['xl', 'xl_r']:
# Patch 'maketools' to use '-qextname' when checking for
# underscore becasue 'xl'/'xl_r' use this option to enable the
# addition of the underscore.
filter_file(r'^\$FC -c ', '$FC -qextname -c ', 'maketools')
libx11_lib = find_libraries('libX11', spec['libx11'].prefix.lib,
shared=True, recursive=True)
if not libx11_lib:
libx11_lib = \
find_libraries('libX11', spec['libx11'].prefix.lib64,
shared=True, recursive=True)
if not libx11_lib:
raise RuntimeError('libX11 not found in %s/{lib,lib64}' %
spec['libx11'].prefix)
# There is no other way to set the X11 library path except brute
# force:
filter_file(r'-L\$\(X\)', libx11_lib.search_flags,
join_path('prenek', 'makefile'))
filter_file(r'-L\$\(X\)', libx11_lib.search_flags,
join_path('postnek', 'makefile'))
if self.compiler.name in ['xl', 'xl_r']:
# Use '-qextname' when compiling mxm.f
filter_file('\$\(OLAGS\)', '-qextname $(OLAGS)',
join_path('postnek', 'makefile'))
# Define 'rename_' function that calls 'rename'
with open(join_path('postnek', 'xdriver.c'), 'a') as xdriver:
xdriver.write('\nvoid rename_(char *from, char *to)\n{\n'
' rename(from, to);\n}\n')
maxnel = self.spec.variants['MAXNEL'].value
filter_file(r'^#MAXNEL\s*=.*', 'MAXNEL=' + maxnel, 'maketools')
maketools = Executable('./maketools')
# Build the tools
if '+genbox' in spec:
maketools('genbox')
# "ERROR: int_tp does not exist!"
# if '+int_tp' in spec:
# maketools('int_tp')
if '+n2to3' in spec:
maketools('n2to3')
if '+postnek' in spec:
maketools('postnek')
if '+reatore2' in spec:
maketools('reatore2')
if '+genmap' in spec:
maketools('genmap')
if '+nekmerge' in spec:
maketools('nekmerge')
if '+prenek' in spec:
maketools('prenek')
with working_dir(bin_dir):
if '+mpi' in spec:
fc = spec['mpi'].mpif77
cc = spec['mpi'].mpicc
else:
filter_file(r'^#MPI=0', 'MPI=0', 'makenek')
if '+profiling' not in spec:
filter_file(r'^#PROFILING=0', 'PROFILING=0', 'makenek')
if '+visit' in spec:
filter_file(r'^#VISIT=1', 'VISIT=1', 'makenek')
filter_file(r'^#VISIT_INSTALL=.*', 'VISIT_INSTALL=\"' +
spec['visit'].prefix.bin + '\"', 'makenek')
# Update the makenek to use correct compilers and
# Nek5000 source.
filter_file(r'^#FC\s*=.*', 'FC="{0}"'.format(fc), 'makenek')
filter_file(r'^#CC\s*=.*', 'CC="{0}"'.format(cc), 'makenek')
filter_file(r'^#SOURCE_ROOT\s*=\"\$H.*', 'SOURCE_ROOT=\"' +
prefix.bin.Nek5000 + '\"', 'makenek')
if fflags:
filter_file(r'^#FFLAGS=.*', 'FFLAGS="{0}"'.format(fflags),
'makenek')
if cflags:
filter_file(r'^#CFLAGS=.*', 'CFLAGS="{0}"'.format(cflags),
'makenek')
with working_dir('core'):
if self.compiler.name in ['xl', 'xl_r']:
# Patch 'core/makenek.inc' and 'makefile.template' to use
# '-qextname' when checking for underscore becasue 'xl'/'xl_r'
# use this option to enable the addition of the underscore.
filter_file(r'^\$FCcomp -c ', '$FCcomp -qextname -c ',
'makenek.inc')
filter_file(r'\$\(FC\) -c \$\(L0\)',
'$(FC) -c -qextname $(L0)', 'makefile.template')
# Install Nek5000/bin in prefix/bin
install_tree(bin_dir, prefix.bin)
# Copy Nek5000 source to prefix/bin
install_tree('../Nek5000', prefix.bin.Nek5000)
| mfherbst/spack | var/spack/repos/builtin/packages/nek5000/package.py | Python | lgpl-2.1 | 11,092 | [
"VisIt"
] | 3b45a2007ffbf0bc50801968be9d0374b661c3dfe045d974a7a53b86c908f8e5 |
# et.po
val = {"" : "Project-Id-Version: sheltermanager\nReport-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\nPOT-Creation-Date: 2013-01-24 10:55+0000\nPO-Revision-Date: 2010-08-23 10:57+0000\nLast-Translator: lyyser <Unknown>\nLanguage-Team: Estonian <et@li.org>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Launchpad-Export-Date: 2014-01-23 05:32+0000\nX-Generator: Launchpad (build 16901)\n",
"{plural3} people with active reservations have not been homechecked" : "",
"Donation Type" : "",
"Use animal comments if photo notes are blank" : "",
"Half-Yearly" : "Poole aasta tagant",
"Select recommended" : "",
"At least the last name should be completed." : "",
"Chinese Crested Dog" : "Chinese Crested Dog",
"New template" : "",
"Include incomplete medical and vaccination records when generating document templates" : "",
"Due today" : "",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "",
"Warnings" : "Hoiatused",
"Corded" : "Nöörjas (corded)",
"Edit diet" : "",
"Stolen {0}" : "",
"Domestic Long Hair" : "Kodukass pikakarvaline",
"{plural2} year" : "",
"Export this database in various formats" : "",
"Person - Name and Address" : "",
"Not For Adoption" : "",
"The date the animal was tattooed" : "",
"Entered From" : "",
"Base Color" : "",
"TT = first and second letter of animal type" : "",
"Reconcile" : "",
"Default Species" : "",
"View Manual" : "",
"Search Results for '{0}'" : "",
"Lost Animal - Details" : "",
"Remove the heartworm test fields from animal health details" : "",
"Income::Adoption" : "",
"{plural0} animal was euthanized" : "",
"If this person is a member, their membership number." : "",
"The size of this animal" : "",
"Use Automatic Insurance Numbers" : "",
"Cane Corso Mastiff" : "Cane Corso Mastiff",
"Negative" : "Negatiivne",
"View Found Animal" : "",
"More diary notes" : "",
"Show animal thumbnails in movement and medical books" : "",
"Install the selected reports to your database" : "",
"Reservation date cannot be after cancellation date." : "",
"American Staffordshire Terrier" : "American Staffordshire Terrier",
"Publish to folder" : "",
"New Diary" : "",
"This person has been banned from adopting animals." : "",
"Amazon" : "Amazon",
"Email person" : "",
"Default destination account for donations" : "",
"Affenpinscher" : "Affenpinscher",
"Homecheck History" : "",
"Dosage" : "Doos",
"Urgency" : "Pakiline vajadus",
"Bank::Savings" : "",
"Last Name" : "",
"Tuesday" : "",
"Ginger" : "Punakaspruun",
"New Regimen" : "",
"Australian Cattle Dog/Blue Heeler" : "Australian Cattle Dog/Blue Heeler",
"Boarding cost type" : "",
"Owner" : "Omanik",
"Medical Book" : "Meditsiiniline raamat",
"Date lost cannot be blank." : "",
"Irish Terrier" : "Irish Terrier",
"Mark selected donations received" : "",
"Found Animal: {0}" : "Leitud loom: {0}",
"Edit cost" : "",
"Jump to donations" : "",
"Successfully posted to Facebook" : "",
"Adoption Number" : "",
"McNab" : "McNab",
"Munsterlander" : "Munsterlander",
"Recently deceased shelter animals (last 30 days)." : "",
"Staff record" : "",
"Add a log entry" : "",
"Generate document from this donation" : "",
"Create waiting list records from the selected forms" : "",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "",
"June" : "juuni",
"The secondary breed of this animal" : "",
"Stay" : "",
"Lost to" : "",
"Removed" : "",
"Reservation Book" : "Broneeringute raamat",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "",
"Points for matching features" : "",
"Credit Card" : "Krediitkaart",
"Cockatoo" : "Kakaduu",
"Perform Homecheck" : "",
"Person" : "",
"Debit Card" : "",
"View Report" : "",
"Generate a new animal code" : "Loo uus looma kood",
"Oriental Tabby" : "Oriental Tabby",
"Address Contains" : "",
"Financial" : "",
"Appaloosa" : "Appaloosa",
"Text" : "Tekst",
"Test book" : "",
"Header" : "",
"Heartworm Test Date" : "",
"English Coonhound" : "English Coonhound",
"Owner Vet" : "",
"Add movement" : "",
"Tibetan Spaniel" : "Tibetan Spaniel",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "",
"Exclude this image when publishing" : "",
"Chocolate Labrador Retriever" : "Chocolate Labrador Retriever",
"This animal has been FIV/L tested" : "",
"Don't scale" : "",
"Allergies" : "Allergia",
"Chart (Bar)" : "",
"Keep table headers visible when scrolling" : "",
"Tooltip" : "Vihje",
"Animal food costs" : "",
"{plural2} urgent entries on the waiting list" : "",
"U (Unwanted Cat)" : "S (Soovimatu kass)",
"MeetAPet Publisher" : "",
"Add a medical regimen" : "",
"Alaskan Malamute" : "Alaskan Malamute",
"Wheaten Terrier" : "Wheaten Terrier",
"Glen of Imaal Terrier" : "Glen of Imaal Terrier",
"Irish Water Spaniel" : "Irish Water Spaniel",
"{plural3} shelter animals have people looking for them" : "",
"Mountain Dog" : "Mountain Dog",
"Silky Terrier" : "Silky Terrier",
"Peacock/Pea fowl" : "Paabulind",
"White German Shepherd" : "White German Shepherd (Valge saksa lambakoer)",
"Create a new animal from this waiting list entry" : "",
"To continue using ASM, please renew {0}" : "",
"Please select a PDF, HTML or JPG image file to attach" : "",
"Patterdale Terrier (Fell Terrier)" : "Patterdale Terrier (Fell Terrier)",
"Old Password" : "",
"Pixie-Bob" : "Pixie-Bob",
"Great Dane" : "Great Dane",
"Executing..." : "",
"New Log" : "",
"Added by {0} on {1}" : "",
"Sloughi" : "Sloughi",
"Expenses::Electricity" : "",
"Species to use when publishing to third party services and adoption sites" : "",
"Add found animal" : "",
"Show codes on the shelter view screen" : "",
"Rotate image 90 degrees anticlockwise" : "",
"FTP username" : "",
"Make this the default video link when publishing to the web" : "",
"Test marked as performed for {0} - {1}" : "",
"Rough" : "Karm",
"Use a single breed field" : "",
"Blue" : "Sinine",
"{0} treatments every {1} months" : "",
"Flemish Giant" : "Flemish Giant",
"Edit my diary notes" : "",
"Removal Reason" : "",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "",
"Add {0}" : "",
"Scottish Terrier Scottie" : "Scottish Terrier Scottie",
"Found animals reported in the last 30 days." : "",
"Create Log" : "",
"This animal is a crossbreed" : "",
"On Foster (in figures)" : "Kasuperes (arvudes)",
"{plural0} shelter animal has people looking for them" : "",
"Quarterhorse" : "Quarterhorse",
"Housetrained" : "Puhtust pidav",
"Name and Address" : "",
"Remove the good with fields from animal notes" : "",
"Donation" : "Annetus",
"(none)" : "(puudub)",
"Path" : "",
"weeks" : "nädal",
"Flat-coated Retriever" : "Flat-coated Retriever",
"Mobile" : "",
"Address" : "Aadress",
"{plural3} unaltered animals have been adopted in the last month" : "",
"Positive/Negative" : "",
"These are the default values for these fields when creating new records." : "",
"{plural0} test needs to be performed today" : "",
"Black and White" : "Must ja Valge",
"Fawn" : "",
"Reference" : "Viide",
"Lancashire Heeler" : "Lancashire Heeler",
"Ocicat" : "Ocicat",
"Goose" : "Hani",
"Default image for this record and the web" : "",
"{plural1} weeks" : "",
"Mouse" : "Hiir",
"The date this animal was reserved" : "",
"Change Investigation" : "",
"Default daily boarding cost" : "",
"Enable accounts functionality" : "",
"Lost Animal Contact" : "Kaotatud looma kontakt",
"Diary note cannot be blank" : "",
"Accountant" : "",
"Investigation" : "",
"Animal Name" : "Looma nimi",
"Day Pivot" : "",
"Type" : "Tüüp",
"Area where the animal was lost" : "",
"Message successfully sent" : "",
"Username" : "",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Costs: {3}, Total Costs: {4} Total Donations: {5}, Balance: {6}" : "",
"Add Found Animal" : "",
"You will need to upgrade to iOS 6 or higher to upload files." : "",
"Owners Vet" : "",
"Heartworm Tested" : "",
"Rabbit" : "Jänes",
"Manchester Terrier" : "Manchester Terrier",
"Hold" : "",
"{plural0} medical treatment needs to be administered today" : "",
"Health Problems" : "",
"This person has been banned from adopting animals" : "",
"Bank::Deposit" : "",
"Adopt" : "",
"{plural3} animals died" : "",
"{plural0} day." : "",
"{plural3} animals were transferred to other shelters" : "",
"Found Animal {0}" : "",
"Enable sharing animals via Facebook" : "",
"Enable FTP uploading" : "",
"Add report" : "",
"New password and confirmation password don't match." : "",
"Add Diets" : "",
"Email users their diary notes each day" : "",
"September" : "september",
"When posting an animal to Facebook, make a note of it in the log with this type" : "",
"Investigations" : "",
"Not eligible for gift aid" : "",
"days" : "päev(a)",
"Urgent" : "Kiireloomuline",
"Litter" : "",
"Bank current account" : "",
"The date the animal was altered" : "",
"Include CSV header line" : "",
"Found Animal - Details" : "",
"Longest On Shelter" : "",
"Update system options" : "",
"Liver and White" : "Maksavärvi ja valge",
"UUUUUUUUUU or UUUU = unique number" : "",
"Lookup Values" : "",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "",
"Create diary notes from a task" : "",
"Due" : "",
"Syntax check this SQL" : "",
"Default Vaccination Type" : "",
"Additional date field '{0}' contains an invalid date." : "",
"Electricity Bills" : "",
"Quarterly" : "Kord kvartalis",
"Set this to 0 to never automatically remove." : "",
"Tests" : "",
"Points for matching species" : "",
"Voucher Types" : "",
"Welcome!" : "",
"Liability" : "Kohustus",
"Message from {0}" : "",
"Publishing Logs" : "",
"Search" : "Otsi",
"Email a copy of the selected documents" : "",
"Remove the investigation tab from person records" : "",
"Contact Contains" : "",
"Find a lost animal" : "",
"{plural2} medical treatments need to be administered today" : "",
"Creme DArgent" : "",
"Neapolitan Mastiff" : "Neapolitan Mastiff",
"Removal reason" : "",
"Code" : "Kood",
"Features" : "Omadused",
"Sep" : "september",
"Dove" : "Tuvi",
"The microchip number" : "",
"Sex" : "Sugu",
"Akita" : "Akita",
"View Donation" : "",
"Frequency" : "Sagedus",
"Generated document '{0}'" : "",
"Softbill (Other)" : "Softbill (Other)",
"Trial adoption" : "",
"Movements" : "Liikumised",
"Date lost cannot be blank" : "",
"Code format tokens:" : "",
"Generate image thumbnails as tn_$$IMAGE$$" : "",
"Daily Boarding Cost" : "",
"Waiting list urgency update period in days" : "",
"Add donation" : "",
"Creating..." : "",
"The litter this animal belongs to" : "",
"Additional fields need a name, label and type." : "",
"Delete Cost" : "",
"Clone" : "Klooni",
"Retailer book" : "",
"Shelter animal {0} '{1}'" : "",
"White and Black" : "Valge ja must",
"Rabies Tag" : "",
"Bank" : "Pank",
"Find a found animal" : "",
"Bulk Complete Diary" : "",
"Bull Terrier" : "Bull Terrier",
"Reports" : "Aruanded",
"Sorry. ASM will not work without Javascript." : "",
"Login" : "Logi sisse",
"Vaccination marked as given for {0} - {1}" : "",
"AdoptAPet Publisher" : "",
"Location and Species" : "",
"Date reported cannot be blank." : "",
"{plural0} person with an active reservation has not been homechecked" : "",
"When a message is created, email it to each matching user" : "",
"Allow overriding of the movement number on the Move menu screens" : "",
"FoundLost animal entry {0} successfully created." : "",
"Start Of Day" : "",
"Prefill new media notes for animal images with animal comments if left blank" : "",
"Shelter Details" : "Varjupaiga üksikasjad",
"HelpingLostPets Publisher" : "",
"Date put on cannot be blank" : "",
"Curly" : "Lokkis",
"Tabby and White" : "Vöödiline ja valge",
"Template" : "",
"Mark an animal deceased" : "",
"New Owner" : "",
"Start date must be a valid date" : "",
"SQL Interface" : "",
"Time On List" : "",
"Norwegian Lundehund" : "Norwegian Lundehund",
"Shelter stats (this year)" : "",
"Vaccinate Animal" : "",
"Cocker Spaniel" : "Cocker Spaniel",
"View Lost Animal" : "",
"Returned to Owner {0}" : "",
"Edit diary notes" : "",
"FTP password" : "",
"Waiting list entries matching '{0}'." : "",
"Account code '{0}' is not valid." : "",
"Mark treatments given today" : "",
"Any markings or distinguishing features the animal has" : "",
"Account" : "",
"Havana" : "Havana",
"Black and Tan" : "Must pruuni markeeringuga",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "",
"Find animal columns" : "",
"Belgian Hare" : "Belgian Hare",
"Accounts need a code." : "",
"Death Reasons" : "Surma põhjused",
"Add litter" : "",
"Add Person" : "",
"Leave" : "",
"Sorrel Tortoiseshell" : "",
"Default Cost" : "",
"Organization" : "",
"Reason for entry" : "",
"Belgian Shepherd Malinois" : "Belgian Shepherd Malinois",
"Peruvian Paso" : "Peruvian Paso",
"Date of birth is not valid" : "",
"Expenses::Phone" : "",
"Tricolour" : "Tricolour",
"Movement numbers must be unique." : "",
"Change Movement" : "",
"Quicklinks" : "",
"Norwich Terrier" : "Norwich Terrier",
"Find person" : "",
"Delete Found Animal" : "",
"Abyssinian" : "Abyssinian",
"The date the animal was adopted" : "",
"Access System Menu" : "",
"Show the full diary (instead of just my notes) on the home page" : "",
"Include quarantined animals" : "",
"This person is not flagged as a retailer and cannot handle retailer movements." : "",
"Jack Russell Terrier" : "Jack Russell Terrier",
"Priority" : "",
"Foster" : "Kasupereks hakkamine",
"Sick/Injured" : "Haige/vigastatud",
"View Animals" : "Vaata loomi",
"Save this record" : "Salvesta kirje",
"Animal code format" : "",
"Microchip" : "",
"Dogs" : "Koerad",
"Bunny Rabbit" : "Bunny Rabbit (küülik)",
"Dwarf" : "Dwarf",
"New Cost" : "Uus maksumus",
"Blue Tortie" : "",
"Foster book" : "",
"Select person to merge" : "",
"Retailer movement successfully created." : "",
"Terrier" : "Terrier",
"Advanced" : "Laiendatud",
"Newfoundland Dog" : "Newfoundland Dog",
"How urgent is it that we take this animal?" : "",
"Settings" : "",
"Warn when creating multiple reservations on the same animal" : "",
"These fields determine which columns are shown on the find animal and find person screens." : "",
"The date this animal was found" : "",
"Return an animal from transfer" : "",
"New Test" : "",
"{plural0} trial adoption has ended" : "",
"RabiesTag" : "",
"Illyrian Sheepdog" : "Illyrian Sheepdog",
"Found Animal Contact" : "Leitud looma kontakt",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "",
"Remove the document repository functionality from menus" : "",
"Generate a document from this person" : "",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "",
"Date found cannot be blank." : "",
"Transferred In" : "Ületoodud",
"A short version of the reference number" : "",
"Given" : "Manustatud",
"Paso Fino" : "Paso Fino",
"Scottish Fold" : "Scottish Fold",
"Log successfully added." : "",
"Add Users" : "",
"All animals who are flagged as not for adoption." : "",
"Cell Phone" : "",
"Columns" : "",
"Movement" : "Liikumine",
"Visual Theme" : "",
"Attach Link" : "",
"The date the animal was microchipped" : "",
"New Password" : "",
"Boxer" : "Boxer",
"Cheque" : "",
"Eskimo Dog" : "Eskimo Dog",
"{0} treatments every {1} weeks" : "",
"A publish job is already running." : "",
"Black Labrador Retriever" : "Black Labrador Retriever",
"Tonkinese" : "Tonkinese",
"CC" : "",
"You can bookmark search results, animals, people and most data entry screens." : "",
"Publishing complete." : "",
"Create Animal" : "",
"Unit within the location, eg: pen or cage number" : "",
"Javanese" : "Javanese",
"Hidden comments about the animal" : "",
"Income::OpeningBalances" : "",
"{plural2} shelter animals have people looking for them" : "",
"Whippet" : "Whippet",
"Lop Eared" : "Lop Eared",
"{plural1} vaccinations need to be administered today" : "",
"Escaped" : "Lahti pääsenud",
"Error contacting server." : "",
"ASM 3 is compatible with your iPad and other tablets." : "",
"Can't reserve an animal that has an active movement." : "",
"Health and Identification" : "",
"The date this animal was removed from the waiting list" : "",
"Current Vet" : "Praegune veterinaar",
"Reservation" : "Broneeritud",
"Delete Movement" : "",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "",
"Add vaccination" : "",
"Match lost and found animals" : "",
"Exclude animals who are aged under" : "",
"Shelter stats (this month)" : "",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "",
"Donations of type" : "",
"Movements require an animal." : "",
"Create a cost record" : "",
"Ragdoll" : "Ragdoll",
"Selkirk Rex" : "Selkirk Rex",
"Toucan" : "Tuukan",
"Border Terrier" : "Border Terrier",
"Update animals with SmartTag Pet ID" : "",
"Retriever" : "Retriever",
"Email Address" : "",
"Add cost" : "",
"Animal - Entry" : "",
"The SmartTag PETID number" : "",
"This animal was euthanized" : "",
"Edit litter" : "",
"Home" : "",
"{plural3} months" : "",
"Yellow Labrador Retriever" : "Yellow Labrador Retriever",
"Delete Medical Records" : "",
"The result of the heartworm test" : "",
"Income::Shop" : "",
"Settings, Options" : "",
"Find person columns" : "",
"City contains" : "",
"All staff on file." : "",
"Duration" : "Kestus",
"Table" : "",
"F (Stray Dog)" : "H (Hulkuv koer)",
"Heartworm Test Result" : "",
"{plural1} animals were reclaimed by their owners" : "",
"Homechecked By" : "",
"Errors" : "",
"The person record to merge must be different from the original." : "",
"Guinea Pig" : "Merisiga",
"Smooth Fox Terrier" : "Smooth Fox Terrier",
"Unsaved Changes" : "Salvestamata muudatused",
"Default Location" : "",
"Arabian" : "Arabian",
"West Highland White Terrier Westie" : "West Highland White Terrier Westie",
"SubTotal" : "Vahesumma",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "",
"Add diet" : "",
"Looking for" : "",
"{plural0} animal was adopted" : "",
"In SubTotal" : "",
"View Shelter Animals" : "",
"Good with Children" : "",
"Silver Marten" : "Silver Marten",
"Donations require a received date" : "",
"Deceased Date" : "",
"Messages" : "",
"Insurance No" : "",
"Lookup data" : "",
"Edit form field" : "",
"Create a new animal by copying this one" : "Loo uus loom selle kopeerimise teel",
"Add diary" : "",
"Add user" : "",
"From retailer is only valid on adoption movements." : "",
"Edit media notes" : "",
"Document templates" : "",
"Shih Tzu" : "Shih Tzu",
"White and Brindle" : "Valge ja hallikaspruun",
"Produce a PDF of printable labels" : "",
"When adding animals" : "",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "",
"{plural2} weeks" : "",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "",
"Species" : "Liik",
"Norwegian Forest Cat" : "Norwegian Forest Cat",
"Edit HTML publishing templates" : "",
"Change Accounts" : "",
"System" : "Süsteem",
"SQL is syntactically correct." : "",
"Movements require an animal" : "",
"Hold until {0}" : "",
"Great Pyrenees" : "Great Pyrenees",
"Online form fields need a name and label." : "",
"Mastiff" : "Mastiff",
"Change Donation" : "",
"View Movement" : "",
"Facebook" : "",
"Kerry Blue Terrier" : "Kerry Blue Terrier",
"Only publish a set number of animals" : "",
"Lost Animal - Additional" : "",
"Staffordshire Bull Terrier" : "Staffordshire Bull Terrier",
"Postage costs" : "",
"New online form" : "",
"Return an animal from adoption" : "",
"Log date must be a valid date" : "",
"Boarding Cost" : "",
"Deposit" : "",
"Found to" : "",
"Amber" : "",
"Subject" : "Teema",
"Image" : "",
"months" : "kuud",
"Entry Reason Category" : "",
"Role is in use and cannot be deleted." : "",
"Similar Person" : "",
"Animal - Health and Identification" : "",
"{plural3} animals were euthanized" : "",
">>" : "",
"Tattoo" : "",
"Feb" : "veebruar",
"{plural2} days." : "",
"Required date must be a valid date" : "",
"Lovebird" : "Lembelind",
"New Field" : "",
"Draft" : "Draft (Mustand)",
"Animals matching '{0}'." : "",
"Add Movement" : "",
"Vizsla" : "Vizsla",
"Pug" : "Pug (Mops)",
"Add voucher" : "",
"Marketer" : "",
"Aged From" : "",
"The default username is 'user' with the password 'letmein'" : "",
"Publisher Species" : "",
"Breed to use when publishing to third party services and adoption sites" : "",
"Link to an external web resource" : "",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"Spaniel" : "Spaniel",
"The tattoo number" : "",
"Missouri Foxtrotter" : "Missouri Foxtrotter",
"Use SQL Interface" : "",
"S (Stray Cat)" : "T (Hulkuv kass)",
"Multiple Treatments" : "Mitmekordne ravi",
"Bouvier des Flanders" : "Bouvier des Flanders",
"These are the HTML headers and footers used when displaying online forms." : "",
"Paper Size" : "",
"Column" : "",
"January" : "jaanuar",
"White and Torti" : "Valge ja kilpkonnavärvi",
"Hedgehog" : "Siil",
"Golden" : "",
"Document Link" : "",
"Vaccination Book" : "Vaktsineerimisraamat",
"The date the animal was born" : "",
"Transfer To" : "",
"Add Report" : "",
"Settings, Lookup data" : "",
"This animal is not on the shelter." : "",
"Generate Report" : "",
"Cream" : "Kreemikas",
"Points for matching breed" : "",
"Give and Reschedule" : "",
"Budgie/Budgerigar" : "Viirpapagoi",
"Kittens (under {0} months)" : "",
"Japanese Chin" : "Japanese Chin",
"Reason the owner did not bring in the animal themselves" : "",
"{plural2} animals are not available for adoption" : "",
"Other Shelter" : "",
"This income account is the source for donations received of this type" : "",
"Mark this owner homechecked" : "",
"Description" : "Kirjeldus",
"Good with Dogs" : "",
"Kennel Cough" : "Kennelköha",
"Korat" : "Korat",
"Costs" : "Kulud",
"Title Initials Last" : "",
"Only PDF, HTML and JPG image files can be attached." : "",
"Percheron" : "Percheron",
"{0} {1} {2} aged {3}" : "",
"Color" : "",
"Creating donations and donation types creates matching accounts and transactions" : "",
"Go the options screen and set your shelter's contact details and other settings." : "",
"Test Book" : "",
"Person looking for report" : "",
"Add a new log" : "",
"Mo" : "",
"Chicken" : "Kana",
"Unreserved" : "Broneerimata",
"Send mass emails and perform mail merges" : "",
"Ragamuffin" : "Ragamuffin",
"Title" : "Pealkiri",
"Returned" : "Tagastatud",
"Light Amber" : "",
"Delete this animal" : "Kustuta see loom",
"All donors on file." : "",
"Upload all available images for animals" : "",
"Adopted" : "Loovutatud",
"View media" : "",
"The date the retailer movement is effective from" : "",
"Diary notes for: {0}" : "",
"Change date given on selected treatments" : "",
"Canaan Dog" : "Canaan Dog",
"Remove the size field from animal details" : "",
"Stationary costs" : "",
"Today" : "",
"Schipperke" : "Schipperke",
"South Russian Ovcharka" : "South Russian Ovcharka",
"Premises" : "",
"Entered (oldest first)" : "",
"Donations require a person" : "",
"Scottish Deerhound" : "Scottish Deerhound",
"Released To Wild" : "Vabadusse lastud",
"Create this person" : "",
"Rhodesian Ridgeback" : "Rhodesian Ridgeback",
"FIV" : "",
"Roles" : "",
"Include animals who don't have a picture" : "",
"Diets need a start date." : "",
"Ruddy" : "",
"Delete Regimen" : "",
"Welsh Springer Spaniel" : "Welsh Springer Spaniel",
"{plural0} person has an overdue donation" : "",
"Telephone" : "Telefon",
"Animal - Notes" : "",
"Donation of {0} successfully received ({1})." : "",
"A description or other information about the animal" : "",
"There is not enough information in the form to create a waiting list record (need a description)." : "",
"Treat foster animals as part of the shelter inventory" : "",
"Rank" : "Koht",
"View Investigations" : "",
"Accounts" : "",
"Presa Canario" : "Presa Canario",
"Tan" : "Kollakaspruun",
"From Other" : "Teisest",
"Rex" : "Rex",
"Britannia Petite" : "Britannia Petite",
"Add to log" : "",
"New Movement" : "",
"White and Tabby" : "Valge ja vöödiline",
"Date Found" : "Leidmise kuupäev",
"Import complete with {plural1} errors." : "",
"Internal Location" : "Sisemine asukoht",
"Label" : "",
"Find lost animal returned {0} results." : "",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "",
"{plural0} week" : "",
"Default Breed" : "",
"French-Lop" : "French-Lop",
"Post to Facebook as" : "",
"IP Restriction" : "",
"This will permanently remove the selected records, are you sure?" : "",
"(all)" : "(kõik)",
"The shelter reference number" : "",
"Default Coat Type" : "",
"{plural1} urgent entries on the waiting list" : "",
"Remove the microchip fields from animal identification details" : "",
"Ferret" : "Tuhkur",
"Diary date is not valid" : "",
"Return a transferred animal" : "",
"Chocolate Tortie" : "",
"Doberman Pinscher" : "Doberman Pinscher",
"Dalmatian" : "Dalmatian",
"Add a photo" : "",
"FTP hostname" : "",
"(unknown)" : "",
"This type of movement requires a date." : "",
"Age" : "Vanus",
"Change System Options" : "",
"Show the breed fields" : "",
"{plural1} animals are not available for adoption" : "",
"This date of birth is an estimate" : "",
"Hamster" : "Hamster",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "",
"Days On Shelter" : "",
"Mustang" : "Mustang",
"Super user" : "",
"{plural3} trial adoptions have ended" : "",
"Labrador Retriever" : "Labrador Retriever",
"Automatically remove" : "",
"Brindle and Black" : "Hallikaspruun ja must",
"Save this person" : "",
"Entering 'deceased' in the search box will show you recently deceased animals." : "",
"Transferred Out" : "Üleviidud",
"DOB" : "",
"Member" : "Liige",
"Show short shelter codes on screens" : "",
"Good With Dogs" : "",
"Found from" : "",
"Donation?" : "Annetus?",
"Import" : "",
"Not Reconciled" : "",
"Karelian Bear Dog" : "Karelian Bear Dog",
"Complaint" : "Kaebus",
"August" : "august",
"Due in next week" : "",
"Animal Types" : "Looma tüübid",
"SQL dump (ASM2 HSQLDB Format)" : "",
"Diet" : "Dieet",
"Create lost animal records from the selected forms" : "",
"Income::Sponsorship" : "",
"Show transactions from" : "",
"Positive" : "Positiivne",
"Remove the bonded with fields from animal entry details" : "",
"Died" : "Surnud",
"Delete Log" : "",
"Diary Task" : "",
"October" : "oktoober",
"Up for adoption" : "",
"Document Templates" : "",
"Required" : "Nõutav",
"Remove retailer functionality from the movement screens and menus" : "",
"Edit template" : "",
"Columns displayed" : "",
"Donations" : "Annetused",
"The animal sex" : "",
"Time" : "",
"Biting" : "Hammustamine",
"Transferred In {0}" : "",
"All animals who are currently quarantined." : "",
"Transactions need a date and description." : "",
"Good With Cats" : "",
"Children" : "",
"Out" : "",
"Media" : "Meedia",
"Gaited" : "Gaited",
"American Water Spaniel" : "American Water Spaniel",
"Time On Shelter" : "",
"Dutch" : "Dutch",
"Warn when adopting to a person who has been banned from adopting animals" : "",
"The primary breed of this animal" : "",
"Thoroughbred" : "Thoroughbred",
"Add an animal to the waiting list" : "",
"Altered" : "",
"My diary notes" : "",
"Persian" : "Pärsia",
"Medical Profiles" : "Meditsiiniprofiilid",
"No Locations" : "Asukoht puudub",
"New Profile" : "",
"Vaccinations need an animal and at least a required date." : "",
"Chartreux" : "Chartreux",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "",
"Reservation Cancelled" : "",
"Tabby" : "Vöödiline",
"Account code '{0}' has already been used." : "",
"Rhinelander" : "Rhinelander",
"ASM News" : "",
"Delete Incoming Forms" : "",
"Email this message to all matching users" : "",
"This code has already been used." : "",
"Document file" : "",
"View" : "",
"People matching '{0}'." : "",
"Reason Not From Owner" : "",
"Locale" : "",
"Mini-Lop" : "Mini-Lop",
"Add Waiting List" : "",
"Parvovirus" : "Parvoviirus",
"This animal is currently fostered, it will be automatically returned first." : "",
"Cinnamon Tortoiseshell" : "",
"Warn when adopting to a person who lives in the same area as the original owner" : "",
"This can take some time and generate a large file, are you sure?" : "",
"Add Medical Records" : "",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "",
"Sheep" : "Lammas",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "",
"Template for Facebook posts" : "",
"Sponsorship donations" : "",
"Add template" : "",
"Removal" : "Eemaldamine",
"Jan" : "jaanuar",
"Waiting List Donation" : "Ootenimekirja annetus",
"Create boarding cost record when animal is adopted" : "",
"Induct a new animal" : "",
"White" : "Valge",
"Additional fields" : "",
"Brussels Griffon" : "Brussels Griffon",
"{plural2} unaltered animals have been adopted in the last month" : "",
"Use TLS" : "",
"Add investigation" : "",
"Transactions" : "",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "",
"Email media" : "",
"Change User Settings" : "",
"Updating..." : "",
"Edit transaction" : "",
"Browse sheltermanager.com" : "",
"Expenses::Stationary" : "",
"Auto removed due to lack of owner contact." : "Eemaldatud automaatselt omaniku kontaktandmete puudumise tõttu.",
"Animal (optional)" : "",
"Edit Roles" : "",
"Add Log to Animal" : "",
"System user accounts" : "",
"Attach a link to a web resource" : "",
"View Diary" : "",
"Profile name cannot be blank" : "",
"Person - Type" : "",
"Initials" : "",
"Simple" : "Lihtne",
"N (Non Shelter Animal)" : "E (Mitte varjupaigaloom)",
"Animal - Details" : "",
"System Admin" : "",
"Lost animal entry {0} successfully created." : "",
"Include deceased" : "",
"New Litter" : "",
"Cattle Dog" : "Cattle Dog",
"Samoyed" : "Samoyed",
"Organisation" : "Organisatsioon",
"Generate a javascript database for the search page" : "",
"Change Log" : "",
"Chinchilla" : "Chinchilla",
"Start date" : "",
"Roles need a name." : "",
"Appenzell Mountain Dog" : "Appenzell Mountain Dog",
"Shepherd" : "Shepherd",
"Added" : "",
"Boston Terrier" : "Boston Terrier",
"Change Found Animal" : "",
"Add Message" : "",
"All animal shelters on file." : "",
"A unique number to identify this movement" : "",
"Booster" : "Booster",
"Found Animal" : "Leitud loom",
"Transfers must have a valid transfer date." : "",
"Time on list" : "",
"Siamese" : "Siamese (Siiam)",
"If this person is a member, the date that membership expires." : "",
"Cockapoo" : "Cockapoo",
"treatments" : "ravid",
"Black and Tan Coonhound" : "Black and Tan Coonhound",
"All fields should be completed." : "",
"This year" : "",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "",
"Options" : "Valikud",
"Incoming donations (misc)" : "",
"Aged To" : "",
"Only show special needs" : "",
"Apr" : "aprill",
"Money" : "Raha",
"Shar Pei" : "Shar Pei",
"Microchip Date" : "",
"Change Medical Records" : "",
"Transfer?" : "",
"Dachshund" : "Dachshund",
"Sexes" : "",
"Return Category" : "",
"Next>" : "",
"View Accounts" : "",
"Fawn Tortoiseshell" : "",
"Black" : "Must",
"View Tests" : "",
"Edit diary tasks" : "",
"View the animals in this litter" : "Vaata loomi selles pesakonnas",
"All homecheckers on file." : "",
"Yes/No" : "Jah/ei",
"Most relevant" : "",
"Change Transactions" : "",
"Remove previously published files before uploading" : "",
"Litter Reference" : "",
"Monday" : "",
"Find Lost Animal" : "Leia kadunud loom",
"Split baby/adult age at" : "",
"Add Donation" : "",
"Aged Between" : "",
"Use fancy tooltips" : "",
"Delete Treatments" : "",
"Date" : "Kuupäev",
"View Litter" : "",
"Data" : "",
"Find Animal" : "Leia loom",
"Superuser" : "",
"All time" : "",
"All fosterers on file." : "",
"Enabled" : "",
"Find found animal returned {0} results." : "",
"Default to advanced find person screen" : "",
"Payment Types" : "",
"Cinnamon" : "Cinnamon",
"Install" : "",
"Corgi" : "Corgi",
"Omit header/footer" : "",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "",
"History" : "Ajalugu",
"Attach File" : "",
"(both)" : "(mõlemad)",
"Publish now" : "",
"{plural3} years." : "",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "",
"SmartTag PETID" : "",
"Due in next year" : "",
"Create Waiting List" : "",
"Title First Last" : "",
"Various" : "Mitmesugune",
"Password is incorrect." : "",
"Welsh Corgi" : "Welsh Corgi",
"Dogue de Bordeaux" : "Dogue de Bordeaux",
"{plural1} months" : "",
"Movement dates clash with an existing movement." : "",
"Hovawart" : "Hovawart",
"Conure" : "Conure",
"{plural1} days." : "",
"Mail" : "",
"Balinese" : "Balinese",
"Vaccination book" : "",
"Save this animal" : "",
"Important" : "",
"{plural3} animals were adopted" : "",
"Maltese" : "Maltese",
"New Waiting List Entry" : "Uus ootenimekirja sissekanne",
"1 treatment" : "",
"Name" : "Nimi",
"This animal was transferred from another shelter" : "",
"Healthy" : "Terve",
"Seal" : "",
"Crossbreed" : "Segavereline",
"treatments, every" : "",
"Publishing template" : "",
"{plural3} tests need to be performed today" : "",
"The result of the FIV test" : "",
"Invalid email address" : "",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "",
"Jindo" : "Jindo",
"Work" : "",
"All animal care officers on file." : "",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "",
"Name contains" : "",
"Add Cost" : "",
"after connecting, chdir to" : "",
"Is this a permanent foster?" : "",
"Wed" : "",
"Norwegian Buhund" : "Norwegian Buhund",
"Comments" : "Kommentaarid",
"Turkish Angora" : "Turkish Angora",
"Movement Date" : "",
"UK Giftaid" : "",
"Test" : "",
"PetLink Publisher" : "",
"We" : "",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "",
"American Bulldog" : "American Bulldog",
"Wk" : "",
"Account disabled." : "",
"Lost animals reported in the last 30 days." : "",
"Order published animals by" : "",
"Diary" : "Päevik",
"{0} incurred in costs" : "",
"The SmartTag type" : "",
"Get more reports from sheltermanager.com" : "",
"Default Donation Type" : "",
"Show the location unit field" : "",
"Jump to diary" : "",
"Show quick links on all pages" : "",
"Brotogeris" : "Brotogeris",
"Annual" : "",
"Welsh Terrier" : "Welsh Terrier",
"Large" : "Suur",
"Add Accounts" : "",
"Recently Changed" : "",
"Points for matching lost/found area" : "",
"Template Name" : "",
"Animal shortcode format" : "",
"Diary note {0} rediarised for {1}" : "",
"Find this address on a map" : "",
"Grey and White" : "Hall ja valge",
"Delete Media" : "",
"Shiba Inu" : "Shiba Inu",
"Hound" : "Hound",
"First Last" : "",
"Edit test" : "",
"Californian" : "Californian",
"Add a found animal" : "",
"Vaccination Types" : "Vaktsineerimise tüübid",
"Bite" : "Hammustus/hammustama",
"Income" : "Tulu",
"Zipcode" : "",
"Jump to media" : "",
"Facebook page" : "",
"Horizontal Pitch" : "",
"Location" : "Asukoht",
"Chinese Foo Dog" : "Chinese Foo Dog",
"View Person" : "",
"Carolina Dog" : "Carolina Dog",
"Pig (Farm)" : "Kodusiga",
"Saint Bernard St. Bernard" : "Saint Bernard St. Bernard (Bernhardiin)",
"Dead On Arrival" : "Saabudes surnud",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "",
"Saddlebred" : "Saddlebred",
"This person has not passed a homecheck" : "",
"Diary subject cannot be blank" : "",
"Edit vaccination" : "",
"Bank::Current" : "",
"All homechecked owners on file." : "",
"weeks after last contact." : "",
"Paint/Pinto" : "Paint/Pinto",
"Burmese" : "Birma",
"African Grey" : "African Grey",
"Himalayan" : "Himalayan",
"Points for being found within 2 weeks of being lost" : "",
"User Roles" : "",
"This animal is quarantined" : "",
"Report Title" : "",
"Attach a file" : "",
"Give" : "",
"Standardbred" : "Standardbred",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "",
"Reset Password" : "",
"Users need a username, password and at least one role or the superuser flag setting." : "",
"Clydesdale" : "Clydesdale",
"Basset Hound" : "Basset Hound",
"Field Spaniel" : "Field Spaniel",
"Last Month" : "",
"Vouchers" : "Vautšerid",
"Domestic Medium Hair" : "Kodukass keskmise pikkusega karvaga",
"Remove the FIV/L test fields from animal health details" : "",
"Default to advanced find animal screen" : "",
"Add log" : "",
"Log requires an animal." : "",
"{plural2} animals were transferred to other shelters" : "",
"Yes/No/Unknown" : "",
"Animal Shelter Manager Login" : "Animal Shelter Manager Login",
"Date brought in cannot be blank" : "",
"Publishing" : "",
"Create a new template by copying the selected template" : "",
"This animal was dead on arrival to the shelter" : "",
"Unaltered Adopted Animals" : "",
"Foster movements must have a valid foster date." : "",
"Logout" : "Logi välja",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "",
"Zipcode contains" : "",
"Receive a donation" : "",
"Remove unwanted functionality" : "",
"{plural1} animals were transferred to other shelters" : "",
"English Shepherd" : "English Shepherd",
"Available for adoption" : "",
"Australian Kelpie" : "Australian Kelpie",
"The date this person was homechecked." : "",
"Species A-Z" : "",
"Highlight" : "",
"Birman" : "Birman",
"Any information about the animal" : "",
"Area Found" : "Leidmise piirkond",
"Available sheltermanager.com reports" : "",
"Mandatory" : "",
"To" : "",
"Date Reported" : "",
"Animal Emblems" : "",
"{plural0} animal was transferred to another shelter" : "",
"{plural1} trial adoptions have ended" : "",
"Colors" : "",
"All animals matching current publishing options." : "",
"Lhasa Apso" : "Lhasa Apso",
"This animal has a SmartTag PETID" : "",
"Edit online form" : "",
"Bulk Complete Waiting List" : "",
"Size" : "Suurus",
"Additional" : "Täiendav",
"Document Repository" : "",
"FIV Result" : "FIV testi tulemus",
"Transfer" : "Ülekanne",
"Akbash" : "Akbash",
"Palomino" : "Palomino",
"Somali" : "Somali",
"Find Found Animal" : "Leia leitud loom",
"Waiting List - Removal" : "",
"Profile" : "",
"The result of the FLV test" : "",
"Complete" : "",
"Litters" : "Pesakonnad",
"Chart" : "",
"Lost animals must have a contact" : "",
"URL" : "",
"First Vaccination" : "Esimene vaktsineerimine",
"Silver" : "Silver",
"When creating donations from the Move menu screens, mark them due instead of received" : "",
"Culling" : "Väljapraakimine",
"Comments Contain" : "",
"Tosa Inu" : "Tosa Inu",
"When ASM should stop showing this message" : "",
"Clear" : "Puhasta",
"{plural0} result found in {1} seconds. Order: {2}" : "",
"Create missing lookup values" : "",
"Donation book" : "",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "",
"Brittany Spaniel" : "Brittany Spaniel",
"Defaults" : "Vaikimisi",
"Owl" : "Kakk",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "",
"No view permission for this report" : "",
"Add Litter" : "",
"Message Board" : "",
"In" : "",
"Old English Sheepdog" : "Old English Sheepdog",
"Date found cannot be blank" : "",
"Holland Lop" : "Holland Lop",
"{plural2} trial adoptions have ended" : "",
"Vaccination" : "Vaktsineerimine",
"Pumi" : "Pumi",
"Points for matching age group" : "",
"Form URL" : "",
"Death" : "Surm",
"Lookup" : "Päring",
"Eclectus" : "Eclectus",
"Microchip Number" : "Mikrokiibi number",
"Peruvian Inca Orchid" : "Peruvian Inca Orchid",
"{plural2} people with active reservations have not been homechecked" : "",
"Completed" : "Täidetud",
"View Diets" : "",
"Donation Types" : "Annetuse tüübid",
"State contains" : "",
"When I generate a document, make a note of it in the log with this type" : "",
"Change" : "",
"Default Color" : "",
"To Retailer" : "",
"Adoptions {0}" : "",
"Adoption fee donations" : "",
"Manx" : "Manx",
"Starts" : "",
"Boykin Spaniel" : "Boykin Spaniel",
"Dandi Dinmont Terrier" : "Dandi Dinmont Terrier",
"This type of movement requires a person." : "",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "",
"<Prev" : "",
"Create a new animal from this found animal record" : "",
"SQL dump (without media)" : "",
"Note" : "Märkus",
"Publishing Options" : "",
"Split pages with a species name prefix" : "",
"Show" : "",
"Greyhound" : "Greyhound",
"9 months" : "",
"Pig" : "Siga",
"Publish to PetFinder.com" : "",
"Brought In" : "",
"Animal '{0}' successfully marked deceased." : "",
"When displaying person names in lists, use the format" : "",
"Payment Type" : "",
"Add message" : "",
"FLV" : "",
"SQL interface" : "",
"Animal Type" : "Looma liik",
"New" : "Uus",
"Default Test Type" : "",
"Publish to MeetAPet.com" : "",
"Online Forms" : "",
"Match this animal with the lost and found database" : "",
"Homechecked by" : "",
"Edit document" : "",
"Create found animal records from the selected forms" : "",
"{plural2} animals were euthanized" : "",
"Access them via the url 'image?mode=dbfs&id=/reports/NAME'" : "",
"{plural3} animals are not available for adoption" : "",
"Oriental Long Hair" : "Oriental Long Hair",
"Completed notes upto today" : "",
"SmartTag Publisher" : "",
"Configuration" : "",
"Cash" : "",
"Reservation Date" : "",
"Husky" : "Husky",
"Norfolk Terrier" : "Norfolk Terrier",
"Description Contains" : "",
"Create a new template" : "",
"View Person Links" : "",
"on" : "",
"Polish Lowland Sheepdog" : "Polish Lowland Sheepdog",
"New Task" : "",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "",
"System Options" : "",
"White and Tan" : "Valge ja kollakaspruun",
"Message" : "",
"or" : "",
"Include this information on animals shared via Facebook" : "",
"Young Adult" : "Nooruk",
"No" : "Ei",
"Can afford donation?" : "",
"Add" : "",
"Retailer movements must have a valid movement date." : "",
"{plural1} weeks." : "",
"Black and Brown" : "Must ja pruun",
"Original Owner" : "Algne omanik",
"This animal is part of a cruelty case against an owner" : "",
"Active users: {0}" : "",
"More Medications" : "",
"Add Log" : "",
"Letter" : "",
"Italian Spinone" : "Italian Spinone",
"Expiry date" : "",
"You can drag and drop animals in shelter view to change their locations." : "",
"Kishu" : "Kishu",
"If you don't select any locations, publishers will include animals in all locations." : "",
"Error" : "Viga",
"Small" : "Väike",
"Add Tests" : "",
"Irish Setter" : "Irish Setter",
"Skye Terrier" : "Skye Terrier",
"Set the email content-type header to text/html" : "",
"Complete Tasks" : "",
"Surname" : "Perekonnanimi",
"Bulk Complete Vaccinations" : "",
"Remove the tattoo fields from animal identification details" : "",
"Add details of this email to the log after sending" : "",
"Close" : "Sulge",
"Entering 'os' in the search box will show you all shelter animals." : "",
"Change Date Required" : "",
"Gecko" : "Geko",
"This person lives in the same area as the person who brought the animal to the shelter." : "",
"Alphabetically Z-A" : "",
"{plural1} months." : "",
"{plural3} medical treatments need to be administered today" : "",
"Plott Hound" : "Plott Hound",
"Thursday" : "",
"Not reconciled" : "",
"Waiting list entry successfully added." : "",
"Costs need a date and amount." : "",
"Change Report" : "",
"Shelter animals" : "",
"Hold until" : "",
"{plural3} animals were reclaimed by their owners" : "",
"Treatment marked as given for {0} - {1}" : "",
"Sizes" : "",
"Import a CSV file" : "",
"Criteria:" : "",
"NNN or NN = number unique for this type of animal for this year" : "",
"A movement must have a reservation date or type." : "",
"You have unsaved changes, are you sure you want to leave this page?" : "",
"Black and Brindle" : "Must ja hallikaspruun",
"Vertical Pitch" : "",
"Password" : "",
"Transfer In" : "",
"Forenames" : "",
"(master user, not editable)" : "",
"Edit {0}" : "Muuda {0}",
"Warn if the name of the new animal is similar to one entered recently" : "",
"Leukaemia" : "Leukeemia",
"Start at" : "",
"All vets on file." : "",
"Entry" : "",
"Once assigned, codes cannot be changed" : "",
"This will permanently remove this person, are you sure?" : "",
"View littermates" : "",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "",
"Breeds" : "Tõud",
"Forms need a name." : "",
"The date this animal was put on the waiting list" : "",
"First Names" : "",
"Import complete with {plural3} errors." : "",
"This animal has movements and cannot be removed." : "",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "",
"Find Animal/Person" : "",
"Donations need at least one date, an amount and a person." : "",
"Update publishing options" : "",
"Deceased" : "",
"Image file" : "",
"Only show cruelty cases" : "",
"Urgencies" : "",
"Default transaction view" : "",
"Fox Terrier" : "Fox Terrier",
"Checkered Giant" : "Checkered Giant",
"Unknown" : "Tundmatu",
"Breed" : "Tõug",
"{plural1} unaltered animals have been adopted in the last month" : "",
"Black Mouth Cur" : "Black Mouth Cur",
"I've finished, Don't show me this popup again." : "",
"SM Account" : "",
"Points required to appear on match report" : "",
"Cymric" : "Cymric",
"Beauceron" : "Beauceron",
"Top Margin" : "",
"Brown" : "Pruun",
"Fostered" : "Kasuperesse antud",
"Adoption Fee" : "Loovutustasu",
"Due in next month" : "",
"Your CSV file should have a header row with field names ASM recognises. Please see the manual for more information." : "",
"Not Available for Adoption" : "",
"to" : "",
"Fosterer" : "",
"Moving..." : "",
"Add extra images for use in reports and documents" : "",
"American Eskimo Dog" : "American Eskimo Dog",
"<<" : "",
"Schnauzer" : "Schnauzer",
"Tattoo Number" : "",
"Animal Selection" : "",
"Parrot (Other)" : "Papagoi (muu)",
"Refresh" : "Värskenda",
"Lost and Found" : "",
"Amount" : "Kogus",
"Edit donation" : "",
"Other Organisation" : "",
"Edit All Diary Notes" : "Muuda kõiki päeviku sissekandeid",
"The species of this animal" : "",
"Homecheck areas" : "",
"Expenses::Postage" : "",
"Home Phone" : "",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"The movement number '{0}' is not unique." : "",
"and" : "ja",
"Macaw" : "Aara",
"Jump to movements" : "",
"Canary" : "Kanaarilind",
"Temporary Vaccination" : "Tähtajaline vaktsineerimine",
"Medical book" : "",
"Or upload a script" : "",
"[None]" : "[Puudub]",
"Lost animal entries matching '{0}'." : "",
"English Toy Spaniel" : "English Toy Spaniel",
"Airedale Terrier" : "Airedale Terrier",
"All members on file." : "",
"Log" : "Logi",
"HTML/FTP Publisher" : "",
"{plural1} shelter animals have people looking for them" : "",
"Date Of Birth" : "Sünnikuupäev",
"None" : "Puudub",
"Points for matching zipcode" : "",
"Publish HTML via FTP" : "",
"This person has been under investigation" : "",
"A list of areas this person will homecheck - eg: S60 S61" : "",
"Parrotlet" : "Parrotlet",
"Vaccinate" : "",
"All volunteers on file." : "",
"Vaccination Given" : "",
"Finnish Spitz" : "Finnish Spitz",
"Abuse" : "Väärkohtlemine",
"Date Removed" : "Eemaldamise kuupäev",
"Vaccinations" : "",
"People with active reservations, but no homecheck has been done." : "",
"Cancel unadopted reservations after" : "",
"{plural2} results found in {1} seconds. Order: {2}" : "",
"Performed" : "",
"Default image for documents" : "",
"No matches found." : "Vastavusi ei leitud.",
"Log entries need a date and text." : "",
"Short" : "Lühike",
"Puli" : "Puli",
"{0} received in donations" : "",
"Future notes" : "",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "",
"Recently Adopted" : "",
"Pass Homecheck" : "",
"Contact" : "Kontakt",
"Include cruelty case animals" : "",
"GiftAid" : "",
"No data to show on the report." : "",
"Go the system users screen and add user accounts for your staff." : "",
"Thai Ridgeback" : "Thai Ridgeback",
"Shelter stats (all time)" : "",
"Add Media" : "",
"Prairie Dog" : "Rohtlahaukur",
"Return date cannot be before the movement date." : "",
"FLV Result" : "",
"Tennessee Walker" : "Tennessee Walker",
"Date and notes are mandatory." : "",
"This screen allows you to add extra images to your database, for use in reports and documents." : "",
"Logged in Facebook user" : "",
"Pension" : "Pension",
"Aug" : "august",
"View Staff Person Records" : "",
"Generate a document from this animal" : "",
"Unspecified" : "Määramata",
"Edit voucher" : "",
"Animal" : "Loom",
"Standard" : "",
"estimate" : "",
"Male" : "Isane",
"Edit Online Forms" : "",
"Pot Bellied" : "Pot Bellied (vatsakas)",
"Cost Type" : "",
"Liver" : "Maksavärvi",
"Display Index" : "",
"Died {0}" : "",
"Medical profiles" : "",
"more" : "",
"Change Vaccinations" : "",
"When matching lost animals, include shelter animals" : "",
"Multi-Lookup" : "",
"Add Animals" : "Lisa loomi",
"Su" : "",
"Wire-haired Pointing Griffon" : "Wire-haired Pointing Griffon",
"American" : "American",
"CSV of person data" : "",
"Found" : "",
"Sa" : "",
"Miniature Pinscher" : "Miniature Pinscher",
"Jersey Wooly" : "Jersey Wooly",
"Produce a CSV File" : "",
"New Voucher" : "Uus vautšer",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "",
"Manually enter codes (do not generate)" : "",
"{plural2} weeks." : "",
"The date the donation was received" : "",
"Open reports in a new browser tab" : "",
"Include fostered animals" : "",
"Edit diary task" : "",
"Person - Additional" : "",
"Edit the current waiting list" : "",
"RescueGroups Publisher" : "",
"Area Lost" : "",
"Bulk Complete Medical Records" : "",
"Remove short shelter code box from the animal details screen" : "",
"Add this text to all animal descriptions" : "",
"Portuguese Water Dog" : "Portuguese Water Dog",
"{0} cannot be blank" : "",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "",
"Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "",
"Incoming" : "Sissetulev",
"Recently Entered Shelter" : "",
"German Wirehaired Pointer" : "German Wirehaired Pointer",
"Hidden" : "",
"All diary notes" : "",
"Animal - Death" : "",
"Shelter code {0} has already been allocated to another animal." : "",
"Expires" : "",
"English Bulldog" : "English Bulldog",
"New Diet" : "Uus dieet",
"Recently Fostered" : "",
"All animals on the shelter." : "",
"Dog" : "Koer",
"Flags" : "",
"Withdrawal" : "",
"Dutch Shepherd" : "Dutch Shepherd",
"Password for '{0}' has been reset to default of 'password'" : "",
"Brought In By" : "Looma tooja",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "",
"Show the internal location field" : "",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "",
"Edit Lookups" : "",
"Code contains" : "",
"Save this waiting list entry" : "",
"Entry Category" : "",
"File" : "",
"Mail Merge" : "Kirjakooste",
"Change Diets" : "",
"View Document Repository" : "",
"Horse" : "Hobune",
"Kyi Leo" : "Kyi Leo",
"{plural2} reservations have been active over a week without adoption" : "",
"Enable the waiting list functionality" : "",
"Diary for {0}" : "Päevik {0}",
"Default urgency" : "",
"The date this animal was lost" : "",
"Oct" : "oktoober",
"Movement Number" : "",
"Thumbnail size" : "",
"Silver Fox" : "Silver Fox",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"March" : "märts",
"Nova Scotia Duck-Tolling Retriever" : "Nova Scotia Duck-Tolling Retriever",
"Priority Floor" : "",
"Wirehaired Terrier" : "Wirehaired Terrier",
"Estimate" : "Hinnanguline",
"Treatment name cannot be blank" : "",
"All animals who are currently held in case of reclaim." : "",
"Edit report" : "",
"Last name" : "",
"Select a person to attach this form to." : "",
"Retailer Book" : "Vahendajate raamat",
"Senior" : "Seenior",
"Include this image when publishing" : "",
"Mark new animals as not for adoption" : "Märgi uued loomad kui mitteloovutatavad",
"Cell" : "",
"Match Lost and Found" : "",
"Generate documentation" : "",
"This person is very similar to another person on file, carry on creating this record?" : "",
"Waiting List: {0}" : "Ootenimekiri: {0}",
"Generate Documents" : "",
"Terrapin" : "Lamekilpkonn",
"Rat" : "Rott",
"Chihuahua" : "Chihuahua",
"Diary task items need a pivot, subject and note." : "",
"Delete Animals" : "Kustuta loomad",
"Warn when adopting to a person who has not been homechecked" : "",
"Updated." : "",
"Facebook Sharing" : "",
"Delete Vaccinations" : "",
"Search sort order" : "",
"Rhea" : "Nandu",
"Donor" : "Sponsor",
"Treatment Given" : "",
"Reservation book" : "",
"Expenses::Water" : "",
"Lowest" : "Madalaim",
"Medium" : "Keskmine",
"New Role" : "",
"{plural2} tests need to be performed today" : "",
"Default Entry Reason" : "",
"This animal should be held in case it is reclaimed" : "",
"Flag" : "",
"This person is linked to an animal and cannot be removed." : "",
"Create a new document" : "",
"Report" : "Aruanne",
"Add diary task" : "",
"Move an animal to a retailer" : "",
"Weekly" : "Iganädalane",
"Open records in a new browser tab" : "",
"Lookups" : "",
"New Zealand" : "New Zealand",
"Receive" : "",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "",
"{plural1} tests need to be performed today" : "",
"Sealyham Terrier" : "Sealyham Terrier",
"Age Group 3" : "",
"Age Group 2" : "",
"Age Group 1" : "",
"Age Group 7" : "",
"Age Group 6" : "",
"Age Group 5" : "",
"Age Group 4" : "",
"Age Group 8" : "",
"Default Brought In By" : "",
"Neutered" : "Kastreeritud",
"Delete" : "Kustuta",
"You can set a default amount for different donation types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "",
"Video Link" : "",
"Waiting list entry for {0} ({1})" : "",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "",
"Returned To Owner" : "Tagastatud omanikule",
"Generate a random name for this animal" : "Loo loomale suvaline nimi",
"Clear tables before importing" : "",
"Turkish Van" : "Turkish Van",
"Upload Document" : "",
"Make this the default image when viewing this record and publishing to the web" : "",
"Diary task: {0}" : "",
"Leonberger" : "Leonberger",
"{plural1} animals were euthanized" : "",
"{plural2} animals were adopted" : "",
"This Week" : "",
"Fri" : "",
"Siberian" : "Siberian",
"Saluki" : "Saluki",
"The selected file is not an image." : "",
"Bluetick Coonhound" : "Bluetick Coonhound",
"{plural1} medical treatments need to be administered today" : "",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "",
"Advanced find animal screen defaults to on shelter" : "",
"Add Vouchers" : "",
"Foxhound" : "Foxhound",
"Attach this form to an existing person" : "",
"Issue a new insurance number for this animal/adoption" : "",
"This animal has a tattoo" : "",
"Toy Fox Terrier" : "Toy Fox Terrier",
"Started" : "",
"{plural2} vaccinations need to be administered today" : "",
"Italian Greyhound" : "Italian Greyhound",
"View Animal Vet" : "Vaata looma veterinaari",
"Reservations must have a valid reservation date." : "",
"Date put on" : "",
"Puppies (under {0} months)" : "",
"This animal has been heartworm tested" : "",
"Gift Aid" : "",
"Edit Header/Footer" : "",
"8 weeks" : "8 nädalat",
"Tattoo Date" : "",
"People Looking For" : "",
"The date the trial adoption is over" : "",
"Add medical regimen" : "",
"Create a new waiting list entry from this found animal record" : "",
"Did you know?" : "",
"Show tips on the home page" : "",
"(blank)" : "",
"Reason not from Owner" : "",
"Edit medical regimen" : "",
"British Shorthair" : "British Shorthair (Briti lühikarvaline)",
"Delete Litter" : "",
"Income::WaitingList" : "",
"Cancel" : "Loobu",
"Devon Rex" : "Devon Rex",
"Save and leave" : "",
"Loading..." : "",
"Reason for Entry" : "",
"Default Size" : "",
"Current" : "",
"Received in last year" : "",
"{0} {1} aged {2}" : "",
"The coat type of this animal" : "",
"Users" : "",
"Shelter code cannot be blank" : "",
"Parakeet (Other)" : "(Väike pika sabaga) papagoi",
"Add a test" : "",
"Treeing Walker Coonhound" : "Treeing Walker Coonhound",
"{plural0} month." : "",
"Media notes contain" : "",
"Calico" : "Calico",
"American Curl" : "American Curl",
"Thu" : "",
"Chart (Steps)" : "",
"Entry Reasons" : "Sissetuleku põhjused",
"{plural1} years." : "",
"Change Tests" : "",
"Add Document to Repository" : "",
"New Vaccination" : "Uus vaktsineerimine",
"{plural1} results found in {1} seconds. Order: {2}" : "",
"Bernese Mountain Dog" : "Bernese Mountain Dog",
"Cost" : "",
"Publish to HelpingLostPets.com" : "",
"Edit additional field" : "",
"Address contains" : "",
"Clumber Spaniel" : "Clumber Spaniel",
"The date the owner last contacted the shelter" : "",
"Total donations" : "",
"T = first letter of animal type" : "",
"Unsuitable Accomodation" : "Ebasobivad tingimused",
"Show the color field" : "",
"Extra images" : "",
"Rediarised" : "",
"The date the foster is effective from" : "",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "",
"Vietnamese Pot Bellied" : "Vietnamese Pot Bellied",
"Medicate Animal" : "",
"Delete Donation" : "",
"Field names should not contain spaces." : "",
"Reconciled" : "",
"less" : "",
"A unique reference for this litter" : "",
"Link" : "Link",
"Generate" : "",
"Select date for diary task" : "",
"Add lost animal" : "",
"Messages successfully sent" : "",
"Rottweiler" : "Rottweiler",
"(everyone)" : "",
"Search returned {0} results." : "",
"MM = current month" : "",
"Found Animal - Additional" : "",
"Quaker Parakeet" : "Quaker Parakeet",
"Display" : "",
"You can use incoming forms to create new records or attach them to existing people." : "",
"Chart (Point)" : "",
"(any)" : "(iga)",
"{0} is running ({1}% complete)." : "",
"Held" : "",
"The date the animal was heartworm tested" : "",
"Female" : "Emane",
"Mini Rex" : "Mini Rex",
"Hairless" : "Karvutu",
"Bombay" : "Bombay",
"Dogo Argentino" : "Dogo Argentino",
"Confirm Password" : "",
"Do not show" : "",
"Lost animal - {0} {1} [{2}]" : "",
"For" : "Kellele",
"Create" : "",
"First name(s)" : "",
"Work Phone" : "",
"CSV of animal/adopter data" : "",
"Kai Dog" : "Kai Dog",
"This animal has been altered" : "",
"Good With Children" : "",
"Execute the SQL in the box below" : "",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "",
"Belgian Shepherd Tervuren" : "Belgian Shepherd Tervuren",
"Change Date Given" : "",
"Phone" : "Telefon",
"Norwegian Elkhound" : "Norwegian Elkhound",
"Extra Images" : "",
"Select all" : "",
"Entered shelter" : "",
"Expenses::Food" : "",
"1 year" : "",
"Least recently changed" : "",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "",
"Delete Report" : "",
"Manual" : "",
"{plural2} months." : "",
"Income::EntryDonation" : "",
"{plural3} animals entered the shelter" : "",
"Not for adoption" : "Loom ei ole loovutamiseks",
"{plural2} animals died" : "",
"Ostrich" : "Jaanalind",
"Maremma Sheepdog" : "Maremma Sheepdog",
"German Pinscher" : "German Pinscher",
"Brindle and White" : "Hallikaspruun ja valge",
"Publish Animals to the Internet" : "",
"Special Needs" : "",
"Diary notes need a date and subject." : "",
"FIV/L Test Date" : "",
"Death Reason" : "",
"Date of birth cannot be blank" : "",
"Medicate" : "",
"Unit" : "",
"Stop Publishing" : "",
"Brindle" : "Hallikaspruun",
"Yes" : "Jah",
"Setter" : "Setter",
"Keeshond" : "Keeshond",
"Edit notes" : "",
"Edit log" : "",
"No publishers are running." : "",
"Harlequin" : "Harlequin",
"End at" : "",
"Florida White" : "Florida White",
"Diary tasks need a name." : "",
"6 months" : "",
"Delete Accounts" : "",
"This animal has active reservations, they will be cancelled." : "",
"This month" : "",
"Eligible for gift aid" : "",
"Homechecker" : "Kodude külastaja",
"Waiting list entries must have a contact" : "",
"On shelter for {0} days. Total cost: {1}" : "Varjupaigas {0} päeva. Maksumus kokku: {1}",
"Euthanized" : "",
"Homechecked" : "Kodu on külastatud",
"Copy from animal comments" : "",
"Permanent Foster" : "",
"{plural3} weeks." : "",
"Donkey" : "Eesel",
"Location and Unit" : "",
"Emu" : "Emu",
"Account code cannot be blank." : "",
"Cavalier King Charles Spaniel" : "Cavalier King Charles Spaniel",
"{plural1} animals entered the shelter" : "",
"Th" : "",
"Remove the declawed box from animal health details" : "",
"All Publishers" : "",
"Issued" : "",
"Delete Investigation" : "",
"YY or YYYY = current year" : "",
"German Shorthaired Pointer" : "German Shorthaired Pointer",
"Very Large" : "Väga suur",
"Done" : "",
"People with overdue donations." : "",
"Unaltered" : "",
"Non-Shelter" : "Mitte-varjupaiga",
"This person has not passed a homecheck." : "",
"Applehead Siamese" : "Applehead Siamese",
"City" : "",
"Bullmastiff" : "Bullmastiff",
"Name Contains" : "",
"Australian Shepherd" : "Australian Shepherd",
"{plural0} reservation has been active over a week without adoption" : "",
"Number of animal links to show" : "",
"Successfully attached to {0}" : "",
"Share this animal on Facebook" : "",
"Shetland Sheepdog Sheltie" : "Shetland Sheepdog Sheltie",
"View Vaccinations" : "",
"Abandoned" : "Hüljatud",
"{plural1} people have overdue donations" : "",
"The date this animal was returned to its owner" : "Looma omanikult tagastamise kuupäev",
"The period in days before waiting list urgency is increased" : "",
"Sorrel" : "",
"Add a new person" : "",
"{plural2} animals were reclaimed by their owners" : "",
"White and Brown" : "Valge ja pruun",
"Import complete with {plural0} error." : "",
"Last Location" : "",
"Extra-Toes Cat (Hemingway Polydactyl)" : "Extra-Toes Cat (Hemingway Polydactyl)",
"Canadian Hairless" : "Canadian Hairless",
"Bearded Collie" : "Bearded Collie",
"New Report" : "",
"{0} results." : "",
"Cow" : "Lehm",
"Investigator" : "",
"Expenses::Gas" : "",
"You can sort tables by clicking on the column headings." : "",
"Belgian Shepherd Laekenois" : "Belgian Shepherd Laekenois",
"Execute Script" : "",
"Include Removed" : "",
"Auto log users out after this many minutes of inactivity" : "",
"Bedlington Terrier" : "Bedlington Terrier",
"Camel" : "Kaamel",
"Cost record" : "",
"Telephone Bills" : "",
"Select a person" : "",
"Reupload animal images every time" : "",
"Publisher Logs" : "",
"Points for matching color" : "",
"Date Brought In" : "Toomise kuupäev",
"Animals per page" : "",
"Vet" : "Veterinaar",
"Found animals must have a contact" : "",
"Password successfully changed." : "",
"Tortie" : "",
"Checked By" : "",
"Publish to AdoptAPet.com" : "",
"Bobtail" : "Bobtail",
"The date the transfer is effective from" : "",
"New Fosterer" : "",
"New task detail" : "Uus ülesande detail",
"(use system)" : "",
"Japanese Bobtail" : "Japanese Bobtail",
"Delete Diets" : "",
"The date the animal was FIV/L tested" : "",
"Chocolate" : "",
"The name of the page you want to post to (eg: Your Humane Society). Leave blank to post to your wall." : "",
"Snake" : "Madu",
"Both" : "",
"Membership Number" : "",
"Tortoise" : "Kilpkonn",
"English Setter" : "English Setter",
"This animal has special needs" : "",
"Delete Diary" : "",
"Swedish Vallhund" : "Swedish Vallhund",
"Yellow and Grey" : "Kollane ja hall",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "",
"Movement Type" : "Liikumise tüüp",
"Pheasant" : "Faasan",
"Links" : "Lingid",
"Points for matching sex" : "",
"Tortie and White" : "",
"Update the daily boarding cost for this animal" : "",
"{plural2} years." : "",
"Species Z-A" : "",
"Littermates" : "",
"Add online form" : "",
"Delete Waiting List" : "",
"{plural2} animals entered the shelter" : "",
"Lost Animal: {0}" : "Kaotatud loom: {0}",
"Pekingese" : "Pekingese",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "",
"Confirm" : "",
"Single Treatment" : "Ühekordne ravi",
"One-Off" : "One-Off",
"Gas Bills" : "",
"Entlebucher" : "Entlebucher",
"Stolen" : "Varastatud",
"Spitz" : "Spitz",
"Email" : "E-post",
"Crossbreed animal should have different breeds" : "",
"Age Groups" : "Vanusegrupid",
"{0} - {1} {2}" : "",
"English Lop" : "English Lop",
"Return an animal from another movement" : "",
"4 weeks" : "4 nädalat",
"Test Types" : "",
"You can change how ASM looks by choosing a new theme under Settings-Options- Shelter Details-Visual Theme." : "",
"Chart (Pie)" : "",
"Add a new animal" : "Lisa uus loom",
"Invalid username or password." : "",
"User Accounts" : "",
"Portugese Podengo" : "Portugese Podengo",
"PetFinder Publisher" : "",
"Status" : "Olek",
"Released To Wild {0}" : "",
"Euthanized {0}" : "",
"Foster successfully created." : "",
"Show the size field" : "",
"Mail Merge - {0}" : "",
"View Log" : "",
"Ringneck/Psittacula" : "Ringneck/Psittacula",
"Turtle" : "Kilpkonn",
"Pony" : "Pony",
"{plural3} people have overdue donations" : "",
"Inactive - do not include" : "",
"Forbidden" : "",
"Copy of {0}" : "Koopia {0}",
"{plural0} animal entered the shelter" : "",
"Death Comments" : "",
"Normal user" : "",
"Briard" : "Briard",
"Row" : "",
"Finnish Lapphund" : "Finnish Lapphund",
"Siberian Husky" : "Siberian Husky",
"Egyptian Mau" : "Egyptian Mau",
"Test Animal" : "",
"Show the litter ID field" : "",
"ACO" : "",
"Shares" : "",
"Quick Links" : "",
"This will permanently remove this waiting list entry, are you sure?" : "",
"Irish Wolfhound" : "Irish Wolfhound",
"Tiger" : "Tiger",
"Dead on arrival" : "",
"Remember me on this computer" : "",
"Forgotten password?" : "",
"Add a diary note" : "",
"Invalid time, times should be in HH:MM format" : "",
"This person is not flagged as a fosterer and cannot foster animals." : "",
"{0} treatments every {1} days" : "",
"Send via email" : "",
"Homecheck Areas" : "",
"All notes upto today" : "",
"Animals" : "",
"{0} {1}: posted to Facebook page {2} by {3}" : "",
"inches" : "",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "",
"Time on shelter" : "",
"The reason the owner wants to part with the animal" : "",
"Edit account" : "",
"{plural0} vaccination needs to be administered today" : "",
"Tests need an animal and at least a required date." : "",
"{plural0} unaltered animal has been adopted in the last month" : "",
"Lilac" : "Lilac",
"Loan" : "Laen",
"Munchkin" : "Munchkin",
"Books" : "",
"American Wirehair" : "American Wirehair",
"Type of animal links to show" : "",
"Log Types" : "",
"Iguana" : "Iguaan",
"English Springer Spaniel" : "English Springer Spaniel",
"Papillon" : "Papillon",
"Belgian Shepherd Dog Sheepdog" : "Belgian Shepherd Dog Sheepdog",
"Tu" : "",
"Attach" : "",
"Good with children" : "Sobib lastega",
"Reserve an animal" : "",
"Adopt an animal" : "",
"{0}: {1} {2} - {3} {4}" : "",
"Find animal" : "",
"Map" : "",
"Mar" : "märts",
"May" : "mai",
"Default Return Reason" : "",
"{plural3} urgent entries on the waiting list" : "",
"Waiting List" : "Ootenimekiri",
"A4" : "",
"View Vouchers" : "",
"White and Liver" : "Valge ja maksavärvi",
"Publisher Breed" : "",
"Diary date cannot be blank" : "",
"Quarantine" : "",
"Last Week" : "",
"The date reported to the shelter" : "",
"Skunk" : "Skunk",
"Dialog title" : "",
"The reason this animal was removed from the waiting list" : "",
"Is this a trial adoption?" : "",
"Tan and White" : "Kollakaspruun ja valge",
"Comments contain" : "Kommentaarid sisaldavad",
"Set publishing options" : "",
"Reservation successfully created." : "",
"Satin" : "Satin",
"Unable to Afford" : "Ei jõua ülal pidada",
"Ibizan Hound" : "Ibizan Hound",
"Edit litters" : "",
"{plural0} year" : "",
"{0} {1} ({2} treatments)" : "",
"Monthly" : "Igakuine",
"Bengal" : "Bengal",
"Delete this record" : "Kustuta see sissekanne",
"This animal has the same name as another animal recently added to the system." : "",
"Asset" : "Vara",
"Last changed by {0} on {1}" : "",
"Homecheck Date" : "",
"Person Flags" : "",
"The shelter category for this animal" : "",
"Parent" : "Vanem",
"Returning" : "",
"{plural1} reservations have been active over a week without adoption" : "",
"SMTP server" : "",
"Remove the online form functionality from menus" : "",
"Goldfish" : "Kuldkala",
"Nov" : "november",
"The date the reservation is effective from" : "",
"Include held animals" : "",
"Lakeland Terrier" : "Lakeland Terrier",
"Diary and Messages" : "",
"Domestic Short Hair" : "Kodukass lühikarvaline",
"End Of Day" : "",
"Retailer" : "Edasimüüja",
"Bank savings account" : "",
"Select" : "Vali",
"Make this the default image when creating documents" : "",
"Turkey" : "Türgi",
"Index" : "",
"Diary note {0} marked completed" : "",
"Stats" : "",
"Weimaraner" : "Weimaraner",
"Long" : "Pikk",
"State" : "",
"American Fuzzy Lop" : "American Fuzzy Lop",
"The date the foster animal will be returned if known" : "",
"Animal '{0}' created with code {1}" : "",
"This animal already has an active reservation." : "",
"Passwords cannot be blank." : "",
"{plural2} people have overdue donations" : "",
"Email address" : "",
"Create this message" : "",
"Cornish Rex" : "Cornish Rex",
"This week" : "",
"HTML" : "",
"Markings" : "Märgistus",
"Internal Locations" : "Sisemine asukoht",
"Remove holds after" : "",
"{plural1} year" : "",
"Remove the neutered fields from animal health details" : "",
"Only show account totals for the current period, which starts on " : "",
"Publisher" : "",
"Champagne DArgent" : "Champagne DArgent",
"Redbone Coonhound" : "Redbone Coonhound",
"Borzoi" : "Borzoi",
"Russian Blue" : "Russian Blue (Vene sinine)",
"Log Text" : "",
"Poicephalus/Senegal" : "Poicephalus/Senegal (turteltuvi)",
"Edit medical profile" : "",
"Green" : "Roheline",
"More Vaccinations" : "",
"Default video for publishing" : "",
"Change Password" : "",
"Water Bills" : "",
"Back" : "",
"Date Put On" : "Date Put On",
"Download" : "",
"Good with kids" : "",
"Prefill new media notes with the filename if left blank" : "",
"Staff" : "Personal",
"Category" : "Kategooria",
"Change Vouchers" : "",
"{plural0} year." : "",
"Lory/Lorikeet" : "Lory/Lorikeet",
"Opening balances" : "",
"Pigeon" : "Tuvi",
"Bichon Frise" : "Bichon Frise",
"Anatolian Shepherd" : "Anatolian Shepherd",
"Treatment" : "",
"American Shorthair" : "American Shorthair",
"Donations for animals entering the shelter" : "",
"New diary task" : "",
"Media Notes" : "",
"New Document" : "",
"Fr" : "",
"Pomeranian" : "Pomeranian",
"An animal cannot have multiple open movements." : "",
"Income::" : "",
"Add waiting list" : "",
"Change Litter" : "",
"Transferred Out {0}" : "",
"The animal name" : "",
"Date put on list" : "",
"3 months" : "",
"Shelter stats (this week)" : "",
"Wednesday" : "",
"Remove the insurance number field from the movement screens" : "",
"Merge" : "",
"View Medical Records" : "",
"Animal Codes" : "Loomade koodid",
"Generate HTML from this SQL" : "",
"Show the date brought in field" : "",
"Change Cost" : "",
"Create and edit" : "",
"Litter Ref" : "",
"Animal Shelter Manager" : "",
"Tibetan Terrier" : "Tibetan Terrier",
"Email signature" : "",
"{plural3} year" : "",
"Donkey/Mule" : "Donkey/Mule",
"Clone Animals" : "Klooni loomi",
"Entry Donation" : "",
"Diets" : "Dieedid",
"Split pages with a baby/adult prefix" : "",
"Not Available For Adoption" : "",
"Beagle" : "Beagle",
"Cats" : "Kassid",
"Details" : "Üksikasjad",
"Edit movement" : "",
"Out Between" : "",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "",
"Mynah" : "Acridotheres maina lind",
"Delete Document from Repository" : "",
"Treat trial adoptions as part of the shelter inventory" : "",
"Add additional field" : "",
"Komondor" : "Komondor",
"Date brought in is not valid" : "",
"Email successfully sent to {0}" : "",
"Notes" : "Märkmed",
"M (Miscellaneous)" : "M (Muu)",
"Received in last month" : "",
"Transferred" : "Edastatud",
"Comments copied to web preferred media." : "",
"Lizard" : "Sisalik",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "",
"Change Animals" : "Muuda loomi",
"Australian Terrier" : "Australian Terrier",
"Waiting List Contact" : "Ootenimekirja kontakt",
"Remove the Litter ID field from animal details" : "",
"Include non-shelter" : "",
"Pharaoh Hound" : "Pharaoh Hound",
"Reserved" : "Broneeritud",
"Reclaimed" : "Tagastatud",
"New Account" : "",
"Results" : "",
"Brown and White" : "Pruun ja valge",
"Test Results" : "",
"Account Types" : "",
"FIV/L Tested" : "",
"Read the manual for more information about Animal Shelter Manager." : "",
"Non-Shelter Animal" : "Mitte-varjupaiga loom.",
"This animal should not be shown in figures and is not in the custody of the shelter" : "",
"Otterhound" : "Otterhound",
"Edit Users" : "",
"{plural1} people with active reservations have not been homechecked" : "",
"Basenji" : "Basenji",
"You can't have a return without a movement." : "",
"To Other" : "Muusse",
"Bird" : "Lind",
"Cockatiel" : "Cockatiel",
"Add Lost Animal" : "",
"Lowchen" : "",
"Here are some things you should do before you start adding animals and people to your database." : "",
"Add a lost animal" : "",
"The date the litter entered the shelter" : "",
"Found animal entries matching '{0}'." : "",
"Bank deposit account" : "",
"Transfer successfully created." : "",
"Havanese" : "Havanese",
"Volunteer" : "Vabatahtlik",
"Modify Lookups" : "",
"Execute" : "",
"Alerts" : "",
"Log requires a date." : "",
"Add Investigation" : "",
"{plural1} animals died" : "",
"Cat" : "Kass",
"Edit role" : "",
"Mother" : "",
"{plural3} days." : "",
"Cost date must be a valid date" : "",
"Intakes {0}" : "",
"View Roles" : "",
"Create person records from the selected forms" : "",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "",
"Delete Tests" : "",
"Add Diary" : "",
"Sat" : "",
"From" : "",
"Bonded With" : "",
"Poodle" : "Poodle",
"Send" : "Saada",
"Next" : "",
"{plural3} reservations have been active over a week without adoption" : "",
"These options change the behaviour of the search box at the top of the page." : "",
"Entry Reason" : "",
"Send Emails" : "",
"Edit Reports" : "",
"Mark Deceased" : "",
"{plural2} months" : "",
"Delete Person" : "",
"Number in litter" : "",
"Select an animal" : "Vali loom",
"Baby" : "Beebi",
"Reservation For" : "",
"Tue" : "",
"Vouchers need an issue and expiry date." : "",
"Coat Type" : "",
"Delete this waiting list entry" : "",
"{0} - {1} ({2} {3} aged {4})" : "",
"Copy animal comments to the notes field of the web preferred media for this animal" : "",
"Edit Diary Tasks" : "Muuda päeviku ülesandeid",
"Gordon Setter" : "Gordon Setter",
"Lilac Tortie" : "",
"Expense" : "Kulu",
"Sphynx (hairless cat)" : "Sphynx (hairless cat) (Sfinks)",
"Grade" : "Grade",
"Edit system users" : "",
"Any health problems the animal has" : "Looma terviseprobleemid",
"Online Form: {0}" : "",
"DD = current day" : "",
"Send emails" : "",
"Income::Interest" : "",
"This will permanently remove this record, are you sure?" : "",
"Only show transfers" : "",
"Add Vaccinations" : "",
"No results found." : "",
"Litters need at least a required date and number." : "",
"D (Dog)" : "K (Koer)",
"Foster an animal" : "",
"This animal is microchipped" : "",
"Page extension" : "",
"Remove the Rabies Tag field from animal health details" : "",
"Sheep Dog" : "Sheep Dog",
"Adult" : "Täiskasvanu",
"Default view" : "",
"ASM" : "",
"Catahoula Leopard Dog" : "Catahoula Leopard Dog",
"Coat Types" : "",
"Entered To" : "",
"Database" : "Andmebaas",
"Edit investigation" : "",
"Edit report template HTML header/footer" : "",
"Settings, System user accounts" : "",
"R" : "",
"This database is locked." : "",
"Swan" : "Luik",
"Caique" : "Caique",
"Update animals with PetLink Microchips" : "",
"Enable visual effects" : "",
"Lost/Found" : "",
"Animal - Additional" : "",
"Moved to animal record {0}" : "",
"Escaped {0}" : "",
"Declawed" : "",
"Duck" : "Part",
"Information" : "Info",
"Rows" : "",
"This person has donations and cannot be removed." : "",
"Log Type" : "",
"SMTP username" : "",
"Edit my diary notes" : "",
"Add account" : "",
"1 week" : "1 nädal",
"Real name" : "",
"Requested" : "Soovitud",
"Your password is currently set to 'password'. This is highly insecure and we strongly suggest you choose a new password." : "",
"Stats period" : "",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "",
"Found animal - {0} {1} [{2}]" : "",
"Recently deceased" : "",
"Hidden Comments" : "",
"Return Date" : "Tagastamise kuupäev",
"This animal is currently at a retailer, it will be automatically returned first." : "",
"Donation From" : "",
"Fila Brasileiro" : "Fila Brasileiro",
"November" : "november",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "{0} - {1} {2} ({3}), kontakt {4} ({5}) - kaotatud {6}, indeks {7}, {8}",
"SMTP password" : "",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "",
"Default Type" : "",
"Trial Adoption" : "",
"Reserve" : "",
"Don't create a cost record" : "",
"Adoption successfully created." : "",
"Animal Sponsorship" : "Looma sponsoreerimine",
"Netherland Dwarf" : "Netherland Dwarf",
"High" : "Kõrge",
"Shelter" : "",
"Export" : "",
"SQL dump" : "",
"Home page" : "",
"Saturday" : "",
"Include reserved animals" : "",
"Allow entry of two donations on the Move menu screens" : "",
"Show alerts on the home page" : "",
"This will permanently remove this animal, are you sure?" : "",
"Dwarf Eared" : "Dwarf Eared",
"No data." : "",
"Microchipped" : "",
"Good with dogs" : "Sobib koertega",
"Date reported cannot be blank" : "",
"Document" : "",
"Goat" : "Kits",
"Waiting List - Details" : "",
"Dec" : "detsember",
"Redirect to URL after POST" : "",
"New form field" : "",
"These are the HTML headers and footers used when generating reports." : "",
"Second Vaccination" : "Teine vaktsineerimine",
"Add form field" : "",
"Trial adoption book" : "",
"Mon" : "",
"This Year" : "",
"Heartworm" : "",
"Rotate image 90 degrees clockwise" : "",
"Areas" : "",
"Age Group" : "",
"All people on file." : "",
"This person has movements and cannot be removed." : "",
"Coat" : "",
"Create note this many days from today, or 9999 to ask" : "",
"Add medical profile" : "",
"From Fostering" : "Kasuperest",
"View animals matching publishing options" : "",
"Diary Task: {0}" : "",
"The date the animal was brought into the shelter" : "Looma varjupaika toomise kuupäev",
"This will permanently remove the selected roles, are you sure?" : "",
"Date of last owner contact" : "",
"years" : "aastad",
"Exotic Shorthair" : "Exotic Shorthair",
"Test Performed" : "",
"Move" : "",
"Include animals in the following locations" : "",
"Edit online form HTML header/footer" : "",
"Cancelled Reservation" : "Tühistatud broneering",
"Guinea fowl" : "Pärlkana",
"Good with cats" : "Sobib kassidega",
"Ginger and White" : "Punakaspruun ja valge",
"Use the icon in the lower right of notes fields to view them in a separate window." : "",
"Area" : "Piirkond",
"Cost Types" : "Kulu tüübid",
"Adoption" : "Loovutus",
"Other Account" : "",
"Units" : "",
"Add a vaccination" : "",
"Coonhound" : "Coonhound",
"Name cannot be blank" : "",
"Hotot" : "Hotot",
"Pit Bull Terrier" : "Pit Bull Terrier",
"Some info text" : "",
"Add person" : "",
"Scale published animal images to" : "",
"Change Person" : "",
"Match" : "",
"July" : "juuli",
"Sugar Glider" : "Liugurkuskus",
"{plural0} animal is not available for adoption" : "",
"Body" : "",
"American Sable" : "American Sable",
"Lost Animal" : "Kaotatud loom",
"New Template" : "",
"Chesapeake Bay Retriever" : "Chesapeake Bay Retriever",
"View Waiting List" : "Vaata ootenimekirja",
"Border Collie" : "Border Collie",
"Left shelter" : "",
"German Shepherd Dog" : "German Shepherd Dog",
"Or move this diary on to" : "",
"Movement Types" : "",
"Oriental Short Hair" : "Oriental Short Hair",
"Notes about the death of the animal" : "",
"Result" : "",
"Results for '{0}'." : "",
"Singapura" : "Singapura",
"Start Date" : "Alguskuupäev",
"Add role" : "",
"Delete Vouchers" : "",
"Warmblood" : "Warmblood",
"Delete Lost Animal" : "",
"cm" : "",
"{0} rows affected." : "",
"Treatments" : "",
"Low" : "Madal",
"Cairn Terrier" : "Cairn Terrier",
"Show PDF files inline instead of sending them as attachments" : "",
"Returns {0}" : "",
"New Guinea Singing Dog" : "New Guinea Singing Dog",
"Where this animal is located within the shelter" : "",
"Jun" : "juuni",
"{plural3} months." : "",
"Jul" : "juuli",
"Animal cannot be deceased before it was brought to the shelter" : "",
"Llama" : "Laama",
"Active" : "Aktiivne",
"Last, First" : "",
"Petit Basset Griffon Vendeen" : "Petit Basset Griffon Vendeen",
"Bloodhound" : "Bloodhound",
"Feral" : "Metsik",
"Footer" : "",
"Change Waiting List" : "",
"2 weeks" : "2 nädalat",
"Income::Donation" : "",
"Waiting List - Additional" : "",
"Valid tokens for the subject and text" : "",
"An age in years, eg: 1, 0.5" : "",
"Number" : "Number",
"Add a person" : "",
"Fish" : "Kala",
"Upload Photo" : "",
"{0} treatments every {1} years" : "",
"Received" : "",
"View Media" : "",
"Membership Expiry" : "",
"Afghan Hound" : "Afghan Hound",
"Coton de Tulear" : "Coton de Tulear",
"View publishing logs" : "",
"Shelter view" : "",
"Maine Coon" : "Maine Coon",
"Brown and Black" : "Pruun ja must",
"Match against other lost/found animals" : "",
"Diary Tasks" : "",
"Lost and found entries must have a contact" : "",
"Snowshoe" : "Snowshoe",
"Additional Fields" : "",
"This Month" : "",
"Neuter/Spay" : "Steriliseerima/kastreerima",
"Medical" : "Meditsiiniline",
"Grey" : "Hall",
"Email this person" : "",
"Out SubTotal" : "",
"Log requires a person." : "",
"No adjustment" : "",
"{0} record(s) match the mail merge." : "",
"Stray" : "Hulkuv",
"Altered Date" : "",
"A person is required for this movement type." : "",
"Burmilla" : "Burmilla",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "",
"{plural3} results found in {1} seconds. Order: {2}" : "",
"{plural3} weeks" : "",
"Include retailer animals" : "",
"User roles" : "",
"On Shelter" : "Varjupaigas",
"Similar Animal" : "",
"English Pointer" : "English Pointer",
"This animal died outside the care of the shelter, and the death should be kept out of reports" : "",
"Collie" : "Collie",
"Sunday" : "",
"Xoloitzcuintle/Mexican Hairless" : "Xoloitzcuintle/Mexican Hairless",
"Good with Cats" : "",
"Remaining" : "Järele jäänud",
"Settings, Reports" : "",
"Income from an on-site shop" : "",
"Angora Rabbit" : "Angora Rabbit",
"Add test" : "",
"Died off shelter" : "",
"Change Media" : "",
"Friday" : "",
"Pionus" : "Pionus",
"Ends after" : "Lõppeb peale",
"Expiry" : "",
"No results." : "",
"Reason" : "Põhjus",
"Omit criteria" : "",
"Transfer an animal" : "",
"Separate waiting list rank by species" : "",
"By" : "",
"Unable to Cope" : "Ei saa hakkama",
"Adoptable" : "Loovutatav",
"Merge another person into this one" : "",
"English Cocker Spaniel" : "English Cocker Spaniel",
"Change Lost Animal" : "",
"{plural0} urgent entry on the waiting list" : "",
"This animal should not be included when publishing animals for adoption" : "",
"Date Lost" : "",
"To Fostering" : "Kasuperesse",
"Number of Tasks" : "",
"The date the animal died" : "Looma surma kuupäev",
"Import complete with {plural2} errors." : "",
"Hold the animal until this date or blank to hold indefinitely" : "",
"View PDF" : "",
"Edit roles" : "",
"Facebook page name" : "",
"You must supply a code." : "",
"SQL" : "SQL",
"Polish" : "Polish",
"Remove the coat type field from animal details" : "",
"Adoption movements must have a valid adoption date." : "",
"Waiting List {0}" : "",
"Adopted Transferred In {0}" : "",
"Gerbil" : "Liivahiir",
"Sun" : "",
"Greater Swiss Mountain Dog" : "Greater Swiss Mountain Dog",
"Date removed" : "",
"Photo successfully uploaded." : "",
"Finch" : "Vint, leevike",
"Most recently changed" : "",
"Yorkshire Terrier Yorkie" : "Yorkshire Terrier Yorkie",
"Preview" : "Eelvaade",
"The base color of this animal" : "",
"Date of Birth" : "",
"Tan and Black" : "Kollakaspruun mustaga",
"People" : "",
"Area Postcode" : "Piirkonna indeks",
"Edit user" : "",
"Incomplete notes upto today" : "",
"Edit diary" : "",
"Receipt No" : "",
"{plural0} month" : "",
"Server clock adjustment" : "",
"This animal has been declawed" : "",
"Show a minimap of the address on person screens" : "",
"All existing animals, people, movements and donations in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "",
"Mountain Cur" : "Mountain Cur",
"Tibetan Mastiff" : "Tibetan Mastiff",
"Default Log Filter" : "",
"Rat Terrier" : "Rat Terrier",
"Weight" : "Kaal",
"Set to 0 to never update urgencies." : "",
"December" : "detsember",
"Edit My Diary Notes" : "Muuda mu päeviku märkmeid",
"Generate letters" : "",
"All retailers on file." : "",
"Lifetime" : "",
"Shelter Animals" : "",
"Pointer" : "Pointer",
"Rosella" : "Rosella",
"Marriage/Relationship split" : "Abielu/kooselu lahutus",
"Set to 0 for no limit." : "",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "",
"Foster Book" : "Hoiukodude raamat",
"Kuvasz" : "Kuvasz",
"Find Person" : "",
"Trial ends on" : "",
"Bank account interest" : "",
"Shelter stats (today)" : "",
"Number of fields" : "",
"Waiting list donations" : "",
"Not available for adoption" : "",
"Remove the location unit field from animal details" : "",
"Golden Retriever" : "Golden Retriever",
"Remove the city/state fields from person details" : "",
"HTML Publishing Templates" : "",
"Entry reason" : "",
"Will this owner give a donation?" : "",
"Chow Chow" : "Chow Chow",
"Publish to RescueGroups.org" : "",
"New User" : "",
"Receipt/Invoice" : "",
"Use HTML5 client side image scaling where available to speed up image uploads" : "",
"or estimated age in years" : "",
"Balance" : "",
"Alphabetically A-Z" : "",
"Lost from" : "",
"More Tests" : "",
"Show quick links on the home page" : "",
" days." : "",
"Delete this person" : "",
"{plural0} week." : "",
"Entered (newest first)" : "",
"English Spot" : "English Spot",
"Default Death Reason" : "",
"Insurance" : "",
"Chart (Line)" : "",
"Saving..." : "",
"Updated database to version {0}" : "",
"Attach link" : "",
"One Off" : "",
"Cruelty Case" : "",
"Received in last week" : "",
"The entry reason for this animal" : "",
"5 Year" : "",
"{plural1} animals were adopted" : "",
"Save" : "",
"French Bulldog" : "French Bulldog",
"Banned" : "Loomavõtmiskeeld",
"{plural0} animal died" : "",
"Board and Food" : "",
"Asset::Premises" : "",
"View Cost" : "",
"Enable lost and found functionality" : "",
"February" : "veebruar",
"Kakariki" : "Kakariki",
"are sent to" : "",
"There is not enough information in the form to create a person record (need a surname)." : "",
"Lost" : "",
"White and Grey" : "Valge ja hall",
"Left Margin" : "",
"Remove" : "",
"Annually" : "Igal aastal",
"April" : "aprill",
"New Donation" : "Uus annetus",
"Area where the animal was found" : "",
"Morgan" : "Morgan",
"Incoming Forms" : "",
"Display a search button at the right side of the search box" : "",
"{plural0} animal was reclaimed by its owner" : "",
"{plural3} vaccinations need to be administered today" : "",
"Overdue" : "",
"View Incoming Forms" : "",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "",
"Beveren" : "Beveren",
"Black Tortie" : "",
"Description cannot be blank" : "",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : ""
}
| aubzen/sheltermanager | src/locale/locale_et.py | Python | gpl-3.0 | 90,001 | [
"Amber",
"VisIt"
] | b5911bd8ae96fd0bbf42377914d94f1a74e26b31523a030a36b90c3c92d350c4 |
"""Processing of implementation manifests.
A manifest is a string representing a directory tree, with the property
that two trees will generate identical manifest strings if and only if:
 - They have exactly the same set of files, directories and symlinks.
- For each pair of corresponding directories in the two sets:
- The mtimes are the same (OldSHA1 only).
- For each pair of corresponding files in the two sets:
- The size, executable flag and mtime are the same.
- The contents have matching secure hash values.
- For each pair of corresponding symlinks in the two sets:
- The mtime and size are the same.
- The targets have matching secure hash values.
The manifest is typically processed with a secure hash itself. So, the idea is that
any significant change to the contents of the tree will change the secure hash value
of the manifest.
A top-level ".manifest" file is ignored.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, stat
from zeroinstall import SafeException, _
from zeroinstall.zerostore import BadDigest
# Prefer hashlib (Python >= 2.5); fall back to the deprecated 'sha' module
# on ancient interpreters. Catch only ImportError so unrelated errors
# inside hashlib are not silently masked. 'hashlib' is None in the
# fallback case so other code can detect which implementation is in use.
try:
	import hashlib
	sha1_new = hashlib.sha1
except ImportError:
	import sha
	sha1_new = sha.new
	hashlib = None
class Algorithm:
	"""Abstract base class for algorithms.

	An algorithm knows how to generate a manifest from a directory tree.
	@ivar rating: how much we like this algorithm (higher is better)
	@type rating: int
	"""
	def generate_manifest(self, root):
		"""Returns an iterator that yields each line of the manifest for the directory
		tree rooted at 'root'.
		@param root: top-level directory to scan
		@type root: str"""
		# NotImplementedError is the idiomatic way to mark an abstract method;
		# it is still an Exception subclass, so existing broad handlers are unaffected.
		raise NotImplementedError('generate_manifest')

	def new_digest(self):
		"""Create a new digest. Call update() on the returned object to digest the data.
		Call getID() to turn it into a full ID string."""
		raise NotImplementedError('new_digest')

	def getID(self, digest):
		"""Convert a digest (from new_digest) to a full ID.
		@return: a string in 'algorithm=value' form"""
		raise NotImplementedError('getID')
class OldSHA1(Algorithm):
	"""@deprecated: Injector versions before 0.20 only supported this algorithm."""
	rating = 10

	def generate_manifest(self, root):
		"""Yield each line of the old-style SHA1 manifest for the tree at 'root'.
		Directories are listed with their mtimes, regular files with
		digest/mtime/size and symlinks with digest/size. A top-level
		'.manifest' file is skipped."""
		def recurse(sub):
			# To ensure that a line-by-line comparison of the manifests
			# is possible, we require that filenames don't contain newlines.
			# Otherwise, you can name a file so that the part after the \n
			# would be interpreted as another line in the manifest.
			if '\n' in sub: raise BadDigest("Newline in filename '%s'" % sub)
			assert sub.startswith('/')

			if sub == '/.manifest': return

			full = os.path.join(root, sub[1:].replace('/', os.sep))
			info = os.lstat(full)

			m = info.st_mode
			if stat.S_ISDIR(m):
				if sub != '/':
					yield "D %s %s" % (int(info.st_mtime), sub)
				items = os.listdir(full)
				items.sort()
				subdir = sub
				if not subdir.endswith('/'):
					subdir += '/'
				for x in items:
					for y in recurse(subdir + x):
						yield y
				return

			assert sub[1:]
			leaf = os.path.basename(sub[1:])
			if stat.S_ISREG(m):
				# Read in binary mode and close the handle promptly; the old
				# text-mode read leaked the file object and passed str to the
				# digest, which fails on Python 3.
				with open(full, 'rb') as stream:
					d = sha1_new(stream.read()).hexdigest()
				if m & 0o111:
					yield "X %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
				else:
					yield "F %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
			elif stat.S_ISLNK(m):
				target = os.readlink(full)
				# Digests need bytes on Python 3; on Python 2 the target is
				# already a byte string and is hashed unchanged.
				d = sha1_new(target if isinstance(target, bytes) else target.encode('utf-8')).hexdigest()
				# Note: Can't use utime on symlinks, so skip mtime
				# Note: eCryptfs may report length as zero, so count ourselves instead
				yield "S %s %s %s" % (d, len(target), leaf)
			else:
				raise SafeException(_("Unknown object '%s' (not a file, directory or symlink)") %
						full)
		for x in recurse('/'): yield x

	def new_digest(self):
		"""Return a fresh SHA1 digest object."""
		return sha1_new()

	def getID(self, digest):
		"""Format a digest (from new_digest) as a full 'sha1=...' ID string."""
		return 'sha1=' + digest.hexdigest()
def get_algorithm(name):
	"""Look-up an L{Algorithm} by name.
	@param name: short name of the algorithm, e.g. 'sha1'
	@raise BadDigest: if the name is unknown."""
	if name not in algorithms:
		raise BadDigest(_("Unknown algorithm '%s'") % name)
	return algorithms[name]
def generate_manifest(root, alg = 'sha1'):
	"""@deprecated: use L{get_algorithm} and L{Algorithm.generate_manifest} instead."""
	# Thin compatibility wrapper: resolve the algorithm by name, then delegate.
	algorithm = get_algorithm(alg)
	return algorithm.generate_manifest(root)
def add_manifest_file(dir, digest_or_alg):
	"""Writes a .manifest file into 'dir', and returns the digest.
	You should call fixup_permissions before this to ensure that the permissions are correct.
	On exit, dir itself has mode 555. Subdirectories are not changed.
	@param dir: root of the implementation
	@param digest_or_alg: should be an instance of Algorithm. Passing a digest
	here is deprecated.
	@return: the digest object, updated with the manifest contents
	@raise SafeException: if dir already contains a .manifest file"""
	mfile = os.path.join(dir, '.manifest')
	if os.path.islink(mfile) or os.path.exists(mfile):
		raise SafeException(_("Directory '%s' already contains a .manifest file!") % dir)
	manifest = ''
	if isinstance(digest_or_alg, Algorithm):
		alg = digest_or_alg
		digest = alg.new_digest()
	else:
		# Deprecated calling convention: a digest object was passed directly;
		# assume the old SHA1 manifest format.
		digest = digest_or_alg
		alg = get_algorithm('sha1')
	for line in alg.generate_manifest(dir):
		manifest += line + '\n'
	if not isinstance(manifest, bytes):
		# Digests and binary streams need bytes on Python 3; on Python 2
		# the str is already a byte string and passes through unchanged.
		manifest = manifest.encode('utf-8')
	digest.update(manifest)

	os.chmod(dir, 0o755)		# need write access on dir to create the file
	with open(mfile, 'wb') as stream:
		os.chmod(dir, 0o555)	# restore read-only as soon as the file exists
		stream.write(manifest)
	os.chmod(mfile, 0o444)

	return digest
def splitID(id):
	"""Take an ID in the form 'alg=value' and return a tuple (alg, value),
	where 'alg' is an instance of Algorithm and 'value' is a string.
	@raise BadDigest: if the algorithm isn't known or the ID has the wrong format."""
	alg_name, sep, value = id.partition('=')
	if not sep:
		# No '=' at all: the ID cannot be in 'algorithm=value' form.
		raise BadDigest(_("Digest '%s' is not in the form 'algorithm=value'") % id)
	return (get_algorithm(alg_name), value)
def copy_with_verify(src, dest, mode, alg, required_digest):
	"""Copy path src to dest, checking that the contents give the right digest.
	dest must not exist. New file is created with a mode of 'mode & umask'.
	@param src: source filename
	@type src: str
	@param dest: target filename
	@type dest: str
	@param mode: target mode
	@type mode: int
	@param alg: algorithm to generate digest
	@type alg: L{Algorithm}
	@param required_digest: expected digest value
	@type required_digest: str
	@raise BadDigest: the contents of the file don't match required_digest"""
	# Binary mode is required: the data is hashed and written via os.write,
	# both of which need bytes. 'with' also closes the source even if
	# os.open raises (the old code leaked the handle in that case).
	with open(src, 'rb') as src_obj:
		dest_fd = os.open(dest, os.O_WRONLY | os.O_CREAT | os.O_EXCL, mode)
		try:
			digest = alg.new_digest()
			while True:
				data = src_obj.read(65536)	# larger chunks than the old 256B for fewer syscalls
				if not data: break
				digest.update(data)
				# os.write may perform a partial write; loop until drained.
				while data:
					written = os.write(dest_fd, data)
					assert written >= 0
					data = data[written:]
		finally:
			os.close(dest_fd)
	actual = digest.hexdigest()
	if actual == required_digest: return
	# Never leave a corrupt copy behind.
	os.unlink(dest)
	raise BadDigest(_("Copy failed: file '%(src)s' has wrong digest (may have been tampered with)\n"
			"Expected: %(required_digest)s\n"
			"Actual: %(actual_digest)s") % {'src': src, 'required_digest': required_digest, 'actual_digest': actual})
def verify(root, required_digest = None):
	"""Ensure that directory 'root' generates the given digest.
	For a non-error return:
	 - Dir's name must be a digest (in the form "alg=value")
	 - The calculated digest of the contents must match this name.
	 - If there is a .manifest file, then its digest must also match.
	@param root: the implementation directory to check
	@type root: str
	@param required_digest: expected digest; defaults to the directory's own name
	@type required_digest: str
	@raise BadDigest: if verification fails."""
	if required_digest is None:
		required_digest = os.path.basename(root)
	alg = splitID(required_digest)[0]

	digest = alg.new_digest()
	lines = []
	for line in alg.generate_manifest(root):
		line += '\n'
		# Digests need bytes on Python 3; Python 2 byte strings pass through unchanged.
		digest.update(line if isinstance(line, bytes) else line.encode('utf-8'))
		lines.append(line)
	actual_digest = alg.getID(digest)

	manifest_file = os.path.join(root, '.manifest')
	if os.path.isfile(manifest_file):
		digest = alg.new_digest()
		# Close the handle promptly instead of relying on GC.
		with open(manifest_file, 'rb') as stream:
			digest.update(stream.read())
		manifest_digest = alg.getID(digest)
	else:
		manifest_digest = None

	if required_digest == actual_digest == manifest_digest:
		return

	error = BadDigest(_("Cached item does NOT verify."))

	error.detail = _(" Expected: %(required_digest)s\n"
			 " Actual: %(actual_digest)s\n"
			 ".manifest digest: %(manifest_digest)s\n\n") \
			 % {'required_digest': required_digest, 'actual_digest': actual_digest, 'manifest_digest': manifest_digest or _('No .manifest file')}

	if manifest_digest is None:
		error.detail += _("No .manifest, so no further details available.")
	elif manifest_digest == actual_digest:
		error.detail += _("The .manifest file matches the actual contents. Very strange!")
	elif manifest_digest == required_digest:
		import difflib
		# Open in text mode so the recorded lines are str, matching the str
		# lines we just generated (binary mode would break the diff on Python 3).
		with open(manifest_file, 'r') as stream:
			recorded = stream.readlines()
		diff = difflib.unified_diff(recorded, lines,
					    'Recorded', 'Actual')
		error.detail += _("The .manifest file matches the directory name.\n" \
				"The contents of the directory have changed:\n") + \
				''.join(diff)
	elif required_digest == actual_digest:
		error.detail += _("The directory contents are correct, but the .manifest file is wrong!")
	else:
		error.detail += _("The .manifest file matches neither of the other digests. Odd.")
	raise error
# XXX: Be more careful about the source tree changing under us. In particular, what happens if:
# - A regular file suddenly turns into a symlink?
# - We find a device file (users can hard-link them if on the same device)
def copy_tree_with_verify(source, target, manifest_data, required_digest):
	"""Copy directory source to be a subdirectory of target if it matches the required_digest.
	manifest_data is normally source/.manifest. source and manifest_data are not trusted
	(will typically be under the control of another user).
	The copy is first done to a temporary directory in target, then renamed to the final name
	only if correct. Therefore, an invalid 'target/required_digest' will never exist.
	A successful return means that target/required_digest now exists (whether we created it or not).
	@param source: untrusted directory to copy from
	@type source: str
	@param target: trusted store directory; the copy appears as target/required_digest
	@type target: str
	@param manifest_data: manifest describing 'source'; must hash to required_digest
	@param required_digest: expected digest, in 'algorithm=value' form
	@type required_digest: str
	@raise BadDigest: if the manifest doesn't match required_digest
	@raise SafeException: if the algorithm can't copy, files are missing, or the re-check fails"""
	import tempfile
	from logging import info

	alg, digest_value = splitID(required_digest)

	# The old SHA1 format includes directory mtimes, which a copy can't preserve reliably.
	if isinstance(alg, OldSHA1):
		raise SafeException(_("Sorry, the 'sha1' algorithm does not support copying."))

	# First verify the manifest itself; only then can its file list be trusted.
	digest = alg.new_digest()
	digest.update(manifest_data)	# NOTE(review): on Python 3 this requires manifest_data to be bytes - confirm callers
	manifest_digest = alg.getID(digest)

	if manifest_digest != required_digest:
		raise BadDigest(_("Manifest has been tampered with!\n"
				  "Manifest digest: %(actual_digest)s\n"
				  "Directory name : %(required_digest)s")
				% {'actual_digest': manifest_digest, 'required_digest': required_digest})

	target_impl = os.path.join(target, required_digest)
	if os.path.isdir(target_impl):
		# Already installed (perhaps by another process); nothing to do.
		info(_("Target directory '%s' already exists"), target_impl)
		return

	# We've checked that the source's manifest matches required_digest, so it
	# is what we want. Make a list of all the files we need to copy...

	wanted = _parse_manifest(manifest_data)

	# Copy into a temporary sibling first so a partial or incorrect copy
	# never becomes visible under the final digest name.
	tmpdir = tempfile.mkdtemp(prefix = 'tmp-copy-', dir = target)

	try:
		_copy_files(alg, wanted, source, tmpdir)

		# _copy_files pops entries as it copies them; anything left was missing.
		if wanted:
			raise SafeException(_('Copy failed; files missing from source:') + '\n- ' +
					    '\n- '.join(wanted.keys()))

		# Make directories read-only (files are already RO)
		for root, dirs, files in os.walk(tmpdir):
			for d in dirs:
				path = os.path.join(root, d)
				mode = os.stat(path).st_mode
				os.chmod(path, mode & 0o555)

		# Check that the copy is correct
		actual_digest = alg.getID(add_manifest_file(tmpdir, alg))
		if actual_digest != required_digest:
			raise SafeException(_("Copy failed; double-check of target gave the wrong digest.\n"
					     "Unless the target was modified during the copy, this is a BUG\n"
					     "in 0store and should be reported.\n"
					     "Expected: %(required_digest)s\n"
					     "Actual: %(actual_digest)s") % {'required_digest': required_digest, 'actual_digest': actual_digest})
		try:
			os.chmod(tmpdir, 0o755)	# need write permission to rename on MacOS X
			os.rename(tmpdir, target_impl)
			os.chmod(target_impl, 0o555)
			tmpdir = None		# success: nothing left to clean up
		except OSError:
			if not os.path.isdir(target_impl):
				raise
			# else someone else installed it already - return success
	finally:
		if tmpdir is not None:
			info(_("Deleting tmpdir '%s'") % tmpdir)
			from zeroinstall.support import ro_rmtree
			ro_rmtree(tmpdir)
def _parse_manifest(manifest_data):
"""Parse a manifest file.
@param manifest_data: the contents of the manifest file
@type manifest_data: str
@return: a mapping from paths to information about that path
@rtype: {str: tuple}"""
wanted = {}
dir = ''
for line in manifest_data.split('\n'):
if not line: break
if line[0] == 'D':
data = line.split(' ', 1)
if len(data) != 2: raise BadDigest(_("Bad line '%s'") % line)
path = data[-1]
if not path.startswith('/'): raise BadDigest(_("Not absolute: '%s'") % line)
path = path[1:]
dir = path
elif line[0] == 'S':
data = line.split(' ', 3)
path = os.path.join(dir, data[-1])
if len(data) != 4: raise BadDigest(_("Bad line '%s'") % line)
else:
data = line.split(' ', 4)
path = os.path.join(dir, data[-1])
if len(data) != 5: raise BadDigest(_("Bad line '%s'") % line)
if path in wanted:
raise BadDigest(_('Duplicate entry "%s"') % line)
wanted[path] = data[:-1]
return wanted
def _copy_files(alg, wanted, source, target):
    """Scan for files under 'source'. For each one:
    If it is in wanted and has the right details (or they can be fixed; e.g. mtime),
    then copy it into 'target'.
    If it's not in wanted, warn and skip it.
    On exit, wanted contains only files that were not found.
    @param alg: the manifest algorithm used to hash and verify each item
    @param wanted: mapping of relative path -> manifest fields, as returned
                   by _parse_manifest; entries are popped as they are copied
    @param source: directory to scan for items
    @param target: directory to copy verified items into"""
    from logging import warn
    dir = ''
    # Walk the source by generating its manifest; each line describes one item.
    for line in alg.generate_manifest(source):
        if line[0] == 'D':
            type, name = line.split(' ', 1)
            assert name.startswith('/')
            dir = name[1:]
            path = dir
        elif line[0] == 'S':
            type, actual_digest, actual_size, name = line.split(' ', 3)
            path = os.path.join(dir, name)
        else:
            assert line[0] in 'XF'
            type, actual_digest, actual_mtime, actual_size, name = line.split(' ', 4)
            path = os.path.join(dir, name)
        try:
            # pop: anything left in 'wanted' afterwards was missing from source
            required_details = wanted.pop(path)
        except KeyError:
            warn(_("Skipping file not in manifest: '%s'"), path)
            continue
        if required_details[0] != type:
            raise BadDigest(_("Item '%s' has wrong type!") % path)
        if type == 'D':
            os.mkdir(os.path.join(target, path))
        elif type in 'XF':
            required_type, required_digest, required_mtime, required_size = required_details
            if required_size != actual_size:
                raise SafeException(_("File '%(path)s' has wrong size (%(actual_size)s bytes, but should be "
                        "%(required_size)s according to manifest)") %
                        {'path': path, 'actual_size': actual_size, 'required_size': required_size})
            required_mtime = int(required_mtime)
            dest_path = os.path.join(target, path)
            # 'X' entries are executables; copies are made read-only either way
            if type == 'X':
                mode = 0o555
            else:
                mode = 0o444
            copy_with_verify(os.path.join(source, path),
                    dest_path,
                    mode,
                    alg,
                    required_digest)
            # Restore the manifest mtime so a re-generated manifest matches
            os.utime(dest_path, (required_mtime, required_mtime))
        elif type == 'S':
            required_type, required_digest, required_size = required_details
            if required_size != actual_size:
                raise SafeException(_("Symlink '%(path)s' has wrong size (%(actual_size)s bytes, but should be "
                        "%(required_size)s according to manifest)") %
                        {'path': path, 'actual_size': actual_size, 'required_size': required_size})
            symlink_target = os.readlink(os.path.join(source, path))
            symlink_digest = alg.new_digest()
            symlink_digest.update(symlink_target)
            # Symlinks are verified by hashing their target string
            if symlink_digest.hexdigest() != required_digest:
                raise SafeException(_("Symlink '%(path)s' has wrong target (digest should be "
                        "%(digest)s according to manifest)") % {'path': path, 'digest': required_digest})
            dest_path = os.path.join(target, path)
            os.symlink(symlink_target, dest_path)
        else:
            raise SafeException(_("Unknown manifest type %(type)s for '%(path)s'") % {'type': type, 'path': path})
class HashLibAlgorithm(Algorithm):
    """Manifest algorithm backed by a digest constructor from hashlib
    (or the legacy sha1 constructor)."""
    new_digest = None		# Constructor for digest objects

    def __init__(self, name, rating):
        # 'sha1' is special-cased: it uses the compatibility constructor and
        # is registered under the name 'sha1new' to distinguish it from the
        # legacy OldSHA1 format.
        if name == 'sha1':
            self.new_digest = sha1_new
            self.name = 'sha1new'
        else:
            self.new_digest = getattr(hashlib, name)
            self.name = name
        self.rating = rating

    def generate_manifest(self, root):
        """Yield manifest lines ('D', 'F', 'X' or 'S' entries) for the tree
        rooted at 'root', directories in sorted depth-first order."""
        def recurse(sub):
            # To ensure that a line-by-line comparison of the manifests
            # is possible, we require that filenames don't contain newlines.
            # Otherwise, you can name a file so that the part after the \n
            # would be interpreted as another line in the manifest.
            if '\n' in sub: raise BadDigest(_("Newline in filename '%s'") % sub)
            assert sub.startswith('/')
            full = os.path.join(root, sub[1:])
            info = os.lstat(full)
            new_digest = self.new_digest
            m = info.st_mode
            if not stat.S_ISDIR(m): raise Exception(_('Not a directory: "%s"') % full)
            if sub != '/':
                yield "D %s" % sub
            # Sort so the manifest is deterministic
            items = os.listdir(full)
            items.sort()
            dirs = []
            for leaf in items:
                path = os.path.join(root, sub[1:], leaf)
                info = os.lstat(path)
                m = info.st_mode
                if stat.S_ISREG(m):
                    # The manifest file itself is never included
                    if leaf == '.manifest': continue
                    # NOTE(review): open(path).read() yields text on Python 3 but
                    # digests require bytes — this module appears to target
                    # Python 2; confirm before porting.
                    d = new_digest(open(path).read()).hexdigest()
                    if m & 0o111:
                        yield "X %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
                    else:
                        yield "F %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
                elif stat.S_ISLNK(m):
                    target = os.readlink(path)
                    d = new_digest(target).hexdigest()
                    # Note: Can't use utime on symlinks, so skip mtime
                    # Note: eCryptfs may report length as zero, so count ourselves instead
                    yield "S %s %s %s" % (d, len(target), leaf)
                elif stat.S_ISDIR(m):
                    # Subdirectories are emitted after all files in this directory
                    dirs.append(leaf)
                else:
                    raise SafeException(_("Unknown object '%s' (not a file, directory or symlink)") %
                            path)
            if not sub.endswith('/'):
                sub += '/'
            for x in dirs:
                # Note: "sub" is always Unix style. Don't use os.path.join here.
                for y in recurse(sub + x): yield y
            return
        for x in recurse('/'): yield x

    def getID(self, digest):
        """Return the full ID string ('name=hexdigest') for a digest object."""
        return self.name + '=' + digest.hexdigest()
# Registry of supported manifest algorithms, keyed by algorithm name.
algorithms = {
    'sha1': OldSHA1(),
    'sha1new': HashLibAlgorithm('sha1', 50),
}
# sha256 is only available when the hashlib module could be imported
if hashlib is not None:
    algorithms['sha256'] = HashLibAlgorithm('sha256', 80)
def fixup_permissions(root):
    """Set permissions recursively for children of root:
    - If any X bit is set, they all must be.
    - World readable, non-writable.
    Symlinks are left untouched.
    @raise Exception: if there are unsafe special bits set (setuid, etc)."""
    for dirpath, subdirs, filenames in os.walk(root):
        # '.' normalises the directory itself; then each file inside it
        for leaf in ['.'] + filenames:
            item = os.path.join(dirpath, leaf)
            raw = os.lstat(item).st_mode
            if stat.S_ISLNK(raw):
                continue
            perms = stat.S_IMODE(raw)
            if perms & ~0o777:
                # setuid/setgid/sticky bits on extracted content are unsafe
                raise Exception(_("Unsafe mode: extracted file '%(filename)s' had special bits set in mode '%(mode)s'") % {'filename': item, 'mode': oct(perms)})
            # Executable items get r-xr-xr-x, everything else r--r--r--
            os.chmod(item, 0o555 if perms & 0o111 else 0o444)
| dabrahams/zeroinstall | zeroinstall/zerostore/manifest.py | Python | lgpl-2.1 | 18,586 | [
"VisIt"
] | beb67ff9acf394dbaf0138cff304e0865ce7e07e1dfac0ddddba63b6606d1b63 |
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import as_int, is_sequence, range
from sympy.core.exprtools import factor_terms
from sympy.core.function import _mexpand
from sympy.core.mul import Mul
from sympy.core.numbers import Rational
from sympy.core.numbers import igcdex, ilcm, igcd
from sympy.core.power import integer_nthroot, isqrt
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, symbols
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.ntheory.factor_ import (
divisors, factorint, multiplicity, perfect_power)
from sympy.ntheory.generate import nextprime
from sympy.ntheory.primetest import is_square, isprime
from sympy.ntheory.residue_ntheory import sqrt_mod
from sympy.polys.polyerrors import GeneratorsNeeded
from sympy.polys.polytools import Poly, factor_list
from sympy.simplify.simplify import signsimp
from sympy.solvers.solvers import check_assumptions
from sympy.solvers.solveset import solveset_real
from sympy.utilities import default_sort_key, numbered_symbols
from sympy.utilities.misc import filldedent
# these are imported with 'from sympy.solvers.diophantine import *
__all__ = ['diophantine', 'classify_diop']


# these types are known (but not necessarily handled)
# classify_diop may report any of these; diop_solve handles a subset.
diop_known = {
    "binary_quadratic",
    "cubic_thue",
    "general_pythagorean",
    "general_sum_of_even_powers",
    "general_sum_of_squares",
    "homogeneous_general_quadratic",
    "homogeneous_ternary_quadratic",
    "homogeneous_ternary_quadratic_normal",
    "inhomogeneous_general_quadratic",
    "inhomogeneous_ternary_quadratic",
    "linear",
    "univariate"}
def _is_int(i):
    """Return True if ``i`` can be interpreted as an integer, else False.

    Uses ``as_int`` (EAFP), so Python ints, SymPy Integers and anything
    ``as_int`` accepts count as integers.  Previously the function fell
    off the end and returned None for non-integers; the explicit False
    is truthiness-compatible with all existing callers.
    """
    try:
        as_int(i)
    except ValueError:
        return False
    return True
def _sorted_tuple(*i):
return tuple(sorted(i))
def _remove_gcd(*x):
    """Divide every argument by the gcd of all of them and return the
    result as a tuple.  If no gcd can be computed (``igcd`` raises
    ValueError) the arguments are returned unchanged; non-integer input
    raises a TypeError describing the expected call forms."""
    try:
        g = igcd(*x)
    except ValueError:
        return x
    except TypeError:
        raise TypeError('_remove_gcd(a,b,c) or _remove_gcd(*container)')
    return tuple(v//g for v in x)
def _rational_pq(a, b):
    """Return ``(numer, denom)`` for the ratio a/b, with the sign moved
    into the numerator and any common factor removed."""
    # return `(numer, denom)` for a/b; sign in numer and gcd removed
    return _remove_gcd(sign(b)*a, abs(b))
def _nint_or_floor(p, q):
# return nearest int to p/q; in case of tie return floor(p/q)
w, r = divmod(p, q)
if abs(r) <= abs(q)//2:
return w
return w + 1
def _odd(i):
return i % 2 != 0
def _even(i):
return i % 2 == 0
def diophantine(eq, param=symbols("t", integer=True), syms=None,
                permute=False):
    """
    Simplify the solution procedure of diophantine equation ``eq`` by
    converting it into a product of terms which should equal zero.

    For example, when solving, `x^2 - y^2 = 0` this is treated as
    `(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
    independently and combined. Each term is solved by calling
    ``diop_solve()``.

    Output of ``diophantine()`` is a set of tuples. The elements of the
    tuple are the solutions for each variable in the equation and
    are arranged according to the alphabetic ordering of the variables.
    e.g. For an equation with two variables, `a` and `b`, the first
    element of the tuple is the solution for `a` and the second for `b`.

    Usage
    =====

    ``diophantine(eq, t, syms)``: Solve the diophantine
    equation ``eq``.
    ``t`` is the optional parameter to be used by ``diop_solve()``.
    ``syms`` is an optional list of symbols which determines the
    order of the elements in the returned tuple.

    By default, only the base solution is returned. If ``permute`` is set to
    True then permutations of the base solution and/or permutations of the
    signs of the values will be returned when applicable.

    >>> from sympy.solvers.diophantine import diophantine
    >>> from sympy.abc import a, b
    >>> eq = a**4 + b**4 - (2**4 + 3**4)
    >>> diophantine(eq)
    {(2, 3)}
    >>> diophantine(eq, permute=True)
    {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}

    Details
    =======

    ``eq`` should be an expression which is assumed to be zero.
    ``t`` is the parameter to be used in the solution.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> diophantine(x**2 - y**2)
    {(t_0, -t_0), (t_0, t_0)}

    >>> diophantine(x*(2*x + 3*y - z))
    {(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
    >>> diophantine(x**2 + 3*x*y + 4*x)
    {(0, n1), (3*t_0 - 4, -t_0)}

    See Also
    ========

    diop_solve()
    sympy.utilities.iterables.permute_signs
    sympy.utilities.iterables.signed_permutations
    """
    from sympy.utilities.iterables import (
        subsets, permute_signs, signed_permutations)

    if isinstance(eq, Eq):
        eq = eq.lhs - eq.rhs

    try:
        var = list(eq.expand(force=True).free_symbols)
        var.sort(key=default_sort_key)
        if syms:
            if not is_sequence(syms):
                raise TypeError(
                    'syms should be given as a sequence, e.g. a list')
            syms = [i for i in syms if i in var]
            if syms != var:
                dict_sym_index = dict(zip(syms, range(len(syms))))
                return {tuple([t[dict_sym_index[i]] for i in var])
                        for t in diophantine(eq, param)}
        n, d = eq.as_numer_denom()
        if not n.free_symbols:
            return set()
        if d.free_symbols:
            # solve the numerator, then reject anything that kills the
            # denominator
            dsol = diophantine(d)
            good = diophantine(n) - dsol
            return {s for s in good if _mexpand(d.subs(zip(var, s)))}
        else:
            eq = n
        eq = factor_terms(eq)
        assert not eq.is_number
        eq = eq.as_independent(*var, as_Add=False)[1]
        p = Poly(eq)
        assert not any(g.is_number for g in p.gens)
        eq = p.as_expr()
        assert eq.is_polynomial()
    except (GeneratorsNeeded, AssertionError, AttributeError):
        raise TypeError(filldedent('''
    Equation should be a polynomial with Rational coefficients.'''))

    # permute only sign
    do_permute_signs = False
    # permute sign and values
    do_permute_signs_var = False
    # permute few signs
    permute_few_signs = False
    try:
        # if we know that factoring should not be attempted, skip
        # the factoring step
        v, c, t = classify_diop(eq)

        # check for permute sign
        if permute:
            len_var = len(v)
            permute_signs_for = [
                'general_sum_of_squares',
                'general_sum_of_even_powers']
            permute_signs_check = [
                'homogeneous_ternary_quadratic',
                'homogeneous_ternary_quadratic_normal',
                'binary_quadratic']
            if t in permute_signs_for:
                do_permute_signs_var = True
            elif t in permute_signs_check:
                # if all the variables in eq have even powers
                # then do_permute_sign = True
                if len_var == 3:
                    var_mul = list(subsets(v, 2))
                    # here var_mul is like [(x, y), (x, z), (y, z)]
                    xy_coeff = True
                    x_coeff = True
                    var1_mul_var2 = map(lambda a: a[0]*a[1], var_mul)
                    # if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
                    # `xy_coeff` => True and do_permute_sign => False.
                    # Means no permuted solution.
                    for v1_mul_v2 in var1_mul_var2:
                        try:
                            coeff = c[v1_mul_v2]
                        except KeyError:
                            coeff = 0
                        xy_coeff = bool(xy_coeff) and bool(coeff)
                    var_mul = list(subsets(v, 1))
                    # here var_mul is like [(x,), (y, )]
                    for v1 in var_mul:
                        try:
                            # FIX: was c[var[0]], which only ever inspected the
                            # first variable's linear coefficient
                            coeff = c[v1[0]]
                        except KeyError:
                            coeff = 0
                        x_coeff = bool(x_coeff) and bool(coeff)
                    if not any([xy_coeff, x_coeff]):
                        # means only x**2, y**2, z**2, const is present
                        do_permute_signs = True
                    elif not x_coeff:
                        permute_few_signs = True
                elif len_var == 2:
                    var_mul = list(subsets(v, 2))
                    # here var_mul is like [(x, y)]
                    xy_coeff = True
                    x_coeff = True
                    var1_mul_var2 = map(lambda x: x[0]*x[1], var_mul)
                    for v1_mul_v2 in var1_mul_var2:
                        try:
                            coeff = c[v1_mul_v2]
                        except KeyError:
                            coeff = 0
                        xy_coeff = bool(xy_coeff) and bool(coeff)
                    var_mul = list(subsets(v, 1))
                    # here var_mul is like [(x,), (y, )]
                    for v1 in var_mul:
                        try:
                            # FIX: was c[var[0]] (see the ternary branch above)
                            coeff = c[v1[0]]
                        except KeyError:
                            coeff = 0
                        x_coeff = bool(x_coeff) and bool(coeff)
                    if not any([xy_coeff, x_coeff]):
                        # means only x**2, y**2 and const is present
                        # so we can get more soln by permuting this soln.
                        do_permute_signs = True
                    elif not x_coeff:
                        # when coeff(x), coeff(y) is not present then signs of
                        #  x, y can be permuted such that their sign are same
                        # as sign of x*y.
                        # e.g 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
                        # 2. (-x_vall, y_val)=> (-x_val,y_val), (x_val,-y_val)
                        permute_few_signs = True
        if t == 'general_sum_of_squares':
            # trying to factor such expressions will sometimes hang
            terms = [(eq, 1)]
        else:
            raise TypeError
    except (TypeError, NotImplementedError):
        terms = factor_list(eq)[1]

    sols = set([])

    for term in terms:

        base, _ = term
        var_t, _, eq_type = classify_diop(base, _dict=False)
        _, base = signsimp(base, evaluate=False).as_coeff_Mul()
        solution = diop_solve(base, param)

        if eq_type in [
                "linear",
                "homogeneous_ternary_quadratic",
                "homogeneous_ternary_quadratic_normal",
                "general_pythagorean"]:
            sols.add(merge_solution(var, var_t, solution))

        elif eq_type in [
                "binary_quadratic",
                "general_sum_of_squares",
                "general_sum_of_even_powers",
                "univariate"]:
            for sol in solution:
                sols.add(merge_solution(var, var_t, sol))

        else:
            raise NotImplementedError('unhandled type: %s' % eq_type)

    # remove null merge results
    if () in sols:
        sols.remove(())
    null = tuple([0]*len(var))
    # if there is no solution, return trivial solution
    if not sols and eq.subs(zip(var, null)) is S.Zero:
        sols.add(null)
    final_soln = set([])
    for sol in sols:
        if all(_is_int(s) for s in sol):
            if do_permute_signs:
                permuted_sign = set(permute_signs(sol))
                final_soln.update(permuted_sign)
            elif permute_few_signs:
                lst = list(permute_signs(sol))
                lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
                permuted_sign = set(lst)
                final_soln.update(permuted_sign)
            elif do_permute_signs_var:
                permuted_sign_var = set(signed_permutations(sol))
                final_soln.update(permuted_sign_var)
            else:
                final_soln.add(sol)
        else:
            final_soln.add(sol)
    return final_soln
def merge_solution(var, var_t, solution):
    """
    This is used to construct the full solution from the solutions of sub
    equations.

    For example when solving the equation `(x - y)(x^2 + y^2 - z^2) = 0`,
    solutions for each of the equations `x - y = 0` and `x^2 + y^2 - z^2` are
    found independently. Solutions for `x - y = 0` are `(x, y) = (t, t)`. But
    we should introduce a value for z when we output the solution for the
    original equation. This function converts `(t, t)` into `(t, t, n_{1})`
    where `n_{1}` is an integer parameter.
    """
    sol = []
    if None in solution:
        # a sub-equation had no solution; propagate "no solution"
        return ()
    solution = iter(solution)
    # fresh integer parameters n1, n2, ... for variables absent from var_t
    params = numbered_symbols("n", integer=True, start=1)
    for v in var:
        if v in var_t:
            sol.append(next(solution))
        else:
            sol.append(next(params))

    # discard solutions that violate a variable's declared assumptions
    for val, symb in zip(sol, var):
        if check_assumptions(val, **symb.assumptions0) is False:
            return tuple()
    return tuple(sol)
def diop_solve(eq, param=symbols("t", integer=True)):
    """
    Solves the diophantine equation ``eq``.

    Unlike ``diophantine()``, factoring of ``eq`` is not attempted. Uses
    ``classify_diop()`` to determine the type of the equation and calls
    the appropriate solver function.

    Usage
    =====

    ``diop_solve(eq, t)``: Solve diophantine equation, ``eq`` using ``t``
    as a parameter if needed.

    Details
    =======

    ``eq`` should be an expression which is assumed to be zero.
    ``t`` is a parameter to be used in the solution.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_solve
    >>> from sympy.abc import x, y, z, w
    >>> diop_solve(2*x + 3*y - 5)
    (3*t_0 - 5, -2*t_0 + 5)
    >>> diop_solve(4*x + 3*y - 4*z + 5)
    (t_0, 8*t_0 + 4*t_1 + 5, 7*t_0 + 3*t_1 + 5)
    >>> diop_solve(x + 3*y - 4*z + w - 6)
    (t_0, t_0 + t_1, 6*t_0 + 5*t_1 + 4*t_2 - 6, 5*t_0 + 4*t_1 + 3*t_2 - 6)
    >>> diop_solve(x**2 + y**2 - 5)
    {(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)}

    See Also
    ========

    diophantine()
    """
    # Dispatch on the classified equation type.
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    if eq_type == "linear":
        return _diop_linear(var, coeff, param)

    elif eq_type == "binary_quadratic":
        return _diop_quadratic(var, coeff, param)

    elif eq_type == "homogeneous_ternary_quadratic":
        x_0, y_0, z_0 = _diop_ternary_quadratic(var, coeff)
        return _parametrize_ternary_quadratic(
            (x_0, y_0, z_0), var, coeff)

    elif eq_type == "homogeneous_ternary_quadratic_normal":
        x_0, y_0, z_0 = _diop_ternary_quadratic_normal(var, coeff)
        return _parametrize_ternary_quadratic(
            (x_0, y_0, z_0), var, coeff)

    elif eq_type == "general_pythagorean":
        return _diop_general_pythagorean(var, coeff, param)

    elif eq_type == "univariate":
        return set([(int(i),) for i in solveset_real(
            eq, var[0]).intersect(S.Integers)])

    elif eq_type == "general_sum_of_squares":
        return _diop_general_sum_of_squares(var, -int(coeff[1]), limit=S.Infinity)

    elif eq_type == "general_sum_of_even_powers":
        # find the (common) even exponent p from any Pow term
        for k in coeff.keys():
            if k.is_Pow and coeff[k]:
                p = k.exp
        return _diop_general_sum_of_even_powers(var, p, -int(coeff[1]), limit=S.Infinity)

    if eq_type is not None and eq_type not in diop_known:
        # FIX: corrected the typo "Alhough" in the error message
        raise ValueError(filldedent('''
    Although this type of equation was identified, it is not yet
    handled. It should, however, be listed in `diop_known` at the
    top of this file. Developers should see comments at the end of
    `classify_diop`.
    '''))  # pragma: no cover
    else:
        raise NotImplementedError(
            'No solver has been written for %s.' % eq_type)
def classify_diop(eq, _dict=True):
    # docstring supplied externally
    try:
        var = list(eq.free_symbols)
        assert var
    except (AttributeError, AssertionError):
        raise ValueError('equation should have 1 or more free symbols')
    var.sort(key=default_sort_key)
    eq = eq.expand(force=True)
    coeff = eq.as_coefficients_dict()
    if not all(_is_int(c) for c in coeff.values()):
        raise TypeError("Coefficients should be Integers")

    diop_type = None
    total_degree = Poly(eq).total_degree()
    # homogeneous <=> no constant term (key 1 absent from the coeff dict)
    homogeneous = 1 not in coeff
    if total_degree == 1:
        diop_type = "linear"

    elif len(var) == 1:
        diop_type = "univariate"

    elif total_degree == 2 and len(var) == 2:
        diop_type = "binary_quadratic"

    elif total_degree == 2 and len(var) == 3 and homogeneous:
        if set(coeff) & set(var):
            # a bare variable appears as a key => linear terms present
            diop_type = "inhomogeneous_ternary_quadratic"
        else:
            nonzero = [k for k in coeff if coeff[k]]
            # "normal" form: exactly the three square terms x**2, y**2, z**2
            if len(nonzero) == 3 and all(i**2 in nonzero for i in var):
                diop_type = "homogeneous_ternary_quadratic_normal"
            else:
                diop_type = "homogeneous_ternary_quadratic"

    elif total_degree == 2 and len(var) >= 3:
        if set(coeff) & set(var):
            diop_type = "inhomogeneous_general_quadratic"
        else:
            # there may be Pow keys like x**2 or Mul keys like x*y
            if any(k.is_Mul for k in coeff):  # cross terms
                if not homogeneous:
                    diop_type = "inhomogeneous_general_quadratic"
                else:
                    diop_type = "homogeneous_general_quadratic"
            else:  # all squares: x**2 + y**2 + ... + constant
                if all(coeff[k] == 1 for k in coeff if k != 1):
                    diop_type = "general_sum_of_squares"
                elif all(is_square(abs(coeff[k])) for k in coeff):
                    if abs(sum(sign(coeff[k]) for k in coeff)) == \
                            len(var) - 2:
                        # all but one has the same sign
                        # e.g. 4*x**2 + y**2 - 4*z**2
                        diop_type = "general_pythagorean"

    elif total_degree == 3 and len(var) == 2:
        diop_type = "cubic_thue"

    elif (total_degree > 3 and total_degree % 2 == 0 and
            all(k.is_Pow and k.exp == total_degree for k in coeff if k != 1)):
        if all(coeff[k] == 1 for k in coeff if k != 1):
            diop_type = 'general_sum_of_even_powers'

    if diop_type is not None:
        return var, dict(coeff) if _dict else coeff, diop_type

    # new diop type instructions
    # --------------------------
    # if this error raises and the equation *can* be classified,
    #  * it should be identified in the if-block above
    #  * the type should be added to the diop_known
    # if a solver can be written for it,
    #  * a dedicated handler should be written (e.g. diop_linear)
    #  * it should be passed to that handler in diop_solve
    raise NotImplementedError(filldedent('''
        This equation is not yet recognized or else has not been
        simplified sufficiently to put it in a form recognized by
        diop_classify().'''))
classify_diop.func_doc = '''
Helper routine used by diop_solve() to find information about ``eq``.
Returns a tuple containing the type of the diophantine equation
along with the variables (free symbols) and their coefficients.
Variables are returned as a list and coefficients are returned
as a dict with the key being the respective term and the constant
term is keyed to 1. The type is one of the following:
* %s
Usage
=====
``classify_diop(eq)``: Return variables, coefficients and type of the
``eq``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``_dict`` is for internal use: when True (default) a dict is returned,
otherwise a defaultdict which supplies 0 for missing keys is returned.
Examples
========
>>> from sympy.solvers.diophantine import classify_diop
>>> from sympy.abc import x, y, z, w, t
>>> classify_diop(4*x + 6*y - 4)
([x, y], {1: -4, x: 4, y: 6}, 'linear')
>>> classify_diop(x + 3*y -4*z + 5)
([x, y, z], {1: 5, x: 1, y: 3, z: -4}, 'linear')
>>> classify_diop(x**2 + y**2 - x*y + x + 5)
([x, y], {1: 5, x: 1, x**2: 1, y**2: 1, x*y: -1}, 'binary_quadratic')
''' % ('\n * '.join(sorted(diop_known)))
def diop_linear(eq, param=symbols("t", integer=True)):
    """
    Solves linear diophantine equations.

    A linear diophantine equation is an equation of the form `a_{1}x_{1} +
    a_{2}x_{2} + .. + a_{n}x_{n} = 0` where `a_{1}, a_{2}, ..a_{n}` are
    integer constants and `x_{1}, x_{2}, ..x_{n}` are integer variables.

    Usage
    =====

    ``diop_linear(eq)``: Returns a tuple containing solutions to the
    diophantine equation ``eq``. Values in the tuple is arranged in the same
    order as the sorted variables.

    Details
    =======

    ``eq`` is a linear diophantine equation which is assumed to be zero.
    ``param`` is the parameter to be used in the solution.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_linear
    >>> from sympy.abc import x, y, z, t
    >>> diop_linear(2*x - 3*y - 5) # solves equation 2*x - 3*y - 5 == 0
    (3*t_0 - 5, 2*t_0 - 5)

    Here x = 3*t_0 - 5 and y = 2*t_0 - 5

    >>> diop_linear(2*x - 3*y - 4*z -3)
    (t_0, 2*t_0 + 4*t_1 + 3, -t_0 - 3*t_1 - 3)

    See Also
    ========

    diop_quadratic(), diop_ternary_quadratic(), diop_general_pythagorean(),
    diop_general_sum_of_squares()
    """
    # NOTE: the previously unused function-scope import of count_ops was removed.
    var, coeff, diop_type = classify_diop(eq, _dict=False)

    if diop_type == "linear":
        return _diop_linear(var, coeff, param)
def _diop_linear(var, coeff, param):
    """
    Solves diophantine equations of the form:

    a_0*x_0 + a_1*x_1 + ... + a_n*x_n == c

    Note that no solution exists if gcd(a_0, ..., a_n) doesn't divide c.
    """
    if 1 in coeff:
        # negate coeff[] because input is of the form: ax + by + c == 0
        #                                    but is used as: ax + by == -c
        c = -coeff[1]
    else:
        c = 0

    # Some solutions will have multiple free variables in their solutions.
    if param is None:
        params = [symbols('t')]*len(var)
    else:
        temp = str(param) + "_%i"
        params = [symbols(temp % i, integer=True) for i in range(len(var))]

    if len(var) == 1:
        # single variable: integer solution exists iff coeff divides c
        q, r = divmod(c, coeff[var[0]])
        if not r:
            return (q,)
        else:
            return (None,)

    '''
    base_solution_linear() can solve diophantine equations of the form:

    a*x + b*y == c

    We break down multivariate linear diophantine equations into a
    series of bivariate linear diophantine equations which can then
    be solved individually by base_solution_linear().

    Consider the following:

    a_0*x_0 + a_1*x_1 + a_2*x_2 == c

    which can be re-written as:

    a_0*x_0 + g_0*y_0 == c

    where

    g_0 == gcd(a_1, a_2)

    and

    y == (a_1*x_1)/g_0 + (a_2*x_2)/g_0

    This leaves us with two binary linear diophantine equations.
    For the first equation:

    a == a_0
    b == g_0
    c == c

    For the second:

    a == a_1/g_0
    b == a_2/g_0
    c == the solution we find for y_0 in the first equation.

    The arrays A and B are the arrays of integers used for
    'a' and 'b' in each of the n-1 bivariate equations we solve.
    '''

    A = [coeff[v] for v in var]
    B = []
    if len(var) > 2:
        # B[i] is the gcd of the tail coefficients A[i+1:], built right-to-left
        B.append(igcd(A[-2], A[-1]))
        A[-2] = A[-2] // B[0]
        A[-1] = A[-1] // B[0]
        for i in range(len(A) - 3, 0, -1):
            gcd = igcd(B[0], A[i])
            B[0] = B[0] // gcd
            A[i] = A[i] // gcd
            B.insert(0, gcd)
    B.append(A[-1])

    '''
    Consider the trivariate linear equation:

    4*x_0 + 6*x_1 + 3*x_2 == 2

    This can be re-written as:

    4*x_0 + 3*y_0 == 2

    where

    y_0 == 2*x_1 + x_2
    (Note that gcd(3, 6) == 3)

    The complete integral solution to this equation is:

    x_0 ==  2 + 3*t_0
    y_0 == -2 - 4*t_0

    where 't_0' is any integer.

    Now that we have a solution for 'x_0', find 'x_1' and 'x_2':

    2*x_1 + x_2 == -2 - 4*t_0

    We can then solve for '-2' and '-4' independently,
    and combine the results:

    2*x_1a + x_2a == -2
    x_1a == 0 + t_0
    x_2a == -2 - 2*t_0

    2*x_1b + x_2b == -4*t_0
    x_1b == 0*t_0 + t_1
    x_2b == -4*t_0 - 2*t_1

    ==>

    x_1 == t_0 + t_1
    x_2 == -2 - 6*t_0 - 2*t_1

    where 't_0' and 't_1' are any integers.

    Note that:

    4*(2 + 3*t_0) + 6*(t_0 + t_1) + 3*(-2 - 6*t_0 - 2*t_1) == 2

    for any integral values of 't_0', 't_1'; as required.

    This method is generalised for many variables, below.
    '''
    solutions = []
    for i in range(len(B)):
        tot_x, tot_y = [], []

        # solve each additive piece of c independently, then recombine
        for j, arg in enumerate(Add.make_args(c)):
            if arg.is_Integer:
                # example: 5 -> k = 5
                k, p = arg, S.One
                pnew = params[0]
            else:  # arg is a Mul or Symbol
                # example: 3*t_1 -> k = 3
                # example: t_0 -> k = 1
                k, p = arg.as_coeff_Mul()
                pnew = params[params.index(p) + 1]

            sol = sol_x, sol_y = base_solution_linear(k, A[i], B[i], pnew)

            if p is S.One:
                if None in sol:
                    return tuple([None]*len(var))
            else:
                # convert a + b*pnew -> a*p + b*pnew
                if isinstance(sol_x, Add):
                    sol_x = sol_x.args[0]*p + sol_x.args[1]
                if isinstance(sol_y, Add):
                    sol_y = sol_y.args[0]*p + sol_y.args[1]

            tot_x.append(sol_x)
            tot_y.append(sol_y)

        solutions.append(Add(*tot_x))
        # the y-part becomes the right-hand side of the next bivariate equation
        c = Add(*tot_y)

    solutions.append(c)
    if param is None:
        # just keep the additive constant (i.e. replace t with 0)
        solutions = [i.as_coeff_Add()[0] for i in solutions]
    return tuple(solutions)
def base_solution_linear(c, a, b, t=None):
    """
    Return the base solution for the linear equation, `ax + by = c`.

    Used by ``diop_linear()`` to find the base solution of a linear
    Diophantine equation. If ``t`` is given then the parametrized solution is
    returned.

    Usage
    =====

    ``base_solution_linear(c, a, b, t)``: ``a``, ``b``, ``c`` are coefficients
    in `ax + by = c` and ``t`` is the parameter to be used in the solution.

    Examples
    ========

    >>> from sympy.solvers.diophantine import base_solution_linear
    >>> from sympy.abc import t
    >>> base_solution_linear(5, 2, 3) # equation 2*x + 3*y = 5
    (-5, 5)
    >>> base_solution_linear(0, 5, 7) # equation 5*x + 7*y = 0
    (0, 0)
    >>> base_solution_linear(5, 2, 3, t) # equation 2*x + 3*y = 5
    (3*t - 5, -2*t + 5)
    >>> base_solution_linear(0, 5, 7, t) # equation 5*x + 7*y = 0
    (7*t, -5*t)
    """
    a, b, c = _remove_gcd(a, b, c)

    # Homogeneous case: any multiple of (b, -a) solves ax + by = 0.
    if c == 0:
        if t is None:
            return (0, 0)
        if b < 0:
            t = -t
        return (b*t, -a*t)

    # Extended gcd gives a particular solution when d = gcd(a, b) divides c.
    x0, y0, d = igcdex(abs(a), abs(b))
    x0 *= sign(a)
    y0 *= sign(b)
    if not divisible(c, d):
        return (None, None)
    if t is None:
        return (c*x0, c*y0)
    if b < 0:
        t = -t
    return (c*x0 + b*t, c*y0 - a*t)
def divisible(a, b):
    """
    Returns `True` if ``a`` is divisible by ``b`` and `False` otherwise.
    """
    return a % b == 0
def diop_quadratic(eq, param=symbols("t", integer=True)):
    """
    Solves quadratic diophantine equations.

    i.e. equations of the form `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`. Returns a
    set containing the tuples `(x, y)` which contains the solutions. If there
    are no solutions then `(None, None)` is returned.

    Usage
    =====

    ``diop_quadratic(eq, param)``: ``eq`` is a quadratic binary diophantine
    equation. ``param`` is used to indicate the parameter to be used in the
    solution.

    Details
    =======

    ``eq`` should be an expression which is assumed to be zero.
    ``param`` is a parameter to be used in the solution.

    Examples
    ========

    >>> from sympy.abc import x, y, t
    >>> from sympy.solvers.diophantine import diop_quadratic
    >>> diop_quadratic(x**2 + y**2 + 2*x + 2*y + 2, t)
    {(-1, -1)}

    References
    ==========

    .. [1] Methods to solve Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0, [online],
          Available: http://www.alpertron.com.ar/METHODS.HTM
    .. [2] Solving the equation ax^2+ bxy + cy^2 + dx + ey + f= 0, [online],
          Available: http://www.jpr2718.org/ax2p.pdf

    See Also
    ========

    diop_linear(), diop_ternary_quadratic(), diop_general_sum_of_squares(),
    diop_general_pythagorean()
    """
    # thin wrapper: classify, then delegate to the real solver
    var, coeff, diop_type = classify_diop(eq, _dict=False)

    if diop_type == "binary_quadratic":
        return _diop_quadratic(var, coeff, param)
def _diop_quadratic(var, coeff, t):
    """Solve the binary quadratic A*x**2 + B*x*y + C*y**2 + D*x + E*y + F = 0
    for the variables in ``var`` using the parameter ``t``; returns a set of
    (x, y) solution tuples.  Dispatches on the discriminant B**2 - 4*A*C:
    simple-hyperbolic, parabolic, square-discriminant and Pell cases."""

    x, y = var

    A = coeff[x**2]
    B = coeff[x*y]
    C = coeff[y**2]
    D = coeff[x]
    E = coeff[y]
    F = coeff[1]

    A, B, C, D, E, F = [as_int(i) for i in _remove_gcd(A, B, C, D, E, F)]

    # (1) Simple-Hyperbolic case: A = C = 0, B != 0
    # In this case equation can be converted to (Bx + E)(By + D) = DE - BF
    # We consider two cases; DE - BF = 0 and DE - BF != 0
    # More details, http://www.alpertron.com.ar/METHODS.HTM#SHyperb

    sol = set([])
    discr = B**2 - 4*A*C
    if A == 0 and C == 0 and B != 0:

        if D*E - B*F == 0:
            q, r = divmod(E, B)
            if not r:
                sol.add((-q, t))
            q, r = divmod(D, B)
            if not r:
                sol.add((t, -q))
        else:
            div = divisors(D*E - B*F)
            div = div + [-term for term in div]
            for d in div:
                x0, r = divmod(d - E, B)
                if not r:
                    q, r = divmod(D*E - B*F, d)
                    if not r:
                        y0, r = divmod(q - D, B)
                        if not r:
                            sol.add((x0, y0))

    # (2) Parabolic case: B**2 - 4*A*C = 0
    # There are two subcases to be considered in this case.
    # sqrt(c)D - sqrt(a)E = 0 and sqrt(c)D - sqrt(a)E != 0
    # More Details, http://www.alpertron.com.ar/METHODS.HTM#Parabol

    elif discr == 0:

        if A == 0:
            s = _diop_quadratic([y, x], coeff, t)
            for soln in s:
                sol.add((soln[1], soln[0]))

        else:
            g = sign(A)*igcd(A, C)
            a = A // g
            b = B // g
            c = C // g
            e = sign(B/A)

            sqa = isqrt(a)
            sqc = isqrt(c)
            _c = e*sqc*D - sqa*E
            if not _c:
                z = symbols("z", real=True)
                eq = sqa*g*z**2 + D*z + sqa*F
                roots = solveset_real(eq, z).intersect(S.Integers)
                for root in roots:
                    ans = diop_solve(sqa*x + e*sqc*y - root)
                    sol.add((ans[0], ans[1]))

            elif _is_int(c):
                solve_x = lambda u: -e*sqc*g*_c*t**2 - (E + 2*e*sqc*g*u)*t \
                    - (e*sqc*g*u**2 + E*u + e*sqc*F) // _c
                solve_y = lambda u: sqa*g*_c*t**2 + (D + 2*sqa*g*u)*t \
                    + (sqa*g*u**2 + D*u + sqa*F) // _c

                for z0 in range(0, abs(_c)):
                    # Check if the coefficients of y and x obtained are integers or not
                    # FIX: second numerator was written e*sqc**g*z0**2 (double
                    # star); it must match solve_x's numerator e*sqc*g*u**2.
                    if (divisible(sqa*g*z0**2 + D*z0 + sqa*F, _c) and
                            divisible(e*sqc*g*z0**2 + E*z0 + e*sqc*F, _c)):
                        sol.add((solve_x(z0), solve_y(z0)))

    # (3) Method used when B**2 - 4*A*C is a square, is described in p. 6 of the below paper
    # by John P. Robertson.
    # http://www.jpr2718.org/ax2p.pdf

    elif is_square(discr):
        if A != 0:
            r = sqrt(discr)
            u, v = symbols("u, v", integer=True)
            eq = _mexpand(
                4*A*r*u*v + 4*A*D*(B*v + r*u + r*v - B*u) +
                2*A*4*A*E*(u - v) + 4*A*r*4*A*F)

            solution = diop_solve(eq, t)

            for s0, t0 in solution:

                num = B*t0 + r*s0 + r*t0 - B*s0
                x_0 = S(num)/(4*A*r)
                y_0 = S(s0 - t0)/(2*r)
                if isinstance(s0, Symbol) or isinstance(t0, Symbol):
                    if check_param(x_0, y_0, 4*A*r, t) != (None, None):
                        ans = check_param(x_0, y_0, 4*A*r, t)
                        sol.add((ans[0], ans[1]))
                elif x_0.is_Integer and y_0.is_Integer:
                    if is_solution_quad(var, coeff, x_0, y_0):
                        sol.add((x_0, y_0))

        else:
            s = _diop_quadratic(var[::-1], coeff, t)  # Interchange x and y
            while s:                                  # |
                sol.add(s.pop()[::-1])  # and solution <--------+

    # (4) B**2 - 4*A*C > 0 and B**2 - 4*A*C not a square or B**2 - 4*A*C < 0

    else:

        P, Q = _transformation_to_DN(var, coeff)
        D, N = _find_DN(var, coeff)
        solns_pell = diop_DN(D, N)

        if D < 0:
            for x0, y0 in solns_pell:
                for x in [-x0, x0]:
                    for y in [-y0, y0]:
                        s = P*Matrix([x, y]) + Q
                        try:
                            sol.add(tuple([as_int(_) for _ in s]))
                        except ValueError:
                            pass
        else:
            # In this case equation can be transformed into a Pell equation

            solns_pell = set(solns_pell)
            for X, Y in list(solns_pell):
                solns_pell.add((-X, -Y))

            a = diop_DN(D, 1)
            T = a[0][0]
            U = a[0][1]

            if all(_is_int(_) for _ in P[:4] + Q[:2]):
                for r, s in solns_pell:
                    _a = (r + s*sqrt(D))*(T + U*sqrt(D))**t
                    _b = (r - s*sqrt(D))*(T - U*sqrt(D))**t
                    x_n = _mexpand(S(_a + _b)/2)
                    y_n = _mexpand(S(_a - _b)/(2*sqrt(D)))
                    s = P*Matrix([x_n, y_n]) + Q
                    sol.add(tuple(s))

            else:
                L = ilcm(*[_.q for _ in P[:4] + Q[:2]])

                k = 1

                T_k = T
                U_k = U

                while (T_k - 1) % L != 0 or U_k % L != 0:
                    T_k, U_k = T_k*T + D*U_k*U, T_k*U + U_k*T
                    k += 1

                for X, Y in solns_pell:

                    for i in range(k):
                        if all(_is_int(_) for _ in P*Matrix([X, Y]) + Q):
                            _a = (X + sqrt(D)*Y)*(T_k + sqrt(D)*U_k)**t
                            _b = (X - sqrt(D)*Y)*(T_k - sqrt(D)*U_k)**t
                            Xt = S(_a + _b)/2
                            Yt = S(_a - _b)/(2*sqrt(D))
                            s = P*Matrix([Xt, Yt]) + Q
                            sol.add(tuple(s))

                        X, Y = X*T + D*U*Y, X*U + Y*T

    return sol
def is_solution_quad(var, coeff, u, v):
    """
    Return True if ``(u, v)`` satisfies the binary quadratic diophantine
    equation described by the variable list ``var`` and the coefficient
    dictionary ``coeff``, and False otherwise.
    Not intended for use by normal users.
    """
    # Substitute u and v for the two variables in every term, sum the
    # terms and check that the expanded result vanishes.
    subs_map = dict(zip(var, (u, v)))
    total = Add(*[c*term.xreplace(subs_map) for term, c in coeff.items()])
    return _mexpand(total) == 0
def diop_DN(D, N, t=symbols("t", integer=True)):
    """
    Solves the equation `x^2 - Dy^2 = N`.
    Mainly concerned with the case `D > 0, D` is not a perfect square,
    which is the same as the generalized Pell equation. The LMM
    algorithm [1]_ is used to solve this equation.
    Returns one solution tuple, (`x, y)` for each class of the solutions.
    Other solutions of the class can be constructed according to the
    values of ``D`` and ``N``.
    Usage
    =====
    ``diop_DN(D, N, t)``: D and N are integers as in `x^2 - Dy^2 = N` and
    ``t`` is the parameter to be used in the solutions.
    Details
    =======
    ``D`` and ``N`` correspond to D and N in the equation.
    ``t`` is the parameter to be used in the solutions.
    Examples
    ========
    >>> from sympy.solvers.diophantine import diop_DN
    >>> diop_DN(13, -4) # Solves equation x**2 - 13*y**2 = -4
    [(3, 1), (393, 109), (36, 10)]
    The output can be interpreted as follows: There are three fundamental
    solutions to the equation `x^2 - 13y^2 = -4` given by (3, 1), (393, 109)
    and (36, 10). Each tuple is in the form (x, y), i.e. solution (3, 1) means
    that `x = 3` and `y = 1`.
    >>> diop_DN(986, 1) # Solves equation x**2 - 986*y**2 = 1
    [(49299, 1570)]
    See Also
    ========
    find_DN(), diop_bf_DN()
    References
    ==========
    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
           Robertson, July 31, 2004, Pages 16 - 17. [online], Available:
           http://www.jpr2718.org/pell.pdf
    """
    if D < 0:
        # Negative D: x**2 + |D|*y**2 = N is positive definite, so there
        # are only finitely many solutions.
        if N == 0:
            return [(0, 0)]
        elif N < 0:
            return []
        elif N > 0:
            sol = []
            # For every square divisor d**2 of N, find the primitive
            # solutions of x**2 + |D|*y**2 = N/d**2 with Cornacchia's
            # method and scale them back up by d.
            for d in divisors(square_factor(N)):
                sols = cornacchia(1, -D, N // d**2)
                if sols:
                    for x, y in sols:
                        sol.append((d*x, d*y))
                        if D == -1:
                            # x**2 + y**2 = N is symmetric in x and y, so
                            # the swapped pair is a distinct solution.
                            sol.append((d*y, d*x))
            return sol
    elif D == 0:
        # Degenerate case x**2 = N; y is unconstrained, hence parameter t.
        if N < 0:
            return []
        if N == 0:
            return [(0, t)]
        sN, _exact = integer_nthroot(N, 2)
        if _exact:
            return [(sN, t)]
        else:
            return []
    else: # D > 0
        sD, _exact = integer_nthroot(D, 2)
        if _exact:
            # D is a perfect square: x**2 - D*y**2 = (x - sD*y)*(x + sD*y),
            # so only a bounded range of y values needs to be checked.
            if N == 0:
                return [(sD*t, t)]
            else:
                sol = []
                for y in range(floor(sign(N)*(N - 1)/(2*sD)) + 1):
                    try:
                        sq, _exact = integer_nthroot(D*y**2 + N, 2)
                    except ValueError:
                        _exact = False
                    if _exact:
                        sol.append((sq, y))
                return sol
        elif 1 < N**2 < D:
            # It is much faster to call `_special_diop_DN`.
            return _special_diop_DN(D, N)
        else:
            if N == 0:
                # x**2 = D*y**2 with D not a square forces x = y = 0.
                return [(0, 0)]
            elif abs(N) == 1:
                # Classic Pell equation: expand sqrt(D) via PQa; the
                # convergents G/B at the end of the first period give the
                # fundamental solution.  j counts terms in the period.
                pqa = PQa(0, 1, D)
                j = 0
                G = []
                B = []
                for i in pqa:
                    a = i[2]
                    G.append(i[5])
                    B.append(i[4])
                    if j != 0 and a == 2*sD:
                        break
                    j = j + 1
                if _odd(j):
                    # Odd period length: x**2 - D*y**2 = -1 is solvable.
                    if N == -1:
                        x = G[j - 1]
                        y = B[j - 1]
                    else:
                        # For N = 1 continue the expansion through a
                        # second period to reach the +1 solution.
                        count = j
                        while count < 2*j - 1:
                            i = next(pqa)
                            G.append(i[5])
                            B.append(i[4])
                            count += 1
                        x = G[count]
                        y = B[count]
                else:
                    if N == 1:
                        x = G[j - 1]
                        y = B[j - 1]
                    else:
                        # Even period length: x**2 - D*y**2 = -1 has no
                        # solution.
                        return []
                return [(x, y)]
            else:
                # General N (LMM method): for every f with f**2 | N solve
                # the reduced equation with m = N/f**2, using the PQa
                # expansion of (z + sqrt(D))/|m| for each square root z of
                # D modulo |m|.
                fs = []
                sol = []
                div = divisors(N)
                for d in div:
                    if divisible(N, d**2):
                        fs.append(d)
                for f in fs:
                    m = N // f**2
                    zs = sqrt_mod(D, abs(m), all_roots=True)
                    zs = [i for i in zs if i <= abs(m) // 2 ]
                    if abs(m) != 2:
                        zs = zs + [-i for i in zs if i] # omit dupl 0
                    for z in zs:
                        pqa = PQa(z, abs(m), D)
                        j = 0
                        G = []
                        B = []
                        for i in pqa:
                            a = i[2]
                            G.append(i[5])
                            B.append(i[4])
                            if j != 0 and abs(i[1]) == 1:
                                # A convergent with |Q| = 1 was reached;
                                # test whether it solves the equation for
                                # m or for -m.
                                r = G[j-1]
                                s = B[j-1]
                                if r**2 - D*s**2 == m:
                                    sol.append((f*r, f*s))
                                elif diop_DN(D, -1) != []:
                                    # Multiply by a solution of
                                    # x**2 - D*y**2 = -1 to flip the sign
                                    # of r**2 - D*s**2.
                                    a = diop_DN(D, -1)
                                    sol.append((f*(r*a[0][0] + a[0][1]*s*D), f*(r*a[0][1] + s*a[0][0])))
                                break
                            j = j + 1
                            if j == length(z, abs(m), D):
                                # One full period examined without a
                                # |Q| = 1 convergent: this class yields no
                                # solution.
                                break
                return sol
def _special_diop_DN(D, N):
    """
    Solves the equation `x^2 - Dy^2 = N` for the special case where
    `1 < N**2 < D` and `D` is not a perfect square.
    It is better to call `diop_DN` rather than this function, as
    the former checks the condition `1 < N**2 < D`, and calls the latter only
    if appropriate.
    Usage
    =====
    WARNING: Internal method. Do not call directly!
    ``_special_diop_DN(D, N)``: D and N are integers as in `x^2 - Dy^2 = N`.
    Details
    =======
    ``D`` and ``N`` correspond to D and N in the equation.
    Examples
    ========
    >>> from sympy.solvers.diophantine import _special_diop_DN
    >>> _special_diop_DN(13, -3) # Solves equation x**2 - 13*y**2 = -3
    [(7, 2), (137, 38)]
    The output can be interpreted as follows: There are two fundamental
    solutions to the equation `x^2 - 13y^2 = -3` given by (7, 2) and
    (137, 38). Each tuple is in the form (x, y), i.e. solution (7, 2) means
    that `x = 7` and `y = 2`.
    >>> _special_diop_DN(2445, -20) # Solves equation x**2 - 2445*y**2 = -20
    [(445, 9), (17625560, 356454), (698095554475, 14118073569)]
    See Also
    ========
    diop_DN()
    References
    ==========
    .. [1] Section 4.4.4 of the following book:
        Quadratic Diophantine Equations, T. Andreescu and D. Andrica,
        Springer, 2015.
    """
    # The following assertion was removed for efficiency, with the understanding
    # that this method is not called directly. The parent method, `diop_DN`
    # is responsible for performing the appropriate checks.
    #
    # assert (1 < N**2 < D) and (not integer_nthroot(D, 2)[1])
    sqrt_D = sqrt(D)
    # F collects (N/f**2, f) for every square divisor f**2 of N; solutions
    # of the reduced equations scale back up by f.
    F = [(N, 1)]
    f = 2
    while True:
        f2 = f**2
        if f2 > abs(N):
            break
        n, r = divmod(N, f2)
        if r == 0:
            F.append((n, f))
        f += 1
    # Continued fraction expansion of sqrt(D): (P + sqrt(D))/Q with the
    # convergents' numerators G and denominators B built by the standard
    # three-term recurrence.
    P = 0
    Q = 1
    G0, G1 = 0, 1
    B0, B1 = 1, 0
    solutions = []
    i = 0
    while True:
        a = floor((P + sqrt_D) / Q)
        P = a*Q - P
        Q = (D - P**2) // Q
        G2 = a*G1 + G0
        B2 = a*B1 + B0
        # Test the new convergent against every reduced right-hand side.
        for n, f in F:
            if G2**2 - D*B2**2 == n:
                solutions.append((f*G2, f*B2))
        i += 1
        # Stop at the end of an even number of steps once the period has
        # closed (Q back to 1).
        if Q == 1 and i % 2 == 0:
            break
        G0, G1 = G1, G2
        B0, B1 = B1, B2
    return solutions
def cornacchia(a, b, m):
    """
    Solves `ax^2 + by^2 = m` where `\gcd(a, b) = 1 = gcd(a, m)` and `a, b > 0`.
    Uses the algorithm due to Cornacchia. The method only finds primitive
    solutions, i.e. ones with `\gcd(x, y) = 1`. So this method can't be used to
    find the solutions of `x^2 + y^2 = 20` since the only solution to former is
    `(x, y) = (4, 2)` and it is not primitive. When `a = b`, only the
    solutions with `x \leq y` are found. For more details, see the References.
    Examples
    ========
    >>> from sympy.solvers.diophantine import cornacchia
    >>> cornacchia(2, 3, 35) # equation 2x**2 + 3y**2 = 35
    {(2, 3), (4, 1)}
    >>> cornacchia(1, 1, 25) # equation x**2 + y**2 = 25
    {(4, 3)}
    References
    ===========
    .. [1] A. Nitaj, "L'algorithme de Cornacchia"
    .. [2] Solving the diophantine equation ax**2 + by**2 = m by Cornacchia's
        method, [online], Available:
        http://www.numbertheory.org/php/cornacchia.html
    See Also
    ========
    sympy.utilities.iterables.signed_permutations
    """
    sols = set()
    # a1 is the inverse of a modulo m (gcd(a, m) = 1 is a precondition).
    a1 = igcdex(a, m)[0]
    # v holds the square roots of -b/a modulo m; no roots means no
    # primitive solutions.
    v = sqrt_mod(-b*a1, m, all_roots=True)
    if not v:
        return None
    for t in v:
        # Only the representatives in the upper half-range are processed;
        # the lower half would give duplicate solutions.
        if t < m // 2:
            continue
        # Euclidean descent on (t, m) until the remainder r satisfies
        # a*r**2 < m; r is then the candidate x value.
        u, r = t, m
        while True:
            u, r = r, u % r
            if a*r**2 < m:
                break
        # The matching y must satisfy b*y**2 = m - a*r**2 exactly.
        m1 = m - a*r**2
        if m1 % b == 0:
            m1 = m1 // b
            s, _exact = integer_nthroot(m1, 2)
            if _exact:
                if a == b and r < s:
                    # Normalize so that x >= y when a == b.
                    r, s = s, r
                sols.add((int(r), int(s)))
    return sols
def PQa(P_0, Q_0, D):
    """
    Returns useful information needed to solve the Pell equation.
    There are six sequences of integers defined related to the continued
    fraction representation of `\\frac{P + \sqrt{D}}{Q}`, namely {`P_{i}`},
    {`Q_{i}`}, {`a_{i}`},{`A_{i}`}, {`B_{i}`}, {`G_{i}`}. ``PQa()`` Returns
    these values as a 6-tuple in the same order as mentioned above. Refer [1]_
    for more detailed information.
    Usage
    =====
    ``PQa(P_0, Q_0, D)``: ``P_0``, ``Q_0`` and ``D`` are integers corresponding
    to `P_{0}`, `Q_{0}` and `D` in the continued fraction
    `\\frac{P_{0} + \sqrt{D}}{Q_{0}}`.
    Also it's assumed that `P_{0}^2 == D mod(|Q_{0}|)` and `D` is square free.
    Examples
    ========
    >>> from sympy.solvers.diophantine import PQa
    >>> pqa = PQa(13, 4, 5) # (13 + sqrt(5))/4
    >>> next(pqa) # (P_0, Q_0, a_0, A_0, B_0, G_0)
    (13, 4, 3, 3, 1, -1)
    >>> next(pqa) # (P_1, Q_1, a_1, A_1, B_1, G_1)
    (-1, 1, 1, 4, 1, 3)
    References
    ==========
    .. [1] Solving the generalized Pell equation x^2 - Dy^2 = N, John P.
        Robertson, July 31, 2004, Pages 4 - 8. http://www.jpr2718.org/pell.pdf
    """
    # Seed values correspond to the i = -1 and i = -2 terms of the
    # standard three-term recurrences (see [1]_).
    A_i_2 = B_i_1 = 0
    A_i_1 = B_i_2 = 1
    G_i_2 = -P_0
    G_i_1 = Q_0
    P_i = P_0
    Q_i = Q_0
    while(1):
        # a_i is the next partial quotient of the continued fraction.
        a_i = floor((P_i + sqrt(D))/Q_i)
        # Convergent recurrences: X_i = a_i*X_{i-1} + X_{i-2}.
        A_i = a_i*A_i_1 + A_i_2
        B_i = a_i*B_i_1 + B_i_2
        G_i = a_i*G_i_1 + G_i_2
        yield P_i, Q_i, a_i, A_i, B_i, G_i
        A_i_1, A_i_2 = A_i, A_i_1
        B_i_1, B_i_2 = B_i, B_i_1
        G_i_1, G_i_2 = G_i, G_i_1
        P_i = a_i*Q_i - P_i
        # This division is exact: Q_i | (D - P_i**2) is an invariant of
        # the PQa recurrence given the precondition P_0**2 == D mod(|Q_0|).
        Q_i = (D - P_i**2)/Q_i
def diop_bf_DN(D, N, t=symbols("t", integer=True)):
    """
    Uses brute force to solve the equation, `x^2 - Dy^2 = N`.
    Mainly concerned with the generalized Pell equation which is the case when
    `D > 0, D` is not a perfect square. For more information on the case refer
    [1]_. Let `(t, u)` be the minimal positive solution of the equation
    `x^2 - Dy^2 = 1`. Then this method requires
    `\sqrt{\\frac{\mid N \mid (t \pm 1)}{2D}}` to be small.
    Usage
    =====
    ``diop_bf_DN(D, N, t)``: ``D`` and ``N`` are coefficients in
    `x^2 - Dy^2 = N` and ``t`` is the parameter to be used in the solutions.
    Details
    =======
    ``D`` and ``N`` correspond to D and N in the equation.
    ``t`` is the parameter to be used in the solutions.
    Examples
    ========
    >>> from sympy.solvers.diophantine import diop_bf_DN
    >>> diop_bf_DN(13, -4)
    [(3, 1), (-3, 1), (36, 10)]
    >>> diop_bf_DN(986, 1)
    [(49299, 1570)]
    See Also
    ========
    diop_DN()
    References
    ==========
    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Page 15. http://www.jpr2718.org/pell.pdf
    """
    D = as_int(D)
    N = as_int(N)
    sol = []
    # u is the x value of the minimal positive solution of x**2 - D*y**2 = 1;
    # it bounds the brute-force search range for y (see [1]_).
    a = diop_DN(D, 1)
    u = a[0][0]
    if abs(N) == 1:
        return diop_DN(D, N)
    elif N > 1:
        # 0 <= y <= sqrt(N*(u - 1)/(2*D)) per Robertson's bound.
        L1 = 0
        L2 = integer_nthroot(int(N*(u - 1)/(2*D)), 2)[0] + 1
    elif N < -1:
        # sqrt(-N/D) <= y <= sqrt(-N*(u + 1)/(2*D)) per Robertson's bound.
        L1, _exact = integer_nthroot(-int(N/D), 2)
        if not _exact:
            L1 += 1
        L2 = integer_nthroot(-int(N*(u + 1)/(2*D)), 2)[0] + 1
    else: # N = 0
        # x**2 = D*y**2: solvable with y free only when D is zero or a
        # perfect square; otherwise only the trivial solution exists.
        if D < 0:
            return [(0, 0)]
        elif D == 0:
            return [(0, t)]
        else:
            sD, _exact = integer_nthroot(D, 2)
            if _exact:
                return [(sD*t, t), (-sD*t, t)]
            else:
                return [(0, 0)]
    for y in range(L1, L2):
        try:
            x, _exact = integer_nthroot(N + D*y**2, 2)
        except ValueError:
            _exact = False
        if _exact:
            sol.append((x, y))
            # Record (-x, y) as well unless it belongs to the same
            # equivalence class as (x, y).
            if not equivalent(x, y, -x, y, D, N):
                sol.append((-x, y))
    return sol
def equivalent(u, v, r, s, D, N):
    """
    Return True when the solutions `(u, v)` and `(r, s)` of
    `x^2 - Dy^2 = N` lie in the same equivalence class, else False.
    The two solutions belong to one class exactly when `N` divides both
    `(ur - Dvs)` and `(us - vr)`; see reference [1]_. No check is made
    that the given tuples actually solve the equation — that is the
    caller's responsibility.
    Usage
    =====
    ``equivalent(u, v, r, s, D, N)``: `(u, v)` and `(r, s)` are two solutions
    of the equation `x^2 - Dy^2 = N` and all parameters involved are integers.
    Examples
    ========
    >>> from sympy.solvers.diophantine import equivalent
    >>> equivalent(18, 5, -18, -5, 13, -1)
    True
    >>> equivalent(3, 1, -18, 393, 109, -4)
    False
    References
    ==========
    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Page 12. http://www.jpr2718.org/pell.pdf
    """
    # Both cross terms must be divisible by N for class equivalence.
    cross_1 = u*r - D*v*s
    cross_2 = u*s - v*r
    return divisible(cross_1, N) and divisible(cross_2, N)
def length(P, Q, D):
    """
    Return the total length (aperiodic part plus periodic part) of the
    continued fraction representation of `\\frac{P + \sqrt{D}}{Q}`.
    Note that this is NOT just the length of the periodic part: the
    lengths of both parts are summed.
    Usage
    =====
    ``length(P, Q, D)``: ``P``, ``Q`` and ``D`` are integers corresponding to
    the continued fraction `\\frac{P + \sqrt{D}}{Q}`.
    Details
    =======
    ``P``, ``D`` and ``Q`` corresponds to P, D and Q in the continued fraction,
    `\\frac{P + \sqrt{D}}{Q}`.
    Examples
    ========
    >>> from sympy.solvers.diophantine import length
    >>> length(-2 , 4, 5) # (-2 + sqrt(5))/4
    3
    >>> length(-5, 4, 17) # (-5 + sqrt(17))/4
    5
    See Also
    ========
    sympy.ntheory.continued_fraction.continued_fraction_periodic
    """
    from sympy.ntheory.continued_fraction import continued_fraction_periodic
    cf = continued_fraction_periodic(P, Q, D)
    # A periodic part, when present, is stored as a nested list in the
    # final slot of the representation.
    if type(cf[-1]) is list:
        return (len(cf) - 1) + len(cf[-1])
    return len(cf)
def transformation_to_DN(eq):
    """
    This function transforms general quadratic,
    `ax^2 + bxy + cy^2 + dx + ey + f = 0`
    to more easy to deal with `X^2 - DY^2 = N` form.
    This is used to solve the general quadratic equation by transforming it to
    the latter form. Refer [1]_ for more detailed information on the
    transformation. This function returns a tuple (A, B) where A is a 2 X 2
    matrix and B is a 2 X 1 matrix such that,
    Transpose([x y]) = A * Transpose([X Y]) + B
    Usage
    =====
    ``transformation_to_DN(eq)``: where ``eq`` is the quadratic to be
    transformed.
    Examples
    ========
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.diophantine import transformation_to_DN
    >>> from sympy.solvers.diophantine import classify_diop
    >>> A, B = transformation_to_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
    >>> A
    Matrix([
    [1/26, 3/26],
    [   0, 1/13]])
    >>> B
    Matrix([
    [-6/13],
    [-4/13]])
    A, B  returned are such that Transpose((x y)) =  A * Transpose((X Y)) + B.
    Substituting these values for `x` and `y` and a bit of simplifying work
    will give an equation of the form `x^2 - Dy^2 = N`.
    >>> from sympy.abc import X, Y
    >>> from sympy import Matrix, simplify
    >>> u = (A*Matrix([X, Y]) + B)[0] # Transformation for x
    >>> u
    X/26 + 3*Y/26 - 6/13
    >>> v = (A*Matrix([X, Y]) + B)[1] # Transformation for y
    >>> v
    Y/13 - 4/13
    Next we will substitute these formulas for `x` and `y` and do
    ``simplify()``.
    >>> eq = simplify((x**2 - 3*x*y - y**2 - 2*y + 1).subs(zip((x, y), (u, v))))
    >>> eq
    X**2/676 - Y**2/52 + 17/13
    By multiplying the denominator appropriately, we can get a Pell equation
    in the standard form.
    >>> eq * 676
    X**2 - 13*Y**2 + 884
    If only the final equation is needed, ``find_DN()`` can be used.
    See Also
    ========
    find_DN()
    References
    ==========
    .. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
           John P.Robertson, May 8, 2003, Page 7 - 11.
           http://www.jpr2718.org/ax2p.pdf
    """
    # Only binary quadratics can be transformed; any other classification
    # falls through and returns None.
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    if eq_type == "binary_quadratic":
        return _transformation_to_DN(var, coeff)
def _transformation_to_DN(var, coeff):
    # Worker for transformation_to_DN(): successively eliminate the x*y
    # term, then the x term, then the y term, composing the linear
    # transformation matrices of each recursive step.  Returns (A, B) with
    # Transpose([x y]) = A*Transpose([X Y]) + B.
    x, y = var
    a = coeff[x**2]
    b = coeff[x*y]
    c = coeff[y**2]
    d = coeff[x]
    e = coeff[y]
    f = coeff[1]
    # Normalize to integer coefficients with gcd 1.
    a, b, c, d, e, f = [as_int(i) for i in _remove_gcd(a, b, c, d, e, f)]
    X, Y = symbols("X, Y", integer=True)
    if b:
        # Remove the cross term with x -> (X - C*Y)/B after scaling.
        B, C = _rational_pq(2*a, b)
        A, T = _rational_pq(a, B**2)
        # eq_1 = A*B*X**2 + B*(c*T - A*C**2)*Y**2 + d*T*X + (B*e*T - d*T*C)*Y + f*T*B
        coeff = {X**2: A*B, X*Y: 0, Y**2: B*(c*T - A*C**2), X: d*T, Y: B*e*T - d*T*C, 1: f*T*B}
        A_0, B_0 = _transformation_to_DN([X, Y], coeff)
        return Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*B_0
    else:
        if d:
            # Remove the linear x term with x -> (X - C)/B.
            B, C = _rational_pq(2*a, d)
            A, T = _rational_pq(a, B**2)
            # eq_2 = A*X**2 + c*T*Y**2 + e*T*Y + f*T - A*C**2
            coeff = {X**2: A, X*Y: 0, Y**2: c*T, X: 0, Y: e*T, 1: f*T - A*C**2}
            A_0, B_0 = _transformation_to_DN([X, Y], coeff)
            return Matrix(2, 2, [S(1)/B, 0, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, 0, 0, 1])*B_0 + Matrix([-S(C)/B, 0])
        else:
            if e:
                # Remove the linear y term with y -> (Y - C)/B.
                B, C = _rational_pq(2*c, e)
                A, T = _rational_pq(c, B**2)
                # eq_3 = a*T*X**2 + A*Y**2 + f*T - A*C**2
                coeff = {X**2: a*T, X*Y: 0, Y**2: A, X: 0, Y: 0, 1: f*T - A*C**2}
                A_0, B_0 = _transformation_to_DN([X, Y], coeff)
                return Matrix(2, 2, [1, 0, 0, S(1)/B])*A_0, Matrix(2, 2, [1, 0, 0, S(1)/B])*B_0 + Matrix([0, -S(C)/B])
            else:
                # TODO: pre-simplification: Not necessary but may simplify
                # the equation.
                return Matrix(2, 2, [S(1)/a, 0, 0, 1]), Matrix([0, 0])
def find_DN(eq):
    """
    This function returns a tuple, `(D, N)` of the simplified form,
    `x^2 - Dy^2 = N`, corresponding to the general quadratic,
    `ax^2 + bxy + cy^2 + dx + ey + f = 0`.
    Solving the general quadratic is then equivalent to solving the equation
    `X^2 - DY^2 = N` and transforming the solutions by using the transformation
    matrices returned by ``transformation_to_DN()``.
    Usage
    =====
    ``find_DN(eq)``: where ``eq`` is the quadratic to be transformed.
    Examples
    ========
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.diophantine import find_DN
    >>> find_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
    (13, -884)
    Interpretation of the output is that we get `X^2 -13Y^2 = -884` after
    transforming `x^2 - 3xy - y^2 - 2y + 1` using the transformation returned
    by ``transformation_to_DN()``.
    See Also
    ========
    transformation_to_DN()
    References
    ==========
    .. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
           John P.Robertson, May 8, 2003, Page 7 - 11.
           http://www.jpr2718.org/ax2p.pdf
    """
    # Only binary quadratics are supported; other classifications yield None.
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    if eq_type == "binary_quadratic":
        return _find_DN(var, coeff)
def _find_DN(var, coeff):
    # Apply the transformation x, y -> u(X, Y), v(X, Y) produced by
    # _transformation_to_DN and read D and N off the transformed equation,
    # which has the shape k*(X**2 - D*Y**2 - N) for some rational k.
    x, y = var
    X, Y = symbols("X, Y", integer=True)
    A, B = _transformation_to_DN(var, coeff)
    transformed = A*Matrix([X, Y]) + B
    u, v = transformed[0], transformed[1]
    eq = Add(*[coeff[term]*term for term in (x**2, x*y, y**2, x, y, 1)])
    simplified = _mexpand(eq.subs(zip((x, y), (u, v))))
    new_coeff = simplified.as_coefficients_dict()
    return -new_coeff[Y**2]/new_coeff[X**2], -new_coeff[1]/new_coeff[X**2]
def check_param(x, y, a, t):
    """
    If there is a number modulo ``a`` such that ``x`` and ``y`` are both
    integers, then return a parametric representation for ``x`` and ``y``
    else return (None, None).
    Here ``x`` and ``y`` are functions of ``t``.
    """
    from sympy.simplify.simplify import clear_coefficients
    # A constant that is not an integer can never be made integral by a
    # choice of t.
    if x.is_number and not x.is_Integer:
        return (None, None)
    if y.is_number and not y.is_Integer:
        return (None, None)
    m, n = symbols("m, n", integer=True)
    # c.q is the denominator of the common content of x and y; it must
    # divide a for an integral representative modulo a to exist.
    c, p = (m*x + n*y).as_content_primitive()
    if a % c.q:
        return (None, None)
    # clear_coefficients(mx + b, R)[1] -> (R - b)/m
    # Equating the two cleared forms couples m and n so that both x and y
    # are integral simultaneously; the resulting diophantine equation in
    # (m, n) is solved parametrically in t.
    eq = clear_coefficients(x, m)[1] - clear_coefficients(y, n)[1]
    junk, eq = eq.as_content_primitive()
    return diop_solve(eq, t)
def diop_ternary_quadratic(eq):
    """
    Solves the general quadratic ternary form,
    `ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
    Returns a tuple `(x, y, z)` which is a base solution for the above
    equation. If there are no solutions, `(None, None, None)` is returned.
    Usage
    =====
    ``diop_ternary_quadratic(eq)``: Return a tuple containing a basic solution
    to ``eq``.
    Details
    =======
    ``eq`` should be an homogeneous expression of degree two in three variables
    and it is assumed to be zero.
    Examples
    ========
    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine import diop_ternary_quadratic
    >>> diop_ternary_quadratic(x**2 + 3*y**2 - z**2)
    (1, 0, 1)
    >>> diop_ternary_quadratic(4*x**2 + 5*y**2 - z**2)
    (1, 0, 2)
    >>> diop_ternary_quadratic(45*x**2 - 7*y**2 - 8*x*y - z**2)
    (28, 45, 105)
    >>> diop_ternary_quadratic(x**2 - 49*y**2 - z**2 + 13*z*y -8*x*y)
    (9, 1, 5)
    """
    # Dispatch only for homogeneous ternary quadratics (general or
    # already in normal a*x**2 + b*y**2 + c*z**2 form).
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    ternary_types = (
        "homogeneous_ternary_quadratic",
        "homogeneous_ternary_quadratic_normal")
    if eq_type in ternary_types:
        return _diop_ternary_quadratic(var, coeff)
def _diop_ternary_quadratic(_var, coeff):
    # Worker for diop_ternary_quadratic(): return a base solution
    # (x, y, z) of the homogeneous ternary quadratic described by
    # ``coeff``, or (None, None, None) if none exists.  The strategy is
    # to permute variables and/or apply a linear substitution until the
    # equation is in the normal form a*x**2 + b*y**2 + c*z**2 = 0.
    x, y, z = _var
    var = [x, y, z]
    # Equations of the form B*x*y + C*z*x + E*y*z = 0 and At least two of the
    # coefficients A, B, C are non-zero.
    # There are infinitely many solutions for the equation.
    # Ex: (0, 0, t), (0, t, 0), (t, 0, 0)
    # Equation can be re-written as y*(B*x + E*z) = -C*x*z and we can find rather
    # unobvious solutions. Set y = -C and B*x + E*z = x*z. The latter can be solved by
    # using methods for binary quadratic diophantine equations. Let's select the
    # solution which minimizes |x| + |z|
    if not any(coeff[i**2] for i in var):
        if coeff[x*z]:
            sols = diophantine(coeff[x*y]*x + coeff[y*z]*z - x*z)
            # Keep the solution minimizing |x| + |z|.
            s = sols.pop()
            min_sum = abs(s[0]) + abs(s[1])
            for r in sols:
                if abs(r[0]) + abs(r[1]) < min_sum:
                    s = r
                    min_sum = abs(s[0]) + abs(s[1])
            x_0, y_0, z_0 = s[0], -coeff[x*z], s[1]
        else:
            # No x*z term: swap x and y and recurse.
            var[0], var[1] = _var[1], _var[0]
            y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
        return _remove_gcd(x_0, y_0, z_0)
    if coeff[x**2] == 0:
        # If the coefficient of x is zero change the variables
        if coeff[y**2] == 0:
            var[0], var[2] = _var[2], _var[0]
            z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
        else:
            var[0], var[1] = _var[1], _var[0]
            y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
    else:
        if coeff[x*y] or coeff[x*z]:
            # Apply the transformation x --> X - (B*y + C*z)/(2*A)
            A = coeff[x**2]
            B = coeff[x*y]
            C = coeff[x*z]
            D = coeff[y**2]
            E = coeff[y*z]
            F = coeff[z**2]
            # Coefficients after completing the square in x (scaled by 4*A
            # to stay integral).
            _coeff = dict()
            _coeff[x**2] = 4*A**2
            _coeff[y**2] = 4*A*D - B**2
            _coeff[z**2] = 4*A*F - C**2
            _coeff[y*z] = 4*A*E - 2*B*C
            _coeff[x*y] = 0
            _coeff[x*z] = 0
            x_0, y_0, z_0 = _diop_ternary_quadratic(var, _coeff)
            if x_0 is None:
                return (None, None, None)
            # Undo the substitution: x = (X - (B*y + C*z))/(2*A) written
            # with integer scaling p/q.
            p, q = _rational_pq(B*y_0 + C*z_0, 2*A)
            x_0, y_0, z_0 = x_0*q - p, y_0*q, z_0*q
        elif coeff[z*y] != 0:
            if coeff[y**2] == 0:
                if coeff[z**2] == 0:
                    # Equations of the form A*x**2 + E*yz = 0.
                    A = coeff[x**2]
                    E = coeff[y*z]
                    b, a = _rational_pq(-E, A)
                    x_0, y_0, z_0 = b, a, b
                else:
                    # Ax**2 + E*y*z + F*z**2  = 0
                    var[0], var[2] = _var[2], _var[0]
                    z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
            else:
                # A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, C may be zero
                var[0], var[1] = _var[1], _var[0]
                y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
        else:
            # Ax**2 + D*y**2 + F*z**2 = 0, C may be zero
            x_0, y_0, z_0 = _diop_ternary_quadratic_normal(var, coeff)
    return _remove_gcd(x_0, y_0, z_0)
def transformation_to_normal(eq):
    """
    Returns the transformation Matrix that converts a general ternary
    quadratic equation `eq` (`ax^2 + by^2 + cz^2 + dxy + eyz + fxz`)
    to a form without cross terms: `ax^2 + by^2 + cz^2 = 0`. This is
    not used in solving ternary quadratics; it is only implemented for
    the sake of completeness.
    """
    # Dispatch only for homogeneous ternary quadratics.
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    ternary_types = (
        "homogeneous_ternary_quadratic",
        "homogeneous_ternary_quadratic_normal")
    if eq_type in ternary_types:
        return _transformation_to_normal(var, coeff)
def _transformation_to_normal(var, coeff):
    # Worker for transformation_to_normal(): build the 3x3 matrix T with
    # Transpose([x y z]) = T*Transpose([X Y Z]) that removes the cross
    # terms, by permuting variables and completing the square recursively.
    _var = list(var)  # copy
    x, y, z = var
    if not any(coeff[i**2] for i in var):
        # No square terms at all: pure cross-term equation.
        # https://math.stackexchange.com/questions/448051/transform-quadratic-ternary-form-to-normal-form/448065#448065
        a = coeff[x*y]
        b = coeff[y*z]
        c = coeff[x*z]
        swap = False
        if not a:  # b can't be 0 or else there aren't 3 vars
            swap = True
            a, b = b, a
        T = Matrix(((1, 1, -b/a), (1, -1, -c/a), (0, 0, 1)))
        if swap:
            T.row_swap(0, 1)
            T.col_swap(0, 1)
        return T
    if coeff[x**2] == 0:
        # If the coefficient of x is zero change the variables
        if coeff[y**2] == 0:
            _var[0], _var[2] = var[2], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 2)
            T.col_swap(0, 2)
            return T
        else:
            _var[0], _var[1] = var[1], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 1)
            T.col_swap(0, 1)
            return T
    # Apply the transformation x --> X - (B*Y + C*Z)/(2*A)
    if coeff[x*y] != 0 or coeff[x*z] != 0:
        A = coeff[x**2]
        B = coeff[x*y]
        C = coeff[x*z]
        D = coeff[y**2]
        E = coeff[y*z]
        F = coeff[z**2]
        # Coefficients after completing the square in x (scaled by 4*A).
        _coeff = dict()
        _coeff[x**2] = 4*A**2
        _coeff[y**2] = 4*A*D - B**2
        _coeff[z**2] = 4*A*F - C**2
        _coeff[y*z] = 4*A*E - 2*B*C
        _coeff[x*y] = 0
        _coeff[x*z] = 0
        T_0 = _transformation_to_normal(_var, _coeff)
        # Prepend the completing-the-square substitution to the recursive
        # transformation.
        return Matrix(3, 3, [1, S(-B)/(2*A), S(-C)/(2*A), 0, 1, 0, 0, 0, 1])*T_0
    elif coeff[y*z] != 0:
        if coeff[y**2] == 0:
            if coeff[z**2] == 0:
                # Equations of the form A*x**2 + E*yz = 0.
                # Apply transformation y -> Y + Z ans z -> Y - Z
                return Matrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, -1])
            else:
                # Ax**2 + E*y*z + F*z**2  = 0
                _var[0], _var[2] = var[2], var[0]
                T = _transformation_to_normal(_var, coeff)
                T.row_swap(0, 2)
                T.col_swap(0, 2)
                return T
        else:
            # A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, F may be zero
            _var[0], _var[1] = var[1], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 1)
            T.col_swap(0, 1)
            return T
    else:
        # Already in normal form: no cross terms present.
        return Matrix.eye(3)
def parametrize_ternary_quadratic(eq):
    """
    Returns the parametrized general solution for the ternary quadratic
    equation ``eq`` which has the form
    `ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
    Examples
    ========
    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine import parametrize_ternary_quadratic
    >>> parametrize_ternary_quadratic(x**2 + y**2 - z**2)
    (2*p*q, p**2 - q**2, p**2 + q**2)
    Here `p` and `q` are two co-prime integers.
    >>> parametrize_ternary_quadratic(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
    (2*p**2 - 2*p*q - q**2, 2*p**2 + 2*p*q - q**2, 2*p**2 - 2*p*q + 3*q**2)
    >>> parametrize_ternary_quadratic(124*x**2 - 30*y**2 - 7729*z**2)
    (-1410*p**2 - 363263*q**2, 2700*p**2 + 30916*p*q - 695610*q**2, -60*p**2 + 5400*p*q + 15458*q**2)
    References
    ==========
    .. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
           London Mathematical Society Student Texts 41, Cambridge University
           Press, Cambridge, 1998.
    """
    # First find a base solution, then expand it into the two-parameter
    # family of all solutions.
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    ternary_types = (
        "homogeneous_ternary_quadratic",
        "homogeneous_ternary_quadratic_normal")
    if eq_type in ternary_types:
        base = _diop_ternary_quadratic(var, coeff)
        return _parametrize_ternary_quadratic(base, var, coeff)
def _parametrize_ternary_quadratic(solution, _var, coeff):
    # called for a*x**2 + b*y**2 + c*z**2 + d*x*y + e*y*z + f*x*z = 0
    # Given a base solution, produce the general two-parameter (p, q)
    # family by substituting (r*x_0, r*y_0 + p, r*z_0 + q) and splitting
    # off the part linear in r.
    assert 1 not in coeff
    x, y, z = _var
    x_0, y_0, z_0 = solution
    v = list(_var)  # copy
    if x_0 is None:
        return (None, None, None)
    if solution.count(0) >= 2:
        # if there are 2 zeros the equation reduces
        # to k*X**2 == 0 where X is x, y, or z so X must
        # be zero, too. So there is only the trivial
        # solution.
        return (None, None, None)
    if x_0 == 0:
        # Need a nonzero first coordinate: swap x and y and recurse.
        v[0], v[1] = v[1], v[0]
        y_p, x_p, z_p = _parametrize_ternary_quadratic(
            (y_0, x_0, z_0), v, coeff)
        return x_p, y_p, z_p
    # NOTE(review): the earlier ``x, y, z = _var`` assignment is shadowed
    # here before use, so it is effectively dead.
    x, y, z = v
    r, p, q = symbols("r, p, q", integer=True)
    eq = sum(k*v for k, v in coeff.items())
    eq_1 = _mexpand(eq.subs(zip(
        (x, y, z), (r*x_0, r*y_0 + p, r*z_0 + q))))
    # A collects the terms containing r, B the rest; substituting the
    # root of the linear-in-r part yields the parametrization.
    A, B = eq_1.as_independent(r, as_Add=True)
    x = A*x_0
    y = (A*y_0 - _mexpand(B/r*p))
    z = (A*z_0 - _mexpand(B/r*q))
    return x, y, z
def diop_ternary_quadratic_normal(eq):
    """
    Solves the quadratic ternary diophantine equation,
    `ax^2 + by^2 + cz^2 = 0`.
    Here the coefficients `a`, `b`, and `c` should be non zero. Otherwise the
    equation will be a quadratic binary or univariate equation. If solvable,
    returns a tuple `(x, y, z)` that satisfies the given equation. If the
    equation does not have integer solutions, `(None, None, None)` is returned.
    Usage
    =====
    ``diop_ternary_quadratic_normal(eq)``: where ``eq`` is an equation of the form
    `ax^2 + by^2 + cz^2 = 0`.
    Examples
    ========
    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine import diop_ternary_quadratic_normal
    >>> diop_ternary_quadratic_normal(x**2 + 3*y**2 - z**2)
    (1, 0, 1)
    >>> diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2)
    (1, 0, 2)
    >>> diop_ternary_quadratic_normal(34*x**2 - 3*y**2 - 301*z**2)
    (4, 9, 1)
    """
    # Dispatch only when the equation is already in normal form (no
    # cross terms); other classifications return None.
    var, coeff, eq_type = classify_diop(eq, _dict=False)
    if eq_type == "homogeneous_ternary_quadratic_normal":
        return _diop_ternary_quadratic_normal(var, coeff)
def _diop_ternary_quadratic_normal(var, coeff):
    # Worker for diop_ternary_quadratic_normal(): solve
    # a*x**2 + b*y**2 + c*z**2 = 0 with a*b*c != 0 via square-free
    # normalization, Lagrange descent and Holzer reduction.
    x, y, z = var
    a = coeff[x**2]
    b = coeff[y**2]
    c = coeff[z**2]
    try:
        assert len([k for k in coeff if coeff[k]]) == 3
        assert all(coeff[i**2] for i in var)
    except AssertionError:
        raise ValueError(filldedent('''
            coeff dict is not consistent with assumption of this routine:
            coefficients should be those of an expression in the form
            a*x**2 + b*y**2 + c*z**2 where a*b*c != 0.'''))
    # Reduce to the square-free, pairwise-prime form; keep the removed
    # square factors and gcd pairs for reconstructing the solution later.
    (sqf_of_a, sqf_of_b, sqf_of_c), (a_1, b_1, c_1), (a_2, b_2, c_2) = \
        sqf_normal(a, b, c, steps=True)
    A = -a_2*c_2
    B = -b_2*c_2
    # If following two conditions are satisfied then there are no solutions
    if A < 0 and B < 0:
        return (None, None, None)
    # Legendre solvability conditions: each -product must be a quadratic
    # residue modulo the third coefficient.
    if (
            sqrt_mod(-b_2*c_2, a_2) is None or
            sqrt_mod(-c_2*a_2, b_2) is None or
            sqrt_mod(-a_2*b_2, c_2) is None):
        return (None, None, None)
    z_0, x_0, y_0 = descent(A, B)
    # Clear the denominator introduced by dividing z_0 by |c_2|.
    z_0, q = _rational_pq(z_0, abs(c_2))
    x_0 *= q
    y_0 *= q
    x_0, y_0, z_0 = _remove_gcd(x_0, y_0, z_0)
    # Holzer reduction
    if sign(a) == sign(b):
        x_0, y_0, z_0 = holzer(x_0, y_0, z_0, abs(a_2), abs(b_2), abs(c_2))
    elif sign(a) == sign(c):
        x_0, z_0, y_0 = holzer(x_0, z_0, y_0, abs(a_2), abs(c_2), abs(b_2))
    else:
        y_0, z_0, x_0 = holzer(y_0, z_0, x_0, abs(b_2), abs(c_2), abs(a_2))
    # Undo the pairwise-gcd normalization ...
    x_0 = reconstruct(b_1, c_1, x_0)
    y_0 = reconstruct(a_1, c_1, y_0)
    z_0 = reconstruct(a_1, b_1, z_0)
    # ... and the square-factor removal.
    sq_lcm = ilcm(sqf_of_a, sqf_of_b, sqf_of_c)
    x_0 = abs(x_0*sq_lcm//sqf_of_a)
    y_0 = abs(y_0*sq_lcm//sqf_of_b)
    z_0 = abs(z_0*sq_lcm//sqf_of_c)
    return _remove_gcd(x_0, y_0, z_0)
def sqf_normal(a, b, c, steps=False):
    """
    Return `a', b', c'`, the coefficients of the square-free normal
    form of `ax^2 + by^2 + cz^2 = 0`, where `a', b', c'` are pairwise
    prime. If `steps` is True then also return three tuples:
    `sq`, `sqf`, and `(a', b', c')` where `sq` contains the square
    factors of `a`, `b` and `c` after removing the `gcd(a, b, c)`;
    `sqf` contains the values of `a`, `b` and `c` after removing
    both the `gcd(a, b, c)` and the square factors.
    The solutions for `ax^2 + by^2 + cz^2 = 0` can be
    recovered from the solutions of `a'x^2 + b'y^2 + c'z^2 = 0`.
    Examples
    ========
    >>> from sympy.solvers.diophantine import sqf_normal
    >>> sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11)
    (11, 1, 5)
    >>> sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11, True)
    ((3, 1, 7), (5, 55, 11), (11, 1, 5))
    References
    ==========
    .. [1] Legendre's Theorem, Legrange's Descent,
           http://public.csusm.edu/aitken_html/notes/legendre.pdf
    See Also
    ========
    reconstruct()
    """
    # Remove the overall gcd, then strip the square part of each
    # coefficient.
    ABC = A, B, C = _remove_gcd(a, b, c)
    sq = tuple(square_factor(i) for i in ABC)
    sqf = A, B, C = tuple([i//j**2 for i,j in zip(ABC, sq)])
    # Make the coefficients pairwise prime: for each pair, divide out the
    # gcd and multiply it onto the remaining coefficient.
    pc = igcd(A, B)
    A /= pc
    B /= pc
    pa = igcd(B, C)
    B /= pa
    C /= pa
    pb = igcd(A, C)
    A /= pb
    # NOTE(review): by symmetry with the two gcd pairs above, this line
    # looks like it should divide C (the other member of the (A, C) pair)
    # rather than B; as written, ``B *= pb`` below restores B unchanged
    # while C keeps the factor pb. TODO confirm against the reference.
    B /= pb
    A *= pa
    B *= pb
    C *= pc
    if steps:
        return (sq, sqf, (A, B, C))
    else:
        return A, B, C
def square_factor(a):
    """
    Returns an integer `c` s.t. `a = c^2k, \ c,k \in Z`. Here `k` is square
    free. `a` can be given as an integer or a dictionary of factors.
    Examples
    ========
    >>> from sympy.solvers.diophantine import square_factor
    >>> square_factor(24)
    2
    >>> square_factor(-36*3)
    6
    >>> square_factor(1)
    1
    >>> square_factor({3: 2, 2: 1, -1: 1})  # -18
    3
    See Also
    ========
    sympy.ntheory.factor_.core
    """
    # Accept either a prefactored dict or an integer to be factored.
    factors = factorint(a) if not isinstance(a, dict) else a
    # The square part contributes floor(e/2) of each prime's exponent e.
    half_powers = [prime**(exp//2) for prime, exp in factors.items()]
    return Mul(*half_powers)
def reconstruct(A, B, z):
    """
    Reconstruct the `z` value of an equivalent solution of `ax^2 + by^2 + cz^2`
    from the `z` value of a solution of the square-free normal form of the
    equation, `a'*x^2 + b'*y^2 + c'*z^2`, where `a'`, `b'` and `c'` are square
    free and `gcd(a', b', c') == 1`.
    """
    # Multiply z by every prime shared between A and B; a repeated prime
    # would contradict the square-free precondition.
    for prime, mult in factorint(igcd(A, B)).items():
        if mult != 1:
            raise ValueError('a and b should be square-free')
        z *= prime
    return z
def ldescent(A, B):
    """
    Return a non-trivial solution to `w^2 = Ax^2 + By^2` using
    Lagrange's method; return None if there is no such solution.
    .
    Here, `A \\neq 0` and `B \\neq 0` and `A` and `B` are square free. Output a
    tuple `(w_0, x_0, y_0)` which is a solution to the above equation.
    Examples
    ========
    >>> from sympy.solvers.diophantine import ldescent
    >>> ldescent(1, 1) # w^2 = x^2 + y^2
    (1, 1, 0)
    >>> ldescent(4, -7) # w^2 = 4x^2 - 7y^2
    (2, -1, 0)
    This means that `x = -1, y = 0` and `w = 2` is a solution to the equation
    `w^2 = 4x^2 - 7y^2`
    >>> ldescent(5, -1) # w^2 = 5x^2 - y^2
    (2, 1, -1)
    References
    ==========
    .. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
           London Mathematical Society Student Texts 41, Cambridge University
           Press, Cambridge, 1998.
    .. [2] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
           [online], Available:
           http://eprints.nottingham.ac.uk/60/1/kvxefz87.pdf
    """
    # Normalize so that |A| <= |B| (the equation is symmetric in the
    # roles of A and B up to swapping x and y).
    if abs(A) > abs(B):
        w, y, x = ldescent(B, A)
        return w, x, y
    # Base cases of the descent.
    if A == 1:
        return (1, 1, 0)
    if B == 1:
        return (1, 0, 1)
    if B == -1: # and A == -1
        return
    # r is a square root of A modulo B, so B | (r**2 - A).
    r = sqrt_mod(A, B)
    Q = (r**2 - A) // B
    if Q == 0:
        B_0 = 1
        d = 0
    else:
        # Find a divisor i of Q with |Q|/i a perfect square d**2; then
        # B_0 = sign(Q)*i is a smaller coefficient to descend to.
        div = divisors(Q)
        B_0 = None
        for i in div:
            sQ, _exact = integer_nthroot(abs(Q) // i, 2)
            if _exact:
                B_0, d = sign(Q)*i, sQ
                break
    if B_0 is not None:
        # Recurse on the smaller problem and lift the solution back up.
        W, X, Y = ldescent(A, B_0)
        return _remove_gcd((-A*X + r*W), (r*X - W), Y*(B_0*d))
def descent(A, B):
    """
    Returns a non-trivial solution, (x, y, z), to `x^2 = Ay^2 + Bz^2`
    using Lagrange's descent method with lattice-reduction. `A` and `B`
    are assumed to be valid for such a solution to exist.
    This is faster than the normal Lagrange's descent algorithm because
    the Gaussian reduction is used.
    Examples
    ========
    >>> from sympy.solvers.diophantine import descent
    >>> descent(3, 1) # x**2 = 3*y**2 + z**2
    (1, 0, 1)
    `(x, y, z) = (1, 0, 1)` is a solution to the above equation.
    >>> descent(41, -113)
    (-16, -3, 1)
    References
    ==========
    .. [1] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
           Mathematics of Computation, Volume 00, Number 0.
    """
    # Normalize so that |A| <= |B|; swapping A and B swaps y and z.
    if abs(A) > abs(B):
        x, y, z = descent(B, A)
        return x, z, y
    # Base cases of the descent.
    if B == 1:
        return (1, 0, 1)
    if A == 1:
        return (1, 1, 0)
    if B == -A:
        return (0, 1, 1)
    if B == A:
        x, z, y = descent(-1, A)
        return (A*y, z, x)
    # w is a square root of A modulo B; Gaussian lattice reduction gives
    # a small solution (x_0, z_0) of x**2 == A*z**2 (mod B).
    w = sqrt_mod(A, B)
    x_0, z_0 = gaussian_reduce(w, A, B)
    # t = (x_0**2 - A*z_0**2)/B is an integer by construction; split off
    # its square part t_2**2 and recurse on the square-free core t_1.
    t = (x_0**2 - A*z_0**2) // B
    t_2 = square_factor(t)
    t_1 = t // t_2**2
    x_1, z_1, y_1 = descent(A, t_1)
    # Compose the two solutions (Brahmagupta-style identity) and strip
    # any common factor.
    return _remove_gcd(x_0*x_1 + A*z_0*z_1, z_0*x_1 + x_0*z_1, t_1*t_2*y_1)
def gaussian_reduce(w, a, b):
    r"""
    Returns a reduced solution `(x, z)` to the congruence
    `X^2 - aZ^2 \equiv 0 \ (mod \ b)` so that `x^2 + |a|z^2` is minimal.

    Details
    =======

    Here ``w`` is a solution of the congruence `x^2 \equiv a \ (mod \ b)`

    References
    ==========

    .. [1] Gaussian lattice Reduction [online]. Available:
           http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=404
    .. [2] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
           Mathematics of Computation, Volume 00, Number 0.
    """
    # Basis vectors of the lattice; a pair (c1, c2) represents the
    # solution x = c1*w + b*c2, z = c1.
    u = (0, 1)
    v = (1, 0)
    # Orient v so the inner product with u is non-negative.
    if dot(u, v, w, a, b) < 0:
        v = (-v[0], -v[1])
    # Ensure u is the longer vector before reducing.
    if norm(u, w, a, b) < norm(v, w, a, b):
        u, v = v, u
    # Gaussian (two-dimensional lattice) reduction: repeatedly subtract
    # the nearest integer multiple of the shorter vector from the longer.
    while norm(u, w, a, b) > norm(v, w, a, b):
        k = dot(u, v, w, a, b) // dot(v, v, w, a, b)
        u, v = v, (u[0]- k*v[0], u[1]- k*v[1])
    u, v = v, u
    # Pick whichever of v and u - v is shorter as the reduced vector.
    if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):
        c = v
    else:
        c = (u[0] - v[0], u[1] - v[1])
    # Translate the lattice coordinates back to the (x, z) solution.
    return c[0]*w + b*c[1], c[0]
def dot(u, v, w, a, b):
    r"""
    Special dot product of `u = (u_{1}, u_{2})` and `v = (v_{1}, v_{2})`,
    defined so that Gaussian reduction of the congruence
    `X^2 - aZ^2 \equiv 0 \ (mod \ b)` minimizes the right quantity:

        `(w u_1 + b u_2)(w v_1 + b v_2) + |a| u_1 v_1`
    """
    left = w*u[0] + b*u[1]
    right = w*v[0] + b*v[1]
    return left*right + abs(a)*u[0]*v[0]
def norm(u, w, a, b):
    r"""
    Norm of `u = (u_{1}, u_{2})` induced by the special inner product
    `u \cdot v = (wu_{1} + bu_{2})(wv_{1} + bv_{2}) + |a|u_{1}v_{1}`,
    i.e. ``sqrt(dot(u, u, w, a, b))``.
    """
    return sqrt(dot(u, u, w, a, b))
def holzer(x, y, z, a, b, c):
    r"""
    Simplify the solution `(x, y, z)` of the equation
    `ax^2 + by^2 = cz^2` with `a, b, c > 0` and `z^2 \geq \mid ab \mid` to
    a new reduced solution `(x', y', z')` such that `z'^2 \leq \mid ab \mid`.

    The algorithm is an interpretation of Mordell's reduction as described
    on page 8 of Cremona and Rusin's paper [1]_ and the work of Mordell in
    reference [2]_.

    References
    ==========

    .. [1] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
           Mathematics of Computation, Volume 00, Number 0.
    .. [2] Diophantine Equations, L. J. Mordell, page 48.
    """
    # The modulus k of the auxiliary linear congruence depends on the
    # parity of c (see Mordell's reduction).
    if _odd(c):
        k = 2*c
    else:
        k = c//2
    # Reduction target: stop once max(ax^2, by^2, cz^2) <= a*b*c.
    small = a*b*c
    step = 0
    while True:
        t1, t2, t3 = a*x**2, b*y**2, c*z**2
        # check that it's a solution
        if t1 + t2 != t3:
            if step == 0:
                raise ValueError('bad starting solution')
            break
        x_0, y_0, z_0 = x, y, z
        if max(t1, t2, t3) <= small:
            # Holzer condition
            break
        # Solve the linear Diophantine equation k*s + y_0*u - x_0*v = 0
        # for a base solution (u, v); None means no solution exists.
        uv = u, v = base_solution_linear(k, y_0, -x_0)
        if None in uv:
            break
        # Choose w as the integer nearest to p/q with the parity
        # constraint required when c is odd.
        p, q = -(a*u*x_0 + b*v*y_0), c*z_0
        r = Rational(p, q)
        if _even(c):
            w = _nint_or_floor(p, q)
            assert abs(w - r) <= S.Half
        else:
            w = p//q  # floor
            if _odd(a*u + b*v + c*w):
                w += 1
            assert abs(w - r) <= S.One
        # One Mordell reduction step; A and B are the quadratic form
        # evaluated at (u, v, w) and the mixed term with (x_0, y_0, z_0).
        A = (a*u**2 + b*v**2 + c*w**2)
        B = (a*u*x_0 + b*v*y_0 + c*w*z_0)
        x = Rational(x_0*A - 2*u*B, k)
        y = Rational(y_0*A - 2*v*B, k)
        z = Rational(z_0*A - 2*w*B, k)
        # The construction guarantees the new triple is integral.
        assert all(i.is_Integer for i in (x, y, z))
        step += 1
    # Return the last verified solution (before the failed/terminating step).
    return tuple([int(i) for i in (x_0, y_0, z_0)])
def diop_general_pythagorean(eq, param=symbols("m", integer=True)):
    """
    Solves the general pythagorean equation,
    `a_{1}^2x_{1}^2 + a_{2}^2x_{2}^2 + . . . + a_{n}^2x_{n}^2 - a_{n + 1}^2x_{n + 1}^2 = 0`.

    Returns a tuple which contains a parametrized solution to the equation,
    sorted in the same order as the input variables.

    Usage
    =====

    ``diop_general_pythagorean(eq, param)``: where ``eq`` is a general
    pythagorean equation which is assumed to be zero and ``param`` is the base
    parameter used to construct other parameters by subscripting.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_general_pythagorean
    >>> from sympy.abc import a, b, c, d, e
    >>> diop_general_pythagorean(a**2 + b**2 + c**2 - d**2)
    (m1**2 + m2**2 - m3**2, 2*m1*m3, 2*m2*m3, m1**2 + m2**2 + m3**2)
    >>> diop_general_pythagorean(9*a**2 - 4*b**2 + 16*c**2 + 25*d**2 + e**2)
    (10*m1**2 + 10*m2**2 + 10*m3**2 - 10*m4**2, 15*m1**2 + 15*m2**2 + 15*m3**2 + 15*m4**2, 15*m1*m4, 12*m2*m4, 60*m3*m4)
    """
    # Classify the equation first; only dispatch when it really is a
    # general pythagorean equation (otherwise return None implicitly).
    variables, coefficients, eq_kind = classify_diop(eq, _dict=False)
    if eq_kind == "general_pythagorean":
        return _diop_general_pythagorean(variables, coefficients, param)
def _diop_general_pythagorean(var, coeff, t):
    # Normalize signs: ensure only one squared term is negative by
    # negating every coefficient if needed (majority of first three
    # signs decides the orientation).
    if sign(coeff[var[0]**2]) + sign(coeff[var[1]**2]) + sign(coeff[var[2]**2]) < 0:
        for key in coeff.keys():
            coeff[key] = -coeff[key]
    n = len(var)
    # Locate the (single) variable with a negative coefficient; it plays
    # the role of the "hypotenuse" in the parametrization.
    index = 0
    for i, v in enumerate(var):
        if sign(coeff[v**2]) == -1:
            index = i
    # Fresh integer parameters t1, ..., t(n-1).
    m = symbols('%s1:%i' % (t, n), integer=True)
    # Standard parametrization of x1^2 + ... + x(n-1)^2 = xn^2:
    # ith = m1^2 + ... + m(n-1)^2 is the hypotenuse expression.
    ith = sum(m_i**2 for m_i in m)
    L = [ith - 2*m[n - 2]**2]
    L.extend([2*m[i]*m[n-2] for i in range(n - 2)])
    # Insert the hypotenuse expression at the position of the negative term.
    sol = L[:index] + [ith] + L[index:]
    # Scale the solution so that dividing by each sqrt(|coefficient|)
    # still yields integer (polynomial) entries.
    lcm = 1
    for i, v in enumerate(var):
        if i == index or (index > 0 and i == 0) or (index == 0 and i == 1):
            lcm = ilcm(lcm, sqrt(abs(coeff[v**2])))
        else:
            s = sqrt(coeff[v**2])
            lcm = ilcm(lcm, s if _odd(s) else s//2)
    for i, v in enumerate(var):
        sol[i] = (lcm*sol[i]) / sqrt(abs(coeff[v**2]))
    return tuple(sol)
def diop_general_sum_of_squares(eq, limit=1):
    r"""
    Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.

    Returns at most ``limit`` number of solutions.

    Usage
    =====

    ``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
    is assumed to be zero. Also, ``eq`` should be in the form,
    `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.

    Details
    =======

    When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be
    no solutions. Refer [1]_ for more details.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_general_sum_of_squares
    >>> from sympy.abc import a, b, c, d, e, f
    >>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
    {(15, 22, 22, 24, 24)}

    Reference
    =========

    .. [1] Representing an integer as a sum of three squares, [online],
        Available:
        http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
    """
    # The constant term arrives as coeff[1] == -k, hence the negation
    # when passing k through to the solver.
    variables, coefficients, eq_kind = classify_diop(eq, _dict=False)
    if eq_kind == "general_sum_of_squares":
        return _diop_general_sum_of_squares(variables, -coefficients[1], limit)
def _diop_general_sum_of_squares(var, k, limit=1):
    # Solve sum(x_i**2 for x_i in var) == k, returning at most `limit`
    # solutions and honouring any nonpositivity assumptions on the symbols.
    if len(var) < 3:
        raise ValueError('n must be greater than 2')
    solutions = set()
    if k < 0 or limit < 1:
        return solutions
    # Per-symbol sign: a nonpositive symbol gets its value negated.
    signs = [-1 if x.is_nonpositive else 1 for x in var]
    flip = -1 in signs
    yielded = 0
    for rep in sum_of_squares(k, len(var), zeros=True):
        if flip:
            rep = tuple(sg*val for sg, val in zip(signs, rep))
        solutions.add(rep)
        yielded += 1
        if yielded >= limit:
            break
    return solutions
def diop_general_sum_of_even_powers(eq, limit=1):
    """
    Solves the equation `x_{1}^e + x_{2}^e + . . . + x_{n}^e - k = 0`
    where `e` is an even, integer power.

    Returns at most ``limit`` number of solutions.

    Usage
    =====

    ``general_sum_of_even_powers(eq, limit)`` : Here ``eq`` is an expression which
    is assumed to be zero. Also, ``eq`` should be in the form,
    `x_{1}^e + x_{2}^e + . . . + x_{n}^e - k = 0`.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_general_sum_of_even_powers
    >>> from sympy.abc import a, b
    >>> diop_general_sum_of_even_powers(a**4 + b**4 - (2**4 + 3**4))
    {(2, 3)}

    See Also
    ========

    power_representation()
    """
    variables, coefficients, eq_kind = classify_diop(eq, _dict=False)
    if eq_kind == "general_sum_of_even_powers":
        # Recover the (even) exponent from any power term with a nonzero
        # coefficient; coefficients[1] holds the constant term -k.
        for key, val in coefficients.items():
            if key.is_Pow and val:
                return _diop_general_sum_of_even_powers(
                    variables, key.exp, -coefficients[1], limit)
def _diop_general_sum_of_even_powers(var, p, n, limit=1):
    # Solve sum(x_i**p for x_i in var) == n, returning at most `limit`
    # solutions and honouring any nonpositivity assumptions on the symbols.
    found = set()
    if n < 0 or limit < 1:
        return found
    # Per-symbol sign: a nonpositive symbol gets its value negated.
    signs = [-1 if x.is_nonpositive else 1 for x in var]
    flip = -1 in signs
    yielded = 0
    for rep in power_representation(n, p, len(var)):
        if flip:
            rep = tuple(sg*val for sg, val in zip(signs, rep))
        found.add(rep)
        yielded += 1
        if yielded >= limit:
            break
    return found
## Functions below this comment can be more suitably grouped under
## an Additive number theory module rather than the Diophantine
## equation module.
def partition(n, k=None, zeros=False):
    """
    Returns a generator that can be used to generate partitions of an integer
    `n`.

    A partition of `n` is a set of positive integers which add up to `n`. For
    example, partitions of 3 are 3, 1 + 2, 1 + 1 + 1. A partition is returned
    as a tuple. If ``k`` equals None, then all possible partitions are returned
    irrespective of their size, otherwise only the partitions of size ``k`` are
    returned. If the ``zeros`` parameter is set to True then a suitable
    number of zeros are added at the end of every partition of size less than
    ``k``.

    ``zeros`` is considered only if ``k`` is not None.

    Details
    =======

    ``partition(n, k)``: Here ``n`` is a positive integer and ``k`` is the size
    of the partition which is also positive integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import partition
    >>> f = partition(5)
    >>> next(f)
    (1, 1, 1, 1, 1)
    >>> next(f)
    (1, 1, 1, 2)
    >>> g = partition(5, 3)
    >>> next(g)
    (1, 1, 3)
    >>> next(g)
    (1, 2, 2)
    >>> g = partition(5, 3, zeros=True)
    >>> next(g)
    (0, 0, 5)
    """
    # Delegate the heavy lifting to sympy's ordered_partitions; imported
    # lazily to avoid a circular import at module load time.
    from sympy.utilities.iterables import ordered_partitions
    if not zeros or k is None:
        for i in ordered_partitions(n, k):
            yield tuple(i)
    else:
        # With zeros allowed, enumerate partitions of every size up to k
        # and left-pad each with zeros to exactly k elements.
        for m in range(1, k + 1):
            for i in ordered_partitions(n, m):
                i = tuple(i)
                yield (0,)*(k - len(i)) + i
def prime_as_sum_of_two_squares(p):
    """
    Represent a prime `p` as a unique sum of two squares; this can
    only be done if the prime is congruent to 1 mod 4.

    Examples
    ========

    >>> from sympy.solvers.diophantine import prime_as_sum_of_two_squares
    >>> prime_as_sum_of_two_squares(7)  # can't be done
    >>> prime_as_sum_of_two_squares(5)
    (1, 2)

    Reference
    =========

    .. [1] Representing a number as a sum of four squares, [online],
        Available: http://schorn.ch/lagrange.html

    See Also
    ========
    sum_of_squares()
    """
    # Only primes p == 1 (mod 4) are a sum of two squares (Fermat).
    if not p % 4 == 1:
        return
    # Find a quadratic non-residue b modulo p; for p == 5 (mod 8),
    # 2 is always a non-residue, so start the search there.
    if p % 8 == 5:
        b = 2
    else:
        b = 3
    while pow(b, (p - 1) // 2, p) == 1:
        b = nextprime(b)
    # b is now a square root of -1 modulo p.
    b = pow(b, (p - 1) // 4, p)
    a = p
    # Hermite-Serret: run the Euclidean algorithm on (p, b) until the
    # remainder drops below sqrt(p); the last two remainders are the answer.
    while b**2 > p:
        a, b = b, a % b
    return (int(a % b), int(b))  # convert from long
def sum_of_three_squares(n):
    r"""
    Returns a 3-tuple `(a, b, c)` such that `a^2 + b^2 + c^2 = n` and
    `a, b, c \geq 0`.

    Returns None if `n = 4^a(8m + 7)` for some `a, m \in Z`. See
    [1]_ for more details.

    Usage
    =====

    ``sum_of_three_squares(n)``: Here ``n`` is a non-negative integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import sum_of_three_squares
    >>> sum_of_three_squares(44542)
    (18, 37, 207)

    References
    ==========

    .. [1] Representing a number as a sum of three squares, [online],
        Available: http://schorn.ch/lagrange.html

    See Also
    ========
    sum_of_squares()
    """
    # Hard-coded representations for values where the probabilistic search
    # below (looking for a prime N) would fail or loop without success.
    special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),
        85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),
        526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),
        2986: (21, 32, 39), 9634: (56, 57, 57)}
    v = 0
    if n == 0:
        return (0, 0, 0)
    # Strip factors of 4; each stripped factor doubles the final answer.
    v = multiplicity(4, n)
    n //= 4**v
    # Legendre's three-square theorem: n == 7 (mod 8) has no representation.
    if n % 8 == 7:
        return
    if n in special.keys():
        x, y, z = special[n]
        return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)
    s, _exact = integer_nthroot(n, 2)
    if _exact:
        # n is itself a perfect square.
        return (2**v*s, 0, 0)
    x = None
    if n % 8 == 3:
        # Search odd x such that (n - x^2)/2 is prime; that prime, being
        # == 1 (mod 4), splits as y^2 + z^2 and combines into the answer.
        s = s if _odd(s) else s - 1
        for x in range(s, -1, -2):
            N = (n - x**2) // 2
            if isprime(N):
                y, z = prime_as_sum_of_two_squares(N)
                return _sorted_tuple(2**v*x, 2**v*(y + z), 2**v*abs(y - z))
        return
    # Choose the parity of x so that N = n - x^2 can be a prime == 1 (mod 4).
    if n % 8 == 2 or n % 8 == 6:
        s = s if _odd(s) else s - 1
    else:
        s = s - 1 if _odd(s) else s
    for x in range(s, -1, -2):
        N = n - x**2
        if isprime(N):
            y, z = prime_as_sum_of_two_squares(N)
            return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)
def sum_of_four_squares(n):
    r"""
    Returns a 4-tuple `(a, b, c, d)` such that `a^2 + b^2 + c^2 + d^2 = n`.

    Here `a, b, c, d \geq 0`.

    Usage
    =====

    ``sum_of_four_squares(n)``: Here ``n`` is a non-negative integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import sum_of_four_squares
    >>> sum_of_four_squares(3456)
    (8, 8, 32, 48)
    >>> sum_of_four_squares(1294585930293)
    (0, 1234, 2161, 1137796)

    References
    ==========

    .. [1] Representing a number as a sum of four squares, [online],
        Available: http://schorn.ch/lagrange.html

    See Also
    ========
    sum_of_squares()
    """
    if n == 0:
        return (0, 0, 0, 0)
    # Strip factors of 4; each one doubles every entry of the answer.
    v = multiplicity(4, n)
    n //= 4**v
    # Peel off one square d**2 so that the remainder is expressible as a
    # sum of three squares (i.e. is not congruent to 7 mod 8).
    residue = n % 8
    if residue == 7:
        d, n = 2, n - 4
    elif residue in (2, 6):
        d, n = 1, n - 1
    else:
        d = 0
    x, y, z = sum_of_three_squares(n)
    return _sorted_tuple(2**v*d, 2**v*x, 2**v*y, 2**v*z)
def power_representation(n, p, k, zeros=False):
    """
    Returns a generator for finding k-tuples of integers,
    `(n_{1}, n_{2}, . . . n_{k})`, such that
    `n = n_{1}^p + n_{2}^p + . . . n_{k}^p`.

    Usage
    =====

    ``power_representation(n, p, k, zeros)``: Represent non-negative number
    ``n`` as a sum of ``k`` ``p``th powers. If ``zeros`` is true, then the
    solutions is allowed to contain zeros.

    Examples
    ========

    >>> from sympy.solvers.diophantine import power_representation

    Represent 1729 as a sum of two cubes:

    >>> f = power_representation(1729, 3, 2)
    >>> next(f)
    (9, 10)
    >>> next(f)
    (1, 12)

    If the flag `zeros` is True, the solution may contain tuples with
    zeros; any such solutions will be generated after the solutions
    without zeros:

    >>> list(power_representation(125, 2, 3, zeros=True))
    [(5, 6, 8), (3, 4, 10), (0, 5, 10), (0, 2, 11)]

    For even `p` the `permute_sign` function can be used to get all
    signed values:

    >>> from sympy.utilities.iterables import permute_signs
    >>> list(permute_signs((1, 12)))
    [(1, 12), (-1, 12), (1, -12), (-1, -12)]

    All possible signed permutations can also be obtained:

    >>> from sympy.utilities.iterables import signed_permutations
    >>> list(signed_permutations((1, 12)))
    [(1, 12), (-1, 12), (1, -12), (-1, -12), (12, 1), (-12, 1), (12, -1), (-12, -1)]
    """
    n, p, k = [as_int(i) for i in (n, p, k)]

    if n < 0:
        # For odd p the representation of -n can be negated term-wise.
        if p % 2:
            for t in power_representation(-n, p, k, zeros):
                yield tuple(-i for i in t)
        return

    if p < 1 or k < 1:
        raise ValueError(filldedent('''
    Expecting positive integers for `(p, k)`, but got `(%s, %s)`'''
            % (p, k)))

    if n == 0:
        if zeros:
            yield (0,)*k
        return

    if k == 1:
        if p == 1:
            yield (n,)
        else:
            # n must itself be a perfect p-th power (possibly via a
            # higher perfect power n == b**e with p | e).
            be = perfect_power(n)
            if be:
                b, e = be
                d, r = divmod(e, p)
                if not r:
                    yield (b**d,)
        return

    if p == 1:
        # First powers: this is just integer partitioning.
        for t in partition(n, k, zeros=zeros):
            yield t
        return

    if p == 2:
        feasible = _can_do_sum_of_squares(n, k)
        if not feasible:
            return
        # Todd G. Will, "When Is n^2 a Sum of k Squares?", [online].
        # Available: https://www.maa.org/sites/default/files/Will-MMz-201037918.pdf
        if not zeros and n > 33 and k >= 5 and k <= n and n - k in (
                13, 10, 7, 5, 4, 2, 1):
            return
        # _can_do_sum_of_squares returns the int 1 (deliberately distinct
        # from True) to signal "n is prime and k == 2".  The previous
        # `feasible is 1` relied on CPython's small-int caching and emits
        # a SyntaxWarning on Python >= 3.8; `is not True` isolates the
        # same case since feasible is known to be truthy here.
        if feasible is not True:  # it's prime and k == 2
            yield prime_as_sum_of_two_squares(n)
            return

    if k == 2 and p > 2:
        be = perfect_power(n)
        if be and be[1] % p == 0:
            return  # Fermat: a**n + b**n = c**n has no solution for n > 2

    if n >= k:
        # Exhaustive descent over non-increasing tuples of positive bases.
        a = integer_nthroot(n - (k - 1), p)[0]
        for t in pow_rep_recursive(a, k, n, [], p):
            yield tuple(reversed(t))

    if zeros:
        # Shorter representations padded with trailing zeros.
        a = integer_nthroot(n, p)[0]
        for i in range(1, k):
            for t in pow_rep_recursive(a, i, n, [], p):
                yield tuple(reversed(t + (0,) * (k - i)))


sum_of_powers = power_representation
def pow_rep_recursive(n_i, k, n_remaining, terms, p):
    # Yield non-increasing tuples of k positive integers <= n_i whose
    # p-th powers sum to n_remaining; `terms` accumulates the partial
    # (descending) choice made so far.
    if k == 0 and n_remaining == 0:
        yield tuple(terms)
        return
    if n_i < 1 or k <= 0:
        # Dead end: no bases left to try, or nothing left to place.
        return
    # Branch 1: skip n_i entirely and try smaller bases first.
    for candidate in pow_rep_recursive(n_i - 1, k, n_remaining, terms, p):
        yield candidate
    # Branch 2: use n_i (possibly again), reducing the remaining target.
    leftover = n_remaining - n_i**p
    if leftover >= 0:
        for candidate in pow_rep_recursive(n_i, k - 1, leftover, terms + [n_i], p):
            yield candidate
def sum_of_squares(n, k, zeros=False):
    """Return a generator that yields the k-tuples of nonnegative
    values, the squares of which sum to n. If zeros is False (default)
    then the solution will not contain zeros. The nonnegative
    elements of a tuple are sorted.

    Feasibility notes:

    * k == 1 only yields (n,) when n is a perfect square.
    * k == 2 requires every prime of the form 4*m + 3 to appear with
      even multiplicity in n; a prime n works only if n == 1 (mod 4).
    * k == 3 fails exactly when n has the form 4**m*(8*j + 7).
    * k == 4 always succeeds (Lagrange).
    * k > 4 reduces to partitions whose parts are themselves sums of
      squares, so some partitions of n are rejected automatically.

    Examples
    ========

    >>> from sympy.solvers.diophantine import sum_of_squares
    >>> list(sum_of_squares(25, 2))
    [(3, 4)]
    >>> list(sum_of_squares(25, 2, True))
    [(3, 4), (0, 5)]
    >>> list(sum_of_squares(25, 4))
    [(1, 2, 2, 4)]

    See Also
    ========
    sympy.utilities.iterables.signed_permutations
    """
    # Thin wrapper: squares are just the p == 2 case of the general
    # power-representation generator.
    for rep in power_representation(n, 2, k, zeros):
        yield rep
def _can_do_sum_of_squares(n, k):
    """Return True if n can be written as the sum of k squares,
    False if it can't, or 1 if k == 2 and n is prime (in which
    case it *can* be written as a sum of two squares). A False
    is returned only if it can't be written as k-squares, even
    if 0s are allowed.
    """
    if k < 1 or n < 0:
        return False
    if n == 0:
        return True
    if k == 1:
        return is_square(n)
    if k == 2:
        if n in (1, 2):
            return True
        if isprime(n):
            # The int 1 (not True) signals the prime case to callers.
            return 1 if n % 4 == 1 else False
        # Composite: representable iff every prime factor of the form
        # 4*m + 3 occurs with even multiplicity.
        for prime, mult in factorint(n).items():
            if prime % 4 == 3 and mult % 2:
                return False
        return True
    # Legendre: three squares fail exactly for n of the form 4**a*(8*m + 7).
    if k == 3 and (n//4**multiplicity(4, n)) % 8 == 7:
        return False
    # Four or more squares always suffice (Lagrange), padding with zeros.
    return True
| NikNitro/Python-iBeacon-Scan | sympy/solvers/diophantine.py | Python | gpl-3.0 | 98,058 | [
"Gaussian"
] | f712424347cf982406ab6a29006de2920e658837f99abd5ccf48bf3b4ac2c4bc |
## INFO ########################################################################
## ##
## pypp ##
## ==== ##
## ##
## Advanced C PreProcessor with the Power of Python ##
## Version: 0.1.0.210 (20150406) ##
## File: pypp.py ##
## ##
## For more information about the project, visit <http://pypp.io>. ##
## Copyright (C) 2015 Peter Varo ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from sys import stderr
from io import SEEK_SET
from re import compile, match, sub, finditer, MULTILINE, DOTALL, VERBOSE
# Import user modules
import public_pypp
# Module level constants
#------------------------------------------------------------------------------#
PROMPT = '(pypp)'
EOL_EOF = compile(r'^EO(L|F)')
ERROR = compile(r'^\s*#\s*error\s+PYPP\s*$', flags=MULTILINE)
COMMENTS = compile(r"""
(
# Single line comments:
//\s*pypp\s*(?P<line>.+?)$
|
# Multi line comments:
/\*\s*pypp\s*(?P<block>.+?)\*/
)
""", flags=MULTILINE | DOTALL | VERBOSE)
#------------------------------------------------------------------------------#
def on_error(target, exception, code_buffer):
    """Report a failed pypp snippet, poison the output file, and re-raise.

    Prints the offending code to stderr, replaces the whole content of
    `target` with a single `#error` directive (so the generated file can
    never be compiled as C by accident) and re-raises `exception`.
    """
    # Inform the user about the problem
    print('{} The following exception occurred '
          'in these lines:'.format(PROMPT),
          code_buffer,
          sep='\n\n',
          end='\n\n',
          file=stderr)
    # Make sure the generated file cannot be used as a C file.
    # NOTE: the original called `target.seek(SEEK_SET)`, which passed
    # SEEK_SET (0) as the *offset* and only worked because SEEK_SET == 0;
    # the explicit (offset, whence) form below is the correct call.
    target.seek(0, SEEK_SET)
    target.write('#error "Invalid `pypp` output, '
                 'see console for further info"')
    target.truncate()
    # And re-raise the caught exception
    # TODO: set proper line and column numbers as well, as file name
    raise exception
#------------------------------------------------------------------------------#
def generate_output(source, target):
    """Process `source` (a readable file object) and write the result to
    `target`.

    Every `// pypp ...` or `/* pypp ... */` comment is executed as Python
    in a shared environment; plain C text is copied through.  A snippet
    that raises an EOL/EOF SyntaxError is treated as unfinished and is
    buffered until a later pypp comment completes it.
    """
    # Read file, and remove pypp-safety-errors (`#error PYPP` guard lines)
    source = sub(ERROR, '', source.read())
    # Reading states: shared exec environment plus cross-iteration buffers.
    environment_globals = {'pypp': public_pypp}
    environment_locals = {}
    inside_expression = False   # True while a snippet is syntactically unfinished
    exec_buffer = ''            # last code handed to exec() (for error reports)
    prev_buffer = ''            # accumulated, not-yet-executed snippet text
    curr_index = 0              # index of the first not-yet-written source char
    # Process the whole text
    for catch in finditer(COMMENTS, source):
        # Get indicies
        start_index, end_index = catch.span()
        # Get the next python snippet
        curr_buffer = catch.group('line') or catch.group('block')
        # If the last evaluation failed because of EOL/EOF, the C text
        # between the two pypp comments belongs to the unfinished snippet:
        # absorb it into the buffer instead of writing it to the output.
        if inside_expression:
            # Step at the index
            prev_buffer += source[curr_index:start_index]
            # Update index
            curr_index = end_index
            # Try to exit from the expression
            inside_expression = False
        # Try to run the interpreter
        try:
            # Execute code in the buffers
            exec_buffer = prev_buffer + curr_buffer
            exec(exec_buffer, environment_globals, environment_locals)
            # Empty previous-code-buffer
            prev_buffer = ''
        # If code-buffer contained invalid code
        except SyntaxError as exception:
            # If error is not end-of-file or end-of-line
            if not match(EOL_EOF, str(exception)):
                # Propagate error to user and clean up (on_error re-raises)
                on_error(target, exception, exec_buffer)
            # If error was about unended expression: keep buffering.
            prev_buffer += curr_buffer
            inside_expression = True
        # If `pypp.include` function was called
        except public_pypp.IncludeSnippet as include:
            # Write all values stored in the exception
            for string in include.strings:
                target.write(string)
            # Clear buffer
            prev_buffer = ''
        # If some other exception happened
        except Exception as exception:
            # Propagate error to user and clean up (on_error re-raises)
            on_error(target, exception, exec_buffer)
        # Write the C text preceding this comment (empty slice if it was
        # already absorbed into prev_buffer above), then advance.
        target.write(source[curr_index:start_index])
        curr_index = end_index
    # Write what's left from the source to the target
    target.write(source[curr_index:])
#------------------------------------------------------------------------------#
def main():
    """Entry point: preprocess `test-source.c` into `test-target.c`."""
    # Open output file
    with open('test-target.c', 'w', encoding="utf-8") as target:
        # Open input file:
        with open('test-source.c', encoding="utf-8") as source:
            # If source supports random access
            if source.seekable():
                generate_output(source, target)
            # If not, create a seekable-proxy
            # NOTE(review): `FileObjectProxy` is not defined or imported
            # anywhere in this file, so this branch would raise NameError
            # if ever taken; regular `open()` objects are seekable, which
            # is presumably why this has gone unnoticed — confirm/fix.
            else:
                generate_output(FileObjectProxy(source.read()), target)


#------------------------------------------------------------------------------#
if __name__ == '__main__':
    main()
| petervaro/pypp | pypp.py | Python | gpl-3.0 | 6,571 | [
"VisIt"
] | a9a1d16acce2e0e9ae3cbd552ef8e623194c7e8c54f9b04ea300474efba3d5c8 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Purchase editors """
import gtk
from kiwi.datatypes import ValidationError
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.domain.purchase import PurchaseOrder, PurchaseItem
from stoqlib.lib.defaults import QUANTITY_PRECISION, MAX_INT
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
# Conventional shortcut for marking translatable strings.
_ = stoqlib_gettext
class PurchaseItemEditor(BaseEditor):
    """Editor for a single :class:`PurchaseItem` of a purchase order.

    Kiwi wires this class by naming conventions: widgets listed in
    ``proxy_widgets`` are bound to same-named model attributes, and
    methods named ``on_<widget>__<signal>`` / ``after_<widget>__<signal>``
    are auto-connected as handlers — do not rename them.
    """

    gladefile = 'PurchaseItemEditor'
    model_type = PurchaseItem
    model_name = _("Purchase Item")
    # Widget names bound to same-named model attributes via the kiwi proxy.
    proxy_widgets = ['cost',
                     'expected_receival_date',
                     'quantity',
                     'total']

    def __init__(self, store, model, visual_mode=False):
        # The proxy is created later in setup_proxies(); the callbacks
        # below check it for None to ignore events fired before that.
        self.proxy = None
        BaseEditor.__init__(self, store, model, visual_mode)
        order = self.model.order
        # Confirmed orders can no longer have cost/quantity edited.
        if order.status == PurchaseOrder.ORDER_CONFIRMED:
            self._set_not_editable()
        # The consignment-only fields are hidden by default; the
        # consignment subclass re-shows them.
        self.sold_lbl.hide()
        self.returned_lbl.hide()
        self.quantity_sold.hide()
        self.quantity_returned.hide()

    def _setup_widgets(self):
        """Configure spin-button ranges, digit precision and labels."""
        self.order.set_text(unicode(self.model.order.identifier))
        for widget in [self.quantity, self.cost, self.quantity_sold,
                       self.quantity_returned]:
            widget.set_adjustment(gtk.Adjustment(lower=0, upper=MAX_INT,
                                                 step_incr=1))
        unit = self.model.sellable.unit
        # Fractional quantities are only allowed when the sellable's unit
        # permits them.
        digits = QUANTITY_PRECISION if unit and unit.allow_fraction else 0
        for widget in [self.quantity,
                       self.quantity_sold,
                       self.quantity_returned]:
            widget.set_digits(digits)
        self.description.set_text(self.model.sellable.get_description())
        self.cost.set_digits(sysparam.get_int('COST_PRECISION_DIGITS'))

    def _set_not_editable(self):
        """Make cost and quantity read-only."""
        self.cost.set_sensitive(False)
        self.quantity.set_sensitive(False)

    def setup_proxies(self):
        # BaseEditor hook: build widgets, then bind them to the model.
        self._setup_widgets()
        self.proxy = self.add_proxy(self.model, self.proxy_widgets)

    #
    # Kiwi callbacks
    #

    def after_cost__changed(self, widget):
        # Keep the (read-only) total in sync when the cost changes.
        if self.proxy:
            self.proxy.update('total')

    def after_quantity__changed(self, widget):
        # Keep the (read-only) total in sync when the quantity changes.
        if self.proxy:
            self.proxy.update('total')

    def on_expected_receival_date__validate(self, widget, value):
        if value < localtoday().date():
            return ValidationError(_(u'The expected receival date should be '
                                     'a future date or today.'))

    def on_cost__validate(self, widget, value):
        if value <= 0:
            return ValidationError(_(u'The cost should be greater than zero.'))

    def on_quantity__validate(self, widget, value):
        if value <= 0:
            return ValidationError(_(u'The quantity should be greater than '
                                     'zero.'))
class InConsignmentItemEditor(PurchaseItemEditor):
    """Editor for an item of a consigned purchase order.

    Cost/quantity are locked (the goods were already received); instead
    the user adjusts the sold and returned quantities, which may only
    grow and must never exceed the received quantity.
    """

    # Expose the consignment quantities through the proxy as well.
    proxy_widgets = PurchaseItemEditor.proxy_widgets + ['quantity_sold',
                                                        'quantity_returned']

    def __init__(self, store, model):
        # Remember the starting quantities: validation forbids lowering them.
        self._original_sold_qty = model.quantity_sold
        self._original_returned_qty = model.quantity_returned
        self._allowed_sold = None
        PurchaseItemEditor.__init__(self, store, model)
        assert self.model.order.status == PurchaseOrder.ORDER_CONSIGNED
        self._set_not_editable()
        # The items were already received, so the receival date is fixed.
        self.expected_receival_date.set_sensitive(False)
        # Re-show the consignment-only fields hidden by the base class.
        for widget in (self.sold_lbl, self.returned_lbl,
                       self.quantity_sold, self.quantity_returned):
            widget.show()

    #
    # Kiwi Callbacks
    #

    def on_expected_receival_date__validate(self, widget, value):
        # Deliberately neutralizes the parent's validator: past receival
        # dates are fine here since the items were already received.
        pass

    def on_quantity_sold__validate(self, widget, value):
        if value < self._original_sold_qty:
            return ValidationError(_(u'Can not decrease this quantity.'))
        total = self.quantity_returned.read() + value
        if value and total > self.model.quantity_received:
            return ValidationError(_(u'Sold and returned quantity does '
                                     'not match.'))

    def on_quantity_returned__validate(self, widget, value):
        if value < self._original_returned_qty:
            return ValidationError(_(u'Can not decrease this quantity.'))
        max_returned = self.model.quantity_received - self.quantity_sold.read()
        if value and value > max_returned:
            return ValidationError(_(u'Invalid returned quantity'))
class PurchaseQuoteItemEditor(PurchaseItemEditor):
    """Editor for a quote item: the cost is not negotiated yet, so the
    cost field is removed from the proxy and hidden from the dialog."""

    # Same bindings as the parent, minus the cost widget.
    proxy_widgets = [name for name in PurchaseItemEditor.proxy_widgets
                     if name != 'cost']

    def __init__(self, store, model):
        PurchaseItemEditor.__init__(self, store, model)
        for widget in (self.cost, self.cost_lbl):
            widget.hide()
| andrebellafronte/stoq | stoqlib/gui/editors/purchaseeditor.py | Python | gpl-2.0 | 6,195 | [
"VisIt"
] | 682ddaded32e5218eadd9985ced91e10e1d0c2e446be92d488d7fdd4857f5b31 |
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='amrclaw'):
#------------------------------

    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "amrclaw" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData, ready to be written out with
                  rundata.write() (see the __main__ block below).

    """

    from clawpack.clawutil import data


    assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'"

    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------

    # Material parameters for the two media (read by setprob.f at runtime).
    probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
    probdata.add_param('p1', 2., 'density on left')
    probdata.add_param('c1', 2., 'sound speed on left')
    probdata.add_param('p2', 2., 'density on right')
    probdata.add_param('c2', 1., 'sound speed on right')

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #   (or to amrclaw.data for AMR)
    #------------------------------------------------------------------

    clawdata = rundata.clawdata  # initialized when rundata instantiated


    # Set single grid parameters first.
    # See below for AMR parameters.


    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.num_dim = num_dim

    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -8.000000e+00          # xlower
    clawdata.upper[0] = 8.000000e+00          # xupper
    clawdata.lower[1] = -1.000000e+00          # ylower
    clawdata.upper[1] = 11.000000e+00          # yupper

    # Number of grid cells:
    clawdata.num_cells[0] = 200      # mx
    clawdata.num_cells[1] = 200      # my


    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.num_eqn = 3

    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 2

    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 0


    # -------------
    # Initial time:
    # -------------

    clawdata.t0 = 0.000000


    # Restart from checkpoint file of a previous run?
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.

    clawdata.restart = False               # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006'  # File to use for restart data


    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.

    clawdata.output_style = 1

    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        # (21*8 frames over 21 time units = 8 frames per unit time)
        clawdata.num_output_times = 21*8
        clawdata.tfinal = 21.0
        clawdata.output_t0 = True  # output at initial (or restart) time?

    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = [0., 0.1]

    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 2
        clawdata.total_steps = 4
        clawdata.output_t0 = True  # output at initial (or restart) time?


    clawdata.output_format = 'binary'      # 'ascii', 'binary', 'netcdf'


    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0


    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==True:  variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True

    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 1.00000e-02

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1.000000e+99

    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.900000
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.000000

    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 50000


    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'

    # For unsplit method, transverse_waves can be
    #  0 or 'none'      ==> donor cell (only normal solver used)
    #  1 or 'increment' ==> corner transport of waves
    #  2 or 'all'       ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2


    # Number of waves in the Riemann solution:
    clawdata.num_waves = 2

    # List of limiters to use for each wave family:
    # Required:  len(limiter) == num_waves
    # Some options:
    #   0 or 'none'     ==> no limiter (Lax-Wendroff)
    #   1 or 'minmod'   ==> minmod
    #   2 or 'superbee' ==> superbee
    #   3 or 'vanleer'  ==> van Leer
    #   4 or 'mc'       ==> MC limiter
    clawdata.limiter = ['vanleer','vanleer']

    clawdata.use_fwaves = True    # True ==> use f-wave version of algorithms

    # Source terms splitting:
    #   src_split == 0 or 'none'    ==> no source term (src routine never
    #                                   called)
    #   src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    #   src_split == 2 or 'strang'  ==> Strang (2nd order) splitting used,
    #                                   not recommended.
    clawdata.source_split = 0


    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2

    # Choice of BCs at xlower and xupper:
    #   0 or 'user'     => user specified (must modify bcNamr.f to use this
    #                      option)
    #   1 or 'extrap'   => extrapolation (non-reflecting outflow)
    #   2 or 'periodic' => periodic (must specify this at both boundaries)
    #   3 or 'wall'     => solid wall for systems where q(2) is normal
    #                      velocity
    clawdata.bc_lower[0] = 'wall'   # at xlower
    clawdata.bc_upper[0] = 'wall'   # at xupper

    clawdata.bc_lower[1] = 'extrap'   # at ylower
    clawdata.bc_upper[1] = 'wall'   # at yupper


    # ---------------
    # Gauges:
    # ---------------
    rundata.gaugedata.gauges = []
    # for gauges append lines of the form  [gaugeno, x, y, t1, t2]
    #rundata.gaugedata.gauges.append([0, 3.5, 0.5, 2.7, 2.85])
    #rundata.gaugedata.gauges.append([1, 3.6, 0.5, 2.7, 2.85])


    # --------------
    # Checkpointing:
    # --------------

    # Specify when checkpoint files should be created that can be
    # used to restart a computation.

    clawdata.checkpt_style = 0

    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass

    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass

    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1,0.15]

    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 3


    # ---------------
    # AMR parameters:
    # ---------------

    amrdata = rundata.amrdata

    # max number of refinement levels:
    # (1 level ==> AMR effectively disabled for this run)
    amrdata.amr_levels_max = 1

    # List of refinement ratios at each level (length at least amr_level_max-1)
    amrdata.refinement_ratios_x = [2]
    amrdata.refinement_ratios_y = [2]
    amrdata.refinement_ratios_t = [2]


    # Specify type of each aux variable in clawdata.auxtype.
    # This must be a list of length num_aux, each element of which is one of:
    #   'center',  'capacity', 'xleft', or 'yleft'  (see documentation).
    amrdata.aux_type = ['center','center']


    # Flag for refinement based on Richardson error estimater:
    amrdata.flag_richardson = False    # use Richardson?
    amrdata.flag_richardson_tol = 0.001000e+00  # Richardson tolerance

    # Flag for refinement using routine flag2refine:
    amrdata.flag2refine = False      # use this?
    amrdata.flag2refine_tol = 0.05  # tolerance used in this routine
    # User can modify flag2refine to change the criterion for flagging.
    # Default: check maximum absolute difference of first component of q
    # between a cell and each of its neighbors.

    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 2

    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width  = 2

    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.7

    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0


    # ---------------
    # Regions:
    # ---------------
    rundata.regiondata.regions = []
    # to specify regions of refinement append lines of the form
    #  [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]


    #  ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False      # print domain flags
    amrdata.eprint = False      # print err est flags
    amrdata.edebug = False      # even more err est flags
    amrdata.gprint = False      # grid bisection/clustering
    amrdata.nprint = False      # proper nesting output
    amrdata.pprint = False      # proj. of tagged points
    amrdata.rprint = False      # print regridding summary
    amrdata.sprint = False      # space/memory output
    amrdata.tprint = False      # time step reporting each level
    amrdata.uprint = False      # update/upbnd reporting

    return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
    # Build the run-time parameter object (an optional claw_pkg name may be
    # passed on the command line) and write out all Clawpack data files.
    import sys
    setrun(*sys.argv[1:]).write()
| clawpack/adjoint | paper2_examples/acoustics_2d_ex4/adjoint/setrun.py | Python | bsd-2-clause | 11,357 | [
"NetCDF"
] | d5d5849e53f24e9fb67a0c255c5f18a9ede9b8acc8bf85ed171b3173f5d23ec4 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <martine@danga.com>
"""A git-command for integrating reviews on Rietveld."""
from distutils.version import LooseVersion
import glob
import json
import logging
import optparse
import os
import Queue
import re
import stat
import sys
import textwrap
import threading
import urllib2
import urlparse
import webbrowser
try:
import readline # pylint: disable=F0401,W0611
except ImportError:
pass
from third_party import colorama
from third_party import upload
import breakpad # pylint: disable=W0611
import clang_format
import fix_encoding
import gclient_utils
import git_common
import owners_finder
import presubmit_support
import rietveld
import scm
import subcommand
import subprocess2
import watchlists
__version__ = '1.0'

# NOTE(review): default Rietveld server; presumably used when neither the
# git config nor codereview.settings provides one -- confirm against main().
DEFAULT_SERVER = 'https://codereview.appspot.com'
# %s is filled with the action name (e.g. 'dcommit') to build the hook path.
POSTUPSTREAM_HOOK_PATTERN = '.git/hooks/post-cl-%s'
DESCRIPTION_BACKUP_FILE = '~/.git_cl_description_backup'
GIT_INSTRUCTIONS_URL = 'http://code.google.com/p/chromium/wiki/UsingGit'
CHANGE_ID = 'Change-Id:'

# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
# "$^" can never match, so by default no lintable file is ignored.
DEFAULT_LINT_IGNORE_REGEX = r"$^"

# Shortcut since it quickly becomes redundant.
Fore = colorama.Fore

# Initialized in main()
settings = None
def DieWithError(message):
  """Print *message* to stderr and abort the process with exit status 1."""
  sys.stderr.write('%s\n' % message)
  sys.exit(1)
def GetNoGitPagerEnv():
  """Return a copy of the current environment with git paging disabled."""
  env = dict(os.environ)
  # 'cat' is a magical git string that disables pagers on all platforms.
  env['GIT_PAGER'] = 'cat'
  return env
def RunCommand(args, error_ok=False, error_message=None, **kwargs):
  """Run *args* and return its stdout.

  On failure: if error_ok, return the failed command's stdout; otherwise
  die with *error_message* (or the command's own output).
  """
  try:
    return subprocess2.check_output(args, shell=False, **kwargs)
  except subprocess2.CalledProcessError as e:
    logging.debug('Failed running %s', args)
    if error_ok:
      return e.stdout
    DieWithError(
        'Command "%s" failed.\n%s' % (
          ' '.join(args), error_message or e.stdout or ''))
def RunGit(args, **kwargs):
  """Run a git subcommand and return its stdout (see RunCommand)."""
  cmd = ['git'] + args
  return RunCommand(cmd, **kwargs)
def RunGitWithCode(args, suppress_stderr=False):
  """Run a git subcommand; return (returncode, stdout) without dying."""
  stderr = subprocess2.VOID if suppress_stderr else sys.stderr
  try:
    out, returncode = subprocess2.communicate(['git'] + args,
                                              env=GetNoGitPagerEnv(),
                                              stdout=subprocess2.PIPE,
                                              stderr=stderr)
    return returncode, out[0]
  except ValueError:
    # When the subprocess fails, communicate() returns None, which raises a
    # ValueError when unpacked into (out, returncode).
    return 1, ''
def IsGitVersionAtLeast(min_version):
  """Return True if the installed git reports a version >= *min_version*."""
  prefix = 'git version '
  version = RunGit(['--version']).strip()
  if not version.startswith(prefix):
    return False
  return LooseVersion(version[len(prefix):]) >= LooseVersion(min_version)
def ask_for_data(prompt):
  """Prompt the user for one line of input; exit quietly on Ctrl-C."""
  try:
    return raw_input(prompt)
  except KeyboardInterrupt:
    # Hide the exception traceback from the user.
    sys.exit(1)
def git_set_branch_value(key, value):
  """Store *value* under branch.<current_branch>.<key> in the git config.

  Does nothing when no current branch can be determined.  Integer values
  are stored with git's --int type.
  """
  branch = Changelist().GetBranch()
  if not branch:
    return
  git_key = 'branch.%s.%s' % (branch, key)
  cmd = ['config']
  if isinstance(value, int):
    cmd.append('--int')
  RunGit(cmd + [git_key, str(value)])
def git_get_branch_default(key, default):
  """Read an int from branch.<current_branch>.<key>, or *default* if unset."""
  branch = Changelist().GetBranch()
  if not branch:
    return default
  git_key = 'branch.%s.%s' % (branch, key)
  (_, stdout) = RunGitWithCode(['config', '--int', '--get', git_key])
  try:
    return int(stdout.strip())
  except ValueError:
    # Missing or malformed config entry.
    return default
def add_git_similarity(parser):
  """Add --similarity/--find-copies options to *parser*.

  parser.parse_args is wrapped so that the chosen values are persisted
  per-branch in the git config (git-cl-similarity / git-find-copies) and
  restored as defaults on later runs.
  """
  parser.add_option(
      '--similarity', metavar='SIM', type='int', action='store',
      help='Sets the percentage that a pair of files need to match in order to'
           ' be considered copies (default 50)')
  parser.add_option(
      '--find-copies', action='store_true',
      help='Allows git to look for copies.')
  parser.add_option(
      '--no-find-copies', action='store_false', dest='find_copies',
      help='Disallows git from looking for copies.')

  # Monkey-patch parse_args: run the real parser first, then fill unset
  # options from the branch config and persist explicitly-given ones.
  old_parser_args = parser.parse_args
  def Parse(args):
    options, args = old_parser_args(args)

    if options.similarity is None:
      options.similarity = git_get_branch_default('git-cl-similarity', 50)
    else:
      print('Note: Saving similarity of %d%% in git config.'
            % options.similarity)
      git_set_branch_value('git-cl-similarity', options.similarity)

    # Clamp to a sane percentage.
    options.similarity = max(0, min(options.similarity, 100))

    if options.find_copies is None:
      options.find_copies = bool(
          git_get_branch_default('git-find-copies', True))
    else:
      git_set_branch_value('git-find-copies', int(options.find_copies))

    print('Using %d%% similarity for rename/copy detection. '
          'Override with --similarity.' % options.similarity)

    return options, args
  parser.parse_args = Parse
def is_dirty_git_tree(cmd):
  """Return True (after explaining why) if the work tree has uncommitted
  changes; *cmd* names the operation being refused."""
  # Make sure index is up-to-date before running diff-index.
  RunGit(['update-index', '--refresh', '-q'], error_ok=True)
  dirty = RunGit(['diff-index', '--name-status', 'HEAD'])
  if not dirty:
    return False
  print('Cannot %s with a dirty tree. You must commit locally first.' % cmd)
  print('Uncommitted files: (git diff-index --name-status HEAD)')
  # Cap the listing so a huge tree does not flood the terminal.
  print(dirty[:4096])
  if len(dirty) > 4096:
    print('... (run "git diff-index --name-status HEAD" to see full output).')
  return True
def MatchSvnGlob(url, base_url, glob_spec, allow_wildcards):
  """Return the git ref corresponding to *url* under *base_url*+*glob_spec*.

  glob_spec has the form "<svn suburl>:<git ref>".  If allow_wildcards is
  true the suburl may contain "*" or "{a,b,c}" wildcards; the matched value
  replaces a trailing "*" in the git ref.  Returns None when nothing
  matches.
  """
  fetch_suburl, as_ref = glob_spec.split(':')
  if allow_wildcards:
    glob_match = re.match('(.+/)?(\*|{[^/]*})(/.+)?', fetch_suburl)
    if glob_match:
      # Specs like "branches/*/src:refs/remotes/svn/*" or
      # "branches/{472,597,648}/src:refs/remotes/svn/*".
      prefix, wildcard, suffix = glob_match.groups()
      branch_re = re.escape(base_url)
      if prefix:
        branch_re += '/' + re.escape(prefix)
      if wildcard == '*':
        branch_re += '([^/]*)'
      else:
        # Turn "{a,b,c}" into the alternation "(a|b|c)".
        wildcard = re.escape(wildcard)
        wildcard = re.sub('^\\\\{', '(', wildcard)
        wildcard = re.sub('\\\\,', '|', wildcard)
        wildcard = re.sub('\\\\}$', ')', wildcard)
        branch_re += wildcard
      if suffix:
        branch_re += re.escape(suffix)
      match = re.match(branch_re, url)
      if match:
        return re.sub('\*$', match.group(1), as_ref)
      # No wildcard match: fall through to the exact comparison below.

  # Specs like "trunk/src:refs/remotes/origin/trunk".
  full_url = base_url + '/' + fetch_suburl if fetch_suburl else base_url
  return as_ref if full_url == url else None
def print_stats(similarity, find_copies, args):
  """Print a `git diff --stat` summary of the change for the user."""
  # --no-ext-diff is broken in some versions of Git, so try to work around
  # this by overriding the environment (but there is still a problem if the
  # git config key "diff.external" is used).
  env = GetNoGitPagerEnv()
  env.pop('GIT_EXTERNAL_DIFF', None)

  if find_copies:
    similarity_options = ['--find-copies-harder', '-l100000',
                          '-C%s' % similarity]
  else:
    similarity_options = ['-M%s' % similarity]

  try:
    stdout = sys.stdout.fileno()
  except AttributeError:
    # sys.stdout has been replaced by something without a file descriptor.
    stdout = None
  return subprocess2.call(
      ['git', 'diff', '--no-ext-diff', '--stat']
      + similarity_options + args,
      stdout=stdout, env=env)
class Settings(object):
  """Lazily-loaded per-checkout configuration.

  Values come from `git config` (mostly rietveld.* keys) and, on first
  access, from an optional codereview.settings file.  Every getter caches
  its result on the instance.
  """

  def __init__(self):
    # All fields below are caches filled on first access by the getters.
    self.default_server = None
    self.cc = None
    self.root = None
    self.is_git_svn = None
    self.svn_branch = None
    self.tree_status_url = None
    self.viewvc_url = None
    self.updated = False
    self.is_gerrit = None
    self.git_editor = None
    self.project = None

  def LazyUpdateIfNeeded(self):
    """Updates the settings from a codereview.settings file, if available."""
    if not self.updated:
      # The only value that actually changes the behavior is
      # autoupdate = "false". Everything else means "true".
      autoupdate = RunGit(['config', 'rietveld.autoupdate'],
                          error_ok=True
                          ).strip().lower()

      cr_settings_file = FindCodereviewSettingsFile()
      if autoupdate != 'false' and cr_settings_file:
        LoadCodereviewSettingsFromFile(cr_settings_file)
        # set updated to True to avoid infinite calling loop
        # through DownloadHooks
        self.updated = True
        DownloadHooks(False)
      self.updated = True

  def GetDefaultServerUrl(self, error_ok=False):
    """Return rietveld.server upgraded to https; die if unset (unless
    error_ok)."""
    if not self.default_server:
      self.LazyUpdateIfNeeded()
      self.default_server = gclient_utils.UpgradeToHttps(
          self._GetRietveldConfig('server', error_ok=True))
      if error_ok:
        return self.default_server
      if not self.default_server:
        error_message = ('Could not find settings file. You must configure '
                         'your review setup by running "git cl config".')
        self.default_server = gclient_utils.UpgradeToHttps(
            self._GetRietveldConfig('server', error_message=error_message))
    return self.default_server

  @staticmethod
  def GetRelativeRoot():
    """Return the relative path from cwd up to the repository root."""
    return RunGit(['rev-parse', '--show-cdup']).strip()

  def GetRoot(self):
    """Return the absolute path of the repository root (cached)."""
    if self.root is None:
      self.root = os.path.abspath(self.GetRelativeRoot())
    return self.root

  def GetIsGitSvn(self):
    """Return true if this repo looks like it's using git-svn."""
    if self.is_git_svn is None:
      # If you have any "svn-remote.*" config keys, we think you're using svn.
      self.is_git_svn = RunGitWithCode(
          ['config', '--local', '--get-regexp', r'^svn-remote\.'])[0] == 0
    return self.is_git_svn

  def GetSVNBranch(self):
    """Return the svn branch this git-svn checkout tracks (cached);
    dies when it cannot be determined."""
    if self.svn_branch is None:
      if not self.GetIsGitSvn():
        DieWithError('Repo doesn\'t appear to be a git-svn repo.')

      # Try to figure out which remote branch we're based on.
      # Strategy:
      # 1) iterate through our branch history and find the svn URL.
      # 2) find the svn-remote that fetches from the URL.

      # regexp matching the git-svn line that contains the URL.
      git_svn_re = re.compile(r'^\s*git-svn-id: (\S+)@', re.MULTILINE)

      # We don't want to go through all of history, so read a line from the
      # pipe at a time.
      # The -100 is an arbitrary limit so we don't search forever.
      cmd = ['git', 'log', '-100', '--pretty=medium']
      proc = subprocess2.Popen(cmd, stdout=subprocess2.PIPE,
                               env=GetNoGitPagerEnv())
      url = None
      for line in proc.stdout:
        match = git_svn_re.match(line)
        if match:
          url = match.group(1)
          proc.stdout.close()  # Cut pipe.
          break

      if url:
        svn_remote_re = re.compile(r'^svn-remote\.([^.]+)\.url (.*)$')
        remotes = RunGit(['config', '--get-regexp',
                          r'^svn-remote\..*\.url']).splitlines()
        for remote in remotes:
          match = svn_remote_re.match(remote)
          if match:
            remote = match.group(1)
            base_url = match.group(2)
            rewrite_root = RunGit(
                ['config', 'svn-remote.%s.rewriteRoot' % remote],
                error_ok=True).strip()
            if rewrite_root:
              base_url = rewrite_root
            # Try fetch, branches, then tags specs, in that order.
            fetch_spec = RunGit(
                ['config', 'svn-remote.%s.fetch' % remote],
                error_ok=True).strip()
            if fetch_spec:
              self.svn_branch = MatchSvnGlob(url, base_url, fetch_spec, False)
              if self.svn_branch:
                break
            branch_spec = RunGit(
                ['config', 'svn-remote.%s.branches' % remote],
                error_ok=True).strip()
            if branch_spec:
              self.svn_branch = MatchSvnGlob(url, base_url, branch_spec, True)
              if self.svn_branch:
                break
            tag_spec = RunGit(
                ['config', 'svn-remote.%s.tags' % remote],
                error_ok=True).strip()
            if tag_spec:
              self.svn_branch = MatchSvnGlob(url, base_url, tag_spec, True)
              if self.svn_branch:
                break

      if not self.svn_branch:
        DieWithError('Can\'t guess svn branch -- try specifying it on the '
                     'command line')

    return self.svn_branch

  def GetTreeStatusUrl(self, error_ok=False):
    """Return rietveld.tree-status-url; die with instructions if unset
    (unless error_ok)."""
    if not self.tree_status_url:
      error_message = ('You must configure your tree status URL by running '
                       '"git cl config".')
      self.tree_status_url = self._GetRietveldConfig(
          'tree-status-url', error_ok=error_ok, error_message=error_message)
    return self.tree_status_url

  def GetViewVCUrl(self):
    """Return rietveld.viewvc-url, or '' if unset (cached)."""
    if not self.viewvc_url:
      self.viewvc_url = self._GetRietveldConfig('viewvc-url', error_ok=True)
    return self.viewvc_url

  def GetBugPrefix(self):
    """Return rietveld.bug-prefix, or '' if unset."""
    return self._GetRietveldConfig('bug-prefix', error_ok=True)

  def GetDefaultCCList(self):
    """Return rietveld.cc, or '' if unset."""
    return self._GetRietveldConfig('cc', error_ok=True)

  def GetDefaultPrivateFlag(self):
    """Return rietveld.private, or '' if unset."""
    return self._GetRietveldConfig('private', error_ok=True)

  def GetIsGerrit(self):
    """Return true if this repo is associated with gerrit code review system."""
    if self.is_gerrit is None:
      self.is_gerrit = self._GetConfig('gerrit.host', error_ok=True)
    return self.is_gerrit

  def GetGitEditor(self):
    """Return the editor specified in the git config, or None if none is."""
    if self.git_editor is None:
      self.git_editor = self._GetConfig('core.editor', error_ok=True)
    return self.git_editor or None

  def GetLintRegex(self):
    """Return the regex of files to lint (rietveld.cpplint-regex or the
    built-in default)."""
    return (self._GetRietveldConfig('cpplint-regex', error_ok=True) or
            DEFAULT_LINT_REGEX)

  def GetLintIgnoreRegex(self):
    """Return the regex of files to skip when linting."""
    return (self._GetRietveldConfig('cpplint-ignore-regex', error_ok=True) or
            DEFAULT_LINT_IGNORE_REGEX)

  def GetProject(self):
    """Return rietveld.project, or '' if unset (cached)."""
    if not self.project:
      self.project = self._GetRietveldConfig('project', error_ok=True)
    return self.project

  def _GetRietveldConfig(self, param, **kwargs):
    # Convenience wrapper: look up "rietveld.<param>" in the git config.
    return self._GetConfig('rietveld.' + param, **kwargs)

  def _GetConfig(self, param, **kwargs):
    # All config reads funnel through here so the settings file is loaded
    # (at most once) before the first lookup.
    self.LazyUpdateIfNeeded()
    return RunGit(['config', param], **kwargs).strip()
def ShortBranchName(branch):
  """Strip the 'refs/heads/' prefix: 'refs/heads/foo' -> 'foo'."""
  prefix = 'refs/heads/'
  return branch.replace(prefix, '')
class Changelist(object):
  def __init__(self, branchref=None, issue=None):
    """Create a changelist bound to a git branch.

    Args:
      branchref: full ref name ('refs/heads/foo'), or None to lazily use
          the current branch.
      issue: Rietveld issue number to force, or None to look it up lazily.
    """
    # Poke settings so we get the "configure your server" message if necessary.
    global settings
    if not settings:
      # Happens when git_cl.py is used as a utility library.
      settings = Settings()
      settings.GetDefaultServerUrl()
    self.branchref = branchref
    if self.branchref:
      self.branch = ShortBranchName(self.branchref)
    else:
      self.branch = None
    # Everything below is a lazily-computed cache filled by the getters.
    self.rietveld_server = None
    self.upstream_branch = None
    self.lookedup_issue = False
    self.issue = issue or None
    self.has_description = False
    self.description = None
    self.lookedup_patchset = False
    self.patchset = None
    self._rpc_server = None
    self.cc = None
    self.watchers = ()
    self._remote = None
    self._props = None
def GetCCList(self):
"""Return the users cc'd on this CL.
Return is a string suitable for passing to gcl with the --cc flag.
"""
if self.cc is None:
base_cc = settings.GetDefaultCCList()
more_cc = ','.join(self.watchers)
self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
return self.cc
  def GetCCListWithoutDefault(self):
    """Return the users cc'd on this CL excluding default ones."""
    if self.cc is None:
      # Only the per-change watchers; the settings default CC is skipped.
      self.cc = ','.join(self.watchers)
    return self.cc
  def SetWatchers(self, watchers):
    """Set the list of email addresses that should be cc'd based on the changed
    files in this CL.
    """
    self.watchers = watchers
  def GetBranch(self):
    """Returns the short branch name, e.g. 'master'."""
    if not self.branch:
      # Resolve HEAD once; cache both the full ref and the short name.
      self.branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
      self.branch = ShortBranchName(self.branchref)
    return self.branch
  def GetBranchRef(self):
    """Returns the full branch name, e.g. 'refs/heads/master'."""
    self.GetBranch()  # Poke the lazy loader.
    return self.branchref
  @staticmethod
  def FetchUpstreamTuple(branch):
    """Returns a tuple containing remote and remote ref,
       e.g. 'origin', 'refs/heads/master'

    Resolution order: branch.<name>.merge config, rietveld.upstream-branch
    config, the git-svn branch, then a guessed origin/master|trunk.
    Dies when nothing can be determined.
    """
    remote = '.'
    upstream_branch = RunGit(['config', 'branch.%s.merge' % branch],
                             error_ok=True).strip()
    if upstream_branch:
      remote = RunGit(['config', 'branch.%s.remote' % branch]).strip()
    else:
      upstream_branch = RunGit(['config', 'rietveld.upstream-branch'],
                               error_ok=True).strip()
      if upstream_branch:
        remote = RunGit(['config', 'rietveld.upstream-remote']).strip()
      else:
        # Fall back on trying a git-svn upstream branch.
        if settings.GetIsGitSvn():
          upstream_branch = settings.GetSVNBranch()
        else:
          # Else, try to guess the origin remote.
          remote_branches = RunGit(['branch', '-r']).split()
          if 'origin/master' in remote_branches:
            # Fall back on origin/master if it exits.
            remote = 'origin'
            upstream_branch = 'refs/heads/master'
          elif 'origin/trunk' in remote_branches:
            # Fall back on origin/trunk if it exists. Generally a shared
            # git-svn clone
            remote = 'origin'
            upstream_branch = 'refs/heads/trunk'
          else:
            DieWithError("""Unable to determine default branch to diff against.
Either pass complete "git diff"-style arguments, like
  git cl upload origin/master
or verify this branch is set up to track another (via the --track argument to
"git checkout -b ...").""")

    return remote, upstream_branch
  def GetCommonAncestorWithUpstream(self):
    """Return the merge-base of this branch and its upstream branch."""
    return git_common.get_or_create_merge_base(self.GetBranch(),
                                               self.GetUpstreamBranch())
def GetUpstreamBranch(self):
if self.upstream_branch is None:
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
if remote is not '.':
upstream_branch = upstream_branch.replace('heads', 'remotes/' + remote)
self.upstream_branch = upstream_branch
return self.upstream_branch
def GetRemoteBranch(self):
if not self._remote:
remote, branch = None, self.GetBranch()
seen_branches = set()
while branch not in seen_branches:
seen_branches.add(branch)
remote, branch = self.FetchUpstreamTuple(branch)
branch = ShortBranchName(branch)
if remote != '.' or branch.startswith('refs/remotes'):
break
else:
remotes = RunGit(['remote'], error_ok=True).split()
if len(remotes) == 1:
remote, = remotes
elif 'origin' in remotes:
remote = 'origin'
logging.warning('Could not determine which remote this change is '
'associated with, so defaulting to "%s". This may '
'not be what you want. You may prevent this message '
'by running "git svn info" as documented here: %s',
self._remote,
GIT_INSTRUCTIONS_URL)
else:
logging.warn('Could not determine which remote this change is '
'associated with. You may prevent this message by '
'running "git svn info" as documented here: %s',
GIT_INSTRUCTIONS_URL)
branch = 'HEAD'
if branch.startswith('refs/remotes'):
self._remote = (remote, branch)
else:
self._remote = (remote, 'refs/remotes/%s/%s' % (remote, branch))
return self._remote
  def GitSanityChecks(self, upstream_git_obj):
    """Checks git repo status and ensures diff is from local commits.

    Returns False (after printing an explanation to stderr) when the
    upstream object is not an ancestor of HEAD, or when the diff would
    include commits that already exist on the remote branch.
    """
    # Verify the commit we're diffing against is in our current branch.
    upstream_sha = RunGit(['rev-parse', '--verify', upstream_git_obj]).strip()
    common_ancestor = RunGit(['merge-base', upstream_sha, 'HEAD']).strip()
    if upstream_sha != common_ancestor:
      print >> sys.stderr, (
          'ERROR: %s is not in the current branch. You may need to rebase '
          'your tracking branch' % upstream_sha)
      return False

    # List the commits inside the diff, and verify they are all local.
    commits_in_diff = RunGit(
        ['rev-list', '^%s' % upstream_sha, 'HEAD']).splitlines()
    code, remote_branch = RunGitWithCode(['config', 'gitcl.remotebranch'])
    remote_branch = remote_branch.strip()
    if code != 0:
      # No explicit override configured; use the detected remote branch.
      _, remote_branch = self.GetRemoteBranch()

    commits_in_remote = RunGit(
        ['rev-list', '^%s' % upstream_sha, remote_branch]).splitlines()

    common_commits = set(commits_in_diff) & set(commits_in_remote)
    if common_commits:
      print >> sys.stderr, (
          'ERROR: Your diff contains %d commits already in %s.\n'
          'Run "git log --oneline %s..HEAD" to get a list of commits in '
          'the diff.  If you are using a custom git flow, you can override'
          ' the reference used for this check with "git config '
          'gitcl.remotebranch <git-ref>".' % (
              len(common_commits), remote_branch, upstream_git_obj))
      return False

    return True
  def GetGitBaseUrlFromConfig(self):
    """Return the configured base URL from branch.<branchname>.baseurl.

    Returns None if it is not set.
    """
    # Note: error_ok makes RunGit return '' when the key is absent.
    return RunGit(['config', 'branch.%s.base-url' % self.GetBranch()],
                  error_ok=True).strip()
def GetRemoteUrl(self):
"""Return the configured remote URL, e.g. 'git://example.org/foo.git/'.
Returns None if there is no remote.
"""
remote, _ = self.GetRemoteBranch()
url = RunGit(['config', 'remote.%s.url' % remote], error_ok=True).strip()
# If URL is pointing to a local directory, it is probably a git cache.
if os.path.isdir(url):
url = RunGit(['config', 'remote.%s.url' % remote],
error_ok=True,
cwd=url).strip()
return url
  def GetIssue(self):
    """Returns the issue number as a int or None if not set."""
    if self.issue is None and not self.lookedup_issue:
      issue = RunGit(['config', self._IssueSetting()], error_ok=True).strip()
      # Parses as: (int(issue) or None) if issue else None -- an empty or
      # zero-valued config entry both mean "no issue".
      self.issue = int(issue) or None if issue else None
      self.lookedup_issue = True
    return self.issue
def GetRietveldServer(self):
if not self.rietveld_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
self.rietveld_server = gclient_utils.UpgradeToHttps(RunGit(
['config', self._RietveldServer()], error_ok=True).strip())
if not self.rietveld_server:
self.rietveld_server = settings.GetDefaultServerUrl()
return self.rietveld_server
def GetIssueURL(self):
"""Get the URL for a particular issue."""
if not self.GetIssue():
return None
return '%s/%s' % (self.GetRietveldServer(), self.GetIssue())
  def GetDescription(self, pretty=False):
    """Return the CL description, fetched once from Rietveld if needed.

    With pretty=True the text is re-wrapped and indented for display.
    Dies on HTTP errors; a plain network failure degrades to ''.
    """
    if not self.has_description:
      if self.GetIssue():
        issue = self.GetIssue()
        try:
          self.description = self.RpcServer().get_description(issue).strip()
        except urllib2.HTTPError as e:
          if e.code == 404:
            DieWithError(
                ('\nWhile fetching the description for issue %d, received a '
                 '404 (not found)\n'
                 'error. It is likely that you deleted this '
                 'issue on the server. If this is the\n'
                 'case, please run\n\n'
                 ' git cl issue 0\n\n'
                 'to clear the association with the deleted issue. Then run '
                 'this command again.') % issue)
          else:
            DieWithError(
                '\nFailed to fetch issue description. HTTP error %d' % e.code)
        except urllib2.URLError as e:
          print >> sys.stderr, (
              'Warning: Failed to retrieve CL description due to network '
              'failure.')
          # Best-effort: continue with an empty description.
          self.description = ''

      self.has_description = True
    if pretty:
      wrapper = textwrap.TextWrapper()
      wrapper.initial_indent = wrapper.subsequent_indent = '  '
      return wrapper.fill(self.description)
    return self.description
  def GetPatchset(self):
    """Returns the patchset number as a int or None if not set."""
    if self.patchset is None and not self.lookedup_patchset:
      patchset = RunGit(['config', self._PatchsetSetting()],
                        error_ok=True).strip()
      # Parses as: (int(patchset) or None) if patchset else None -- empty
      # or zero config values both mean "no patchset".
      self.patchset = int(patchset) or None if patchset else None
      self.lookedup_patchset = True
    return self.patchset
def SetPatchset(self, patchset):
"""Set this branch's patchset. If patchset=0, clears the patchset."""
if patchset:
RunGit(['config', self._PatchsetSetting(), str(patchset)])
self.patchset = patchset
else:
RunGit(['config', '--unset', self._PatchsetSetting()],
stderr=subprocess2.PIPE, error_ok=True)
self.patchset = None
  def GetMostRecentPatchset(self):
    """Return the newest patchset number recorded in the issue properties."""
    return self.GetIssueProperties()['patchsets'][-1]
  def GetPatchSetDiff(self, issue, patchset):
    """Fetch the raw diff for (issue, patchset) from the Rietveld server."""
    return self.RpcServer().get(
        '/download/issue%s_%s.diff' % (issue, patchset))
def GetIssueProperties(self):
if self._props is None:
issue = self.GetIssue()
if not issue:
self._props = {}
else:
self._props = self.RpcServer().get_issue_properties(issue, True)
return self._props
  def GetApprovingReviewers(self):
    """Return the reviewers that approved this issue, per its properties."""
    return get_approving_reviewers(self.GetIssueProperties())
def SetIssue(self, issue):
"""Set this branch's issue. If issue=0, clears the issue."""
if issue:
self.issue = issue
RunGit(['config', self._IssueSetting(), str(issue)])
if self.rietveld_server:
RunGit(['config', self._RietveldServer(), self.rietveld_server])
else:
current_issue = self.GetIssue()
if current_issue:
RunGit(['config', '--unset', self._IssueSetting()])
self.issue = None
self.SetPatchset(None)
def GetChange(self, upstream_branch, author):
if not self.GitSanityChecks(upstream_branch):
DieWithError('\nGit sanity check failure')
root = settings.GetRelativeRoot()
if not root:
root = '.'
absroot = os.path.abspath(root)
# We use the sha1 of HEAD as a name of this change.
name = RunGitWithCode(['rev-parse', 'HEAD'])[1].strip()
# Need to pass a relative path for msysgit.
try:
files = scm.GIT.CaptureStatus([root], '.', upstream_branch)
except subprocess2.CalledProcessError:
DieWithError(
('\nFailed to diff against upstream branch %s\n\n'
'This branch probably doesn\'t exist anymore. To reset the\n'
'tracking branch, please run\n'
' git branch --set-upstream %s trunk\n'
'replacing trunk with origin/master or the relevant branch') %
(upstream_branch, self.GetBranch()))
issue = self.GetIssue()
patchset = self.GetPatchset()
if issue:
description = self.GetDescription()
else:
# If the change was never uploaded, use the log messages of all commits
# up to the branch point, as git cl upload will prefill the description
# with these log messages.
args = ['log', '--pretty=format:%s%n%n%b', '%s...' % (upstream_branch)]
description = RunGitWithCode(args)[1].strip()
if not author:
author = RunGit(['config', 'user.email']).strip() or None
return presubmit_support.GitChange(
name,
description,
absroot,
files,
issue,
patchset,
author,
upstream=upstream_branch)
  def RunHook(self, committing, may_prompt, verbose, change):
    """Calls sys.exit() if the hook fails; returns a HookResults otherwise."""
    # Runs the PRESUBMIT checks for |change| against this CL's review server;
    # |committing| selects commit checks vs. upload checks.
    try:
      return presubmit_support.DoPresubmitChecks(change, committing,
          verbose=verbose, output_stream=sys.stdout, input_stream=sys.stdin,
          default_presubmit=None, may_prompt=may_prompt,
          rietveld_obj=self.RpcServer())
    except presubmit_support.PresubmitFailure, e:
      # The presubmit machinery itself failed (distinct from a check failing).
      DieWithError(
          ('%s\nMaybe your depot_tools is out of date?\n'
           'If all fails, contact maruel@') % e)
def UpdateDescription(self, description):
self.description = description
return self.RpcServer().update_description(
self.GetIssue(), self.description)
def CloseIssue(self):
"""Updates the description and closes the issue."""
return self.RpcServer().close_issue(self.GetIssue())
  def SetFlag(self, flag, value):
    """Patchset must match."""
    # Sets a server-side flag (e.g. 'commit') on the current issue/patchset.
    if not self.GetPatchset():
      DieWithError('The patchset needs to match. Send another patchset.')
    try:
      return self.RpcServer().set_flag(
          self.GetIssue(), self.GetPatchset(), flag, value)
    except urllib2.HTTPError, e:
      # Translate the common HTTP failures into actionable messages.
      if e.code == 404:
        DieWithError('The issue %s doesn\'t exist.' % self.GetIssue())
      if e.code == 403:
        DieWithError(
            ('Access denied to issue %s. Maybe the patchset %s doesn\'t '
             'match?') % (self.GetIssue(), self.GetPatchset()))
      raise
def RpcServer(self):
"""Returns an upload.RpcServer() to access this review's rietveld instance.
"""
if not self._rpc_server:
self._rpc_server = rietveld.CachingRietveld(
self.GetRietveldServer(), None, None)
return self._rpc_server
def _IssueSetting(self):
"""Return the git setting that stores this change's issue."""
return 'branch.%s.rietveldissue' % self.GetBranch()
def _PatchsetSetting(self):
"""Return the git setting that stores this change's most recent patchset."""
return 'branch.%s.rietveldpatchset' % self.GetBranch()
def _RietveldServer(self):
"""Returns the git setting that stores this change's rietveld server."""
return 'branch.%s.rietveldserver' % self.GetBranch()
def GetCodereviewSettingsInteractively():
  """Prompt the user for settings."""
  # TODO(ukai): ask code review system is rietveld or gerrit?
  server = settings.GetDefaultServerUrl(error_ok=True)
  server_prompt = 'Rietveld server (host[:port]) [%s]' % (
      server or DEFAULT_SERVER)
  newserver = ask_for_data(server_prompt + ':')
  if not newserver and not server:
    newserver = DEFAULT_SERVER
  if newserver:
    newserver = gclient_utils.UpgradeToHttps(newserver)
    if newserver != server:
      RunGit(['config', 'rietveld.server', newserver])

  def _set_property(initial, caption, name, is_url):
    # Prompt for one rietveld.* git config property; entering 'x' clears it
    # and an empty answer keeps the current value.
    prompt = caption
    if initial:
      prompt += ' ("x" to clear) [%s]' % initial
    new_val = ask_for_data(prompt + ':')
    if new_val == 'x':
      RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
      return
    if not new_val:
      return
    if is_url:
      new_val = gclient_utils.UpgradeToHttps(new_val)
    if new_val != initial:
      RunGit(['config', 'rietveld.' + name, new_val])

  _set_property(settings.GetDefaultCCList(), 'CC list', 'cc', False)
  _set_property(settings.GetDefaultPrivateFlag(),
                'Private flag (rietveld only)', 'private', False)
  _set_property(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
                'tree-status-url', False)
  _set_property(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url', True)
  _set_property(settings.GetBugPrefix(), 'Bug Prefix', 'bug-prefix', False)
# TODO: configure a default branch to diff against, rather than this
# svn-based hackery.
class ChangeDescription(object):
  """Contains a parsed form of the change description."""
  # Matches "R=..."/"TBR=..." reviewer lines and "BUG=..." lines; group(2)
  # captures the value with surrounding whitespace stripped.
  R_LINE = r'^[ \t]*(TBR|R)[ \t]*=[ \t]*(.*?)[ \t]*$'
  BUG_LINE = r'^[ \t]*(BUG)[ \t]*=[ \t]*(.*?)[ \t]*$'

  def __init__(self, description):
    # Stored internally as a list of lines without trailing newlines.
    self._description_lines = (description or '').strip().splitlines()

  @property               # www.logilab.org/ticket/89786
  def description(self):  # pylint: disable=E0202
    return '\n'.join(self._description_lines)

  def set_description(self, desc):
    # Accepts either a string or an iterable of lines; strips leading and
    # trailing blank lines before storing.
    if isinstance(desc, basestring):
      lines = desc.splitlines()
    else:
      lines = [line.rstrip() for line in desc]
    while lines and not lines[0]:
      lines.pop(0)
    while lines and not lines[-1]:
      lines.pop(-1)
    self._description_lines = lines

  def update_reviewers(self, reviewers):
    """Rewrites the R=/TBR= line(s) as a single line each."""
    assert isinstance(reviewers, list), reviewers
    if not reviewers:
      return
    reviewers = reviewers[:]

    # Get the set of R= and TBR= lines and remove them from the description.
    regexp = re.compile(self.R_LINE)
    matches = [regexp.match(line) for line in self._description_lines]
    new_desc = [l for i, l in enumerate(self._description_lines)
                if not matches[i]]
    self.set_description(new_desc)

    # Construct new unified R= and TBR= lines.
    r_names = []
    tbr_names = []
    for match in matches:
      if not match:
        continue
      people = cleanup_list([match.group(2).strip()])
      if match.group(1) == 'TBR':
        tbr_names.extend(people)
      else:
        r_names.extend(people)
    # Preserve existing R= names not already in the requested reviewer list.
    for name in r_names:
      if name not in reviewers:
        reviewers.append(name)
    new_r_line = 'R=' + ', '.join(reviewers) if reviewers else None
    new_tbr_line = 'TBR=' + ', '.join(tbr_names) if tbr_names else None

    # Put the new lines in the description where the old first R= line was.
    line_loc = next((i for i, match in enumerate(matches) if match), -1)
    if 0 <= line_loc < len(self._description_lines):
      # TBR is inserted first so that R= ends up above it at line_loc.
      if new_tbr_line:
        self._description_lines.insert(line_loc, new_tbr_line)
      if new_r_line:
        self._description_lines.insert(line_loc, new_r_line)
    else:
      if new_r_line:
        self.append_footer(new_r_line)
      if new_tbr_line:
        self.append_footer(new_tbr_line)

  def prompt(self):
    """Asks the user to update the description."""
    # Prepend instructional '#' comment lines; they are stripped after edit.
    self.set_description([
      '# Enter a description of the change.',
      '# This will be displayed on the codereview site.',
      '# The first line will also be used as the subject of the review.',
      '#--------------------This line is 72 characters long'
      '--------------------',
    ] + self._description_lines)

    regexp = re.compile(self.BUG_LINE)
    if not any((regexp.match(line) for line in self._description_lines)):
      self.append_footer('BUG=%s' % settings.GetBugPrefix())
    content = gclient_utils.RunEditor(self.description, True,
                                      git_editor=settings.GetGitEditor())
    if not content:
      DieWithError('Running editor failed')
    lines = content.splitlines()

    # Strip off comments.
    clean_lines = [line.rstrip() for line in lines if not line.startswith('#')]
    if not clean_lines:
      DieWithError('No CL description, aborting')
    self.set_description(clean_lines)

  def append_footer(self, line):
    # Appends |line| at the end, separating it from non-tag text with a
    # blank line.
    if self._description_lines:
      # Add an empty line if either the last line or the new line isn't a tag.
      last_line = self._description_lines[-1]
      if (not presubmit_support.Change.TAG_LINE_RE.match(last_line) or
          not presubmit_support.Change.TAG_LINE_RE.match(line)):
        self._description_lines.append('')
    self._description_lines.append(line)

  def get_reviewers(self):
    """Retrieves the list of reviewers."""
    matches = [re.match(self.R_LINE, line) for line in self._description_lines]
    reviewers = [match.group(2).strip() for match in matches if match]
    return cleanup_list(reviewers)
def get_approving_reviewers(props):
  """Retrieves the reviewers that approved a CL from the issue properties with
  messages.

  Note that the list may contain reviewers that are not committer, thus are not
  considered by the CQ.
  """
  reviewers = props['reviewers']
  approvers = set()
  for message in props['messages']:
    sender = message['sender']
    # Only count approvals coming from people actually listed as reviewers.
    if message['approval'] and sender in reviewers:
      approvers.add(sender)
  return sorted(approvers)
def FindCodereviewSettingsFile(filename='codereview.settings'):
  """Finds the given file starting in the cwd and going up.

  Only looks up to the top of the repository unless an
  'inherit-review-settings-ok' file exists in the root of the repository.
  Returns an open file object, or None when no settings file is found.
  """
  inherit_ok_file = 'inherit-review-settings-ok'
  stop_dir = settings.GetRoot()
  if os.path.isfile(os.path.join(stop_dir, inherit_ok_file)):
    # Opt-in marker present: allow the search to continue past the repo root.
    stop_dir = '/'
  cur = os.getcwd()
  while True:
    candidate = os.path.join(cur, filename)
    if filename in os.listdir(cur) and os.path.isfile(candidate):
      return open(candidate)
    if cur == stop_dir:
      return None
    cur = os.path.dirname(cur)
def LoadCodereviewSettingsFromFile(fileobj):
  """Parse a codereview.settings file and updates hooks."""
  keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())

  def _apply(name, setting, unset_error_ok=False):
    # Mirror one settings-file key into git config under 'rietveld.'; a key
    # absent from the file clears the corresponding config entry.
    fullname = 'rietveld.' + name
    if setting in keyvals:
      RunGit(['config', fullname, keyvals[setting]])
    else:
      RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)

  _apply('server', 'CODE_REVIEW_SERVER')
  # Only server setting is required. Other settings can be absent.
  # In that case, we ignore errors raised during option deletion attempt.
  for name, setting in (
      ('cc', 'CC_LIST'),
      ('private', 'PRIVATE'),
      ('tree-status-url', 'STATUS'),
      ('viewvc-url', 'VIEW_VC'),
      ('bug-prefix', 'BUG_PREFIX'),
      ('cpplint-regex', 'LINT_REGEX'),
      ('cpplint-ignore-regex', 'LINT_IGNORE_REGEX'),
      ('project', 'PROJECT')):
    _apply(name, setting, unset_error_ok=True)

  if 'GERRIT_HOST' in keyvals:
    RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])

  if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
    # Should be of the form:
    #   PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
    #   ORIGIN_URL_CONFIG: http://src.chromium.org/git
    RunGit(['config', keyvals['PUSH_URL_CONFIG'],
            keyvals['ORIGIN_URL_CONFIG']])
def urlretrieve(source, destination):
  """urllib is broken for SSL connections via a proxy therefore we
  can't use urllib.urlretrieve()."""
  with open(destination, 'w') as out:
    payload = urllib2.urlopen(source).read()
    out.write(payload)
def hasSheBang(fname):
  """Checks fname is a #! script."""
  with open(fname) as script:
    first_two = script.read(2)
  return first_two.startswith('#!')
def DownloadHooks(force):
  """downloads hooks

  Args:
    force: True to update hooks. False to install hooks if not present.
  """
  # Only gerrit checkouts need the commit-msg hook.
  if not settings.GetIsGerrit():
    return
  src = 'https://gerrit-review.googlesource.com/tools/hooks/commit-msg'
  dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
  if os.access(dst, os.X_OK):
    # An executable hook is already installed; nothing to do.
    return
  if os.path.exists(dst) and not force:
    # A non-executable hook exists but we were not asked to update it.
    return
  try:
    urlretrieve(src, dst)
    if not hasSheBang(dst):
      DieWithError('Not a script: %s\n'
                   'You need to download from\n%s\n'
                   'into .git/hooks/commit-msg and '
                   'chmod +x .git/hooks/commit-msg' % (dst, src))
    os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
  except Exception:
    # Remove any partial download before reporting the failure.
    if os.path.exists(dst):
      os.remove(dst)
    DieWithError('\nFailed to download hooks.\n'
                 'You need to download from\n%s\n'
                 'into .git/hooks/commit-msg and '
                 'chmod +x .git/hooks/commit-msg' % src)
@subcommand.usage('[repo root containing codereview.settings]')
def CMDconfig(parser, args):
  """Edits configuration for this tree."""
  parser.add_option('--activate-update', action='store_true',
                    help='activate auto-updating [rietveld] section in '
                         '.git/config')
  parser.add_option('--deactivate-update', action='store_true',
                    help='deactivate auto-updating [rietveld] section in '
                         '.git/config')
  options, args = parser.parse_args(args)

  if options.deactivate_update:
    RunGit(['config', 'rietveld.autoupdate', 'false'])
    return
  if options.activate_update:
    RunGit(['config', '--unset', 'rietveld.autoupdate'])
    return

  if not args:
    # No URL given: configure interactively.
    GetCodereviewSettingsInteractively()
    DownloadHooks(True)
    return 0

  url = args[0]
  if not url.endswith('codereview.settings'):
    url = os.path.join(url, 'codereview.settings')

  # Load code review settings and download hooks (if available).
  LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
  DownloadHooks(True)
  return 0
def CMDbaseurl(parser, args):
  """Gets or sets base-url for this branch."""
  branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
  branch = ShortBranchName(branchref)
  _, args = parser.parse_args(args)
  setting = 'branch.%s.base-url' % branch
  if args:
    print("Setting base-url to %s" % args[0])
    return RunGit(['config', setting, args[0]], error_ok=False).strip()
  print("Current base-url:")
  return RunGit(['config', setting], error_ok=False).strip()
def CMDstatus(parser, args):
  """Show status of changelists.

  Colors are used to tell the state of the CL unless --fast is used:
    - Red not sent for review or broken
    - Blue waiting for review
    - Yellow waiting for you to reply to review
    - Green LGTM'ed
    - Magenta in the commit queue
    - Cyan was committed, branch can be deleted

  Also see 'git cl comments'.
  """
  parser.add_option('--field',
                    help='print only specific field (desc|id|patch|url)')
  parser.add_option('-f', '--fast', action='store_true',
                    help='Do not retrieve review status')
  (options, args) = parser.parse_args(args)
  if args:
    parser.error('Unsupported args: %s' % args)

  if options.field:
    # --field mode: print one property of the current branch's CL and exit.
    cl = Changelist()
    if options.field.startswith('desc'):
      print cl.GetDescription()
    elif options.field == 'id':
      issueid = cl.GetIssue()
      if issueid:
        print issueid
    elif options.field == 'patch':
      patchset = cl.GetPatchset()
      if patchset:
        print patchset
    elif options.field == 'url':
      url = cl.GetIssueURL()
      if url:
        print url
    return 0

  branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
  if not branches:
    print('No local branch found.')
    return 0

  changes = (Changelist(branchref=b) for b in branches.splitlines())
  branches = [c.GetBranch() for c in changes]
  # NOTE(review): this value is recomputed (with ShortBranchName) below
  # before its only use; this first assignment looks redundant.
  alignment = max(5, max(len(b) for b in branches))
  print 'Branches associated with reviews:'
  # Adhoc thread pool to request data concurrently.
  output = Queue.Queue()

  # Silence upload.py otherwise it becomes unwieldy.
  upload.verbosity = 0

  if not options.fast:
    def fetch(b):
      """Fetches information for an issue and returns (branch, issue, color)."""
      c = Changelist(branchref=b)
      i = c.GetIssueURL()
      props = {}
      r = None
      if i:
        try:
          props = c.GetIssueProperties()
          r = c.GetApprovingReviewers() if i else None
        except urllib2.HTTPError:
          # The issue probably doesn't exist anymore.
          i += ' (broken)'

      msgs = props.get('messages') or []

      # Map the issue state to a display color (see the docstring above).
      if not i:
        color = Fore.WHITE
      elif props.get('closed'):
        # Issue is closed.
        color = Fore.CYAN
      elif props.get('commit'):
        # Issue is in the commit queue.
        color = Fore.MAGENTA
      elif r:
        # Was LGTM'ed.
        color = Fore.GREEN
      elif not msgs:
        # No message was sent.
        color = Fore.RED
      elif msgs[-1]['sender'] != props.get('owner_email'):
        # Last message was not from the owner: waiting on the owner to reply.
        color = Fore.YELLOW
      else:
        color = Fore.BLUE
      output.put((b, i, color))

    # Process one branch synchronously to work through authentication, then
    # spawn threads to process all the other branches in parallel.
    if branches:
      fetch(branches[0])
    threads = [
      threading.Thread(target=fetch, args=(b,)) for b in branches[1:]]
    for t in threads:
      t.daemon = True
      t.start()
  else:
    # Do not use GetApprovingReviewers(), since it requires an HTTP request.
    for b in branches:
      c = Changelist(branchref=b)
      url = c.GetIssueURL()
      output.put((b, url, Fore.BLUE if url else Fore.WHITE))

  tmp = {}
  alignment = max(5, max(len(ShortBranchName(b)) for b in branches))
  for branch in sorted(branches):
    # Drain worker results until this branch's entry has arrived.
    while branch not in tmp:
      b, i, color = output.get()
      tmp[b] = (i, color)
    issue, color = tmp.pop(branch)
    reset = Fore.RESET
    if not sys.stdout.isatty():
      # Suppress ANSI colors when not writing to a terminal.
      color = ''
      reset = ''
    print ' %*s : %s%s%s' % (
        alignment, ShortBranchName(branch), color, issue, reset)

  cl = Changelist()
  print
  print 'Current branch:',
  if not cl.GetIssue():
    print 'no issue assigned.'
    return 0
  print cl.GetBranch()
  print 'Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL())
  if not options.fast:
    print 'Issue description:'
    print cl.GetDescription(pretty=True)
  return 0
def colorize_CMDstatus_doc():
  """To be called once in main() to add colors to git cl status help."""
  color_names = [name for name in dir(Fore) if name[0].isupper()]

  def _colorize(line):
    # Wrap a line mentioning a color name in that ANSI color.
    for color in color_names:
      if color in line.upper():
        # Extract whitespaces first and the leading '-'.
        indent = len(line) - len(line.lstrip(' ')) + 1
        return line[:indent] + getattr(Fore, color) + line[indent:] + Fore.RESET
    return line

  CMDstatus.__doc__ = '\n'.join(
      _colorize(line) for line in CMDstatus.__doc__.splitlines())
@subcommand.usage('[issue_number]')
def CMDissue(parser, args):
  """Sets or displays the current code review issue number.

  Pass issue number 0 to clear the current issue.
  """
  _, args = parser.parse_args(args)

  cl = Changelist()
  if len(args) > 0:
    # A numeric argument (re)assigns the issue for the current branch.
    try:
      issue = int(args[0])
    except ValueError:
      DieWithError('Pass a number to set the issue or none to list it.\n'
          'Maybe you want to run git cl status?')
    cl.SetIssue(issue)
  print 'Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL())
  return 0
def CMDcomments(parser, args):
  """Shows review comments of the current changelist."""
  (_, args) = parser.parse_args(args)
  if args:
    parser.error('Unsupported argument: %s' % args)

  cl = Changelist()
  if cl.GetIssue():
    data = cl.GetIssueProperties()
    # Print messages in chronological order, color-coded: red=disapproval,
    # green=approval, magenta=from the CL owner, blue=anything else.
    for message in sorted(data['messages'], key=lambda x: x['date']):
      if message['disapproval']:
        color = Fore.RED
      elif message['approval']:
        color = Fore.GREEN
      elif message['sender'] == data['owner_email']:
        color = Fore.MAGENTA
      else:
        color = Fore.BLUE
      print '\n%s%s %s%s' % (
          color, message['date'].split('.', 1)[0], message['sender'],
          Fore.RESET)
      if message['text'].strip():
        print '\n'.join(' ' + l for l in message['text'].splitlines())
  return 0
def CMDdescription(parser, args):
  """Brings up the editor for the current CL's description."""
  cl = Changelist()
  if not cl.GetIssue():
    DieWithError('This branch has no associated changelist.')
  desc = ChangeDescription(cl.GetDescription())
  desc.prompt()
  cl.UpdateDescription(desc.description)
  return 0
def CreateDescriptionFromLog(args):
  """Pulls out the commit log to use as a base for the CL description."""
  if len(args) == 1 and not args[0].endswith('.'):
    # Single ref: everything from that ref up to HEAD.
    log_args = [args[0] + '..']
  elif len(args) == 1 and args[0].endswith('...'):
    # Symmetric-difference notation: drop one dot to make a range.
    log_args = [args[0][:-1]]
  elif len(args) == 2:
    log_args = ['%s..%s' % (args[0], args[1])]
  else:
    log_args = list(args)  # Hope for the best!
  return RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
def CMDlint(parser, args):
  """Runs cpplint on the current changelist."""
  parser.add_option('--filter', action='append', metavar='-x,+y',
                    help='Comma-separated list of cpplint\'s category-filters')
  (options, args) = parser.parse_args(args)

  # Access to a protected member _XX of a client class
  # pylint: disable=W0212
  try:
    import cpplint
    import cpplint_chromium
  except ImportError:
    print "Your depot_tools is missing cpplint.py and/or cpplint_chromium.py."
    return 1

  # Change the current working directory before calling lint so that it
  # shows the correct base.
  previous_cwd = os.getcwd()
  os.chdir(settings.GetRoot())
  try:
    cl = Changelist()
    change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
    files = [f.LocalPath() for f in change.AffectedFiles()]
    if not files:
      print "Cannot lint an empty CL"
      return 1

    # Process cpplints arguments if any.
    command = args + files
    if options.filter:
      command = ['--filter=' + ','.join(options.filter)] + command
    filenames = cpplint.ParseArguments(command)

    # Lint only files matching the whitelist regex and not the blacklist one.
    white_regex = re.compile(settings.GetLintRegex())
    black_regex = re.compile(settings.GetLintIgnoreRegex())
    extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
    for filename in filenames:
      if white_regex.match(filename):
        if black_regex.match(filename):
          print "Ignoring file %s" % filename
        else:
          cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
                              extra_check_functions)
      else:
        print "Skipping file %s" % filename
  finally:
    # Always restore the caller's working directory.
    os.chdir(previous_cwd)
  print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
  if cpplint._cpplint_state.error_count != 0:
    return 1
  return 0
def CMDpresubmit(parser, args):
  """Runs presubmit tests on the current changelist."""
  parser.add_option('-u', '--upload', action='store_true',
                    help='Run upload hook instead of the push/dcommit hook')
  parser.add_option('-f', '--force', action='store_true',
                    help='Run checks even if tree is dirty')
  (options, args) = parser.parse_args(args)

  if not options.force and is_dirty_git_tree('presubmit'):
    print 'use --force to check even if tree is dirty.'
    return 1

  cl = Changelist()
  if args:
    # An explicit base branch may be given as the first positional arg.
    base_branch = args[0]
  else:
    # Default to diffing against the common ancestor of the upstream branch.
    base_branch = cl.GetCommonAncestorWithUpstream()

  cl.RunHook(
      committing=not options.upload,
      may_prompt=False,
      verbose=options.verbose,
      change=cl.GetChange(base_branch, None))
  return 0
def AddChangeIdToCommitMessage(options, args):
  """Re-commits using the current message, assumes the commit hook is in
  place.
  """
  log_desc = options.message or CreateDescriptionFromLog(args)
  # Amending with the same message gives the commit-msg hook a chance to
  # inject a Change-Id line; verify afterwards that it actually appeared.
  git_command = ['commit', '--amend', '-m', log_desc]
  RunGit(git_command)
  new_log_desc = CreateDescriptionFromLog(args)
  if CHANGE_ID in new_log_desc:
    print 'git-cl: Added Change-Id to commit message.'
  else:
    print >> sys.stderr, 'ERROR: Gerrit commit-msg hook not available.'
def GerritUpload(options, args, cl):
  """upload the current branch to gerrit."""
  # We assume the remote called "origin" is the one we want.
  # It is probably not worthwhile to support different workflows.
  remote = 'origin'
  branch = 'master'
  if options.target_branch:
    branch = options.target_branch

  change_desc = ChangeDescription(
      options.message or CreateDescriptionFromLog(args))
  if not change_desc.description:
    print "Description is empty; aborting."
    return 1
  if CHANGE_ID not in change_desc.description:
    AddChangeIdToCommitMessage(options, args)

  commits = RunGit(['rev-list', '%s/%s..' % (remote, branch)]).splitlines()
  if len(commits) > 1:
    print('WARNING: This will upload %d commits. Run the following command '
          'to see which commits will be uploaded: ' % len(commits))
    print('git log %s/%s..' % (remote, branch))
    # NOTE(review): the two adjacent string literals below concatenate to
    # "...into a singlecommit." -- a space is missing at the join.
    print('You can also use `git squash-branch` to squash these into a single'
          'commit.')
    ask_for_data('About to upload; enter to confirm.')

  if options.reviewers:
    change_desc.update_reviewers(options.reviewers)

  # Build --cc=/--reviewer= options to pass through git receive-pack.
  receive_options = []
  cc = cl.GetCCList().split(',')
  if options.cc:
    cc.extend(options.cc)
  cc = filter(None, cc)
  if cc:
    receive_options += ['--cc=' + email for email in cc]
  if change_desc.get_reviewers():
    receive_options.extend(
        '--reviewer=' + email for email in change_desc.get_reviewers())

  git_command = ['push']
  if receive_options:
    git_command.append('--receive-pack=git receive-pack %s' %
                       ' '.join(receive_options))
  git_command += [remote, 'HEAD:refs/for/' + branch]
  RunGit(git_command)
  # TODO(ukai): parse Change-Id: and set issue number?
  return 0
def RietveldUpload(options, args, cl, change):
  """upload the patch to rietveld."""
  upload_args = ['--assume_yes']  # Don't ask about untracked files.
  upload_args.extend(['--server', cl.GetRietveldServer()])
  if options.emulate_svn_auto_props:
    upload_args.append('--emulate_svn_auto_props')

  change_desc = None

  if options.email is not None:
    upload_args.extend(['--email', options.email])

  if cl.GetIssue():
    # Existing issue: add a new patchset to it.
    if options.title:
      upload_args.extend(['--title', options.title])
    if options.message:
      upload_args.extend(['--message', options.message])
    upload_args.extend(['--issue', str(cl.GetIssue())])
    print ("This branch is associated with issue %s. "
           "Adding patch to that issue." % cl.GetIssue())
  else:
    # New issue: build the description (possibly interactively).
    if options.title:
      upload_args.extend(['--title', options.title])
    message = options.title or options.message or CreateDescriptionFromLog(args)
    change_desc = ChangeDescription(message)
    if options.reviewers:
      change_desc.update_reviewers(options.reviewers)
    if options.auto_bots:
      # Append an auto-generated CQ_TRYBOTS= line based on presubmit data.
      masters = presubmit_support.DoGetTryMasters(
          change,
          change.LocalPaths(),
          settings.GetRoot(),
          None,
          None,
          options.verbose,
          sys.stdout)
      if masters:
        change_description = change_desc.description + '\nCQ_TRYBOTS='
        lst = []
        for master, mapping in masters.iteritems():
          lst.append(master + ':' + ','.join(mapping.keys()))
        change_desc.set_description(change_description + ';'.join(lst))
    if not options.force:
      change_desc.prompt()

    if not change_desc.description:
      print "Description is empty; aborting."
      return 1

    upload_args.extend(['--message', change_desc.description])
    if change_desc.get_reviewers():
      upload_args.append('--reviewers=' + ','.join(change_desc.get_reviewers()))
    if options.send_mail:
      if not change_desc.get_reviewers():
        DieWithError("Must specify reviewers to send email.")
      upload_args.append('--send_mail')

    # We check this before applying rietveld.private assuming that in
    # rietveld.cc only addresses which we can send private CLs to are listed
    # if rietveld.private is set, and so we should ignore rietveld.cc only when
    # --private is specified explicitly on the command line.
    if options.private:
      logging.warn('rietveld.cc is ignored since private flag is specified. '
                   'You need to review and add them manually if necessary.')
      cc = cl.GetCCListWithoutDefault()
    else:
      cc = cl.GetCCList()
    cc = ','.join(filter(None, (cc, ','.join(options.cc))))
    if cc:
      upload_args.extend(['--cc', cc])

  if options.private or settings.GetDefaultPrivateFlag() == "True":
    upload_args.append('--private')

  upload_args.extend(['--git_similarity', str(options.similarity)])
  if not options.find_copies:
    upload_args.extend(['--git_no_find_copies'])

  # Include the upstream repo's URL in the change -- this is useful for
  # projects that have their source spread across multiple repos.
  remote_url = cl.GetGitBaseUrlFromConfig()
  if not remote_url:
    if settings.GetIsGitSvn():
      # URL is dependent on the current directory.
      data = RunGit(['svn', 'info'], cwd=settings.GetRoot())
      if data:
        keys = dict(line.split(': ', 1) for line in data.splitlines()
                    if ': ' in line)
        remote_url = keys.get('URL', None)
    else:
      if cl.GetRemoteUrl() and '/' in cl.GetUpstreamBranch():
        remote_url = (cl.GetRemoteUrl() + '@'
                      + cl.GetUpstreamBranch().split('/')[-1])
  if remote_url:
    upload_args.extend(['--base_url', remote_url])
  project = settings.GetProject()
  if project:
    upload_args.extend(['--project', project])

  try:
    upload_args = ['upload'] + upload_args + args
    logging.info('upload.RealMain(%s)', upload_args)
    issue, patchset = upload.RealMain(upload_args)
    issue = int(issue)
    patchset = int(patchset)
  except KeyboardInterrupt:
    sys.exit(1)
  except:
    # If we got an exception after the user typed a description for their
    # change, back up the description before re-raising.
    if change_desc:
      backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
      print '\nGot exception while uploading -- saving description to %s\n' \
          % backup_path
      backup_file = open(backup_path, 'w')
      backup_file.write(change_desc.description)
      backup_file.close()
    raise

  if not cl.GetIssue():
    cl.SetIssue(issue)
  cl.SetPatchset(patchset)

  if options.use_commit_queue:
    cl.SetFlag('commit', '1')
  return 0
def cleanup_list(l):
  """Fixes a list so that comma separated items are put as individual items.

  So that "--reviewers joe@c,john@c --reviewers joa@c" results in
  options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).
  """
  # A flat generator avoids the quadratic list concatenation that
  # sum(list_of_lists, []) performs; empty entries are dropped.
  stripped_items = (
      item.strip() for entry in l for item in entry.split(','))
  return sorted(item for item in stripped_items if item)
@subcommand.usage('[args to "git diff"]')
def CMDupload(parser, args):
  """Uploads the current changelist to codereview."""
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('--bypass-watchlists', action='store_true',
                    dest='bypass_watchlists',
                    help='bypass watchlists auto CC-ing reviewers')
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-m', dest='message', help='message for patchset')
  parser.add_option('-t', dest='title', help='title for patchset')
  parser.add_option('-r', '--reviewers',
                    action='append', default=[],
                    help='reviewer email addresses')
  parser.add_option('--cc',
                    action='append', default=[],
                    help='cc email addresses')
  parser.add_option('-s', '--send-mail', action='store_true',
                    help='send email to reviewer immediately')
  parser.add_option('--emulate_svn_auto_props',
                    '--emulate-svn-auto-props',
                    action="store_true",
                    dest="emulate_svn_auto_props",
                    help="Emulate Subversion's auto properties feature.")
  parser.add_option('-c', '--use-commit-queue', action='store_true',
                    help='tell the commit queue to commit this patchset')
  parser.add_option('--private', action='store_true',
                    help='set the review private (rietveld only)')
  parser.add_option('--target_branch',
                    '--target-branch',
                    help='When uploading to gerrit, remote branch to '
                         'use for CL. Default: master')
  parser.add_option('--email', default=None,
                    help='email address to use to connect to Rietveld')
  parser.add_option('--auto-bots', default=False, action='store_true',
                    help='Autogenerate which trybots to use for this CL')

  add_git_similarity(parser)
  (options, args) = parser.parse_args(args)

  if options.target_branch and not settings.GetIsGerrit():
    parser.error('Use --target_branch for non gerrit repository.')

  if is_dirty_git_tree('upload'):
    return 1

  options.reviewers = cleanup_list(options.reviewers)
  options.cc = cleanup_list(options.cc)

  cl = Changelist()
  if args:
    # TODO(ukai): is it ok for gerrit case?
    base_branch = args[0]
  else:
    # Default to diffing against common ancestor of upstream branch
    base_branch = cl.GetCommonAncestorWithUpstream()
    args = [base_branch, 'HEAD']

  # Apply watchlists on upload.
  change = cl.GetChange(base_branch, None)
  watchlist = watchlists.Watchlists(change.RepositoryRoot())
  files = [f.LocalPath() for f in change.AffectedFiles()]
  if not options.bypass_watchlists:
    cl.SetWatchers(watchlist.GetWatchersForPaths(files))

  if not options.bypass_hooks:
    if options.reviewers:
      # Set the reviewer list now so that presubmit checks can access it.
      change_description = ChangeDescription(change.FullDescriptionText())
      change_description.update_reviewers(options.reviewers)
      change.SetDescriptionText(change_description.description)
    hook_results = cl.RunHook(committing=False,
                              may_prompt=not options.force,
                              verbose=options.verbose,
                              change=change)
    if not hook_results.should_continue():
      return 1
    if not options.reviewers and hook_results.reviewers:
      options.reviewers = hook_results.reviewers.split(',')

  if cl.GetIssue():
    # Warn when the locally recorded patchset lags behind the server's.
    latest_patchset = cl.GetMostRecentPatchset()
    local_patchset = cl.GetPatchset()
    if latest_patchset and local_patchset and local_patchset != latest_patchset:
      print ('The last upload made from this repository was patchset #%d but '
             'the most recent patchset on the server is #%d.'
             % (local_patchset, latest_patchset))
      print ('Uploading will still work, but if you\'ve uploaded to this issue '
             'from another machine or branch the patch you\'re uploading now '
             'might not include those changes.')
      ask_for_data('About to upload; enter to confirm.')

  print_stats(options.similarity, options.find_copies, args)
  if settings.GetIsGerrit():
    return GerritUpload(options, args, cl)
  ret = RietveldUpload(options, args, cl, change)
  if not ret:
    # Record the uploaded revision so later commands can detect divergence.
    git_set_branch_value('last-upload-hash',
                         RunGit(['rev-parse', 'HEAD']).strip())

  return ret
def IsSubmoduleMergeCommit(ref):
  """Return True if remote HEAD at |ref| is the submodule merge commit.

  When submodules are added to the repo, we expect a single non-git-svn
  merge commit at remote HEAD carrying a signature comment; look for it
  with `git rev-list --merges --grep`.
  """
  signature = '^SVN changes up to revision [0-9]*$'
  rev_list_cmd = ['rev-list', '--merges', '--grep=%s' % signature,
                  '%s^!' % ref]
  return RunGit(rev_list_cmd) != ''
def SendUpstream(parser, args, cmd):
  """Common code for CMDland and CmdDCommit

  Squashes branch into a single commit.
  Updates changelog with metadata (e.g. pointer to review).
  Pushes/dcommits the code upstream.
  Updates review and closes.

  Args:
    parser: optparse.OptionParser shared with the calling subcommand.
    args: remaining command-line args; args[0], if present, names the base
      branch to land against.
    cmd: 'push' (plain git) or 'dcommit' (git-svn); selects the upstreaming
      mechanism and some validation steps.

  Returns:
    0 on success, 1 on any precondition/hook failure.
  """
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('-m', dest='message',
                    help="override review description")
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-c', dest='contributor',
                    help="external contributor for patch (appended to " +
                    "description and used as author for git). Should be " +
                    "formatted as 'First Last <email@example.com>'")
  add_git_similarity(parser)
  (options, args) = parser.parse_args(args)
  cl = Changelist()
  current = cl.GetBranch()
  remote, upstream_branch = cl.FetchUpstreamTuple(cl.GetBranch())
  if not settings.GetIsGitSvn() and remote == '.':
    # Landing into another local branch is almost always a mistake; refuse
    # and point the user at the likely fixes.
    print
    print 'Attempting to push branch %r into another local branch!' % current
    print
    print 'Either reparent this branch on top of origin/master:'
    print ' git reparent-branch --root'
    print
    print 'OR run `git rebase-update` if you think the parent branch is already'
    print 'committed.'
    print
    print ' Current parent: %r' % upstream_branch
    return 1
  if not args or cmd == 'push':
    # Default to merging against our best guess of the upstream branch.
    args = [cl.GetUpstreamBranch()]
  if options.contributor:
    # NOTE(review): 'contibutor' typo below is in a user-facing message.
    if not re.match('^.*\s<\S+@\S+>$', options.contributor):
      print "Please provide contibutor as 'First Last <email@example.com>'"
      return 1
  base_branch = args[0]
  base_has_submodules = IsSubmoduleMergeCommit(base_branch)
  if is_dirty_git_tree(cmd):
    return 1
  # This rev-list syntax means "show all commits not in my branch that
  # are in base_branch".
  upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
                             base_branch]).splitlines()
  if upstream_commits:
    print ('Base branch "%s" has %d commits '
           'not in this branch.' % (base_branch, len(upstream_commits)))
    print 'Run "git merge %s" before attempting to %s.' % (base_branch, cmd)
    return 1
  # This is the revision `svn dcommit` will commit on top of.
  svn_head = RunGit(['log', '--grep=^git-svn-id:', '-1',
                     '--pretty=format:%H'])
  if cmd == 'dcommit':
    # If the base_head is a submodule merge commit, the first parent of the
    # base_head should be a git-svn commit, which is what we're interested in.
    base_svn_head = base_branch
    if base_has_submodules:
      base_svn_head += '^1'
    extra_commits = RunGit(['rev-list', '^' + svn_head, base_svn_head])
    if extra_commits:
      print ('This branch has %d additional commits not upstreamed yet.'
             % len(extra_commits.splitlines()))
      print ('Upstream "%s" or rebase this branch on top of the upstream trunk '
             'before attempting to %s.' % (base_branch, cmd))
      return 1
  base_branch = RunGit(['merge-base', base_branch, 'HEAD']).strip()
  if not options.bypass_hooks:
    # Run the committing presubmit hooks before anything touches the tree.
    author = None
    if options.contributor:
      author = re.search(r'\<(.*)\>', options.contributor).group(1)
    hook_results = cl.RunHook(
        committing=True,
        may_prompt=not options.force,
        verbose=options.verbose,
        change=cl.GetChange(base_branch, author))
    if not hook_results.should_continue():
      return 1
    if cmd == 'dcommit':
      # Check the tree status if the tree status URL is set.
      status = GetTreeStatus()
      if 'closed' == status:
        print('The tree is closed. Please wait for it to reopen. Use '
              '"git cl dcommit --bypass-hooks" to commit on a closed tree.')
        return 1
      elif 'unknown' == status:
        print('Unable to determine tree status. Please verify manually and '
              'use "git cl dcommit --bypass-hooks" to commit on a closed tree.')
  else:
    # Hooks were bypassed; record that fact server-side for auditing.
    breakpad.SendStack(
        'GitClHooksBypassedCommit',
        'Issue %s/%s bypassed hook when committing (tree status was "%s")' %
        (cl.GetRietveldServer(), cl.GetIssue(), GetTreeStatus()),
        verbose=False)
  change_desc = ChangeDescription(options.message)
  if not change_desc.description and cl.GetIssue():
    change_desc = ChangeDescription(cl.GetDescription())
  if not change_desc.description:
    if not cl.GetIssue() and options.bypass_hooks:
      change_desc = ChangeDescription(CreateDescriptionFromLog([base_branch]))
    else:
      print 'No description set.'
      print 'Visit %s/edit to set it.' % (cl.GetIssueURL())
      return 1
  # Keep a separate copy for the commit message, because the commit message
  # contains the link to the Rietveld issue, while the Rietveld message
  # contains the commit viewvc url.
  if cl.GetIssue():
    change_desc.update_reviewers(cl.GetApprovingReviewers())
  commit_desc = ChangeDescription(change_desc.description)
  if cl.GetIssue():
    commit_desc.append_footer('Review URL: %s' % cl.GetIssueURL())
  if options.contributor:
    commit_desc.append_footer('Patch from %s.' % options.contributor)
  print('Description:')
  print(commit_desc.description)
  branches = [base_branch, cl.GetBranchRef()]
  if not options.force:
    print_stats(options.similarity, options.find_copies, branches)
  # We want to squash all this branch's commits into one commit with the proper
  # description. We do this by doing a "reset --soft" to the base branch (which
  # keeps the working copy the same), then dcommitting that. If origin/master
  # has a submodule merge commit, we'll also need to cherry-pick the squashed
  # commit onto a branch based on the git-svn head.
  MERGE_BRANCH = 'git-cl-commit'
  CHERRY_PICK_BRANCH = 'git-cl-cherry-pick'
  # Delete the branches if they exist.
  for branch in [MERGE_BRANCH, CHERRY_PICK_BRANCH]:
    showref_cmd = ['show-ref', '--quiet', '--verify', 'refs/heads/%s' % branch]
    result = RunGitWithCode(showref_cmd)
    if result[0] == 0:
      RunGit(['branch', '-D', branch])
  # We might be in a directory that's present in this branch but not in the
  # trunk. Move up to the top of the tree so that git commands that expect a
  # valid CWD won't fail after we check out the merge branch.
  rel_base_path = settings.GetRelativeRoot()
  if rel_base_path:
    os.chdir(rel_base_path)
  # Stuff our change into the merge branch.
  # We wrap in a try...finally block so if anything goes wrong,
  # we clean up the branches.
  retcode = -1
  try:
    RunGit(['checkout', '-q', '-b', MERGE_BRANCH])
    RunGit(['reset', '--soft', base_branch])
    if options.contributor:
      RunGit(
          [
            'commit', '--author', options.contributor,
            '-m', commit_desc.description,
          ])
    else:
      RunGit(['commit', '-m', commit_desc.description])
    if base_has_submodules:
      # Re-apply the squashed commit on top of the git-svn head so dcommit
      # sees a linear history.
      cherry_pick_commit = RunGit(['rev-list', 'HEAD^!']).rstrip()
      RunGit(['branch', CHERRY_PICK_BRANCH, svn_head])
      RunGit(['checkout', CHERRY_PICK_BRANCH])
      RunGit(['cherry-pick', cherry_pick_commit])
    if cmd == 'push':
      # push the merge branch.
      remote, branch = cl.FetchUpstreamTuple(cl.GetBranch())
      retcode, output = RunGitWithCode(
          ['push', '--porcelain', remote, 'HEAD:%s' % branch])
      logging.debug(output)
    else:
      # dcommit the merge branch.
      retcode, output = RunGitWithCode(['svn', 'dcommit',
                                        '-C%s' % options.similarity,
                                        '--no-rebase', '--rmdir'])
  finally:
    # And then swap back to the original branch and clean up.
    RunGit(['checkout', '-q', cl.GetBranch()])
    RunGit(['branch', '-D', MERGE_BRANCH])
    if base_has_submodules:
      RunGit(['branch', '-D', CHERRY_PICK_BRANCH])
  if cl.GetIssue():
    # Extract the committed revision/hash from the push or dcommit output.
    # NOTE(review): "ouput" typo below is in a user-facing error message.
    if cmd == 'dcommit' and 'Committed r' in output:
      revision = re.match('.*?\nCommitted r(\\d+)', output, re.DOTALL).group(1)
    elif cmd == 'push' and retcode == 0:
      match = (re.match(r'.*?([a-f0-9]{7,})\.\.([a-f0-9]{7,})$', l)
               for l in output.splitlines(False))
      match = filter(None, match)
      if len(match) != 1:
        DieWithError("Couldn't parse ouput to extract the committed hash:\n%s" %
                     output)
      revision = match[0].group(2)
    else:
      return 1
    viewvc_url = settings.GetViewVCUrl()
    if viewvc_url and revision:
      change_desc.append_footer('Committed: ' + viewvc_url + revision)
    elif revision:
      change_desc.append_footer('Committed: ' + revision)
    print ('Closing issue '
           '(you may be prompted for your codereview password)...')
    cl.UpdateDescription(change_desc.description)
    cl.CloseIssue()
    props = cl.GetIssueProperties()
    patch_num = len(props['patchsets'])
    comment = "Committed patchset #%d manually as %s" % (patch_num, revision)
    if options.bypass_hooks:
      comment += ' (tree was closed).' if GetTreeStatus() == 'closed' else '.'
    else:
      comment += ' (presubmit successful).'
    cl.RpcServer().add_comment(cl.GetIssue(), comment)
    cl.SetIssue(None)
  if retcode == 0:
    # Run the optional post-landing hook script if present in the checkout.
    hook = POSTUPSTREAM_HOOK_PATTERN % cmd
    if os.path.isfile(hook):
      RunCommand([hook, base_branch], error_ok=True)
  return 0
@subcommand.usage('[upstream branch to apply against]')
def CMDdcommit(parser, args):
  """Commits the current changelist via git-svn."""
  # Warn loudly when the checkout does not look like git-svn: dcommitting
  # to a read-only mirror appears to succeed but is silently dropped.
  if not settings.GetIsGitSvn():
    message = """This doesn't appear to be an SVN repository.
If your project has a git mirror with an upstream SVN master, you probably need
to run 'git svn init', see your project's git mirror documentation.
If your project has a true writeable upstream repository, you probably want
to run 'git cl land' instead.
Choose wisely, if you get this wrong, your commit might appear to succeed but
will instead be silently ignored."""
    print(message)
    ask_for_data('[Press enter to dcommit or ctrl-C to quit]')
  return SendUpstream(parser, args, 'dcommit')
@subcommand.usage('[upstream branch to apply against]')
def CMDland(parser, args):
  """Commits the current changelist via git."""
  # Landing with plain git from a git-svn checkout is usually a mistake;
  # warn and let the user bail out before pushing.
  if settings.GetIsGitSvn():
    warnings = ('This appears to be an SVN repository.',
                'Are you sure you didn\'t mean \'git cl dcommit\'?')
    for warning in warnings:
      print(warning)
    ask_for_data('[Press enter to push or ctrl-C to quit]')
  return SendUpstream(parser, args, 'push')
@subcommand.usage('<patch url or issue id>')
def CMDpatch(parser, args):
  """Patches in a code review.

  Optionally creates a fresh branch off the upstream branch first (-b),
  then delegates the actual patch application to PatchIssue().
  """
  parser.add_option('-b', dest='newbranch',
                    help='create a new branch off trunk for the patch')
  parser.add_option('-f', '--force', action='store_true',
                    help='with -b, clobber any existing branch')
  parser.add_option('-d', '--directory', action='store', metavar='DIR',
                    help='Change to the directory DIR immediately, '
                    'before doing anything else.')
  parser.add_option('--reject', action='store_true',
                    help='failed patches spew .rej files rather than '
                    'attempting a 3-way merge')
  parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
                    help="don't commit after patch applies")
  (options, args) = parser.parse_args(args)
  # Exactly one positional argument (issue id or patch URL) is required.
  if len(args) != 1:
    parser.print_help()
    return 1
  issue_arg = args[0]
  # TODO(maruel): Use apply_issue.py
  # TODO(ukai): use gerrit-cherry-pick for gerrit repository?
  if options.newbranch:
    if options.force:
      # Clobber any pre-existing branch of the same name; errors ignored
      # because the branch may simply not exist yet.
      RunGit(['branch', '-D', options.newbranch],
             stderr=subprocess2.PIPE, error_ok=True)
    RunGit(['checkout', '-b', options.newbranch,
            Changelist().GetUpstreamBranch()])
  return PatchIssue(issue_arg, options.reject, options.nocommit,
                    options.directory)
def PatchIssue(issue_arg, reject, nocommit, directory):
  """Downloads a Rietveld patch set and applies it to the working tree.

  Args:
    issue_arg: issue id (int or digit string), or a full URL to the raw
      patch set diff.
    reject: if True, pass --reject to `git apply` (leave .rej files) instead
      of attempting a 3-way merge.
    nocommit: if True, only apply to the index; do not create a commit.
    directory: optional subdirectory to apply the patch into.

  Returns:
    0 on success; dies via DieWithError() on failure.
  """
  if type(issue_arg) is int or issue_arg.isdigit():
    # Input is an issue id. Figure out the URL.
    issue = int(issue_arg)
    cl = Changelist(issue=issue)
    patchset = cl.GetMostRecentPatchset()
    patch_data = cl.GetPatchSetDiff(issue, patchset)
  else:
    # Assume it's a URL to the patch. Default to https.
    issue_url = gclient_utils.UpgradeToHttps(issue_arg)
    match = re.match(r'.*?/issue(\d+)_(\d+).diff', issue_url)
    if not match:
      DieWithError('Must pass an issue ID or full URL for '
                   '\'Download raw patch set\'')
    issue = int(match.group(1))
    patchset = int(match.group(2))
    patch_data = urllib2.urlopen(issue_arg).read()
  # Switch up to the top-level directory, if necessary, in preparation for
  # applying the patch.
  top = settings.GetRelativeRoot()
  if top:
    os.chdir(top)
  # Git patches have a/ at the beginning of source paths. We strip that out
  # with a sed script rather than the -p flag to patch so we can feed either
  # Git or svn-style patches into the same apply command.
  # re.sub() should be used but flags=re.MULTILINE is only in python 2.7.
  try:
    patch_data = subprocess2.check_output(
        ['sed', '-e', 's|^--- a/|--- |; s|^+++ b/|+++ |'], stdin=patch_data)
  except subprocess2.CalledProcessError:
    DieWithError('Git patch mungling failed.')
  logging.info(patch_data)
  # We use "git apply" to apply the patch instead of "patch" so that we can
  # pick up file adds.
  # The --index flag means: also insert into the index (so we catch adds).
  cmd = ['git', 'apply', '--index', '-p0']
  if directory:
    cmd.extend(('--directory', directory))
  if reject:
    cmd.append('--reject')
  elif IsGitVersionAtLeast('1.7.12'):
    # --3way needs git >= 1.7.12; fall back to a plain apply otherwise.
    cmd.append('--3way')
  try:
    subprocess2.check_call(cmd, env=GetNoGitPagerEnv(),
                           stdin=patch_data, stdout=subprocess2.VOID)
  except subprocess2.CalledProcessError:
    DieWithError('Failed to apply the patch')
  # If we had an issue, commit the current state and register the issue.
  if not nocommit:
    RunGit(['commit', '-m', 'patch from issue %s' % issue])
    cl = Changelist()
    cl.SetIssue(issue)
    cl.SetPatchset(patchset)
    print "Committed patch locally."
  else:
    print "Patch applied to index."
  return 0
def CMDrebase(parser, args):
  """Rebases current branch on top of svn repo."""
  # Provide a wrapper for git svn rebase to help avoid accidental
  # git svn dcommit.
  # It's the only command that doesn't use parser at all since we just defer
  # execution to git-svn.
  # NOTE(review): RunGitWithCode returns (retcode, output) -- see its other
  # call sites in this file -- so indexing [1] returns the command's stdout,
  # not its exit status, and that string ends up in sys.exit(). Confirm
  # whether [0] was intended before changing behavior.
  return RunGitWithCode(['svn', 'rebase'] + args)[1]
def GetTreeStatus(url=None):
  """Fetches the tree status and returns either 'open', 'closed',
  'unknown' or 'unset'.

  Uses the configured tree-status URL when |url| is not given; 'unset'
  means no URL is configured at all.
  """
  url = url or settings.GetTreeStatusUrl(error_ok=True)
  if not url:
    return 'unset'
  body = urllib2.urlopen(url).read().lower()
  # The endpoint may answer with free text or a bare 0/1 flag.
  if 'closed' in body or body == '0':
    return 'closed'
  if 'open' in body or body == '1':
    return 'open'
  return 'unknown'
def GetTreeStatusReason():
  """Fetches the tree status from a json url and returns the message
  with the reason for the tree to be opened or closed."""
  status_url = settings.GetTreeStatusUrl()
  # The JSON endpoint lives at /current next to the plain status URL.
  json_url = urlparse.urljoin(status_url, '/current?format=json')
  connection = urllib2.urlopen(json_url)
  payload = json.loads(connection.read())
  connection.close()
  return payload['message']
def GetBuilderMaster(bot_list):
  """For a given builder, fetch the master from AE if available.

  Returns a (master, error_message) tuple; exactly one element is set.
  All builders in |bot_list| must map to the same, single master.
  """
  map_url = 'https://builders-map.appspot.com/'
  try:
    builder_to_masters = json.load(urllib2.urlopen(map_url))
  except urllib2.URLError as e:
    return None, ('Failed to fetch builder-to-master map from %s. Error: %s.' %
                  (map_url, e))
  except ValueError as e:
    return None, ('Invalid json string from %s. Error: %s.' % (map_url, e))
  if not builder_to_masters:
    return None, 'Failed to build master map.'

  chosen_master = ''
  for bot in bot_list:
    # A --bot value may carry a test filter after ':'; only the builder
    # name participates in the lookup.
    builder_name = bot.split(':', 1)[0]
    candidates = builder_to_masters.get(builder_name, [])
    if not candidates:
      return None, ('No matching master for builder %s.' % builder_name)
    if len(candidates) > 1:
      return None, ('The builder name %s exists in multiple masters %s.' %
                    (builder_name, candidates))
    if not chosen_master:
      chosen_master = candidates[0]
    elif chosen_master != candidates[0]:
      return None, 'The builders do not belong to the same master.'
  return chosen_master, None
def CMDtree(parser, args):
"""Shows the status of the tree."""
_, args = parser.parse_args(args)
status = GetTreeStatus()
if 'unset' == status:
print 'You must configure your tree status URL by running "git cl config".'
return 2
print "The tree is %s" % status
print
print GetTreeStatusReason()
if status != 'open':
return 1
return 0
def CMDtry(parser, args):
  """Triggers a try job through Rietveld.

  Builds a {master: {builder: [tests]}} map, either from the --bot flags or
  from the project's PRESUBMIT.py files, and asks the Rietveld server to
  trigger try jobs for the latest uploaded patchset.
  """
  group = optparse.OptionGroup(parser, "Try job options")
  group.add_option(
      "-b", "--bot", action="append",
      help=("IMPORTANT: specify ONE builder per --bot flag. Use it multiple "
            "times to specify multiple builders. ex: "
            "'-b win_rel:ui_tests,webkit_unit_tests -b win_layout'. See "
            "the try server waterfall for the builders name and the tests "
            "available. Can also be used to specify gtest_filter, e.g. "
            "-b win_rel:base_unittests:ValuesTest.*Value"))
  group.add_option(
      "-m", "--master", default='',
      help=("Specify a try master where to run the tries."))
  group.add_option(
      "-r", "--revision",
      help="Revision to use for the try job; default: the "
           "revision will be determined by the try server; see "
           "its waterfall for more info")
  group.add_option(
      "-c", "--clobber", action="store_true", default=False,
      help="Force a clobber before building; e.g. don't do an "
           "incremental build")
  group.add_option(
      "--project",
      help="Override which project to use. Projects are defined "
           "server-side to define what default bot set to use")
  group.add_option(
      "-t", "--testfilter", action="append", default=[],
      help=("Apply a testfilter to all the selected builders. Unless the "
            "builders configurations are similar, use multiple "
            "--bot <builder>:<test> arguments."))
  group.add_option(
      "-n", "--name", help="Try job name; default to current branch name")
  parser.add_option_group(group)
  options, args = parser.parse_args(args)
  if args:
    parser.error('Unknown arguments: %s' % args)
  cl = Changelist()
  # Try jobs can only run against an uploaded, non-private issue.
  if not cl.GetIssue():
    parser.error('Need to upload first')
  props = cl.GetIssueProperties()
  if props.get('private'):
    parser.error('Cannot use trybots with private issue')
  if not options.name:
    options.name = cl.GetBranch()
  if options.bot and not options.master:
    # Derive the master from the builder names via the AppEngine map.
    options.master, err_msg = GetBuilderMaster(options.bot)
    if err_msg:
      parser.error('Tryserver master cannot be found because: %s\n'
                   'Please manually specify the tryserver master'
                   ', e.g. "-m tryserver.chromium.linux".' % err_msg)

  def GetMasterMap():
    # Builds the {master: {builder: [tests]}} map described in the
    # function docstring; closes over options/cl/parser above.
    # Process --bot and --testfilter.
    if not options.bot:
      change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
      # Get try masters from PRESUBMIT.py files.
      masters = presubmit_support.DoGetTryMasters(
          change,
          change.LocalPaths(),
          settings.GetRoot(),
          None,
          None,
          options.verbose,
          sys.stdout)
      if masters:
        return masters
      # Fall back to deprecated method: get try slaves from PRESUBMIT.py files.
      options.bot = presubmit_support.DoGetTrySlaves(
          change,
          change.LocalPaths(),
          settings.GetRoot(),
          None,
          None,
          options.verbose,
          sys.stdout)
    if not options.bot:
      parser.error('No default try builder to try, use --bot')

    builders_and_tests = {}
    # TODO(machenbach): The old style command-line options don't support
    # multiple try masters yet.
    old_style = filter(lambda x: isinstance(x, basestring), options.bot)
    new_style = filter(lambda x: isinstance(x, tuple), options.bot)
    for bot in old_style:
      if ':' in bot:
        # 'builder:test1,test2' syntax.
        builder, tests = bot.split(':', 1)
        builders_and_tests.setdefault(builder, []).extend(tests.split(','))
      elif ',' in bot:
        parser.error('Specify one bot per --bot flag')
      else:
        builders_and_tests.setdefault(bot, []).append('defaulttests')
    for bot, tests in new_style:
      builders_and_tests.setdefault(bot, []).extend(tests)
    # Return a master map with one master to be backwards compatible. The
    # master name defaults to an empty string, which will cause the master
    # not to be set on rietveld (deprecated).
    return {options.master: builders_and_tests}

  masters = GetMasterMap()
  if options.testfilter:
    # Replace every builder's test list with the forced filter, except
    # compile-only builders which keep their single 'compile' step.
    forced_tests = sum((t.split(',') for t in options.testfilter), [])
    masters = dict((master, dict(
        (b, forced_tests) for b, t in slaves.iteritems()
        if t != ['compile'])) for master, slaves in masters.iteritems())
  for builders in masters.itervalues():
    if any('triggered' in b for b in builders):
      print >> sys.stderr, (
          'ERROR You are trying to send a job to a triggered bot. This type of'
          ' bot requires an\ninitial job from a parent (usually a builder). '
          'Instead send your job to the parent.\n'
          'Bot list: %s' % builders)
      return 1
  patchset = cl.GetMostRecentPatchset()
  if patchset and patchset != cl.GetPatchset():
    print(
        '\nWARNING Mismatch between local config and server. Did a previous '
        'upload fail?\ngit-cl try always uses latest patchset from rietveld. '
        'Continuing using\npatchset %s.\n' % patchset)
  try:
    cl.RpcServer().trigger_distributed_try_jobs(
        cl.GetIssue(), patchset, options.name, options.clobber,
        options.revision, masters)
  except urllib2.HTTPError, e:
    if e.code == 404:
      print('404 from rietveld; '
            'did you mean to use "git try" instead of "git cl try"?')
      return 1
  print('Tried jobs on:')
  for (master, builders) in masters.iteritems():
    if master:
      print 'Master: %s' % master
    # Align builder names on the longest one for readable output.
    length = max(len(builder) for builder in builders)
    for builder in sorted(builders):
      print ' %*s: %s' % (length, builder, ','.join(builders[builder]))
  return 0
@subcommand.usage('[new upstream branch]')
def CMDupstream(parser, args):
  """Prints or sets the name of the upstream branch, if any."""
  _, args = parser.parse_args(args)
  if len(args) > 1:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  cl = Changelist()
  if args:
    # One arg means set upstream branch.
    branch = cl.GetBranch()
    RunGit(['branch', '--set-upstream', branch, args[0]])
    # Re-create the Changelist so it reads the freshly-written upstream
    # config rather than any cached value.
    cl = Changelist()
    print "Upstream branch set to " + cl.GetUpstreamBranch()
    # Clear configured merge-base, if there is one.
    git_common.remove_merge_base(branch)
  else:
    print cl.GetUpstreamBranch()
  return 0
def CMDweb(parser, args):
  """Opens the current CL in the web browser."""
  _, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))

  url = Changelist().GetIssueURL()
  if not url:
    # Nothing has been uploaded from this branch yet, so there is no page.
    print >> sys.stderr, 'ERROR No issue to open'
    return 1
  webbrowser.open(url)
  return 0
def CMDset_commit(parser, args):
  """Sets the commit bit to trigger the Commit Queue."""
  _, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  changelist = Changelist()
  # The commit queue may not be pointed at private issues.
  if changelist.GetIssueProperties().get('private'):
    parser.error('Cannot set commit on private issue')
  changelist.SetFlag('commit', '1')
  return 0
def CMDset_close(parser, args):
  """Closes the issue."""
  _, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  changelist = Changelist()
  # Fetch the description first to make sure there actually is an issue
  # associated with this branch before trying to close it.
  changelist.GetDescription()
  changelist.CloseIssue()
  return 0
def CMDdiff(parser, args):
  """shows differences between local tree and last upload.

  Checks out a temporary branch at the merge-base, applies the latest
  Rietveld patchset to it, diffs it against the working branch, then
  restores the original branch and deletes the temporary one.
  """
  cl = Changelist()
  issue = cl.GetIssue()
  branch = cl.GetBranch()
  if not issue:
    DieWithError('No issue found for current branch (%s)' % branch)
  TMP_BRANCH = 'git-cl-diff'
  base_branch = cl.GetCommonAncestorWithUpstream()
  # Create a new branch based on the merge-base
  RunGit(['checkout', '-q', '-b', TMP_BRANCH, base_branch])
  try:
    # Patch in the latest changes from rietveld.
    rtn = PatchIssue(issue, False, False, None)
    if rtn != 0:
      return rtn
    # Switch back to starting branch and diff against the temporary
    # branch containing the latest rietveld patch.
    subprocess2.check_call(['git', 'diff', TMP_BRANCH, branch])
  finally:
    # Always restore the user's branch and drop the temporary one, even if
    # patching or diffing failed.
    RunGit(['checkout', '-q', branch])
    RunGit(['branch', '-D', TMP_BRANCH])
  return 0
def CMDowners(parser, args):
  """interactively find the owners for reviewing

  Builds the change against the given base branch (or the common ancestor
  of the upstream branch) and launches the interactive OwnersFinder over
  the affected files.

  Returns the OwnersFinder exit code.
  """
  parser.add_option(
      '--no-color',
      action='store_true',
      help='Use this option to disable color output')
  options, args = parser.parse_args(args)

  author = RunGit(['config', 'user.email']).strip() or None
  cl = Changelist()
  if args:
    if len(args) > 1:
      parser.error('Unknown args')
    base_branch = args[0]
  else:
    # Default to diffing against the common ancestor of the upstream branch.
    base_branch = cl.GetCommonAncestorWithUpstream()

  # Build the change once and reuse it; the original code called
  # cl.GetChange(base_branch, None) a second time just to list files.
  change = cl.GetChange(base_branch, None)
  return owners_finder.OwnersFinder(
      [f.LocalPath() for f in change.AffectedFiles()],
      change.RepositoryRoot(), author,
      fopen=file, os_path=os.path, glob=glob.glob,
      disable_color=options.no_color).run()
@subcommand.usage('[files or directories to diff]')
def CMDformat(parser, args):
  """Runs clang-format on the diff.

  With --full, reformats the whole content of every touched file;
  otherwise only the changed lines are piped through clang-format-diff.py.
  Returns 2 under --dry-run when formatting changes would be made.
  """
  CLANG_EXTS = ['.cc', '.cpp', '.h', '.mm', '.proto']
  parser.add_option('--full', action='store_true',
                    help='Reformat the full content of all touched files')
  parser.add_option('--dry-run', action='store_true',
                    help='Don\'t modify any file on disk.')
  parser.add_option('--diff', action='store_true',
                    help='Print diff to stdout rather than modifying files.')
  opts, args = parser.parse_args(args)
  # git diff generates paths against the root of the repository. Change
  # to that directory so clang-format can find files even within subdirs.
  rel_base_path = settings.GetRelativeRoot()
  if rel_base_path:
    os.chdir(rel_base_path)
  # Generate diff for the current branch's changes.
  diff_cmd = ['diff', '--no-ext-diff', '--no-prefix']
  if opts.full:
    # Only list the names of modified files.
    diff_cmd.append('--name-only')
  else:
    # Only generate context-less patches.
    diff_cmd.append('-U0')
  # Grab the merge-base commit, i.e. the upstream commit of the current
  # branch when it was created or the last time it was rebased. This is
  # to cover the case where the user may have called "git fetch origin",
  # moving the origin branch to a newer commit, but hasn't rebased yet.
  upstream_commit = None
  cl = Changelist()
  upstream_branch = cl.GetUpstreamBranch()
  if upstream_branch:
    upstream_commit = RunGit(['merge-base', 'HEAD', upstream_branch])
    upstream_commit = upstream_commit.strip()
  if not upstream_commit:
    DieWithError('Could not find base commit for this branch. '
                 'Are you in detached state?')
  diff_cmd.append(upstream_commit)
  # Handle source file filtering.
  diff_cmd.append('--')
  if args:
    for arg in args:
      if os.path.isdir(arg):
        # Expand a directory into per-extension pathspecs for git diff.
        diff_cmd += [os.path.join(arg, '*' + ext) for ext in CLANG_EXTS]
      elif os.path.isfile(arg):
        diff_cmd.append(arg)
      else:
        DieWithError('Argument "%s" is not a file or a directory' % arg)
  else:
    diff_cmd += ['*' + ext for ext in CLANG_EXTS]
  diff_output = RunGit(diff_cmd)
  top_dir = os.path.normpath(
      RunGit(["rev-parse", "--show-toplevel"]).rstrip('\n'))
  # Locate the clang-format binary in the checkout
  try:
    clang_format_tool = clang_format.FindClangFormatToolInChromiumTree()
  except clang_format.NotFoundError, e:
    DieWithError(e)
  if opts.full:
    # diff_output is a list of files to send to clang-format.
    files = diff_output.splitlines()
    if not files:
      print "Nothing to format."
      return 0
    cmd = [clang_format_tool]
    if not opts.dry_run and not opts.diff:
      # -i edits files in place; omitted for dry-run/diff modes.
      cmd.append('-i')
    stdout = RunCommand(cmd + files, cwd=top_dir)
    if opts.diff:
      sys.stdout.write(stdout)
  else:
    env = os.environ.copy()
    # clang-format-diff.py finds the clang-format binary via PATH.
    env['PATH'] = os.path.dirname(clang_format_tool)
    # diff_output is a patch to send to clang-format-diff.py
    try:
      script = clang_format.FindClangFormatScriptInChromiumTree(
          'clang-format-diff.py')
    except clang_format.NotFoundError, e:
      DieWithError(e)
    cmd = [sys.executable, script, '-p0']
    if not opts.dry_run and not opts.diff:
      cmd.append('-i')
    stdout = RunCommand(cmd, stdin=diff_output, cwd=top_dir, env=env)
    if opts.diff:
      sys.stdout.write(stdout)
    if opts.dry_run and len(stdout) > 0:
      # Non-empty output under --dry-run means formatting is needed.
      return 2
  return 0
class OptionParser(optparse.OptionParser):
  """Option parser for git-cl: sets prog/version and adds --verbose."""

  def __init__(self, *args, **kwargs):
    optparse.OptionParser.__init__(
        self, *args, prog='git cl', version=__version__, **kwargs)
    self.add_option(
        '-v', '--verbose', action='count', default=0,
        help='Use 2 times for more debugging info')

  def parse_args(self, args=None, values=None):
    options, args = optparse.OptionParser.parse_args(self, args, values)
    # Map 0/1/2+ occurrences of -v to WARNING/INFO/DEBUG logging.
    levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    verbosity = min(options.verbose, len(levels) - 1)
    logging.basicConfig(level=levels[verbosity])
    return options, args
def main(argv):
  """Entry point: dispatches argv to the matching CMD* subcommand.

  Returns the process exit code (2 for unsupported Python versions).
  """
  if sys.hexversion < 0x02060000:
    # 0x02060000 == Python 2.6.0; older interpreters are unsupported.
    print >> sys.stderr, (
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0])
    return 2
  # Reload settings.
  global settings
  settings = Settings()
  colorize_CMDstatus_doc()
  dispatcher = subcommand.CommandDispatcher(__name__)
  try:
    return dispatcher.execute(OptionParser(), argv)
  except urllib2.HTTPError, e:
    # Only HTTP 500 from the codereview server is treated as a known
    # transient failure; anything else propagates.
    if e.code != 500:
      raise
    DieWithError(
        ('AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
         'and retry or visit go/isgaeup.\n%s') % (e.code, str(e)))
if __name__ == '__main__':
  # These affect sys.stdout so do it outside of main() to simplify mocks in
  # unit testing.  fix_encoding wraps stdout/stderr for the platform's
  # encoding and colorama enables ANSI colors on Windows.
  fix_encoding.fix_encoding()
  colorama.init()
  sys.exit(main(sys.argv[1:]))
| smikes/depot_tools | git_cl.py | Python | bsd-3-clause | 94,998 | [
"VisIt"
] | 122f6102eef0cd2c8a28211dc75eeda7abba381c819d344dc5e98da32c21b9d9 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# __init__ - package initializer for the GRSfs kernel modules
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""
GRSfs Kernel package init
""" | heromod/migrid | mig/grsfs-fuse/fs/core/__init__.py | Python | gpl-2.0 | 988 | [
"Brian"
] | bac3396f3c51561e70d0c633b401016bba02e6c86175ab39b6cb36c02289d67f |
from lettuce import step, world
from lettuce.django import django_url
from nose.tools import assert_equal
from splinter.exceptions import ElementDoesNotExist
from django.contrib.auth.models import User
from storybase_user.models import Organization
from storybase_user.utils import format_user_name
@step(u'Given an admin user creates the Organization "([^"]*)" with website URL "([^"]*)" and description "([^"]*)" in the Django admin')
def create(step, name, website_url, description):
    """Create an Organization through the Django admin by chaining sub-steps."""
    step.given('Given the user navigates to the "Organizations" addition page')
    step.given('Given the user sets the "name" of the "Organization" to "%s"' % name)
    step.given('Given the user sets the "description" of the "Organization" to "%s"' % description)
    step.given('Given the user sets the "website URL" of the "Organization" to "%s"' % website_url)
    step.given('Given the user clicks the save button')
@step(u'Then the Organization "([^"]*)" should have a canonical URL')
def access_url(step, name):
    """Check the Organization has a UUID-based canonical URL and visit it."""
    step.given('Given the user navigates to the "Organizations" admin page')
    world.browser.click_link_by_text(name)
    # The admin readonly-field CSS class changed between Django versions,
    # so try the old selector first and fall back to the new one.
    try:
        # Django 1.3
        organization_id = world.browser.find_by_css('.organization_id p').first.value
    except ElementDoesNotExist:
        # Django 1.4
        organization_id = world.browser.find_by_css('.field-organization_id p').first.value
    world.assert_is_uuid4(organization_id)
    world.browser.visit(django_url('/organizations/%s' % organization_id))
@step(u'Then the Organization\'s website should be listed as "([^"]*)"')
def see_website_url(step, website_url):
    """Assert the website URL appears on the current page."""
    world.assert_text_present(website_url)
@step(u'Then the Organization\'s contributors list should be blank')
def no_contributors(step):
    """Assert no 'Members' section is rendered on the current page."""
    world.assert_text_not_present('Members')
@step(u'Then the Organization\'s description should be blank')
def blank_description(step):
    """Assert no 'Description' section is rendered on the current page."""
    world.assert_text_not_present('Description')
@step(u'Given the Organization "([^"]*)" is visible in the Django admin')
def exists_in_admin(step, name):
    """Open the Organization in the admin and stash its id for later steps."""
    # Visit the Organization's admin panel
    world.browser.visit(django_url('/admin/storybase_user/organization/'))
    world.browser.click_link_by_text(name)
    # The admin readonly-field CSS class changed between Django versions.
    try:
        # Django 1.3
        organization_id = world.browser.find_by_css('.organization_id p').first.value
    except ElementDoesNotExist:
        # Django 1.4
        organization_id = world.browser.find_by_css('.field-organization_id p').first.value
    world.save_info('Organization', organization_id)
@step(u'Then the Organization has the website URL "([^"]*)" in the Django admin')
def has_website_url_in_admin(step, website_url):
    """Assert the admin edit form shows the expected website URL."""
    # Assumes the browser is already on the Organization's admin edit page.
    org_website_url = world.browser.find_by_css('#id_website_url').first.value
    assert_equal(org_website_url, website_url)
@step(u'Given the user visits the admin edit page for Organization "([^"]*)"')
def visit_admin_edit_page(step, name):
    """Navigate to the admin change page for the named Organization."""
    world.browser.visit(django_url('/admin/storybase_user/organization/'))
    world.browser.click_link_by_text(name)
@step(u'Then the Organization\'s description is listed as "([^"]*)"')
def see_description(step, description):
world.assert_text_present(description)
@step(u'Then all other fields of the Organization are unchanged')
def other_fields_unchanged(step):
""" Check that the an organization's fields are unchanged """
organization = Organization.objects.get(organization_id=world.organization.organization_id)
for field in ('organization_id', 'website_url', 'description', 'created'):
if field not in world.organization_changed:
assert_equal(getattr(world.organization, field),
getattr(organization, field))
@step(u'Given an admin assigns "([^"]*)" to the Organization "([^"]*)" in the Django admin')
def assign_user_to_org(step, username, name):
    """ Assign user to organization via the Organization admin """
    user = User.objects.get(username=username)
    world.browser.visit(django_url('/admin/storybase_user/organization/'))
    world.browser.click_link_by_text(name)
    # Move the user from the "available" to the "chosen" contributors widget,
    # then save the change form.
    world.browser.select('contributors_old', user.id)
    world.browser.find_by_css('.contributors .selector-add').first.click()
    world.browser.find_by_name('_save').first.click()

@step(u'Given an admin assigns "([^"]*)" to the Organization "([^"]*)"')
def assign_org_to_user(step, username, name):
    """ Assign user to organization via the User admin """
    organization = Organization.objects.get(organizationtranslation__name=name)
    world.browser.visit(django_url('/admin/auth/user/'))
    world.browser.click_link_by_text(username)
    world.browser.select('organizations', organization.pk)
    world.browser.find_by_name('_save').first.click()

@step(r'"([^"]*)" is listed in the contributors list for Organization "([^"]*)" on its detail page')
def has_member(step, username, name):
    """Assert the user shows up as a contributor on the detail page."""
    user = User.objects.get(username=username)
    world.browser.visit(django_url('/organizations/%s' % world.organization.organization_id))
    world.assert_text_present("Organization Contributors")
    world.assert_text_present(format_user_name(user))

@step(u'Then "([^"]*)" is selected on the "([^"]*)" User admin page')
def listed_in_user_admin(step, name, username):
    """Assert the named Organization is selected in the user's admin form."""
    world.browser.visit(django_url('/admin/auth/user/'))
    world.browser.click_link_by_text(username)
    # for/else: the else clause fires only when the loop never hit "break",
    # i.e. no matching, checked option was found.
    for member_elem in world.browser.find_by_css('#id_organizations option'):
        if member_elem.text == name:
            if member_elem.checked:
                break
    else:
        assert False, "%s not found in member list" % username
    assert True

@step(u'Given an admin removes "([^"]*)" from the Organization "([^"]*)"')
def remove_user_from_org(step, username, name):
    """ Remove user from organization via the Organization admin """
    user = User.objects.get(username=username)
    # Tick the DELETE checkbox on this user's membership inline row, then save.
    world.browser.find_by_xpath("//*[contains(@class, 'dynamic-organizationmembership_set')]//option[@value='%d']/../../../..//input[@type='checkbox']" % (user.id)).first.check()
    world.browser.find_by_name('_save').first.click()

#@step(u'Given an admin removes "([^"]*)" from the Organization "([^"]*)"')
#def remove_org_from_user(step, username, name):
#    """ Remove user from organization via the User admin """
#    world.browser.visit(django_url('/admin/auth/user/'))
#    world.browser.click_link_by_text(username)
#    for member_elem in world.browser.find_by_css('#id_organizations option'):
#        if member_elem.text == name:
#            member_elem.click()
#    world.browser.find_by_name('_save').first.click()

@step(u'Then "([^"]*)" is not listed in the contributors list for Organization "([^"]*)"')
def not_member(step, username, name):
    """Assert the user does not show up as a contributor on the detail page."""
    user = User.objects.get(username=username)
    world.browser.visit(django_url('/organizations/%s' % world.organization.organization_id))
    world.assert_text_not_present("Organization Contributors")
    world.assert_text_not_present(format_user_name(user))

@step(u'Then "([^"]*)" is not selected on the "([^"]*)" User admin page')
def not_listed_in_user_admin(step, name, username):
    """Assert the named Organization is NOT selected in the user's admin form."""
    world.browser.visit(django_url('/admin/auth/user/'))
    world.browser.click_link_by_text(username)
    # for/else: returning from the else means no matching, checked option
    # was found, which is the success case here.
    for member_elem in world.browser.find_by_css('#id_organizations option'):
        if member_elem.text == name:
            if member_elem.checked:
                break
    else:
        return True
    assert False, "%s found in member list" % username
@step(u'Given the User "([^"]*)" is associated with the Organization "([^"]*)"')
def add_user_to_org(step, username, org_name):
    """Ensure a membership row exists linking the user and the Organization."""
    user = User.objects.get(username=username)
    org = Organization.objects.get(organizationtranslation__name=org_name)
    try:
        # Membership already exists; nothing to do.
        Organization.members.through.objects.get(user=user, organization=org)
    except Organization.members.through.DoesNotExist:
        Organization.members.through.objects.create(user=user, organization=org)

@step(u'Given the user navigates to the Organization\'s detail page')
def visit_detail_page(step):
    """Open the public detail page of the remembered Organization."""
    world.browser.visit(django_url('/organizations/%s' % world.organization.organization_id))

@step(u'Then the Organization\'s name is listed as "([^"]*)"')
def see_name(step, name):
    """Assert the Organization's name appears on the current page."""
    world.assert_text_present(name)

# HACK: I couldn't figure out how to write a regex that would match
# "created|last edited", so I just wrote two different step definitions and
# use this function to do the heavy lifting
def assert_organization_date_now(name, date_type):
    """Assert the given date field of the Organization is within 60s of now."""
    org = Organization.objects.get(organizationtranslation__name=name)
    if date_type == "created":
        date = getattr(org, 'created')
    elif date_type == "last edited":
        date = getattr(org, 'last_edited')
    else:
        # Unknown date_type is a programming error in the calling step.
        assert False
    world.assert_now(date, 60)

@step(u'Then the "([^"]*)" Organization\'s last edited field should be set to within 1 minute of the current date and time')
def organization_edited_now(step, name):
    """Assert the Organization was edited within the last minute."""
    assert_organization_date_now(name, "last edited")

@step(u'Then the "([^"]*)" Organization\'s created field should be set to within 1 minute of the current date and time')
def organization_created_now(step, name):
    """Assert the Organization was created within the last minute."""
    assert_organization_date_now(name, "created")
| denverfoundation/storybase | apps/storybase_user/features/organization-steps.py | Python | mit | 9,242 | [
"VisIt"
] | 49d3d59a1dab94c1a317a546ad9deb40790710d55b76ca90def70e62a3769718 |
import sys
import ast
import pprint
from numba import error
def iter_fields(node):
    """
    Yield ``(fieldname, value)`` for each entry of ``node._fields`` that is
    actually set on *node*; unset fields are silently skipped.
    """
    _missing = object()  # sentinel: distinguishes "absent" from a None value
    for fieldname in node._fields:
        value = getattr(node, fieldname, _missing)
        if value is not _missing:
            yield fieldname, value
class With(ast.stmt):
    """
    Node for AST compatibility with python 3.3.
    """
    # Mirrors the pre-3.3 With node layout: a single context-manager
    # expression, an optional assignment target, and the statement body.
    _fields = ['context_expr', 'optional_vars', 'body']
class AST3to2(ast.NodeTransformer):
    """
    Rewrite a Python 3 AST into the older shape the rest of the compiler
    expects: ``ast.arg`` entries become ``ast.Name`` nodes, ``Index`` nodes
    wrapping an ``Ellipsis`` are unwrapped, and the Python 3.3+ ``With``
    layout is flattened back to the single-context form.
    """

    def _visit_list(self, alist):
        """Visit each AST node in *alist*; drop Nones and splice in any
        sequence a visitor returns in place of a single node."""
        new_values = []
        for value in alist:
            if isinstance(value, ast.AST):
                value = self.visit(value)
                if value is None:
                    continue
                elif not isinstance(value, ast.AST):
                    # Visitor returned a list of replacement nodes.
                    new_values.extend(value)
                    continue
            new_values.append(value)
        return new_values

    def generic_visit(self, node):
        """Like NodeTransformer.generic_visit, but routes child lists through
        _visit_list so replacements are flattened consistently."""
        for field, old_value in iter_fields(node):
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                old_value[:] = self._visit_list(old_value)
            elif isinstance(old_value, ast.AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    # NOTE: the double-underscore prefix name-mangles this method, so the
    # NodeVisitor dispatch never finds it -- it is effectively disabled.
    def __visit_FunctionDef(self, node):
        new_node = ast.FunctionDef(args=self.visit_arguments(node.args),
                                   body=self._visit_list(node.body),
                                   decorator_list=self._visit_list(node.decorator_list),
                                   name=node.name)
        ast.copy_location(new_node, node)
        return new_node

    def visit_Index(self, node):
        # Unwrap Index(Ellipsis) so subscripts look like the py2 form.
        if isinstance(node.value, ast.Ellipsis):
            return node.value
        return node

    def visit_arguments(self, node):
        """Convert py3 ``ast.arg`` entries into py2-style ``ast.Name`` nodes;
        existing Name nodes pass through untouched."""
        ret = []
        for arg_node in node.args:
            if isinstance(arg_node, ast.arg):
                new_node = ast.Name(ctx=ast.Param(), id=arg_node.arg)
                ret.append(new_node)
            elif isinstance(arg_node, ast.Name):
                ret.append(arg_node)
            else:
                raise TypeError('Cannot transform node %r' % arg_node)
        return ast.arguments(args=ret, defaults=node.defaults,
                             kwarg=node.kwarg, vararg=node.vararg)

    def visit_With(self, node):
        """
        Rewrite the With statement.

        Python < 3.3:

            With(expr context_expr, expr? optional_vars, stmt* body)

        Python 3.3:

            With(withitem* items, stmt* body)
            withitem = (expr context_expr, expr? optional_vars)
        """
        if sys.version_info[:2] >= (3, 3):
            if len(node.items) > 1:
                # (message fixed: previously read "context is support")
                raise error.NumbaError(node,
                                       "Only one 'with' context is supported")
            withitem = node.items[0]
            new_node = With()
            new_node.context_expr = withitem.context_expr
            new_node.optional_vars = withitem.optional_vars
            new_node.body = node.body
            node = ast.copy_location(new_node, node)

        self.generic_visit(node)
        return node
"VisIt"
] | 42f7c345ab1da7d20b662758c203f28e4d5b8b210495fd09ffaa69eb530b3eb9 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************
espressopp.io.DumpGROAdress
***************************
dumps coordinates of atomistic particles instead of coarse-grained particles in Adress simulation
* `dump()`
write configuration to trajectory GRO file. By default filename is "out.gro",
coordinates are folded.
Properties
* `filename`
Name of trajectory file. By default trajectory file name is "out.gro"
* `unfolded`
False if coordinates are folded, True if unfolded. By default - False
* `append`
True if new trajectory data is appended to existing trajectory file. By default - True
* `length_factor`
If length dimension in current system is nm, and unit is 0.23 nm, for example, then
length_factor should be 0.23
* `length_unit`
It is length unit. Can be LJ, nm or A. By default - LJ
* ftpl
fixedtuplelist for the adres system
usage:
>>> ftpl = espressopp.FixedTupleListAdress(system.storage)
>>> ftpl.addTuples(tuples)
>>> system.storage.setFixedTuplesAdress(ftpl)
>>> system.storage.decompose()
writing down trajectory
>>> dump_conf_gro = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename='trajectory.gro')
>>> for i in range (200):
>>> integrator.run(10)
>>> dump_conf_gro.dump()
writing down trajectory using ExtAnalyze extension
>>> dump_conf_gro = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename='trajectory.gro')
>>> ext_analyze = espressopp.integrator.ExtAnalyze(dump_conf_gro, 10)
>>> integrator.addExtension(ext_analyze)
>>> integrator.run(2000)
Both examples will give the same result: 200 configurations in the trajectory .gro file.
setting up length scale
For example, the Lennard-Jones model for liquid argon with :math:`\sigma=0.34 [nm]`
>>> dump_conf_gro = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename='trj.gro', unfolded=False, length_factor=0.34, length_unit='nm', append=True)
will produce trj.gro with coordinates in nanometers
.. function:: espressopp.io.DumpGROAdress(system, fixedtuplelist, integrator, filename, unfolded, length_factor, length_unit, append)
:param system:
:param fixedtuplelist:
:param integrator:
:param filename: (default: 'out.gro')
:param unfolded: (default: False)
:param length_factor: (default: 1.0)
:param length_unit: (default: 'LJ')
:param append: (default: True)
:type system:
:type fixedtuplelist:
:type integrator:
:type filename:
:type unfolded:
:type length_factor: real
:type length_unit:
:type append:
.. function:: espressopp.io.DumpGROAdress.dump()
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.ParticleAccess import *
from _espressopp import io_DumpGROAdress
class DumpGROAdressLocal(ParticleAccessLocal, io_DumpGROAdress):
    """Per-node (worker) wrapper around the C++ ``io_DumpGROAdress`` object."""

    def __init__(self, system, fixedtuplelist, integrator, filename='out.gro', unfolded=False, length_factor=1.0, length_unit='LJ', append=True):
        # cxxinit forwards all arguments to the C++ constructor.
        cxxinit(self, io_DumpGROAdress, system, fixedtuplelist, integrator, filename, unfolded, length_factor, length_unit, append)

    def dump(self):
        # Only ranks that belong to the active PMI communicator take part
        # in writing the trajectory frame (or all ranks if no PMI group).
        if not (pmi._PMIComm and pmi._PMIComm.isActive() ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.dump(self)
if pmi.isController :
    class DumpGROAdress(ParticleAccess):
        """Controller-side PMI proxy; forwards calls to the worker-local objects."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.io.DumpGROAdressLocal',
            # 'dump' is broadcast to all workers; the listed properties are
            # proxied through to the local (C++) instances.
            pmicall = [ 'dump' ],
            pmiproperty = ['filename', 'unfolded', 'length_factor', 'length_unit', 'append']
        )
| fedepad/espressopp | src/io/DumpGROAdress.py | Python | gpl-3.0 | 4,356 | [
"ESPResSo"
] | a8822513f181d8f99185ecb798251a1092681c3feef2abbfc166864f4425279e |
#!/usr/bin/env python
#
# $Id$
# ---------------------------------------------------------------------------
"""
Calculate a cryptohash on a file or standard input.
Usage:
**digest** *algorithm* [file] ...
The *digest* utility calculates message digests of files or, if no file
is specified, standard input. The set of supported digests depends on the
current Python interpreter and the version of OpenSSL present on the system.
However, at a minimum, *digest* supports the following algorithms:
+-------------+--------------------------------------+
| Argument | Algorithm |
+=============+======================================+
| md5 | The MD5 algorithm |
+-------------+--------------------------------------+
| sha1 | The SHA-1 algorithm |
+-------------+--------------------------------------+
| sha224 | The SHA-224 algorithm |
+-------------+--------------------------------------+
| sha256 | The SHA-256 algorithm |
+-------------+--------------------------------------+
| sha384 | The SHA-384 algorithm |
+-------------+--------------------------------------+
| sha512 | The SHA-512 algorithm |
+-------------+--------------------------------------+
This program is modeled on the *digest* program found in BSD Un\*x systems
and written by Alistair G. Crooks. This Python version is an independently
implemented program based on the manual page and output from the BSD *digest*
program.
"""
__docformat__ = 'restructuredtext'
# Info about the module
__version__ = '1.0.2'
__author__ = 'Brian M. Clapper'
__email__ = 'bmc@clapper.org'
__url__ = 'http://software.clapper.org/digest/'
__copyright__ = '2008-2011 Brian M. Clapper'
__license__ = 'BSD-style license'
# Package stuff
__all__ = ['digest', 'main']
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import os
import hashlib
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
USAGE = '''Usage: %s algorithm [file] ...
Generate a message digest (cryptohash) of one or more files, or of standard
input.
"algorithm" can be one of: md5, sha1, sha224, sha384, sha512''' %\
os.path.basename(sys.argv[0])
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def die(msg):
    """Write *msg* to standard error and exit with status 1.

    Uses sys.stderr.write rather than the Python-2-only ``print >>``
    statement, so the module parses on both Python 2 and 3.
    """
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)
def digest(f, algorithm):
    """Return the hex digest of file object *f* using *algorithm*.

    Reads the file in 64 KB chunks so arbitrarily large inputs don't have
    to fit in memory. Exits via die() if the algorithm name is unknown.
    """
    try:
        h = hashlib.new(algorithm)
    except ValueError as ex:
        # Unknown/unsupported algorithm name for this OpenSSL build.
        die('%s: %s' % (algorithm, str(ex)))

    while True:
        chunk = f.read(65536)
        if not chunk:
            break
        h.update(chunk)
    return h.hexdigest()
def main():
    """Command-line entry point.

    With no file arguments, digests standard input; otherwise digests each
    named file, printing "ALGO (name) = hexdigest" per file (BSD digest
    style). Returns 0 on success.
    """
    if len(sys.argv) < 2:
        die(USAGE)

    algorithm = sys.argv[1]

    if len(sys.argv) == 2:
        # No files given: digest standard input.
        print(digest(sys.stdin, algorithm))
    else:
        u_algorithm = algorithm.upper()
        for filename in sys.argv[2:]:
            # Open in binary mode so digests are platform-independent, and
            # use "with" so the handle is always closed (the original
            # leaked it and hashed in text mode).
            with open(filename, 'rb') as f:
                print('%s (%s) = %s' % (u_algorithm, filename, digest(f, algorithm)))

    return 0

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

if __name__ == '__main__':
    sys.exit(main())
| bmc/digest | digest/__init__.py | Python | bsd-3-clause | 3,604 | [
"Brian"
] | d6e37cd02622ab2b747fb35a9ebaa67f9565e7e7506578efc54c91fe447453bb |
#!/usr/bin/env python3
#Inne Lemstra and Michiel Merkx
"""Benchmark a short sequence aligner by calling on different subscripts
These subscripts are stored as libraries in the same directory
ScriptFlow:
This python pipeline consist of the following steps:
Get hardware info (using bash commands)
Run and time the preAligment step of the aligner
Run the alignment step of the aligner (using the output of the prestep)
OutPut:
This script will print a short message to screen of timing of each alignment step.
These timings as wel as hardware info and info about aligner used will be written to a textfile. The name of this file is set iin the first part of the script.
Where ans how the alignment output of the alignment program will be written is determined in the library align. (as well as output for prestep is determined in preAlign library)
"""
import subprocess
import hardware_inquire34
import preAlign
import align
import time
# --- benchmark configuration -------------------------------------------------
mapper = "Bowtie2"
shortReads = ["../test_data/E_coli_MG1655_1.fasta", "../test_data/E_coli_MG1655_2.fasta"]

##Bwa needs these statements to run.
#mapper = "Bwa"
#shortReads = "../test_data/E_coli_MG1655.fasta"

referenceGenome = "../test_data/E_coli_reference.fasta"
sra_ID = "E_coli_MG1655"  # one of the test data sets that could be run
benchmarkFile = "../test_data/{0}_benchmark.txt".format(mapper)  # name of output

# gather hardware data
hwInfo = hardware_inquire34.getInfo()

# Run and benchmark preAlignment step (e.g. building the reference index)
bmPreAlign, preAlignOutput, debugPreAlign = preAlign.benchmark(referenceGenome)

# Run and benchmark alignment step (consumes the prestep's output)
bmAlign, debug = align.benchmark(preAlignOutput, shortReads)

# print the time of the preAlignment and alignment steps to screen
print("PreAlign: {0}s\nbmAlign: {1}s\n".format(bmPreAlign, bmAlign))

# Write results to the benchmark file; "with" guarantees the handle is
# closed even if the write fails (the original leaked the open handle).
with open(benchmarkFile, "w") as benchmarkHandle:
    benchmarkHandle.write("{0},{1},{2},{3},{4},{5}"
                          .format(sra_ID, mapper, bmPreAlign, bmAlign, hwInfo[0], hwInfo[1]))
| MWJMerkx/pcfb_project | client/pipeline.py | Python | gpl-3.0 | 1,977 | [
"BWA"
] | 143455c05ec2879cf9d6c96a9b6bb2a7d44e841315acf609f8ec273b38d40e97 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the gating feature.
"""
from textwrap import dedent
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from common.test.acceptance.tests.helpers import UniqueCourseTest
class GatingTest(UniqueCourseTest):
    """
    Test gating feature in LMS.
    """
    STAFF_USERNAME = "STAFF_TESTER"
    STAFF_EMAIL = "staff101@example.com"

    STUDENT_USERNAME = "STUDENT_TESTER"
    STUDENT_EMAIL = "student101@example.com"

    def setUp(self):
        """Install a course with two subsections and subsection gating enabled."""
        super(GatingTest, self).setUp()

        self.logout_page = LogoutPage(self.browser)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Multiple-choice problem used to fulfill the gating prerequisite.
        xml = dedent("""
        <problem>
        <p>What is height of eiffel tower without the antenna?.</p>
        <multiplechoiceresponse>
          <choicegroup label="What is height of eiffel tower without the antenna?" type="MultipleChoice">
            <choice correct="false">324 meters<choicehint>Antenna is 24 meters high</choicehint></choice>
            <choice correct="true">300 meters</choice>
            <choice correct="false">224 meters</choice>
            <choice correct="false">400 meters</choice>
          </choicegroup>
        </multiplechoiceresponse>
        </problem>
        """)
        self.problem1 = XBlockFixtureDesc('problem', 'HEIGHT OF EIFFEL TOWER', data=xml)

        # Install a course with sections/problems
        course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        # Gating is an advanced setting that must be enabled per course.
        course_fixture.add_advanced_settings({
            "enable_subsection_gating": {"value": "true"}
        })
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    self.problem1
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 2')
                )
            )
        ).install()

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        self.logout_page.visit()
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()

    def _setup_prereq(self):
        """
        Make the first subsection a prerequisite
        """
        # Login as staff
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)

        # Make the first subsection a prerequisite
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(0)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.studio_course_outline.make_gating_prerequisite()

    def _setup_gated_subsection(self):
        """
        Gate the second subsection on the first subsection
        """
        # Login as staff
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)

        # Gate the second subsection based on the score achieved in the first subsection
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(1)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.studio_course_outline.add_prerequisite_to_subsection("80", "")

    def _fulfill_prerequisite(self):
        """
        Fulfill the prerequisite needed to see gated content
        """
        # Answer the prerequisite problem correctly (choice_1 = 300 meters).
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.wait_for_page().problem_name, 'HEIGHT OF EIFFEL TOWER')
        problem_page.click_choice('choice_1')
        problem_page.click_submit()

    def test_subsection_gating_in_studio(self):
        """
        Given that I am a staff member
        When I visit the course outline page in studio.
        And open the subsection edit dialog
        Then I can view all settings related to Gating
        And update those settings to gate a subsection
        """
        self._setup_prereq()

        # Assert settings are displayed correctly for a prerequisite subsection
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(0)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.assertTrue(self.studio_course_outline.gating_prerequisite_checkbox_is_visible())
        self.assertTrue(self.studio_course_outline.gating_prerequisite_checkbox_is_checked())
        self.assertFalse(self.studio_course_outline.gating_prerequisites_dropdown_is_visible())
        self.assertFalse(self.studio_course_outline.gating_prerequisite_min_score_is_visible())

        self._setup_gated_subsection()

        # Assert settings are displayed correctly for a gated subsection
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(1)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.assertTrue(self.studio_course_outline.gating_prerequisite_checkbox_is_visible())
        self.assertTrue(self.studio_course_outline.gating_prerequisites_dropdown_is_visible())
        self.assertTrue(self.studio_course_outline.gating_prerequisite_min_score_is_visible())

    def test_gated_subsection_in_lms_for_student(self):
        """
        Given that I am a student
        When I visit the LMS Courseware
        Then I can see a gated subsection
            The gated subsection should have a lock icon
            and be in the format: "<Subsection Title> (Prerequisite Required)"
        When I fulfill the gating Prerequisite
        Then I can see the gated subsection
            Now the gated subsection should have an unlock icon
            and screen readers should read the section as: "<Subsection Title> Unlocked"
        """
        self._setup_prereq()
        self._setup_gated_subsection()

        self._auto_auth(self.STUDENT_USERNAME, self.STUDENT_EMAIL, False)

        self.course_home_page.visit()
        self.assertEqual(self.course_home_page.outline.num_subsections, 2)

        # Fulfill prerequisite and verify that gated subsection is shown
        self.courseware_page.visit()
        self._fulfill_prerequisite()
        self.course_home_page.visit()
        self.assertEqual(self.course_home_page.outline.num_subsections, 2)

    def test_gated_subsection_in_lms_for_staff(self):
        """
        Given that I am a staff member
        When I visit the LMS Courseware
        Then I can see all gated subsections
        Displayed along with notification banners
        Then if I masquerade as a student
        Then I can see a gated subsection
            The gated subsection should have a lock icon
            and be in the format: "<Subsection Title> (Prerequisite Required)"
        """
        self._setup_prereq()
        self._setup_gated_subsection()
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)

        self.course_home_page.visit()
        self.assertEqual(self.course_home_page.preview.staff_view_mode, 'Staff')
        self.assertEqual(self.course_home_page.outline.num_subsections, 2)

        # Click on gated section and check for banner
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 2')
        self.courseware_page.wait_for_page()
        self.assertTrue(self.courseware_page.has_banner())

        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1')
        self.courseware_page.wait_for_page()

        # Switch to the Learner view and verify the gated subsection banner.
        self.course_home_page.visit()
        self.course_home_page.preview.set_staff_view_mode('Learner')
        self.course_home_page.wait_for_page()
        self.assertEqual(self.course_home_page.outline.num_subsections, 2)
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1')
        self.courseware_page.wait_for_page()

        # banner displayed informing section is a prereq
        self.assertTrue(self.courseware_page.has_banner())
| ahmedaljazzar/edx-platform | common/test/acceptance/tests/lms/test_lms_gating.py | Python | agpl-3.0 | 9,099 | [
"VisIt"
] | 1e1b5d9853eff26a752e782746c106d7d3301d1196927c9e922fc465a88764c1 |
from django import template
from django.core.urlresolvers import reverse
from django.conf import settings
from pylmth.dom import *
from profiles.models import (DataDomain, Time, GeoRecord, GeoLevel,
Value, DataPoint, Indicator, IndicatorPart)
from profiles.utils import format_number as util_format_number
from profiles.utils import get_time_sorted_indicatorset
from data_displays.models import DataDisplayTemplate, DataDisplay
from django.contrib.humanize.templatetags.humanize import intcomma
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
from django.contrib.flatpages.models import FlatPage
import re
from maps.models import Setting
setting = Setting.objects.filter(active=True)
if len(setting) == 0:
raise ImproperlyConfigured('DEFAULT_GEO_RECORD_ID must be defined')
setting = setting[0]
register = template.Library()
def ind_qfilter(qs, level):
    """Filter an indicator query set by a level.

    Template filter: keeps only indicators attached to *level*.
    """
    return qs.filter(levels__in=[level])
register.filter('ind_qfilter', ind_qfilter)
def group_filter(group, level):
    """Return the published indicators of *group* at *level*.

    When GroupIndex rows exist for the group, their 'order' values are used
    to re-order the group's indicator list; if that ordering is inconsistent
    with the list (e.g. out-of-range positions), the group's natural sort
    order is used instead. Only indicators with published=True are returned.
    """
    from profiles.models import GroupIndex

    indicators = group.sorted_indicators(level)

    # Translate the GroupIndex 'order' values into positions within the
    # indicator list: sort the values, then map each back to the position it
    # occupied. NOTE: duplicate order values collapse onto the first
    # occurrence (list.index) -- this preserves the original behaviour.
    raw_orders = [entry.order for entry in GroupIndex.objects.filter(groups=group)]
    positions = [raw_orders.index(value) for value in sorted(raw_orders)]

    try:
        ordered = [indicators[pos] for pos in positions]
    except Exception:
        # Ordering metadata does not line up with the indicator list;
        # fall back to natural order. (The original used a bare except.)
        ordered = indicators

    return [indicator for indicator in ordered if indicator.published]
register.filter('group_filter', group_filter)
def add_order_key(jstring, order_id):
    """Splice an ``"order_key": <order_id>`` entry into a JSON object string.

    The entry is inserted before the object's *last* closing brace. The
    original implementation replaced every ``}`` in the string, which
    corrupted nested objects; for flat objects the output is unchanged.
    Strings containing no ``}`` are returned as-is (matching str.replace).
    """
    idx = jstring.rfind('}')
    if idx == -1:
        return jstring
    return '%s,"order_key":%s%s' % (jstring[:idx], order_id, jstring[idx:])
register.filter('order_key', add_order_key)
#TODO: Dont know why the built in flatpages templatetag was causing such a headache. This is hacky.
@register.simple_tag
def get_all_flatpages(current_fp_id=None):
    """Render a <ul> of links to every flatpage, ordered by title.

    The flatpage whose id equals *current_fp_id* gets the "active" CSS
    class. NOTE(review): titles and URLs are interpolated without HTML
    escaping -- only safe while flatpage content is admin-controlled.
    """
    from django.contrib.flatpages.models import FlatPage
    items = []
    for f in FlatPage.objects.all().order_by('title'):
        if f.id != current_fp_id:
            items.append('<li> <a href="%s">%s</a></li>' % (f.url, f.title))
        else:
            items.append('<li class="active"> <a href="%s"> %s</a> </li>' % (f.url, f.title))
    # Join once instead of repeated string concatenation.
    return '<ul class="flatpage_list">%s</ul>' % ''.join(items)
def _indicator_href(indicator, geo_record, data_domain=None):
    """Build the URL for *indicator* in the context of *geo_record*.

    If *data_domain* is not given, the indicator's default domain is used.
    (An unused function-local FlatPage import was removed.)
    """
    if not data_domain:
        data_domain = indicator.default_domain()
    return reverse('indicator', kwargs={'geo_level_slug': geo_record.level.slug, 'geo_record_slug': geo_record.slug, 'data_domain_slug': data_domain.slug, 'indicator_slug': indicator.slug})
def _domain_href(domain, geo_record):
    """Build the URL for *domain* in the context of *geo_record*."""
    # Get the href for the domain, in the context of the selected geo_record
    try:
        return reverse('data_domain', kwargs={'geo_level_slug': geo_record.level.slug, 'geo_record_slug': geo_record.slug, 'data_domain_slug': domain.slug})
    except AttributeError:
        # geo_record was None (no .level) -- fall back to a URL with empty
        # slugs. NOTE(review): a None *domain* also lands here via
        # domain.slug; confirm that is intended.
        return reverse('data_domain', kwargs={'geo_level_slug': None, 'geo_record_slug': None, 'data_domain_slug': None})
@register.simple_tag(takes_context=True)
def record_href(context, record):
    """Return the URL for *record*, preserving the current domain/indicator.

    Expects 'data_domain' and 'indicator' keys to be present in *context*
    (either may be None/falsy).
    """
    data_domain = None
    data_domain_slug = None
    if context['data_domain']:
        data_domain = context['data_domain']
        data_domain_slug = data_domain.slug
    if context['indicator']:
        # Stay on the same indicator, but switch to the requested record.
        indicator = context['indicator']
        return _indicator_href(indicator, record, data_domain)
    else:
        return reverse('data_domain', kwargs={'geo_level_slug': record.level.slug, 'geo_record_slug': record.slug, 'data_domain_slug': data_domain_slug})
@register.inclusion_tag('profiles/includes/data_domains.html', takes_context=True)
def data_domains(context, geo_record=None, selected_domain=None):
    """Render the list of top-level data domains with per-record links."""
    setting = Setting.objects.filter(active=True);
    if len(setting) == 0:
        # NOTE(review): this guard looks wrong -- the commented-out line
        # below suggests it was meant to test "geo_record is None", and an
        # empty Setting queryset already raises at module import time, so
        # this branch appears unreachable. Confirm before relying on it.
        #if geo_record == None:
        geo_record = GeoRecord.objects.get(pk=getattr(settings, "DEFAULT_GEO_RECORD_ID", 1))
    data_domains = [{'domain':domain,'href':_domain_href(domain, geo_record), }for domain in DataDomain.objects.filter(subdomain_only=False)]
    return {'data_domains': data_domains, 'selected_domain': selected_domain}
@register.inclusion_tag('profiles/includes/indicators.html')
def indicators(geo_level, geo_record, data_domain=None):
    """Render *data_domain*'s indicators as (indicator, href) pairs.

    *geo_level* is unused here but kept as part of the template-tag
    signature used by the templates.
    """
    indicators = [(indicator, _indicator_href(indicator, geo_record, data_domain)) for indicator in data_domain.indicators.all()]
    return {'indicators': indicators, }
@register.inclusion_tag('profiles/includes/datatables/attributes.html')
def attributes_table(indicator, geo_record, data_domain):
    """Render the per-child-geography table for *indicator*.

    Lists the children of *geo_record*; at the bottom of the geography
    hierarchy (no children) the record's siblings are listed instead.
    """
    times = Time.objects.filter(indicatorpart__indicator=indicator).order_by('sort').distinct()
    attributes = GeoRecord.objects.filter(parent=geo_record).order_by('name')
    if attributes.count() == 0:
        # if we're at the bottom of the geo hierarchy, display siblings
        attributes = GeoRecord.objects.filter(parent=geo_record.parent)
    attributes_and_href = [(attribute, _indicator_href(indicator, attribute, data_domain)) for attribute in attributes]
    return {'indicator': indicator, 'times': times, 'geo_record': geo_record, 'attributes': attributes_and_href}
#@register.inclusion_tag('profiles/includes/datatables/indicators.html')
@register.simple_tag
def indicators_table(title, indicators, geo_record, data_domain):
    """Build the HTML data tables for a domain's indicators.

    title: the name of the data domain (unused, kept for template compat)
    indicators: an Indicator queryset or a single Indicator object
    geo_record: the current GeoRecord
    data_domain: the current DataDomain

    Returns the concatenated HTML of one table per time-sorted indicator
    set produced by get_time_sorted_indicatorset().
    """
    import markdown
    try:
        indicators = indicators.order_by('display_name')
    except AttributeError:
        # its a single indicator
        indicators = [indicators]
    sorted_indicators = get_time_sorted_indicatorset(indicators, geo_record, data_domain)
    tables = ''
    # build tables
    for i in sorted_indicators:
        ind_set = sorted_indicators[i]
        tbl = Table()
        tbl.attr.className = "table table-bordered data-table" + " times_%s" % len(ind_set['times'])
        # table head: one "Indicator" column plus 2 columns per time period
        thead = tbl.add_header()
        thr = thead.add_row()
        thcol = thr.add_col()
        thcol.add_attr('data-original-title')
        thcol.attr.data_original_title = 'Click an Indicator name to map'
        thcol.add_attr('rel')
        thcol.attr.rel="tooltip"
        thcol.attr.className="field_lbl indicator-lbl"
        thcol.inner_text='Indicator'
        for t in ind_set['times']:
            thr.add_col(colspan='2', className='field_lbl', inner_text=t)
        if ind_set['display_change']:
            thr.add_col(colspan='1', inner_text='Change')
        #tbody
        tbody = tbl.add_body()
        for i_set in ind_set['indicators']:
            umoe=False # keeps track of unacceptable moe
            ind = i_set['indicator']
            i_vals = i_set['values']
            i_times = i_set['indicator_times'] # times objs
            i_times.sort(key=lambda tm: tm.name, reverse=True)
            suppressed_val = False
            tbr = tbody.add_row() #tr
            tbr.attr.id = ind.id
            ind_col = tbr.add_col(className='indicator-name') # this is the indicator title
            ind_col.add_attr('data-original-title')
            ind_col.attr.data_original_title = 'Click to map'
            ind_col.attr.rel = "tooltip"
            cell_wrap = Div()
            cell_wrap.attr.className="cell-wrap"
            cell_wrap.inner_html = '<a href="%s">%s</a>' % (i_set['href'], ind.display_name)
            ind_col.append_child(cell_wrap)
            # ------------------------------------------------------- notes
            notes = ind.get_notes()
            if notes:
                # NOTE(review): attribute name 'date-toggle' looks like a typo
                # for 'data-toggle' (Bootstrap); left unchanged -- confirm.
                n_href = A()
                cell_wrap.append_child(n_href)
                n_href.add_attr('date-toggle')
                n_href.data_toggle = "modal"
                n_href.attr.className = "notes-icon"
                n_href.attr.href = "#%s_notes_modal" % ind.slug
                modal = Div()
                cell_wrap.append_child(modal)
                modal.attr.className = "modal hide"
                modal.attr.id = "%s_notes_modal" % ind.slug
                m_header = Div()
                m_header.attr.className = "modal-header"
                m_header.inner_html ="<h3>%s</h3>" % ind.display_name
                m_body = Div()
                m_body.attr.className = "modal-body"
                # append fields
                for n in notes:
                    m_body.inner_html += u'<h4>%s</h4>' % n['label']
                    m_body.inner_html += u'<p>%s</p>' % mark_safe(markdown.markdown(n['text']))
                modal.append_child(m_header, m_body)
            # ------------------------------------------------ table values
            for t in i_times:
                # values are keyed to time by the time id
                raw_val = i_vals[t.id] # Value Object
                if raw_val: # check if there is a value
                    d_val = raw_val.to_dict()
                    f_val = d_val['f_number']
                    if f_val == "-1":
                        # "-1" flags a suppressed value; hides change/denoms below
                        suppressed_val = True
                    td = tbr.add_col()
                    if raw_val.moe: # MOE present: tooltip-enabled cell
                        td.add_attr('data-original-title') # this is a custom attribute
                        td.attr.rel = 'tooltip'
                        td.attr.className = 'value moe'
                        td.attr.data_original_title = "Margin of Error: %s" % util_format_number(raw_val.moe, ind.data_type)
                        td.inner_html = '<span class="moe-icon">+/-</span>%s<span class="pr-moe">+/-%s</span>' % (f_val, raw_val.moe)
                        td.attr.colspan='2'
                        #check if moe is acceptable
                        if raw_val.moe > raw_val.number:
                            umoe=True
                            td.attr.className +=' u-moe' # unacceptable moe
                            td.attr.data_original_title = "+/-%s </br>The Margin of Error for this value is larger than the value making it unreliable. " % util_format_number(raw_val.moe, ind.data_type)
                            td.inner_html = '<span class="moe-icon"><span class="u-moe"></span> +/-</span>%s<span class="pr-moe">+/-%s</span><div class="cell-wrap"></div>' % (f_val, raw_val.moe)
                    else:
                        # no moe
                        td.attr.className='value'
                        td.attr.colspan='2'
                        td.inner_html = '<div class="cell-wrap">%s</div>' % f_val
                else:
                    # no value at this time: blank placeholder spanning both cols
                    td = tbr.add_col(className="value empty none")
                    td.attr.colspan='2'
            #change
            if ind_set['display_change']:
                change_val = i_set['change'] # Value Obj
                if change_val and suppressed_val == False:
                    if change_val.percent:
                        col = tbr.add_col(className='value change-perc', inner_text="%s%%" % util_format_number(change_val.percent, "PERCENT"))
                        if umoe:
                            col.attr.className +=' u-moe'
                    else:
                        tbr.add_col(className='value empty no-change-perc')
                else:
                    tbr.add_col(className="value empty change-none")
            #denominators
            if suppressed_val == False:
                for denom in i_set['denominators']:
                    den = denom['denominator'] # the denominator obj
                    den_vals = denom['values']
                    dtr = tbody.add_row(className='denom') # denom tr
                    dtr.add_col(className='denom', inner_html='<div class="cell-wrap"><a href="%s?denom=%s">...as %s</a></div>' % (i_set['href'], den.id, den.label)) # denom label
                    # get vals with time keys
                    for t in i_times:
                        den_val = den_vals[t.id]
                        if den_val: # check if None
                            if den_val.percent:
                                dtr.add_col(className="value denom denom-perc",inner_text="%s%%" % util_format_number(den_val.percent, "PERCENT") )
                            else:
                                dtr.add_col(className="value empty no-denom-perc")
                            if den_val.number:
                                dtr.add_col(className="value denom denom-divsor",inner_text="%s" % util_format_number(den_val.number,ind.data_type ))
                            else:
                                # BUG FIX: was `dtr_col(...)` -- an undefined name
                                # raising NameError; add the placeholder cell via
                                # the row like every other branch.
                                dtr.add_col(className="value empty no-denom-divisor")
                        else:
                            dtr.add_col(className="value empty denom-none")
                            dtr.add_col(className="value empty denom-none")
                    # denom_change
                    if ind_set['display_change']:
                        change_val = denom['change'] # Value Obj
                        if change_val: # check if None
                            if change_val.percent:
                                dtr.add_col(className='value denom-change-perc', inner_text="%spts" % util_format_number(change_val.percent, "PERCENT"))
                            else:
                                dtr.add_col(className="value empty no-denom-perc")
                        else:
                            dtr.add_col(className="value empty change-none")
        tables += str(tbl)
    return tables
def atoi(text):
    """Return *text* converted to int when it is all digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Natural-sort key for objects exposing a ``name`` attribute.

    Splits the name into alternating text/number chunks (numbers compared
    numerically) so e.g. 'Tract 10' sorts after 'Tract 2'.
    """
    chunks = re.split(r'(\d+)', text.name)
    return [int(c) if c.isdigit() else c for c in chunks]
@register.simple_tag
def indicator_table(indicator, geo_record):
    """ This is the table in the Indicator View. A similar table to indicators_table except for a single indicator """
    # we display all nested geographies so lets collect those first
    name = geo_record.name
    if geo_record.level.name !="Census Tract":
        geo_children = geo_record.child_records().exclude(name=name)
    else:
        # tracts have no children: show the parent's children (siblings)
        geo_children = geo_record.parent.child_records().exclude(name=name)
    # make sure the geography we are interested in is the first one.
    geos = list(geo_children)
    geos.insert(0, geo_record)
    # time-period names, newest first (string sort on the names)
    times = [t.name for t in indicator.get_times()]
    times.sort(reverse=True)
    tables = ''
    tbl = Table() # the main table
    tbl.attr.className = "data-table"
    # build the table head: indicator name + info icon, then one column per time
    thead = tbl.add_header()
    thr = thead.add_row()
    thcol = thr.add_col()
    thcol.attr.className = 'field_lbl indicator-lbl'
    thcol.inner_html = indicator.display_name+'''<a href="#" class="about-indicator-inTable"><img src="'''+settings.STATIC_URL+'''css/img/info.png"/>
    </a>
    '''
    for t in times:
        thr.add_col(colspan='1', className='field_lbl', inner_text=t)
    if indicator.display_change:
        thr.add_col(colspan='1', inner_text='Change')
    tbody = tbl.add_body()
    # natural sort so numbered geographies (e.g. tracts) order sensibly
    geos.sort(key=natural_keys)
    for g in geos:
        # one row per geography, plus one row per denominator below it
        val = indicator.get_values_as_dict(g)
        ind = val['indicator']
        denoms = val['denominators']
        tbr = tbody.add_row() #tr
        geo_col = tbr.add_col(className='indicator-name')
        cell_wrap = Div()
        cell_wrap.attr.className="cell-wrap"
        cell_wrap.inner_html = g.name
        geo_col.append_child(cell_wrap)
        # get values; umoe flags an unacceptable margin of error for the row
        umoe=None
        for t in times:
            val_col = tbr.add_col(className='value')
            val_wrap = Div()
            val_wrap.attr.className="cell-wrap"
            try:
                if ind[t]['moe']:
                    # MOE present: render a tooltip-enabled cell
                    val_col.add_attr('data-original-title')
                    val_col.attr.rel ='tooltip'
                    val_col.attr.className += " moe"
                    val_col.attr.data_original_title = "Margin of Error: %s" % ind[t]['f_moe']
                    val_wrap.inner_html = '<span class="moe-icon">+/-</span>%s<span class="pr-moe">+/-%s</span>' % (ind[t]['f_number'],ind[t]['f_moe'])
                    # now we check for acceptable moe's (moe > value = unreliable)
                    if ind[t]['moe'] > ind[t]['number']:
                        umoe = True
                        val_col.attr.className +=' u-moe'
                        val_col.attr.data_original_title = "+/-%s </br>The Margin of Error for this value is larger than the value making it unreliable. " % ind[t]['f_moe']
                else:
                    val_wrap.inner_html = ind[t]['f_number']
            except KeyError:
                # no values
                val_wrap.attr.className+=" none"
            val_col.append_child(val_wrap)
        # change
        if indicator.display_change:
            val_col = tbr.add_col(className='value change')
            val_wrap = Div()
            val_wrap.attr.className="cell-wrap"
            if umoe:
                val_col.attr.className += ' u-moe'
            try:
                val_wrap.inner_html = ind['change']['f_number']
            except KeyError:
                #no values
                val_wrap.attr.className +=" none"
                val_col.attr.className += " none"
            val_col.append_child(val_wrap)
        # denoms: one extra row per denominator, percent values only
        for d in denoms:
            d_row = tbody.add_row() # denom row
            d_row.attr.className = 'denom-row'
            d_col = d_row.add_col()# denom name
            d_wrap = Div()
            d_wrap.attr.className='cell-wrap'
            d_wrap.inner_html = "...as {0}".format(d)
            d_col.append_child(d_wrap)
            # get values
            for t in times:
                val_col = d_row.add_col(className='value')
                val_wrap = Div()
                val_wrap.attr.className="cell-wrap"
                try:
                    val_wrap.inner_html = denoms[d][t]['f_percent']
                except KeyError:
                    val_wrap.attr.className+=" none"
                val_col.append_child(val_wrap)
            #change (percentage-point change, hence the 'pts' suffix)
            if indicator.display_change:
                val_col = d_row.add_col(className='value change')
                val_wrap = Div()
                val_wrap.attr.className="cell-wrap"
                try:
                    val_wrap.inner_html = denoms[d]['change']['f_percent'].replace('%','pts')
                except KeyError:
                    val_wrap.attr.className+=" none"
                val_col.append_child(val_wrap)
    tables += str(tbl)
    return tables
@register.simple_tag
def indicator_number(indicator, geo_record, time=None):
    """Formatted number for *indicator* at *geo_record*/*time*; '' when absent.

    When the value carries a margin of error, the number is wrapped in a
    tooltip-enabled div with a +/- icon.
    """
    try:
        point = DataPoint.objects.get(indicator=indicator, record=geo_record, time=time)
        value = point.value_set.get(denominator__isnull=True)
    except (DataPoint.DoesNotExist, Value.DoesNotExist):
        return ''
    f_num = "%s" % util_format_number(value.number, data_type=indicator.data_type)
    if not value.moe:
        return f_num
    # MOE present: build the tooltip cell
    cell = Div()
    cell.add_attr('data-original-title')
    cell.attr.data_original_title = 'Margin of Error: %s' % util_format_number(value.moe, data_type=indicator.data_type)
    cell.attr.className = 'value-cell'
    cell.attr.rel = 'tooltip'
    cell.inner_text = f_num
    icon = Span()
    icon.attr.className = 'moe-icon attr-tbl'
    icon.inner_text = '±'
    cell.append_child(icon)
    return str(cell)
@register.simple_tag
def indicator_percent(indicator, geo_record, time=None):
    """Formatted percent value for *indicator* at *geo_record*/*time*,
    or '' when no data point / value exists."""
    try:
        point = DataPoint.objects.get(indicator=indicator, record=geo_record, time=time)
    except DataPoint.DoesNotExist:
        return ''
    try:
        val = point.value_set.get(denominator__isnull=True)
    except Value.DoesNotExist:
        return ''
    return "%s" % util_format_number(val.percent)
@register.simple_tag
def indicator_moe(indicator, geo_record, time):
    """Formatted margin of error for *indicator* at *geo_record*/*time*,
    or '' when no data point / value exists."""
    try:
        point = DataPoint.objects.get(indicator=indicator, record=geo_record, time=time)
        moe_value = point.value_set.get(denominator__isnull=True).moe
    except (DataPoint.DoesNotExist, Value.DoesNotExist):
        return ''
    return util_format_number(moe_value, data_type=indicator.data_type)
@register.simple_tag
def geo_mapping(geo_record, geo_level):
    """Comma-separated geo_ids of the records *geo_record* maps to at *geo_level*."""
    mapped = geo_record.mapped_to(geo_level)
    return ', '.join(m.geo_id for m in mapped)
def _geo_nav_context(context):
    """Create a context object to render geo nav widgets.

    Pulls the currently selected record/indicator/domain out of the
    template *context* (falling back to a configured or first-available
    default record) and builds ``nav``: a dict keyed by GeoLevel sort_key
    holding each level's name/pk/slug and its GeoRecords.
    """
    from profiles.utils import get_default_levels
    levels = get_default_levels()
    geo_record = None
    indicator = None
    data_domain = None
    if 'geo_record' in context:
        geo_record = context['geo_record']
    if 'indicator' in context:
        indicator = context['indicator']
    if 'data_domain' in context:
        data_domain = context['data_domain']
    if geo_record is None:
        try:
            # BUG FIX: was `setting.DEFAULT_GEO_RECORD_ID` -- `setting` is an
            # undefined name (the module imports `settings`), and the
            # resulting NameError was not caught below. Also catch
            # AttributeError so an unconfigured DEFAULT_GEO_RECORD_ID falls
            # back gracefully instead of crashing.
            geo_record = GeoRecord.objects.get(pk=settings.DEFAULT_GEO_RECORD_ID)
        except (GeoRecord.DoesNotExist, AttributeError):
            geo_record = GeoRecord.objects.filter(level=GeoLevel.objects.get(slug=levels[0].lower()))[0]
    # construct a list of geo levels to display in nav, with a list of records
    # and optionally a selected record
    nav = {}
    parent_level = geo_record.level.parent
    while parent_level:
        nav[parent_level.sort_key] = {'name': parent_level.name, 'pk': parent_level.pk, 'slug': parent_level.slug}
        parent_level = parent_level.parent
    # at this point any level that is not in nav already, is a sub level.
    # BUG FIX: the exclusion used nav.keys(), which are sort_key values, so
    # `name__in` never matched anything; exclude by the collected level names.
    nav_names = [lev['name'] for lev in nav.values()]
    sub_levels = GeoLevel.objects.exclude(name__in=nav_names).filter(name__in=levels)
    for lev in sub_levels:
        nav[lev.sort_key] = {'name': lev.name, 'pk': lev.pk, 'slug': lev.slug}
    # (a no-op `sorted(nav, ...)` call whose result was discarded was removed)
    # now lets get all the geographies for each level
    for k, v in nav.iteritems():
        lev = nav[k]
        lev['geos'] = GeoRecord.objects.filter(level__pk=lev['pk']).only('slug', 'pk', 'name')
    try:
        # BUG FIX: `setting` -> `settings` here as well.
        default_geo = GeoRecord.objects.get(pk=settings.DEFAULT_GEO_RECORD_ID)
    except (GeoRecord.DoesNotExist, AttributeError):
        default_geo = GeoRecord.objects.filter(level=GeoLevel.objects.get(slug=levels[0].lower()))[0]
    try:
        del nav[1]  # we never show the topmost geography
    except KeyError:
        # narrowed from a bare `except Exception`; only the missing key is expected
        pass
    return {
        'levels': nav,
        'data_domain': data_domain,  # currently selected domain
        'indicator': indicator,  # currently selected indicator
        'geo_record': geo_record,  # currently selected record
        'default_georecord': default_geo
    }
@register.inclusion_tag('profiles/includes/geo_nav.inc.html', takes_context=True)
def geo_nav(context):
    # Thin wrapper: renders the shared geo-nav context with the standard template.
    return _geo_nav_context(context)
@register.inclusion_tag('profiles/includes/search_geo_nav.inc.html', takes_context=True)
def search_geo_nav(context):
    # Same geo-nav context as geo_nav(), rendered with the search-page template.
    return _geo_nav_context(context)
@register.simple_tag
def format_number(number, data_type='COUNT'):
    """Template-facing wrapper around util_format_number()."""
    return util_format_number(number, data_type=data_type)
@register.simple_tag
def get_state_for_geo(record):
    """Slug of the state containing *record*, falling back to the record's own slug."""
    state = record.get_state()
    return state.slug if state else record.slug
@register.inclusion_tag('profiles/includes/data_displays.inc.html')
def data_displays(geo_record, data_domain, featured_only = True, indicator=None):
    """Collect the DataDisplays to show for *geo_record* in *data_domain*.

    For each (optionally featured-only) template, local displays are used
    when present, otherwise the parent geography's displays are used.
    """
    if featured_only:
        templates = DataDisplayTemplate.objects.filter(featured=True, domains=data_domain)
    else:
        templates = DataDisplayTemplate.objects.all()
    displays = []
    for tmpl in templates:
        local = DataDisplay.objects.filter(template=tmpl, record=geo_record)
        if len(local) != 0:
            displays += local
        else:
            # nothing local -- fall back to the parent geography
            displays += DataDisplay.objects.filter(template=tmpl, record=geo_record.parent)
    # unique times across the displays, newest first
    times = []
    for disp in displays:
        if disp.time not in times:
            times.append(disp.time)
    if times:
        times.sort(key=lambda t: t.sort, reverse=True)
        default_time = times[0].id
    else:
        # fallback value consumed by the template when no display has a time
        default_time = 2000
    return {'data_displays': displays, 'default_time': default_time, 'times': times}
def profiles_admin_submit_row(context):
    """
    Displays the row of buttons for delete and save.

    Mirrors Django admin's stock submit_row context, adding ``is_indicator``
    and ``model`` so the template can show an extra button for Indicators.
    """
    # capture model type so we can display a generate indicator data
    try:
        model = context['original']
    except KeyError:
        model = None
    opts = context['opts']
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    return {
        'onclick_attrib': (opts.get_ordered_objects() and change
                            and 'onclick="submitOrderForm();"' or ''),
        'show_delete_link': (not is_popup and context['has_delete_permission']
                              and (change or context['show_delete'])),
        'show_save_as_new': not is_popup and change and save_as,
        'show_save_and_add_another': context['has_add_permission'] and
                            not is_popup and (not save_as or context['add']),
        'show_save_and_continue': not is_popup and context['has_change_permission'],
        'is_popup': is_popup,
        'show_save': True,
        # template uses these two to offer indicator-specific actions
        'is_indicator':type(model) is Indicator,
        'model': model
    }
# Register the function as the admin submit_line inclusion tag.
profiles_admin_submit_row = register.inclusion_tag('admin/submit_line.html', takes_context=True)(profiles_admin_submit_row)
@register.simple_tag
def google_analytics():
    # Google Analytics tracking id, taken straight from settings.
    return settings.GOOGLE_ANALYTICS_UID
@register.simple_tag
def logo_icon():
    # Path/name of the site logo icon, taken straight from settings.
    return settings.LOGO_ICON
@register.simple_tag
def style_css():
    """Path to the site stylesheet; settings.STYLE overrides the default."""
    return getattr(settings, 'STYLE', "css/profiles.css")
@register.assignment_tag
def search_domainId(domain_id):
    # DataDomainIndex rows for the given domain id (import kept local,
    # matching the original, to avoid import-time coupling).
    from profiles.models import DataDomainIndex
    return DataDomainIndex.objects.filter(dataDomain_id=domain_id)
@register.assignment_tag
def search_groupId(group_id):
    # GroupIndex rows for the given group id (local import, as above).
    from profiles.models import GroupIndex
    return GroupIndex.objects.filter(groups_id=group_id)
@register.simple_tag
def get_setting(name):
    # Value of the named profiles.models.Setting row; raises
    # Setting.DoesNotExist when the row is missing.
    from profiles.models import Setting
    return Setting.objects.get(name=name).value
| ProvidencePlan/Profiles | communityprofiles/profiles/templatetags/profiles_tags.py | Python | mit | 28,156 | [
"MOE"
] | 4e7a1ef44b389d81aaeacb0472c2cfc74e4491930c1789d5d2ff2301121bc764 |
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a sphinx extension which proposes a new version of ``.. toctree::``
which takes into account titles dynamically added.
"""
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx.util import logging
from sphinx.errors import NoUri
import sphinx
class tocdelay_node(nodes.paragraph):
    """
    Placeholder docutils node inserted by the ``.. tocdelay::`` directive;
    it carries the td* attributes and is replaced with a bullet list of
    links in :func:`transform_tocdelay`.
    """
    pass
class TocDelayDirective(Directive):
    """
    Defines a :epkg:`sphinx` extension which proposes a new version of ``.. toctree::``
    which takes into account titles dynamically added. It only considers
    one level.

    Example::

        .. tocdelay::

            document

    Directive ``.. toctree::`` only considers titles defined by the user,
    not titles dynamically created by another directives.

    .. warning:: It is not recommended to dynamically insert
        such a directive. It is not recursive.

    Parameter *rule* implements specific behaviors.
    It contains the name of the node which holds
    the document name, the title, the id. In case of the blog,
    the rule is: ``blogpost_node,toctitle,tocid,tocdoc``.
    That means the *TocDelayDirective* will look for nodes
    ``blogpost_node`` and fetch attributes
    *toctitle*, *tocid*, *tocdoc* to fill the toc contents.
    No depth is allowed at this point.
    The previous value is the default value.

    Option *path* is mostly used to test the directive.
    """
    node_class = tocdelay_node
    name_sphinx = "tocdelay"
    has_content = True
    # Parses "Some Title <document-link>" entries.
    regex_title = re.compile("(.*) +[<]([/a-z_A-Z0-9-]+)[>]")
    option_spec = {'rule': directives.unchanged,
                   'path': directives.unchanged}

    def run(self):
        """
        Just add a @see cl tocdelay_node and list the documents to add.

        @return     list of nodes or list of nodes, container
        """
        lineno = self.lineno
        settings = self.state.document.settings
        env = settings.env if hasattr(settings, "env") else None
        docname = None if env is None else env.docname
        if docname is not None:
            docname = docname.replace("\\", "/").split("/")[-1]
        else:
            docname = ''
        ret = []

        # It analyses rule: always end up with a 4-tuple
        # (node class name, title attr, id attr, doc attr).
        defa = ("blogpost_node", "toctitle", "tocid", "tocdoc")
        rule = self.options.get("rule", "blogpost_node,toctitle,tocid,tocdoc")
        spl = rule.split(",")
        if len(spl) > 4:
            ret.append(self.state.document.reporter.warning(
                "tocdelay rule is wrong: '{0}' ".format(rule) +
                'document %r' % docname, line=self.lineno))
            # BUG FIX: fall back to the default rule; leaving *rule* as the
            # unsplit string made the 4-way unpacking in transform_tocdelay fail.
            rule = defa
        elif len(spl) == 4:
            rule = tuple(spl)
        else:
            # BUG FIX: was ``defa[4 - len(spl):]`` which padded with the wrong
            # number of defaults (one given name produced a 2-tuple); pad with
            # exactly the defaults the user did not override.
            rule = tuple(spl) + defa[len(spl):]

        # It looks for the documents to add (one non-empty content line each).
        documents = []
        for line in self.content:
            sline = line.strip()
            if len(sline) > 0:
                documents.append(sline)

        # It checks their existence.
        loc = self.options.get("path", None)
        if loc is None:
            loc = os.path.join(env.srcdir, os.path.dirname(env.docname))
        # (the former if/else bound the same function in both branches)
        osjoin = os.path.join
        keep_list = []
        for name in documents:
            if name.endswith(">"):
                # title <link>
                match = TocDelayDirective.regex_title.search(name)
                if match:
                    gr = match.groups()
                    title = gr[0].strip()
                    name = gr[1].strip()
                else:
                    ret.append(self.state.document.reporter.warning(
                        "tocdelay: wrong format for '{0}' ".format(name) +
                        'document %r' % docname, line=self.lineno))
            else:
                title = None
            # NOTE(review): this rebinds *docname* to a filesystem path, so
            # later warnings report the path, not the source document -- kept
            # as-is; confirm before changing.
            docname = osjoin(loc, name)
            if not docname.endswith(".rst"):
                docname += ".rst"
            if not os.path.exists(docname):
                ret.append(self.state.document.reporter.warning(
                    'tocdelay contains reference to nonexisting '
                    'document %r' % docname, line=self.lineno))
            else:
                keep_list.append((name, docname, title))

        if len(keep_list) == 0:
            raise ValueError("No found document in '{0}'\nLIST:\n{1}".format(
                loc, "\n".join(documents)))

        # It updates internal references in env.
        entries = []
        includefiles = []
        for name, docname, title in keep_list:
            entries.append((None, docname))
            includefiles.append(docname)

        # The placeholder node; transform_tocdelay consumes the td* attributes.
        node = tocdelay_node()
        node['entries'] = entries
        node['includefiles'] = includefiles
        node['tdlineno'] = lineno
        node['tddocname'] = env.docname
        node['tdfullname'] = docname
        node["tdprocessed"] = 0
        node["tddocuments"] = keep_list
        node["tdrule"] = rule
        node["tdloc"] = loc

        wrappernode = nodes.compound(classes=['toctree-wrapper'])
        wrappernode.append(node)
        ret.append(wrappernode)
        return ret
def process_tocdelay(app, doctree):
    """Mark every ``tocdelay`` node of *doctree* as having gone through the
    first pass by bumping its ``tdprocessed`` counter.

    Connected to the ``doctree-read`` event in :func:`setup`;
    :func:`transform_tocdelay` later skips nodes whose counter is 0.
    """
    for nd in doctree.traverse(tocdelay_node):
        nd["tdprocessed"] = nd["tdprocessed"] + 1
def transform_tocdelay(app, doctree, fromdocname):
    """
    The function is called by event ``'doctree_resolved'``. It looks for
    every section in page stored in *tocdelay* nodes
    and builds a short table of contents.
    The instruction ``.. toctree::`` is resolved before every directive in
    the page is executed, the instruction ``.. tocdelay::`` is resolved after.

    @param      app         Sphinx application
    @param      doctree     doctree
    @param      fromdocname docname

    This directive should be used if you need to capture a section
    which was dynamically added by another one. For example @see cl RunPythonDirective
    calls function ``nested_parse_with_titles``. ``.. tocdelay::`` will capture the
    new section this function might eventually add to the page.
    """
    post_list = list(doctree.traverse(tocdelay_node))
    if len(post_list) == 0:
        return
    env = app.env
    logger = logging.getLogger("tocdelay")
    for node in post_list:
        if node["tdprocessed"] == 0:
            logger.warning("[tocdelay] no first loop was ever processed: 'tdprocessed'={0} , File '{1}', line {2}".format(
                node["tdprocessed"], node["tddocname"], node["tdlineno"]))
            continue
        if node["tdprocessed"] > 1:
            # already resolved on a previous pass
            continue
        docs = node["tddocuments"]
        if len(docs) == 0:
            # No document to look at.
            continue

        # The bullet list that will replace the placeholder node.
        main_par = nodes.paragraph()
        bullet_list = nodes.bullet_list()
        main_par += bullet_list
        nodedocname = node["tddocname"]
        dirdocname = os.path.dirname(nodedocname)
        clname, toctitle, tocid, tocdoc = node["tdrule"]
        logger.info("[tocdelay] transform_tocdelay '{0}' from '{1}'".format(
            nodedocname, fromdocname))
        node["tdprocessed"] += 1

        for name, subname, extitle in docs:
            if not os.path.exists(subname):
                raise FileNotFoundError(
                    "Unable to find document '{0}'".format(subname))

            # The doctree it needs is not necessarily accessible from the main node
            # as they are not necessarily attached to it.
            subname = "{0}/{1}".format(dirdocname, name)
            doc_doctree = env.get_doctree(subname)
            if doc_doctree is None:
                logger.info("[tocdelay] ERROR (4): No doctree found for '{0}' from '{1}'".format(
                    subname, nodedocname))
                # BUG FIX: skip this document; traversing a missing doctree
                # below raised AttributeError.
                continue

            # It finds all nodes of the class named by the rule.
            diginto = []
            for n in doc_doctree.traverse():
                if n.__class__.__name__ == clname:
                    diginto.append(n)
            if len(diginto) == 0:
                logger.info(
                    "[tocdelay] ERROR (3): No node '{0}' found for '{1}'".format(clname, subname))
                continue

            # It takes the first one available.
            subnode = None
            for d in diginto:
                # BUG FIX: use the configurable attribute name from the rule
                # (was hard-coded 'tocdoc', which broke custom rules).
                if tocdoc in d.attributes and d[tocdoc].endswith(subname):
                    subnode = d
                    break

            if subnode is None:
                found = list(
                    sorted(set(map(lambda x: x.__class__.__name__, diginto))))
                ext = diginto[0].attributes if len(diginto) > 0 else ""
                logger.warning("[tocdelay] ERROR (2): Unable to find node '{0}' in {1} [{2}]".format(
                    subname, ", ".join(map(str, found)), ext))
                continue

            rootnode = subnode
            if tocid not in rootnode.attributes:
                logger.warning(
                    "[tocdelay] ERROR (7): Unable to find 'tocid' in '{0}'".format(rootnode))
                continue
            if tocdoc not in rootnode.attributes:
                logger.warning(
                    "[tocdelay] ERROR (8): Unable to find 'tocdoc' in '{0}'".format(rootnode))
                continue
            refid = rootnode[tocid]
            refdoc = rootnode[tocdoc]

            subnode = list(rootnode.traverse(nodes.title))
            if not subnode:
                logger.warning(
                    "[tocdelay] ERROR (5): Unable to find a title in '{0}'".format(subname))
                continue
            subnode = subnode[0]
            try:
                refuri = app.builder.get_relative_uri(nodedocname, refdoc)
                logger.info(
                    "[tocdelay] add link for '{0}' - '{1}' from '{2}'".format(refid, refdoc, nodedocname))
            except NoUri:
                # Some builders (e.g. latex) cannot produce URIs; degrade to an anchor-less link.
                docn = list(sorted(app.builder.docnames))
                logger.info("[tocdelay] ERROR (9): unable to find a link for '{0}' - '{1}' from '{2}` -- {3} - {4}".format(
                    refid, refdoc, nodedocname, type(app.builder), docn))
                refuri = ''

            # Prefer the title given in the directive over the document's own.
            use_title = extitle or subnode.astext()
            par = nodes.paragraph()
            ref = nodes.reference(refid=refid, reftitle=use_title, text=use_title,
                                  internal=True, refuri=refuri)
            par += ref
            bullet = nodes.list_item()
            bullet += par
            bullet_list += bullet

        node.replace_self(main_par)
def _print_loop_on_children(node, indent="", msg="-"):
logger = logging.getLogger("tocdelay")
if hasattr(node, "children"):
logger.info(
"[tocdelay] '{0}' - {1} - {2}".format(type(node), msg, node))
for child in node.children:
logger.info("[tocdelay] {0}{1} - '{2}'".format(indent, type(child),
child.astext().replace("\n", " #EOL# ")))
_print_loop_on_children(child, indent + " ")
def visit_tocdelay_node(self, node):
    """
    Does not render anything; only dumps the node tree to the log
    as a debugging aid.
    """
    _print_loop_on_children(node, msg="visit")
def depart_tocdelay_node(self, node):
    """
    Does not render anything; only dumps the node tree to the log
    as a debugging aid.
    """
    _print_loop_on_children(node, msg="depart")
def setup(app):
    """
    setup for ``tocdelay`` (sphinx): registers the node, the directive and
    the two event handlers.
    """
    if hasattr(app, "add_mapping"):
        app.add_mapping('tocdelay', tocdelay_node)
    # The same visit/depart pair serves every output format.
    handlers = (visit_tocdelay_node, depart_tocdelay_node)
    app.add_node(tocdelay_node,
                 html=handlers,
                 epub=handlers,
                 elatex=handlers,
                 latex=handlers,
                 text=handlers,
                 md=handlers,
                 rst=handlers)
    app.add_directive('tocdelay', TocDelayDirective)
    app.connect('doctree-read', process_tocdelay)
    app.connect('doctree-resolved', transform_tocdelay)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| sdpython/pyquickhelper | src/pyquickhelper/sphinxext/sphinx_tocdelay_extension.py | Python | mit | 12,678 | [
"VisIt"
] | 530479ade605d516ac931287f0e1a10313e2da1e0eed5090f4d3ba025b9fc738 |
''' Script to test the simulator.
'''
import os
import tables as tb
import numpy as np
import unittest
import pylandau
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from testbeam_analysis.tools import simulate_data, geometry_utils, analysis_utils, test_tools
# Absolute directory of this test module (handy for locating fixtures).
testing_path = os.path.dirname(__file__)
class TestHitAnalysis(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        # NOTE: the first argument is the class object (conventionally `cls`);
        # the original name `self` is kept unchanged.
        # virtual X server for plots under headless LINUX travis testing is needed
        if os.getenv('TRAVIS', False) and os.getenv('TRAVIS_OS_NAME', False) == 'linux':
            from xvfbwrapper import Xvfb
            self.vdisplay = Xvfb()
            self.vdisplay.start()
        # Shared simulator instance; a fixed seed keeps all tests deterministic.
        self.simulate_data = simulate_data.SimulateData(random_seed=0)
    @classmethod
    def tearDownClass(self):  # remove created files
        # Delete the per-DUT HDF5 files written by create_data_and_store().
        for dut_index in range(self.simulate_data.n_duts):
            os.remove('simulated_data_DUT%d.h5' % dut_index)
    # Test beam position with respect to devices positions
    def test_position(self):
        # Simulated hit positions must track the configured beam/DUT geometry.
        self.simulate_data.n_duts = 2
        self.simulate_data.set_std_settings()

        # Helper function to be called with different position parameter data
        def check_position():
            # Calculate expectation: mean hit position = beam position minus DUT offset
            expected_mean_column, expected_mean_row = [], []
            for dut_index in range(self.simulate_data.n_duts):
                expected_mean_column.append(
                    self.simulate_data.beam_position[0] - self.simulate_data.offsets[dut_index][0])
                expected_mean_row.append(
                    self.simulate_data.beam_position[1] - self.simulate_data.offsets[dut_index][1])
            # Extract results: mean hit index converted to um via the pixel pitch
            mean_column, mean_row = [], []
            for dut_index in range(self.simulate_data.n_duts):
                with tb.open_file('simulated_data_DUT%d.h5' % dut_index, 'r') as in_file_h5:
                    mean_column.append((in_file_h5.root.Hits[:][
                                       'column'].mean() - 1) * self.simulate_data.dut_pixel_size[dut_index][0])
                    mean_row.append((in_file_h5.root.Hits[:][
                                    'row'].mean() - 1) * self.simulate_data.dut_pixel_size[dut_index][1])
            # Loose tolerances: binning + statistical fluctuations
            self.assertTrue(
                np.allclose(expected_mean_column, mean_column, rtol=0.01, atol=10))
            self.assertTrue(
                np.allclose(expected_mean_row, mean_row, rtol=0.01, atol=10))

        # Test 1: Check different DUT offsets
        self.simulate_data.offsets = [(-35000, -35000), (-30000, -30000), (-25000, -25000),
                                      (-20000, -20000), (-15000, -15000), (-10000, -10000)]  # Set DUT offsets with respect to beam
        self.simulate_data.create_data_and_store(
            'simulated_data', n_events=10000)
        check_position()

        # Test 2: Check different beam offset
        self.simulate_data.beam_position = (500, 500)  # Shift beam position
        self.simulate_data.create_data_and_store(
            'simulated_data', n_events=10000)
        check_position()

        # Test 3: Check different beam parameter
        self.simulate_data.beam_position_sigma = (0, 0)  # beam position sigma
        self.simulate_data.beam_position = (0, 0)  # Shift beam position
        self.simulate_data.create_data_and_store(
            'simulated_data', n_events=10000)
        check_position()
def test_charge_distribution(self):
self.simulate_data.reset()
# Helper function to be called with different charge parameter data
def check_charge():
# Loop over DUTs
for dut_index in range(self.simulate_data.n_duts):
# 71 electrons per um in silicon
mpv_charge = 71. * self.simulate_data.dut_thickness[dut_index]
def _calc_landau(x):
if self.simulate_data.dut_noise[dut_index]:
# PyLandau Langau MPV parameter is the MPV of the
# Langau not the Landau only!
y = pylandau.langau(
x, mpv=mpv_charge, eta=mpv_charge / 10., sigma=self.simulate_data.dut_noise[dut_index], scale_langau=False)
else:
y = pylandau.landau(
x, mpv=mpv_charge, eta=mpv_charge / 10)
return y
# Check result with calculated expectation
with tb.open_file('simulated_data_DUT%d.h5' % dut_index, 'r') as in_file_h5:
charge = in_file_h5.root.Hits[:][
'charge'] * 10. # 1 LSB corresponds to 10 electrons
charge_hist, edges = np.histogram(
charge, bins=100, range=(0., 5. * mpv_charge))
charge_hist = charge_hist.astype(np.float)
x = (edges[:-1] + edges[1:]) / 2
y = _calc_landau(x)
self.assertTrue(
np.allclose(y / y.sum() * charge_hist.sum(), charge_hist, rtol=0.07, atol=50.))
# import matplotlib.pyplot as plt
# plt.plot(x, charge_hist, label='Result')
# plt.plot(x, y / y.sum() * charge_hist.sum(), label='Expected')
# print x[y == np.amax(y)]
# print x[charge_hist == np.amax(charge_hist)]
# plt.legend(loc=0)
# plt.show()
# Check Landau for different device thickness
# Create charge distribution in different device thicknesses, thus
# Landau MPW should change
self.simulate_data.dut_thickness = [
(i + 1) * 100 for i in range(self.simulate_data.n_duts)]
# To judge deposited charge, charge sharing has to be off
self.simulate_data.digitization_charge_sharing = False
self.simulate_data.create_data_and_store(
'simulated_data', n_events=100000)
check_charge()
# Check Landau for different device noiose
self.simulate_data.reset()
self.simulate_data.dut_thickness = [
200 for i in range(self.simulate_data.n_duts)] # Fix device thickness
# Create charge distribution in different device noise, thus Langau
# sigma should change
self.simulate_data.dut_noise = [
i * 1000 for i in range(self.simulate_data.n_duts)]
# To judge deposited charge, charge sharing has to be off
self.simulate_data.digitization_charge_sharing = False
self.simulate_data.create_data_and_store(
'simulated_data', n_events=100000)
check_charge()
@unittest.SkipTest # FIXME: FAILS
def test_beam_angle(self):
self.simulate_data.reset()
def check_beam_angle():
# Expected offsets in x, y at DUT planes due to initial beam angle
# theta and direction distribution phi = (start, stop)
expected_offsets_x, expected_offsets_y = [], []
if self.simulate_data.beam_direction[0] < self.simulate_data.beam_direction[1]:
mean_direction_cos = np.cos(np.arange(self.simulate_data.beam_direction[0], self.simulate_data.beam_direction[
1], 0.01)).mean() # A mean angle does not translate linearly to a mean offset
mean_direction_sin = np.sin(np.arange(self.simulate_data.beam_direction[
0], self.simulate_data.beam_direction[1], 0.01)).mean()
else:
mean_direction_cos = np.cos(
self.simulate_data.beam_direction[0])
mean_direction_sin = np.sin(
self.simulate_data.beam_direction[0])
for dut_index in range(self.simulate_data.n_duts):
offset = self.simulate_data.beam_position[
0] - self.simulate_data.offsets[dut_index][0]
expected_offsets_x.append(offset + mean_direction_cos * np.tan(
self.simulate_data.beam_angle / 1000.) * self.simulate_data.z_positions[dut_index])
expected_offsets_y.append(offset + mean_direction_sin * np.tan(
self.simulate_data.beam_angle / 1000.) * self.simulate_data.z_positions[dut_index])
# Extract results
mean_column, mean_row = [], []
for dut_index in range(self.simulate_data.n_duts):
with tb.open_file('simulated_data_DUT%d.h5' % dut_index, 'r') as in_file_h5:
mean_column.append((in_file_h5.root.Hits[:][
'column'].mean() - 1) * self.simulate_data.dut_pixel_size[dut_index][0])
mean_row.append((in_file_h5.root.Hits[:][
'row'].mean() - 1) * self.simulate_data.dut_pixel_size[dut_index][1])
# Check for similarity, on pixel width error expected (binning
# error)
self.assertTrue(np.allclose(expected_offsets_x, mean_column,
rtol=0.001, atol=self.simulate_data.dut_pixel_size[0][0]))
self.assertTrue(np.allclose(
expected_offsets_y, mean_row, rtol=0.001, atol=self.simulate_data.dut_pixel_size[0][0]))
# Test 1: Fixed theta angle, different fixed phi
# If the angle is too small this tests fails due to pixel
# discretisation error
self.simulate_data.beam_angle = 5
# If the pixel size is too big this tests fails due to pixel
# discretisation error
self.simulate_data.dut_pixel_size = [
(1, 1)] * self.simulate_data.n_duts
# If the sensor is too small the mean cannot be easily calculated
self.simulate_data.dut_n_pixel = [
(10000, 10000)] * self.simulate_data.n_duts
self.simulate_data.beam_angle_sigma = 0
self.simulate_data.beam_position_sigma = (0, 0)
self.simulate_data.dut_material_budget = [
0] * self.simulate_data.n_duts # Turn off multiple scattering
# Simplify position reconstruction
self.simulate_data.digitization_charge_sharing = False
self.simulate_data.shuffle_hits = False
for phi in [0, np.pi / 4., np.pi / 2., 3. / 4. * np.pi, np.pi, 5. * np.pi / 4., 3 * np.pi / 2.]:
self.simulate_data.beam_direction = (phi, phi)
self.simulate_data.create_data_and_store(
'simulated_data', n_events=10000)
check_beam_angle()
# Test 2: Fixed theta angle, different phi ranges
for phi_max in [0, np.pi / 4., np.pi / 2., 3. / 4. * np.pi, np.pi, 5. * np.pi / 4., 3 * np.pi / 2.]:
self.simulate_data.beam_direction = (0, phi_max)
self.simulate_data.create_data_and_store(
'simulated_data', n_events=10000)
check_beam_angle()
# Test 3: Fixed theta angle, different phi ranges
for phi_max in [0, np.pi / 4., np.pi / 2., 3. / 4. * np.pi, np.pi, 5. * np.pi / 4., 3 * np.pi / 2.]:
self.simulate_data.beam_direction = (0, phi_max)
self.simulate_data.create_data_and_store(
'simulated_data', n_events=10000)
check_beam_angle()
# Test 4: Gaussian dstributed theta angle, full phi range
self.simulate_data.beam_angle_sigma = 2
self.simulate_data.beam_direction = (0, 2. * np.pi)
self.simulate_data.create_data_and_store(
'simulated_data', n_events=10000)
check_beam_angle()
def test_multiple_scattering(self):
self.simulate_data.reset()
# Set two planes and check the scattering angle due to the material
# budget of the first plane
self.simulate_data.n_duts = 2
self.simulate_data.set_std_settings()
self.simulate_data.beam_angle = 0
self.simulate_data.beam_angle_sigma = 0
self.simulate_data.beam_position_sigma = (0, 0)
# 1m distance to see scattering better
self.simulate_data.z_positions = [
i * 1000000 + 1000 for i in range(self.simulate_data.n_duts)]
# If the pixel size is too big this tests fails due to pixel
# discretisation error
self.simulate_data.dut_pixel_size = [
(1, 1)] * self.simulate_data.n_duts
self.simulate_data.dut_n_pixel = [
(10000, 10000)] * self.simulate_data.n_duts
self.simulate_data.shuffle_hits = False
# Simplify position reconstruction
self.simulate_data.digitization_charge_sharing = False
def gauss(x, A, mu, sigma): # Scattering angle theta fit function
return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))
def check_scattering_angle():
# Expected scattering angle theta_0
theta_0 = self.simulate_data._scattering_angle_sigma(
self.simulate_data.dut_material_budget[0], charge_number=1) * 1000
# Extract theta from simulation results by using the hit positions
with tb.open_file('simulated_data_DUT0.h5', 'r') as in_file_h5:
dut0_x = (in_file_h5.root.Hits[:][
'column'] - 1) * self.simulate_data.dut_pixel_size[0][0]
dut0_y = (
in_file_h5.root.Hits[:]['row'] - 1) * self.simulate_data.dut_pixel_size[0][1]
with tb.open_file('simulated_data_DUT1.h5', 'r') as in_file_h5:
dut1_x = (in_file_h5.root.Hits[:][
'column'] - 1) * self.simulate_data.dut_pixel_size[1][0]
dut1_y = (
in_file_h5.root.Hits[:]['row'] - 1) * self.simulate_data.dut_pixel_size[1][1]
# Calculate theta in spherical coordinates from data
dx = dut1_x.astype(np.float) - dut0_x.astype(np.int32)
dy = dut1_y.astype(np.float) - dut0_y.astype(np.float)
dz = np.ones_like(
dx) * (self.simulate_data.z_positions[1] - self.simulate_data.z_positions[0])
_, theta, _ = geometry_utils.cartesian_to_spherical(dx, dy, dz)
# Special case: no material budget thus not scattering, theta has
# to be beam angle
if theta_0 == 0:
self.assertTrue(
np.allclose(self.simulate_data.beam_angle, theta * 1000., atol=0.01))
return
if self.simulate_data.beam_angle == 0:
# Histogramm scattering angle distribution
hist, bins = np.histogram(
theta * 1000, range=(0, np.pi), bins=1000)
x, y = (bins[:-1] + bins[1:]) / 2., hist
# import matplotlib.pyplot as plt
# plt.bar(x, y, width=np.diff(x)[0])
# plt.plot(x, gauss(x, *(np.amax(y), 0, theta_0)), 'r-', linewidth=2)
# plt.legend()
# plt.show()
# Fit scatterign distribution
# Fit theta distribution
coeff, _ = curve_fit(
gauss, x, y, p0=[np.amax(hist), 0, theta_0])
# Check for similarity, slight differences most likely due to
# pixel position binning
self.assertTrue(np.allclose(theta_0, coeff[
2], atol=0.02), 'The scattering angle for multiple scattering is wrong')
# TODO: this is maybe a bug, the theta distribution with starting
# beam angle does not look gaussian; just check the mean here;
else:
self.assertTrue(np.allclose(np.mean(theta * 1000.), self.simulate_data.beam_angle,
atol=0.05), 'The beam direction with multiple scattering is wrong')
# Test 1: Check scattering for different device thickness from 50 um to
# 1000 um
for device_thickness in range(0, 1000, 50): # Change the thickness
self.simulate_data.dut_thickness = [
device_thickness] * self.simulate_data.n_duts
self.simulate_data.dut_material_budget = [self.simulate_data.dut_thickness[
i] * 1e-4 / 9.370 for i in range(self.simulate_data.n_duts)] # Assume silicon sensor
self.simulate_data.create_data_and_store(
'simulated_data', n_events=100000)
check_scattering_angle()
# Test 2: Check scattering for different device z positions
# Put planes at different positions
for z_position in ([[i, i + 1000000] for i in range(0, 1000000, 250000)]):
self.simulate_data.z_positions = z_position
self.simulate_data.create_data_and_store(
'simulated_data', n_events=100000)
check_scattering_angle()
# Test 3: Check with beam angle
for beam_angle in range(5):
self.simulate_data.beam_angle = beam_angle
self.simulate_data.z_positions = [0, 100000]
self.simulate_data.dut_thickness = [
1000] * self.simulate_data.n_duts
self.simulate_data.dut_material_budget = [self.simulate_data.dut_thickness[
i] * 1e-4 / 9.370 for i in range(self.simulate_data.n_duts)]
self.simulate_data.create_data_and_store(
'simulated_data', n_events=100000)
check_scattering_angle()
    def test_dut_rotation(self):
        ''' Check hit correlations between two DUT planes for rotated DUTs.

        The second DUT is rotated (alpha/beta/gamma) and shifted in x/y with
        respect to the first one. Transforming the DUT 1 hits into the global
        system must yield a straight-line correlation to the DUT 0 hits with
        slope 1 and offset 0.
        '''
        self.simulate_data.reset()
        # Set two planes and check the scattering angle due to the material
        # budget of the first plane
        self.simulate_data.n_duts = 2
        self.simulate_data.set_std_settings()
        self.simulate_data.tracks_per_event_sigma = 0
        self.simulate_data.beam_angle = 0
        self.simulate_data.beam_angle_sigma = 0
        # If the pixel size is too big this test fails due to pixel
        # discretisation error
        self.simulate_data.dut_pixel_size = [
            (1, 1)] * self.simulate_data.n_duts
        self.simulate_data.dut_n_pixel = [
            (100000, 100000)] * self.simulate_data.n_duts
        # 1m distance to avoid intersecting DUT planes
        self.simulate_data.z_positions = [
            i * 1000000. + 1000. for i in range(self.simulate_data.n_duts)]
        self.simulate_data.tracks_per_event = 1
        self.simulate_data.tracks_per_event_sigma = 0
        # Not needed, save time
        self.simulate_data.digitization_charge_sharing = False
        self.simulate_data.dut_material_budget = [
            0] * self.simulate_data.n_duts  # Turn off multiple scattering

        # Rotations are checked by column / row correlation in the global
        # coordinate system at 0,0,0 of the first DUT; correlations have to
        # have slope 1 and offset 0
        def check_rotations():
            def line(x, c0, c1):  # Correlation line fit
                return c1 * x + c0
            with tb.open_file('simulated_data_DUT0.h5', 'r') as in_file_1_h5:
                with tb.open_file('simulated_data_DUT1.h5', 'r') as in_file_2_h5:
                    # Merge data on event basis for correlation, since one
                    # cannot assume that every plane is always hit
                    hits_0, hits_1 = analysis_utils.merge_on_event_number(
                        in_file_1_h5.root.Hits[:], in_file_2_h5.root.Hits[:])
                    # Get transformation matrix (translation + rotation) from
                    # the simulation setting
                    transformation_matrix = geometry_utils.local_to_global_transformation_matrix(
                        x=self.simulate_data.offsets[1][0] - self.simulate_data.offsets[0][0],
                        y=self.simulate_data.offsets[1][1] - self.simulate_data.offsets[0][1],
                        z=self.simulate_data.z_positions[1] - self.simulate_data.z_positions[0],
                        alpha=self.simulate_data.rotations[1][0],
                        beta=self.simulate_data.rotations[1][1],
                        gamma=self.simulate_data.rotations[1][2],
                    )
                    # Transform DUT 1 hits into the global system (z set to 0)
                    hits_1_column_global, hits_1_row_global, _ = geometry_utils.apply_transformation_matrix(
                        hits_1['column'], hits_1['row'], np.zeros_like(hits_1['column']), transformation_matrix)
                    # Fit straight lines to the column / row correlations
                    coeff_column, _ = curve_fit(
                        line, hits_1_column_global, hits_0['column'], p0=[0, 1.])
                    coeff_row, _ = curve_fit(
                        line, hits_1_row_global, hits_0['row'], p0=[0, 1.])
                    # Check column / row relative offsets from line fit
                    # offsets with 1 mu precision
                    # TODO: Might be a bug that sometimes not many hits are
                    # created or just geometry
                    if hits_1_column_global.shape[0] > 25:
                        self.assertAlmostEqual(coeff_column[0], 0., delta=1)
                        self.assertAlmostEqual(coeff_row[0], 0., delta=1)
                        # Check alpha / beta angles from line fit slopes
                        # with 0.1 % precision
                        self.assertAlmostEqual(
                            coeff_column[1], 1., delta=0.001)
                        self.assertAlmostEqual(coeff_row[1], 1., delta=0.001)
                        # plt.plot(hits_1_column_global, hits_0['column'], 'o', label='Data')
                        # plt.plot(hits_1_column_global, line(hits_1_column_global, *coeff_column), '--', label='Fit')
                        # plt.plot(hits_1_row_global, hits_0['row'], 'o', label='Data row')
                        # plt.plot(hits_1_row_global, line(hits_1_row_global, *coeff_row), '--', label='Fit row')
                        # plt.legend(loc=0)
                        # plt.show()

        # Test: Check correlation for different alpha, beta, gamma angles
        # (x/y/z-axis rotation) and different relative offsets in x/y between
        # the planes
        for alpha in [0, np.pi / 8., np.pi / 6., np.pi / 4., np.pi / 3.]:
            for beta in [0, np.pi / 8., np.pi / 6., np.pi / 4., np.pi / 3.]:
                for gamma in [0, np.pi / 8., np.pi / 6., np.pi / 4., np.pi / 3.]:
                    for offset_x in range(-1000, 1001, 500):
                        for offset_y in range(-1000, 1001, 500):
                            self.simulate_data.rotations[
                                1] = (alpha, beta, gamma)
                            # Set x/y shift with respect to DUT 0
                            self.simulate_data.offsets[1] = (
                                self.simulate_data.offsets[0][0] + offset_x,
                                self.simulate_data.offsets[0][1] + offset_y)
                            self.simulate_data.create_data_and_store(
                                'simulated_data', n_events=1000)
                            check_rotations()
    def test_simulation(self):
        ''' Check the full simulation with standard settings.

        Runs the simulation for the default 6-DUT telescope and compares
        every created hit file against stored reference fixtures.
        '''
        self.simulate_data.reset()
        self.simulate_data.set_std_settings()
        self.assertEqual(self.simulate_data.n_duts, 6)
        self.simulate_data.create_data_and_store(
            'simulated_data', n_events=10000)
        # Compare each DUT hit file with the downloaded reference fixture;
        # exact=False allows small numerical deviations
        for dut_index in range(self.simulate_data.n_duts):
            data_equal, error_msg = test_tools.compare_h5_files('simulated_data_DUT%d.h5' % dut_index,
                                                                analysis_utils.get_data('fixtures/simulation/simulated_data_DUT%d.h5' % dut_index,
                                                                                        output=os.path.join(testing_path, 'fixtures/simulation/simulated_data_DUT%d.h5' % dut_index)),
                                                                exact=False)
            self.assertTrue(data_equal, msg=error_msg)
if __name__ == '__main__':
    # Configure logging only when run as a script, not when imported
    import logging
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
    # NOTE(review): the loaded case is named TestHitAnalysis although this
    # module contains simulation tests -- looks like a copy-paste from another
    # test module; confirm the class name defined above
    suite = unittest.TestLoader().loadTestsFromTestCase(TestHitAnalysis)
    unittest.TextTestRunner(verbosity=2).run(suite)
| YannickDieter/testbeam_analysis | testbeam_analysis/testing/test_simulation.py | Python | mit | 25,154 | [
"Gaussian"
] | 1d29aa793a611daae7f5262e0ca2bc696064a12b61dc0cf9328cb80390cb25f8 |
# -*- coding: utf-8 -*-
"""
.. _plot_source_alignment:
Source alignment and coordinate frames
======================================
The aim of this tutorial is to show how to visually assess that the data are
well aligned in space for computing the forward solution, and understand
the different coordinate frames involved in this process.
.. contents:: Topics
:local:
:depth: 2
Let's start out by loading some data.
"""
import os.path as op
import numpy as np
from mayavi import mlab
import mne
from mne.datasets import sample
print(__doc__)

# Paths into the MNE "sample" dataset: the raw MEG/EEG recording, the
# precomputed head<->MRI transform and an oct-6 source space
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_raw-trans.fif')
raw = mne.io.read_raw_fif(raw_fname)
trans = mne.read_trans(trans_fname)
src = mne.read_source_spaces(op.join(subjects_dir, 'sample', 'bem',
                                     'sample-oct-6-src.fif'))
###############################################################################
# Understanding coordinate frames
# -------------------------------
# For M/EEG source imaging, there are three **coordinate frames** that we must
# bring into alignment using two 3D
# `transformation matrices <trans_matrices_>`_
# that define how to rotate and translate points in one coordinate frame
# to their equivalent locations in another.
#
# :func:`mne.viz.plot_alignment` is a very useful function for inspecting
# these transformations, and the resulting alignment of EEG sensors, MEG
# sensors, brain sources, and conductor models. If the ``subjects_dir`` and
# ``subject`` parameters are provided, the function automatically looks for the
# Freesurfer MRI surfaces to show from the subject's folder.
#
# We can use the ``show_axes`` argument to see the various coordinate frames
# given our transformation matrices. These are shown by axis arrows for each
# coordinate frame:
#
# * shortest arrow is (**R**)ight/X
# * medium is forward/(**A**)nterior/Y
# * longest is up/(**S**)uperior/Z
#
# i.e., a **RAS** coordinate system in each case. We can also set
# the ``coord_frame`` argument to choose which coordinate
# frame the camera should initially be aligned with.
#
# Let's take a look:
# Visualize sensors, digitization points and the dense head surface;
# show_axes=True draws the head / MEG / MRI coordinate frame axes
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
                       subjects_dir=subjects_dir, surfaces='head-dense',
                       show_axes=True, dig=True, eeg=[], meg='sensors',
                       coord_frame='meg')
mlab.view(45, 90, distance=0.6, focalpoint=(0., 0., 0.))
# Print the translation parts (in mm) of the device->head and head->MRI
# transforms stored in the measurement info / trans file
print('Distance from head origin to MEG origin: %0.1f mm'
      % (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3])))
print('Distance from head origin to MRI origin: %0.1f mm'
      % (1000 * np.linalg.norm(trans['trans'][:3, 3])))
###############################################################################
# Coordinate frame definitions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# .. raw:: html
#
# <style>
# .pink {color:DarkSalmon; font-weight:bold}
# .blue {color:DeepSkyBlue; font-weight:bold}
# .gray {color:Gray; font-weight:bold}
# .magenta {color:Magenta; font-weight:bold}
# .purple {color:Indigo; font-weight:bold}
# .green {color:LimeGreen; font-weight:bold}
# .red {color:Red; font-weight:bold}
# </style>
#
# .. role:: pink
# .. role:: blue
# .. role:: gray
# .. role:: magenta
# .. role:: purple
# .. role:: green
# .. role:: red
#
# 1. Neuromag head coordinate frame ("head", :pink:`pink axes`)
# Defined by the intersection of 1) the line between the LPA
# (:red:`red sphere`) and RPA (:purple:`purple sphere`), and
# 2) the line perpendicular to this LPA-RPA line one that goes through
# the Nasion (:green:`green sphere`).
# The axes are oriented as **X** origin→RPA, **Y** origin→Nasion,
# **Z** origin→upward (orthogonal to X and Y).
#
# .. note:: This gets defined during the head digitization stage during
# acquisition, often by use of a Polhemus or other digitizer.
#
# 2. MEG device coordinate frame ("meg", :blue:`blue axes`)
# This is defined by the MEG manufacturers. From the Elekta user manual:
#
#     The origin of the device coordinate system is located at the center
#     of the posterior spherical section of the helmet with X axis going
#     from left to right and Y axis pointing front. The Z axis is, again,
#     normal to the X-Y plane with positive direction up.
#
# .. note:: The device is coregistered with the head coordinate frame
# during acquisition via emission of sinusoidal currents in
# head position indicator (HPI) coils
# (:magenta:`magenta spheres`) at the beginning of the
# recording. This is stored in ``raw.info['dev_head_t']``.
#
# 3. MRI coordinate frame ("mri", :gray:`gray axes`)
# Defined by Freesurfer, the MRI (surface RAS) origin is at the
#    center of a 256×256×256 1 mm isotropic volume (may not be in the center
# of the head).
#
# .. note:: This is aligned to the head coordinate frame that we
# typically refer to in MNE as ``trans``.
#
# A bad example
# -------------
# Let's try using ``trans=None``, which (incorrectly!) equates the MRI
# and head coordinate frames.
# trans=None (incorrectly) equates the MRI and head coordinate frames to
# demonstrate a misaligned result
mne.viz.plot_alignment(raw.info, trans=None, subject='sample', src=src,
                       subjects_dir=subjects_dir, dig=True,
                       surfaces=['head-dense', 'white'], coord_frame='meg')
###############################################################################
# It is quite clear that the MRI surfaces (head, brain) are not well aligned
# to the head digitization points (dots).
#
# A good example
# --------------
# Here is the same plot, this time with the ``trans`` properly defined
# (using a precomputed matrix).
# Same plot with the proper precomputed head<->MRI transform applied
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
                       src=src, subjects_dir=subjects_dir, dig=True,
                       surfaces=['head-dense', 'white'], coord_frame='meg')
###############################################################################
# Defining the head↔MRI ``trans`` using the GUI
# ---------------------------------------------
# You can try creating the head↔MRI transform yourself using
# :func:`mne.gui.coregistration`.
#
# * First you must load the digitization data from the raw file
# (``Head Shape Source``). The MRI data is already loaded if you provide the
# ``subject`` and ``subjects_dir``. Toggle ``Always Show Head Points`` to see
# the digitization points.
# * To set the landmarks, toggle ``Edit`` radio button in ``MRI Fiducials``.
# * Set the landmarks by clicking the radio button (LPA, Nasion, RPA) and then
# clicking the corresponding point in the image.
# * After doing this for all the landmarks, toggle ``Lock`` radio button. You
# can omit outlier points, so that they don't interfere with the finetuning.
#
# .. note:: You can save the fiducials to a file and pass
# ``mri_fiducials=True`` to plot them in
# :func:`mne.viz.plot_alignment`. The fiducials are saved to the
# subject's bem folder by default.
# * Click ``Fit Head Shape``. This will align the digitization points to the
# head surface. Sometimes the fitting algorithm doesn't find the correct
# alignment immediately. You can try first fitting using LPA/RPA or fiducials
# and then align according to the digitization. You can also finetune
# manually with the controls on the right side of the panel.
# * Click ``Save As...`` (lower right corner of the panel), set the filename
# and read it with :func:`mne.read_trans`.
#
# For more information, see step by step instructions
# `in these slides
# <https://www.slideshare.net/mne-python/mnepython-coregistration>`_.
# Uncomment the following line to align the data yourself.
# mne.gui.coregistration(subject='sample', subjects_dir=subjects_dir)
###############################################################################
# .. _plot_source_alignment_without_mri:
#
# Alignment without MRI
# ---------------------
# The surface alignments above are possible if you have the surfaces available
# from Freesurfer. :func:`mne.viz.plot_alignment` automatically searches for
# the correct surfaces from the provided ``subjects_dir``. Another option is
# to use a :ref:`spherical conductor model <ch_forward_spherical_model>`. It is
# passed through ``bem`` parameter.
# Fit a spherical conductor model to the digitization points and build a
# volumetric source space inside it (10 mm grid) -- no MRI surfaces needed
sphere = mne.make_sphere_model(info=raw.info, r0='auto', head_radius='auto')
src = mne.setup_volume_source_space(sphere=sphere, pos=10.)
mne.viz.plot_alignment(
    raw.info, eeg='projected', bem=sphere, src=src, dig=True,
    surfaces=['brain', 'outer_skin'], coord_frame='meg', show_axes=True)
###############################################################################
# It is also possible to use :func:`mne.gui.coregistration`
# to warp a subject (usually ``fsaverage``) to subject digitization data, see
# `these slides
# <https://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
#
# .. _trans_matrices: https://en.wikipedia.org/wiki/Transformation_matrix
| adykstra/mne-python | tutorials/source-modeling/plot_source_alignment.py | Python | bsd-3-clause | 9,208 | [
"Mayavi"
] | bfcbd735a9ec9ddcf20216590c58b9cbaae900577943390b9a34022f121af924 |
#!/usr/bin/env python
"""
Delete a given production
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    """Delete the production whose ID is given on the command line.

    Exits with status 0 on success and -1 when the deletion fails.
    """
    # Registering arguments will automatically add their description to the
    # help menu
    Script.registerArgument("prodID: Production ID")
    _, args = Script.parseCommandLine()

    # Imported after parseCommandLine() so DIRAC is fully initialized first
    from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient

    # get arguments
    prodID = args[0]

    prodClient = ProductionClient()
    res = prodClient.deleteProduction(prodID)
    if res["OK"]:
        # Fixed typo in the user-facing message: 'successully' -> 'successfully'
        DIRAC.gLogger.notice("Production %s successfully deleted" % prodID)
    else:
        DIRAC.gLogger.error(res["Message"])
        DIRAC.exit(-1)
    DIRAC.exit(0)
if __name__ == "__main__":
    # Script entry point when executed directly
    main()
| DIRACGrid/DIRAC | src/DIRAC/ProductionSystem/scripts/dirac_prod_delete.py | Python | gpl-3.0 | 751 | [
"DIRAC"
] | 65922a226ab46f03af139a0cfd85c44da0c14a00a0b2d9713cd8e363e48196ee |
#!/usr/bin/env python
'''uditransfer module: '''
from __future__ import generators
import sys
# Allow running the package directly from the source tree
sys.path.append(".")

# Import the sibling ``util`` module: prefer the package-relative import,
# fall back to a plain import when executed as a script.
# ImportError covers Python 3 relative-import failures; ValueError covers the
# Python 2 "attempted relative import in non-package" case (this module still
# carries a ``from __future__`` import). The original bare ``except:`` also
# swallowed unrelated errors such as KeyboardInterrupt.
try:
    from . import util
except (ImportError, ValueError):
    import util

__version__ = "0.4"
__author__ = [
    "Desheng Xu <dxu@ptc.com>"
]
__license__ = "PTC Only"
__contributors__ = "Neil, Brian "

# Dead commented-out logging configuration removed; configure logging in the
# application entry point instead of at package import time.
| xudesheng/uditransfer | uditransfer/__init__.py | Python | apache-2.0 | 745 | [
"Brian"
] | 86d9d5e96f0a4cb581e3303a0ff70953fb3bbf3b3900eb958f409b4c399cb9a9 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.